diff --git a/.gitignore b/.gitignore index ae3c1726048..7ee8f093d99 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,4 @@ /bin/ +.openshift_install_state.json +.openshift_install.log +.vscode diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 6c60ecb6e96..92d04b444f5 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -86,3 +86,9 @@ aliases: - Gal-Zaidman - rgolangh - eslutsky + equinix-approvers: + - displague + - detiber + equinix-reviewers: + - displague + - detiber diff --git a/README.md b/README.md index 41a086ce10d..24a2e310d3a 100644 --- a/README.md +++ b/README.md @@ -15,6 +15,7 @@ * [Power](docs/user/power/install_upi.md) * [oVirt](docs/user/ovirt/install_ipi.md) * [oVirt (UPI)](docs/user/ovirt/install_upi.md) +* [Equinix Metal](docs/user/equinixmetal/install_ipi.md) * [vSphere](docs/user/vsphere/README.md) * [vSphere (UPI)](docs/user/vsphere/install_upi.md) * [z/VM](docs/user/zvm/install_upi.md) diff --git a/cmd/openshift-install/destroy.go b/cmd/openshift-install/destroy.go index e813e095a3c..df0571d270a 100644 --- a/cmd/openshift-install/destroy.go +++ b/cmd/openshift-install/destroy.go @@ -14,6 +14,7 @@ import ( _ "github.com/openshift/installer/pkg/destroy/azure" _ "github.com/openshift/installer/pkg/destroy/baremetal" "github.com/openshift/installer/pkg/destroy/bootstrap" + _ "github.com/openshift/installer/pkg/destroy/equinixmetal" _ "github.com/openshift/installer/pkg/destroy/gcp" _ "github.com/openshift/installer/pkg/destroy/libvirt" _ "github.com/openshift/installer/pkg/destroy/openstack" diff --git a/cmd/openshift-install/gather.go b/cmd/openshift-install/gather.go index fde2c203c5a..fe09b0ecbff 100644 --- a/cmd/openshift-install/gather.go +++ b/cmd/openshift-install/gather.go @@ -28,6 +28,7 @@ import ( gatheraws "github.com/openshift/installer/pkg/terraform/gather/aws" gatherazure "github.com/openshift/installer/pkg/terraform/gather/azure" gatherbaremetal "github.com/openshift/installer/pkg/terraform/gather/baremetal" + gatherequinix "github.com/openshift/installer/pkg/terraform/gather/equinixmetal" gathergcp "github.com/openshift/installer/pkg/terraform/gather/gcp" gatherlibvirt "github.com/openshift/installer/pkg/terraform/gather/libvirt" gatheropenstack "github.com/openshift/installer/pkg/terraform/gather/openstack" @@ -37,6 +38,7 @@ import ( awstypes "github.com/openshift/installer/pkg/types/aws" azuretypes "github.com/openshift/installer/pkg/types/azure" baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" + equinixtypes "github.com/openshift/installer/pkg/types/equinixmetal" gcptypes "github.com/openshift/installer/pkg/types/gcp" libvirttypes "github.com/openshift/installer/pkg/types/libvirt" openstacktypes "github.com/openshift/installer/pkg/types/openstack" @@ -228,6 +230,15 @@ func extractHostAddresses(config *types.InstallConfig, tfstate *terraform.State) return bootstrap, port, masters, err } masters, err = gatherovirt.ControlPlaneIPs(tfstate) + case equinixtypes.Name: + bootstrap, err = gatherequinix.BootstrapIP(tfstate) + if err != nil { + return bootstrap, port, masters, err + } + masters, err = gatherequinix.ControlPlaneIPs(tfstate) + if err != nil { + logrus.Error(err) + } case vspheretypes.Name: bootstrap, err = gathervsphere.BootstrapIP(config, tfstate) if err != nil { diff --git a/data/data/bootstrap/equinixmetal/OWNERS b/data/data/bootstrap/equinixmetal/OWNERS new file mode 100644 index 00000000000..06e8c730e33 --- /dev/null +++ b/data/data/bootstrap/equinixmetal/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: 
https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +approvers: + - equinix-approvers +reviewers: + - equinix-reviewers diff --git a/data/data/bootstrap/files/usr/local/bin/bootkube.sh.template b/data/data/bootstrap/files/usr/local/bin/bootkube.sh.template index 91bb31587b5..e3f6092bf5f 100755 --- a/data/data/bootstrap/files/usr/local/bin/bootkube.sh.template +++ b/data/data/bootstrap/files/usr/local/bin/bootkube.sh.template @@ -275,6 +275,7 @@ then copy_static_resources_for baremetal copy_static_resources_for openstack copy_static_resources_for ovirt + copy_static_resources_for equinixmetal copy_static_resources_for vsphere cp mco-bootstrap/manifests/* manifests/ diff --git a/data/data/equinixmetal/OWNERS b/data/data/equinixmetal/OWNERS new file mode 100644 index 00000000000..06e8c730e33 --- /dev/null +++ b/data/data/equinixmetal/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +approvers: + - equinix-approvers +reviewers: + - equinix-reviewers diff --git a/data/data/equinixmetal/README.md b/data/data/equinixmetal/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/equinixmetal/bootstrap/README.md b/data/data/equinixmetal/bootstrap/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/equinixmetal/bootstrap/main.tf b/data/data/equinixmetal/bootstrap/main.tf new file mode 100644 index 00000000000..06d784c9388 --- /dev/null +++ b/data/data/equinixmetal/bootstrap/main.tf @@ -0,0 +1,176 @@ + +locals { + arch = "x86_64" + // TODO(displague) use an EquinixMetal proxy + /* + coreos_baseurl = "http://mirror.openshift.com/pub/openshift-v4/${local.arch}/dependencies/rhcos" + coreos_url = "${local.coreos_baseurl}/${var.ocp_version}/${var.ocp_version}.${var.ocp_version_zstream}" + coreos_filenm = "rhcos-${var.ocp_version}.${var.ocp_version_zstream}-${local.arch}" + coreos_img = "${local.coreos_filenm}-metal.${local.arch}.raw.gz" + coreos_kernel = "${local.coreos_filenm}-installer-kernel-${local.arch}" + coreos_initrd = "${local.coreos_filenm}-installer-initramfs.${local.arch}.img" + */ + + // extracting "api." 
from + external_name = "api-int.${replace(var.cluster_domain, ".${var.base_domain}", "")}.${var.base_domain}" +} + +/* + +data "template_file" "user_data" { + template = file("${path.module}/templates/user_data_${var.operating_system}.sh") +} + +data "template_file" "ipxe_script" { + depends_on = [packet_device.bootstrap] + for_each = toset(var.nodes) + template = file("${path.module}/templates/ipxe.tpl") + + vars = { + node_type = each.value + bootstrap_ip = packet_device.bootstrap.access_public_ipv4 + ocp_version = var.ocp_version + ocp_version_zstream = var.ocp_version_zstream + } +} + +data "template_file" "ignition_append" { + depends_on = [packet_device.bootstrap] + for_each = toset(var.nodes) + template = file("${path.module}/templates/ignition-append.json.tpl") + + vars = { + node_type = each.value + bootstrap_ip = packet_device.bootstrap.access_public_ipv4 + cluster_name = var.cluster_name + cluster_basedomain = var.cluster_basedomain + } +} +*/ + +resource "packet_device" "bootstrap" { + hostname = local.external_name + plan = var.plan + facilities = [var.facility] + // metro = var.metro + operating_system = "custom_ipxe" + billing_cycle = var.billing_cycle + project_id = var.project_id + ipxe_script_url = "https://gist.githubusercontent.com/displague/5282172449a83c7b83821f8f8333a072/raw/0f0d50c744bb758689911d1f8d421b7730c0fb3e/rhcos.ipxe" + + // user_data = data.template_file.user_data.rendered + user_data = var.ignition +} + +resource "packet_ip_attachment" "node-address" { + device_id = packet_device.bootstrap.id + cidr_notation = "${var.ip_address}/32" +} + +/* +resource "null_resource" "dircheck" { + + provisioner "remote-exec" { + + connection { + private_key = file(var.ssh_private_key_path) + host = packet_device.bootstrap.access_public_ipv4 + } + + + inline = [ + "while [ ! -d /usr/share/nginx/html ]; do sleep 2; done; ls /usr/share/nginx/html/", + "while [ ! 
-f /usr/lib/systemd/system/nfs-server.service ]; do sleep 2; done; ls /usr/lib/systemd/system/nfs-server.service" + ] + } +} + +resource "null_resource" "ocp_install_ignition" { + + depends_on = [null_resource.dircheck] + + + provisioner "remote-exec" { + + connection { + private_key = file(var.ssh_private_key_path) + host = packet_device.bootstrap.access_public_ipv4 + } + + + inline = [ + "curl -o /usr/share/nginx/html/${local.coreos_img} ${local.coreos_url}/${local.coreos_img}", + "curl -o /usr/share/nginx/html/${local.coreos_kernel} ${local.coreos_url}/${local.coreos_kernel}", + "curl -o /usr/share/nginx/html/${local.coreos_initrd} ${local.coreos_url}/${local.coreos_initrd}", + "chmod -R 0755 /usr/share/nginx/html/" + ] + } +} + +resource "null_resource" "ipxe_files" { + + depends_on = [null_resource.dircheck] + for_each = data.template_file.ipxe_script + + provisioner "file" { + + connection { + private_key = file(var.ssh_private_key_path) + host = packet_device.bootstrap.access_public_ipv4 + } + + content = each.value.rendered + destination = "/usr/share/nginx/html/${each.key}.ipxe" + } + + provisioner "remote-exec" { + + connection { + private_key = file(var.ssh_private_key_path) + host = packet_device.bootstrap.access_public_ipv4 + } + + + inline = [ + "chmod -R 0755 /usr/share/nginx/html/", + ] + } +} + +resource "null_resource" "ignition_append_files" { + + depends_on = [null_resource.dircheck] + for_each = data.template_file.ignition_append + + provisioner "file" { + + connection { + private_key = file(var.ssh_private_key_path) + host = packet_device.bootstrap.access_public_ipv4 + } + + content = each.value.rendered + destination = "/usr/share/nginx/html/${each.key}-append.ign" + } + + provisioner "remote-exec" { + + connection { + private_key = file(var.ssh_private_key_path) + host = packet_device.bootstrap.access_public_ipv4 + } + + + inline = [ + "chmod -R 0755 /usr/share/nginx/html/", + ] + } +} + + +output "finished" { + depends_on = [null_resource.file_uploads, null_resource.ipxe_files] + value = "Loadbalancer provisioning finished." +} + +*/ diff --git a/data/data/equinixmetal/bootstrap/output.tf b/data/data/equinixmetal/bootstrap/output.tf new file mode 100644 index 00000000000..0285e7e3e79 --- /dev/null +++ b/data/data/equinixmetal/bootstrap/output.tf @@ -0,0 +1,3 @@ +output "lb_ip" { + value = packet_device.bootstrap.access_public_ipv4 +} \ No newline at end of file diff --git a/data/data/equinixmetal/bootstrap/variables.tf b/data/data/equinixmetal/bootstrap/variables.tf new file mode 100644 index 00000000000..1df22b7c518 --- /dev/null +++ b/data/data/equinixmetal/bootstrap/variables.tf @@ -0,0 +1,40 @@ +variable "ignition" { + type = string + description = "The content of the bootstrap ignition file." 
+} + + + + + + +variable "depends" { + type = any + default = null +} + +variable "plan" {} +variable "facility" { default = "" } +variable "metro" { default = "" } +variable "operating_system" {} +variable "project_id" {} +variable "billing_cycle" {} +variable "ssh_private_key_path" { default = "TODO" } +variable "cluster_domain" {} +variable "base_domain" {} +// variable "cf_zone_id" {} +//variable "ocp_version" {default = "TODO" } +//variable "ocp_version_zstream" {default = "TODO" } + +/* +variable "nodes" { + description = "Generic list of OpenShift node types" + type = list(string) + default = ["bootstrap", "master", "worker"] +} +*/ + +variable "ip_address" { + description = "IP Reservation IPv4 addresses to assign to the bootstrap node" + type = string +} diff --git a/data/data/equinixmetal/bootstrap/versions.tf b/data/data/equinixmetal/bootstrap/versions.tf new file mode 100644 index 00000000000..d9b6f790b92 --- /dev/null +++ b/data/data/equinixmetal/bootstrap/versions.tf @@ -0,0 +1,3 @@ +terraform { + required_version = ">= 0.12" +} diff --git a/data/data/equinixmetal/dns/dns.tf b/data/data/equinixmetal/dns/dns.tf new file mode 100644 index 00000000000..7f3e83cfa96 --- /dev/null +++ b/data/data/equinixmetal/dns/dns.tf @@ -0,0 +1,57 @@ +provider "dns" { + /** + // TODO: accept dns update options so DNS can be configured following EM devices + update { + server = "192.168.0.1" + key_name = "example.com." + key_algorithm = "hmac-md5" + key_secret = "3VwZXJzZWNyZXQ=" + } + **/ +} + +locals { + basedomain = join(".", [replace(var.cluster_name, ".${var.cluster_basedomain}", ""), var.cluster_basedomain]) +} + +data "dns_a_record_set" "bootstrap" { + host = "bootstrap.${local.basedomain}" +} + +data "dns_a_record_set" "masters" { + count = var.masters_count + host = "master${count.index}.${local.basedomain}" +} + +/* +data "dns_a_record_set" "etcd_a" { + count = var.masters_count + host = "etcd-${count.index}.${local.basedomain}" +} + +data "dns_srv_record_set" "etcd_srv" { + // Verifies etcd SRV records have been created + // TODO: verify that these match the etcd nodes + service = "_etcd-server-ssl._tcp.${local.basedomain}" +} +*/ + +/* +data "dns_a_record_set" "workers" { + count = var.workers_count + host = "worker${count.index}.${local.basedomain}" +} +*/ + +data "dns_a_record_set" "lb" { + host = "api-int.${local.basedomain}" +} + +/* +// *.apps is considered optional, don't validate it +data "dns_a_record_set" "apps" { + // TODO: validate that *.apps matches the lb + // TODO: permit CNAME, dns_cname_record_set is valid + host = "*.apps.${local.basedomain}" +} +*/ diff --git a/data/data/equinixmetal/dns/outputs.tf b/data/data/equinixmetal/dns/outputs.tf new file mode 100644 index 00000000000..16d01fac9cd --- /dev/null +++ b/data/data/equinixmetal/dns/outputs.tf @@ -0,0 +1,22 @@ +output "bootstrap_a" { + description = "IP Address of the bootstrap node" + value = data.dns_a_record_set.bootstrap.addrs[0] +} + +output "lb_a" { + description = "IP Address of the LoadBalancer node" + value = data.dns_a_record_set.lb.addrs[0] +} + +output "masters_a" { + description = "IP Addresses of the bootstrap node" + // TODO: this assume 1 address per master + value = flatten(data.dns_a_record_set.masters.*.addrs) +} + +/* +output "workers_a" { + description = "IP Addresses of the bootstrap node" + value = data.dns_a_record_set.workers.addrs +} +*/ diff --git a/data/data/equinixmetal/dns/variables.tf b/data/data/equinixmetal/dns/variables.tf new file mode 100644 index 00000000000..f5f3e59663b --- 
/dev/null +++ b/data/data/equinixmetal/dns/variables.tf @@ -0,0 +1,10 @@ +variable "cluster_name" { + type = string +} +variable "cluster_basedomain" { + type = string +} +variable "masters_count" { + type = number + default = 1 +} diff --git a/data/data/equinixmetal/dns/versions.tf b/data/data/equinixmetal/dns/versions.tf new file mode 100644 index 00000000000..d9b6f790b92 --- /dev/null +++ b/data/data/equinixmetal/dns/versions.tf @@ -0,0 +1,3 @@ +terraform { + required_version = ">= 0.12" +} diff --git a/data/data/equinixmetal/main.tf b/data/data/equinixmetal/main.tf new file mode 100644 index 00000000000..f8e7e4687a8 --- /dev/null +++ b/data/data/equinixmetal/main.tf @@ -0,0 +1,96 @@ +provider "packet" { + auth_token = var.metal_auth_token +} + + +module "dns" { + source = "./dns" + + cluster_name = var.cluster_domain + cluster_basedomain = var.base_domain +} + +module "bootstrap" { + source = "./bootstrap" + project_id = var.metal_project_id + facility = var.metal_facility + // metro = var.metal_metro + billing_cycle = var.metal_billing_cycle + plan = var.metal_machine_type + operating_system = var.metal_bootstrap_operating_system + ssh_private_key_path = var.metal_ssh_private_key_path + cluster_domain = var.cluster_domain + base_domain = var.base_domain + // cf_zone_id = var.cf_zone_id + //ocp_version = var.metal_ocp_version + //ocp_version_zstream = var.metal_ocp_version_zstream + //depends = [module.prepare_openshift.finished] + + ip_address = module.dns.bootstrap_a + + ignition = var.ignition_bootstrap +} + + +module "master" { + node_count = var.master_count + source = "./master" + + project_id = var.metal_project_id + facility = var.metal_facility + // metro = var.metal_metro + // billing_cycle = var.metal_billing_cycle + plan = var.metal_machine_type + operating_system = "custom_ipxe" + ssh_private_key_path = var.metal_ssh_private_key_path + cluster_domain = var.cluster_domain + base_domain = var.base_domain + // cf_zone_id = var.cf_zone_id + //ocp_version = var.metal_ocp_version + //ocp_version_zstream = var.metal_ocp_version_zstream + //depends = [module.prepare_openshift.finished] + + ip_addresses = module.dns.masters_a + + ignition = var.ignition_master + + bootstrap_ip = module.bootstrap.lb_ip +} + +/* +module "prepare_openshift" { + + source = "./modules/prereq" + + cluster_name = var.cluster_name + cluster_basedomain = var.cluster_basedomain + ocp_version = var.ocp_version + count_master = var.count_master + count_compute = var.count_compute + ssh_public_key_path = var.ssh_public_key_path + ssh_private_key_path = var.ssh_private_key_path + bootstrap_ip = module.bootstrap.lb_ip + ocp_api_token = var.ocp_cluster_manager_token + depends = [module.bootstrap.finished] +} + +module "openshift_install" { + source = "./modules/install" + + ssh_private_key_path = var.ssh_private_key_path + operating_system = var.bootstrap_operating_system + bootstrap_ip = module.bootstrap.lb_ip + count_master = var.count_master + count_compute = var.count_compute + cluster_name = var.cluster_name + cluster_basedomain = var.cluster_basedomain + bootstrap_ip = module.openshift_bootstrap.node_ip + master_ips = module.openshift_masters.node_ip + worker_ips = module.openshift_workers.node_ip + depends = [module.openshift_masters.node_ip, module.openshift_workers.node_ip] + + ocp_storage_nfs_enable = var.ocp_storage_nfs_enable + ocp_storage_ocs_enable = var.ocp_storage_ocs_enable + ocp_virtualization_enable = var.ocp_virtualization_enable +} +*/ diff --git a/data/data/equinixmetal/master/main.tf 
b/data/data/equinixmetal/master/main.tf new file mode 100644 index 00000000000..8cdd8176885 --- /dev/null +++ b/data/data/equinixmetal/master/main.tf @@ -0,0 +1,23 @@ +resource "packet_device" "node" { + count = var.node_count + + depends_on = [var.depends] + hostname = format("master%01d.%s.%s", count.index, replace(var.cluster_domain, ".${var.base_domain}", ""), var.base_domain) + operating_system = "custom_ipxe" + // ipxe_script_url = "http://${var.bootstrap_ip}:8080/${var.node_type}.ipxe" + billing_cycle = "hourly" + project_id = var.project_id + + plan = var.plan + facilities = [var.facility] + // metro = var.metro + ipxe_script_url = "https://gist.githubusercontent.com/displague/5282172449a83c7b83821f8f8333a072/raw/0f0d50c744bb758689911d1f8d421b7730c0fb3e/rhcos.ipxe" + + user_data = var.ignition +} + +resource "packet_ip_attachment" "node-address" { + count = var.node_count + device_id = packet_device.node[count.index].id + cidr_notation = jsonencode(var.ip_addresses) // "${var.ip_addresses[count.index]}/32" +} diff --git a/data/data/equinixmetal/master/outputs.tf b/data/data/equinixmetal/master/outputs.tf new file mode 100644 index 00000000000..5576addb5f2 --- /dev/null +++ b/data/data/equinixmetal/master/outputs.tf @@ -0,0 +1,3 @@ +output "finished" { + value = "delete me: Provisioning finished." +} \ No newline at end of file diff --git a/data/data/equinixmetal/master/variables.tf b/data/data/equinixmetal/master/variables.tf new file mode 100644 index 00000000000..2d38293e883 --- /dev/null +++ b/data/data/equinixmetal/master/variables.tf @@ -0,0 +1,29 @@ +variable "ignition" { + type = string + description = "The content of the bootstrap ignition file." +} + + +variable "plan" {} +variable "node_count" { + type = number +} +variable "facility" { default = "" } +variable "metro" { default = "" } +variable "cluster_domain" {} +variable "base_domain" {} +variable "ssh_private_key_path" {} +variable "project_id" {} +variable "bootstrap_ip" {} +variable "operating_system" {} +//variable "ocp_version" {default = "TODO" } +//variable "ocp_version_zstream" {default = "TODO" } +variable "depends" { + type = any + default = null +} + +variable "ip_addresses" { + description = "IP Reservation IPv4 addresses to assign to masters" + type = list(string) +} diff --git a/data/data/equinixmetal/master/versions.tf b/data/data/equinixmetal/master/versions.tf new file mode 100644 index 00000000000..d9b6f790b92 --- /dev/null +++ b/data/data/equinixmetal/master/versions.tf @@ -0,0 +1,3 @@ +terraform { + required_version = ">= 0.12" +} diff --git a/data/data/equinixmetal/variables-equinixmetal.tf b/data/data/equinixmetal/variables-equinixmetal.tf new file mode 100644 index 00000000000..9a90ed88cdd --- /dev/null +++ b/data/data/equinixmetal/variables-equinixmetal.tf @@ -0,0 +1,85 @@ +/* +variable "metal_cf_email" { + description = "Your Cloudflare email address" +} + +variable "metal_cf_api_key" { + description = "Your Cloudflare API key" +} + +variable "metal_cf_zone_id" { + description = "Your Cloudflare Zone" +} +*/ + +variable "metal_auth_token" { + description = "Your Equinix Metal API key" +} + +variable "metal_project_id" { + description = "Your Equinix Metal Project ID" +} + +variable "metal_ssh_private_key_path" { + description = "Your SSH private key path (used locally only)" + default = "~/.ssh/id_rsa" +} + +variable "metal_ssh_public_key_path" { + description = "Your SSH public key path (used for install-config.yaml)" + default = "~/.ssh/id_rsa.pub" +} + +variable 
"metal_bootstrap_operating_system" { + description = "Your preferred bootstrap operating systems (RHEL or CentOS)" + default = "rhel_7" +} + +variable "metal_facility" { + default = "" + description = "Your primary facility" +} + +variable "metal_metro" { + default = "" + description = "Your metro" +} + +variable "metal_billing_cycle" { + description = "Your billing cycle (hourly)" +} + +variable "metal_machine_type" { + description = "Plan for Compute Nodes" +} + +/* +variable "metal_ocp_version" { + default = "4.6" + description = "OpenShift minor release version" +} + +variable "metal_ocp_version_zstream" { + default = "3" + description = "OpenShift zstream version" +} + + +variable "metal_ocp_cluster_manager_token" { + description = "OpenShift Cluster Manager API Token used to generate your pullSecret (https://cloud.redhat.com/openshift/token)" +} + +variable "metal_ocp_storage_nfs_enable" { + description = "Enable configuration of NFS and NFS-related k8s provisioner/storageClass" + default = true +} +variable "metal_ocp_storage_ocs_enable" { + description = "Enable installation of OpenShift Container Storage via operator. This requires a minimum of 3 worker nodes" + default = false +} + +variable "metal_ocp_virtualization_enable" { + description = "Enable installation of OpenShift Virtualization via operator. This requires storage provided by OCS, NFS, and/or hostPath provisioner(s)" + default = false +} +*/ diff --git a/data/data/equinixmetal/versions.tf b/data/data/equinixmetal/versions.tf new file mode 100644 index 00000000000..d9b6f790b92 --- /dev/null +++ b/data/data/equinixmetal/versions.tf @@ -0,0 +1,3 @@ +terraform { + required_version = ">= 0.12" +} diff --git a/data/data/equinixmetal/vnet/internal-lb.tf b/data/data/equinixmetal/vnet/internal-lb.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/equinixmetal/vnet/outputs.tf b/data/data/equinixmetal/vnet/outputs.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/equinixmetal/vnet/public-lb.tf b/data/data/equinixmetal/vnet/public-lb.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/equinixmetal/vnet/variables.tf b/data/data/equinixmetal/vnet/variables.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/equinixmetal/vnet/versions.tf b/data/data/equinixmetal/vnet/versions.tf new file mode 100644 index 00000000000..d9b6f790b92 --- /dev/null +++ b/data/data/equinixmetal/vnet/versions.tf @@ -0,0 +1,3 @@ +terraform { + required_version = ">= 0.12" +} diff --git a/data/data/equinixmetal/vnet/vnet.tf b/data/data/equinixmetal/vnet/vnet.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/install.openshift.io_installconfigs.yaml b/data/data/install.openshift.io_installconfigs.yaml index 2091d6a1554..4bb7073b0ec 100644 --- a/data/data/install.openshift.io_installconfigs.yaml +++ b/data/data/install.openshift.io_installconfigs.yaml @@ -266,6 +266,10 @@ spec: - high_performance type: string type: object + equinixmetal: + description: Equinix Metal is the configuration used when installing + on Equinix Metal. + type: object vsphere: description: VSphere is the configuration used when installing on vSphere. properties: @@ -534,6 +538,10 @@ spec: - high_performance type: string type: object + equinixmetal: + description: Equinix Metal is the configuration used when installing + on Equinix Metal. + type: object vsphere: description: VSphere is the configuration used when installing on vSphere. 
properties: @@ -1241,6 +1249,53 @@ spec: - ovirt_cluster_id - ovirt_storage_domain_id type: object + equinixmetal: + description: Equinix Metal is the configuration used when installing on Equinix Metal. + properties: + apivip: + description: 'APIVIP is the static IP on the nodes subnet that + the api port for openshift will be assigned Default: will be + set to the 5 on the first entry in the machineNetwork CIDR' + format: ip + type: string + bootstrapOSImage: + description: BootstrapOSImage is a URL to override the default + OS image for the bootstrap node. The URL must contain a sha256 + hash of the image e.g https://mirror.example.com/images/qemu.qcow2.gz?sha256=a07bd... + type: string + clusterOSImage: + description: ClusterOSImage is a URL to override the default OS + image for cluster nodes. The URL must contain a sha256 hash + of the image e.g https://mirror.example.com/images/metal.qcow2.gz?sha256=3b5a8... + type: string + computeSubnet: + description: ComputeSubnet is an existing subnet where the compute + nodes will be deployed. The value should be the name of the + subnet. + type: string + controlPlaneSubnet: + description: ControlPlaneSubnet is an existing subnet where the + control plane will be deployed. The value should be the name + of the subnet. + type: string + defaultMachinePlatform: + description: DefaultMachinePlatform is the default configuration + used when installing on bare metal for machine pools which do + not define their own platform configuration. + type: object + facility_code: + description: FacilityCode represents the Equinix Metal region and datacenter + where your devices will be provisioned (https://www.equinixmetal.com/developers/docs/getting-started/facilities/) + type: string + network: + description: Network specifies an existing VPC where the cluster + should be created rather than provisioning a new one. + type: string + project_id: + description: ProjectID represents the Equinix Metal project used for + logical grouping and invoicing (https://metal.equinix.com/developers/api/) + type: string + type: object vsphere: description: VSphere is the configuration used when installing on vSphere. 
properties: diff --git a/data/data/manifests/openshift/cloud-creds-secret.yaml.template b/data/data/manifests/openshift/cloud-creds-secret.yaml.template index bd5c13d68f0..9014ef8cfce 100644 --- a/data/data/manifests/openshift/cloud-creds-secret.yaml.template +++ b/data/data/manifests/openshift/cloud-creds-secret.yaml.template @@ -14,6 +14,8 @@ metadata: name: vsphere-creds {{- else if .CloudCreds.Ovirt}} name: ovirt-credentials +{{- else if .CloudCreds.EquinixMetal}} + name: equinixmetal-credentials {{- end}} data: {{- if .CloudCreds.AWS}} @@ -42,4 +44,11 @@ data: ovirt_cafile: {{.CloudCreds.Ovirt.Base64encodeCAFile}} ovirt_insecure: {{.CloudCreds.Ovirt.Base64encodeInsecure}} ovirt_ca_bundle: {{.CloudCreds.Ovirt.Base64encodeCABundle}} +{{- else if .CloudCreds.EquinixMetal}} + equinixmetal_url: {{.CloudCreds.EquinixMetal.Base64encodeURL}} + equinixmetal_username: {{.CloudCreds.EquinixMetal.Base64encodeUsername}} + equinixmetal_password: {{.CloudCreds.EquinixMetal.Base64encodePassword}} + equinixmetal_cafile: {{.CloudCreds.EquinixMetal.Base64encodeCAFile}} + equinixmetal_insecure: {{.CloudCreds.EquinixMetal.Base64encodeInsecure}} + equinixmetal_ca_bundle: {{.CloudCreds.EquinixMetal.Base64encodeCABundle}} {{- end}} diff --git a/data/data/rhcos-amd64.json b/data/data/rhcos-amd64.json index 76e9cfc5c5b..676c4084ef1 100644 --- a/data/data/rhcos-amd64.json +++ b/data/data/rhcos-amd64.json @@ -141,6 +141,13 @@ "sha256": "ffac43f96bcef2a129734bd50145cdba0ec191aab09641f9a5f0d3e1d48ef8b2", "size": 801607680 }, + "equinixmetal": { + "path": "rhcos-45.81.202005200134-0-metal.x86_64.raw.gz", + "sha256": "fbbe3f1a6cd60ec0344ca88925efc812c1556654680d8663bffd0663ae55843a", + "size": 878855135, + "uncompressed-sha256": "4a211425bf1af046ffbd91c9c63b0d179db67f9dc4165a4a106a2f0a1c74df7e", + "uncompressed-size": 3807379456 + }, "qemu": { "path": "rhcos-47.82.202010211043-0-qemu.x86_64.qcow2.gz", "sha256": "ced6480a9f4960484ba83e5734334bb1afab19cb38e8a8f4901919eb222567c0", diff --git a/data/data/rhcos.json b/data/data/rhcos.json index 76e9cfc5c5b..676c4084ef1 100644 --- a/data/data/rhcos.json +++ b/data/data/rhcos.json @@ -141,6 +141,13 @@ "sha256": "ffac43f96bcef2a129734bd50145cdba0ec191aab09641f9a5f0d3e1d48ef8b2", "size": 801607680 }, + "equinixmetal": { + "path": "rhcos-45.81.202005200134-0-metal.x86_64.raw.gz", + "sha256": "fbbe3f1a6cd60ec0344ca88925efc812c1556654680d8663bffd0663ae55843a", + "size": 878855135, + "uncompressed-sha256": "4a211425bf1af046ffbd91c9c63b0d179db67f9dc4165a4a106a2f0a1c74df7e", + "uncompressed-size": 3807379456 + }, "qemu": { "path": "rhcos-47.82.202010211043-0-qemu.x86_64.qcow2.gz", "sha256": "ced6480a9f4960484ba83e5734334bb1afab19cb38e8a8f4901919eb222567c0", diff --git a/docs/dev/adding-new-platform.md b/docs/dev/adding-new-platform.md new file mode 100644 index 00000000000..5d0f2baca9b --- /dev/null +++ b/docs/dev/adding-new-platform.md @@ -0,0 +1,310 @@ +# How to add a new platform to OpenShift + +This document describes the process for adding a new platform to OpenShift in stages. Because there are many components to an automated platform, the process is defined in terms of delivering specific levels of functionality over time. + +## Terminology + +* `User Provided Infrastructure (UPI)` - The documentation and enablement that describes how to launch OpenShift on a particular platform following the supported best practices. +* `Install Provided Infrastructure (IPI)` - Infrastructure created as part of the install process following the best practices on a platform. 
IPI may support options that allow portions of the infrastructure to be user-provided. +* `Cloud Provider` - The set of controllers in OpenShift that automatically manage storage, networking, and host failure detection by invoking infrastructure APIs. +* `Dynamic Compute` - The 4.x cloud provider feature that allows OpenShift to automatically manage creating, deleting, and scaling nodes by invoking infrastructure APIs. Exposed via the Machine API (`Machine`, `MachineSet`, and `MachineDeployment`) and implemented per platform via an `actuator` controller. + +## New Platform Milestones + +### Enable core platform + +1. **Boot** - Ensure RH CoreOS boots on the desired platform, that Ignition + works, and that you have VM / machine images to test with. Platforms that are + not supported by Ignition can provide support for their platform at + + ([example](https://github.com/coreos/ignition/pull/667)). +2. **Arch** - Identify the correct opinionated configuration for a desired + platform supporting the default features. +3. **CI** - Identify credentials and setup for a CI environment, ensure those + credentials exist and can be used in the CI environment +4. **Name** - For consistency, identify and get approved the correct naming for + adding a new platform to the core API objects (specifically the + [infrastructure + config](https://github.com/openshift/api/blob/master/config/v1/types_infrastructure.go) + and the installer config + (https://github.com/openshift/installer/blob/master/pkg/types/aws/doc.go)) + +5. **Enable Provisioning** Add a hidden installer option to this repo for the + desired platform as a PR and implement the minimal features for bootstrap as + well as a reliable teardown +6. **Enable Platform** Ensure all operators treat your platform as a no-op +7. **CI Job** Add a new CI job to the installer that uses the credentials above + to run the installer against the platform and correctly tear down resources +8. **Publish Images** Ensure RH CoreOS images on the platform are being + published to a location CI can test +9. **Merge** Merge the platform PR to the installer with a passing platform + specific CI job + +Expected PRs: + +* : Before starting any work, create an enhancement request. The OpenShift team will surface reference architectures and dependencies, viable paths, best practices, and expected challenges such as potential blockers and open issues. Follow the enhancement template at . + +* : adding your Platform + TODO(displague) expected limitations + +At this point the platform is said to be an `unsupported IPI` (installer provided infrastructure) install - work can begin enabling in other repositories. Once these steps have been completed and official documentation is available, the platform can said to be `supported UPI without cloud provider` (user provided infrastructure) for the set of options in **Arch**. + +Expected PRs: + +* : adding your Platform to types_infrastructure.go + +### Enable component infrastructure management + +Once the platform can be launched and tested, system features must be implemented. The sections below are roughly independent: + +TODO(displague) Outline repo/files related to each phase, add context (why is this needed) + +* General requirements: + * Replace the installer `terraform destroy` with one that doesn't rely on + Terraform state. When the cluster components start creating additional + resources on the target platform, Terraform won't have knowledge of them. 
+ * Ensure the installer IPI support is consistent with other platform features + (private config, etc) + * Not all platforms will support all features, so IPI is taken to be a + spectrum of support + * Enable a CI job that verifies the E2E suite for the given platform runs + successfully +* Requirements for dynamic storage and dynamic load balancing + * Ensure the cloud provider is enabled in Kubernetes for your platform (this + is required for `supported UPI with cloud provider`) + * Enable cluster-storage-operator to set the correct default storage class +* Requirements for dynamic compute: + * Enable the cloud credential operator for the platform to subdivide access + for individual operators + * Enable dynamic compute (`MachineSets`) by adding a cloud actuator for that + platform +* Requirements for dynamic ingress and images: + * Enable + [cluster-image-registry-operator](https://github.com/openshift/cluster-image-registry-operator#image-registry-operator) + to provision a storage bucket (if your platform supports object storage) + * Enable [cluster-ingress-operator](https://github.com/openshift/cluster-ingress-operator#openshift-ingress-operator) to provision the wildcard domain names + +At this point the platform is said to be a `supported IPI with Dynamic Compute` +if the platform supports `MachineSets`, or `supported IPI without Dynamic +Compute` if it does not. + +## OpenShift Architectural Design + +OpenShift 4 combines Kubernetes, fully-managed host lifecycle, dynamic +infrastructure management, and a comprehensive set of fully-automated platform +extensions that can be upgraded and managed uniformly. + +The foundation of the platform is Red Hat CoreOS, an immutable operating system +based on RHEL that is capable of acting as a fully-integrated part of the +cluster. + +The Kubernetes control plane is hosted on the cluster, along with a number of +other fundamental extensions like cluster networking, ingress, storage, and +application development tooling. Each of those extensions is fully managed +on-cluster via a cluster operator that reacts to top level global configuration +APIs and can automatically reconfigure the affected components. + +The Operator Lifecycle Manager (OLM) allows additional ecosystem operators to be +installed, upgraded and managed. All of these components - from the operating +system kernel to the web console - are part of a unified update lifecycle under +the top level Cluster Version Operator which offers worry free rolling updates +of the entire infrastructure. + +### Core configuration + +An OpenShift cluster programs the infrastructure it runs on to provide operational simplicity. + +For every platform, the minimum requirements are: + +1. Control Plane Nodes: + 1. Run RH CoreOS, allowing in-place updates + 2. Are fronted by a load balancer that allows raw TCP connections to port + 6443 and exposes port 443 + 3. Meet hardware requirements: + * Have low latency interconnections connections (<5ms RTT) + * persistent disks that survive reboot + * provisioned for >= 300 IOPS + 4. Have cloud or infrastructure firewall rules that at minimum allow the + standard ports to be opened (see AWS provider) + 5. Do *not* have automatic cloud provider permissions to perform + infrastructure API calls + 6. Have a domain name pointing to the load balancer IP(s) that is + `api.` + 7. Has an internal DNS CNAME pointing to each control plane called + `-etcd-N.` + 8. 
Have an optional internal load balancer that TCP load balances all master + nodes, with a DNS name `internal-api.` pointing to the load + balancer. +2. One Bootstrap Node: + 1. Runs RH CoreOS + 2. Is reachable by control plane nodes over the network + 3. Is part of the control plane load balancer until it is removed + 4. Can reach a network endpoint that hosts the bootstrap Ignition file + securely, or has the bootstrap Ignition injected +3. Compute Nodes: + 1. Must be able to reach the internal IPs reported by the control plane nodes + directly + 2. Have cloud or infrastructure firewall rules that at minimum allow ports + 4789, 6443, 9000-10000, and 10250-10255 to be reachable + +The following clarifications to configurations are noted: + +1. The control plane load balancer does not need to be exposed to the public + internet, but the DNS entry must be visible from the location the installer + is run. +2. Control plane and compute nodes are not required to expose external IPs for + SSH access, and can instead allow SSH from a bastion inside a protected + network. + +For dynamic infrastructure, the following permissions are required to be provided as part of the install: + +1. Service LoadBalancer - Load balancers can be created and removed, + infrastructure nodes can be queried +2. Dynamic Storage - New volumes can be created, deleted, attached, and detached + from nodes. Snapshot creation is optional if the platform supports + them. +3. Dynamic Compute - New instances can be created, deleted, and restarted inside + of the cluster's network / infrastructure, plus any platform specific + constructs like programming instance groups for control plane load balancing on GCP. + +## Booting RH CoreOS + +Red Hat CoreOS uses Ignition to receive initial configuration from a remote source. Ignition has platform specific behavior to read that configuration that is determined by the `platformID` embedded in the VM image. + +To boot RHCOS to a new platform, you must: + +1. Ensure [Ignition](https://github.com/coreos/ignition) [supports that + platform](https://github.com/coreos/ignition/blob/master/doc/supported-platforms.md). +2. Ensure that RHCOS has any necessary platform specific code to communicate + with the platform (for instance, on Azure the instance must periodically + health check) - see [cloud support tracker on Fedora CoreOS for more + info](https://github.com/coreos/fedora-coreos-tracker/issues/95). +3. Have a RHCOS image with the appropriate platformID tag set. + +There is a script that assists you in converting the generic VM image to have a specific platformID set in the [coreos-assembler repo as gf-platformid](https://github.com/coreos/coreos-assembler/blob/master/src/gf-platformid). See the instructions there to create an image with the appropriate ID. + +Once you have uploaded the image to your platform, and the machine stays up, you can begin porting the installer to have a minimal IPI. + +## Continuous Integration + +All platforms require a core continuous integration testing loop that verifies that new changes do not regress our support for the platform. The minimum steps required are: + +1. Have an infrastructure that can receive API calls from the OpenShift CI + system to provision/destroy instances +2. Support at minimum 3 concurrent clusters on that infrastructure as "per + release image" testing (https://origin-release.svc.ci.openshift.org) that + verify a release supports that platform +3. 
Also support a per-PR target that can be selectively run on the installer, + core, and operator repositories in OpenShift in order to allow developers to + test incremental changes to those components + +No PR will be merged to openshift/installer for platform support that cannot +satisfy the above steps. + +## Naming + +The platform name will be part of our public API and must go through standard +API review. The name should be consistent with common usage of the platform and +be recognizable to a consumer. + +The following names for platforms are good examples of what is expected: + +* Amazon Web Services -> `aws` or `AWS` +* Google Cloud Platform -> `gcp` or `GCP` +* Azure -> `azure` or `Azure` +* Libvirt -> `libvirt` or `Libvirt` +* OpenStack -> `openstack` or `OpenStack` + +## Enable Provisioning + +Since CI testing requires the ability to provision via an API, we define the +basic path for supporting a platform as having a minimal provisioning path in +the OpenShift installer. Not all platforms we support will have full +infrastructure provisioning supported, but the basic path must be invocable via +Go code in `openshift-install` before a platform can be certified. This ensures +we have at least one path to installation. + +The OpenShift installer has normal and hidden provisioners. The hidden +provisioners are explicitly unsupported for production use but are supported for +testing. + +1. Add a new hidden provisioner +2. Define the minimal platform parameters that the provisioner must support +3. Use Terraform or direct Go code to provision that platform via the + credentials provided to the installer. + +A minimal provisioner must be able to launch the control plane and bootstrap +node via an API call and accept any "environmental" settings like network or +region as inputs. The installer should use the Route53 DNS provisioning code to +set up round robin to the bootstrap and control plane nodes if the platform does not offer similar managed DNS services. + +## Enable Platform + +OpenShift handles platform functionality as a set of operators running on the +platform that interface with users, admins, and infrastructure. Because +operators handle day 2 reconfiguration of the cluster, many "installation" +related duties are delegated to the operators. + +Operators derive their configuration from top level API objects called `global +configuration`. One such object is the `Infrastructure` global config, which +reports which platform the cluster is running on. + +All operators that react to infrastructure must support a `None` option, and any +unrecognized infrastructure platform **MUST** be treated as `None`. When an +operator starts, it should log a single warning if the infrastructure provider +is not recognized and then fall back to `None`. + +When adding a new platform to the installer, the infrastructure setting should +happen automatically during bootstrapping, and if a component does not correctly +treat your new platform as `None` it should be fixed immediately. + +## CI Job + +The initial CI job for a new platform PR to `openshift/installer` must use the +`cluster-installer-e2e` template but with an alternate profile, and the CI +infrastructure should be configured with the credentials for your infrastructure +in a `cluster-secrets-PLATFORM` secret. Talk to the testplatform team. This CI +job will then be reused whenever a repo wants to test, or when we add new +release tests. 
+ +A new platform should pass many of the kubernetes conformance tests, so the +default job would run the e2e suite `kubernetes/conformance`. We may define a +more scoped job if the platform cannot pass. + +The teardown behavior of the cluster is the hardest part of this process - +because we run so many tests a day, it must be 100% reliable from the beginning. +You should implement a reliable teardown mechanism in your `destroy` method, +leveraging the OpenStack and AWS examples. + +We **will not** merge a new job if it does not have reliable cleanup in the face of failures, rate limits, etc, because it blocks other work. + +## Publishing Red Hat CoreOS Images + +RHCOS nodes can be upgraded to newer versions of kernel, userspace, and Kubelet +post-creation. For this reason, the installer launches a recent version of RHCOS +that is then upgraded at boot time to the version of RHCOS content that is +included in the OpenShift release payload. + +Once a version of RHCOS supports the desired platform, an image with that +Platform ID embedded must be published to the cloud or a publicly available +download location on a regular schedule. The installer may then embed logic to +identify the most recent location for the payload and automatically provide that +to the installer provisioning steps. + +## Merge the initial platform support + +After all of the steps above have been completed, the pull request enabling the +platform may be merged with documentation updated to indicate the platform is in +an unsupported pre-release configuration. Other components may now begin their +integration work. + +## Integration to individual operators + +1. Machine API Operator +2. Machine Config Operator +3. Cluster Storage Operator +4. Cloud Credential Operator +5. Cluster Ingress Operator +6. Cluster Image Registry Operator + +TODO: add details diff --git a/docs/user/customization.md b/docs/user/customization.md index 4090ecbcf11..bc4ddda2b7b 100644 --- a/docs/user/customization.md +++ b/docs/user/customization.md @@ -54,6 +54,7 @@ The following `install-config.yaml` properties are available: * `azure` (optional object): [Azure-specific properties](azure/customization.md#cluster-scoped-properties). * `openstack` (optional object): [OpenStack-specific properties](openstack/customization.md#cluster-scoped-properties). * `ovirt` (optional object): [oVirt-specific properties](ovirt/customization.md#cluster-scoped-properties). + * `equinixmetal` (optional object): [EquinixMetal-specific properties](equinixmetal/customization.md#cluster-scoped-properties). * `vsphere` (optional object): [vSphere-specific properties](vsphere/customization.md#cluster-scoped-properties). * `proxy` (optional object): The proxy settings for the cluster. If unset, the cluster will not be configured to use a proxy. @@ -83,6 +84,7 @@ The following machine-pool properties are available: * `gcp` (optional object): [GCP-specific properties](gcp/customization.md#machine-pools). * `openstack` (optional object): [OpenStack-specific properties](openstack/customization.md#machine-pools). * `ovirt` (optional object): [oVirt-specific properties](ovirt/customization.md#machine-pools). + * `equinixmetal` (optional object): [EquinixMetal-specific properties](equinixmetal/customization.md#machine-pools). * `vsphere` (optional object): [vSphere-specific properties](vsphere/customization.md#machine-pools). * `replicas` (optional integer): The machine count for the machine pool. 
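The `equinixmetal` customization documents referenced just above are added empty by this patch, so as a rough sketch only, a cluster-scoped `equinixmetal` block in `install-config.yaml` might look like the following. The platform field names (`project_id`, `facility_code`, `apivip`) are taken from the schema this diff adds to `data/data/install.openshift.io_installconfigs.yaml`; all values are placeholders and this is not a verified or supported configuration.

```yaml
# Hypothetical install-config.yaml sketch for the Equinix Metal platform.
# Field names follow the equinixmetal schema added elsewhere in this patch;
# every value below is a placeholder, not a tested configuration.
apiVersion: v1
baseDomain: example.com
metadata:
  name: mycluster
platform:
  equinixmetal:
    project_id: 00000000-0000-0000-0000-000000000000  # Equinix Metal project used for grouping/invoicing
    facility_code: ewr1                                # Equinix Metal facility (region/datacenter)
    apivip: 203.0.113.5                                # static IP on the nodes subnet for the OpenShift API
pullSecret: '...'
sshKey: '...'
```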
diff --git a/docs/user/equinixmetal/OWNERS b/docs/user/equinixmetal/OWNERS new file mode 100644 index 00000000000..06e8c730e33 --- /dev/null +++ b/docs/user/equinixmetal/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +approvers: + - equinix-approvers +reviewers: + - equinix-reviewers diff --git a/docs/user/equinixmetal/README.md b/docs/user/equinixmetal/README.md new file mode 100644 index 00000000000..d336b82f217 --- /dev/null +++ b/docs/user/equinixmetal/README.md @@ -0,0 +1,7 @@ +# EquinixMetal Project Setup + +This document is a guide for preparing a new EquinixMetal project for use with OpenShift. + +Follow along with the steps and links below to configure your EquinixMetal project and provision an OpenShift cluster: + +1. [Step 1](step1.md) \ No newline at end of file diff --git a/docs/user/equinixmetal/customization.md b/docs/user/equinixmetal/customization.md new file mode 100644 index 00000000000..6b3c89bf066 --- /dev/null +++ b/docs/user/equinixmetal/customization.md @@ -0,0 +1,5 @@ + +## Machine Pools + + +## Cluster Scoped Properties diff --git a/docs/user/equinixmetal/install_ipi.md b/docs/user/equinixmetal/install_ipi.md new file mode 100644 index 00000000000..b0a63f037aa --- /dev/null +++ b/docs/user/equinixmetal/install_ipi.md @@ -0,0 +1,13 @@ +# Install using EquinixMetal platform provider + +## Overview + +## Prerequisite + +## Minimum resources + +## Install + +### Install using the wizard + +### Install in stages when customization is needed diff --git a/go.mod b/go.mod index 25e6cdd05ab..94f97889b20 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/openshift/installer go 1.14 require ( - cloud.google.com/go v0.57.0 + cloud.google.com/go v0.65.0 github.com/Azure/azure-sdk-for-go v43.2.0+incompatible github.com/Azure/go-autorest/autorest v0.10.0 github.com/Azure/go-autorest/autorest/adal v0.8.2 @@ -22,13 +22,12 @@ require ( github.com/containers/image v3.0.2+incompatible github.com/coreos/ignition/v2 v2.3.0 github.com/dmacvicar/terraform-provider-libvirt v0.6.2 + github.com/equinix/terraform-provider-equinix-metal v1.7.3-0.20201114035505-e4eeff216bf2 github.com/frankban/quicktest v1.7.2 // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 - github.com/go-logr/logr v0.2.1 // indirect github.com/go-playground/validator/v10 v10.2.0 - github.com/golang/mock v1.4.3 + github.com/golang/mock v1.4.4 github.com/golang/protobuf v1.4.2 - github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible // indirect github.com/google/uuid v1.1.1 github.com/gophercloud/gophercloud v0.12.1-0.20200821143728-362eb785d617 github.com/gophercloud/utils v0.0.0-20200918191848-da0e919a012a @@ -53,15 +52,17 @@ require ( github.com/openshift/client-go v0.0.0-20201020074620-f8fd44879f7c github.com/openshift/cloud-credential-operator v0.0.0-20200316201045-d10080b52c9e github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668 - github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200120152131-1b09fd9e7156 + github.com/openshift/cluster-api-provider-equinix-metal v0.1.0 + github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201027164920-70f2f92e64ab github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20191219173431-2336783d4603 github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20200504092944-27473ea1ae43 github.com/openshift/library-go v0.0.0-20201022113156-a4ff9e1d2900 - 
github.com/openshift/machine-api-operator v0.2.1-0.20200429102619-d36974451290 + github.com/openshift/machine-api-operator v0.2.1-0.20201002104344-6abfb5440597 github.com/openshift/machine-config-operator v0.0.0 github.com/ovirt/go-ovirt v0.0.0-20200613023950-320a86f9df27 github.com/ovirt/terraform-provider-ovirt v0.4.3-0.20200914080915-c4444fb5c201 github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db // indirect + github.com/packethost/packngo v0.5.1 github.com/pborman/uuid v1.2.0 github.com/pierrec/lz4 v2.3.0+incompatible // indirect github.com/pkg/errors v0.9.1 @@ -77,6 +78,7 @@ require ( github.com/terraform-provider-openstack/terraform-provider-openstack v1.32.0 github.com/terraform-providers/terraform-provider-aws v0.0.0 github.com/terraform-providers/terraform-provider-azurerm v0.0.0 + github.com/terraform-providers/terraform-provider-dns v0.0.0-20191209223915-3fb1c1918eb1 github.com/terraform-providers/terraform-provider-google v1.20.1-0.20200623174414-27107f2ee160 github.com/terraform-providers/terraform-provider-ignition/v2 v2.1.0 github.com/terraform-providers/terraform-provider-local v1.4.0 @@ -88,11 +90,11 @@ require ( github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 golang.org/x/lint v0.0.0-20200302205851-738671d3881b - golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 - google.golang.org/api v0.25.0 - google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 - google.golang.org/grpc v1.29.1 + golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 + golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13 + google.golang.org/api v0.33.0 + google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d + google.golang.org/grpc v1.31.1 gopkg.in/AlecAivazis/survey.v1 v1.8.9-0.20200217094205-6773bdf39b7f gopkg.in/ini.v1 v1.51.0 gopkg.in/yaml.v2 v2.3.0 @@ -107,19 +109,27 @@ require ( sigs.k8s.io/cluster-api-provider-azure v0.0.0 sigs.k8s.io/cluster-api-provider-openstack v0.0.0 sigs.k8s.io/controller-tools v0.3.1-0.20200617211605-651903477185 + sigs.k8s.io/yaml v1.2.0 ) replace ( + bitbucket.org/ww/goautoneg => github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d github.com/Azure/go-autorest => github.com/tombuildsstuff/go-autorest v14.0.1-0.20200416184303-d4e299a3c04a+incompatible github.com/Azure/go-autorest/autorest => github.com/tombuildsstuff/go-autorest/autorest v0.10.1-0.20200416184303-d4e299a3c04a github.com/Azure/go-autorest/autorest/azure/auth => github.com/tombuildsstuff/go-autorest/autorest/azure/auth v0.4.3-0.20200416184303-d4e299a3c04a + github.com/equinix/terraform-provider-equinix-metal => github.com/packethost/terraform-provider-packet v1.7.3-0.20201202165003-a5613b748108 github.com/go-log/log => github.com/go-log/log v0.1.1-0.20181211034820-a514cf01a3eb // Pinned by MCO + github.com/hashicorp/terraform => github.com/openshift/terraform v0.12.20-openshift-4 // Pin to fork with deduplicated rpc types v0.12.20-openshift-4 github.com/hashicorp/terraform-plugin-sdk => github.com/openshift/hashicorp-terraform-plugin-sdk v1.14.0-openshift // Pin to fork with public rpc types github.com/metal3-io/baremetal-operator => github.com/openshift/baremetal-operator v0.0.0-20200715132148-0f91f62a41fe // Use OpenShift fork github.com/metal3-io/cluster-api-provider-baremetal => github.com/openshift/cluster-api-provider-baremetal v0.0.0-20190821174549-a2a477909c1d // Pin OpenShift fork + github.com/miekg/dns => 
github.com/miekg/dns v1.0.8 // Pin to terraform-provider-dns github.com/openshift/api => github.com/openshift/api v0.0.0-20200601094953-95abe2d2f422 // Pin API github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20200929181438-91d71ef2122c // Pin client-go + github.com/openshift/cluster-api-provider-equinix-metal => github.com/detiber/openshift-provider-packet v0.0.0-20201117162756-512a178614c0 + github.com/openshift/machine-api-operator => github.com/detiber/machine-api-operator v0.0.0-20201113194109-75933fe1fd83 + github.com/openshift/machine-config-operator => github.com/openshift/machine-config-operator v0.0.1-0.20201009041932-4fe8559913b8 // Pin MCO so it doesn't get downgraded github.com/terraform-providers/terraform-provider-aws => github.com/openshift/terraform-provider-aws v1.60.1-0.20200630224953-76d1fb4e5699 // Pin to openshift fork with tag v2.67.0-openshift github.com/terraform-providers/terraform-provider-azurerm => github.com/openshift/terraform-provider-azurerm v1.40.1-0.20200707062554-97ea089cc12a // release-2.17.0 branch @@ -128,7 +138,7 @@ replace ( github.com/vmware/govmomi => github.com/vmware/govmomi v0.22.2-0.20200420222347-5fceac570f29 k8s.io/api => k8s.io/api v0.19.0 k8s.io/client-go => k8s.io/client-go v0.19.0 - sigs.k8s.io/cluster-api-provider-aws => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20200506073438-9d49428ff837 // Pin OpenShift fork + sigs.k8s.io/cluster-api-provider-aws => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20201022175424-d30c7a274820 // Pin OpenShift fork sigs.k8s.io/cluster-api-provider-azure => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20200120114645-8a9592f1f87b // Pin OpenShift fork sigs.k8s.io/cluster-api-provider-openstack => github.com/openshift/cluster-api-provider-openstack v0.0.0-20200526112135-319a35b2e38e // Pin OpenShift fork ) diff --git a/go.sum b/go.sum index 71ed03369f1..001d513981d 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,4 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= -bitbucket.org/ww/goautoneg v0.0.0-20120707110453-75cd24fc2f2c/go.mod h1:1vhO7Mn/FZMgOgDVGLy5X1mE6rq1HbkBdkF/yj8zkcg= bou.ke/monkey v1.0.1/go.mod h1:FgHuK96Rv2Nlf+0u1OOVDpCMdsWyOFmeeketDHE7LIg= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -13,47 +12,41 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.49.0 h1:CH+lkubJzcPYB1Ggupcq0+k8Ni2ILdG2lYjDIgavDBQ= cloud.google.com/go v0.49.0/go.mod h1:hGvAdzcWNbyuxS3nWhD7H2cIJxjRRTRLQVB0bdputVY= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0 h1:WRz29PgAsVEyPSDHyk+0fpEkwEFyfhHn+JbksT6gIL4= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= 
-cloud.google.com/go v0.57.0 h1:EpMNVUorLiZIELdMZbCYX/ByTFCdoYopYAGxaGVz9ms= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.6.0 h1:ajp/DjpiCHO71SyIhwb83YsUGAyWuzVvMko+9xCsJLw= cloud.google.com/go/bigquery v1.6.0/go.mod h1:hyFDG0qSGdHNz8Q6nDN8rYIkld0q/+5uBZaelxiDLfE= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigtable v1.1.0 h1:+IakvK2mFz1FbfA9Ti0JoKRPiJkORngh9xhfMbVkJqw= cloud.google.com/go/bigtable v1.1.0/go.mod h1:B6ByKcIdYmhoyDzmOnQxyOhN6r05qnewYIxxG6L0/b4= -cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0 h1:9/vpR43S4aJaROxqQHQ3nH9lfyKKV0dC3vOmnw8ebQQ= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.3.0/go.mod h1:9IAwXhoyBJ7z9LcAwkj0/7NnPzYaPeZxxVp3zm+5IqA= -cloud.google.com/go/storage v1.4.0 h1:KDdqY5VTXBTqpSbctVTt0mVvfanP6JZzNzLE0qNY100= cloud.google.com/go/storage v1.4.0/go.mod h1:ZusYJWlOshgSBGbt6K3GnB3MT3H1xs2id9+TCl4fDBA= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0 h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r8= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.7.0 h1:DzdLPI8Em+DEk7IzA2a10ivq3mxIEASC9GeNJ6FFt5Q= cloud.google.com/go/storage v1.7.0/go.mod h1:jGMIBwF+L/tL6WN/W5InNgYYu4HP0DvGB6rQ1mufWfs= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= code.cloudfoundry.org/gofileutils 
v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs= @@ -85,7 +78,6 @@ github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEg github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503 h1:Hxqlh1uAA8aGpa1dFhDNhll7U/rkWtG8ZItFvRMr7l0= github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= @@ -112,7 +104,6 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/Azure/go-ntlmssp v0.0.0-20191115210519-2b2be6cc8ed4 h1:jxtswewdgihgXM6ayHYtISwzkAOaRzyXpgUMamb8mHw= github.com/Azure/go-ntlmssp v0.0.0-20191115210519-2b2be6cc8ed4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= -github.com/BurntSushi/toml v0.3.0/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -147,7 +138,6 @@ github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= -github.com/OpenPeeDeeP/depguard v1.0.1 h1:VlW4R6jmBIv3/u1JNlawEvJMM4J+dPORPaZasQee8Us= github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -162,11 +152,9 @@ github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWX github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= -github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14= github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/agext/levenshtein v1.2.1/go.mod 
h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= @@ -175,7 +163,6 @@ github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4Rq github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083 h1:uwcvnXW76Y0rHM+qs7y8iHknWUWXYFNlD6FEVhc47TU= github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c= github.com/ajeddeloh/yaml v0.0.0-20170912190910-6b94386aeefd/go.mod h1:idhzw68Q7v4j+rQ2AGyq3OlZW2Jij9mdmGA4/Sk6J0E= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -183,13 +170,10 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190329064014-6e358769c32a/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190412020505-60e2075261b6/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA= -github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f h1:oRD16bhpKNAanfcDDVU+J0NXqsgHIvGbbe/sy+r6Rs0= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= -github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible h1:EaK5256H3ELiyaq5O/Zwd6fnghD6DqmZDQmmzzJklUU= github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= -github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible h1:ABQ7FF+IxSFHDMOTtjCfmMDMHiCq6EsAoCV/9sFinaM= github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible/go.mod h1:LDQHRZylxvcg8H7wBIDfvO5g/cy4/sz1iucBlc2l3Jw= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= @@ -236,6 +220,7 @@ github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3A github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.19.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.19.39/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.22.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= 
github.com/aws/aws-sdk-go v1.25.47/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= @@ -243,7 +228,6 @@ github.com/aws/aws-sdk-go v1.28.8/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go v1.30.24/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.32.3 h1:E3OciOGVlJrv1gQ2T7/Oou+I9nGPB2j978THQjvZBf0= github.com/aws/aws-sdk-go v1.32.3/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= @@ -348,8 +332,6 @@ github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkE github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/container-linux-config-transpiler v0.9.0/go.mod h1:SlcxXZQ2c42knj8pezMiQsM1f+ADxFMjGetuMKR/YSQ= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE= github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/fcct v0.5.0/go.mod h1:cbE+j77YSQwFB2fozWVB3qsI2Pi3YiVEbDz/b6Yywdo= @@ -357,10 +339,8 @@ github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8Nz github.com/coreos/go-json v0.0.0-20170920214419-6a2fe990e083/go.mod h1:FmxyHfvrCFfCsXRylD4QQRlQmvzl+DG6iTHyEEykPfU= github.com/coreos/go-json v0.0.0-20200220154158-5ae607161559 h1:xKJaV2cuoPgrl9Z0y3tIycVvaZydc2IblkvJSQVRSAU= github.com/coreos/go-json v0.0.0-20200220154158-5ae607161559/go.mod h1:FmxyHfvrCFfCsXRylD4QQRlQmvzl+DG6iTHyEEykPfU= -github.com/coreos/go-oidc v0.0.0-20180117170138-065b426bd416/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-oidc v2.0.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.0.0-20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -383,7 +363,6 @@ github.com/coreos/ignition/v2 v2.3.0 h1:TK+STbzVe6KZp4tQ2IaNSRMiWX4/diNngep1F7tP github.com/coreos/ignition/v2 v2.3.0/go.mod h1:85dmM/CERMZXNrJsXqtNLIxR/dn8G9qlL1CmEjCugp0= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg 
v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/prometheus-operator v0.38.0/go.mod h1:xZC7/TgeC0/mBaJk+1H9dbHaiEvLYHgX6Mi1h40UPh8= github.com/coreos/vcontext v0.0.0-20190529201340-22b159166068/go.mod h1:E+6hug9bFSe0KZ2ZAzr8M9F5JlArJjv5D1JS7KSkPKE= @@ -416,6 +395,10 @@ github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/detiber/machine-api-operator v0.0.0-20201113194109-75933fe1fd83 h1:z2fl8ePjKWGUxmL93zw/AEgd0Ll6Vbgwiwi5i6OmpbE= +github.com/detiber/machine-api-operator v0.0.0-20201113194109-75933fe1fd83/go.mod h1:9LGA0JD1H4tdfk/jrQvHD/aHH+CIjk3d7roK7buMlaE= +github.com/detiber/openshift-provider-packet v0.0.0-20201117162756-512a178614c0 h1:dAPtmNqbeURiYuYtYp4lHqXlKrg9jB+prDGAeAXkQY8= +github.com/detiber/openshift-provider-packet v0.0.0-20201117162756-512a178614c0/go.mod h1:6WozR9dcWRsEM3n3zJXZaExTRMDKmYNWRWNAKfSO9uo= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= @@ -494,7 +477,6 @@ github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6 github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -502,20 +484,17 @@ github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZM github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod 
h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk= github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -531,7 +510,6 @@ github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56/go.mod h1:w9R github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/getsentry/raven-go v0.0.0-20190513200303-c977f96e1095/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= @@ -555,9 +533,7 @@ github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTD github.com/go-log/log v0.1.1-0.20181211034820-a514cf01a3eb/go.mod h1:4mBwpdRMFLiuXZDCwU2lKQFsoSCo72j3HqBK9d81N2M= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.2.1 h1:fV3MLmabKIZ383XifUjFSwcoGee0v9qgPp8wy5svibE= github.com/go-logr/logr v0.2.1/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= @@ -580,7 +556,6 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+ github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.17.2/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -588,7 +563,6 @@ github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1 github.com/go-openapi/jsonreference v0.17.0/go.mod 
h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= @@ -599,7 +573,6 @@ github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.17.2/go.mod h1:QO936ZXeisByFmZEO1IS1Dqhtf4QV1sYYFtIq6Ld86Q= github.com/go-openapi/runtime v0.18.0/go.mod h1:uI6pHuxWYTy94zZxgcwJkUWa9wbIlhteGfloI10GD4U= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= @@ -624,7 +597,6 @@ github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/ github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/validate v0.17.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= @@ -692,15 +664,12 @@ github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang-migrate/migrate/v4 v4.6.2/go.mod h1:JYi6reN3+Z734VZ0akNuyOJNcrg45ZL7LDBMW3WGJL0= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache 
v0.0.0-20191002201903-404acd9df4cc/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= @@ -709,29 +678,26 @@ github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200j github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= @@ -775,20 +741,24 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-metrics-stackdriver v0.0.0-20190816035513-b52628e82e2a/go.mod h1:o93WzqysX0jP/10Y13hfL6aq9RoUvGaVdkrH5awMksE= -github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:xmapqc1AyLoB+ddYT6r04bD9lIjlOqGaREovi0SzFaE= -github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -796,6 +766,7 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v0.0.0-20170306145142-6a5e28554805/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -814,7 +785,6 @@ github.com/googleapis/gnostic 
v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsC github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= @@ -826,14 +796,12 @@ github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbG github.com/gophercloud/gophercloud v0.6.1-0.20191025185032-6ad562af8c1f/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= github.com/gophercloud/gophercloud v0.6.1-0.20191122030953-d8ac278c1c9d/go.mod h1:ozGNgr9KYOVATV5jsgHl/ceCDXGuguqOZAzoQ/2vcNM= github.com/gophercloud/gophercloud v0.7.1-0.20191210042042-7aa2e52d21f9/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= -github.com/gophercloud/gophercloud v0.12.0 h1:mZrie07npp6ODiwHZolTicr5jV8Ogn43AvAsSMm6Ork= github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gophercloud/gophercloud v0.12.1-0.20200821143728-362eb785d617 h1:8Kj1KcL5PXrH3CIC+U5LnbyfueWr/Fh/4CdM5yP+XKM= github.com/gophercloud/gophercloud v0.12.1-0.20200821143728-362eb785d617/go.mod h1:w2NJEd88d4igNL1KUHzBsKMvS/ByJTzgltTGWKT7AC8= github.com/gophercloud/utils v0.0.0-20190124231947-9c3b9f2457ef/go.mod h1:wjDF8z83zTeg5eMLml5EBSlAhbF7G8DobyI1YsMuyzw= github.com/gophercloud/utils v0.0.0-20190313033024-0bcc8e728cb5/go.mod h1:SZ9FTKibIotDtCrxAU/evccoyu1yhKST6hgBvwTB5Eg= github.com/gophercloud/utils v0.0.0-20191129022341-463e26ffa30d/go.mod h1:SZ9FTKibIotDtCrxAU/evccoyu1yhKST6hgBvwTB5Eg= -github.com/gophercloud/utils v0.0.0-20200508015959-b0167b94122c h1:iawx2ojEQA7c+GmkaVO5sN+k8YONibXyDO8RlsC+1bs= github.com/gophercloud/utils v0.0.0-20200508015959-b0167b94122c/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= github.com/gophercloud/utils v0.0.0-20200918191848-da0e919a012a h1:Nn4npa87SL5uLA6rYbQp/u3JwVTeEJ7VfaWrJw97F5Q= github.com/gophercloud/utils v0.0.0-20200918191848-da0e919a012a/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= @@ -850,7 +818,6 @@ github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= @@ -860,10 +827,7 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:Fecb github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod 
h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg= github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= -github.com/grpc-ecosystem/go-grpc-prometheus v0.0.0-20170330212424-2500245aa611/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= @@ -873,7 +837,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.12.1 h1:zCy2xE9ablevUOrUZc3Dl72Dt+ya2FNAvC2yLYMHzi4= github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-health-probe v0.2.1-0.20181220223928-2bf0a5b182db/go.mod h1:uBKkC2RbarFsvS5jMJHpVhTLvGlGQj9JJwkaePE3FWI= github.com/h2non/filetype v1.0.12 h1:yHCsIe0y2cvbDARtJhGBTD2ecvqMSTvlIcph9En/Zao= @@ -883,7 +846,6 @@ github.com/hashicorp/aws-sdk-go-base v0.4.0 h1:zH9hNUdsS+2G0zJaU85ul8D59BGnZBaKM github.com/hashicorp/aws-sdk-go-base v0.4.0/go.mod h1:eRhlz3c4nhqxFZJAahJEFL7gh6Jyj5rQmQc7F9eHFyQ= github.com/hashicorp/consul-template v0.22.0/go.mod h1:lHrykBIcPobCuEcIMLJryKxDyk2lUMnQWmffOEONH0k= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.3.0 h1:HXNYlRkkM/t+Y/Yhxtwcy02dlYwIaoxzvxPnS+cqy78= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= @@ -906,7 +868,6 @@ github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9 github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.10.0 h1:b86HUuA126IcSHyC55WjPo7KtCOVeTCKIjr+3lBhPxI= github.com/hashicorp/go-hclog v0.10.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.13.0 h1:Do32YnDMnq7v7FU50AgH+1ExKCOkl9HBxvSI1JWr+rA= github.com/hashicorp/go-hclog v0.13.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= @@ -916,14 +877,11 @@ github.com/hashicorp/go-memdb v1.0.2/go.mod h1:I6dKdmYhZqU0RJSheVEWgTNWdVQH5QvTg github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= 
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-plugin v1.0.0/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= -github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE= github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= -github.com/hashicorp/go-plugin v1.2.0 h1:CUfYokW0EJNDcGecVrHZK//Cp1GFlHwoqtcUIEiU6BY= github.com/hashicorp/go-plugin v1.2.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= github.com/hashicorp/go-plugin v1.2.2 h1:mgDpq0PkoK5gck2w4ivaMpWRHv/matdOR4xmeScmf/w= github.com/hashicorp/go-plugin v1.2.2/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= @@ -932,28 +890,24 @@ github.com/hashicorp/go-retryablehttp v0.5.2/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= -github.com/hashicorp/go-retryablehttp v0.6.4 h1:BbgctKO892xEyOXnGiaAwIoSq1QZ/SS4AhjoAh9DnfY= github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= -github.com/hashicorp/go-slug v0.4.1 h1:/jAo8dNuLgSImoLXaX7Od7QB4TfYCVPam+OpAt5bZqc= github.com/hashicorp/go-slug v0.4.1/go.mod h1:I5tq5Lv0E2xcNXNkmx7BSfzi1PsJ2cNjs3cC3LwyhK8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-tfe v0.3.27 h1:7XZ/ZoPyYoeuNXaWWW0mJOq016y0qb7I4Q0P/cagyu8= github.com/hashicorp/go-tfe v0.3.27/go.mod h1:DVPSW2ogH+M9W1/i50ASgMht8cHP7NxxK0nrY9aFikQ= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8 h1:PKbxRbsOP7R3f/TpdqcgXrO69T3yd9nLoR+RMRUxSxA= github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid 
v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= @@ -970,7 +924,6 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= github.com/hashicorp/hcl/v2 v2.1.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= -github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggUE= github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= github.com/hashicorp/hcl/v2 v2.5.0 h1:tnNRfUho4o/6qLTqd54gj9Gs5AWmdc0tG8YdElu6MEw= github.com/hashicorp/hcl/v2 v2.5.0/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= @@ -995,7 +948,6 @@ github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8 github.com/hashicorp/terraform-config-inspect v0.0.0-20191121111010-e9629612a215/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7 h1:Pc5TCv9mbxFN6UVX0LH6CpQrdTM5YjbVI2w15237Pjk= github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= -github.com/hashicorp/terraform-json v0.4.0 h1:KNh29iNxozP5adfUFBJ4/fWd0Cu3taGgjHB38JYqOF4= github.com/hashicorp/terraform-json v0.4.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= github.com/hashicorp/terraform-json v0.5.0 h1:7TV3/F3y7QVSuN4r9BEXqnWqrAyeOtON8f0wvREtyzs= github.com/hashicorp/terraform-json v0.5.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= @@ -1051,7 +1003,6 @@ github.com/hooklift/assert v0.0.0-20170704181755-9d1defd6d214 h1:WgfvpuKg42WVLkx github.com/hooklift/assert v0.0.0-20170704181755-9d1defd6d214/go.mod h1:kj6hFWqfwSjFjLnYW5PK1DoxZ4O0uapwHRmd9jhln4E= github.com/hooklift/iso9660 v1.0.0 h1:GYN0ejrqTl1qtB+g+ics7xxWHp7J2B1zmr25O9EyG3c= github.com/hooklift/iso9660 v1.0.0/go.mod h1:sOC47ru8lB0DlU0EZ7BJ0KCP5rDqOvx0c/5K5ADm8H0= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= @@ -1059,7 +1010,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= github.com/imdario/mergo v0.3.9/go.mod 
h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -1092,11 +1042,8 @@ github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xl github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jonboulle/clockwork v0.0.0-20141017032234-72f9bd7c4e0c/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= -github.com/joyent/triton-go v0.0.0-20190112182421-51ffac552869 h1:BvV6PYcRz0yGnWXNZrd5wginNT1GfFfPvvWpPbjfFL8= github.com/joyent/triton-go v0.0.0-20190112182421-51ffac552869/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -1105,7 +1052,6 @@ github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -1113,13 +1059,11 @@ github.com/jsonnet-bundler/jsonnet-bundler v0.2.0/go.mod h1:/by7P/OoohkI3q4CgSFq github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jteeuwen/go-bindata v3.0.8-0.20151023091102-a0ff2567cfb7+incompatible/go.mod h1:JVvhzYOiGBnFSYRyV00iY8q7/0PThjIYav1p9h5dmKs= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= @@ -1142,14 +1086,12 @@ 
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM52 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -1160,7 +1102,6 @@ github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/pty v1.1.8 h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -1174,7 +1115,6 @@ github.com/leanovate/gopter v0.2.4/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkO github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/libvirt/libvirt-go v4.10.0+incompatible/go.mod h1:34zsnB4iGeOv7Byj6qotuW8Ya4v4Tr43ttjz/F0wjLE= github.com/libvirt/libvirt-go v5.0.0+incompatible/go.mod h1:34zsnB4iGeOv7Byj6qotuW8Ya4v4Tr43ttjz/F0wjLE= @@ -1190,7 +1130,6 @@ github.com/lightstep/lightstep-tracer-go v0.18.0/go.mod h1:jlF1pusYV4pidLvZ+XD0U github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= -github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82 h1:wnfcqULT+N2seWf6y4yHzmi7GD2kNx4Ute0qArktD48= github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82/go.mod h1:y54tfGmO3NKssKveTEFFzH8C/akrSOy/iW9qEAUDV84= github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0/go.mod 
h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -1218,7 +1157,6 @@ github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -1247,7 +1185,6 @@ github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOq github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= @@ -1257,15 +1194,13 @@ github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyex github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU= github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5/go.mod h1:+pmbihVqjC3GPdfWv1V2TnRSuVvwrWLKfEP/MZVB/Wc= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.0.8 h1:Zi8HNpze3NeRWH1PQV6O71YcvJRQ6j0lORO6DAEmAAI= +github.com/miekg/dns v1.0.8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mikefarah/yaml/v2 v2.4.0/go.mod h1:ahVqZF4n1W4NqwvVnZzC4es67xsW9uR/RRf2RRxieJU= github.com/mikefarah/yq/v2 v2.4.1/go.mod h1:i8SYf1XdgUvY2OFwSqGAtWOOgimD2McJ6iutoxRm4k0= github.com/minio/minio-go/v6 v6.0.49/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= -github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.1 h1:J64v/xD7Clql+JVKSvkYojLOXu1ibnY9ZjGLwSt/89w= github.com/mitchellh/cli v1.1.1/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= @@ -1281,7 +1216,6 @@ github.com/mitchellh/go-linereader v0.0.0-20190213213312-1b945b3263eb/go.mod h1: github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= 
github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= @@ -1296,7 +1230,6 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.0 h1:iDwIio/3gk2QtLLEsqU5lInaMzos0hDTz8a6lazSFVw= github.com/mitchellh/mapstructure v1.3.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -1311,6 +1244,7 @@ github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1324,7 +1258,6 @@ github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1: github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= github.com/mozilla/tls-observatory v0.0.0-20200220173314-aae45faa4006/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= github.com/mozillazg/go-cos v0.13.0/go.mod h1:Zp6DvvXn0RUOXGJ2chmWt2bLEqRAnJnS3DnAZsJsoaE= -github.com/mozillazg/go-httpheader v0.2.1 h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ= github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -1334,7 +1267,6 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= github.com/nakagami/firebirdsql 
v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= -github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= @@ -1346,7 +1278,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ= github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= @@ -1363,8 +1296,9 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -1374,16 +1308,15 @@ github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest 
v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU= github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= @@ -1413,14 +1346,14 @@ github.com/openshift/cluster-api v0.0.0-20191030113141-9a3a7bbe9258/go.mod h1:T1 github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668 h1:IDZyg/Kye98ptqpc9j9rzPjZJlijjEDe8g7TZ67CmLU= github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668/go.mod h1:T18COkr6nLh9RyZKPMP7YjnwBME7RX8P2ar1SQbBltM= github.com/openshift/cluster-api-actuator-pkg v0.0.0-20190614215203-42228d06a2ca/go.mod h1:KNPaA64x3Ok7z538kvS2acwC5fEwvPfF0RdTx2geQEE= -github.com/openshift/cluster-api-provider-aws v0.2.1-0.20200506073438-9d49428ff837 h1:6qTilJ3hni/s6A/X7lyI7zGNNnMEzh0DzH6VBZzxprA= -github.com/openshift/cluster-api-provider-aws v0.2.1-0.20200506073438-9d49428ff837/go.mod h1:aXOt4gMtzXQxymPRm98vJAVmGjDhcTXsrQHauiNJK3o= +github.com/openshift/cluster-api-provider-aws v0.2.1-0.20201022175424-d30c7a274820 h1:KcRhMsepLIShTZT3xVBPUWHfxckNfpPP068Pwpe3U8k= +github.com/openshift/cluster-api-provider-aws v0.2.1-0.20201022175424-d30c7a274820/go.mod h1:rDwmh/vpz6mUU/l9QLWeaoGpUeC+b3yyI34xnp3tIf8= github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20200120114645-8a9592f1f87b h1:xVFJ2MCv6QBBk49FEzMmdQF/21qr8x5UhW1NFqZ13oM= github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20200120114645-8a9592f1f87b/go.mod h1:LPNjFna6F+ePHaXM/7QIyCF0sLsEtfuN16yY9sFZJ40= github.com/openshift/cluster-api-provider-baremetal v0.0.0-20190821174549-a2a477909c1d h1:6+XwaVvSMPHm3nFdZW3g+iXiOHpf0Y2ajY5/Zr66Dt0= github.com/openshift/cluster-api-provider-baremetal v0.0.0-20190821174549-a2a477909c1d/go.mod h1:S+wtA0Rm2FZ5ccC9zNQXUWUDesR6Jsdn5eb6HjAR+Gs= -github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200120152131-1b09fd9e7156 h1:FigvMhtTF7jCNkKvSwE3xP583ylW/CFcr0Jr6ZH698o= -github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200120152131-1b09fd9e7156/go.mod h1:KCyjaBfEkifs9bqV1HEXDJUyQylgeLSqiqt2QnMn7is= +github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201027164920-70f2f92e64ab h1:SpWhjMCXdiH1lvttWHLqAmnHWv4Wt7J0EuYUBBuQPPo= +github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201027164920-70f2f92e64ab/go.mod h1:S38HjVtBmaX6PHq99updVereupkHcwcOEM5jq6rTILI= github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20191219173431-2336783d4603 h1:MC6BSZYxFPoqqKj9PdlGjHGVKcMsvn6Kv1NiVzQErZ8= github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20191219173431-2336783d4603/go.mod h1:7pQ9Bzha+ug/5zd+0ufbDEcnn2OnNlPwRwYrzhXk4NM= github.com/openshift/cluster-api-provider-openstack v0.0.0-20200526112135-319a35b2e38e h1:qDiAnW8lxrOqt3iiC1Ph1W+2E1s05OGO4NlIs4YIcOc= @@ -1428,21 +1361,13 @@ github.com/openshift/cluster-api-provider-openstack v0.0.0-20200526112135-319a35 github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20200504092944-27473ea1ae43 
h1:JO7t5tJcLiE0gk7VrdzKrJAcOv73GirpUxH/OvrOVms= github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20200504092944-27473ea1ae43/go.mod h1:Vl/bvZulLw6PdUADIFWGfoTWH1O4L1B80eN7BtLYEuo= github.com/openshift/cluster-autoscaler-operator v0.0.0-20190521201101-62768a6ba480/go.mod h1:/XmV44Fh28Vo3Ye93qFrxAbcFJ/Uy+7LPD+jGjmfJYc= -github.com/openshift/cluster-version-operator v3.11.1-0.20190629164025-08cac1c02538+incompatible/go.mod h1:0BbpR1mrN0F2ZRae5N1XHcytmkvVPaeKgSQwRRBWugc= github.com/openshift/hashicorp-terraform-plugin-sdk v1.14.0-openshift h1:CuH9qNELLH3y0QoSaLchdG+7We75AO4kNBy6P3+oLug= github.com/openshift/hashicorp-terraform-plugin-sdk v1.14.0-openshift/go.mod h1:t62Xy+m7Zjq5tA2vrs8Wuo/TQ0sc9Mx9MjXL3+7MHBQ= github.com/openshift/library-go v0.0.0-20191003152030-97c62d8a2901/go.mod h1:NBttNjZpWwup/nthuLbPAPSYC8Qyo+BBK5bCtFoyYjo= -github.com/openshift/library-go v0.0.0-20200324092245-db2a8546af81 h1:bNUcSdyoACkjI2USyvDbAMb6lCtghdz563b0bfhPC8A= -github.com/openshift/library-go v0.0.0-20200324092245-db2a8546af81/go.mod h1:Qc5duoXHzAKyUfA0REIlG/rdfWzknOpp9SiDiyg5Y7A= -github.com/openshift/library-go v0.0.0-20200831114015-2ab0c61c15de h1:V984tJombwXeUvZaMiMSzN6yOiHUdd1kWLHS1a54Yrw= github.com/openshift/library-go v0.0.0-20200831114015-2ab0c61c15de/go.mod h1:6vwp+YhYOIlj8MpkQKkebTTSn2TuYyvgiAFQ206jIEQ= +github.com/openshift/library-go v0.0.0-20200909173121-1d055d971916/go.mod h1:6vwp+YhYOIlj8MpkQKkebTTSn2TuYyvgiAFQ206jIEQ= github.com/openshift/library-go v0.0.0-20201022113156-a4ff9e1d2900 h1:/YeQT9OiXUb8inRI5EB+1hbWxbO8vxo3BYkC/aUXiRY= github.com/openshift/library-go v0.0.0-20201022113156-a4ff9e1d2900/go.mod h1:qbwvTwCy4btqEcqU3oI59CopNgcRgZUPXG4Y2jc+B4E= -github.com/openshift/machine-api-operator v0.0.0-20190312153711-9650e16c9880/go.mod h1:7HeAh0v04zQn1L+4ItUjvpBQYsm2Nf81WaZLiXTcnkc= -github.com/openshift/machine-api-operator v0.2.1-0.20191128180243-986b771e661d/go.mod h1:9qQPF00anuIsc6RiHYfHE0+cZZImbvFNLln0NRBVVMg= -github.com/openshift/machine-api-operator v0.2.1-0.20200402110321-4f3602b96da3/go.mod h1:46g2eLjzAcaNURYDvhGu0GhyjKzOlCPqixEo68lFBLs= -github.com/openshift/machine-api-operator v0.2.1-0.20200429102619-d36974451290 h1:0QnZvWW2X/4fCmIlOWsm3FmHZnsh2sCBfsQE/ujGhsw= -github.com/openshift/machine-api-operator v0.2.1-0.20200429102619-d36974451290/go.mod h1:QkhH+/6BXabl+4HmiLwx9/bmW1ieCGF9km7xz22Ozl0= github.com/openshift/machine-config-operator v0.0.1-0.20201009041932-4fe8559913b8 h1:C4gCipkWTDp0B9jb0wZdLgB+HWC7EzVVwQOeNaKnTRA= github.com/openshift/machine-config-operator v0.0.1-0.20201009041932-4fe8559913b8/go.mod h1:fjKreLaKEeUKsyIkT4wlzIQwUVJ2ZKDUh3CI73ckYIY= github.com/openshift/origin v0.0.0-20160503220234-8f127d736703/go.mod h1:0Rox5r9C8aQn6j1oAOQ0c1uC86mYbUFObzjBRvUKHII= @@ -1481,7 +1406,6 @@ github.com/otiai10/mint v1.2.3/go.mod h1:YnfyPNhBvnY8bW4SGQHCs/aAFhkgySlMZbrF5U0 github.com/otiai10/mint v1.2.4/go.mod h1:d+b7n/0R3tdyUYYylALXpWQ/kTN+QobSq/4SRGBkR3M= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/ovirt/go-ovirt v0.0.0-20200313072907-d30f754823a6/go.mod h1:fLDxPk1Sf64DBYtwIYxrnx3gPZ1q0xPdWdI1y9vxUaw= -github.com/ovirt/go-ovirt v0.0.0-20200428093010-9bcc4fd4e6c0 h1:9dTL3/s4HXoLerbZL/N6EVHX62JWXNMIxJ+ephNTTYI= github.com/ovirt/go-ovirt v0.0.0-20200428093010-9bcc4fd4e6c0/go.mod h1:fLDxPk1Sf64DBYtwIYxrnx3gPZ1q0xPdWdI1y9vxUaw= github.com/ovirt/go-ovirt v0.0.0-20200613023950-320a86f9df27 h1:jHcZg49imi3zydtFqly5vniMnFX7HxW27L9M095eLhI= github.com/ovirt/go-ovirt v0.0.0-20200613023950-320a86f9df27/go.mod 
h1:fLDxPk1Sf64DBYtwIYxrnx3gPZ1q0xPdWdI1y9vxUaw= @@ -1491,6 +1415,11 @@ github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOTh github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db h1:9uViuKtx1jrlXLBW/pMnhOfzn3iSEdLase/But/IZRU= github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= +github.com/packethost/packngo v0.4.1/go.mod h1:aRxUEV1TprXVcWr35v8tNYgZMjv7FHaInXx224vF2fc= +github.com/packethost/packngo v0.5.1 h1:y+jWcMnyArP3hVZRsBkAXTLhokuqdYg6JUmkshX2SMk= +github.com/packethost/packngo v0.5.1/go.mod h1:aRxUEV1TprXVcWr35v8tNYgZMjv7FHaInXx224vF2fc= +github.com/packethost/terraform-provider-packet v1.7.3-0.20201202165003-a5613b748108 h1:s9ccBJwDTOmg0Nk3b7y58oOl9oXyzIUCL+3bARmP1oQ= +github.com/packethost/terraform-provider-packet v1.7.3-0.20201202165003-a5613b748108/go.mod h1:HBMb6iZY/8T2e4rB4G6TVdawR6a23JSIMjzNe5hTtAw= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= @@ -1546,7 +1475,6 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.2.0/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= -github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= @@ -1569,7 +1497,6 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= @@ -1586,8 +1513,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= 
github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= @@ -1630,7 +1557,6 @@ github.com/satori/uuid v0.0.0-20160927100844-b061729afc07/go.mod h1:B8HLsPLik/YN github.com/satori/uuid v1.2.0 h1:6TFY4nxn5XwBx0gDfzbEMCNT6k4N/4FNIuN8RACZ0KI= github.com/satori/uuid v1.2.0/go.mod h1:B8HLsPLik/YNn6KKWVMDJ8nzCL8RP5WyfsnmvnAEwIU= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d/go.mod h1:w5+eXa0mYznDkHaMCXA4XYffjlH+cy1oyKbfzJXa2Do= github.com/securego/gosec v0.0.0-20200316084457-7da9f46445fd/go.mod h1:NurAFZsWJAEZjogSwdVPlHkOZB3DOAU7gsPP8VFZCHc= @@ -1675,7 +1601,6 @@ github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjM github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q= github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -1689,7 +1614,6 @@ github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:s github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= @@ -1702,12 +1626,10 @@ github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.0-20180319062004-c439c4fa0937/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v0.0.6 h1:breEStsVwemnKh2/s6gMvSdMEkwW0sK8vGStnlVBMCs= github.com/spf13/cobra v0.0.6/go.mod 
h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -1737,13 +1659,11 @@ github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d h1:Z4EH+5EffvBEhh37F0C0DnpklTMh00JOkjW5zK3ofBI= github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d/go.mod h1:BSTlc8jOjh0niykqEGVXOLXdi9o0r0kR8tCYiMvjFgw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1754,6 +1674,10 @@ github.com/terraform-provider-openstack/terraform-provider-openstack v1.32.0 h1: github.com/terraform-provider-openstack/terraform-provider-openstack v1.32.0/go.mod h1:Xm/accuOkyS8NkNp9HwqNMPu5rAFnHP7g/2uNRZbO8c= github.com/terraform-providers/terraform-provider-azuread v0.9.0 h1:XLzFgVHakq6qjJ2L0o/tN2yHu/hT4vIW9sKtejr7gPs= github.com/terraform-providers/terraform-provider-azuread v0.9.0/go.mod h1:sSDzB/8CD639+yWo5lZf+NJvGSYQBSS6z+GoET9IrzE= +github.com/terraform-providers/terraform-provider-dns v0.0.0-20190724194907-24de60f297b6 h1:7DIDiI0H7f0ksjSBnw5ojEfUeWBlc87oGp3TJ3kfJVE= +github.com/terraform-providers/terraform-provider-dns v0.0.0-20190724194907-24de60f297b6/go.mod h1:aUpZ80xerw1PT+j8NO7UDtgVHc/DU4xyHQJarln9oq8= +github.com/terraform-providers/terraform-provider-dns v0.0.0-20191209223915-3fb1c1918eb1 h1:fsepTKeS1gKsCk3ODLeaSAx1kinBljzsTPtQDqOfreE= +github.com/terraform-providers/terraform-provider-dns v0.0.0-20191209223915-3fb1c1918eb1/go.mod h1:t5ie+C2S9ezG7YeW5JSuhBKFZ0MDq/xcz7SbRplcjnA= github.com/terraform-providers/terraform-provider-google v1.20.1-0.20200623174414-27107f2ee160 h1:Ghc1PD9TckxsdyP1BG+dM8q84cNNyL93qRmYq/PVNow= github.com/terraform-providers/terraform-provider-google v1.20.1-0.20200623174414-27107f2ee160/go.mod h1:QxehqxV8Swl+O2JXJUdS6orHYJXWUEr4HFfYH5JV9ew= github.com/terraform-providers/terraform-provider-ignition v1.2.1 h1:dlRZGcokysc9Z2gTVm+neSghAMv9/2WA/pYiGZ6JHCg= @@ -1774,7 +1698,6 @@ github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiff github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy 
v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tombuildsstuff/giovanni v0.10.0 h1:XqZBPVD2hETa30FFdMz/zVfnidMnUrIMMmKIH7hWnWA= github.com/tombuildsstuff/giovanni v0.10.0/go.mod h1:WwPhFP2+WnhJzvPYDnsyBab2wOIksMX6xm+Tg+jVvKw= @@ -1794,7 +1717,6 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA= github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/ulikunitz/xz v0.5.6 h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8= github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/ulikunitz/xz v0.5.7 h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4= github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= @@ -1839,7 +1761,6 @@ github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4m github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= @@ -1849,6 +1770,8 @@ github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6Ut github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= @@ -1856,7 +1779,6 @@ github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLE github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.1.1/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8= github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty 
v1.4.0 h1:+q+tmgyUB94HIdH/uVTIi/+kt3pt4sHwEZAcTyLoGsQ= github.com/zclconf/go-cty v1.4.0/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ= @@ -1869,7 +1791,6 @@ go.elastic.co/apm/module/apmhttp v1.5.0/go.mod h1:1FbmNuyD3ddauwzgVwFB0fqY6KbZt3 go.elastic.co/apm/module/apmot v1.5.0/go.mod h1:d2KYwhJParTpyw2WnTNy8geNlHKKFX+4oK3YLlsesWE= go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= @@ -1887,10 +1808,10 @@ go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1910,7 +1831,6 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -go4.org v0.0.0-20191010144846-132d2879e1e9 h1:zHLoVtbywceo2hE4Wqv8CmIufe7jDERQ2KJHZoSDfCU= go4.org v0.0.0-20191010144846-132d2879e1e9/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= go4.org v0.0.0-20200104003542-c7e774b10ea0 h1:M6XsnQeLwG+rHQ+/rrGh3puBI3WZEy9TBWmf2H+enQA= go4.org v0.0.0-20200104003542-c7e774b10ea0/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= @@ -1920,7 +1840,6 @@ golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1951,15 +1870,12 @@ golang.org/x/crypto 
v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79 h1:IaQbIIB2X/Mp/DKctl6ROxz1KyMlKp4uyvL6+kQ7C88= golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1968,13 +1884,11 @@ golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxT golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191029154019-8994fa331a53/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587 h1:5Uz0rkjCFu9BC9gCRN7EkwVvhNyQgGWb8KNJrPwBoHY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1996,7 +1910,6 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -2051,15 +1964,17 @@ golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f h1:QBjCr1Fz5kw158VqdE9JfI9cJnl/ymnJWAdMuinqL7Y= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2070,18 +1985,18 @@ golang.org/x/oauth2 v0.0.0-20190319182350-c85d3e98c914/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc= 
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2129,9 +2044,9 @@ golang.org/x/sys v0.0.0-20190730183949-1393eb018365/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2152,7 +2067,6 @@ golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2160,16 +2074,21 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d h1:nc5K6ox/4lTFbMVSL9WRR81ixkcwXThoiF6yf+R9scA= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f h1:mOhmO9WsBaJCNmaZHPtHs9wOcdqdKCjF6OPJlmDM3KI= golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 h1:5/PjkGUjvEU5Gl6BxmvKRPpqo2uNMv4rcHBMwzk/st8= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13 h1:5jaG59Zhd+8ZXe8C+lgiAGqkOaZBruqrWclLkgAww34= +golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2178,14 +2097,12 @@ golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.1-0.20180805044716-cb6730876b98/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2229,7 +2146,6 @@ golang.org/x/tools v0.0.0-20190718200317-82a3ea8a504c/go.mod h1:jcCCGcm9btYwXyDq golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190909030654-5b82db07426d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2271,27 +2187,32 @@ golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWc golang.org/x/tools v0.0.0-20200409170454-77362c5149f0/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200430192856-2840dafb9ee1 h1:OlmCHPqCyX+EpIpxG55cfMJuINAFd7HHTdWwA3yyelQ= golang.org/x/tools v0.0.0-20200430192856-2840dafb9ee1/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200616195046-dc31b401abb5 h1:UaoXseXAWUJUcuJ2E2oczJdLxAJXL0lOmVaBl7kuk+I= golang.org/x/tools v0.0.0-20200616195046-dc31b401abb5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools 
v0.0.0-20201020123448-f5c826d1900e h1:1bM10t4JLbCgfLBtSABt3dzY9IXomQvX+XB61h5xfr8= +golang.org/x/tools v0.0.0-20201020123448-f5c826d1900e/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gomodules.xyz/jsonpatch/v3 v3.0.1/go.mod h1:CBhndykehEwTOlEfnsfJwvkFQbSN8YZFr9M+cIHAJto= gomodules.xyz/orderedmap v0.1.0/go.mod h1:g9/TPUCm1t2gwD3j3zfV8uylyYhVdCNSi+xCEIu7yTU= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= -gonum.org/v1/gonum v0.0.0-20190915125329-975d99cd20a9/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= @@ -2306,7 +2227,6 @@ google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEt google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0 h1:Q3Ui3V3/CVinFWFiW39Iw0kMuVrRzYX0wN6OPFp0lTA= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= @@ -2317,8 +2237,13 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.21.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.23.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.25.0 h1:LodzhlzZEUfhXzNUMIfVlf9Gr6Ua5MMtoFWh7+f47qA= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod 
h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.33.0 h1:+gL0XvACeMIvpwLZ5rQZzLn5cwOsgg8dIcfJ2SYfBVw= +google.golang.org/api v0.33.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2326,13 +2251,10 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.4/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= -google.golang.org/genproto v0.0.0-20170731182057-09f6ed296fc6/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -2361,7 +2283,6 @@ google.golang.org/genproto v0.0.0-20191203220235-3fa9dbf08042/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150 h1:VPpdpQkGvFicX9yo4G5oxZPi9ALBnEOZblPSa/Wa2m4= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -2370,17 +2291,21 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200310143817-43be25429f5a/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940 h1:MRHtG0U6SnaUb+s+LhNE1qt1FQ1wlhqr5E4usBKC0uA= 
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200507105951-43844f6eee31 h1:Bz1qTn2YRWV+9OKJtxHJiQKCiXIdf+kwuKXdt9cBxyU= google.golang.org/genproto v0.0.0-20200507105951-43844f6eee31/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d h1:92D1fum1bJLKSdr11OJ+54YeCMCGYIygTA7R/YZxH5M= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2394,27 +2319,28 @@ google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod 
h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/AlecAivazis/survey.v1 v1.8.9-0.20200217094205-6773bdf39b7f h1:AQkMzsSzHWrgZWqGRpuRaRPDmyNibcXlpGcnQJ7HxZw= gopkg.in/AlecAivazis/survey.v1 v1.8.9-0.20200217094205-6773bdf39b7f/go.mod h1:CaHjv79TCgAvXMSFJSVgonHXYWxnhzI3eoHtnX5UgUo= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= @@ -2423,14 +2349,12 @@ gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUy gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= @@ -2445,19 +2369,15 @@ gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ldap.v2 
v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= -gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= gopkg.in/ory-am/dockertest.v3 v3.3.4/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKekYC6CovU+ek= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.0.0-20180411045311-89060dee6a84/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.1.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -2467,14 +2387,11 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20190502103701-55513cacd4ae/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966 h1:B0J02caTR6tpSJozBJyiAzT6CtBzjclw4pgm9gg8Ys0= gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20191010095647-fc94e3f71652 h1:VKvJ/mQ4BgCjZUDggYFxTe0qv9jPMHsZPD4Xt91Y5H4= gopkg.in/yaml.v3 v3.0.0-20191010095647-fc94e3f71652/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -2489,16 +2406,15 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools 
v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= k8s.io/api v0.19.0 h1:XyrFIJqTYZJ2DU7FBE/bSPz7b1HvbVBuBf07oeo6eTc= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= -k8s.io/apiextensions-apiserver v0.0.0-20190918201827-3de75813f604/go.mod h1:7H8sjDlWQu89yWB3FhZfsLyRCRLuoXoCoY5qtwW1q6I= k8s.io/apiextensions-apiserver v0.0.0-20191121021419-88daf26ec3b8/go.mod h1:NMIy5Wa/or8CsLhYRleOp9CWAHVdcWpzT6Ufx1SNVjA= k8s.io/apiextensions-apiserver v0.16.7/go.mod h1:6xYRp4trGp6eT5WZ6tPi/TB2nfWQCzwUvBlpg8iswe0= k8s.io/apiextensions-apiserver v0.17.0/go.mod h1:XiIFUakZywkUl54fVXa7QTEHcqQz9HG55nHd1DCoHj8= @@ -2508,7 +2424,7 @@ k8s.io/apiextensions-apiserver v0.17.4/go.mod h1:rCbbbaFS/s3Qau3/1HbPlHblrWpFivo k8s.io/apiextensions-apiserver v0.18.0-beta.2/go.mod h1:Hnrg5jx8/PbxRbUoqDGxtQkULjwx8FDW4WYJaKNK+fk= k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo= k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY= -k8s.io/apiextensions-apiserver v0.19.0 h1:jlY13lvZp+0p9fRX2khHFdiT9PYzT7zUrANz6R1NKtY= +k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M= k8s.io/apiextensions-apiserver v0.19.0/go.mod h1:znfQxNpjqz/ZehvbfMg5N6fvBJW5Lqu5HVLTJQdP4Fs= k8s.io/apiextensions-apiserver v0.19.2 h1:oG84UwiDsVDu7dlsGQs5GySmQHCzMhknfhFExJMz9tA= k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg= @@ -2516,7 +2432,6 @@ k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9 k8s.io/apimachinery v0.0.0-20190409092423-760d1845f48b/go.mod h1:FW86P8YXVLsbuplGMZeb20J3jYHscrDqw4jELaFJvRU= k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= -k8s.io/apimachinery v0.0.0-20190817020851-f2f3a405f61d/go.mod h1:3jediapYqJ2w1BFw7lAZPCx7scubsTfosqHkhXCWJKw= k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2/go.mod h1:dXFS2zaQR8fyzuvRdJDHw2Aerij/yVGJSre0bZQSVJA= k8s.io/apimachinery v0.0.0-20191121015412-41065c7a8c2a/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= @@ -2526,16 +2441,14 @@ k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZ k8s.io/apimachinery v0.17.3/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= k8s.io/apimachinery v0.18.0-beta.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/apimachinery v0.18.0-rc.1/go.mod 
h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= -k8s.io/apimachinery v0.19.0 h1:gjKnAda/HZp5k4xQYjL0K/Yb66IvNqjthCb03QlKpaQ= +k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc= k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= -k8s.io/apiserver v0.0.0-20190918200908-1e17798da8c1/go.mod h1:4FuDU+iKPjdsdQSN3GsEKZLB/feQsj1y9dhhBDVV2Ns= k8s.io/apiserver v0.0.0-20191121020624-6eed2f5a3289/go.mod h1:7P+0qMKoaggchirHLUSCVD22ohdkjN19+qQOKcAdfbI= k8s.io/apiserver v0.16.7/go.mod h1:/5zSatF30/L9zYfMTl55jzzOnx7r/gGv5a5wtRp8yAw= k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg= @@ -2543,21 +2456,19 @@ k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= k8s.io/apiserver v0.17.3/go.mod h1:iJtsPpu1ZpEnHaNawpSV0nYTGBhhX2dUlnn7/QS7QiY= k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I= k8s.io/apiserver v0.18.0-beta.2/go.mod h1:bnblMkMoCFnIfVnVftd0SXJPzyvrk3RtaqSbblphF/A= -k8s.io/apiserver v0.18.0-rc.1/go.mod h1:RYE9w2Lijk1aWW3i3pS7kFGU0Afof+UDoOz1qW9aSYg= k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw= k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw= +k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg= k8s.io/apiserver v0.19.0/go.mod h1:XvzqavYj73931x7FLtyagh8WibHpePJ1QwWrSJs2CLk= k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= k8s.io/autoscaler v0.0.0-20190607113959-1b4f1855cb8e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA= k8s.io/cli-runtime v0.17.2/go.mod h1:aa8t9ziyQdbkuizkNLAw3qe3srSyWh9zlSB7zTqRNPI= k8s.io/cli-runtime v0.17.3/go.mod h1:X7idckYphH4SZflgNpOOViSxetiMj6xI0viMAjM81TA= k8s.io/cli-runtime v0.17.4/go.mod h1:IVW4zrKKx/8gBgNNkhiUIc7nZbVVNhc1+HcQh+PiNHc= -k8s.io/cli-runtime v0.18.0-rc.1/go.mod h1:yuKZYDG8raONmwjwIkT77lCfIuPwX+Bsp88MKYf1TlU= k8s.io/cli-runtime v0.19.0/go.mod h1:tun9l0eUklT8IHIM0jors17KmUjcrAxn0myoBYwuNuo= k8s.io/client-go v0.19.0 h1:1+0E0zfWFIWeyRhQYWzimJOyAk2UT7TiARaLNwJCf7k= k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= k8s.io/cluster-bootstrap v0.0.0-20190202014938-c9acc0c1bea2/go.mod h1:iBSm2nwo3OaiuW8VDvc3ySDXK5SKfUrxwPvBloKG7zg= -k8s.io/code-generator v0.0.0-20190612205613-18da4a14b22b/go.mod h1:G8bQwmHm2eafm5bgtX67XDZQ8CWKSGu9DekI+yN4Y5I= k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= k8s.io/code-generator v0.0.0-20191003035328-700b1226c0bd/go.mod h1:HC9p4y3SBN+txSs8x57qmNPXFZ/CxdCHiDTNnocCSEw= k8s.io/code-generator v0.0.0-20191121015212-c4c8f8345c7e/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= @@ -2567,14 +2478,13 @@ k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+ k8s.io/code-generator v0.17.3/go.mod h1:l8BLVwASXQZTo2xamW5mQNFCe1XPiAesVq7Y1t7PiQQ= k8s.io/code-generator v0.17.4/go.mod h1:l8BLVwASXQZTo2xamW5mQNFCe1XPiAesVq7Y1t7PiQQ= 
k8s.io/code-generator v0.18.0-beta.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= -k8s.io/code-generator v0.18.0-rc.1/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= +k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= -k8s.io/component-base v0.0.0-20190918200425-ed2f0867c778/go.mod h1:DFWQCXgXVLiWtzFaS17KxHdlUeUymP7FLxZSkmL9/jU= k8s.io/component-base v0.0.0-20191121020327-771114ba3383/go.mod h1:tv9ITs6VEFWkF+kHwY4GiFvDr9vUGKJ4X/8+Z+oqVLk= k8s.io/component-base v0.16.7/go.mod h1:ikdyfezOFMu5O0qJjy/Y9eXwj+fV3pVwdmt0ulVcIR0= k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc= @@ -2582,12 +2492,11 @@ k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1a k8s.io/component-base v0.17.3/go.mod h1:GeQf4BrgelWm64PXkIXiPh/XS0hnO42d9gx9BtbZRp8= k8s.io/component-base v0.17.4/go.mod h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I9lE= k8s.io/component-base v0.18.0-beta.2/go.mod h1:HVk5FpRnyzQ/MjBr9//e/yEBjTVa2qjGXCTuUzcD7ks= -k8s.io/component-base v0.18.0-rc.1/go.mod h1:NNlRaxZEdLqTs2+6yXiU2SHl8gKsbcy19Ii+Sfq53RM= k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c= k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM= +k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14= k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y= k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= -k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190907103519-ebc107f98eab/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -2605,14 +2514,11 @@ k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.3.0 h1:WmkrnW7fdrm0/DMClc+HIxtftvxVIPAhlVwMQo5yLco= k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/kube-aggregator v0.0.0-20190404125450-f5e124c822d6/go.mod h1:8sbzT4QQKDEmSCIbfqjV0sd97GpUT7A4W626sBiYJmU= k8s.io/kube-aggregator v0.17.3/go.mod h1:1dMwMFQbmH76RKF0614L7dNenMl3dwnUJuOOyZ3GMXA= k8s.io/kube-aggregator v0.18.0-beta.2/go.mod h1:O3Td9mheraINbLHH4pzoFP2gRzG0Wk1COqzdSL4rBPk= -k8s.io/kube-aggregator v0.18.0-rc.1/go.mod h1:35N7x/aAF8C5rEU78J+3pJ/k9v/8LypeWbzqBAEWA1I= k8s.io/kube-aggregator v0.19.0/go.mod 
h1:1Ln45PQggFAG8xOqWPIYMxUq8WNtpPnYsbUJ39DpF/A= k8s.io/kube-aggregator v0.19.2/go.mod h1:wVsjy6OTeUrWkgG9WVsGftnjpm8JIY0vJV7LH2j4nhM= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= @@ -2620,7 +2526,6 @@ k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= @@ -2630,16 +2535,13 @@ k8s.io/kube-state-metrics v1.7.2/go.mod h1:U2Y6DRi07sS85rmVPmBFlmv+2peBcL8IWGjM+ k8s.io/kubectl v0.17.2/go.mod h1:y4rfLV0n6aPmvbRCqZQjvOp3ezxsFgpqL+zF5jH/lxk= k8s.io/kubectl v0.17.3/go.mod h1:NUn4IBY7f7yCMwSop2HCXlw/MVYP4HJBiUmOR3n9w28= k8s.io/kubectl v0.17.4/go.mod h1:im5QWmh6fvtmJkkNm4HToLe8z9aM3jihYK5X/wOybcY= -k8s.io/kubectl v0.18.0-rc.1/go.mod h1:UpG1w7klD633nyMS73/29cNl2tMdEbXU0nWupttyha4= k8s.io/kubectl v0.19.0/go.mod h1:gPCjjsmE6unJzgaUNXIFGZGafiUp5jh0If3F/x7/rRg= k8s.io/kubelet v0.19.0/go.mod h1:cGds22piF/LnFzfAaIT+efvOYBHVYdunqka6NVuNw9g= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw= k8s.io/metrics v0.17.3/go.mod h1:HEJGy1fhHOjHggW9rMDBJBD3YuGroH3Y1pnIRw9FFaI= k8s.io/metrics v0.17.4/go.mod h1:6rylW2iD3M9VppnEAAtJASY1XS8Pt9tcYh+tHxBeV3I= -k8s.io/metrics v0.18.0-rc.1/go.mod h1:ME3EkXCyiZ7mVFEiAYKBfuo3JkpgggeATG+DBUQby5o= k8s.io/metrics v0.19.0/go.mod h1:WykpW8B60OeAJx1imdwUgyOID2kDljr/Q+1zrPJ98Wo= -k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20190529001817-6999998975a7/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= @@ -2649,8 +2551,7 @@ k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200229041039-0a110f9eb7ab/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200327001022-6496210b90e8 h1:6JFbaLjRyBz8K2Jvt+pcT+N3vvwMZfg8MfVENwe9aag= -k8s.io/utils v0.0.0-20200327001022-6496210b90e8/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod 
h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= @@ -2666,7 +2567,6 @@ mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZI rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= @@ -2674,24 +2574,20 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQb sigs.k8s.io/controller-runtime v0.0.0-20190520212815-96b67f231945/go.mod h1:TSH2R0nSz4WAlUUlNnOFcOR/VUhfwBLlmtq2X6AiQCA= sigs.k8s.io/controller-runtime v0.2.0-beta.2/go.mod h1:TSH2R0nSz4WAlUUlNnOFcOR/VUhfwBLlmtq2X6AiQCA= sigs.k8s.io/controller-runtime v0.2.0/go.mod h1:ZHqrRDZi3f6BzONcvlUxkqCKgwasGk5FZrnSv9TVZF4= -sigs.k8s.io/controller-runtime v0.3.1-0.20191016212439-2df793d02076/go.mod h1:p2vzQ3RuSVv9YR4AcM0y8TKHQA+0oLXazKFt6Z0OdS8= sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns= sigs.k8s.io/controller-runtime v0.5.1-0.20200330174416-a11a908d91e0/go.mod h1:j4echH3Y/UPHRpXS65rxGXujda8iWOheMQvDh1uNgaY= -sigs.k8s.io/controller-runtime v0.5.2 h1:pyXbUfoTo+HA3jeIfr0vgi+1WtmNh0CwlcnQGLXwsSw= sigs.k8s.io/controller-runtime v0.5.2/go.mod h1:JZUwSMVbxDupo0lTJSSFP5pimEyxGynROImSsqIOx1A= +sigs.k8s.io/controller-runtime v0.6.2 h1:jkAnfdTYBpFwlmBn3pS5HFO06SfxvnTZ1p5PeEF/zAA= +sigs.k8s.io/controller-runtime v0.6.2/go.mod h1:vhcq/rlnENJ09SIRp3EveTaZ0yqH526hjf9iJdbUJ/E= sigs.k8s.io/controller-tools v0.2.2-0.20190919191502-76a25b63325a/go.mod h1:8SNGuj163x/sMwydREj7ld5mIMJu1cDanIfnx6xsU70= -sigs.k8s.io/controller-tools v0.2.2-0.20190930215132-4752ed2de7d2/go.mod h1:8SNGuj163x/sMwydREj7ld5mIMJu1cDanIfnx6xsU70= sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9uAiBVsaJSE= -sigs.k8s.io/controller-tools v0.2.9-0.20200331153640-3c5446d407dd/go.mod h1:D2LzYpGDYjxaAALDVYAwaqaKp2fNuyO5yfOBoU/cbBE= sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI= sigs.k8s.io/controller-tools v0.3.1-0.20200617211605-651903477185 h1:wLsmaqTEgs3DIfNzr0u/AfPHSVJbWHj/eevcS4AFvFE= sigs.k8s.io/controller-tools v0.3.1-0.20200617211605-651903477185/go.mod h1:JuPG+FXjAeZL7eGmTuXUJduEMlI2/kGqb0rUGlVi+Yo= sigs.k8s.io/kube-storage-version-migrator v0.0.3/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= -sigs.k8s.io/structured-merge-diff v0.0.0-20190302045857-e85c7b244fd2/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca h1:6dsH6AYQWbyZmtttJNe8Gq1cXOeS1BdV3eW37zHilAQ= sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= 
sigs.k8s.io/structured-merge-diff v1.0.2 h1:WiMoyniAVAYm03w+ImfF9IE2G23GLR/SwDnQyaNZvPk= diff --git a/pkg/asset/cluster/equinixmetal/OWNERS b/pkg/asset/cluster/equinixmetal/OWNERS new file mode 100644 index 00000000000..06e8c730e33 --- /dev/null +++ b/pkg/asset/cluster/equinixmetal/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +approvers: + - equinix-approvers +reviewers: + - equinix-reviewers diff --git a/pkg/asset/cluster/equinixmetal/equinixmetal.go b/pkg/asset/cluster/equinixmetal/equinixmetal.go new file mode 100644 index 00000000000..99ad0da1bf3 --- /dev/null +++ b/pkg/asset/cluster/equinixmetal/equinixmetal.go @@ -0,0 +1,17 @@ +// Package equinixmetal extracts equinixmetal metadata from install configurations. +package equinixmetal + +import ( + "github.com/openshift/installer/pkg/types" + equinixmetal "github.com/openshift/installer/pkg/types/equinixmetal" +) + +// Metadata converts an install configuration to EquinixMetal metadata. +func Metadata(config *types.InstallConfig) *equinixmetal.Metadata { + m := equinixmetal.Metadata{ + // Facility: config.Platform.EquinixMetal.Facility, + Metro: config.Platform.EquinixMetal.Metro, + ProjectID: config.Platform.EquinixMetal.ProjectID, + } + return &m +} diff --git a/pkg/asset/cluster/metadata.go b/pkg/asset/cluster/metadata.go index 95d779d0acf..6e6fc4d3f22 100644 --- a/pkg/asset/cluster/metadata.go +++ b/pkg/asset/cluster/metadata.go @@ -11,6 +11,7 @@ import ( "github.com/openshift/installer/pkg/asset/cluster/aws" "github.com/openshift/installer/pkg/asset/cluster/azure" "github.com/openshift/installer/pkg/asset/cluster/baremetal" + equinixmetal "github.com/openshift/installer/pkg/asset/cluster/equinixmetal" "github.com/openshift/installer/pkg/asset/cluster/gcp" "github.com/openshift/installer/pkg/asset/cluster/libvirt" "github.com/openshift/installer/pkg/asset/cluster/openstack" @@ -22,6 +23,7 @@ import ( awstypes "github.com/openshift/installer/pkg/types/aws" azuretypes "github.com/openshift/installer/pkg/types/azure" baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" + equinixtypes "github.com/openshift/installer/pkg/types/equinixmetal" gcptypes "github.com/openshift/installer/pkg/types/gcp" libvirttypes "github.com/openshift/installer/pkg/types/libvirt" nonetypes "github.com/openshift/installer/pkg/types/none" @@ -85,6 +87,8 @@ func (m *Metadata) Generate(parents asset.Parents) (err error) { metadata.ClusterPlatformMetadata.Ovirt = ovirt.Metadata(installConfig.Config) case vspheretypes.Name: metadata.ClusterPlatformMetadata.VSphere = vsphere.Metadata(installConfig.Config) + case equinixtypes.Name: + metadata.ClusterPlatformMetadata.EquinixMetal = equinixmetal.Metadata(installConfig.Config) case nonetypes.Name: default: return errors.Errorf("no known platform") diff --git a/pkg/asset/cluster/tfvars.go b/pkg/asset/cluster/tfvars.go index b87dd938f6a..98c8015da29 100644 --- a/pkg/asset/cluster/tfvars.go +++ b/pkg/asset/cluster/tfvars.go @@ -9,6 +9,7 @@ import ( "strings" igntypes "github.com/coreos/ignition/v2/config/v3_1/types" + equinixprovider "github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1" gcpprovider "github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1" libvirtprovider "github.com/openshift/cluster-api-provider-libvirt/pkg/apis/libvirtproviderconfig/v1beta1" ovirtprovider 
"github.com/openshift/cluster-api-provider-ovirt/pkg/apis/ovirtprovider/v1beta1" @@ -26,6 +27,7 @@ import ( "github.com/openshift/installer/pkg/asset/ignition/machine" "github.com/openshift/installer/pkg/asset/installconfig" awsconfig "github.com/openshift/installer/pkg/asset/installconfig/aws" + equinixconfig "github.com/openshift/installer/pkg/asset/installconfig/equinixmetal" gcpconfig "github.com/openshift/installer/pkg/asset/installconfig/gcp" openstackconfig "github.com/openshift/installer/pkg/asset/installconfig/openstack" ovirtconfig "github.com/openshift/installer/pkg/asset/installconfig/ovirt" @@ -37,6 +39,7 @@ import ( awstfvars "github.com/openshift/installer/pkg/tfvars/aws" azuretfvars "github.com/openshift/installer/pkg/tfvars/azure" baremetaltfvars "github.com/openshift/installer/pkg/tfvars/baremetal" + equinixtfvars "github.com/openshift/installer/pkg/tfvars/equinixmetal" gcptfvars "github.com/openshift/installer/pkg/tfvars/gcp" libvirttfvars "github.com/openshift/installer/pkg/tfvars/libvirt" openstacktfvars "github.com/openshift/installer/pkg/tfvars/openstack" @@ -46,6 +49,7 @@ import ( "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/installer/pkg/types/baremetal" + "github.com/openshift/installer/pkg/types/equinixmetal" "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" @@ -502,6 +506,42 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { Filename: fmt.Sprintf(TfPlatformVarsFileName, platform), Data: data, }) + case equinixmetal.Name: + config, err := equinixconfig.NewConfig() + if err != nil { + return err + } + auth := equinixtfvars.Auth{ + APIURL: config.APIURL, + APIKey: config.APIKey, + } + + /* + con, err := equinixconfig.NewConnection() + if err != nil { + return err + } + */ + // TODO(displague) Equinix Metal networking + + masters, err := mastersAsset.Machines() + if err != nil { + return err + } + + data, err := equinixtfvars.TFVars(equinixtfvars.TFVarsSources{ + Auth: auth, + ControlPlaneConfigs: []*equinixprovider.EquinixMetalMachineProviderConfig{ + masters[0].Spec.ProviderSpec.Value.Object.(*equinixprovider.EquinixMetalMachineProviderConfig), + }, + }) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: fmt.Sprintf(TfPlatformVarsFileName, platform), + Data: data, + }) case vsphere.Name: controlPlanes, err := mastersAsset.Machines() if err != nil { diff --git a/pkg/asset/ignition/bootstrap/bootstrap.go b/pkg/asset/ignition/bootstrap/bootstrap.go index 106dbe0502b..097bed703c3 100644 --- a/pkg/asset/ignition/bootstrap/bootstrap.go +++ b/pkg/asset/ignition/bootstrap/bootstrap.go @@ -341,6 +341,7 @@ func (a *Bootstrap) addSystemdUnits(uri string, templateData *bootstrapTemplateD "systemd-journal-gatewayd.socket": {}, "approve-csr.service": {}, // baremetal & openstack platform services + // TODO(displague) equinixmetal? 
"keepalived.service": {}, "coredns.service": {}, "ironic.service": {}, diff --git a/pkg/asset/ignition/machine/node.go b/pkg/asset/ignition/machine/node.go index 75b80e6accf..855d60ff864 100644 --- a/pkg/asset/ignition/machine/node.go +++ b/pkg/asset/ignition/machine/node.go @@ -13,6 +13,7 @@ import ( "github.com/openshift/installer/pkg/asset/ignition" "github.com/openshift/installer/pkg/types" baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" + equinixtypes "github.com/openshift/installer/pkg/types/equinixmetal" openstacktypes "github.com/openshift/installer/pkg/types/openstack" ovirttypes "github.com/openshift/installer/pkg/types/ovirt" vspheretypes "github.com/openshift/installer/pkg/types/vsphere" @@ -41,6 +42,12 @@ func pointerIgnitionConfig(installConfig *types.InstallConfig, rootCA []byte, ro if installConfig.VSphere.APIVIP != "" { ignitionHost = net.JoinHostPort(installConfig.VSphere.APIVIP, "22623") } + case equinixtypes.Name: + // TODO(displague) Do I have to set platform = "equinixmetal" around here? + if installConfig.EquinixMetal.APIVIP != "" { + ignitionHost = net.JoinHostPort(installConfig.EquinixMetal.APIVIP, "22623") + + } } return &igntypes.Config{ Ignition: igntypes.Ignition{ diff --git a/pkg/asset/installconfig/equinixmetal/client.go b/pkg/asset/installconfig/equinixmetal/client.go new file mode 100644 index 00000000000..f22bca43a81 --- /dev/null +++ b/pkg/asset/installconfig/equinixmetal/client.go @@ -0,0 +1,81 @@ +package equinixmetal + +import ( + "context" + + packngo "github.com/packethost/packngo" + "github.com/pkg/errors" +) + +const ( + EQUINIXMETAL_CONSUMER_TOKEN = "redhat openshift ipi" +) + +//go:generate mockgen -source=./client.go -destination=mock/equinixmetalclient_generated.go -package=mock + +// API represents the calls made to the API. +type API interface { + ListProjects(ctx context.Context) ([]packngo.Project, error) + ListFacilities(ctx context.Context) ([]packngo.Facility, error) + // ListMetros(ctx context.Context) ([]packngo.Metro, error) + ListPlans(ctx context.Context) ([]packngo.Plan, error) +} + +type Client struct { + OrganizationID string + FacilityID string + MetroID string + ProjectID string + + Conn *packngo.Client +} + +func (c *Client) ListProjects(_ context.Context) ([]packngo.Project, error) { + p, _, err := c.Conn.Projects.List(nil) + return p, err +} + +func (c *Client) ListFacilities(_ context.Context) ([]packngo.Facility, error) { + f, _, err := c.Conn.Facilities.List(nil) + return f, err +} + +/* +func (c *Client) ListMetros(_ context.Context) ([]packngo.Metro, error) { + m, _, err := c.Conn.Metros.List(nil) + return m, err +} +*/ + +func (c *Client) ListPlans(_ context.Context) ([]packngo.Plan, error) { + p, _, err := c.Conn.Plans.List(nil) + return p, err +} + +var _ API = &Client{} + +// getConnection is a convenience method to get a Equinix Metal API client +// from a Config Object. +func getConnection(c *Config) (*packngo.Client, error) { + return packngo.NewClientWithBaseURL( + EQUINIXMETAL_CONSUMER_TOKEN, c.APIKey, nil, c.APIURL, + ) +} + +// NewConnection returns a new client connection to Equinix Metal's API endpoint. +// It is the responsibility of the caller to close the connection. 
+func NewConnection() (*packngo.Client, error) {
+    equinixmetalConfig, err := NewConfig()
+    if err != nil {
+        return nil, errors.Wrap(err, "getting Equinix Metal configuration")
+    }
+    if err := equinixmetalConfig.Save(); err != nil {
+        return nil, errors.Wrap(err, "saving Equinix Metal configuration")
+    }
+
+    con, err := getConnection(equinixmetalConfig)
+    if err != nil {
+        return nil, errors.Wrap(err, "establishing Equinix Metal connection")
+    }
+    return con, nil
+}
diff --git a/pkg/asset/installconfig/equinixmetal/config.go b/pkg/asset/installconfig/equinixmetal/config.go
new file mode 100644
index 00000000000..7b2630e13bb
--- /dev/null
+++ b/pkg/asset/installconfig/equinixmetal/config.go
@@ -0,0 +1,81 @@
+package equinixmetal
+
+import (
+    "io/ioutil"
+    "os"
+    "path/filepath"
+
+    "sigs.k8s.io/yaml"
+)
+
+var defaultEquinixMetalConfigEnvVar = "EQUINIXMETAL_CONFIG"
+
+// TODO(displague) what is the preferred config for Equinix Metal projects? support
+// both yaml and json?
+var defaultEquinixMetalConfigPath = filepath.Join(os.Getenv("HOME"), ".equinixmetal-config.yaml")
+
+// Config holds Equinix Metal API access details
+type Config struct {
+    // APIURL is the Base URL for accessing the Equinix Metal API (https://api.equinix.com/metal/v1)
+    APIURL string `json:"api_url,omitempty"`
+
+    // APIKey is the User or Project API Key used to authenticate requests to the Equinix Metal API
+    APIKey string `json:"api_key,omitempty"`
+}
+
+// LoadEquinixMetalConfig loads the configuration from the following locations (first wins):
+// 1. EQUINIXMETAL_CONFIG env variable
+// 2. $defaultEquinixMetalConfigPath
+// See @Config for the expected format
+func LoadEquinixMetalConfig() ([]byte, error) {
+    data, err := ioutil.ReadFile(discoverPath())
+    if err != nil {
+        return nil, err
+    }
+    return data, nil
+}
+
+// NewConfig will return a Config by loading
+// the configuration from the locations specified in @LoadEquinixMetalConfig
+func NewConfig() (*Config, error) {
+    c := &Config{}
+    in, err := LoadEquinixMetalConfig()
+    if err != nil {
+        if os.IsNotExist(err) {
+            return askForConfig()
+        }
+        return c, err
+    }
+
+    err = yaml.Unmarshal(in, c)
+    if err != nil {
+        return c, err
+    }
+
+    return c, nil
+}
+
+func discoverPath() string {
+    path, _ := os.LookupEnv(defaultEquinixMetalConfigEnvVar)
+    if path != "" {
+        return path
+    }
+
+    return defaultEquinixMetalConfigPath
+}
+
+// Save will serialize the config back into the locations
+// specified in @LoadEquinixMetalConfig; the first discovered location wins.
+func (c *Config) Save() error {
+    out, err := yaml.Marshal(c)
+    if err != nil {
+        return err
+    }
+
+    path := discoverPath()
+    err = os.MkdirAll(filepath.Dir(path), 0700)
+    if err != nil {
+        return err
+    }
+    return ioutil.WriteFile(path, out, 0600)
+}
diff --git a/pkg/asset/installconfig/equinixmetal/equinixmetal.go b/pkg/asset/installconfig/equinixmetal/equinixmetal.go
new file mode 100644
index 00000000000..9459d6a27a6
--- /dev/null
+++ b/pkg/asset/installconfig/equinixmetal/equinixmetal.go
@@ -0,0 +1,241 @@
+// Package equinixmetal collects equinixmetal-specific configuration.
+package equinixmetal
+
+import (
+    "context"
+    "strings"
+    "time"
+
+    survey "gopkg.in/AlecAivazis/survey.v1"
+
+    "github.com/openshift/installer/pkg/types/equinixmetal"
+    "github.com/openshift/installer/pkg/validate"
+    "github.com/pkg/errors"
+)
+
+const (
+    DefaultFacility = "sv15" // Silicon Valley, CA, US
+    DefaultMetro    = "SV"   // Silicon Valley, US
+)
+
+// Platform collects equinixmetal-specific configuration.
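+// It prompts interactively (via survey) for a facility, a project ID, and a
+// device plan (metro selection is currently stubbed out), and returns a
+// Platform built from those selections; the chosen plan becomes the
+// DefaultMachinePlatform plan.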
+func Platform() (*equinixmetal.Platform, error) { + conn, err := NewConnection() + if err != nil { + return nil, errors.Wrap(err, "failed to create Equinix Metal connection") + } + + client := &Client{Conn: conn} + + facilityCode, err := selectFacility(client) + if err != nil { + return nil, err + } + + metroCode, err := selectMetro(client) + if err != nil { + return nil, err + } + + projectID, err := selectProject(client) + if err != nil { + return nil, err + } + + planSlug, err := selectPlan(client) + if err != nil { + return nil, err + } + + return &equinixmetal.Platform{ + Facility: facilityCode, + Metro: metroCode, + ProjectID: projectID, + DefaultMachinePlatform: &equinixmetal.MachinePool{ + Plan: planSlug, + }, + }, nil +} + +func selectProject(client *Client) (string, error) { + var projectID string + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + projects, err := client.ListProjects(ctx) + + if err != nil { + return "", errors.Wrap(err, "failed to list Equinix Metal projects") + } + + projectNames := []string{} + for _, p := range projects { + projectNames = append(projectNames, p.ID+" ("+p.Name+")") + } + + err = survey.Ask([]*survey.Question{{ + Prompt: &survey.Select{ + Message: "Equinix Metal Project ID", + Help: "The Equinix Metal project id to use for installation", + Options: projectNames, + }, + Validate: survey.ComposeValidators(survey.Required), + }}, &projectID) + + if err != nil { + return "", err + } + + parts := strings.Split(projectID, " ") + return parts[0], nil +} + +func selectFacility(client *Client) (string, error) { + var facilityCode string + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + facilities, err := client.ListFacilities(ctx) + + if err != nil { + return "", errors.Wrap(err, "failed to list Equinix Metal facilities") + } + + facilitiesNames := []string{} + for _, f := range facilities { + facilitiesNames = append(facilitiesNames, f.Code+" ("+f.Name+")") + } + + err = survey.Ask([]*survey.Question{ + { + Prompt: &survey.Select{ + Message: "Equinix Metal Facility Code", + Help: "The Equinix Metal Facility code (this is the short name, e.g. 'da11')", + Default: DefaultFacility, + Options: facilitiesNames, + }, + Validate: survey.ComposeValidators(survey.Required), + }, + }, &facilityCode) + + if err != nil { + return "", err + } + return strings.Split(facilityCode, " ")[0], nil +} + +func selectMetro(client *Client) (string, error) { + return "", nil + /* + var metroID string + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + metros, err := client.ListMetros(ctx) + + if err != nil { + return "", errors.Wrap(err, "failed to list Equinix Metal metros") + } + + metroNames := []string{} + for _, m := range metros { + metroNames = append(metroNames, m.Code+" ("+m.Name+")") + } + + err = survey.Ask([]*survey.Question{ + { + Prompt: &survey.Select{ + Message: "Equinix Metal Metro Code", + Help: "The Equinix Metal Metro code (this is the short name, e.g. 
'SV')", + Default: DefaultMetro, + Options: metroNames, + }, + Validate: survey.ComposeValidators(survey.Required), + }, + }, &metroID) + + if err != nil { + return "", err + } + return strings.Split(metroID, " ")[0], nil + */ +} + +func selectPlan(client *Client) (string, error) { + var planCode string + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + plans, err := client.ListPlans(ctx) + + if err != nil { + return "", errors.Wrap(err, "failed to list Equinix Metal plans") + } + + planNames := []string{} + for _, p := range plans { + planNames = append(planNames, p.Slug+" ("+p.Name+")") + } + + err = survey.Ask([]*survey.Question{{ + Prompt: &survey.Select{ + Message: "Equinix Metal Plan Code", + Help: "The Equinix Metal plan code to use for installation", + Options: planNames, + }, + Validate: survey.ComposeValidators(survey.Required), + }}, &planCode) + + if err != nil { + return "", err + } + + parts := strings.Split(planCode, " ") + return parts[0], nil +} + +func askForConfig() (*Config, error) { + var apiURL, apiKey string + + err := survey.Ask([]*survey.Question{ + { + Prompt: &survey.Input{ + Message: "Equinix Metal API URL", + Help: "The base URL for accessing the Equinix Metal API", + Default: "https://api.equinix.com/metal/v1/", + }, + Validate: survey.ComposeValidators(survey.Required, uriValidator), + }, + }, &apiURL) + if err != nil { + return nil, err + } + + err = survey.Ask([]*survey.Question{ + { + Prompt: &survey.Password{ + Message: "Equinix Metal API Key", + Help: "The User or Project Equinix Metal API Key to access the Equinix Metal API", + }, + Validate: survey.ComposeValidators(survey.Required), + }, + }, &apiKey) + if err != nil { + return nil, err + } + + return &Config{ + APIKey: apiKey, + APIURL: apiURL, + }, nil +} + +// uriValidator validates if the answer provided in prompt is a valid +// url and has non-empty scheme. +func uriValidator(ans interface{}) error { + return validate.URI(ans.(string)) +} diff --git a/pkg/asset/installconfig/equinixmetal/mock/equinixmetalclient_generated.go b/pkg/asset/installconfig/equinixmetal/mock/equinixmetalclient_generated.go new file mode 100644 index 00000000000..e50377f497a --- /dev/null +++ b/pkg/asset/installconfig/equinixmetal/mock/equinixmetalclient_generated.go @@ -0,0 +1,95 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./client.go + +// Package mock is a generated GoMock package. 
+package mock + +import ( + context "context" + gomock "github.com/golang/mock/gomock" + packngo "github.com/packethost/packngo" + reflect "reflect" +) + +// MockAPI is a mock of API interface +type MockAPI struct { + ctrl *gomock.Controller + recorder *MockAPIMockRecorder +} + +// MockAPIMockRecorder is the mock recorder for MockAPI +type MockAPIMockRecorder struct { + mock *MockAPI +} + +// NewMockAPI creates a new mock instance +func NewMockAPI(ctrl *gomock.Controller) *MockAPI { + mock := &MockAPI{ctrl: ctrl} + mock.recorder = &MockAPIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockAPI) EXPECT() *MockAPIMockRecorder { + return m.recorder +} + +// ListProjects mocks base method +func (m *MockAPI) ListProjects(ctx context.Context) ([]packngo.Project, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListProjects", ctx) + ret0, _ := ret[0].([]packngo.Project) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListProjects indicates an expected call of ListProjects +func (mr *MockAPIMockRecorder) ListProjects(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListProjects", reflect.TypeOf((*MockAPI)(nil).ListProjects), ctx) +} + +// ListFacilities mocks base method +func (m *MockAPI) ListFacilities(ctx context.Context) ([]packngo.Facility, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListFacilities", ctx) + ret0, _ := ret[0].([]packngo.Facility) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMetros mocks base method +func (m *MockAPI) ListMetros(ctx context.Context) ([]packngo.Metro, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMetros", ctx) + ret0, _ := ret[0].([]packngo.Metro) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListFacilities indicates an expected call of ListFacilities +func (mr *MockAPIMockRecorder) ListFacilities(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListFacilities", reflect.TypeOf((*MockAPI)(nil).ListFacilities), ctx) +} + +// ListFacilities indicates an expected call of ListFacilities +func (mr *MockAPIMockRecorder) ListMetros(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMetros", reflect.TypeOf((*MockAPI)(nil).ListMetros), ctx) +} + +// ListPlans mocks base method +func (m *MockAPI) ListPlans(ctx context.Context) ([]packngo.Plan, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListPlans", ctx) + ret0, _ := ret[0].([]packngo.Plan) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListPlans indicates an expected call of ListPlans +func (mr *MockAPIMockRecorder) ListPlans(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPlans", reflect.TypeOf((*MockAPI)(nil).ListPlans), ctx) +} diff --git a/pkg/asset/installconfig/equinixmetal/validation.go b/pkg/asset/installconfig/equinixmetal/validation.go new file mode 100644 index 00000000000..56d13952d38 --- /dev/null +++ b/pkg/asset/installconfig/equinixmetal/validation.go @@ -0,0 +1,74 @@ +package equinixmetal + +import ( + packngo "github.com/packethost/packngo" + "github.com/pkg/errors" + "gopkg.in/AlecAivazis/survey.v1" + "k8s.io/apimachinery/pkg/util/validation/field" + + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/equinixmetal" + 
"github.com/openshift/installer/pkg/types/equinixmetal/validation" +) + +// Validate executes Equinix Metal specific validation +func Validate(ic *types.InstallConfig) error { + allErrs := field.ErrorList{} + equinixmetalPlatformPath := field.NewPath("platform", "equinixmetal") + + if ic.Platform.EquinixMetal == nil { + return errors.New(field.Required( + equinixmetalPlatformPath, + "validation requires a Engine platform configuration").Error()) + } + + allErrs = append( + allErrs, + validation.ValidatePlatform(ic.Platform.EquinixMetal, equinixmetalPlatformPath)...) + + con, err := packngo.NewClient() + if err != nil { + return err + } + + // TODO(displague) validate networks + + if ic.ControlPlane != nil && ic.ControlPlane.Platform.EquinixMetal != nil { + allErrs = append( + allErrs, + validateMachinePool(con, field.NewPath("controlPlane", "platform", "equinixmetal"), ic.ControlPlane.Platform.EquinixMetal)...) + } + for idx, compute := range ic.Compute { + fldPath := field.NewPath("compute").Index(idx) + if compute.Platform.EquinixMetal != nil { + allErrs = append( + allErrs, + validateMachinePool(con, fldPath.Child("platform", "equinixmetal"), compute.Platform.EquinixMetal)...) + } + } + + return allErrs.ToAggregate() +} + +func validateMachinePool(con *packngo.Client, child *field.Path, pool *equinixmetal.MachinePool) field.ErrorList { + allErrs := field.ErrorList{} + return allErrs +} + +// authenticated takes an equinixmetal platform and validates +// its connection to the API by establishing +// the connection and authenticating successfully. +// The API connection is closed in the end and must leak +// or be reused in any way. +func authenticated(c *Config) survey.Validator { + return func(val interface{}) error { + _, err := packngo.NewClient() + + if err != nil { + return errors.Errorf("failed to construct connection to Engine platform %s", err) + } + + return nil + } + +} diff --git a/pkg/asset/installconfig/installconfig.go b/pkg/asset/installconfig/installconfig.go index 95587806696..b845e809575 100644 --- a/pkg/asset/installconfig/installconfig.go +++ b/pkg/asset/installconfig/installconfig.go @@ -12,6 +12,7 @@ import ( "github.com/openshift/installer/pkg/asset" "github.com/openshift/installer/pkg/asset/installconfig/aws" icazure "github.com/openshift/installer/pkg/asset/installconfig/azure" + icequinix "github.com/openshift/installer/pkg/asset/installconfig/equinixmetal" icgcp "github.com/openshift/installer/pkg/asset/installconfig/gcp" icopenstack "github.com/openshift/installer/pkg/asset/installconfig/openstack" icovirt "github.com/openshift/installer/pkg/asset/installconfig/ovirt" @@ -84,6 +85,7 @@ func (a *InstallConfig) Generate(parents asset.Parents) error { a.Config.GCP = platform.GCP a.Config.BareMetal = platform.BareMetal a.Config.Ovirt = platform.Ovirt + a.Config.EquinixMetal = platform.EquinixMetal return a.finish("") } @@ -187,5 +189,8 @@ func (a *InstallConfig) platformValidation() error { if a.Config.Platform.OpenStack != nil { return icopenstack.Validate(a.Config) } + if a.Config.Platform.EquinixMetal != nil { + return icequinix.Validate(a.Config) + } return field.ErrorList{}.ToAggregate() } diff --git a/pkg/asset/installconfig/platform.go b/pkg/asset/installconfig/platform.go index 2fff965e653..e04027933b5 100644 --- a/pkg/asset/installconfig/platform.go +++ b/pkg/asset/installconfig/platform.go @@ -11,6 +11,7 @@ import ( awsconfig "github.com/openshift/installer/pkg/asset/installconfig/aws" azureconfig 
"github.com/openshift/installer/pkg/asset/installconfig/azure" baremetalconfig "github.com/openshift/installer/pkg/asset/installconfig/baremetal" + equinixconfig "github.com/openshift/installer/pkg/asset/installconfig/equinixmetal" gcpconfig "github.com/openshift/installer/pkg/asset/installconfig/gcp" libvirtconfig "github.com/openshift/installer/pkg/asset/installconfig/libvirt" openstackconfig "github.com/openshift/installer/pkg/asset/installconfig/openstack" @@ -20,6 +21,7 @@ import ( "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/installer/pkg/types/baremetal" + equinixmetal "github.com/openshift/installer/pkg/types/equinixmetal" "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" @@ -86,6 +88,11 @@ func (a *platform) Generate(asset.Parents) error { if err != nil { return err } + case equinixmetal.Name: + a.EquinixMetal, err = equinixconfig.Platform() + if err != nil { + return err + } case vsphere.Name: a.VSphere, err = vsphereconfig.Platform() if err != nil { diff --git a/pkg/asset/installconfig/platformcredscheck.go b/pkg/asset/installconfig/platformcredscheck.go index 40ce31ec229..1ab5ebe1e6e 100644 --- a/pkg/asset/installconfig/platformcredscheck.go +++ b/pkg/asset/installconfig/platformcredscheck.go @@ -7,12 +7,14 @@ import ( "github.com/pkg/errors" "github.com/openshift/installer/pkg/asset" + equinixconfig "github.com/openshift/installer/pkg/asset/installconfig/equinixmetal" gcpconfig "github.com/openshift/installer/pkg/asset/installconfig/gcp" openstackconfig "github.com/openshift/installer/pkg/asset/installconfig/openstack" ovirtconfig "github.com/openshift/installer/pkg/asset/installconfig/ovirt" "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/installer/pkg/types/baremetal" + "github.com/openshift/installer/pkg/types/equinixmetal" "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" @@ -75,6 +77,11 @@ func (a *PlatformCredsCheck) Generate(dependencies asset.Parents) error { if err != nil { return errors.Wrap(err, "testing Engine connection") } + case equinixmetal.Name: + _, err := equinixconfig.NewConnection() + if err != nil { + return errors.Wrap(err, "creating Engine connection") + } default: err = fmt.Errorf("unknown platform type %q", platform) } diff --git a/pkg/asset/installconfig/platformpermscheck.go b/pkg/asset/installconfig/platformpermscheck.go index c6128253236..9342a7ee8e2 100644 --- a/pkg/asset/installconfig/platformpermscheck.go +++ b/pkg/asset/installconfig/platformpermscheck.go @@ -12,6 +12,7 @@ import ( "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/installer/pkg/types/baremetal" + "github.com/openshift/installer/pkg/types/equinixmetal" "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" @@ -83,7 +84,7 @@ func (a *PlatformPermsCheck) Generate(dependencies asset.Parents) error { if err = gcpconfig.ValidateEnabledServices(ctx, client, ic.Config.GCP.ProjectID); err != nil { return errors.Wrap(err, "failed to validate services in this project") } - case azure.Name, baremetal.Name, libvirt.Name, none.Name, openstack.Name, ovirt.Name, vsphere.Name: + case azure.Name, 
baremetal.Name, libvirt.Name, none.Name, openstack.Name, ovirt.Name, equinixmetal.Name, vsphere.Name: // no permissions to check default: err = fmt.Errorf("unknown platform type %q", platform) diff --git a/pkg/asset/installconfig/platformprovisioncheck.go b/pkg/asset/installconfig/platformprovisioncheck.go index 0f099312ec5..2bfa02ceaae 100644 --- a/pkg/asset/installconfig/platformprovisioncheck.go +++ b/pkg/asset/installconfig/platformprovisioncheck.go @@ -12,6 +12,7 @@ import ( "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/installer/pkg/types/baremetal" + "github.com/openshift/installer/pkg/types/equinixmetal" "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" @@ -75,7 +76,7 @@ func (a *PlatformProvisionCheck) Generate(dependencies asset.Parents) error { if err != nil { return err } - case aws.Name, libvirt.Name, none.Name, openstack.Name, ovirt.Name: + case aws.Name, libvirt.Name, none.Name, openstack.Name, ovirt.Name, equinixmetal.Name: // no special provisioning requirements to check default: err = fmt.Errorf("unknown platform type %q", platform) diff --git a/pkg/asset/machines/equinixmetal/OWNERS b/pkg/asset/machines/equinixmetal/OWNERS new file mode 100644 index 00000000000..06e8c730e33 --- /dev/null +++ b/pkg/asset/machines/equinixmetal/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +approvers: + - equinix-approvers +reviewers: + - equinix-reviewers diff --git a/pkg/asset/machines/equinixmetal/machines.go b/pkg/asset/machines/equinixmetal/machines.go new file mode 100644 index 00000000000..4951f5f8e37 --- /dev/null +++ b/pkg/asset/machines/equinixmetal/machines.go @@ -0,0 +1,98 @@ +// Package equinixmetal generates Machine objects for equinixmetal. +package equinixmetal + +import ( + "fmt" + + machineapi "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/equinixmetal" + + equinixprovider "github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1" +) + +// Machines returns a list of machines for a machinepool. 
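+// One Machine is generated per replica in the pool; each carries an
+// EquinixMetalMachineProviderConfig built from the pool's plan and the
+// platform's facility and project ID.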
+func Machines(clusterID string, config *types.InstallConfig, pool *types.MachinePool, osImage, role, userDataSecret string) ([]machineapi.Machine, error) {
+    if configPlatform := config.Platform.Name(); configPlatform != equinixmetal.Name {
+        return nil, fmt.Errorf("non-equinixmetal configuration: %q", configPlatform)
+    }
+    if poolPlatform := pool.Platform.Name(); poolPlatform != equinixmetal.Name {
+        return nil, fmt.Errorf("non-equinixmetal machine-pool: %q", poolPlatform)
+    }
+    platform := config.Platform.EquinixMetal
+    mpool := pool.Platform.EquinixMetal
+
+    total := int64(1)
+    if pool.Replicas != nil {
+        total = *pool.Replicas
+    }
+    var machines []machineapi.Machine
+    for idx := int64(0); idx < total; idx++ {
+        provider := provider(clusterID, platform, mpool.Plan, mpool.CustomData, role, userDataSecret, osImage)
+
+        machine := machineapi.Machine{
+            TypeMeta: metav1.TypeMeta{
+                APIVersion: "machine.openshift.io/v1beta1",
+                Kind:       "Machine",
+            },
+            ObjectMeta: metav1.ObjectMeta{
+                Namespace: "openshift-machine-api",
+                Name:      fmt.Sprintf("%s-%s-%d", clusterID, pool.Name, idx),
+                Labels: map[string]string{
+                    "machine.openshift.io/cluster-api-cluster":      clusterID,
+                    "machine.openshift.io/cluster-api-machine-role": role,
+                    "machine.openshift.io/cluster-api-machine-type": role,
+                },
+            },
+            Spec: machineapi.MachineSpec{
+                ProviderSpec: machineapi.ProviderSpec{
+                    Value: &runtime.RawExtension{Object: provider},
+                },
+                // we don't need to set Versions, because we control those via cluster operators.
+            },
+        }
+        machines = append(machines, machine)
+    }
+
+    return machines, nil
+}
+
+func provider(clusterID string, platform *equinixmetal.Platform, plan, customData, role, userDataSecret, osImage string) *equinixprovider.EquinixMetalMachineProviderConfig {
+    // TODO(displague) This iPXE script URL contains the kernel and initrd
+    // parameters needed to boot an official RHCOS image from the official mirror.
+    // Equinix Metal devices can be created with userdata values of "#!ipxe" to
+    // avoid the need for a hosted iPXE script, but userdata must be a valid
+    // Ignition Config for IPI purposes. I am actively seeking an EM feature to
+    // permit ipxescripturl to support data URLs or to offer an ipxescript
+    // (content, not URL) device creation field. Notably, this static script
+    // should be dynamic based on the osImage parameter, which may reflect a
+    // different version and architecture than this gist offers.
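+    // For illustration only (the hosted gist remains the authoritative
+    // source), an RHCOS install script of this kind is roughly of the
+    // following shape; all URLs below are placeholders:
+    //
+    //   #!ipxe
+    //   kernel <kernel-url> coreos.inst=yes coreos.inst.install_dev=sda \
+    //     coreos.inst.image_url=<metal-image-url> coreos.inst.ignition_url=<ignition-url>
+    //   initrd <initramfs-url>
+    //   boot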
+ ipxeScriptURL := "https://gist.githubusercontent.com/displague/5282172449a83c7b83821f8f8333a072/raw/f7300a5ab652e923dddacb5c9f206864c4c2aceb/rhcos.ipxe" + _ = osImage + + spec := equinixprovider.EquinixMetalMachineProviderConfig{ + TypeMeta: metav1.TypeMeta{ + APIVersion: equinixprovider.SchemeGroupVersion.String(), + Kind: "EquinixMetalMachineProviderConfig", + }, + CustomData: customData, + Facility: platform.Facility, + // TODO: cluster-api-provider-equinix-metal + // Metro: platform.Metro, + OS: "custom_ipxe", + ProjectID: platform.ProjectID, + IPXEScriptURL: ipxeScriptURL, + BillingCycle: "hourly", + MachineType: plan, + Tags: []string{"openshift-ipi", fmt.Sprintf("%s-%s", clusterID, role)}, + // TODO(displague) ssh keys will need to be defined in the project + // SshKeys: []string{}, + UserDataSecret: &corev1.LocalObjectReference{Name: userDataSecret}, + CredentialsSecret: &corev1.LocalObjectReference{Name: "equinixmetal-credentials"}, + } + return &spec +} diff --git a/pkg/asset/machines/equinixmetal/machinesets.go b/pkg/asset/machines/equinixmetal/machinesets.go new file mode 100644 index 00000000000..0ba787f3a3e --- /dev/null +++ b/pkg/asset/machines/equinixmetal/machinesets.go @@ -0,0 +1,79 @@ +// Package equinixmetal generates Machine objects for equinixmetal. +package equinixmetal + +import ( + "fmt" + + machineapi "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/pointer" + + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/equinixmetal" +) + +// MachineSets returns a list of machinesets for a machinepool. +func MachineSets(clusterID string, config *types.InstallConfig, pool *types.MachinePool, osImage, role, + userDataSecret string) ([]*machineapi.MachineSet, error) { + + if configPlatform := config.Platform.Name(); configPlatform != equinixmetal.Name { + return nil, fmt.Errorf("non-equinixmetal configuration: %q", configPlatform) + } + if poolPlatform := pool.Platform.Name(); poolPlatform != "" && poolPlatform != equinixmetal.Name { + return nil, fmt.Errorf("non-equinixmetal machine-pool: %q", poolPlatform) + } + platform := config.Platform.EquinixMetal + mpool := pool.Platform.EquinixMetal + plan := mpool.Plan + customData := mpool.CustomData + total := int64(1) + if pool.Replicas != nil { + total = *pool.Replicas + } + + provider := provider(clusterID, platform, plan, customData, role, userDataSecret, osImage) + name := fmt.Sprintf("%s-%s-%d", clusterID, pool.Name, 0) + mset := &machineapi.MachineSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "machine.openshift.io/v1beta1", + Kind: "MachineSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "openshift-machine-api", + Name: name, + Labels: map[string]string{ + "machine.openshift.io/cluster-api-cluster": clusterID, + "machine.openshift.io/cluster-api-machine-role": role, + "machine.openshift.io/cluster-api-machine-type": role, + }, + }, + Spec: machineapi.MachineSetSpec{ + Replicas: pointer.Int32Ptr(int32(total)), + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "machine.openshift.io/cluster-api-machineset": name, + "machine.openshift.io/cluster-api-cluster": clusterID, + }, + }, + Template: machineapi.MachineTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "machine.openshift.io/cluster-api-machineset": name, + "machine.openshift.io/cluster-api-cluster": clusterID, + "machine.openshift.io/cluster-api-machine-role": role, + 
"machine.openshift.io/cluster-api-machine-type": role, + }, + }, + Spec: machineapi.MachineSpec{ + ProviderSpec: machineapi.ProviderSpec{ + Value: &runtime.RawExtension{Object: provider}, + }, + // we don't need to set Versions, because we control those via cluster operators. + }, + }, + }, + } + + return []*machineapi.MachineSet{mset}, nil +} diff --git a/pkg/asset/machines/master.go b/pkg/asset/machines/master.go index e4fbda0ee63..b16ef9773f9 100644 --- a/pkg/asset/machines/master.go +++ b/pkg/asset/machines/master.go @@ -10,6 +10,8 @@ import ( "github.com/ghodss/yaml" baremetalapi "github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis" baremetalprovider "github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1" + equinixapi "github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis" + equinixprovider "github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1" gcpapi "github.com/openshift/cluster-api-provider-gcp/pkg/apis" gcpprovider "github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1" libvirtapi "github.com/openshift/cluster-api-provider-libvirt/pkg/apis" @@ -37,6 +39,7 @@ import ( "github.com/openshift/installer/pkg/asset/machines/aws" "github.com/openshift/installer/pkg/asset/machines/azure" "github.com/openshift/installer/pkg/asset/machines/baremetal" + "github.com/openshift/installer/pkg/asset/machines/equinixmetal" "github.com/openshift/installer/pkg/asset/machines/gcp" "github.com/openshift/installer/pkg/asset/machines/libvirt" "github.com/openshift/installer/pkg/asset/machines/machineconfig" @@ -51,6 +54,7 @@ import ( azuretypes "github.com/openshift/installer/pkg/types/azure" azuredefaults "github.com/openshift/installer/pkg/types/azure/defaults" baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" + equinixtypes "github.com/openshift/installer/pkg/types/equinixmetal" gcptypes "github.com/openshift/installer/pkg/types/gcp" libvirttypes "github.com/openshift/installer/pkg/types/libvirt" nonetypes "github.com/openshift/installer/pkg/types/none" @@ -349,6 +353,15 @@ func (m *Master) Generate(dependencies asset.Parents) error { return errors.Wrap(err, "failed to create master machine objects") } vsphere.ConfigMasters(machines, clusterID.InfraID) + case equinixtypes.Name: + mpool := defaultEquinixMetalMachinePoolPlatform() + mpool.Set(ic.Platform.EquinixMetal.DefaultMachinePlatform) + mpool.Set(pool.Platform.EquinixMetal) + pool.Platform.EquinixMetal = &mpool + machines, err = equinixmetal.Machines(clusterID.InfraID, ic, pool, string(*rhcosImage), "master", "master-user-data") + if err != nil { + return errors.Wrap(err, "failed to create master machine objects") + } case nonetypes.Name: default: return fmt.Errorf("invalid Platform") @@ -476,6 +489,7 @@ func (m *Master) Machines() ([]machineapi.Machine, error) { libvirtapi.AddToScheme(scheme) openstackapi.AddToScheme(scheme) ovirtproviderapi.AddToScheme(scheme) + equinixapi.AddToScheme(scheme) vsphereapi.AddToScheme(scheme) decoder := serializer.NewCodecFactory(scheme).UniversalDecoder( awsprovider.SchemeGroupVersion, @@ -486,6 +500,7 @@ func (m *Master) Machines() ([]machineapi.Machine, error) { openstackprovider.SchemeGroupVersion, vsphereprovider.SchemeGroupVersion, ovirtprovider.SchemeGroupVersion, + equinixprovider.SchemeGroupVersion, ) machines := []machineapi.Machine{} diff --git a/pkg/asset/machines/worker.go b/pkg/asset/machines/worker.go index 663fab1be74..1405785a676 100644 --- a/pkg/asset/machines/worker.go +++ 
b/pkg/asset/machines/worker.go @@ -11,6 +11,8 @@ import ( openstackclientconfig "github.com/gophercloud/utils/openstack/clientconfig" baremetalapi "github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis" baremetalprovider "github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1" + equinixapi "github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis" + equinixprovider "github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1" gcpapi "github.com/openshift/cluster-api-provider-gcp/pkg/apis" gcpprovider "github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1" libvirtapi "github.com/openshift/cluster-api-provider-libvirt/pkg/apis" @@ -38,6 +40,7 @@ import ( "github.com/openshift/installer/pkg/asset/machines/aws" "github.com/openshift/installer/pkg/asset/machines/azure" "github.com/openshift/installer/pkg/asset/machines/baremetal" + "github.com/openshift/installer/pkg/asset/machines/equinixmetal" "github.com/openshift/installer/pkg/asset/machines/gcp" "github.com/openshift/installer/pkg/asset/machines/libvirt" "github.com/openshift/installer/pkg/asset/machines/machineconfig" @@ -52,6 +55,7 @@ import ( azuretypes "github.com/openshift/installer/pkg/types/azure" azuredefaults "github.com/openshift/installer/pkg/types/azure/defaults" baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" + equinixtypes "github.com/openshift/installer/pkg/types/equinixmetal" gcptypes "github.com/openshift/installer/pkg/types/gcp" libvirttypes "github.com/openshift/installer/pkg/types/libvirt" nonetypes "github.com/openshift/installer/pkg/types/none" @@ -131,6 +135,13 @@ func defaultOvirtMachinePoolPlatform() ovirttypes.MachinePool { } } +func defaultEquinixMetalMachinePoolPlatform() equinixtypes.MachinePool { + return equinixtypes.MachinePool{ + // TODO(displague) what defaults should we supply? 
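+ // NOTE: c3.medium.x86 is only a placeholder default plan (see the TODO above);
+ // it is overridden cluster-wide by platform.equinixmetal.defaultMachinePlatform
+ // and per pool by the EquinixMetal machine-pool Plan field, via the mpool.Set
+ // calls in the Generate methods below.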
+ Plan: "c3.medium.x86", + } +} + func defaultVSphereMachinePoolPlatform() vspheretypes.MachinePool { return vspheretypes.MachinePool{ NumCPUs: 2, @@ -389,6 +400,21 @@ func (w *Worker) Generate(dependencies asset.Parents) error { for _, set := range sets { machineSets = append(machineSets, set) } + case equinixtypes.Name: + mpool := defaultEquinixMetalMachinePoolPlatform() + mpool.Set(ic.Platform.EquinixMetal.DefaultMachinePlatform) + mpool.Set(pool.Platform.EquinixMetal) + pool.Platform.EquinixMetal = &mpool + + imageName, _ := rhcosutils.GenerateOpenStackImageName(string(*rhcosImage), clusterID.InfraID) + + sets, err := equinixmetal.MachineSets(clusterID.InfraID, ic, &pool, imageName, "worker", "worker-user-data") + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects for ovirt provider") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } case nonetypes.Name: default: return fmt.Errorf("invalid Platform") @@ -472,6 +498,7 @@ func (w *Worker) MachineSets() ([]machineapi.MachineSet, error) { libvirtapi.AddToScheme(scheme) openstackapi.AddToScheme(scheme) ovirtproviderapi.AddToScheme(scheme) + equinixapi.AddToScheme(scheme) vsphereproviderapi.AddToScheme(scheme) decoder := serializer.NewCodecFactory(scheme).UniversalDecoder( awsprovider.SchemeGroupVersion, @@ -481,6 +508,7 @@ func (w *Worker) MachineSets() ([]machineapi.MachineSet, error) { libvirtprovider.SchemeGroupVersion, openstackprovider.SchemeGroupVersion, ovirtprovider.SchemeGroupVersion, + equinixprovider.SchemeGroupVersion, vsphereprovider.SchemeGroupVersion, ) diff --git a/pkg/asset/manifests/cloudproviderconfig.go b/pkg/asset/manifests/cloudproviderconfig.go index b65519b7346..f7498be5234 100644 --- a/pkg/asset/manifests/cloudproviderconfig.go +++ b/pkg/asset/manifests/cloudproviderconfig.go @@ -21,6 +21,7 @@ import ( awstypes "github.com/openshift/installer/pkg/types/aws" azuretypes "github.com/openshift/installer/pkg/types/azure" baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" + equinixtypes "github.com/openshift/installer/pkg/types/equinixmetal" gcptypes "github.com/openshift/installer/pkg/types/gcp" libvirttypes "github.com/openshift/installer/pkg/types/libvirt" nonetypes "github.com/openshift/installer/pkg/types/none" @@ -84,7 +85,8 @@ func (cpc *CloudProviderConfig) Generate(dependencies asset.Parents) error { } switch installConfig.Config.Platform.Name() { - case libvirttypes.Name, nonetypes.Name, baremetaltypes.Name, ovirttypes.Name: + case libvirttypes.Name, nonetypes.Name, baremetaltypes.Name, ovirttypes.Name, equinixtypes.Name: + // TODO(displague) What should Equinix Metal do? return nil case awstypes.Name: // Store the additional trust bundle in the ca-bundle.pem key if the cluster is being installed on a C2S region. 
diff --git a/pkg/asset/manifests/dns.go b/pkg/asset/manifests/dns.go index 49f8a9fa69c..db0fbcb287b 100644 --- a/pkg/asset/manifests/dns.go +++ b/pkg/asset/manifests/dns.go @@ -20,6 +20,7 @@ import ( awstypes "github.com/openshift/installer/pkg/types/aws" azuretypes "github.com/openshift/installer/pkg/types/azure" baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" + equinixtypes "github.com/openshift/installer/pkg/types/equinixmetal" gcptypes "github.com/openshift/installer/pkg/types/gcp" libvirttypes "github.com/openshift/installer/pkg/types/libvirt" nonetypes "github.com/openshift/installer/pkg/types/none" @@ -118,7 +119,8 @@ func (d *DNS) Generate(dependencies asset.Parents) error { config.Spec.PublicZone = &configv1.DNSZone{ID: zone.Name} } config.Spec.PrivateZone = &configv1.DNSZone{ID: fmt.Sprintf("%s-private-zone", clusterID.InfraID)} - case libvirttypes.Name, openstacktypes.Name, baremetaltypes.Name, nonetypes.Name, vspheretypes.Name, ovirttypes.Name: + case libvirttypes.Name, openstacktypes.Name, baremetaltypes.Name, nonetypes.Name, vspheretypes.Name, ovirttypes.Name, equinixtypes.Name: + // TODO(displague) What should Equinix Metal do? default: return errors.New("invalid Platform") } diff --git a/pkg/asset/manifests/openshift.go b/pkg/asset/manifests/openshift.go index bf18f9d22bb..21104e94e68 100644 --- a/pkg/asset/manifests/openshift.go +++ b/pkg/asset/manifests/openshift.go @@ -13,6 +13,7 @@ import ( "github.com/openshift/installer/pkg/asset" "github.com/openshift/installer/pkg/asset/installconfig" + "github.com/openshift/installer/pkg/asset/installconfig/equinixmetal" "github.com/openshift/installer/pkg/asset/installconfig/gcp" "github.com/openshift/installer/pkg/asset/installconfig/ovirt" "github.com/openshift/installer/pkg/asset/machines" @@ -26,6 +27,7 @@ import ( awstypes "github.com/openshift/installer/pkg/types/aws" azuretypes "github.com/openshift/installer/pkg/types/azure" baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" + equinixtypes "github.com/openshift/installer/pkg/types/equinixmetal" gcptypes "github.com/openshift/installer/pkg/types/gcp" openstacktypes "github.com/openshift/installer/pkg/types/openstack" ovirttypes "github.com/openshift/installer/pkg/types/ovirt" @@ -182,6 +184,19 @@ func (o *Openshift) Generate(dependencies asset.Parents) error { Base64encodeCABundle: base64.StdEncoding.EncodeToString([]byte(conf.CABundle)), }, } + case equinixtypes.Name: + conf, err := equinixmetal.NewConfig() + if err != nil { + return err + } + + cloudCreds = cloudCredsSecretData{ + EquinixMetal: &EquinixMetalCredsSecretData{ + Base64encodeURL: base64.StdEncoding.EncodeToString([]byte(conf.APIURL)), + Base64encodeUsername: base64.StdEncoding.EncodeToString([]byte(conf.APIKey)), + }, + } + } templateData := &openshiftTemplateData{ diff --git a/pkg/asset/manifests/template.go b/pkg/asset/manifests/template.go index 5480a30a8be..c0356121d09 100644 --- a/pkg/asset/manifests/template.go +++ b/pkg/asset/manifests/template.go @@ -47,13 +47,24 @@ type OvirtCredsSecretData struct { Base64encodeCABundle string } +// EquinixMetalCredsSecretData holds encoded credentials and is used to generate the cloud-creds secret +type EquinixMetalCredsSecretData struct { + Base64encodeURL string + Base64encodeUsername string + Base64encodePassword string + Base64encodeCAFile string + Base64encodeInsecure string + Base64encodeCABundle string +} + type cloudCredsSecretData struct { - AWS *AwsCredsSecretData - Azure *AzureCredsSecretData - GCP *GCPCredsSecretData -
OpenStack *OpenStackCredsSecretData - VSphere *VSphereCredsSecretData - Ovirt *OvirtCredsSecretData + AWS *AwsCredsSecretData + Azure *AzureCredsSecretData + GCP *GCPCredsSecretData + OpenStack *OpenStackCredsSecretData + VSphere *VSphereCredsSecretData + Ovirt *OvirtCredsSecretData + EquinixMetal *EquinixMetalCredsSecretData } type bootkubeTemplateData struct { diff --git a/pkg/asset/quota/quota.go b/pkg/asset/quota/quota.go index 5ee00d022e9..ea78c71bb7d 100644 --- a/pkg/asset/quota/quota.go +++ b/pkg/asset/quota/quota.go @@ -21,6 +21,7 @@ import ( typesaws "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/installer/pkg/types/baremetal" + "github.com/openshift/installer/pkg/types/equinixmetal" typesgcp "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" @@ -118,7 +119,8 @@ func (a *PlatformQuotaCheck) Generate(dependencies asset.Parents) error { return summarizeFailingReport(reports) } summarizeReport(reports) - case azure.Name, baremetal.Name, libvirt.Name, none.Name, openstack.Name, ovirt.Name, vsphere.Name: + case azure.Name, baremetal.Name, libvirt.Name, none.Name, openstack.Name, ovirt.Name, equinixmetal.Name, vsphere.Name: + // TODO(displague) Anything special for EquinixMetal? // no special provisioning requirements to check default: err = fmt.Errorf("unknown platform type %q", platform) diff --git a/pkg/asset/rhcos/bootstrap_image.go b/pkg/asset/rhcos/bootstrap_image.go index 6fde584b5ba..80c3839c23a 100644 --- a/pkg/asset/rhcos/bootstrap_image.go +++ b/pkg/asset/rhcos/bootstrap_image.go @@ -42,6 +42,7 @@ func (i *BootstrapImage) Generate(p asset.Parents) error { ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) defer cancel() switch config.Platform.Name() { + // TODO(displague) EquinixMetal? case baremetal.Name: // Check for RHCOS image URL override if boi := config.Platform.BareMetal.BootstrapOSImage; boi != "" { diff --git a/pkg/asset/rhcos/image.go b/pkg/asset/rhcos/image.go index 56dbbaa0fa4..158d5f5d1e7 100644 --- a/pkg/asset/rhcos/image.go +++ b/pkg/asset/rhcos/image.go @@ -18,6 +18,7 @@ import ( "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/installer/pkg/types/baremetal" + "github.com/openshift/installer/pkg/types/equinixmetal" "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" @@ -97,6 +98,8 @@ func osImage(config *types.InstallConfig) (string, error) { osimage, err = rhcos.OpenStack(ctx, arch) case ovirt.Name: osimage, err = rhcos.OpenStack(ctx, arch) + case equinixmetal.Name: + osimage, err = rhcos.OpenStack(ctx, arch) case azure.Name: osimage, err = rhcos.VHD(ctx, arch) case baremetal.Name: diff --git a/pkg/destroy/equinixmetal/OWNERS b/pkg/destroy/equinixmetal/OWNERS new file mode 100644 index 00000000000..06e8c730e33 --- /dev/null +++ b/pkg/destroy/equinixmetal/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. 
+ +approvers: + - equinix-approvers +reviewers: + - equinix-reviewers diff --git a/pkg/destroy/equinixmetal/doc.go b/pkg/destroy/equinixmetal/doc.go new file mode 100644 index 00000000000..05cc42c2211 --- /dev/null +++ b/pkg/destroy/equinixmetal/doc.go @@ -0,0 +1,2 @@ +// Package equinixmetal provides a cluster-destroyer for Equinix Metal clusters. +package equinixmetal diff --git a/pkg/destroy/equinixmetal/equinixmetal.go b/pkg/destroy/equinixmetal/equinixmetal.go new file mode 100644 index 00000000000..57bd8bfcc22 --- /dev/null +++ b/pkg/destroy/equinixmetal/equinixmetal.go @@ -0,0 +1,39 @@ +package equinixmetal + +import ( + "fmt" + + "github.com/sirupsen/logrus" + + "github.com/openshift/installer/pkg/asset/installconfig/equinixmetal" + "github.com/openshift/installer/pkg/destroy/providers" + "github.com/openshift/installer/pkg/types" +) + +// ClusterUninstaller holds the various options for the cluster we want to delete. +type ClusterUninstaller struct { + Metadata types.ClusterMetadata + Logger logrus.FieldLogger +} + +// Run is the entrypoint to start the uninstall process. +func (uninstaller *ClusterUninstaller) Run() error { + _, err := equinixmetal.NewConnection() + if err != nil { + return fmt.Errorf("failed to initialize connection to the Equinix Metal API: %s", err) + } + // @TODO(displague) delete each thing + //if err := uninstaller.deleteThing(con); err != nil { + // uninstaller.Logger.Errorf("Failed to remove Thing: %s", err) + // } + + return nil +} + +// New returns an Equinix Metal Uninstaller from ClusterMetadata. +func New(logger logrus.FieldLogger, metadata *types.ClusterMetadata) (providers.Destroyer, error) { + return &ClusterUninstaller{ + Metadata: *metadata, + Logger: logger, + }, nil +} diff --git a/pkg/terraform/exec/plugins/dns.go b/pkg/terraform/exec/plugins/dns.go new file mode 100644 index 00000000000..7b953d8e599 --- /dev/null +++ b/pkg/terraform/exec/plugins/dns.go @@ -0,0 +1,15 @@ +package plugins + +import ( + "github.com/hashicorp/terraform-plugin-sdk/plugin" + "github.com/terraform-providers/terraform-provider-dns/dns" +) + +func init() { + dnsProvider := func() { + plugin.Serve(&plugin.ServeOpts{ + ProviderFunc: dns.Provider, + }) + } + KnownPlugins["terraform-provider-dns"] = dnsProvider +} diff --git a/pkg/terraform/exec/plugins/equinixmetal.go b/pkg/terraform/exec/plugins/equinixmetal.go new file mode 100644 index 00000000000..279b0656806 --- /dev/null +++ b/pkg/terraform/exec/plugins/equinixmetal.go @@ -0,0 +1,16 @@ +package plugins + +import ( + metal "github.com/equinix/terraform-provider-equinix-metal/packet" + "github.com/hashicorp/terraform-plugin-sdk/plugin" +) + +func init() { + exec := func() { + plugin.Serve(&plugin.ServeOpts{ + ProviderFunc: metal.Provider, + }) + } + // TODO(displague) update to equinix-metal when TF 0.13+ sdk can be used + KnownPlugins["terraform-provider-packet"] = exec +} diff --git a/pkg/terraform/gather/equinixmetal/OWNERS b/pkg/terraform/gather/equinixmetal/OWNERS new file mode 100644 index 00000000000..06e8c730e33 --- /dev/null +++ b/pkg/terraform/gather/equinixmetal/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES.
+ +approvers: + - equinix-approvers +reviewers: + - equinix-reviewers diff --git a/pkg/terraform/gather/equinixmetal/ip.go b/pkg/terraform/gather/equinixmetal/ip.go new file mode 100644 index 00000000000..b020713cfa1 --- /dev/null +++ b/pkg/terraform/gather/equinixmetal/ip.go @@ -0,0 +1,28 @@ +// Package equinixmetal supplies utilities to extract information from terraform state +package equinixmetal + +import ( + "github.com/openshift/installer/pkg/terraform" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +// BootstrapIP returns the ip address for the bootstrap host. +// TODO(displague) implement +func BootstrapIP(tfs *terraform.State) (string, error) { + br, err := terraform.LookupResource(tfs, "module.bootstrap", "packet_device", "bootstrap") + if err != nil { + return "", errors.Wrap(err, "failed to lookup bootstrap") + } + if len(br.Instances) == 0 { + return "", errors.New("no bootstrap instance found") + } + bootstrap, _, err := unstructured.NestedString(br.Instances[0].Attributes, "access_public_ipv4") + return bootstrap, err +} + +// ControlPlaneIPs returns the ip addresses for control plane hosts. +// TODO(displague) implement
func ControlPlaneIPs(tfs *terraform.State) ([]string, error) { + return []string{""}, nil +} diff --git a/pkg/tfvars/equinixmetal/OWNERS b/pkg/tfvars/equinixmetal/OWNERS new file mode 100644 index 00000000000..06e8c730e33 --- /dev/null +++ b/pkg/tfvars/equinixmetal/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +approvers: + - equinix-approvers +reviewers: + - equinix-reviewers diff --git a/pkg/tfvars/equinixmetal/equinixmetal.go b/pkg/tfvars/equinixmetal/equinixmetal.go new file mode 100644 index 00000000000..7cef52af27b --- /dev/null +++ b/pkg/tfvars/equinixmetal/equinixmetal.go @@ -0,0 +1,59 @@ +package equinixmetal + +import ( + "encoding/json" + + equinixprovider "github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1" +) + +// Auth holds the Equinix Metal API endpoint and token rendered into Terraform variables. +type Auth struct { + APIURL string `json:"metal_api_url"` + APIKey string `json:"metal_auth_token"` +} +type config struct { + Auth `json:",inline"` + Roles []string `json:"metal_roles,omitempty"` + Facility string `json:"metal_facility,omitempty"` + Metro string `json:"metal_metro,omitempty"` + OS string `json:"metal_os"` + ProjectID string `json:"metal_project_id"` + BillingCycle string `json:"metal_billing_cycle"` + MachineType string `json:"metal_machine_type"` + SshKeys []string `json:"metal_ssh_keys,omitempty"` + IPXEScriptURL string `json:"metal_ipxe_script_url,omitempty"` + CustomData string `json:"metal_custom_data"` +} + +// TFVarsSources contains the parameters to be converted into Terraform variables +type TFVarsSources struct { + ControlPlaneConfigs []*equinixprovider.EquinixMetalMachineProviderConfig + Auth Auth +} + +// TFVars generates EquinixMetal-specific Terraform variables +func TFVars(sources TFVarsSources) ([]byte, error) { + plane0 := sources.ControlPlaneConfigs[0] + + /* + roles := make([]string, len(plane0.Roles)) + for _, r := range plane0.Roles { + roles = append(roles, string(r)) + } + */ + // TODO(displague) fill in the tf vars + cfg := &config{ + Auth: sources.Auth, + // Roles: roles, + Facility: plane0.Facility, + // Metro: plane0.Metro, + OS: plane0.OS, + ProjectID: plane0.ProjectID, + BillingCycle: plane0.BillingCycle, + MachineType: plane0.MachineType, + IPXEScriptURL: plane0.IPXEScriptURL, + + // SshKeys: plane0.SshKeys,
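+ // NOTE: Roles, Metro, and SshKeys are intentionally left unset for now (see the
+ // TODOs above); because those keys are tagged omitempty they are simply dropped
+ // from the rendered tfvars JSON, while metal_custom_data is emitted as an empty
+ // string until CustomData is wired through.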
+ } + + return json.MarshalIndent(cfg, "", " ") +} diff --git a/pkg/types/clustermetadata.go b/pkg/types/clustermetadata.go index 31084ffcdeb..deb84fe10c7 100644 --- a/pkg/types/clustermetadata.go +++ b/pkg/types/clustermetadata.go @@ -4,6 +4,7 @@ import ( "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/installer/pkg/types/baremetal" + "github.com/openshift/installer/pkg/types/equinixmetal" "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/openstack" @@ -25,14 +26,15 @@ type ClusterMetadata struct { // ClusterPlatformMetadata contains metadata for platfrom. type ClusterPlatformMetadata struct { - AWS *aws.Metadata `json:"aws,omitempty"` - OpenStack *openstack.Metadata `json:"openstack,omitempty"` - Libvirt *libvirt.Metadata `json:"libvirt,omitempty"` - Azure *azure.Metadata `json:"azure,omitempty"` - GCP *gcp.Metadata `json:"gcp,omitempty"` - BareMetal *baremetal.Metadata `json:"baremetal,omitempty"` - Ovirt *ovirt.Metadata `json:"ovirt,omitempty"` - VSphere *vsphere.Metadata `json:"vsphere,omitempty"` + AWS *aws.Metadata `json:"aws,omitempty"` + OpenStack *openstack.Metadata `json:"openstack,omitempty"` + Libvirt *libvirt.Metadata `json:"libvirt,omitempty"` + Azure *azure.Metadata `json:"azure,omitempty"` + GCP *gcp.Metadata `json:"gcp,omitempty"` + BareMetal *baremetal.Metadata `json:"baremetal,omitempty"` + Ovirt *ovirt.Metadata `json:"ovirt,omitempty"` + EquinixMetal *equinixmetal.Metadata `json:"equinixmetal,omitempty"` + VSphere *vsphere.Metadata `json:"vsphere,omitempty"` } // Platform returns a string representation of the platform @@ -63,6 +65,9 @@ func (cpm *ClusterPlatformMetadata) Platform() string { if cpm.Ovirt != nil { return ovirt.Name } + if cpm.EquinixMetal != nil { + return equinixmetal.Name + } if cpm.VSphere != nil { return vsphere.Name } diff --git a/pkg/types/equinixmetal/OWNERS b/pkg/types/equinixmetal/OWNERS new file mode 100644 index 00000000000..06e8c730e33 --- /dev/null +++ b/pkg/types/equinixmetal/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +approvers: + - equinix-approvers +reviewers: + - equinix-reviewers diff --git a/pkg/types/equinixmetal/defaults/platform.go b/pkg/types/equinixmetal/defaults/platform.go new file mode 100644 index 00000000000..36df582fd79 --- /dev/null +++ b/pkg/types/equinixmetal/defaults/platform.go @@ -0,0 +1,13 @@ +package defaults + +import "github.com/openshift/installer/pkg/types/equinixmetal" + +// Defaults for the equinixmetal platform. +const ( + // TODO(displague) what API? metadata? + DefaultURI = "https://api.equinix.com/metal/v1" +) + +// SetPlatformDefaults sets the defaults for the platform. +func SetPlatformDefaults(p *equinixmetal.Platform) { +} diff --git a/pkg/types/equinixmetal/defaults/platform_test.go b/pkg/types/equinixmetal/defaults/platform_test.go new file mode 100644 index 00000000000..20c56eff6a9 --- /dev/null +++ b/pkg/types/equinixmetal/defaults/platform_test.go @@ -0,0 +1 @@ +package defaults diff --git a/pkg/types/equinixmetal/doc.go b/pkg/types/equinixmetal/doc.go new file mode 100644 index 00000000000..a4961cde1cc --- /dev/null +++ b/pkg/types/equinixmetal/doc.go @@ -0,0 +1,6 @@ +// Package equinixmetal contains equinixmetal-specific structures for +// installer configuration and management. 
+package equinixmetal + +// Name is the name for the equinixmetal platform. +const Name string = "equinixmetal" diff --git a/pkg/types/equinixmetal/machinepool.go b/pkg/types/equinixmetal/machinepool.go new file mode 100644 index 00000000000..892402d646f --- /dev/null +++ b/pkg/types/equinixmetal/machinepool.go @@ -0,0 +1,25 @@ +package equinixmetal + +// MachinePool stores the configuration for a machine pool installed +// on equinixmetal. +type MachinePool struct { + // The Equinix Metal Plan defines the CPU, memory, and networking specs of the + // provisioned node + Plan string + + // CustomData is an arbitrary bit of json to make available within each + // node's metadata + CustomData string + + // TODO(displague) Hardware reservation id? + // TODO(displague) virtual network? +} + +// Set sets the values from `required` to `p`. +func (p *MachinePool) Set(required *MachinePool) { + if required == nil || p == nil { + return + } + p.Plan = required.Plan + p.CustomData = required.CustomData +} diff --git a/pkg/types/equinixmetal/metadata.go b/pkg/types/equinixmetal/metadata.go new file mode 100644 index 00000000000..04a0ab52efe --- /dev/null +++ b/pkg/types/equinixmetal/metadata.go @@ -0,0 +1,19 @@ +package equinixmetal + +// Metadata contains equinixmetal metadata (e.g. for uninstalling the cluster). +type Metadata struct { + // Metro represents the Equinix Metal metro code where your devices will be + // provisioned + // (https://metal.equinix.com/developers/docs/getting-started/facilities/) + Metro string `json:"metro,omitempty"` + + // Facility represents the Equinix Metal facility code where your devices + // will be provisioned + // (https://metal.equinix.com/developers/docs/getting-started/facilities/) + Facility string `json:"facility,omitempty"` + + // ProjectID represents the Equinix Metal project used for logical grouping + // and invoicing + // (https://metal.equinix.com/developers/docs/API/getting-started/) + ProjectID string `json:"project_id,omitempty"` +} diff --git a/pkg/types/equinixmetal/platform.go b/pkg/types/equinixmetal/platform.go new file mode 100644 index 00000000000..864243901e8 --- /dev/null +++ b/pkg/types/equinixmetal/platform.go @@ -0,0 +1,32 @@ +package equinixmetal + +// Platform stores all the global configuration used when installing on Equinix Metal. +type Platform struct { + // Metro represents the Equinix Metal metro code for the location where your devices will be provisioned + // (https://metal.equinix.com/developers/docs/getting-started/metros/) + Metro string `json:"metro"` + + // Facility represents the Equinix Metal facility code for the region and + // datacenter where your devices will be provisioned + // (https://metal.equinix.com/developers/docs/getting-started/facilities/) + Facility string `json:"facility"` + + // ProjectID represents the Equinix Metal project used for logical grouping and invoicing (https://metal.equinix.com/developers/docs/API/getting-started/) + ProjectID string `json:"project_id"` + + // APIVIP is the static IP that was provisioned as the persistent endpoint for the cluster API and ignition + // +kubebuilder:validation:Format=ip + APIVIP string `json:"apivip"` + + // DefaultMachinePlatform is the default configuration used when + // installing on Equinix Metal for machine pools which do not define their own + // platform configuration. + // +optional + DefaultMachinePlatform *MachinePool `json:"defaultMachinePlatform,omitempty"` + + // ClusterOSImage is a URL to override the default OS image + // for cluster nodes.
The URL must contain a sha256 hash of the image + // e.g https://mirror.example.com/images/metal.qcow2.gz?sha256=3b5a8... + // + // +optional + ClusterOSImage string `json:"clusterOSImage,omitempty" validate:"omitempty,osimageuri,urlexist"` +} diff --git a/pkg/types/equinixmetal/validation/machinepool.go b/pkg/types/equinixmetal/validation/machinepool.go new file mode 100644 index 00000000000..8866829d8fd --- /dev/null +++ b/pkg/types/equinixmetal/validation/machinepool.go @@ -0,0 +1,13 @@ +package validation + +import ( + "github.com/openshift/installer/pkg/types/equinixmetal" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ValidateMachinePool checks that the specified machine pool is valid. +func ValidateMachinePool(p *equinixmetal.MachinePool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + return allErrs +} diff --git a/pkg/types/equinixmetal/validation/machinepool_test.go b/pkg/types/equinixmetal/validation/machinepool_test.go new file mode 100644 index 00000000000..958ae1a6226 --- /dev/null +++ b/pkg/types/equinixmetal/validation/machinepool_test.go @@ -0,0 +1 @@ +package validation diff --git a/pkg/types/equinixmetal/validation/platform.go b/pkg/types/equinixmetal/validation/platform.go new file mode 100644 index 00000000000..fd6a36a0038 --- /dev/null +++ b/pkg/types/equinixmetal/validation/platform.go @@ -0,0 +1,12 @@ +package validation + +import ( + "github.com/openshift/installer/pkg/types/equinixmetal" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ValidatePlatform checks that the specified platform is valid. +func ValidatePlatform(p *equinixmetal.Platform, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + return allErrs +} diff --git a/pkg/types/equinixmetal/validation/platform_test.go b/pkg/types/equinixmetal/validation/platform_test.go new file mode 100644 index 00000000000..5a3c8f3a974 --- /dev/null +++ b/pkg/types/equinixmetal/validation/platform_test.go @@ -0,0 +1 @@ +package validation \ No newline at end of file diff --git a/pkg/types/installconfig.go b/pkg/types/installconfig.go index 580e837d05e..8d5eda9991d 100644 --- a/pkg/types/installconfig.go +++ b/pkg/types/installconfig.go @@ -8,6 +8,7 @@ import ( "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/installer/pkg/types/baremetal" + "github.com/openshift/installer/pkg/types/equinixmetal" "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" @@ -31,6 +32,7 @@ var ( PlatformNames = []string{ aws.Name, azure.Name, + equinixmetal.Name, gcp.Name, openstack.Name, ovirt.Name, @@ -177,6 +179,10 @@ type Platform struct { // +optional OpenStack *openstack.Platform `json:"openstack,omitempty"` + // EquinixMetal is the configuration used when installing on EquinixMetal. + // +optional + EquinixMetal *equinixmetal.Platform `json:"equinixmetal,omitempty"` + // VSphere is the configuration used when installing on vSphere. 
// +optional VSphere *vsphere.Platform `json:"vsphere,omitempty"` @@ -207,6 +213,8 @@ func (p *Platform) Name() string { return none.Name case p.OpenStack != nil: return openstack.Name + case p.EquinixMetal != nil: + return equinixmetal.Name case p.VSphere != nil: return vsphere.Name case p.Ovirt != nil: diff --git a/pkg/types/machinepools.go b/pkg/types/machinepools.go index d385327516d..af49cceadbd 100644 --- a/pkg/types/machinepools.go +++ b/pkg/types/machinepools.go @@ -4,6 +4,7 @@ import ( "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/installer/pkg/types/baremetal" + "github.com/openshift/installer/pkg/types/equinixmetal" "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/openstack" @@ -90,6 +91,9 @@ type MachinePoolPlatform struct { // Ovirt is the configuration used when installing on oVirt. Ovirt *ovirt.MachinePool `json:"ovirt,omitempty"` + + // EquinixMetal is the configuration used when installing on Equinix Metal. + EquinixMetal *equinixmetal.MachinePool `json:"equinixmetal,omitempty"` } // Name returns a string representation of the platform (e.g. "aws" if @@ -111,10 +115,12 @@ func (p *MachinePoolPlatform) Name() string { return libvirt.Name case p.OpenStack != nil: return openstack.Name - case p.VSphere != nil: - return vsphere.Name case p.Ovirt != nil: return ovirt.Name + case p.VSphere != nil: + return vsphere.Name + case p.EquinixMetal != nil: + return equinixmetal.Name default: return "" } diff --git a/pkg/types/validation/installconfig.go b/pkg/types/validation/installconfig.go index 6d13b8fc57f..1ffc0dbe8a9 100644 --- a/pkg/types/validation/installconfig.go +++ b/pkg/types/validation/installconfig.go @@ -441,6 +441,7 @@ func validatePlatform(platform *types.Platform, fldPath *field.Path, network *ty return ovirtvalidation.ValidatePlatform(platform.Ovirt, f) }) } + // TODO(displague) platform.EquinixMetal return allErrs } diff --git a/pkg/types/validation/machinepools.go b/pkg/types/validation/machinepools.go index f017ae8f069..01165d2209b 100644 --- a/pkg/types/validation/machinepools.go +++ b/pkg/types/validation/machinepools.go @@ -106,6 +106,7 @@ func validateMachinePoolPlatform(platform *types.Platform, p *types.MachinePoolP if p.Ovirt != nil { validate(ovirt.Name, p.Ovirt, func(f *field.Path) field.ErrorList { return ovirtvalidation.ValidateMachinePool(p.Ovirt, f) }) } + // TODO(displague) p.EquinixMetal ? return allErrs } diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index bd56fc4c36b..eff3b8a61a1 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,160 @@ # Changes +## [0.65.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.64.0...v0.65.0) (2020-08-27) + + +### Announcements + +The following changes will be included in an upcoming release and are not +included in this one. + +#### Default Deadlines + +By default, non-streaming methods, like Create or Get methods, will have a +default deadline applied to the context provided at call time, unless a context +deadline is already set. Streaming methods have no default deadline and will run +indefinitely, unless the context provided at call time contains a deadline. + +To opt-out of this behavior, set the environment variable +`GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE` to `true` prior to +initializing a client. 
This opt-out mechanism will be removed in a later +release, with a notice similar to this one ahead of its removal. + + +### Features + +* **all:** auto-regenerate gapics , refs [#2774](https://www.github.com/googleapis/google-cloud-go/issues/2774) [#2764](https://www.github.com/googleapis/google-cloud-go/issues/2764) + + +### Bug Fixes + +* **all:** correct minor typos ([#2756](https://www.github.com/googleapis/google-cloud-go/issues/2756)) ([03d78b5](https://www.github.com/googleapis/google-cloud-go/commit/03d78b5627819cb64d1f3866f90043f709e825e1)) +* **compute/metadata:** remove leading slash for Get suffix ([#2760](https://www.github.com/googleapis/google-cloud-go/issues/2760)) ([f0d605c](https://www.github.com/googleapis/google-cloud-go/commit/f0d605ccf32391a9da056a2c551158bd076c128d)) + +## [0.64.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.63.0...v0.64.0) (2020-08-18) + + +### Features + +* **all:** auto-regenerate gapics , refs [#2734](https://www.github.com/googleapis/google-cloud-go/issues/2734) [#2731](https://www.github.com/googleapis/google-cloud-go/issues/2731) [#2730](https://www.github.com/googleapis/google-cloud-go/issues/2730) [#2725](https://www.github.com/googleapis/google-cloud-go/issues/2725) [#2722](https://www.github.com/googleapis/google-cloud-go/issues/2722) [#2706](https://www.github.com/googleapis/google-cloud-go/issues/2706) +* **pubsublite:** start generating v1 ([#2700](https://www.github.com/googleapis/google-cloud-go/issues/2700)) ([d2e777f](https://www.github.com/googleapis/google-cloud-go/commit/d2e777f56e08146646b3ffb7a78856795094ab4e)) + +## [0.63.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.62.0...v0.63.0) (2020-08-05) + + +### Features + +* **all:** auto-regenerate gapics ([#2682](https://www.github.com/googleapis/google-cloud-go/issues/2682)) ([63bfd63](https://www.github.com/googleapis/google-cloud-go/commit/63bfd638da169e0f1f4fa4a5125da2955022dc04)) +* **analytics/admin:** start generating apiv1alpha ([#2670](https://www.github.com/googleapis/google-cloud-go/issues/2670)) ([268199e](https://www.github.com/googleapis/google-cloud-go/commit/268199e5350a64a83ecf198e0e0fa4863f00fa6c)) +* **functions/metadata:** Special-case marshaling ([#2669](https://www.github.com/googleapis/google-cloud-go/issues/2669)) ([d8d7fc6](https://www.github.com/googleapis/google-cloud-go/commit/d8d7fc66cbc42f79bec25fb0daaf53d926e3645b)) +* **gaming:** start generate apiv1 ([#2681](https://www.github.com/googleapis/google-cloud-go/issues/2681)) ([1adfd0a](https://www.github.com/googleapis/google-cloud-go/commit/1adfd0aed6b2c0e1dd0c575a5ec0f49388fa5601)) +* **internal/kokoro:** add script to test compatibility with samples ([#2637](https://www.github.com/googleapis/google-cloud-go/issues/2637)) ([f2aa76a](https://www.github.com/googleapis/google-cloud-go/commit/f2aa76a0058e86c1c33bb634d2c084b58f77ab32)) + +## v0.62.0 + +### Announcements + +- There was a breaking change to `cloud.google.com/go/dataproc/apiv1` that was + merged in [this PR](https://github.com/googleapis/google-cloud-go/pull/2606). + This fixed a broken API response for `DiagnoseCluster`. When polling on the + Long Running Operation(LRO), the API now returns + `(*dataprocpb.DiagnoseClusterResults, error)` whereas it only returned an + `error` before. + +### Changes + +- all: + - Updated all direct dependencies. + - Updated contributing guidelines to suggest allowing edits from maintainers. +- billing/budgets: + - Start generating client for apiv1beta1. 
+- functions: + - Start generating client for apiv1. +- notebooks: + - Start generating client apiv1beta1. +- profiler: + - update proftest to support parsing floating-point backoff durations. + - Fix the regexp used to parse backoff duration. +- Various updates to autogenerated clients. + +## v0.61.0 + +### Changes + +- all: + - Update all direct dependencies. +- dashboard: + - Start generating client for apiv1. +- policytroubleshooter: + - Start generating client for apiv1. +- profiler: + - Disable OpenCensus Telemetry for requests made by the profiler package by default. You can re-enable it using `profiler.Config.EnableOCTelemetry`. +- Various updates to autogenerated clients. + +## v0.60.0 + +### Changes + +- all: + - Refactored examples to reduce module dependencies. + - Update sub-modules to use cloud.google.com/go v0.59.0. +- internal: + - Start generating client for gaming apiv1beta. +- Various updates to autogenerated clients. + +## v0.59.0 + +### Announcements + +goolgeapis/google-cloud-go has moved its source of truth to GitHub and is no longer a mirror. This means that our +contributing process has changed a bit. We will now be conducting all code reviews on GitHub which means we now accept +pull requests! If you have a version of the codebase previously checked out you may wish to update your git remote to +point to GitHub. + +### Changes + +- all: + - Remove dependency on honnef.co/go/tools. + - Update our contributing instructions now that we use GitHub for reviews. + - Remove some un-inclusive terminology. +- compute/metadata: + - Pass cancelable context to DNS lookup. +- .github: + - Update templates issue/PR templates. +- internal: + - Bump several clients to GA. + - Fix GoDoc badge source. + - Several automation changes related to the move to GitHub. + - Start generating a client for asset v1p5beta1. +- Various updates to autogenerated clients. + +## v0.58.0 + +### Deprecation notice + +- `cloud.google.com/go/monitoring/apiv3` has been deprecated due to breaking + changes in the API. Please migrate to `cloud.google.com/go/monitoring/apiv3/v2`. + +### Changes + +- all: + - The remaining uses of gtransport.Dial have been removed. + - The `genproto` dependency has been updated to a version that makes use of + new `protoreflect` library. For more information on these protobuf changes + please see the following post from the official Go blog: + https://blog.golang.org/protobuf-apiv2. +- internal: + - Started generation of datastore admin v1 client. + - Updated protofuf version used for generation to 3.12.X. + - Update the release levels for several APIs. + - Generate clients with protoc-gen-go@v1.4.1. +- monitoring: + - Re-enable generation of monitoring/apiv3 under v2 directory (see deprecation + notice above). +- profiler: + - Fixed flakiness in tests. +- Various updates to autogenerated clients. + ## v0.57.0 - all: @@ -1485,5 +1640,3 @@ Natural Language. [`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging). This client uses gRPC as its transport layer, and supports log reading, sinks and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly. - - diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md index 1e45441d139..d9775744e5c 100644 --- a/vendor/cloud.google.com/go/CONTRIBUTING.md +++ b/vendor/cloud.google.com/go/CONTRIBUTING.md @@ -14,94 +14,38 @@ 1. Sign one of the [contributor license agreements](#contributor-license-agreements) below. -1. 
Run `go get golang.org/x/review/git-codereview && go install golang.org/x/review/git-codereview` -to install the code reviewing tool. +1. Clone the repo: + `git clone https://github.com/googleapis/google-cloud-go` - 1. Ensure it's working by running `git codereview` (check your `PATH` if - not). +1. Change into the checked out source: + `cd google-cloud-go` - 1. If you would like, you may want to set up aliases for `git-codereview`, - such that `git codereview change` becomes `git change`. See the - [godoc](https://pkg.go.dev/golang.org/x/review/git-codereview) for details. +1. Fork the repo. - * Should you run into issues with the `git-codereview` tool, please note - that all error messages will assume that you have set up these aliases. +1. Set your fork as a remote: + `git remote add fork git@github.com:GITHUB_USERNAME/google-cloud-go.git` -1. Change to a directory of your choosing and clone the repo. +1. Make changes, commit to your fork. - ``` - cd ~/code - git clone https://code.googlesource.com/gocloud - ``` + Commit messages should follow the + [Conventional Commits Style](https://www.conventionalcommits.org). The scope + portion should always be filled with the name of the package affected by the + changes being made. For example: + ``` + feat(functions): add gophers codelab + ``` - * If you have already checked out the source, make sure that the remote - `git` `origin` is https://code.googlesource.com/gocloud: +1. Send a pull request with your changes. - ``` - git remote -v - # ... - git remote set-url origin https://code.googlesource.com/gocloud - ``` + To minimize friction, consider setting `Allow edits from maintainers` on the + PR, which will enable project committers and automation to update your PR. - * The project uses [Go Modules](https://blog.golang.org/using-go-modules) - for dependency management See - [`gopls`](https://github.com/golang/go/wiki/gopls) for making your editor - work with modules. +1. A maintainer will review the pull request and make comments. -1. Change to the project directory: - - ``` - cd ~/code/gocloud - ``` - -1. Make sure your `git` auth is configured correctly by visiting -https://code.googlesource.com, clicking "Generate Password" at the top-right, -and following the directions. Otherwise, `git codereview mail` in the next step -will fail. - -1. Now you are ready to make changes. Don't create a new branch or make commits in the traditional -way. Use the following`git codereview` commands to create a commit and create a Gerrit CL: - - ``` - git codereview change # Use this instead of git checkout -b - # Make changes. - git add ... - git codereview change # Use this instead of git commit - git codereview mail # If this fails, the error message will contain instructions to fix it. - ``` - - * This will create a new `git` branch for you to develop on. Once your - change is merged, you can delete this branch. - -1. As you make changes for code review, ammend the commit and re-mail the -change: - - ``` - # Make more changes. - git add ... - git codereview change - git codereview mail - ``` - - * **Warning**: do not change the `Change-Id` at the bottom of the commit - message - it's how Gerrit knows which change this is (or if it's new). - - * When you fixes issues from code review, respond to each code review - message then click **Reply** at the top of the page. - - * Each new mailed amendment will create a new patch set for - your change in Gerrit. Patch sets can be compared and reviewed. 
- - * **Note**: if your change includes a breaking change, our breaking change - detector will cause CI/CD to fail. If your breaking change is acceptable - in some way, add a `BREAKING_CHANGE_ACCEPTABLE=` line to the commit - message to cause the detector not to be run and to make it clear why that is - acceptable. - -1. Finally, add reviewers to your CL when it's ready for review. Reviewers will -not be added automatically. If you're not sure who to add for your code review, -add tbp@, cbro@, and codyoss@. + Prefer adding additional commits over amending and force-pushing since it can + be difficult to follow code reviews when the commit history changes. + Commits will be squashed when they're merged. ## Integration Tests diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md index bad1afa07f0..b115812c269 100644 --- a/vendor/cloud.google.com/go/README.md +++ b/vendor/cloud.google.com/go/README.md @@ -1,6 +1,6 @@ # Google Cloud Client Libraries for Go -[![GoDoc](https://pkg.go.dev/cloud.google.com/go?status.svg)](https://pkg.go.dev/cloud.google.com/go) +[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://pkg.go.dev/cloud.google.com/go) Go packages for [Google Cloud Platform](https://cloud.google.com) services. @@ -127,8 +127,7 @@ client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md) -document for details. We're using Gerrit for our code reviews. Please don't open pull -requests against this repo, new pull requests will be automatically closed. +document for details. Please note that this project is released with a Contributor Code of Conduct. By participating in this project you agree to abide by its terms. diff --git a/vendor/cloud.google.com/go/RELEASING.md b/vendor/cloud.google.com/go/RELEASING.md index 3fb49c4a894..c8c7f933527 100644 --- a/vendor/cloud.google.com/go/RELEASING.md +++ b/vendor/cloud.google.com/go/RELEASING.md @@ -10,51 +10,14 @@ 1. Sign one of the [contributor license agreements](#contributor-license-agreements) below. -1. Run `go get golang.org/x/review/git-codereview && go install golang.org/x/review/git-codereview` -to install the code reviewing tool. +1. Clone the repo: + `git clone https://github.com/googleapis/google-cloud-go` - 1. Ensure it's working by running `git codereview` (check your `PATH` if - not). +1. Change into the checked out source: + `cd google-cloud-go` - 1. If you would like, you may want to set up aliases for `git-codereview`, - such that `git codereview change` becomes `git change`. See the - [godoc](https://pkg.go.dev/golang.org/x/review/git-codereview) for details. - - * Should you run into issues with the `git-codereview` tool, please note - that all error messages will assume that you have set up these aliases. - -1. Change to a directory of your choosing and clone the repo. - - ``` - cd ~/code - git clone https://code.googlesource.com/gocloud - ``` - - * If you have already checked out the source, make sure that the remote - `git` `origin` is https://code.googlesource.com/gocloud: - - ``` - git remote -v - # ... - git remote set-url origin https://code.googlesource.com/gocloud - ``` - - * The project uses [Go Modules](https://blog.golang.org/using-go-modules) - for dependency management See - [`gopls`](https://github.com/golang/go/wiki/gopls) for making your editor - work with modules. - -1. 
Change to the project directory and add the github remote: - - ``` - cd ~/code/gocloud - git remote add github https://github.com/googleapis/google-cloud-go - ``` - -1. Make sure your `git` auth is configured correctly by visiting -https://code.googlesource.com, clicking "Generate Password" at the top-right, -and following the directions. Otherwise, `git codereview mail` in the next step -will fail. +1. Fork the repo and add your fork as a secondary remote (this is necessary in + order to create PRs). # Which module to release? @@ -90,8 +53,17 @@ of the `cloud.google.com/go` repository root module. Note: releasing `cloud.google.com/go` has no impact on any of the submodules, and vice-versa. They are released entirely independently. +# Test failures + +If there are any test failures in the Kokoro build, releases are blocked until +the failures have been resolved. + # How to release `cloud.google.com/go` +1. Check for failures in the + [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any + failures in the most recent build, address them before proceeding with the + release. 1. Navigate to `~/code/gocloud/` and switch to master. 1. `git pull` 1. Run `git tag -l | grep -v beta | grep -v alpha` to see all existing releases. @@ -105,16 +77,15 @@ They are released entirely independently. to be part of your release). 1. Edit `CHANGES.md` to include a summary of the changes. 1. `cd internal/version && go generate && cd -` -1. Mail the CL: `git add -A && git change && git mail` -1. Wait for the CL to be submitted. Once it's submitted, and without submitting - any other CLs in the meantime: +1. Commit the changes, push to your fork, and create a PR. +1. Wait for the PR to be reviewed and merged. Once it's merged, and without + merging any other PRs in the meantime: a. Switch to master. b. `git pull` c. Tag the repo with the next version: `git tag $NV`. - d. Push the tag to both remotes: + d. Push the tag to origin: `git push origin $NV` - `git push github $NV` -1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) +2. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) with the new release, copying the contents of `CHANGES.md`. # How to release a submodule @@ -126,6 +97,11 @@ To release a submodule: (these instructions assume we're releasing `cloud.google.com/go/datastore` - adjust accordingly) +1. Check for failures in the + [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any + failures in the most recent build, address them before proceeding with the + release. (This applies even if the failures are in a different submodule from the one + being released.) 1. Navigate to `~/code/gocloud/` and switch to master. 1. `git pull` 1. Run `git tag -l | grep datastore | grep -v beta | grep -v alpha` to see all @@ -136,15 +112,14 @@ To release a submodule: submodule directory since the last release. 1. Edit `datastore/CHANGES.md` to include a summary of the changes. 1. `cd internal/version && go generate && cd -` -1. Mail the CL: `git add -A && git change && git mail` -1. Wait for the CL to be submitted. Once it's submitted, and without submitting - any other CLs in the meantime: +1. Commit the changes, push to your fork, and create a PR. +1. Wait for the PR to be reviewed and merged. Once it's merged, and without + merging any other PRs in the meantime: a. Switch to master. b. `git pull` c. Tag the repo with the next version: `git tag $NV`. - d. Push the tag to both remotes: + d. 
Push the tag to origin: `git push origin $NV` - `git push github $NV` 1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) with the new release, copying the contents of `datastore/CHANGES.md`. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 1a7a4c7e57b..545bd9d379c 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -140,7 +140,7 @@ func testOnGCE() bool { }() go func() { - addrs, err := net.LookupHost("metadata.google.internal") + addrs, err := net.DefaultResolver.LookupHost(ctx, "metadata.google.internal") if err != nil || len(addrs) == 0 { resc <- false return @@ -296,6 +296,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { // being stable anyway. host = metadataIP } + suffix = strings.TrimLeft(suffix, "/") u := "http://" + host + "/computeMetadata/v1/" + suffix req, err := http.NewRequest("GET", u, nil) if err != nil { diff --git a/vendor/cloud.google.com/go/go.mod b/vendor/cloud.google.com/go/go.mod index 2ebf06faf17..81cd801e6e5 100644 --- a/vendor/cloud.google.com/go/go.mod +++ b/vendor/cloud.google.com/go/go.mod @@ -3,28 +3,22 @@ module cloud.google.com/go go 1.11 require ( - cloud.google.com/go/bigquery v1.4.0 - cloud.google.com/go/datastore v1.1.0 - cloud.google.com/go/pubsub v1.2.0 - cloud.google.com/go/storage v1.6.0 - github.com/golang/mock v1.4.3 - github.com/golang/protobuf v1.4.0 - github.com/google/go-cmp v0.4.0 - github.com/google/martian v2.1.0+incompatible - github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d + cloud.google.com/go/storage v1.10.0 + github.com/golang/mock v1.4.4 + github.com/golang/protobuf v1.4.2 + github.com/google/go-cmp v0.5.1 + github.com/google/martian/v3 v3.0.0 + github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 github.com/googleapis/gax-go/v2 v2.0.5 github.com/jstemmer/go-junit-report v0.9.1 - go.opencensus.io v0.22.3 + go.opencensus.io v0.22.4 golang.org/x/lint v0.0.0-20200302205851-738671d3881b - golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5 + golang.org/x/net v0.0.0-20200822124328-c89045814202 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a // indirect - golang.org/x/sys v0.0.0-20200501052902-10377860bb8e // indirect - golang.org/x/text v0.3.2 - golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d - google.golang.org/api v0.22.0 - google.golang.org/appengine v1.6.6 // indirect - google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84 - google.golang.org/grpc v1.29.1 - honnef.co/go/tools v0.0.1-2020.1.3 + golang.org/x/text v0.3.3 + golang.org/x/tools v0.0.0-20200825202427-b303f430e36d + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + google.golang.org/api v0.30.0 + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 + google.golang.org/grpc v1.31.0 ) diff --git a/vendor/cloud.google.com/go/go.sum b/vendor/cloud.google.com/go/go.sum index adb5e6d2495..cac94f72837 100644 --- a/vendor/cloud.google.com/go/go.sum +++ b/vendor/cloud.google.com/go/go.sum @@ -8,12 +8,20 @@ cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= 
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0 h1:a/O/bK/vWrYGOTFtH8di4rBxMZnmkjy+Y5LxpDwo+dA= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= @@ -24,12 +32,18 @@ cloud.google.com/go/pubsub v1.1.0 h1:9/vpR43S4aJaROxqQHQ3nH9lfyKKV0dC3vOmnw8ebQQ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0 h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r8= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0 h1:86K1Gel7BQ9/WmNWn7dTKMvTLFzwtBe5FNqYbi9X35g= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -62,8 +76,11 @@ github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0 h1:Rd1kQnQu0Hq3qvJppYSG0HtP+f5LPPUiDswTLiEegLg= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock 
v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -71,12 +88,18 @@ github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= @@ -88,16 +111,25 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= +github.com/google/martian/v3 v3.0.0/go.mod 
h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57 h1:eqyIo2HjKhKe/mJzTG8n4VqvLXIOEG+SLdDqX7xGtkY= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f h1:Jnx61latede7zDD3DiiP4gmNz33uK0U5HDUaF0a/HVQ= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d h1:iaAPcMIY2f+gpk8tKf0BMW5sLrlhaASiYAnFmvVG5e0= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -124,7 +156,9 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= @@ -132,10 +166,13 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA= 
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -176,6 +213,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -189,14 +228,27 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5 h1:WQ8q63x+f/zpC8Ac1s9wLElVoHhm32p6tudrU72n1QA= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f h1:QBjCr1Fz5kw158VqdE9JfI9cJnl/ymnJWAdMuinqL7Y= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202 
h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -216,6 +268,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -237,16 +291,27 @@ golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e h1:hq86ru83GdWTlfQFZGO4nZJTU4Bs2wfHl8oFHRaXsfc= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25 h1:OKbAoGs4fGM5cPLlVQLZGYkFC8OnOfgo6tt0Smf9XhM= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642 h1:B6caxRw+hozq68X2MY7jEpZh/cr4/aHLv9xU8Kkadrw= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= golang.org/x/text 
v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= @@ -285,12 +350,27 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d h1:lzLdP95xJmMpwQ6LUHwrc5V7js93hTiY7gkznu0BgmY= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88 h1:4j84u0sokprDu3IdSYHJMmou+YSLflMz8p7yAx/QI4g= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d h1:szSOL78iTCl0LF1AMjhSWJj8tIM0KixlUUnBtYXsmd8= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d h1:W07d4xkoAUSNOkOzdzXCdFGxT7o2rW4q8M34tB2i//k= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod 
h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0 h1:VGGbLNyPF7dvYHhcUGYBBGCRDDK0RRJAI6KCvo0CL+E= @@ -304,8 +384,17 @@ google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.17.0 h1:0q95w+VuFtv4PAx4PZVQdBMmYbaCHbnfKaEiDIcVyag= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0 h1:J1Pl9P2lnmYFSJvgs70DKELqHNh8CNWXPbud4njEE2s= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0 h1:cG03eaksBzhfSIk7JRGctfp3lanklcOM/mTGvow7BbQ= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -339,8 +428,22 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84 h1:pSLkPbrjnPyLDYUO2VM9mDLqo2V6CFBY84lFSZAfoi4= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380 h1:xriR1EgvKfkKxIoU2uUvrMVl+H26359loFFUleSMXFo= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20200804131852-c06518451d9c h1:Lq4llNryJoaVFRmvrIwC/ZHH7tNt4tUYIu8+se2aayY= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 h1:PDIOdWxZ8eRizhKa1AAvY53xsvLB1cWorMjslvY3VA8= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= @@ -354,14 +457,28 @@ google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0 h1:T7P4R73V3SSDPhH7WW7ATbfViLtmamH0DKrP3f9AuDI= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc h1:TnonUr8u3himcMY0vSh23jFOXA+cnucl1gB6EQTReBI= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -378,6 +495,7 @@ honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXe honnef.co/go/tools 
v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY= diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index 91b56ff5d29..536f89cb36e 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -1,4 +1,12 @@ { + "cloud.google.com/go/analytics/admin/apiv1alpha": { + "distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha", + "description": "", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/analytics/admin/apiv1alpha", + "release_level": "alpha" + }, "cloud.google.com/go/asset/apiv1": { "distribution_name": "cloud.google.com/go/asset/apiv1", "description": "Cloud Asset API", @@ -23,6 +31,14 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1p2beta1", "release_level": "beta" }, + "cloud.google.com/go/asset/apiv1p5beta1": { + "distribution_name": "cloud.google.com/go/asset/apiv1p5beta1", + "description": "Cloud Asset API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1p5beta1", + "release_level": "beta" + }, "cloud.google.com/go/automl/apiv1": { "distribution_name": "cloud.google.com/go/automl/apiv1", "description": "Cloud AutoML API", @@ -53,7 +69,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/connection/apiv1", - "release_level": "beta" + "release_level": "ga" }, "cloud.google.com/go/bigquery/connection/apiv1beta1": { "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1beta1", @@ -133,6 +149,14 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/billing/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/billing/budgets/apiv1beta1": { + "distribution_name": "cloud.google.com/go/billing/budgets/apiv1beta1", + "description": "", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/billing/budgets/apiv1beta1", "release_level": "beta" }, "cloud.google.com/go/cloudbuild/apiv1/v2": { @@ -223,6 +247,14 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/datastore", "release_level": "ga" }, + "cloud.google.com/go/datastore/admin/apiv1": { + "distribution_name": "cloud.google.com/go/datastore/admin/apiv1", + "description": "Cloud Datastore API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/datastore/admin/apiv1", + "release_level": "alpha" + }, "cloud.google.com/go/debugger/apiv2": { "distribution_name": "cloud.google.com/go/debugger/apiv2", "description": "Stackdriver Debugger API", @@ -287,6 +319,30 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/firestore/apiv1/admin", "release_level": "ga" }, + "cloud.google.com/go/functions/apiv1": { + "distribution_name": 
"cloud.google.com/go/functions/apiv1", + "description": "", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/functions/apiv1", + "release_level": "beta" + }, + "cloud.google.com/go/gaming/apiv1": { + "distribution_name": "cloud.google.com/go/gaming/apiv1", + "description": "", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/gaming/apiv1", + "release_level": "beta" + }, + "cloud.google.com/go/gaming/apiv1beta": { + "distribution_name": "cloud.google.com/go/gaming/apiv1beta", + "description": "", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/gaming/apiv1beta", + "release_level": "beta" + }, "cloud.google.com/go/iam": { "distribution_name": "cloud.google.com/go/iam", "description": "Cloud IAM", @@ -311,14 +367,6 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/iot/apiv1", "release_level": "ga" }, - "cloud.google.com/go/irm/apiv1alpha2": { - "distribution_name": "cloud.google.com/go/irm/apiv1alpha2", - "description": "Stackdriver Incident Response \u0026 Management API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/irm/apiv1alpha2", - "release_level": "alpha" - }, "cloud.google.com/go/kms/apiv1": { "distribution_name": "cloud.google.com/go/kms/apiv1", "description": "Cloud Key Management Service (KMS) API", @@ -353,7 +401,7 @@ }, "cloud.google.com/go/logging/apiv2": { "distribution_name": "cloud.google.com/go/logging/apiv2", - "description": "Stackdriver Logging API", + "description": "Cloud Logging API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/logging/apiv2", @@ -375,21 +423,37 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/memcache/apiv1beta2", "release_level": "beta" }, - "cloud.google.com/go/monitoring/apiv3": { - "distribution_name": "cloud.google.com/go/monitoring/apiv3", + "cloud.google.com/go/monitoring/apiv3/v2": { + "distribution_name": "cloud.google.com/go/monitoring/apiv3/v2", "description": "Cloud Monitoring API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3/v2", "release_level": "ga" }, + "cloud.google.com/go/monitoring/dashboard/apiv1": { + "distribution_name": "cloud.google.com/go/monitoring/dashboard/apiv1", + "description": "", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/monitoring/dashboard/apiv1", + "release_level": "beta" + }, + "cloud.google.com/go/notebooks/apiv1beta1": { + "distribution_name": "cloud.google.com/go/notebooks/apiv1beta1", + "description": "Notebooks API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/notebooks/apiv1beta1", + "release_level": "beta" + }, "cloud.google.com/go/osconfig/agentendpoint/apiv1": { "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1", "description": "Cloud OS Config API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/agentendpoint/apiv1", - "release_level": "beta" + "release_level": "ga" }, "cloud.google.com/go/osconfig/agentendpoint/apiv1beta": { "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1beta", @@ 
-405,7 +469,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/apiv1", - "release_level": "beta" + "release_level": "ga" }, "cloud.google.com/go/osconfig/apiv1beta": { "distribution_name": "cloud.google.com/go/osconfig/apiv1beta", @@ -439,6 +503,22 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/phishingprotection/apiv1beta1", "release_level": "beta" }, + "cloud.google.com/go/policytroubleshooter/apiv1": { + "distribution_name": "cloud.google.com/go/policytroubleshooter/apiv1", + "description": "Policy Troubleshooter API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/policytroubleshooter/apiv1", + "release_level": "beta" + }, + "cloud.google.com/go/profiler": { + "distribution_name": "cloud.google.com/go/profiler", + "description": "Cloud Profiler", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/profiler", + "release_level": "ga" + }, "cloud.google.com/go/pubsub": { "distribution_name": "cloud.google.com/go/pubsub", "description": "Cloud PubSub", @@ -455,6 +535,14 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/pubsub/apiv1", "release_level": "ga" }, + "cloud.google.com/go/pubsublite/apiv1": { + "distribution_name": "cloud.google.com/go/pubsublite/apiv1", + "description": "", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/pubsublite/apiv1", + "release_level": "beta" + }, "cloud.google.com/go/recaptchaenterprise/apiv1": { "distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1", "description": "reCAPTCHA Enterprise API", @@ -477,7 +565,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/recommender/apiv1", - "release_level": "alpha" + "release_level": "ga" }, "cloud.google.com/go/recommender/apiv1beta1": { "distribution_name": "cloud.google.com/go/recommender/apiv1beta1", @@ -533,7 +621,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/secretmanager/apiv1", - "release_level": "beta" + "release_level": "ga" }, "cloud.google.com/go/secretmanager/apiv1beta1": { "distribution_name": "cloud.google.com/go/secretmanager/apiv1beta1", @@ -545,7 +633,7 @@ }, "cloud.google.com/go/securitycenter/apiv1": { "distribution_name": "cloud.google.com/go/securitycenter/apiv1", - "description": "Cloud Security Command Center API", + "description": "Security Command Center API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1", @@ -553,7 +641,7 @@ }, "cloud.google.com/go/securitycenter/apiv1beta1": { "distribution_name": "cloud.google.com/go/securitycenter/apiv1beta1", - "description": "Cloud Security Command Center API", + "description": "Security Command Center API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1beta1", @@ -561,7 +649,7 @@ }, "cloud.google.com/go/securitycenter/apiv1p1beta1": { "distribution_name": "cloud.google.com/go/securitycenter/apiv1p1beta1", - "description": "Cloud Security Command Center API", + "description": "Security Command Center API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1p1beta1", @@ -725,7 +813,7 @@ "language": "Go", 
"client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1", - "release_level": "beta" + "release_level": "ga" }, "cloud.google.com/go/webrisk/apiv1beta1": { "distribution_name": "cloud.google.com/go/webrisk/apiv1beta1", diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go index a4f4049d6f9..f3dca3d0472 100644 --- a/vendor/cloud.google.com/go/internal/version/version.go +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -26,7 +26,7 @@ import ( // Repo is the current version of the client libraries in this // repo. It should be a date in YYYYMMDD format. -const Repo = "20200417" +const Repo = "20200817" // Go returns the Go runtime version. The returned string // has no whitespace. diff --git a/vendor/cloud.google.com/go/longrunning/autogen/doc.go b/vendor/cloud.google.com/go/longrunning/autogen/doc.go index 42a4a97740d..d394a758c59 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/doc.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/doc.go @@ -28,7 +28,7 @@ // To close the open connection, use the Close() method. // // For information about setting deadlines, reusing contexts, and more -// please visit godoc.org/cloud.google.com/go. +// please visit pkg.go.dev/cloud.google.com/go. package longrunning // import "cloud.google.com/go/longrunning/autogen" import ( @@ -46,7 +46,7 @@ import ( type clientHookParams struct{} type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) -const versionClient = "20200501" +const versionClient = "20200825" func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { out, _ := metadata.FromOutgoingContext(ctx) diff --git a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go index aaed25dc9e0..e2edca81916 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go @@ -215,7 +215,7 @@ func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningp } it.Response = resp - return resp.Operations, resp.NextPageToken, nil + return resp.GetOperations(), resp.GetNextPageToken(), nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) @@ -226,8 +226,8 @@ func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningp return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() return it } diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/doc.go b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go index 712ecf81e8d..52fa0ee0dbd 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/doc.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go @@ -34,6 +34,8 @@ // // For information about setting deadlines, reusing contexts, and more // please visit godoc.org/cloud.google.com/go. +// +// Deprecated: Please use cloud.google.com/go/monitoring/apiv3/v2. 
package monitoring // import "cloud.google.com/go/monitoring/apiv3" import ( diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index 295f5f2269b..f6d57be5085 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,5 +1,21 @@ # Changes +## v1.10.0 +- Bump dependency on google.golang.org/api to capture changes to retry logic + which will make retries on writes more resilient. +- Improve documentation for Writer.ChunkSize. +- Fix a bug in lifecycle to allow callers to clear lifecycle rules on a bucket. + +## v1.9.0 +- Add retry for transient network errors on most operations (with the exception + of writes). +- Bump dependency for google.golang.org/api to capture a change in the default + HTTP transport which will improve performance for reads under heavy load. +- Add CRC32C checksum validation option to Composer. + +## v1.8.0 +- Add support for V4 signed post policies. + ## v1.7.0 - V4 signed URL support: - Add support for bucket-bound domains and virtual hosted style URLs. diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 389402f7300..478482645fa 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -750,6 +750,7 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { } if ua.Lifecycle != nil { rb.Lifecycle = toRawLifecycle(*ua.Lifecycle) + rb.ForceSendFields = append(rb.ForceSendFields, "Lifecycle") } if ua.Logging != nil { if *ua.Logging == (BucketLogging{}) { @@ -936,7 +937,7 @@ func toCORS(rc []*raw.BucketCors) []CORS { func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { var rl raw.BucketLifecycle if len(l.Rules) == 0 { - return nil + rl.ForceSendFields = []string{"Rule"} } for _, r := range l.Rules { rr := &raw.BucketLifecycleRule{ diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go index 52162e72d10..61983df5ada 100644 --- a/vendor/cloud.google.com/go/storage/copy.go +++ b/vendor/cloud.google.com/go/storage/copy.go @@ -166,6 +166,13 @@ type Composer struct { // or zero-valued attributes are ignored. ObjectAttrs + // SendCRC specifies whether to transmit a CRC32C field. It should be set + // to true in addition to setting the Composer's CRC32C field, because zero + // is a valid CRC and normally a zero would not be transmitted. + // If a CRC32C is sent, and the data in the destination object does not match + // the checksum, the compose will be rejected. + SendCRC32C bool + dst *ObjectHandle srcs []*ObjectHandle } @@ -186,6 +193,9 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { // Compose requires a non-empty Destination, so we always set it, // even if the caller-provided ObjectAttrs is the zero value. req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket) + if c.SendCRC32C { + req.Destination.Crc32c = encodeUint32(c.ObjectAttrs.CRC32C) + } for _, src := range c.srcs { if err := src.validate(); err != nil { return nil, err diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index 5f9a62b3385..614ea11a590 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -191,6 +191,21 @@ SignedURL for details. 
} fmt.Println(url) +Post Policy V4 Signed Request + +A type of signed request that allows uploads through HTML forms directly to Cloud Storage with +temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised +by a user. + +For more information, please see https://cloud.google.com/storage/docs/xml-api/post-object as well +as the documentation of GenerateSignedPostPolicyV4. + + pv4, err := storage.GenerateSignedPostPolicyV4(bucketName, objectName, opts) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields) + Errors Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error). diff --git a/vendor/cloud.google.com/go/storage/go.mod b/vendor/cloud.google.com/go/storage/go.mod index 5940923fdf1..2eb6df3cbde 100644 --- a/vendor/cloud.google.com/go/storage/go.mod +++ b/vendor/cloud.google.com/go/storage/go.mod @@ -3,17 +3,16 @@ module cloud.google.com/go/storage go 1.11 require ( - cloud.google.com/go v0.56.0 - cloud.google.com/go/bigquery v1.6.0 // indirect - github.com/golang/protobuf v1.4.0 - github.com/google/go-cmp v0.4.0 + cloud.google.com/go v0.57.0 + cloud.google.com/go/bigquery v1.8.0 // indirect + github.com/golang/protobuf v1.4.2 + github.com/google/go-cmp v0.4.1 github.com/googleapis/gax-go/v2 v2.0.5 - golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5 // indirect + golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 // indirect golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/sys v0.0.0-20200501052902-10377860bb8e // indirect - golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d // indirect - google.golang.org/api v0.22.0 - google.golang.org/appengine v1.6.6 // indirect - google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84 + golang.org/x/sys v0.0.0-20200523222454-059865788121 // indirect + golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2 // indirect + google.golang.org/api v0.28.0 + google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790 google.golang.org/grpc v1.29.1 ) diff --git a/vendor/cloud.google.com/go/storage/go.sum b/vendor/cloud.google.com/go/storage/go.sum index 13c75b4bd96..5d3fca5f832 100644 --- a/vendor/cloud.google.com/go/storage/go.sum +++ b/vendor/cloud.google.com/go/storage/go.sum @@ -15,6 +15,8 @@ cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0 h1:WRz29PgAsVEyPSDHyk+0fpEkwEFyfhHn+JbksT6gIL4= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0 h1:EpMNVUorLiZIELdMZbCYX/ByTFCdoYopYAGxaGVz9ms= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE= @@ -23,8 +25,10 @@ cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hM cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0 h1:K2NyuHRuv15ku6eUpe0DQk5ZykPMnSOnvuVf6IHcjaE= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.6.0 h1:ajp/DjpiCHO71SyIhwb83YsUGAyWuzVvMko+9xCsJLw= 
-cloud.google.com/go/bigquery v1.6.0/go.mod h1:hyFDG0qSGdHNz8Q6nDN8rYIkld0q/+5uBZaelxiDLfE= +cloud.google.com/go/bigquery v1.7.0 h1:a/O/bK/vWrYGOTFtH8di4rBxMZnmkjy+Y5LxpDwo+dA= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= @@ -40,6 +44,7 @@ cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjp cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -87,6 +92,10 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -95,6 +104,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -102,6 +113,7 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= 
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= @@ -176,6 +188,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -202,6 +216,10 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7 golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5 h1:WQ8q63x+f/zpC8Ac1s9wLElVoHhm32p6tudrU72n1QA= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 h1:eDrdRpKgkcCqKZQwyZRyeFZgfqt37SL7Kv3tok06cKE= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= @@ -248,10 +266,12 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20u golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d h1:nc5K6ox/4lTFbMVSL9WRR81ixkcwXThoiF6yf+R9scA= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa h1:mQTN3ECqfsViCNBgq+A40vdwhkGykrrQlYe3mPj6BoU= -golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e h1:hq86ru83GdWTlfQFZGO4nZJTU4Bs2wfHl8oFHRaXsfc= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -302,10 +322,12 @@ golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d h1:3K34ovZAOnVaUPxanr0j4gh golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4 h1:kDtqNkeBrZb8B+atrj50B5XLHpzXXqcCdZPP/ApQ5NY= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200409170454-77362c5149f0 h1:Vj4uPv+FWfJqeeBexROGL+6fhy0yL5JgwKU5B54Cu7Y= -golang.org/x/tools v0.0.0-20200409170454-77362c5149f0/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d h1:lzLdP95xJmMpwQ6LUHwrc5V7js93hTiY7gkznu0BgmY= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2 h1:FD4wDsP+CQUqh2V12OBOt90pLHVToe58P++fUu3ggV4= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= @@ -326,10 +348,12 @@ google.golang.org/api v0.19.0 h1:GwFK8+l5/gdsOYKz5p6M4UK+QT8OvmHWZPJCnf+5DjA= google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.21.0 h1:zS+Q/CJJnVlXpXQVIz+lH0ZT2lBuT2ac7XD8Y/3w6hY= -google.golang.org/api v0.21.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0 h1:J1Pl9P2lnmYFSJvgs70DKELqHNh8CNWXPbud4njEE2s= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0 h1:cG03eaksBzhfSIk7JRGctfp3lanklcOM/mTGvow7BbQ= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0 h1:jMF5hhVfMkTZwHW1SDpKq5CkgWLXOb31Foaca9Zr3oM= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -369,10 +393,14 @@ google.golang.org/genproto 
v0.0.0-20200312145019-da6875a35672 h1:jiDSspVssiikoRP google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940 h1:MRHtG0U6SnaUb+s+LhNE1qt1FQ1wlhqr5E4usBKC0uA= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d h1:I7Vuu5Ejagca+VcgfBINHke3xwjCTYnIG4Q57fv0wYY= -google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84 h1:pSLkPbrjnPyLDYUO2VM9mDLqo2V6CFBY84lFSZAfoi4= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790 h1:FGjyjrQGURdc98leD1P65IdQD9Zlr4McvRcqIlV6OSs= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= @@ -387,8 +415,6 @@ google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.28.1 h1:C1QC6KzgSiLyBabDi87BbjaGreoRgGUF5nOyvfrAZ1k= -google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -397,6 +423,14 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc h1:TnonUr8u3himcMY0vSh23jFOXA+cnucl1gB6EQTReBI= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -409,6 +443,8 @@ honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXe honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/cloud.google.com/go/storage/go110.go b/vendor/cloud.google.com/go/storage/go110.go index 206813f0cea..c1273d59ade 100644 --- a/vendor/cloud.google.com/go/storage/go110.go +++ b/vendor/cloud.google.com/go/storage/go110.go @@ -16,7 +16,12 @@ package storage -import "google.golang.org/api/googleapi" +import ( + "net/url" + "strings" + + "google.golang.org/api/googleapi" +) func shouldRetry(err error) bool { switch e := err.(type) { @@ -24,6 +29,17 @@ func shouldRetry(err error) bool { // Retry on 429 and 5xx, according to // https://cloud.google.com/storage/docs/exponential-backoff. return e.Code == 429 || (e.Code >= 500 && e.Code < 600) + case *url.Error: + // Retry socket-level errors ECONNREFUSED and ENETUNREACH (from syscall). + // Unfortunately the error type is unexported, so we resort to string + // matching. + retriable := []string{"connection refused", "connection reset"} + for _, s := range retriable { + if strings.Contains(e.Error(), s) { + return true + } + } + return false case interface{ Temporary() bool }: return e.Temporary() default: diff --git a/vendor/cloud.google.com/go/storage/post_policy_v4.go b/vendor/cloud.google.com/go/storage/post_policy_v4.go new file mode 100644 index 00000000000..b9df7db9581 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/post_policy_v4.go @@ -0,0 +1,377 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + "time" +) + +// PostPolicyV4Options are used to construct a signed post policy. 
+// Please see https://cloud.google.com/storage/docs/xml-api/post-object +// for reference about the fields. +type PostPolicyV4Options struct { + // GoogleAccessID represents the authorizer of the signed URL generation. + // It is typically the Google service account client email address from + // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". + // Required. + GoogleAccessID string + + // PrivateKey is the Google service account private key. It is obtainable + // from the Google Developers Console. + // At https://console.developers.google.com/project//apiui/credential, + // create a service account client ID or reuse one of your existing service account + // credentials. Click on the "Generate new P12 key" to generate and download + // a new private key. Once you download the P12 file, use the following command + // to convert it into a PEM file. + // + // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes + // + // Provide the contents of the PEM file as a byte slice. + // Exactly one of PrivateKey or SignBytes must be non-nil. + PrivateKey []byte + + // SignBytes is a function for implementing custom signing. For example, if + // your application is running on Google App Engine, you can use + // appengine's internal signing function: + // ctx := appengine.NewContext(request) + // acc, _ := appengine.ServiceAccount(ctx) + // url, err := SignedURL("bucket", "object", &SignedURLOptions{ + // GoogleAccessID: acc, + // SignBytes: func(b []byte) ([]byte, error) { + // _, signedBytes, err := appengine.SignBytes(ctx, b) + // return signedBytes, err + // }, + // // etc. + // }) + // + // Exactly one of PrivateKey or SignBytes must be non-nil. + SignBytes func(hashBytes []byte) (signature []byte, err error) + + // Expires is the expiration time on the signed URL. + // It must be a time in the future. + // Required. + Expires time.Time + + // Style provides options for the type of URL to use. Options are + // PathStyle (default), BucketBoundHostname, and VirtualHostedStyle. See + // https://cloud.google.com/storage/docs/request-endpoints for details. + // Optional. + Style URLStyle + + // Insecure when set indicates that the generated URL's scheme + // will use "http" instead of "https" (default). + // Optional. + Insecure bool + + // Fields specifies the attributes of a PostPolicyV4 request. + // When Fields is non-nil, its attributes must match those that will + // passed into field Conditions. + // Optional. + Fields *PolicyV4Fields + + // The conditions that the uploaded file will be expected to conform to. + // When used, the failure of an upload to satisfy a condition will result in + // a 4XX status code, back with the message describing the problem. + // Optional. + Conditions []PostPolicyV4Condition +} + +// PolicyV4Fields describes the attributes for a PostPolicyV4 request. +type PolicyV4Fields struct { + // ACL specifies the access control permissions for the object. + // Optional. + ACL string + // CacheControl specifies the caching directives for the object. + // Optional. + CacheControl string + // ContentType specifies the media type of the object. + // Optional. + ContentType string + // ContentDisposition specifies how the file will be served back to requesters. + // Optional. + ContentDisposition string + // ContentEncoding specifies the decompressive transcoding that the object. 
+ // This field is complementary to ContentType in that the file could be + // compressed but ContentType specifies the file's original media type. + // Optional. + ContentEncoding string + // Metadata specifies custom metadata for the object. + // If any key doesn't begin with "x-goog-meta-", an error will be returned. + // Optional. + Metadata map[string]string + // StatusCodeOnSuccess when set, specifies the status code that Cloud Storage + // will serve back on successful upload of the object. + // Optional. + StatusCodeOnSuccess int + // RedirectToURLOnSuccess when set, specifies the URL that Cloud Storage + // will serve back on successful upload of the object. + // Optional. + RedirectToURLOnSuccess string +} + +// PostPolicyV4 describes the URL and respective form fields for a generated PostPolicyV4 request. +type PostPolicyV4 struct { + // URL is the generated URL that the file upload will be made to. + URL string + // Fields specifies the generated key-values that the file uploader + // must include in their multipart upload form. + Fields map[string]string +} + +// PostPolicyV4Condition describes the constraints that the subsequent +// object upload's multipart form fields will be expected to conform to. +type PostPolicyV4Condition interface { + isEmpty() bool + json.Marshaler +} + +type startsWith struct { + key, value string +} + +func (sw *startsWith) MarshalJSON() ([]byte, error) { + return json.Marshal([]string{"starts-with", sw.key, sw.value}) +} +func (sw *startsWith) isEmpty() bool { + return sw.value == "" +} + +// ConditionStartsWith checks that an attributes starts with value. +// An empty value will cause this condition to be ignored. +func ConditionStartsWith(key, value string) PostPolicyV4Condition { + return &startsWith{key, value} +} + +type contentLengthRangeCondition struct { + start, end uint64 +} + +func (clr *contentLengthRangeCondition) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{"content-length-range", clr.start, clr.end}) +} +func (clr *contentLengthRangeCondition) isEmpty() bool { + return clr.start == 0 && clr.end == 0 +} + +type singleValueCondition struct { + name, value string +} + +func (svc *singleValueCondition) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]string{svc.name: svc.value}) +} +func (svc *singleValueCondition) isEmpty() bool { + return svc.value == "" +} + +// ConditionContentLengthRange constraints the limits that the +// multipart upload's range header will be expected to be within. +func ConditionContentLengthRange(start, end uint64) PostPolicyV4Condition { + return &contentLengthRangeCondition{start, end} +} + +func conditionRedirectToURLOnSuccess(redirectURL string) PostPolicyV4Condition { + return &singleValueCondition{"success_action_redirect", redirectURL} +} + +func conditionStatusCodeOnSuccess(statusCode int) PostPolicyV4Condition { + svc := &singleValueCondition{name: "success_action_status"} + if statusCode > 0 { + svc.value = fmt.Sprintf("%d", statusCode) + } + return svc +} + +// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts. +// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads. 
+func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) { + if bucket == "" { + return nil, errors.New("storage: bucket must be non-empty") + } + if object == "" { + return nil, errors.New("storage: object must be non-empty") + } + now := utcNow() + if err := validatePostPolicyV4Options(opts, now); err != nil { + return nil, err + } + + var signingFn func(hashedBytes []byte) ([]byte, error) + switch { + case opts.SignBytes != nil: + signingFn = opts.SignBytes + + case len(opts.PrivateKey) != 0: + parsedRSAPrivKey, err := parseKey(opts.PrivateKey) + if err != nil { + return nil, err + } + signingFn = func(hashedBytes []byte) ([]byte, error) { + return rsa.SignPKCS1v15(rand.Reader, parsedRSAPrivKey, crypto.SHA256, hashedBytes) + } + + default: + return nil, errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") + } + + var descFields PolicyV4Fields + if opts.Fields != nil { + descFields = *opts.Fields + } + + if err := validateMetadata(descFields.Metadata); err != nil { + return nil, err + } + + // Build the policy. + conds := make([]PostPolicyV4Condition, len(opts.Conditions)) + copy(conds, opts.Conditions) + conds = append(conds, + conditionRedirectToURLOnSuccess(descFields.RedirectToURLOnSuccess), + conditionStatusCodeOnSuccess(descFields.StatusCodeOnSuccess), + &singleValueCondition{"acl", descFields.ACL}, + &singleValueCondition{"cache-control", descFields.CacheControl}, + ) + + YYYYMMDD := now.Format(yearMonthDay) + policyFields := map[string]string{ + "key": object, + "x-goog-date": now.Format(iso8601), + "x-goog-credential": opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request", + "x-goog-algorithm": "GOOG4-RSA-SHA256", + "success_action_redirect": descFields.RedirectToURLOnSuccess, + "acl": descFields.ACL, + } + for key, value := range descFields.Metadata { + conds = append(conds, &singleValueCondition{key, value}) + policyFields[key] = value + } + + // Following from the order expected by the conformance test cases, + // hence manually inserting these fields in a specific order. + conds = append(conds, + &singleValueCondition{"bucket", bucket}, + &singleValueCondition{"key", object}, + &singleValueCondition{"x-goog-date", now.Format(iso8601)}, + &singleValueCondition{ + name: "x-goog-credential", + value: opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request", + }, + &singleValueCondition{"x-goog-algorithm", "GOOG4-RSA-SHA256"}, + ) + + nonEmptyConds := make([]PostPolicyV4Condition, 0, len(opts.Conditions)) + for _, cond := range conds { + if cond == nil || !cond.isEmpty() { + nonEmptyConds = append(nonEmptyConds, cond) + } + } + condsAsJSON, err := json.Marshal(map[string]interface{}{ + "conditions": nonEmptyConds, + "expiration": opts.Expires.Format(time.RFC3339), + }) + if err != nil { + return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %v", err) + } + + b64Policy := base64.StdEncoding.EncodeToString(condsAsJSON) + shaSum := sha256.Sum256([]byte(b64Policy)) + signature, err := signingFn(shaSum[:]) + if err != nil { + return nil, err + } + + policyFields["policy"] = b64Policy + policyFields["x-goog-signature"] = fmt.Sprintf("%x", signature) + + // Construct the URL. 
+ scheme := "https" + if opts.Insecure { + scheme = "http" + } + path := opts.Style.path(bucket, "") + "/" + u := &url.URL{ + Path: path, + RawPath: pathEncodeV4(path), + Host: opts.Style.host(bucket), + Scheme: scheme, + } + + if descFields.StatusCodeOnSuccess > 0 { + policyFields["success_action_status"] = fmt.Sprintf("%d", descFields.StatusCodeOnSuccess) + } + + // Clear out fields with blanks values. + for key, value := range policyFields { + if value == "" { + delete(policyFields, key) + } + } + pp4 := &PostPolicyV4{ + Fields: policyFields, + URL: u.String(), + } + return pp4, nil +} + +// validatePostPolicyV4Options checks that: +// * GoogleAccessID is set +// * either but not both PrivateKey and SignBytes are set or nil, but not both +// * Expires, the deadline is not in the past +// * if Style is not set, it'll use PathStyle +func validatePostPolicyV4Options(opts *PostPolicyV4Options, now time.Time) error { + if opts == nil || opts.GoogleAccessID == "" { + return errors.New("storage: missing required GoogleAccessID") + } + if privBlank, signBlank := len(opts.PrivateKey) == 0, opts.SignBytes == nil; privBlank == signBlank { + return errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") + } + if opts.Expires.Before(now) { + return errors.New("storage: expecting Expires to be in the future") + } + if opts.Style == nil { + opts.Style = PathStyle() + } + return nil +} + +// validateMetadata ensures that all keys passed in have a prefix of "x-goog-meta-", +// otherwise it will return an error. +func validateMetadata(hdrs map[string]string) (err error) { + if len(hdrs) == 0 { + return nil + } + + badKeys := make([]string, 0, len(hdrs)) + for key := range hdrs { + if !strings.HasPrefix(key, "x-goog-meta-") { + badKeys = append(badKeys, key) + } + } + if len(badKeys) != 0 { + err = errors.New("storage: expected metadata to begin with x-goog-meta-, got " + strings.Join(badKeys, ", ")) + } + return +} diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index a065d6f6faa..20d9518a42d 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -1130,11 +1130,11 @@ type ObjectAttrs struct { // data is rejected if its MD5 hash does not match this field. MD5 []byte - // CRC32C is the CRC32 checksum of the object's content using - // the Castagnoli93 polynomial. This field is read-only, except when - // used from a Writer. If set on a Writer and Writer.SendCRC32C - // is true, the uploaded data is rejected if its CRC32c hash does not - // match this field. + // CRC32C is the CRC32 checksum of the object's content using the Castagnoli93 + // polynomial. This field is read-only, except when used from a Writer or + // Composer. In those cases, if the SendCRC32C field in the Writer or Composer + // is set to is true, the uploaded data is rejected if its CRC32C hash does + // not match this field. CRC32C uint32 // MediaLink is an URL to the object's content. This field is read-only. 
@@ -1350,7 +1350,7 @@ func (q *Query) SetAttrSelection(attrs []string) error { } if len(fieldSet) > 0 { - var b strings.Builder + var b bytes.Buffer b.WriteString("items(") first := true for field := range fieldSet { diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go index 75a3575f6da..1843a814155 100644 --- a/vendor/cloud.google.com/go/storage/writer.go +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -45,12 +45,20 @@ type Writer struct { // Writer will attempt to send to the server in a single request. Objects // smaller than the size will be sent in a single request, while larger // objects will be split over multiple requests. The size will be rounded up - // to the nearest multiple of 256K. If zero, chunking will be disabled and - // the object will be uploaded in a single request. + // to the nearest multiple of 256K. // - // ChunkSize will default to a reasonable value. If you perform many concurrent - // writes of small objects, you may wish set ChunkSize to a value that matches - // your objects' sizes to avoid consuming large amounts of memory. + // ChunkSize will default to a reasonable value. If you perform many + // concurrent writes of small objects (under ~8MB), you may wish set ChunkSize + // to a value that matches your objects' sizes to avoid consuming large + // amounts of memory. See + // https://cloud.google.com/storage/docs/json_api/v1/how-tos/upload#size + // for more information about performance trade-offs related to ChunkSize. + // + // If ChunkSize is set to zero, chunking will be disabled and the object will + // be uploaded in a single request without the use of a buffer. This will + // further reduce memory used during uploads, but will also prevent the writer + // from retrying in case of a transient error from the server, since a buffer + // is required in order to retry the failed request. // // ChunkSize must be set before the first Write call. ChunkSize int @@ -150,14 +158,10 @@ func (w *Writer) open() error { } setClientHeader(call.Header()) - // The internals that perform call.Do automatically retry - // uploading chunks, hence no need to add retries here. - // See issue https://github.com/googleapis/google-cloud-go/issues/1507. - // - // However, since this whole call's internals involve making the initial - // resumable upload session, the first HTTP request is not retried. - // TODO: Follow-up with google.golang.org/gensupport to solve - // https://github.com/googleapis/google-api-go-client/issues/392. + // The internals that perform call.Do automatically retry both the initial + // call to set up the upload as well as calls to upload individual chunks + // for a resumable upload (as long as the chunk size is non-zero). Hence + // there is no need to add retries here. resp, err = call.Do() } if err != nil { @@ -178,6 +182,9 @@ func (w *Writer) open() error { // error even though the write failed (or will fail). Always // use the error returned from Writer.Close to determine if // the upload was successful. +// +// Writes will be retried on transient errors from the server, unless +// Writer.ChunkSize has been set to zero. 
func (w *Writer) Write(p []byte) (n int, err error) { w.mu.Lock() werr := w.err diff --git a/vendor/cloud.google.com/go/tools.go b/vendor/cloud.google.com/go/tools.go index 5c91f8a3857..da5ca585d48 100644 --- a/vendor/cloud.google.com/go/tools.go +++ b/vendor/cloud.google.com/go/tools.go @@ -28,5 +28,4 @@ import ( _ "github.com/jstemmer/go-junit-report" _ "golang.org/x/lint/golint" _ "golang.org/x/tools/cmd/goimports" - _ "honnef.co/go/tools/cmd/staticcheck" ) diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/LICENSE b/vendor/github.com/equinix/terraform-provider-equinix-metal/LICENSE new file mode 100644 index 00000000000..a612ad9813b --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. 
For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/config.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/config.go new file mode 100644 index 00000000000..7b67a66b9c3 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/config.go @@ -0,0 +1,62 @@ +package packet + +import ( + "context" + "crypto/x509" + "net/http" + "net/url" + "regexp" + "time" + + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/terraform-plugin-sdk/helper/logging" + "github.com/packethost/packngo" +) + +const ( + consumerToken = "aZ9GmqHTPtxevvFq9SK3Pi2yr9YCbRzduCSXF2SNem5sjB91mDq7Th3ZwTtRqMWZ" +) + +type Config struct { + AuthToken string +} + +var redirectsErrorRe = regexp.MustCompile(`stopped after \d+ redirects\z`) + +func PacketRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + if ctx.Err() != nil { + return false, ctx.Err() + } + + if err != nil { + if v, ok := err.(*url.Error); ok { + // Don't retry if the error was due to too many redirects. + if redirectsErrorRe.MatchString(v.Error()) { + return false, nil + } + + // Don't retry if the error was due to TLS cert verification failure. + if _, ok := v.Err.(x509.UnknownAuthorityError); ok { + return false, nil + } + } + + // The error is likely recoverable so retry. + return true, nil + } + return false, nil +} + +// Client returns a new client for accessing Equinix Metal's API. +func (c *Config) Client() *packngo.Client { + httpClient := retryablehttp.NewClient() + httpClient.RetryWaitMin = time.Second + httpClient.RetryWaitMax = 30 * time.Second + httpClient.RetryMax = 10 + httpClient.CheckRetry = PacketRetryPolicy + httpClient.HTTPClient.Transport = logging.NewTransport( + "Equinix Metal", + httpClient.HTTPClient.Transport) + + return packngo.NewClientWithAuth(consumerToken, c.AuthToken, httpClient) +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_device.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_device.go new file mode 100644 index 00000000000..a8e5ad2accd --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_device.go @@ -0,0 +1,283 @@ +package packet + +import ( + "encoding/json" + "fmt" + "path" + "path/filepath" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/packethost/packngo" +) + +func dataSourcePacketDevice() *schema.Resource { + return &schema.Resource{ + Read: dataSourcePacketDeviceRead, + Schema: map[string]*schema.Schema{ + "hostname": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"device_id"}, + }, + "project_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"device_id"}, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "device_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"project_id", "hostname"}, + }, + "facility": { + Type: schema.TypeString, + Computed: true, + }, + "plan": { + Type: schema.TypeString, + Computed: true, + }, + "operating_system": { + Type: schema.TypeString, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "billing_cycle": { + Type: schema.TypeString, + Computed: true, + }, + "access_public_ipv6": { + Type: schema.TypeString, + Computed: true, + }, + 
+ "access_public_ipv4": { + Type: schema.TypeString, + Computed: true, + }, + "access_private_ipv4": { + Type: schema.TypeString, + Computed: true, + }, + "tags": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "ssh_key_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "network_type": { + Type: schema.TypeString, + Computed: true, + }, + "hardware_reservation_id": { + Type: schema.TypeString, + Computed: true, + }, + "storage": { + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + s, _ := structure.NormalizeJsonString(v) + return s + }, + Computed: true, + }, + "root_password": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "always_pxe": { + Type: schema.TypeBool, + Computed: true, + }, + "ipxe_script_url": { + Type: schema.TypeString, + Computed: true, + }, + "network": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + }, + "gateway": { + Type: schema.TypeString, + Computed: true, + }, + "family": { + Type: schema.TypeInt, + Computed: true, + }, + "cidr": { + Type: schema.TypeInt, + Computed: true, + }, + "public": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "ports": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "mac": { + Type: schema.TypeString, + Computed: true, + }, + "bonded": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourcePacketDeviceRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + hostnameRaw, hostnameOK := d.GetOk("hostname") + projectIdRaw, projectIdOK := d.GetOk("project_id") + deviceIdRaw, deviceIdOK := d.GetOk("device_id") + + if !deviceIdOK && !hostnameOK { + return fmt.Errorf("You must supply device_id or hostname") + } + var device *packngo.Device + if hostnameOK { + if !projectIdOK { + return fmt.Errorf("If you lookup via hostname, you must supply project_id") + } + hostname := hostnameRaw.(string) + projectId := projectIdRaw.(string) + + ds, _, err := client.Devices.List(projectId, &packngo.ListOptions{Search: hostname}) + if err != nil { + return err + } + + device, err = findDeviceByHostname(ds, hostname) + if err != nil { + return err + } + } else { + deviceId := deviceIdRaw.(string) + var err error + device, _, err = client.Devices.Get(deviceId, nil) + if err != nil { + return err + } + } + + d.Set("hostname", device.Hostname) + d.Set("project_id", device.Project.ID) + d.Set("device_id", device.ID) + d.Set("plan", device.Plan.Slug) + d.Set("facility", device.Facility.Code) + d.Set("operating_system", device.OS.Slug) + d.Set("state", device.State) + d.Set("billing_cycle", device.BillingCycle) + d.Set("ipxe_script_url", device.IPXEScriptURL) + d.Set("always_pxe", device.AlwaysPXE) + d.Set("root_password", device.RootPassword) + if device.Storage != nil { + rawStorageBytes, err := json.Marshal(device.Storage) + if err != nil { + return fmt.Errorf("[ERR] Error getting storage JSON string for device (%s): %s", d.Id(), err) + } + + storageString, err := structure.NormalizeJsonString(string(rawStorageBytes)) + if err != nil { + return 
fmt.Errorf("[ERR] Error normalizing storage JSON string for device (%s): %s", d.Id(), err) + } + d.Set("storage", storageString) + } + + if len(device.HardwareReservation.Href) > 0 { + d.Set("hardware_reservation_id", path.Base(device.HardwareReservation.Href)) + } + networkType := device.GetNetworkType() + + d.Set("network_type", networkType) + + d.Set("tags", device.Tags) + + keyIDs := []string{} + for _, k := range device.SSHKeys { + keyIDs = append(keyIDs, filepath.Base(k.URL)) + } + d.Set("ssh_key_ids", keyIDs) + networkInfo := getNetworkInfo(device.Network) + + sort.SliceStable(networkInfo.Networks, func(i, j int) bool { + famI := networkInfo.Networks[i]["family"].(int) + famJ := networkInfo.Networks[j]["family"].(int) + pubI := networkInfo.Networks[i]["public"].(bool) + pubJ := networkInfo.Networks[j]["public"].(bool) + return getNetworkRank(famI, pubI) < getNetworkRank(famJ, pubJ) + }) + + d.Set("network", networkInfo.Networks) + d.Set("access_public_ipv4", networkInfo.PublicIPv4) + d.Set("access_private_ipv4", networkInfo.PrivateIPv4) + d.Set("access_public_ipv6", networkInfo.PublicIPv6) + + ports := getPorts(device.NetworkPorts) + d.Set("ports", ports) + + d.SetId(device.ID) + return nil +} + +func findDeviceByHostname(devices []packngo.Device, hostname string) (*packngo.Device, error) { + results := make([]packngo.Device, 0) + for _, d := range devices { + if d.Hostname == hostname { + results = append(results, d) + } + } + if len(results) == 1 { + return &results[0], nil + } + if len(results) == 0 { + return nil, fmt.Errorf("no device found with hostname %s", hostname) + } + return nil, fmt.Errorf("too many devices found with hostname %s (found %d, expected 1)", hostname, len(results)) +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_device_bgp_neighbors.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_device_bgp_neighbors.go new file mode 100644 index 00000000000..f6707803752 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_device_bgp_neighbors.go @@ -0,0 +1,132 @@ +package packet + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func bgpNeighborSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address_family": { + Type: schema.TypeInt, + Computed: true, + }, + "customer_as": { + Type: schema.TypeInt, + Computed: true, + }, + "customer_ip": { + Type: schema.TypeString, + Computed: true, + }, + "md5_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "md5_password": { + Type: schema.TypeString, + Computed: true, + }, + "multihop": { + Type: schema.TypeBool, + Computed: true, + }, + "peer_as": { + Type: schema.TypeInt, + Computed: true, + }, + "peer_ips": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "routes_in": { + Type: schema.TypeList, + Computed: true, + Elem: bgpRouteSchema(), + }, + "routes_out": { + Type: schema.TypeList, + Computed: true, + Elem: bgpRouteSchema(), + }, + }, + } +} + +func bgpRouteSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "route": { + Type: schema.TypeString, + Computed: true, + }, + "exact": { + Type: schema.TypeBool, + Computed: true, + }, + }, + } +} + +func dataSourcePacketDeviceBGPNeighbors() *schema.Resource { + return &schema.Resource{ + Read: dataSourcePacketDeviceBGPNeighborsRead, + 
Schema: map[string]*schema.Schema{ + "device_id": { + Type: schema.TypeString, + Required: true, + }, + "bgp_neighbors": { + Type: schema.TypeList, + Computed: true, + Elem: bgpNeighborSchema(), + }, + }, + } +} + +func dataSourcePacketDeviceBGPNeighborsRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + deviceID := d.Get("device_id").(string) + + bgpNeighborsRaw, _, err := client.Devices.ListBGPNeighbors(deviceID, nil) + if err != nil { + return err + } + + d.Set("bgp_neighbors", getBgpNeighbors(bgpNeighborsRaw)) + d.SetId(deviceID) + return nil +} + +func getRoutesSlice(routes []packngo.BGPRoute) []map[string]interface{} { + ret := []map[string]interface{}{} + for _, r := range routes { + ret = append(ret, map[string]interface{}{ + "route": r.Route, "exact": r.Exact, + }) + } + return ret +} + +func getBgpNeighbors(ns []packngo.BGPNeighbor) []map[string]interface{} { + ret := make([]map[string]interface{}, 0, 1) + for _, n := range ns { + neighbor := map[string]interface{}{ + "address_family": n.AddressFamily, + "customer_as": n.CustomerAs, + "customer_ip": n.CustomerIP, + "md5_enabled": n.Md5Enabled, + "md5_password": n.Md5Password, + "multihop": n.Multihop, + "peer_as": n.PeerAs, + "peer_ips": n.PeerIps, + "routes_in": getRoutesSlice(n.RoutesIn), + "routes_out": getRoutesSlice(n.RoutesOut), + } + ret = append(ret, neighbor) + } + return ret +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_ip_block_ranges.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_ip_block_ranges.go new file mode 100644 index 00000000000..f7a368e5295 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_ip_block_ranges.go @@ -0,0 +1,104 @@ +package packet + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func dataSourcePacketIPBlockRanges() *schema.Resource { + return &schema.Resource{ + Read: dataSourcePacketIPBlockRangesRead, + + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + }, + "facility": { + Type: schema.TypeString, + Optional: true, + }, + "public_ipv4": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + "global_ipv4": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + "private_ipv4": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + "ipv6": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + }, + } +} + +func faclityMatch(ref, ipFacility string) bool { + if ref == "" { + return true + } + if ref == ipFacility { + return true + } + return false +} + +func dataSourcePacketIPBlockRangesRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + projectID := d.Get("project_id").(string) + ips, _, err := client.ProjectIPs.List(projectID, nil) + if err != nil { + return err + } + + facility := d.Get("facility").(string) + + publicIPv4s := []string{} + globalIPv4s := []string{} + privateIPv4s := []string{} + theIPv6s := []string{} + var targetSlice *[]string + + for _, ip := range ips { + targetSlice = nil + cnStr := fmt.Sprintf("%s/%d", ip.Network, ip.CIDR) + if ip.AddressFamily == 4 { + if ip.Public { + if getGlobalBool(&ip) { + globalIPv4s = append(globalIPv4s, cnStr) + } else { + 
targetSlice = &publicIPv4s + } + } else { + targetSlice = &privateIPv4s + } + } else { + targetSlice = &theIPv6s + } + if targetSlice != nil && faclityMatch(facility, ip.Facility.Code) { + *targetSlice = append(*targetSlice, cnStr) + } + } + + d.Set("public_ipv4", publicIPv4s) + d.Set("global_ipv4", globalIPv4s) + d.Set("private_ipv4", privateIPv4s) + d.Set("ipv6", theIPv6s) + if facility != "" { + facility = "-" + facility + } + d.SetId(projectID + facility + "-IPs") + return nil + +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_operating_system.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_operating_system.go new file mode 100644 index 00000000000..f6b5351fd01 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_operating_system.go @@ -0,0 +1,117 @@ +package packet + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func dataSourceOperatingSystem() *schema.Resource { + return &schema.Resource{ + Read: dataSourcePacketOperatingSystemRead, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + }, + "distro": { + Type: schema.TypeString, + Optional: true, + }, + "version": { + Type: schema.TypeString, + Optional: true, + }, + "provisionable_on": { + Type: schema.TypeString, + Optional: true, + }, + "slug": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourcePacketOperatingSystemRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + name, nameOK := d.GetOk("name") + distro, distroOK := d.GetOk("distro") + version, versionOK := d.GetOk("version") + provisionableOn, provisionableOnOK := d.GetOk("provisionable_on") + + if !nameOK && !distroOK && !versionOK && !provisionableOnOK { + return fmt.Errorf("One of name, distro, version, or provisionable_on must be assigned") + } + + log.Println("[DEBUG] ******") + log.Println("[DEBUG] params", name, distro, version, provisionableOn) + log.Println("[DEBUG] ******") + + oss, _, err := client.OperatingSystems.List() + if err != nil { + return err + } + + if nameOK { + temp := []packngo.OS{} + for _, os := range oss { + if strings.Contains(strings.ToLower(os.Name), strings.ToLower(name.(string))) { + temp = append(temp, os) + } + } + oss = temp + } + + if distroOK && (len(oss) != 0) { + temp := []packngo.OS{} + for _, v := range oss { + if v.Distro == distro.(string) { + temp = append(temp, v) + } + } + oss = temp + } + + if versionOK && (len(oss) != 0) { + temp := []packngo.OS{} + for _, v := range oss { + if v.Version == version.(string) { + temp = append(temp, v) + } + } + oss = temp + } + + if provisionableOnOK && (len(oss) != 0) { + temp := []packngo.OS{} + for _, v := range oss { + for _, po := range v.ProvisionableOn { + if po == provisionableOn.(string) { + temp = append(temp, v) + } + } + } + oss = temp + } + log.Println("[DEBUG] RESULTS:", oss) + + if len(oss) == 0 { + return fmt.Errorf("There are no operating systems that match the search criteria") + } + + if len(oss) > 1 { + return fmt.Errorf("There is more than one operating system that matches the search criteria") + } + d.Set("name", oss[0].Name) + d.Set("distro", oss[0].Distro) + d.Set("version", oss[0].Version) + d.Set("slug", oss[0].Slug) + d.SetId(oss[0].Slug) + return nil +} diff --git 
a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_organization.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_organization.go new file mode 100644 index 00000000000..7a960904528 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_organization.go @@ -0,0 +1,119 @@ +package packet + +import ( + "fmt" + "log" + "path/filepath" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func dataSourcePacketOrganization() *schema.Resource { + return &schema.Resource{ + Read: dataSourcePacketOrganizationRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"organization_id"}, + }, + "organization_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"name"}, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + + "website": { + Type: schema.TypeString, + Computed: true, + }, + + "twitter": { + Type: schema.TypeString, + Computed: true, + }, + "logo": { + Type: schema.TypeString, + Computed: true, + }, + "project_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func findOrgByName(os []packngo.Organization, name string) (*packngo.Organization, error) { + results := make([]packngo.Organization, 0) + for _, o := range os { + if o.Name == name { + results = append(results, o) + } + } + if len(results) == 1 { + return &results[0], nil + } + if len(results) == 0 { + return nil, fmt.Errorf("no organization found with name %s", name) + } + return nil, fmt.Errorf("too many organizations found with name %s (found %d, expected 1)", name, len(results)) +} + +func dataSourcePacketOrganizationRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + nameRaw, nameOK := d.GetOk("name") + orgIdRaw, orgIdOK := d.GetOk("organization_id") + + if !orgIdOK && !nameOK { + return fmt.Errorf("You must supply organization_id or name") + } + var org *packngo.Organization + + if nameOK { + name := nameRaw.(string) + + os, _, err := client.Organizations.List(nil) + if err != nil { + return err + } + + org, err = findOrgByName(os, name) + if err != nil { + return err + } + } else { + orgId := orgIdRaw.(string) + log.Println(orgId) + var err error + org, _, err = client.Organizations.Get(orgId, nil) + if err != nil { + return err + } + } + projectIds := []string{} + + for _, p := range org.Projects { + projectIds = append(projectIds, filepath.Base(p.URL)) + } + + d.Set("organization_id", org.ID) + d.Set("name", org.Name) + d.Set("description", org.Description) + d.Set("website", org.Website) + d.Set("twitter", org.Twitter) + d.Set("logo", org.Logo) + d.Set("project_ids", projectIds) + d.SetId(org.ID) + + return nil +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_precreated_ip_block.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_precreated_ip_block.go new file mode 100644 index 00000000000..d0c6bf256e9 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_precreated_ip_block.go @@ -0,0 +1,104 @@ +package packet + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func 
dataSourcePacketPreCreatedIPBlock() *schema.Resource { + s := packetIPComputedFields() + s["project_id"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + } + s["global"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + } + s["public"] = &schema.Schema{ + Type: schema.TypeBool, + Required: true, + } + + s["facility"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + } + s["address_family"] = &schema.Schema{ + Type: schema.TypeInt, + Required: true, + } + s["cidr_notation"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + s["quantity"] = &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + } + s["type"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + + return &schema.Resource{ + Read: dataSourcePacketReservedIPBlockRead, + Schema: s, + } +} + +func dataSourcePacketReservedIPBlockRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + projectID := d.Get("project_id").(string) + log.Println("[DEBUG] packet_precreated_ip_block - getting list of IPs in a project") + ips, _, err := client.ProjectIPs.List(projectID, nil) + if err != nil { + return err + } + ipv := d.Get("address_family").(int) + public := d.Get("public").(bool) + global := d.Get("global").(bool) + + if !public && global { + return fmt.Errorf("Private (non-public) global IP address blocks are not supported in Equinix Metal") + } + + fval, fok := d.GetOk("facility") + if fok && global { + return fmt.Errorf("You can't specify facility for global IP block - addresses from global blocks can be assigned to devices across several facilities") + } + + if fok { + // lookup of not-global block + facility := fval.(string) + for _, ip := range ips { + if ip.Public == public && ip.AddressFamily == ipv && facility == ip.Facility.Code { + if err := loadBlock(d, &ip); err != nil { + return err + } + break + } + } + } else { + // lookup of global block + for _, ip := range ips { + blockGlobal := getGlobalBool(&ip) + if ip.Public == public && ip.AddressFamily == ipv && blockGlobal { + if err := loadBlock(d, &ip); err != nil { + return err + } + break + } + } + + } + if d.Get("cidr_notation") == "" { + return fmt.Errorf("Could not find matching reserved block, all IPs were %v", ips) + } + return nil + +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_project.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_project.go new file mode 100644 index 00000000000..930ce0b0c18 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_project.go @@ -0,0 +1,168 @@ +package packet + +import ( + "fmt" + "log" + "path" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/packethost/packngo" +) + +func dataSourcePacketProject() *schema.Resource { + return &schema.Resource{ + Read: dataSourcePacketProjectRead, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"project_id"}, + }, + "project_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"name"}, + }, + + "created": { + Type: schema.TypeString, + Computed: true, + }, + + "updated": { + Type: schema.TypeString, + Computed: true, + }, + + "backend_transfer": { + Type: schema.TypeBool, + Computed: true, + }, + + 
"payment_method_id": { + Type: schema.TypeString, + Computed: true, + }, + + "organization_id": { + Type: schema.TypeString, + Computed: true, + }, + "user_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "bgp_config": { + Type: schema.TypeList, + MaxItems: 1, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deployment_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"local", "global"}, false), + }, + "asn": { + Type: schema.TypeInt, + Required: true, + }, + "md5": { + Type: schema.TypeString, + Optional: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "max_prefix": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourcePacketProjectRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + nameRaw, nameOK := d.GetOk("name") + projectIdRaw, projectIdOK := d.GetOk("project_id") + + if !projectIdOK && !nameOK { + return fmt.Errorf("You must supply project_id or name") + } + var project *packngo.Project + + if nameOK { + name := nameRaw.(string) + + os, _, err := client.Projects.List(nil) + if err != nil { + return err + } + + project, err = findProjectByName(os, name) + if err != nil { + return err + } + } else { + projectId := projectIdRaw.(string) + log.Println(projectId) + var err error + project, _, err = client.Projects.Get(projectId, nil) + if err != nil { + return err + } + } + + d.SetId(project.ID) + d.Set("payment_method_id", path.Base(project.PaymentMethod.URL)) + d.Set("name", project.Name) + d.Set("project_id", project.ID) + d.Set("organization_id", path.Base(project.Organization.URL)) + d.Set("created", project.Created) + d.Set("updated", project.Updated) + d.Set("backend_transfer", project.BackendTransfer) + + bgpConf, _, err := client.BGPConfig.Get(project.ID, nil) + userIds := []string{} + for _, u := range project.Users { + userIds = append(userIds, path.Base(u.URL)) + } + d.Set("user_ids", userIds) + + if (err == nil) && (bgpConf != nil) { + // guard against an empty struct + if bgpConf.ID != "" { + err := d.Set("bgp_config", flattenBGPConfig(bgpConf)) + if err != nil { + err = friendlyError(err) + return err + } + } + } + return nil +} + +func findProjectByName(ps []packngo.Project, name string) (*packngo.Project, error) { + results := make([]packngo.Project, 0) + for _, p := range ps { + if p.Name == name { + results = append(results, p) + } + } + if len(results) == 1 { + return &results[0], nil + } + if len(results) == 0 { + return nil, fmt.Errorf("no project found with name %s", name) + } + return nil, fmt.Errorf("too many projects found with name %s (found %d, expected 1)", name, len(results)) +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_project_ssh_key.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_project_ssh_key.go new file mode 100644 index 00000000000..a00b75efaeb --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_project_ssh_key.go @@ -0,0 +1,125 @@ +package packet + +import ( + "fmt" + "path" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/packethost/packngo" +) + +func dataSourcePacketProjectSSHKey() *schema.Resource { + return &schema.Resource{ + Read: 
dataSourcePacketProjectSSHKeyRead, + Schema: map[string]*schema.Schema{ + "search": { + Type: schema.TypeString, + Description: "The name, fingerprint, id, or public_key of the SSH Key to search for in the Equinix Metal project", + Optional: true, + ValidateFunc: validation.NoZeroValues, + }, + "id": { + Type: schema.TypeString, + Description: "The id of the SSH Key", + Optional: true, + ValidateFunc: validation.NoZeroValues, + Computed: true, + }, + "project_id": { + Type: schema.TypeString, + Description: "The Equinix Metal project id of the Equinix Metal SSH Key", + Required: true, + ValidateFunc: validation.NoZeroValues, + }, + "name": { + Type: schema.TypeString, + Description: "The label of the Equinix Metal SSH Key", + Computed: true, + }, + "public_key": { + Type: schema.TypeString, + Description: "The public SSH key that will be authorized for SSH access on Equinix Metal devices provisioned with this key", + Computed: true, + }, + "fingerprint": { + Type: schema.TypeString, + Computed: true, + }, + "created": { + Type: schema.TypeString, + Computed: true, + }, + "updated": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourcePacketProjectSSHKeyRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + search := d.Get("search").(string) + id := d.Get("id").(string) + projectID := d.Get("project_id").(string) + + if id == "" && search == "" { + return fmt.Errorf("You must supply either search or id") + } + + var ( + key packngo.SSHKey + searchOpts *packngo.SearchOptions + ) + + if search != "" { + searchOpts = &packngo.SearchOptions{Search: search} + } + keys, _, err := client.Projects.ListSSHKeys(projectID, searchOpts) + + if err != nil { + err = fmt.Errorf("Error listing project ssh keys: %s", friendlyError(err)) + return err + } + + for i := range keys { + // use the first match for searches + if search != "" { + key = keys[i] + break + } + + // otherwise find the matching ID + if keys[i].ID == id { + key = keys[i] + break + } + } + + if key.ID == "" { + // Not Found + return fmt.Errorf("Project %q SSH Key matching %q was not found", projectID, search) + } + + ownerID := path.Base(key.Owner.Href) + + d.SetId(key.ID) + d.Set("name", key.Label) + d.Set("public_key", key.Key) + d.Set("fingerprint", key.FingerPrint) + d.Set("owner_id", ownerID) + d.Set("created", key.Created) + d.Set("updated", key.Updated) + + if key.Owner.Href[:10] == "/projects/" { + d.Set("project_id", ownerID) + } + + return nil +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_spot_market_price.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_spot_market_price.go new file mode 100644 index 00000000000..df3b8627213 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_spot_market_price.go @@ -0,0 +1,54 @@ +package packet + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func dataSourceSpotMarketPrice() *schema.Resource { + return &schema.Resource{ + Read: dataSourcePacketSpotMarketPriceRead, + Schema: map[string]*schema.Schema{ + "facility": { + Type: schema.TypeString, + Required: true, + }, + "plan": { + Type: schema.TypeString, + Required: true, + }, + "price": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + } +} + +func dataSourcePacketSpotMarketPriceRead(d 
*schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + facility := d.Get("facility").(string) + plan := d.Get("plan").(string) + + prices, _, err := client.SpotMarket.Prices() + if err != nil { + return err + } + + var price float64 + if fac, ok := prices[facility]; ok { + if pri, ok := fac[plan]; ok { + price = pri + } else { + return fmt.Errorf("Facility %s does not have prices for plan %s", facility, plan) + } + } else { + return fmt.Errorf("There is no facility %s", facility) + } + d.Set("price", price) + d.SetId(facility) + return nil +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_spot_market_request.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_spot_market_request.go new file mode 100644 index 00000000000..da239e48b98 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_spot_market_request.go @@ -0,0 +1,49 @@ +package packet + +import ( + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func dataSourcePacketSpotMarketRequest() *schema.Resource { + return &schema.Resource{ + Read: dataSourcePacketSpotMarketRequestRead, + + Schema: map[string]*schema.Schema{ + "request_id": { + Type: schema.TypeString, + Required: true, + }, + "device_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + Timeouts: resourceDefaultTimeouts, + } +} +func dataSourcePacketSpotMarketRequestRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + id := d.Get("request_id").(string) + + smr, _, err := client.SpotMarketRequests.Get(id, &packngo.GetOptions{Includes: []string{"project", "devices", "facilities"}}) + if err != nil { + err = friendlyError(err) + if isNotFound(err) { + d.SetId("") + return nil + } + return err + } + + deviceIDs := make([]string, len(smr.Devices)) + for i, d := range smr.Devices { + deviceIDs[i] = d.ID + } + d.Set("device_ids", deviceIDs) + d.SetId(id + strings.Join(deviceIDs, "-")) + return nil +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_volume.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_volume.go new file mode 100644 index 00000000000..e8f23d0d05e --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/datasource_packet_volume.go @@ -0,0 +1,188 @@ +package packet + +import ( + "fmt" + "path" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func dataSourcePacketVolume() *schema.Resource { + return &schema.Resource{ + Read: dataSourcePacketVolumeRead, + Schema: map[string]*schema.Schema{ + + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"volume_id"}, + }, + "project_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"volume_id"}, + }, + "volume_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"project_id", "name"}, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "size": { + Type: schema.TypeInt, + Computed: true, + }, + + "facility": { + Type: schema.TypeString, + Computed: true, + }, + + "plan": { + Type: schema.TypeString, + Computed: true, + }, + + "billing_cycle": { + Type: schema.TypeString, + 
Computed: true, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + }, + + "locked": { + Type: schema.TypeBool, + Computed: true, + }, + + "snapshot_policies": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "snapshot_frequency": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_count": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + + "device_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "created": { + Type: schema.TypeString, + Computed: true, + }, + + "updated": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourcePacketVolumeRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + nameRaw, nameOK := d.GetOk("name") + projectIdRaw, projectIdOK := d.GetOk("project_id") + volumeIdRaw, volumeIdOK := d.GetOk("volume_id") + + if !volumeIdOK && !nameOK { + return fmt.Errorf("You must supply volume_id or name") + } + var volume *packngo.Volume + if nameOK { + if !projectIdOK { + return fmt.Errorf("If you lookup via name, you must supply project_id") + } + name := nameRaw.(string) + projectId := projectIdRaw.(string) + + vs, _, err := client.Volumes.List(projectId, &packngo.ListOptions{Includes: []string{"attachments.device"}}) + if err != nil { + return err + } + + volume, err = findVolumeByName(vs, name) + if err != nil { + return err + } + } else { + volumeId := volumeIdRaw.(string) + var err error + volume, _, err = client.Volumes.Get(volumeId, &packngo.GetOptions{Includes: []string{"attachments.device"}}) + if err != nil { + return err + } + } + + d.Set("name", volume.Name) + d.Set("description", volume.Description) + d.Set("size", volume.Size) + d.Set("plan", volume.Plan.Slug) + d.Set("facility", volume.Facility.Code) + d.Set("state", volume.State) + d.Set("billing_cycle", volume.BillingCycle) + d.Set("locked", volume.Locked) + d.Set("created", volume.Created) + d.Set("updated", volume.Updated) + d.Set("project_id", volume.Project.ID) + + snapshot_policies := make([]map[string]interface{}, 0, len(volume.SnapshotPolicies)) + for _, snapshot_policy := range volume.SnapshotPolicies { + policy := map[string]interface{}{ + "snapshot_frequency": snapshot_policy.SnapshotFrequency, + "snapshot_count": snapshot_policy.SnapshotCount, + } + snapshot_policies = append(snapshot_policies, policy) + } + d.Set("snapshot_policies", snapshot_policies) + + deviceIds := []string{} + + for _, a := range volume.Attachments { + deviceIds = append(deviceIds, path.Base(a.Device.Href)) + } + + d.Set("device_ids", deviceIds) + d.SetId(volume.ID) + + return nil +} + +func findVolumeByName(volumes []packngo.Volume, name string) (*packngo.Volume, error) { + results := make([]packngo.Volume, 0) + for _, v := range volumes { + if v.Name == name { + results = append(results, v) + } + } + if len(results) == 1 { + return &results[0], nil + } + if len(results) == 0 { + return nil, fmt.Errorf("no volume found with name %s", name) + } + return nil, fmt.Errorf("too many volumes found with hostname %s (found %d, expected 1)", name, len(results)) +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/errors.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/errors.go new file mode 100644 index 00000000000..5b9583c65d9 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/errors.go @@ -0,0 +1,92 @@ +package packet + 
+import ( + "net/http" + "sort" + "strings" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/packethost/packngo" +) + +// friendlyError improves error messages when the API error is blank or in an +// alternate format (as is the case with invalid token or loadbalancer errors) +func friendlyError(err error) error { + if e, ok := err.(*packngo.ErrorResponse); ok { + resp := e.Response + errors := Errors(e.Errors) + + if 0 == len(errors) { + errors = Errors{e.SingleError} + } + er := &ErrorResponse{ + StatusCode: resp.StatusCode, + Errors: errors, + } + respHead := resp.Header + + // this checks if the error comes from API (and not from cache/LB) + if len(errors) > 0 { + ct := respHead.Get("Content-Type") + xrid := respHead.Get("X-Request-Id") + if strings.Contains(ct, "application/json") && len(xrid) > 0 { + er.IsAPIError = true + } + } + return er + } + return err +} + +func isForbidden(err error) bool { + if r, ok := err.(*ErrorResponse); ok { + return r.StatusCode == http.StatusForbidden + } + return false +} + +func isNotFound(err error) bool { + if r, ok := err.(*ErrorResponse); ok { + return r.StatusCode == http.StatusNotFound && r.IsAPIError + } + return false +} + +type Errors []string + +func (e Errors) Error() string { + return strings.Join(e, "; ") +} + +type ErrorResponse struct { + StatusCode int + Errors + IsAPIError bool +} + +// setMap sets the map of values to ResourceData, checking and returning the +// errors. Typically d.Set is not error checked. This helper makes checking +// those errors less tedious. Because this works with a map, the order of the +// errors would not be predictable, to avoid this the errors will be sorted. +func setMap(d *schema.ResourceData, m map[string]interface{}) error { + errs := &multierror.Error{} + for key, v := range m { + var err error + if f, ok := v.(setFn); ok { + err = f(d, key) + } else { + err = d.Set(key, v) + } + + if err != nil { + errs = multierror.Append(errs, err) + } + } + sort.Sort(errs) + + return errs.ErrorOrNil() +} + +type setFn = func(d *schema.ResourceData, key string) error diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/helpers_device.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/helpers_device.go new file mode 100644 index 00000000000..917b9b551fc --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/helpers_device.go @@ -0,0 +1,249 @@ +package packet + +import ( + "fmt" + "strings" + "sync" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/packethost/packngo" +) + +var wgMap = map[string]*sync.WaitGroup{} +var wgMutex = sync.Mutex{} + +func ifToIPCreateRequest(m interface{}) packngo.IPAddressCreateRequest { + iacr := packngo.IPAddressCreateRequest{} + ia := m.(map[string]interface{}) + at := ia["type"].(string) + switch at { + case "public_ipv4": + iacr.AddressFamily = 4 + iacr.Public = true + case "private_ipv4": + iacr.AddressFamily = 4 + iacr.Public = false + case "public_ipv6": + iacr.AddressFamily = 6 + iacr.Public = true + } + iacr.CIDR = ia["cidr"].(int) + iacr.Reservations = convertStringArr(ia["reservation_ids"].([]interface{})) + return iacr +} + +func getNewIPAddressSlice(arr []interface{}) []packngo.IPAddressCreateRequest { + addressTypesSlice := make([]packngo.IPAddressCreateRequest, len(arr)) 
+ + for i, m := range arr { + addressTypesSlice[i] = ifToIPCreateRequest(m) + } + return addressTypesSlice +} + +type NetworkInfo struct { + Networks []map[string]interface{} + IPv4SubnetSize int + Host string + PublicIPv4 string + PublicIPv6 string + PrivateIPv4 string +} + +func getNetworkInfo(ips []*packngo.IPAddressAssignment) NetworkInfo { + ni := NetworkInfo{Networks: make([]map[string]interface{}, 0, 1)} + for _, ip := range ips { + network := map[string]interface{}{ + "address": ip.Address, + "gateway": ip.Gateway, + "family": ip.AddressFamily, + "cidr": ip.CIDR, + "public": ip.Public, + } + ni.Networks = append(ni.Networks, network) + + // Initial device IPs are fixed and marked as "Management" + if ip.Management { + if ip.AddressFamily == 4 { + if ip.Public { + ni.Host = ip.Address + ni.IPv4SubnetSize = ip.CIDR + ni.PublicIPv4 = ip.Address + } else { + ni.PrivateIPv4 = ip.Address + } + } else { + ni.PublicIPv6 = ip.Address + } + } + } + return ni +} + +func getNetworkRank(family int, public bool) int { + switch { + case family == 4 && public: + return 0 + case family == 6: + return 1 + case family == 4 && public: + return 2 + } + return 3 +} + +func getPorts(ps []packngo.Port) []map[string]interface{} { + ret := make([]map[string]interface{}, 0, 1) + for _, p := range ps { + port := map[string]interface{}{ + "name": p.Name, + "id": p.ID, + "type": p.Type, + "mac": p.Data.MAC, + "bonded": p.Data.Bonded, + } + ret = append(ret, port) + } + return ret +} + +func waitUntilReservationProvisionable(id string, meta interface{}) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{"false"}, + Target: []string{"true"}, + Refresh: func() (interface{}, string, error) { + client := meta.(*packngo.Client) + r, _, err := client.HardwareReservations.Get(id, nil) + if err != nil { + return 42, "error", friendlyError(err) + } + provisionableString := "false" + if r.Provisionable { + provisionableString = "true" + } + return 42, provisionableString, nil + }, + Timeout: 60 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + _, err := stateConf.WaitForState() + return err +} + +func getWaitForDeviceLock(deviceID string) *sync.WaitGroup { + wgMutex.Lock() + defer wgMutex.Unlock() + wg, ok := wgMap[deviceID] + if !ok { + wg = &sync.WaitGroup{} + wgMap[deviceID] = wg + } + return wg +} + +func waitForDeviceAttribute(d *schema.ResourceData, targets []string, pending []string, attribute string, meta interface{}) (string, error) { + + wg := getWaitForDeviceLock(d.Id()) + wg.Wait() + + wgMutex.Lock() + wg.Add(1) + wgMutex.Unlock() + + defer func() { + wgMutex.Lock() + wg.Done() + wgMutex.Unlock() + }() + + if attribute != "state" && attribute != "network_type" { + return "", fmt.Errorf("unsupported attr to wait for: %s", attribute) + } + + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: targets, + Refresh: func() (interface{}, string, error) { + client := meta.(*packngo.Client) + device, _, err := client.Devices.Get(d.Id(), &packngo.GetOptions{Includes: []string{"project"}}) + if err == nil { + retAttrVal := device.State + if attribute == "network_type" { + networkType := device.GetNetworkType() + retAttrVal = networkType + } + return retAttrVal, retAttrVal, nil + } + return "error", "error", err + }, + Timeout: 60 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + attrValRaw, err := stateConf.WaitForState() + + if v, ok := attrValRaw.(string); ok { + return v, err + } + + return "", err +} + +// powerOnAndWait 
Powers on the device and waits for it to be active. +func powerOnAndWait(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + _, err := client.Devices.PowerOn(d.Id()) + if err != nil { + return friendlyError(err) + } + + _, err = waitForDeviceAttribute(d, []string{"active", "failed"}, []string{"off"}, "state", client) + if err != nil { + return err + } + state := d.Get("state").(string) + if state != "active" { + return friendlyError(fmt.Errorf("Device in non-active state \"%s\"", state)) + } + return nil +} + +func validateFacilityForDevice(v interface{}, k string) (ws []string, errors []error) { + if v.(string) == "any" { + errors = append(errors, fmt.Errorf(`Cannot use facility: "any"`)) + } + return +} + +func ipAddressSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(ipAddressTypes, false), + Description: fmt.Sprintf("one of %s", strings.Join(ipAddressTypes, ",")), + }, + "cidr": { + Type: schema.TypeInt, + Optional: true, + Description: "CIDR suffix for IP block assigned to this device", + }, + "reservation_ids": { + Type: schema.TypeList, + Optional: true, + Description: "IDs of reservations to pick the blocks from", + MinItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringMatch(uuidRE, "must be a valid UUID"), + }, + }, + }, + } +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/provider.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/provider.go new file mode 100644 index 00000000000..000294a2081 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/provider.go @@ -0,0 +1,71 @@ +package packet + +import ( + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/mutexkv" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +var packetMutexKV = mutexkv.NewMutexKV() + +func Provider() terraform.ResourceProvider { + + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "auth_token": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("PACKET_AUTH_TOKEN", nil), + Description: "The API auth key for API operations.", + }, + }, + DataSourcesMap: map[string]*schema.Resource{ + "packet_ip_block_ranges": dataSourcePacketIPBlockRanges(), + "packet_precreated_ip_block": dataSourcePacketPreCreatedIPBlock(), + "packet_operating_system": dataSourceOperatingSystem(), + "packet_organization": dataSourcePacketOrganization(), + "packet_spot_market_price": dataSourceSpotMarketPrice(), + "packet_device": dataSourcePacketDevice(), + "packet_device_bgp_neighbors": dataSourcePacketDeviceBGPNeighbors(), + "packet_project": dataSourcePacketProject(), + "packet_project_ssh_key": dataSourcePacketProjectSSHKey(), + "packet_spot_market_request": dataSourcePacketSpotMarketRequest(), + "packet_volume": dataSourcePacketVolume(), + }, + + ResourcesMap: map[string]*schema.Resource{ + "packet_device": resourcePacketDevice(), + "packet_device_network_type": resourcePacketDeviceNetworkType(), + "packet_ssh_key": resourcePacketSSHKey(), + "packet_project_ssh_key": resourcePacketProjectSSHKey(), + "packet_project": resourcePacketProject(), + "packet_organization": resourcePacketOrganization(), + "packet_volume": resourcePacketVolume(), + "packet_volume_attachment": resourcePacketVolumeAttachment(), + 
"packet_reserved_ip_block": resourcePacketReservedIPBlock(), + "packet_ip_attachment": resourcePacketIPAttachment(), + "packet_spot_market_request": resourcePacketSpotMarketRequest(), + "packet_vlan": resourcePacketVlan(), + "packet_bgp_session": resourcePacketBGPSession(), + "packet_port_vlan_attachment": resourcePacketPortVlanAttachment(), + }, + + ConfigureFunc: providerConfigure, + } +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + config := Config{ + AuthToken: d.Get("auth_token").(string), + } + return config.Client(), nil +} + +var resourceDefaultTimeouts = &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + Default: schema.DefaultTimeout(60 * time.Minute), +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_bgp_session.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_bgp_session.go new file mode 100644 index 00000000000..6e504a7d32e --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_bgp_session.go @@ -0,0 +1,100 @@ +package packet + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/packethost/packngo" +) + +func resourcePacketBGPSession() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketBGPSessionCreate, + Read: resourcePacketBGPSessionRead, + Delete: resourcePacketBGPSessionDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "device_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "address_family": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"ipv4", "ipv6"}, false), + }, + "default_route": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "status": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourcePacketBGPSessionCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + dID := d.Get("device_id").(string) + addressFamily := d.Get("address_family").(string) + defaultRoute := d.Get("default_route").(bool) + log.Printf("[DEBUG] creating %s BGP session to device (%s)\n", addressFamily, dID) + bgpSession, _, err := client.BGPSessions.Create( + dID, packngo.CreateBGPSessionRequest{ + AddressFamily: addressFamily, + DefaultRoute: &defaultRoute}) + if err != nil { + return friendlyError(err) + } + + d.SetId(bgpSession.ID) + return resourcePacketBGPSessionRead(d, meta) +} + +func resourcePacketBGPSessionRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + bgpSession, _, err := client.BGPSessions.Get(d.Id(), + &packngo.GetOptions{Includes: []string{"device"}}) + if err != nil { + err = friendlyError(err) + if isNotFound(err) { + log.Printf("[WARN] BGP Session (%s) not found, removing from state", d.Id()) + + d.SetId("") + return nil + } + return err + } + defaultRoute := false + if bgpSession.DefaultRoute != nil { + if *(bgpSession.DefaultRoute) { + defaultRoute = true + } + } + d.Set("device_id", bgpSession.Device.ID) + d.Set("address_family", bgpSession.AddressFamily) + d.Set("status", bgpSession.Status) + d.Set("default_route", defaultRoute) + 
d.SetId(bgpSession.ID) + return nil +} + +func resourcePacketBGPSessionDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + _, err := client.BGPSessions.Delete(d.Id()) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_device.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_device.go new file mode 100644 index 00000000000..f8d848bab09 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_device.go @@ -0,0 +1,603 @@ +package packet + +import ( + "encoding/json" + "errors" + "fmt" + "log" + "path" + "path/filepath" + "reflect" + "regexp" + "sort" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/packethost/packngo" +) + +var matchIPXEScript = regexp.MustCompile(`(?i)^#![i]?pxe`) +var ipAddressTypes = []string{"public_ipv4", "private_ipv4", "public_ipv6"} + +func resourcePacketDevice() *schema.Resource { + return &schema.Resource{ + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + Create: resourcePacketDeviceCreate, + Read: resourcePacketDeviceRead, + Update: resourcePacketDeviceUpdate, + Delete: resourcePacketDeviceDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "hostname": { + Type: schema.TypeString, + Required: true, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + }, + + "operating_system": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "deployed_facility": { + Type: schema.TypeString, + Computed: true, + }, + + "facilities": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ForceNew: true, + MinItems: 1, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + fsRaw := d.Get("facilities") + fs := convertStringArr(fsRaw.([]interface{})) + df := d.Get("deployed_facility").(string) + if contains(fs, df) { + return true + } + if contains(fs, "any") && (len(df) != 0) { + return true + } + return false + }, + }, + "ip_address": { + Type: schema.TypeList, + Optional: true, + Description: "Inbound rules for this security group", + Elem: ipAddressSchema(), + MinItems: 1, + }, + + "plan": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "billing_cycle": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + + "root_password": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + + "locked": { + Type: schema.TypeBool, + Computed: true, + }, + + "access_public_ipv6": { + Type: schema.TypeString, + Computed: true, + }, + + "access_public_ipv4": { + Type: schema.TypeString, + Computed: true, + }, + + "access_private_ipv4": { + Type: schema.TypeString, + Computed: true, + }, + "network_type": { + Type: schema.TypeString, + Computed: true, + Deprecated: "You should handle Network Type with the new packet_device_network_type resource.", + }, + + 
"ports": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "mac": { + Type: schema.TypeString, + Computed: true, + }, + "bonded": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + + "network": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + }, + + "gateway": { + Type: schema.TypeString, + Computed: true, + }, + + "family": { + Type: schema.TypeInt, + Computed: true, + }, + + "cidr": { + Type: schema.TypeInt, + Computed: true, + }, + + "public": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + + "created": { + Type: schema.TypeString, + Computed: true, + }, + + "updated": { + Type: schema.TypeString, + Computed: true, + }, + + "user_data": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + ForceNew: true, + }, + + "custom_data": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + ForceNew: true, + }, + + "ipxe_script_url": { + Type: schema.TypeString, + Optional: true, + }, + + "always_pxe": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "deployed_hardware_reservation_id": { + Type: schema.TypeString, + Computed: true, + }, + + "hardware_reservation_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + dhwr, ok := d.GetOk("deployed_hardware_reservation_id") + return ok && dhwr == new + }, + }, + + "tags": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "storage": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + StateFunc: func(v interface{}) string { + s, _ := structure.NormalizeJsonString(v) + return s + }, + ValidateFunc: validation.ValidateJsonString, + }, + "project_ssh_key_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "ssh_key_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "wait_for_reservation_deprovision": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: false, + }, + "force_detach_volumes": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: false, + }, + }, + } +} + +func resourcePacketDeviceCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + facs := convertStringArr(d.Get("facilities").([]interface{})) + + var addressTypesSlice []packngo.IPAddressCreateRequest + _, ok := d.GetOk("ip_address") + if ok { + arr := d.Get("ip_address").([]interface{}) + addressTypesSlice = getNewIPAddressSlice(arr) + } + + createRequest := &packngo.DeviceCreateRequest{ + Hostname: d.Get("hostname").(string), + Plan: d.Get("plan").(string), + Facility: facs, + IPAddresses: addressTypesSlice, + OS: d.Get("operating_system").(string), + BillingCycle: d.Get("billing_cycle").(string), + ProjectID: d.Get("project_id").(string), + } + if attr, ok := d.GetOk("user_data"); ok { + createRequest.UserData = attr.(string) + } + + if attr, ok := d.GetOk("custom_data"); ok { + createRequest.CustomData = attr.(string) + } + + if attr, ok := d.GetOk("ipxe_script_url"); ok { + 
createRequest.IPXEScriptURL = attr.(string) + } + + if attr, ok := d.GetOk("hardware_reservation_id"); ok { + createRequest.HardwareReservationID = attr.(string) + } else { + wfrd := "wait_for_reservation_deprovision" + if d.Get(wfrd).(bool) { + return friendlyError(fmt.Errorf("You can't set %s when not using a hardware reservation", wfrd)) + } + } + + if createRequest.OS == "custom_ipxe" { + if createRequest.IPXEScriptURL == "" && createRequest.UserData == "" { + return friendlyError(errors.New("\"ipxe_script_url\" or \"user_data\"" + + " must be provided when \"custom_ipxe\" OS is selected.")) + } + + // ipxe_script_url + user_data is OK, unless user_data is an ipxe script in + // which case it's an error. + if createRequest.IPXEScriptURL != "" { + if matchIPXEScript.MatchString(createRequest.UserData) { + return friendlyError(errors.New("\"user_data\" should not be an iPXE " + + "script when \"ipxe_script_url\" is also provided.")) + } + } + } + + if createRequest.OS != "custom_ipxe" && createRequest.IPXEScriptURL != "" { + return friendlyError(errors.New("\"ipxe_script_url\" argument provided, but" + + " OS is not \"custom_ipxe\". Please verify and fix device arguments.")) + } + + if attr, ok := d.GetOk("always_pxe"); ok { + createRequest.AlwaysPXE = attr.(bool) + } + + projectKeys := d.Get("project_ssh_key_ids.#").(int) + if projectKeys > 0 { + createRequest.ProjectSSHKeys = convertStringArr(d.Get("project_ssh_key_ids").([]interface{})) + } + + tags := d.Get("tags.#").(int) + if tags > 0 { + createRequest.Tags = convertStringArr(d.Get("tags").([]interface{})) + } + + if attr, ok := d.GetOk("storage"); ok { + s, err := structure.NormalizeJsonString(attr.(string)) + if err != nil { + return errwrap.Wrapf("storage param contains invalid JSON: {{err}}", err) + } + var cpr packngo.CPR + err = json.Unmarshal([]byte(s), &cpr) + if err != nil { + return errwrap.Wrapf("Error parsing Storage string: {{err}}", err) + } + createRequest.Storage = &cpr + } + + newDevice, _, err := client.Devices.Create(createRequest) + if err != nil { + retErr := friendlyError(err) + if isNotFound(retErr) { + retErr = fmt.Errorf("%s, make sure project \"%s\" exists", retErr, createRequest.ProjectID) + } + return retErr + } + + d.SetId(newDevice.ID) + + // Wait for the device so we can get the networking attributes that show up after a while. + state, err := waitForDeviceAttribute(d, []string{"active", "failed"}, []string{"queued", "provisioning"}, "state", meta) + if err != nil { + d.SetId("") + fErr := friendlyError(err) + if isForbidden(fErr) { + // If the device doesn't get to the active state, we can't recover it from here. + + return errors.New("provisioning time limit exceeded; the Equinix Metal team will investigate") + } + return fErr + } + if state != "active" { + d.SetId("") + return fmt.Errorf("Device in non-active state \"%s\"", state) + } + /* + Possibly wait for device network state + _, err := waitForDeviceAttribute(d, []string{"layer3"}, []string{"hybrid", "layer2-bonded", "layer2-individual"}, "network_type", meta) + if err != nil { + return err + } + */ + + return resourcePacketDeviceRead(d, meta) +} + +func resourcePacketDeviceRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + device, _, err := client.Devices.Get(d.Id(), &packngo.GetOptions{Includes: []string{"project"}}) + if err != nil { + err = friendlyError(err) + + // If the device somehow already destroyed, mark as successfully gone. 
+ if isNotFound(err) { + log.Printf("[WARN] Device (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + return err + } + + d.Set("hostname", device.Hostname) + d.Set("plan", device.Plan.Slug) + d.Set("deployed_facility", device.Facility.Code) + d.Set("facilities", []string{device.Facility.Code}) + d.Set("operating_system", device.OS.Slug) + d.Set("state", device.State) + d.Set("billing_cycle", device.BillingCycle) + d.Set("locked", device.Locked) + d.Set("created", device.Created) + d.Set("updated", device.Updated) + d.Set("ipxe_script_url", device.IPXEScriptURL) + d.Set("always_pxe", device.AlwaysPXE) + d.Set("root_password", device.RootPassword) + d.Set("project_id", device.Project.ID) + if device.Storage != nil { + rawStorageBytes, err := json.Marshal(device.Storage) + if err != nil { + return fmt.Errorf("[ERR] Error getting storage JSON string for device (%s): %s", d.Id(), err) + } + + storageString, err := structure.NormalizeJsonString(string(rawStorageBytes)) + if err != nil { + return fmt.Errorf("[ERR] Errori normalizing storage JSON string for device (%s): %s", d.Id(), err) + } + d.Set("storage", storageString) + } + + if len(device.HardwareReservation.Href) > 0 { + d.Set("deployed_hardware_reservation_id", path.Base(device.HardwareReservation.Href)) + } + networkType := device.GetNetworkType() + d.Set("network_type", networkType) + + wfrd := "wait_for_reservation_deprovision" + if _, ok := d.GetOk(wfrd); !ok { + d.Set(wfrd, nil) + } + fdv := "force_detach_volumes" + if _, ok := d.GetOk(fdv); !ok { + d.Set(fdv, nil) + } + + d.Set("tags", device.Tags) + keyIDs := []string{} + for _, k := range device.SSHKeys { + keyIDs = append(keyIDs, filepath.Base(k.URL)) + } + d.Set("ssh_key_ids", keyIDs) + networkInfo := getNetworkInfo(device.Network) + + sort.SliceStable(networkInfo.Networks, func(i, j int) bool { + famI := networkInfo.Networks[i]["family"].(int) + famJ := networkInfo.Networks[j]["family"].(int) + pubI := networkInfo.Networks[i]["public"].(bool) + pubJ := networkInfo.Networks[j]["public"].(bool) + return getNetworkRank(famI, pubI) < getNetworkRank(famJ, pubJ) + }) + + d.Set("network", networkInfo.Networks) + d.Set("access_public_ipv4", networkInfo.PublicIPv4) + d.Set("access_private_ipv4", networkInfo.PrivateIPv4) + d.Set("access_public_ipv6", networkInfo.PublicIPv6) + + ports := getPorts(device.NetworkPorts) + d.Set("ports", ports) + + if networkInfo.Host != "" { + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": networkInfo.Host, + }) + } + + return nil +} + +func resourcePacketDeviceUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + if d.HasChange("locked") { + var action func(string) (*packngo.Response, error) + if d.Get("locked").(bool) { + action = client.Devices.Lock + } else { + action = client.Devices.Unlock + } + if _, err := action(d.Id()); err != nil { + return friendlyError(err) + } + } + ur := packngo.DeviceUpdateRequest{} + + if d.HasChange("description") { + dDesc := d.Get("description").(string) + ur.Description = &dDesc + } + if d.HasChange("user_data") { + dUserData := d.Get("user_data").(string) + ur.UserData = &dUserData + } + if d.HasChange("custom_data") { + dCustomData := d.Get("custom_data").(string) + ur.CustomData = &dCustomData + } + if d.HasChange("hostname") { + dHostname := d.Get("hostname").(string) + ur.Hostname = &dHostname + } + if d.HasChange("tags") { + ts := d.Get("tags") + sts := []string{} + + switch ts.(type) { + case []interface{}: + for _, v := range 
ts.([]interface{}) { + sts = append(sts, v.(string)) + } + ur.Tags = &sts + default: + return friendlyError(fmt.Errorf("garbage in tags: %s", ts)) + } + } + if d.HasChange("ipxe_script_url") { + dUrl := d.Get("ipxe_script_url").(string) + ur.IPXEScriptURL = &dUrl + } + if d.HasChange("always_pxe") { + dPXE := d.Get("always_pxe").(bool) + ur.AlwaysPXE = &dPXE + } + if !reflect.DeepEqual(ur, packngo.DeviceUpdateRequest{}) { + if _, _, err := client.Devices.Update(d.Id(), &ur); err != nil { + return friendlyError(err) + } + + } + return resourcePacketDeviceRead(d, meta) +} + +func resourcePacketDeviceDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + fdvIf, fdvOk := d.GetOk("force_detach_volumes") + fdv := false + if fdvOk && fdvIf.(bool) { + fdv = true + } + + if _, err := client.Devices.Delete(d.Id(), fdv); err != nil { + return friendlyError(err) + } + + resId, resIdOk := d.GetOk("hardware_reservation_id") + if resIdOk { + wfrd, wfrdOK := d.GetOk("wait_for_reservation_deprovision") + if wfrdOK && wfrd.(bool) { + err := waitUntilReservationProvisionable(resId.(string), meta) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_device_network_type.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_device_network_type.go new file mode 100644 index 00000000000..4c855da383a --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_device_network_type.go @@ -0,0 +1,114 @@ +package packet + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/packethost/packngo" +) + +func resourcePacketDeviceNetworkType() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketDeviceNetworkTypeCreate, + Read: resourcePacketDeviceNetworkTypeRead, + Delete: resourcePacketDeviceNetworkTypeDelete, + Update: resourcePacketDeviceNetworkTypeUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "device_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"layer3", "layer2-bonded", "layer2-individual", "hybrid"}, false), + }, + }, + } +} + +func getDevIDandNetworkType(d *schema.ResourceData, c *packngo.Client) (string, string, error) { + deviceID := d.Id() + if len(deviceID) == 0 { + deviceID = d.Get("device_id").(string) + } + + dev, _, err := c.Devices.Get(deviceID, nil) + if err != nil { + return "", "", err + } + devType := dev.GetNetworkType() + + return dev.ID, devType, nil +} + +func getAndPossiblySetNetworkType(d *schema.ResourceData, c *packngo.Client, targetType string) error { + devID, devType, err := getDevIDandNetworkType(d, c) + if err != nil { + return err + } + + if devType != targetType { + _, err := c.DevicePorts.DeviceToNetworkType(devID, targetType) + if err != nil { + return err + } + } + return nil +} + +func resourcePacketDeviceNetworkTypeCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + ntype := d.Get("type").(string) + + err := getAndPossiblySetNetworkType(d, client, ntype) + if err != nil { + return err + } + d.SetId(d.Get("device_id").(string)) + return resourcePacketDeviceNetworkTypeRead(d, meta) +} + +func 
resourcePacketDeviceNetworkTypeRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + _, devNType, err := getDevIDandNetworkType(d, client) + + if err != nil { + err = friendlyError(err) + + if isNotFound(err) { + log.Printf("[WARN] Device (%s) for Network Type request not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + return err + } + + d.Set("type", devNType) + return nil +} + +func resourcePacketDeviceNetworkTypeUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + ntype := d.Get("type").(string) + if d.HasChange("type") { + err := getAndPossiblySetNetworkType(d, client, ntype) + if err != nil { + return err + } + } + + return resourcePacketDeviceNetworkTypeRead(d, meta) +} + +func resourcePacketDeviceNetworkTypeDelete(d *schema.ResourceData, meta interface{}) error { + return nil +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_ip_attachment.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_ip_attachment.go new file mode 100644 index 00000000000..b0a03b93921 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_ip_attachment.go @@ -0,0 +1,102 @@ +package packet + +import ( + "fmt" + "log" + "path" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketIPAttachment() *schema.Resource { + ipAttachmentSchema := packetIPResourceComputedFields() + ipAttachmentSchema["device_id"] = &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + } + ipAttachmentSchema["cidr_notation"] = &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + } + return &schema.Resource{ + Create: resourcePacketIPAttachmentCreate, + Read: resourcePacketIPAttachmentRead, + Delete: resourcePacketIPAttachmentDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: ipAttachmentSchema, + } +} + +func resourcePacketIPAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + deviceID := d.Get("device_id").(string) + ipa := d.Get("cidr_notation").(string) + + req := packngo.AddressStruct{Address: ipa} + + assignment, _, err := client.DeviceIPs.Assign(deviceID, &req) + if err != nil { + return fmt.Errorf("error assigning address %s to device %s: %s", ipa, deviceID, err) + } + + d.SetId(assignment.ID) + + return resourcePacketIPAttachmentRead(d, meta) +} + +func resourcePacketIPAttachmentRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + assignment, _, err := client.DeviceIPs.Get(d.Id(), nil) + if err != nil { + err = friendlyError(err) + + // If the IP attachment was already destroyed, mark as succesfully gone. 
+ if isNotFound(err) { + log.Printf("[WARN] IP attachment (%q) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return err + } + + d.SetId(assignment.ID) + d.Set("address", assignment.Address) + d.Set("gateway", assignment.Gateway) + d.Set("network", assignment.Network) + d.Set("netmask", assignment.Netmask) + d.Set("address_family", assignment.AddressFamily) + d.Set("cidr", assignment.CIDR) + d.Set("public", assignment.Public) + d.Set("management", assignment.Management) + d.Set("manageable", assignment.Manageable) + + g := false + if assignment.Global != nil { + g = *(assignment.Global) + } + d.Set("global", g) + + d.Set("device_id", path.Base(assignment.AssignedTo.Href)) + d.Set("cidr_notation", + fmt.Sprintf("%s/%d", assignment.Network, assignment.CIDR)) + + return nil +} + +func resourcePacketIPAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + _, err := client.DeviceIPs.Unassign(d.Id()) + if err != nil { + return friendlyError(err) + } + + d.SetId("") + return nil +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_organization.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_organization.go new file mode 100644 index 00000000000..ac08ccb4599 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_organization.go @@ -0,0 +1,170 @@ +package packet + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketOrganization() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketOrganizationCreate, + Read: resourcePacketOrganizationRead, + Update: resourcePacketOrganizationUpdate, + Delete: resourcePacketOrganizationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Required: false, + }, + + "website": { + Type: schema.TypeString, + Optional: true, + Required: false, + }, + + "twitter": { + Type: schema.TypeString, + Optional: true, + Required: false, + }, + + "logo": { + Type: schema.TypeString, + Optional: true, + Required: false, + }, + + "created": { + Type: schema.TypeString, + Computed: true, + }, + + "updated": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourcePacketOrganizationCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + createRequest := &packngo.OrganizationCreateRequest{ + Name: d.Get("name").(string), + } + + if attr, ok := d.GetOk("website"); ok { + createRequest.Website = attr.(string) + } + + if attr, ok := d.GetOk("description"); ok { + createRequest.Description = attr.(string) + } + + if attr, ok := d.GetOk("twitter"); ok { + createRequest.Twitter = attr.(string) + } + + if attr, ok := d.GetOk("logo"); ok { + createRequest.Logo = attr.(string) + } + + org, _, err := client.Organizations.Create(createRequest) + if err != nil { + return friendlyError(err) + } + + d.SetId(org.ID) + + return resourcePacketOrganizationRead(d, meta) +} + +func resourcePacketOrganizationRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + key, _, err := client.Organizations.Get(d.Id(), nil) + if err != nil { + err = friendlyError(err) + + // If the project somehow already 
destroyed, mark as succesfully gone. + if isNotFound(err) { + d.SetId("") + + return nil + } + + return err + } + + d.SetId(key.ID) + d.Set("name", key.Name) + d.Set("description", key.Description) + d.Set("website", key.Website) + d.Set("twitter", key.Twitter) + d.Set("logo", key.Logo) + d.Set("created", key.Created) + d.Set("updated", key.Updated) + + return nil +} + +func resourcePacketOrganizationUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + updateRequest := &packngo.OrganizationUpdateRequest{} + + if d.HasChange("name") { + oName := d.Get("name").(string) + updateRequest.Name = &oName + } + + if d.HasChange("description") { + oDescription := d.Get("description").(string) + updateRequest.Description = &oDescription + } + + if d.HasChange("website") { + oWebsite := d.Get("website").(string) + updateRequest.Website = &oWebsite + } + + if d.HasChange("twitter") { + oTwitter := d.Get("twitter").(string) + updateRequest.Twitter = &oTwitter + } + + if d.HasChange("logo") { + oLogo := d.Get("logo").(string) + updateRequest.Logo = &oLogo + } + _, _, err := client.Organizations.Update(d.Id(), updateRequest) + if err != nil { + return friendlyError(err) + } + + return resourcePacketOrganizationRead(d, meta) +} + +func resourcePacketOrganizationDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + _, err := client.Organizations.Delete(d.Id()) + if err != nil { + return friendlyError(err) + } + + d.SetId("") + return nil +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_port_vlan_attachment.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_port_vlan_attachment.go new file mode 100644 index 00000000000..7cca02df5e0 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_port_vlan_attachment.go @@ -0,0 +1,249 @@ +package packet + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketPortVlanAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketPortVlanAttachmentCreate, + Read: resourcePacketPortVlanAttachmentRead, + Delete: resourcePacketPortVlanAttachmentDelete, + Update: resourcePacketPortVlanAttachmentUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "force_bond": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + "device_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "port_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "vlan_vnid": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "vlan_id": { + Type: schema.TypeString, + Computed: true, + }, + "port_id": { + Type: schema.TypeString, + Computed: true, + }, + "native": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + } +} + +func resourcePacketPortVlanAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + deviceID := d.Get("device_id").(string) + pName := d.Get("port_name").(string) + vlanVNID := d.Get("vlan_vnid").(int) + + dev, _, err := client.Devices.Get(deviceID, &packngo.GetOptions{ + Includes: []string{"virtual_networks,project,native_virtual_network"}, + }) + if err != nil { + return err + } + + portFound := false + vlanFound := 
false + vlanID := "" + var port packngo.Port + for _, p := range dev.NetworkPorts { + if p.Name == pName { + portFound = true + port = p + for _, n := range p.AttachedVirtualNetworks { + if vlanVNID == n.VXLAN { + vlanFound = true + vlanID = n.ID + break + } + } + break + } + } + if !portFound { + return fmt.Errorf("Device %s doesn't have port %s", deviceID, pName) + } + + par := &packngo.PortAssignRequest{PortID: port.ID} + if vlanFound { + log.Printf("Port %s already has VLAN %d assigned", pName, vlanVNID) + par.VirtualNetworkID = vlanID + } else { + facility := dev.Facility.Code + vlans, _, err := client.ProjectVirtualNetworks.List(dev.Project.ID, nil) + if err != nil { + return err + } + for _, n := range vlans.VirtualNetworks { + if (n.VXLAN == vlanVNID) && (n.FacilityCode == facility) { + vlanID = n.ID + } + } + if len(vlanID) == 0 { + return fmt.Errorf("VLAN with VNID %d doesn't exist in facilty %s", vlanVNID, facility) + } + + par.VirtualNetworkID = vlanID + + // Equinix Metal doesn't allow multiple VLANs to be assigned + // to the same port at the same time + lockId := "vlan-attachment-" + port.ID + packetMutexKV.Lock(lockId) + defer packetMutexKV.Unlock(lockId) + + _, _, err = client.DevicePorts.Assign(par) + if err != nil { + return err + } + } + + d.SetId(port.ID + ":" + vlanID) + + native := d.Get("native").(bool) + if native { + _, _, err = client.DevicePorts.AssignNative(par) + if err != nil { + return err + } + } + + return resourcePacketPortVlanAttachmentRead(d, meta) +} + +func resourcePacketPortVlanAttachmentRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + deviceID := d.Get("device_id").(string) + pName := d.Get("port_name").(string) + vlanVNID := d.Get("vlan_vnid").(int) + + dev, _, err := client.Devices.Get(deviceID, &packngo.GetOptions{Includes: []string{"virtual_networks,project,native_virtual_network"}}) + if err != nil { + err = friendlyError(err) + + if isNotFound(err) { + log.Printf("[WARN] Device (%s) for Port Vlan Attachment not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return err + } + portFound := false + vlanFound := false + portID := "" + vlanID := "" + vlanNative := false + for _, p := range dev.NetworkPorts { + if p.Name == pName { + portFound = true + portID = p.ID + for _, n := range p.AttachedVirtualNetworks { + if vlanVNID == n.VXLAN { + vlanFound = true + vlanID = n.ID + if p.NativeVirtualNetwork != nil { + vlanNative = vlanID == p.NativeVirtualNetwork.ID + } + break + } + } + break + } + } + if !portFound { + // TODO(displague) should we clear state if the port is unexpectedly + // gone? Can we treat this like a deletion? 
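+		// For now a missing port is surfaced as an error rather than removed from state;
+		// only a missing VLAN (handled below) clears the resource ID.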
+ return fmt.Errorf("Device %s doesn't have port %s", deviceID, pName) + } + if !vlanFound { + d.SetId("") + } + d.Set("port_id", portID) + d.Set("vlan_id", vlanID) + d.Set("native", vlanNative) + return nil +} + +func resourcePacketPortVlanAttachmentUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + if d.HasChange("native") { + native := d.Get("native").(bool) + portID := d.Get("port_id").(string) + if native { + vlanID := d.Get("vlan_id").(string) + par := &packngo.PortAssignRequest{PortID: portID, VirtualNetworkID: vlanID} + _, _, err := client.DevicePorts.AssignNative(par) + if err != nil { + return err + } + } else { + _, _, err := client.DevicePorts.UnassignNative(portID) + if err != nil { + return err + } + } + } + return resourcePacketPortVlanAttachmentRead(d, meta) +} + +func resourcePacketPortVlanAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + pID := d.Get("port_id").(string) + vlanID := d.Get("vlan_id").(string) + native := d.Get("native").(bool) + if native { + _, _, err := client.DevicePorts.UnassignNative(pID) + if err != nil { + return err + } + } + par := &packngo.PortAssignRequest{PortID: pID, VirtualNetworkID: vlanID} + lockId := "vlan-detachment-" + pID + packetMutexKV.Lock(lockId) + defer packetMutexKV.Unlock(lockId) + portPtr, _, err := client.DevicePorts.Unassign(par) + if err != nil { + return err + } + forceBond := d.Get("force_bond").(bool) + if forceBond && (len(portPtr.AttachedVirtualNetworks) == 0) { + deviceID := d.Get("device_id").(string) + portName := d.Get("port_name").(string) + port, err := client.DevicePorts.GetPortByName(deviceID, portName) + if err != nil { + return friendlyError(err) + } + _, _, err = client.DevicePorts.Bond(port, false) + if err != nil { + return friendlyError(err) + } + } + return nil +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_project.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_project.go new file mode 100644 index 00000000000..5b7a8e9d009 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_project.go @@ -0,0 +1,281 @@ +package packet + +import ( + "fmt" + "path" + "regexp" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/packethost/packngo" +) + +var uuidRE = regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$") + +func resourcePacketProject() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketProjectCreate, + Read: resourcePacketProjectRead, + Update: resourcePacketProjectUpdate, + Delete: resourcePacketProjectDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "created": { + Type: schema.TypeString, + Computed: true, + }, + + "updated": { + Type: schema.TypeString, + Computed: true, + }, + + "backend_transfer": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "payment_method_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return strings.ToLower(strings.Trim(old, `"`)) == strings.ToLower(strings.Trim(new, `"`)) + }, + ValidateFunc: 
validation.StringMatch(uuidRE, "must be a valid UUID"), + }, + + "organization_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return strings.ToLower(strings.Trim(old, `"`)) == strings.ToLower(strings.Trim(new, `"`)) + }, + ValidateFunc: validation.StringMatch(uuidRE, "must be a valid UUID"), + }, + "bgp_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deployment_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"local", "global"}, false), + }, + "asn": { + Type: schema.TypeInt, + Required: true, + }, + "md5": { + Type: schema.TypeString, + Optional: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "max_prefix": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func expandBGPConfig(d *schema.ResourceData) packngo.CreateBGPConfigRequest { + bgpCreateRequest := packngo.CreateBGPConfigRequest{ + DeploymentType: d.Get("bgp_config.0.deployment_type").(string), + Asn: d.Get("bgp_config.0.asn").(int), + } + md5, ok := d.GetOk("bgp_config.0.md5") + if ok { + bgpCreateRequest.Md5 = md5.(string) + } + + return bgpCreateRequest + +} + +func resourcePacketProjectCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + createRequest := &packngo.ProjectCreateRequest{ + Name: d.Get("name").(string), + OrganizationID: d.Get("organization_id").(string), + } + + project, _, err := client.Projects.Create(createRequest) + if err != nil { + return friendlyError(err) + } + + d.SetId(project.ID) + + _, hasBGPConfig := d.GetOk("bgp_config") + if hasBGPConfig { + bgpCR := expandBGPConfig(d) + _, err := client.BGPConfig.Create(project.ID, bgpCR) + if err != nil { + return friendlyError(err) + } + } + + backendTransfer := d.Get("backend_transfer").(bool) + if backendTransfer { + pur := packngo.ProjectUpdateRequest{BackendTransfer: &backendTransfer} + _, _, err := client.Projects.Update(project.ID, &pur) + if err != nil { + return friendlyError(err) + } + } + return resourcePacketProjectRead(d, meta) +} + +func resourcePacketProjectRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + proj, _, err := client.Projects.Get(d.Id(), nil) + if err != nil { + err = friendlyError(err) + + // If the project somehow already destroyed, mark as succesfully gone. 
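+	// Clearing the resource ID tells Terraform to drop the project from state
+	// instead of failing the refresh.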
+ if isNotFound(err) { + d.SetId("") + + return nil + } + + return err + } + + d.SetId(proj.ID) + d.Set("payment_method_id", path.Base(proj.PaymentMethod.URL)) + d.Set("name", proj.Name) + d.Set("organization_id", path.Base(proj.Organization.URL)) + d.Set("created", proj.Created) + d.Set("updated", proj.Updated) + d.Set("backend_transfer", proj.BackendTransfer) + + bgpConf, _, err := client.BGPConfig.Get(proj.ID, nil) + + if (err == nil) && (bgpConf != nil) { + // guard against an empty struct + if bgpConf.ID != "" { + err := d.Set("bgp_config", flattenBGPConfig(bgpConf)) + if err != nil { + err = friendlyError(err) + return err + } + } + } + return nil +} + +func flattenBGPConfig(l *packngo.BGPConfig) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + + if l == nil { + return nil + } + + r := make(map[string]interface{}) + + if l.Status != "" { + r["status"] = l.Status + } + if l.DeploymentType != "" { + r["deployment_type"] = l.DeploymentType + } + if l.Md5 != "" { + r["md5"] = l.Md5 + } + if l.Asn != 0 { + r["asn"] = l.Asn + } + if l.MaxPrefix != 0 { + r["max_prefix"] = l.MaxPrefix + } + + result = append(result, r) + + return result +} + +func resourcePacketProjectUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + updateRequest := &packngo.ProjectUpdateRequest{} + if d.HasChange("name") { + pName := d.Get("name").(string) + updateRequest.Name = &pName + } + if d.HasChange("payment_method_id") { + pPayment := d.Get("payment_method_id").(string) + updateRequest.PaymentMethodID = &pPayment + } + if d.HasChange("backend_transfer") { + pBT := d.Get("backend_transfer").(bool) + updateRequest.BackendTransfer = &pBT + } + if d.HasChange("bgp_config") { + o, n := d.GetChange("bgp_config") + oldarr := o.([]interface{}) + newarr := n.([]interface{}) + if len(newarr) == 1 { + bgpCreateRequest := expandBGPConfig(d) + _, err := client.BGPConfig.Create(d.Id(), bgpCreateRequest) + if err != nil { + return friendlyError(err) + } + } else { + if len(oldarr) == 1 { + m := oldarr[0].(map[string]interface{}) + + bgpConfStr := fmt.Sprintf( + "bgp_config {\n"+ + " deployment_type = \"%s\"\n"+ + " md5 = \"%s\"\n"+ + " asn = %d\n"+ + "}", m["deployment_type"].(string), m["md5"].(string), + m["asn"].(int)) + + errStr := fmt.Errorf("BGP Config can not be removed from a project, please add back\n%s", bgpConfStr) + return friendlyError(errStr) + } + } + } else { + _, _, err := client.Projects.Update(d.Id(), updateRequest) + if err != nil { + return friendlyError(err) + } + } + + return resourcePacketProjectRead(d, meta) +} + +func resourcePacketProjectDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + _, err := client.Projects.Delete(d.Id()) + if err != nil { + return friendlyError(err) + } + + d.SetId("") + return nil +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_project_ssh_key.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_project_ssh_key.go new file mode 100644 index 00000000000..e23bb6cd69c --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_project_ssh_key.go @@ -0,0 +1,24 @@ +package packet + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourcePacketProjectSSHKey() *schema.Resource { + pkeySchema := packetSSHKeyCommonFields() + pkeySchema["project_id"] = &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + 
Required: true, + } + return &schema.Resource{ + Create: resourcePacketSSHKeyCreate, + Read: resourcePacketSSHKeyRead, + Update: resourcePacketSSHKeyUpdate, + Delete: resourcePacketSSHKeyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: pkeySchema, + } +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_reserved_ip_block.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_reserved_ip_block.go new file mode 100644 index 00000000000..964ee81abed --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_reserved_ip_block.go @@ -0,0 +1,258 @@ +package packet + +import ( + "fmt" + "log" + "path" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/packethost/packngo" +) + +func packetIPComputedFields() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + }, + "address_family": { + Type: schema.TypeInt, + Computed: true, + }, + "cidr": { + Type: schema.TypeInt, + Computed: true, + }, + "gateway": { + Type: schema.TypeString, + Computed: true, + }, + "netmask": { + Type: schema.TypeString, + Computed: true, + }, + "network": { + Type: schema.TypeString, + Computed: true, + }, + "manageable": { + Type: schema.TypeBool, + Computed: true, + }, + "management": { + Type: schema.TypeBool, + Computed: true, + }, + } +} + +func packetIPResourceComputedFields() map[string]*schema.Schema { + s := packetIPComputedFields() + s["address_family"] = &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + } + s["public"] = &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + } + s["global"] = &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + } + return s +} + +func resourcePacketReservedIPBlock() *schema.Resource { + reservedBlockSchema := packetIPResourceComputedFields() + reservedBlockSchema["project_id"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + } + reservedBlockSchema["facility"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + } + reservedBlockSchema["description"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + } + reservedBlockSchema["quantity"] = &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + } + reservedBlockSchema["type"] = &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Default: "public_ipv4", + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"public_ipv4", "global_ipv4"}, false), + } + reservedBlockSchema["cidr_notation"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + + return &schema.Resource{ + Create: resourcePacketReservedIPBlockCreate, + Read: resourcePacketReservedIPBlockRead, + Delete: resourcePacketReservedIPBlockDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: reservedBlockSchema, + } +} + +func resourcePacketReservedIPBlockCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + quantity := d.Get("quantity").(int) + typ := d.Get("type").(string) + + req := packngo.IPReservationRequest{ + Type: typ, + Quantity: quantity, + } + f, ok := d.GetOk("facility") + + if ok && typ == "global_ipv4" { + return fmt.Errorf("Facility can not be set for type == 
global_ipv4") + } + fs := f.(string) + if typ == "public_ipv4" { + req.Facility = &fs + } + desc, ok := d.GetOk("description") + if ok { + req.Description = desc.(string) + } + + projectID := d.Get("project_id").(string) + + blockAddr, _, err := client.ProjectIPs.Request(projectID, &req) + if err != nil { + return fmt.Errorf("Error reserving IP address block: %s", err) + } + + d.Set("project_id", projectID) + d.SetId(blockAddr.ID) + + return resourcePacketReservedIPBlockRead(d, meta) +} + +func getGlobalBool(r *packngo.IPAddressReservation) bool { + if r.Global != nil { + return *(r.Global) + } + return false +} + +func getType(r *packngo.IPAddressReservation) (string, error) { + globalBool := getGlobalBool(r) + switch { + case !r.Public: + return fmt.Sprintf("private_ipv%d", r.AddressFamily), nil + case r.Public && !globalBool: + return fmt.Sprintf("public_ipv%d", r.AddressFamily), nil + case r.Public && globalBool: + return fmt.Sprintf("global_ipv%d", r.AddressFamily), nil + } + return "", fmt.Errorf("Unknown reservation type %+v", r) +} + +func loadBlock(d *schema.ResourceData, reservedBlock *packngo.IPAddressReservation) error { + ipv4CIDRToQuantity := map[int]int{32: 1, 31: 2, 30: 4, 29: 8, 28: 16, 27: 32, 26: 64, 25: 128, 24: 256} + + d.SetId(reservedBlock.ID) + + typ, err := getType(reservedBlock) + if err != nil { + return err + } + quantity := 0 + if reservedBlock.AddressFamily == 4 { + quantity = ipv4CIDRToQuantity[reservedBlock.CIDR] + } else { + // In Equinix Metal, a reserved IPv6 block is allocated when a device is + // run in a project. It's always /56, and it can't be created with + // Terraform, only imported. The longest assignable prefix is /64, + // making it max 256 subnets per block. The following logic will hold as + // long as /64 is the smallest assignable subnet size. 
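+		// Example: a /56 block yields 2^(64-56) = 256 assignable /64 subnets, the maximum noted above.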
+ bits := 64 - reservedBlock.CIDR + if bits > 30 { + return fmt.Errorf("Strange (too small) CIDR prefix: %d", reservedBlock.CIDR) + } + quantity = 1 << uint(bits) + } + + err = setMap(d, map[string]interface{}{ + "address": reservedBlock.Address, + "facility": func(d *schema.ResourceData, k string) error { + if reservedBlock.Facility == nil { + return nil + } + return d.Set(k, reservedBlock.Facility.Code) + }, + "gateway": reservedBlock.Gateway, + "network": reservedBlock.Network, + "netmask": reservedBlock.Netmask, + "address_family": reservedBlock.AddressFamily, + "cidr": reservedBlock.CIDR, + "type": typ, + "public": reservedBlock.Public, + "management": reservedBlock.Management, + "manageable": reservedBlock.Manageable, + "quantity": quantity, + "project_id": path.Base(reservedBlock.Project.Href), + "cidr_notation": fmt.Sprintf("%s/%d", reservedBlock.Network, reservedBlock.CIDR), + }) + return err +} + +func resourcePacketReservedIPBlockRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + id := d.Id() + + reservedBlock, _, err := client.ProjectIPs.Get(id, nil) + if err != nil { + err = friendlyError(err) + if isNotFound(err) { + log.Printf("[WARN] Reserved IP Block (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("Error reading IP address block with ID %s: %s", id, err) + } + err = loadBlock(d, reservedBlock) + if err != nil { + return err + } + + if (reservedBlock.Description != nil) && (*(reservedBlock.Description) != "") { + d.Set("description", *(reservedBlock.Description)) + } + d.Set("global", getGlobalBool(reservedBlock)) + + return nil +} + +func resourcePacketReservedIPBlockDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + id := d.Id() + + _, err := client.ProjectIPs.Remove(id) + + if err != nil { + return fmt.Errorf("Error deleting IP reservation block %s: %s", id, err) + } + + d.SetId("") + return nil +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_spot_market_request.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_spot_market_request.go new file mode 100644 index 00000000000..07279226e90 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_spot_market_request.go @@ -0,0 +1,371 @@ +package packet + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketSpotMarketRequest() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketSpotMarketRequestCreate, + Read: resourcePacketSpotMarketRequestRead, + Delete: resourcePacketSpotMarketRequestDelete, + + Schema: map[string]*schema.Schema{ + "devices_min": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "devices_max": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "max_bid_price": { + Type: schema.TypeFloat, + Required: true, + ForceNew: true, + }, + "facilities": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ForceNew: true, + }, + "instance_parameters": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "billing_cycle": { + Type: schema.TypeString, + Required: true, + }, + "plan": { + Type: schema.TypeString, + 
Required: true, + }, + "operating_system": { + Type: schema.TypeString, + Required: true, + }, + "hostname": { + Type: schema.TypeString, + Required: true, + }, + "termintation_time": { + Type: schema.TypeString, + Computed: true, + }, + "always_pxe": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "features": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "locked": { + Type: schema.TypeString, + Optional: true, + }, + "project_ssh_keys": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "user_ssh_keys": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "userdata": { + Type: schema.TypeString, + Optional: true, + }, + "customdata": { + Type: schema.TypeString, + Optional: true, + }, + "ipxe_script_url": { + Type: schema.TypeString, + Optional: true, + }, + "tags": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "wait_for_devices": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + } +} + +func resourcePacketSpotMarketRequestCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + var waitForDevices bool + + facilitiesRaw := d.Get("facilities").([]interface{}) + facilities := []string{} + + for _, f := range facilitiesRaw { + facilities = append(facilities, f.(string)) + } + + params := packngo.SpotMarketRequestInstanceParameters{ + Hostname: d.Get("instance_parameters.0.hostname").(string), + BillingCycle: d.Get("instance_parameters.0.billing_cycle").(string), + Plan: d.Get("instance_parameters.0.plan").(string), + OperatingSystem: d.Get("instance_parameters.0.operating_system").(string), + } + + if val, ok := d.GetOk("instance_parameters.0.userdata"); ok { + params.UserData = val.(string) + } + + if val, ok := d.GetOk("instance_parameters.0.customdata"); ok { + params.CustomData = val.(string) + } + + if val, ok := d.GetOk("instance_parameters.0.ipxe_script_url"); ok { + params.IPXEScriptURL = val.(string) + } + + if val, ok := d.GetOk("instance_parameters.0.always_pxe"); ok { + params.AlwaysPXE = val.(bool) + } + + if params.OperatingSystem == "custom_ipxe" { + if params.IPXEScriptURL == "" && params.UserData == "" { + return fmt.Errorf("\"ipxe_script_url\" or \"user_data\"" + + " must be provided when \"custom_ipxe\" OS is selected.") + } + + // ipxe_script_url + user_data is OK, unless user_data is an ipxe script in + // which case it's an error. + if params.IPXEScriptURL != "" { + if matchIPXEScript.MatchString(params.UserData) { + return fmt.Errorf("\"user_data\" should not be an iPXE " + + "script when \"ipxe_script_url\" is also provided.") + } + } + } + + if params.OperatingSystem != "custom_ipxe" && params.IPXEScriptURL != "" { + return fmt.Errorf("\"ipxe_script_url\" argument provided, but" + + " OS is not \"custom_ipxe\". 
Please verify and fix device arguments.") + } + + if val, ok := d.GetOk("instance_parameters.0.description"); ok { + params.Description = val.(string) + } + + if val, ok := d.GetOk("instance_parameters.0.features"); ok { + temp := val.([]interface{}) + for _, i := range temp { + if i != nil { + params.Features = append(params.Features, i.(string)) + } + } + } + + if val, ok := d.GetOk("wait_for_devices"); ok { + waitForDevices = val.(bool) + } + + if val, ok := d.GetOk("instance_parameters.0.locked"); ok { + params.Locked = val.(bool) + } + + if val, ok := d.GetOk("instance_parameters.0.project_ssh_keys"); ok { + temp := val.([]interface{}) + for _, i := range temp { + if i != nil { + params.ProjectSSHKeys = append(params.ProjectSSHKeys, i.(string)) + } + } + } + + if val, ok := d.GetOk("instance_parameters.0.tags"); ok { + temp := val.([]interface{}) + for _, i := range temp { + if i != nil { + params.Tags = append(params.Tags, i.(string)) + } + } + } + + if val, ok := d.GetOk("instance_parameters.0.user_ssh_keys"); ok { + temp := val.([]interface{}) + for _, i := range temp { + if i != nil { + params.UserSSHKeys = append(params.UserSSHKeys, i.(string)) + } + } + } + + smrc := &packngo.SpotMarketRequestCreateRequest{ + DevicesMax: d.Get("devices_max").(int), + DevicesMin: d.Get("devices_min").(int), + MaxBidPrice: d.Get("max_bid_price").(float64), + FacilityIDs: facilities, + Parameters: params, + } + + smr, _, err := client.SpotMarketRequests.Create(smrc, d.Get("project_id").(string)) + if err != nil { + return err + } + + d.SetId(smr.ID) + + if waitForDevices { + stateConf := &resource.StateChangeConf{ + Pending: []string{"not_done"}, + Target: []string{"done"}, + Refresh: resourceStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutCreate), + MinTimeout: 5 * time.Second, + Delay: 3 * time.Second, // Wait 10 secs before starting + NotFoundChecks: 600, //Setting high number, to support long timeouts + } + + _, err = stateConf.WaitForState() + if err != nil { + return err + } + } + + return resourcePacketSpotMarketRequestRead(d, meta) +} + +func resourcePacketSpotMarketRequestRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + smr, _, err := client.SpotMarketRequests.Get(d.Id(), &packngo.GetOptions{Includes: []string{"project", "devices", "facilities"}}) + if err != nil { + err = friendlyError(err) + if isNotFound(err) { + log.Printf("[WARN] SpotMarketRequest (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return err + } + + deviceIDs := make([]string, len(smr.Devices)) + for i, d := range smr.Devices { + deviceIDs[i] = d.ID + } + + facilityIDs := make([]string, len(smr.Facilities)) + if len(smr.Facilities) > 0 { + for i, f := range smr.Facilities { + facilityIDs[i] = f.ID + } + } + d.Set("project_id", smr.Project.ID) + + return nil +} + +func resourcePacketSpotMarketRequestDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + var waitForDevices bool + + if val, ok := d.GetOk("wait_for_devices"); ok { + waitForDevices = val.(bool) + } + if waitForDevices { + smr, _, err := client.SpotMarketRequests.Get(d.Id(), &packngo.GetOptions{Includes: []string{"project", "devices", "facilities"}}) + if err != nil { + return nil + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"not_done"}, + Target: []string{"done"}, + Refresh: resourceStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutDelete), + MinTimeout: 5 * time.Second, + Delay: 3 * time.Second, 
// Wait 10 secs before starting + NotFoundChecks: 600, //Setting high number, to support long timeouts + } + + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + for _, d := range smr.Devices { + _, err := client.Devices.Delete(d.ID, true) + if err != nil { + return err + } + } + } + _, err := client.SpotMarketRequests.Delete(d.Id(), true) + if err != nil { + return nil + } + return nil +} + +func resourceStateRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + client := meta.(*packngo.Client) + smr, _, err := client.SpotMarketRequests.Get(d.Id(), &packngo.GetOptions{Includes: []string{"project", "devices", "facilities"}}) + + if err != nil { + return nil, "", fmt.Errorf("Failed to fetch Spot market request with following error: %s", err.Error()) + } + var finished bool + + for _, d := range smr.Devices { + + dev, _, err := client.Devices.Get(d.ID, nil) + if err != nil { + return nil, "", fmt.Errorf("Failed to fetch Device with following error: %s", err.Error()) + } + if dev.State != "active" { + break + } else { + finished = true + } + } + if finished { + return smr, "done", nil + } + return nil, "not_done", nil + } +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_ssh_key.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_ssh_key.go new file mode 100644 index 00000000000..8db43ecf507 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_ssh_key.go @@ -0,0 +1,150 @@ +package packet + +import ( + "log" + "path" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func packetSSHKeyCommonFields() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "public_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "fingerprint": { + Type: schema.TypeString, + Computed: true, + }, + + "created": { + Type: schema.TypeString, + Computed: true, + }, + + "updated": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + } + +} + +func resourcePacketSSHKey() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketSSHKeyCreate, + Read: resourcePacketSSHKeyRead, + Update: resourcePacketSSHKeyUpdate, + Delete: resourcePacketSSHKeyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: packetSSHKeyCommonFields(), + } +} + +func resourcePacketSSHKeyCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + createRequest := &packngo.SSHKeyCreateRequest{ + Label: d.Get("name").(string), + Key: d.Get("public_key").(string), + } + + projectID, isProjectKey := d.GetOk("project_id") + if isProjectKey { + createRequest.ProjectID = projectID.(string) + } + + key, _, err := client.SSHKeys.Create(createRequest) + if err != nil { + return friendlyError(err) + } + + d.SetId(key.ID) + + return resourcePacketSSHKeyRead(d, meta) +} + +func resourcePacketSSHKeyRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + key, _, err := client.SSHKeys.Get(d.Id(), nil) + if err != nil { + err = friendlyError(err) + + // If the key is somehow already destroyed, mark as + // succesfully gone + if isNotFound(err) { + log.Printf("[WARN] SSHKey (%s) not found, 
removing from state", d.Id()) + d.SetId("") + return nil + } + + return err + } + + ownerID := path.Base(key.Owner.Href) + + d.SetId(key.ID) + d.Set("name", key.Label) + d.Set("public_key", key.Key) + d.Set("fingerprint", key.FingerPrint) + d.Set("owner_id", ownerID) + d.Set("created", key.Created) + d.Set("updated", key.Updated) + + if key.Owner.Href[:10] == "/projects/" { + d.Set("project_id", ownerID) + } + + return nil +} + +func resourcePacketSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + updateRequest := &packngo.SSHKeyUpdateRequest{} + + if d.HasChange("name") { + kName := d.Get("name").(string) + updateRequest.Label = &kName + } + + if d.HasChange("public_key") { + kKey := d.Get("public_key").(string) + updateRequest.Key = &kKey + } + + _, _, err := client.SSHKeys.Update(d.Id(), updateRequest) + if err != nil { + return friendlyError(err) + } + + return resourcePacketSSHKeyRead(d, meta) +} + +func resourcePacketSSHKeyDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + _, err := client.SSHKeys.Delete(d.Id()) + if err != nil { + return friendlyError(err) + } + + d.SetId("") + return nil +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_vlan.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_vlan.go new file mode 100644 index 00000000000..d81733f4b9f --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_vlan.go @@ -0,0 +1,85 @@ +package packet + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketVlan() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketVlanCreate, + Read: resourcePacketVlanRead, + Delete: resourcePacketVlanDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": { + Type: schema.TypeString, + Required: false, + Optional: true, + ForceNew: true, + }, + "facility": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "vxlan": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func resourcePacketVlanCreate(d *schema.ResourceData, meta interface{}) error { + c := meta.(*packngo.Client) + createRequest := &packngo.VirtualNetworkCreateRequest{ + ProjectID: d.Get("project_id").(string), + Description: d.Get("description").(string), + Facility: d.Get("facility").(string), + } + vlan, _, err := c.ProjectVirtualNetworks.Create(createRequest) + if err != nil { + return friendlyError(err) + } + d.SetId(vlan.ID) + return resourcePacketVlanRead(d, meta) +} + +func resourcePacketVlanRead(d *schema.ResourceData, meta interface{}) error { + c := meta.(*packngo.Client) + + vlan, _, err := c.ProjectVirtualNetworks.Get(d.Id(), + &packngo.GetOptions{Includes: []string{"assigned_to"}}) + if err != nil { + err = friendlyError(err) + if isNotFound(err) { + d.SetId("") + return nil + } + return err + + } + d.Set("description", vlan.Description) + d.Set("project_id", vlan.Project.ID) + d.Set("vxlan", vlan.VXLAN) + d.Set("facility", vlan.FacilityCode) + return nil +} + +func resourcePacketVlanDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + _, err := client.ProjectVirtualNetworks.Delete(d.Id()) + if err != nil { + return 
friendlyError(err) + } + return nil +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_volume.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_volume.go new file mode 100644 index 00000000000..5129a1b84c6 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_volume.go @@ -0,0 +1,296 @@ +package packet + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketVolume() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketVolumeCreate, + Read: resourcePacketVolumeRead, + Update: resourcePacketVolumeUpdate, + Delete: resourcePacketVolumeDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": { + Type: schema.TypeString, + Computed: true, + }, + + "description": { + Type: schema.TypeString, + Required: false, + Optional: true, + }, + + "size": { + Type: schema.TypeInt, + Required: true, + }, + + "facility": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "plan": { + Type: schema.TypeString, + Required: true, + }, + + "billing_cycle": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + }, + + "locked": { + Type: schema.TypeBool, + Optional: true, + }, + + "snapshot_policies": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "snapshot_frequency": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "snapshot_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + }, + }, + }, + + "attachments": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "created": { + Type: schema.TypeString, + Computed: true, + }, + + "updated": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourcePacketVolumeCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + createRequest := &packngo.VolumeCreateRequest{ + PlanID: d.Get("plan").(string), + FacilityID: d.Get("facility").(string), + Size: d.Get("size").(int), + Locked: d.Get("locked").(bool), + } + + if attr, ok := d.GetOk("billing_cycle"); ok { + createRequest.BillingCycle = attr.(string) + } else { + createRequest.BillingCycle = "hourly" + } + + if attr, ok := d.GetOk("description"); ok { + createRequest.Description = attr.(string) + } + + snapshot_count := d.Get("snapshot_policies.#").(int) + if snapshot_count > 0 { + createRequest.SnapshotPolicies = make([]*packngo.SnapshotPolicy, 0, snapshot_count) + for i := 0; i < snapshot_count; i++ { + policy := new(packngo.SnapshotPolicy) + policy.SnapshotFrequency = d.Get(fmt.Sprintf("snapshot_policies.%d.snapshot_frequency", i)).(string) + policy.SnapshotCount = d.Get(fmt.Sprintf("snapshot_policies.%d.snapshot_count", i)).(int) + createRequest.SnapshotPolicies = append(createRequest.SnapshotPolicies, policy) + } + } + + newVolume, _, err := client.Volumes.Create(createRequest, d.Get("project_id").(string)) + if err != nil { + return 
friendlyError(err) + } + + d.SetId(newVolume.ID) + + err = waitForVolumeState(newVolume.ID, "active", []string{"queued", "provisioning"}, meta) + if err != nil { + d.SetId("") + return err + } + + return resourcePacketVolumeRead(d, meta) +} + +func waitForVolumeState(volumeID string, target string, pending []string, meta interface{}) error { + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: []string{target}, + Refresh: func() (interface{}, string, error) { + client := meta.(*packngo.Client) + v, _, err := client.Volumes.Get(volumeID, &packngo.GetOptions{Includes: []string{"project", "snapshot_policies", "facility"}}) + if err == nil { + return 42, v.State, nil + } + return 42, "error", err + }, + Timeout: 60 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + _, err := stateConf.WaitForState() + return err +} + +func resourcePacketVolumeRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + volume, _, err := client.Volumes.Get(d.Id(), &packngo.GetOptions{Includes: []string{"project", "snapshot_policies", "facility"}}) + if err != nil { + err = friendlyError(err) + + // If the volume somehow already destroyed, mark as successfully gone. + if isNotFound(err) { + log.Printf("[WARN] Volume (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + return err + } + + d.Set("name", volume.Name) + d.Set("description", volume.Description) + d.Set("size", volume.Size) + d.Set("plan", volume.Plan.Slug) + d.Set("facility", volume.Facility.Code) + d.Set("state", volume.State) + d.Set("billing_cycle", volume.BillingCycle) + d.Set("locked", volume.Locked) + d.Set("created", volume.Created) + d.Set("updated", volume.Updated) + d.Set("project_id", volume.Project.ID) + + snapshot_policies := make([]map[string]interface{}, 0, len(volume.SnapshotPolicies)) + for _, snapshot_policy := range volume.SnapshotPolicies { + policy := map[string]interface{}{ + "snapshot_frequency": snapshot_policy.SnapshotFrequency, + "snapshot_count": snapshot_policy.SnapshotCount, + } + snapshot_policies = append(snapshot_policies, policy) + } + d.Set("snapshot_policies", snapshot_policies) + + attachments := make([]*packngo.VolumeAttachment, 0, len(volume.Attachments)) + for _, attachment := range volume.Attachments { + attachments = append(attachments, attachment) + } + d.Set("attachments", attachments) + + return nil +} + +func resourcePacketVolumeUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + if d.HasChange("locked") { + // the change is true => false, i.e. 
unlock + if !d.Get("locked").(bool) { + if _, err := client.Volumes.Unlock(d.Id()); err != nil { + return friendlyError(err) + } + } + } + + updateRequest := &packngo.VolumeUpdateRequest{} + + sendAttrUpdate := false + + if d.HasChange("description") { + sendAttrUpdate = true + vDesc := d.Get("description").(string) + updateRequest.Description = &vDesc + } + if d.HasChange("plan") { + sendAttrUpdate = true + vPlan := d.Get("plan").(string) + updateRequest.PlanID = &vPlan + } + if d.HasChange("size") { + sendAttrUpdate = true + vSize := d.Get("size").(int) + updateRequest.Size = &vSize + } + if d.HasChange("billing_cycle") { + sendAttrUpdate = true + vCycle := d.Get("billing_cycle").(string) + updateRequest.BillingCycle = &vCycle + } + + if sendAttrUpdate { + _, _, err := client.Volumes.Update(d.Id(), updateRequest) + if err != nil { + return friendlyError(err) + } + } + if d.HasChange("locked") { + // the change is false => true, i.e. lock + if d.Get("locked").(bool) { + if _, err := client.Volumes.Lock(d.Id()); err != nil { + return friendlyError(err) + } + } + } + + return resourcePacketVolumeRead(d, meta) +} + +func resourcePacketVolumeDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + if _, err := client.Volumes.Delete(d.Id()); err != nil { + return friendlyError(err) + } + + return nil +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_volume_attachment.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_volume_attachment.go new file mode 100644 index 00000000000..3e8d676c766 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/resource_packet_volume_attachment.go @@ -0,0 +1,84 @@ +package packet + +import ( + "log" + "path/filepath" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketVolumeAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketVolumeAttachmentCreate, + Read: resourcePacketVolumeAttachmentRead, + Delete: resourcePacketVolumeAttachmentDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "device_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "volume_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourcePacketVolumeAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + dID := d.Get("device_id").(string) + vID := d.Get("volume_id").(string) + log.Printf("[DEBUG] Attaching Volume (%s) to Instance (%s)\n", vID, dID) + va, _, err := client.VolumeAttachments.Create(vID, dID) + if err != nil { + switch err.(type) { + case *packngo.ErrorResponse: + e := err.(*packngo.ErrorResponse) + if len(e.Errors) == 1 { + if e.Errors[0] == "Instance is already attached to this volume" { + log.Printf("[DEBUG] Volume (%s) is already attached to Instance (%s)", vID, dID) + break + } + } + } + return err + } + + d.SetId(va.ID) + return resourcePacketVolumeAttachmentRead(d, meta) +} + +func resourcePacketVolumeAttachmentRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + va, _, err := client.VolumeAttachments.Get(d.Id(), nil) + if err != nil { + err = friendlyError(err) + if isNotFound(err) { + log.Printf("[WARN] Volume Attachment (%s) not found, removing from state", d.Id()) + d.SetId("") + return 
nil + } + return err + } + d.Set("device_id", filepath.Base(va.Device.Href)) + d.Set("volume_id", filepath.Base(va.Volume.Href)) + return nil +} + +func resourcePacketVolumeAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + _, err := client.VolumeAttachments.Delete(d.Id()) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/utils.go b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/utils.go new file mode 100644 index 00000000000..4f5bc569139 --- /dev/null +++ b/vendor/github.com/equinix/terraform-provider-equinix-metal/packet/utils.go @@ -0,0 +1,21 @@ +package packet + +func contains(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} + +func convertStringArr(ifaceArr []interface{}) []string { + var arr []string + for _, v := range ifaceArr { + if v == nil { + continue + } + arr = append(arr, v.(string)) + } + return arr +} diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml new file mode 100644 index 00000000000..50e4afd19a4 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/.travis.yml @@ -0,0 +1,19 @@ +language: go + +go: + - 1.14 + - 1.13 + +install: + - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + - go get github.com/jessevdk/go-flags + +script: + - go get + - go test -cover ./... + - cd ./v5 + - go get + - go test -cover ./... + +notifications: + email: false diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE new file mode 100644 index 00000000000..df76d7d7716 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md new file mode 100644 index 00000000000..121b039dbaa --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -0,0 +1,298 @@ +# JSON-Patch +`jsonpatch` is a library which provides functionality for both applying +[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as +well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). + +[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) +[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) +[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) + +# Get It! + +**Latest and greatest**: +```bash +go get -u github.com/evanphx/json-patch/v5 +``` + +**Stable Versions**: +* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` +* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` + +(previous versions below `v3` are unavailable) + +# Use It! +* [Create and apply a merge patch](#create-and-apply-a-merge-patch) +* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) +* [Comparing JSON documents](#comparing-json-documents) +* [Combine merge patches](#combine-merge-patches) + + +# Configuration + +* There is a global configuration variable `jsonpatch.SupportNegativeIndices`. + This defaults to `true` and enables the non-standard practice of allowing + negative indices to mean indices starting at the end of an array. This + functionality can be disabled by setting `jsonpatch.SupportNegativeIndices = + false`. + +* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`, + which limits the total size increase in bytes caused by "copy" operations in a + patch. It defaults to 0, which means there is no limit. + +## Create and apply a merge patch +Given both an original JSON document and a modified JSON document, you can create +a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. + +It can describe the changes needed to convert from the original to the +modified JSON document. + +Once you have a merge patch, you can apply it to other JSON documents using the +`jsonpatch.MergePatch(document, patch)` function. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + // Let's create a merge patch from these two documents... + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + target := []byte(`{"name": "Jane", "age": 24}`) + + patch, err := jsonpatch.CreateMergePatch(original, target) + if err != nil { + panic(err) + } + + // Now lets apply the patch against a different JSON document... + + alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`) + modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch) + + fmt.Printf("patch document: %s\n", patch) + fmt.Printf("updated alternative doc: %s\n", modifiedAlternative) +} +``` + +When ran, you get the following output: + +```bash +$ go run main.go +patch document: {"height":null,"name":"Jane"} +updated alternative doc: {"age":28,"name":"Jane"} +``` + +## Create and apply a JSON Patch +You can create patch objects using `DecodePatch([]byte)`, which can then +be applied against JSON documents. 
+ +The following is an example of creating a patch from two operations, and +applying it against a JSON document. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + patchJSON := []byte(`[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} + ]`) + + patch, err := jsonpatch.DecodePatch(patchJSON) + if err != nil { + panic(err) + } + + modified, err := patch.Apply(original) + if err != nil { + panic(err) + } + + fmt.Printf("Original document: %s\n", original) + fmt.Printf("Modified document: %s\n", modified) +} +``` + +When ran, you get the following output: + +```bash +$ go run main.go +Original document: {"name": "John", "age": 24, "height": 3.21} +Modified document: {"age":24,"name":"Jane"} +``` + +## Comparing JSON documents +Due to potential whitespace and ordering differences, one cannot simply compare +JSON strings or byte-arrays directly. + +As such, you can instead use `jsonpatch.Equal(document1, document2)` to +determine if two JSON documents are _structurally_ equal. This ignores +whitespace differences, and key-value ordering. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + similar := []byte(` + { + "age": 24, + "height": 3.21, + "name": "John" + } + `) + different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`) + + if jsonpatch.Equal(original, similar) { + fmt.Println(`"original" is structurally equal to "similar"`) + } + + if !jsonpatch.Equal(original, different) { + fmt.Println(`"original" is _not_ structurally equal to "different"`) + } +} +``` + +When ran, you get the following output: +```bash +$ go run main.go +"original" is structurally equal to "similar" +"original" is _not_ structurally equal to "different" +``` + +## Combine merge patches +Given two JSON merge patch documents, it is possible to combine them into a +single merge patch which can describe both set of changes. + +The resulting merge patch can be used such that applying it results in a +document structurally similar as merging each merge patch to the document +in succession. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + + nameAndHeight := []byte(`{"height":null,"name":"Jane"}`) + ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`) + + // Let's combine these merge patch documents... + combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply each patch individual against the original document + withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight) + if err != nil { + panic(err) + } + + withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply the combined patch against the original document + + withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch) + if err != nil { + panic(err) + } + + // Do both result in the same thing? They should! 
+ if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) { + fmt.Println("Both JSON documents are structurally the same!") + } + + fmt.Printf("combined merge patch: %s", combinedPatch) +} +``` + +When ran, you get the following output: +```bash +$ go run main.go +Both JSON documents are structurally the same! +combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"} +``` + +# CLI for comparing JSON documents +You can install the commandline program `json-patch`. + +This program can take multiple JSON patch documents as arguments, +and fed a JSON document from `stdin`. It will apply the patch(es) against +the document and output the modified doc. + +**patch.1.json** +```json +[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} +] +``` + +**patch.2.json** +```json +[ + {"op": "add", "path": "/address", "value": "123 Main St"}, + {"op": "replace", "path": "/age", "value": "21"} +] +``` + +**document.json** +```json +{ + "name": "John", + "age": 24, + "height": 3.21 +} +``` + +You can then run: + +```bash +$ go install github.com/evanphx/json-patch/cmd/json-patch +$ cat document.json | json-patch -p patch.1.json -p patch.2.json +{"address":"123 Main St","age":"21","name":"Jane"} +``` + +# Help It! +Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues) +or [create a PR](https://github.com/evanphx/json-patch/compare). + + +Before creating a pull request, we'd ask that you make sure tests are passing +and that you have added new tests when applicable. + +Contributors can run tests using: + +```bash +go test -cover ./... +``` + +Builds for pull requests are tested automatically +using [TravisCI](https://travis-ci.org/evanphx/json-patch). diff --git a/vendor/github.com/evanphx/json-patch/errors.go b/vendor/github.com/evanphx/json-patch/errors.go new file mode 100644 index 00000000000..75304b4437c --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/errors.go @@ -0,0 +1,38 @@ +package jsonpatch + +import "fmt" + +// AccumulatedCopySizeError is an error type returned when the accumulated size +// increase caused by copy operations in a patch operation has exceeded the +// limit. +type AccumulatedCopySizeError struct { + limit int64 + accumulated int64 +} + +// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError. +func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError { + return &AccumulatedCopySizeError{limit: l, accumulated: a} +} + +// Error implements the error interface. +func (a *AccumulatedCopySizeError) Error() string { + return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit) +} + +// ArraySizeError is an error type returned when the array size has exceeded +// the limit. +type ArraySizeError struct { + limit int + size int +} + +// NewArraySizeError returns an ArraySizeError. +func NewArraySizeError(l, s int) *ArraySizeError { + return &ArraySizeError{limit: l, size: s} +} + +// Error implements the error interface. 
+func (a *ArraySizeError) Error() string { + return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) +} diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go new file mode 100644 index 00000000000..14e8bb5ce38 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -0,0 +1,386 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" +) + +func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc, mergeMerge) + + return cur +} + +func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { + for k, v := range *patch { + if v == nil { + if mergeMerge { + (*doc)[k] = nil + } else { + delete(*doc, k) + } + } else { + cur, ok := (*doc)[k] + + if !ok || cur == nil { + pruneNulls(v) + (*doc)[k] = v + } else { + (*doc)[k] = merge(cur, v, mergeMerge) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range *doc { + if v == nil { + delete(*doc, k) + } else { + pruneNulls(v) + } + } + + return doc +} + +func pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for _, v := range *ary { + if v != nil { + pruneNulls(v) + newAry = append(newAry, v) + } + } + + *ary = newAry + + return ary +} + +var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") + +// MergeMergePatches merges two merge patches together, such that +// applying this resulting merged merge patch to a document yields the same +// as merging each merge patch to the document in succession. +func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { + return doMergePatch(patch1Data, patch2Data, true) +} + +// MergePatch merges the patchData into the docData. 
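+//
+// A minimal usage sketch; the document and patch values below are
+// illustrative, not taken from the package's tests:
+//
+//	doc := []byte(`{"name":"John","age":24}`)
+//	patch := []byte(`{"age":25,"height":null}`)
+//	merged, err := MergePatch(doc, patch)
+//	// merged == []byte(`{"age":25,"name":"John"}`), err == nil
+//
+// Keys set to null in the patch are removed from (or never added to) the
+// document, following JSON merge patch semantics.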
+func MergePatch(docData, patchData []byte) ([]byte, error) { + return doMergePatch(docData, patchData, false) +} + +func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if _, ok := docErr.(*json.SyntaxError); ok { + return nil, errBadJSONDoc + } + + if _, ok := patchErr.(*json.SyntaxError); ok { + return nil, errBadJSONPatch + } + + if docErr == nil && *doc == nil { + return nil, errBadJSONDoc + } + + if patchErr == nil && *patch == nil { + return nil, errBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + if mergeMerge { + doc = patch + } else { + doc = pruneDocNulls(patch) + } + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch, mergeMerge) + } + + return json.Marshal(doc) +} + +// resemblesJSONArray indicates whether the byte-slice "appears" to be +// a JSON array or not. +// False-positives are possible, as this function does not check the internal +// structure of the array. It only checks that the outer syntax is present and +// correct. +func resemblesJSONArray(input []byte) bool { + input = bytes.TrimSpace(input) + + hasPrefix := bytes.HasPrefix(input, []byte("[")) + hasSuffix := bytes.HasSuffix(input, []byte("]")) + + return hasPrefix && hasSuffix +} + +// CreateMergePatch will return a merge patch document capable of converting +// the original document(s) to the modified document(s). +// The parameters can be bytes of either two JSON Documents, or two arrays of +// JSON documents. +// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalResemblesArray := resemblesJSONArray(originalJSON) + modifiedResemblesArray := resemblesJSONArray(modifiedJSON) + + // Do both byte-slices seem like JSON arrays? + if originalResemblesArray && modifiedResemblesArray { + return createArrayMergePatch(originalJSON, modifiedJSON) + } + + // Are both byte-slices are not arrays? Then they are likely JSON objects... + if !originalResemblesArray && !modifiedResemblesArray { + return createObjectMergePatch(originalJSON, modifiedJSON) + } + + // None of the above? Then return an error because of mismatched types. + return nil, errBadMergeTypes +} + +// createObjectMergePatch will return a merge-patch document capable of +// converting the original document to the modified document. 
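+//
+// For example (illustrative values): given an original document
+// `{"a":1,"b":2}` and a modified document `{"a":1,"c":3}`, the resulting
+// merge patch is `{"b":null,"c":3}`; the null marks "b" for deletion and
+// "c" is added.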
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDoc := map[string]interface{}{} + modifiedDoc := map[string]interface{}{} + + err := json.Unmarshal(originalJSON, &originalDoc) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDoc) + if err != nil { + return nil, errBadJSONDoc + } + + dest, err := getDiff(originalDoc, modifiedDoc) + if err != nil { + return nil, err + } + + return json.Marshal(dest) +} + +// createArrayMergePatch will return an array of merge-patch documents capable +// of converting the original document to the modified document for each +// pair of JSON documents provided in the arrays. +// Arrays of mismatched sizes will result in an error. +func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDocs := []json.RawMessage{} + modifiedDocs := []json.RawMessage{} + + err := json.Unmarshal(originalJSON, &originalDocs) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDocs) + if err != nil { + return nil, errBadJSONDoc + } + + total := len(originalDocs) + if len(modifiedDocs) != total { + return nil, errBadJSONDoc + } + + result := []json.RawMessage{} + for i := 0; i < len(originalDocs); i++ { + original := originalDocs[i] + modified := modifiedDocs[i] + + patch, err := createObjectMergePatch(original, modified) + if err != nil { + return nil, err + } + + result = append(result, json.RawMessage(patch)) + } + + return json.Marshal(result) +} + +// Returns true if the array matches (must be json types). +// As is idiomatic for go, an empty array is not the same as a nil array. +func matchesArray(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + for i := range a { + if !matchesValue(a[i], b[i]) { + return false + } + } + return true +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt := bv.(string) + if bt == at { + return true + } + case float64: + bt := bv.(float64) + if bt == at { + return true + } + case bool: + bt := bv.(bool) + if bt == at { + return true + } + case nil: + // Both nil, fine. + return true + case map[string]interface{}: + bt := bv.(map[string]interface{}) + if len(bt) != len(at) { + return false + } + for key := range bt { + av, aOK := at[key] + bv, bOK := bt[key] + if aOK != bOK { + return false + } + if !matchesValue(av, bv) { + return false + } + } + return true + case []interface{}: + bt := bv.([]interface{}) + return matchesArray(at, bt) + } + return false +} + +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
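+//
+// A small illustration (inputs are hypothetical; values use the types
+// produced by encoding/json, i.e. float64 for numbers):
+//
+//	a := map[string]interface{}{"x": 1.0, "y": 2.0}
+//	b := map[string]interface{}{"x": 5.0, "z": 3.0}
+//	d, _ := getDiff(a, b)
+//	// d == map[string]interface{}{"x": 5.0, "y": nil, "z": 3.0}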
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + av, ok := a[key] + // value was added + if !ok { + into[key] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[key] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[key] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[key] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[key] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[key] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go new file mode 100644 index 00000000000..f185a45b2cb --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -0,0 +1,784 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +const ( + eRaw = iota + eDoc + eAry +) + +var ( + // SupportNegativeIndices decides whether to support non-standard practice of + // allowing negative indices to mean indices starting at the end of an array. + // Default to true. + SupportNegativeIndices bool = true + // AccumulatedCopySizeLimit limits the total size increase in bytes caused by + // "copy" operations in a patch. + AccumulatedCopySizeLimit int64 = 0 +) + +var ( + ErrTestFailed = errors.New("test failed") + ErrMissing = errors.New("missing value") + ErrUnknownType = errors.New("unknown object type") + ErrInvalid = errors.New("invalid state detected") + ErrInvalidIndex = errors.New("invalid index referenced") +) + +type lazyNode struct { + raw *json.RawMessage + doc partialDoc + ary partialArray + which int +} + +// Operation is a single JSON-Patch step, such as a single 'add' operation. +type Operation map[string]*json.RawMessage + +// Patch is an ordered collection of Operations. 
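+//
+// In JSON form, a Patch is an RFC 6902 document such as the following
+// (illustrative):
+//
+//	[
+//	  {"op": "replace", "path": "/name", "value": "Jane"},
+//	  {"op": "remove", "path": "/height"}
+//	]
+//
+// Each element of the array decodes into one Operation.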
+type Patch []Operation + +type partialDoc map[string]*lazyNode +type partialArray []*lazyNode + +type container interface { + get(key string) (*lazyNode, error) + set(key string, val *lazyNode) error + add(key string, val *lazyNode) error + remove(key string) error +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, ErrUnknownType + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func deepCopy(src *lazyNode) (*lazyNode, int, error) { + if src == nil { + return nil, 0, nil + } + a, err := src.MarshalJSON() + if err != nil { + return nil, 0, err + } + sz := len(a) + ra := make(json.RawMessage, sz) + copy(ra, a) + return newLazyNode(&ra), sz, nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return &n.doc, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return &n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if n.which == eAry { + return &n.ary, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + if n.raw == nil { + return nil + } + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + if len(n.doc) != len(o.doc) { + return false + } + + for k, v := range n.doc { + ov, ok := o.doc[k] + + if !ok { + return false + } + + if (v == nil) != (ov == nil) { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +// Kind reads the "op" field of the Operation. +func (o Operation) Kind() string { + if obj, ok := o["op"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +// Path reads the "path" field of the Operation. 
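+//
+// For example (illustrative), Path returns "/a/b" for the operation
+// {"op": "add", "path": "/a/b", "value": 1}.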
+func (o Operation) Path() (string, error) { + if obj, ok := o["path"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") +} + +// From reads the "from" field of the Operation. +func (o Operation) From() (string, error) { + if obj, ok := o["from"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") +} + +func (o Operation) value() *lazyNode { + if obj, ok := o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +// ValueInterface decodes the operation value into an interface. +func (o Operation) ValueInterface() (interface{}, error) { + if obj, ok := o["value"]; ok && obj != nil { + var v interface{} + + err := json.Unmarshal(*obj, &v) + + if err != nil { + return nil, err + } + + return v, nil + } + + return nil, errors.Wrapf(ErrMissing, "operation, missing value field") +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *container, path string) (container, string) { + doc := *pd + + split := strings.Split(path, "/") + + if len(split) < 2 { + return nil, "" + } + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part)) + + if next == nil || ok != nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, decodePatchKey(key) +} + +func (d *partialDoc) set(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) get(key string) (*lazyNode, error) { + return (*d)[key], nil +} + +func (d *partialDoc) remove(key string) error { + _, ok := (*d)[key] + if !ok { + return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) + } + + delete(*d, key) + return nil +} + +// set should only be used to implement the "replace" operation, so "key" must +// be an already existing index in "d". 
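+//
+// Note that the index is applied directly to the underlying slice, so an
+// out-of-range key will panic rather than return an error; "add" (below)
+// performs the bounds checks instead.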
+func (d *partialArray) set(key string, val *lazyNode) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + (*d)[idx] = val + return nil +} + +func (d *partialArray) add(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return errors.Wrapf(err, "value was not a proper array index: '%s'", key) + } + + sz := len(*d) + 1 + + ary := make([]*lazyNode, sz) + + cur := *d + + if idx >= len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(ary) + } + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx >= len(*d) { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(cur) + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil + +} + +func (p Patch) add(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "add operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.add(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in add for path: '%s'", path) + } + + return nil +} + +func (p Patch) remove(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "remove operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) replace(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "replace operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) + } + + _, ok := con.get(key) + if ok != nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) + } + + err = con.set(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) move(doc *container, op Operation) 
error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) + } + + err = con.add(key, val) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", path) + } + + return nil +} + +func (p Patch) test(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "test operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in test for path: '%s'", path) + } + + if val == nil { + if op.value().raw == nil { + return nil + } + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } else if op.value() == nil { + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + + if val.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) +} + +func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "copy operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in copy for from: '%s'", from) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "copy operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) + } + + valCopy, sz, err := deepCopy(val) + if err != nil { + return errors.Wrapf(err, "error while performing deep copy") + } + + (*accumulatedCopySize) += int64(sz) + if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { + return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) + } + + err = con.add(key, valCopy) + if err != nil { + return errors.Wrapf(err, "error while adding value during copy") + } + + return nil +} + +// Equal indicates if 2 JSON documents have the same structural equality. +func Equal(a, b []byte) bool { + ra := make(json.RawMessage, len(a)) + copy(ra, a) + la := newLazyNode(&ra) + + rb := make(json.RawMessage, len(b)) + copy(rb, b) + lb := newLazyNode(&rb) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. 
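+//
+// A minimal usage sketch (the patch and document bytes are illustrative):
+//
+//	patch, err := DecodePatch([]byte(`[{"op":"remove","path":"/b"}]`))
+//	// err == nil
+//	out, err := patch.Apply([]byte(`{"a":1,"b":2}`))
+//	// out == []byte(`{"a":1}`), err == nil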
+func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. +func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyIndent(doc, "") +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. +func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + var pd container + if doc[0] == '[' { + pd = &partialArray{} + } else { + pd = &partialDoc{} + } + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + var accumulatedCopySize int64 + + for _, op := range p { + switch op.Kind() { + case "add": + err = p.add(&pd, op) + case "remove": + err = p.remove(&pd, op) + case "replace": + err = p.replace(&pd, op) + case "move": + err = p.move(&pd, op) + case "test": + err = p.test(&pd, op) + case "copy": + err = p.copy(&pd, op, &accumulatedCopySize) + default: + err = fmt.Errorf("Unexpected kind: %s", op.Kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. + +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig new file mode 100644 index 00000000000..fad895851e5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig @@ -0,0 +1,12 @@ +root = true + +[*.go] +indent_style = tab +indent_size = 4 +insert_final_newline = true + +[*.{yml,yaml}] +indent_style = space +indent_size = 2 +insert_final_newline = true +trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes new file mode 100644 index 00000000000..32f1001be0a --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitattributes @@ -0,0 +1 @@ +go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore new file mode 100644 index 00000000000..4cd0cbaf432 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -0,0 +1,6 @@ +# Setup a Global .gitignore for OS and editor generated files: +# https://help.github.com/articles/ignoring-files +# git config --global core.excludesfile ~/.gitignore_global + +.vagrant +*.sublime-project diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml new file mode 100644 index 00000000000..a9c30165cdd --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml @@ -0,0 +1,36 @@ +sudo: false +language: go + +go: + - "stable" + - "1.11.x" + - "1.10.x" + - "1.9.x" + +matrix: + include: + - go: "stable" + env: GOLINT=true + allow_failures: + - go: tip + fast_finish: true + + +before_install: + - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi + +script: + - go test --race ./... + +after_script: + - test -z "$(gofmt -s -l -w . 
| tee /dev/stderr)" + - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi + - go vet ./... + +os: + - linux + - osx + - windows + +notifications: + email: false diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS new file mode 100644 index 00000000000..5ab5d41c547 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS @@ -0,0 +1,52 @@ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# You can update this list using the following command: +# +# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' + +# Please keep the list sorted. + +Aaron L +Adrien Bustany +Amit Krishnan +Anmol Sethi +Bjørn Erik Pedersen +Bruno Bigras +Caleb Spare +Case Nelson +Chris Howey +Christoffer Buchholz +Daniel Wagner-Hall +Dave Cheney +Evan Phoenix +Francisco Souza +Hari haran +John C Barstow +Kelvin Fo +Ken-ichirou MATSUZAWA +Matt Layher +Nathan Youngman +Nickolai Zeldovich +Patrick +Paul Hammond +Pawel Knap +Pieter Droogendijk +Pursuit92 +Riku Voipio +Rob Figueiredo +Rodrigo Chiossi +Slawek Ligus +Soge Zhang +Tiffany Jernigan +Tilak Sharma +Tom Payne +Travis Cline +Tudor Golubenco +Vahe Khachikyan +Yukang +bronze1man +debrando +henrikedwards +铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md new file mode 100644 index 00000000000..be4d7ea2c14 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -0,0 +1,317 @@ +# Changelog + +## v1.4.7 / 2018-01-09 + +* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) +* Tests: Fix missing verb on format string (thanks @rchiossi) +* Linux: Fix deadlock in Remove (thanks @aarondl) +* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) +* Docs: Moved FAQ into the README (thanks @vahe) +* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) +* Docs: replace references to OS X with macOS + +## v1.4.2 / 2016-10-10 + +* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) + +## v1.4.1 / 2016-10-04 + +* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) + +## v1.4.0 / 2016-10-01 + +* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) + +## v1.3.1 / 2016-06-28 + +* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) + +## v1.3.0 / 2016-04-19 + +* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) + +## v1.2.10 / 2016-03-02 + +* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) + +## v1.2.9 / 2016-01-13 + +kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) + +## v1.2.8 / 2015-12-17 + +* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) +* inotify: fix race in test +* enable race detection 
for continuous integration (Linux, Mac, Windows) + +## v1.2.5 / 2015-10-17 + +* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) +* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) +* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) +* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) + +## v1.2.1 / 2015-10-14 + +* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) + +## v1.2.0 / 2015-02-08 + +* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) +* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) +* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) + +## v1.1.1 / 2015-02-05 + +* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) + +## v1.1.0 / 2014-12-12 + +* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) + * add low-level functions + * only need to store flags on directories + * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) + * done can be an unbuffered channel + * remove calls to os.NewSyscallError +* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) +* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v1.0.4 / 2014-09-07 + +* kqueue: add dragonfly to the build tags. +* Rename source code files, rearrange code so exported APIs are at the top. +* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) + +## v1.0.3 / 2014-08-19 + +* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) + +## v1.0.2 / 2014-08-17 + +* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Make ./path and path equivalent. (thanks @zhsso) + +## v1.0.0 / 2014-08-15 + +* [API] Remove AddWatch on Windows, use Add. +* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) +* Minor updates based on feedback from golint. + +## dev / 2014-07-09 + +* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). +* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) + +## dev / 2014-07-04 + +* kqueue: fix incorrect mutex used in Close() +* Update example to demonstrate usage of Op. + +## dev / 2014-06-28 + +* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) +* Fix for String() method on Event (thanks Alex Brainman) +* Don't build on Plan 9 or Solaris (thanks @4ad) + +## dev / 2014-06-21 + +* Events channel of type Event rather than *Event. +* [internal] use syscall constants directly for inotify and kqueue. +* [internal] kqueue: rename events to kevents and fileEvent to event. 
+ +## dev / 2014-06-19 + +* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). +* [internal] remove cookie from Event struct (unused). +* [internal] Event struct has the same definition across every OS. +* [internal] remove internal watch and removeWatch methods. + +## dev / 2014-06-12 + +* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). +* [API] Pluralized channel names: Events and Errors. +* [API] Renamed FileEvent struct to Event. +* [API] Op constants replace methods like IsCreate(). + +## dev / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## dev / 2014-05-23 + +* [API] Remove current implementation of WatchFlags. + * current implementation doesn't take advantage of OS for efficiency + * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes + * no tests for the current implementation + * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) + +## v0.9.3 / 2014-12-31 + +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v0.9.2 / 2014-08-17 + +* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) + +## v0.9.1 / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## v0.9.0 / 2014-01-17 + +* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) +* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) +* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
+ +## v0.8.12 / 2013-11-13 + +* [API] Remove FD_SET and friends from Linux adapter + +## v0.8.11 / 2013-11-02 + +* [Doc] Add Changelog [#72][] (thanks @nathany) +* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) + +## v0.8.10 / 2013-10-19 + +* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) +* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) +* [Doc] specify OS-specific limits in README (thanks @debrando) + +## v0.8.9 / 2013-09-08 + +* [Doc] Contributing (thanks @nathany) +* [Doc] update package path in example code [#63][] (thanks @paulhammond) +* [Doc] GoCI badge in README (Linux only) [#60][] +* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) + +## v0.8.8 / 2013-06-17 + +* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) + +## v0.8.7 / 2013-06-03 + +* [API] Make syscall flags internal +* [Fix] inotify: ignore event changes +* [Fix] race in symlink test [#45][] (reported by @srid) +* [Fix] tests on Windows +* lower case error messages + +## v0.8.6 / 2013-05-23 + +* kqueue: Use EVT_ONLY flag on Darwin +* [Doc] Update README with full example + +## v0.8.5 / 2013-05-09 + +* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) + +## v0.8.4 / 2013-04-07 + +* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) + +## v0.8.3 / 2013-03-13 + +* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) +* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) + +## v0.8.2 / 2013-02-07 + +* [Doc] add Authors +* [Fix] fix data races for map access [#29][] (thanks @fsouza) + +## v0.8.1 / 2013-01-09 + +* [Fix] Windows path separators +* [Doc] BSD License + +## v0.8.0 / 2012-11-09 + +* kqueue: directory watching improvements (thanks @vmirage) +* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) +* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) + +## v0.7.4 / 2012-10-09 + +* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) +* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) +* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) +* [Fix] kqueue: modify after recreation of file + +## v0.7.3 / 2012-09-27 + +* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) +* [Fix] kqueue: no longer get duplicate CREATE events + +## v0.7.2 / 2012-09-01 + +* kqueue: events for created directories + +## v0.7.1 / 2012-07-14 + +* [Fix] for renaming files + +## v0.7.0 / 2012-07-02 + +* [Feature] FSNotify flags +* [Fix] inotify: Added file name back to event path + +## v0.6.0 / 2012-06-06 + +* kqueue: watch files after directory created (thanks @tmc) + +## v0.5.1 / 2012-05-22 + +* [Fix] inotify: remove all watches before Close() + +## v0.5.0 / 2012-05-03 + +* [API] kqueue: return errors during watch instead of sending over channel +* kqueue: match symlink behavior on Linux +* inotify: add `DELETE_SELF` (requested by @taralx) +* [Fix] kqueue: handle EINTR (reported by @robfig) +* [Doc] Godoc example [#1][] (thanks @davecheney) + +## v0.4.0 / 2012-03-30 + +* Go 1 released: build with go tool +* [Feature] Windows support using winfsnotify +* Windows does not have attribute change notifications +* Roll attribute notifications into IsModify + +## v0.3.0 / 2012-02-19 + +* kqueue: add files when watch directory + +## v0.2.0 / 
2011-12-30 + +* update to latest Go weekly code + +## v0.1.0 / 2011-10-19 + +* kqueue: add watch on file creation to match inotify +* kqueue: create file event +* inotify: ignore `IN_IGNORED` events +* event String() +* linux: common FileEvent functions +* initial commit + +[#79]: https://github.com/howeyc/fsnotify/pull/79 +[#77]: https://github.com/howeyc/fsnotify/pull/77 +[#72]: https://github.com/howeyc/fsnotify/issues/72 +[#71]: https://github.com/howeyc/fsnotify/issues/71 +[#70]: https://github.com/howeyc/fsnotify/issues/70 +[#63]: https://github.com/howeyc/fsnotify/issues/63 +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#60]: https://github.com/howeyc/fsnotify/issues/60 +[#59]: https://github.com/howeyc/fsnotify/issues/59 +[#49]: https://github.com/howeyc/fsnotify/issues/49 +[#45]: https://github.com/howeyc/fsnotify/issues/45 +[#40]: https://github.com/howeyc/fsnotify/issues/40 +[#36]: https://github.com/howeyc/fsnotify/issues/36 +[#33]: https://github.com/howeyc/fsnotify/issues/33 +[#29]: https://github.com/howeyc/fsnotify/issues/29 +[#25]: https://github.com/howeyc/fsnotify/issues/25 +[#24]: https://github.com/howeyc/fsnotify/issues/24 +[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md new file mode 100644 index 00000000000..828a60b24ba --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing + +## Issues + +* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). +* Please indicate the platform you are using fsnotify on. +* A code example to reproduce the problem is appreciated. + +## Pull Requests + +### Contributor License Agreement + +fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). + +Please indicate that you have signed the CLA in your pull request. + +### How fsnotify is Developed + +* Development is done on feature branches. +* Tests are run on BSD, Linux, macOS and Windows. +* Pull requests are reviewed and [applied to master][am] using [hub][]. + * Maintainers may modify or squash commits rather than asking contributors to. +* To issue a new release, the maintainers will: + * Update the CHANGELOG + * Tag a version, which will become available through gopkg.in. + +### How to Fork + +For smooth sailing, always use the original import path. Installing with `go get` makes this easy. + +1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Ensure everything works and the tests pass (see below) +4. Commit your changes (`git commit -am 'Add some feature'`) + +Contribute upstream: + +1. Fork fsnotify on GitHub +2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) +3. Push to the branch (`git push fork my-new-feature`) +4. 
Create a new Pull Request on GitHub + +This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). + +### Testing + +fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. + +Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. + +To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. + +* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) +* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. +* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) +* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. +* When you're done, you will want to halt or destroy the Vagrant boxes. + +Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. + +Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). + +### Maintainers + +Help maintaining fsnotify is welcome. To be a maintainer: + +* Submit a pull request and sign the CLA as above. +* You must be able to run the test suite on Mac, Windows, Linux and BSD. + +To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. + +All code changes should be internal pull requests. + +Releases are tagged using [Semantic Versioning](http://semver.org/). + +[hub]: https://github.com/github/hub +[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/vendor/honnef.co/go/tools/ir/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE similarity index 92% rename from vendor/honnef.co/go/tools/ir/LICENSE rename to vendor/github.com/fsnotify/fsnotify/LICENSE index aee48041e11..e180c8fb059 100644 --- a/vendor/honnef.co/go/tools/ir/LICENSE +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -1,5 +1,5 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. -Copyright (c) 2016 Dominik Honnef. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md new file mode 100644 index 00000000000..b2629e5229c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -0,0 +1,130 @@ +# File system notifications for Go + +[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) + +fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. 
Ensure you have the latest version installed by running: + +```console +go get -u golang.org/x/sys/... +``` + +Cross platform: Windows, Linux, BSD and macOS. + +| Adapter | OS | Status | +| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | +| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | + +\* Android and iOS are untested. + +Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. + +## API stability + +fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). + +All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. + +Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. + +## Usage + +```go +package main + +import ( + "log" + + "github.com/fsnotify/fsnotify" +) + +func main() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + done := make(chan bool) + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Op&fsnotify.Write == fsnotify.Write { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + err = watcher.Add("/tmp/foo") + if err != nil { + log.Fatal(err) + } + <-done +} +``` + +## Contributing + +Please refer to [CONTRIBUTING][] before opening an issue or pull request. + +## Example + +See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). + +## FAQ + +**When a file is moved to another directory is it still being watched?** + +No (it shouldn't be, unless you are watching where it was moved to). + +**When I watch a directory, are all subdirectories watched as well?** + +No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). + +**Do I have to watch the Error and Event channels in a separate goroutine?** + +As of now, yes. 
Looking into making this single-thread friendly (see [howeyc #7][#7]) + +**Why am I receiving multiple events for the same file on OS X?** + +Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). + +**How many files can be watched at once?** + +There are OS-specific limits as to how many watches can be created: +* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. +* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. + +**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** + +fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. + +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#18]: https://github.com/fsnotify/fsnotify/issues/18 +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#7]: https://github.com/howeyc/fsnotify/issues/7 + +[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md + +## Related Projects + +* [notify](https://github.com/rjeczalik/notify) +* [fsevents](https://github.com/fsnotify/fsevents) + diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go new file mode 100644 index 00000000000..ced39cb881e --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fen.go @@ -0,0 +1,37 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go new file mode 100644 index 00000000000..89cab046d12 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +// Package fsnotify provides a platform-independent interface for file system notifications. +package fsnotify + +import ( + "bytes" + "errors" + "fmt" +) + +// Event represents a single file system notification. +type Event struct { + Name string // Relative path to the file or directory. + Op Op // File operation that triggered the event. +} + +// Op describes a set of file operations. +type Op uint32 + +// These are the generalized file operations that can trigger a notification. 
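+//
+// Op values are bit flags and may be combined in a single Event. A received
+// event is typically tested with a bitwise AND, for example (illustrative):
+//
+//	if event.Op&fsnotify.Write == fsnotify.Write {
+//		// the file was written to
+//	}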
+const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +func (op Op) String() string { + // Use a buffer for efficient string concatenation + var buffer bytes.Buffer + + if op&Create == Create { + buffer.WriteString("|CREATE") + } + if op&Remove == Remove { + buffer.WriteString("|REMOVE") + } + if op&Write == Write { + buffer.WriteString("|WRITE") + } + if op&Rename == Rename { + buffer.WriteString("|RENAME") + } + if op&Chmod == Chmod { + buffer.WriteString("|CHMOD") + } + if buffer.Len() == 0 { + return "" + } + return buffer.String()[1:] // Strip leading pipe +} + +// String returns a string representation of the event in the form +// "file: REMOVE|WRITE|..." +func (e Event) String() string { + return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) +} + +// Common errors that can be reported by a watcher +var ( + ErrEventOverflow = errors.New("fsnotify queue overflow") +) diff --git a/vendor/github.com/fsnotify/fsnotify/go.mod b/vendor/github.com/fsnotify/fsnotify/go.mod new file mode 100644 index 00000000000..ff11e13f224 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/go.mod @@ -0,0 +1,5 @@ +module github.com/fsnotify/fsnotify + +go 1.13 + +require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 diff --git a/vendor/github.com/fsnotify/fsnotify/go.sum b/vendor/github.com/fsnotify/fsnotify/go.sum new file mode 100644 index 00000000000..f60af9855da --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go new file mode 100644 index 00000000000..d9fd1b88a05 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -0,0 +1,337 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + mu sync.Mutex // Map access + fd int + poller *fdPoller + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) + if fd == -1 { + return nil, errno + } + // Create epoll + poller, err := newFdPoller(fd) + if err != nil { + unix.Close(fd) + return nil, err + } + w := &Watcher{ + fd: fd, + poller: poller, + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. 
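+//
+// Calling Close on an already-closed watcher is a no-op. Callers typically
+// defer it right after a successful NewWatcher, for example (illustrative):
+//
+//	w, err := NewWatcher()
+//	if err != nil {
+//		return err
+//	}
+//	defer w.Close()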
+func (w *Watcher) Close() error { + if w.isClosed() { + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + + // Wake up goroutine + w.poller.wake() + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + var flags uint32 = agnosticEvents + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case. + // the only two possible errors are: + // EBADF, which happens when w.fd is not a valid file descriptor of any kind. + // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. + // Watch descriptors are invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. + return errno + } + + return nil +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + n int // Number of bytes read with read() + errno error // Syscall errno + ok bool // For poller.wait + ) + + defer close(w.doneResp) + defer close(w.Errors) + defer close(w.Events) + defer unix.Close(w.fd) + defer w.poller.close() + + for { + // See if we have been closed. 
+ if w.isClosed() { + return + } + + ok, errno = w.poller.wait() + if errno != nil { + select { + case w.Errors <- errno: + case <-w.done: + return + } + continue + } + + if !ok { + continue + } + + n, errno = unix.Read(w.fd, buf[:]) + // If a signal interrupted execution, see if we've been asked to close, and try again. + // http://man7.org/linux/man-pages/man7/signal.7.html : + // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" + if errno == unix.EINTR { + continue + } + + // unix.Read might have been woken up by Close. If so, we're done. + if w.isClosed() { + return + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + select { + case w.Errors <- err: + case <-w.done: + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + // Point "raw" to the event in the buffer + raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + mask := uint32(raw.Mask) + nameLen := uint32(raw.Len) + + if mask&unix.IN_Q_OVERFLOW != 0 { + select { + case w.Errors <- ErrEventOverflow: + case <-w.done: + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if !event.ignoreLinux(mask) { + select { + case w.Events <- event: + case <-w.done: + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// Certain types of events can be "ignored" and not sent over the Events +// channel. Such as events marked ignore by the kernel, or MODIFY events +// against files that do not exist. +func (e *Event) ignoreLinux(mask uint32) bool { + // Ignore anything the inotify API says to ignore + if mask&unix.IN_IGNORED == unix.IN_IGNORED { + return true + } + + // If the event is not a DELETE or RENAME, the file must exist. + // Otherwise the event is ignored. + // *Note*: this was put in place because it was seen that a MODIFY + // event was sent after the DELETE. This ignores that MODIFY and + // assumes a DELETE will come or has come if the file doesn't exist. 
+ if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { + _, statErr := os.Lstat(e.Name) + return os.IsNotExist(statErr) + } + return false +} + +// newEvent returns an platform-independent Event based on an inotify mask. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go new file mode 100644 index 00000000000..b33f2b4d4b7 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +type fdPoller struct { + fd int // File descriptor (as returned by the inotify_init() syscall) + epfd int // Epoll file descriptor + pipe [2]int // Pipe for waking up +} + +func emptyPoller(fd int) *fdPoller { + poller := new(fdPoller) + poller.fd = fd + poller.epfd = -1 + poller.pipe[0] = -1 + poller.pipe[1] = -1 + return poller +} + +// Create a new inotify poller. +// This creates an inotify handler, and an epoll handler. +func newFdPoller(fd int) (*fdPoller, error) { + var errno error + poller := emptyPoller(fd) + defer func() { + if errno != nil { + poller.close() + } + }() + poller.fd = fd + + // Create epoll fd + poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) + if poller.epfd == -1 { + return nil, errno + } + // Create pipe; pipe[0] is the read end, pipe[1] the write end. + errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) + if errno != nil { + return nil, errno + } + + // Register inotify fd with epoll + event := unix.EpollEvent{ + Fd: int32(poller.fd), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) + if errno != nil { + return nil, errno + } + + // Register pipe fd with epoll + event = unix.EpollEvent{ + Fd: int32(poller.pipe[0]), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) + if errno != nil { + return nil, errno + } + + return poller, nil +} + +// Wait using epoll. +// Returns true if something is ready to be read, +// false if there is not. +func (poller *fdPoller) wait() (bool, error) { + // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. + // I don't know whether epoll_wait returns the number of events returned, + // or the total number of events ready. + // I decided to catch both by making the buffer one larger than the maximum. + events := make([]unix.EpollEvent, 7) + for { + n, errno := unix.EpollWait(poller.epfd, events, -1) + if n == -1 { + if errno == unix.EINTR { + continue + } + return false, errno + } + if n == 0 { + // If there are no events, try again. + continue + } + if n > 6 { + // This should never happen. More events were returned than should be possible. 
+ return false, errors.New("epoll_wait returned more events than I know what to do with") + } + ready := events[:n] + epollhup := false + epollerr := false + epollin := false + for _, event := range ready { + if event.Fd == int32(poller.fd) { + if event.Events&unix.EPOLLHUP != 0 { + // This should not happen, but if it does, treat it as a wakeup. + epollhup = true + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the file descriptor, we should pretend + // something is ready to read, and let unix.Read pick up the error. + epollerr = true + } + if event.Events&unix.EPOLLIN != 0 { + // There is data to read. + epollin = true + } + } + if event.Fd == int32(poller.pipe[0]) { + if event.Events&unix.EPOLLHUP != 0 { + // Write pipe descriptor was closed, by us. This means we're closing down the + // watcher, and we should wake up. + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the pipe file descriptor. + // This is an absolute mystery, and should never ever happen. + return false, errors.New("Error on the pipe descriptor.") + } + if event.Events&unix.EPOLLIN != 0 { + // This is a regular wakeup, so we have to clear the buffer. + err := poller.clearWake() + if err != nil { + return false, err + } + } + } + } + + if epollhup || epollerr || epollin { + return true, nil + } + return false, nil + } +} + +// Close the write end of the poller. +func (poller *fdPoller) wake() error { + buf := make([]byte, 1) + n, errno := unix.Write(poller.pipe[1], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is full, poller will wake. + return nil + } + return errno + } + return nil +} + +func (poller *fdPoller) clearWake() error { + // You have to be woken up a LOT in order to get to 100! + buf := make([]byte, 100) + n, errno := unix.Read(poller.pipe[0], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is empty, someone else cleared our wake. + return nil + } + return errno + } + return nil +} + +// Close all poller file descriptors, but not the one passed to it. +func (poller *fdPoller) close() { + if poller.pipe[1] != -1 { + unix.Close(poller.pipe[1]) + } + if poller.pipe[0] != -1 { + unix.Close(poller.pipe[0]) + } + if poller.epfd != -1 { + unix.Close(poller.epfd) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go new file mode 100644 index 00000000000..86e76a3d676 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -0,0 +1,521 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + + kq int // File descriptor (as returned by the kqueue() syscall). + + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Map of watched file descriptors (key: path). + externalWatches map[string]bool // Map of watches added by user of the library. + dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. + paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. 
+ fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + kq, err := kqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + watches: make(map[string]int), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]bool), + externalWatches: make(map[string]bool), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + var pathsToRemove = make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() + // unlock before calling Remove, which also locks + + for _, name := range pathsToRemove { + w.Remove(name) + } + + // send a "quit" message to the reader goroutine + close(w.done) + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.externalWatches[name] = true + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) + } + + const registerRemove = unix.EV_DELETE + if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.paths, watchfd) + delete(w.dirFlags, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for _, path := range w.paths { + wdir, _ := filepath.Split(path.name) + if filepath.Clean(wdir) == name { + if !w.externalWatches[path.name] { + pathsToRemove = append(pathsToRemove, path.name) + } + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// keventWaitTime to block on each read from kevent +var keventWaitTime = durationToTimespec(100 * time.Millisecond) + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. 
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets. + if fi.Mode()&os.ModeSocket == os.ModeSocket { + return "", nil + } + + // Don't watch named pipes. + if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return "", nil + } + + // Follow Symlinks + // Unfortunately, Linux can add bogus symlinks to watch list without + // issue, and Windows can't do symlinks period (AFAIK). To maintain + // consistency, we will act like everything is fine. There will simply + // be no file events for broken symlinks. + // Hence the returns of nil on errors. + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + watchfd, err = unix.Open(name, openMode, 0700) + if watchfd == -1 { + return "", err + } + + isDir = fi.IsDir() + } + + const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE + if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + w.watches[name] = watchfd + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + eventBuffer := make([]unix.Kevent_t, 10) + +loop: + for { + // See if there is a message on the "done" channel + select { + case <-w.done: + break loop + default: + } + + // Get new events + kevents, err := read(w.kq, eventBuffer, &keventWaitTime) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + select { + case w.Errors <- err: + case <-w.done: + break loop + } + continue + } + + // Flush the events we received to the Events channel + for len(kevents) > 0 { + kevent := &kevents[0] + watchfd := int(kevent.Ident) + mask := uint32(kevent.Fflags) + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + event := newEvent(path.name, mask) + + if path.isDir && !(event.Op&Remove == Remove) { + // Double check to make sure the directory exists. 
This can happen when + // we do a rm -fr on a recursively watched folders and we receive a + // modification event first but the folder has been deleted and later + // receive the delete event + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + // mark is as delete event + event.Op |= Remove + } + } + + if event.Op&Rename == Rename || event.Op&Remove == Remove { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + // Send the event on the Events channel. + select { + case w.Events <- event: + case <-w.done: + break loop + } + } + + if event.Op&Remove == Remove { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. + if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + + // Move to next event + kevents = kevents[1:] + } + } + + // cleanup + err := unix.Close(w.kq) + if err != nil { + // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. + select { + case w.Errors <- err: + default: + } + } + close(w.Events) + close(w.Errors) +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +func newCreateEvent(name string) Event { + return Event{Name: name, Op: Create} +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + } + + return nil +} + +// sendDirectoryEvents searches the directory for newly created files +// and sends them over the event channel. This functionality is to have +// the BSD version of fsnotify match Linux inotify which provides a +// create event for files created in a watched directory. 
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + select { + case w.Errors <- err: + case <-w.done: + return + } + } + + // Search for new files + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + err := w.sendFileCreatedEventIfNew(filePath, fileInfo) + + if err != nil { + return + } + } +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. +func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + // Send create event + select { + case w.Events <- newCreateEvent(filePath): + case <-w.done: + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// kqueue creates a new kernel event queue and returns a descriptor. +func kqueue() (kq int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, err + } + return kq, nil +} + +// register events with the queue +func register(kq int, fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + + for i, fd := range fds { + // SetKevent converts int to the platform-specific types: + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // register the events + success, err := unix.Kevent(kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +// A timeout of nil blocks indefinitely, while 0 polls the queue. +func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(kq, nil, events, timeout) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +// durationToTimespec prepares a timeout value +func durationToTimespec(d time.Duration) unix.Timespec { + return unix.NsecToTimespec(d.Nanoseconds()) +} diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go new file mode 100644 index 00000000000..2306c4620bf --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go new file mode 100644 index 00000000000..870c4d6d184 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package fsnotify + +import "golang.org/x/sys/unix" + +// note: this constant is not defined on BSD +const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go new file mode 100644 index 00000000000..09436f31d82 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/windows.go @@ -0,0 +1,561 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "syscall" + "unsafe" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + isClosed bool // Set to true when Close() is first called + mu sync.Mutex // Map access + port syscall.Handle // Handle to completion port + watches watchMap // Map of watches (key: i-number) + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) + if e != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", e) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed { + return nil + } + w.isClosed = true + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + if w.isClosed { + return errors.New("watcher already closed") + } + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops watching the the named file or directory (non-recursively). 
+func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +const ( + // Options for AddWatch + sysFSONESHOT = 0x80000000 + sysFSONLYDIR = 0x1000000 + + // Events + sysFSACCESS = 0x1 + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCLOSE = 0x18 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + + // Special events + sysFSIGNORED = 0x8000 + sysFSQOVERFLOW = 0x4000 +) + +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle syscall.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov syscall.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [4096]byte +} + +type indexMap map[uint64]*watch +type watchMap map[uint32]indexMap + +func (w *Watcher) wakeupReader() error { + e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if e != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", e) + } + return nil +} + +func getDir(pathname string) (dir string, err error) { + attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) + if e != nil { + return "", os.NewSyscallError("GetFileAttributes", e) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func getIno(path string) (ino *inode, err error) { + h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), + syscall.FILE_LIST_DIRECTORY, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. 
+func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sysFSONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
+func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
+ n = uint32(unsafe.Sizeof(watch.buf)) + } + case syscall.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case syscall.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.Events <- newEvent("", sysFSQOVERFLOW) + w.Errors <- errors.New("short read in readEvents()") + break + } + + // Point "raw" to the event in the buffer + raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) + name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case syscall.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case syscall.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + if w.sendEvent(fullname, watch.names[name]&mask) { + if watch.names[name]&sysFSONESHOT != 0 { + delete(watch.names, name) + } + } + } + if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == syscall.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! 
+ if offset >= n { + w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") + break + } + } + + if err := w.startRead(watch); err != nil { + w.Errors <- err + } + } +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + event := newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +func toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSACCESS != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS + } + if mask&sysFSMODIFY != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func toFSnotifyFlags(action uint32) uint64 { + switch action { + case syscall.FILE_ACTION_ADDED: + return sysFSCREATE + case syscall.FILE_ACTION_REMOVED: + return sysFSDELETE + case syscall.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go deleted file mode 100644 index 8d82abe2133..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go +++ /dev/null @@ -1,78 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/struct/struct.proto - -package structpb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - structpb "google.golang.org/protobuf/types/known/structpb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/struct.proto. 
- -type NullValue = structpb.NullValue - -const NullValue_NULL_VALUE = structpb.NullValue_NULL_VALUE - -var NullValue_name = structpb.NullValue_name -var NullValue_value = structpb.NullValue_value - -type Struct = structpb.Struct -type Value = structpb.Value -type Value_NullValue = structpb.Value_NullValue -type Value_NumberValue = structpb.Value_NumberValue -type Value_StringValue = structpb.Value_StringValue -type Value_BoolValue = structpb.Value_BoolValue -type Value_StructValue = structpb.Value_StructValue -type Value_ListValue = structpb.Value_ListValue -type ListValue = structpb.ListValue - -var File_github_com_golang_protobuf_ptypes_struct_struct_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc = []byte{ - 0x0a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x3b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_struct_struct_proto_init() } -func file_github_com_golang_protobuf_ptypes_struct_struct_proto_init() { - if File_github_com_golang_protobuf_ptypes_struct_struct_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_struct_struct_proto = out.File - file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go deleted file mode 100644 index cc40f27ad30..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go +++ /dev/null @@ -1,71 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto - -package wrappers - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/wrappers.proto. - -type DoubleValue = wrapperspb.DoubleValue -type FloatValue = wrapperspb.FloatValue -type Int64Value = wrapperspb.Int64Value -type UInt64Value = wrapperspb.UInt64Value -type Int32Value = wrapperspb.Int32Value -type UInt32Value = wrapperspb.UInt32Value -type BoolValue = wrapperspb.BoolValue -type StringValue = wrapperspb.StringValue -type BytesValue = wrapperspb.BytesValue - -var File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = []byte{ - 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2f, 0x77, 0x72, 0x61, - 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, - 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x3b, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, - 0x72, 0x73, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() } -func file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() { - if File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto = out.File - file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index c9a63ceda5e..6656186846e 
100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -6,6 +6,10 @@ // // This package is intended to be a more powerful and safer alternative to // reflect.DeepEqual for comparing whether two values are semantically equal. +// It is intended to only be used in tests, as performance is not a goal and +// it may panic if it cannot compare the values. Its propensity towards +// panicking means that its unsuitable for production environments where a +// spurious panic may be fatal. // // The primary features of cmp are: // @@ -86,6 +90,52 @@ import ( // If there is a cycle, then the pointed at values are considered equal // only if both addresses were previously visited in the same path step. func Equal(x, y interface{}, opts ...Option) bool { + s := newState(opts) + s.compareAny(rootStep(x, y)) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values: +// y - x. It returns an empty string if and only if Equal returns true for the +// same input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from y, +// a "+" prefix to indicates an element added to y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. +// +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. +func Diff(x, y interface{}, opts ...Option) string { + s := newState(opts) + + // Optimization: If there are no other reporters, we can optimize for the + // common case where the result is equal (and thus no reported difference). + // This avoids the expensive construction of a difference tree. + if len(s.reporters) == 0 { + s.compareAny(rootStep(x, y)) + if s.result.Equal() { + return "" + } + s.result = diff.Result{} // Reset results + } + + r := new(defaultReporter) + s.reporters = append(s.reporters, reporter{r}) + s.compareAny(rootStep(x, y)) + d := r.String() + if (d == "") != s.result.Equal() { + panic("inconsistent difference and equality results") + } + return d +} + +// rootStep constructs the first path step. If x and y have differing types, +// then they are stored within an empty interface type. +func rootStep(x, y interface{}) PathStep { vx := reflect.ValueOf(x) vy := reflect.ValueOf(y) @@ -108,33 +158,7 @@ func Equal(x, y interface{}, opts ...Option) bool { t = vx.Type() } - s := newState(opts) - s.compareAny(&pathStep{t, vx, vy}) - return s.result.Equal() -} - -// Diff returns a human-readable report of the differences between two values. -// It returns an empty string if and only if Equal returns true for the same -// input values and options. -// -// The output is displayed as a literal in pseudo-Go syntax. -// At the start of each line, a "-" prefix indicates an element removed from x, -// a "+" prefix to indicates an element added to y, and the lack of a prefix -// indicates an element common to both x and y. If possible, the output -// uses fmt.Stringer.String or error.Error methods to produce more humanly -// readable outputs. 
In such cases, the string is prefixed with either an -// 's' or 'e' character, respectively, to indicate that the method was called. -// -// Do not depend on this output being stable. If you need the ability to -// programmatically interpret the difference, consider using a custom Reporter. -func Diff(x, y interface{}, opts ...Option) string { - r := new(defaultReporter) - eq := Equal(x, y, Options(opts), Reporter(r)) - d := r.String() - if (d == "") != eq { - panic("inconsistent difference and equality results") - } - return d + return &pathStep{t, vx, vy} } type state struct { @@ -352,7 +376,7 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { // assuming that T is assignable to R. // Otherwise, it returns the input value as is. func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143). + // TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143). if !flags.AtLeastGo110 { if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { return reflect.New(t).Elem() @@ -362,6 +386,7 @@ func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { } func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var addr bool var vax, vay reflect.Value // Addressable versions of vx and vy var mayForce, mayForceInit bool @@ -383,6 +408,7 @@ func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { // For retrieveUnexportedField to work, the parent struct must // be addressable. Create a new copy of the values if // necessary to make them addressable. + addr = vx.CanAddr() || vy.CanAddr() vax = makeAddressable(vx) vay = makeAddressable(vy) } @@ -393,6 +419,7 @@ func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { mayForceInit = true } step.mayForce = mayForce + step.paddr = addr step.pvx = vax step.pvy = vay step.field = t.Field(i) diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go index dd032354fef..dfa5d213769 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -10,6 +10,6 @@ import "reflect" const supportExporters = false -func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value { +func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value { panic("no support for forcibly accessing unexported fields") } diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go index 57020e26ca7..351f1a34b46 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -17,9 +17,19 @@ const supportExporters = true // a struct such that the value has read-write permissions. // // The parent struct, v, must be addressable, while f must be a StructField -// describing the field to retrieve. -func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value { - // See https://github.com/google/go-cmp/issues/167 for discussion of the - // following expression. - return reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() +// describing the field to retrieve. If addr is false, +// then the returned value will be shallowed copied to be non-addressable. 
+func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value { + ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() + if !addr { + // A field is addressable if and only if the struct is addressable. + // If the original parent value was not addressable, shallow copy the + // value to make it non-addressable to avoid leaking an implementation + // detail of how forcibly exporting a field works. + if ve.Kind() == reflect.Interface && ve.IsNil() { + return reflect.Zero(f.Type) + } + return reflect.ValueOf(ve.Interface()).Convert(f.Type) + } + return ve } diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index 3d2e42662ca..730e223ee7b 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -12,6 +12,13 @@ // is more important than obtaining a minimal Levenshtein distance. package diff +import ( + "math/rand" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + // EditType represents a single operation within an edit-script. type EditType uint8 @@ -112,6 +119,8 @@ func (r Result) Similar() bool { return r.NumSame+1 >= r.NumDiff } +var randInt = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) + // Difference reports whether two lists of lengths nx and ny are equal // given the definition of equality provided as f. // @@ -159,6 +168,17 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // A vertical edge is equivalent to inserting a symbol from list Y. // A diagonal edge is equivalent to a matching symbol between both X and Y. + // To ensure flexibility in changing the algorithm in the future, + // introduce some degree of deliberate instability. + // This is achieved by fiddling the zigzag iterator to start searching + // the graph starting from the bottom-right versus than the top-left. + // The result may differ depending on the starting search location, + // but still produces a valid edit script. + zigzagInit := randInt // either 0 or 1 + if flags.Deterministic { + zigzagInit = 0 + } + // Invariants: // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny @@ -209,7 +229,7 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { break } - for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + for stop1, stop2, i := false, false, zigzagInit; !(stop1 && stop2) && searchBudget > 0; i++ { // Search in a diagonal pattern for a match. z := zigzag(i) p := point{fwdFrontier.X + z, fwdFrontier.Y - z} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go new file mode 100644 index 00000000000..8228e7d512a --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -0,0 +1,157 @@ +// Copyright 2020, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "reflect" + "strconv" +) + +// TypeString is nearly identical to reflect.Type.String, +// but has an additional option to specify that full type names be used. 
+func TypeString(t reflect.Type, qualified bool) string { + return string(appendTypeName(nil, t, qualified, false)) +} + +func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte { + // BUG: Go reflection provides no way to disambiguate two named types + // of the same name and within the same package, + // but declared within the namespace of different functions. + + // Named type. + if t.Name() != "" { + if qualified && t.PkgPath() != "" { + b = append(b, '"') + b = append(b, t.PkgPath()...) + b = append(b, '"') + b = append(b, '.') + b = append(b, t.Name()...) + } else { + b = append(b, t.String()...) + } + return b + } + + // Unnamed type. + switch k := t.Kind(); k { + case reflect.Bool, reflect.String, reflect.UnsafePointer, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + b = append(b, k.String()...) + case reflect.Chan: + if t.ChanDir() == reflect.RecvDir { + b = append(b, "<-"...) + } + b = append(b, "chan"...) + if t.ChanDir() == reflect.SendDir { + b = append(b, "<-"...) + } + b = append(b, ' ') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Func: + if !elideFunc { + b = append(b, "func"...) + } + b = append(b, '(') + for i := 0; i < t.NumIn(); i++ { + if i > 0 { + b = append(b, ", "...) + } + if i == t.NumIn()-1 && t.IsVariadic() { + b = append(b, "..."...) + b = appendTypeName(b, t.In(i).Elem(), qualified, false) + } else { + b = appendTypeName(b, t.In(i), qualified, false) + } + } + b = append(b, ')') + switch t.NumOut() { + case 0: + // Do nothing + case 1: + b = append(b, ' ') + b = appendTypeName(b, t.Out(0), qualified, false) + default: + b = append(b, " ("...) + for i := 0; i < t.NumOut(); i++ { + if i > 0 { + b = append(b, ", "...) + } + b = appendTypeName(b, t.Out(i), qualified, false) + } + b = append(b, ')') + } + case reflect.Struct: + b = append(b, "struct{ "...) + for i := 0; i < t.NumField(); i++ { + if i > 0 { + b = append(b, "; "...) + } + sf := t.Field(i) + if !sf.Anonymous { + if qualified && sf.PkgPath != "" { + b = append(b, '"') + b = append(b, sf.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, sf.Name...) + b = append(b, ' ') + } + b = appendTypeName(b, sf.Type, qualified, false) + if sf.Tag != "" { + b = append(b, ' ') + b = strconv.AppendQuote(b, string(sf.Tag)) + } + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + case reflect.Slice, reflect.Array: + b = append(b, '[') + if k == reflect.Array { + b = strconv.AppendUint(b, uint64(t.Len()), 10) + } + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Map: + b = append(b, "map["...) + b = appendTypeName(b, t.Key(), qualified, false) + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Ptr: + b = append(b, '*') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Interface: + b = append(b, "interface{ "...) + for i := 0; i < t.NumMethod(); i++ { + if i > 0 { + b = append(b, "; "...) + } + m := t.Method(i) + if qualified && m.PkgPath != "" { + b = append(b, '"') + b = append(b, m.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, m.Name...) 
+ b = appendTypeName(b, m.Type, qualified, true) + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + default: + panic("invalid kind: " + k.String()) + } + return b +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go index 0a01c4796f1..e9e384a1c89 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -21,3 +21,13 @@ func PointerOf(v reflect.Value) Pointer { // assumes that the GC implementation does not use a moving collector. return Pointer{v.Pointer(), v.Type()} } + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == 0 +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return p.p +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go index da134ae2a80..b50c17ec725 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -24,3 +24,13 @@ func PointerOf(v reflect.Value) Pointer { // which is necessary if the GC ever uses a moving collector. return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} } + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == nil +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return uintptr(p.p) +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index abbd2a63b69..4b0407a7f88 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -225,11 +225,14 @@ func (validator) apply(s *state, vx, vy reflect.Value) { // Unable to Interface implies unexported field without visibility access. if !vx.CanInterface() || !vy.CanInterface() { - const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" + help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" var name string if t := s.curPath.Index(-2).Type(); t.Name() != "" { // Named type with unexported fields. name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType + if _, ok := reflect.New(t).Interface().(error); ok { + help = "consider using cmpopts.EquateErrors to compare error values" + } } else { // Unnamed type with unexported fields. Derive PkgPath from field. var pkgPath string diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index 509d6b8527d..603dbb0026e 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -177,7 +177,8 @@ type structField struct { // pvx, pvy, and field are only valid if unexported is true. unexported bool mayForce bool // Forcibly allow visibility - pvx, pvy reflect.Value // Parent values + paddr bool // Was parent addressable? 
+ pvx, pvy reflect.Value // Parent values (always addressible) field reflect.StructField // Field information } @@ -189,8 +190,8 @@ func (sf StructField) Values() (vx, vy reflect.Value) { // Forcibly obtain read-write access to an unexported struct field. if sf.mayForce { - vx = retrieveUnexportedField(sf.pvx, sf.field) - vy = retrieveUnexportedField(sf.pvy, sf.field) + vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr) + vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr) return vx, vy // CanInterface reports true } return sf.vx, sf.vy // CanInterface reports false diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go index 6ddf29993e5..aafcb363545 100644 --- a/vendor/github.com/google/go-cmp/cmp/report.go +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -41,7 +41,10 @@ func (r *defaultReporter) String() string { if r.root.NumDiff == 0 { return "" } - return formatOptions{}.FormatDiff(r.root).String() + ptrs := new(pointerReferences) + text := formatOptions{}.FormatDiff(r.root, ptrs) + resolveReferences(text) + return text.String() } func assert(ok bool) { diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 17a05eede48..9e2180964f1 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -11,14 +11,6 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) -// TODO: Enforce limits? -// * Enforce maximum number of records to print per node? -// * Enforce maximum size in bytes allowed? -// * As a heuristic, use less verbosity for equal nodes than unequal nodes. -// TODO: Enforce unique outputs? -// * Avoid Stringer methods if it results in same output? -// * Print pointer address if outputs still equal? - // numContextRecords is the number of surrounding equal records to print. const numContextRecords = 2 @@ -71,24 +63,66 @@ func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { opts.TypeMode = t return opts } +func (opts formatOptions) WithVerbosity(level int) formatOptions { + opts.VerbosityLevel = level + opts.LimitVerbosity = true + return opts +} +func (opts formatOptions) verbosity() uint { + switch { + case opts.VerbosityLevel < 0: + return 0 + case opts.VerbosityLevel > 16: + return 16 // some reasonable maximum to avoid shift overflow + default: + return uint(opts.VerbosityLevel) + } +} + +const maxVerbosityPreset = 3 + +// verbosityPreset modifies the verbosity settings given an index +// between 0 and maxVerbosityPreset, inclusive. +func verbosityPreset(opts formatOptions, i int) formatOptions { + opts.VerbosityLevel = int(opts.verbosity()) + 2*i + if i > 0 { + opts.AvoidStringer = true + } + if i >= maxVerbosityPreset { + opts.PrintAddresses = true + opts.QualifiedNames = true + } + return opts +} // FormatDiff converts a valueNode tree into a textNode tree, where the later // is a textual representation of the differences detected in the former. -func (opts formatOptions) FormatDiff(v *valueNode) textNode { +func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) { + if opts.DiffMode == diffIdentical { + opts = opts.WithVerbosity(1) + } else { + opts = opts.WithVerbosity(3) + } + // Check whether we have specialized formatting for this node. // This is not necessary, but helpful for producing more readable outputs. 
if opts.CanFormatDiffSlice(v) { return opts.FormatDiffSlice(v) } + var parentKind reflect.Kind + if v.parent != nil && v.parent.TransformerName == "" { + parentKind = v.parent.Type.Kind() + } + // For leaf nodes, format the value based on the reflect.Values alone. if v.MaxDepth == 0 { switch opts.DiffMode { case diffUnknown, diffIdentical: // Format Equal. if v.NumDiff == 0 { - outx := opts.FormatValue(v.ValueX, visitedPointers{}) - outy := opts.FormatValue(v.ValueY, visitedPointers{}) + outx := opts.FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.FormatValue(v.ValueY, parentKind, ptrs) if v.NumIgnored > 0 && v.NumSame == 0 { return textEllipsis } else if outx.Len() < outy.Len() { @@ -101,8 +135,13 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode { // Format unequal. assert(opts.DiffMode == diffUnknown) var list textList - outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{}) - outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{}) + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i).WithTypeMode(elideType) + outx = opts2.FormatValue(v.ValueX, parentKind, ptrs) + outy = opts2.FormatValue(v.ValueY, parentKind, ptrs) + } if outx != nil { list = append(list, textRecord{Diff: '-', Value: outx}) } @@ -111,34 +150,57 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode { } return opts.WithTypeMode(emitType).FormatType(v.Type, list) case diffRemoved: - return opts.FormatValue(v.ValueX, visitedPointers{}) + return opts.FormatValue(v.ValueX, parentKind, ptrs) case diffInserted: - return opts.FormatValue(v.ValueY, visitedPointers{}) + return opts.FormatValue(v.ValueY, parentKind, ptrs) default: panic("invalid diff mode") } } + // Register slice element to support cycle detection. + if parentKind == reflect.Slice { + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true) + defer ptrs.Pop() + defer func() { out = wrapTrunkReferences(ptrRefs, out) }() + } + // Descend into the child value node. if v.TransformerName != "" { - out := opts.WithTypeMode(emitType).FormatDiff(v.Value) - out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"} + out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) + out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"} return opts.FormatType(v.Type, out) } else { switch k := v.Type.Kind(); k { - case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: - return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k)) + case reflect.Struct, reflect.Array, reflect.Slice: + out = opts.formatDiffList(v.Records, k, ptrs) + out = opts.FormatType(v.Type, out) + case reflect.Map: + // Register map to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.formatDiffList(v.Records, k, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = opts.FormatType(v.Type, out) case reflect.Ptr: - return textWrap{"&", opts.FormatDiff(v.Value), ""} + // Register pointer to support cycle detection. 
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.FormatDiff(v.Value, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = &textWrap{Prefix: "&", Value: out} case reflect.Interface: - return opts.WithTypeMode(emitType).FormatDiff(v.Value) + out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) default: panic(fmt.Sprintf("%v cannot have children", k)) } + return out } } -func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode { +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode { // Derive record name based on the data structure kind. var name string var formatKey func(reflect.Value) string @@ -154,7 +216,17 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te case reflect.Map: name = "entry" opts = opts.WithTypeMode(elideType) - formatKey = formatMapKey + formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) } + } + + maxLen := -1 + if opts.LimitVerbosity { + if opts.DiffMode == diffIdentical { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + } else { + maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc... + } + opts.VerbosityLevel-- } // Handle unification. @@ -163,6 +235,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te var list textList var deferredEllipsis bool // Add final "..." to indicate records were dropped for _, r := range recs { + if len(list) == maxLen { + deferredEllipsis = true + break + } + // Elide struct fields that are zero value. if k == reflect.Struct { var isZero bool @@ -186,23 +263,31 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te } continue } - if out := opts.FormatDiff(r.Value); out != nil { + if out := opts.FormatDiff(r.Value, ptrs); out != nil { list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) } } if deferredEllipsis { list.AppendEllipsis(diffStats{}) } - return textWrap{"{", list, "}"} + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} case diffUnknown: default: panic("invalid diff mode") } // Handle differencing. + var numDiffs int var list textList + var keys []reflect.Value // invariant: len(list) == len(keys) groups := coalesceAdjacentRecords(name, recs) + maxGroup := diffStats{Name: name} for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + // Handle equal records. if ds.NumDiff() == 0 { // Compute the number of leading and trailing records to print. @@ -226,16 +311,21 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te // Format the equal values. 
for _, r := range recs[:numLo] { - out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } if numEqual > numLo+numHi { ds.NumIdentical -= numLo + numHi list.AppendEllipsis(ds) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } } for _, r := range recs[numEqual-numHi : numEqual] { - out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } recs = recs[numEqual:] continue @@ -247,24 +337,70 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te case opts.CanFormatDiffSlice(r.Value): out := opts.FormatDiffSlice(r.Value) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) case r.Value.NumChildren == r.Value.MaxDepth: - outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value) - outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value) + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i) + outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + } if outx != nil { list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + keys = append(keys, r.Key) } if outy != nil { list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + keys = append(keys, r.Key) } default: - out := opts.FormatDiff(r.Value) + out := opts.FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } } recs = recs[ds.NumDiff():] + numDiffs += ds.NumDiff() + } + if maxGroup.IsZero() { + assert(len(recs) == 0) + } else { + list.AppendEllipsis(maxGroup) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } } - assert(len(recs) == 0) - return textWrap{"{", list, "}"} + assert(len(list) == len(keys)) + + // For maps, the default formatting logic uses fmt.Stringer which may + // produce ambiguous output. Avoid calling String to disambiguate. + if k == reflect.Map { + var ambiguous bool + seenKeys := map[string]reflect.Value{} + for i, currKey := range keys { + if currKey.IsValid() { + strKey := list[i].Key + prevKey, seen := seenKeys[strKey] + if seen && prevKey.CanInterface() && currKey.CanInterface() { + ambiguous = prevKey.Interface() != currKey.Interface() + if ambiguous { + break + } + } + seenKeys[strKey] = currKey + } + } + if ambiguous { + for i, k := range keys { + if k.IsValid() { + list[i].Key = formatMapKey(k, true, ptrs) + } + } + } + } + + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} } // coalesceAdjacentRecords coalesces the list of records into groups of diff --git a/vendor/github.com/google/go-cmp/cmp/report_references.go b/vendor/github.com/google/go-cmp/cmp/report_references.go new file mode 100644 index 00000000000..d620c2c20e7 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_references.go @@ -0,0 +1,264 @@ +// Copyright 2020, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +const ( + pointerDelimPrefix = "⟪" + pointerDelimSuffix = "⟫" +) + +// formatPointer prints the address of the pointer. +func formatPointer(p value.Pointer, withDelims bool) string { + v := p.Uintptr() + if flags.Deterministic { + v = 0xdeadf00f // Only used for stable testing purposes + } + if withDelims { + return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix + } + return formatHex(uint64(v)) +} + +// pointerReferences is a stack of pointers visited so far. +type pointerReferences [][2]value.Pointer + +func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) { + if deref && vx.IsValid() { + vx = vx.Addr() + } + if deref && vy.IsValid() { + vy = vy.Addr() + } + switch d { + case diffUnknown, diffIdentical: + pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)} + case diffRemoved: + pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}} + case diffInserted: + pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)} + } + *ps = append(*ps, pp) + return pp +} + +func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) { + p = value.PointerOf(v) + for _, pp := range *ps { + if p == pp[0] || p == pp[1] { + return p, true + } + } + *ps = append(*ps, [2]value.Pointer{p, p}) + return p, false +} + +func (ps *pointerReferences) Pop() { + *ps = (*ps)[:len(*ps)-1] +} + +// trunkReferences is metadata for a textNode indicating that the sub-tree +// represents the value for either pointer in a pair of references. +type trunkReferences struct{ pp [2]value.Pointer } + +// trunkReference is metadata for a textNode indicating that the sub-tree +// represents the value for the given pointer reference. +type trunkReference struct{ p value.Pointer } + +// leafReference is metadata for a textNode indicating that the value is +// truncated as it refers to another part of the tree (i.e., a trunk). +type leafReference struct{ p value.Pointer } + +func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode { + switch { + case pp[0].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[1]}} + case pp[1].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + case pp[0] == pp[1]: + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + default: + return &textWrap{Value: s, Metadata: trunkReferences{pp}} + } +} +func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode { + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}} +} +func makeLeafReference(p value.Pointer, printAddress bool) textNode { + out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"} + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}} +} + +// resolveReferences walks the textNode tree searching for any leaf reference +// metadata and resolves each against the corresponding trunk references. +// Since pointer addresses in memory are not particularly readable to the user, +// it replaces each pointer value with an arbitrary and unique reference ID. 
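The `pointerReferences` stack introduced above exists so the reporter can walk self-referential values without recursing forever. A minimal standalone sketch of that idea follows (a visited-pointer set guarding a recursive walk, with a short reference marker emitted when a pointer is seen again); it is purely illustrative and is not go-cmp's actual traversal code.

```go
package main

import "fmt"

type node struct {
	Value int
	Next  *node
}

// walk prints a possibly-cyclic list without looping forever: remember the
// pointers already visited on the current path and print a reference marker
// instead of descending into them again.
func walk(n *node, seen map[*node]bool) {
	if seen[n] {
		fmt.Printf("ref⟪%p⟫\n", n) // cycle: refer back instead of recursing
		return
	}
	seen[n] = true
	fmt.Println(n.Value)
	if n.Next != nil {
		walk(n.Next, seen)
	}
}

func main() {
	head := &node{Value: 1}
	head.Next = &node{Value: 2, Next: head} // second node points back to the head
	walk(head, map[*node]bool{})            // prints 1, 2, then a reference marker
}
```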
+func resolveReferences(s textNode) { + var walkNodes func(textNode, func(textNode)) + walkNodes = func(s textNode, f func(textNode)) { + f(s) + switch s := s.(type) { + case *textWrap: + walkNodes(s.Value, f) + case textList: + for _, r := range s { + walkNodes(r.Value, f) + } + } + } + + // Collect all trunks and leaves with reference metadata. + var trunks, leaves []*textWrap + walkNodes(s, func(s textNode) { + if s, ok := s.(*textWrap); ok { + switch s.Metadata.(type) { + case leafReference: + leaves = append(leaves, s) + case trunkReference, trunkReferences: + trunks = append(trunks, s) + } + } + }) + + // No leaf references to resolve. + if len(leaves) == 0 { + return + } + + // Collect the set of all leaf references to resolve. + leafPtrs := make(map[value.Pointer]bool) + for _, leaf := range leaves { + leafPtrs[leaf.Metadata.(leafReference).p] = true + } + + // Collect the set of trunk pointers that are always paired together. + // This allows us to assign a single ID to both pointers for brevity. + // If a pointer in a pair ever occurs by itself or as a different pair, + // then the pair is broken. + pairedTrunkPtrs := make(map[value.Pointer]value.Pointer) + unpair := func(p value.Pointer) { + if !pairedTrunkPtrs[p].IsNil() { + pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half + } + pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + unpair(p.p) // standalone pointer cannot be part of a pair + case trunkReferences: + p0, ok0 := pairedTrunkPtrs[p.pp[0]] + p1, ok1 := pairedTrunkPtrs[p.pp[1]] + switch { + case !ok0 && !ok1: + // Register the newly seen pair. + pairedTrunkPtrs[p.pp[0]] = p.pp[1] + pairedTrunkPtrs[p.pp[1]] = p.pp[0] + case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]: + // Exact pair already seen; do nothing. + default: + // Pair conflicts with some other pair; break all pairs. + unpair(p.pp[0]) + unpair(p.pp[1]) + } + } + } + + // Correlate each pointer referenced by leaves to a unique identifier, + // and print the IDs for each trunk that matches those pointers. 
+ var nextID uint + ptrIDs := make(map[value.Pointer]uint) + newID := func() uint { + id := nextID + nextID++ + return id + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + if print := leafPtrs[p.p]; print { + id, ok := ptrIDs[p.p] + if !ok { + id = newID() + ptrIDs[p.p] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } + case trunkReferences: + print0 := leafPtrs[p.pp[0]] + print1 := leafPtrs[p.pp[1]] + if print0 || print1 { + id0, ok0 := ptrIDs[p.pp[0]] + id1, ok1 := ptrIDs[p.pp[1]] + isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0] + if isPair { + var id uint + assert(ok0 == ok1) // must be seen together or not at all + if ok0 { + assert(id0 == id1) // must have the same ID + id = id0 + } else { + id = newID() + ptrIDs[p.pp[0]] = id + ptrIDs[p.pp[1]] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } else { + if print0 && !ok0 { + id0 = newID() + ptrIDs[p.pp[0]] = id0 + } + if print1 && !ok1 { + id1 = newID() + ptrIDs[p.pp[1]] = id1 + } + switch { + case print0 && print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1)) + case print0: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)) + case print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1)) + } + } + } + } + } + + // Update all leaf references with the unique identifier. + for _, leaf := range leaves { + if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok { + leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id)) + } + } +} + +func formatReference(id uint) string { + return fmt.Sprintf("ref#%d", id) +} + +func updateReferencePrefix(prefix, ref string) string { + if prefix == "" { + return pointerDelimPrefix + ref + pointerDelimSuffix + } + suffix := strings.TrimPrefix(prefix, pointerDelimPrefix) + return pointerDelimPrefix + ref + ": " + suffix +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 2761b628921..786f671269c 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -5,13 +5,14 @@ package cmp import ( + "bytes" "fmt" "reflect" "strconv" "strings" "unicode" + "unicode/utf8" - "github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/value" ) @@ -20,14 +21,22 @@ type formatValueOptions struct { // methods like error.Error or fmt.Stringer.String. AvoidStringer bool - // ShallowPointers controls whether to avoid descending into pointers. - // Useful when printing map keys, where pointer comparison is performed - // on the pointer address rather than the pointed-at value. - ShallowPointers bool - // PrintAddresses controls whether to print the address of all pointers, // slice elements, and maps. PrintAddresses bool + + // QualifiedNames controls whether FormatType uses the fully qualified name + // (including the full package path as opposed to just the package name). + QualifiedNames bool + + // VerbosityLevel controls the amount of output to produce. + // A higher value produces more output. A value of zero or lower produces + // no output (represented using an ellipsis). + // If LimitVerbosity is false, then the level is treated as infinite. + VerbosityLevel int + + // LimitVerbosity specifies that formatting should respect VerbosityLevel. 
+ LimitVerbosity bool } // FormatType prints the type as if it were wrapping s. @@ -44,12 +53,15 @@ func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { default: return s } + if opts.DiffMode == diffIdentical { + return s // elide type for identical nodes + } case elideType: return s } // Determine the type label, applying special handling for unnamed types. - typeName := t.String() + typeName := value.TypeString(t, opts.QualifiedNames) if t.Name() == "" { // According to Go grammar, certain type literals contain symbols that // do not strongly bind to the next lexicographical token (e.g., *T). @@ -57,39 +69,77 @@ func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { case reflect.Chan, reflect.Func, reflect.Ptr: typeName = "(" + typeName + ")" } - typeName = strings.Replace(typeName, "struct {", "struct{", -1) - typeName = strings.Replace(typeName, "interface {", "interface{", -1) } + return &textWrap{Prefix: typeName, Value: wrapParens(s)} +} + +// wrapParens wraps s with a set of parenthesis, but avoids it if the +// wrapped node itself is already surrounded by a pair of parenthesis or braces. +// It handles unwrapping one level of pointer-reference nodes. +func wrapParens(s textNode) textNode { + var refNode *textWrap + if s2, ok := s.(*textWrap); ok { + // Unwrap a single pointer reference node. + switch s2.Metadata.(type) { + case leafReference, trunkReference, trunkReferences: + refNode = s2 + if s3, ok := refNode.Value.(*textWrap); ok { + s2 = s3 + } + } - // Avoid wrap the value in parenthesis if unnecessary. - if s, ok := s.(textWrap); ok { - hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")") - hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}") + // Already has delimiters that make parenthesis unnecessary. + hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")") + hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}") if hasParens || hasBraces { - return textWrap{typeName, s, ""} + return s } } - return textWrap{typeName + "(", s, ")"} + if refNode != nil { + refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"} + return s + } + return &textWrap{Prefix: "(", Value: s, Suffix: ")"} } // FormatValue prints the reflect.Value, taking extra care to avoid descending -// into pointers already in m. As pointers are visited, m is also updated. -func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { +// into pointers already in ptrs. As pointers are visited, ptrs is also updated. +func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) { if !v.IsValid() { return nil } t := v.Type() + // Check slice element for cycles. + if parentKind == reflect.Slice { + ptrRef, visited := ptrs.Push(v.Addr()) + if visited { + return makeLeafReference(ptrRef, false) + } + defer ptrs.Pop() + defer func() { out = wrapTrunkReference(ptrRef, false, out) }() + } + // Check whether there is an Error or String method to call. if !opts.AvoidStringer && v.CanInterface() { // Avoid calling Error or String methods on nil receivers since many // implementations crash when doing so. 
if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { - switch v := v.Interface().(type) { - case error: - return textLine("e" + formatString(v.Error())) - case fmt.Stringer: - return textLine("s" + formatString(v.String())) + var prefix, strVal string + func() { + // Swallow and ignore any panics from String or Error. + defer func() { recover() }() + switch v := v.Interface().(type) { + case error: + strVal = v.Error() + prefix = "e" + case fmt.Stringer: + strVal = v.String() + prefix = "s" + } + }() + if prefix != "" { + return opts.formatString(prefix, strVal) } } } @@ -102,94 +152,140 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t } }() - var ptr string switch t.Kind() { case reflect.Bool: return textLine(fmt.Sprint(v.Bool())) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return textLine(fmt.Sprint(v.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - // Unnamed uints are usually bytes or words, so use hexadecimal. - if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return textLine(fmt.Sprint(v.Uint())) + case reflect.Uint8: + if parentKind == reflect.Slice || parentKind == reflect.Array { return textLine(formatHex(v.Uint())) } return textLine(fmt.Sprint(v.Uint())) + case reflect.Uintptr: + return textLine(formatHex(v.Uint())) case reflect.Float32, reflect.Float64: return textLine(fmt.Sprint(v.Float())) case reflect.Complex64, reflect.Complex128: return textLine(fmt.Sprint(v.Complex())) case reflect.String: - return textLine(formatString(v.String())) + return opts.formatString("", v.String()) case reflect.UnsafePointer, reflect.Chan, reflect.Func: - return textLine(formatPointer(v)) + return textLine(formatPointer(value.PointerOf(v), true)) case reflect.Struct: var list textList + v := makeAddressable(v) // needed for retrieveUnexportedField + maxLen := v.NumField() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } for i := 0; i < v.NumField(); i++ { vv := v.Field(i) if value.IsZero(vv) { continue // Elide fields with zero values } - s := opts.WithTypeMode(autoType).FormatValue(vv, m) - list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sf := t.Field(i) + if supportExporters && !isExported(sf.Name) { + vv = retrieveUnexportedField(v, sf, true) + } + s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) + list = append(list, textRecord{Key: sf.Name, Value: s}) } - return textWrap{"{", list, "}"} + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} case reflect.Slice: if v.IsNil() { return textNil } - if opts.PrintAddresses { - ptr = formatPointer(v) + + // Check whether this is a []byte of text data. + if t.Elem() == reflect.TypeOf(byte(0)) { + b := v.Bytes() + isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) } + if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { + out = opts.formatString("", string(b)) + return opts.WithTypeMode(emitType).FormatType(t, out) + } } + fallthrough case reflect.Array: + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... 
+ opts.VerbosityLevel-- + } var list textList for i := 0; i < v.Len(); i++ { - vi := v.Index(i) - if vi.CanAddr() { // Check for cyclic elements - p := vi.Addr() - if m.Visit(p) { - var out textNode - out = textLine(formatPointer(p)) - out = opts.WithTypeMode(emitType).FormatType(p.Type(), out) - out = textWrap{"*", out, ""} - list = append(list, textRecord{Value: out}) - continue - } + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break } - s := opts.WithTypeMode(elideType).FormatValue(vi, m) + s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs) list = append(list, textRecord{Value: s}) } - return textWrap{ptr + "{", list, "}"} + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + if t.Kind() == reflect.Slice && opts.PrintAddresses { + header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap()) + out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out} + } + return out case reflect.Map: if v.IsNil() { return textNil } - if m.Visit(v) { - return textLine(formatPointer(v)) + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + return makeLeafReference(ptrRef, opts.PrintAddresses) } + defer ptrs.Pop() + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } var list textList for _, k := range value.SortKeys(v.MapKeys()) { - sk := formatMapKey(k) - sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sk := formatMapKey(k, false, ptrs) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs) list = append(list, textRecord{Key: sk, Value: sv}) } - if opts.PrintAddresses { - ptr = formatPointer(v) - } - return textWrap{ptr + "{", list, "}"} + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + return out case reflect.Ptr: if v.IsNil() { return textNil } - if m.Visit(v) || opts.ShallowPointers { - return textLine(formatPointer(v)) - } - if opts.PrintAddresses { - ptr = formatPointer(v) + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + out = makeLeafReference(ptrRef, opts.PrintAddresses) + return &textWrap{Prefix: "&", Value: out} } + defer ptrs.Pop() + skipType = true // Let the underlying value print the type instead - return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} + out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + out = &textWrap{Prefix: "&", Value: out} + return out case reflect.Interface: if v.IsNil() { return textNil @@ -197,19 +293,65 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t // Interfaces accept different concrete types, // so configure the underlying value to explicitly print the type. skipType = true // Print the concrete type instead - return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m) + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) default: panic(fmt.Sprintf("%v kind not handled", v.Kind())) } } +func (opts formatOptions) formatString(prefix, s string) textNode { + maxLen := len(s) + maxLines := strings.Count(s, "\n") + 1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc... + maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... 
+ } + + // For multiline strings, use the triple-quote syntax, + // but only use it when printing removed or inserted nodes since + // we only want the extra verbosity for those cases. + lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n") + isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+') + for i := 0; i < len(lines) && isTripleQuoted; i++ { + lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + line := lines[i] + isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen + } + if isTripleQuoted { + var list textList + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) + for i, line := range lines { + if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 { + comment := commentString(fmt.Sprintf("%d elided lines", numElided)) + list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment}) + break + } + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true}) + } + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) + return &textWrap{Prefix: "(", Value: list, Suffix: ")"} + } + + // Format the string as a single-line quoted string. + if len(s) > maxLen+len(textEllipsis) { + return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis)) + } + return textLine(prefix + formatString(s)) +} + // formatMapKey formats v as if it were a map key. // The result is guaranteed to be a single line. -func formatMapKey(v reflect.Value) string { +func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string { var opts formatOptions + opts.DiffMode = diffIdentical opts.TypeMode = elideType - opts.ShallowPointers = true - s := opts.FormatValue(v, visitedPointers{}).String() + opts.PrintAddresses = disambiguate + opts.AvoidStringer = disambiguate + opts.QualifiedNames = disambiguate + s := opts.FormatValue(v, reflect.Map, ptrs).String() return strings.TrimSpace(s) } @@ -227,7 +369,7 @@ func formatString(s string) string { rawInvalid := func(r rune) bool { return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') } - if strings.IndexFunc(s, rawInvalid) < 0 { + if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 { return "`" + s + "`" } return qs @@ -256,23 +398,3 @@ func formatHex(u uint64) string { } return fmt.Sprintf(f, u) } - -// formatPointer prints the address of the pointer. -func formatPointer(v reflect.Value) string { - p := v.Pointer() - if flags.Deterministic { - p = 0xdeadf00f // Only used for stable testing purposes - } - return fmt.Sprintf("⟪0x%x⟫", p) -} - -type visitedPointers map[value.Pointer]struct{} - -// Visit inserts pointer v into the visited map and reports whether it had -// already been visited before. 
-func (m visitedPointers) Visit(v reflect.Value) bool { - p := value.PointerOf(v) - _, visited := m[p] - m[p] = struct{}{} - return visited -} diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index eafcf2e4c0b..35315dad355 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -8,6 +8,7 @@ import ( "bytes" "fmt" "reflect" + "strconv" "strings" "unicode" "unicode/utf8" @@ -23,11 +24,25 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false // Must be formatting in diff mode case v.NumDiff == 0: return false // No differences detected - case v.NumIgnored+v.NumCompared+v.NumTransformed > 0: - // TODO: Handle the case where someone uses bytes.Equal on a large slice. - return false // Some custom option was used to determined equality case !v.ValueX.IsValid() || !v.ValueY.IsValid(): return false // Both values must be valid + case v.Type.Kind() == reflect.Slice && (v.ValueX.Len() == 0 || v.ValueY.Len() == 0): + return false // Both slice values have to be non-empty + case v.NumIgnored > 0: + return false // Some ignore option was used + case v.NumTransformed > 0: + return false // Some transform option was used + case v.NumCompared > 1: + return false // More than one comparison was used + case v.NumCompared == 1 && v.Type.Name() != "": + // The need for cmp to check applicability of options on every element + // in a slice is a significant performance detriment for large []byte. + // The workaround is to specify Comparer(bytes.Equal), + // which enables cmp to compare []byte more efficiently. + // If they differ, we still want to provide batched diffing. + // The logic disallows named types since they tend to have their own + // String method, with nicer formatting than what this provides. + return false } switch t := v.Type; t.Kind() { @@ -82,7 +97,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } if isText || isBinary { var numLines, lastLineIdx, maxLineLen int - isBinary = false + isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy) for i, r := range sx + sy { if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { isBinary = true @@ -97,7 +112,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } } isText = !isBinary - isLinedText = isText && numLines >= 4 && maxLineLen <= 256 + isLinedText = isText && numLines >= 4 && maxLineLen <= 1024 } // Format the string into printable records. @@ -117,6 +132,83 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }, ) delim = "\n" + + // If possible, use a custom triple-quote (""") syntax for printing + // differences in a string literal. This format is more readable, + // but has edge-cases where differences are visually indistinguishable. + // This format is avoided under the following conditions: + // • A line starts with `"""` + // • A line starts with "..." + // • A line contains non-printable characters + // • Adjacent different lines differ only by whitespace + // + // For example: + // """ + // ... 
// 3 identical lines + // foo + // bar + // - baz + // + BAZ + // """ + isTripleQuoted := true + prevRemoveLines := map[string]bool{} + prevInsertLines := map[string]bool{} + var list2 textList + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + for _, r := range list { + if !r.Value.Equal(textEllipsis) { + line, _ := strconv.Unquote(string(r.Value.(textLine))) + line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + normLine := strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return -1 // drop whitespace to avoid visually indistinguishable output + } + return r + }, line) + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" + switch r.Diff { + case diffRemoved: + isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine] + prevRemoveLines[normLine] = true + case diffInserted: + isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine] + prevInsertLines[normLine] = true + } + if !isTripleQuoted { + break + } + r.Value = textLine(line) + r.ElideComma = true + } + if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group + prevRemoveLines = map[string]bool{} + prevInsertLines = map[string]bool{} + } + list2 = append(list2, r) + } + if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 { + list2 = list2[:len(list2)-1] // elide single empty line at the end + } + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + if isTripleQuoted { + var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} + switch t.Kind() { + case reflect.String: + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + // Always emit type for slices since the triple-quote syntax + // looks like a string (not a slice). + opts = opts.WithTypeMode(emitType) + out = opts.FormatType(t, out) + } + return out + } + // If the text appears to be single-lined text, // then perform differencing in approximately fixed-sized chunks. // The output is printed as quoted strings. @@ -129,6 +221,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }, ) delim = "" + // If the text appears to be binary data, // then perform differencing in approximately fixed-sized chunks. // The output is inspired by hexdump. @@ -145,6 +238,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { return textRecord{Diff: d, Value: textLine(s), Comment: comment} }, ) + // For all other slices of primitive types, // then perform differencing in approximately fixed-sized chunks. // The size of each chunk depends on the width of the element kind. 
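A quick way to exercise the triple-quote layout described in the comment above is to diff two multi-line strings that satisfy its conditions (at least four printable lines, no `"""` or `...` prefixes). The exact report text depends on this vendored revision, so the expected output is not reproduced here; the snippet only shows the call.

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	x := "alpha\nbravo\ncharlie\ndelta\n"
	y := "alpha\nbravo\nCHARLIE\ndelta\n"
	// With several lines of printable text, the reporter prefers the """ form,
	// marking removed lines with '-' and inserted lines with '+'.
	fmt.Println(cmp.Diff(x, y))
}
```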
@@ -172,7 +266,9 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch t.Elem().Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: ss = append(ss, fmt.Sprint(v.Index(i).Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + ss = append(ss, fmt.Sprint(v.Index(i).Uint())) + case reflect.Uint8, reflect.Uintptr: ss = append(ss, formatHex(v.Index(i).Uint())) case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: ss = append(ss, fmt.Sprint(v.Index(i).Interface())) @@ -185,7 +281,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } // Wrap the output with appropriate type information. - var out textNode = textWrap{"{", list, "}"} + var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} if !isText { // The "{...}" byte-sequence literal is not valid Go syntax for strings. // Emit the type for extra clarity (e.g. "string{...}"). @@ -196,12 +292,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } switch t.Kind() { case reflect.String: - out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)} + out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} if t != reflect.TypeOf(string("")) { out = opts.FormatType(t, out) } case reflect.Slice: - out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)} + out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} if t != reflect.TypeOf([]byte(nil)) { out = opts.FormatType(t, out) } @@ -242,9 +338,22 @@ func (opts formatOptions) formatDiffSlice( return n0 - v.Len() } + var numDiffs int + maxLen := -1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... + opts.VerbosityLevel-- + } + groups := coalesceAdjacentEdits(name, es) groups = coalesceInterveningIdentical(groups, chunkSize/4) + maxGroup := diffStats{Name: name} for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + // Print equal. if ds.NumDiff() == 0 { // Compute the number of leading and trailing equal bytes to print. @@ -273,12 +382,18 @@ func (opts formatOptions) formatDiffSlice( } // Print unequal. 
+ len0 := len(list) nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) vx = vx.Slice(nx, vx.Len()) ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) vy = vy.Slice(ny, vy.Len()) + numDiffs += len(list) - len0 + } + if maxGroup.IsZero() { + assert(vx.Len() == 0 && vy.Len() == 0) + } else { + list.AppendEllipsis(maxGroup) } - assert(vx.Len() == 0 && vy.Len() == 0) return list } diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go index 8b8fcab7bdf..8b12c05cd4f 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_text.go +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -10,12 +10,15 @@ import ( "math/rand" "strings" "time" + "unicode/utf8" "github.com/google/go-cmp/cmp/internal/flags" ) var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 +const maxColumnLength = 80 + type indentMode int func (n indentMode) appendIndent(b []byte, d diffMode) []byte { @@ -91,21 +94,22 @@ type textNode interface { // textWrap is a wrapper that concatenates a prefix and/or a suffix // to the underlying node. type textWrap struct { - Prefix string // e.g., "bytes.Buffer{" - Value textNode // textWrap | textList | textLine - Suffix string // e.g., "}" + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" + Metadata interface{} // arbitrary metadata; has no effect on formatting } -func (s textWrap) Len() int { +func (s *textWrap) Len() int { return len(s.Prefix) + s.Value.Len() + len(s.Suffix) } -func (s1 textWrap) Equal(s2 textNode) bool { - if s2, ok := s2.(textWrap); ok { +func (s1 *textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(*textWrap); ok { return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix } return false } -func (s textWrap) String() string { +func (s *textWrap) String() string { var d diffMode var n indentMode _, s2 := s.formatCompactTo(nil, d) @@ -114,7 +118,7 @@ func (s textWrap) String() string { b = append(b, '\n') // Trailing newline return string(b) } -func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { +func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { n0 := len(b) // Original buffer length b = append(b, s.Prefix...) b, s.Value = s.Value.formatCompactTo(b, d) @@ -124,7 +128,7 @@ func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { } return b, s } -func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { +func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { b = append(b, s.Prefix...) b = s.Value.formatExpandedTo(b, d, n) b = append(b, s.Suffix...) @@ -136,22 +140,23 @@ func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { // of the textList.formatCompactTo method. type textList []textRecord type textRecord struct { - Diff diffMode // e.g., 0 or '-' or '+' - Key string // e.g., "MyField" - Value textNode // textWrap | textLine - Comment fmt.Stringer // e.g., "6 identical fields" + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + ElideComma bool // avoid trailing comma + Comment fmt.Stringer // e.g., "6 identical fields" } // AppendEllipsis appends a new ellipsis node to the list if none already // exists at the end. If cs is non-zero it coalesces the statistics with the // previous diffStats. 
func (s *textList) AppendEllipsis(ds diffStats) { - hasStats := ds != diffStats{} + hasStats := !ds.IsZero() if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { if hasStats { - *s = append(*s, textRecord{Value: textEllipsis, Comment: ds}) + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds}) } else { - *s = append(*s, textRecord{Value: textEllipsis}) + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true}) } return } @@ -191,7 +196,7 @@ func (s1 textList) Equal(s2 textNode) bool { } func (s textList) String() string { - return textWrap{"{", s, "}"}.String() + return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String() } func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { @@ -221,7 +226,7 @@ func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { } // Force multi-lined output when printing a removed/inserted node that // is sufficiently long. - if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 { + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength { multiLine = true } if !multiLine { @@ -236,16 +241,50 @@ func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { _, isLine := r.Value.(textLine) return r.Key == "" || !isLine }, - func(r textRecord) int { return len(r.Key) }, + func(r textRecord) int { return utf8.RuneCountInString(r.Key) }, ) alignValueLens := s.alignLens( func(r textRecord) bool { _, isLine := r.Value.(textLine) return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil }, - func(r textRecord) int { return len(r.Value.(textLine)) }, + func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) }, ) + // Format lists of simple lists in a batched form. + // If the list is sequence of only textLine values, + // then batch multiple values on a single line. + var isSimple bool + for _, r := range s { + _, isLine := r.Value.(textLine) + isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil + if !isSimple { + break + } + } + if isSimple { + n++ + var batch []byte + emitBatch := func() { + if len(batch) > 0 { + b = n.appendIndent(append(b, '\n'), d) + b = append(b, bytes.TrimRight(batch, " ")...) + batch = batch[:0] + } + } + for _, r := range s { + line := r.Value.(textLine) + if len(batch)+len(line)+len(", ") > maxColumnLength { + emitBatch() + } + batch = append(batch, line...) + batch = append(batch, ", "...) + } + emitBatch() + n-- + return n.appendIndent(append(b, '\n'), d) + } + // Format the list as a multi-lined output. n++ for i, r := range s { @@ -256,7 +295,7 @@ func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { b = alignKeyLens[i].appendChar(b, ' ') b = r.Value.formatExpandedTo(b, d|r.Diff, n) - if !r.Value.Equal(textEllipsis) { + if !r.ElideComma { b = append(b, ',') } b = alignValueLens[i].appendChar(b, ' ') @@ -332,6 +371,11 @@ type diffStats struct { NumModified int } +func (s diffStats) IsZero() bool { + s.Name = "" + return s == diffStats{} +} + func (s diffStats) NumDiff() int { return s.NumRemoved + s.NumInserted + s.NumModified } diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md index b8cc459bf7b..30357c75668 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/README.md +++ b/vendor/github.com/hashicorp/go-retryablehttp/README.md @@ -44,5 +44,18 @@ The returned response object is an `*http.Response`, the same thing you would usually get from `net/http`. 
Had the request failed one or more times, the above call would block and retry with exponential backoff. +## Getting a stdlib `*http.Client` with retries + +It's possible to convert a `*retryablehttp.Client` directly to a `*http.Client`. +This makes use of retryablehttp broadly applicable with minimal effort. Simply +configure a `*retryablehttp.Client` as you wish, and then call `StandardClient()`: + +```go +retryClient := retryablehttp.NewClient() +retryClient.RetryMax = 10 + +standardClient := retryClient.StandardClient() // *http.Client +``` + For more usage and examples see the [godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp). diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go index 7bfa75933e1..f1ccd3df35c 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/client.go +++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go @@ -1,4 +1,4 @@ -// The retryablehttp package provides a familiar HTTP client interface with +// Package retryablehttp provides a familiar HTTP client interface with // automatic retries and exponential backoff. It is a thin wrapper over the // standard net/http client library and exposes nearly the same public API. // This makes retryablehttp very easy to drop into existing programs. @@ -119,95 +119,127 @@ func (r *Request) BodyBytes() ([]byte, error) { return buf.Bytes(), nil } +// SetBody allows setting the request body. +// +// It is useful if a new body needs to be set without constructing a new Request. +func (r *Request) SetBody(rawBody interface{}) error { + bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) + if err != nil { + return err + } + r.body = bodyReader + r.ContentLength = contentLength + return nil +} + +// WriteTo allows copying the request body into a writer. +// +// It writes data to w until there's no more data to write or +// when an error occurs. The return int64 value is the number of bytes +// written. Any error encountered during the write is also returned. +// The signature matches io.WriterTo interface. +func (r *Request) WriteTo(w io.Writer) (int64, error) { + body, err := r.body() + if err != nil { + return 0, err + } + if c, ok := body.(io.Closer); ok { + defer c.Close() + } + return io.Copy(w, body) +} + func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, error) { var bodyReader ReaderFunc var contentLength int64 - if rawBody != nil { - switch body := rawBody.(type) { - // If they gave us a function already, great! Use it. - case ReaderFunc: - bodyReader = body - tmp, err := body() - if err != nil { - return nil, 0, err - } - if lr, ok := tmp.(LenReader); ok { - contentLength = int64(lr.Len()) - } - if c, ok := tmp.(io.Closer); ok { - c.Close() - } - - case func() (io.Reader, error): - bodyReader = body - tmp, err := body() - if err != nil { - return nil, 0, err - } - if lr, ok := tmp.(LenReader); ok { - contentLength = int64(lr.Len()) - } - if c, ok := tmp.(io.Closer); ok { - c.Close() - } + switch body := rawBody.(type) { + // If they gave us a function already, great! Use it. 
+ case ReaderFunc: + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } - // If a regular byte slice, we can read it over and over via new - // readers - case []byte: - buf := body - bodyReader = func() (io.Reader, error) { - return bytes.NewReader(buf), nil - } - contentLength = int64(len(buf)) - - // If a bytes.Buffer we can read the underlying byte slice over and - // over - case *bytes.Buffer: - buf := body - bodyReader = func() (io.Reader, error) { - return bytes.NewReader(buf.Bytes()), nil - } - contentLength = int64(buf.Len()) + case func() (io.Reader, error): + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } - // We prioritize *bytes.Reader here because we don't really want to - // deal with it seeking so want it to match here instead of the - // io.ReadSeeker case. - case *bytes.Reader: - buf, err := ioutil.ReadAll(body) - if err != nil { - return nil, 0, err - } - bodyReader = func() (io.Reader, error) { - return bytes.NewReader(buf), nil - } - contentLength = int64(len(buf)) - - // Compat case - case io.ReadSeeker: - raw := body - bodyReader = func() (io.Reader, error) { - _, err := raw.Seek(0, 0) - return ioutil.NopCloser(raw), err - } - if lr, ok := raw.(LenReader); ok { - contentLength = int64(lr.Len()) - } + // If a regular byte slice, we can read it over and over via new + // readers + case []byte: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // If a bytes.Buffer we can read the underlying byte slice over and + // over + case *bytes.Buffer: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf.Bytes()), nil + } + contentLength = int64(buf.Len()) - // Read all in so we can reset - case io.Reader: - buf, err := ioutil.ReadAll(body) - if err != nil { - return nil, 0, err - } - bodyReader = func() (io.Reader, error) { - return bytes.NewReader(buf), nil - } - contentLength = int64(len(buf)) + // We prioritize *bytes.Reader here because we don't really want to + // deal with it seeking so want it to match here instead of the + // io.ReadSeeker case. 
+ case *bytes.Reader: + buf, err := ioutil.ReadAll(body) + if err != nil { + return nil, 0, err + } + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // Compat case + case io.ReadSeeker: + raw := body + bodyReader = func() (io.Reader, error) { + _, err := raw.Seek(0, 0) + return ioutil.NopCloser(raw), err + } + if lr, ok := raw.(LenReader); ok { + contentLength = int64(lr.Len()) + } - default: - return nil, 0, fmt.Errorf("cannot handle type %T", rawBody) + // Read all in so we can reset + case io.Reader: + buf, err := ioutil.ReadAll(body) + if err != nil { + return nil, 0, err } + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // No body provided, nothing to do + case nil: + + // Unrecognized type + default: + return nil, 0, fmt.Errorf("cannot handle type %T", rawBody) } return bodyReader, contentLength, nil } @@ -415,7 +447,7 @@ func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) // perform linear backoff based on the attempt number and with jitter to // prevent a thundering herd. // -// min and max here are *not* absolute values. The number to be multipled by +// min and max here are *not* absolute values. The number to be multiplied by // the attempt number will be chosen at random from between them, thus they are // bounding the jitter. // @@ -545,7 +577,7 @@ func (c *Client) Do(req *Request) (*http.Response, error) { return resp, err } - // We do this before drainBody beause there's no need for the I/O if + // We do this before drainBody because there's no need for the I/O if // we're breaking out remain := c.RetryMax - i if remain <= 0 { @@ -663,3 +695,11 @@ func PostForm(url string, data url.Values) (*http.Response, error) { func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) } + +// StandardClient returns a stdlib *http.Client with a custom Transport, which +// shims in a *retryablehttp.Client for added retries. +func (c *Client) StandardClient() *http.Client { + return &http.Client{ + Transport: &RoundTripper{Client: c}, + } +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go new file mode 100644 index 00000000000..b841b4cfe53 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go @@ -0,0 +1,43 @@ +package retryablehttp + +import ( + "net/http" + "sync" +) + +// RoundTripper implements the http.RoundTripper interface, using a retrying +// HTTP client to execute requests. +// +// It is important to note that retryablehttp doesn't always act exactly as a +// RoundTripper should. This is highly dependent on the retryable client's +// configuration. +type RoundTripper struct { + // The client to use during requests. If nil, the default retryablehttp + // client and settings will be used. + Client *Client + + // once ensures that the logic to initialize the default client runs at + // most once, in a single thread. + once sync.Once +} + +// init initializes the underlying retryable client. +func (rt *RoundTripper) init() { + if rt.Client == nil { + rt.Client = NewClient() + } +} + +// RoundTrip satisfies the http.RoundTripper interface. +func (rt *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + rt.once.Do(rt.init) + + // Convert the request to be retryable. 
+ retryableReq, err := FromRequest(req) + if err != nil { + return nil, err + } + + // Execute the request. + return rt.Client.Do(retryableReq) +} diff --git a/vendor/github.com/miekg/dns/.codecov.yml b/vendor/github.com/miekg/dns/.codecov.yml new file mode 100644 index 00000000000..f91e5c1fe57 --- /dev/null +++ b/vendor/github.com/miekg/dns/.codecov.yml @@ -0,0 +1,8 @@ +coverage: + status: + project: + default: + target: 40% + threshold: null + patch: false + changes: false diff --git a/vendor/github.com/miekg/dns/.gitignore b/vendor/github.com/miekg/dns/.gitignore new file mode 100644 index 00000000000..776cd950c25 --- /dev/null +++ b/vendor/github.com/miekg/dns/.gitignore @@ -0,0 +1,4 @@ +*.6 +tags +test.out +a.out diff --git a/vendor/github.com/miekg/dns/.travis.yml b/vendor/github.com/miekg/dns/.travis.yml new file mode 100644 index 00000000000..542dd68c0ff --- /dev/null +++ b/vendor/github.com/miekg/dns/.travis.yml @@ -0,0 +1,20 @@ +language: go +sudo: false +go: + - 1.9.x + - tip + +env: + - TESTS="-race -v -bench=. -coverprofile=coverage.txt -covermode=atomic" + - TESTS="-race -v ./..." + +before_install: + # don't use the miekg/dns when testing forks + - mkdir -p $GOPATH/src/github.com/miekg + - ln -s $TRAVIS_BUILD_DIR $GOPATH/src/github.com/miekg/ || true + +script: + - go test $TESTS + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/miekg/dns/AUTHORS b/vendor/github.com/miekg/dns/AUTHORS new file mode 100644 index 00000000000..1965683525a --- /dev/null +++ b/vendor/github.com/miekg/dns/AUTHORS @@ -0,0 +1 @@ +Miek Gieben diff --git a/vendor/github.com/miekg/dns/CONTRIBUTORS b/vendor/github.com/miekg/dns/CONTRIBUTORS new file mode 100644 index 00000000000..5903779d81f --- /dev/null +++ b/vendor/github.com/miekg/dns/CONTRIBUTORS @@ -0,0 +1,10 @@ +Alex A. Skinner +Andrew Tunnell-Jones +Ask Bjørn Hansen +Dave Cheney +Dusty Wilson +Marek Majkowski +Peter van Dijk +Omri Bahumi +Alex Sergeyev +James Hartig diff --git a/vendor/github.com/miekg/dns/COPYRIGHT b/vendor/github.com/miekg/dns/COPYRIGHT new file mode 100644 index 00000000000..35702b10e87 --- /dev/null +++ b/vendor/github.com/miekg/dns/COPYRIGHT @@ -0,0 +1,9 @@ +Copyright 2009 The Go Authors. All rights reserved. Use of this source code +is governed by a BSD-style license that can be found in the LICENSE file. +Extensions of the original work are copyright (c) 2011 Miek Gieben + +Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. + +Copyright 2014 CloudFlare. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. diff --git a/vendor/github.com/miekg/dns/Gopkg.lock b/vendor/github.com/miekg/dns/Gopkg.lock new file mode 100644 index 00000000000..0c73a64444f --- /dev/null +++ b/vendor/github.com/miekg/dns/Gopkg.lock @@ -0,0 +1,21 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + branch = "master" + name = "golang.org/x/crypto" + packages = ["ed25519","ed25519/internal/edwards25519"] + revision = "b080dc9a8c480b08e698fb1219160d598526310f" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = ["bpf","internal/iana","internal/socket","ipv4","ipv6"] + revision = "894f8ed5849b15b810ae41e9590a0d05395bba27" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "c4abc38abaeeeeb9be92455c9c02cae32841122b8982aaa067ef25bb8e86ff9d" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/miekg/dns/Gopkg.toml b/vendor/github.com/miekg/dns/Gopkg.toml new file mode 100644 index 00000000000..2f655b2c7b3 --- /dev/null +++ b/vendor/github.com/miekg/dns/Gopkg.toml @@ -0,0 +1,26 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + branch = "master" + name = "golang.org/x/crypto" diff --git a/vendor/honnef.co/go/tools/lint/LICENSE b/vendor/github.com/miekg/dns/LICENSE similarity index 88% rename from vendor/honnef.co/go/tools/lint/LICENSE rename to vendor/github.com/miekg/dns/LICENSE index 796130a123a..5763fa7fe5d 100644 --- a/vendor/honnef.co/go/tools/lint/LICENSE +++ b/vendor/github.com/miekg/dns/LICENSE @@ -1,5 +1,8 @@ -Copyright (c) 2013 The Go Authors. All rights reserved. -Copyright (c) 2016 Dominik Honnef. All rights reserved. +Extensions of the original work are copyright (c) 2011 Miek Gieben + +As this is fork of the official Go code the same license applies: + +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -26,3 +29,4 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/miekg/dns/Makefile.fuzz b/vendor/github.com/miekg/dns/Makefile.fuzz new file mode 100644 index 00000000000..dc158c4acee --- /dev/null +++ b/vendor/github.com/miekg/dns/Makefile.fuzz @@ -0,0 +1,33 @@ +# Makefile for fuzzing +# +# Use go-fuzz and needs the tools installed. 
+# See https://blog.cloudflare.com/dns-parser-meet-go-fuzzer/ +# +# Installing go-fuzz: +# $ make -f Makefile.fuzz get +# Installs: +# * github.com/dvyukov/go-fuzz/go-fuzz +# * get github.com/dvyukov/go-fuzz/go-fuzz-build + +all: build + +.PHONY: build +build: + go-fuzz-build -tags fuzz github.com/miekg/dns + +.PHONY: build-newrr +build-newrr: + go-fuzz-build -func FuzzNewRR -tags fuzz github.com/miekg/dns + +.PHONY: fuzz +fuzz: + go-fuzz -bin=dns-fuzz.zip -workdir=fuzz + +.PHONY: get +get: + go get github.com/dvyukov/go-fuzz/go-fuzz + go get github.com/dvyukov/go-fuzz/go-fuzz-build + +.PHONY: clean +clean: + rm *-fuzz.zip diff --git a/vendor/github.com/miekg/dns/Makefile.release b/vendor/github.com/miekg/dns/Makefile.release new file mode 100644 index 00000000000..8fb748e8aae --- /dev/null +++ b/vendor/github.com/miekg/dns/Makefile.release @@ -0,0 +1,52 @@ +# Makefile for releasing. +# +# The release is controlled from version.go. The version found there is +# used to tag the git repo, we're not building any artifects so there is nothing +# to upload to github. +# +# * Up the version in version.go +# * Run: make -f Makefile.release release +# * will *commit* your change with 'Release $VERSION' +# * push to github +# + +define GO +//+build ignore + +package main + +import ( + "fmt" + + "github.com/miekg/dns" +) + +func main() { + fmt.Println(dns.Version.String()) +} +endef + +$(file > version_release.go,$(GO)) +VERSION:=$(shell go run version_release.go) +TAG="v$(VERSION)" + +all: + @echo Use the \'release\' target to start a release $(VERSION) + rm -f version_release.go + +.PHONY: release +release: commit push + @echo Released $(VERSION) + rm -f version_release.go + +.PHONY: commit +commit: + @echo Committing release $(VERSION) + git commit -am"Release $(VERSION)" + git tag $(TAG) + +.PHONY: push +push: + @echo Pushing release $(VERSION) to master + git push --tags + git push diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md new file mode 100644 index 00000000000..1ad23c75161 --- /dev/null +++ b/vendor/github.com/miekg/dns/README.md @@ -0,0 +1,168 @@ +[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns) +[![Code Coverage](https://img.shields.io/codecov/c/github/miekg/dns/master.svg)](https://codecov.io/github/miekg/dns?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/miekg/dns)](https://goreportcard.com/report/miekg/dns) +[![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns) + +# Alternative (more granular) approach to a DNS library + +> Less is more. + +Complete and usable DNS library. All widely used Resource Records are supported, including the +DNSSEC types. It follows a lean and mean philosophy. If there is stuff you should know as a DNS +programmer there isn't a convenience function for it. Server side and client side programming is +supported, i.e. you can build servers and resolvers with it. + +We try to keep the "master" branch as sane as possible and at the bleeding edge of standards, +avoiding breaking changes wherever reasonable. We support the last two versions of Go. + +# Goals + +* KISS; +* Fast; +* Small API. If it's easy to code in Go, don't make a function for it. 
+ +# Users + +A not-so-up-to-date-list-that-may-be-actually-current: + +* https://github.com/coredns/coredns +* https://cloudflare.com +* https://github.com/abh/geodns +* http://www.statdns.com/ +* http://www.dnsinspect.com/ +* https://github.com/chuangbo/jianbing-dictionary-dns +* http://www.dns-lg.com/ +* https://github.com/fcambus/rrda +* https://github.com/kenshinx/godns +* https://github.com/skynetservices/skydns +* https://github.com/hashicorp/consul +* https://github.com/DevelopersPL/godnsagent +* https://github.com/duedil-ltd/discodns +* https://github.com/StalkR/dns-reverse-proxy +* https://github.com/tianon/rawdns +* https://mesosphere.github.io/mesos-dns/ +* https://pulse.turbobytes.com/ +* https://play.google.com/store/apps/details?id=com.turbobytes.dig +* https://github.com/fcambus/statzone +* https://github.com/benschw/dns-clb-go +* https://github.com/corny/dnscheck for http://public-dns.info/ +* https://namesmith.io +* https://github.com/miekg/unbound +* https://github.com/miekg/exdns +* https://dnslookup.org +* https://github.com/looterz/grimd +* https://github.com/phamhongviet/serf-dns +* https://github.com/mehrdadrad/mylg +* https://github.com/bamarni/dockness +* https://github.com/fffaraz/microdns +* http://kelda.io +* https://github.com/ipdcode/hades (JD.COM) +* https://github.com/StackExchange/dnscontrol/ +* https://www.dnsperf.com/ +* https://dnssectest.net/ +* https://dns.apebits.com +* https://github.com/oif/apex +* https://github.com/jedisct1/dnscrypt-proxy +* https://github.com/jedisct1/rpdns + +Send pull request if you want to be listed here. + +# Features + +* UDP/TCP queries, IPv4 and IPv6; +* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported; +* Fast: + * Reply speed around ~ 80K qps (faster hardware results in more qps); + * Parsing RRs ~ 100K RR/s, that's 5M records in about 50 seconds; +* Server side programming (mimicking the net/http package); +* Client side programming; +* DNSSEC: signing, validating and key generation for DSA, RSA, ECDSA and Ed25519; +* EDNS0, NSID, Cookies; +* AXFR/IXFR; +* TSIG, SIG(0); +* DNS over TLS: optional encrypted connection between client and server; +* DNS name compression; +* Depends only on the standard library. + +Have fun! + +Miek Gieben - 2010-2012 - + +# Building + +Building is done with the `go` tool. If you have setup your GOPATH correctly, the following should +work: + + go get github.com/miekg/dns + go build github.com/miekg/dns + +## Examples + +A short "how to use the API" is at the beginning of doc.go (this also will show +when you call `godoc github.com/miekg/dns`). + +Example programs can be found in the `github.com/miekg/exdns` repository. 
+ +## Supported RFCs + +*all of them* + +* 103{4,5} - DNS standard +* 1348 - NSAP record (removed the record) +* 1982 - Serial Arithmetic +* 1876 - LOC record +* 1995 - IXFR +* 1996 - DNS notify +* 2136 - DNS Update (dynamic updates) +* 2181 - RRset definition - there is no RRset type though, just []RR +* 2537 - RSAMD5 DNS keys +* 2065 - DNSSEC (updated in later RFCs) +* 2671 - EDNS record +* 2782 - SRV record +* 2845 - TSIG record +* 2915 - NAPTR record +* 2929 - DNS IANA Considerations +* 3110 - RSASHA1 DNS keys +* 3225 - DO bit (DNSSEC OK) +* 340{1,2,3} - NAPTR record +* 3445 - Limiting the scope of (DNS)KEY +* 3597 - Unknown RRs +* 403{3,4,5} - DNSSEC + validation functions +* 4255 - SSHFP record +* 4343 - Case insensitivity +* 4408 - SPF record +* 4509 - SHA256 Hash in DS +* 4592 - Wildcards in the DNS +* 4635 - HMAC SHA TSIG +* 4701 - DHCID +* 4892 - id.server +* 5001 - NSID +* 5155 - NSEC3 record +* 5205 - HIP record +* 5702 - SHA2 in the DNS +* 5936 - AXFR +* 5966 - TCP implementation recommendations +* 6605 - ECDSA +* 6725 - IANA Registry Update +* 6742 - ILNP DNS +* 6840 - Clarifications and Implementation Notes for DNS Security +* 6844 - CAA record +* 6891 - EDNS0 update +* 6895 - DNS IANA considerations +* 6975 - Algorithm Understanding in DNSSEC +* 7043 - EUI48/EUI64 records +* 7314 - DNS (EDNS) EXPIRE Option +* 7477 - CSYNC RR +* 7828 - edns-tcp-keepalive EDNS0 Option +* 7553 - URI record +* 7858 - DNS over TLS: Initiation and Performance Considerations +* 7871 - EDNS0 Client Subnet +* 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies) +* 8080 - EdDSA for DNSSEC + +## Loosely based upon + +* `ldns` +* `NSD` +* `Net::DNS` +* `GRONG` diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go new file mode 100644 index 00000000000..dd6b512afb6 --- /dev/null +++ b/vendor/github.com/miekg/dns/client.go @@ -0,0 +1,586 @@ +package dns + +// A client implementation. + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "strings" + "time" +) + +const ( + dnsTimeout time.Duration = 2 * time.Second + tcpIdleTimeout time.Duration = 8 * time.Second + + dohMimeType = "application/dns-message" +) + +// A Conn represents a connection to a DNS server. +type Conn struct { + net.Conn // a net.Conn holding the connection + UDPSize uint16 // minimum receive buffer for UDP messages + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) + tsigRequestMAC string +} + +// A Client defines parameters for a DNS client. +type Client struct { + Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP) + UDPSize uint16 // minimum receive buffer for UDP messages + TLSConfig *tls.Config // TLS connection configuration + Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more + // Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout, + // WriteTimeout when non-zero. 
Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and + // Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext) + Timeout time.Duration + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + HTTPClient *http.Client // The http.Client to use for DNS-over-HTTPS + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) + SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass + group singleflight +} + +// Exchange performs a synchronous UDP query. It sends the message m to the address +// contained in a and waits for a reply. Exchange does not retry a failed query, nor +// will it fall back to TCP in case of truncation. +// See client.Exchange for more information on setting larger buffer sizes. +func Exchange(m *Msg, a string) (r *Msg, err error) { + client := Client{Net: "udp"} + r, _, err = client.Exchange(m, a) + return r, err +} + +func (c *Client) dialTimeout() time.Duration { + if c.Timeout != 0 { + return c.Timeout + } + if c.DialTimeout != 0 { + return c.DialTimeout + } + return dnsTimeout +} + +func (c *Client) readTimeout() time.Duration { + if c.ReadTimeout != 0 { + return c.ReadTimeout + } + return dnsTimeout +} + +func (c *Client) writeTimeout() time.Duration { + if c.WriteTimeout != 0 { + return c.WriteTimeout + } + return dnsTimeout +} + +// Dial connects to the address on the named network. +func (c *Client) Dial(address string) (conn *Conn, err error) { + // create a new dialer with the appropriate timeout + var d net.Dialer + if c.Dialer == nil { + d = net.Dialer{Timeout:c.getTimeoutForRequest(c.dialTimeout())} + } else { + d = net.Dialer(*c.Dialer) + } + + network := "udp" + useTLS := false + + switch c.Net { + case "tcp-tls": + network = "tcp" + useTLS = true + case "tcp4-tls": + network = "tcp4" + useTLS = true + case "tcp6-tls": + network = "tcp6" + useTLS = true + default: + if c.Net != "" { + network = c.Net + } + } + + conn = new(Conn) + if useTLS { + conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig) + } else { + conn.Conn, err = d.Dial(network, address) + } + if err != nil { + return nil, err + } + return conn, nil +} + +// Exchange performs a synchronous query. It sends the message m to the address +// contained in a and waits for a reply. Basic use pattern with a *dns.Client: +// +// c := new(dns.Client) +// in, rtt, err := c.Exchange(message, "127.0.0.1:53") +// +// Exchange does not retry a failed query, nor will it fall back to TCP in +// case of truncation. +// It is up to the caller to create a message that allows for larger responses to be +// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger +// buffer, see SetEdns0. 
Messages without an OPT RR will fallback to the historic limit +// of 512 bytes +// To specify a local address or a timeout, the caller has to set the `Client.Dialer` +// attribute appropriately +func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) { + if !c.SingleInflight { + if c.Net == "https" { + // TODO(tmthrgd): pipe timeouts into exchangeDOH + return c.exchangeDOH(context.TODO(), m, address) + } + + return c.exchange(m, address) + } + + t := "nop" + if t1, ok := TypeToString[m.Question[0].Qtype]; ok { + t = t1 + } + cl := "nop" + if cl1, ok := ClassToString[m.Question[0].Qclass]; ok { + cl = cl1 + } + r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) { + if c.Net == "https" { + // TODO(tmthrgd): pipe timeouts into exchangeDOH + return c.exchangeDOH(context.TODO(), m, address) + } + + return c.exchange(m, address) + }) + if r != nil && shared { + r = r.Copy() + } + return r, rtt, err +} + +func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + var co *Conn + + co, err = c.Dial(a) + + if err != nil { + return nil, 0, err + } + defer co.Close() + + opt := m.IsEdns0() + // If EDNS0 is used use that for size. + if opt != nil && opt.UDPSize() >= MinMsgSize { + co.UDPSize = opt.UDPSize() + } + // Otherwise use the client's configured UDP size. + if opt == nil && c.UDPSize >= MinMsgSize { + co.UDPSize = c.UDPSize + } + + co.TsigSecret = c.TsigSecret + t := time.Now() + // write with the appropriate write timeout + co.SetWriteDeadline(t.Add(c.getTimeoutForRequest(c.writeTimeout()))) + if err = co.WriteMsg(m); err != nil { + return nil, 0, err + } + + co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout()))) + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + rtt = time.Since(t) + return r, rtt, err +} + +func (c *Client) exchangeDOH(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + p, err := m.Pack() + if err != nil { + return nil, 0, err + } + + req, err := http.NewRequest(http.MethodPost, a, bytes.NewReader(p)) + if err != nil { + return nil, 0, err + } + + req.Header.Set("Content-Type", dohMimeType) + req.Header.Set("Accept", dohMimeType) + + hc := http.DefaultClient + if c.HTTPClient != nil { + hc = c.HTTPClient + } + + if ctx != context.Background() && ctx != context.TODO() { + req = req.WithContext(ctx) + } + + t := time.Now() + + resp, err := hc.Do(req) + if err != nil { + return nil, 0, err + } + defer closeHTTPBody(resp.Body) + + if resp.StatusCode != http.StatusOK { + return nil, 0, fmt.Errorf("dns: server returned HTTP %d error: %q", resp.StatusCode, resp.Status) + } + + if ct := resp.Header.Get("Content-Type"); ct != dohMimeType { + return nil, 0, fmt.Errorf("dns: unexpected Content-Type %q; expected %q", ct, dohMimeType) + } + + p, err = ioutil.ReadAll(resp.Body) + if err != nil { + return nil, 0, err + } + + rtt = time.Since(t) + + r = new(Msg) + if err := r.Unpack(p); err != nil { + return r, 0, err + } + + // TODO: TSIG? Is it even supported over DoH? + + return r, rtt, nil +} + +func closeHTTPBody(r io.ReadCloser) error { + io.Copy(ioutil.Discard, io.LimitReader(r, 8<<20)) + return r.Close() +} + +// ReadMsg reads a message from the connection co. +// If the received message contains a TSIG record the transaction signature +// is verified. 
This method always tries to return the message, however if an +// error is returned there are no guarantees that the returned message is a +// valid representation of the packet read. +func (co *Conn) ReadMsg() (*Msg, error) { + p, err := co.ReadMsgHeader(nil) + if err != nil { + return nil, err + } + + m := new(Msg) + if err := m.Unpack(p); err != nil { + // If an error was returned, we still want to allow the user to use + // the message, but naively they can just check err if they don't want + // to use an erroneous message + return m, err + } + if t := m.IsTsig(); t != nil { + if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { + return m, ErrSecret + } + // Need to work on the original message p, as that was used to calculate the tsig. + err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) + } + return m, err +} + +// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil). +// Returns message as a byte slice to be parsed with Msg.Unpack later on. +// Note that error handling on the message body is not possible as only the header is parsed. +func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) { + var ( + p []byte + n int + err error + ) + + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + r := t.(io.Reader) + + // First two bytes specify the length of the entire message. + l, err := tcpMsgLen(r) + if err != nil { + return nil, err + } + p = make([]byte, l) + n, err = tcpRead(r, p) + default: + if co.UDPSize > MinMsgSize { + p = make([]byte, co.UDPSize) + } else { + p = make([]byte, MinMsgSize) + } + n, err = co.Read(p) + } + + if err != nil { + return nil, err + } else if n < headerSize { + return nil, ErrShortRead + } + + p = p[:n] + if hdr != nil { + dh, _, err := unpackMsgHdr(p, 0) + if err != nil { + return nil, err + } + *hdr = dh + } + return p, err +} + +// tcpMsgLen is a helper func to read first two bytes of stream as uint16 packet length. +func tcpMsgLen(t io.Reader) (int, error) { + p := []byte{0, 0} + n, err := t.Read(p) + if err != nil { + return 0, err + } + + // As seen with my local router/switch, returns 1 byte on the above read, + // resulting a a ShortRead. Just write it out (instead of loop) and read the + // other byte. + if n == 1 { + n1, err := t.Read(p[1:]) + if err != nil { + return 0, err + } + n += n1 + } + + if n != 2 { + return 0, ErrShortRead + } + l := binary.BigEndian.Uint16(p) + if l == 0 { + return 0, ErrShortRead + } + return int(l), nil +} + +// tcpRead calls TCPConn.Read enough times to fill allocated buffer. +func tcpRead(t io.Reader, p []byte) (int, error) { + n, err := t.Read(p) + if err != nil { + return n, err + } + for n < len(p) { + j, err := t.Read(p[n:]) + if err != nil { + return n, err + } + n += j + } + return n, err +} + +// Read implements the net.Conn read method. +func (co *Conn) Read(p []byte) (n int, err error) { + if co.Conn == nil { + return 0, ErrConnEmpty + } + if len(p) < 2 { + return 0, io.ErrShortBuffer + } + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + r := t.(io.Reader) + + l, err := tcpMsgLen(r) + if err != nil { + return 0, err + } + if l > len(p) { + return int(l), io.ErrShortBuffer + } + return tcpRead(r, p[:l]) + } + // UDP connection + n, err = co.Conn.Read(p) + if err != nil { + return n, err + } + return n, err +} + +// WriteMsg sends a message through the connection co. +// If the message m contains a TSIG record the transaction +// signature is calculated. 
+func (co *Conn) WriteMsg(m *Msg) (err error) { + var out []byte + if t := m.IsTsig(); t != nil { + mac := "" + if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { + return ErrSecret + } + out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) + // Set for the next read, although only used in zone transfers + co.tsigRequestMAC = mac + } else { + out, err = m.Pack() + } + if err != nil { + return err + } + if _, err = co.Write(out); err != nil { + return err + } + return nil +} + +// Write implements the net.Conn Write method. +func (co *Conn) Write(p []byte) (n int, err error) { + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + w := t.(io.Writer) + + lp := len(p) + if lp < 2 { + return 0, io.ErrShortBuffer + } + if lp > MaxMsgSize { + return 0, &Error{err: "message too large"} + } + l := make([]byte, 2, lp+2) + binary.BigEndian.PutUint16(l, uint16(lp)) + p = append(l, p...) + n, err := io.Copy(w, bytes.NewReader(p)) + return int(n), err + } + n, err = co.Conn.Write(p) + return n, err +} + +// Return the appropriate timeout for a specific request +func (c *Client) getTimeoutForRequest(timeout time.Duration) time.Duration { + var requestTimeout time.Duration + if c.Timeout != 0 { + requestTimeout = c.Timeout + } else { + requestTimeout = timeout + } + // net.Dialer.Timeout has priority if smaller than the timeouts computed so + // far + if c.Dialer != nil && c.Dialer.Timeout != 0 { + if c.Dialer.Timeout < requestTimeout { + requestTimeout = c.Dialer.Timeout + } + } + return requestTimeout +} + +// Dial connects to the address on the named network. +func Dial(network, address string) (conn *Conn, err error) { + conn = new(Conn) + conn.Conn, err = net.Dial(network, address) + if err != nil { + return nil, err + } + return conn, nil +} + +// ExchangeContext performs a synchronous UDP query, like Exchange. It +// additionally obeys deadlines from the passed Context. +func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) { + client := Client{Net: "udp"} + r, _, err = client.ExchangeContext(ctx, m, a) + // ignorint rtt to leave the original ExchangeContext API unchanged, but + // this function will go away + return r, err +} + +// ExchangeConn performs a synchronous query. It sends the message m via the connection +// c and waits for a reply. The connection c is not closed by ExchangeConn. +// This function is going away, but can easily be mimicked: +// +// co := &dns.Conn{Conn: c} // c is your net.Conn +// co.WriteMsg(m) +// in, _ := co.ReadMsg() +// co.Close() +// +func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { + println("dns: ExchangeConn: this function is deprecated") + co := new(Conn) + co.Conn = c + if err = co.WriteMsg(m); err != nil { + return nil, err + } + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + return r, err +} + +// DialTimeout acts like Dial but takes a timeout. +func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) { + client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}} + conn, err = client.Dial(address) + if err != nil { + return nil, err + } + return conn, nil +} + +// DialWithTLS connects to the address on the named network with TLS. 
+func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) { + if !strings.HasSuffix(network, "-tls") { + network += "-tls" + } + client := Client{Net: network, TLSConfig: tlsConfig} + conn, err = client.Dial(address) + + if err != nil { + return nil, err + } + return conn, nil +} + +// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout. +func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) { + if !strings.HasSuffix(network, "-tls") { + network += "-tls" + } + client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig} + conn, err = client.Dial(address) + if err != nil { + return nil, err + } + return conn, nil +} + +// ExchangeContext acts like Exchange, but honors the deadline on the provided +// context, if present. If there is both a context deadline and a configured +// timeout on the client, the earliest of the two takes effect. +func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + if !c.SingleInflight && c.Net == "https" { + return c.exchangeDOH(ctx, m, a) + } + + var timeout time.Duration + if deadline, ok := ctx.Deadline(); !ok { + timeout = 0 + } else { + timeout = deadline.Sub(time.Now()) + } + // not passing the context to the underlying calls, as the API does not support + // context. For timeouts you should set up Client.Dialer and call Client.Exchange. + // TODO(tmthrgd): this is a race condition + c.Dialer = &net.Dialer{Timeout: timeout} + return c.Exchange(m, a) +} diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go new file mode 100644 index 00000000000..f13cfa30cb5 --- /dev/null +++ b/vendor/github.com/miekg/dns/clientconfig.go @@ -0,0 +1,139 @@ +package dns + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" +) + +// ClientConfig wraps the contents of the /etc/resolv.conf file. +type ClientConfig struct { + Servers []string // servers to use + Search []string // suffixes to append to local name + Port string // what port to use + Ndots int // number of dots in name to trigger absolute lookup + Timeout int // seconds before giving up on packet + Attempts int // lost packets before giving up on server, not used in the package dns +} + +// ClientConfigFromFile parses a resolv.conf(5) like file and returns +// a *ClientConfig. +func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) { + file, err := os.Open(resolvconf) + if err != nil { + return nil, err + } + defer file.Close() + return ClientConfigFromReader(file) +} + +// ClientConfigFromReader works like ClientConfigFromFile but takes an io.Reader as argument +func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) { + c := new(ClientConfig) + scanner := bufio.NewScanner(resolvconf) + c.Servers = make([]string, 0) + c.Search = make([]string, 0) + c.Port = "53" + c.Ndots = 1 + c.Timeout = 5 + c.Attempts = 2 + + for scanner.Scan() { + if err := scanner.Err(); err != nil { + return nil, err + } + line := scanner.Text() + f := strings.Fields(line) + if len(f) < 1 { + continue + } + switch f[0] { + case "nameserver": // add one name server + if len(f) > 1 { + // One more check: make sure server name is + // just an IP address. Otherwise we need DNS + // to look it up. 
+ name := f[1] + c.Servers = append(c.Servers, name) + } + + case "domain": // set search path to just this domain + if len(f) > 1 { + c.Search = make([]string, 1) + c.Search[0] = f[1] + } else { + c.Search = make([]string, 0) + } + + case "search": // set search path to given servers + c.Search = make([]string, len(f)-1) + for i := 0; i < len(c.Search); i++ { + c.Search[i] = f[i+1] + } + + case "options": // magic options + for i := 1; i < len(f); i++ { + s := f[i] + switch { + case len(s) >= 6 && s[:6] == "ndots:": + n, _ := strconv.Atoi(s[6:]) + if n < 0 { + n = 0 + } else if n > 15 { + n = 15 + } + c.Ndots = n + case len(s) >= 8 && s[:8] == "timeout:": + n, _ := strconv.Atoi(s[8:]) + if n < 1 { + n = 1 + } + c.Timeout = n + case len(s) >= 9 && s[:9] == "attempts:": + n, _ := strconv.Atoi(s[9:]) + if n < 1 { + n = 1 + } + c.Attempts = n + case s == "rotate": + /* not imp */ + } + } + } + } + return c, nil +} + +// NameList returns all of the names that should be queried based on the +// config. It is based off of go's net/dns name building, but it does not +// check the length of the resulting names. +func (c *ClientConfig) NameList(name string) []string { + // if this domain is already fully qualified, no append needed. + if IsFqdn(name) { + return []string{name} + } + + // Check to see if the name has more labels than Ndots. Do this before making + // the domain fully qualified. + hasNdots := CountLabel(name) > c.Ndots + // Make the domain fully qualified. + name = Fqdn(name) + + // Make a list of names based off search. + names := []string{} + + // If name has enough dots, try that first. + if hasNdots { + names = append(names, name) + } + for _, s := range c.Search { + names = append(names, Fqdn(name+s)) + } + // If we didn't have enough dots, try after suffixes. + if !hasNdots { + names = append(names, name) + } + return names +} diff --git a/vendor/github.com/miekg/dns/dane.go b/vendor/github.com/miekg/dns/dane.go new file mode 100644 index 00000000000..8c4a14ef190 --- /dev/null +++ b/vendor/github.com/miekg/dns/dane.go @@ -0,0 +1,43 @@ +package dns + +import ( + "crypto/sha256" + "crypto/sha512" + "crypto/x509" + "encoding/hex" + "errors" +) + +// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records. +func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) { + switch matchingType { + case 0: + switch selector { + case 0: + return hex.EncodeToString(cert.Raw), nil + case 1: + return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil + } + case 1: + h := sha256.New() + switch selector { + case 0: + h.Write(cert.Raw) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + h.Write(cert.RawSubjectPublicKeyInfo) + return hex.EncodeToString(h.Sum(nil)), nil + } + case 2: + h := sha512.New() + switch selector { + case 0: + h.Write(cert.Raw) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + h.Write(cert.RawSubjectPublicKeyInfo) + return hex.EncodeToString(h.Sum(nil)), nil + } + } + return "", errors.New("dns: bad MatchingType or Selector") +} diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go new file mode 100644 index 00000000000..14e18b0b38f --- /dev/null +++ b/vendor/github.com/miekg/dns/defaults.go @@ -0,0 +1,288 @@ +package dns + +import ( + "errors" + "net" + "strconv" +) + +const hexDigit = "0123456789abcdef" + +// Everything is assumed in ClassINET. + +// SetReply creates a reply message from a request message. 
+func (dns *Msg) SetReply(request *Msg) *Msg { + dns.Id = request.Id + dns.Response = true + dns.Opcode = request.Opcode + if dns.Opcode == OpcodeQuery { + dns.RecursionDesired = request.RecursionDesired // Copy rd bit + dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit + } + dns.Rcode = RcodeSuccess + if len(request.Question) > 0 { + dns.Question = make([]Question, 1) + dns.Question[0] = request.Question[0] + } + return dns +} + +// SetQuestion creates a question message, it sets the Question +// section, generates an Id and sets the RecursionDesired (RD) +// bit to true. +func (dns *Msg) SetQuestion(z string, t uint16) *Msg { + dns.Id = Id() + dns.RecursionDesired = true + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, t, ClassINET} + return dns +} + +// SetNotify creates a notify message, it sets the Question +// section, generates an Id and sets the Authoritative (AA) +// bit to true. +func (dns *Msg) SetNotify(z string) *Msg { + dns.Opcode = OpcodeNotify + dns.Authoritative = true + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeSOA, ClassINET} + return dns +} + +// SetRcode creates an error message suitable for the request. +func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg { + dns.SetReply(request) + dns.Rcode = rcode + return dns +} + +// SetRcodeFormatError creates a message with FormError set. +func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg { + dns.Rcode = RcodeFormatError + dns.Opcode = OpcodeQuery + dns.Response = true + dns.Authoritative = false + dns.Id = request.Id + return dns +} + +// SetUpdate makes the message a dynamic update message. It +// sets the ZONE section to: z, TypeSOA, ClassINET. +func (dns *Msg) SetUpdate(z string) *Msg { + dns.Id = Id() + dns.Response = false + dns.Opcode = OpcodeUpdate + dns.Compress = false // BIND9 cannot handle compression + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeSOA, ClassINET} + return dns +} + +// SetIxfr creates message for requesting an IXFR. +func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg { + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Ns = make([]RR, 1) + s := new(SOA) + s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0} + s.Serial = serial + s.Ns = ns + s.Mbox = mbox + dns.Question[0] = Question{z, TypeIXFR, ClassINET} + dns.Ns[0] = s + return dns +} + +// SetAxfr creates message for requesting an AXFR. +func (dns *Msg) SetAxfr(z string) *Msg { + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeAXFR, ClassINET} + return dns +} + +// SetTsig appends a TSIG RR to the message. +// This is only a skeleton TSIG RR that is added as the last RR in the +// additional section. The Tsig is calculated when the message is being send. +func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg { + t := new(TSIG) + t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0} + t.Algorithm = algo + t.Fudge = fudge + t.TimeSigned = uint64(timesigned) + t.OrigId = dns.Id + dns.Extra = append(dns.Extra, t) + return dns +} + +// SetEdns0 appends a EDNS0 OPT RR to the message. +// TSIG should always the last RR in a message. +func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg { + e := new(OPT) + e.Hdr.Name = "." + e.Hdr.Rrtype = TypeOPT + e.SetUDPSize(udpsize) + if do { + e.SetDo() + } + dns.Extra = append(dns.Extra, e) + return dns +} + +// IsTsig checks if the message has a TSIG record as the last record +// in the additional section. 
It returns the TSIG record found or nil. +func (dns *Msg) IsTsig() *TSIG { + if len(dns.Extra) > 0 { + if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG { + return dns.Extra[len(dns.Extra)-1].(*TSIG) + } + } + return nil +} + +// IsEdns0 checks if the message has a EDNS0 (OPT) record, any EDNS0 +// record in the additional section will do. It returns the OPT record +// found or nil. +func (dns *Msg) IsEdns0() *OPT { + // EDNS0 is at the end of the additional section, start there. + // We might want to change this to *only* look at the last two + // records. So we see TSIG and/or OPT - this a slightly bigger + // change though. + for i := len(dns.Extra) - 1; i >= 0; i-- { + if dns.Extra[i].Header().Rrtype == TypeOPT { + return dns.Extra[i].(*OPT) + } + } + return nil +} + +// IsDomainName checks if s is a valid domain name, it returns the number of +// labels and true, when a domain name is valid. Note that non fully qualified +// domain name is considered valid, in this case the last label is counted in +// the number of labels. When false is returned the number of labels is not +// defined. Also note that this function is extremely liberal; almost any +// string is a valid domain name as the DNS is 8 bit protocol. It checks if each +// label fits in 63 characters, but there is no length check for the entire +// string s. I.e. a domain name longer than 255 characters is considered valid. +func IsDomainName(s string) (labels int, ok bool) { + _, labels, err := packDomainName(s, nil, 0, nil, false) + return labels, err == nil +} + +// IsSubDomain checks if child is indeed a child of the parent. If child and parent +// are the same domain true is returned as well. +func IsSubDomain(parent, child string) bool { + // Entire child is contained in parent + return CompareDomainName(parent, child) == CountLabel(parent) +} + +// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet. +// The checking is performed on the binary payload. +func IsMsg(buf []byte) error { + // Header + if len(buf) < 12 { + return errors.New("dns: bad message header") + } + // Header: Opcode + // TODO(miek): more checks here, e.g. check all header bits. + return nil +} + +// IsFqdn checks if a domain name is fully qualified. +func IsFqdn(s string) bool { + l := len(s) + if l == 0 { + return false + } + return s[l-1] == '.' +} + +// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181. +// This means the RRs need to have the same type, name, and class. Returns true +// if the RR set is valid, otherwise false. +func IsRRset(rrset []RR) bool { + if len(rrset) == 0 { + return false + } + if len(rrset) == 1 { + return true + } + rrHeader := rrset[0].Header() + rrType := rrHeader.Rrtype + rrClass := rrHeader.Class + rrName := rrHeader.Name + + for _, rr := range rrset[1:] { + curRRHeader := rr.Header() + if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName { + // Mismatch between the records, so this is not a valid rrset for + //signing/verifying + return false + } + } + + return true +} + +// Fqdn return the fully qualified domain name from s. +// If s is already fully qualified, it behaves as the identity function. +func Fqdn(s string) string { + if IsFqdn(s) { + return s + } + return s + "." +} + +// Copied from the official Go code. + +// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP +// address suitable for reverse DNS (PTR) record lookups or an error if it fails +// to parse the IP address. 
+func ReverseAddr(addr string) (arpa string, err error) { + ip := net.ParseIP(addr) + if ip == nil { + return "", &Error{err: "unrecognized address: " + addr} + } + if ip.To4() != nil { + return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." + + strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil + } + // Must be IPv6 + buf := make([]byte, 0, len(ip)*4+len("ip6.arpa.")) + // Add it, in reverse, to the buffer + for i := len(ip) - 1; i >= 0; i-- { + v := ip[i] + buf = append(buf, hexDigit[v&0xF]) + buf = append(buf, '.') + buf = append(buf, hexDigit[v>>4]) + buf = append(buf, '.') + } + // Append "ip6.arpa." and return (buf already has the final .) + buf = append(buf, "ip6.arpa."...) + return string(buf), nil +} + +// String returns the string representation for the type t. +func (t Type) String() string { + if t1, ok := TypeToString[uint16(t)]; ok { + return t1 + } + return "TYPE" + strconv.Itoa(int(t)) +} + +// String returns the string representation for the class c. +func (c Class) String() string { + if s, ok := ClassToString[uint16(c)]; ok { + // Only emit mnemonics when they are unambiguous, specically ANY is in both. + if _, ok := StringToType[s]; !ok { + return s + } + } + return "CLASS" + strconv.Itoa(int(c)) +} + +// String returns the string representation for the name n. +func (n Name) String() string { + return sprintName(string(n)) +} diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go new file mode 100644 index 00000000000..e7557f51a81 --- /dev/null +++ b/vendor/github.com/miekg/dns/dns.go @@ -0,0 +1,97 @@ +package dns + +import "strconv" + +const ( + year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits. + defaultTtl = 3600 // Default internal TTL. + + // DefaultMsgSize is the standard default for messages larger than 512 bytes. + DefaultMsgSize = 4096 + // MinMsgSize is the minimal size of a DNS packet. + MinMsgSize = 512 + // MaxMsgSize is the largest possible DNS packet. + MaxMsgSize = 65535 +) + +// Error represents a DNS error. +type Error struct{ err string } + +func (e *Error) Error() string { + if e == nil { + return "dns: " + } + return "dns: " + e.err +} + +// An RR represents a resource record. +type RR interface { + // Header returns the header of an resource record. The header contains + // everything up to the rdata. + Header() *RR_Header + // String returns the text representation of the resource record. + String() string + + // copy returns a copy of the RR + copy() RR + // len returns the length (in octets) of the uncompressed RR in wire format. + len() int + // pack packs an RR into wire format. + pack([]byte, int, map[string]int, bool) (int, error) +} + +// RR_Header is the header all DNS resource records share. +type RR_Header struct { + Name string `dns:"cdomain-name"` + Rrtype uint16 + Class uint16 + Ttl uint32 + Rdlength uint16 // Length of data after header. +} + +// Header returns itself. This is here to make RR_Header implements the RR interface. +func (h *RR_Header) Header() *RR_Header { return h } + +// Just to implement the RR interface. 
+func (h *RR_Header) copy() RR { return nil } + +func (h *RR_Header) String() string { + var s string + + if h.Rrtype == TypeOPT { + s = ";" + // and maybe other things + } + + s += sprintName(h.Name) + "\t" + s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" + s += Class(h.Class).String() + "\t" + s += Type(h.Rrtype).String() + "\t" + return s +} + +func (h *RR_Header) len() int { + l := len(h.Name) + 1 + l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2) + return l +} + +// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597. +func (rr *RFC3597) ToRFC3597(r RR) error { + buf := make([]byte, r.len()*2) + off, err := PackRR(r, buf, 0, nil, false) + if err != nil { + return err + } + buf = buf[:off] + if int(r.Header().Rdlength) > off { + return ErrBuf + } + + rfc3597, _, err := unpackRFC3597(*r.Header(), buf, off-int(r.Header().Rdlength)) + if err != nil { + return err + } + *rr = *rfc3597.(*RFC3597) + return nil +} diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go new file mode 100644 index 00000000000..7e6bac4287e --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -0,0 +1,785 @@ +package dns + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + _ "crypto/md5" + "crypto/rand" + "crypto/rsa" + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/asn1" + "encoding/binary" + "encoding/hex" + "math/big" + "sort" + "strings" + "time" + + "golang.org/x/crypto/ed25519" +) + +// DNSSEC encryption algorithm codes. +const ( + _ uint8 = iota + RSAMD5 + DH + DSA + _ // Skip 4, RFC 6725, section 2.1 + RSASHA1 + DSANSEC3SHA1 + RSASHA1NSEC3SHA1 + RSASHA256 + _ // Skip 9, RFC 6725, section 2.1 + RSASHA512 + _ // Skip 11, RFC 6725, section 2.1 + ECCGOST + ECDSAP256SHA256 + ECDSAP384SHA384 + ED25519 + ED448 + INDIRECT uint8 = 252 + PRIVATEDNS uint8 = 253 // Private (experimental keys) + PRIVATEOID uint8 = 254 +) + +// AlgorithmToString is a map of algorithm IDs to algorithm names. +var AlgorithmToString = map[uint8]string{ + RSAMD5: "RSAMD5", + DH: "DH", + DSA: "DSA", + RSASHA1: "RSASHA1", + DSANSEC3SHA1: "DSA-NSEC3-SHA1", + RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1", + RSASHA256: "RSASHA256", + RSASHA512: "RSASHA512", + ECCGOST: "ECC-GOST", + ECDSAP256SHA256: "ECDSAP256SHA256", + ECDSAP384SHA384: "ECDSAP384SHA384", + ED25519: "ED25519", + ED448: "ED448", + INDIRECT: "INDIRECT", + PRIVATEDNS: "PRIVATEDNS", + PRIVATEOID: "PRIVATEOID", +} + +// StringToAlgorithm is the reverse of AlgorithmToString. +var StringToAlgorithm = reverseInt8(AlgorithmToString) + +// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's. +var AlgorithmToHash = map[uint8]crypto.Hash{ + RSAMD5: crypto.MD5, // Deprecated in RFC 6725 + DSA: crypto.SHA1, + RSASHA1: crypto.SHA1, + RSASHA1NSEC3SHA1: crypto.SHA1, + RSASHA256: crypto.SHA256, + ECDSAP256SHA256: crypto.SHA256, + ECDSAP384SHA384: crypto.SHA384, + RSASHA512: crypto.SHA512, + ED25519: crypto.Hash(0), +} + +// DNSSEC hashing algorithm codes. +const ( + _ uint8 = iota + SHA1 // RFC 4034 + SHA256 // RFC 4509 + GOST94 // RFC 5933 + SHA384 // Experimental + SHA512 // Experimental +) + +// HashToString is a map of hash IDs to names. +var HashToString = map[uint8]string{ + SHA1: "SHA1", + SHA256: "SHA256", + GOST94: "GOST94", + SHA384: "SHA384", + SHA512: "SHA512", +} + +// StringToHash is a map of names to hash IDs. +var StringToHash = reverseInt8(HashToString) + +// DNSKEY flag values. 
+const ( + SEP = 1 + REVOKE = 1 << 7 + ZONE = 1 << 8 +) + +// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing. +type rrsigWireFmt struct { + TypeCovered uint16 + Algorithm uint8 + Labels uint8 + OrigTtl uint32 + Expiration uint32 + Inception uint32 + KeyTag uint16 + SignerName string `dns:"domain-name"` + /* No Signature */ +} + +// Used for converting DNSKEY's rdata to wirefmt. +type dnskeyWireFmt struct { + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` + /* Nothing is left out */ +} + +func divRoundUp(a, b int) int { + return (a + b - 1) / b +} + +// KeyTag calculates the keytag (or key-id) of the DNSKEY. +func (k *DNSKEY) KeyTag() uint16 { + if k == nil { + return 0 + } + var keytag int + switch k.Algorithm { + case RSAMD5: + // Look at the bottom two bytes of the modules, which the last + // item in the pubkey. We could do this faster by looking directly + // at the base64 values. But I'm lazy. + modulus, _ := fromBase64([]byte(k.PublicKey)) + if len(modulus) > 1 { + x := binary.BigEndian.Uint16(modulus[len(modulus)-2:]) + keytag = int(x) + } + default: + keywire := new(dnskeyWireFmt) + keywire.Flags = k.Flags + keywire.Protocol = k.Protocol + keywire.Algorithm = k.Algorithm + keywire.PublicKey = k.PublicKey + wire := make([]byte, DefaultMsgSize) + n, err := packKeyWire(keywire, wire) + if err != nil { + return 0 + } + wire = wire[:n] + for i, v := range wire { + if i&1 != 0 { + keytag += int(v) // must be larger than uint32 + } else { + keytag += int(v) << 8 + } + } + keytag += (keytag >> 16) & 0xFFFF + keytag &= 0xFFFF + } + return uint16(keytag) +} + +// ToDS converts a DNSKEY record to a DS record. +func (k *DNSKEY) ToDS(h uint8) *DS { + if k == nil { + return nil + } + ds := new(DS) + ds.Hdr.Name = k.Hdr.Name + ds.Hdr.Class = k.Hdr.Class + ds.Hdr.Rrtype = TypeDS + ds.Hdr.Ttl = k.Hdr.Ttl + ds.Algorithm = k.Algorithm + ds.DigestType = h + ds.KeyTag = k.KeyTag() + + keywire := new(dnskeyWireFmt) + keywire.Flags = k.Flags + keywire.Protocol = k.Protocol + keywire.Algorithm = k.Algorithm + keywire.PublicKey = k.PublicKey + wire := make([]byte, DefaultMsgSize) + n, err := packKeyWire(keywire, wire) + if err != nil { + return nil + } + wire = wire[:n] + + owner := make([]byte, 255) + off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false) + if err1 != nil { + return nil + } + owner = owner[:off] + // RFC4034: + // digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA); + // "|" denotes concatenation + // DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key. + + var hash crypto.Hash + switch h { + case SHA1: + hash = crypto.SHA1 + case SHA256: + hash = crypto.SHA256 + case SHA384: + hash = crypto.SHA384 + case SHA512: + hash = crypto.SHA512 + default: + return nil + } + + s := hash.New() + s.Write(owner) + s.Write(wire) + ds.Digest = hex.EncodeToString(s.Sum(nil)) + return ds +} + +// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record. +func (k *DNSKEY) ToCDNSKEY() *CDNSKEY { + c := &CDNSKEY{DNSKEY: *k} + c.Hdr = k.Hdr + c.Hdr.Rrtype = TypeCDNSKEY + return c +} + +// ToCDS converts a DS record to a CDS record. +func (d *DS) ToCDS() *CDS { + c := &CDS{DS: *d} + c.Hdr = d.Hdr + c.Hdr.Rrtype = TypeCDS + return c +} + +// Sign signs an RRSet. The signature needs to be filled in with the values: +// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied +// from the RRset. Sign returns a non-nill error when the signing went OK. 
+// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non +// zero, it is used as-is, otherwise the TTL of the RRset is used as the +// OrigTTL. +func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { + if k == nil { + return ErrPrivKey + } + // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + + rr.Hdr.Rrtype = TypeRRSIG + rr.Hdr.Name = rrset[0].Header().Name + rr.Hdr.Class = rrset[0].Header().Class + if rr.OrigTtl == 0 { // If set don't override + rr.OrigTtl = rrset[0].Header().Ttl + } + rr.TypeCovered = rrset[0].Header().Rrtype + rr.Labels = uint8(CountLabel(rrset[0].Header().Name)) + + if strings.HasPrefix(rrset[0].Header().Name, "*") { + rr.Labels-- // wildcard, remove from label count + } + + sigwire := new(rrsigWireFmt) + sigwire.TypeCovered = rr.TypeCovered + sigwire.Algorithm = rr.Algorithm + sigwire.Labels = rr.Labels + sigwire.OrigTtl = rr.OrigTtl + sigwire.Expiration = rr.Expiration + sigwire.Inception = rr.Inception + sigwire.KeyTag = rr.KeyTag + // For signing, lowercase this name + sigwire.SignerName = strings.ToLower(rr.SignerName) + + // Create the desired binary blob + signdata := make([]byte, DefaultMsgSize) + n, err := packSigWire(sigwire, signdata) + if err != nil { + return err + } + signdata = signdata[:n] + wire, err := rawSignatureData(rrset, rr) + if err != nil { + return err + } + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return ErrAlg + } + + switch rr.Algorithm { + case ED25519: + // ed25519 signs the raw message and performs hashing internally. + // All other supported signature schemes operate over the pre-hashed + // message, and thus ed25519 must be handled separately here. + // + // The raw message is passed directly into sign and crypto.Hash(0) is + // used to signal to the crypto.Signer that the data has not been hashed. + signature, err := sign(k, append(signdata, wire...), crypto.Hash(0), rr.Algorithm) + if err != nil { + return err + } + + rr.Signature = toBase64(signature) + default: + h := hash.New() + h.Write(signdata) + h.Write(wire) + + signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm) + if err != nil { + return err + } + + rr.Signature = toBase64(signature) + } + + return nil +} + +func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) { + signature, err := k.Sign(rand.Reader, hashed, hash) + if err != nil { + return nil, err + } + + switch alg { + case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512: + return signature, nil + + case ECDSAP256SHA256, ECDSAP384SHA384: + ecdsaSignature := &struct { + R, S *big.Int + }{} + if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil { + return nil, err + } + + var intlen int + switch alg { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + + signature := intToBytes(ecdsaSignature.R, intlen) + signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...) + return signature, nil + + // There is no defined interface for what a DSA backed crypto.Signer returns + case DSA, DSANSEC3SHA1: + // t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8) + // signature := []byte{byte(t)} + // signature = append(signature, intToBytes(r1, 20)...) + // signature = append(signature, intToBytes(s1, 20)...) + // rr.Signature = signature + + case ED25519: + return signature, nil + } + + return nil, ErrAlg +} + +// Verify validates an RRSet with the signature and key. 
This is only the +// cryptographic test, the signature validity period must be checked separately. +// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work. +func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { + // First the easy checks + if !IsRRset(rrset) { + return ErrRRset + } + if rr.KeyTag != k.KeyTag() { + return ErrKey + } + if rr.Hdr.Class != k.Hdr.Class { + return ErrKey + } + if rr.Algorithm != k.Algorithm { + return ErrKey + } + if strings.ToLower(rr.SignerName) != strings.ToLower(k.Hdr.Name) { + return ErrKey + } + if k.Protocol != 3 { + return ErrKey + } + + // IsRRset checked that we have at least one RR and that the RRs in + // the set have consistent type, class, and name. Also check that type and + // class matches the RRSIG record. + if rrset[0].Header().Class != rr.Hdr.Class { + return ErrRRset + } + if rrset[0].Header().Rrtype != rr.TypeCovered { + return ErrRRset + } + + // RFC 4035 5.3.2. Reconstructing the Signed Data + // Copy the sig, except the rrsig data + sigwire := new(rrsigWireFmt) + sigwire.TypeCovered = rr.TypeCovered + sigwire.Algorithm = rr.Algorithm + sigwire.Labels = rr.Labels + sigwire.OrigTtl = rr.OrigTtl + sigwire.Expiration = rr.Expiration + sigwire.Inception = rr.Inception + sigwire.KeyTag = rr.KeyTag + sigwire.SignerName = strings.ToLower(rr.SignerName) + // Create the desired binary blob + signeddata := make([]byte, DefaultMsgSize) + n, err := packSigWire(sigwire, signeddata) + if err != nil { + return err + } + signeddata = signeddata[:n] + wire, err := rawSignatureData(rrset, rr) + if err != nil { + return err + } + + sigbuf := rr.sigBuf() // Get the binary signature data + if rr.Algorithm == PRIVATEDNS { // PRIVATEOID + // TODO(miek) + // remove the domain name and assume its ours? + } + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return ErrAlg + } + + switch rr.Algorithm { + case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5: + // TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere?? + pubkey := k.publicKeyRSA() // Get the key + if pubkey == nil { + return ErrKey + } + + h := hash.New() + h.Write(signeddata) + h.Write(wire) + return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf) + + case ECDSAP256SHA256, ECDSAP384SHA384: + pubkey := k.publicKeyECDSA() + if pubkey == nil { + return ErrKey + } + + // Split sigbuf into the r and s coordinates + r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2]) + s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:]) + + h := hash.New() + h.Write(signeddata) + h.Write(wire) + if ecdsa.Verify(pubkey, h.Sum(nil), r, s) { + return nil + } + return ErrSig + + case ED25519: + pubkey := k.publicKeyED25519() + if pubkey == nil { + return ErrKey + } + + if ed25519.Verify(pubkey, append(signeddata, wire...), sigbuf) { + return nil + } + return ErrSig + + default: + return ErrAlg + } +} + +// ValidityPeriod uses RFC1982 serial arithmetic to calculate +// if a signature period is valid. If t is the zero time, the +// current time is taken other t is. Returns true if the signature +// is valid at the given time, otherwise returns false. 
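+// A minimal usage sketch (rr is assumed to be an *RRSIG and key the matching
+// *DNSKEY, both obtained elsewhere):
+//
+//	if err := rr.Verify(key, rrset); err == nil && rr.ValidityPeriod(time.Time{}) {
+//		// rrset verifies and the signature is currently within its validity window
+//	}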
+func (rr *RRSIG) ValidityPeriod(t time.Time) bool { + var utc int64 + if t.IsZero() { + utc = time.Now().UTC().Unix() + } else { + utc = t.UTC().Unix() + } + modi := (int64(rr.Inception) - utc) / year68 + mode := (int64(rr.Expiration) - utc) / year68 + ti := int64(rr.Inception) + (modi * year68) + te := int64(rr.Expiration) + (mode * year68) + return ti <= utc && utc <= te +} + +// Return the signatures base64 encodedig sigdata as a byte slice. +func (rr *RRSIG) sigBuf() []byte { + sigbuf, err := fromBase64([]byte(rr.Signature)) + if err != nil { + return nil + } + return sigbuf +} + +// publicKeyRSA returns the RSA public key from a DNSKEY record. +func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + + // RFC 2537/3110, section 2. RSA Public KEY Resource Records + // Length is in the 0th byte, unless its zero, then it + // it in bytes 1 and 2 and its a 16 bit number + explen := uint16(keybuf[0]) + keyoff := 1 + if explen == 0 { + explen = uint16(keybuf[1])<<8 | uint16(keybuf[2]) + keyoff = 3 + } + if explen > 4 { + // Larger exponent than supported by the crypto package. + return nil + } + pubkey := new(rsa.PublicKey) + + pubkey.N = big.NewInt(0) + expo := uint64(0) + for i := 0; i < int(explen); i++ { + expo <<= 8 + expo |= uint64(keybuf[keyoff+i]) + } + if expo > 1<<31-1 { + // Larger exponent than supported by the crypto package. + return nil + } + pubkey.E = int(expo) + + pubkey.N.SetBytes(keybuf[keyoff+int(explen):]) + return pubkey +} + +// publicKeyECDSA returns the Curve public key from the DNSKEY record. +func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + pubkey := new(ecdsa.PublicKey) + switch k.Algorithm { + case ECDSAP256SHA256: + pubkey.Curve = elliptic.P256() + if len(keybuf) != 64 { + // wrongly encoded key + return nil + } + case ECDSAP384SHA384: + pubkey.Curve = elliptic.P384() + if len(keybuf) != 96 { + // Wrongly encoded key + return nil + } + } + pubkey.X = big.NewInt(0) + pubkey.X.SetBytes(keybuf[:len(keybuf)/2]) + pubkey.Y = big.NewInt(0) + pubkey.Y.SetBytes(keybuf[len(keybuf)/2:]) + return pubkey +} + +func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + if len(keybuf) < 22 { + return nil + } + t, keybuf := int(keybuf[0]), keybuf[1:] + size := 64 + t*8 + q, keybuf := keybuf[:20], keybuf[20:] + if len(keybuf) != 3*size { + return nil + } + p, keybuf := keybuf[:size], keybuf[size:] + g, y := keybuf[:size], keybuf[size:] + pubkey := new(dsa.PublicKey) + pubkey.Parameters.Q = big.NewInt(0).SetBytes(q) + pubkey.Parameters.P = big.NewInt(0).SetBytes(p) + pubkey.Parameters.G = big.NewInt(0).SetBytes(g) + pubkey.Y = big.NewInt(0).SetBytes(y) + return pubkey +} + +func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + if len(keybuf) != ed25519.PublicKeySize { + return nil + } + return keybuf +} + +type wireSlice [][]byte + +func (p wireSlice) Len() int { return len(p) } +func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p wireSlice) Less(i, j int) bool { + _, ioff, _ := UnpackDomainName(p[i], 0) + _, joff, _ := UnpackDomainName(p[j], 0) + return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0 +} + +// Return the raw signature data. 
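+// The RRs are first brought into canonical form (RFC 4034, section 6): the
+// original TTL is restored, owner and embedded domain names are lowercased,
+// the wire encodings are sorted, and exact duplicates are skipped.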
+func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) { + wires := make(wireSlice, len(rrset)) + for i, r := range rrset { + r1 := r.copy() + r1.Header().Ttl = s.OrigTtl + labels := SplitDomainName(r1.Header().Name) + // 6.2. Canonical RR Form. (4) - wildcards + if len(labels) > int(s.Labels) { + // Wildcard + r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "." + } + // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase + r1.Header().Name = strings.ToLower(r1.Header().Name) + // 6.2. Canonical RR Form. (3) - domain rdata to lowercase. + // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR, + // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX, + // SRV, DNAME, A6 + // + // RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC): + // Section 6.2 of [RFC4034] also erroneously lists HINFO as a record + // that needs conversion to lowercase, and twice at that. Since HINFO + // records contain no domain names, they are not subject to case + // conversion. + switch x := r1.(type) { + case *NS: + x.Ns = strings.ToLower(x.Ns) + case *MD: + x.Md = strings.ToLower(x.Md) + case *MF: + x.Mf = strings.ToLower(x.Mf) + case *CNAME: + x.Target = strings.ToLower(x.Target) + case *SOA: + x.Ns = strings.ToLower(x.Ns) + x.Mbox = strings.ToLower(x.Mbox) + case *MB: + x.Mb = strings.ToLower(x.Mb) + case *MG: + x.Mg = strings.ToLower(x.Mg) + case *MR: + x.Mr = strings.ToLower(x.Mr) + case *PTR: + x.Ptr = strings.ToLower(x.Ptr) + case *MINFO: + x.Rmail = strings.ToLower(x.Rmail) + x.Email = strings.ToLower(x.Email) + case *MX: + x.Mx = strings.ToLower(x.Mx) + case *RP: + x.Mbox = strings.ToLower(x.Mbox) + x.Txt = strings.ToLower(x.Txt) + case *AFSDB: + x.Hostname = strings.ToLower(x.Hostname) + case *RT: + x.Host = strings.ToLower(x.Host) + case *SIG: + x.SignerName = strings.ToLower(x.SignerName) + case *PX: + x.Map822 = strings.ToLower(x.Map822) + x.Mapx400 = strings.ToLower(x.Mapx400) + case *NAPTR: + x.Replacement = strings.ToLower(x.Replacement) + case *KX: + x.Exchanger = strings.ToLower(x.Exchanger) + case *SRV: + x.Target = strings.ToLower(x.Target) + case *DNAME: + x.Target = strings.ToLower(x.Target) + } + // 6.2. Canonical RR Form. (5) - origTTL + wire := make([]byte, r1.len()+1) // +1 to be safe(r) + off, err1 := PackRR(r1, wire, 0, nil, false) + if err1 != nil { + return nil, err1 + } + wire = wire[:off] + wires[i] = wire + } + sort.Sort(wires) + for i, wire := range wires { + if i > 0 && bytes.Equal(wire, wires[i-1]) { + continue + } + buf = append(buf, wire...) 
+ } + return buf, nil +} + +func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) { + // copied from zmsg.go RRSIG packing + off, err := packUint16(sw.TypeCovered, msg, 0) + if err != nil { + return off, err + } + off, err = packUint8(sw.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(sw.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(sw.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(sw.SignerName, msg, off, nil, false) + if err != nil { + return off, err + } + return off, nil +} + +func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) { + // copied from zmsg.go DNSKEY packing + off, err := packUint16(dw.Flags, msg, 0) + if err != nil { + return off, err + } + off, err = packUint8(dw.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(dw.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(dw.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go new file mode 100644 index 00000000000..33e913ac527 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_keygen.go @@ -0,0 +1,178 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "math/big" + + "golang.org/x/crypto/ed25519" +) + +// Generate generates a DNSKEY of the given bit size. +// The public part is put inside the DNSKEY record. +// The Algorithm in the key must be set as this will define +// what kind of DNSKEY will be generated. +// The ECDSA algorithms imply a fixed keysize, in that case +// bits should be set to the size of the algorithm. 
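+// A minimal sketch, assuming a zone "example.org." and ECDSAP256SHA256 (all
+// names here are illustrative):
+//
+//	k := &dns.DNSKEY{
+//		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
+//		Flags:     257, // ZONE|SEP, i.e. a key-signing key
+//		Protocol:  3,
+//		Algorithm: dns.ECDSAP256SHA256,
+//	}
+//	priv, err := k.Generate(256)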
+func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) { + switch k.Algorithm { + case DSA, DSANSEC3SHA1: + if bits != 1024 { + return nil, ErrKeySize + } + case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1: + if bits < 512 || bits > 4096 { + return nil, ErrKeySize + } + case RSASHA512: + if bits < 1024 || bits > 4096 { + return nil, ErrKeySize + } + case ECDSAP256SHA256: + if bits != 256 { + return nil, ErrKeySize + } + case ECDSAP384SHA384: + if bits != 384 { + return nil, ErrKeySize + } + case ED25519: + if bits != 256 { + return nil, ErrKeySize + } + } + + switch k.Algorithm { + case DSA, DSANSEC3SHA1: + params := new(dsa.Parameters) + if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil { + return nil, err + } + priv := new(dsa.PrivateKey) + priv.PublicKey.Parameters = *params + err := dsa.GenerateKey(priv, rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y) + return priv, nil + case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1: + priv, err := rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N) + return priv, nil + case ECDSAP256SHA256, ECDSAP384SHA384: + var c elliptic.Curve + switch k.Algorithm { + case ECDSAP256SHA256: + c = elliptic.P256() + case ECDSAP384SHA384: + c = elliptic.P384() + } + priv, err := ecdsa.GenerateKey(c, rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y) + return priv, nil + case ED25519: + pub, priv, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyED25519(pub) + return priv, nil + default: + return nil, ErrAlg + } +} + +// Set the public key (the value E and N) +func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool { + if _E == 0 || _N == nil { + return false + } + buf := exponentToBuf(_E) + buf = append(buf, _N.Bytes()...) + k.PublicKey = toBase64(buf) + return true +} + +// Set the public key for Elliptic Curves +func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool { + if _X == nil || _Y == nil { + return false + } + var intlen int + switch k.Algorithm { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen)) + return true +} + +// Set the public key for DSA +func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool { + if _Q == nil || _P == nil || _G == nil || _Y == nil { + return false + } + buf := dsaToBuf(_Q, _P, _G, _Y) + k.PublicKey = toBase64(buf) + return true +} + +// Set the public key for Ed25519 +func (k *DNSKEY) setPublicKeyED25519(_K ed25519.PublicKey) bool { + if _K == nil { + return false + } + k.PublicKey = toBase64(_K) + return true +} + +// Set the public key (the values E and N) for RSA +// RFC 3110: Section 2. RSA Public KEY Resource Records +func exponentToBuf(_E int) []byte { + var buf []byte + i := big.NewInt(int64(_E)).Bytes() + if len(i) < 256 { + buf = make([]byte, 1, 1+len(i)) + buf[0] = uint8(len(i)) + } else { + buf = make([]byte, 3, 3+len(i)) + buf[0] = 0 + buf[1] = uint8(len(i) >> 8) + buf[2] = uint8(len(i)) + } + buf = append(buf, i...) + return buf +} + +// Set the public key for X and Y for Curve. The two +// values are just concatenated. +func curveToBuf(_X, _Y *big.Int, intlen int) []byte { + buf := intToBytes(_X, intlen) + buf = append(buf, intToBytes(_Y, intlen)...) + return buf +} + +// Set the public key for X and Y for Curve. 
The two +// values are just concatenated. +func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte { + t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8) + buf := []byte{byte(t)} + buf = append(buf, intToBytes(_Q, 20)...) + buf = append(buf, intToBytes(_P, 64+t*8)...) + buf = append(buf, intToBytes(_G, 64+t*8)...) + buf = append(buf, intToBytes(_Y, 64+t*8)...) + return buf +} diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go new file mode 100644 index 00000000000..e2d9d8f924f --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go @@ -0,0 +1,297 @@ +package dns + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "io" + "math/big" + "strconv" + "strings" + + "golang.org/x/crypto/ed25519" +) + +// NewPrivateKey returns a PrivateKey by parsing the string s. +// s should be in the same form of the BIND private key files. +func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) { + if s == "" || s[len(s)-1] != '\n' { // We need a closing newline + return k.ReadPrivateKey(strings.NewReader(s+"\n"), "") + } + return k.ReadPrivateKey(strings.NewReader(s), "") +} + +// ReadPrivateKey reads a private key from the io.Reader q. The string file is +// only used in error reporting. +// The public key must be known, because some cryptographic algorithms embed +// the public inside the privatekey. +func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) { + m, err := parseKey(q, file) + if m == nil { + return nil, err + } + if _, ok := m["private-key-format"]; !ok { + return nil, ErrPrivKey + } + if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" { + return nil, ErrPrivKey + } + // TODO(mg): check if the pubkey matches the private key + algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8) + if err != nil { + return nil, ErrPrivKey + } + switch uint8(algo) { + case DSA: + priv, err := readPrivateKeyDSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyDSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case RSAMD5: + fallthrough + case RSASHA1: + fallthrough + case RSASHA1NSEC3SHA1: + fallthrough + case RSASHA256: + fallthrough + case RSASHA512: + priv, err := readPrivateKeyRSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyRSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case ECCGOST: + return nil, ErrPrivKey + case ECDSAP256SHA256: + fallthrough + case ECDSAP384SHA384: + priv, err := readPrivateKeyECDSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyECDSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case ED25519: + return readPrivateKeyED25519(m) + default: + return nil, ErrPrivKey + } +} + +// Read a private key (file) string and create a public key. Return the private key. 
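+// Note that ReadPrivateKey overwrites whatever public-key fields the helpers
+// below recover from the file with the public key decoded from the DNSKEY
+// record itself, so the DNSKEY and the private-key file must belong together.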
+func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) { + p := new(rsa.PrivateKey) + p.Primes = []*big.Int{nil, nil} + for k, v := range m { + switch k { + case "modulus", "publicexponent", "privateexponent", "prime1", "prime2": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + switch k { + case "modulus": + p.PublicKey.N = big.NewInt(0) + p.PublicKey.N.SetBytes(v1) + case "publicexponent": + i := big.NewInt(0) + i.SetBytes(v1) + p.PublicKey.E = int(i.Int64()) // int64 should be large enough + case "privateexponent": + p.D = big.NewInt(0) + p.D.SetBytes(v1) + case "prime1": + p.Primes[0] = big.NewInt(0) + p.Primes[0].SetBytes(v1) + case "prime2": + p.Primes[1] = big.NewInt(0) + p.Primes[1].SetBytes(v1) + } + case "exponent1", "exponent2", "coefficient": + // not used in Go (yet) + case "created", "publish", "activate": + // not used in Go (yet) + } + } + return p, nil +} + +func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) { + p := new(dsa.PrivateKey) + p.X = big.NewInt(0) + for k, v := range m { + switch k { + case "private_value(x)": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + p.X.SetBytes(v1) + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) { + p := new(ecdsa.PrivateKey) + p.D = big.NewInt(0) + // TODO: validate that the required flags are present + for k, v := range m { + switch k { + case "privatekey": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + p.D.SetBytes(v1) + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) { + var p ed25519.PrivateKey + // TODO: validate that the required flags are present + for k, v := range m { + switch k { + case "privatekey": + p1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + if len(p1) != 32 { + return nil, ErrPrivKey + } + // RFC 8080 and Golang's x/crypto/ed25519 differ as to how the + // private keys are represented. RFC 8080 specifies that private + // keys be stored solely as the seed value (p1 above) while the + // ed25519 package represents them as the seed value concatenated + // to the public key, which is derived from the seed value. + // + // ed25519.GenerateKey reads exactly 32 bytes from the passed in + // io.Reader and uses them as the seed. It also derives the + // public key and produces a compatible private key. + _, p, err = ed25519.GenerateKey(bytes.NewReader(p1)) + if err != nil { + return nil, err + } + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +// parseKey reads a private key from r. It returns a map[string]string, +// with the key-value pairs, or an error when the file is not correct. +func parseKey(r io.Reader, file string) (map[string]string, error) { + s, cancel := scanInit(r) + m := make(map[string]string) + c := make(chan lex) + k := "" + defer func() { + cancel() + // zlexer can send up to two tokens, the next one and possibly 1 remainders. + // Do a non-blocking read. 
+ _, ok := <-c + _, ok = <-c + if !ok { + // too bad + } + }() + // Start the lexer + go klexer(s, c) + for l := range c { + // It should alternate + switch l.value { + case zKey: + k = l.token + case zValue: + if k == "" { + return nil, &ParseError{file, "no private key seen", l} + } + //println("Setting", strings.ToLower(k), "to", l.token, "b") + m[strings.ToLower(k)] = l.token + k = "" + } + } + return m, nil +} + +// klexer scans the sourcefile and returns tokens on the channel c. +func klexer(s *scan, c chan lex) { + var l lex + str := "" // Hold the current read text + commt := false + key := true + x, err := s.tokenText() + defer close(c) + for err == nil { + l.column = s.position.Column + l.line = s.position.Line + switch x { + case ':': + if commt { + break + } + l.token = str + if key { + l.value = zKey + c <- l + // Next token is a space, eat it + s.tokenText() + key = false + str = "" + } else { + l.value = zValue + } + case ';': + commt = true + case '\n': + if commt { + // Reset a comment + commt = false + } + l.value = zValue + l.token = str + c <- l + str = "" + commt = false + key = true + default: + if commt { + break + } + str += string(x) + } + x, err = s.tokenText() + } + if len(str) > 0 { + // Send remainder + l.token = str + l.value = zValue + c <- l + } +} diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go new file mode 100644 index 00000000000..46f3215c8fb --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_privkey.go @@ -0,0 +1,93 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "math/big" + "strconv" + + "golang.org/x/crypto/ed25519" +) + +const format = "Private-key-format: v1.3\n" + +// PrivateKeyString converts a PrivateKey to a string. This string has the same +// format as the private-key-file of BIND9 (Private-key-format: v1.3). 
+// It needs some info from the key (the algorithm), so its a method of the DNSKEY +// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey +func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string { + algorithm := strconv.Itoa(int(r.Algorithm)) + algorithm += " (" + AlgorithmToString[r.Algorithm] + ")" + + switch p := p.(type) { + case *rsa.PrivateKey: + modulus := toBase64(p.PublicKey.N.Bytes()) + e := big.NewInt(int64(p.PublicKey.E)) + publicExponent := toBase64(e.Bytes()) + privateExponent := toBase64(p.D.Bytes()) + prime1 := toBase64(p.Primes[0].Bytes()) + prime2 := toBase64(p.Primes[1].Bytes()) + // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm + // and from: http://code.google.com/p/go/issues/detail?id=987 + one := big.NewInt(1) + p1 := big.NewInt(0).Sub(p.Primes[0], one) + q1 := big.NewInt(0).Sub(p.Primes[1], one) + exp1 := big.NewInt(0).Mod(p.D, p1) + exp2 := big.NewInt(0).Mod(p.D, q1) + coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0]) + + exponent1 := toBase64(exp1.Bytes()) + exponent2 := toBase64(exp2.Bytes()) + coefficient := toBase64(coeff.Bytes()) + + return format + + "Algorithm: " + algorithm + "\n" + + "Modulus: " + modulus + "\n" + + "PublicExponent: " + publicExponent + "\n" + + "PrivateExponent: " + privateExponent + "\n" + + "Prime1: " + prime1 + "\n" + + "Prime2: " + prime2 + "\n" + + "Exponent1: " + exponent1 + "\n" + + "Exponent2: " + exponent2 + "\n" + + "Coefficient: " + coefficient + "\n" + + case *ecdsa.PrivateKey: + var intlen int + switch r.Algorithm { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + private := toBase64(intToBytes(p.D, intlen)) + return format + + "Algorithm: " + algorithm + "\n" + + "PrivateKey: " + private + "\n" + + case *dsa.PrivateKey: + T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8) + prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8)) + subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20)) + base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8)) + priv := toBase64(intToBytes(p.X, 20)) + pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8)) + return format + + "Algorithm: " + algorithm + "\n" + + "Prime(p): " + prime + "\n" + + "Subprime(q): " + subprime + "\n" + + "Base(g): " + base + "\n" + + "Private_value(x): " + priv + "\n" + + "Public_value(y): " + pub + "\n" + + case ed25519.PrivateKey: + private := toBase64(p[:32]) + return format + + "Algorithm: " + algorithm + "\n" + + "PrivateKey: " + private + "\n" + + default: + return "" + } +} diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go new file mode 100644 index 00000000000..0389d7248ef --- /dev/null +++ b/vendor/github.com/miekg/dns/doc.go @@ -0,0 +1,272 @@ +/* +Package dns implements a full featured interface to the Domain Name System. +Server- and client-side programming is supported. +The package allows complete control over what is sent out to the DNS. The package +API follows the less-is-more principle, by presenting a small, clean interface. + +The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers, +TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing. +Note that domain names MUST be fully qualified, before sending them, unqualified +names in a message will result in a packing failure. + +Resource records are native types. They are not stored in wire format. 
+Basic usage pattern for creating a new resource record: + + r := new(dns.MX) + r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, + Class: dns.ClassINET, Ttl: 3600} + r.Preference = 10 + r.Mx = "mx.miek.nl." + +Or directly from a string: + + mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") + +Or when the default origin (.) and TTL (3600) and class (IN) suit you: + + mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl") + +Or even: + + mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") + +In the DNS messages are exchanged, these messages contain resource +records (sets). Use pattern for creating a message: + + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) + +Or when not certain if the domain name is fully qualified: + + m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX) + +The message m is now a message with the question section set to ask +the MX records for the miek.nl. zone. + +The following is slightly more verbose, but more flexible: + + m1 := new(dns.Msg) + m1.Id = dns.Id() + m1.RecursionDesired = true + m1.Question = make([]dns.Question, 1) + m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} + +After creating a message it can be sent. +Basic use pattern for synchronous querying the DNS at a +server configured on 127.0.0.1 and port 53: + + c := new(dns.Client) + in, rtt, err := c.Exchange(m1, "127.0.0.1:53") + +Suppressing multiple outstanding queries (with the same question, type and +class) is as easy as setting: + + c.SingleInflight = true + +More advanced options are available using a net.Dialer and the corresponding API. +For example it is possible to set a timeout, or to specify a source IP address +and port to use for the connection: + + c := new(dns.Client) + laddr := net.UDPAddr{ + IP: net.ParseIP("[::1]"), + Port: 12345, + Zone: "", + } + c.Dialer := &net.Dialer{ + Timeout: 200 * time.Millisecond, + LocalAddr: &laddr, + } + in, rtt, err := c.Exchange(m1, "8.8.8.8:53") + +If these "advanced" features are not needed, a simple UDP query can be sent, +with: + + in, err := dns.Exchange(m1, "127.0.0.1:53") + +When this functions returns you will get dns message. A dns message consists +out of four sections. +The question section: in.Question, the answer section: in.Answer, +the authority section: in.Ns and the additional section: in.Extra. + +Each of these sections (except the Question section) contain a []RR. Basic +use pattern for accessing the rdata of a TXT RR as the first RR in +the Answer section: + + if t, ok := in.Answer[0].(*dns.TXT); ok { + // do something with t.Txt + } + +Domain Name and TXT Character String Representations + +Both domain names and TXT character strings are converted to presentation +form both when unpacked and when converted to strings. + +For TXT character strings, tabs, carriage returns and line feeds will be +converted to \t, \r and \n respectively. Back slashes and quotations marks +will be escaped. Bytes below 32 and above 127 will be converted to \DDD +form. + +For domain names, in addition to the above rules brackets, periods, +spaces, semicolons and the at symbol are escaped. + +DNSSEC + +DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It +uses public key cryptography to sign resource records. The +public keys are stored in DNSKEY records and the signatures in RRSIG records. + +Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit +to a request. 
+ + m := new(dns.Msg) + m.SetEdns0(4096, true) + +Signature generation, signature verification and key generation are all supported. + +DYNAMIC UPDATES + +Dynamic updates reuses the DNS message format, but renames three of +the sections. Question is Zone, Answer is Prerequisite, Authority is +Update, only the Additional is not renamed. See RFC 2136 for the gory details. + +You can set a rather complex set of rules for the existence of absence of +certain resource records or names in a zone to specify if resource records +should be added or removed. The table from RFC 2136 supplemented with the Go +DNS function shows which functions exist to specify the prerequisites. + + 3.2.4 - Table Of Metavalues Used In Prerequisite Section + + CLASS TYPE RDATA Meaning Function + -------------------------------------------------------------- + ANY ANY empty Name is in use dns.NameUsed + ANY rrset empty RRset exists (value indep) dns.RRsetUsed + NONE ANY empty Name is not in use dns.NameNotUsed + NONE rrset empty RRset does not exist dns.RRsetNotUsed + zone rrset rr RRset exists (value dep) dns.Used + +The prerequisite section can also be left empty. +If you have decided on the prerequisites you can tell what RRs should +be added or deleted. The next table shows the options you have and +what functions to call. + + 3.4.2.6 - Table Of Metavalues Used In Update Section + + CLASS TYPE RDATA Meaning Function + --------------------------------------------------------------- + ANY ANY empty Delete all RRsets from name dns.RemoveName + ANY rrset empty Delete an RRset dns.RemoveRRset + NONE rrset rr Delete an RR from RRset dns.Remove + zone rrset rr Add to an RRset dns.Insert + +TRANSACTION SIGNATURE + +An TSIG or transaction signature adds a HMAC TSIG record to each message sent. +The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512. + +Basic use pattern when querying with a TSIG name "axfr." (note that these key names +must be fully qualified - as they are domain names) and the base64 secret +"so6ZGir4GPAqINNh9U5c3A==": + +If an incoming message contains a TSIG record it MUST be the last record in +the additional section (RFC2845 3.2). This means that you should make the +call to SetTsig last, right before executing the query. If you make any +changes to the RRset after calling SetTsig() the signature will be incorrect. + + c := new(dns.Client) + c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + ... + // When sending the TSIG RR is calculated and filled in before sending + +When requesting an zone transfer (almost all TSIG usage is when requesting zone transfers), with +TSIG, this is the basic use pattern. In this example we request an AXFR for +miek.nl. with TSIG key named "axfr." and secret "so6ZGir4GPAqINNh9U5c3A==" +and using the server 176.58.119.54: + + t := new(dns.Transfer) + m := new(dns.Msg) + t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + m.SetAxfr("miek.nl.") + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + c, err := t.In(m, "176.58.119.54:53") + for r := range c { ... } + +You can now read the records from the transfer as they come in. Each envelope is checked with TSIG. +If something is not correct an error is returned. + +Basic use pattern validating and replying to a message that has TSIG set. 
+ + server := &dns.Server{Addr: ":53", Net: "udp"} + server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + go server.ListenAndServe() + dns.HandleFunc(".", handleRequest) + + func handleRequest(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + if r.IsTsig() != nil { + if w.TsigStatus() == nil { + // *Msg r has an TSIG record and it was validated + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + } else { + // *Msg r has an TSIG records and it was not valided + } + } + w.WriteMsg(m) + } + +PRIVATE RRS + +RFC 6895 sets aside a range of type codes for private use. This range +is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these +can be used, before requesting an official type code from IANA. + +see http://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more +information. + +EDNS0 + +EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated +by RFC 6891. It defines an new RR type, the OPT RR, which is then completely +abused. +Basic use pattern for creating an (empty) OPT RR: + + o := new(dns.OPT) + o.Hdr.Name = "." // MUST be the root zone, per definition. + o.Hdr.Rrtype = dns.TypeOPT + +The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891) +interfaces. Currently only a few have been standardized: EDNS0_NSID +(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note +that these options may be combined in an OPT RR. +Basic use pattern for a server to check if (and which) options are set: + + // o is a dns.OPT + for _, s := range o.Option { + switch e := s.(type) { + case *dns.EDNS0_NSID: + // do stuff with e.Nsid + case *dns.EDNS0_SUBNET: + // access e.Family, e.Address, etc. + } + } + +SIG(0) + +From RFC 2931: + + SIG(0) provides protection for DNS transactions and requests .... + ... protection for glue records, DNS requests, protection for message headers + on requests and responses, and protection of the overall integrity of a response. + +It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared +secret approach in TSIG. +Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and +RSASHA512. + +Signing subsequent messages in multi-message sessions is not implemented. +*/ +package dns diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go new file mode 100644 index 00000000000..55059eb14a5 --- /dev/null +++ b/vendor/github.com/miekg/dns/edns.go @@ -0,0 +1,630 @@ +package dns + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "net" + "strconv" +) + +// EDNS0 Option codes. 
+const ( + EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 + EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt + EDNS0NSID = 0x3 // nsid (See RFC 5001) + EDNS0DAU = 0x5 // DNSSEC Algorithm Understood + EDNS0DHU = 0x6 // DS Hash Understood + EDNS0N3U = 0x7 // NSEC3 Hash Understood + EDNS0SUBNET = 0x8 // client-subnet (See RFC 7871) + EDNS0EXPIRE = 0x9 // EDNS0 expire + EDNS0COOKIE = 0xa // EDNS0 Cookie + EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (See RFC 7828) + EDNS0PADDING = 0xc // EDNS0 padding (See RFC 7830) + EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891) + EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891) + _DO = 1 << 15 // DNSSEC OK +) + +// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. +// See RFC 6891. +type OPT struct { + Hdr RR_Header + Option []EDNS0 `dns:"opt"` +} + +func (rr *OPT) String() string { + s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; " + if rr.Do() { + s += "flags: do; " + } else { + s += "flags: ; " + } + s += "udp: " + strconv.Itoa(int(rr.UDPSize())) + + for _, o := range rr.Option { + switch o.(type) { + case *EDNS0_NSID: + s += "\n; NSID: " + o.String() + h, e := o.pack() + var r string + if e == nil { + for _, c := range h { + r += "(" + string(c) + ")" + } + s += " " + r + } + case *EDNS0_SUBNET: + s += "\n; SUBNET: " + o.String() + case *EDNS0_COOKIE: + s += "\n; COOKIE: " + o.String() + case *EDNS0_UL: + s += "\n; UPDATE LEASE: " + o.String() + case *EDNS0_LLQ: + s += "\n; LONG LIVED QUERIES: " + o.String() + case *EDNS0_DAU: + s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String() + case *EDNS0_DHU: + s += "\n; DS HASH UNDERSTOOD: " + o.String() + case *EDNS0_N3U: + s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String() + case *EDNS0_LOCAL: + s += "\n; LOCAL OPT: " + o.String() + case *EDNS0_PADDING: + s += "\n; PADDING: " + o.String() + } + } + return s +} + +func (rr *OPT) len() int { + l := rr.Hdr.len() + for i := 0; i < len(rr.Option); i++ { + l += 4 // Account for 2-byte option code and 2-byte option length. + lo, _ := rr.Option[i].pack() + l += len(lo) + } + return l +} + +// return the old value -> delete SetVersion? + +// Version returns the EDNS version used. Only zero is defined. +func (rr *OPT) Version() uint8 { + return uint8((rr.Hdr.Ttl & 0x00FF0000) >> 16) +} + +// SetVersion sets the version of EDNS. This is usually zero. +func (rr *OPT) SetVersion(v uint8) { + rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | (uint32(v) << 16) +} + +// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL). +func (rr *OPT) ExtendedRcode() int { + return int((rr.Hdr.Ttl&0xFF000000)>>24) + 15 +} + +// SetExtendedRcode sets the EDNS extended RCODE field. +func (rr *OPT) SetExtendedRcode(v uint8) { + if v < RcodeBadVers { // Smaller than 16.. Use the 4 bits you have! + return + } + rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v-15) << 24) +} + +// UDPSize returns the UDP buffer size. +func (rr *OPT) UDPSize() uint16 { + return rr.Hdr.Class +} + +// SetUDPSize sets the UDP buffer size. +func (rr *OPT) SetUDPSize(size uint16) { + rr.Hdr.Class = size +} + +// Do returns the value of the DO (DNSSEC OK) bit. +func (rr *OPT) Do() bool { + return rr.Hdr.Ttl&_DO == _DO +} + +// SetDo sets the DO (DNSSEC OK) bit. +// If we pass an argument, set the DO bit to that value. +// It is possible to pass 2 or more arguments. 
Any arguments after the 1st is silently ignored. +func (rr *OPT) SetDo(do ...bool) { + if len(do) == 1 { + if do[0] { + rr.Hdr.Ttl |= _DO + } else { + rr.Hdr.Ttl &^= _DO + } + } else { + rr.Hdr.Ttl |= _DO + } +} + +// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it. +type EDNS0 interface { + // Option returns the option code for the option. + Option() uint16 + // pack returns the bytes of the option data. + pack() ([]byte, error) + // unpack sets the data as found in the buffer. Is also sets + // the length of the slice as the length of the option data. + unpack([]byte) error + // String returns the string representation of the option. + String() string +} + +// EDNS0_NSID option is used to retrieve a nameserver +// identifier. When sending a request Nsid must be set to the empty string +// The identifier is an opaque string encoded as hex. +// Basic use pattern for creating an nsid option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_NSID) +// e.Code = dns.EDNS0NSID +// e.Nsid = "AA" +// o.Option = append(o.Option, e) +type EDNS0_NSID struct { + Code uint16 // Always EDNS0NSID + Nsid string // This string needs to be hex encoded +} + +func (e *EDNS0_NSID) pack() ([]byte, error) { + h, err := hex.DecodeString(e.Nsid) + if err != nil { + return nil, err + } + return h, nil +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code. +func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil } +func (e *EDNS0_NSID) String() string { return string(e.Nsid) } + +// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver +// an idea of where the client lives. See RFC 7871. It can then give back a different +// answer depending on the location or network topology. +// Basic use pattern for creating an subnet option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_SUBNET) +// e.Code = dns.EDNS0SUBNET +// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 +// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6 +// e.SourceScope = 0 +// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4 +// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6 +// o.Option = append(o.Option, e) +// +// This code will parse all the available bits when unpacking (up to optlen). +// When packing it will apply SourceNetmask. If you need more advanced logic, +// patches welcome and good luck. +type EDNS0_SUBNET struct { + Code uint16 // Always EDNS0SUBNET + Family uint16 // 1 for IP, 2 for IP6 + SourceNetmask uint8 + SourceScope uint8 + Address net.IP +} + +// Option implements the EDNS0 interface. 
+func (e *EDNS0_SUBNET) Option() uint16 { return EDNS0SUBNET } + +func (e *EDNS0_SUBNET) pack() ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint16(b[0:], e.Family) + b[2] = e.SourceNetmask + b[3] = e.SourceScope + switch e.Family { + case 0: + // "dig" sets AddressFamily to 0 if SourceNetmask is also 0 + // We might don't need to complain either + if e.SourceNetmask != 0 { + return nil, errors.New("dns: bad address family") + } + case 1: + if e.SourceNetmask > net.IPv4len*8 { + return nil, errors.New("dns: bad netmask") + } + if len(e.Address.To4()) != net.IPv4len { + return nil, errors.New("dns: bad address") + } + ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8)) + needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up + b = append(b, ip[:needLength]...) + case 2: + if e.SourceNetmask > net.IPv6len*8 { + return nil, errors.New("dns: bad netmask") + } + if len(e.Address) != net.IPv6len { + return nil, errors.New("dns: bad address") + } + ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8)) + needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up + b = append(b, ip[:needLength]...) + default: + return nil, errors.New("dns: bad address family") + } + return b, nil +} + +func (e *EDNS0_SUBNET) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Family = binary.BigEndian.Uint16(b) + e.SourceNetmask = b[2] + e.SourceScope = b[3] + switch e.Family { + case 0: + // "dig" sets AddressFamily to 0 if SourceNetmask is also 0 + // It's okay to accept such a packet + if e.SourceNetmask != 0 { + return errors.New("dns: bad address family") + } + e.Address = net.IPv4(0, 0, 0, 0) + case 1: + if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 { + return errors.New("dns: bad netmask") + } + addr := make([]byte, net.IPv4len) + for i := 0; i < net.IPv4len && 4+i < len(b); i++ { + addr[i] = b[4+i] + } + e.Address = net.IPv4(addr[0], addr[1], addr[2], addr[3]) + case 2: + if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 { + return errors.New("dns: bad netmask") + } + addr := make([]byte, net.IPv6len) + for i := 0; i < net.IPv6len && 4+i < len(b); i++ { + addr[i] = b[4+i] + } + e.Address = net.IP{addr[0], addr[1], addr[2], addr[3], addr[4], + addr[5], addr[6], addr[7], addr[8], addr[9], addr[10], + addr[11], addr[12], addr[13], addr[14], addr[15]} + default: + return errors.New("dns: bad address family") + } + return nil +} + +func (e *EDNS0_SUBNET) String() (s string) { + if e.Address == nil { + s = "" + } else if e.Address.To4() != nil { + s = e.Address.String() + } else { + s = "[" + e.Address.String() + "]" + } + s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope)) + return +} + +// The EDNS0_COOKIE option is used to add a DNS Cookie to a message. +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_COOKIE) +// e.Code = dns.EDNS0COOKIE +// e.Cookie = "24a5ac.." +// o.Option = append(o.Option, e) +// +// The Cookie field consists out of a client cookie (RFC 7873 Section 4), that is +// always 8 bytes. It may then optionally be followed by the server cookie. The server +// cookie is of variable length, 8 to a maximum of 32 bytes. In other words: +// +// cCookie := o.Cookie[:16] +// sCookie := o.Cookie[16:] +// +// There is no guarantee that the Cookie string has a specific length. 
+type EDNS0_COOKIE struct { + Code uint16 // Always EDNS0COOKIE + Cookie string // Hex-encoded cookie data +} + +func (e *EDNS0_COOKIE) pack() ([]byte, error) { + h, err := hex.DecodeString(e.Cookie) + if err != nil { + return nil, err + } + return h, nil +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE } +func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil } +func (e *EDNS0_COOKIE) String() string { return e.Cookie } + +// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set +// an expiration on an update RR. This is helpful for clients that cannot clean +// up after themselves. This is a draft RFC and more information can be found at +// http://files.dns-sd.org/draft-sekar-dns-ul.txt +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_UL) +// e.Code = dns.EDNS0UL +// e.Lease = 120 // in seconds +// o.Option = append(o.Option, e) +type EDNS0_UL struct { + Code uint16 // Always EDNS0UL + Lease uint32 +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_UL) Option() uint16 { return EDNS0UL } +func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) } + +// Copied: http://golang.org/src/pkg/net/dnsmsg.go +func (e *EDNS0_UL) pack() ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, e.Lease) + return b, nil +} + +func (e *EDNS0_UL) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Lease = binary.BigEndian.Uint32(b) + return nil +} + +// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 +// Implemented for completeness, as the EDNS0 type code is assigned. +type EDNS0_LLQ struct { + Code uint16 // Always EDNS0LLQ + Version uint16 + Opcode uint16 + Error uint16 + Id uint64 + LeaseLife uint32 +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ } + +func (e *EDNS0_LLQ) pack() ([]byte, error) { + b := make([]byte, 18) + binary.BigEndian.PutUint16(b[0:], e.Version) + binary.BigEndian.PutUint16(b[2:], e.Opcode) + binary.BigEndian.PutUint16(b[4:], e.Error) + binary.BigEndian.PutUint64(b[6:], e.Id) + binary.BigEndian.PutUint32(b[14:], e.LeaseLife) + return b, nil +} + +func (e *EDNS0_LLQ) unpack(b []byte) error { + if len(b) < 18 { + return ErrBuf + } + e.Version = binary.BigEndian.Uint16(b[0:]) + e.Opcode = binary.BigEndian.Uint16(b[2:]) + e.Error = binary.BigEndian.Uint16(b[4:]) + e.Id = binary.BigEndian.Uint64(b[6:]) + e.LeaseLife = binary.BigEndian.Uint32(b[14:]) + return nil +} + +func (e *EDNS0_LLQ) String() string { + s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) + + " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) + + " " + strconv.FormatUint(uint64(e.LeaseLife), 10) + return s +} + +// EDNS0_DUA implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975. +type EDNS0_DAU struct { + Code uint16 // Always EDNS0DAU + AlgCode []uint8 +} + +// Option implements the EDNS0 interface. 
+func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU } +func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_DAU) String() string { + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := AlgorithmToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) + } + } + return s +} + +// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975. +type EDNS0_DHU struct { + Code uint16 // Always EDNS0DHU + AlgCode []uint8 +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU } +func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_DHU) String() string { + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := HashToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) + } + } + return s +} + +// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975. +type EDNS0_N3U struct { + Code uint16 // Always EDNS0N3U + AlgCode []uint8 +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U } +func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_N3U) String() string { + // Re-use the hash map + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := HashToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) + } + } + return s +} + +// EDNS0_EXPIRE implementes the EDNS0 option as described in RFC 7314. +type EDNS0_EXPIRE struct { + Code uint16 // Always EDNS0EXPIRE + Expire uint32 +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE } +func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) } + +func (e *EDNS0_EXPIRE) pack() ([]byte, error) { + b := make([]byte, 4) + b[0] = byte(e.Expire >> 24) + b[1] = byte(e.Expire >> 16) + b[2] = byte(e.Expire >> 8) + b[3] = byte(e.Expire) + return b, nil +} + +func (e *EDNS0_EXPIRE) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Expire = binary.BigEndian.Uint32(b) + return nil +} + +// The EDNS0_LOCAL option is used for local/experimental purposes. The option +// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND] +// (RFC6891), although any unassigned code can actually be used. The content of +// the option is made available in Data, unaltered. +// Basic use pattern for creating a local option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_LOCAL) +// e.Code = dns.EDNS0LOCALSTART +// e.Data = []byte{72, 82, 74} +// o.Option = append(o.Option, e) +type EDNS0_LOCAL struct { + Code uint16 + Data []byte +} + +// Option implements the EDNS0 interface. 
+func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } +func (e *EDNS0_LOCAL) String() string { + return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) +} + +func (e *EDNS0_LOCAL) pack() ([]byte, error) { + b := make([]byte, len(e.Data)) + copied := copy(b, e.Data) + if copied != len(e.Data) { + return nil, ErrBuf + } + return b, nil +} + +func (e *EDNS0_LOCAL) unpack(b []byte) error { + e.Data = make([]byte, len(b)) + copied := copy(e.Data, b) + if copied != len(b) { + return ErrBuf + } + return nil +} + +// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep +// the TCP connection alive. See RFC 7828. +type EDNS0_TCP_KEEPALIVE struct { + Code uint16 // Always EDNSTCPKEEPALIVE + Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present; + Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order. +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE } + +func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) { + if e.Timeout != 0 && e.Length != 2 { + return nil, errors.New("dns: timeout specified but length is not 2") + } + if e.Timeout == 0 && e.Length != 0 { + return nil, errors.New("dns: timeout not specified but length is not 0") + } + b := make([]byte, 4+e.Length) + binary.BigEndian.PutUint16(b[0:], e.Code) + binary.BigEndian.PutUint16(b[2:], e.Length) + if e.Length == 2 { + binary.BigEndian.PutUint16(b[4:], e.Timeout) + } + return b, nil +} + +func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Length = binary.BigEndian.Uint16(b[2:4]) + if e.Length != 0 && e.Length != 2 { + return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10)) + } + if e.Length == 2 { + if len(b) < 6 { + return ErrBuf + } + e.Timeout = binary.BigEndian.Uint16(b[4:6]) + } + return nil +} + +func (e *EDNS0_TCP_KEEPALIVE) String() (s string) { + s = "use tcp keep-alive" + if e.Length == 0 { + s += ", timeout omitted" + } else { + s += fmt.Sprintf(", timeout %dms", e.Timeout*100) + } + return +} + +// EDNS0_PADDING option is used to add padding to a request/response. The default +// value of padding SHOULD be 0x0 but other values MAY be used, for instance if +// compression is applied before encryption which may break signatures. +type EDNS0_PADDING struct { + Padding []byte +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING } +func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil } +func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil } +func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) } diff --git a/vendor/github.com/miekg/dns/format.go b/vendor/github.com/miekg/dns/format.go new file mode 100644 index 00000000000..3f5303c2013 --- /dev/null +++ b/vendor/github.com/miekg/dns/format.go @@ -0,0 +1,87 @@ +package dns + +import ( + "net" + "reflect" + "strconv" +) + +// NumField returns the number of rdata fields r has. +func NumField(r RR) int { + return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header +} + +// Field returns the rdata field i as a string. Fields are indexed starting from 1. +// RR types that holds slice data, for instance the NSEC type bitmap will return a single +// string where the types are concatenated using a space. 
+// Accessing non existing fields will cause a panic. +func Field(r RR, i int) string { + if i == 0 { + return "" + } + d := reflect.ValueOf(r).Elem().Field(i) + switch k := d.Kind(); k { + case reflect.String: + return d.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(d.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return strconv.FormatUint(d.Uint(), 10) + case reflect.Slice: + switch reflect.ValueOf(r).Elem().Type().Field(i).Tag { + case `dns:"a"`: + // TODO(miek): Hmm store this as 16 bytes + if d.Len() < net.IPv6len { + return net.IPv4(byte(d.Index(0).Uint()), + byte(d.Index(1).Uint()), + byte(d.Index(2).Uint()), + byte(d.Index(3).Uint())).String() + } + return net.IPv4(byte(d.Index(12).Uint()), + byte(d.Index(13).Uint()), + byte(d.Index(14).Uint()), + byte(d.Index(15).Uint())).String() + case `dns:"aaaa"`: + return net.IP{ + byte(d.Index(0).Uint()), + byte(d.Index(1).Uint()), + byte(d.Index(2).Uint()), + byte(d.Index(3).Uint()), + byte(d.Index(4).Uint()), + byte(d.Index(5).Uint()), + byte(d.Index(6).Uint()), + byte(d.Index(7).Uint()), + byte(d.Index(8).Uint()), + byte(d.Index(9).Uint()), + byte(d.Index(10).Uint()), + byte(d.Index(11).Uint()), + byte(d.Index(12).Uint()), + byte(d.Index(13).Uint()), + byte(d.Index(14).Uint()), + byte(d.Index(15).Uint()), + }.String() + case `dns:"nsec"`: + if d.Len() == 0 { + return "" + } + s := Type(d.Index(0).Uint()).String() + for i := 1; i < d.Len(); i++ { + s += " " + Type(d.Index(i).Uint()).String() + } + return s + default: + // if it does not have a tag its a string slice + fallthrough + case `dns:"txt"`: + if d.Len() == 0 { + return "" + } + s := d.Index(0).String() + for i := 1; i < d.Len(); i++ { + s += " " + d.Index(i).String() + } + return s + } + } + return "" +} diff --git a/vendor/github.com/miekg/dns/fuzz.go b/vendor/github.com/miekg/dns/fuzz.go new file mode 100644 index 00000000000..a8a09184d40 --- /dev/null +++ b/vendor/github.com/miekg/dns/fuzz.go @@ -0,0 +1,23 @@ +// +build fuzz + +package dns + +func Fuzz(data []byte) int { + msg := new(Msg) + + if err := msg.Unpack(data); err != nil { + return 0 + } + if _, err := msg.Pack(); err != nil { + return 0 + } + + return 1 +} + +func FuzzNewRR(data []byte) int { + if _, err := NewRR(string(data)); err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go new file mode 100644 index 00000000000..e4481a4b0d8 --- /dev/null +++ b/vendor/github.com/miekg/dns/generate.go @@ -0,0 +1,159 @@ +package dns + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" +) + +// Parse the $GENERATE statement as used in BIND9 zones. +// See http://www.zytrax.com/books/dns/ch8/generate.html for instance. +// We are called after '$GENERATE '. After which we expect: +// * the range (12-24/2) +// * lhs (ownername) +// * [[ttl][class]] +// * type +// * rhs (rdata) +// But we are lazy here, only the range is parsed *all* occurrences +// of $ after that are interpreted. +// Any error are returned as a string value, the empty string signals +// "no error". 
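+//
+// As a rough illustration of what this parser accepts (an assumed example,
+// not taken from the upstream comment), a statement such as
+//
+//	$GENERATE 1-3 host-$ A 10.0.0.$
+//
+// is expanded into the equivalent of
+//
+//	host-1 A 10.0.0.1
+//	host-2 A 10.0.0.2
+//	host-3 A 10.0.0.3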
+func generate(l lex, c chan lex, t chan *Token, o string) string { + step := 1 + if i := strings.IndexAny(l.token, "/"); i != -1 { + if i+1 == len(l.token) { + return "bad step in $GENERATE range" + } + if s, err := strconv.Atoi(l.token[i+1:]); err == nil { + if s < 0 { + return "bad step in $GENERATE range" + } + step = s + } else { + return "bad step in $GENERATE range" + } + l.token = l.token[:i] + } + sx := strings.SplitN(l.token, "-", 2) + if len(sx) != 2 { + return "bad start-stop in $GENERATE range" + } + start, err := strconv.Atoi(sx[0]) + if err != nil { + return "bad start in $GENERATE range" + } + end, err := strconv.Atoi(sx[1]) + if err != nil { + return "bad stop in $GENERATE range" + } + if end < 0 || start < 0 || end < start { + return "bad range in $GENERATE range" + } + + <-c // _BLANK + // Create a complete new string, which we then parse again. + s := "" +BuildRR: + l = <-c + if l.value != zNewline && l.value != zEOF { + s += l.token + goto BuildRR + } + for i := start; i <= end; i += step { + var ( + escape bool + dom bytes.Buffer + mod string + err error + offset int + ) + + for j := 0; j < len(s); j++ { // No 'range' because we need to jump around + switch s[j] { + case '\\': + if escape { + dom.WriteByte('\\') + escape = false + continue + } + escape = true + case '$': + mod = "%d" + offset = 0 + if escape { + dom.WriteByte('$') + escape = false + continue + } + escape = false + if j+1 >= len(s) { // End of the string + dom.WriteString(fmt.Sprintf(mod, i+offset)) + continue + } else { + if s[j+1] == '$' { + dom.WriteByte('$') + j++ + continue + } + } + // Search for { and } + if s[j+1] == '{' { // Modifier block + sep := strings.Index(s[j+2:], "}") + if sep == -1 { + return "bad modifier in $GENERATE" + } + mod, offset, err = modToPrintf(s[j+2 : j+2+sep]) + if err != nil { + return err.Error() + } + j += 2 + sep // Jump to it + } + dom.WriteString(fmt.Sprintf(mod, i+offset)) + default: + if escape { // Pretty useless here + escape = false + continue + } + dom.WriteByte(s[j]) + } + } + // Re-parse the RR and send it on the current channel t + rx, err := NewRR("$ORIGIN " + o + "\n" + dom.String()) + if err != nil { + return err.Error() + } + t <- &Token{RR: rx} + // Its more efficient to first built the rrlist and then parse it in + // one go! But is this a problem? + } + return "" +} + +// Convert a $GENERATE modifier 0,0,d to something Printf can deal with. +func modToPrintf(s string) (string, int, error) { + xs := strings.SplitN(s, ",", 3) + if len(xs) != 3 { + return "", 0, errors.New("bad modifier in $GENERATE") + } + // xs[0] is offset, xs[1] is width, xs[2] is base + if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" { + return "", 0, errors.New("bad base in $GENERATE") + } + offset, err := strconv.Atoi(xs[0]) + if err != nil || offset > 255 { + return "", 0, errors.New("bad offset in $GENERATE") + } + width, err := strconv.Atoi(xs[1]) + if err != nil || width > 255 { + return "", offset, errors.New("bad width in $GENERATE") + } + switch { + case width < 0: + return "", offset, errors.New("bad width in $GENERATE") + case width == 0: + return "%" + xs[1] + xs[2], offset, nil + } + return "%0" + xs[1] + xs[2], offset, nil +} diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go new file mode 100644 index 00000000000..760b89e7114 --- /dev/null +++ b/vendor/github.com/miekg/dns/labels.go @@ -0,0 +1,191 @@ +package dns + +// Holds a bunch of helper functions for dealing with labels. 
+
+// SplitDomainName splits a name string into its labels.
+// www.miek.nl. returns []string{"www", "miek", "nl"}
+// .www.miek.nl. returns []string{"", "www", "miek", "nl"}.
+// The root label (.) returns nil. Note that using
+// strings.Split(s) will work in most cases, but does not handle
+// escaped dots (\.) for instance.
+// s must be a syntactically valid domain name, see IsDomainName.
+func SplitDomainName(s string) (labels []string) {
+	if len(s) == 0 {
+		return nil
+	}
+	fqdnEnd := 0 // offset of the final '.' or the length of the name
+	idx := Split(s)
+	begin := 0
+	if s[len(s)-1] == '.' {
+		fqdnEnd = len(s) - 1
+	} else {
+		fqdnEnd = len(s)
+	}
+
+	switch len(idx) {
+	case 0:
+		return nil
+	case 1:
+		// no-op
+	default:
+		end := 0
+		for i := 1; i < len(idx); i++ {
+			end = idx[i]
+			labels = append(labels, s[begin:end-1])
+			begin = end
+		}
+	}
+
+	labels = append(labels, s[begin:fqdnEnd])
+	return labels
+}
+
+// CompareDomainName compares the names s1 and s2 and
+// returns how many labels they have in common starting from the *right*.
+// The comparison stops at the first inequality. The names are downcased
+// before the comparison.
+//
+// www.miek.nl. and miek.nl. have two labels in common: miek and nl
+// www.miek.nl. and www.bla.nl. have one label in common: nl
+//
+// s1 and s2 must be syntactically valid domain names.
+func CompareDomainName(s1, s2 string) (n int) {
+	// the first check: root label
+	if s1 == "." || s2 == "." {
+		return 0
+	}
+
+	l1 := Split(s1)
+	l2 := Split(s2)
+
+	j1 := len(l1) - 1 // end
+	i1 := len(l1) - 2 // start
+	j2 := len(l2) - 1
+	i2 := len(l2) - 2
+	// the second check can be done here: last/only label
+	// before we fall through into the for-loop below
+	if equal(s1[l1[j1]:], s2[l2[j2]:]) {
+		n++
+	} else {
+		return
+	}
+	for {
+		if i1 < 0 || i2 < 0 {
+			break
+		}
+		if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) {
+			n++
+		} else {
+			break
+		}
+		j1--
+		i1--
+		j2--
+		i2--
+	}
+	return
+}
+
+// CountLabel counts the number of labels in the string s.
+// s must be a syntactically valid domain name.
+func CountLabel(s string) (labels int) {
+	if s == "." {
+		return
+	}
+	off := 0
+	end := false
+	for {
+		off, end = NextLabel(s, off)
+		labels++
+		if end {
+			return
+		}
+	}
+}
+
+// Split splits a name s into its label indexes.
+// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
+// The root name (.) returns nil. Also see SplitDomainName.
+// s must be a syntactically valid domain name.
+func Split(s string) []int {
+	if s == "." {
+		return nil
+	}
+	idx := make([]int, 1, 3)
+	off := 0
+	end := false
+
+	for {
+		off, end = NextLabel(s, off)
+		if end {
+			return idx
+		}
+		idx = append(idx, off)
+	}
+}
+
+// NextLabel returns the index of the start of the next label in the
+// string s starting at offset.
+// The bool end is true when the end of the string has been reached.
+// Also see PrevLabel.
+func NextLabel(s string, offset int) (i int, end bool) {
+	quote := false
+	for i = offset; i < len(s)-1; i++ {
+		switch s[i] {
+		case '\\':
+			quote = !quote
+		default:
+			quote = false
+		case '.':
+			if quote {
+				quote = !quote
+				continue
+			}
+			return i + 1, false
+		}
+	}
+	return i + 1, true
+}
+
+// PrevLabel returns the index of the label when starting from the right and
+// jumping n labels to the left.
+// The bool start is true when the start of the string has been overshot.
+// Also see NextLabel.
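+//
+// A small worked sketch (illustrative, based on the implementation below):
+// with s = "www.miek.nl." the label offsets are Split(s) = []int{0, 4, 9}, so
+//
+//	PrevLabel(s, 0) // (12, false): the length of s
+//	PrevLabel(s, 1) // (9, false): the offset of "nl."
+//	PrevLabel(s, 2) // (4, false): the offset of "miek.nl."
+//	PrevLabel(s, 4) // (0, true): overshot the start of the name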
+func PrevLabel(s string, n int) (i int, start bool) { + if n == 0 { + return len(s), false + } + lab := Split(s) + if lab == nil { + return 0, true + } + if n > len(lab) { + return 0, true + } + return lab[len(lab)-n], false +} + +// equal compares a and b while ignoring case. It returns true when equal otherwise false. +func equal(a, b string) bool { + // might be lifted into API function. + la := len(a) + lb := len(b) + if la != lb { + return false + } + + for i := la - 1; i >= 0; i-- { + ai := a[i] + bi := b[i] + if ai >= 'A' && ai <= 'Z' { + ai |= ('a' - 'A') + } + if bi >= 'A' && bi <= 'Z' { + bi |= ('a' - 'A') + } + if ai != bi { + return false + } + } + return true +} diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go new file mode 100644 index 00000000000..dcd3b6a5e16 --- /dev/null +++ b/vendor/github.com/miekg/dns/msg.go @@ -0,0 +1,1206 @@ +// DNS packet assembly, see RFC 1035. Converting from - Unpack() - +// and to - Pack() - wire format. +// All the packers and unpackers take a (msg []byte, off int) +// and return (off1 int, ok bool). If they return ok==false, they +// also return off1==len(msg), so that the next unpacker will +// also fail. This lets us avoid checks of ok until the end of a +// packing sequence. + +package dns + +//go:generate go run msg_generate.go +//go:generate go run compress_generate.go + +import ( + crand "crypto/rand" + "encoding/binary" + "fmt" + "math/big" + "math/rand" + "strconv" + "sync" +) + +const ( + maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer + maxDomainNameWireOctets = 255 // See RFC 1035 section 2.3.4 +) + +// Errors defined in this package. +var ( + ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm. + ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication. + ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used is too small for the message. + ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being used before it is initialized. + ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode ... + ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot. + ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID. + ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid. + ErrKey error = &Error{err: "bad key"} + ErrKeySize error = &Error{err: "bad key size"} + ErrLongDomain error = &Error{err: fmt.Sprintf("domain name exceeded %d wire-format octets", maxDomainNameWireOctets)} + ErrNoSig error = &Error{err: "no signature found"} + ErrPrivKey error = &Error{err: "bad private key"} + ErrRcode error = &Error{err: "bad rcode"} + ErrRdata error = &Error{err: "bad rdata"} + ErrRRset error = &Error{err: "bad rrset"} + ErrSecret error = &Error{err: "no secrets defined"} + ErrShortRead error = &Error{err: "short read"} + ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated. + ErrSoa error = &Error{err: "no SOA"} // ErrSOA indicates that no SOA RR was seen when doing zone transfers. + ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication. 
+	ErrTruncated     error = &Error{err: "failed to unpack truncated message"} // ErrTruncated indicates that we failed to unpack a truncated message. We unpacked as much as we had so Msg can still be used, if desired.
+)
+
+// Id by default returns a 16-bit random number to be used as a
+// message id. The random provided should be good enough. This being a
+// variable, the function can be reassigned to a custom function.
+// For instance, to make it return a static value:
+//
+// dns.Id = func() uint16 { return 3 }
+var Id = id
+
+var (
+	idLock sync.Mutex
+	idRand *rand.Rand
+)
+
+// id returns a 16-bit random number to be used as a
+// message id. The random provided should be good enough.
+func id() uint16 {
+	idLock.Lock()
+
+	if idRand == nil {
+		// This (partially) works around
+		// https://github.com/golang/go/issues/11833 by only
+		// seeding idRand upon the first call to id.
+
+		var seed int64
+		var buf [8]byte
+
+		if _, err := crand.Read(buf[:]); err == nil {
+			seed = int64(binary.LittleEndian.Uint64(buf[:]))
+		} else {
+			seed = rand.Int63()
+		}
+
+		idRand = rand.New(rand.NewSource(seed))
+	}
+
+	// The call to idRand.Uint32 must be within the
+	// mutex lock because *rand.Rand is not safe for
+	// concurrent use.
+	//
+	// There is no added performance overhead to calling
+	// idRand.Uint32 inside a mutex lock over just
+	// calling rand.Uint32 as the global math/rand rng
+	// is internally protected by a sync.Mutex.
+	id := uint16(idRand.Uint32())
+
+	idLock.Unlock()
+	return id
+}
+
+// MsgHdr is a manually-unpacked version of (id, bits).
+type MsgHdr struct {
+	Id                 uint16
+	Response           bool
+	Opcode             int
+	Authoritative      bool
+	Truncated          bool
+	RecursionDesired   bool
+	RecursionAvailable bool
+	Zero               bool
+	AuthenticatedData  bool
+	CheckingDisabled   bool
+	Rcode              int
+}
+
+// Msg contains the layout of a DNS message.
+type Msg struct {
+	MsgHdr
+	Compress bool       `json:"-"` // If true, the message will be compressed when converted to wire format.
+	Question []Question // Holds the RR(s) of the question section.
+	Answer   []RR       // Holds the RR(s) of the answer section.
+	Ns       []RR       // Holds the RR(s) of the authority section.
+	Extra    []RR       // Holds the RR(s) of the additional section.
+}
+
+// ClassToString maps Classes to strings for each CLASS wire type.
+var ClassToString = map[uint16]string{
+	ClassINET:   "IN",
+	ClassCSNET:  "CS",
+	ClassCHAOS:  "CH",
+	ClassHESIOD: "HS",
+	ClassNONE:   "NONE",
+	ClassANY:    "ANY",
+}
+
+// OpcodeToString maps Opcodes to strings.
+var OpcodeToString = map[int]string{
+	OpcodeQuery:  "QUERY",
+	OpcodeIQuery: "IQUERY",
+	OpcodeStatus: "STATUS",
+	OpcodeNotify: "NOTIFY",
+	OpcodeUpdate: "UPDATE",
+}
+
+// RcodeToString maps Rcodes to strings.
+var RcodeToString = map[int]string{
+	RcodeSuccess:        "NOERROR",
+	RcodeFormatError:    "FORMERR",
+	RcodeServerFailure:  "SERVFAIL",
+	RcodeNameError:      "NXDOMAIN",
+	RcodeNotImplemented: "NOTIMPL",
+	RcodeRefused:        "REFUSED",
+	RcodeYXDomain:       "YXDOMAIN", // See RFC 2136
+	RcodeYXRrset:        "YXRRSET",
+	RcodeNXRrset:        "NXRRSET",
+	RcodeNotAuth:        "NOTAUTH",
+	RcodeNotZone:        "NOTZONE",
+	RcodeBadSig:         "BADSIG", // Also known as RcodeBadVers, see RFC 6891
+	// RcodeBadVers:     "BADVERS",
+	RcodeBadKey:    "BADKEY",
+	RcodeBadTime:   "BADTIME",
+	RcodeBadMode:   "BADMODE",
+	RcodeBadName:   "BADNAME",
+	RcodeBadAlg:    "BADALG",
+	RcodeBadTrunc:  "BADTRUNC",
+	RcodeBadCookie: "BADCOOKIE",
+}
+
+// Domain names are a sequence of counted strings
+// split at the dots. They end with a zero-length string.
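+//
+// For example, the owner name "www.miek.nl." is carried on the wire as the
+// counted strings (length bytes shown in decimal; an illustration, not part
+// of the original comment):
+//
+//	3 'w' 'w' 'w'  4 'm' 'i' 'e' 'k'  2 'n' 'l'  0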
+ +// PackDomainName packs a domain name s into msg[off:]. +// If compression is wanted compress must be true and the compression +// map needs to hold a mapping between domain names and offsets +// pointing into msg. +func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + off1, _, err = packDomainName(s, msg, off, compression, compress) + return +} + +func packDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, labels int, err error) { + // special case if msg == nil + lenmsg := 256 + if msg != nil { + lenmsg = len(msg) + } + ls := len(s) + if ls == 0 { // Ok, for instance when dealing with update RR without any rdata. + return off, 0, nil + } + // If not fully qualified, error out, but only if msg == nil #ugly + switch { + case msg == nil: + if s[ls-1] != '.' { + s += "." + ls++ + } + case msg != nil: + if s[ls-1] != '.' { + return lenmsg, 0, ErrFqdn + } + } + // Each dot ends a segment of the name. + // We trade each dot byte for a length byte. + // Except for escaped dots (\.), which are normal dots. + // There is also a trailing zero. + + // Compression + nameoffset := -1 + pointer := -1 + // Emit sequence of counted strings, chopping at dots. + begin := 0 + bs := []byte(s) + roBs, bsFresh, escapedDot := s, true, false + for i := 0; i < ls; i++ { + if bs[i] == '\\' { + for j := i; j < ls-1; j++ { + bs[j] = bs[j+1] + } + ls-- + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + // check for \DDD + if i+2 < ls && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + bs[i] = dddToByte(bs[i:]) + for j := i + 1; j < ls-2; j++ { + bs[j] = bs[j+2] + } + ls -= 2 + } + escapedDot = bs[i] == '.' + bsFresh = false + continue + } + + if bs[i] == '.' { + if i > 0 && bs[i-1] == '.' && !escapedDot { + // two dots back to back is not legal + return lenmsg, labels, ErrRdata + } + if i-begin >= 1<<6 { // top two bits of length must be clear + return lenmsg, labels, ErrRdata + } + // off can already (we're in a loop) be bigger than len(msg) + // this happens when a name isn't fully qualified + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + if msg != nil { + msg[off] = byte(i - begin) + } + offset := off + off++ + for j := begin; j < i; j++ { + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + if msg != nil { + msg[off] = bs[j] + } + off++ + } + if compress && !bsFresh { + roBs = string(bs) + bsFresh = true + } + // Don't try to compress '.' + // We should only compress when compress it true, but we should also still pick + // up names that can be used for *future* compression(s). + if compression != nil && roBs[begin:] != "." { + if p, ok := compression[roBs[begin:]]; !ok { + // Only offsets smaller than this can be used. + if offset < maxCompressionOffset { + compression[roBs[begin:]] = offset + } + } else { + // The first hit is the longest matching dname + // keep the pointer offset we get back and store + // the offset of the current name, because that's + // where we need to insert the pointer later + + // If compress is true, we're allowed to compress this dname + if pointer == -1 && compress { + pointer = p // Where to point to + nameoffset = offset // Where to point from + break + } + } + } + labels++ + begin = i + 1 + } + escapedDot = false + } + // Root label is special + if len(bs) == 1 && bs[0] == '.' 
{ + return off, labels, nil + } + // If we did compression and we find something add the pointer here + if pointer != -1 { + // We have two bytes (14 bits) to put the pointer in + // if msg == nil, we will never do compression + binary.BigEndian.PutUint16(msg[nameoffset:], uint16(pointer^0xC000)) + off = nameoffset + 1 + goto End + } + if msg != nil && off < len(msg) { + msg[off] = 0 + } +End: + off++ + return off, labels, nil +} + +// Unpack a domain name. +// In addition to the simple sequences of counted strings above, +// domain names are allowed to refer to strings elsewhere in the +// packet, to avoid repeating common suffixes when returning +// many entries in a single domain. The pointers are marked +// by a length byte with the top two bits set. Ignoring those +// two bits, that byte and the next give a 14 bit offset from msg[0] +// where we should pick up the trail. +// Note that if we jump elsewhere in the packet, +// we return off1 == the offset after the first pointer we found, +// which is where the next record will start. +// In theory, the pointers are only allowed to jump backward. +// We let them jump anywhere and stop jumping after a while. + +// UnpackDomainName unpacks a domain name into a string. +func UnpackDomainName(msg []byte, off int) (string, int, error) { + s := make([]byte, 0, 64) + off1 := 0 + lenmsg := len(msg) + maxLen := maxDomainNameWireOctets + ptr := 0 // number of pointers followed +Loop: + for { + if off >= lenmsg { + return "", lenmsg, ErrBuf + } + c := int(msg[off]) + off++ + switch c & 0xC0 { + case 0x00: + if c == 0x00 { + // end of name + break Loop + } + // literal string + if off+c > lenmsg { + return "", lenmsg, ErrBuf + } + for j := off; j < off+c; j++ { + switch b := msg[j]; b { + case '.', '(', ')', ';', ' ', '@': + fallthrough + case '"', '\\': + s = append(s, '\\', b) + // presentation-format \X escapes add an extra byte + maxLen++ + default: + if b < 32 || b >= 127 { // unprintable, use \DDD + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + // presentation-format \DDD escapes add 3 extra bytes + maxLen += 3 + } else { + s = append(s, b) + } + } + } + s = append(s, '.') + off += c + case 0xC0: + // pointer to somewhere else in msg. + // remember location after first ptr, + // since that's how many bytes we consumed. + // also, don't follow too many pointers -- + // maybe there's a loop. 
+ if off >= lenmsg { + return "", lenmsg, ErrBuf + } + c1 := msg[off] + off++ + if ptr == 0 { + off1 = off + } + if ptr++; ptr > 10 { + return "", lenmsg, &Error{err: "too many compression pointers"} + } + // pointer should guarantee that it advances and points forwards at least + // but the condition on previous three lines guarantees that it's + // at least loop-free + off = (c^0xC0)<<8 | int(c1) + default: + // 0x80 and 0x40 are reserved + return "", lenmsg, ErrRdata + } + } + if ptr == 0 { + off1 = off + } + if len(s) == 0 { + s = []byte(".") + } else if len(s) >= maxLen { + // error if the name is too long, but don't throw it away + return string(s), lenmsg, ErrLongDomain + } + return string(s), off1, nil +} + +func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { + if len(txt) == 0 { + if offset >= len(msg) { + return offset, ErrBuf + } + msg[offset] = 0 + return offset, nil + } + var err error + for i := range txt { + if len(txt[i]) > len(tmp) { + return offset, ErrBuf + } + offset, err = packTxtString(txt[i], msg, offset, tmp) + if err != nil { + return offset, err + } + } + return offset, nil +} + +func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { + lenByteOffset := offset + if offset >= len(msg) || len(s) > len(tmp) { + return offset, ErrBuf + } + offset++ + bs := tmp[:len(s)] + copy(bs, s) + for i := 0; i < len(bs); i++ { + if len(msg) <= offset { + return offset, ErrBuf + } + if bs[i] == '\\' { + i++ + if i == len(bs) { + break + } + // check for \DDD + if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + msg[offset] = dddToByte(bs[i:]) + i += 2 + } else { + msg[offset] = bs[i] + } + } else { + msg[offset] = bs[i] + } + offset++ + } + l := offset - lenByteOffset - 1 + if l > 255 { + return offset, &Error{err: "string exceeded 255 bytes in txt"} + } + msg[lenByteOffset] = byte(l) + return offset, nil +} + +func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) { + if offset >= len(msg) || len(s) > len(tmp) { + return offset, ErrBuf + } + bs := tmp[:len(s)] + copy(bs, s) + for i := 0; i < len(bs); i++ { + if len(msg) <= offset { + return offset, ErrBuf + } + if bs[i] == '\\' { + i++ + if i == len(bs) { + break + } + // check for \DDD + if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + msg[offset] = dddToByte(bs[i:]) + i += 2 + } else { + msg[offset] = bs[i] + } + } else { + msg[offset] = bs[i] + } + offset++ + } + return offset, nil +} + +func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) { + off = off0 + var s string + for off < len(msg) && err == nil { + s, off, err = unpackTxtString(msg, off) + if err == nil { + ss = append(ss, s) + } + } + return +} + +func unpackTxtString(msg []byte, offset int) (string, int, error) { + if offset+1 > len(msg) { + return "", offset, &Error{err: "overflow unpacking txt"} + } + l := int(msg[offset]) + if offset+l+1 > len(msg) { + return "", offset, &Error{err: "overflow unpacking txt"} + } + s := make([]byte, 0, l) + for _, b := range msg[offset+1 : offset+1+l] { + switch b { + case '"', '\\': + s = append(s, '\\', b) + default: + if b < 32 || b > 127 { // unprintable + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + } else { + s = append(s, b) + } + } + } + offset += 1 + l + return string(s), offset, nil +} + +// Helpers for dealing with 
escaped bytes +func isDigit(b byte) bool { return b >= '0' && b <= '9' } + +func dddToByte(s []byte) byte { + return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) +} + +// Helper function for packing and unpacking +func intToBytes(i *big.Int, length int) []byte { + buf := i.Bytes() + if len(buf) < length { + b := make([]byte, length) + copy(b[length-len(buf):], buf) + return b + } + return buf +} + +// PackRR packs a resource record rr into msg[off:]. +// See PackDomainName for documentation about the compression. +func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + if rr == nil { + return len(msg), &Error{err: "nil rr"} + } + + off1, err = rr.pack(msg, off, compression, compress) + if err != nil { + return len(msg), err + } + // TODO(miek): Not sure if this is needed? If removed we can remove rawmsg.go as well. + if rawSetRdlength(msg, off, off1) { + return off1, nil + } + return off, ErrRdata +} + +// UnpackRR unpacks msg[off:] into an RR. +func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) { + h, off, msg, err := unpackHeader(msg, off) + if err != nil { + return nil, len(msg), err + } + + return UnpackRRWithHeader(h, msg, off) +} + +// UnpackRRWithHeader unpacks the record type specific payload given an existing +// RR_Header. +func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err error) { + end := off + int(h.Rdlength) + + if fn, known := typeToUnpack[h.Rrtype]; !known { + rr, off, err = unpackRFC3597(h, msg, off) + } else { + rr, off, err = fn(h, msg, off) + } + if off != end { + return &h, end, &Error{err: "bad rdlength"} + } + return rr, off, err +} + +// unpackRRslice unpacks msg[off:] into an []RR. +// If we cannot unpack the whole array, then it will return nil +func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) { + var r RR + // Don't pre-allocate, l may be under attacker control + var dst []RR + for i := 0; i < l; i++ { + off1 := off + r, off, err = UnpackRR(msg, off) + if err != nil { + off = len(msg) + break + } + // If offset does not increase anymore, l is a lie + if off1 == off { + l = i + break + } + dst = append(dst, r) + } + if err != nil && off == len(msg) { + dst = nil + } + return dst, off, err +} + +// Convert a MsgHdr to a string, with dig-like headers: +// +//;; opcode: QUERY, status: NOERROR, id: 48404 +// +//;; flags: qr aa rd ra; +func (h *MsgHdr) String() string { + if h == nil { + return " MsgHdr" + } + + s := ";; opcode: " + OpcodeToString[h.Opcode] + s += ", status: " + RcodeToString[h.Rcode] + s += ", id: " + strconv.Itoa(int(h.Id)) + "\n" + + s += ";; flags:" + if h.Response { + s += " qr" + } + if h.Authoritative { + s += " aa" + } + if h.Truncated { + s += " tc" + } + if h.RecursionDesired { + s += " rd" + } + if h.RecursionAvailable { + s += " ra" + } + if h.Zero { // Hmm + s += " z" + } + if h.AuthenticatedData { + s += " ad" + } + if h.CheckingDisabled { + s += " cd" + } + + s += ";" + return s +} + +// Pack packs a Msg: it is converted to to wire format. +// If the dns.Compress is true the message will be in compressed wire format. +func (dns *Msg) Pack() (msg []byte, err error) { + return dns.PackBuffer(nil) +} + +// PackBuffer packs a Msg, using the given buffer buf. If buf is too small a new buffer is allocated. +func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) { + var compression map[string]int + if dns.Compress { + compression = make(map[string]int) // Compression pointer mappings. 
+ } + return dns.packBufferWithCompressionMap(buf, compression) +} + +// packBufferWithCompressionMap packs a Msg, using the given buffer buf. +func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression map[string]int) (msg []byte, err error) { + // We use a similar function in tsig.go's stripTsig. + + var dh Header + + if dns.Rcode < 0 || dns.Rcode > 0xFFF { + return nil, ErrRcode + } + if dns.Rcode > 0xF { + // Regular RCODE field is 4 bits + opt := dns.IsEdns0() + if opt == nil { + return nil, ErrExtendedRcode + } + opt.SetExtendedRcode(uint8(dns.Rcode >> 4)) + } + + // Convert convenient Msg into wire-like Header. + dh.Id = dns.Id + dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode&0xF) + if dns.Response { + dh.Bits |= _QR + } + if dns.Authoritative { + dh.Bits |= _AA + } + if dns.Truncated { + dh.Bits |= _TC + } + if dns.RecursionDesired { + dh.Bits |= _RD + } + if dns.RecursionAvailable { + dh.Bits |= _RA + } + if dns.Zero { + dh.Bits |= _Z + } + if dns.AuthenticatedData { + dh.Bits |= _AD + } + if dns.CheckingDisabled { + dh.Bits |= _CD + } + + // Prepare variable sized arrays. + question := dns.Question + answer := dns.Answer + ns := dns.Ns + extra := dns.Extra + + dh.Qdcount = uint16(len(question)) + dh.Ancount = uint16(len(answer)) + dh.Nscount = uint16(len(ns)) + dh.Arcount = uint16(len(extra)) + + // We need the uncompressed length here, because we first pack it and then compress it. + msg = buf + uncompressedLen := compressedLen(dns, false) + if packLen := uncompressedLen + 1; len(msg) < packLen { + msg = make([]byte, packLen) + } + + // Pack it in: header and then the pieces. + off := 0 + off, err = dh.pack(msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + for i := 0; i < len(question); i++ { + off, err = question[i].pack(msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(answer); i++ { + off, err = PackRR(answer[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(ns); i++ { + off, err = PackRR(ns[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(extra); i++ { + off, err = PackRR(extra[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + return msg[:off], nil +} + +// Unpack unpacks a binary message to a Msg structure. +func (dns *Msg) Unpack(msg []byte) (err error) { + var ( + dh Header + off int + ) + if dh, off, err = unpackMsgHdr(msg, off); err != nil { + return err + } + + dns.Id = dh.Id + dns.Response = (dh.Bits & _QR) != 0 + dns.Opcode = int(dh.Bits>>11) & 0xF + dns.Authoritative = (dh.Bits & _AA) != 0 + dns.Truncated = (dh.Bits & _TC) != 0 + dns.RecursionDesired = (dh.Bits & _RD) != 0 + dns.RecursionAvailable = (dh.Bits & _RA) != 0 + dns.Zero = (dh.Bits & _Z) != 0 + dns.AuthenticatedData = (dh.Bits & _AD) != 0 + dns.CheckingDisabled = (dh.Bits & _CD) != 0 + dns.Rcode = int(dh.Bits & 0xF) + + // If we are at the end of the message we should return *just* the + // header. This can still be useful to the caller. 9.9.9.9 sends these + // when responding with REFUSED for instance. + if off == len(msg) { + // reset sections before returning + dns.Question, dns.Answer, dns.Ns, dns.Extra = nil, nil, nil, nil + return nil + } + + // Qdcount, Ancount, Nscount, Arcount can't be trusted, as they are + // attacker controlled. This means we can't use them to pre-allocate + // slices. 
+ dns.Question = nil + for i := 0; i < int(dh.Qdcount); i++ { + off1 := off + var q Question + q, off, err = unpackQuestion(msg, off) + if err != nil { + // Even if Truncated is set, we only will set ErrTruncated if we + // actually got the questions + return err + } + if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie! + dh.Qdcount = uint16(i) + break + } + dns.Question = append(dns.Question, q) + } + + dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off) + // The header counts might have been wrong so we need to update it + dh.Ancount = uint16(len(dns.Answer)) + if err == nil { + dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off) + } + // The header counts might have been wrong so we need to update it + dh.Nscount = uint16(len(dns.Ns)) + if err == nil { + dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off) + } + // The header counts might have been wrong so we need to update it + dh.Arcount = uint16(len(dns.Extra)) + + if off != len(msg) { + // TODO(miek) make this an error? + // use PackOpt to let people tell how detailed the error reporting should be? + // println("dns: extra bytes in dns packet", off, "<", len(msg)) + } else if dns.Truncated { + // Whether we ran into a an error or not, we want to return that it + // was truncated + err = ErrTruncated + } + return err +} + +// Convert a complete message to a string with dig-like output. +func (dns *Msg) String() string { + if dns == nil { + return " MsgHdr" + } + s := dns.MsgHdr.String() + " " + s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " + s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " + s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " + s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" + if len(dns.Question) > 0 { + s += "\n;; QUESTION SECTION:\n" + for i := 0; i < len(dns.Question); i++ { + s += dns.Question[i].String() + "\n" + } + } + if len(dns.Answer) > 0 { + s += "\n;; ANSWER SECTION:\n" + for i := 0; i < len(dns.Answer); i++ { + if dns.Answer[i] != nil { + s += dns.Answer[i].String() + "\n" + } + } + } + if len(dns.Ns) > 0 { + s += "\n;; AUTHORITY SECTION:\n" + for i := 0; i < len(dns.Ns); i++ { + if dns.Ns[i] != nil { + s += dns.Ns[i].String() + "\n" + } + } + } + if len(dns.Extra) > 0 { + s += "\n;; ADDITIONAL SECTION:\n" + for i := 0; i < len(dns.Extra); i++ { + if dns.Extra[i] != nil { + s += dns.Extra[i].String() + "\n" + } + } + } + return s +} + +// Len returns the message length when in (un)compressed wire format. +// If dns.Compress is true compression it is taken into account. Len() +// is provided to be a faster way to get the size of the resulting packet, +// than packing it, measuring the size and discarding the buffer. +func (dns *Msg) Len() int { return compressedLen(dns, dns.Compress) } + +func compressedLenWithCompressionMap(dns *Msg, compression map[string]int) int { + l := 12 // Message header is always 12 bytes + for _, r := range dns.Question { + compressionLenHelper(compression, r.Name, l) + l += r.len() + } + l += compressionLenSlice(l, compression, dns.Answer) + l += compressionLenSlice(l, compression, dns.Ns) + l += compressionLenSlice(l, compression, dns.Extra) + return l +} + +// compressedLen returns the message length when in compressed wire format +// when compress is true, otherwise the uncompressed length is returned. +func compressedLen(dns *Msg, compress bool) int { + // We always return one more than needed. 
+ if compress { + compression := map[string]int{} + return compressedLenWithCompressionMap(dns, compression) + } + l := 12 // Message header is always 12 bytes + + for _, r := range dns.Question { + l += r.len() + } + for _, r := range dns.Answer { + if r != nil { + l += r.len() + } + } + for _, r := range dns.Ns { + if r != nil { + l += r.len() + } + } + for _, r := range dns.Extra { + if r != nil { + l += r.len() + } + } + + return l +} + +func compressionLenSlice(lenp int, c map[string]int, rs []RR) int { + initLen := lenp + for _, r := range rs { + if r == nil { + continue + } + // TmpLen is to track len of record at 14bits boudaries + tmpLen := lenp + + x := r.len() + // track this length, and the global length in len, while taking compression into account for both. + k, ok, _ := compressionLenSearch(c, r.Header().Name) + if ok { + // Size of x is reduced by k, but we add 1 since k includes the '.' and label descriptor take 2 bytes + // so, basically x:= x - k - 1 + 2 + x += 1 - k + } + + tmpLen += compressionLenHelper(c, r.Header().Name, tmpLen) + k, ok, _ = compressionLenSearchType(c, r) + if ok { + x += 1 - k + } + lenp += x + tmpLen = lenp + tmpLen += compressionLenHelperType(c, r, tmpLen) + + } + return lenp - initLen +} + +// Put the parts of the name in the compression map, return the size in bytes added in payload +func compressionLenHelper(c map[string]int, s string, currentLen int) int { + if currentLen > maxCompressionOffset { + // We won't be able to add any label that could be re-used later anyway + return 0 + } + if _, ok := c[s]; ok { + return 0 + } + initLen := currentLen + pref := "" + prev := s + lbs := Split(s) + for j := 0; j < len(lbs); j++ { + pref = s[lbs[j]:] + currentLen += len(prev) - len(pref) + prev = pref + if _, ok := c[pref]; !ok { + // If first byte label is within the first 14bits, it might be re-used later + if currentLen < maxCompressionOffset { + c[pref] = currentLen + } + } else { + added := currentLen - initLen + if j > 0 { + // We added a new PTR + added += 2 + } + return added + } + } + return currentLen - initLen +} + +// Look for each part in the compression map and returns its length, +// keep on searching so we get the longest match. +// Will return the size of compression found, whether a match has been +// found and the size of record if added in payload +func compressionLenSearch(c map[string]int, s string) (int, bool, int) { + off := 0 + end := false + if s == "" { // don't bork on bogus data + return 0, false, 0 + } + fullSize := 0 + for { + if _, ok := c[s[off:]]; ok { + return len(s[off:]), true, fullSize + off + } + if end { + break + } + // Each label descriptor takes 2 bytes, add it + fullSize += 2 + off, end = NextLabel(s, off) + } + return 0, false, fullSize + len(s) +} + +// Copy returns a new RR which is a deep-copy of r. +func Copy(r RR) RR { r1 := r.copy(); return r1 } + +// Len returns the length (in octets) of the uncompressed RR in wire format. +func Len(r RR) int { return r.len() } + +// Copy returns a new *Msg which is a deep-copy of dns. +func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) } + +// CopyTo copies the contents to the provided message using a deep-copy and returns the copy. 
+func (dns *Msg) CopyTo(r1 *Msg) *Msg { + r1.MsgHdr = dns.MsgHdr + r1.Compress = dns.Compress + + if len(dns.Question) > 0 { + r1.Question = make([]Question, len(dns.Question)) + copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy + } + + rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) + var rri int + + if len(dns.Answer) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Answer); i++ { + rrArr[rri] = dns.Answer[i].copy() + rri++ + } + r1.Answer = rrArr[rrbegin:rri:rri] + } + + if len(dns.Ns) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Ns); i++ { + rrArr[rri] = dns.Ns[i].copy() + rri++ + } + r1.Ns = rrArr[rrbegin:rri:rri] + } + + if len(dns.Extra) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Extra); i++ { + rrArr[rri] = dns.Extra[i].copy() + rri++ + } + r1.Extra = rrArr[rrbegin:rri:rri] + } + + return r1 +} + +func (q *Question) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := PackDomainName(q.Name, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint16(q.Qtype, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(q.Qclass, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func unpackQuestion(msg []byte, off int) (Question, int, error) { + var ( + q Question + err error + ) + q.Name, off, err = UnpackDomainName(msg, off) + if err != nil { + return q, off, err + } + if off == len(msg) { + return q, off, nil + } + q.Qtype, off, err = unpackUint16(msg, off) + if err != nil { + return q, off, err + } + if off == len(msg) { + return q, off, nil + } + q.Qclass, off, err = unpackUint16(msg, off) + if off == len(msg) { + return q, off, nil + } + return q, off, err +} + +func (dh *Header) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := packUint16(dh.Id, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Bits, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Qdcount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Ancount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Nscount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Arcount, msg, off) + return off, err +} + +func unpackMsgHdr(msg []byte, off int) (Header, int, error) { + var ( + dh Header + err error + ) + dh.Id, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Bits, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Qdcount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Ancount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Nscount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Arcount, off, err = unpackUint16(msg, off) + return dh, off, err +} diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go new file mode 100644 index 00000000000..4a6e878de93 --- /dev/null +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -0,0 +1,641 @@ +package dns + +import ( + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "net" + "strconv" +) + +// helper functions called from the generated zmsg.go + +// These function are named after the tag to help pack/unpack, if there is no tag it is the name +// of the type they 
pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or +// packDataDomainName. + +func unpackDataA(msg []byte, off int) (net.IP, int, error) { + if off+net.IPv4len > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking a"} + } + a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...) + off += net.IPv4len + return a, off, nil +} + +func packDataA(a net.IP, msg []byte, off int) (int, error) { + // It must be a slice of 4, even if it is 16, we encode only the first 4 + if off+net.IPv4len > len(msg) { + return len(msg), &Error{err: "overflow packing a"} + } + switch len(a) { + case net.IPv4len, net.IPv6len: + copy(msg[off:], a.To4()) + off += net.IPv4len + case 0: + // Allowed, for dynamic updates. + default: + return len(msg), &Error{err: "overflow packing a"} + } + return off, nil +} + +func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) { + if off+net.IPv6len > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking aaaa"} + } + aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...) + off += net.IPv6len + return aaaa, off, nil +} + +func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) { + if off+net.IPv6len > len(msg) { + return len(msg), &Error{err: "overflow packing aaaa"} + } + + switch len(aaaa) { + case net.IPv6len: + copy(msg[off:], aaaa) + off += net.IPv6len + case 0: + // Allowed, dynamic updates. + default: + return len(msg), &Error{err: "overflow packing aaaa"} + } + return off, nil +} + +// unpackHeader unpacks an RR header, returning the offset to the end of the header and a +// re-sliced msg according to the expected length of the RR. +func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) { + hdr := RR_Header{} + if off == len(msg) { + return hdr, off, msg, nil + } + + hdr.Name, off, err = UnpackDomainName(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Rrtype, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Class, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Ttl, off, err = unpackUint32(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Rdlength, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength) + return hdr, off, msg, err +} + +// pack packs an RR header, returning the offset to the end of the header. +// See PackDomainName for documentation about the compression. +func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + if off == len(msg) { + return off, nil + } + + off, err = PackDomainName(hdr.Name, msg, off, compression, compress) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Rrtype, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Class, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint32(hdr.Ttl, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Rdlength, msg, off) + if err != nil { + return len(msg), err + } + return off, nil +} + +// helper helper functions. + +// truncateMsgFromRdLength truncates msg to match the expected length of the RR. +// Returns an error if msg is smaller than the expected size. 
+func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) { + lenrd := off + int(rdlength) + if lenrd > len(msg) { + return msg, &Error{err: "overflowing header size"} + } + return msg[:lenrd], nil +} + +var base32HexNoPadEncoding = base32.HexEncoding.WithPadding(base32.NoPadding) + +func fromBase32(s []byte) (buf []byte, err error) { + for i, b := range s { + if b >= 'a' && b <= 'z' { + s[i] = b - 32 + } + } + buflen := base32HexNoPadEncoding.DecodedLen(len(s)) + buf = make([]byte, buflen) + n, err := base32HexNoPadEncoding.Decode(buf, s) + buf = buf[:n] + return +} + +func toBase32(b []byte) string { + return base32HexNoPadEncoding.EncodeToString(b) +} + +func fromBase64(s []byte) (buf []byte, err error) { + buflen := base64.StdEncoding.DecodedLen(len(s)) + buf = make([]byte, buflen) + n, err := base64.StdEncoding.Decode(buf, s) + buf = buf[:n] + return +} + +func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) } + +// dynamicUpdate returns true if the Rdlength is zero. +func noRdata(h RR_Header) bool { return h.Rdlength == 0 } + +func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) { + if off+1 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint8"} + } + return uint8(msg[off]), off + 1, nil +} + +func packUint8(i uint8, msg []byte, off int) (off1 int, err error) { + if off+1 > len(msg) { + return len(msg), &Error{err: "overflow packing uint8"} + } + msg[off] = byte(i) + return off + 1, nil +} + +func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) { + if off+2 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint16"} + } + return binary.BigEndian.Uint16(msg[off:]), off + 2, nil +} + +func packUint16(i uint16, msg []byte, off int) (off1 int, err error) { + if off+2 > len(msg) { + return len(msg), &Error{err: "overflow packing uint16"} + } + binary.BigEndian.PutUint16(msg[off:], i) + return off + 2, nil +} + +func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) { + if off+4 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint32"} + } + return binary.BigEndian.Uint32(msg[off:]), off + 4, nil +} + +func packUint32(i uint32, msg []byte, off int) (off1 int, err error) { + if off+4 > len(msg) { + return len(msg), &Error{err: "overflow packing uint32"} + } + binary.BigEndian.PutUint32(msg[off:], i) + return off + 4, nil +} + +func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) { + if off+6 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"} + } + // Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes) + i = (uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 | + uint64(msg[off+4])<<8 | uint64(msg[off+5]))) + off += 6 + return i, off, nil +} + +func packUint48(i uint64, msg []byte, off int) (off1 int, err error) { + if off+6 > len(msg) { + return len(msg), &Error{err: "overflow packing uint64 as uint48"} + } + msg[off] = byte(i >> 40) + msg[off+1] = byte(i >> 32) + msg[off+2] = byte(i >> 24) + msg[off+3] = byte(i >> 16) + msg[off+4] = byte(i >> 8) + msg[off+5] = byte(i) + off += 6 + return off, nil +} + +func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) { + if off+8 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint64"} + } + return binary.BigEndian.Uint64(msg[off:]), off + 8, nil +} + +func packUint64(i uint64, msg []byte, off int) (off1 int, err 
error) { + if off+8 > len(msg) { + return len(msg), &Error{err: "overflow packing uint64"} + } + binary.BigEndian.PutUint64(msg[off:], i) + off += 8 + return off, nil +} + +func unpackString(msg []byte, off int) (string, int, error) { + if off+1 > len(msg) { + return "", off, &Error{err: "overflow unpacking txt"} + } + l := int(msg[off]) + if off+l+1 > len(msg) { + return "", off, &Error{err: "overflow unpacking txt"} + } + s := make([]byte, 0, l) + for _, b := range msg[off+1 : off+1+l] { + switch b { + case '"', '\\': + s = append(s, '\\', b) + default: + if b < 32 || b > 127 { // unprintable + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + } else { + s = append(s, b) + } + } + } + off += 1 + l + return string(s), off, nil +} + +func packString(s string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) + off, err := packTxtString(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackStringBase32(msg []byte, off, end int) (string, int, error) { + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking base32"} + } + s := toBase32(msg[off:end]) + return s, end, nil +} + +func packStringBase32(s string, msg []byte, off int) (int, error) { + b32, err := fromBase32([]byte(s)) + if err != nil { + return len(msg), err + } + if off+len(b32) > len(msg) { + return len(msg), &Error{err: "overflow packing base32"} + } + copy(msg[off:off+len(b32)], b32) + off += len(b32) + return off, nil +} + +func unpackStringBase64(msg []byte, off, end int) (string, int, error) { + // Rest of the RR is base64 encoded value, so we don't need an explicit length + // to be set. Thus far all RR's that have base64 encoded fields have those as their + // last one. What we do need is the end of the RR! + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking base64"} + } + s := toBase64(msg[off:end]) + return s, end, nil +} + +func packStringBase64(s string, msg []byte, off int) (int, error) { + b64, err := fromBase64([]byte(s)) + if err != nil { + return len(msg), err + } + if off+len(b64) > len(msg) { + return len(msg), &Error{err: "overflow packing base64"} + } + copy(msg[off:off+len(b64)], b64) + off += len(b64) + return off, nil +} + +func unpackStringHex(msg []byte, off, end int) (string, int, error) { + // Rest of the RR is hex encoded value, so we don't need an explicit length + // to be set. NSEC and TSIG have hex fields with a length field. + // What we do need is the end of the RR! + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking hex"} + } + + s := hex.EncodeToString(msg[off:end]) + return s, end, nil +} + +func packStringHex(s string, msg []byte, off int) (int, error) { + h, err := hex.DecodeString(s) + if err != nil { + return len(msg), err + } + if off+(len(h)) > len(msg) { + return len(msg), &Error{err: "overflow packing hex"} + } + copy(msg[off:off+len(h)], h) + off += len(h) + return off, nil +} + +func unpackStringTxt(msg []byte, off int) ([]string, int, error) { + txt, off, err := unpackTxt(msg, off) + if err != nil { + return nil, len(msg), err + } + return txt, off, nil +} + +func packStringTxt(s []string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many. 
+ off, err := packTxt(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) { + var edns []EDNS0 +Option: + code := uint16(0) + if off+4 > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + code = binary.BigEndian.Uint16(msg[off:]) + off += 2 + optlen := binary.BigEndian.Uint16(msg[off:]) + off += 2 + if off+int(optlen) > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + switch code { + case EDNS0NSID: + e := new(EDNS0_NSID) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0SUBNET: + e := new(EDNS0_SUBNET) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0COOKIE: + e := new(EDNS0_COOKIE) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0UL: + e := new(EDNS0_UL) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0LLQ: + e := new(EDNS0_LLQ) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0DAU: + e := new(EDNS0_DAU) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0DHU: + e := new(EDNS0_DHU) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0N3U: + e := new(EDNS0_N3U) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0PADDING: + e := new(EDNS0_PADDING) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + default: + e := new(EDNS0_LOCAL) + e.Code = code + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + } + + if off < len(msg) { + goto Option + } + + return edns, off, nil +} + +func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) { + for _, el := range options { + b, err := el.pack() + if err != nil || off+3 > len(msg) { + return len(msg), &Error{err: "overflow packing opt"} + } + binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code + binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length + off += 4 + if off+len(b) > len(msg) { + copy(msg[off:], b) + off = len(msg) + continue + } + // Actual data + copy(msg[off:off+len(b)], b) + off += len(b) + } + return off, nil +} + +func unpackStringOctet(msg []byte, off int) (string, int, error) { + s := string(msg[off:]) + return s, len(msg), nil +} + +func packStringOctet(s string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) + off, err := packOctetString(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) { + var nsec []uint16 + length, window, lastwindow := 0, 0, -1 + for off < len(msg) { + if off+2 > len(msg) { + return nsec, len(msg), &Error{err: 
"overflow unpacking nsecx"} + } + window = int(msg[off]) + length = int(msg[off+1]) + off += 2 + if window <= lastwindow { + // RFC 4034: Blocks are present in the NSEC RR RDATA in + // increasing numerical order. + return nsec, len(msg), &Error{err: "out of order NSEC block"} + } + if length == 0 { + // RFC 4034: Blocks with no types present MUST NOT be included. + return nsec, len(msg), &Error{err: "empty NSEC block"} + } + if length > 32 { + return nsec, len(msg), &Error{err: "NSEC block too long"} + } + if off+length > len(msg) { + return nsec, len(msg), &Error{err: "overflowing NSEC block"} + } + + // Walk the bytes in the window and extract the type bits + for j := 0; j < length; j++ { + b := msg[off+j] + // Check the bits one by one, and set the type + if b&0x80 == 0x80 { + nsec = append(nsec, uint16(window*256+j*8+0)) + } + if b&0x40 == 0x40 { + nsec = append(nsec, uint16(window*256+j*8+1)) + } + if b&0x20 == 0x20 { + nsec = append(nsec, uint16(window*256+j*8+2)) + } + if b&0x10 == 0x10 { + nsec = append(nsec, uint16(window*256+j*8+3)) + } + if b&0x8 == 0x8 { + nsec = append(nsec, uint16(window*256+j*8+4)) + } + if b&0x4 == 0x4 { + nsec = append(nsec, uint16(window*256+j*8+5)) + } + if b&0x2 == 0x2 { + nsec = append(nsec, uint16(window*256+j*8+6)) + } + if b&0x1 == 0x1 { + nsec = append(nsec, uint16(window*256+j*8+7)) + } + } + off += length + lastwindow = window + } + return nsec, off, nil +} + +func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) { + if len(bitmap) == 0 { + return off, nil + } + var lastwindow, lastlength uint16 + for j := 0; j < len(bitmap); j++ { + t := bitmap[j] + window := t / 256 + length := (t-window*256)/8 + 1 + if window > lastwindow && lastlength != 0 { // New window, jump to the new offset + off += int(lastlength) + 2 + lastlength = 0 + } + if window < lastwindow || length < lastlength { + return len(msg), &Error{err: "nsec bits out of order"} + } + if off+2+int(length) > len(msg) { + return len(msg), &Error{err: "overflow packing nsec"} + } + // Setting the window # + msg[off] = byte(window) + // Setting the octets length + msg[off+1] = byte(length) + // Setting the bit value for the type in the right octet + msg[off+1+int(length)] |= byte(1 << (7 - (t % 8))) + lastwindow, lastlength = window, length + } + off += int(lastlength) + 2 + return off, nil +} + +func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) { + var ( + servers []string + s string + err error + ) + if end > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking domain names"} + } + for off < end { + s, off, err = UnpackDomainName(msg, off) + if err != nil { + return servers, len(msg), err + } + servers = append(servers, s) + } + return servers, off, nil +} + +func packDataDomainNames(names []string, msg []byte, off int, compression map[string]int, compress bool) (int, error) { + var err error + for j := 0; j < len(names); j++ { + off, err = PackDomainName(names[j], msg, off, compression, false && compress) + if err != nil { + return len(msg), err + } + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go new file mode 100644 index 00000000000..9b908c44786 --- /dev/null +++ b/vendor/github.com/miekg/dns/nsecx.go @@ -0,0 +1,106 @@ +package dns + +import ( + "crypto/sha1" + "hash" + "strings" +) + +type saltWireFmt struct { + Salt string `dns:"size-hex"` +} + +// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase. 
+func HashName(label string, ha uint8, iter uint16, salt string) string { + saltwire := new(saltWireFmt) + saltwire.Salt = salt + wire := make([]byte, DefaultMsgSize) + n, err := packSaltWire(saltwire, wire) + if err != nil { + return "" + } + wire = wire[:n] + name := make([]byte, 255) + off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false) + if err != nil { + return "" + } + name = name[:off] + var s hash.Hash + switch ha { + case SHA1: + s = sha1.New() + default: + return "" + } + + // k = 0 + s.Write(name) + s.Write(wire) + nsec3 := s.Sum(nil) + // k > 0 + for k := uint16(0); k < iter; k++ { + s.Reset() + s.Write(nsec3) + s.Write(wire) + nsec3 = s.Sum(nsec3[:0]) + } + return toBase32(nsec3) +} + +// Cover returns true if a name is covered by the NSEC3 record +func (rr *NSEC3) Cover(name string) bool { + nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) + owner := strings.ToUpper(rr.Hdr.Name) + labelIndices := Split(owner) + if len(labelIndices) < 2 { + return false + } + ownerHash := owner[:labelIndices[1]-1] + ownerZone := owner[labelIndices[1]:] + if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone + return false + } + + nextHash := rr.NextDomain + if ownerHash == nextHash { // empty interval + return false + } + if ownerHash > nextHash { // end of zone + if nameHash > ownerHash { // covered since there is nothing after ownerHash + return true + } + return nameHash < nextHash // if nameHash is before beginning of zone it is covered + } + if nameHash < ownerHash { // nameHash is before ownerHash, not covered + return false + } + return nameHash < nextHash // if nameHash is before nextHash is it covered (between ownerHash and nextHash) +} + +// Match returns true if a name matches the NSEC3 record +func (rr *NSEC3) Match(name string) bool { + nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) + owner := strings.ToUpper(rr.Hdr.Name) + labelIndices := Split(owner) + if len(labelIndices) < 2 { + return false + } + ownerHash := owner[:labelIndices[1]-1] + ownerZone := owner[labelIndices[1]:] + if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone + return false + } + if ownerHash == nameHash { + return true + } + return false +} + +func packSaltWire(sw *saltWireFmt, msg []byte) (int, error) { + off, err := packStringHex(sw.Salt, msg, 0) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go new file mode 100644 index 00000000000..41989e7aee7 --- /dev/null +++ b/vendor/github.com/miekg/dns/privaterr.go @@ -0,0 +1,148 @@ +package dns + +import ( + "fmt" + "strings" +) + +// PrivateRdata is an interface used for implementing "Private Use" RR types, see +// RFC 6895. This allows one to experiment with new RR types, without requesting an +// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove. +type PrivateRdata interface { + // String returns the text presentaton of the Rdata of the Private RR. + String() string + // Parse parses the Rdata of the private RR. + Parse([]string) error + // Pack is used when packing a private RR into a buffer. + Pack([]byte) (int, error) + // Unpack is used when unpacking a private RR from a buffer. + // TODO(miek): diff. signature than Pack, see edns0.go for instance. + Unpack([]byte) (int, error) + // Copy copies the Rdata. + Copy(PrivateRdata) error + // Len returns the length in octets of the Rdata. 
+ Len() int +} + +// PrivateRR represents an RR that uses a PrivateRdata user-defined type. +// It mocks normal RRs and implements dns.RR interface. +type PrivateRR struct { + Hdr RR_Header + Data PrivateRdata +} + +func mkPrivateRR(rrtype uint16) *PrivateRR { + // Panics if RR is not an instance of PrivateRR. + rrfunc, ok := TypeToRR[rrtype] + if !ok { + panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype)) + } + + anyrr := rrfunc() + switch rr := anyrr.(type) { + case *PrivateRR: + return rr + } + panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr)) +} + +// Header return the RR header of r. +func (r *PrivateRR) Header() *RR_Header { return &r.Hdr } + +func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() } + +// Private len and copy parts to satisfy RR interface. +func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() } +func (r *PrivateRR) copy() RR { + // make new RR like this: + rr := mkPrivateRR(r.Hdr.Rrtype) + rr.Hdr = r.Hdr + + err := r.Data.Copy(rr.Data) + if err != nil { + panic("dns: got value that could not be used to copy Private rdata") + } + return rr +} +func (r *PrivateRR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := r.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + n, err := r.Data.Pack(msg[off:]) + if err != nil { + return len(msg), err + } + off += n + r.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +// PrivateHandle registers a private resource record type. It requires +// string and numeric representation of private RR type and generator function as argument. +func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) { + rtypestr = strings.ToUpper(rtypestr) + + TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} } + TypeToString[rtype] = rtypestr + StringToType[rtypestr] = rtype + + typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) { + if noRdata(h) { + return &h, off, nil + } + var err error + + rr := mkPrivateRR(h.Rrtype) + rr.Hdr = h + + off1, err := rr.Data.Unpack(msg[off:]) + off += off1 + if err != nil { + return rr, off, err + } + return rr, off, err + } + + setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := mkPrivateRR(h.Rrtype) + rr.Hdr = h + + var l lex + text := make([]string, 0, 2) // could be 0..N elements, median is probably 1 + Fetch: + for { + // TODO(miek): we could also be returning _QUOTE, this might or might not + // be an issue (basically parsing TXT becomes hard) + switch l = <-c; l.value { + case zNewline, zEOF: + break Fetch + case zString: + text = append(text, l.token) + } + } + + err := rr.Data.Parse(text) + if err != nil { + return nil, &ParseError{f, err.Error(), l}, "" + } + + return rr, nil, "" + } + + typeToparserFunc[rtype] = parserFunc{setPrivateRR, true} +} + +// PrivateHandleRemove removes defenitions required to support private RR type. 
+func PrivateHandleRemove(rtype uint16) { + rtypestr, ok := TypeToString[rtype] + if ok { + delete(TypeToRR, rtype) + delete(TypeToString, rtype) + delete(typeToparserFunc, rtype) + delete(StringToType, rtypestr) + delete(typeToUnpack, rtype) + } + return +} diff --git a/vendor/github.com/miekg/dns/rawmsg.go b/vendor/github.com/miekg/dns/rawmsg.go new file mode 100644 index 00000000000..6e21fba7e1f --- /dev/null +++ b/vendor/github.com/miekg/dns/rawmsg.go @@ -0,0 +1,49 @@ +package dns + +import "encoding/binary" + +// rawSetRdlength sets the rdlength in the header of +// the RR. The offset 'off' must be positioned at the +// start of the header of the RR, 'end' must be the +// end of the RR. +func rawSetRdlength(msg []byte, off, end int) bool { + l := len(msg) +Loop: + for { + if off+1 > l { + return false + } + c := int(msg[off]) + off++ + switch c & 0xC0 { + case 0x00: + if c == 0x00 { + // End of the domainname + break Loop + } + if off+c > l { + return false + } + off += c + + case 0xC0: + // pointer, next byte included, ends domainname + off++ + break Loop + } + } + // The domainname has been seen, we at the start of the fixed part in the header. + // Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length. + off += 2 + 2 + 4 + if off+2 > l { + return false + } + //off+1 is the end of the header, 'end' is the end of the rr + //so 'end' - 'off+2' is the length of the rdata + rdatalen := end - (off + 2) + if rdatalen > 0xFFFF { + return false + } + binary.BigEndian.PutUint16(msg[off:], uint16(rdatalen)) + return true +} diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/miekg/dns/reverse.go new file mode 100644 index 00000000000..f6e7a47a6e8 --- /dev/null +++ b/vendor/github.com/miekg/dns/reverse.go @@ -0,0 +1,38 @@ +package dns + +// StringToType is the reverse of TypeToString, needed for string parsing. +var StringToType = reverseInt16(TypeToString) + +// StringToClass is the reverse of ClassToString, needed for string parsing. +var StringToClass = reverseInt16(ClassToString) + +// StringToOpcode is a map of opcodes to strings. +var StringToOpcode = reverseInt(OpcodeToString) + +// StringToRcode is a map of rcodes to strings. +var StringToRcode = reverseInt(RcodeToString) + +// Reverse a map +func reverseInt8(m map[uint8]string) map[string]uint8 { + n := make(map[string]uint8, len(m)) + for u, s := range m { + n[s] = u + } + return n +} + +func reverseInt16(m map[uint16]string) map[string]uint16 { + n := make(map[string]uint16, len(m)) + for u, s := range m { + n[s] = u + } + return n +} + +func reverseInt(m map[int]string) map[string]int { + n := make(map[string]int, len(m)) + for u, s := range m { + n[s] = u + } + return n +} diff --git a/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/miekg/dns/sanitize.go new file mode 100644 index 00000000000..c415bdd6c36 --- /dev/null +++ b/vendor/github.com/miekg/dns/sanitize.go @@ -0,0 +1,84 @@ +package dns + +// Dedup removes identical RRs from rrs. It preserves the original ordering. +// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies +// rrs. +// m is used to store the RRs temporary. If it is nil a new map will be allocated. +func Dedup(rrs []RR, m map[string]RR) []RR { + if m == nil { + m = make(map[string]RR) + } + // Save the keys, so we don't have to call normalizedString twice. + keys := make([]*string, 0, len(rrs)) + + for _, r := range rrs { + key := normalizedString(r) + keys = append(keys, &key) + if _, ok := m[key]; ok { + // Shortest TTL wins. 
+ if m[key].Header().Ttl > r.Header().Ttl { + m[key].Header().Ttl = r.Header().Ttl + } + continue + } + + m[key] = r + } + // If the length of the result map equals the amount of RRs we got, + // it means they were all different. We can then just return the original rrset. + if len(m) == len(rrs) { + return rrs + } + + j := 0 + for i, r := range rrs { + // If keys[i] lives in the map, we should copy and remove it. + if _, ok := m[*keys[i]]; ok { + delete(m, *keys[i]) + rrs[j] = r + j++ + } + + if len(m) == 0 { + break + } + } + + return rrs[:j] +} + +// normalizedString returns a normalized string from r. The TTL +// is removed and the domain name is lowercased. We go from this: +// DomainNameTTLCLASSTYPERDATA to: +// lowercasenameCLASSTYPE... +func normalizedString(r RR) string { + // A string Go DNS makes has: domainnameTTL... + b := []byte(r.String()) + + // find the first non-escaped tab, then another, so we capture where the TTL lives. + esc := false + ttlStart, ttlEnd := 0, 0 + for i := 0; i < len(b) && ttlEnd == 0; i++ { + switch { + case b[i] == '\\': + esc = !esc + case b[i] == '\t' && !esc: + if ttlStart == 0 { + ttlStart = i + continue + } + if ttlEnd == 0 { + ttlEnd = i + } + case b[i] >= 'A' && b[i] <= 'Z' && !esc: + b[i] += 32 + default: + esc = false + } + } + + // remove TTL. + copy(b[ttlStart:], b[ttlEnd:]) + cut := ttlEnd - ttlStart + return string(b[:len(b)-cut]) +} diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go new file mode 100644 index 00000000000..f9cd47401d1 --- /dev/null +++ b/vendor/github.com/miekg/dns/scan.go @@ -0,0 +1,1007 @@ +package dns + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" +) + +const maxTok = 2048 // Largest token we can return. +const maxUint16 = 1<<16 - 1 + +// Tokinize a RFC 1035 zone file. The tokenizer will normalize it: +// * Add ownernames if they are left blank; +// * Suppress sequences of spaces; +// * Make each RR fit on one line (_NEWLINE is send as last) +// * Handle comments: ; +// * Handle braces - anywhere. +const ( + // Zonefile + zEOF = iota + zString + zBlank + zQuote + zNewline + zRrtpe + zOwner + zClass + zDirOrigin // $ORIGIN + zDirTTL // $TTL + zDirInclude // $INCLUDE + zDirGenerate // $GENERATE + + // Privatekey file + zValue + zKey + + zExpectOwnerDir // Ownername + zExpectOwnerBl // Whitespace after the ownername + zExpectAny // Expect rrtype, ttl or class + zExpectAnyNoClass // Expect rrtype or ttl + zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS + zExpectAnyNoTTL // Expect rrtype or class + zExpectAnyNoTTLBl // Whitespace after _EXPECT_ANY_NOTTL + zExpectRrtype // Expect rrtype + zExpectRrtypeBl // Whitespace BEFORE rrtype + zExpectRdata // The first element of the rdata + zExpectDirTTLBl // Space after directive $TTL + zExpectDirTTL // Directive $TTL + zExpectDirOriginBl // Space after directive $ORIGIN + zExpectDirOrigin // Directive $ORIGIN + zExpectDirIncludeBl // Space after directive $INCLUDE + zExpectDirInclude // Directive $INCLUDE + zExpectDirGenerate // Directive $GENERATE + zExpectDirGenerateBl // Space after directive $GENERATE +) + +// ParseError is a parsing error. It contains the parse error and the location in the io.Reader +// where the error occurred. 
+type ParseError struct { + file string + err string + lex lex +} + +func (e *ParseError) Error() (s string) { + if e.file != "" { + s = e.file + ": " + } + s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " + + strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column) + return +} + +type lex struct { + token string // text of the token + tokenUpper string // uppercase text of the token + length int // length of the token + err bool // when true, token text has lexer error + value uint8 // value: zString, _BLANK, etc. + line int // line in the file + column int // column in the file + torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar + comment string // any comment text seen +} + +// Token holds the token that are returned when a zone file is parsed. +type Token struct { + // The scanned resource record when error is not nil. + RR + // When an error occurred, this has the error specifics. + Error *ParseError + // A potential comment positioned after the RR and on the same line. + Comment string +} + +// ttlState describes the state necessary to fill in an omitted RR TTL +type ttlState struct { + ttl uint32 // ttl is the current default TTL + isByDirective bool // isByDirective indicates whether ttl was set by a $TTL directive +} + +// NewRR reads the RR contained in the string s. Only the first RR is +// returned. If s contains no RR, return nil with no error. The class +// defaults to IN and TTL defaults to 3600. The full zone file syntax +// like $TTL, $ORIGIN, etc. is supported. All fields of the returned +// RR are set, except RR.Header().Rdlength which is set to 0. +func NewRR(s string) (RR, error) { + if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline + return ReadRR(strings.NewReader(s+"\n"), "") + } + return ReadRR(strings.NewReader(s), "") +} + +// ReadRR reads the RR contained in q. +// See NewRR for more documentation. +func ReadRR(q io.Reader, filename string) (RR, error) { + defttl := &ttlState{defaultTtl, false} + r := <-parseZoneHelper(q, ".", filename, defttl, 1) + if r == nil { + return nil, nil + } + + if r.Error != nil { + return nil, r.Error + } + return r.RR, nil +} + +// ParseZone reads a RFC 1035 style zonefile from r. It returns *Tokens on the +// returned channel, each consisting of either a parsed RR and optional comment +// or a nil RR and an error. The string file is only used +// in error reporting. The string origin is used as the initial origin, as +// if the file would start with an $ORIGIN directive. +// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported. +// The channel t is closed by ParseZone when the end of r is reached. +// +// Basic usage pattern when reading from a string (z) containing the +// zone data: +// +// for x := range dns.ParseZone(strings.NewReader(z), "", "") { +// if x.Error != nil { +// // log.Println(x.Error) +// } else { +// // Do something with x.RR +// } +// } +// +// Comments specified after an RR (and on the same line!) are returned too: +// +// foo. IN A 10.0.0.1 ; this is a comment +// +// The text "; this is comment" is returned in Token.Comment. Comments inside the +// RR are discarded. Comments on a line by themselves are discarded too. 
+func ParseZone(r io.Reader, origin, file string) chan *Token { + return parseZoneHelper(r, origin, file, nil, 10000) +} + +func parseZoneHelper(r io.Reader, origin, file string, defttl *ttlState, chansize int) chan *Token { + t := make(chan *Token, chansize) + go parseZone(r, origin, file, defttl, t, 0) + return t +} + +func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, include int) { + defer func() { + if include == 0 { + close(t) + } + }() + s, cancel := scanInit(r) + c := make(chan lex) + // Start the lexer + go zlexer(s, c) + + defer func() { + cancel() + // zlexer can send up to three tokens, the next one and possibly 2 remainders. + // Do a non-blocking read. + _, ok := <-c + _, ok = <-c + _, ok = <-c + if !ok { + // too bad + } + }() + // 6 possible beginnings of a line, _ is a space + // 0. zRRTYPE -> all omitted until the rrtype + // 1. zOwner _ zRrtype -> class/ttl omitted + // 2. zOwner _ zString _ zRrtype -> class omitted + // 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class + // 4. zOwner _ zClass _ zRrtype -> ttl omitted + // 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed) + // After detecting these, we know the zRrtype so we can jump to functions + // handling the rdata for each of these types. + + if origin != "" { + origin = Fqdn(origin) + if _, ok := IsDomainName(origin); !ok { + t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}} + return + } + } + + st := zExpectOwnerDir // initial state + var h RR_Header + var prevName string + for l := range c { + // Lexer spotted an error already + if l.err == true { + t <- &Token{Error: &ParseError{f, l.token, l}} + return + + } + switch st { + case zExpectOwnerDir: + // We can also expect a directive, like $TTL or $ORIGIN + if defttl != nil { + h.Ttl = defttl.ttl + } + h.Class = ClassINET + switch l.value { + case zNewline: + st = zExpectOwnerDir + case zOwner: + h.Name = l.token + name, ok := toAbsoluteName(l.token, origin) + if !ok { + t <- &Token{Error: &ParseError{f, "bad owner name", l}} + return + } + h.Name = name + prevName = h.Name + st = zExpectOwnerBl + case zDirTTL: + st = zExpectDirTTLBl + case zDirOrigin: + st = zExpectDirOriginBl + case zDirInclude: + st = zExpectDirIncludeBl + case zDirGenerate: + st = zExpectDirGenerateBl + case zRrtpe: + h.Name = prevName + h.Rrtype = l.torc + st = zExpectRdata + case zClass: + h.Name = prevName + h.Class = l.torc + st = zExpectAnyNoClassBl + case zBlank: + // Discard, can happen when there is nothing on the + // line except the RR type + case zString: + ttl, ok := stringToTTL(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + if defttl == nil || !defttl.isByDirective { + defttl = &ttlState{ttl, false} + } + st = zExpectAnyNoTTLBl + + default: + t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}} + return + } + case zExpectDirIncludeBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}} + return + } + st = zExpectDirInclude + case zExpectDirInclude: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}} + return + } + neworigin := origin // There may be optionally a new origin set after the filename, if not use current one + switch l := <-c; l.value { + case zBlank: + l := <-c + if l.value == zString { + name, ok := toAbsoluteName(l.token, origin) + if !ok { + t <- &Token{Error: &ParseError{f, "bad origin name", l}} + return + } + neworigin = name + } 
+ case zNewline, zEOF: + // Ok + default: + t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}} + return + } + // Start with the new file + includePath := l.token + if !filepath.IsAbs(includePath) { + includePath = filepath.Join(filepath.Dir(f), includePath) + } + r1, e1 := os.Open(includePath) + if e1 != nil { + msg := fmt.Sprintf("failed to open `%s'", l.token) + if !filepath.IsAbs(l.token) { + msg += fmt.Sprintf(" as `%s'", includePath) + } + t <- &Token{Error: &ParseError{f, msg, l}} + return + } + if include+1 > 7 { + t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}} + return + } + parseZone(r1, neworigin, includePath, defttl, t, include+1) + st = zExpectOwnerDir + case zExpectDirTTLBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}} + return + } + st = zExpectDirTTL + case zExpectDirTTL: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} + return + } + if e, _ := slurpRemainder(c, f); e != nil { + t <- &Token{Error: e} + return + } + ttl, ok := stringToTTL(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} + return + } + defttl = &ttlState{ttl, true} + st = zExpectOwnerDir + case zExpectDirOriginBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}} + return + } + st = zExpectDirOrigin + case zExpectDirOrigin: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}} + return + } + if e, _ := slurpRemainder(c, f); e != nil { + t <- &Token{Error: e} + } + name, ok := toAbsoluteName(l.token, origin) + if !ok { + t <- &Token{Error: &ParseError{f, "bad origin name", l}} + return + } + origin = name + st = zExpectOwnerDir + case zExpectDirGenerateBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}} + return + } + st = zExpectDirGenerate + case zExpectDirGenerate: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}} + return + } + if errMsg := generate(l, c, t, origin); errMsg != "" { + t <- &Token{Error: &ParseError{f, errMsg, l}} + return + } + st = zExpectOwnerDir + case zExpectOwnerBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after owner", l}} + return + } + st = zExpectAny + case zExpectAny: + switch l.value { + case zRrtpe: + if defttl == nil { + t <- &Token{Error: &ParseError{f, "missing TTL with no previous value", l}} + return + } + h.Rrtype = l.torc + st = zExpectRdata + case zClass: + h.Class = l.torc + st = zExpectAnyNoClassBl + case zString: + ttl, ok := stringToTTL(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + if defttl == nil || !defttl.isByDirective { + defttl = &ttlState{ttl, false} + } + st = zExpectAnyNoTTLBl + default: + t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}} + return + } + case zExpectAnyNoClassBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before class", l}} + return + } + st = zExpectAnyNoClass + case zExpectAnyNoTTLBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before TTL", l}} + return + } + st = zExpectAnyNoTTL + case zExpectAnyNoTTL: + switch l.value { + case zClass: + h.Class = l.torc + st = zExpectRrtypeBl + case zRrtpe: + h.Rrtype = l.torc + st = zExpectRdata + default: + t <- &Token{Error: 
&ParseError{f, "expecting RR type or class, not this...", l}} + return + } + case zExpectAnyNoClass: + switch l.value { + case zString: + ttl, ok := stringToTTL(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + if defttl == nil || !defttl.isByDirective { + defttl = &ttlState{ttl, false} + } + st = zExpectRrtypeBl + case zRrtpe: + h.Rrtype = l.torc + st = zExpectRdata + default: + t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}} + return + } + case zExpectRrtypeBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before RR type", l}} + return + } + st = zExpectRrtype + case zExpectRrtype: + if l.value != zRrtpe { + t <- &Token{Error: &ParseError{f, "unknown RR type", l}} + return + } + h.Rrtype = l.torc + st = zExpectRdata + case zExpectRdata: + r, e, c1 := setRR(h, c, origin, f) + if e != nil { + // If e.lex is nil than we have encounter a unknown RR type + // in that case we substitute our current lex token + if e.lex.token == "" && e.lex.value == 0 { + e.lex = l // Uh, dirty + } + t <- &Token{Error: e} + return + } + t <- &Token{RR: r, Comment: c1} + st = zExpectOwnerDir + } + } + // If we get here, we and the h.Rrtype is still zero, we haven't parsed anything, this + // is not an error, because an empty zone file is still a zone file. +} + +// zlexer scans the sourcefile and returns tokens on the channel c. +func zlexer(s *scan, c chan lex) { + var l lex + str := make([]byte, maxTok) // Should be enough for any token + stri := 0 // Offset in str (0 means empty) + com := make([]byte, maxTok) // Hold comment text + comi := 0 + quote := false + escape := false + space := false + commt := false + rrtype := false + owner := true + brace := 0 + x, err := s.tokenText() + defer close(c) + for err == nil { + l.column = s.position.Column + l.line = s.position.Line + if stri >= maxTok { + l.token = "token length insufficient for parsing" + l.err = true + c <- l + return + } + if comi >= maxTok { + l.token = "comment length insufficient for parsing" + l.err = true + c <- l + return + } + + switch x { + case ' ', '\t': + if escape { + escape = false + str[stri] = x + stri++ + break + } + if quote { + // Inside quotes this is legal + str[stri] = x + stri++ + break + } + if commt { + com[comi] = x + comi++ + break + } + if stri == 0 { + // Space directly in the beginning, handled in the grammar + } else if owner { + // If we have a string and its the first, make it an owner + l.value = zOwner + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + // escape $... 
start with a \ not a $, so this will work + switch l.tokenUpper { + case "$TTL": + l.value = zDirTTL + case "$ORIGIN": + l.value = zDirOrigin + case "$INCLUDE": + l.value = zDirInclude + case "$GENERATE": + l.value = zDirGenerate + } + c <- l + } else { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + if !rrtype { + if t, ok := StringToType[l.tokenUpper]; ok { + l.value = zRrtpe + l.torc = t + rrtype = true + } else { + if strings.HasPrefix(l.tokenUpper, "TYPE") { + t, ok := typeToInt(l.token) + if !ok { + l.token = "unknown RR type" + l.err = true + c <- l + return + } + l.value = zRrtpe + rrtype = true + l.torc = t + } + } + if t, ok := StringToClass[l.tokenUpper]; ok { + l.value = zClass + l.torc = t + } else { + if strings.HasPrefix(l.tokenUpper, "CLASS") { + t, ok := classToInt(l.token) + if !ok { + l.token = "unknown class" + l.err = true + c <- l + return + } + l.value = zClass + l.torc = t + } + } + } + c <- l + } + stri = 0 + + if !space && !commt { + l.value = zBlank + l.token = " " + l.length = 1 + c <- l + } + owner = false + space = true + case ';': + if escape { + escape = false + str[stri] = x + stri++ + break + } + if quote { + // Inside quotes this is legal + str[stri] = x + stri++ + break + } + if stri > 0 { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + c <- l + stri = 0 + } + commt = true + com[comi] = ';' + comi++ + case '\r': + escape = false + if quote { + str[stri] = x + stri++ + break + } + // discard if outside of quotes + case '\n': + escape = false + // Escaped newline + if quote { + str[stri] = x + stri++ + break + } + // inside quotes this is legal + if commt { + // Reset a comment + commt = false + rrtype = false + stri = 0 + // If not in a brace this ends the comment AND the RR + if brace == 0 { + owner = true + owner = true + l.value = zNewline + l.token = "\n" + l.tokenUpper = l.token + l.length = 1 + l.comment = string(com[:comi]) + c <- l + l.comment = "" + comi = 0 + break + } + com[comi] = ' ' // convert newline to space + comi++ + break + } + + if brace == 0 { + // If there is previous text, we should output it here + if stri != 0 { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + + l.length = stri + if !rrtype { + if t, ok := StringToType[l.tokenUpper]; ok { + l.value = zRrtpe + l.torc = t + rrtype = true + } + } + c <- l + } + l.value = zNewline + l.token = "\n" + l.tokenUpper = l.token + l.length = 1 + c <- l + stri = 0 + commt = false + rrtype = false + owner = true + comi = 0 + } + case '\\': + // comments do not get escaped chars, everything is copied + if commt { + com[comi] = x + comi++ + break + } + // something already escaped must be in string + if escape { + str[stri] = x + stri++ + escape = false + break + } + // something escaped outside of string gets added to string + str[stri] = x + stri++ + escape = true + case '"': + if commt { + com[comi] = x + comi++ + break + } + if escape { + str[stri] = x + stri++ + escape = false + break + } + space = false + // send previous gathered text and the quote + if stri != 0 { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + + c <- l + stri = 0 + } + + // send quote itself as separate token + l.value = zQuote + l.token = "\"" + l.tokenUpper = l.token + l.length = 1 + c <- l + quote = !quote + case '(', ')': + if commt { + com[comi] = x + comi++ + break + } + if 
escape { + str[stri] = x + stri++ + escape = false + break + } + if quote { + str[stri] = x + stri++ + break + } + switch x { + case ')': + brace-- + if brace < 0 { + l.token = "extra closing brace" + l.tokenUpper = l.token + l.err = true + c <- l + return + } + case '(': + brace++ + } + default: + escape = false + if commt { + com[comi] = x + comi++ + break + } + str[stri] = x + stri++ + space = false + } + x, err = s.tokenText() + } + if stri > 0 { + // Send remainder + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + l.value = zString + c <- l + } + if brace != 0 { + l.token = "unbalanced brace" + l.tokenUpper = l.token + l.err = true + c <- l + } +} + +// Extract the class number from CLASSxx +func classToInt(token string) (uint16, bool) { + offset := 5 + if len(token) < offset+1 { + return 0, false + } + class, err := strconv.ParseUint(token[offset:], 10, 16) + if err != nil { + return 0, false + } + return uint16(class), true +} + +// Extract the rr number from TYPExxx +func typeToInt(token string) (uint16, bool) { + offset := 4 + if len(token) < offset+1 { + return 0, false + } + typ, err := strconv.ParseUint(token[offset:], 10, 16) + if err != nil { + return 0, false + } + return uint16(typ), true +} + +// stringToTTL parses things like 2w, 2m, etc, and returns the time in seconds. +func stringToTTL(token string) (uint32, bool) { + s := uint32(0) + i := uint32(0) + for _, c := range token { + switch c { + case 's', 'S': + s += i + i = 0 + case 'm', 'M': + s += i * 60 + i = 0 + case 'h', 'H': + s += i * 60 * 60 + i = 0 + case 'd', 'D': + s += i * 60 * 60 * 24 + i = 0 + case 'w', 'W': + s += i * 60 * 60 * 24 * 7 + i = 0 + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i *= 10 + i += uint32(c) - '0' + default: + return 0, false + } + } + return s + i, true +} + +// Parse LOC records' [.][mM] into a +// mantissa exponent format. Token should contain the entire +// string (i.e. no spaces allowed) +func stringToCm(token string) (e, m uint8, ok bool) { + if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' { + token = token[0 : len(token)-1] + } + s := strings.SplitN(token, ".", 2) + var meters, cmeters, val int + var err error + switch len(s) { + case 2: + if cmeters, err = strconv.Atoi(s[1]); err != nil { + return + } + fallthrough + case 1: + if meters, err = strconv.Atoi(s[0]); err != nil { + return + } + case 0: + // huh? + return 0, 0, false + } + ok = true + if meters > 0 { + e = 2 + val = meters + } else { + e = 0 + val = cmeters + } + for val > 10 { + e++ + val /= 10 + } + if e > 9 { + ok = false + } + m = uint8(val) + return +} + +func toAbsoluteName(name, origin string) (absolute string, ok bool) { + // check for an explicit origin reference + if name == "@" { + // require a nonempty origin + if origin == "" { + return "", false + } + return origin, true + } + + // require a valid domain name + _, ok = IsDomainName(name) + if !ok || name == "" { + return "", false + } + + // check if name is already absolute + if name[len(name)-1] == '.' { + return name, true + } + + // require a nonempty origin + if origin == "" { + return "", false + } + return appendOrigin(name, origin), true +} + +func appendOrigin(name, origin string) string { + if origin == "." { + return name + origin + } + return name + "." 
+ origin +} + +// LOC record helper function +func locCheckNorth(token string, latitude uint32) (uint32, bool) { + switch token { + case "n", "N": + return LOC_EQUATOR + latitude, true + case "s", "S": + return LOC_EQUATOR - latitude, true + } + return latitude, false +} + +// LOC record helper function +func locCheckEast(token string, longitude uint32) (uint32, bool) { + switch token { + case "e", "E": + return LOC_EQUATOR + longitude, true + case "w", "W": + return LOC_EQUATOR - longitude, true + } + return longitude, false +} + +// "Eat" the rest of the "line". Return potential comments +func slurpRemainder(c chan lex, f string) (*ParseError, string) { + l := <-c + com := "" + switch l.value { + case zBlank: + l = <-c + com = l.comment + if l.value != zNewline && l.value != zEOF { + return &ParseError{f, "garbage after rdata", l}, "" + } + case zNewline: + com = l.comment + case zEOF: + default: + return &ParseError{f, "garbage after rdata", l}, "" + } + return nil, com +} + +// Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64" +// Used for NID and L64 record. +func stringToNodeID(l lex) (uint64, *ParseError) { + if len(l.token) < 19 { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + // There must be three colons at fixes postitions, if not its a parse error + if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19] + u, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + return u, nil +} diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go new file mode 100644 index 00000000000..fb6f95d1da9 --- /dev/null +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -0,0 +1,2203 @@ +package dns + +import ( + "encoding/base64" + "net" + "strconv" + "strings" +) + +type parserFunc struct { + // Func defines the function that parses the tokens and returns the RR + // or an error. The last string contains any comments in the line as + // they returned by the lexer as well. + Func func(h RR_Header, c chan lex, origin string, file string) (RR, *ParseError, string) + // Signals if the RR ending is of variable length, like TXT or records + // that have Hexadecimal or Base64 as their last element in the Rdata. Records + // that have a fixed ending or for instance A, AAAA, SOA and etc. + Variable bool +} + +// Parse the rdata of each rrtype. +// All data from the channel c is either zString or zBlank. +// After the rdata there may come a zBlank and then a zNewline +// or immediately a zNewline. If this is not the case we flag +// an *ParseError: garbage after rdata. 
+func setRR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + parserfunc, ok := typeToparserFunc[h.Rrtype] + if ok { + r, e, cm := parserfunc.Func(h, c, o, f) + if parserfunc.Variable { + return r, e, cm + } + if e != nil { + return nil, e, "" + } + e, cm = slurpRemainder(c, f) + if e != nil { + return nil, e, "" + } + return r, nil, cm + } + // RFC3957 RR (Unknown RR handling) + return setRFC3597(h, c, o, f) +} + +// A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) +// or an error +func endingToString(c chan lex, errstr, f string) (string, *ParseError, string) { + s := "" + l := <-c // zString + for l.value != zNewline && l.value != zEOF { + if l.err { + return s, &ParseError{f, errstr, l}, "" + } + switch l.value { + case zString: + s += l.token + case zBlank: // Ok + default: + return "", &ParseError{f, errstr, l}, "" + } + l = <-c + } + return s, nil, l.comment +} + +// A remainder of the rdata with embedded spaces, split on unquoted whitespace +// and return the parsed string slice or an error +func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, string) { + // Get the remaining data until we see a zNewline + l := <-c + if l.err { + return nil, &ParseError{f, errstr, l}, "" + } + + // Build the slice + s := make([]string, 0) + quote := false + empty := false + for l.value != zNewline && l.value != zEOF { + if l.err { + return nil, &ParseError{f, errstr, l}, "" + } + switch l.value { + case zString: + empty = false + if len(l.token) > 255 { + // split up tokens that are larger than 255 into 255-chunks + sx := []string{} + p, i := 0, 255 + for { + if i <= len(l.token) { + sx = append(sx, l.token[p:i]) + } else { + sx = append(sx, l.token[p:]) + break + + } + p, i = p+255, i+255 + } + s = append(s, sx...) + break + } + + s = append(s, l.token) + case zBlank: + if quote { + // zBlank can only be seen in between txt parts. + return nil, &ParseError{f, errstr, l}, "" + } + case zQuote: + if empty && quote { + s = append(s, "") + } + quote = !quote + empty = true + default: + return nil, &ParseError{f, errstr, l}, "" + } + l = <-c + } + if quote { + return nil, &ParseError{f, errstr, l}, "" + } + return s, nil, l.comment +} + +func setA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(A) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + rr.A = net.ParseIP(l.token) + if rr.A == nil || l.err { + return nil, &ParseError{f, "bad A A", l}, "" + } + return rr, nil, "" +} + +func setAAAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(AAAA) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + rr.AAAA = net.ParseIP(l.token) + if rr.AAAA == nil || l.err { + return nil, &ParseError{f, "bad AAAA AAAA", l}, "" + } + return rr, nil, "" +} + +func setNS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NS) + rr.Hdr = h + + l := <-c + rr.Ns = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad NS Ns", l}, "" + } + rr.Ns = name + return rr, nil, "" +} + +func setPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(PTR) + rr.Hdr = h + + l := <-c + rr.Ptr = l.token + if l.length == 0 { // dynamic update rr. 
+ return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad PTR Ptr", l}, "" + } + rr.Ptr = name + return rr, nil, "" +} + +func setNSAPPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSAPPTR) + rr.Hdr = h + + l := <-c + rr.Ptr = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad NSAP-PTR Ptr", l}, "" + } + rr.Ptr = name + return rr, nil, "" +} + +func setRP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RP) + rr.Hdr = h + + l := <-c + rr.Mbox = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + mbox, mboxOk := toAbsoluteName(l.token, o) + if l.err || !mboxOk { + return nil, &ParseError{f, "bad RP Mbox", l}, "" + } + rr.Mbox = mbox + + <-c // zBlank + l = <-c + rr.Txt = l.token + + txt, txtOk := toAbsoluteName(l.token, o) + if l.err || !txtOk { + return nil, &ParseError{f, "bad RP Txt", l}, "" + } + rr.Txt = txt + + return rr, nil, "" +} + +func setMR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MR) + rr.Hdr = h + + l := <-c + rr.Mr = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad MR Mr", l}, "" + } + rr.Mr = name + return rr, nil, "" +} + +func setMB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MB) + rr.Hdr = h + + l := <-c + rr.Mb = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad MB Mb", l}, "" + } + rr.Mb = name + return rr, nil, "" +} + +func setMG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MG) + rr.Hdr = h + + l := <-c + rr.Mg = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad MG Mg", l}, "" + } + rr.Mg = name + return rr, nil, "" +} + +func setHINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(HINFO) + rr.Hdr = h + + chunks, e, c1 := endingToTxtSlice(c, "bad HINFO Fields", f) + if e != nil { + return nil, e, c1 + } + + if ln := len(chunks); ln == 0 { + return rr, nil, "" + } else if ln == 1 { + // Can we split it? + if out := strings.Fields(chunks[0]); len(out) > 1 { + chunks = out + } else { + chunks = append(chunks, "") + } + } + + rr.Cpu = chunks[0] + rr.Os = strings.Join(chunks[1:], " ") + + return rr, nil, "" +} + +func setMINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MINFO) + rr.Hdr = h + + l := <-c + rr.Rmail = l.token + if l.length == 0 { // dynamic update rr. 
+ return rr, nil, "" + } + + rmail, rmailOk := toAbsoluteName(l.token, o) + if l.err || !rmailOk { + return nil, &ParseError{f, "bad MINFO Rmail", l}, "" + } + rr.Rmail = rmail + + <-c // zBlank + l = <-c + rr.Email = l.token + + email, emailOk := toAbsoluteName(l.token, o) + if l.err || !emailOk { + return nil, &ParseError{f, "bad MINFO Email", l}, "" + } + rr.Email = email + + return rr, nil, "" +} + +func setMF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MF) + rr.Hdr = h + + l := <-c + rr.Mf = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad MF Mf", l}, "" + } + rr.Mf = name + return rr, nil, "" +} + +func setMD(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MD) + rr.Hdr = h + + l := <-c + rr.Md = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad MD Md", l}, "" + } + rr.Md = name + return rr, nil, "" +} + +func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MX) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad MX Pref", l}, "" + } + rr.Preference = uint16(i) + + <-c // zBlank + l = <-c // zString + rr.Mx = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad MX Mx", l}, "" + } + rr.Mx = name + + return rr, nil, "" +} + +func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RT) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil { + return nil, &ParseError{f, "bad RT Preference", l}, "" + } + rr.Preference = uint16(i) + + <-c // zBlank + l = <-c // zString + rr.Host = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad RT Host", l}, "" + } + rr.Host = name + + return rr, nil, "" +} + +func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(AFSDB) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad AFSDB Subtype", l}, "" + } + rr.Subtype = uint16(i) + + <-c // zBlank + l = <-c // zString + rr.Hostname = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad AFSDB Hostname", l}, "" + } + rr.Hostname = name + return rr, nil, "" +} + +func setX25(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(X25) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + if l.err { + return nil, &ParseError{f, "bad X25 PSDNAddress", l}, "" + } + rr.PSDNAddress = l.token + return rr, nil, "" +} + +func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(KX) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
+ return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad KX Pref", l}, "" + } + rr.Preference = uint16(i) + + <-c // zBlank + l = <-c // zString + rr.Exchanger = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad KX Exchanger", l}, "" + } + rr.Exchanger = name + return rr, nil, "" +} + +func setCNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CNAME) + rr.Hdr = h + + l := <-c + rr.Target = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad CNAME Target", l}, "" + } + rr.Target = name + return rr, nil, "" +} + +func setDNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(DNAME) + rr.Hdr = h + + l := <-c + rr.Target = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad DNAME Target", l}, "" + } + rr.Target = name + return rr, nil, "" +} + +func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SOA) + rr.Hdr = h + + l := <-c + rr.Ns = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + ns, nsOk := toAbsoluteName(l.token, o) + if l.err || !nsOk { + return nil, &ParseError{f, "bad SOA Ns", l}, "" + } + rr.Ns = ns + + <-c // zBlank + l = <-c + rr.Mbox = l.token + + mbox, mboxOk := toAbsoluteName(l.token, o) + if l.err || !mboxOk { + return nil, &ParseError{f, "bad SOA Mbox", l}, "" + } + rr.Mbox = mbox + + <-c // zBlank + + var ( + v uint32 + ok bool + ) + for i := 0; i < 5; i++ { + l = <-c + if l.err { + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + } + if j, e := strconv.ParseUint(l.token, 10, 32); e != nil { + if i == 0 { + // Serial must be a number + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + } + // We allow other fields to be unitful duration strings + if v, ok = stringToTTL(l.token); !ok { + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + + } + } else { + v = uint32(j) + } + switch i { + case 0: + rr.Serial = v + <-c // zBlank + case 1: + rr.Refresh = v + <-c // zBlank + case 2: + rr.Retry = v + <-c // zBlank + case 3: + rr.Expire = v + <-c // zBlank + case 4: + rr.Minttl = v + } + } + return rr, nil, "" +} + +func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SRV) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
+ return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Priority", l}, "" + } + rr.Priority = uint16(i) + + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Weight", l}, "" + } + rr.Weight = uint16(i) + + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Port", l}, "" + } + rr.Port = uint16(i) + + <-c // zBlank + l = <-c // zString + rr.Target = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad SRV Target", l}, "" + } + rr.Target = name + return rr, nil, "" +} + +func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NAPTR) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad NAPTR Order", l}, "" + } + rr.Order = uint16(i) + + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad NAPTR Preference", l}, "" + } + rr.Preference = uint16(i) + + // Flags + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Flags = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + } else if l.value == zQuote { + rr.Flags = "" + } else { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + + // Service + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Service = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + } else if l.value == zQuote { + rr.Service = "" + } else { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + + // Regexp + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Regexp = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + } + } else if l.value == zQuote { + rr.Regexp = "" + } else { + return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + } + + // After quote no space?? + <-c // zBlank + l = <-c // zString + rr.Replacement = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad NAPTR Replacement", l}, "" + } + rr.Replacement = name + return rr, nil, "" +} + +func setTALINK(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TALINK) + rr.Hdr = h + + l := <-c + rr.PreviousName = l.token + if l.length == 0 { // dynamic update rr. 
+ return rr, nil, "" + } + + previousName, previousNameOk := toAbsoluteName(l.token, o) + if l.err || !previousNameOk { + return nil, &ParseError{f, "bad TALINK PreviousName", l}, "" + } + rr.PreviousName = previousName + + <-c // zBlank + l = <-c + rr.NextName = l.token + + nextName, nextNameOk := toAbsoluteName(l.token, o) + if l.err || !nextNameOk { + return nil, &ParseError{f, "bad TALINK NextName", l}, "" + } + rr.NextName = nextName + + return rr, nil, "" +} + +func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(LOC) + rr.Hdr = h + // Non zero defaults for LOC record, see RFC 1876, Section 3. + rr.HorizPre = 165 // 10000 + rr.VertPre = 162 // 10 + rr.Size = 18 // 1 + ok := false + + // North + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude", l}, "" + } + rr.Latitude = 1000 * 60 * 60 * uint32(i) + + <-c // zBlank + // Either number, 'N' or 'S' + l = <-c + if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { + goto East + } + i, e = strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude minutes", l}, "" + } + rr.Latitude += 1000 * 60 * uint32(i) + + <-c // zBlank + l = <-c + if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude seconds", l}, "" + } else { + rr.Latitude += uint32(1000 * i) + } + <-c // zBlank + // Either number, 'N' or 'S' + l = <-c + if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { + goto East + } + // If still alive, flag an error + return nil, &ParseError{f, "bad LOC Latitude North/South", l}, "" + +East: + // East + <-c // zBlank + l = <-c + if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude", l}, "" + } else { + rr.Longitude = 1000 * 60 * 60 * uint32(i) + } + <-c // zBlank + // Either number, 'E' or 'W' + l = <-c + if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { + goto Altitude + } + if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude minutes", l}, "" + } else { + rr.Longitude += 1000 * 60 * uint32(i) + } + <-c // zBlank + l = <-c + if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude seconds", l}, "" + } else { + rr.Longitude += uint32(1000 * i) + } + <-c // zBlank + // Either number, 'E' or 'W' + l = <-c + if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { + goto Altitude + } + // If still alive, flag an error + return nil, &ParseError{f, "bad LOC Longitude East/West", l}, "" + +Altitude: + <-c // zBlank + l = <-c + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad LOC Altitude", l}, "" + } + if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { + l.token = l.token[0 : len(l.token)-1] + } + if i, e := strconv.ParseFloat(l.token, 32); e != nil { + return nil, &ParseError{f, "bad LOC Altitude", l}, "" + } else { + rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) + } + + // And now optionally the other values + l = <-c + count := 0 + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zString: + switch count { + case 0: // Size + e, m, ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC Size", l}, "" + } + rr.Size = (e & 0x0f) | (m << 4 & 0xf0) + case 1: // HorizPre + e, m, 
ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC HorizPre", l}, "" + } + rr.HorizPre = (e & 0x0f) | (m << 4 & 0xf0) + case 2: // VertPre + e, m, ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC VertPre", l}, "" + } + rr.VertPre = (e & 0x0f) | (m << 4 & 0xf0) + } + count++ + case zBlank: + // Ok + default: + return nil, &ParseError{f, "bad LOC Size, HorizPre or VertPre", l}, "" + } + l = <-c + } + return rr, nil, "" +} + +func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(HIP) + rr.Hdr = h + + // HitLength is not represented + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, l.comment + } + + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, "" + } + rr.PublicKeyAlgorithm = uint8(i) + + <-c // zBlank + l = <-c // zString + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad HIP Hit", l}, "" + } + rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6. + rr.HitLength = uint8(len(rr.Hit)) / 2 + + <-c // zBlank + l = <-c // zString + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad HIP PublicKey", l}, "" + } + rr.PublicKey = l.token // This cannot contain spaces + rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey))) + + // RendezvousServers (if any) + l = <-c + var xs []string + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zString: + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" + } + xs = append(xs, name) + case zBlank: + // Ok + default: + return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" + } + l = <-c + } + rr.RendezvousServers = xs + return rr, nil, l.comment +} + +func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CERT) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, l.comment + } + + if v, ok := StringToCertType[l.token]; ok { + rr.Type = v + } else if i, e := strconv.ParseUint(l.token, 10, 16); e != nil { + return nil, &ParseError{f, "bad CERT Type", l}, "" + } else { + rr.Type = uint16(i) + } + <-c // zBlank + l = <-c // zString + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad CERT KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c // zString + if v, ok := StringToAlgorithm[l.token]; ok { + rr.Algorithm = v + } else if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { + return nil, &ParseError{f, "bad CERT Algorithm", l}, "" + } else { + rr.Algorithm = uint8(i) + } + s, e1, c1 := endingToString(c, "bad CERT Certificate", f) + if e1 != nil { + return nil, e1, c1 + } + rr.Certificate = s + return rr, nil, c1 +} + +func setOPENPGPKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(OPENPGPKEY) + rr.Hdr = h + + s, e, c1 := endingToString(c, "bad OPENPGPKEY PublicKey", f) + if e != nil { + return nil, e, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setCSYNC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CSYNC) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
+ return rr, nil, l.comment + } + j, e := strconv.ParseUint(l.token, 10, 32) + if e != nil { + // Serial must be a number + return nil, &ParseError{f, "bad CSYNC serial", l}, "" + } + rr.Serial = uint32(j) + + <-c // zBlank + + l = <-c + j, e = strconv.ParseUint(l.token, 10, 16) + if e != nil { + // Serial must be a number + return nil, &ParseError{f, "bad CSYNC flags", l}, "" + } + rr.Flags = uint16(j) + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l = <-c + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + if k, ok = StringToType[l.tokenUpper]; !ok { + if k, ok = typeToInt(l.tokenUpper); !ok { + return nil, &ParseError{f, "bad CSYNC TypeBitMap", l}, "" + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return nil, &ParseError{f, "bad CSYNC TypeBitMap", l}, "" + } + l = <-c + } + return rr, nil, l.comment +} + +func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setRRSIG(h, c, o, f) + if r != nil { + return &SIG{*r.(*RRSIG)}, e, s + } + return nil, e, s +} + +func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RRSIG) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, l.comment + } + + if t, ok := StringToType[l.tokenUpper]; !ok { + if strings.HasPrefix(l.tokenUpper, "TYPE") { + t, ok = typeToInt(l.tokenUpper) + if !ok { + return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" + } + rr.TypeCovered = t + } else { + return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" + } + } else { + rr.TypeCovered = t + } + + <-c // zBlank + l = <-c + i, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + + <-c // zBlank + l = <-c + i, err = strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG Labels", l}, "" + } + rr.Labels = uint8(i) + + <-c // zBlank + l = <-c + i, err = strconv.ParseUint(l.token, 10, 32) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, "" + } + rr.OrigTtl = uint32(i) + + <-c // zBlank + l = <-c + if i, err := StringToTime(l.token); err != nil { + // Try to see if all numeric and use it as epoch + if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { + // TODO(miek): error out on > MAX_UINT32, same below + rr.Expiration = uint32(i) + } else { + return nil, &ParseError{f, "bad RRSIG Expiration", l}, "" + } + } else { + rr.Expiration = i + } + + <-c // zBlank + l = <-c + if i, err := StringToTime(l.token); err != nil { + if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { + rr.Inception = uint32(i) + } else { + return nil, &ParseError{f, "bad RRSIG Inception", l}, "" + } + } else { + rr.Inception = i + } + + <-c // zBlank + l = <-c + i, err = strconv.ParseUint(l.token, 10, 16) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + + <-c // zBlank + l = <-c + rr.SignerName = l.token + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad RRSIG SignerName", l}, "" + } + rr.SignerName = name + + s, e, c1 := endingToString(c, "bad RRSIG Signature", f) + if e != nil { + return nil, e, c1 + } + rr.Signature = s + + return rr, nil, c1 +} + +func setNSEC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC) + rr.Hdr = h + + l := <-c + rr.NextDomain = 
l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, l.comment + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad NSEC NextDomain", l}, "" + } + rr.NextDomain = name + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l = <-c + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + if k, ok = StringToType[l.tokenUpper]; !ok { + if k, ok = typeToInt(l.tokenUpper); !ok { + return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" + } + l = <-c + } + return rr, nil, l.comment +} + +func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC3) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, l.comment + } + + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Hash", l}, "" + } + rr.Hash = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Flags", l}, "" + } + rr.Flags = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Iterations", l}, "" + } + rr.Iterations = uint16(i) + <-c + l = <-c + if len(l.token) == 0 || l.err { + return nil, &ParseError{f, "bad NSEC3 Salt", l}, "" + } + if l.token != "-" { + rr.SaltLength = uint8(len(l.token)) / 2 + rr.Salt = l.token + } + + <-c + l = <-c + if len(l.token) == 0 || l.err { + return nil, &ParseError{f, "bad NSEC3 NextDomain", l}, "" + } + rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits) + rr.NextDomain = l.token + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l = <-c + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + if k, ok = StringToType[l.tokenUpper]; !ok { + if k, ok = typeToInt(l.tokenUpper); !ok { + return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" + } + l = <-c + } + return rr, nil, l.comment +} + +func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC3PARAM) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, "" + } + rr.Hash = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3PARAM Flags", l}, "" + } + rr.Flags = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, "" + } + rr.Iterations = uint16(i) + <-c + l = <-c + if l.token != "-" { + rr.SaltLength = uint8(len(l.token)) + rr.Salt = l.token + } + return rr, nil, "" +} + +func setEUI48(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EUI48) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
+ return rr, nil, "" + } + + if l.length != 17 || l.err { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + addr := make([]byte, 12) + dash := 0 + for i := 0; i < 10; i += 2 { + addr[i] = l.token[i+dash] + addr[i+1] = l.token[i+1+dash] + dash++ + if l.token[i+1+dash] != '-' { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + } + addr[10] = l.token[15] + addr[11] = l.token[16] + + i, e := strconv.ParseUint(string(addr), 16, 48) + if e != nil { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + rr.Address = i + return rr, nil, "" +} + +func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EUI64) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + if l.length != 23 || l.err { + return nil, &ParseError{f, "bad EUI64 Address", l}, "" + } + addr := make([]byte, 16) + dash := 0 + for i := 0; i < 14; i += 2 { + addr[i] = l.token[i+dash] + addr[i+1] = l.token[i+1+dash] + dash++ + if l.token[i+1+dash] != '-' { + return nil, &ParseError{f, "bad EUI64 Address", l}, "" + } + } + addr[14] = l.token[21] + addr[15] = l.token[22] + + i, e := strconv.ParseUint(string(addr), 16, 64) + if e != nil { + return nil, &ParseError{f, "bad EUI68 Address", l}, "" + } + rr.Address = uint64(i) + return rr, nil, "" +} + +func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SSHFP) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad SSHFP Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad SSHFP Type", l}, "" + } + rr.Type = uint8(i) + <-c // zBlank + s, e1, c1 := endingToString(c, "bad SSHFP Fingerprint", f) + if e1 != nil { + return nil, e1, c1 + } + rr.FingerPrint = s + return rr, nil, "" +} + +func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { + rr := new(DNSKEY) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
+ return rr, nil, l.comment + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Flags", l}, "" + } + rr.Flags = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Protocol", l}, "" + } + rr.Protocol = uint8(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + s, e1, c1 := endingToString(c, "bad "+typ+" PublicKey", f) + if e1 != nil { + return nil, e1, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "KEY") + if r != nil { + return &KEY{*r.(*DNSKEY)}, e, s + } + return nil, e, s +} + +func setDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "DNSKEY") + return r, e, s +} + +func setCDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "CDNSKEY") + if r != nil { + return &CDNSKEY{*r.(*DNSKEY)}, e, s + } + return nil, e, s +} + +func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RKEY) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, l.comment + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Flags", l}, "" + } + rr.Flags = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Protocol", l}, "" + } + rr.Protocol = uint8(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + s, e1, c1 := endingToString(c, "bad RKEY PublicKey", f) + if e1 != nil { + return nil, e1, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setEID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EID) + rr.Hdr = h + s, e, c1 := endingToString(c, "bad EID Endpoint", f) + if e != nil { + return nil, e, c1 + } + rr.Endpoint = s + return rr, nil, c1 +} + +func setNIMLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NIMLOC) + rr.Hdr = h + s, e, c1 := endingToString(c, "bad NIMLOC Locator", f) + if e != nil { + return nil, e, c1 + } + rr.Locator = s + return rr, nil, c1 +} + +func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(GPOS) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
+ return rr, nil, "" + } + + _, e := strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Longitude", l}, "" + } + rr.Longitude = l.token + <-c // zBlank + l = <-c + _, e = strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Latitude", l}, "" + } + rr.Latitude = l.token + <-c // zBlank + l = <-c + _, e = strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Altitude", l}, "" + } + rr.Altitude = l.token + return rr, nil, "" +} + +func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { + rr := new(DS) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, l.comment + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c + if i, e = strconv.ParseUint(l.token, 10, 8); e != nil { + i, ok := StringToAlgorithm[l.tokenUpper] + if !ok || l.err { + return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" + } + rr.Algorithm = i + } else { + rr.Algorithm = uint8(i) + } + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " DigestType", l}, "" + } + rr.DigestType = uint8(i) + s, e1, c1 := endingToString(c, "bad "+typ+" Digest", f) + if e1 != nil { + return nil, e1, c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "DS") + return r, e, s +} + +func setDLV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "DLV") + if r != nil { + return &DLV{*r.(*DS)}, e, s + } + return nil, e, s +} + +func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "CDS") + if r != nil { + return &CDS{*r.(*DS)}, e, s + } + return nil, e, s +} + +func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TA) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, l.comment + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad TA KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c + if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { + i, ok := StringToAlgorithm[l.tokenUpper] + if !ok || l.err { + return nil, &ParseError{f, "bad TA Algorithm", l}, "" + } + rr.Algorithm = i + } else { + rr.Algorithm = uint8(i) + } + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad TA DigestType", l}, "" + } + rr.DigestType = uint8(i) + s, e, c1 := endingToString(c, "bad TA Digest", f) + if e != nil { + return nil, e.(*ParseError), c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TLSA) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
+ return rr, nil, l.comment + } + + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad TLSA Usage", l}, "" + } + rr.Usage = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad TLSA Selector", l}, "" + } + rr.Selector = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad TLSA MatchingType", l}, "" + } + rr.MatchingType = uint8(i) + // So this needs be e2 (i.e. different than e), because...??t + s, e2, c1 := endingToString(c, "bad TLSA Certificate", f) + if e2 != nil { + return nil, e2, c1 + } + rr.Certificate = s + return rr, nil, c1 +} + +func setSMIMEA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SMIMEA) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, l.comment + } + + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad SMIMEA Usage", l}, "" + } + rr.Usage = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad SMIMEA Selector", l}, "" + } + rr.Selector = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad SMIMEA MatchingType", l}, "" + } + rr.MatchingType = uint8(i) + // So this needs be e2 (i.e. different than e), because...??t + s, e2, c1 := endingToString(c, "bad SMIMEA Certificate", f) + if e2 != nil { + return nil, e2, c1 + } + rr.Certificate = s + return rr, nil, c1 +} + +func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RFC3597) + rr.Hdr = h + + l := <-c + if l.token != "\\#" { + return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" + } + + <-c // zBlank + l = <-c + rdlength, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad RFC3597 Rdata ", l}, "" + } + + s, e1, c1 := endingToString(c, "bad RFC3597 Rdata", f) + if e1 != nil { + return nil, e1, c1 + } + if rdlength*2 != len(s) { + return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" + } + rr.Rdata = s + return rr, nil, c1 +} + +func setSPF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SPF) + rr.Hdr = h + + s, e, c1 := endingToTxtSlice(c, "bad SPF Txt", f) + if e != nil { + return nil, e, "" + } + rr.Txt = s + return rr, nil, c1 +} + +func setAVC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(AVC) + rr.Hdr = h + + s, e, c1 := endingToTxtSlice(c, "bad AVC Txt", f) + if e != nil { + return nil, e, "" + } + rr.Txt = s + return rr, nil, c1 +} + +func setTXT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TXT) + rr.Hdr = h + + // no zBlank reading here, because all this rdata is TXT + s, e, c1 := endingToTxtSlice(c, "bad TXT Txt", f) + if e != nil { + return nil, e, "" + } + rr.Txt = s + return rr, nil, c1 +} + +// identical to setTXT +func setNINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NINFO) + rr.Hdr = h + + s, e, c1 := endingToTxtSlice(c, "bad NINFO ZSData", f) + if e != nil { + return nil, e, "" + } + rr.ZSData = s + return rr, nil, c1 +} + +func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(URI) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
+ return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad URI Priority", l}, "" + } + rr.Priority = uint16(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad URI Weight", l}, "" + } + rr.Weight = uint16(i) + + <-c // zBlank + s, err, c1 := endingToTxtSlice(c, "bad URI Target", f) + if err != nil { + return nil, err, "" + } + if len(s) != 1 { + return nil, &ParseError{f, "bad URI Target", l}, "" + } + rr.Target = s[0] + return rr, nil, c1 +} + +func setDHCID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + // awesome record to parse! + rr := new(DHCID) + rr.Hdr = h + + s, e, c1 := endingToString(c, "bad DHCID Digest", f) + if e != nil { + return nil, e, c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NID) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad NID Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + u, err := stringToNodeID(l) + if err != nil || l.err { + return nil, err, "" + } + rr.NodeID = u + return rr, nil, "" +} + +func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(L32) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad L32 Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Locator32 = net.ParseIP(l.token) + if rr.Locator32 == nil || l.err { + return nil, &ParseError{f, "bad L32 Locator", l}, "" + } + return rr, nil, "" +} + +func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(LP) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad LP Preference", l}, "" + } + rr.Preference = uint16(i) + + <-c // zBlank + l = <-c // zString + rr.Fqdn = l.token + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad LP Fqdn", l}, "" + } + rr.Fqdn = name + + return rr, nil, "" +} + +func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(L64) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad L64 Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + u, err := stringToNodeID(l) + if err != nil || l.err { + return nil, err, "" + } + rr.Locator64 = u + return rr, nil, "" +} + +func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(UID) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
+ return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return nil, &ParseError{f, "bad UID Uid", l}, "" + } + rr.Uid = uint32(i) + return rr, nil, "" +} + +func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(GID) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return nil, &ParseError{f, "bad GID Gid", l}, "" + } + rr.Gid = uint32(i) + return rr, nil, "" +} + +func setUINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(UINFO) + rr.Hdr = h + + s, e, c1 := endingToTxtSlice(c, "bad UINFO Uinfo", f) + if e != nil { + return nil, e, c1 + } + if ln := len(s); ln == 0 { + return rr, nil, c1 + } + rr.Uinfo = s[0] // silently discard anything after the first character-string + return rr, nil, c1 +} + +func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(PX) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad PX Preference", l}, "" + } + rr.Preference = uint16(i) + + <-c // zBlank + l = <-c // zString + rr.Map822 = l.token + map822, map822Ok := toAbsoluteName(l.token, o) + if l.err || !map822Ok { + return nil, &ParseError{f, "bad PX Map822", l}, "" + } + rr.Map822 = map822 + + <-c // zBlank + l = <-c // zString + rr.Mapx400 = l.token + mapx400, mapx400Ok := toAbsoluteName(l.token, o) + if l.err || !mapx400Ok { + return nil, &ParseError{f, "bad PX Mapx400", l}, "" + } + rr.Mapx400 = mapx400 + + return rr, nil, "" +} + +func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CAA) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
+ return rr, nil, l.comment + } + + i, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return nil, &ParseError{f, "bad CAA Flag", l}, "" + } + rr.Flag = uint8(i) + + <-c // zBlank + l = <-c // zString + if l.value != zString { + return nil, &ParseError{f, "bad CAA Tag", l}, "" + } + rr.Tag = l.token + + <-c // zBlank + s, e, c1 := endingToTxtSlice(c, "bad CAA Value", f) + if e != nil { + return nil, e, "" + } + if len(s) != 1 { + return nil, &ParseError{f, "bad CAA Value", l}, "" + } + rr.Value = s[0] + return rr, nil, c1 +} + +func setTKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TKEY) + rr.Hdr = h + + l := <-c + + // Algorithm + if l.value != zString { + return nil, &ParseError{f, "bad TKEY algorithm", l}, "" + } + rr.Algorithm = l.token + <-c // zBlank + + // Get the key length and key values + l = <-c + i, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return nil, &ParseError{f, "bad TKEY key length", l}, "" + } + rr.KeySize = uint16(i) + <-c // zBlank + l = <-c + if l.value != zString { + return nil, &ParseError{f, "bad TKEY key", l}, "" + } + rr.Key = l.token + <-c // zBlank + + // Get the otherdata length and string data + l = <-c + i, err = strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return nil, &ParseError{f, "bad TKEY otherdata length", l}, "" + } + rr.OtherLen = uint16(i) + <-c // zBlank + l = <-c + if l.value != zString { + return nil, &ParseError{f, "bad TKEY otherday", l}, "" + } + rr.OtherData = l.token + + return rr, nil, "" +} + +var typeToparserFunc = map[uint16]parserFunc{ + TypeAAAA: {setAAAA, false}, + TypeAFSDB: {setAFSDB, false}, + TypeA: {setA, false}, + TypeCAA: {setCAA, true}, + TypeCDS: {setCDS, true}, + TypeCDNSKEY: {setCDNSKEY, true}, + TypeCERT: {setCERT, true}, + TypeCNAME: {setCNAME, false}, + TypeCSYNC: {setCSYNC, true}, + TypeDHCID: {setDHCID, true}, + TypeDLV: {setDLV, true}, + TypeDNAME: {setDNAME, false}, + TypeKEY: {setKEY, true}, + TypeDNSKEY: {setDNSKEY, true}, + TypeDS: {setDS, true}, + TypeEID: {setEID, true}, + TypeEUI48: {setEUI48, false}, + TypeEUI64: {setEUI64, false}, + TypeGID: {setGID, false}, + TypeGPOS: {setGPOS, false}, + TypeHINFO: {setHINFO, true}, + TypeHIP: {setHIP, true}, + TypeKX: {setKX, false}, + TypeL32: {setL32, false}, + TypeL64: {setL64, false}, + TypeLOC: {setLOC, true}, + TypeLP: {setLP, false}, + TypeMB: {setMB, false}, + TypeMD: {setMD, false}, + TypeMF: {setMF, false}, + TypeMG: {setMG, false}, + TypeMINFO: {setMINFO, false}, + TypeMR: {setMR, false}, + TypeMX: {setMX, false}, + TypeNAPTR: {setNAPTR, false}, + TypeNID: {setNID, false}, + TypeNIMLOC: {setNIMLOC, true}, + TypeNINFO: {setNINFO, true}, + TypeNSAPPTR: {setNSAPPTR, false}, + TypeNSEC3PARAM: {setNSEC3PARAM, false}, + TypeNSEC3: {setNSEC3, true}, + TypeNSEC: {setNSEC, true}, + TypeNS: {setNS, false}, + TypeOPENPGPKEY: {setOPENPGPKEY, true}, + TypePTR: {setPTR, false}, + TypePX: {setPX, false}, + TypeSIG: {setSIG, true}, + TypeRKEY: {setRKEY, true}, + TypeRP: {setRP, false}, + TypeRRSIG: {setRRSIG, true}, + TypeRT: {setRT, false}, + TypeSMIMEA: {setSMIMEA, true}, + TypeSOA: {setSOA, false}, + TypeSPF: {setSPF, true}, + TypeAVC: {setAVC, true}, + TypeSRV: {setSRV, false}, + TypeSSHFP: {setSSHFP, true}, + TypeTALINK: {setTALINK, false}, + TypeTA: {setTA, true}, + TypeTLSA: {setTLSA, true}, + TypeTXT: {setTXT, true}, + TypeUID: {setUID, false}, + TypeUINFO: {setUINFO, true}, + TypeURI: {setURI, true}, + TypeX25: {setX25, false}, + TypeTKEY: {setTKEY, true}, +} diff 
--git a/vendor/github.com/miekg/dns/scanner.go b/vendor/github.com/miekg/dns/scanner.go new file mode 100644 index 00000000000..424e5af9f59 --- /dev/null +++ b/vendor/github.com/miekg/dns/scanner.go @@ -0,0 +1,56 @@ +package dns + +// Implement a simple scanner, return a byte stream from an io reader. + +import ( + "bufio" + "context" + "io" + "text/scanner" +) + +type scan struct { + src *bufio.Reader + position scanner.Position + eof bool // Have we just seen a eof + ctx context.Context +} + +func scanInit(r io.Reader) (*scan, context.CancelFunc) { + s := new(scan) + s.src = bufio.NewReader(r) + s.position.Line = 1 + + ctx, cancel := context.WithCancel(context.Background()) + s.ctx = ctx + + return s, cancel +} + +// tokenText returns the next byte from the input +func (s *scan) tokenText() (byte, error) { + c, err := s.src.ReadByte() + if err != nil { + return c, err + } + select { + case <-s.ctx.Done(): + return c, context.Canceled + default: + break + } + + // delay the newline handling until the next token is delivered, + // fixes off-by-one errors when reporting a parse error. + if s.eof == true { + s.position.Line++ + s.position.Column = 0 + s.eof = false + } + if c == '\n' { + s.eof = true + return c, nil + } + s.position.Column++ + return c, nil +} diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go new file mode 100644 index 00000000000..2d98f148883 --- /dev/null +++ b/vendor/github.com/miekg/dns/server.go @@ -0,0 +1,782 @@ +// DNS server implementation. + +package dns + +import ( + "bytes" + "crypto/tls" + "encoding/binary" + "io" + "net" + "sync" + "sync/atomic" + "time" +) + +// Default maximum number of TCP queries before we close the socket. +const maxTCPQueries = 128 + +// Interval for stop worker if no load +const idleWorkerTimeout = 10 * time.Second + +// Maximum number of workers +const maxWorkersCount = 10000 + +// Handler is implemented by any value that implements ServeDNS. +type Handler interface { + ServeDNS(w ResponseWriter, r *Msg) +} + +// A ResponseWriter interface is used by an DNS handler to +// construct an DNS response. +type ResponseWriter interface { + // LocalAddr returns the net.Addr of the server + LocalAddr() net.Addr + // RemoteAddr returns the net.Addr of the client that sent the current request. + RemoteAddr() net.Addr + // WriteMsg writes a reply back to the client. + WriteMsg(*Msg) error + // Write writes a raw buffer back to the client. + Write([]byte) (int, error) + // Close closes the connection. + Close() error + // TsigStatus returns the status of the Tsig. + TsigStatus() error + // TsigTimersOnly sets the tsig timers only boolean. + TsigTimersOnly(bool) + // Hijack lets the caller take over the connection. + // After a call to Hijack(), the DNS package will not do anything with the connection. + Hijack() +} + +type response struct { + msg []byte + hijacked bool // connection has been hijacked by handler + tsigStatus error + tsigTimersOnly bool + tsigRequestMAC string + tsigSecret map[string]string // the tsig secrets + udp *net.UDPConn // i/o connection if UDP was used + tcp net.Conn // i/o connection if TCP was used + udpSession *SessionUDP // oob data to get egress interface right + writer Writer // writer to output the raw DNS bits +} + +// ServeMux is an DNS request multiplexer. It matches the +// zone name of each incoming request against a list of +// registered patterns add calls the handler for the pattern +// that most closely matches the zone name. 
ServeMux is DNSSEC aware, meaning +// that queries for the DS record are redirected to the parent zone (if that +// is also registered), otherwise the child gets the query. +// ServeMux is also safe for concurrent access from multiple goroutines. +type ServeMux struct { + z map[string]Handler + m *sync.RWMutex +} + +// NewServeMux allocates and returns a new ServeMux. +func NewServeMux() *ServeMux { return &ServeMux{z: make(map[string]Handler), m: new(sync.RWMutex)} } + +// DefaultServeMux is the default ServeMux used by Serve. +var DefaultServeMux = NewServeMux() + +// The HandlerFunc type is an adapter to allow the use of +// ordinary functions as DNS handlers. If f is a function +// with the appropriate signature, HandlerFunc(f) is a +// Handler object that calls f. +type HandlerFunc func(ResponseWriter, *Msg) + +// ServeDNS calls f(w, r). +func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) { + f(w, r) +} + +// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets. +func HandleFailed(w ResponseWriter, r *Msg) { + m := new(Msg) + m.SetRcode(r, RcodeServerFailure) + // does not matter if this write fails + w.WriteMsg(m) +} + +func failedHandler() Handler { return HandlerFunc(HandleFailed) } + +// ListenAndServe Starts a server on address and network specified Invoke handler +// for incoming queries. +func ListenAndServe(addr string, network string, handler Handler) error { + server := &Server{Addr: addr, Net: network, Handler: handler} + return server.ListenAndServe() +} + +// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in +// http://golang.org/pkg/net/http/#ListenAndServeTLS +func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return err + } + + config := tls.Config{ + Certificates: []tls.Certificate{cert}, + } + + server := &Server{ + Addr: addr, + Net: "tcp-tls", + TLSConfig: &config, + Handler: handler, + } + + return server.ListenAndServe() +} + +// ActivateAndServe activates a server with a listener from systemd, +// l and p should not both be non-nil. +// If both l and p are not nil only p will be used. +// Invoke handler for incoming queries. +func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error { + server := &Server{Listener: l, PacketConn: p, Handler: handler} + return server.ActivateAndServe() +} + +func (mux *ServeMux) match(q string, t uint16) Handler { + mux.m.RLock() + defer mux.m.RUnlock() + var handler Handler + b := make([]byte, len(q)) // worst case, one label of length q + off := 0 + end := false + for { + l := len(q[off:]) + for i := 0; i < l; i++ { + b[i] = q[off+i] + if b[i] >= 'A' && b[i] <= 'Z' { + b[i] |= ('a' - 'A') + } + } + if h, ok := mux.z[string(b[:l])]; ok { // causes garbage, might want to change the map key + if t != TypeDS { + return h + } + // Continue for DS to see if we have a parent too, if so delegeate to the parent + handler = h + } + off, end = NextLabel(q, off) + if end { + break + } + } + // Wildcard match, if we have found nothing try the root zone as a last resort. + if h, ok := mux.z["."]; ok { + return h + } + return handler +} + +// Handle adds a handler to the ServeMux for pattern. +func (mux *ServeMux) Handle(pattern string, handler Handler) { + if pattern == "" { + panic("dns: invalid pattern " + pattern) + } + mux.m.Lock() + mux.z[Fqdn(pattern)] = handler + mux.m.Unlock() +} + +// HandleFunc adds a handler function to the ServeMux for pattern. 
+func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
+ mux.Handle(pattern, HandlerFunc(handler))
+}
+
+// HandleRemove deregisters the handler specific to pattern from the ServeMux.
+func (mux *ServeMux) HandleRemove(pattern string) {
+ if pattern == "" {
+ panic("dns: invalid pattern " + pattern)
+ }
+ mux.m.Lock()
+ delete(mux.z, Fqdn(pattern))
+ mux.m.Unlock()
+}
+
+// ServeDNS dispatches the request to the handler whose
+// pattern most closely matches the request message. If DefaultServeMux
+// is used the correct thing for DS queries is done: a possible parent
+// is sought.
+// If no handler is found, a standard SERVFAIL message is returned.
+// If the request message does not have exactly one question in the
+// question section, a SERVFAIL is returned, unless Unsafe is true.
+func (mux *ServeMux) ServeDNS(w ResponseWriter, request *Msg) {
+ var h Handler
+ if len(request.Question) < 1 { // allow more than one question
+ h = failedHandler()
+ } else {
+ if h = mux.match(request.Question[0].Name, request.Question[0].Qtype); h == nil {
+ h = failedHandler()
+ }
+ }
+ h.ServeDNS(w, request)
+}
+
+// Handle registers the handler with the given pattern
+// in the DefaultServeMux. The documentation for
+// ServeMux explains how patterns are matched.
+func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
+
+// HandleRemove deregisters the handler with the given pattern
+// in the DefaultServeMux.
+func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
+
+// HandleFunc registers the handler function with the given pattern
+// in the DefaultServeMux.
+func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
+ DefaultServeMux.HandleFunc(pattern, handler)
+}
+
+// Writer writes raw DNS messages; each call to Write should send an entire message.
+type Writer interface {
+ io.Writer
+}
+
+// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message.
+type Reader interface {
+ // ReadTCP reads a raw message from a TCP connection. Implementations may alter
+ // connection properties, for example the read-deadline.
+ ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error)
+ // ReadUDP reads a raw message from a UDP connection. Implementations may alter
+ // connection properties, for example the read-deadline.
+ ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
+}
+
+// defaultReader is an adapter for the Server struct that implements the Reader interface
+// using the readTCP and readUDP func of the embedded Server.
+type defaultReader struct {
+ *Server
+}
+
+func (dr *defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
+ return dr.readTCP(conn, timeout)
+}
+
+func (dr *defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
+ return dr.readUDP(conn, timeout)
+}
+
+// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader.
+// Implementations should never return a nil Reader.
+type DecorateReader func(Reader) Reader
+
+// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer.
+// Implementations should never return a nil Writer.
+type DecorateWriter func(Writer) Writer
+
+// A Server defines parameters for running a DNS server.
+type Server struct {
+ // Address to listen on, ":dns" if empty.
+ Addr string + // if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one + Net string + // TCP Listener to use, this is to aid in systemd's socket activation. + Listener net.Listener + // TLS connection configuration + TLSConfig *tls.Config + // UDP "Listener" to use, this is to aid in systemd's socket activation. + PacketConn net.PacketConn + // Handler to invoke, dns.DefaultServeMux if nil. + Handler Handler + // Default buffer size to use to read incoming UDP messages. If not set + // it defaults to MinMsgSize (512 B). + UDPSize int + // The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second. + ReadTimeout time.Duration + // The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second. + WriteTimeout time.Duration + // TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966). + IdleTimeout func() time.Duration + // Secret(s) for Tsig map[]. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2). + TsigSecret map[string]string + // Unsafe instructs the server to disregard any sanity checks and directly hand the message to + // the handler. It will specifically not check if the query has the QR bit not set. + Unsafe bool + // If NotifyStartedFunc is set it is called once the server has started listening. + NotifyStartedFunc func() + // DecorateReader is optional, allows customization of the process that reads raw DNS messages. + DecorateReader DecorateReader + // DecorateWriter is optional, allows customization of the process that writes raw DNS messages. + DecorateWriter DecorateWriter + // Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1). + MaxTCPQueries int + + // UDP packet or TCP connection queue + queue chan *response + // Workers count + workersCount int32 + // Shutdown handling + lock sync.RWMutex + started bool +} + +func (srv *Server) worker(w *response) { + srv.serve(w) + + for { + count := atomic.LoadInt32(&srv.workersCount) + if count > maxWorkersCount { + return + } + if atomic.CompareAndSwapInt32(&srv.workersCount, count, count+1) { + break + } + } + + defer atomic.AddInt32(&srv.workersCount, -1) + + inUse := false + timeout := time.NewTimer(idleWorkerTimeout) + defer timeout.Stop() +LOOP: + for { + select { + case w, ok := <-srv.queue: + if !ok { + break LOOP + } + inUse = true + srv.serve(w) + case <-timeout.C: + if !inUse { + break LOOP + } + inUse = false + timeout.Reset(idleWorkerTimeout) + } + } +} + +func (srv *Server) spawnWorker(w *response) { + select { + case srv.queue <- w: + default: + go srv.worker(w) + } +} + +// ListenAndServe starts a nameserver on the configured address in *Server. 
+func (srv *Server) ListenAndServe() error { + srv.lock.Lock() + defer srv.lock.Unlock() + if srv.started { + return &Error{err: "server already started"} + } + + addr := srv.Addr + if addr == "" { + addr = ":domain" + } + if srv.UDPSize == 0 { + srv.UDPSize = MinMsgSize + } + srv.queue = make(chan *response) + defer close(srv.queue) + switch srv.Net { + case "tcp", "tcp4", "tcp6": + a, err := net.ResolveTCPAddr(srv.Net, addr) + if err != nil { + return err + } + l, err := net.ListenTCP(srv.Net, a) + if err != nil { + return err + } + srv.Listener = l + srv.started = true + srv.lock.Unlock() + err = srv.serveTCP(l) + srv.lock.Lock() // to satisfy the defer at the top + return err + case "tcp-tls", "tcp4-tls", "tcp6-tls": + network := "tcp" + if srv.Net == "tcp4-tls" { + network = "tcp4" + } else if srv.Net == "tcp6-tls" { + network = "tcp6" + } + + l, err := tls.Listen(network, addr, srv.TLSConfig) + if err != nil { + return err + } + srv.Listener = l + srv.started = true + srv.lock.Unlock() + err = srv.serveTCP(l) + srv.lock.Lock() // to satisfy the defer at the top + return err + case "udp", "udp4", "udp6": + a, err := net.ResolveUDPAddr(srv.Net, addr) + if err != nil { + return err + } + l, err := net.ListenUDP(srv.Net, a) + if err != nil { + return err + } + if e := setUDPSocketOptions(l); e != nil { + return e + } + srv.PacketConn = l + srv.started = true + srv.lock.Unlock() + err = srv.serveUDP(l) + srv.lock.Lock() // to satisfy the defer at the top + return err + } + return &Error{err: "bad network"} +} + +// ActivateAndServe starts a nameserver with the PacketConn or Listener +// configured in *Server. Its main use is to start a server from systemd. +func (srv *Server) ActivateAndServe() error { + srv.lock.Lock() + defer srv.lock.Unlock() + if srv.started { + return &Error{err: "server already started"} + } + + pConn := srv.PacketConn + l := srv.Listener + srv.queue = make(chan *response) + defer close(srv.queue) + if pConn != nil { + if srv.UDPSize == 0 { + srv.UDPSize = MinMsgSize + } + // Check PacketConn interface's type is valid and value + // is not nil + if t, ok := pConn.(*net.UDPConn); ok && t != nil { + if e := setUDPSocketOptions(t); e != nil { + return e + } + srv.started = true + srv.lock.Unlock() + e := srv.serveUDP(t) + srv.lock.Lock() // to satisfy the defer at the top + return e + } + } + if l != nil { + srv.started = true + srv.lock.Unlock() + e := srv.serveTCP(l) + srv.lock.Lock() // to satisfy the defer at the top + return e + } + return &Error{err: "bad listeners"} +} + +// Shutdown shuts down a server. After a call to Shutdown, ListenAndServe and +// ActivateAndServe will return. +func (srv *Server) Shutdown() error { + srv.lock.Lock() + if !srv.started { + srv.lock.Unlock() + return &Error{err: "server not started"} + } + srv.started = false + srv.lock.Unlock() + + if srv.PacketConn != nil { + srv.PacketConn.Close() + } + if srv.Listener != nil { + srv.Listener.Close() + } + return nil +} + +// getReadTimeout is a helper func to use system timeout if server did not intend to change it. +func (srv *Server) getReadTimeout() time.Duration { + rtimeout := dnsTimeout + if srv.ReadTimeout != 0 { + rtimeout = srv.ReadTimeout + } + return rtimeout +} + +// serveTCP starts a TCP listener for the server. 
+func (srv *Server) serveTCP(l net.Listener) error { + defer l.Close() + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + + for { + rw, err := l.Accept() + srv.lock.RLock() + if !srv.started { + srv.lock.RUnlock() + return nil + } + srv.lock.RUnlock() + if err != nil { + if neterr, ok := err.(net.Error); ok && neterr.Temporary() { + continue + } + return err + } + srv.spawnWorker(&response{tsigSecret: srv.TsigSecret, tcp: rw}) + } +} + +// serveUDP starts a UDP listener for the server. +func (srv *Server) serveUDP(l *net.UDPConn) error { + defer l.Close() + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } + + rtimeout := srv.getReadTimeout() + // deadline is not used here + for { + m, s, err := reader.ReadUDP(l, rtimeout) + srv.lock.RLock() + if !srv.started { + srv.lock.RUnlock() + return nil + } + srv.lock.RUnlock() + if err != nil { + if netErr, ok := err.(net.Error); ok && netErr.Temporary() { + continue + } + return err + } + if len(m) < headerSize { + continue + } + srv.spawnWorker(&response{msg: m, tsigSecret: srv.TsigSecret, udp: l, udpSession: s}) + } +} + +func (srv *Server) serve(w *response) { + if srv.DecorateWriter != nil { + w.writer = srv.DecorateWriter(w) + } else { + w.writer = w + } + + if w.udp != nil { + // serve UDP + srv.serveDNS(w) + return + } + + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } + + defer func() { + if !w.hijacked { + w.Close() + } + }() + + idleTimeout := tcpIdleTimeout + if srv.IdleTimeout != nil { + idleTimeout = srv.IdleTimeout() + } + + timeout := srv.getReadTimeout() + + limit := srv.MaxTCPQueries + if limit == 0 { + limit = maxTCPQueries + } + + for q := 0; q < limit || limit == -1; q++ { + var err error + w.msg, err = reader.ReadTCP(w.tcp, timeout) + if err != nil { + // TODO(tmthrgd): handle error + break + } + srv.serveDNS(w) + if w.tcp == nil { + break // Close() was called + } + if w.hijacked { + break // client will call Close() themselves + } + // The first read uses the read timeout, the rest use the + // idle timeout. 
+ timeout = idleTimeout + } +} + +func (srv *Server) serveDNS(w *response) { + req := new(Msg) + err := req.Unpack(w.msg) + if err != nil { // Send a FormatError back + x := new(Msg) + x.SetRcodeFormatError(req) + w.WriteMsg(x) + return + } + if !srv.Unsafe && req.Response { + return + } + + w.tsigStatus = nil + if w.tsigSecret != nil { + if t := req.IsTsig(); t != nil { + if secret, ok := w.tsigSecret[t.Hdr.Name]; ok { + w.tsigStatus = TsigVerify(w.msg, secret, "", false) + } else { + w.tsigStatus = ErrSecret + } + w.tsigTimersOnly = false + w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC + } + } + + handler := srv.Handler + if handler == nil { + handler = DefaultServeMux + } + + handler.ServeDNS(w, req) // Writes back to the client +} + +func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { + conn.SetReadDeadline(time.Now().Add(timeout)) + l := make([]byte, 2) + n, err := conn.Read(l) + if err != nil || n != 2 { + if err != nil { + return nil, err + } + return nil, ErrShortRead + } + length := binary.BigEndian.Uint16(l) + if length == 0 { + return nil, ErrShortRead + } + m := make([]byte, int(length)) + n, err = conn.Read(m[:int(length)]) + if err != nil || n == 0 { + if err != nil { + return nil, err + } + return nil, ErrShortRead + } + i := n + for i < int(length) { + j, err := conn.Read(m[i:int(length)]) + if err != nil { + return nil, err + } + i += j + } + n = i + m = m[:n] + return m, nil +} + +func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { + conn.SetReadDeadline(time.Now().Add(timeout)) + m := make([]byte, srv.UDPSize) + n, s, err := ReadFromSessionUDP(conn, m) + if err != nil { + return nil, nil, err + } + m = m[:n] + return m, s, nil +} + +// WriteMsg implements the ResponseWriter.WriteMsg method. +func (w *response) WriteMsg(m *Msg) (err error) { + var data []byte + if w.tsigSecret != nil { // if no secrets, dont check for the tsig (which is a longer check) + if t := m.IsTsig(); t != nil { + data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly) + if err != nil { + return err + } + _, err = w.writer.Write(data) + return err + } + } + data, err = m.Pack() + if err != nil { + return err + } + _, err = w.writer.Write(data) + return err +} + +// Write implements the ResponseWriter.Write method. +func (w *response) Write(m []byte) (int, error) { + switch { + case w.udp != nil: + n, err := WriteToSessionUDP(w.udp, m, w.udpSession) + return n, err + case w.tcp != nil: + lm := len(m) + if lm < 2 { + return 0, io.ErrShortBuffer + } + if lm > MaxMsgSize { + return 0, &Error{err: "message too large"} + } + l := make([]byte, 2, 2+lm) + binary.BigEndian.PutUint16(l, uint16(lm)) + m = append(l, m...) + + n, err := io.Copy(w.tcp, bytes.NewReader(m)) + return int(n), err + } + panic("not reached") +} + +// LocalAddr implements the ResponseWriter.LocalAddr method. +func (w *response) LocalAddr() net.Addr { + if w.tcp != nil { + return w.tcp.LocalAddr() + } + return w.udp.LocalAddr() +} + +// RemoteAddr implements the ResponseWriter.RemoteAddr method. +func (w *response) RemoteAddr() net.Addr { + if w.tcp != nil { + return w.tcp.RemoteAddr() + } + return w.udpSession.RemoteAddr() +} + +// TsigStatus implements the ResponseWriter.TsigStatus method. +func (w *response) TsigStatus() error { return w.tsigStatus } + +// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method. 
+func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b } + +// Hijack implements the ResponseWriter.Hijack method. +func (w *response) Hijack() { w.hijacked = true } + +// Close implements the ResponseWriter.Close method +func (w *response) Close() error { + // Can't close the udp conn, as that is actually the listener. + if w.tcp != nil { + e := w.tcp.Close() + w.tcp = nil + return e + } + return nil +} diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go new file mode 100644 index 00000000000..f31e9e6843d --- /dev/null +++ b/vendor/github.com/miekg/dns/sig0.go @@ -0,0 +1,218 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "encoding/binary" + "math/big" + "strings" + "time" +) + +// Sign signs a dns.Msg. It fills the signature with the appropriate data. +// The SIG record should have the SignerName, KeyTag, Algorithm, Inception +// and Expiration set. +func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) { + if k == nil { + return nil, ErrPrivKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return nil, ErrKey + } + rr.Header().Rrtype = TypeSIG + rr.Header().Class = ClassANY + rr.Header().Ttl = 0 + rr.Header().Name = "." + rr.OrigTtl = 0 + rr.TypeCovered = 0 + rr.Labels = 0 + + buf := make([]byte, m.Len()+rr.len()) + mbuf, err := m.PackBuffer(buf) + if err != nil { + return nil, err + } + if &buf[0] != &mbuf[0] { + return nil, ErrBuf + } + off, err := PackRR(rr, buf, len(mbuf), nil, false) + if err != nil { + return nil, err + } + buf = buf[:off:cap(buf)] + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return nil, ErrAlg + } + + hasher := hash.New() + // Write SIG rdata + hasher.Write(buf[len(mbuf)+1+2+2+4+2:]) + // Write message + hasher.Write(buf[:len(mbuf)]) + + signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm) + if err != nil { + return nil, err + } + + rr.Signature = toBase64(signature) + + buf = append(buf, signature...) + if len(buf) > int(^uint16(0)) { + return nil, ErrBuf + } + // Adjust sig data length + rdoff := len(mbuf) + 1 + 2 + 2 + 4 + rdlen := binary.BigEndian.Uint16(buf[rdoff:]) + rdlen += uint16(len(signature)) + binary.BigEndian.PutUint16(buf[rdoff:], rdlen) + // Adjust additional count + adc := binary.BigEndian.Uint16(buf[10:]) + adc++ + binary.BigEndian.PutUint16(buf[10:], adc) + return buf, nil +} + +// Verify validates the message buf using the key k. +// It's assumed that buf is a valid message from which rr was unpacked. 
+func (rr *SIG) Verify(k *KEY, buf []byte) error { + if k == nil { + return ErrKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + + var hash crypto.Hash + switch rr.Algorithm { + case DSA, RSASHA1: + hash = crypto.SHA1 + case RSASHA256, ECDSAP256SHA256: + hash = crypto.SHA256 + case ECDSAP384SHA384: + hash = crypto.SHA384 + case RSASHA512: + hash = crypto.SHA512 + default: + return ErrAlg + } + hasher := hash.New() + + buflen := len(buf) + qdc := binary.BigEndian.Uint16(buf[4:]) + anc := binary.BigEndian.Uint16(buf[6:]) + auc := binary.BigEndian.Uint16(buf[8:]) + adc := binary.BigEndian.Uint16(buf[10:]) + offset := 12 + var err error + for i := uint16(0); i < qdc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type and Class + offset += 2 + 2 + } + for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type, Class and TTL + offset += 2 + 2 + 4 + if offset+1 >= buflen { + continue + } + var rdlen uint16 + rdlen = binary.BigEndian.Uint16(buf[offset:]) + offset += 2 + offset += int(rdlen) + } + if offset >= buflen { + return &Error{err: "overflowing unpacking signed message"} + } + + // offset should be just prior to SIG + bodyend := offset + // owner name SHOULD be root + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip Type, Class, TTL, RDLen + offset += 2 + 2 + 4 + 2 + sigstart := offset + // Skip Type Covered, Algorithm, Labels, Original TTL + offset += 2 + 1 + 1 + 4 + if offset+4+4 >= buflen { + return &Error{err: "overflow unpacking signed message"} + } + expire := binary.BigEndian.Uint32(buf[offset:]) + offset += 4 + incept := binary.BigEndian.Uint32(buf[offset:]) + offset += 4 + now := uint32(time.Now().Unix()) + if now < incept || now > expire { + return ErrTime + } + // Skip key tag + offset += 2 + var signername string + signername, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // If key has come from the DNS name compression might + // have mangled the case of the name + if strings.ToLower(signername) != strings.ToLower(k.Header().Name) { + return &Error{err: "signer name doesn't match key name"} + } + sigend := offset + hasher.Write(buf[sigstart:sigend]) + hasher.Write(buf[:10]) + hasher.Write([]byte{ + byte((adc - 1) << 8), + byte(adc - 1), + }) + hasher.Write(buf[12:bodyend]) + + hashed := hasher.Sum(nil) + sig := buf[sigend:] + switch k.Algorithm { + case DSA: + pk := k.publicKeyDSA() + sig = sig[1:] + r := big.NewInt(0) + r.SetBytes(sig[:len(sig)/2]) + s := big.NewInt(0) + s.SetBytes(sig[len(sig)/2:]) + if pk != nil { + if dsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + case RSASHA1, RSASHA256, RSASHA512: + pk := k.publicKeyRSA() + if pk != nil { + return rsa.VerifyPKCS1v15(pk, hash, hashed, sig) + } + case ECDSAP256SHA256, ECDSAP384SHA384: + pk := k.publicKeyECDSA() + r := big.NewInt(0) + r.SetBytes(sig[:len(sig)/2]) + s := big.NewInt(0) + s.SetBytes(sig[len(sig)/2:]) + if pk != nil { + if ecdsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + } + return ErrKeyAlg +} diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go new file mode 100644 index 00000000000..9573c7d0b8c --- /dev/null +++ b/vendor/github.com/miekg/dns/singleinflight.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go 
Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Adapted for dns package usage by Miek Gieben. + +package dns + +import "sync" +import "time" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + val *Msg + rtt time.Duration + err error + dups int +} + +// singleflight represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type singleflight struct { + sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) { + g.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.Unlock() + c.wg.Wait() + return c.val, c.rtt, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.Unlock() + + c.val, c.rtt, c.err = fn() + c.wg.Done() + + g.Lock() + delete(g.m, key) + g.Unlock() + + return c.val, c.rtt, c.err, c.dups > 0 +} diff --git a/vendor/github.com/miekg/dns/smimea.go b/vendor/github.com/miekg/dns/smimea.go new file mode 100644 index 00000000000..4e7ded4b386 --- /dev/null +++ b/vendor/github.com/miekg/dns/smimea.go @@ -0,0 +1,47 @@ +package dns + +import ( + "crypto/sha256" + "crypto/x509" + "encoding/hex" +) + +// Sign creates a SMIMEA record from an SSL certificate. +func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { + r.Hdr.Rrtype = TypeSMIMEA + r.Usage = uint8(usage) + r.Selector = uint8(selector) + r.MatchingType = uint8(matchingType) + + r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err + } + return nil +} + +// Verify verifies a SMIMEA record against an SSL certificate. If it is OK +// a nil error is returned. +func (r *SMIMEA) Verify(cert *x509.Certificate) error { + c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err // Not also ErrSig? + } + if r.Certificate == c { + return nil + } + return ErrSig // ErrSig, really? +} + +// SMIMEAName returns the ownername of a SMIMEA resource record as per the +// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3 +func SMIMEAName(email, domain string) (string, error) { + hasher := sha256.New() + hasher.Write([]byte(email)) + + // RFC Section 3: "The local-part is hashed using the SHA2-256 + // algorithm with the hash truncated to 28 octets and + // represented in its hexadecimal representation to become the + // left-most label in the prepared domain name" + return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil +} diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/miekg/dns/tlsa.go new file mode 100644 index 00000000000..431e2fb5afc --- /dev/null +++ b/vendor/github.com/miekg/dns/tlsa.go @@ -0,0 +1,47 @@ +package dns + +import ( + "crypto/x509" + "net" + "strconv" +) + +// Sign creates a TLSA record from an SSL certificate. 
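// Editorial sketch (not part of the vendored source): SMIMEAName above builds
// the owner name for an SMIMEA record by hashing the local-part of the mail
// address (passed as the first argument), and SMIMEA.Sign fills the record
// from an x509 certificate. "hugh", "example.com." and the cert variable are
// placeholders; usage 3, selector 1 and matching type 1 correspond to
// DANE-EE / SPKI / SHA2-256:
//
//    owner, err := dns.SMIMEAName("hugh", "example.com.")
//    if err != nil { ... }
//
//    rr := new(dns.SMIMEA)
//    rr.Hdr = dns.RR_Header{Name: owner, Class: dns.ClassINET, Ttl: 3600}
//    if err := rr.Sign(3, 1, 1, cert); err != nil { // cert is an *x509.Certificate
//        ...
//    }
//    fmt.Println(rr) // presentation format, certificate data split into chunks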
+func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { + r.Hdr.Rrtype = TypeTLSA + r.Usage = uint8(usage) + r.Selector = uint8(selector) + r.MatchingType = uint8(matchingType) + + r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err + } + return nil +} + +// Verify verifies a TLSA record against an SSL certificate. If it is OK +// a nil error is returned. +func (r *TLSA) Verify(cert *x509.Certificate) error { + c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err // Not also ErrSig? + } + if r.Certificate == c { + return nil + } + return ErrSig // ErrSig, really? +} + +// TLSAName returns the ownername of a TLSA resource record as per the +// rules specified in RFC 6698, Section 3. +func TLSAName(name, service, network string) (string, error) { + if !IsFqdn(name) { + return "", ErrFqdn + } + p, err := net.LookupPort(network, service) + if err != nil { + return "", err + } + return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil +} diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go new file mode 100644 index 00000000000..4837b4ab1fd --- /dev/null +++ b/vendor/github.com/miekg/dns/tsig.go @@ -0,0 +1,386 @@ +package dns + +import ( + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/binary" + "encoding/hex" + "hash" + "strconv" + "strings" + "time" +) + +// HMAC hashing codes. These are transmitted as domain names. +const ( + HmacMD5 = "hmac-md5.sig-alg.reg.int." + HmacSHA1 = "hmac-sha1." + HmacSHA256 = "hmac-sha256." + HmacSHA512 = "hmac-sha512." +) + +// TSIG is the RR the holds the transaction signature of a message. +// See RFC 2845 and RFC 4635. +type TSIG struct { + Hdr RR_Header + Algorithm string `dns:"domain-name"` + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 + MACSize uint16 + MAC string `dns:"size-hex:MACSize"` + OrigId uint16 + Error uint16 + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// TSIG has no official presentation format, but this will suffice. + +func (rr *TSIG) String() string { + s := "\n;; TSIG PSEUDOSECTION:\n" + s += rr.Hdr.String() + + " " + rr.Algorithm + + " " + tsigTimeToString(rr.TimeSigned) + + " " + strconv.Itoa(int(rr.Fudge)) + + " " + strconv.Itoa(int(rr.MACSize)) + + " " + strings.ToUpper(rr.MAC) + + " " + strconv.Itoa(int(rr.OrigId)) + + " " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR + " " + strconv.Itoa(int(rr.OtherLen)) + + " " + rr.OtherData + return s +} + +// The following values must be put in wireformat, so that the MAC can be calculated. +// RFC 2845, section 3.4.2. TSIG Variables. +type tsigWireFmt struct { + // From RR_Header + Name string `dns:"domain-name"` + Class uint16 + Ttl uint32 + // Rdata of the TSIG + Algorithm string `dns:"domain-name"` + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 + // MACSize, MAC and OrigId excluded + Error uint16 + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. Request MAC +type macWireFmt struct { + MACSize uint16 + MAC string `dns:"size-hex:MACSize"` +} + +// 3.3. Time values used in TSIG calculations +type timerWireFmt struct { + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 +} + +// TsigGenerate fills out the TSIG record attached to the message. 
+// The message should contain +// a "stub" TSIG RR with the algorithm, key name (owner name of the RR), +// time fudge (defaults to 300 seconds) and the current time +// The TSIG MAC is saved in that Tsig RR. +// When TsigGenerate is called for the first time requestMAC is set to the empty string and +// timersOnly is false. +// If something goes wrong an error is returned, otherwise it is nil. +func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) { + if m.IsTsig() == nil { + panic("dns: TSIG not last RR in additional") + } + // If we barf here, the caller is to blame + rawsecret, err := fromBase64([]byte(secret)) + if err != nil { + return nil, "", err + } + + rr := m.Extra[len(m.Extra)-1].(*TSIG) + m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg + mbuf, err := m.Pack() + if err != nil { + return nil, "", err + } + buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly) + + t := new(TSIG) + var h hash.Hash + switch strings.ToLower(rr.Algorithm) { + case HmacMD5: + h = hmac.New(md5.New, []byte(rawsecret)) + case HmacSHA1: + h = hmac.New(sha1.New, []byte(rawsecret)) + case HmacSHA256: + h = hmac.New(sha256.New, []byte(rawsecret)) + case HmacSHA512: + h = hmac.New(sha512.New, []byte(rawsecret)) + default: + return nil, "", ErrKeyAlg + } + h.Write(buf) + t.MAC = hex.EncodeToString(h.Sum(nil)) + t.MACSize = uint16(len(t.MAC) / 2) // Size is half! + + t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0} + t.Fudge = rr.Fudge + t.TimeSigned = rr.TimeSigned + t.Algorithm = rr.Algorithm + t.OrigId = m.Id + + tbuf := make([]byte, t.len()) + if off, err := PackRR(t, tbuf, 0, nil, false); err == nil { + tbuf = tbuf[:off] // reset to actual size used + } else { + return nil, "", err + } + mbuf = append(mbuf, tbuf...) + // Update the ArCount directly in the buffer. + binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1)) + + return mbuf, t.MAC, nil +} + +// TsigVerify verifies the TSIG on a message. +// If the signature does not validate err contains the +// error, otherwise it is nil. +func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { + rawsecret, err := fromBase64([]byte(secret)) + if err != nil { + return err + } + // Strip the TSIG from the incoming msg + stripped, tsig, err := stripTsig(msg) + if err != nil { + return err + } + + msgMAC, err := hex.DecodeString(tsig.MAC) + if err != nil { + return err + } + + buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly) + + // Fudge factor works both ways. A message can arrive before it was signed because + // of clock skew. + now := uint64(time.Now().Unix()) + ti := now - tsig.TimeSigned + if now < tsig.TimeSigned { + ti = tsig.TimeSigned - now + } + if uint64(tsig.Fudge) < ti { + return ErrTime + } + + var h hash.Hash + switch strings.ToLower(tsig.Algorithm) { + case HmacMD5: + h = hmac.New(md5.New, rawsecret) + case HmacSHA1: + h = hmac.New(sha1.New, rawsecret) + case HmacSHA256: + h = hmac.New(sha256.New, rawsecret) + case HmacSHA512: + h = hmac.New(sha512.New, rawsecret) + default: + return ErrKeyAlg + } + h.Write(buf) + if !hmac.Equal(h.Sum(nil), msgMAC) { + return ErrSig + } + return nil +} + +// Create a wiredata buffer for the MAC calculation. +func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte { + var buf []byte + if rr.TimeSigned == 0 { + rr.TimeSigned = uint64(time.Now().Unix()) + } + if rr.Fudge == 0 { + rr.Fudge = 300 // Standard (RFC) default. 
+ } + + // Replace message ID in header with original ID from TSIG + binary.BigEndian.PutUint16(msgbuf[0:2], rr.OrigId) + + if requestMAC != "" { + m := new(macWireFmt) + m.MACSize = uint16(len(requestMAC) / 2) + m.MAC = requestMAC + buf = make([]byte, len(requestMAC)) // long enough + n, _ := packMacWire(m, buf) + buf = buf[:n] + } + + tsigvar := make([]byte, DefaultMsgSize) + if timersOnly { + tsig := new(timerWireFmt) + tsig.TimeSigned = rr.TimeSigned + tsig.Fudge = rr.Fudge + n, _ := packTimerWire(tsig, tsigvar) + tsigvar = tsigvar[:n] + } else { + tsig := new(tsigWireFmt) + tsig.Name = strings.ToLower(rr.Hdr.Name) + tsig.Class = ClassANY + tsig.Ttl = rr.Hdr.Ttl + tsig.Algorithm = strings.ToLower(rr.Algorithm) + tsig.TimeSigned = rr.TimeSigned + tsig.Fudge = rr.Fudge + tsig.Error = rr.Error + tsig.OtherLen = rr.OtherLen + tsig.OtherData = rr.OtherData + n, _ := packTsigWire(tsig, tsigvar) + tsigvar = tsigvar[:n] + } + + if requestMAC != "" { + x := append(buf, msgbuf...) + buf = append(x, tsigvar...) + } else { + buf = append(msgbuf, tsigvar...) + } + return buf +} + +// Strip the TSIG from the raw message. +func stripTsig(msg []byte) ([]byte, *TSIG, error) { + // Copied from msg.go's Unpack() Header, but modified. + var ( + dh Header + err error + ) + off, tsigoff := 0, 0 + + if dh, off, err = unpackMsgHdr(msg, off); err != nil { + return nil, nil, err + } + if dh.Arcount == 0 { + return nil, nil, ErrNoSig + } + + // Rcode, see msg.go Unpack() + if int(dh.Bits&0xF) == RcodeNotAuth { + return nil, nil, ErrAuth + } + + for i := 0; i < int(dh.Qdcount); i++ { + _, off, err = unpackQuestion(msg, off) + if err != nil { + return nil, nil, err + } + } + + _, off, err = unpackRRslice(int(dh.Ancount), msg, off) + if err != nil { + return nil, nil, err + } + _, off, err = unpackRRslice(int(dh.Nscount), msg, off) + if err != nil { + return nil, nil, err + } + + rr := new(TSIG) + var extra RR + for i := 0; i < int(dh.Arcount); i++ { + tsigoff = off + extra, off, err = UnpackRR(msg, off) + if err != nil { + return nil, nil, err + } + if extra.Header().Rrtype == TypeTSIG { + rr = extra.(*TSIG) + // Adjust Arcount. + arcount := binary.BigEndian.Uint16(msg[10:]) + binary.BigEndian.PutUint16(msg[10:], arcount-1) + break + } + } + if rr == nil { + return nil, nil, ErrNoSig + } + return msg[:tsigoff], rr, nil +} + +// Translate the TSIG time signed into a date. There is no +// need for RFC1982 calculations as this date is 48 bits. 
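// Editorial sketch (not part of the vendored source): TsigGenerate and
// TsigVerify above are normally not called directly; they are driven by
// Client, Server and Msg.SetTsig once a shared secret is configured. The key
// name, base64 secret and resolver address below are placeholders:
//
//    secret := map[string]string{"axfr.example.org.": "so6ZGir4GPAqINNh9U5c3A=="}
//
//    // client side
//    c := new(dns.Client)
//    c.TsigSecret = secret
//    m := new(dns.Msg)
//    m.SetQuestion("example.org.", dns.TypeSOA)
//    m.SetTsig("axfr.example.org.", dns.HmacSHA256, 300, time.Now().Unix())
//    in, _, err := c.Exchange(m, "198.51.100.1:53")
//
//    // server side: set Server.TsigSecret to the same map and check that
//    // w.TsigStatus() == nil in the handler before trusting the request.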
+func tsigTimeToString(t uint64) string { + ti := time.Unix(int64(t), 0).UTC() + return ti.Format("20060102150405") +} + +func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) { + // copied from zmsg.go TSIG packing + // RR_Header + off, err := PackDomainName(tw.Name, msg, 0, nil, false) + if err != nil { + return off, err + } + off, err = packUint16(tw.Class, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(tw.Ttl, msg, off) + if err != nil { + return off, err + } + + off, err = PackDomainName(tw.Algorithm, msg, off, nil, false) + if err != nil { + return off, err + } + off, err = packUint48(tw.TimeSigned, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(tw.Fudge, msg, off) + if err != nil { + return off, err + } + + off, err = packUint16(tw.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(tw.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(tw.OtherData, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func packMacWire(mw *macWireFmt, msg []byte) (int, error) { + off, err := packUint16(mw.MACSize, msg, 0) + if err != nil { + return off, err + } + off, err = packStringHex(mw.MAC, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) { + off, err := packUint48(tw.TimeSigned, msg, 0) + if err != nil { + return off, err + } + off, err = packUint16(tw.Fudge, msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go new file mode 100644 index 00000000000..a779ca8abc0 --- /dev/null +++ b/vendor/github.com/miekg/dns/types.go @@ -0,0 +1,1381 @@ +package dns + +import ( + "fmt" + "net" + "strconv" + "strings" + "time" +) + +type ( + // Type is a DNS type. + Type uint16 + // Class is a DNS class. + Class uint16 + // Name is a DNS domain name. + Name string +) + +// Packet formats + +// Wire constants and supported types. 
+const ( + // valid RR_Header.Rrtype and Question.qtype + + TypeNone uint16 = 0 + TypeA uint16 = 1 + TypeNS uint16 = 2 + TypeMD uint16 = 3 + TypeMF uint16 = 4 + TypeCNAME uint16 = 5 + TypeSOA uint16 = 6 + TypeMB uint16 = 7 + TypeMG uint16 = 8 + TypeMR uint16 = 9 + TypeNULL uint16 = 10 + TypePTR uint16 = 12 + TypeHINFO uint16 = 13 + TypeMINFO uint16 = 14 + TypeMX uint16 = 15 + TypeTXT uint16 = 16 + TypeRP uint16 = 17 + TypeAFSDB uint16 = 18 + TypeX25 uint16 = 19 + TypeISDN uint16 = 20 + TypeRT uint16 = 21 + TypeNSAPPTR uint16 = 23 + TypeSIG uint16 = 24 + TypeKEY uint16 = 25 + TypePX uint16 = 26 + TypeGPOS uint16 = 27 + TypeAAAA uint16 = 28 + TypeLOC uint16 = 29 + TypeNXT uint16 = 30 + TypeEID uint16 = 31 + TypeNIMLOC uint16 = 32 + TypeSRV uint16 = 33 + TypeATMA uint16 = 34 + TypeNAPTR uint16 = 35 + TypeKX uint16 = 36 + TypeCERT uint16 = 37 + TypeDNAME uint16 = 39 + TypeOPT uint16 = 41 // EDNS + TypeDS uint16 = 43 + TypeSSHFP uint16 = 44 + TypeRRSIG uint16 = 46 + TypeNSEC uint16 = 47 + TypeDNSKEY uint16 = 48 + TypeDHCID uint16 = 49 + TypeNSEC3 uint16 = 50 + TypeNSEC3PARAM uint16 = 51 + TypeTLSA uint16 = 52 + TypeSMIMEA uint16 = 53 + TypeHIP uint16 = 55 + TypeNINFO uint16 = 56 + TypeRKEY uint16 = 57 + TypeTALINK uint16 = 58 + TypeCDS uint16 = 59 + TypeCDNSKEY uint16 = 60 + TypeOPENPGPKEY uint16 = 61 + TypeCSYNC uint16 = 62 + TypeSPF uint16 = 99 + TypeUINFO uint16 = 100 + TypeUID uint16 = 101 + TypeGID uint16 = 102 + TypeUNSPEC uint16 = 103 + TypeNID uint16 = 104 + TypeL32 uint16 = 105 + TypeL64 uint16 = 106 + TypeLP uint16 = 107 + TypeEUI48 uint16 = 108 + TypeEUI64 uint16 = 109 + TypeURI uint16 = 256 + TypeCAA uint16 = 257 + TypeAVC uint16 = 258 + + TypeTKEY uint16 = 249 + TypeTSIG uint16 = 250 + + // valid Question.Qtype only + TypeIXFR uint16 = 251 + TypeAXFR uint16 = 252 + TypeMAILB uint16 = 253 + TypeMAILA uint16 = 254 + TypeANY uint16 = 255 + + TypeTA uint16 = 32768 + TypeDLV uint16 = 32769 + TypeReserved uint16 = 65535 + + // valid Question.Qclass + ClassINET = 1 + ClassCSNET = 2 + ClassCHAOS = 3 + ClassHESIOD = 4 + ClassNONE = 254 + ClassANY = 255 + + // Message Response Codes, see https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml + RcodeSuccess = 0 // NoError - No Error [DNS] + RcodeFormatError = 1 // FormErr - Format Error [DNS] + RcodeServerFailure = 2 // ServFail - Server Failure [DNS] + RcodeNameError = 3 // NXDomain - Non-Existent Domain [DNS] + RcodeNotImplemented = 4 // NotImp - Not Implemented [DNS] + RcodeRefused = 5 // Refused - Query Refused [DNS] + RcodeYXDomain = 6 // YXDomain - Name Exists when it should not [DNS Update] + RcodeYXRrset = 7 // YXRRSet - RR Set Exists when it should not [DNS Update] + RcodeNXRrset = 8 // NXRRSet - RR Set that should exist does not [DNS Update] + RcodeNotAuth = 9 // NotAuth - Server Not Authoritative for zone [DNS Update] + RcodeNotZone = 10 // NotZone - Name not contained in zone [DNS Update/TSIG] + RcodeBadSig = 16 // BADSIG - TSIG Signature Failure [TSIG] + RcodeBadVers = 16 // BADVERS - Bad OPT Version [EDNS0] + RcodeBadKey = 17 // BADKEY - Key not recognized [TSIG] + RcodeBadTime = 18 // BADTIME - Signature out of time window [TSIG] + RcodeBadMode = 19 // BADMODE - Bad TKEY Mode [TKEY] + RcodeBadName = 20 // BADNAME - Duplicate key name [TKEY] + RcodeBadAlg = 21 // BADALG - Algorithm not supported [TKEY] + RcodeBadTrunc = 22 // BADTRUNC - Bad Truncation [TSIG] + RcodeBadCookie = 23 // BADCOOKIE - Bad/missing Server Cookie [DNS Cookies] + + // Message Opcodes. There is no 3. 
+ OpcodeQuery = 0 + OpcodeIQuery = 1 + OpcodeStatus = 2 + OpcodeNotify = 4 + OpcodeUpdate = 5 +) + +// Header is the wire format for the DNS packet header. +type Header struct { + Id uint16 + Bits uint16 + Qdcount, Ancount, Nscount, Arcount uint16 +} + +const ( + headerSize = 12 + + // Header.Bits + _QR = 1 << 15 // query/response (response=1) + _AA = 1 << 10 // authoritative + _TC = 1 << 9 // truncated + _RD = 1 << 8 // recursion desired + _RA = 1 << 7 // recursion available + _Z = 1 << 6 // Z + _AD = 1 << 5 // authticated data + _CD = 1 << 4 // checking disabled +) + +// Various constants used in the LOC RR, See RFC 1887. +const ( + LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. + LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2. + LOC_HOURS = 60 * 1000 + LOC_DEGREES = 60 * LOC_HOURS + LOC_ALTITUDEBASE = 100000 +) + +// Different Certificate Types, see RFC 4398, Section 2.1 +const ( + CertPKIX = 1 + iota + CertSPKI + CertPGP + CertIPIX + CertISPKI + CertIPGP + CertACPKIX + CertIACPKIX + CertURI = 253 + CertOID = 254 +) + +// CertTypeToString converts the Cert Type to its string representation. +// See RFC 4398 and RFC 6944. +var CertTypeToString = map[uint16]string{ + CertPKIX: "PKIX", + CertSPKI: "SPKI", + CertPGP: "PGP", + CertIPIX: "IPIX", + CertISPKI: "ISPKI", + CertIPGP: "IPGP", + CertACPKIX: "ACPKIX", + CertIACPKIX: "IACPKIX", + CertURI: "URI", + CertOID: "OID", +} + +// StringToCertType is the reverseof CertTypeToString. +var StringToCertType = reverseInt16(CertTypeToString) + +//go:generate go run types_generate.go + +// Question holds a DNS question. There can be multiple questions in the +// question section of a message. Usually there is just one. +type Question struct { + Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed) + Qtype uint16 + Qclass uint16 +} + +func (q *Question) len() int { + return len(q.Name) + 1 + 2 + 2 +} + +func (q *Question) String() (s string) { + // prefix with ; (as in dig) + s = ";" + sprintName(q.Name) + "\t" + s += Class(q.Qclass).String() + "\t" + s += " " + Type(q.Qtype).String() + return s +} + +// ANY is a wildcard record. See RFC 1035, Section 3.2.3. ANY +// is named "*" there. +type ANY struct { + Hdr RR_Header + // Does not have any rdata +} + +func (rr *ANY) String() string { return rr.Hdr.String() } + +// CNAME RR. See RFC 1034. +type CNAME struct { + Hdr RR_Header + Target string `dns:"cdomain-name"` +} + +func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) } + +// HINFO RR. See RFC 1034. +type HINFO struct { + Hdr RR_Header + Cpu string + Os string +} + +func (rr *HINFO) String() string { + return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os}) +} + +// MB RR. See RFC 1035. +type MB struct { + Hdr RR_Header + Mb string `dns:"cdomain-name"` +} + +func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) } + +// MG RR. See RFC 1035. +type MG struct { + Hdr RR_Header + Mg string `dns:"cdomain-name"` +} + +func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) } + +// MINFO RR. See RFC 1035. +type MINFO struct { + Hdr RR_Header + Rmail string `dns:"cdomain-name"` + Email string `dns:"cdomain-name"` +} + +func (rr *MINFO) String() string { + return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email) +} + +// MR RR. See RFC 1035. +type MR struct { + Hdr RR_Header + Mr string `dns:"cdomain-name"` +} + +func (rr *MR) String() string { + return rr.Hdr.String() + sprintName(rr.Mr) +} + +// MF RR. See RFC 1035. 
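// Editorial sketch (not part of the vendored source): RR values such as the
// ones defined in this file can be built either as struct literals or by
// parsing presentation format with NewRR. Both forms below describe the same
// CNAME; the names are placeholders:
//
//    lit := &dns.CNAME{
//        Hdr:    dns.RR_Header{Name: "www.example.org.", Rrtype: dns.TypeCNAME, Class: dns.ClassINET, Ttl: 3600},
//        Target: "web.example.org.",
//    }
//
//    parsed, err := dns.NewRR("www.example.org. 3600 IN CNAME web.example.org.")
//    if err != nil { ... }
//
//    fmt.Println(lit.String() == parsed.String()) // should print true: same presentation format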
+type MF struct { + Hdr RR_Header + Mf string `dns:"cdomain-name"` +} + +func (rr *MF) String() string { + return rr.Hdr.String() + sprintName(rr.Mf) +} + +// MD RR. See RFC 1035. +type MD struct { + Hdr RR_Header + Md string `dns:"cdomain-name"` +} + +func (rr *MD) String() string { + return rr.Hdr.String() + sprintName(rr.Md) +} + +// MX RR. See RFC 1035. +type MX struct { + Hdr RR_Header + Preference uint16 + Mx string `dns:"cdomain-name"` +} + +func (rr *MX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx) +} + +// AFSDB RR. See RFC 1183. +type AFSDB struct { + Hdr RR_Header + Subtype uint16 + Hostname string `dns:"cdomain-name"` +} + +func (rr *AFSDB) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname) +} + +// X25 RR. See RFC 1183, Section 3.1. +type X25 struct { + Hdr RR_Header + PSDNAddress string +} + +func (rr *X25) String() string { + return rr.Hdr.String() + rr.PSDNAddress +} + +// RT RR. See RFC 1183, Section 3.3. +type RT struct { + Hdr RR_Header + Preference uint16 + Host string `dns:"cdomain-name"` +} + +func (rr *RT) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host) +} + +// NS RR. See RFC 1035. +type NS struct { + Hdr RR_Header + Ns string `dns:"cdomain-name"` +} + +func (rr *NS) String() string { + return rr.Hdr.String() + sprintName(rr.Ns) +} + +// PTR RR. See RFC 1035. +type PTR struct { + Hdr RR_Header + Ptr string `dns:"cdomain-name"` +} + +func (rr *PTR) String() string { + return rr.Hdr.String() + sprintName(rr.Ptr) +} + +// RP RR. See RFC 1138, Section 2.2. +type RP struct { + Hdr RR_Header + Mbox string `dns:"domain-name"` + Txt string `dns:"domain-name"` +} + +func (rr *RP) String() string { + return rr.Hdr.String() + rr.Mbox + " " + sprintTxt([]string{rr.Txt}) +} + +// SOA RR. See RFC 1035. +type SOA struct { + Hdr RR_Header + Ns string `dns:"cdomain-name"` + Mbox string `dns:"cdomain-name"` + Serial uint32 + Refresh uint32 + Retry uint32 + Expire uint32 + Minttl uint32 +} + +func (rr *SOA) String() string { + return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) + + " " + strconv.FormatInt(int64(rr.Serial), 10) + + " " + strconv.FormatInt(int64(rr.Refresh), 10) + + " " + strconv.FormatInt(int64(rr.Retry), 10) + + " " + strconv.FormatInt(int64(rr.Expire), 10) + + " " + strconv.FormatInt(int64(rr.Minttl), 10) +} + +// TXT RR. See RFC 1035. +type TXT struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +func sprintName(s string) string { + src := []byte(s) + dst := make([]byte, 0, len(src)) + for i := 0; i < len(src); { + if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { + dst = append(dst, src[i:i+2]...) + i += 2 + } else { + b, n := nextByte(src, i) + if n == 0 { + i++ // dangling back slash + } else if b == '.' { + dst = append(dst, b) + } else { + dst = appendDomainNameByte(dst, b) + } + i += n + } + } + return string(dst) +} + +func sprintTxtOctet(s string) string { + src := []byte(s) + dst := make([]byte, 0, len(src)) + dst = append(dst, '"') + for i := 0; i < len(src); { + if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { + dst = append(dst, src[i:i+2]...) + i += 2 + } else { + b, n := nextByte(src, i) + if n == 0 { + i++ // dangling back slash + } else if b == '.' 
{ + dst = append(dst, b) + } else { + if b < ' ' || b > '~' { + dst = appendByte(dst, b) + } else { + dst = append(dst, b) + } + } + i += n + } + } + dst = append(dst, '"') + return string(dst) +} + +func sprintTxt(txt []string) string { + var out []byte + for i, s := range txt { + if i > 0 { + out = append(out, ` "`...) + } else { + out = append(out, '"') + } + bs := []byte(s) + for j := 0; j < len(bs); { + b, n := nextByte(bs, j) + if n == 0 { + break + } + out = appendTXTStringByte(out, b) + j += n + } + out = append(out, '"') + } + return string(out) +} + +func appendDomainNameByte(s []byte, b byte) []byte { + switch b { + case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape + return append(s, '\\', b) + } + return appendTXTStringByte(s, b) +} + +func appendTXTStringByte(s []byte, b byte) []byte { + switch b { + case '"', '\\': + return append(s, '\\', b) + } + if b < ' ' || b > '~' { + return appendByte(s, b) + } + return append(s, b) +} + +func appendByte(s []byte, b byte) []byte { + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + return s +} + +func nextByte(b []byte, offset int) (byte, int) { + if offset >= len(b) { + return 0, 0 + } + if b[offset] != '\\' { + // not an escape sequence + return b[offset], 1 + } + switch len(b) - offset { + case 1: // dangling escape + return 0, 0 + case 2, 3: // too short to be \ddd + default: // maybe \ddd + if isDigit(b[offset+1]) && isDigit(b[offset+2]) && isDigit(b[offset+3]) { + return dddToByte(b[offset+1:]), 4 + } + } + // not \ddd, just an RFC 1035 "quoted" character + return b[offset+1], 2 +} + +// SPF RR. See RFC 4408, Section 3.1.1. +type SPF struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +// AVC RR. See https://www.iana.org/assignments/dns-parameters/AVC/avc-completed-template. +type AVC struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *AVC) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +// SRV RR. See RFC 2782. +type SRV struct { + Hdr RR_Header + Priority uint16 + Weight uint16 + Port uint16 + Target string `dns:"domain-name"` +} + +func (rr *SRV) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Priority)) + " " + + strconv.Itoa(int(rr.Weight)) + " " + + strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target) +} + +// NAPTR RR. See RFC 2915. +type NAPTR struct { + Hdr RR_Header + Order uint16 + Preference uint16 + Flags string + Service string + Regexp string + Replacement string `dns:"domain-name"` +} + +func (rr *NAPTR) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Order)) + " " + + strconv.Itoa(int(rr.Preference)) + " " + + "\"" + rr.Flags + "\" " + + "\"" + rr.Service + "\" " + + "\"" + rr.Regexp + "\" " + + rr.Replacement +} + +// CERT RR. See RFC 4398. 
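// Editorial sketch (not part of the vendored source): a typical lookup for one
// of the types above, here SRV, using the exported Client API. The service
// name and resolver address are placeholders:
//
//    c := new(dns.Client)
//    m := new(dns.Msg)
//    m.SetQuestion("_xmpp-server._tcp.example.org.", dns.TypeSRV)
//    in, _, err := c.Exchange(m, "198.51.100.1:53")
//    if err != nil { ... }
//    for _, ans := range in.Answer {
//        if srv, ok := ans.(*dns.SRV); ok {
//            fmt.Println(srv.Target, srv.Port, srv.Priority, srv.Weight)
//        }
//    }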
+type CERT struct { + Hdr RR_Header + Type uint16 + KeyTag uint16 + Algorithm uint8 + Certificate string `dns:"base64"` +} + +func (rr *CERT) String() string { + var ( + ok bool + certtype, algorithm string + ) + if certtype, ok = CertTypeToString[rr.Type]; !ok { + certtype = strconv.Itoa(int(rr.Type)) + } + if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok { + algorithm = strconv.Itoa(int(rr.Algorithm)) + } + return rr.Hdr.String() + certtype + + " " + strconv.Itoa(int(rr.KeyTag)) + + " " + algorithm + + " " + rr.Certificate +} + +// DNAME RR. See RFC 2672. +type DNAME struct { + Hdr RR_Header + Target string `dns:"domain-name"` +} + +func (rr *DNAME) String() string { + return rr.Hdr.String() + sprintName(rr.Target) +} + +// A RR. See RFC 1035. +type A struct { + Hdr RR_Header + A net.IP `dns:"a"` +} + +func (rr *A) String() string { + if rr.A == nil { + return rr.Hdr.String() + } + return rr.Hdr.String() + rr.A.String() +} + +// AAAA RR. See RFC 3596. +type AAAA struct { + Hdr RR_Header + AAAA net.IP `dns:"aaaa"` +} + +func (rr *AAAA) String() string { + if rr.AAAA == nil { + return rr.Hdr.String() + } + return rr.Hdr.String() + rr.AAAA.String() +} + +// PX RR. See RFC 2163. +type PX struct { + Hdr RR_Header + Preference uint16 + Map822 string `dns:"domain-name"` + Mapx400 string `dns:"domain-name"` +} + +func (rr *PX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400) +} + +// GPOS RR. See RFC 1712. +type GPOS struct { + Hdr RR_Header + Longitude string + Latitude string + Altitude string +} + +func (rr *GPOS) String() string { + return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude +} + +// LOC RR. See RFC RFC 1876. +type LOC struct { + Hdr RR_Header + Version uint8 + Size uint8 + HorizPre uint8 + VertPre uint8 + Latitude uint32 + Longitude uint32 + Altitude uint32 +} + +// cmToM takes a cm value expressed in RFC1876 SIZE mantissa/exponent +// format and returns a string in m (two decimals for the cm) +func cmToM(m, e uint8) string { + if e < 2 { + if e == 1 { + m *= 10 + } + + return fmt.Sprintf("0.%02d", m) + } + + s := fmt.Sprintf("%d", m) + for e > 2 { + s += "0" + e-- + } + return s +} + +func (rr *LOC) String() string { + s := rr.Hdr.String() + + lat := rr.Latitude + ns := "N" + if lat > LOC_EQUATOR { + lat = lat - LOC_EQUATOR + } else { + ns = "S" + lat = LOC_EQUATOR - lat + } + h := lat / LOC_DEGREES + lat = lat % LOC_DEGREES + m := lat / LOC_HOURS + lat = lat % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lat) / 1000), ns) + + lon := rr.Longitude + ew := "E" + if lon > LOC_PRIMEMERIDIAN { + lon = lon - LOC_PRIMEMERIDIAN + } else { + ew = "W" + lon = LOC_PRIMEMERIDIAN - lon + } + h = lon / LOC_DEGREES + lon = lon % LOC_DEGREES + m = lon / LOC_HOURS + lon = lon % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lon) / 1000), ew) + + var alt = float64(rr.Altitude) / 100 + alt -= LOC_ALTITUDEBASE + if rr.Altitude%100 != 0 { + s += fmt.Sprintf("%.2fm ", alt) + } else { + s += fmt.Sprintf("%.0fm ", alt) + } + + s += cmToM((rr.Size&0xf0)>>4, rr.Size&0x0f) + "m " + s += cmToM((rr.HorizPre&0xf0)>>4, rr.HorizPre&0x0f) + "m " + s += cmToM((rr.VertPre&0xf0)>>4, rr.VertPre&0x0f) + "m" + + return s +} + +// SIG RR. See RFC 2535. The SIG RR is identical to RRSIG and nowadays only used for SIG(0), See RFC 2931. +type SIG struct { + RRSIG +} + +// RRSIG RR. See RFC 4034 and RFC 3755. 
+type RRSIG struct { + Hdr RR_Header + TypeCovered uint16 + Algorithm uint8 + Labels uint8 + OrigTtl uint32 + Expiration uint32 + Inception uint32 + KeyTag uint16 + SignerName string `dns:"domain-name"` + Signature string `dns:"base64"` +} + +func (rr *RRSIG) String() string { + s := rr.Hdr.String() + s += Type(rr.TypeCovered).String() + s += " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.Labels)) + + " " + strconv.FormatInt(int64(rr.OrigTtl), 10) + + " " + TimeToString(rr.Expiration) + + " " + TimeToString(rr.Inception) + + " " + strconv.Itoa(int(rr.KeyTag)) + + " " + sprintName(rr.SignerName) + + " " + rr.Signature + return s +} + +// NSEC RR. See RFC 4034 and RFC 3755. +type NSEC struct { + Hdr RR_Header + NextDomain string `dns:"domain-name"` + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *NSEC) String() string { + s := rr.Hdr.String() + sprintName(rr.NextDomain) + for i := 0; i < len(rr.TypeBitMap); i++ { + s += " " + Type(rr.TypeBitMap[i]).String() + } + return s +} + +func (rr *NSEC) len() int { + l := rr.Hdr.len() + len(rr.NextDomain) + 1 + lastwindow := uint32(2 ^ 32 + 1) + for _, t := range rr.TypeBitMap { + window := t / 256 + if uint32(window) != lastwindow { + l += 1 + 32 + } + lastwindow = uint32(window) + } + return l +} + +// DLV RR. See RFC 4431. +type DLV struct{ DS } + +// CDS RR. See RFC 7344. +type CDS struct{ DS } + +// DS RR. See RFC 4034 and RFC 3658. +type DS struct { + Hdr RR_Header + KeyTag uint16 + Algorithm uint8 + DigestType uint8 + Digest string `dns:"hex"` +} + +func (rr *DS) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.DigestType)) + + " " + strings.ToUpper(rr.Digest) +} + +// KX RR. See RFC 2230. +type KX struct { + Hdr RR_Header + Preference uint16 + Exchanger string `dns:"domain-name"` +} + +func (rr *KX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + + " " + sprintName(rr.Exchanger) +} + +// TA RR. See http://www.watson.org/~weiler/INI1999-19.pdf. +type TA struct { + Hdr RR_Header + KeyTag uint16 + Algorithm uint8 + DigestType uint8 + Digest string `dns:"hex"` +} + +func (rr *TA) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.DigestType)) + + " " + strings.ToUpper(rr.Digest) +} + +// TALINK RR. See https://www.iana.org/assignments/dns-parameters/TALINK/talink-completed-template. +type TALINK struct { + Hdr RR_Header + PreviousName string `dns:"domain-name"` + NextName string `dns:"domain-name"` +} + +func (rr *TALINK) String() string { + return rr.Hdr.String() + + sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) +} + +// SSHFP RR. See RFC RFC 4255. +type SSHFP struct { + Hdr RR_Header + Algorithm uint8 + Type uint8 + FingerPrint string `dns:"hex"` +} + +func (rr *SSHFP) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.Type)) + + " " + strings.ToUpper(rr.FingerPrint) +} + +// KEY RR. See RFC RFC 2535. +type KEY struct { + DNSKEY +} + +// CDNSKEY RR. See RFC 7344. +type CDNSKEY struct { + DNSKEY +} + +// DNSKEY RR. See RFC 4034 and RFC 3755. 
+type DNSKEY struct { + Hdr RR_Header + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` +} + +func (rr *DNSKEY) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Protocol)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + rr.PublicKey +} + +// RKEY RR. See https://www.iana.org/assignments/dns-parameters/RKEY/rkey-completed-template. +type RKEY struct { + Hdr RR_Header + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` +} + +func (rr *RKEY) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Protocol)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + rr.PublicKey +} + +// NSAPPTR RR. See RFC 1348. +type NSAPPTR struct { + Hdr RR_Header + Ptr string `dns:"domain-name"` +} + +func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) } + +// NSEC3 RR. See RFC 5155. +type NSEC3 struct { + Hdr RR_Header + Hash uint8 + Flags uint8 + Iterations uint16 + SaltLength uint8 + Salt string `dns:"size-hex:SaltLength"` + HashLength uint8 + NextDomain string `dns:"size-base32:HashLength"` + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *NSEC3) String() string { + s := rr.Hdr.String() + s += strconv.Itoa(int(rr.Hash)) + + " " + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Iterations)) + + " " + saltToString(rr.Salt) + + " " + rr.NextDomain + for i := 0; i < len(rr.TypeBitMap); i++ { + s += " " + Type(rr.TypeBitMap[i]).String() + } + return s +} + +func (rr *NSEC3) len() int { + l := rr.Hdr.len() + 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 + lastwindow := uint32(2 ^ 32 + 1) + for _, t := range rr.TypeBitMap { + window := t / 256 + if uint32(window) != lastwindow { + l += 1 + 32 + } + lastwindow = uint32(window) + } + return l +} + +// NSEC3PARAM RR. See RFC 5155. +type NSEC3PARAM struct { + Hdr RR_Header + Hash uint8 + Flags uint8 + Iterations uint16 + SaltLength uint8 + Salt string `dns:"size-hex:SaltLength"` +} + +func (rr *NSEC3PARAM) String() string { + s := rr.Hdr.String() + s += strconv.Itoa(int(rr.Hash)) + + " " + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Iterations)) + + " " + saltToString(rr.Salt) + return s +} + +// TKEY RR. See RFC 2930. +type TKEY struct { + Hdr RR_Header + Algorithm string `dns:"domain-name"` + Inception uint32 + Expiration uint32 + Mode uint16 + Error uint16 + KeySize uint16 + Key string `dns:"size-hex:KeySize"` + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// TKEY has no official presentation format, but this will suffice. +func (rr *TKEY) String() string { + s := "\n;; TKEY PSEUDOSECTION:\n" + s += rr.Hdr.String() + " " + rr.Algorithm + " " + + strconv.Itoa(int(rr.KeySize)) + " " + rr.Key + " " + + strconv.Itoa(int(rr.OtherLen)) + " " + rr.OtherData + return s +} + +// RFC3597 represents an unknown/generic RR. See RFC 3597. +type RFC3597 struct { + Hdr RR_Header + Rdata string `dns:"hex"` +} + +func (rr *RFC3597) String() string { + // Let's call it a hack + s := rfc3597Header(rr.Hdr) + + s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata + return s +} + +func rfc3597Header(h RR_Header) string { + var s string + + s += sprintName(h.Name) + "\t" + s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" + s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t" + s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t" + return s +} + +// URI RR. See RFC 7553. 
+type URI struct { + Hdr RR_Header + Priority uint16 + Weight uint16 + Target string `dns:"octet"` +} + +func (rr *URI) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + + " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target) +} + +// DHCID RR. See RFC 4701. +type DHCID struct { + Hdr RR_Header + Digest string `dns:"base64"` +} + +func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest } + +// TLSA RR. See RFC 6698. +type TLSA struct { + Hdr RR_Header + Usage uint8 + Selector uint8 + MatchingType uint8 + Certificate string `dns:"hex"` +} + +func (rr *TLSA) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Usage)) + + " " + strconv.Itoa(int(rr.Selector)) + + " " + strconv.Itoa(int(rr.MatchingType)) + + " " + rr.Certificate +} + +// SMIMEA RR. See RFC 8162. +type SMIMEA struct { + Hdr RR_Header + Usage uint8 + Selector uint8 + MatchingType uint8 + Certificate string `dns:"hex"` +} + +func (rr *SMIMEA) String() string { + s := rr.Hdr.String() + + strconv.Itoa(int(rr.Usage)) + + " " + strconv.Itoa(int(rr.Selector)) + + " " + strconv.Itoa(int(rr.MatchingType)) + + // Every Nth char needs a space on this output. If we output + // this as one giant line, we can't read it can in because in some cases + // the cert length overflows scan.maxTok (2048). + sx := splitN(rr.Certificate, 1024) // conservative value here + s += " " + strings.Join(sx, " ") + return s +} + +// HIP RR. See RFC 8005. +type HIP struct { + Hdr RR_Header + HitLength uint8 + PublicKeyAlgorithm uint8 + PublicKeyLength uint16 + Hit string `dns:"size-hex:HitLength"` + PublicKey string `dns:"size-base64:PublicKeyLength"` + RendezvousServers []string `dns:"domain-name"` +} + +func (rr *HIP) String() string { + s := rr.Hdr.String() + + strconv.Itoa(int(rr.PublicKeyAlgorithm)) + + " " + rr.Hit + + " " + rr.PublicKey + for _, d := range rr.RendezvousServers { + s += " " + sprintName(d) + } + return s +} + +// NINFO RR. See https://www.iana.org/assignments/dns-parameters/NINFO/ninfo-completed-template. +type NINFO struct { + Hdr RR_Header + ZSData []string `dns:"txt"` +} + +func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) } + +// NID RR. See RFC RFC 6742. +type NID struct { + Hdr RR_Header + Preference uint16 + NodeID uint64 +} + +func (rr *NID) String() string { + s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + node := fmt.Sprintf("%0.16x", rr.NodeID) + s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] + return s +} + +// L32 RR, See RFC 6742. +type L32 struct { + Hdr RR_Header + Preference uint16 + Locator32 net.IP `dns:"a"` +} + +func (rr *L32) String() string { + if rr.Locator32 == nil { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + } + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + + " " + rr.Locator32.String() +} + +// L64 RR, See RFC 6742. +type L64 struct { + Hdr RR_Header + Preference uint16 + Locator64 uint64 +} + +func (rr *L64) String() string { + s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + node := fmt.Sprintf("%0.16X", rr.Locator64) + s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] + return s +} + +// LP RR. See RFC 6742. +type LP struct { + Hdr RR_Header + Preference uint16 + Fqdn string `dns:"domain-name"` +} + +func (rr *LP) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn) +} + +// EUI48 RR. See RFC 7043. 
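// Editorial sketch (not part of the vendored source): TLSAName (defined in
// tlsa.go above) derives the owner name for the TLSA record of a TLS service,
// and TLSA.Verify checks a presented certificate against a published record.
// The host name, port and the rr/cert variables are placeholders:
//
//    owner, err := dns.TLSAName("www.example.org.", "443", "tcp")
//    if err != nil { ... }
//    // owner == "_443._tcp.www.example.org."
//
//    // rr is a *dns.TLSA obtained by querying owner; cert is the
//    // *x509.Certificate presented by the server during the TLS handshake.
//    if err := rr.Verify(cert); err != nil {
//        // certificate does not match the DANE record
//    }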
+type EUI48 struct { + Hdr RR_Header + Address uint64 `dns:"uint48"` +} + +func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) } + +// EUI64 RR. See RFC 7043. +type EUI64 struct { + Hdr RR_Header + Address uint64 +} + +func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) } + +// CAA RR. See RFC 6844. +type CAA struct { + Hdr RR_Header + Flag uint8 + Tag string + Value string `dns:"octet"` +} + +func (rr *CAA) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value) +} + +// UID RR. Deprecated, IANA-Reserved. +type UID struct { + Hdr RR_Header + Uid uint32 +} + +func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) } + +// GID RR. Deprecated, IANA-Reserved. +type GID struct { + Hdr RR_Header + Gid uint32 +} + +func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) } + +// UINFO RR. Deprecated, IANA-Reserved. +type UINFO struct { + Hdr RR_Header + Uinfo string +} + +func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) } + +// EID RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt. +type EID struct { + Hdr RR_Header + Endpoint string `dns:"hex"` +} + +func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) } + +// NIMLOC RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt. +type NIMLOC struct { + Hdr RR_Header + Locator string `dns:"hex"` +} + +func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) } + +// OPENPGPKEY RR. See RFC 7929. +type OPENPGPKEY struct { + Hdr RR_Header + PublicKey string `dns:"base64"` +} + +func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey } + +// CSYNC RR. See RFC 7477. +type CSYNC struct { + Hdr RR_Header + Serial uint32 + Flags uint16 + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *CSYNC) String() string { + s := rr.Hdr.String() + strconv.FormatInt(int64(rr.Serial), 10) + " " + strconv.Itoa(int(rr.Flags)) + + for i := 0; i < len(rr.TypeBitMap); i++ { + s += " " + Type(rr.TypeBitMap[i]).String() + } + return s +} + +func (rr *CSYNC) len() int { + l := rr.Hdr.len() + 4 + 2 + lastwindow := uint32(2 ^ 32 + 1) + for _, t := range rr.TypeBitMap { + window := t / 256 + if uint32(window) != lastwindow { + l += 1 + 32 + } + lastwindow = uint32(window) + } + return l +} + +// TimeToString translates the RRSIG's incep. and expir. times to the +// string representation used when printing the record. +// It takes serial arithmetic (RFC 1982) into account. +func TimeToString(t uint32) string { + mod := ((int64(t) - time.Now().Unix()) / year68) - 1 + if mod < 0 { + mod = 0 + } + ti := time.Unix(int64(t)-(mod*year68), 0).UTC() + return ti.Format("20060102150405") +} + +// StringToTime translates the RRSIG's incep. and expir. times from +// string values like "20110403154150" to an 32 bit integer. +// It takes serial arithmetic (RFC 1982) into account. +func StringToTime(s string) (uint32, error) { + t, err := time.Parse("20060102150405", s) + if err != nil { + return 0, err + } + mod := (t.Unix() / year68) - 1 + if mod < 0 { + mod = 0 + } + return uint32(t.Unix() - (mod * year68)), nil +} + +// saltToString converts a NSECX salt to uppercase and returns "-" when it is empty. 
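// Editorial sketch (not part of the vendored source): TimeToString and
// StringToTime above convert between the 32-bit timestamps carried in RRSIG
// records and the YYYYMMDDHHmmSS presentation format, taking RFC 1982 serial
// arithmetic into account:
//
//    exp := uint32(time.Now().Add(30 * 24 * time.Hour).Unix())
//    s := dns.TimeToString(exp) // e.g. "20240615120000"
//    back, err := dns.StringToTime(s)
//    if err != nil { ... }
//    fmt.Println(back == exp) // true for dates within the current 136-year window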
+func saltToString(s string) string { + if len(s) == 0 { + return "-" + } + return strings.ToUpper(s) +} + +func euiToString(eui uint64, bits int) (hex string) { + switch bits { + case 64: + hex = fmt.Sprintf("%16.16x", eui) + hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + + "-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16] + case 48: + hex = fmt.Sprintf("%12.12x", eui) + hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + + "-" + hex[8:10] + "-" + hex[10:12] + } + return +} + +// copyIP returns a copy of ip. +func copyIP(ip net.IP) net.IP { + p := make(net.IP, len(ip)) + copy(p, ip) + return p +} + +// SplitN splits a string into N sized string chunks. +// This might become an exported function once. +func splitN(s string, n int) []string { + if len(s) < n { + return []string{s} + } + sx := []string{} + p, i := 0, n + for { + if i <= len(s) { + sx = append(sx, s[p:i]) + } else { + sx = append(sx, s[p:]) + break + + } + p, i = p+n, i+n + } + + return sx +} diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go new file mode 100644 index 00000000000..a4826ee2ffd --- /dev/null +++ b/vendor/github.com/miekg/dns/udp.go @@ -0,0 +1,102 @@ +// +build !windows + +package dns + +import ( + "net" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +// This is the required size of the OOB buffer to pass to ReadMsgUDP. +var udpOOBSize = func() int { + // We can't know whether we'll get an IPv4 control message or an + // IPv6 control message ahead of time. To get around this, we size + // the buffer equal to the largest of the two. + + oob4 := ipv4.NewControlMessage(ipv4.FlagDst | ipv4.FlagInterface) + oob6 := ipv6.NewControlMessage(ipv6.FlagDst | ipv6.FlagInterface) + + if len(oob4) > len(oob6) { + return len(oob4) + } + + return len(oob6) +}() + +// SessionUDP holds the remote address and the associated +// out-of-band data. +type SessionUDP struct { + raddr *net.UDPAddr + context []byte +} + +// RemoteAddr returns the remote network address. +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + oob := make([]byte, udpOOBSize) + n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob) + if err != nil { + return n, nil, err + } + return n, &SessionUDP{raddr, oob[:oobn]}, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + oob := correctSource(session.context) + n, _, err := conn.WriteMsgUDP(b, oob, session.raddr) + return n, err +} + +func setUDPSocketOptions(conn *net.UDPConn) error { + // Try setting the flags for both families and ignore the errors unless they + // both error. + err6 := ipv6.NewPacketConn(conn).SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true) + err4 := ipv4.NewPacketConn(conn).SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true) + if err6 != nil && err4 != nil { + return err4 + } + return nil +} + +// parseDstFromOOB takes oob data and returns the destination IP. +func parseDstFromOOB(oob []byte) net.IP { + // Start with IPv6 and then fallback to IPv4 + // TODO(fastest963): Figure out a way to prefer one or the other. Looking at + // the lvl of the header for a 0 or 41 isn't cross-platform. 
+ cm6 := new(ipv6.ControlMessage) + if cm6.Parse(oob) == nil && cm6.Dst != nil { + return cm6.Dst + } + cm4 := new(ipv4.ControlMessage) + if cm4.Parse(oob) == nil && cm4.Dst != nil { + return cm4.Dst + } + return nil +} + +// correctSource takes oob data and returns new oob data with the Src equal to the Dst +func correctSource(oob []byte) []byte { + dst := parseDstFromOOB(oob) + if dst == nil { + return nil + } + // If the dst is definitely an IPv6, then use ipv6's ControlMessage to + // respond otherwise use ipv4's because ipv6's marshal ignores ipv4 + // addresses. + if dst.To4() == nil { + cm := new(ipv6.ControlMessage) + cm.Src = dst + oob = cm.Marshal() + } else { + cm := new(ipv4.ControlMessage) + cm.Src = dst + oob = cm.Marshal() + } + return oob +} diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go new file mode 100644 index 00000000000..6778c3c6cfe --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package dns + +import "net" + +// SessionUDP holds the remote address +type SessionUDP struct { + raddr *net.UDPAddr +} + +// RemoteAddr returns the remote network address. +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. +// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP. +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + n, raddr, err := conn.ReadFrom(b) + if err != nil { + return n, nil, err + } + session := &SessionUDP{raddr.(*net.UDPAddr)} + return n, session, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. +// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP. +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + n, err := conn.WriteTo(b, session.raddr) + return n, err +} + +// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods +// use the standard method in udp.go for these. +func setUDPSocketOptions(*net.UDPConn) error { return nil } +func parseDstFromOOB([]byte, net.IP) net.IP { return nil } diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go new file mode 100644 index 00000000000..e90c5c968ec --- /dev/null +++ b/vendor/github.com/miekg/dns/update.go @@ -0,0 +1,106 @@ +package dns + +// NameUsed sets the RRs in the prereq section to +// "Name is in use" RRs. RFC 2136 section 2.4.4. +func (u *Msg) NameUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) + } +} + +// NameNotUsed sets the RRs in the prereq section to +// "Name is in not use" RRs. RFC 2136 section 2.4.5. +func (u *Msg) NameNotUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}}) + } +} + +// Used sets the RRs in the prereq section to +// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2. 
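// Editorial sketch (not part of the vendored source): the prerequisite and
// update helpers in this file are used together with Msg.SetUpdate and a
// Client. A minimal add-a-record update, with zone, record and server as
// placeholders (a real deployment would normally also attach TSIG):
//
//    rr, err := dns.NewRR("host.example.org. 300 IN A 192.0.2.10")
//    if err != nil { ... }
//
//    m := new(dns.Msg)
//    m.SetUpdate("example.org.") // zone the update applies to
//    m.NameNotUsed([]dns.RR{rr}) // prerequisite: the name must not exist yet
//    m.Insert([]dns.RR{rr})      // update: add the record
//
//    c := new(dns.Client)
//    in, _, err := c.Exchange(m, "198.51.100.1:53")
//    if err == nil && in.Rcode != dns.RcodeSuccess {
//        // the server refused the update
//    }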
+func (u *Msg) Used(rr []RR) { + if len(u.Question) == 0 { + panic("dns: empty question section") + } + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = u.Question[0].Qclass + u.Answer = append(u.Answer, r) + } +} + +// RRsetUsed sets the RRs in the prereq section to +// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1. +func (u *Msg) RRsetUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) + } +} + +// RRsetNotUsed sets the RRs in the prereq section to +// "RRset does not exist" RRs. RFC 2136 section 2.4.3. +func (u *Msg) RRsetNotUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassNONE}}) + } +} + +// Insert creates a dynamic update packet that adds an complete RRset, see RFC 2136 section 2.5.1. +func (u *Msg) Insert(rr []RR) { + if len(u.Question) == 0 { + panic("dns: empty question section") + } + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = u.Question[0].Qclass + u.Ns = append(u.Ns, r) + } +} + +// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2. +func (u *Msg) RemoveRRset(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) + } +} + +// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3 +func (u *Msg) RemoveName(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) + } +} + +// Remove creates a dynamic update packet deletes RR from a RRSset, see RFC 2136 section 2.5.4 +func (u *Msg) Remove(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = ClassNONE + r.Header().Ttl = 0 + u.Ns = append(u.Ns, r) + } +} diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go new file mode 100644 index 00000000000..dcc84e4a7dc --- /dev/null +++ b/vendor/github.com/miekg/dns/version.go @@ -0,0 +1,15 @@ +package dns + +import "fmt" + +// Version is current version of this library. +var Version = V{1, 0, 8} + +// V holds the version of this library. +type V struct { + Major, Minor, Patch int +} + +func (v V) String() string { + return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch) +} diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go new file mode 100644 index 00000000000..5d0ff5c8a27 --- /dev/null +++ b/vendor/github.com/miekg/dns/xfr.go @@ -0,0 +1,260 @@ +package dns + +import ( + "fmt" + "time" +) + +// Envelope is used when doing a zone transfer with a remote server. +type Envelope struct { + RR []RR // The set of RRs in the answer section of the xfr reply message. + Error error // If something went wrong, this contains the error. +} + +// A Transfer defines parameters that are used during a zone transfer. 
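// Editorial sketch (not part of the vendored source): the usual way to run an
// incoming AXFR with the Transfer type defined below. Zone and server are
// placeholders; for a TSIG-protected transfer also set Transfer.TsigSecret and
// call Msg.SetTsig on the request:
//
//    m := new(dns.Msg)
//    m.SetAxfr("example.org.")
//
//    tr := new(dns.Transfer)
//    ch, err := tr.In(m, "198.51.100.1:53")
//    if err != nil { ... }
//    for env := range ch {
//        if env.Error != nil {
//            log.Println(env.Error)
//            break
//        }
//        for _, rr := range env.RR {
//            fmt.Println(rr)
//        }
//    }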
+type Transfer struct { + *Conn + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds + TsigSecret map[string]string // Secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) + tsigTimersOnly bool +} + +// Think we need to away to stop the transfer + +// In performs an incoming transfer with the server in a. +// If you would like to set the source IP, or some other attribute +// of a Dialer for a Transfer, you can do so by specifying the attributes +// in the Transfer.Conn: +// +// d := net.Dialer{LocalAddr: transfer_source} +// con, err := d.Dial("tcp", master) +// dnscon := &dns.Conn{Conn:con} +// transfer = &dns.Transfer{Conn: dnscon} +// channel, err := transfer.In(message, master) +// +func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { + timeout := dnsTimeout + if t.DialTimeout != 0 { + timeout = t.DialTimeout + } + if t.Conn == nil { + t.Conn, err = DialTimeout("tcp", a, timeout) + if err != nil { + return nil, err + } + } + if err := t.WriteMsg(q); err != nil { + return nil, err + } + env = make(chan *Envelope) + go func() { + if q.Question[0].Qtype == TypeAXFR { + go t.inAxfr(q, env) + return + } + if q.Question[0].Qtype == TypeIXFR { + go t.inIxfr(q, env) + return + } + }() + return env, nil +} + +func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) { + first := true + defer t.Close() + defer close(c) + timeout := dnsTimeout + if t.ReadTimeout != 0 { + timeout = t.ReadTimeout + } + for { + t.Conn.SetReadDeadline(time.Now().Add(timeout)) + in, err := t.ReadMsg() + if err != nil { + c <- &Envelope{nil, err} + return + } + if q.Id != in.Id { + c <- &Envelope{in.Answer, ErrId} + return + } + if first { + if in.Rcode != RcodeSuccess { + c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}} + return + } + if !isSOAFirst(in) { + c <- &Envelope{in.Answer, ErrSoa} + return + } + first = !first + // only one answer that is SOA, receive more + if len(in.Answer) == 1 { + t.tsigTimersOnly = true + c <- &Envelope{in.Answer, nil} + continue + } + } + + if !first { + t.tsigTimersOnly = true // Subsequent envelopes use this. 
+ if isSOALast(in) { + c <- &Envelope{in.Answer, nil} + return + } + c <- &Envelope{in.Answer, nil} + } + } +} + +func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { + serial := uint32(0) // The first serial seen is the current server serial + axfr := true + n := 0 + qser := q.Ns[0].(*SOA).Serial + defer t.Close() + defer close(c) + timeout := dnsTimeout + if t.ReadTimeout != 0 { + timeout = t.ReadTimeout + } + for { + t.SetReadDeadline(time.Now().Add(timeout)) + in, err := t.ReadMsg() + if err != nil { + c <- &Envelope{nil, err} + return + } + if q.Id != in.Id { + c <- &Envelope{in.Answer, ErrId} + return + } + if in.Rcode != RcodeSuccess { + c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}} + return + } + if n == 0 { + // Check if the returned answer is ok + if !isSOAFirst(in) { + c <- &Envelope{in.Answer, ErrSoa} + return + } + // This serial is important + serial = in.Answer[0].(*SOA).Serial + // Check if there are no changes in zone + if qser >= serial { + c <- &Envelope{in.Answer, nil} + return + } + } + // Now we need to check each message for SOA records, to see what we need to do + t.tsigTimersOnly = true + for _, rr := range in.Answer { + if v, ok := rr.(*SOA); ok { + if v.Serial == serial { + n++ + // quit if it's a full axfr or the the servers' SOA is repeated the third time + if axfr && n == 2 || n == 3 { + c <- &Envelope{in.Answer, nil} + return + } + } else if axfr { + // it's an ixfr + axfr = false + } + } + } + c <- &Envelope{in.Answer, nil} + } +} + +// Out performs an outgoing transfer with the client connecting in w. +// Basic use pattern: +// +// ch := make(chan *dns.Envelope) +// tr := new(dns.Transfer) +// go tr.Out(w, r, ch) +// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}} +// close(ch) +// w.Hijack() +// // w.Close() // Client closes connection +// +// The server is responsible for sending the correct sequence of RRs through the +// channel ch. +func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error { + for x := range ch { + r := new(Msg) + // Compress? + r.SetReply(q) + r.Authoritative = true + // assume it fits TODO(miek): fix + r.Answer = append(r.Answer, x.RR...) + if err := w.WriteMsg(r); err != nil { + return err + } + } + w.TsigTimersOnly(true) + return nil +} + +// ReadMsg reads a message from the transfer connection t. +func (t *Transfer) ReadMsg() (*Msg, error) { + m := new(Msg) + p := make([]byte, MaxMsgSize) + n, err := t.Read(p) + if err != nil && n == 0 { + return nil, err + } + p = p[:n] + if err := m.Unpack(p); err != nil { + return nil, err + } + if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { + if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { + return m, ErrSecret + } + // Need to work on the original message p, as that was used to calculate the tsig. + err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) + t.tsigRequestMAC = ts.MAC + } + return m, err +} + +// WriteMsg writes a message through the transfer connection t. 
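As a point of reference, the Transfer type implemented above is typically driven from the client side as in the sketch below for a full zone transfer (AXFR). The zone and master address are illustrative placeholders; a production caller would also configure TSIG secrets and timeouts on the Transfer as needed:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Build an AXFR question for the zone (placeholder name).
	m := new(dns.Msg)
	m.SetAxfr("example.org.")

	// In streams the zone back as envelopes of RRs over a channel.
	t := new(dns.Transfer)
	env, err := t.In(m, "192.0.2.53:53") // placeholder master address
	if err != nil {
		log.Fatal(err)
	}
	for e := range env {
		if e.Error != nil {
			log.Fatal(e.Error)
		}
		for _, rr := range e.RR {
			fmt.Println(rr)
		}
	}
}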
+func (t *Transfer) WriteMsg(m *Msg) (err error) { + var out []byte + if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { + if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { + return ErrSecret + } + out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) + } else { + out, err = m.Pack() + } + if err != nil { + return err + } + if _, err = t.Write(out); err != nil { + return err + } + return nil +} + +func isSOAFirst(in *Msg) bool { + if len(in.Answer) > 0 { + return in.Answer[0].Header().Rrtype == TypeSOA + } + return false +} + +func isSOALast(in *Msg) bool { + if len(in.Answer) > 0 { + return in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA + } + return false +} + +const errXFR = "bad xfr rcode: %d" diff --git a/vendor/github.com/miekg/dns/zcompress.go b/vendor/github.com/miekg/dns/zcompress.go new file mode 100644 index 00000000000..a2c09dd483b --- /dev/null +++ b/vendor/github.com/miekg/dns/zcompress.go @@ -0,0 +1,155 @@ +// Code generated by "go run compress_generate.go"; DO NOT EDIT. + +package dns + +func compressionLenHelperType(c map[string]int, r RR, initLen int) int { + currentLen := initLen + switch x := r.(type) { + case *AFSDB: + currentLen -= len(x.Hostname) + 1 + currentLen += compressionLenHelper(c, x.Hostname, currentLen) + case *CNAME: + currentLen -= len(x.Target) + 1 + currentLen += compressionLenHelper(c, x.Target, currentLen) + case *DNAME: + currentLen -= len(x.Target) + 1 + currentLen += compressionLenHelper(c, x.Target, currentLen) + case *HIP: + for i := range x.RendezvousServers { + currentLen -= len(x.RendezvousServers[i]) + 1 + } + for i := range x.RendezvousServers { + currentLen += compressionLenHelper(c, x.RendezvousServers[i], currentLen) + } + case *KX: + currentLen -= len(x.Exchanger) + 1 + currentLen += compressionLenHelper(c, x.Exchanger, currentLen) + case *LP: + currentLen -= len(x.Fqdn) + 1 + currentLen += compressionLenHelper(c, x.Fqdn, currentLen) + case *MB: + currentLen -= len(x.Mb) + 1 + currentLen += compressionLenHelper(c, x.Mb, currentLen) + case *MD: + currentLen -= len(x.Md) + 1 + currentLen += compressionLenHelper(c, x.Md, currentLen) + case *MF: + currentLen -= len(x.Mf) + 1 + currentLen += compressionLenHelper(c, x.Mf, currentLen) + case *MG: + currentLen -= len(x.Mg) + 1 + currentLen += compressionLenHelper(c, x.Mg, currentLen) + case *MINFO: + currentLen -= len(x.Rmail) + 1 + currentLen += compressionLenHelper(c, x.Rmail, currentLen) + currentLen -= len(x.Email) + 1 + currentLen += compressionLenHelper(c, x.Email, currentLen) + case *MR: + currentLen -= len(x.Mr) + 1 + currentLen += compressionLenHelper(c, x.Mr, currentLen) + case *MX: + currentLen -= len(x.Mx) + 1 + currentLen += compressionLenHelper(c, x.Mx, currentLen) + case *NAPTR: + currentLen -= len(x.Replacement) + 1 + currentLen += compressionLenHelper(c, x.Replacement, currentLen) + case *NS: + currentLen -= len(x.Ns) + 1 + currentLen += compressionLenHelper(c, x.Ns, currentLen) + case *NSAPPTR: + currentLen -= len(x.Ptr) + 1 + currentLen += compressionLenHelper(c, x.Ptr, currentLen) + case *NSEC: + currentLen -= len(x.NextDomain) + 1 + currentLen += compressionLenHelper(c, x.NextDomain, currentLen) + case *PTR: + currentLen -= len(x.Ptr) + 1 + currentLen += compressionLenHelper(c, x.Ptr, currentLen) + case *PX: + currentLen -= len(x.Map822) + 1 + currentLen += compressionLenHelper(c, x.Map822, currentLen) + currentLen -= len(x.Mapx400) + 1 + currentLen += compressionLenHelper(c, x.Mapx400, currentLen) + case *RP: + 
currentLen -= len(x.Mbox) + 1 + currentLen += compressionLenHelper(c, x.Mbox, currentLen) + currentLen -= len(x.Txt) + 1 + currentLen += compressionLenHelper(c, x.Txt, currentLen) + case *RRSIG: + currentLen -= len(x.SignerName) + 1 + currentLen += compressionLenHelper(c, x.SignerName, currentLen) + case *RT: + currentLen -= len(x.Host) + 1 + currentLen += compressionLenHelper(c, x.Host, currentLen) + case *SIG: + currentLen -= len(x.SignerName) + 1 + currentLen += compressionLenHelper(c, x.SignerName, currentLen) + case *SOA: + currentLen -= len(x.Ns) + 1 + currentLen += compressionLenHelper(c, x.Ns, currentLen) + currentLen -= len(x.Mbox) + 1 + currentLen += compressionLenHelper(c, x.Mbox, currentLen) + case *SRV: + currentLen -= len(x.Target) + 1 + currentLen += compressionLenHelper(c, x.Target, currentLen) + case *TALINK: + currentLen -= len(x.PreviousName) + 1 + currentLen += compressionLenHelper(c, x.PreviousName, currentLen) + currentLen -= len(x.NextName) + 1 + currentLen += compressionLenHelper(c, x.NextName, currentLen) + case *TKEY: + currentLen -= len(x.Algorithm) + 1 + currentLen += compressionLenHelper(c, x.Algorithm, currentLen) + case *TSIG: + currentLen -= len(x.Algorithm) + 1 + currentLen += compressionLenHelper(c, x.Algorithm, currentLen) + } + return currentLen - initLen +} + +func compressionLenSearchType(c map[string]int, r RR) (int, bool, int) { + switch x := r.(type) { + case *AFSDB: + k1, ok1, sz1 := compressionLenSearch(c, x.Hostname) + return k1, ok1, sz1 + case *CNAME: + k1, ok1, sz1 := compressionLenSearch(c, x.Target) + return k1, ok1, sz1 + case *MB: + k1, ok1, sz1 := compressionLenSearch(c, x.Mb) + return k1, ok1, sz1 + case *MD: + k1, ok1, sz1 := compressionLenSearch(c, x.Md) + return k1, ok1, sz1 + case *MF: + k1, ok1, sz1 := compressionLenSearch(c, x.Mf) + return k1, ok1, sz1 + case *MG: + k1, ok1, sz1 := compressionLenSearch(c, x.Mg) + return k1, ok1, sz1 + case *MINFO: + k1, ok1, sz1 := compressionLenSearch(c, x.Rmail) + k2, ok2, sz2 := compressionLenSearch(c, x.Email) + return k1 + k2, ok1 && ok2, sz1 + sz2 + case *MR: + k1, ok1, sz1 := compressionLenSearch(c, x.Mr) + return k1, ok1, sz1 + case *MX: + k1, ok1, sz1 := compressionLenSearch(c, x.Mx) + return k1, ok1, sz1 + case *NS: + k1, ok1, sz1 := compressionLenSearch(c, x.Ns) + return k1, ok1, sz1 + case *PTR: + k1, ok1, sz1 := compressionLenSearch(c, x.Ptr) + return k1, ok1, sz1 + case *RT: + k1, ok1, sz1 := compressionLenSearch(c, x.Host) + return k1, ok1, sz1 + case *SOA: + k1, ok1, sz1 := compressionLenSearch(c, x.Ns) + k2, ok2, sz2 := compressionLenSearch(c, x.Mbox) + return k1 + k2, ok1 && ok2, sz1 + sz2 + } + return 0, false, 0 +} diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go new file mode 100644 index 00000000000..0d1f6f4daad --- /dev/null +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -0,0 +1,3615 @@ +// Code generated by "go run msg_generate.go"; DO NOT EDIT. 
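The generated per-type pack and unpack routines in the file that follows are reached through exported entry points such as PackRR and UnpackRR. A minimal round-trip sketch (the record text is an arbitrary example, not data from this change) would be:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Parse a record from presentation format; the content is arbitrary.
	rr, err := dns.NewRR("www.example.org. 300 IN MX 10 mail.example.org.")
	if err != nil {
		log.Fatal(err)
	}

	// PackRR invokes the generated pack() method for the record's type.
	buf := make([]byte, dns.MinMsgSize)
	off, err := dns.PackRR(rr, buf, 0, nil, false)
	if err != nil {
		log.Fatal(err)
	}

	// UnpackRR dispatches to the matching generated unpack function.
	rr2, _, err := dns.UnpackRR(buf[:off], 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rr2)
}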
+ +package dns + +// pack*() functions + +func (rr *A) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packDataA(rr.A, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *AAAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packDataAAAA(rr.AAAA, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *AFSDB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Subtype, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Hostname, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *ANY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *AVC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Flag, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Tag, msg, off) + if err != nil { + return off, err + } + off, err = packStringOctet(rr.Value, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CDNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CDS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = 
packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CERT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Type, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Certificate, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Target, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CSYNC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint32(rr.Serial, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DHCID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringBase64(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DLV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Target, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, 
err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Endpoint, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EUI48) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint48(rr.Address, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EUI64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint64(rr.Address, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *GID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint32(rr.Gid, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *GPOS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.Longitude, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Latitude, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Altitude, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *HINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.Cpu, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Os, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *HIP) pack(msg []byte, off int, compression 
map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.HitLength, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.PublicKeyAlgorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.PublicKeyLength, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Hit, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + off, err = packDataDomainNames(rr.RendezvousServers, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *KEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *KX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Exchanger, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *L32) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDataA(rr.Locator32, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *L64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packUint64(rr.Locator64, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *LOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Version, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Size, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.HorizPre, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.VertPre, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Latitude, msg, off) + if err != nil { + return off, err + } + off, err = 
packUint32(rr.Longitude, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Altitude, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *LP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Fqdn, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mb, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MD) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Md, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mf, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mg, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Rmail, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Email, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mr, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = 
PackDomainName(rr.Mx, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NAPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Order, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Service, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Regexp, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Replacement, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packUint64(rr.NodeID, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NIMLOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Locator, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.ZSData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ns, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSAPPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ptr, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSEC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.NextDomain, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSEC3) pack(msg []byte, 
off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Hash, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Iterations, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.SaltLength, msg, off) + if err != nil { + return off, err + } + // Only pack salt if value is not "-", i.e. empty + if rr.Salt != "-" { + off, err = packStringHex(rr.Salt, msg, off) + if err != nil { + return off, err + } + } + off, err = packUint8(rr.HashLength, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase32(rr.NextDomain, msg, off) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSEC3PARAM) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Hash, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Iterations, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.SaltLength, msg, off) + if err != nil { + return off, err + } + // Only pack salt if value is not "-", i.e. empty + if rr.Salt != "-" { + off, err = packStringHex(rr.Salt, msg, off) + if err != nil { + return off, err + } + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *OPENPGPKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *OPT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packDataOpt(rr.Option, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *PTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ptr, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *PX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Map822, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Mapx400, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength 
= uint16(off - headerEnd) + return off, nil +} + +func (rr *RFC3597) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Rdata, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mbox, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Txt, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RRSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.TypeCovered, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.SignerName, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Signature, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Host, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.TypeCovered, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return 
off, err + } + off, err = packUint8(rr.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.SignerName, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Signature, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SMIMEA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Usage, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Selector, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.MatchingType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Certificate, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SOA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ns, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Mbox, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint32(rr.Serial, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Refresh, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Retry, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expire, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Minttl, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SPF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SRV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Weight, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Port, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Target, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SSHFP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + 
off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Type, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.FingerPrint, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TALINK) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.PreviousName, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.NextName, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Algorithm, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Mode, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeySize, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Key, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.OtherData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TLSA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Usage, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Selector, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.MatchingType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Certificate, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Algorithm, msg, off, compression, false) + if err != nil { + return off, err 
+ } + off, err = packUint48(rr.TimeSigned, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Fudge, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.MACSize, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.MAC, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OrigId, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.OtherData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TXT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *UID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint32(rr.Uid, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *UINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.Uinfo, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *URI) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Weight, msg, off) + if err != nil { + return off, err + } + off, err = packStringOctet(rr.Target, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *X25) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.PSDNAddress, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +// unpack*() functions + +func unpackA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(A) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.A, off, err = unpackDataA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackAAAA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(AAAA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.AAAA, off, err = unpackDataAAAA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackAFSDB(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(AFSDB) + 
rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Subtype, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Hostname, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackANY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(ANY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + return rr, off, err +} + +func unpackAVC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(AVC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCAA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CAA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flag, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Tag, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Value, off, err = unpackStringOctet(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CDNSKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCDS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CDS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCERT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CERT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Type, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) 
+ if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCNAME(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CNAME) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCSYNC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CSYNC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Serial, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDHCID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DHCID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Digest, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDLV(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DLV) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDNAME(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DNAME) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DNSKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := 
off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackEID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackEUI48(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EUI48) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Address, off, err = unpackUint48(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackEUI64(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EUI64) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Address, off, err = unpackUint64(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackGID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(GID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Gid, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackGPOS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(GPOS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Longitude, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Latitude, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Altitude, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackHINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(HINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Cpu, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Os, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackHIP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(HIP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.HitLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKeyLength, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil 
+ } + rr.Hit, off, err = unpackStringHex(msg, off, off+int(rr.HitLength)) + if err != nil { + return rr, off, err + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, off+int(rr.PublicKeyLength)) + if err != nil { + return rr, off, err + } + rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(KEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackKX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(KX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Exchanger, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackL32(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(L32) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Locator32, off, err = unpackDataA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackL64(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(L64) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Locator64, off, err = unpackUint64(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackLOC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(LOC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Version, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Size, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.HorizPre, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.VertPre, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Latitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Longitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off 
== len(msg) { + return rr, off, nil + } + rr.Altitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackLP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(LP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Fqdn, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMB(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MB) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mb, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMD(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MD) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Md, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMF(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MF) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mf, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mg, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Rmail, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Email, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mr, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mx, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNAPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NAPTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Order, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + 
rr.Flags, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Service, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Regexp, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Replacement, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NodeID, off, err = unpackUint64(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNIMLOC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NIMLOC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.ZSData, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ns, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSAPPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSAPPTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ptr, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSEC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.NextDomain, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSEC3(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC3) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Hash, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Iterations, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SaltLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Salt, off, err = 
unpackStringHex(msg, off, off+int(rr.SaltLength)) + if err != nil { + return rr, off, err + } + rr.HashLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NextDomain, off, err = unpackStringBase32(msg, off, off+int(rr.HashLength)) + if err != nil { + return rr, off, err + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSEC3PARAM(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC3PARAM) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Hash, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Iterations, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SaltLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackOPENPGPKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(OPENPGPKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackOPT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(OPT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Option, off, err = unpackDataOpt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(PTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ptr, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackPX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(PX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Map822, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mapx400, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRFC3597(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RFC3597) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = 
unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mbox, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Txt, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRRSIG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RRSIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.TypeCovered, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Labels, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OrigTtl, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SignerName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Host, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSIG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.TypeCovered, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Labels, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + 
return rr, off, nil + } + rr.OrigTtl, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SignerName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSMIMEA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SMIMEA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Usage, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Selector, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MatchingType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSOA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SOA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ns, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mbox, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Serial, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Refresh, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Retry, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expire, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Minttl, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSPF(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SPF) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSRV(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SRV) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Weight, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, 
err + } + if off == len(msg) { + return rr, off, nil + } + rr.Port, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSSHFP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SSHFP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Type, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTALINK(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TALINK) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.PreviousName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NextName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mode, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Error, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeySize, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Key, off, err = unpackStringHex(msg, off, off+int(rr.KeySize)) + if err != nil { + return rr, off, err + } + rr.OtherLen, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherData, off, err = unpackStringHex(msg, off, 
off+int(rr.OtherLen)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTLSA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TLSA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Usage, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Selector, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MatchingType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTSIG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TSIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.TimeSigned, off, err = unpackUint48(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Fudge, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MACSize, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MAC, off, err = unpackStringHex(msg, off, off+int(rr.MACSize)) + if err != nil { + return rr, off, err + } + rr.OrigId, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Error, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherLen, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTXT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TXT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackUID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(UID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Uid, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackUINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(UINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Uinfo, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackURI(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(URI) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + 
return rr, off, nil + } + rr.Weight, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Target, off, err = unpackStringOctet(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackX25(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(X25) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.PSDNAddress, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){ + TypeA: unpackA, + TypeAAAA: unpackAAAA, + TypeAFSDB: unpackAFSDB, + TypeANY: unpackANY, + TypeAVC: unpackAVC, + TypeCAA: unpackCAA, + TypeCDNSKEY: unpackCDNSKEY, + TypeCDS: unpackCDS, + TypeCERT: unpackCERT, + TypeCNAME: unpackCNAME, + TypeCSYNC: unpackCSYNC, + TypeDHCID: unpackDHCID, + TypeDLV: unpackDLV, + TypeDNAME: unpackDNAME, + TypeDNSKEY: unpackDNSKEY, + TypeDS: unpackDS, + TypeEID: unpackEID, + TypeEUI48: unpackEUI48, + TypeEUI64: unpackEUI64, + TypeGID: unpackGID, + TypeGPOS: unpackGPOS, + TypeHINFO: unpackHINFO, + TypeHIP: unpackHIP, + TypeKEY: unpackKEY, + TypeKX: unpackKX, + TypeL32: unpackL32, + TypeL64: unpackL64, + TypeLOC: unpackLOC, + TypeLP: unpackLP, + TypeMB: unpackMB, + TypeMD: unpackMD, + TypeMF: unpackMF, + TypeMG: unpackMG, + TypeMINFO: unpackMINFO, + TypeMR: unpackMR, + TypeMX: unpackMX, + TypeNAPTR: unpackNAPTR, + TypeNID: unpackNID, + TypeNIMLOC: unpackNIMLOC, + TypeNINFO: unpackNINFO, + TypeNS: unpackNS, + TypeNSAPPTR: unpackNSAPPTR, + TypeNSEC: unpackNSEC, + TypeNSEC3: unpackNSEC3, + TypeNSEC3PARAM: unpackNSEC3PARAM, + TypeOPENPGPKEY: unpackOPENPGPKEY, + TypeOPT: unpackOPT, + TypePTR: unpackPTR, + TypePX: unpackPX, + TypeRKEY: unpackRKEY, + TypeRP: unpackRP, + TypeRRSIG: unpackRRSIG, + TypeRT: unpackRT, + TypeSIG: unpackSIG, + TypeSMIMEA: unpackSMIMEA, + TypeSOA: unpackSOA, + TypeSPF: unpackSPF, + TypeSRV: unpackSRV, + TypeSSHFP: unpackSSHFP, + TypeTA: unpackTA, + TypeTALINK: unpackTALINK, + TypeTKEY: unpackTKEY, + TypeTLSA: unpackTLSA, + TypeTSIG: unpackTSIG, + TypeTXT: unpackTXT, + TypeUID: unpackUID, + TypeUINFO: unpackUINFO, + TypeURI: unpackURI, + TypeX25: unpackX25, +} diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go new file mode 100644 index 00000000000..965753b11b2 --- /dev/null +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -0,0 +1,863 @@ +// Code generated by "go run types_generate.go"; DO NOT EDIT. + +package dns + +import ( + "encoding/base64" + "net" +) + +// TypeToRR is a map of constructors for each RR type. 
+var TypeToRR = map[uint16]func() RR{ + TypeA: func() RR { return new(A) }, + TypeAAAA: func() RR { return new(AAAA) }, + TypeAFSDB: func() RR { return new(AFSDB) }, + TypeANY: func() RR { return new(ANY) }, + TypeAVC: func() RR { return new(AVC) }, + TypeCAA: func() RR { return new(CAA) }, + TypeCDNSKEY: func() RR { return new(CDNSKEY) }, + TypeCDS: func() RR { return new(CDS) }, + TypeCERT: func() RR { return new(CERT) }, + TypeCNAME: func() RR { return new(CNAME) }, + TypeCSYNC: func() RR { return new(CSYNC) }, + TypeDHCID: func() RR { return new(DHCID) }, + TypeDLV: func() RR { return new(DLV) }, + TypeDNAME: func() RR { return new(DNAME) }, + TypeDNSKEY: func() RR { return new(DNSKEY) }, + TypeDS: func() RR { return new(DS) }, + TypeEID: func() RR { return new(EID) }, + TypeEUI48: func() RR { return new(EUI48) }, + TypeEUI64: func() RR { return new(EUI64) }, + TypeGID: func() RR { return new(GID) }, + TypeGPOS: func() RR { return new(GPOS) }, + TypeHINFO: func() RR { return new(HINFO) }, + TypeHIP: func() RR { return new(HIP) }, + TypeKEY: func() RR { return new(KEY) }, + TypeKX: func() RR { return new(KX) }, + TypeL32: func() RR { return new(L32) }, + TypeL64: func() RR { return new(L64) }, + TypeLOC: func() RR { return new(LOC) }, + TypeLP: func() RR { return new(LP) }, + TypeMB: func() RR { return new(MB) }, + TypeMD: func() RR { return new(MD) }, + TypeMF: func() RR { return new(MF) }, + TypeMG: func() RR { return new(MG) }, + TypeMINFO: func() RR { return new(MINFO) }, + TypeMR: func() RR { return new(MR) }, + TypeMX: func() RR { return new(MX) }, + TypeNAPTR: func() RR { return new(NAPTR) }, + TypeNID: func() RR { return new(NID) }, + TypeNIMLOC: func() RR { return new(NIMLOC) }, + TypeNINFO: func() RR { return new(NINFO) }, + TypeNS: func() RR { return new(NS) }, + TypeNSAPPTR: func() RR { return new(NSAPPTR) }, + TypeNSEC: func() RR { return new(NSEC) }, + TypeNSEC3: func() RR { return new(NSEC3) }, + TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, + TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, + TypeOPT: func() RR { return new(OPT) }, + TypePTR: func() RR { return new(PTR) }, + TypePX: func() RR { return new(PX) }, + TypeRKEY: func() RR { return new(RKEY) }, + TypeRP: func() RR { return new(RP) }, + TypeRRSIG: func() RR { return new(RRSIG) }, + TypeRT: func() RR { return new(RT) }, + TypeSIG: func() RR { return new(SIG) }, + TypeSMIMEA: func() RR { return new(SMIMEA) }, + TypeSOA: func() RR { return new(SOA) }, + TypeSPF: func() RR { return new(SPF) }, + TypeSRV: func() RR { return new(SRV) }, + TypeSSHFP: func() RR { return new(SSHFP) }, + TypeTA: func() RR { return new(TA) }, + TypeTALINK: func() RR { return new(TALINK) }, + TypeTKEY: func() RR { return new(TKEY) }, + TypeTLSA: func() RR { return new(TLSA) }, + TypeTSIG: func() RR { return new(TSIG) }, + TypeTXT: func() RR { return new(TXT) }, + TypeUID: func() RR { return new(UID) }, + TypeUINFO: func() RR { return new(UINFO) }, + TypeURI: func() RR { return new(URI) }, + TypeX25: func() RR { return new(X25) }, +} + +// TypeToString is a map of strings for each RR type. 
+var TypeToString = map[uint16]string{ + TypeA: "A", + TypeAAAA: "AAAA", + TypeAFSDB: "AFSDB", + TypeANY: "ANY", + TypeATMA: "ATMA", + TypeAVC: "AVC", + TypeAXFR: "AXFR", + TypeCAA: "CAA", + TypeCDNSKEY: "CDNSKEY", + TypeCDS: "CDS", + TypeCERT: "CERT", + TypeCNAME: "CNAME", + TypeCSYNC: "CSYNC", + TypeDHCID: "DHCID", + TypeDLV: "DLV", + TypeDNAME: "DNAME", + TypeDNSKEY: "DNSKEY", + TypeDS: "DS", + TypeEID: "EID", + TypeEUI48: "EUI48", + TypeEUI64: "EUI64", + TypeGID: "GID", + TypeGPOS: "GPOS", + TypeHINFO: "HINFO", + TypeHIP: "HIP", + TypeISDN: "ISDN", + TypeIXFR: "IXFR", + TypeKEY: "KEY", + TypeKX: "KX", + TypeL32: "L32", + TypeL64: "L64", + TypeLOC: "LOC", + TypeLP: "LP", + TypeMAILA: "MAILA", + TypeMAILB: "MAILB", + TypeMB: "MB", + TypeMD: "MD", + TypeMF: "MF", + TypeMG: "MG", + TypeMINFO: "MINFO", + TypeMR: "MR", + TypeMX: "MX", + TypeNAPTR: "NAPTR", + TypeNID: "NID", + TypeNIMLOC: "NIMLOC", + TypeNINFO: "NINFO", + TypeNS: "NS", + TypeNSEC: "NSEC", + TypeNSEC3: "NSEC3", + TypeNSEC3PARAM: "NSEC3PARAM", + TypeNULL: "NULL", + TypeNXT: "NXT", + TypeNone: "None", + TypeOPENPGPKEY: "OPENPGPKEY", + TypeOPT: "OPT", + TypePTR: "PTR", + TypePX: "PX", + TypeRKEY: "RKEY", + TypeRP: "RP", + TypeRRSIG: "RRSIG", + TypeRT: "RT", + TypeReserved: "Reserved", + TypeSIG: "SIG", + TypeSMIMEA: "SMIMEA", + TypeSOA: "SOA", + TypeSPF: "SPF", + TypeSRV: "SRV", + TypeSSHFP: "SSHFP", + TypeTA: "TA", + TypeTALINK: "TALINK", + TypeTKEY: "TKEY", + TypeTLSA: "TLSA", + TypeTSIG: "TSIG", + TypeTXT: "TXT", + TypeUID: "UID", + TypeUINFO: "UINFO", + TypeUNSPEC: "UNSPEC", + TypeURI: "URI", + TypeX25: "X25", + TypeNSAPPTR: "NSAP-PTR", +} + +func (rr *A) Header() *RR_Header { return &rr.Hdr } +func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } +func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr } +func (rr *ANY) Header() *RR_Header { return &rr.Hdr } +func (rr *AVC) Header() *RR_Header { return &rr.Hdr } +func (rr *CAA) Header() *RR_Header { return &rr.Hdr } +func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *CDS) Header() *RR_Header { return &rr.Hdr } +func (rr *CERT) Header() *RR_Header { return &rr.Hdr } +func (rr *CNAME) Header() *RR_Header { return &rr.Hdr } +func (rr *CSYNC) Header() *RR_Header { return &rr.Hdr } +func (rr *DHCID) Header() *RR_Header { return &rr.Hdr } +func (rr *DLV) Header() *RR_Header { return &rr.Hdr } +func (rr *DNAME) Header() *RR_Header { return &rr.Hdr } +func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *DS) Header() *RR_Header { return &rr.Hdr } +func (rr *EID) Header() *RR_Header { return &rr.Hdr } +func (rr *EUI48) Header() *RR_Header { return &rr.Hdr } +func (rr *EUI64) Header() *RR_Header { return &rr.Hdr } +func (rr *GID) Header() *RR_Header { return &rr.Hdr } +func (rr *GPOS) Header() *RR_Header { return &rr.Hdr } +func (rr *HINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *HIP) Header() *RR_Header { return &rr.Hdr } +func (rr *KEY) Header() *RR_Header { return &rr.Hdr } +func (rr *KX) Header() *RR_Header { return &rr.Hdr } +func (rr *L32) Header() *RR_Header { return &rr.Hdr } +func (rr *L64) Header() *RR_Header { return &rr.Hdr } +func (rr *LOC) Header() *RR_Header { return &rr.Hdr } +func (rr *LP) Header() *RR_Header { return &rr.Hdr } +func (rr *MB) Header() *RR_Header { return &rr.Hdr } +func (rr *MD) Header() *RR_Header { return &rr.Hdr } +func (rr *MF) Header() *RR_Header { return &rr.Hdr } +func (rr *MG) Header() *RR_Header { return &rr.Hdr } +func (rr *MINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *MR) Header() 
*RR_Header { return &rr.Hdr } +func (rr *MX) Header() *RR_Header { return &rr.Hdr } +func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr } +func (rr *NID) Header() *RR_Header { return &rr.Hdr } +func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr } +func (rr *NINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *NS) Header() *RR_Header { return &rr.Hdr } +func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } +func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *OPT) Header() *RR_Header { return &rr.Hdr } +func (rr *PTR) Header() *RR_Header { return &rr.Hdr } +func (rr *PX) Header() *RR_Header { return &rr.Hdr } +func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr } +func (rr *RKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *RP) Header() *RR_Header { return &rr.Hdr } +func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr } +func (rr *RT) Header() *RR_Header { return &rr.Hdr } +func (rr *SIG) Header() *RR_Header { return &rr.Hdr } +func (rr *SMIMEA) Header() *RR_Header { return &rr.Hdr } +func (rr *SOA) Header() *RR_Header { return &rr.Hdr } +func (rr *SPF) Header() *RR_Header { return &rr.Hdr } +func (rr *SRV) Header() *RR_Header { return &rr.Hdr } +func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr } +func (rr *TA) Header() *RR_Header { return &rr.Hdr } +func (rr *TALINK) Header() *RR_Header { return &rr.Hdr } +func (rr *TKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *TLSA) Header() *RR_Header { return &rr.Hdr } +func (rr *TSIG) Header() *RR_Header { return &rr.Hdr } +func (rr *TXT) Header() *RR_Header { return &rr.Hdr } +func (rr *UID) Header() *RR_Header { return &rr.Hdr } +func (rr *UINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *URI) Header() *RR_Header { return &rr.Hdr } +func (rr *X25) Header() *RR_Header { return &rr.Hdr } + +// len() functions +func (rr *A) len() int { + l := rr.Hdr.len() + l += net.IPv4len // A + return l +} +func (rr *AAAA) len() int { + l := rr.Hdr.len() + l += net.IPv6len // AAAA + return l +} +func (rr *AFSDB) len() int { + l := rr.Hdr.len() + l += 2 // Subtype + l += len(rr.Hostname) + 1 + return l +} +func (rr *ANY) len() int { + l := rr.Hdr.len() + return l +} +func (rr *AVC) len() int { + l := rr.Hdr.len() + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *CAA) len() int { + l := rr.Hdr.len() + l++ // Flag + l += len(rr.Tag) + 1 + l += len(rr.Value) + return l +} +func (rr *CERT) len() int { + l := rr.Hdr.len() + l += 2 // Type + l += 2 // KeyTag + l++ // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) + return l +} +func (rr *CNAME) len() int { + l := rr.Hdr.len() + l += len(rr.Target) + 1 + return l +} +func (rr *DHCID) len() int { + l := rr.Hdr.len() + l += base64.StdEncoding.DecodedLen(len(rr.Digest)) + return l +} +func (rr *DNAME) len() int { + l := rr.Hdr.len() + l += len(rr.Target) + 1 + return l +} +func (rr *DNSKEY) len() int { + l := rr.Hdr.len() + l += 2 // Flags + l++ // Protocol + l++ // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *DS) len() int { + l := rr.Hdr.len() + l += 2 // KeyTag + l++ // Algorithm + l++ // DigestType + l += len(rr.Digest)/2 + 1 + return l +} +func (rr *EID) len() int { + l := rr.Hdr.len() + l += len(rr.Endpoint)/2 + 1 + return l +} +func (rr *EUI48) len() int { + l := rr.Hdr.len() + l += 6 // 
Address + return l +} +func (rr *EUI64) len() int { + l := rr.Hdr.len() + l += 8 // Address + return l +} +func (rr *GID) len() int { + l := rr.Hdr.len() + l += 4 // Gid + return l +} +func (rr *GPOS) len() int { + l := rr.Hdr.len() + l += len(rr.Longitude) + 1 + l += len(rr.Latitude) + 1 + l += len(rr.Altitude) + 1 + return l +} +func (rr *HINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Cpu) + 1 + l += len(rr.Os) + 1 + return l +} +func (rr *HIP) len() int { + l := rr.Hdr.len() + l++ // HitLength + l++ // PublicKeyAlgorithm + l += 2 // PublicKeyLength + l += len(rr.Hit) / 2 + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + for _, x := range rr.RendezvousServers { + l += len(x) + 1 + } + return l +} +func (rr *KX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Exchanger) + 1 + return l +} +func (rr *L32) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += net.IPv4len // Locator32 + return l +} +func (rr *L64) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += 8 // Locator64 + return l +} +func (rr *LOC) len() int { + l := rr.Hdr.len() + l++ // Version + l++ // Size + l++ // HorizPre + l++ // VertPre + l += 4 // Latitude + l += 4 // Longitude + l += 4 // Altitude + return l +} +func (rr *LP) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Fqdn) + 1 + return l +} +func (rr *MB) len() int { + l := rr.Hdr.len() + l += len(rr.Mb) + 1 + return l +} +func (rr *MD) len() int { + l := rr.Hdr.len() + l += len(rr.Md) + 1 + return l +} +func (rr *MF) len() int { + l := rr.Hdr.len() + l += len(rr.Mf) + 1 + return l +} +func (rr *MG) len() int { + l := rr.Hdr.len() + l += len(rr.Mg) + 1 + return l +} +func (rr *MINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Rmail) + 1 + l += len(rr.Email) + 1 + return l +} +func (rr *MR) len() int { + l := rr.Hdr.len() + l += len(rr.Mr) + 1 + return l +} +func (rr *MX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Mx) + 1 + return l +} +func (rr *NAPTR) len() int { + l := rr.Hdr.len() + l += 2 // Order + l += 2 // Preference + l += len(rr.Flags) + 1 + l += len(rr.Service) + 1 + l += len(rr.Regexp) + 1 + l += len(rr.Replacement) + 1 + return l +} +func (rr *NID) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += 8 // NodeID + return l +} +func (rr *NIMLOC) len() int { + l := rr.Hdr.len() + l += len(rr.Locator)/2 + 1 + return l +} +func (rr *NINFO) len() int { + l := rr.Hdr.len() + for _, x := range rr.ZSData { + l += len(x) + 1 + } + return l +} +func (rr *NS) len() int { + l := rr.Hdr.len() + l += len(rr.Ns) + 1 + return l +} +func (rr *NSAPPTR) len() int { + l := rr.Hdr.len() + l += len(rr.Ptr) + 1 + return l +} +func (rr *NSEC3PARAM) len() int { + l := rr.Hdr.len() + l++ // Hash + l++ // Flags + l += 2 // Iterations + l++ // SaltLength + l += len(rr.Salt) / 2 + return l +} +func (rr *OPENPGPKEY) len() int { + l := rr.Hdr.len() + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *PTR) len() int { + l := rr.Hdr.len() + l += len(rr.Ptr) + 1 + return l +} +func (rr *PX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Map822) + 1 + l += len(rr.Mapx400) + 1 + return l +} +func (rr *RFC3597) len() int { + l := rr.Hdr.len() + l += len(rr.Rdata)/2 + 1 + return l +} +func (rr *RKEY) len() int { + l := rr.Hdr.len() + l += 2 // Flags + l++ // Protocol + l++ // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *RP) len() int { + l := rr.Hdr.len() + l += len(rr.Mbox) + 1 + l += 
len(rr.Txt) + 1 + return l +} +func (rr *RRSIG) len() int { + l := rr.Hdr.len() + l += 2 // TypeCovered + l++ // Algorithm + l++ // Labels + l += 4 // OrigTtl + l += 4 // Expiration + l += 4 // Inception + l += 2 // KeyTag + l += len(rr.SignerName) + 1 + l += base64.StdEncoding.DecodedLen(len(rr.Signature)) + return l +} +func (rr *RT) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Host) + 1 + return l +} +func (rr *SMIMEA) len() int { + l := rr.Hdr.len() + l++ // Usage + l++ // Selector + l++ // MatchingType + l += len(rr.Certificate)/2 + 1 + return l +} +func (rr *SOA) len() int { + l := rr.Hdr.len() + l += len(rr.Ns) + 1 + l += len(rr.Mbox) + 1 + l += 4 // Serial + l += 4 // Refresh + l += 4 // Retry + l += 4 // Expire + l += 4 // Minttl + return l +} +func (rr *SPF) len() int { + l := rr.Hdr.len() + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *SRV) len() int { + l := rr.Hdr.len() + l += 2 // Priority + l += 2 // Weight + l += 2 // Port + l += len(rr.Target) + 1 + return l +} +func (rr *SSHFP) len() int { + l := rr.Hdr.len() + l++ // Algorithm + l++ // Type + l += len(rr.FingerPrint)/2 + 1 + return l +} +func (rr *TA) len() int { + l := rr.Hdr.len() + l += 2 // KeyTag + l++ // Algorithm + l++ // DigestType + l += len(rr.Digest)/2 + 1 + return l +} +func (rr *TALINK) len() int { + l := rr.Hdr.len() + l += len(rr.PreviousName) + 1 + l += len(rr.NextName) + 1 + return l +} +func (rr *TKEY) len() int { + l := rr.Hdr.len() + l += len(rr.Algorithm) + 1 + l += 4 // Inception + l += 4 // Expiration + l += 2 // Mode + l += 2 // Error + l += 2 // KeySize + l += len(rr.Key) / 2 + l += 2 // OtherLen + l += len(rr.OtherData) / 2 + return l +} +func (rr *TLSA) len() int { + l := rr.Hdr.len() + l++ // Usage + l++ // Selector + l++ // MatchingType + l += len(rr.Certificate)/2 + 1 + return l +} +func (rr *TSIG) len() int { + l := rr.Hdr.len() + l += len(rr.Algorithm) + 1 + l += 6 // TimeSigned + l += 2 // Fudge + l += 2 // MACSize + l += len(rr.MAC) / 2 + l += 2 // OrigId + l += 2 // Error + l += 2 // OtherLen + l += len(rr.OtherData) / 2 + return l +} +func (rr *TXT) len() int { + l := rr.Hdr.len() + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *UID) len() int { + l := rr.Hdr.len() + l += 4 // Uid + return l +} +func (rr *UINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Uinfo) + 1 + return l +} +func (rr *URI) len() int { + l := rr.Hdr.len() + l += 2 // Priority + l += 2 // Weight + l += len(rr.Target) + return l +} +func (rr *X25) len() int { + l := rr.Hdr.len() + l += len(rr.PSDNAddress) + 1 + return l +} + +// copy() functions +func (rr *A) copy() RR { + return &A{rr.Hdr, copyIP(rr.A)} +} +func (rr *AAAA) copy() RR { + return &AAAA{rr.Hdr, copyIP(rr.AAAA)} +} +func (rr *AFSDB) copy() RR { + return &AFSDB{rr.Hdr, rr.Subtype, rr.Hostname} +} +func (rr *ANY) copy() RR { + return &ANY{rr.Hdr} +} +func (rr *AVC) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &AVC{rr.Hdr, Txt} +} +func (rr *CAA) copy() RR { + return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value} +} +func (rr *CERT) copy() RR { + return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} +} +func (rr *CNAME) copy() RR { + return &CNAME{rr.Hdr, rr.Target} +} +func (rr *CSYNC) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &CSYNC{rr.Hdr, rr.Serial, rr.Flags, TypeBitMap} +} +func (rr *DHCID) copy() RR { + return &DHCID{rr.Hdr, rr.Digest} +} +func (rr *DNAME) copy() RR 
{ + return &DNAME{rr.Hdr, rr.Target} +} +func (rr *DNSKEY) copy() RR { + return &DNSKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} +} +func (rr *DS) copy() RR { + return &DS{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} +} +func (rr *EID) copy() RR { + return &EID{rr.Hdr, rr.Endpoint} +} +func (rr *EUI48) copy() RR { + return &EUI48{rr.Hdr, rr.Address} +} +func (rr *EUI64) copy() RR { + return &EUI64{rr.Hdr, rr.Address} +} +func (rr *GID) copy() RR { + return &GID{rr.Hdr, rr.Gid} +} +func (rr *GPOS) copy() RR { + return &GPOS{rr.Hdr, rr.Longitude, rr.Latitude, rr.Altitude} +} +func (rr *HINFO) copy() RR { + return &HINFO{rr.Hdr, rr.Cpu, rr.Os} +} +func (rr *HIP) copy() RR { + RendezvousServers := make([]string, len(rr.RendezvousServers)) + copy(RendezvousServers, rr.RendezvousServers) + return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} +} +func (rr *KX) copy() RR { + return &KX{rr.Hdr, rr.Preference, rr.Exchanger} +} +func (rr *L32) copy() RR { + return &L32{rr.Hdr, rr.Preference, copyIP(rr.Locator32)} +} +func (rr *L64) copy() RR { + return &L64{rr.Hdr, rr.Preference, rr.Locator64} +} +func (rr *LOC) copy() RR { + return &LOC{rr.Hdr, rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} +} +func (rr *LP) copy() RR { + return &LP{rr.Hdr, rr.Preference, rr.Fqdn} +} +func (rr *MB) copy() RR { + return &MB{rr.Hdr, rr.Mb} +} +func (rr *MD) copy() RR { + return &MD{rr.Hdr, rr.Md} +} +func (rr *MF) copy() RR { + return &MF{rr.Hdr, rr.Mf} +} +func (rr *MG) copy() RR { + return &MG{rr.Hdr, rr.Mg} +} +func (rr *MINFO) copy() RR { + return &MINFO{rr.Hdr, rr.Rmail, rr.Email} +} +func (rr *MR) copy() RR { + return &MR{rr.Hdr, rr.Mr} +} +func (rr *MX) copy() RR { + return &MX{rr.Hdr, rr.Preference, rr.Mx} +} +func (rr *NAPTR) copy() RR { + return &NAPTR{rr.Hdr, rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} +} +func (rr *NID) copy() RR { + return &NID{rr.Hdr, rr.Preference, rr.NodeID} +} +func (rr *NIMLOC) copy() RR { + return &NIMLOC{rr.Hdr, rr.Locator} +} +func (rr *NINFO) copy() RR { + ZSData := make([]string, len(rr.ZSData)) + copy(ZSData, rr.ZSData) + return &NINFO{rr.Hdr, ZSData} +} +func (rr *NS) copy() RR { + return &NS{rr.Hdr, rr.Ns} +} +func (rr *NSAPPTR) copy() RR { + return &NSAPPTR{rr.Hdr, rr.Ptr} +} +func (rr *NSEC) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &NSEC{rr.Hdr, rr.NextDomain, TypeBitMap} +} +func (rr *NSEC3) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &NSEC3{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} +} +func (rr *NSEC3PARAM) copy() RR { + return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} +} +func (rr *OPENPGPKEY) copy() RR { + return &OPENPGPKEY{rr.Hdr, rr.PublicKey} +} +func (rr *OPT) copy() RR { + Option := make([]EDNS0, len(rr.Option)) + copy(Option, rr.Option) + return &OPT{rr.Hdr, Option} +} +func (rr *PTR) copy() RR { + return &PTR{rr.Hdr, rr.Ptr} +} +func (rr *PX) copy() RR { + return &PX{rr.Hdr, rr.Preference, rr.Map822, rr.Mapx400} +} +func (rr *RFC3597) copy() RR { + return &RFC3597{rr.Hdr, rr.Rdata} +} +func (rr *RKEY) copy() RR { + return &RKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} +} +func (rr *RP) copy() RR { + return &RP{rr.Hdr, rr.Mbox, rr.Txt} +} +func (rr *RRSIG) 
copy() RR { + return &RRSIG{rr.Hdr, rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} +} +func (rr *RT) copy() RR { + return &RT{rr.Hdr, rr.Preference, rr.Host} +} +func (rr *SMIMEA) copy() RR { + return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} +} +func (rr *SOA) copy() RR { + return &SOA{rr.Hdr, rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} +} +func (rr *SPF) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &SPF{rr.Hdr, Txt} +} +func (rr *SRV) copy() RR { + return &SRV{rr.Hdr, rr.Priority, rr.Weight, rr.Port, rr.Target} +} +func (rr *SSHFP) copy() RR { + return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint} +} +func (rr *TA) copy() RR { + return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} +} +func (rr *TALINK) copy() RR { + return &TALINK{rr.Hdr, rr.PreviousName, rr.NextName} +} +func (rr *TKEY) copy() RR { + return &TKEY{rr.Hdr, rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} +} +func (rr *TLSA) copy() RR { + return &TLSA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} +} +func (rr *TSIG) copy() RR { + return &TSIG{rr.Hdr, rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData} +} +func (rr *TXT) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &TXT{rr.Hdr, Txt} +} +func (rr *UID) copy() RR { + return &UID{rr.Hdr, rr.Uid} +} +func (rr *UINFO) copy() RR { + return &UINFO{rr.Hdr, rr.Uinfo} +} +func (rr *URI) copy() RR { + return &URI{rr.Hdr, rr.Priority, rr.Weight, rr.Target} +} +func (rr *X25) copy() RR { + return &X25{rr.Hdr, rr.PSDNAddress} +} diff --git a/vendor/github.com/openshift/cluster-api-provider-equinix-metal/LICENSE b/vendor/github.com/openshift/cluster-api-provider-equinix-metal/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-provider-equinix-metal/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
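Before the vendored provider sources that follow, a brief illustrative sketch (editorial, not part of the diff): it shows how the `EquinixMetalMachineProviderConfig` type and the `RawExtensionFromProviderSpec` helper introduced below might be used to build a Machine's providerSpec payload. The machine type, facility, and secret names here are placeholder assumptions, not values mandated by the installer; real specs are rendered by the installer's machine asset generation.

```go
package main

import (
	"fmt"

	v1beta1 "github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Hypothetical values for illustration only.
	spec := &v1beta1.EquinixMetalMachineProviderConfig{
		MachineType:       "c3.medium.x86", // assumed plan name
		Facility:          "ny5",           // assumed facility code
		BillingCycle:      "hourly",
		OS:                "custom_ipxe",
		UserDataSecret:    &corev1.LocalObjectReference{Name: "worker-user-data"},
		CredentialsSecret: &corev1.LocalObjectReference{Name: "equinixmetal-cloud-credentials"},
	}

	// RawExtensionFromProviderSpec (defined below in register.go) JSON-encodes
	// the spec so it can be embedded in Machine.Spec.ProviderSpec.Value.
	raw, err := v1beta1.RawExtensionFromProviderSpec(spec)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw.Raw))
}
```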
diff --git a/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/addtoscheme_equinix_metal_v1beta1.go b/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/addtoscheme_equinix_metal_v1beta1.go new file mode 100644 index 00000000000..c92cb9d3576 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/addtoscheme_equinix_metal_v1beta1.go @@ -0,0 +1,10 @@ +package apis + +import ( + "github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, v1beta1.SchemeBuilder.AddToScheme) +} diff --git a/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/apis.go b/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/apis.go new file mode 100644 index 00000000000..5d960dfa06b --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/apis.go @@ -0,0 +1,17 @@ +// Generate deepcopy for apis +//go:generate go run ../../vendor/sigs.k8s.io/controller-tools/cmd/controller-gen paths=./... object:headerFile=../../hack/boilerplate/boilerplate.go.txt,year=2020 + +// Package apis contains Kubernetes API groups. +package apis + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// AddToSchemes may be used to add all resources defined in the project to a Scheme +var AddToSchemes runtime.SchemeBuilder + +// AddToScheme adds all Resources to the Scheme +func AddToScheme(s *runtime.Scheme) error { + return AddToSchemes.AddToScheme(s) +} diff --git a/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1/doc.go b/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1/doc.go new file mode 100644 index 00000000000..dcaf779b0d5 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1/doc.go @@ -0,0 +1,7 @@ +// Package v1beta1 contains API Schema definitions for the equinix-metal v1beta1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal +// +k8s:defaulter-gen=TypeMeta +// +groupName=equinixmetal.machine.openshift.io +package v1beta1 diff --git a/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1/equinixmetalmachineproviderconfig_types.go b/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1/equinixmetalmachineproviderconfig_types.go new file mode 100644 index 00000000000..6cd6137053b --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1/equinixmetalmachineproviderconfig_types.go @@ -0,0 +1,38 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EquinixMetalMachineProviderConfig is the Schema for the equinixmetalmachineproviderconfigs API.
+// +k8s:openapi-gen=true +type EquinixMetalMachineProviderConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // UserDataSecret contains a local reference to a secret that contains the + // UserData to apply to the instance + UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` + + // CredentialsSecret is a reference to the secret with EquinixMetal credentials. + CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` + + MachineType string `json:"machineType"` + Facility string `json:"facility"` + ProjectID string `json:"projectID,omitempty"` + BillingCycle string `json:"billingCycle"` + OS string `json:"os"` + CustomData string `json:"customData,omitempty"` + IPXEScriptURL string `json:"ipxeScriptURL,omitempty"` + Tags []string `json:"tags,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +func init() { + SchemeBuilder.Register(&EquinixMetalMachineProviderConfig{}) +} diff --git a/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1/equinixmetalmachineproviderstatus_types.go b/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1/equinixmetalmachineproviderstatus_types.go new file mode 100644 index 00000000000..fd1d1fd4d24 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1/equinixmetalmachineproviderstatus_types.go @@ -0,0 +1,65 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EquinixMetalMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. +// It contains EquinixMetal-specific status information. +// +k8s:openapi-gen=true +type EquinixMetalMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // InstanceID is the ID of the instance in EquinixMetal + // +optional + InstanceID *string `json:"instanceId,omitempty"` + + // InstanceState is the provisioning state of the EquinixMetal Instance. + // +optional + InstanceState *string `json:"instanceState,omitempty"` + + // Conditions is a set of conditions associated with the Machine to indicate + // errors or other status + Conditions []EquinixMetalMachineProviderCondition `json:"conditions,omitempty"` +} + +// EquinixMetalMachineProviderConditionType is a valid value for EquinixMetalMachineProviderCondition.Type. +type EquinixMetalMachineProviderConditionType string + +// Valid conditions for an EquinixMetal machine instance. +const ( + // MachineCreated indicates whether the machine has been created or not. If not, + // it should include a reason and message for the failure. + MachineCreated EquinixMetalMachineProviderConditionType = "MachineCreated" +) + +// EquinixMetalMachineProviderCondition is a condition in a EquinixMetalMachineProviderStatus. +type EquinixMetalMachineProviderCondition struct { + // Type is the type of the condition. + Type EquinixMetalMachineProviderConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. 
+ // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +func init() { + SchemeBuilder.Register(&EquinixMetalMachineProviderStatus{}) +} diff --git a/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1/register.go b/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1/register.go new file mode 100644 index 00000000000..137d5920f40 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1/register.go @@ -0,0 +1,96 @@ +// Package v1beta1 contains API Schema definitions for the equinix-metal v1beta1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal +// +k8s:defaulter-gen=TypeMeta +// +groupName=equinixmetal.machine.openshift.io +package v1beta1 + +import ( + "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/scheme" + "sigs.k8s.io/yaml" +) + +var ( + // SchemeGroupVersion is group version used to register these objects. + SchemeGroupVersion = schema.GroupVersion{Group: "equinixmetal.machine.openshift.io", Version: "v1beta1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} +) + +// RawExtensionFromProviderSpec marshals the machine provider spec. 
+func RawExtensionFromProviderSpec(spec *EquinixMetalMachineProviderConfig) (*runtime.RawExtension, error) { + if spec == nil { + return &runtime.RawExtension{}, nil + } + + var rawBytes []byte + + var err error + + if rawBytes, err = json.Marshal(spec); err != nil { + return nil, fmt.Errorf("error marshalling providerSpec: %v", err) + } + + return &runtime.RawExtension{ + Raw: rawBytes, + }, nil +} + +// RawExtensionFromProviderStatus marshals the provider status +func RawExtensionFromProviderStatus(status *EquinixMetalMachineProviderStatus) (*runtime.RawExtension, error) { + if status == nil { + return &runtime.RawExtension{}, nil + } + + var rawBytes []byte + + var err error + + if rawBytes, err = json.Marshal(status); err != nil { + return nil, fmt.Errorf("error marshalling providerStatus: %v", err) + } + + return &runtime.RawExtension{ + Raw: rawBytes, + }, nil +} + +// ProviderSpecFromRawExtension unmarshals the JSON-encoded spec +func ProviderSpecFromRawExtension(rawExtension *runtime.RawExtension) (*EquinixMetalMachineProviderConfig, error) { + if rawExtension == nil { + return &EquinixMetalMachineProviderConfig{}, nil + } + + spec := new(EquinixMetalMachineProviderConfig) + if err := yaml.Unmarshal(rawExtension.Raw, &spec); err != nil { + return nil, fmt.Errorf("error unmarshalling providerSpec: %v", err) + } + + klog.V(5).Infof("Got provider spec from raw extension: %+v", spec) + + return spec, nil +} + +// ProviderStatusFromRawExtension unmarshals a raw extension into a EquinixMetalMachineProviderStatus type +func ProviderStatusFromRawExtension(rawExtension *runtime.RawExtension) (*EquinixMetalMachineProviderStatus, error) { + if rawExtension == nil { + return &EquinixMetalMachineProviderStatus{}, nil + } + + providerStatus := new(EquinixMetalMachineProviderStatus) + if err := yaml.Unmarshal(rawExtension.Raw, providerStatus); err != nil { + return nil, fmt.Errorf("error unmarshalling providerStatus: %v", err) + } + + klog.V(5).Infof("Got provider Status from raw extension: %+v", providerStatus) + + return providerStatus, nil +} diff --git a/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..dff6e519f30 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-provider-equinix-metal/pkg/apis/equinixmetal/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,125 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EquinixMetalMachineProviderCondition) DeepCopyInto(out *EquinixMetalMachineProviderCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EquinixMetalMachineProviderCondition. +func (in *EquinixMetalMachineProviderCondition) DeepCopy() *EquinixMetalMachineProviderCondition { + if in == nil { + return nil + } + out := new(EquinixMetalMachineProviderCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EquinixMetalMachineProviderConfig) DeepCopyInto(out *EquinixMetalMachineProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EquinixMetalMachineProviderConfig. +func (in *EquinixMetalMachineProviderConfig) DeepCopy() *EquinixMetalMachineProviderConfig { + if in == nil { + return nil + } + out := new(EquinixMetalMachineProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EquinixMetalMachineProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EquinixMetalMachineProviderStatus) DeepCopyInto(out *EquinixMetalMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]EquinixMetalMachineProviderCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EquinixMetalMachineProviderStatus. +func (in *EquinixMetalMachineProviderStatus) DeepCopy() *EquinixMetalMachineProviderStatus { + if in == nil { + return nil + } + out := new(EquinixMetalMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *EquinixMetalMachineProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/gcpmachineproviderconfig_types.go b/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/gcpmachineproviderconfig_types.go index 6e402364a62..04aa60f95e0 100644 --- a/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/gcpmachineproviderconfig_types.go +++ b/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/gcpmachineproviderconfig_types.go @@ -35,6 +35,9 @@ type GCPMachineProviderSpec struct { Region string `json:"region"` Zone string `json:"zone"` ProjectID string `json:"projectID,omitempty"` + + // Preemptible indicates if created instance is preemptible + Preemptible bool `json:"preemptible,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -45,12 +48,13 @@ func init() { // GCPDisk describes disks for GCP. type GCPDisk struct { - AutoDelete bool `json:"autoDelete"` - Boot bool `json:"boot"` - SizeGb int64 `json:"sizeGb"` - Type string `json:"type"` - Image string `json:"image"` - Labels map[string]string `json:"labels"` + AutoDelete bool `json:"autoDelete"` + Boot bool `json:"boot"` + SizeGb int64 `json:"sizeGb"` + Type string `json:"type"` + Image string `json:"image"` + Labels map[string]string `json:"labels"` + EncryptionKey *GCPEncryptionKeyReference `json:"encryptionKey,omitempty"` } // GCPMetadata describes metadata for GCP. @@ -63,6 +67,7 @@ type GCPMetadata struct { type GCPNetworkInterface struct { PublicIP bool `json:"publicIP,omitempty"` Network string `json:"network,omitempty"` + ProjectID string `json:"projectID,omitempty"` Subnetwork string `json:"subnetwork,omitempty"` } @@ -71,3 +76,31 @@ type GCPServiceAccount struct { Email string `json:"email"` Scopes []string `json:"scopes"` } + +// GCPEncryptionKeyReference describes the encryptionKey to use for a disk's encryption. +type GCPEncryptionKeyReference struct { + KMSKey *GCPKMSKeyReference `json:"kmsKey,omitempty"` + + // KMSKeyServiceAccount is the service account being used for the + // encryption request for the given KMS key. If absent, the Compute + // Engine default service account is used. + // See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account + // for details on the default service account. + KMSKeyServiceAccount string `json:"kmsKeyServiceAccount,omitempty"` +} + +// GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key +type GCPKMSKeyReference struct { + // Name is the name of the customer managed encryption key to be used for the disk encryption. + Name string `json:"name"` + + // KeyRing is the name of the KMS Key Ring which the KMS Key belongs to. + KeyRing string `json:"keyRing"` + + // ProjectID is the ID of the Project in which the KMS Key Ring exists. + // Defaults to the VM ProjectID if not set. + ProjectID string `json:"projectID,omitempty"` + + // Location is the GCP location in which the Key Ring exists. 
+ Location string `json:"location"` +} diff --git a/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/register.go b/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/register.go index 8ce9dc5a147..3400d917a22 100644 --- a/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/register.go +++ b/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/register.go @@ -12,7 +12,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/klog" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/scheme" "sigs.k8s.io/yaml" ) diff --git a/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/zz_generated.deepcopy.go index 08c506c2a33..9f3d6d44387 100644 --- a/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/zz_generated.deepcopy.go @@ -35,6 +35,11 @@ func (in *GCPDisk) DeepCopyInto(out *GCPDisk) { (*out)[key] = val } } + if in.EncryptionKey != nil { + in, out := &in.EncryptionKey, &out.EncryptionKey + *out = new(GCPEncryptionKeyReference) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPDisk. @@ -47,6 +52,41 @@ func (in *GCPDisk) DeepCopy() *GCPDisk { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPEncryptionKeyReference) DeepCopyInto(out *GCPEncryptionKeyReference) { + *out = *in + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(GCPKMSKeyReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPEncryptionKeyReference. +func (in *GCPEncryptionKeyReference) DeepCopy() *GCPEncryptionKeyReference { + if in == nil { + return nil + } + out := new(GCPEncryptionKeyReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPKMSKeyReference) DeepCopyInto(out *GCPKMSKeyReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPKMSKeyReference. +func (in *GCPKMSKeyReference) DeepCopy() *GCPKMSKeyReference { + if in == nil { + return nil + } + out := new(GCPKMSKeyReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GCPMachineProviderCondition) DeepCopyInto(out *GCPMachineProviderCondition) { *out = *in diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/register.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/register.go index b986c43b329..1b710aafa26 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/register.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/register.go @@ -1,3 +1,8 @@ +// Generate deepcopy for apis +//go:generate go run ../../../vendor/sigs.k8s.io/controller-tools/cmd/controller-gen paths=./... 
object:headerFile=../../../hack/boilerplate.go.txt,year=2019 +// Ensure generated code is goimports compliant +//go:generate goimports -w ./v1beta1/zz_generated.deepcopy.go + package machine const GroupName = "machine.openshift.io" diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_webhook.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_webhook.go new file mode 100644 index 00000000000..9afc4e6e01e --- /dev/null +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_webhook.go @@ -0,0 +1,1101 @@ +package v1beta1 + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strings" + + osconfigv1 "github.com/openshift/api/config/v1" + osclientset "github.com/openshift/client-go/config/clientset/versioned" + gcp "github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1" + "github.com/openshift/machine-api-operator/pkg/apis/machine" + vsphere "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/klog/v2" + "k8s.io/utils/pointer" + aws "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1" + azure "sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + yaml "sigs.k8s.io/yaml" +) + +var ( + // Azure Defaults + defaultAzureVnet = func(clusterID string) string { + return fmt.Sprintf("%s-vnet", clusterID) + } + defaultAzureSubnet = func(clusterID string) string { + return fmt.Sprintf("%s-worker-subnet", clusterID) + } + defaultAzureNetworkResourceGroup = func(clusterID string) string { + return fmt.Sprintf("%s-rg", clusterID) + } + defaultAzureImageResourceID = func(clusterID string) string { + return fmt.Sprintf("/resourceGroups/%s/providers/Microsoft.Compute/images/%s", clusterID+"-rg", clusterID) + } + defaultAzureManagedIdentiy = func(clusterID string) string { + return fmt.Sprintf("%s-identity", clusterID) + } + defaultAzureResourceGroup = func(clusterID string) string { + return fmt.Sprintf("%s-rg", clusterID) + } + + // GCP Defaults + defaultGCPNetwork = func(clusterID string) string { + return fmt.Sprintf("%s-network", clusterID) + } + defaultGCPSubnetwork = func(clusterID string) string { + return fmt.Sprintf("%s-worker-subnet", clusterID) + } + defaultGCPTags = func(clusterID string) []string { + return []string{fmt.Sprintf("%s-worker", clusterID)} + } +) + +const ( + DefaultMachineMutatingHookPath = "/mutate-machine-openshift-io-v1beta1-machine" + DefaultMachineValidatingHookPath = "/validate-machine-openshift-io-v1beta1-machine" + DefaultMachineSetMutatingHookPath = "/mutate-machine-openshift-io-v1beta1-machineset" + DefaultMachineSetValidatingHookPath = "/validate-machine-openshift-io-v1beta1-machineset" + + defaultWebhookConfigurationName = "machine-api" + defaultWebhookServiceName = "machine-api-operator-webhook" + defaultWebhookServiceNamespace = "openshift-machine-api" + defaultWebhookServicePort = 443 + + defaultUserDataSecret = "worker-user-data" + defaultSecretNamespace = "openshift-machine-api" + + // AWS Defaults + defaultAWSCredentialsSecret = "aws-cloud-credentials" + 
defaultAWSInstanceType = "m4.large" + + // Azure Defaults + defaultAzureVMSize = "Standard_D4s_V3" + defaultAzureCredentialsSecret = "azure-cloud-credentials" + defaultAzureOSDiskOSType = "Linux" + defaultAzureOSDiskStorageType = "Premium_LRS" + azureMaxDiskSizeGB = 32768 + + // GCP Defaults + defaultGCPMachineType = "n1-standard-4" + defaultGCPCredentialsSecret = "gcp-cloud-credentials" + defaultGCPDiskSizeGb = 128 + defaultGCPDiskType = "pd-standard" + // https://releases-art-rhcos.svc.ci.openshift.org/art/storage/releases/rhcos-4.6/46.82.202007212240-0/x86_64/meta.json + // https://github.com/openshift/installer/pull/3808 + // https://github.com/openshift/installer/blob/d75bf7ad98124b901ae7e22b5595e0392ed6ea3c/data/data/rhcos.json + defaultGCPDiskImage = "projects/rhcos-cloud/global/images/rhcos-46-82-202007212240-0-gcp-x86-64" + + // vSphere Defaults + defaultVSphereCredentialsSecret = "vsphere-cloud-credentials" + // Minimum vSphere values taken from vSphere reconciler + minVSphereCPU = 2 + minVSphereMemoryMiB = 2048 + // https://docs.openshift.com/container-platform/4.1/installing/installing_vsphere/installing-vsphere.html#minimum-resource-requirements_installing-vsphere + minVSphereDiskGiB = 120 +) + +var ( + // webhookFailurePolicy is ignore so we don't want to block machine lifecycle on the webhook operational aspects. + // This would be particularly problematic for chicken egg issues when bootstrapping a cluster. + webhookFailurePolicy = admissionregistrationv1.Ignore + webhookSideEffects = admissionregistrationv1.SideEffectClassNone +) + +func getInfra() (*osconfigv1.Infrastructure, error) { + cfg, err := ctrl.GetConfig() + if err != nil { + return nil, err + } + client, err := osclientset.NewForConfig(cfg) + if err != nil { + return nil, err + } + infra, err := client.ConfigV1().Infrastructures().Get(context.Background(), "cluster", metav1.GetOptions{}) + if err != nil { + return nil, err + } + return infra, nil +} + +func getDNS() (*osconfigv1.DNS, error) { + cfg, err := ctrl.GetConfig() + if err != nil { + return nil, err + } + client, err := osclientset.NewForConfig(cfg) + if err != nil { + return nil, err + } + dns, err := client.ConfigV1().DNSes().Get(context.Background(), "cluster", metav1.GetOptions{}) + if err != nil { + return nil, err + } + + return dns, nil +} + +type machineAdmissionFn func(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) + +type admissionConfig struct { + clusterID string + dnsDisconnected bool +} + +type admissionHandler struct { + *admissionConfig + webhookOperations machineAdmissionFn + decoder *admission.Decoder +} + +// InjectDecoder injects the decoder. +func (a *admissionHandler) InjectDecoder(d *admission.Decoder) error { + a.decoder = d + return nil +} + +// machineValidatorHandler validates Machine API resources. +// implements type Handler interface. +// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler +type machineValidatorHandler struct { + *admissionHandler +} + +// machineDefaulterHandler defaults Machine API resources. +// implements type Handler interface. +// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler +type machineDefaulterHandler struct { + *admissionHandler +} + +// NewValidator returns a new machineValidatorHandler. 
+func NewMachineValidator() (*machineValidatorHandler, error) { + infra, err := getInfra() + if err != nil { + return nil, err + } + + dns, err := getDNS() + if err != nil { + return nil, err + } + + return createMachineValidator(infra, dns), nil +} + +func createMachineValidator(infra *osconfigv1.Infrastructure, dns *osconfigv1.DNS) *machineValidatorHandler { + admissionConfig := &admissionConfig{ + dnsDisconnected: dns.Spec.PublicZone == nil, + clusterID: infra.Status.InfrastructureName, + } + return &machineValidatorHandler{ + admissionHandler: &admissionHandler{ + admissionConfig: admissionConfig, + webhookOperations: getMachineValidatorOperation(infra.Status.PlatformStatus.Type), + }, + } +} + +func getMachineValidatorOperation(platform osconfigv1.PlatformType) machineAdmissionFn { + switch platform { + case osconfigv1.AWSPlatformType: + return validateAWS + case osconfigv1.AzurePlatformType: + return validateAzure + case osconfigv1.GCPPlatformType: + return validateGCP + case osconfigv1.VSpherePlatformType: + return validateVSphere + default: + // just no-op + return func(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + return true, []string{}, nil + } + } +} + +// NewDefaulter returns a new machineDefaulterHandler. +func NewMachineDefaulter() (*machineDefaulterHandler, error) { + infra, err := getInfra() + if err != nil { + return nil, err + } + + return createMachineDefaulter(infra.Status.PlatformStatus, infra.Status.InfrastructureName), nil +} + +func createMachineDefaulter(platformStatus *osconfigv1.PlatformStatus, clusterID string) *machineDefaulterHandler { + return &machineDefaulterHandler{ + admissionHandler: &admissionHandler{ + admissionConfig: &admissionConfig{clusterID: clusterID}, + webhookOperations: getMachineDefaulterOperation(platformStatus), + }, + } +} + +func getMachineDefaulterOperation(platformStatus *osconfigv1.PlatformStatus) machineAdmissionFn { + switch platformStatus.Type { + case osconfigv1.AWSPlatformType: + region := "" + if platformStatus.AWS != nil { + region = platformStatus.AWS.Region + } + return awsDefaulter{region: region}.defaultAWS + case osconfigv1.AzurePlatformType: + return defaultAzure + case osconfigv1.GCPPlatformType: + return defaultGCP + case osconfigv1.VSpherePlatformType: + return defaultVSphere + default: + // just no-op + return func(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + return true, []string{}, nil + } + } +} + +// NewValidatingWebhookConfiguration creates a validation webhook configuration with configured Machine and MachineSet webhooks +func NewValidatingWebhookConfiguration() *admissionregistrationv1.ValidatingWebhookConfiguration { + validatingWebhookConfiguration := &admissionregistrationv1.ValidatingWebhookConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: defaultWebhookConfigurationName, + Annotations: map[string]string{ + "service.beta.openshift.io/inject-cabundle": "true", + }, + }, + Webhooks: []admissionregistrationv1.ValidatingWebhook{ + MachineValidatingWebhook(), + MachineSetValidatingWebhook(), + }, + } + + // Setting group version is required for testEnv to create unstructured objects, as the new structure sets it on empty strings + // Usual way to populate those values, is to create the resource in the cluster first, which we can't yet do. 
+ validatingWebhookConfiguration.SetGroupVersionKind(admissionregistrationv1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration")) + return validatingWebhookConfiguration +} + +// MachineValidatingWebhook returns validating webhooks for machine to populate the configuration +func MachineValidatingWebhook() admissionregistrationv1.ValidatingWebhook { + serviceReference := admissionregistrationv1.ServiceReference{ + Namespace: defaultWebhookServiceNamespace, + Name: defaultWebhookServiceName, + Path: pointer.StringPtr(DefaultMachineValidatingHookPath), + Port: pointer.Int32Ptr(defaultWebhookServicePort), + } + return admissionregistrationv1.ValidatingWebhook{ + AdmissionReviewVersions: []string{"v1beta1"}, + Name: "validation.machine.machine.openshift.io", + FailurePolicy: &webhookFailurePolicy, + SideEffects: &webhookSideEffects, + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &serviceReference, + }, + Rules: []admissionregistrationv1.RuleWithOperations{ + { + Rule: admissionregistrationv1.Rule{ + APIGroups: []string{machine.GroupName}, + APIVersions: []string{SchemeGroupVersion.Version}, + Resources: []string{"machines"}, + }, + Operations: []admissionregistrationv1.OperationType{ + admissionregistrationv1.Create, + admissionregistrationv1.Update, + }, + }, + }, + } +} + +// MachineSetValidatingWebhook returns validating webhooks for machineSet to populate the configuration +func MachineSetValidatingWebhook() admissionregistrationv1.ValidatingWebhook { + machinesetServiceReference := admissionregistrationv1.ServiceReference{ + Namespace: defaultWebhookServiceNamespace, + Name: defaultWebhookServiceName, + Path: pointer.StringPtr(DefaultMachineSetValidatingHookPath), + Port: pointer.Int32Ptr(defaultWebhookServicePort), + } + return admissionregistrationv1.ValidatingWebhook{ + AdmissionReviewVersions: []string{"v1beta1"}, + Name: "validation.machineset.machine.openshift.io", + FailurePolicy: &webhookFailurePolicy, + SideEffects: &webhookSideEffects, + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &machinesetServiceReference, + }, + Rules: []admissionregistrationv1.RuleWithOperations{ + { + Rule: admissionregistrationv1.Rule{ + APIGroups: []string{machine.GroupName}, + APIVersions: []string{SchemeGroupVersion.Version}, + Resources: []string{"machinesets"}, + }, + Operations: []admissionregistrationv1.OperationType{ + admissionregistrationv1.Create, + admissionregistrationv1.Update, + }, + }, + }, + } +} + +// NewMutatingWebhookConfiguration creates a mutating webhook configuration with configured Machine and MachineSet webhooks +func NewMutatingWebhookConfiguration() *admissionregistrationv1.MutatingWebhookConfiguration { + mutatingWebhookConfiguration := &admissionregistrationv1.MutatingWebhookConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: defaultWebhookConfigurationName, + Annotations: map[string]string{ + "service.beta.openshift.io/inject-cabundle": "true", + }, + }, + Webhooks: []admissionregistrationv1.MutatingWebhook{ + MachineMutatingWebhook(), + MachineSetMutatingWebhook(), + }, + } + + // Setting group version is required for testEnv to create unstructured objects, as the new structure sets it on empty strings + // Usual way to populate those values, is to create the resource in the cluster first, which we can't yet do. 
+ mutatingWebhookConfiguration.SetGroupVersionKind(admissionregistrationv1.SchemeGroupVersion.WithKind("MutatingWebhookConfiguration")) + return mutatingWebhookConfiguration +} + +// MachineMutatingWebhook returns mutating webhooks for machine to apply in configuration +func MachineMutatingWebhook() admissionregistrationv1.MutatingWebhook { + machineServiceReference := admissionregistrationv1.ServiceReference{ + Namespace: defaultWebhookServiceNamespace, + Name: defaultWebhookServiceName, + Path: pointer.StringPtr(DefaultMachineMutatingHookPath), + Port: pointer.Int32Ptr(defaultWebhookServicePort), + } + return admissionregistrationv1.MutatingWebhook{ + AdmissionReviewVersions: []string{"v1beta1"}, + Name: "default.machine.machine.openshift.io", + FailurePolicy: &webhookFailurePolicy, + SideEffects: &webhookSideEffects, + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &machineServiceReference, + }, + Rules: []admissionregistrationv1.RuleWithOperations{ + { + Rule: admissionregistrationv1.Rule{ + APIGroups: []string{machine.GroupName}, + APIVersions: []string{SchemeGroupVersion.Version}, + Resources: []string{"machines"}, + }, + Operations: []admissionregistrationv1.OperationType{ + admissionregistrationv1.Create, + }, + }, + }, + } +} + +// MachineSetMutatingWebhook returns mutating webhook for machineSet to apply in configuration +func MachineSetMutatingWebhook() admissionregistrationv1.MutatingWebhook { + machineSetServiceReference := admissionregistrationv1.ServiceReference{ + Namespace: defaultWebhookServiceNamespace, + Name: defaultWebhookServiceName, + Path: pointer.StringPtr(DefaultMachineSetMutatingHookPath), + Port: pointer.Int32Ptr(defaultWebhookServicePort), + } + return admissionregistrationv1.MutatingWebhook{ + AdmissionReviewVersions: []string{"v1beta1"}, + Name: "default.machineset.machine.openshift.io", + FailurePolicy: &webhookFailurePolicy, + SideEffects: &webhookSideEffects, + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &machineSetServiceReference, + }, + Rules: []admissionregistrationv1.RuleWithOperations{ + { + Rule: admissionregistrationv1.Rule{ + APIGroups: []string{machine.GroupName}, + APIVersions: []string{SchemeGroupVersion.Version}, + Resources: []string{"machinesets"}, + }, + Operations: []admissionregistrationv1.OperationType{ + admissionregistrationv1.Create, + }, + }, + }, + } +} + +func responseWithWarnings(response admission.Response, warnings []string) admission.Response { + response.AdmissionResponse.Warnings = warnings + return response +} + +// Handle handles HTTP requests for admission webhook servers. +func (h *machineValidatorHandler) Handle(ctx context.Context, req admission.Request) admission.Response { + m := &Machine{} + + if err := h.decoder.Decode(req, m); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + klog.V(3).Infof("Validate webhook called for Machine: %s", m.GetName()) + + ok, warnings, errs := h.webhookOperations(m, h.admissionConfig) + if !ok { + return responseWithWarnings(admission.Denied(errs.Error()), warnings) + } + + return responseWithWarnings(admission.Allowed("Machine valid"), warnings) +} + +// Handle handles HTTP requests for admission webhook servers. 
+func (h *machineDefaulterHandler) Handle(ctx context.Context, req admission.Request) admission.Response { + m := &Machine{} + + if err := h.decoder.Decode(req, m); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + klog.V(3).Infof("Mutate webhook called for Machine: %s", m.GetName()) + + // Only enforce the clusterID if it's not set. + // Otherwise a discrepancy on the value would leave the machine orphan + // and would trigger a new machine creation by the machineSet. + // https://bugzilla.redhat.com/show_bug.cgi?id=1857175 + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if _, ok := m.Labels[MachineClusterIDLabel]; !ok { + m.Labels[MachineClusterIDLabel] = h.clusterID + } + + ok, warnings, errs := h.webhookOperations(m, h.admissionConfig) + if !ok { + return responseWithWarnings(admission.Denied(errs.Error()), warnings) + } + + marshaledMachine, err := json.Marshal(m) + if err != nil { + return responseWithWarnings(admission.Errored(http.StatusInternalServerError, err), warnings) + } + return responseWithWarnings(admission.PatchResponseFromRaw(req.Object.Raw, marshaledMachine), warnings) +} + +type awsDefaulter struct { + region string +} + +func (a awsDefaulter) defaultAWS(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Defaulting AWS providerSpec") + + var errs []error + var warnings []string + providerSpec := new(aws.AWSMachineProviderConfig) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + + if providerSpec.InstanceType == "" { + providerSpec.InstanceType = defaultAWSInstanceType + } + + if providerSpec.Placement.Region == "" { + providerSpec.Placement.Region = a.region + } + + if providerSpec.UserDataSecret == nil { + providerSpec.UserDataSecret = &corev1.LocalObjectReference{Name: defaultUserDataSecret} + } + + if providerSpec.CredentialsSecret == nil { + providerSpec.CredentialsSecret = &corev1.LocalObjectReference{Name: defaultAWSCredentialsSecret} + } + + rawBytes, err := json.Marshal(providerSpec) + if err != nil { + errs = append(errs, err) + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + + m.Spec.ProviderSpec.Value = &runtime.RawExtension{Raw: rawBytes} + return true, warnings, nil +} + +func unmarshalInto(m *Machine, providerSpec interface{}) error { + if m.Spec.ProviderSpec.Value == nil { + return field.Required(field.NewPath("providerSpec", "value"), "a value must be provided") + } + + if err := yaml.Unmarshal(m.Spec.ProviderSpec.Value.Raw, &providerSpec); err != nil { + return field.Invalid(field.NewPath("providerSpec", "value"), providerSpec, err.Error()) + } + return nil +} + +func validateAWS(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Validating AWS providerSpec") + + var errs []error + var warnings []string + providerSpec := new(aws.AWSMachineProviderConfig) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + + if providerSpec.AMI.ARN == nil && providerSpec.AMI.Filters == nil && providerSpec.AMI.ID == nil { + errs = append( + errs, + field.Required( + field.NewPath("providerSpec", "ami"), + "expected either providerSpec.ami.arn or providerSpec.ami.filters or providerSpec.ami.id to be populated", + ), + ) + } + + if providerSpec.Placement.Region == "" { + errs = append( + errs, + field.Required( + 
field.NewPath("providerSpec", "placement", "region"), + "expected providerSpec.placement.region to be populated", + ), + ) + } + + if providerSpec.InstanceType == "" { + errs = append( + errs, + field.Required( + field.NewPath("providerSpec", "instanceType"), + "expected providerSpec.instanceType to be populated", + ), + ) + } + + if providerSpec.UserDataSecret == nil { + errs = append( + errs, + field.Required( + field.NewPath("providerSpec", "userDataSecret"), + "expected providerSpec.userDataSecret to be populated", + ), + ) + } + + if providerSpec.CredentialsSecret == nil { + errs = append( + errs, + field.Required( + field.NewPath("providerSpec", "credentialsSecret"), + "expected providerSpec.credentialsSecret to be populated", + ), + ) + } + + if providerSpec.Subnet.ARN == nil && providerSpec.Subnet.ID == nil && providerSpec.Subnet.Filters == nil { + warnings = append( + warnings, + "providerSpec.subnet: No subnet has been provided. Instances may be created in an unexpected subnet and may not join the cluster.", + ) + } + // TODO(alberto): Validate providerSpec.BlockDevices. + // https://github.com/openshift/cluster-api-provider-aws/pull/299#discussion_r433920532 + + switch providerSpec.Tenancy { + case "", aws.DefaultTenancy, aws.DedicatedTenancy, aws.HostTenancy: + // Do nothing, valid values + default: + errs = append( + errs, + field.Invalid( + field.NewPath("providerSpec", "tenancy"), + providerSpec.Tenancy, + fmt.Sprintf("Invalid providerSpec.tenancy, the only allowed options are: %s, %s, %s", aws.DefaultTenancy, aws.DedicatedTenancy, aws.HostTenancy), + ), + ) + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + + return true, warnings, nil +} + +func defaultAzure(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Defaulting Azure providerSpec") + + var errs []error + var warnings []string + providerSpec := new(azure.AzureMachineProviderSpec) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + + if providerSpec.VMSize == "" { + providerSpec.VMSize = defaultAzureVMSize + } + + // Vnet and Subnet need to be provided together by the user + if providerSpec.Vnet == "" && providerSpec.Subnet == "" { + providerSpec.Vnet = defaultAzureVnet(config.clusterID) + providerSpec.Subnet = defaultAzureSubnet(config.clusterID) + } + + if providerSpec.Image == (azure.Image{}) { + providerSpec.Image.ResourceID = defaultAzureImageResourceID(config.clusterID) + } + + if providerSpec.UserDataSecret == nil { + providerSpec.UserDataSecret = &corev1.SecretReference{Name: defaultUserDataSecret} + } else if providerSpec.UserDataSecret.Name == "" { + providerSpec.UserDataSecret.Name = defaultUserDataSecret + } + + if providerSpec.CredentialsSecret == nil { + providerSpec.CredentialsSecret = &corev1.SecretReference{Name: defaultAzureCredentialsSecret, Namespace: defaultSecretNamespace} + } else { + if providerSpec.CredentialsSecret.Namespace == "" { + providerSpec.CredentialsSecret.Namespace = defaultSecretNamespace + } + if providerSpec.CredentialsSecret.Name == "" { + providerSpec.CredentialsSecret.Name = defaultAzureCredentialsSecret + } + } + + rawBytes, err := json.Marshal(providerSpec) + if err != nil { + errs = append(errs, err) + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + + m.Spec.ProviderSpec.Value = &runtime.RawExtension{Raw: rawBytes} + return true, warnings, nil +} + +func 
validateAzure(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Validating Azure providerSpec") + + var errs []error + var warnings []string + providerSpec := new(azure.AzureMachineProviderSpec) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + + if providerSpec.VMSize == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "vmSize"), "vmSize should be set to one of the supported Azure VM sizes")) + } + + if providerSpec.PublicIP && config.dnsDisconnected { + errs = append(errs, field.Forbidden(field.NewPath("providerSpec", "publicIP"), "publicIP is not allowed in Azure disconnected installation")) + } + // Vnet requires Subnet + if providerSpec.Vnet != "" && providerSpec.Subnet == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "subnet"), "must provide a subnet when a virtual network is specified")) + } + + // Subnet requires Vnet + if providerSpec.Subnet != "" && providerSpec.Vnet == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "vnet"), "must provide a virtual network when supplying subnets")) + } + + errs = append(errs, validateAzureImage(providerSpec.Image)...) + + if providerSpec.UserDataSecret == nil { + errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret"), "userDataSecret must be provided")) + } else if providerSpec.UserDataSecret.Name == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret", "name"), "name must be provided")) + } + + if providerSpec.CredentialsSecret == nil { + errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret"), "credentialsSecret must be provided")) + } else { + if providerSpec.CredentialsSecret.Namespace == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret", "namespace"), "namespace must be provided")) + } + if providerSpec.CredentialsSecret.Name == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret", "name"), "name must be provided")) + } + } + + if providerSpec.OSDisk.DiskSizeGB <= 0 || providerSpec.OSDisk.DiskSizeGB >= azureMaxDiskSizeGB { + errs = append(errs, field.Invalid(field.NewPath("providerSpec", "osDisk", "diskSizeGB"), providerSpec.OSDisk.DiskSizeGB, "diskSizeGB must be greater than zero and less than 32768")) + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + return true, warnings, nil +} + +func validateAzureImage(image azure.Image) []error { + errors := []error{} + if image == (azure.Image{}) { + return append(errors, field.Required(field.NewPath("providerSpec", "image"), "an image reference must be provided")) + } + + if image.ResourceID != "" { + if image != (azure.Image{ResourceID: image.ResourceID}) { + return append(errors, field.Required(field.NewPath("providerSpec", "image", "resourceID"), "resourceID is already specified, other fields such as [Offer, Publisher, SKU, Version] should not be set")) + } + return errors + } + + // Resource ID not provided, so Offer, Publisher, SKU and Version are required + if image.Offer == "" { + errors = append(errors, field.Required(field.NewPath("providerSpec", "image", "Offer"), "Offer must be provided")) + } + if image.Publisher == "" { + errors = append(errors, field.Required(field.NewPath("providerSpec", "image", "Publisher"), "Publisher must be provided")) + } + if image.SKU == 
"" { + errors = append(errors, field.Required(field.NewPath("providerSpec", "image", "SKU"), "SKU must be provided")) + } + if image.Version == "" { + errors = append(errors, field.Required(field.NewPath("providerSpec", "image", "Version"), "Version must be provided")) + } + + return errors +} + +func defaultGCP(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Defaulting GCP providerSpec") + + var errs []error + var warnings []string + providerSpec := new(gcp.GCPMachineProviderSpec) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + + if providerSpec.MachineType == "" { + providerSpec.MachineType = defaultGCPMachineType + } + + if len(providerSpec.NetworkInterfaces) == 0 { + providerSpec.NetworkInterfaces = append(providerSpec.NetworkInterfaces, &gcp.GCPNetworkInterface{ + Network: defaultGCPNetwork(config.clusterID), + Subnetwork: defaultGCPSubnetwork(config.clusterID), + }) + } + + providerSpec.Disks = defaultGCPDisks(providerSpec.Disks, config.clusterID) + + if len(providerSpec.Tags) == 0 { + providerSpec.Tags = defaultGCPTags(config.clusterID) + } + + if providerSpec.UserDataSecret == nil { + providerSpec.UserDataSecret = &corev1.LocalObjectReference{Name: defaultUserDataSecret} + } + + if providerSpec.CredentialsSecret == nil { + providerSpec.CredentialsSecret = &corev1.LocalObjectReference{Name: defaultGCPCredentialsSecret} + } + + rawBytes, err := json.Marshal(providerSpec) + if err != nil { + errs = append(errs, err) + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + + m.Spec.ProviderSpec.Value = &runtime.RawExtension{Raw: rawBytes} + return true, warnings, nil +} + +func defaultGCPDisks(disks []*gcp.GCPDisk, clusterID string) []*gcp.GCPDisk { + if len(disks) == 0 { + return []*gcp.GCPDisk{ + { + AutoDelete: true, + Boot: true, + SizeGb: defaultGCPDiskSizeGb, + Type: defaultGCPDiskType, + Image: defaultGCPDiskImage, + }, + } + } + + for _, disk := range disks { + if disk.Type == "" { + disk.Type = defaultGCPDiskType + } + + if disk.Image == "" { + disk.Image = defaultGCPDiskImage + } + } + + return disks +} + +func validateGCP(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Validating GCP providerSpec") + + var errs []error + var warnings []string + providerSpec := new(gcp.GCPMachineProviderSpec) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + + if providerSpec.Region == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "region"), "region is required")) + } + + if !strings.HasPrefix(providerSpec.Zone, providerSpec.Region) { + errs = append(errs, field.Invalid(field.NewPath("providerSpec", "zone"), providerSpec.Zone, fmt.Sprintf("zone not in configured region (%s)", providerSpec.Region))) + } + + if providerSpec.MachineType == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "machineType"), "machineType should be set to one of the supported GCP machine types")) + } + + errs = append(errs, validateGCPNetworkInterfaces(providerSpec.NetworkInterfaces, field.NewPath("providerSpec", "networkInterfaces"))...) + errs = append(errs, validateGCPDisks(providerSpec.Disks, field.NewPath("providerSpec", "disks"))...) 
+ + if len(providerSpec.ServiceAccounts) == 0 { + warnings = append(warnings, "providerSpec.serviceAccounts: no service account provided: nodes may be unable to join the cluster") + } else { + errs = append(errs, validateGCPServiceAccounts(providerSpec.ServiceAccounts, field.NewPath("providerSpec", "serviceAccounts"))...) + } + + if providerSpec.UserDataSecret == nil { + errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret"), "userDataSecret must be provided")) + } else { + if providerSpec.UserDataSecret.Name == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret", "name"), "name must be provided")) + } + } + + if providerSpec.CredentialsSecret == nil { + errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret"), "credentialsSecret must be provided")) + } else { + if providerSpec.CredentialsSecret.Name == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret", "name"), "name must be provided")) + } + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + return true, warnings, nil +} + +func validateGCPNetworkInterfaces(networkInterfaces []*gcp.GCPNetworkInterface, parentPath *field.Path) []error { + if len(networkInterfaces) == 0 { + return []error{field.Required(parentPath, "at least 1 network interface is required")} + } + + var errs []error + for i, ni := range networkInterfaces { + fldPath := parentPath.Index(i) + + if ni.Network == "" { + errs = append(errs, field.Required(fldPath.Child("network"), "network is required")) + } + + if ni.Subnetwork == "" { + errs = append(errs, field.Required(fldPath.Child("subnetwork"), "subnetwork is required")) + } + } + + return errs +} + +func validateGCPDisks(disks []*gcp.GCPDisk, parentPath *field.Path) []error { + if len(disks) == 0 { + return []error{field.Required(parentPath, "at least 1 disk is required")} + } + + var errs []error + for i, disk := range disks { + fldPath := parentPath.Index(i) + + if disk.SizeGb != 0 { + if disk.SizeGb < 16 { + errs = append(errs, field.Invalid(fldPath.Child("sizeGb"), disk.SizeGb, "must be at least 16GB in size")) + } else if disk.SizeGb > 65536 { + errs = append(errs, field.Invalid(fldPath.Child("sizeGb"), disk.SizeGb, "exceeding maximum GCP disk size limit, must be below 65536")) + } + } + + if disk.Type != "" { + diskTypes := sets.NewString("pd-standard", "pd-ssd") + if !diskTypes.Has(disk.Type) { + errs = append(errs, field.NotSupported(fldPath.Child("type"), disk.Type, diskTypes.List())) + } + } + } + + return errs +} + +func validateGCPServiceAccounts(serviceAccounts []gcp.GCPServiceAccount, parentPath *field.Path) []error { + if len(serviceAccounts) != 1 { + return []error{field.Invalid(parentPath, fmt.Sprintf("%d service accounts supplied", len(serviceAccounts)), "exactly 1 service account must be supplied")} + } + + var errs []error + for i, serviceAccount := range serviceAccounts { + fldPath := parentPath.Index(i) + + if serviceAccount.Email == "" { + errs = append(errs, field.Required(fldPath.Child("email"), "email is required")) + } + + if len(serviceAccount.Scopes) == 0 { + errs = append(errs, field.Required(fldPath.Child("scopes"), "at least 1 scope is required")) + } + } + return errs +} + +func defaultVSphere(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Defaulting vSphere providerSpec") + + var errs []error + var warnings []string + providerSpec := 
new(vsphere.VSphereMachineProviderSpec) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + + if providerSpec.UserDataSecret == nil { + providerSpec.UserDataSecret = &corev1.LocalObjectReference{Name: defaultUserDataSecret} + } + + if providerSpec.CredentialsSecret == nil { + providerSpec.CredentialsSecret = &corev1.LocalObjectReference{Name: defaultVSphereCredentialsSecret} + } + + rawBytes, err := json.Marshal(providerSpec) + if err != nil { + errs = append(errs, err) + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + + m.Spec.ProviderSpec.Value = &runtime.RawExtension{Raw: rawBytes} + return true, warnings, nil +} + +func validateVSphere(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Validating vSphere providerSpec") + + var errs []error + var warnings []string + providerSpec := new(vsphere.VSphereMachineProviderSpec) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + + if providerSpec.Template == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "template"), "template must be provided")) + } + + workspaceWarnings, workspaceErrors := validateVSphereWorkspace(providerSpec.Workspace, field.NewPath("providerSpec", "workspace")) + warnings = append(warnings, workspaceWarnings...) + errs = append(errs, workspaceErrors...) + + errs = append(errs, validateVSphereNetwork(providerSpec.Network, field.NewPath("providerSpec", "network"))...) + + if providerSpec.NumCPUs < minVSphereCPU { + warnings = append(warnings, fmt.Sprintf("providerSpec.numCPUs: %d is less than the minimum value (%d): the minimum value will be used instead", providerSpec.NumCPUs, minVSphereCPU)) + } + if providerSpec.MemoryMiB < minVSphereMemoryMiB { + warnings = append(warnings, fmt.Sprintf("providerSpec.memoryMiB: %d is less than the recommended minimum value (%d): nodes may not boot correctly", providerSpec.MemoryMiB, minVSphereMemoryMiB)) + } + if providerSpec.DiskGiB < minVSphereDiskGiB { + warnings = append(warnings, fmt.Sprintf("providerSpec.diskGiB: %d is less than the recommended minimum (%d): nodes may fail to start if disk size is too low", providerSpec.DiskGiB, minVSphereDiskGiB)) + } + + if providerSpec.UserDataSecret == nil { + errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret"), "userDataSecret must be provided")) + } else { + if providerSpec.UserDataSecret.Name == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret", "name"), "name must be provided")) + } + } + + if providerSpec.CredentialsSecret == nil { + errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret"), "credentialsSecret must be provided")) + } else { + if providerSpec.CredentialsSecret.Name == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret", "name"), "name must be provided")) + } + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + return true, warnings, nil +} + +func validateVSphereWorkspace(workspace *vsphere.Workspace, parentPath *field.Path) ([]string, []error) { + if workspace == nil { + return []string{}, []error{field.Required(parentPath, "workspace must be provided")} + } + + var errs []error + var warnings []string + if workspace.Server == "" { + errs = append(errs, 
field.Required(parentPath.Child("server"), "server must be provided")) + } + if workspace.Datacenter == "" { + warnings = append(warnings, fmt.Sprintf("%s: datacenter is unset: if more than one datacenter is present, VMs cannot be created", parentPath.Child("datacenter"))) + } + if workspace.Folder != "" { + expectedPrefix := fmt.Sprintf("/%s/vm/", workspace.Datacenter) + if !strings.HasPrefix(workspace.Folder, expectedPrefix) { + errMsg := fmt.Sprintf("folder must be absolute path: expected prefix %q", expectedPrefix) + errs = append(errs, field.Invalid(parentPath.Child("folder"), workspace.Folder, errMsg)) + } + } + + return warnings, errs +} + +func validateVSphereNetwork(network vsphere.NetworkSpec, parentPath *field.Path) []error { + if len(network.Devices) == 0 { + return []error{field.Required(parentPath.Child("devices"), "at least 1 network device must be provided")} + } + + var errs []error + for i, spec := range network.Devices { + fldPath := parentPath.Child("devices").Index(i) + if spec.NetworkName == "" { + errs = append(errs, field.Required(fldPath.Child("networkName"), "networkName must be provided")) + } + } + + return errs +} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machinehealthcheck_types.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machinehealthcheck_types.go index 21ff73e42c3..f06c85a78c3 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machinehealthcheck_types.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machinehealthcheck_types.go @@ -54,20 +54,24 @@ type MachineHealthCheckSpec struct { // Any farther remediation is only allowed if at most "MaxUnhealthy" machines selected by // "selector" are not healthy. - // +optional + // Expects either a postive integer value or a percentage value. + // Percentage values must be positive whole numbers and are capped at 100%. + // Both 0 and 0% are valid and will block all remediation. + // +kubebuilder:default:="100%" + // +kubebuilder:validation:Pattern="^((100|[0-9]{1,2})%|[0-9]+)$" + // +kubebuilder:validation:Type:=string MaxUnhealthy *intstr.IntOrString `json:"maxUnhealthy,omitempty"` - // It would be preferable for nodeStartupTimeout to be a metav1.Duration, but - // there's no good way to validate the format here. Invalid input would cause - // problems with marshaling, so it's better to just make it a string and - // handle the conversion in the controller. - // - // Intentional blank line to keep this out of the OpenAPI description... - // Machines older than this duration without a node will be considered to have // failed and will be remediated. + // Expects an unsigned duration string of decimal numbers each with optional + // fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". + // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". 
// +optional - NodeStartupTimeout string `json:"nodeStartupTimeout,omitempty"` + // +kubebuilder:default:="10m" + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$" + // +kubebuilder:validation:Type:=string + NodeStartupTimeout metav1.Duration `json:"nodeStartupTimeout,omitempty"` } // UnhealthyCondition represents a Node condition type and value with a timeout @@ -82,15 +86,12 @@ type UnhealthyCondition struct { // +kubebuilder:validation:MinLength=1 Status corev1.ConditionStatus `json:"status"` - // It would be preferable for timeout to be a metav1.Duration, but there's - // no good way to validate the format here. Invalid input would cause - // problems with marshaling, so it's better to just make it a string and - // handle the conversion in the controller. - // - // Intentional blank line to keep this out of the OpenAPI description... - - // +kubebuilder:validation:MinLength=1 - Timeout string `json:"timeout"` + // Expects an unsigned duration string of decimal numbers each with optional + // fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". + // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$" + // +kubebuilder:validation:Type:=string + Timeout metav1.Duration `json:"timeout"` } // MachineHealthCheckStatus defines the observed state of MachineHealthCheck diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_types.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_types.go index e0991ea79a0..f2c3c7e184a 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_types.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_types.go @@ -55,7 +55,7 @@ type MachineSetSpec struct { // Replicas is the number of desired replicas. // This is a pointer to distinguish between explicit zero and unspecified. // Defaults to 1. - // +optional + // +kubebuilder:default=1 Replicas *int32 `json:"replicas,omitempty"` // MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. @@ -110,12 +110,12 @@ const ( // MachineTemplateSpec describes the data needed to create a Machine from a template type MachineTemplateSpec struct { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional ObjectMeta `json:"metadata,omitempty"` // Specification of the desired behavior of the machine. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec MachineSpec `json:"spec,omitempty"` } diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_webhook.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_webhook.go new file mode 100644 index 00000000000..fba62af71a2 --- /dev/null +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_webhook.go @@ -0,0 +1,133 @@ +package v1beta1 + +import ( + "context" + "encoding/json" + "net/http" + + osconfigv1 "github.com/openshift/api/config/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// machineSetValidatorHandler validates MachineSet API resources. +// implements type Handler interface. +// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler +type machineSetValidatorHandler struct { + *admissionHandler +} + +// machineSetDefaulterHandler defaults MachineSet API resources. +// implements type Handler interface. +// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler +type machineSetDefaulterHandler struct { + *admissionHandler +} + +// NewMachineSetValidator returns a new machineSetValidatorHandler. +func NewMachineSetValidator() (*machineSetValidatorHandler, error) { + infra, err := getInfra() + if err != nil { + return nil, err + } + + return createMachineSetValidator(infra.Status.PlatformStatus.Type, infra.Status.InfrastructureName), nil +} + +func createMachineSetValidator(platform osconfigv1.PlatformType, clusterID string) *machineSetValidatorHandler { + return &machineSetValidatorHandler{ + admissionHandler: &admissionHandler{ + admissionConfig: &admissionConfig{clusterID: clusterID}, + webhookOperations: getMachineValidatorOperation(platform), + }, + } +} + +// NewMachineSetDefaulter returns a new machineSetDefaulterHandler. +func NewMachineSetDefaulter() (*machineSetDefaulterHandler, error) { + infra, err := getInfra() + if err != nil { + return nil, err + } + + return createMachineSetDefaulter(infra.Status.PlatformStatus, infra.Status.InfrastructureName), nil +} + +func createMachineSetDefaulter(platformStatus *osconfigv1.PlatformStatus, clusterID string) *machineSetDefaulterHandler { + return &machineSetDefaulterHandler{ + admissionHandler: &admissionHandler{ + admissionConfig: &admissionConfig{clusterID: clusterID}, + webhookOperations: getMachineDefaulterOperation(platformStatus), + }, + } +} + +// Handle handles HTTP requests for admission webhook servers. +func (h *machineSetValidatorHandler) Handle(ctx context.Context, req admission.Request) admission.Response { + ms := &MachineSet{} + + if err := h.decoder.Decode(req, ms); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + klog.V(3).Infof("Validate webhook called for MachineSet: %s", ms.GetName()) + + ok, warnings, errs := h.validateMachineSet(ms) + if !ok { + return responseWithWarnings(admission.Denied(errs.Error()), warnings) + } + + return responseWithWarnings(admission.Allowed("MachineSet valid"), warnings) +} + +// Handle handles HTTP requests for admission webhook servers. 
+func (h *machineSetDefaulterHandler) Handle(ctx context.Context, req admission.Request) admission.Response { + ms := &MachineSet{} + + if err := h.decoder.Decode(req, ms); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + klog.V(3).Infof("Mutate webhook called for MachineSet: %s", ms.GetName()) + + ok, warnings, errs := h.defaultMachineSet(ms) + if !ok { + return responseWithWarnings(admission.Denied(errs.Error()), warnings) + } + + marshaledMachineSet, err := json.Marshal(ms) + if err != nil { + return responseWithWarnings(admission.Errored(http.StatusInternalServerError, err), warnings) + } + return responseWithWarnings(admission.PatchResponseFromRaw(req.Object.Raw, marshaledMachineSet), warnings) +} + +func (h *machineSetValidatorHandler) validateMachineSet(ms *MachineSet) (bool, []string, utilerrors.Aggregate) { + var errs []error + + // Create a Machine from the MachineSet and validate the Machine template + m := &Machine{Spec: ms.Spec.Template.Spec} + ok, warnings, err := h.webhookOperations(m, h.admissionConfig) + if !ok { + errs = append(errs, err.Errors()...) + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + return true, warnings, nil +} + +func (h *machineSetDefaulterHandler) defaultMachineSet(ms *MachineSet) (bool, []string, utilerrors.Aggregate) { + // Create a Machine from the MachineSet and default the Machine template + m := &Machine{Spec: ms.Spec.Template.Spec} + ok, warnings, err := h.webhookOperations(m, h.admissionConfig) + if !ok { + return false, warnings, utilerrors.NewAggregate(err.Errors()) + } + + // Restore the defaulted template + ms.Spec.Template.Spec = m.Spec + return true, warnings, nil +} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go index f13a4dbb27c..7a79e549b3e 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go @@ -16,15 +16,16 @@ * Copyright 2019 Red Hat, Inc. * */ -// Code generated by deepcopy-gen. DO NOT EDIT. + +// Code generated by controller-gen. DO NOT EDIT. package v1beta1 import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - intstr "k8s.io/apimachinery/pkg/util/intstr" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -49,7 +50,6 @@ func (in *LastOperation) DeepCopyInto(out *LastOperation) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation. @@ -69,7 +69,6 @@ func (in *Machine) DeepCopyInto(out *Machine) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine. 
@@ -97,7 +96,6 @@ func (in *MachineHealthCheck) DeepCopyInto(out *MachineHealthCheck) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheck. @@ -130,7 +128,6 @@ func (in *MachineHealthCheckList) DeepCopyInto(out *MachineHealthCheckList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckList. @@ -165,7 +162,7 @@ func (in *MachineHealthCheckSpec) DeepCopyInto(out *MachineHealthCheckSpec) { *out = new(intstr.IntOrString) **out = **in } - return + out.NodeStartupTimeout = in.NodeStartupTimeout } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckSpec. @@ -191,7 +188,6 @@ func (in *MachineHealthCheckStatus) DeepCopyInto(out *MachineHealthCheckStatus) *out = new(int) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckStatus. @@ -216,7 +212,6 @@ func (in *MachineList) DeepCopyInto(out *MachineList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineList. @@ -244,7 +239,6 @@ func (in *MachineSet) DeepCopyInto(out *MachineSet) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSet. @@ -277,7 +271,6 @@ func (in *MachineSetList) DeepCopyInto(out *MachineSetList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetList. @@ -308,7 +301,6 @@ func (in *MachineSetSpec) DeepCopyInto(out *MachineSetSpec) { } in.Selector.DeepCopyInto(&out.Selector) in.Template.DeepCopyInto(&out.Template) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetSpec. @@ -334,7 +326,6 @@ func (in *MachineSetStatus) DeepCopyInto(out *MachineSetStatus) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetStatus. @@ -353,7 +344,7 @@ func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Taints != nil { in, out := &in.Taints, &out.Taints - *out = make([]v1.Taint, len(*in)) + *out = make([]corev1.Taint, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -364,7 +355,6 @@ func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSpec. 
@@ -382,7 +372,7 @@ func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { *out = *in if in.NodeRef != nil { in, out := &in.NodeRef, &out.NodeRef - *out = new(v1.ObjectReference) + *out = new(corev1.ObjectReference) **out = **in } if in.LastUpdated != nil { @@ -406,7 +396,7 @@ func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { } if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses - *out = make([]v1.NodeAddress, len(*in)) + *out = make([]corev1.NodeAddress, len(*in)) copy(*out, *in) } if in.LastOperation != nil { @@ -419,7 +409,6 @@ func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatus. @@ -437,7 +426,6 @@ func (in *MachineTemplateSpec) DeepCopyInto(out *MachineTemplateSpec) { *out = *in in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTemplateSpec. @@ -469,12 +457,11 @@ func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { } if in.OwnerReferences != nil { in, out := &in.OwnerReferences, &out.OwnerReferences - *out = make([]metav1.OwnerReference, len(*in)) + *out = make([]v1.OwnerReference, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta. @@ -495,7 +482,6 @@ func (in *ProviderSpec) DeepCopyInto(out *ProviderSpec) { *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderSpec. @@ -511,7 +497,7 @@ func (in *ProviderSpec) DeepCopy() *ProviderSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UnhealthyCondition) DeepCopyInto(out *UnhealthyCondition) { *out = *in - return + out.Timeout = in.Timeout } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnhealthyCondition. diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/apis.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/apis.go index 609a8391cb6..3b340c70b7b 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/apis.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/apis.go @@ -1,5 +1,7 @@ // Generate deepcopy for apis //go:generate go run ../../../vendor/sigs.k8s.io/controller-tools/cmd/controller-gen paths=./... object:headerFile=../../../hack/boilerplate.go.txt,year=2019 +// Ensure generated code is goimports compliant +//go:generate goimports -w ./v1beta1/zz_generated.deepcopy.go // Package apis contains Kubernetes API groups. 
package apis diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/register.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/register.go index 04fb4543fc3..741becbf38c 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/register.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/register.go @@ -12,7 +12,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/klog" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/scheme" "sigs.k8s.io/yaml" ) diff --git a/vendor/github.com/packethost/packngo/.drone.yml b/vendor/github.com/packethost/packngo/.drone.yml new file mode 100644 index 00000000000..522261b234a --- /dev/null +++ b/vendor/github.com/packethost/packngo/.drone.yml @@ -0,0 +1,19 @@ +workspace: + base: /go + path: src/github.com/packethost/packngo + +pipeline: + lint: + image: golang:1.13 + commands: + - make lint BUILD=local + + build: + image: golang:1.13 + commands: + - make build BUILD=local + + test: + image: golang:1.13 + commands: + - make test BUILD=local diff --git a/vendor/github.com/packethost/packngo/.gitignore b/vendor/github.com/packethost/packngo/.gitignore new file mode 100644 index 00000000000..7cc2daadbfd --- /dev/null +++ b/vendor/github.com/packethost/packngo/.gitignore @@ -0,0 +1,30 @@ +### Go template +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.swp + +# IDEs +.idea**/** +.vscode diff --git a/vendor/github.com/packethost/packngo/CHANGELOG.md b/vendor/github.com/packethost/packngo/CHANGELOG.md new file mode 100644 index 00000000000..53b6de27e23 --- /dev/null +++ b/vendor/github.com/packethost/packngo/CHANGELOG.md @@ -0,0 +1,12 @@ +# Changelog + +This project adheres to [Semantic +Versioning](http://semver.org/spec/v2.0.0.html). + +All notable changes to this project will be documented at +. Drafts release notes may be +used to track features that will be available in future releases. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/), +breaking changes, additions, removals, and fixes should be pointed out in the +release notes. diff --git a/vendor/github.com/packethost/packngo/CONTRIBUTING.md b/vendor/github.com/packethost/packngo/CONTRIBUTING.md new file mode 100644 index 00000000000..368e3a4ac9e --- /dev/null +++ b/vendor/github.com/packethost/packngo/CONTRIBUTING.md @@ -0,0 +1,119 @@ +# Contributing + +Thanks for your interest in improving this project! Before we get technical, +make sure you have reviewed the [code of conduct](code-of-conduct.md), +[Developer Certificate of Origin](DCO), and [OWNERS](OWNERS.md) files. Code will +be licensed according to [LICENSE.txt](LICENSE.txt). + +## Pull Requests + +When creating a pull request, please refer to an open issue. If there is no +issue open for the pull request you are creating, please create one. Frequently, +pull requests may be merged or closed while the underlying issue being addressed +is not fully addressed. Issues are a place to discuss the problem in need of a +solution. Pull requests are a place to discuss an implementation of one +particular answer to that problem. 
A pull request may not address all (or any) +of the problems expressed in the issue, so it is important to track these +separately. + +## Code Quality + +### Documentation + +All public functions and variables should include at least a short description +of the functionality they provide. Comments should be formatted according to +. + +Documentation at will be +generated from these comments. + +The documentation provided for packngo fields and functions should be at or +better than the quality provided at . +When the API documentation provides a lengthy description, linking to the +related API documentation will benefit users. + +### Linters + +`golangci-lint` is used to verify that the style of the code remains consistent. + +Before committing, it's a good idea to run `goimports -w .` +([goimports](https://pkg.go.dev/golang.org/x/tools/cmd/goimports?tab=doc)) and +`gofmt -w *.go` ([gofmt](https://golang.org/cmd/gofmt/)). + +`make lint` can be used to verify style before creating a pull request. + +## Building and Testing + +The [Makefile](./Makefile) contains the targets to build, lint and test: + +```sh +make build +make lint +make test +``` + +These normally will be run in a docker image of golang. To run locally, just run +with `BUILD=local`: + +```sh +make build BUILD=local +make lint BUILD=local +make test BUILD=local +``` + +### Acceptance Tests + +If you want to run tests against the actual Equinix Metal API, you must set the +environment variable `PACKNGO_TEST_ACTUAL_API` to a non-empty string and set +`PACKNGO_TEST_RECORDER` to `disabled`. The device tests wait for the device +creation, so it's best to run a few in parallel. + +To run a particular test, you can do + +```sh +PACKNGO_TEST_ACTUAL_API=1 go test -v -run=TestAccDeviceBasic --timeout=2h +``` + +If you want to see HTTP requests, set the `PACKNGO_DEBUG` env var to a non-empty +string, for example: + +```sh +PACKNGO_DEBUG=1 PACKNGO_TEST_ACTUAL_API=1 go test -v -run=TestAccVolumeUpdate +``` + +### Test Fixtures + +By default, `go test ./...` will skip most of the tests unless +`PACKNGO_TEST_ACTUAL_API` is non-empty. + +With the `PACKNGO_TEST_ACTUAL_API` environment variable set, tests will be run +against the Equinix Metal API, creating real infrastructure and incurring costs. + +The `PACKNGO_TEST_RECORDER` variable can be used to record and play back API +responses to test code changes without the delay and costs of making actual API +calls. When unset, `PACKNGO_TEST_RECORDER` acts as though it was set to +`disabled`. This is the default behavior. This default behavior may change in +the future once fixtures are available for all tests. + +When `PACKNGO_TEST_RECORDER` is set to `play`, tests will play back API responses +from recorded HTTP response fixtures. This is ideal for refactoring and making +changes to request and response handling without introducing changes to the data +sent or received by the Equinix Metal API. + +When adding support for new end-points, recorded test sessions should be added. +Record the HTTP interactions to fixtures by setting the environment variable +`PACKNGO_TEST_RECORDER` to `record`. + +The fixtures are automatically named according to the test they were run from. +They are placed in `fixtures/`. The API token used during authentication is +automatically removed from these fixtures. Nonetheless, caution should be +exercised before committing any fixtures into the project.
Account details +includes API tokens, contact, and payment details could easily be leaked by +committing fixtures that haven't been thoroughly reviewed. + +### Automation (CI/CD) + +Today, Drone tests pull requests using tests defined in +[.drone.yml](.drone.yml). + +See [RELEASE.md](RELEASE.md) for details on the release process. diff --git a/vendor/github.com/packethost/packngo/DCO b/vendor/github.com/packethost/packngo/DCO new file mode 100644 index 00000000000..068953d4bd9 --- /dev/null +++ b/vendor/github.com/packethost/packngo/DCO @@ -0,0 +1,37 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. + diff --git a/vendor/github.com/packethost/packngo/LICENSE.txt b/vendor/github.com/packethost/packngo/LICENSE.txt new file mode 100644 index 00000000000..57c50110ca2 --- /dev/null +++ b/vendor/github.com/packethost/packngo/LICENSE.txt @@ -0,0 +1,56 @@ +Copyright (c) 2014 The packngo AUTHORS. All rights reserved. + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +====================== +Portions of the client are based on code at: +https://github.com/google/go-github/ and +https://github.com/digitalocean/godo + +Copyright (c) 2013 The go-github AUTHORS. 
All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/packethost/packngo/Makefile b/vendor/github.com/packethost/packngo/Makefile new file mode 100644 index 00000000000..57d98505968 --- /dev/null +++ b/vendor/github.com/packethost/packngo/Makefile @@ -0,0 +1,36 @@ +IMG ?= golang:1.13 + +# enable go modules, disabled CGO + +GOENV ?= GO111MODULE=on CGO_ENABLED=0 +export GO111MODULE=on +export CGO_ENABLED=0 + +# we build in a docker image, unless we are set to BUILD=local +GO ?= docker run --rm -v $(PWD):/app -w /app $(IMG) env $(GOENV) +ifeq ($(BUILD),local) +GO = +endif + + +build: + $(GO) go build -i -v ./... + +golangci-lint: +ifeq (, $(shell which golangci-lint)) + $(GO) go get github.com/golangci/golangci-lint/cmd/golangci-lint@v1.27.0 +endif + +golint: +ifeq (, $(shell which golint)) + $(GO) go get -u golang.org/x/lint/golint +endif + +lint: golint golangci-lint + $(GO) golangci-lint run --disable-all --enable=golint ./... + $(GO) go vet ./... + $(GO) gofmt -d . + +test: + $(GO) test ./... + diff --git a/vendor/github.com/packethost/packngo/OWNERS.md b/vendor/github.com/packethost/packngo/OWNERS.md new file mode 100644 index 00000000000..8b002e98b81 --- /dev/null +++ b/vendor/github.com/packethost/packngo/OWNERS.md @@ -0,0 +1,26 @@ +# Owners + +This project is governed by [Equinix Metal] and benefits from a community of users that +collaborate and contribute to its use in Go powered projects, such as the [Equinix Metal +Terraform provider], [Docker machine driver], Kubernetes drivers for [CSI] and [CCM], +the [Equinix Metal CLI], and others. + +Members of the Equinix Metal Github organization will strive to triage issues in a +timely manner, see [SUPPORT.md] for details. + +See the [packethost/standards glossary] for more details about this file. + +## Maintainers + +Maintainers of this repository are defined within the [CODEOWNERS] file. 
+ +[Equinix Metal]: https://metal.equinix.com +[Equinix Metal Terraform provider]: https://github.com/packethost/terraform-provider-packet +[Docker machine driver]: https://github.com/packethost/docker-machine-driver-packet +[CSI]: https://github.com/packethost/csi-packet +[CCM]: https://github.com/packethost/packet-ccm +[Equinix Metal CLI]: https://github.com/packethost/packet-cli +[SUPPORT.md]: SUPPORT.md +[packethost/standards +glossary]: https://github.com/packethost/standards/blob/master/glossary.md#ownersmd +[CODEOWNERS]: CODEOWNERS diff --git a/vendor/github.com/packethost/packngo/README.md b/vendor/github.com/packethost/packngo/README.md new file mode 100644 index 00000000000..97d37062aab --- /dev/null +++ b/vendor/github.com/packethost/packngo/README.md @@ -0,0 +1,136 @@ +# packngo + +[![](https://img.shields.io/badge/stability-maintained-green.svg)](https://github.com/packethost/standards/blob/master/maintained-statement.md) +[![Release](https://img.shields.io/github/v/release/packethost/packngo)](https://github.com/packethost/packngo/releases/latest) +[![GoDoc](https://godoc.org/github.com/packethost/packngo?status.svg)](https://godoc.org/github.com/packethost/packngo) +[![Go Report Card](https://goreportcard.com/badge/github.com/packethost/packngo)](https://goreportcard.com/report/github.com/packethost/packngo) +[![Slack](https://slack.equinixmetal.com/badge.svg)](https://slack.equinixmetal.com/) +[![Twitter Follow](https://img.shields.io/twitter/follow/equinixmetal.svg?style=social&label=Follow)](https://twitter.com/intent/follow?screen_name=equinixmetal) + +A Golang client for the Equinix Metal API. ([Packet is now Equinix Metal](https://blog.equinix.com/blog/2020/10/06/equinix-metal-metal-and-more/)) + +## Installation + +To import this library into your Go project: + +```go +import "github.com/packethost/packngo" +``` + +Reference a particular version with: + +```sh +go get github.com/packethost/packngo@v0.2.0 +``` + +## Stability and Compatibility + +This repository is [Maintained](https://github.com/packethost/standards/blob/master/maintained-statement.md) meaning that this software is supported by Equinix Metal and its community - available to use in production environments. + +Packngo is currently provided with a major version of [v0](https://blog.golang.org/v2-go-modules). We'll try to avoid breaking changes to this library, but they will certainly happen as we work towards a stable v1 library. See [CHANGELOG.md](CHANGELOG.md) for details on the latest additions, removals, fixes, and breaking changes. + +While packngo provides an interface to most of the [Equinix Metal API](https://metal.equinix.com/developers/api/), the API is regularly adding new features. To request or contribute support for more API end-points or added fields, [create an issue](https://github.com/packethost/packngo/issues/new). + +See [SUPPORT.md](SUPPORT.md) for any other issues. + +## Usage + +To authenticate to the Equinix Metal API, you must have your API token exported in env var `PACKET_AUTH_TOKEN`. 
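+For example, before running any of the snippets below, the token can be exported in your shell. The value shown here is only a placeholder, not a real token:
+
+```sh
+# Placeholder value; substitute your own Equinix Metal (Packet) API token.
+export PACKET_AUTH_TOKEN=example-api-token
+```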
+ +This code snippet initializes an Equinix Metal API client and lists your Projects: + +```go +package main + +import ( + "log" + + "github.com/packethost/packngo" +) + +func main() { + c, err := packngo.NewClient() + if err != nil { + log.Fatal(err) + } + + ps, _, err := c.Projects.List(nil) + if err != nil { + log.Fatal(err) + } + for _, p := range ps { + log.Println(p.ID, p.Name) + } +} + +``` + +This library is used by the official [terraform-provider-packet](https://github.com/packethost/terraform-provider-packet). + +You can also learn a lot from the `*_test.go` sources. Almost all our tests touch the Equinix Metal API, so you can see how auth, querying and POSTing work. For example [devices_test.go](devices_test.go). + +
+ +Linked Resources + +### Linked resources in Get\* and List\* functions + +The Equinix Metal API includes references to related entities for a wide selection of resource types, indicated by `href` fields. The Equinix Metal API allows for these entities to be included in the API response, saving the user from making more round-trip API requests. This is useful for linked resources, e.g. members of a project, devices in a project. Similarly, by excluding entities that are included by default, you can reduce the API response time and payload size. + +Control of this behavior is provided through [common attributes](https://metal.equinix.com/developers/api/common-parameters/) that can be used to toggle, by field name, which referenced resources will be included as values in API responses. The API exposes this feature through `?include=` and `?exclude=` query parameters which accept a comma-separated list of field names. These field names can be dotted to reference nested entities. + +Most of the packngo `Get` functions take references to `GetOptions` parameters (or `ListOptions` for `List` functions). These types include an `Includes` and `Excludes` slice that will be converted to query parameters upon request. + +For example, if you want to list users in a project, you can fetch the project via a `Projects.Get(pid, nil)` call. The result of this call will be a `Project` which has a `Users []User` attribute. The items in the `[]User` slice only have a non-zero URL attribute; the rest of the fields will be type defaults. You can then parse the ID of the User resources and fetch them subsequently. + +Optionally, you can use the ListOptions struct in the project fetch call to include the Users (`members` JSON tag). Then, every item in the `[]User` slice will have all (not only the `Href`) attributes populated. + +```go +Projects.Get(pid, &packngo.ListOptions{Includes: []string{"members"}}) +``` + +The following is a more comprehensive illustration of Includes and Excludes. + +```go +package main + +import ( + "log" + + "github.com/packethost/packngo" +) + +func listProjectsAndUsers(lo *packngo.ListOptions) { + c, err := packngo.NewClient() + if err != nil { + log.Fatal(err) + } + + ps, _, err := c.Projects.List(lo) + if err != nil { + log.Fatal(err) + } + log.Printf("Listing for listOptions %+v\n", lo) + for _, p := range ps { + log.Printf("project resource %s has %d users", p.Name, len(p.Users)) + for _, u := range p.Users { + if u.Email != "" && u.FullName != "" { + log.Printf(" user %s has email %s\n", u.FullName, u.Email) + } else { + log.Printf(" only got user link %s\n", u.URL) + } + } + } +} + +func main() { + loMembers := &packngo.ListOptions{Includes: []string{"members"}} + loMembersOut := &packngo.ListOptions{Excludes: []string{"members"}} + listProjectsAndUsers(loMembers) + listProjectsAndUsers(nil) + listProjectsAndUsers(loMembersOut) +} +``` + +
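+The same `Includes` mechanism applies to the `GetOptions`-based getters, for example the API key service vendored later in this change (`apikeys.go`). The snippet below is only a sketch: it assumes the packngo `Client` exposes that service as `APIKeys`, and the project and API key IDs are placeholders.
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/packethost/packngo"
+)
+
+func main() {
+	c, err := packngo.NewClient()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Hypothetical identifiers; substitute real project and API key UUIDs.
+	projectID := "00000000-0000-0000-0000-000000000000"
+	apiKeyID := "11111111-1111-1111-1111-111111111111"
+
+	// Ask the API to include the linked project entity so key.Project is
+	// populated rather than left as an href-only reference.
+	key, err := c.APIKeys.ProjectGet(projectID, apiKeyID, &packngo.GetOptions{
+		Includes: []string{"project"},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	if key.Project != nil {
+		log.Println(key.ID, key.Description, key.Project.Name)
+	}
+}
+```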
+ + +## Contributing + +See [CONTRIBUTING.md](CONTRIBUTING.md). diff --git a/vendor/github.com/packethost/packngo/RELEASE.md b/vendor/github.com/packethost/packngo/RELEASE.md new file mode 100644 index 00000000000..5b3626a4946 --- /dev/null +++ b/vendor/github.com/packethost/packngo/RELEASE.md @@ -0,0 +1,49 @@ +# Releases + +This file serves to provide guidance and act as a checklist for any maintainers +to this project, now or in the future. This file should be updated with any +changes to the process. Automated processes should be described well enough that +they can be run in the absence of that automation. + +* See [CHANGELOG.md](CHANGELOG.md) for notes on versioning. +* Fetch the latest origin branches: + + ```sh + git fetch origin + git checkout master + git pull + ``` + +* Verify that your branch matches the upstream branch: + + ```sh + git branch --points-at=master -r | grep origin/master >/dev/null || echo "master differs from origin/master" + ``` + +* Update the `libraryVersion` constant. This is a library, so we can not assure + that a build flag will be used in every client that provides a compile time + value, let alone the correct one. + + ```sh + vim packngo.go # change libraryVersion, "0.3.0" (no v) + git commit --signoff -m 'v0.3.0 version bump' packngo.go + ``` + +* Tag `master` with a semver tag that suits the level of changes + introduced: + + ```sh + git tag -m "v0.3.0" -a v0.3.0 master # use -s if gpg is available + ``` +* Push the tag: + + ```sh + git push --tags origin master v0.3.0 + ``` +* Create a release from the tag (include a keepachangelog.com formatted description): + + (use the correct + version) + +Releases can be followed through the GitHub Atom feed at +. diff --git a/vendor/github.com/packethost/packngo/SUPPORT.md b/vendor/github.com/packethost/packngo/SUPPORT.md new file mode 100644 index 00000000000..86f8401f065 --- /dev/null +++ b/vendor/github.com/packethost/packngo/SUPPORT.md @@ -0,0 +1,17 @@ +# Support + +Please [open a GitHub Issue] stating any problems that you encounter using this +software. Be sure to leave a descriptive report of the problem, including the +environment you were running in and the expected behavior. Be careful not to +include any sensitive information such as API Keys, IP addresses or their +ranges, or any resource identifiers (UUID or ID) of your organizations, +projects, or devices. + +As an open-source project, the priority, timing, or eventual resolution is not +guaranteed. Issues will be addressed based on priorities that may or may not +be reflected in GitHub or issue comments. + +For other forms of support, including our Slack community, visit +.
+ +[open a GitHub Issue]: https://github.com/packethost/packngo/issues/new \ No newline at end of file diff --git a/vendor/github.com/packethost/packngo/apikeys.go b/vendor/github.com/packethost/packngo/apikeys.go new file mode 100644 index 00000000000..fcce1f68ecd --- /dev/null +++ b/vendor/github.com/packethost/packngo/apikeys.go @@ -0,0 +1,183 @@ +package packngo + +import ( + "fmt" +) + +const ( + apiKeyUserBasePath = "/user/api-keys" + apiKeyProjectBasePath = "/projects/%s/api-keys" +) + +// APIKeyService interface defines available device methods +type APIKeyService interface { + UserList(*ListOptions) ([]APIKey, *Response, error) + ProjectList(string, *ListOptions) ([]APIKey, *Response, error) + UserGet(string, *GetOptions) (*APIKey, error) + ProjectGet(string, string, *GetOptions) (*APIKey, error) + Create(*APIKeyCreateRequest) (*APIKey, *Response, error) + Delete(string) (*Response, error) +} + +type apiKeyRoot struct { + APIKeys []APIKey `json:"api_keys"` +} + +type APIKey struct { + // ID is the UUIDv4 representing an API key in API requests and responses. + ID string `json:"id"` + + // Description is any text description of the key. This can be used to + // describe the purpose of the key. + Description string `json:"description"` + + // Token is a sensitive credential that can be used as a `Client.APIKey` to + // access Equinix Metal resources. + Token string `json:"token"` + + // ReadOnly keys can not create new resources. + ReadOnly bool `json:"read_only"` + + // Created is the creation date of the API key. + Created string `json:"created_at"` + + // Updated is the last-update date of the API key. + Updated string `json:"updated_at"` + + // User will be non-nil when getting or listing an User API key. + User *User `json:"user"` + + // Project will be non-nil when getting or listing a Project API key + Project *Project `json:"project"` +} + +// APIKeyCreateRequest type used to create an api key. +type APIKeyCreateRequest struct { + // Description is any text description of the key. This can be used to + // describe the purpose of the key. + Description string `json:"description"` + + // ReadOnly keys can not create new resources. + ReadOnly bool `json:"read_only"` + + // ProjectID when non-empty will result in the creation of a Project API + // key. + ProjectID string `json:"-"` +} + +func (s APIKeyCreateRequest) String() string { + return Stringify(s) +} + +// APIKeyServiceOp implements APIKeyService +type APIKeyServiceOp struct { + client *Client +} + +func (s *APIKeyServiceOp) list(url string, lopts *ListOptions) ([]APIKey, *Response, error) { + root := new(apiKeyRoot) + params := urlQuery(lopts) + paramURL := fmt.Sprintf("%s?%s", url, params) + + resp, err := s.client.DoRequest("GET", paramURL, nil, root) + if err != nil { + return nil, resp, err + } + + return root.APIKeys, resp, err +} + +// ProjectList lists the API keys associated with a project having `projectID` +// match `Project.ID`. +func (s *APIKeyServiceOp) ProjectList(projectID string, lopts *ListOptions) ([]APIKey, *Response, error) { + return s.list(fmt.Sprintf(apiKeyProjectBasePath, projectID), lopts) +} + +// UserList returns the API keys for the User associated with the +// `Client.APIKey`. +// +// When `Client.APIKey` is a Project API key, this method will return an access +// denied error. +func (s *APIKeyServiceOp) UserList(lopts *ListOptions) ([]APIKey, *Response, error) { + return s.list(apiKeyUserBasePath, lopts) +} + +// ProjectGet returns the Project API key with the given `APIKey.ID`. 
+// +// In other methods, it is typical for a Response to be returned, which could +// include a StatusCode of `http.StatusNotFound` (404 error) when the resource +// was not found. The Equinix Metal API does not expose a get by ID endpoint for +// APIKeys. That is why in this method, all API keys are listed and compared +// for a match. Therefor, the Response is not returned and a custom error will +// be returned when the key is not found. +func (s *APIKeyServiceOp) ProjectGet(projectID, apiKeyID string, getOpt *GetOptions) (*APIKey, error) { + var lopts *ListOptions + if getOpt != nil { + lopts = &ListOptions{Includes: getOpt.Includes, Excludes: getOpt.Excludes} + } + pkeys, _, err := s.ProjectList(projectID, lopts) + if err != nil { + return nil, err + } + for _, k := range pkeys { + if k.ID == apiKeyID { + return &k, nil + } + } + return nil, fmt.Errorf("Project (%s) API key %s not found", projectID, apiKeyID) +} + +// UserGet returns the User API key with the given `APIKey.ID`. +// +// In other methods, it is typical for a Response to be returned, which could +// include a StatusCode of `http.StatusNotFound` (404 error) when the resource +// was not found. The Equinix Metal API does not expose a get by ID endpoint for +// APIKeys. That is why in this method, all API keys are listed and compared +// for a match. Therefor, the Response is not returned and a custom error will +// be returned when the key is not found. +func (s *APIKeyServiceOp) UserGet(apiKeyID string, getOpt *GetOptions) (*APIKey, error) { + var lopts *ListOptions + if getOpt != nil { + lopts = &ListOptions{Includes: getOpt.Includes, Excludes: getOpt.Excludes} + } + ukeys, _, err := s.UserList(lopts) + if err != nil { + return nil, err + } + for _, k := range ukeys { + if k.ID == apiKeyID { + return &k, nil + } + } + return nil, fmt.Errorf("User API key %s not found", apiKeyID) +} + +// Create creates a new API key. +// +// The API key can be either an User API key or a Project API key, determined by +// the value (or emptiness) of `APIKeyCreateRequest.ProjectID`. Either `User` or +// `Project` will be non-nil in the `APIKey` depending on this factor. +func (s *APIKeyServiceOp) Create(createRequest *APIKeyCreateRequest) (*APIKey, *Response, error) { + path := apiKeyUserBasePath + if createRequest.ProjectID != "" { + path = fmt.Sprintf(apiKeyProjectBasePath, createRequest.ProjectID) + } + apiKey := new(APIKey) + + resp, err := s.client.DoRequest("POST", path, createRequest, apiKey) + if err != nil { + return nil, resp, err + } + + return apiKey, resp, err +} + +// Delete deletes an API key by `APIKey.ID` +// +// The API key can be either an User API key or a Project API key. +// +// Project API keys can not be used to delete themselves. 
+func (s *APIKeyServiceOp) Delete(apiKeyID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", apiKeyUserBasePath, apiKeyID) + return s.client.DoRequest("DELETE", path, nil, nil) +} diff --git a/vendor/github.com/packethost/packngo/batches.go b/vendor/github.com/packethost/packngo/batches.go new file mode 100644 index 00000000000..8e79ceaf88a --- /dev/null +++ b/vendor/github.com/packethost/packngo/batches.go @@ -0,0 +1,97 @@ +package packngo + +import ( + "fmt" +) + +const batchBasePath = "/batches" + +// BatchService interface defines available batch methods +type BatchService interface { + Get(batchID string, getOpt *GetOptions) (*Batch, *Response, error) + List(ProjectID string, listOpt *ListOptions) ([]Batch, *Response, error) + Create(projectID string, batches *BatchCreateRequest) ([]Batch, *Response, error) + Delete(string, bool) (*Response, error) +} + +// Batch type +type Batch struct { + ID string `json:"id"` + State string `json:"state,omitempty"` + Quantity int32 `json:"quantity,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + Href string `json:"href,omitempty"` + Project Href `json:"project,omitempty"` + Devices []Device `json:"devices,omitempty"` +} + +//BatchesList represents collection of batches +type batchesList struct { + Batches []Batch `json:"batches,omitempty"` +} + +// BatchCreateRequest type used to create batch of device instances +type BatchCreateRequest struct { + Batches []BatchCreateDevice `json:"batches"` +} + +// BatchCreateDevice type used to describe batch instances +type BatchCreateDevice struct { + DeviceCreateRequest + Quantity int32 `json:"quantity"` + FacilityDiversityLevel int32 `json:"facility_diversity_level,omitempty"` +} + +// BatchServiceOp implements BatchService +type BatchServiceOp struct { + client *Client +} + +// Get returns batch details +func (s *BatchServiceOp) Get(batchID string, getOpt *GetOptions) (*Batch, *Response, error) { + params := urlQuery(getOpt) + path := fmt.Sprintf("%s/%s?%s", batchBasePath, batchID, params) + batch := new(Batch) + + resp, err := s.client.DoRequest("GET", path, nil, batch) + if err != nil { + return nil, resp, err + } + + return batch, resp, err +} + +// List returns batches on a project +func (s *BatchServiceOp) List(projectID string, listOpt *ListOptions) (batches []Batch, resp *Response, err error) { + params := urlQuery(listOpt) + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, projectID, batchBasePath, params) + subset := new(batchesList) + resp, err = s.client.DoRequest("GET", path, nil, subset) + if err != nil { + return nil, resp, err + } + + batches = append(batches, subset.Batches...) 
+ return batches, resp, err +} + +// Create function to create batch of device instances +func (s *BatchServiceOp) Create(projectID string, request *BatchCreateRequest) ([]Batch, *Response, error) { + path := fmt.Sprintf("%s/%s/devices/batch", projectBasePath, projectID) + + batches := new(batchesList) + resp, err := s.client.DoRequest("POST", path, request, batches) + + if err != nil { + return nil, resp, err + } + + return batches.Batches, resp, err +} + +// Delete function to remove an instance batch +func (s *BatchServiceOp) Delete(id string, removeDevices bool) (*Response, error) { + path := fmt.Sprintf("%s/%s?remove_associated_instances=%t", batchBasePath, id, removeDevices) + + return s.client.DoRequest("DELETE", path, nil, nil) +} diff --git a/vendor/github.com/packethost/packngo/bgp_configs.go b/vendor/github.com/packethost/packngo/bgp_configs.go new file mode 100644 index 00000000000..10d602186fa --- /dev/null +++ b/vendor/github.com/packethost/packngo/bgp_configs.go @@ -0,0 +1,81 @@ +package packngo + +import "fmt" + +var bgpConfigBasePath = "/bgp-config" + +// BGPConfigService interface defines available BGP config methods +type BGPConfigService interface { + Get(projectID string, getOpt *GetOptions) (*BGPConfig, *Response, error) + Create(projectID string, request CreateBGPConfigRequest) (*Response, error) + // Delete(configID string) (resp *Response, err error) TODO: Not in Equinix Metal API +} + +// BGPConfigServiceOp implements BgpConfigService +type BGPConfigServiceOp struct { + client *Client +} + +// CreateBGPConfigRequest struct +type CreateBGPConfigRequest struct { + DeploymentType string `json:"deployment_type,omitempty"` + Asn int `json:"asn,omitempty"` + Md5 string `json:"md5,omitempty"` + UseCase string `json:"use_case,omitempty"` +} + +// BGPConfig represents an Equinix Metal BGP Config +type BGPConfig struct { + ID string `json:"id,omitempty"` + Status string `json:"status,omitempty"` + DeploymentType string `json:"deployment_type,omitempty"` + Asn int `json:"asn,omitempty"` + RouteObject string `json:"route_object,omitempty"` + Md5 string `json:"md5,omitempty"` + MaxPrefix int `json:"max_prefix,omitempty"` + Project Project `json:"project,omitempty"` + CreatedAt Timestamp `json:"created_at,omitempty"` + RequestedAt Timestamp `json:"requested_at,omitempty"` + Sessions []BGPSession `json:"sessions,omitempty"` + Href string `json:"href,omitempty"` +} + +// Create function +func (s *BGPConfigServiceOp) Create(projectID string, request CreateBGPConfigRequest) (*Response, error) { + path := fmt.Sprintf("%s/%s%ss", projectBasePath, projectID, bgpConfigBasePath) + + resp, err := s.client.DoRequest("POST", path, request, nil) + if err != nil { + return resp, err + } + + return resp, err +} + +// Get function +func (s *BGPConfigServiceOp) Get(projectID string, getOpt *GetOptions) (bgpConfig *BGPConfig, resp *Response, err error) { + params := urlQuery(getOpt) + + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, projectID, bgpConfigBasePath, params) + + subset := new(BGPConfig) + + resp, err = s.client.DoRequest("GET", path, nil, subset) + if err != nil { + return nil, resp, err + } + + return subset, resp, err +} + +// Delete function TODO: this is not implemented in the Equinix Metal API +// func (s *BGPConfigServiceOp) Delete(configID string) (resp *Response, err error) { +// path := fmt.Sprintf("%ss/%s", bgpConfigBasePath, configID) + +// resp, err = s.client.DoRequest("DELETE", path, nil, nil) +// if err != nil { +// return resp, err +// } + +// return resp, err +// 
} diff --git a/vendor/github.com/packethost/packngo/bgp_sessions.go b/vendor/github.com/packethost/packngo/bgp_sessions.go new file mode 100644 index 00000000000..952cb330254 --- /dev/null +++ b/vendor/github.com/packethost/packngo/bgp_sessions.go @@ -0,0 +1,97 @@ +package packngo + +import "fmt" + +var bgpSessionBasePath = "/bgp/sessions" +var bgpNeighborsBasePath = "/bgp/neighbors" + +// BGPSessionService interface defines available BGP session methods +type BGPSessionService interface { + Get(string, *GetOptions) (*BGPSession, *Response, error) + Create(string, CreateBGPSessionRequest) (*BGPSession, *Response, error) + Delete(string) (*Response, error) +} + +type bgpSessionsRoot struct { + Sessions []BGPSession `json:"bgp_sessions"` + Meta meta `json:"meta"` +} + +// BGPSessionServiceOp implements BgpSessionService +type BGPSessionServiceOp struct { + client *Client +} + +// BGPSession represents an Equinix Metal BGP Session +type BGPSession struct { + ID string `json:"id,omitempty"` + Status string `json:"status,omitempty"` + LearnedRoutes []string `json:"learned_routes,omitempty"` + AddressFamily string `json:"address_family,omitempty"` + Device Device `json:"device,omitempty"` + Href string `json:"href,omitempty"` + DefaultRoute *bool `json:"default_route,omitempty"` +} + +type bgpNeighborsRoot struct { + BGPNeighbors []BGPNeighbor `json:"bgp_neighbors"` +} + +// BGPNeighor is struct for listing BGP neighbors of a device +type BGPNeighbor struct { + AddressFamily int `json:"address_family"` + CustomerAs int `json:"customer_as"` + CustomerIP string `json:"customer_ip"` + Md5Enabled bool `json:"md5_enabled"` + Md5Password string `json:"md5_password"` + Multihop bool `json:"multihop"` + PeerAs int `json:"peer_as"` + PeerIps []string `json:"peer_ips"` + RoutesIn []BGPRoute `json:"routes_in"` + RoutesOut []BGPRoute `json:"routes_out"` +} + +// BGPRoute is a struct for Route in BGP neighbor listing +type BGPRoute struct { + Route string `json:"route"` + Exact bool `json:"exact"` +} + +// CreateBGPSessionRequest struct +type CreateBGPSessionRequest struct { + AddressFamily string `json:"address_family"` + DefaultRoute *bool `json:"default_route,omitempty"` +} + +// Create function +func (s *BGPSessionServiceOp) Create(deviceID string, request CreateBGPSessionRequest) (*BGPSession, *Response, error) { + path := fmt.Sprintf("%s/%s%s", deviceBasePath, deviceID, bgpSessionBasePath) + session := new(BGPSession) + + resp, err := s.client.DoRequest("POST", path, request, session) + if err != nil { + return nil, resp, err + } + + return session, resp, err +} + +// Delete function +func (s *BGPSessionServiceOp) Delete(id string) (*Response, error) { + path := fmt.Sprintf("%s/%s", bgpSessionBasePath, id) + + return s.client.DoRequest("DELETE", path, nil, nil) +} + +// Get function +func (s *BGPSessionServiceOp) Get(id string, getOpt *GetOptions) (session *BGPSession, response *Response, err error) { + params := urlQuery(getOpt) + path := fmt.Sprintf("%s/%s?%s", bgpSessionBasePath, id, params) + session = new(BGPSession) + response, err = s.client.DoRequest("GET", path, nil, session) + if err != nil { + return nil, response, err + } + + return session, response, err +} diff --git a/vendor/github.com/packethost/packngo/billing_address.go b/vendor/github.com/packethost/packngo/billing_address.go new file mode 100644 index 00000000000..93255b32290 --- /dev/null +++ b/vendor/github.com/packethost/packngo/billing_address.go @@ -0,0 +1,7 @@ +package packngo + +type BillingAddress struct { + StreetAddress 
string `json:"street_address,omitempty"` + PostalCode string `json:"postal_code,omitempty"` + CountryCode string `json:"country_code_alpha2,omitempty"` +} diff --git a/vendor/github.com/packethost/packngo/capacities.go b/vendor/github.com/packethost/packngo/capacities.go new file mode 100644 index 00000000000..fa51413be66 --- /dev/null +++ b/vendor/github.com/packethost/packngo/capacities.go @@ -0,0 +1,79 @@ +package packngo + +const capacityBasePath = "/capacity" + +// CapacityService interface defines available capacity methods +type CapacityService interface { + List() (*CapacityReport, *Response, error) + Check(*CapacityInput) (*CapacityInput, *Response, error) +} + +// CapacityInput struct +type CapacityInput struct { + Servers []ServerInfo `json:"servers,omitempty"` +} + +// ServerInfo struct +type ServerInfo struct { + Facility string `json:"facility,omitempty"` + Plan string `json:"plan,omitempty"` + Quantity int `json:"quantity,omitempty"` + Available bool `json:"available,omitempty"` +} + +type capacityRoot struct { + Capacity CapacityReport `json:"capacity,omitempty"` +} + +// CapacityReport map +type CapacityReport map[string]map[string]CapacityPerBaremetal + +// // CapacityPerFacility struct +// type CapacityPerFacility struct { +// T1SmallX86 *CapacityPerBaremetal `json:"t1.small.x86,omitempty"` +// C1SmallX86 *CapacityPerBaremetal `json:"c1.small.x86,omitempty"` +// M1XlargeX86 *CapacityPerBaremetal `json:"m1.xlarge.x86,omitempty"` +// C1XlargeX86 *CapacityPerBaremetal `json:"c1.xlarge.x86,omitempty"` + +// Baremetal0 *CapacityPerBaremetal `json:"baremetal_0,omitempty"` +// Baremetal1 *CapacityPerBaremetal `json:"baremetal_1,omitempty"` +// Baremetal1e *CapacityPerBaremetal `json:"baremetal_1e,omitempty"` +// Baremetal2 *CapacityPerBaremetal `json:"baremetal_2,omitempty"` +// Baremetal2a *CapacityPerBaremetal `json:"baremetal_2a,omitempty"` +// Baremetal2a2 *CapacityPerBaremetal `json:"baremetal_2a2,omitempty"` +// Baremetal3 *CapacityPerBaremetal `json:"baremetal_3,omitempty"` +// } + +// CapacityPerBaremetal struct +type CapacityPerBaremetal struct { + Level string `json:"level,omitempty"` +} + +// CapacityList struct +type CapacityList struct { + Capacity CapacityReport `json:"capacity,omitempty"` +} + +// CapacityServiceOp implements CapacityService +type CapacityServiceOp struct { + client *Client +} + +// List returns a list of facilities and plans with their current capacity. +func (s *CapacityServiceOp) List() (*CapacityReport, *Response, error) { + root := new(capacityRoot) + + resp, err := s.client.DoRequest("GET", capacityBasePath, nil, root) + if err != nil { + return nil, resp, err + } + + return &root.Capacity, nil, nil +} + +// Check validates if a deploy can be fulfilled. +func (s *CapacityServiceOp) Check(input *CapacityInput) (cap *CapacityInput, resp *Response, err error) { + cap = new(CapacityInput) + resp, err = s.client.DoRequest("POST", capacityBasePath, input, cap) + return cap, resp, err +} diff --git a/vendor/github.com/packethost/packngo/code-of-conduct.md b/vendor/github.com/packethost/packngo/code-of-conduct.md new file mode 100644 index 00000000000..3a14e5283c6 --- /dev/null +++ b/vendor/github.com/packethost/packngo/code-of-conduct.md @@ -0,0 +1,3 @@ +# Code Of Conduct + +Please refer to the [Contributor Covenant](https://www.contributor-covenant.org/version/1/2/0/code-of-conduct/). 
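
Editor's note, not part of the patch: the files above and below define the packngo client services (devices, API keys, capacity, and so on) that the installer's Equinix Metal platform code calls. As an illustrative sketch only, assuming the long-standing packngo `NewClientWithAuth` constructor and the `Client.Devices` service field (both defined in the vendored packngo.go, which is only partially shown here) and using a placeholder project UUID, listing a project's devices might look like:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/packethost/packngo"
)

func main() {
	// PACKET_AUTH_TOKEN is the environment variable the vendored client reads for its API token.
	token := os.Getenv("PACKET_AUTH_TOKEN")
	if token == "" {
		log.Fatal("PACKET_AUTH_TOKEN is not set")
	}

	// Passing a nil *http.Client lets the library fall back to its default (retryable) HTTP client.
	client := packngo.NewClientWithAuth("openshift-install", token, nil)

	// DeviceService.List is declared in devices.go below; the project ID here is a hypothetical placeholder.
	projectID := "00000000-0000-0000-0000-000000000000"
	devices, _, err := client.Devices.List(projectID, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range devices {
		fmt.Println(d.ID, d.Hostname, d.State)
	}
}
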
diff --git a/vendor/github.com/packethost/packngo/devices.go b/vendor/github.com/packethost/packngo/devices.go new file mode 100644 index 00000000000..c45595e7a5c --- /dev/null +++ b/vendor/github.com/packethost/packngo/devices.go @@ -0,0 +1,496 @@ +package packngo + +import ( + "fmt" +) + +const deviceBasePath = "/devices" + +const ( + NetworkTypeHybrid = "hybrid" + NetworkTypeL2Bonded = "layer2-bonded" + NetworkTypeL2Individual = "layer2-individual" + NetworkTypeL3 = "layer3" +) + +// DeviceService interface defines available device methods +type DeviceService interface { + List(ProjectID string, listOpt *ListOptions) ([]Device, *Response, error) + Get(DeviceID string, getOpt *GetOptions) (*Device, *Response, error) + Create(*DeviceCreateRequest) (*Device, *Response, error) + Update(string, *DeviceUpdateRequest) (*Device, *Response, error) + Delete(string, bool) (*Response, error) + Reboot(string) (*Response, error) + PowerOff(string) (*Response, error) + PowerOn(string) (*Response, error) + Lock(string) (*Response, error) + Unlock(string) (*Response, error) + ListBGPSessions(deviceID string, listOpt *ListOptions) ([]BGPSession, *Response, error) + ListBGPNeighbors(deviceID string, listOpt *ListOptions) ([]BGPNeighbor, *Response, error) + ListEvents(string, *ListOptions) ([]Event, *Response, error) +} + +type devicesRoot struct { + Devices []Device `json:"devices"` + Meta meta `json:"meta"` +} + +// Device represents an Equinix Metal device from API +type Device struct { + ID string `json:"id"` + Href string `json:"href,omitempty"` + Hostname string `json:"hostname,omitempty"` + Description *string `json:"description,omitempty"` + State string `json:"state,omitempty"` + Created string `json:"created_at,omitempty"` + Updated string `json:"updated_at,omitempty"` + Locked bool `json:"locked,omitempty"` + BillingCycle string `json:"billing_cycle,omitempty"` + Storage *CPR `json:"storage,omitempty"` + Tags []string `json:"tags,omitempty"` + Network []*IPAddressAssignment `json:"ip_addresses"` + Volumes []*Volume `json:"volumes"` + OS *OS `json:"operating_system,omitempty"` + Plan *Plan `json:"plan,omitempty"` + Facility *Facility `json:"facility,omitempty"` + Project *Project `json:"project,omitempty"` + ProvisionEvents []*Event `json:"provisioning_events,omitempty"` + ProvisionPer float32 `json:"provisioning_percentage,omitempty"` + UserData string `json:"userdata,omitempty"` + User string `json:"user,omitempty"` + RootPassword string `json:"root_password,omitempty"` + IPXEScriptURL string `json:"ipxe_script_url,omitempty"` + AlwaysPXE bool `json:"always_pxe,omitempty"` + HardwareReservation Href `json:"hardware_reservation,omitempty"` + SpotInstance bool `json:"spot_instance,omitempty"` + SpotPriceMax float64 `json:"spot_price_max,omitempty"` + TerminationTime *Timestamp `json:"termination_time,omitempty"` + NetworkPorts []Port `json:"network_ports,omitempty"` + CustomData map[string]interface{} `json:"customdata,omitempty"` + SSHKeys []SSHKey `json:"ssh_keys,omitempty"` + ShortID string `json:"short_id,omitempty"` + SwitchUUID string `json:"switch_uuid,omitempty"` +} + +type NetworkInfo struct { + PublicIPv4 string + PublicIPv6 string + PrivateIPv4 string +} + +func (d *Device) GetNetworkInfo() NetworkInfo { + ni := NetworkInfo{} + for _, ip := range d.Network { + // Initial device IPs are fixed and marked as "Management" + if ip.Management { + if ip.AddressFamily == 4 { + if ip.Public { + ni.PublicIPv4 = ip.Address + } else { + ni.PrivateIPv4 = ip.Address + } + } else { + ni.PublicIPv6 = 
ip.Address + } + } + } + return ni +} + +func (d Device) String() string { + return Stringify(d) +} + +func (d *Device) NumOfBonds() int { + numOfBonds := 0 + for _, p := range d.NetworkPorts { + if p.Type == "NetworkBondPort" { + numOfBonds++ + } + } + return numOfBonds +} + +func (d *Device) GetPortsInBond(name string) map[string]*Port { + ports := map[string]*Port{} + for _, port := range d.NetworkPorts { + if port.Bond != nil && port.Bond.Name == name { + p := port + ports[p.Name] = &p + } + } + return ports +} + +func (d *Device) GetBondPorts() map[string]*Port { + ports := map[string]*Port{} + for _, port := range d.NetworkPorts { + if port.Type == "NetworkBondPort" { + p := port + ports[p.Name] = &p + } + } + return ports +} + +func (d *Device) GetPhysicalPorts() map[string]*Port { + ports := map[string]*Port{} + for _, port := range d.NetworkPorts { + if port.Type == "NetworkPort" { + p := port + ports[p.Name] = &p + } + } + return ports +} + +func (d *Device) GetPortByName(name string) (*Port, error) { + for _, port := range d.NetworkPorts { + if port.Name == name { + return &port, nil + } + } + return nil, fmt.Errorf("Port %s not found in device %s", name, d.ID) +} + +type ports map[string]*Port + +func (ports ports) allBonded() bool { + if ports == nil { + return false + } + + if len(ports) == 0 { + return false + } + + for _, p := range ports { + if (p == nil) || (!p.Data.Bonded) { + return false + } + } + return true +} + +func (d *Device) HasManagementIPs() bool { + for _, ip := range d.Network { + if ip.Management { + return true + } + } + return false +} + +// GetNetworkType returns a composite network type identification for a device +// based on the plan, network_type, and IP management state of the device. +// GetNetworkType provides the same composite state rendered in the Packet +// Portal for a given device. 
+func (d *Device) GetNetworkType() string { + if d.Plan != nil { + if d.Plan.Slug == "baremetal_0" || d.Plan.Slug == "baremetal_1" { + return NetworkTypeL3 + } + if d.Plan.Slug == "baremetal_1e" { + return NetworkTypeHybrid + } + } + + bonds := ports(d.GetBondPorts()) + phys := ports(d.GetPhysicalPorts()) + + if bonds.allBonded() { + if phys.allBonded() { + if !d.HasManagementIPs() { + return NetworkTypeL2Bonded + } + return NetworkTypeL3 + } + return NetworkTypeHybrid + } + return NetworkTypeL2Individual +} + +type IPAddressCreateRequest struct { + AddressFamily int `json:"address_family"` + Public bool `json:"public"` + CIDR int `json:"cidr,omitempty"` + Reservations []string `json:"ip_reservations,omitempty"` +} + +// CPR is a struct for custom partitioning and RAID +// If you don't want to bother writing the struct, just write the CPR conf to +// a string and then do +// +// var cpr CPR +// err := json.Unmarshal([]byte(cprString), &cpr) +// if err != nil { +// log.Fatal(err) +// } +type CPR struct { + Disks []struct { + Device string `json:"device"` + WipeTable bool `json:"wipeTable"` + Partitions []struct { + Label string `json:"label"` + Number int `json:"number"` + Size string `json:"size"` + } `json:"partitions"` + } `json:"disks"` + Raid []struct { + Devices []string `json:"devices"` + Level string `json:"level"` + Name string `json:"name"` + } `json:"raid,omitempty"` + Filesystems []struct { + Mount struct { + Device string `json:"device"` + Format string `json:"format"` + Point string `json:"point"` + Create struct { + Options []string `json:"options"` + } `json:"create"` + } `json:"mount"` + } `json:"filesystems"` +} + +// DeviceCreateRequest type used to create an Equinix Metal device +type DeviceCreateRequest struct { + Hostname string `json:"hostname"` + Plan string `json:"plan"` + Facility []string `json:"facility"` + OS string `json:"operating_system"` + BillingCycle string `json:"billing_cycle"` + ProjectID string `json:"project_id"` + UserData string `json:"userdata"` + Storage *CPR `json:"storage,omitempty"` + Tags []string `json:"tags"` + Description string `json:"description,omitempty"` + IPXEScriptURL string `json:"ipxe_script_url,omitempty"` + PublicIPv4SubnetSize int `json:"public_ipv4_subnet_size,omitempty"` + AlwaysPXE bool `json:"always_pxe,omitempty"` + HardwareReservationID string `json:"hardware_reservation_id,omitempty"` + SpotInstance bool `json:"spot_instance,omitempty"` + SpotPriceMax float64 `json:"spot_price_max,omitempty,string"` + TerminationTime *Timestamp `json:"termination_time,omitempty"` + CustomData string `json:"customdata,omitempty"` + // UserSSHKeys is a list of user UUIDs - essentialy a list of + // collaborators. The users must be a collaborator in the same project + // where the device is created. The user's SSH keys then go to the + // device + UserSSHKeys []string `json:"user_ssh_keys,omitempty"` + // Project SSHKeys is a list of SSHKeys resource UUIDs. If this param + // is supplied, only the listed SSHKeys will go to the device. + // Any other Project SSHKeys and any User SSHKeys will not be present + // in the device. 
+ ProjectSSHKeys []string `json:"project_ssh_keys,omitempty"` + Features map[string]string `json:"features,omitempty"` + IPAddresses []IPAddressCreateRequest `json:"ip_addresses,omitempty"` +} + +// DeviceUpdateRequest type used to update an Equinix Metal device +type DeviceUpdateRequest struct { + Hostname *string `json:"hostname,omitempty"` + Description *string `json:"description,omitempty"` + UserData *string `json:"userdata,omitempty"` + Locked *bool `json:"locked,omitempty"` + Tags *[]string `json:"tags,omitempty"` + AlwaysPXE *bool `json:"always_pxe,omitempty"` + IPXEScriptURL *string `json:"ipxe_script_url,omitempty"` + CustomData *string `json:"customdata,omitempty"` +} + +func (d DeviceCreateRequest) String() string { + return Stringify(d) +} + +// DeviceActionRequest type used to execute actions on devices +type DeviceActionRequest struct { + Type string `json:"type"` +} + +type DeviceDeleteRequest struct { + Force bool `json:"force_delete"` +} + +func (d DeviceActionRequest) String() string { + return Stringify(d) +} + +// DeviceServiceOp implements DeviceService +type DeviceServiceOp struct { + client *Client +} + +// List returns devices on a project +// +// Regarding ListOptions.Search: The API documentation does not provide guidance +// on the fields that will be searched using this parameter, so this behavior is +// undefined and prone to change. +// +// As of 2020-10-20, ListOptions.Search will look for matches in the following +// Device properties: Hostname, Description, Tags, ID, ShortID, Network.Address, +// Plan.Name, Plan.Slug, Facility.Code, Facility.Name, OS.Name, OS.Slug, +// HardwareReservation.ID, HardwareReservation.ShortID +func (s *DeviceServiceOp) List(projectID string, listOpt *ListOptions) (devices []Device, resp *Response, err error) { + listOpt = listOpt.Including("facility") + params := urlQuery(listOpt) + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, projectID, deviceBasePath, params) + + for { + subset := new(devicesRoot) + + resp, err = s.client.DoRequest("GET", path, nil, subset) + if err != nil { + return nil, resp, err + } + + devices = append(devices, subset.Devices...) 
+ + if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = subset.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + + return + } +} + +// Get returns a device by id +func (s *DeviceServiceOp) Get(deviceID string, getOpt *GetOptions) (*Device, *Response, error) { + getOpt = getOpt.Including("facility") + params := urlQuery(getOpt) + + path := fmt.Sprintf("%s/%s?%s", deviceBasePath, deviceID, params) + device := new(Device) + resp, err := s.client.DoRequest("GET", path, nil, device) + if err != nil { + return nil, resp, err + } + return device, resp, err +} + +// Create creates a new device +func (s *DeviceServiceOp) Create(createRequest *DeviceCreateRequest) (*Device, *Response, error) { + path := fmt.Sprintf("%s/%s%s", projectBasePath, createRequest.ProjectID, deviceBasePath) + device := new(Device) + + resp, err := s.client.DoRequest("POST", path, createRequest, device) + if err != nil { + return nil, resp, err + } + return device, resp, err +} + +// Update updates an existing device +func (s *DeviceServiceOp) Update(deviceID string, updateRequest *DeviceUpdateRequest) (*Device, *Response, error) { + path := fmt.Sprintf("%s/%s?include=facility", deviceBasePath, deviceID) + device := new(Device) + + resp, err := s.client.DoRequest("PUT", path, updateRequest, device) + if err != nil { + return nil, resp, err + } + + return device, resp, err +} + +// Delete deletes a device +func (s *DeviceServiceOp) Delete(deviceID string, force bool) (*Response, error) { + path := fmt.Sprintf("%s/%s", deviceBasePath, deviceID) + req := &DeviceDeleteRequest{Force: force} + + return s.client.DoRequest("DELETE", path, req, nil) +} + +// Reboot reboots on a device +func (s *DeviceServiceOp) Reboot(deviceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s/actions", deviceBasePath, deviceID) + action := &DeviceActionRequest{Type: "reboot"} + + return s.client.DoRequest("POST", path, action, nil) +} + +// PowerOff powers on a device +func (s *DeviceServiceOp) PowerOff(deviceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s/actions", deviceBasePath, deviceID) + action := &DeviceActionRequest{Type: "power_off"} + + return s.client.DoRequest("POST", path, action, nil) +} + +// PowerOn powers on a device +func (s *DeviceServiceOp) PowerOn(deviceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s/actions", deviceBasePath, deviceID) + action := &DeviceActionRequest{Type: "power_on"} + + return s.client.DoRequest("POST", path, action, nil) +} + +type lockType struct { + Locked bool `json:"locked"` +} + +// Lock sets a device to "locked" +func (s *DeviceServiceOp) Lock(deviceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", deviceBasePath, deviceID) + action := lockType{Locked: true} + + return s.client.DoRequest("PATCH", path, action, nil) +} + +// Unlock sets a device to "unlocked" +func (s *DeviceServiceOp) Unlock(deviceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", deviceBasePath, deviceID) + action := lockType{Locked: false} + + return s.client.DoRequest("PATCH", path, action, nil) +} + +func (s *DeviceServiceOp) ListBGPNeighbors(deviceID string, listOpt *ListOptions) ([]BGPNeighbor, *Response, error) { + root := new(bgpNeighborsRoot) + params := urlQuery(listOpt) + path := fmt.Sprintf("%s/%s%s?%s", deviceBasePath, deviceID, bgpNeighborsBasePath, params) + + resp, err := s.client.DoRequest("GET", path, nil, root) + if err != nil { + return nil, resp, err + } + + return 
root.BGPNeighbors, resp, err +} + +// ListBGPSessions returns all BGP Sessions associated with the device +func (s *DeviceServiceOp) ListBGPSessions(deviceID string, listOpt *ListOptions) (bgpSessions []BGPSession, resp *Response, err error) { + params := urlQuery(listOpt) + path := fmt.Sprintf("%s/%s%s?%s", deviceBasePath, deviceID, bgpSessionBasePath, params) + + for { + subset := new(bgpSessionsRoot) + + resp, err = s.client.DoRequest("GET", path, nil, subset) + if err != nil { + return nil, resp, err + } + + bgpSessions = append(bgpSessions, subset.Sessions...) + + if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = subset.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + return + } +} + +// ListEvents returns list of device events +func (s *DeviceServiceOp) ListEvents(deviceID string, listOpt *ListOptions) ([]Event, *Response, error) { + path := fmt.Sprintf("%s/%s%s", deviceBasePath, deviceID, eventBasePath) + + return listEvents(s.client, path, listOpt) +} diff --git a/vendor/github.com/packethost/packngo/doc.go b/vendor/github.com/packethost/packngo/doc.go new file mode 100644 index 00000000000..52a192a06c9 --- /dev/null +++ b/vendor/github.com/packethost/packngo/doc.go @@ -0,0 +1,3 @@ +// Package packngo implements the Equinix Metal API +// documented at https://metal.equinix.com/developers/api. +package packngo diff --git a/vendor/github.com/packethost/packngo/email.go b/vendor/github.com/packethost/packngo/email.go new file mode 100644 index 00000000000..e225d4d83a0 --- /dev/null +++ b/vendor/github.com/packethost/packngo/email.go @@ -0,0 +1,87 @@ +package packngo + +import "fmt" + +const emailBasePath = "/emails" + +// EmailRequest type used to add an email address to the current user +type EmailRequest struct { + Address string `json:"address,omitempty"` + Default *bool `json:"default,omitempty"` +} + +// EmailService interface defines available email methods +type EmailService interface { + Get(string, *GetOptions) (*Email, *Response, error) + Create(*EmailRequest) (*Email, *Response, error) + Update(string, *EmailRequest) (*Email, *Response, error) + Delete(string) (*Response, error) +} + +// Email represents a user's email address +type Email struct { + ID string `json:"id"` + Address string `json:"address"` + Default bool `json:"default,omitempty"` + URL string `json:"href,omitempty"` +} + +func (e Email) String() string { + return Stringify(e) +} + +// EmailServiceOp implements EmailService +type EmailServiceOp struct { + client *Client +} + +// Get retrieves an email by id +func (s *EmailServiceOp) Get(emailID string, getOpt *GetOptions) (*Email, *Response, error) { + params := urlQuery(getOpt) + path := fmt.Sprintf("%s/%s?%s", emailBasePath, emailID, params) + email := new(Email) + + resp, err := s.client.DoRequest("GET", path, nil, email) + if err != nil { + return nil, resp, err + } + + return email, resp, err +} + +// Create adds a new email address to the current user. 
+func (s *EmailServiceOp) Create(request *EmailRequest) (*Email, *Response, error) { + email := new(Email) + + resp, err := s.client.DoRequest("POST", emailBasePath, request, email) + if err != nil { + return nil, resp, err + } + + return email, resp, err +} + +// Delete removes the email addres from the current user account +func (s *EmailServiceOp) Delete(emailID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", emailBasePath, emailID) + + resp, err := s.client.DoRequest("DELETE", path, nil, nil) + if err != nil { + return resp, err + } + + return resp, err +} + +// Update email parameters +func (s *EmailServiceOp) Update(emailID string, request *EmailRequest) (*Email, *Response, error) { + email := new(Email) + path := fmt.Sprintf("%s/%s", emailBasePath, emailID) + + resp, err := s.client.DoRequest("PUT", path, request, email) + if err != nil { + return nil, resp, err + } + + return email, resp, err +} diff --git a/vendor/github.com/packethost/packngo/events.go b/vendor/github.com/packethost/packngo/events.go new file mode 100644 index 00000000000..102af7a5df2 --- /dev/null +++ b/vendor/github.com/packethost/packngo/events.go @@ -0,0 +1,104 @@ +package packngo + +import "fmt" + +const eventBasePath = "/events" + +// Event struct +type Event struct { + ID string `json:"id,omitempty"` + State string `json:"state,omitempty"` + Type string `json:"type,omitempty"` + Body string `json:"body,omitempty"` + Relationships []Href `json:"relationships,omitempty"` + Interpolated string `json:"interpolated,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + Href string `json:"href,omitempty"` +} + +type eventsRoot struct { + Events []Event `json:"events,omitempty"` + Meta meta `json:"meta,omitempty"` +} + +// EventService interface defines available event functions +type EventService interface { + List(*ListOptions) ([]Event, *Response, error) + Get(string, *GetOptions) (*Event, *Response, error) +} + +// EventServiceOp implements EventService +type EventServiceOp struct { + client *Client +} + +// List returns all events +func (s *EventServiceOp) List(listOpt *ListOptions) ([]Event, *Response, error) { + return listEvents(s.client, eventBasePath, listOpt) +} + +// Get returns an event by ID +func (s *EventServiceOp) Get(eventID string, getOpt *GetOptions) (*Event, *Response, error) { + path := fmt.Sprintf("%s/%s", eventBasePath, eventID) + return get(s.client, path, getOpt) +} + +// list helper function for all event functions +func listEvents(client requestDoer, path string, listOpt *ListOptions) (events []Event, resp *Response, err error) { + params := urlQuery(listOpt) + path = fmt.Sprintf("%s?%s", path, params) + + for { + subset := new(eventsRoot) + + resp, err = client.DoRequest("GET", path, nil, subset) + if err != nil { + return nil, resp, err + } + + events = append(events, subset.Events...) 
+ + if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = subset.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + + return + } + +} + +// list helper function for all event functions +/* +func listEvents(client *Client, path string, listOpt *ListOptions) ([]Event, *Response, error) { + params := urlQuery(listOpt) + root := new(eventsRoot) + + path = fmt.Sprintf("%s?%s", path, params) + + resp, err := client.DoRequest("GET", path, nil, root) + if err != nil { + return nil, resp, err + } + + return root.Events, resp, err +} +*/ + +func get(client *Client, path string, getOpt *GetOptions) (*Event, *Response, error) { + params := urlQuery(getOpt) + + event := new(Event) + + path = fmt.Sprintf("%s?%s", path, params) + + resp, err := client.DoRequest("GET", path, nil, event) + if err != nil { + return nil, resp, err + } + + return event, resp, err +} diff --git a/vendor/github.com/packethost/packngo/facilities.go b/vendor/github.com/packethost/packngo/facilities.go new file mode 100644 index 00000000000..356b4d7492e --- /dev/null +++ b/vendor/github.com/packethost/packngo/facilities.go @@ -0,0 +1,56 @@ +package packngo + +import "fmt" + +const facilityBasePath = "/facilities" + +// FacilityService interface defines available facility methods +type FacilityService interface { + List(*ListOptions) ([]Facility, *Response, error) +} + +type facilityRoot struct { + Facilities []Facility `json:"facilities"` +} + +// Facility represents an Equinix Metal facility +type Facility struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Code string `json:"code,omitempty"` + Features []string `json:"features,omitempty"` + Address *Address `json:"address,omitempty"` + URL string `json:"href,omitempty"` +} + +func (f Facility) String() string { + return Stringify(f) +} + +// Address - the physical address of the facility +type Address struct { + ID string `json:"id,omitempty"` +} + +func (a Address) String() string { + return Stringify(a) +} + +// FacilityServiceOp implements FacilityService +type FacilityServiceOp struct { + client *Client +} + +// List returns all facilities +func (s *FacilityServiceOp) List(listOpt *ListOptions) ([]Facility, *Response, error) { + root := new(facilityRoot) + params := urlQuery(listOpt) + path := fmt.Sprintf("%s?%s", facilityBasePath, params) + + resp, err := s.client.DoRequest("GET", path, nil, root) + if err != nil { + return nil, resp, err + } + + return root.Facilities, resp, err +} diff --git a/vendor/github.com/packethost/packngo/go.mod b/vendor/github.com/packethost/packngo/go.mod new file mode 100644 index 00000000000..1a982cbefe5 --- /dev/null +++ b/vendor/github.com/packethost/packngo/go.mod @@ -0,0 +1,10 @@ +module github.com/packethost/packngo + +go 1.13 + +require ( + github.com/dnaeon/go-vcr v1.0.1 + github.com/hashicorp/go-retryablehttp v0.6.6 + github.com/stretchr/testify v1.5.1 + golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a +) diff --git a/vendor/github.com/packethost/packngo/go.sum b/vendor/github.com/packethost/packngo/go.sum new file mode 100644 index 00000000000..5b7a82d44e6 --- /dev/null +++ b/vendor/github.com/packethost/packngo/go.sum @@ -0,0 +1,29 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dnaeon/go-vcr v1.0.1 
h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a h1:y6sBfNd1b9Wy08a6K1Z1DZc4aXABUN5TKjkYhz7UKmo= +golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/packethost/packngo/hardware_reservations.go b/vendor/github.com/packethost/packngo/hardware_reservations.go new file mode 100644 index 00000000000..4bd78d979ff --- /dev/null +++ b/vendor/github.com/packethost/packngo/hardware_reservations.go @@ -0,0 +1,99 @@ +package packngo + +import "fmt" + +const hardwareReservationBasePath = "/hardware-reservations" + +// HardwareReservationService interface defines available hardware reservation functions +type HardwareReservationService interface { + Get(hardwareReservationID string, getOpt *GetOptions) (*HardwareReservation, *Response, error) + List(projectID string, listOpt *ListOptions) ([]HardwareReservation, *Response, error) + Move(string, string) (*HardwareReservation, *Response, error) +} + +// HardwareReservationServiceOp implements HardwareReservationService +type HardwareReservationServiceOp struct { + client *Client +} + +// HardwareReservation struct +type HardwareReservation struct { + ID string `json:"id,omitempty"` + ShortID string `json:"short_id,omitempty"` + Facility Facility `json:"facility,omitempty"` + Plan Plan 
`json:"plan,omitempty"` + Provisionable bool `json:"provisionable,omitempty"` + Spare bool `json:"spare,omitempty"` + SwitchUUID string `json:"switch_uuid,omitempty"` + Intervals int `json:"intervals,omitempty"` + CurrentPeriod int `json:"current_period,omitempty"` + Href string `json:"href,omitempty"` + Project Project `json:"project,omitempty"` + Device *Device `json:"device,omitempty"` + CreatedAt Timestamp `json:"created_at,omitempty"` +} + +type hardwareReservationRoot struct { + HardwareReservations []HardwareReservation `json:"hardware_reservations"` + Meta meta `json:"meta"` +} + +// List returns all hardware reservations for a given project +func (s *HardwareReservationServiceOp) List(projectID string, listOpt *ListOptions) (reservations []HardwareReservation, resp *Response, err error) { + root := new(hardwareReservationRoot) + params := urlQuery(listOpt) + + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, projectID, hardwareReservationBasePath, params) + + for { + subset := new(hardwareReservationRoot) + + resp, err = s.client.DoRequest("GET", path, nil, root) + if err != nil { + return nil, resp, err + } + + reservations = append(reservations, root.HardwareReservations...) + + if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = subset.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + + return + } +} + +// Get returns a single hardware reservation +func (s *HardwareReservationServiceOp) Get(hardwareReservationdID string, getOpt *GetOptions) (*HardwareReservation, *Response, error) { + params := urlQuery(getOpt) + + hardwareReservation := new(HardwareReservation) + + path := fmt.Sprintf("%s/%s?%s", hardwareReservationBasePath, hardwareReservationdID, params) + + resp, err := s.client.DoRequest("GET", path, nil, hardwareReservation) + if err != nil { + return nil, resp, err + } + + return hardwareReservation, resp, err +} + +// Move a hardware reservation to another project +func (s *HardwareReservationServiceOp) Move(hardwareReservationdID, projectID string) (*HardwareReservation, *Response, error) { + hardwareReservation := new(HardwareReservation) + path := fmt.Sprintf("%s/%s/%s", hardwareReservationBasePath, hardwareReservationdID, "move") + body := map[string]string{} + body["project_id"] = projectID + + resp, err := s.client.DoRequest("POST", path, body, hardwareReservation) + if err != nil { + return nil, resp, err + } + + return hardwareReservation, resp, err +} diff --git a/vendor/github.com/packethost/packngo/ip.go b/vendor/github.com/packethost/packngo/ip.go new file mode 100644 index 00000000000..fa44aac174a --- /dev/null +++ b/vendor/github.com/packethost/packngo/ip.go @@ -0,0 +1,244 @@ +package packngo + +import ( + "fmt" +) + +const ipBasePath = "/ips" + +const ( + // PublicIPv4 fixed string representation of public ipv4 + PublicIPv4 = "public_ipv4" + // PrivateIPv4 fixed string representation of private ipv4 + PrivateIPv4 = "private_ipv4" + // GlobalIPv4 fixed string representation of global ipv4 + GlobalIPv4 = "global_ipv4" + // PublicIPv6 fixed string representation of public ipv6 + PublicIPv6 = "public_ipv6" + // PrivateIPv6 fixed string representation of private ipv6 + PrivateIPv6 = "private_ipv6" + // GlobalIPv6 fixed string representation of global ipv6 + GlobalIPv6 = "global_ipv6" +) + +// DeviceIPService handles assignment of addresses from reserved blocks to instances in a project. 
+type DeviceIPService interface { + Assign(deviceID string, assignRequest *AddressStruct) (*IPAddressAssignment, *Response, error) + Unassign(assignmentID string) (*Response, error) + Get(assignmentID string, getOpt *GetOptions) (*IPAddressAssignment, *Response, error) + List(deviceID string, listOpt *ListOptions) ([]IPAddressAssignment, *Response, error) +} + +// ProjectIPService handles reservation of IP address blocks for a project. +type ProjectIPService interface { + Get(reservationID string, getOpt *GetOptions) (*IPAddressReservation, *Response, error) + List(projectID string, listOpt *ListOptions) ([]IPAddressReservation, *Response, error) + Request(projectID string, ipReservationReq *IPReservationRequest) (*IPAddressReservation, *Response, error) + Remove(ipReservationID string) (*Response, error) + AvailableAddresses(ipReservationID string, r *AvailableRequest) ([]string, *Response, error) +} + +type IpAddressCommon struct { //nolint:golint + ID string `json:"id"` + Address string `json:"address"` + Gateway string `json:"gateway"` + Network string `json:"network"` + AddressFamily int `json:"address_family"` + Netmask string `json:"netmask"` + Public bool `json:"public"` + CIDR int `json:"cidr"` + Created string `json:"created_at,omitempty"` + Updated string `json:"updated_at,omitempty"` + Href string `json:"href"` + Management bool `json:"management"` + Manageable bool `json:"manageable"` + Project Href `json:"project"` + Global *bool `json:"global_ip"` + Tags []string `json:"tags,omitempty"` + CustomData map[string]interface{} `json:"customdata,omitempty"` +} + +// IPAddressReservation is created when user sends IP reservation request for a project (considering it's within quota). +type IPAddressReservation struct { + IpAddressCommon + Assignments []*IPAddressAssignment `json:"assignments"` + Facility *Facility `json:"facility,omitempty"` + Available string `json:"available"` + Addon bool `json:"addon"` + Bill bool `json:"bill"` + Description *string `json:"details"` +} + +// AvailableResponse is a type for listing of available addresses from a reserved block. +type AvailableResponse struct { + Available []string `json:"available"` +} + +// AvailableRequest is a type for listing available addresses from a reserved block. +type AvailableRequest struct { + CIDR int `json:"cidr"` +} + +// IPAddressAssignment is created when an IP address from reservation block is assigned to a device. +type IPAddressAssignment struct { + IpAddressCommon + AssignedTo Href `json:"assigned_to"` +} + +// IPReservationRequest represents the body of a reservation request. +type IPReservationRequest struct { + Type string `json:"type"` + Quantity int `json:"quantity"` + Description string `json:"details,omitempty"` + Facility *string `json:"facility,omitempty"` + Tags []string `json:"tags,omitempty"` + CustomData map[string]interface{} `json:"customdata,omitempty"` + // FailOnApprovalRequired if the IP request cannot be approved automatically, rather than sending to + // the longer Equinix Metal approval process, fail immediately with a 422 error + FailOnApprovalRequired bool `json:"fail_on_approval_required,omitempty"` +} + +// AddressStruct is a helper type for request/response with dict like {"address": ... 
} +type AddressStruct struct { + Address string `json:"address"` +} + +func deleteFromIP(client *Client, resourceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", ipBasePath, resourceID) + + return client.DoRequest("DELETE", path, nil, nil) +} + +func (i IPAddressReservation) String() string { + return Stringify(i) +} + +func (i IPAddressAssignment) String() string { + return Stringify(i) +} + +// DeviceIPServiceOp is interface for IP-address assignment methods. +type DeviceIPServiceOp struct { + client *Client +} + +// Unassign unassigns an IP address from the device to which it is currently assigned. +// This will remove the relationship between an IP and the device and will make the IP +// address available to be assigned to another device. +func (i *DeviceIPServiceOp) Unassign(assignmentID string) (*Response, error) { + return deleteFromIP(i.client, assignmentID) +} + +// Assign assigns an IP address to a device. +// The IP address must be in one of the IP ranges assigned to the device’s project. +func (i *DeviceIPServiceOp) Assign(deviceID string, assignRequest *AddressStruct) (*IPAddressAssignment, *Response, error) { + path := fmt.Sprintf("%s/%s%s", deviceBasePath, deviceID, ipBasePath) + ipa := new(IPAddressAssignment) + + resp, err := i.client.DoRequest("POST", path, assignRequest, ipa) + if err != nil { + return nil, resp, err + } + + return ipa, resp, err +} + +// Get returns assignment by ID. +func (i *DeviceIPServiceOp) Get(assignmentID string, getOpt *GetOptions) (*IPAddressAssignment, *Response, error) { + params := urlQuery(getOpt) + path := fmt.Sprintf("%s/%s?%s", ipBasePath, assignmentID, params) + ipa := new(IPAddressAssignment) + + resp, err := i.client.DoRequest("GET", path, nil, ipa) + if err != nil { + return nil, resp, err + } + + return ipa, resp, err +} + +// List list all of the IP address assignments on a device +func (i *DeviceIPServiceOp) List(deviceID string, listOpt *ListOptions) ([]IPAddressAssignment, *Response, error) { + params := urlQuery(listOpt) + + path := fmt.Sprintf("%s/%s%s?%s", deviceBasePath, deviceID, ipBasePath, params) + + //ipList represents collection of IP Address reservations + type ipList struct { + IPs []IPAddressAssignment `json:"ip_addresses,omitempty"` + } + + ips := new(ipList) + + resp, err := i.client.DoRequest("GET", path, nil, ips) + if err != nil { + return nil, resp, err + } + + return ips.IPs, resp, err +} + +// ProjectIPServiceOp is interface for IP assignment methods. +type ProjectIPServiceOp struct { + client *Client +} + +// Get returns reservation by ID. +func (i *ProjectIPServiceOp) Get(reservationID string, getOpt *GetOptions) (*IPAddressReservation, *Response, error) { + params := urlQuery(getOpt) + path := fmt.Sprintf("%s/%s?%s", ipBasePath, reservationID, params) + ipr := new(IPAddressReservation) + + resp, err := i.client.DoRequest("GET", path, nil, ipr) + if err != nil { + return nil, resp, err + } + + return ipr, resp, err +} + +// List provides a list of IP resevations for a single project. 
+func (i *ProjectIPServiceOp) List(projectID string, listOpt *ListOptions) ([]IPAddressReservation, *Response, error) { + params := urlQuery(listOpt) + + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, projectID, ipBasePath, params) + reservations := new(struct { + Reservations []IPAddressReservation `json:"ip_addresses"` + }) + + resp, err := i.client.DoRequest("GET", path, nil, reservations) + if err != nil { + return nil, resp, err + } + return reservations.Reservations, resp, nil +} + +// Request requests more IP space for a project in order to have additional IP addresses to assign to devices. +func (i *ProjectIPServiceOp) Request(projectID string, ipReservationReq *IPReservationRequest) (*IPAddressReservation, *Response, error) { + path := fmt.Sprintf("%s/%s%s", projectBasePath, projectID, ipBasePath) + ipr := new(IPAddressReservation) + + resp, err := i.client.DoRequest("POST", path, ipReservationReq, ipr) + if err != nil { + return nil, resp, err + } + return ipr, resp, err +} + +// Remove removes an IP reservation from the project. +func (i *ProjectIPServiceOp) Remove(ipReservationID string) (*Response, error) { + return deleteFromIP(i.client, ipReservationID) +} + +// AvailableAddresses lists addresses available from a reserved block +func (i *ProjectIPServiceOp) AvailableAddresses(ipReservationID string, r *AvailableRequest) ([]string, *Response, error) { + path := fmt.Sprintf("%s/%s/available?cidr=%d", ipBasePath, ipReservationID, r.CIDR) + ar := new(AvailableResponse) + + resp, err := i.client.DoRequest("GET", path, r, ar) + if err != nil { + return nil, resp, err + } + return ar.Available, resp, nil + +} diff --git a/vendor/github.com/packethost/packngo/notifications.go b/vendor/github.com/packethost/packngo/notifications.go new file mode 100644 index 00000000000..58ff1d2f249 --- /dev/null +++ b/vendor/github.com/packethost/packngo/notifications.go @@ -0,0 +1,95 @@ +package packngo + +import "fmt" + +const notificationBasePath = "/notifications" + +// Notification struct +type Notification struct { + ID string `json:"id,omitempty"` + Type string `json:"type,omitempty"` + Body string `json:"body,omitempty"` + Severity string `json:"severity,omitempty"` + Read bool `json:"read,omitempty"` + Context string `json:"context,omitempty"` + CreatedAt Timestamp `json:"created_at,omitempty"` + UpdatedAt Timestamp `json:"updated_at,omitempty"` + User Href `json:"user,omitempty"` + Href string `json:"href,omitempty"` +} + +type notificationsRoot struct { + Notifications []Notification `json:"notifications,omitempty"` + Meta meta `json:"meta,omitempty"` +} + +// NotificationService interface defines available event functions +type NotificationService interface { + List(*ListOptions) ([]Notification, *Response, error) + Get(string, *GetOptions) (*Notification, *Response, error) + MarkAsRead(string) (*Notification, *Response, error) +} + +// NotificationServiceOp implements NotificationService +type NotificationServiceOp struct { + client *Client +} + +// List returns all notifications +func (s *NotificationServiceOp) List(listOpt *ListOptions) ([]Notification, *Response, error) { + return listNotifications(s.client, notificationBasePath, listOpt) +} + +// Get returns a notification by ID +func (s *NotificationServiceOp) Get(notificationID string, getOpt *GetOptions) (*Notification, *Response, error) { + params := urlQuery(getOpt) + + path := fmt.Sprintf("%s/%s?%s", notificationBasePath, notificationID, params) + return getNotifications(s.client, path) +} + +// Marks notification as read 
by ID +func (s *NotificationServiceOp) MarkAsRead(notificationID string) (*Notification, *Response, error) { + path := fmt.Sprintf("%s/%s", notificationBasePath, notificationID) + return markAsRead(s.client, path) +} + +// list helper function for all notification functions +func listNotifications(client *Client, path string, listOpt *ListOptions) ([]Notification, *Response, error) { + params := urlQuery(listOpt) + + root := new(notificationsRoot) + + path = fmt.Sprintf("%s?%s", path, params) + + resp, err := client.DoRequest("GET", path, nil, root) + if err != nil { + return nil, resp, err + } + + return root.Notifications, resp, err +} + +func getNotifications(client *Client, path string) (*Notification, *Response, error) { + + notification := new(Notification) + + resp, err := client.DoRequest("GET", path, nil, notification) + if err != nil { + return nil, resp, err + } + + return notification, resp, err +} + +func markAsRead(client *Client, path string) (*Notification, *Response, error) { + + notification := new(Notification) + + resp, err := client.DoRequest("PUT", path, nil, notification) + if err != nil { + return nil, resp, err + } + + return notification, resp, err +} diff --git a/vendor/github.com/packethost/packngo/operatingsystems.go b/vendor/github.com/packethost/packngo/operatingsystems.go new file mode 100644 index 00000000000..087fba6cf77 --- /dev/null +++ b/vendor/github.com/packethost/packngo/operatingsystems.go @@ -0,0 +1,42 @@ +package packngo + +const osBasePath = "/operating-systems" + +// OSService interface defines available operating_systems methods +type OSService interface { + List() ([]OS, *Response, error) +} + +type osRoot struct { + OperatingSystems []OS `json:"operating_systems"` +} + +// OS represents an Equinix Metal operating system +type OS struct { + Name string `json:"name"` + Slug string `json:"slug"` + Distro string `json:"distro"` + Version string `json:"version"` + ProvisionableOn []string `json:"provisionable_on"` +} + +func (o OS) String() string { + return Stringify(o) +} + +// OSServiceOp implements OSService +type OSServiceOp struct { + client *Client +} + +// List returns all available operating systems +func (s *OSServiceOp) List() ([]OS, *Response, error) { + root := new(osRoot) + + resp, err := s.client.DoRequest("GET", osBasePath, nil, root) + if err != nil { + return nil, resp, err + } + + return root.OperatingSystems, resp, err +} diff --git a/vendor/github.com/packethost/packngo/organizations.go b/vendor/github.com/packethost/packngo/organizations.go new file mode 100644 index 00000000000..aa4684a22b8 --- /dev/null +++ b/vendor/github.com/packethost/packngo/organizations.go @@ -0,0 +1,171 @@ +package packngo + +import "fmt" + +// API documentation https://metal.equinix.com/developers/api/organizations/ +const organizationBasePath = "/organizations" + +// OrganizationService interface defines available organization methods +type OrganizationService interface { + List(*ListOptions) ([]Organization, *Response, error) + Get(string, *GetOptions) (*Organization, *Response, error) + Create(*OrganizationCreateRequest) (*Organization, *Response, error) + Update(string, *OrganizationUpdateRequest) (*Organization, *Response, error) + Delete(string) (*Response, error) + ListPaymentMethods(string) ([]PaymentMethod, *Response, error) + ListEvents(string, *ListOptions) ([]Event, *Response, error) +} + +type organizationsRoot struct { + Organizations []Organization `json:"organizations"` + Meta meta `json:"meta"` +} + +// Organization represents an 
Equinix Metal organization +type Organization struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Website string `json:"website,omitempty"` + Twitter string `json:"twitter,omitempty"` + Created string `json:"created_at,omitempty"` + Updated string `json:"updated_at,omitempty"` + Address Address `json:"address,omitempty"` + TaxID string `json:"tax_id,omitempty"` + MainPhone string `json:"main_phone,omitempty"` + BillingPhone string `json:"billing_phone,omitempty"` + CreditAmount float64 `json:"credit_amount,omitempty"` + Logo string `json:"logo,omitempty"` + LogoThumb string `json:"logo_thumb,omitempty"` + Projects []Project `json:"projects,omitempty"` + URL string `json:"href,omitempty"` + Users []User `json:"members,omitempty"` + Owners []User `json:"owners,omitempty"` +} + +func (o Organization) String() string { + return Stringify(o) +} + +// OrganizationCreateRequest type used to create an Equinix Metal organization +type OrganizationCreateRequest struct { + Name string `json:"name"` + Description string `json:"description"` + Website string `json:"website"` + Twitter string `json:"twitter"` + Logo string `json:"logo"` +} + +func (o OrganizationCreateRequest) String() string { + return Stringify(o) +} + +// OrganizationUpdateRequest type used to update an Equinix Metal organization +type OrganizationUpdateRequest struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Website *string `json:"website,omitempty"` + Twitter *string `json:"twitter,omitempty"` + Logo *string `json:"logo,omitempty"` +} + +func (o OrganizationUpdateRequest) String() string { + return Stringify(o) +} + +// OrganizationServiceOp implements OrganizationService +type OrganizationServiceOp struct { + client *Client +} + +// List returns the user's organizations +func (s *OrganizationServiceOp) List(listOpt *ListOptions) (orgs []Organization, resp *Response, err error) { + params := urlQuery(listOpt) + root := new(organizationsRoot) + + path := fmt.Sprintf("%s?%s", organizationBasePath, params) + + for { + resp, err = s.client.DoRequest("GET", path, nil, root) + if err != nil { + return nil, resp, err + } + + orgs = append(orgs, root.Organizations...) 
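+	// NOTE: pagination here follows Meta.Next. When listOpt is nil or
+	// listOpt.Page is 0, the loop below keeps requesting root.Meta.Next.Href
+	// until the API reports no further page; a caller that sets an explicit
+	// Page gets exactly that single page. Illustrative calls (variable names
+	// are hypothetical):
+	//   all, _, _ := c.Organizations.List(nil)                                        // follows meta.next
+	//   one, _, _ := c.Organizations.List(&packngo.ListOptions{Page: 2, PerPage: 50}) // single page only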
+ + if root.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = root.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + return + } +} + +// Get returns a organization by id +func (s *OrganizationServiceOp) Get(organizationID string, getOpt *GetOptions) (*Organization, *Response, error) { + params := urlQuery(getOpt) + path := fmt.Sprintf("%s/%s?%s", organizationBasePath, organizationID, params) + organization := new(Organization) + + resp, err := s.client.DoRequest("GET", path, nil, organization) + if err != nil { + return nil, resp, err + } + + return organization, resp, err +} + +// Create creates a new organization +func (s *OrganizationServiceOp) Create(createRequest *OrganizationCreateRequest) (*Organization, *Response, error) { + organization := new(Organization) + + resp, err := s.client.DoRequest("POST", organizationBasePath, createRequest, organization) + if err != nil { + return nil, resp, err + } + + return organization, resp, err +} + +// Update updates an organization +func (s *OrganizationServiceOp) Update(id string, updateRequest *OrganizationUpdateRequest) (*Organization, *Response, error) { + path := fmt.Sprintf("%s/%s", organizationBasePath, id) + organization := new(Organization) + + resp, err := s.client.DoRequest("PATCH", path, updateRequest, organization) + if err != nil { + return nil, resp, err + } + + return organization, resp, err +} + +// Delete deletes an organizationID +func (s *OrganizationServiceOp) Delete(organizationID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", organizationBasePath, organizationID) + + return s.client.DoRequest("DELETE", path, nil, nil) +} + +// ListPaymentMethods returns PaymentMethods for an organization +func (s *OrganizationServiceOp) ListPaymentMethods(organizationID string) ([]PaymentMethod, *Response, error) { + url := fmt.Sprintf("%s/%s%s", organizationBasePath, organizationID, paymentMethodBasePath) + root := new(paymentMethodsRoot) + + resp, err := s.client.DoRequest("GET", url, nil, root) + if err != nil { + return nil, resp, err + } + + return root.PaymentMethods, resp, err +} + +// ListEvents returns list of organization events +func (s *OrganizationServiceOp) ListEvents(organizationID string, listOpt *ListOptions) ([]Event, *Response, error) { + path := fmt.Sprintf("%s/%s%s", organizationBasePath, organizationID, eventBasePath) + + return listEvents(s.client, path, listOpt) +} diff --git a/vendor/github.com/packethost/packngo/packngo.go b/vendor/github.com/packethost/packngo/packngo.go new file mode 100644 index 00000000000..97e09545806 --- /dev/null +++ b/vendor/github.com/packethost/packngo/packngo.go @@ -0,0 +1,631 @@ +package packngo + +import ( + "bytes" + "context" + "crypto/x509" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/http/httputil" + "net/url" + "os" + "regexp" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-retryablehttp" +) + +const ( + authTokenEnvVar = "PACKET_AUTH_TOKEN" + libraryVersion = "0.5.1" + baseURL = "https://api.equinix.com/metal/v1/" + userAgent = "packngo/" + libraryVersion + mediaType = "application/json" + debugEnvVar = "PACKNGO_DEBUG" + + headerRateLimit = "X-RateLimit-Limit" + headerRateRemaining = "X-RateLimit-Remaining" + headerRateReset = "X-RateLimit-Reset" + expectedAPIContentTypePrefix = "application/json" +) + +var redirectsErrorRe = regexp.MustCompile(`stopped after \d+ redirects\z`) + +// GetOptions are options common to Equinix Metal API GET requests +type 
GetOptions struct { + // Includes are a list of fields to expand in the request results. + // + // For resources that contain collections of other resources, the Equinix Metal API + // will only return the `Href` value of these resources by default. In + // nested API Go types, this will result in objects that have zero values in + // all fields except their `Href` field. When an object's associated field + // name is "included", the returned fields will be unmarshalled into the + // nested object. Field specifiers can use a dotted notation up to three + // references deep. (For example, "memberships.projects" can be used in + // ListUsers.) + Includes []string `url:"includes,omitempty"` + + // Excludes reduce the size of the API response by removing nested objects + // that may be returned. + // + // The default behavior of the Equinix Metal API is to "exclude" fields, but some + // API endpoints have an "include" behavior on certain fields. Nested Go + // types unmarshalled into an "excluded" field will only have values in + // their `Href` field. + Excludes []string `url:"excludes,omitempty"` +} + +// GetOptions returns GetOptions from GetOptions (and is nil-receiver safe) +func (g *GetOptions) GetOptions() *GetOptions { + getOpts := GetOptions{} + if g != nil { + getOpts.Includes = g.Includes + getOpts.Excludes = g.Excludes + } + return &getOpts +} + +// ListOptions are options common to Equinix Metal API paginated GET requests +type ListOptions struct { + // avoid embedding GetOptions (packngo-breaking-change) for now + + // Includes are a list of fields to expand in the request results. + Includes []string `url:"includes,omitempty"` + + // Excludes reduce the size of the API response by removing nested objects + // that may be returned. + Excludes []string `url:"excludes,omitempty"` + + // Page is the page of results to retrieve for paginated result sets + Page int `url:"page,omitempty"` + + // PerPage is the number of results to return per page for paginated result + // sets. + PerPage int `url:"per_page,omitempty"` + + // Search is a special API query parameter that, for resources that support + // it, will filter results to those with any one of various fields matching + // the supplied keyword. For example, a resource may have a defined search + // behavior that matches either a name or a fingerprint field, while another + // resource may match entirely different fields. Search is currently + // implemented for SSHKeys and uses an exact match. + Search string `url:"search,omitempty"` +} + +// GetOptions returns GetOptions from ListOptions (and is nil-receiver safe) +func (l *ListOptions) GetOptions() *GetOptions { + getOpts := GetOptions{} + if l != nil { + getOpts.Includes = l.Includes + getOpts.Excludes = l.Excludes + } + return &getOpts +} + +// SearchOptions are options common to API GET requests that include a +// multi-field search filter. SearchOptions are used in List functions that are +// known to support `search` but do not offer pagination. +type SearchOptions struct { + // avoid embedding GetOptions (for similar behavior to ListOptions) + + // Includes are a list of fields to expand in the request results. + Includes []string `url:"includes,omitempty"` + + // Excludes reduce the size of the API response by removing nested objects + // that may be returned.
+ Excludes []string `url:"excludes,omitempty"` + + // Search is a special API query parameter that, for resources that support + // it, will filter results to those with any one of various fields matching + // the supplied keyword. For example, a resource may have a defined search + // behavior that matches either a name or a fingerprint field, while another + // resource may match entirely different fields. Search is currently + // implemented for SSHKeys and uses an exact match. + Search string `url:"search,omitempty"` +} + +// GetOptions returns GetOptions from SearchOptions (and is nil-receiver safe) +func (s *SearchOptions) GetOptions() *GetOptions { + getOpts := GetOptions{} + if s != nil { + getOpts.Includes = s.Includes + getOpts.Excludes = s.Excludes + } + return &getOpts +} + +// OptionsGetter provides GetOptions +type OptionsGetter interface { + GetOptions() *GetOptions +} + +// Including ensures that the variadic refs are included in a copy of the +// options, resulting in expansion of the referred sub-resources. Unknown +// values within refs will be silently ignored by the API. +func (g *GetOptions) Including(refs ...string) *GetOptions { + if g == nil { + return &GetOptions{Includes: refs} + } + out := *g + for _, v := range refs { + if !contains(out.Includes, v) { + out.Includes = append(out.Includes, v) + } + } + return &out +} + +// Including ensures that the variadic refs are included in a copy of the +// options, resulting in expansion of the referred sub-resources. Unknown +// values within refs will be silently ignored by the API. +func (l *ListOptions) Including(refs ...string) *ListOptions { + if l == nil { + return &ListOptions{Includes: refs} + } + out := *l + for _, v := range refs { + if !contains(out.Includes, v) { + out.Includes = append(out.Includes, v) + } + } + return &out +} + +// Including ensures that the variadic refs are included in a copy of the +// options, resulting in expansion of the referred sub-resources. Unknown +// values within refs will be silently ignored by the API.
+func (s *SearchOptions) Including(refs ...string) *SearchOptions { + if s == nil { + return &SearchOptions{Includes: refs} + } + out := *s + for _, v := range refs { + if !contains(out.Includes, v) { + out.Includes = append(out.Includes, v) + } + } + return &out +} + +type paramsReady interface { + Params() url.Values +} + +// compile-time assertions that paramsReady is implemented +var ( + _ paramsReady = (*GetOptions)(nil) + _ paramsReady = (*ListOptions)(nil) + _ paramsReady = (*SearchOptions)(nil) +) + +// urlQuery generates a URL query string ("?foo=bar") from any object that +// implements the paramsReady interface +func urlQuery(p paramsReady) string { + return p.Params().Encode() +} + +// Params generates URL values from GetOptions fields +func (g *GetOptions) Params() url.Values { + params := url.Values{} + if g == nil { + return params + } + if len(g.Includes) != 0 { + params.Set("include", strings.Join(g.Includes, ",")) + } + if len(g.Excludes) != 0 { + params.Set("exclude", strings.Join(g.Excludes, ",")) + } + + return params +} + +// Params generates URL values from ListOptions fields +func (l *ListOptions) Params() url.Values { + if l == nil { + return url.Values{} + } + params := l.GetOptions().Params() + + if l.Page != 0 { + params.Set("page", fmt.Sprintf("%d", l.Page)) + } + if l.PerPage != 0 { + params.Set("per_page", fmt.Sprintf("%d", l.PerPage)) + } + + if l.Search != "" { + params.Set("search", l.Search) + } + + return params +} + +// Params generates a URL values from SearchOptions fields +func (s *SearchOptions) Params() url.Values { + if s == nil { + return url.Values{} + } + + params := s.GetOptions().Params() + params.Set("search", s.Search) + return params +} + +// meta contains pagination information +type meta struct { + Self *Href `json:"self"` + First *Href `json:"first"` + Last *Href `json:"last"` + Previous *Href `json:"previous,omitempty"` + Next *Href `json:"next,omitempty"` + Total int `json:"total"` + CurrentPageNum int `json:"current_page"` + LastPageNum int `json:"last_page"` +} + +// Response is the http response from api calls +type Response struct { + *http.Response + Rate +} + +// Href is an API link +type Href struct { + Href string `json:"href"` +} + +func (r *Response) populateRate() { + // parse the rate limit headers and populate Response.Rate + if limit := r.Header.Get(headerRateLimit); limit != "" { + r.Rate.RequestLimit, _ = strconv.Atoi(limit) + } + if remaining := r.Header.Get(headerRateRemaining); remaining != "" { + r.Rate.RequestsRemaining, _ = strconv.Atoi(remaining) + } + if reset := r.Header.Get(headerRateReset); reset != "" { + if v, _ := strconv.ParseInt(reset, 10, 64); v != 0 { + r.Rate.Reset = Timestamp{time.Unix(v, 0)} + } + } +} + +// ErrorResponse is the http response used on errors +type ErrorResponse struct { + Response *http.Response + Errors []string `json:"errors"` + SingleError string `json:"error"` +} + +func (r *ErrorResponse) Error() string { + return fmt.Sprintf("%v %v: %d %v %v", + r.Response.Request.Method, r.Response.Request.URL, r.Response.StatusCode, strings.Join(r.Errors, ", "), r.SingleError) +} + +// Client is the base API Client +type Client struct { + client *retryablehttp.Client + debug bool + + BaseURL *url.URL + + UserAgent string + ConsumerToken string + APIKey string + + RateLimit Rate + + // Equinix Metal Api Objects + APIKeys APIKeyService + BGPConfig BGPConfigService + BGPSessions BGPSessionService + Batches BatchService + CapacityService CapacityService + DeviceIPs DeviceIPService + DevicePorts 
DevicePortService + Devices DeviceService + Emails EmailService + Events EventService + Facilities FacilityService + HardwareReservations HardwareReservationService + Notifications NotificationService + OperatingSystems OSService + Organizations OrganizationService + Plans PlanService + ProjectIPs ProjectIPService + ProjectVirtualNetworks ProjectVirtualNetworkService + Projects ProjectService + SSHKeys SSHKeyService + SpotMarket SpotMarketService + SpotMarketRequests SpotMarketRequestService + TwoFactorAuth TwoFactorAuthService + Users UserService + VPN VPNService + VolumeAttachments VolumeAttachmentService + Volumes VolumeService +} + +// requestDoer provides methods for making HTTP requests and receiving the +// response, errors, and a structured result +// +// This interface is used in *ServiceOp as a mockable alternative to a full +// Client object. +type requestDoer interface { + NewRequest(method, path string, body interface{}) (*retryablehttp.Request, error) + Do(req *retryablehttp.Request, v interface{}) (*Response, error) + DoRequest(method, path string, body, v interface{}) (*Response, error) + DoRequestWithHeader(method string, headers map[string]string, path string, body, v interface{}) (*Response, error) +} + +// NewRequest inits a new http request with the proper headers +func (c *Client) NewRequest(method, path string, body interface{}) (*retryablehttp.Request, error) { + // relative path to append to the endpoint url, no leading slash please + if path[0] == '/' { + path = path[1:] + } + rel, err := url.Parse(path) + if err != nil { + return nil, err + } + + u := c.BaseURL.ResolveReference(rel) + + // json encode the request body, if any + buf := new(bytes.Buffer) + if body != nil { + err := json.NewEncoder(buf).Encode(body) + if err != nil { + return nil, err + } + } + + req, err := retryablehttp.NewRequest(method, u.String(), buf) + if err != nil { + return nil, err + } + + req.Close = true + + req.Header.Add("X-Auth-Token", c.APIKey) + req.Header.Add("X-Consumer-Token", c.ConsumerToken) + + req.Header.Add("Content-Type", mediaType) + req.Header.Add("Accept", mediaType) + req.Header.Add("User-Agent", c.UserAgent) + return req, nil +} + +// Do executes the http request +func (c *Client) Do(req *retryablehttp.Request, v interface{}) (*Response, error) { + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + response := Response{Response: resp} + response.populateRate() + if c.debug { + dumpResponse(response.Response) + } + c.RateLimit = response.Rate + + err = checkResponse(resp) + // if the response is an error, return the ErrorResponse + if err != nil { + return &response, err + } + + if v != nil { + // if v implements the io.Writer interface, return the raw response + if w, ok := v.(io.Writer); ok { + _, err = io.Copy(w, resp.Body) + if err != nil { + return &response, err + } + } else { + err = json.NewDecoder(resp.Body).Decode(v) + if err != nil { + return &response, err + } + } + } + + return &response, err +} + +func dumpResponse(resp *http.Response) { + o, _ := httputil.DumpResponse(resp, true) + strResp := string(o) + reg, _ := regexp.Compile(`"token":(.+?),`) + reMatches := reg.FindStringSubmatch(strResp) + if len(reMatches) == 2 { + strResp = strings.Replace(strResp, reMatches[1], strings.Repeat("-", len(reMatches[1])), 1) + } + log.Printf("\n=======[RESPONSE]============\n%s\n\n", strResp) +} + +func dumpRequest(req *retryablehttp.Request) { + o, _ := httputil.DumpRequestOut(req.Request, false) + strReq := 
string(o) + reg, _ := regexp.Compile(`X-Auth-Token: (\w*)`) + reMatches := reg.FindStringSubmatch(strReq) + if len(reMatches) == 2 { + strReq = strings.Replace(strReq, reMatches[1], strings.Repeat("-", len(reMatches[1])), 1) + } + bbs, _ := req.BodyBytes() + log.Printf("\n=======[REQUEST]=============\n%s%s\n", strReq, string(bbs)) +} + +// DoRequest is a convenience method, it calls NewRequest followed by Do +// v is the interface to unmarshal the response JSON into +func (c *Client) DoRequest(method, path string, body, v interface{}) (*Response, error) { + req, err := c.NewRequest(method, path, body) + if c.debug { + dumpRequest(req) + } + if err != nil { + return nil, err + } + return c.Do(req, v) +} + +// DoRequestWithHeader same as DoRequest +func (c *Client) DoRequestWithHeader(method string, headers map[string]string, path string, body, v interface{}) (*Response, error) { + req, err := c.NewRequest(method, path, body) + for k, v := range headers { + req.Header.Add(k, v) + } + + if c.debug { + dumpRequest(req) + } + if err != nil { + return nil, err + } + return c.Do(req, v) +} + +// NewClient initializes and returns a Client +func NewClient() (*Client, error) { + apiToken := os.Getenv(authTokenEnvVar) + if apiToken == "" { + return nil, fmt.Errorf("you must export %s", authTokenEnvVar) + } + c := NewClientWithAuth("packngo lib", apiToken, nil) + return c, nil + +} + +// NewClientWithAuth initializes and returns a Client, use this to get an API Client to operate on +// N.B.: Equinix Metal's API certificate requires Go 1.5+ to successfully parse. If you are using +// an older version of Go, pass in a custom http.Client with a custom TLS configuration +// that sets "InsecureSkipVerify" to "true" +func NewClientWithAuth(consumerToken string, apiKey string, httpClient *retryablehttp.Client) *Client { + client, _ := NewClientWithBaseURL(consumerToken, apiKey, httpClient, baseURL) + return client +} + +// RetryPolicy determines if the supplied http Response and error can be safely retried +func RetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + // do not retry on context.Canceled or context.DeadlineExceeded + if ctx.Err() != nil { + return false, ctx.Err() + } + + if err != nil { + if v, ok := err.(*url.Error); ok { + // Don't retry if the error was due to too many redirects. + if redirectsErrorRe.MatchString(v.Error()) { + return false, nil + } + + // Don't retry if the error was due to TLS cert verification failure. + if _, ok := v.Err.(x509.UnknownAuthorityError); ok { + return false, nil + } + } + + // The error is likely recoverable so retry. + return true, nil + } + + // Check the response code. We retry on 500-range responses to allow + // the server time to recover, as 500's are typically not permanent + // errors and may relate to outages on the server side. This will catch + // invalid response codes as well, like 0 and 999. + //if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) { + // return true, nil + //} + + return false, nil +} + +// NewClientWithBaseURL returns a Client pointing to nonstandard API URL, e.g. +// for mocking the remote API +func NewClientWithBaseURL(consumerToken string, apiKey string, httpClient *retryablehttp.Client, apiBaseURL string) (*Client, error) { + if httpClient == nil { + // Don't fall back on http.DefaultClient as it's not nice to adjust state + // implicitly. If the client wants to use http.DefaultClient, they can + // pass it in explicitly. 
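+		// NOTE: the defaults below allow up to 10 retries with a 1s-30s backoff
+		// window. The custom RetryPolicy above only retries transport-level
+		// errors (and never retries redirect loops or TLS verification
+		// failures); its 5xx branch is commented out, so HTTP error statuses
+		// are returned to the caller without being retried.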
+ httpClient = retryablehttp.NewClient() + httpClient.RetryWaitMin = time.Second + httpClient.RetryWaitMax = 30 * time.Second + httpClient.RetryMax = 10 + httpClient.CheckRetry = RetryPolicy + } + + u, err := url.Parse(apiBaseURL) + if err != nil { + return nil, err + } + + c := &Client{client: httpClient, BaseURL: u, UserAgent: userAgent, ConsumerToken: consumerToken, APIKey: apiKey} + c.APIKeys = &APIKeyServiceOp{client: c} + c.BGPConfig = &BGPConfigServiceOp{client: c} + c.BGPSessions = &BGPSessionServiceOp{client: c} + c.Batches = &BatchServiceOp{client: c} + c.CapacityService = &CapacityServiceOp{client: c} + c.DeviceIPs = &DeviceIPServiceOp{client: c} + c.DevicePorts = &DevicePortServiceOp{client: c} + c.Devices = &DeviceServiceOp{client: c} + c.Emails = &EmailServiceOp{client: c} + c.Events = &EventServiceOp{client: c} + c.Facilities = &FacilityServiceOp{client: c} + c.HardwareReservations = &HardwareReservationServiceOp{client: c} + c.Notifications = &NotificationServiceOp{client: c} + c.OperatingSystems = &OSServiceOp{client: c} + c.Organizations = &OrganizationServiceOp{client: c} + c.Plans = &PlanServiceOp{client: c} + c.ProjectIPs = &ProjectIPServiceOp{client: c} + c.ProjectVirtualNetworks = &ProjectVirtualNetworkServiceOp{client: c} + c.Projects = &ProjectServiceOp{client: c} + c.SSHKeys = &SSHKeyServiceOp{client: c} + c.SpotMarket = &SpotMarketServiceOp{client: c} + c.SpotMarketRequests = &SpotMarketRequestServiceOp{client: c} + c.TwoFactorAuth = &TwoFactorAuthServiceOp{client: c} + c.Users = &UserServiceOp{client: c} + c.VPN = &VPNServiceOp{client: c} + c.VolumeAttachments = &VolumeAttachmentServiceOp{client: c} + c.Volumes = &VolumeServiceOp{client: c} + c.debug = os.Getenv(debugEnvVar) != "" + + return c, nil +} + +func checkResponse(r *http.Response) error { + + if s := r.StatusCode; s >= 200 && s <= 299 { + // response is good, return + return nil + } + + errorResponse := &ErrorResponse{Response: r} + data, err := ioutil.ReadAll(r.Body) + // if the response has a body, populate the message in errorResponse + if err != nil { + return err + } + + ct := r.Header.Get("Content-Type") + if !strings.HasPrefix(ct, expectedAPIContentTypePrefix) { + errorResponse.SingleError = fmt.Sprintf("Unexpected Content-Type %s with status %s", ct, r.Status) + return errorResponse + } + + if len(data) > 0 { + err = json.Unmarshal(data, errorResponse) + if err != nil { + return err + } + } + + return errorResponse +} diff --git a/vendor/github.com/packethost/packngo/payment_methods.go b/vendor/github.com/packethost/packngo/payment_methods.go new file mode 100644 index 00000000000..8f9840f0d2a --- /dev/null +++ b/vendor/github.com/packethost/packngo/payment_methods.go @@ -0,0 +1,71 @@ +package packngo + +// API documentation https://metal.equinix.com/developers/api/paymentmethods/ +const paymentMethodBasePath = "/payment-methods" + +// ProjectService interface defines available project methods +type PaymentMethodService interface { + List() ([]PaymentMethod, *Response, error) + Get(string) (*PaymentMethod, *Response, error) + Create(*PaymentMethodCreateRequest) (*PaymentMethod, *Response, error) + Update(string, *PaymentMethodUpdateRequest) (*PaymentMethod, *Response, error) + Delete(string) (*Response, error) +} + +type paymentMethodsRoot struct { + PaymentMethods []PaymentMethod `json:"payment_methods"` +} + +// PaymentMethod represents an Equinix Metal payment method of an organization +type PaymentMethod struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Created string 
`json:"created_at,omitempty"` + Updated string `json:"updated_at,omitempty"` + Nonce string `json:"nonce,omitempty"` + Default bool `json:"default,omitempty"` + Organization Organization `json:"organization,omitempty"` + Projects []Project `json:"projects,omitempty"` + Type string `json:"type,omitempty"` + CardholderName string `json:"cardholder_name,omitempty"` + ExpMonth string `json:"expiration_month,omitempty"` + ExpYear string `json:"expiration_year,omitempty"` + Last4 string `json:"last_4,omitempty"` + BillingAddress BillingAddress `json:"billing_address,omitempty"` + URL string `json:"href,omitempty"` +} + +func (pm PaymentMethod) String() string { + return Stringify(pm) +} + +// PaymentMethodCreateRequest type used to create an Equinix Metal payment method of an organization +type PaymentMethodCreateRequest struct { + Name string `json:"name"` + Nonce string `json:"nonce"` + CardholderName string `json:"cardholder_name,omitempty"` + ExpMonth string `json:"expiration_month,omitempty"` + ExpYear string `json:"expiration_year,omitempty"` + BillingAddress string `json:"billing_address,omitempty"` +} + +func (pm PaymentMethodCreateRequest) String() string { + return Stringify(pm) +} + +// PaymentMethodUpdateRequest type used to update an Equinix Metal payment method of an organization +type PaymentMethodUpdateRequest struct { + Name *string `json:"name,omitempty"` + CardholderName *string `json:"cardholder_name,omitempty"` + ExpMonth *string `json:"expiration_month,omitempty"` + ExpYear *string `json:"expiration_year,omitempty"` + BillingAddress *string `json:"billing_address,omitempty"` +} + +func (pm PaymentMethodUpdateRequest) String() string { + return Stringify(pm) +} + +// PaymentMethodServiceOp implements PaymentMethodService +type PaymentMethodServiceOp struct { +} diff --git a/vendor/github.com/packethost/packngo/plans.go b/vendor/github.com/packethost/packngo/plans.go new file mode 100644 index 00000000000..a3d37d9aace --- /dev/null +++ b/vendor/github.com/packethost/packngo/plans.go @@ -0,0 +1,126 @@ +package packngo + +import ( + "fmt" +) + +const planBasePath = "/plans" + +// PlanService interface defines available plan methods +type PlanService interface { + List(*ListOptions) ([]Plan, *Response, error) +} + +type planRoot struct { + Plans []Plan `json:"plans"` +} + +// Plan represents an Equinix Metal service plan +type Plan struct { + ID string `json:"id"` + Slug string `json:"slug,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Line string `json:"line,omitempty"` + Specs *Specs `json:"specs,omitempty"` + Pricing *Pricing `json:"pricing,omitempty"` + DeploymentTypes []string `json:"deployment_types"` + Class string `json:"class"` + AvailableIn []Facility `json:"available_in"` +} + +func (p Plan) String() string { + return Stringify(p) +} + +// Specs - the server specs for a plan +type Specs struct { + Cpus []*Cpus `json:"cpus,omitempty"` + Memory *Memory `json:"memory,omitempty"` + Drives []*Drives `json:"drives,omitempty"` + Nics []*Nics `json:"nics,omitempty"` + Features *Features `json:"features,omitempty"` +} + +func (s Specs) String() string { + return Stringify(s) +} + +// Cpus - the CPU config details for specs on a plan +type Cpus struct { + Count int `json:"count,omitempty"` + Type string `json:"type,omitempty"` +} + +func (c Cpus) String() string { + return Stringify(c) +} + +// Memory - the RAM config details for specs on a plan +type Memory struct { + Total string `json:"total,omitempty"` +} + +func (m 
Memory) String() string { + return Stringify(m) +} + +// Drives - the storage config details for specs on a plan +type Drives struct { + Count int `json:"count,omitempty"` + Size string `json:"size,omitempty"` + Type string `json:"type,omitempty"` +} + +func (d Drives) String() string { + return Stringify(d) +} + +// Nics - the network hardware details for specs on a plan +type Nics struct { + Count int `json:"count,omitempty"` + Type string `json:"type,omitempty"` +} + +func (n Nics) String() string { + return Stringify(n) +} + +// Features - other features in the specs for a plan +type Features struct { + Raid bool `json:"raid,omitempty"` + Txt bool `json:"txt,omitempty"` +} + +func (f Features) String() string { + return Stringify(f) +} + +// Pricing - the pricing options on a plan +type Pricing struct { + Hour float32 `json:"hour,omitempty"` + Month float32 `json:"month,omitempty"` +} + +func (p Pricing) String() string { + return Stringify(p) +} + +// PlanServiceOp implements PlanService +type PlanServiceOp struct { + client *Client +} + +// List method returns all available plans +func (s *PlanServiceOp) List(listOpt *ListOptions) ([]Plan, *Response, error) { + root := new(planRoot) + params := urlQuery(listOpt) + path := fmt.Sprintf("%s?%s", planBasePath, params) + + resp, err := s.client.DoRequest("GET", path, nil, root) + if err != nil { + return nil, resp, err + } + + return root.Plans, resp, err +} diff --git a/vendor/github.com/packethost/packngo/ports.go b/vendor/github.com/packethost/packngo/ports.go new file mode 100644 index 00000000000..6e7133f17d4 --- /dev/null +++ b/vendor/github.com/packethost/packngo/ports.go @@ -0,0 +1,371 @@ +package packngo + +import ( + "context" + "fmt" + "strings" + "time" +) + +const portBasePath = "/ports" + +// DevicePortService handles operations on a port which belongs to a particular device +type DevicePortService interface { + Assign(*PortAssignRequest) (*Port, *Response, error) + Unassign(*PortAssignRequest) (*Port, *Response, error) + AssignNative(*PortAssignRequest) (*Port, *Response, error) + UnassignNative(string) (*Port, *Response, error) + Bond(*Port, bool) (*Port, *Response, error) + Disbond(*Port, bool) (*Port, *Response, error) + DeviceToNetworkType(string, string) (*Device, error) + DeviceNetworkType(string) (string, error) + PortToLayerTwo(string, string) (*Port, *Response, error) + PortToLayerThree(string, string) (*Port, *Response, error) + GetPortByName(string, string) (*Port, error) + GetOddEthPorts(*Device) (map[string]*Port, error) + GetAllEthPorts(*Device) (map[string]*Port, error) + ConvertDevice(*Device, string) error +} + +type PortData struct { + MAC string `json:"mac"` + Bonded bool `json:"bonded"` +} + +type BondData struct { + ID string `json:"id"` + Name string `json:"name"` +} + +type Port struct { + ID string `json:"id"` + Type string `json:"type"` + Name string `json:"name"` + Data PortData `json:"data"` + NetworkType string `json:"network_type,omitempty"` + NativeVirtualNetwork *VirtualNetwork `json:"native_virtual_network"` + AttachedVirtualNetworks []VirtualNetwork `json:"virtual_networks"` + Bond *BondData `json:"bond"` +} + +type AddressRequest struct { + AddressFamily int `json:"address_family"` + Public bool `json:"public"` +} + +type BackToL3Request struct { + RequestIPs []AddressRequest `json:"request_ips"` +} + +type DevicePortServiceOp struct { + client *Client +} + +type PortAssignRequest struct { + PortID string `json:"id"` + VirtualNetworkID string `json:"vnid"` +} + +type BondRequest struct { + 
PortID string `json:"id"` + BulkEnable bool `json:"bulk_enable"` +} + +type DisbondRequest struct { + PortID string `json:"id"` + BulkDisable bool `json:"bulk_disable"` +} + +func (i *DevicePortServiceOp) GetPortByName(deviceID, name string) (*Port, error) { + device, _, err := i.client.Devices.Get(deviceID, nil) + if err != nil { + return nil, err + } + return device.GetPortByName(name) +} + +func (i *DevicePortServiceOp) Assign(par *PortAssignRequest) (*Port, *Response, error) { + path := fmt.Sprintf("%s/%s/assign", portBasePath, par.PortID) + return i.portAction(path, par) +} + +func (i *DevicePortServiceOp) AssignNative(par *PortAssignRequest) (*Port, *Response, error) { + path := fmt.Sprintf("%s/%s/native-vlan", portBasePath, par.PortID) + return i.portAction(path, par) +} + +func (i *DevicePortServiceOp) UnassignNative(portID string) (*Port, *Response, error) { + path := fmt.Sprintf("%s/%s/native-vlan", portBasePath, portID) + port := new(Port) + + resp, err := i.client.DoRequest("DELETE", path, nil, port) + if err != nil { + return nil, resp, err + } + + return port, resp, err +} + +func (i *DevicePortServiceOp) Unassign(par *PortAssignRequest) (*Port, *Response, error) { + path := fmt.Sprintf("%s/%s/unassign", portBasePath, par.PortID) + return i.portAction(path, par) +} + +func (i *DevicePortServiceOp) Bond(p *Port, be bool) (*Port, *Response, error) { + if p.Data.Bonded { + return p, nil, nil + } + br := &BondRequest{PortID: p.ID, BulkEnable: be} + path := fmt.Sprintf("%s/%s/bond", portBasePath, br.PortID) + return i.portAction(path, br) +} + +func (i *DevicePortServiceOp) Disbond(p *Port, bd bool) (*Port, *Response, error) { + if !p.Data.Bonded { + return p, nil, nil + } + dr := &DisbondRequest{PortID: p.ID, BulkDisable: bd} + path := fmt.Sprintf("%s/%s/disbond", portBasePath, dr.PortID) + return i.portAction(path, dr) +} + +func (i *DevicePortServiceOp) portAction(path string, req interface{}) (*Port, *Response, error) { + port := new(Port) + + resp, err := i.client.DoRequest("POST", path, req, port) + if err != nil { + return nil, resp, err + } + + return port, resp, err +} + +func (i *DevicePortServiceOp) PortToLayerTwo(deviceID, portName string) (*Port, *Response, error) { + p, err := i.client.DevicePorts.GetPortByName(deviceID, portName) + if err != nil { + return nil, nil, err + } + if strings.HasPrefix(p.NetworkType, "layer2") { + return p, nil, nil + } + path := fmt.Sprintf("%s/%s/convert/layer-2", portBasePath, p.ID) + port := new(Port) + + resp, err := i.client.DoRequest("POST", path, nil, port) + if err != nil { + return nil, resp, err + } + + return port, resp, err +} + +func (i *DevicePortServiceOp) PortToLayerThree(deviceID, portName string) (*Port, *Response, error) { + p, err := i.client.DevicePorts.GetPortByName(deviceID, portName) + if err != nil { + return nil, nil, err + } + if (p.NetworkType == NetworkTypeL3) || (p.NetworkType == NetworkTypeHybrid) { + return p, nil, nil + } + path := fmt.Sprintf("%s/%s/convert/layer-3", portBasePath, p.ID) + port := new(Port) + + req := BackToL3Request{ + RequestIPs: []AddressRequest{ + {AddressFamily: 4, Public: true}, + {AddressFamily: 4, Public: false}, + {AddressFamily: 6, Public: true}, + }, + } + + resp, err := i.client.DoRequest("POST", path, &req, port) + if err != nil { + return nil, resp, err + } + + return port, resp, err +} + +func (i *DevicePortServiceOp) DeviceNetworkType(deviceID string) (string, error) { + d, _, err := i.client.Devices.Get(deviceID, nil) + if err != nil { + return "", err + } + return 
d.GetNetworkType(), nil +} + +func (i *DevicePortServiceOp) GetAllEthPorts(d *Device) (map[string]*Port, error) { + d, _, err := i.client.Devices.Get(d.ID, nil) + if err != nil { + return nil, err + } + return d.GetPhysicalPorts(), nil +} + +func (i *DevicePortServiceOp) GetOddEthPorts(d *Device) (map[string]*Port, error) { + d, _, err := i.client.Devices.Get(d.ID, nil) + if err != nil { + return nil, err + } + ret := map[string]*Port{} + eth1, err := d.GetPortByName("eth1") + if err != nil { + return nil, err + } + ret["eth1"] = eth1 + + eth3, err := d.GetPortByName("eth3") + if err != nil { + return ret, nil + } + ret["eth3"] = eth3 + return ret, nil + +} + +func (i *DevicePortServiceOp) ConvertDevice(d *Device, targetType string) error { + bondPorts := d.GetBondPorts() + + if targetType == NetworkTypeL3 { + // TODO: remove vlans from all the ports + for _, p := range bondPorts { + _, _, err := i.client.DevicePorts.Bond(p, false) + if err != nil { + return err + } + } + _, _, err := i.client.DevicePorts.PortToLayerThree(d.ID, "bond0") + if err != nil { + return err + } + allEthPorts, err := i.client.DevicePorts.GetAllEthPorts(d) + if err != nil { + return err + } + for _, p := range allEthPorts { + _, _, err := i.client.DevicePorts.Bond(p, false) + if err != nil { + return err + } + } + } + if targetType == NetworkTypeHybrid { + for _, p := range bondPorts { + _, _, err := i.client.DevicePorts.Bond(p, false) + if err != nil { + return err + } + } + + _, _, err := i.client.DevicePorts.PortToLayerThree(d.ID, "bond0") + if err != nil { + return err + } + + // ports need to be refreshed before bonding/disbonding + oddEthPorts, err := i.client.DevicePorts.GetOddEthPorts(d) + if err != nil { + return err + } + + for _, p := range oddEthPorts { + _, _, err := i.client.DevicePorts.Disbond(p, false) + if err != nil { + return err + } + } + } + if targetType == NetworkTypeL2Individual { + _, _, err := i.client.DevicePorts.PortToLayerTwo(d.ID, "bond0") + if err != nil { + return err + } + for _, p := range bondPorts { + _, _, err = i.client.DevicePorts.Disbond(p, true) + if err != nil { + return err + } + } + } + if targetType == NetworkTypeL2Bonded { + + for _, p := range bondPorts { + _, _, err := i.client.DevicePorts.PortToLayerTwo(d.ID, p.Name) + if err != nil { + return err + } + } + allEthPorts, err := i.client.DevicePorts.GetAllEthPorts(d) + if err != nil { + return err + } + for _, p := range allEthPorts { + _, _, err := i.client.DevicePorts.Bond(p, false) + if err != nil { + return err + } + } + } + return nil +} + +// waitDeviceNetworkType waits for a device's computed network type (as +// determined by GetNetworkType()) to reach the specified state. 
An error will +// be returned if the device does not attain the desired network type state when +// the timeout is reached without +func waitDeviceNetworkType(id, networkType string, c *Client) (*Device, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(15)*time.Minute) + defer cancel() + + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + d, _, err := c.Devices.Get(id, nil) + if err != nil { + return nil, err + } + if d.GetNetworkType() == networkType { + return d, nil + } + case <-ctx.Done(): + return nil, fmt.Errorf("device %s is still not in state %s after timeout", id, networkType) + } + } +} + +func (i *DevicePortServiceOp) DeviceToNetworkType(deviceID string, targetType string) (*Device, error) { + + d, _, err := i.client.Devices.Get(deviceID, nil) + if err != nil { + return nil, err + } + + curType := d.GetNetworkType() + + if curType == targetType { + return nil, fmt.Errorf("Device already is in state %s", targetType) + } + err = i.client.DevicePorts.ConvertDevice(d, targetType) + if err != nil { + return nil, err + } + + d, _, err = i.client.Devices.Get(deviceID, nil) + //d, err = waitDeviceNetworkType(deviceID, targetType, i.client) + if err != nil { + return nil, err + } + + finalType := d.GetNetworkType() + + if finalType != targetType { + return nil, fmt.Errorf( + "Failed to convert device %s from %s to %s. New type was %s", + deviceID, curType, targetType, finalType) + + } + return d, err +} diff --git a/vendor/github.com/packethost/packngo/projects.go b/vendor/github.com/packethost/packngo/projects.go new file mode 100644 index 00000000000..b5352f7e8ae --- /dev/null +++ b/vendor/github.com/packethost/packngo/projects.go @@ -0,0 +1,192 @@ +package packngo + +import ( + "fmt" +) + +const projectBasePath = "/projects" + +// ProjectService interface defines available project methods +type ProjectService interface { + List(listOpt *ListOptions) ([]Project, *Response, error) + Get(string, *GetOptions) (*Project, *Response, error) + Create(*ProjectCreateRequest) (*Project, *Response, error) + Update(string, *ProjectUpdateRequest) (*Project, *Response, error) + Delete(string) (*Response, error) + ListBGPSessions(projectID string, listOpt *ListOptions) ([]BGPSession, *Response, error) + ListEvents(string, *ListOptions) ([]Event, *Response, error) + ListSSHKeys(projectID string, searchOpt *SearchOptions) ([]SSHKey, *Response, error) +} + +type projectsRoot struct { + Projects []Project `json:"projects"` + Meta meta `json:"meta"` +} + +// Project represents an Equinix Metal project +type Project struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Organization Organization `json:"organization,omitempty"` + Created string `json:"created_at,omitempty"` + Updated string `json:"updated_at,omitempty"` + Users []User `json:"members,omitempty"` + Devices []Device `json:"devices,omitempty"` + SSHKeys []SSHKey `json:"ssh_keys,omitempty"` + URL string `json:"href,omitempty"` + PaymentMethod PaymentMethod `json:"payment_method,omitempty"` + BackendTransfer bool `json:"backend_transfer_enabled"` +} + +func (p Project) String() string { + return Stringify(p) +} + +// ProjectCreateRequest type used to create an Equinix Metal project +type ProjectCreateRequest struct { + Name string `json:"name"` + PaymentMethodID string `json:"payment_method_id,omitempty"` + OrganizationID string `json:"organization_id,omitempty"` +} + +func (p ProjectCreateRequest) String() string { + return Stringify(p) +} 
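Reviewer note: for anyone not familiar with packngo, the vendored client added by this change is consumed roughly as sketched below. This is a minimal, hedged example written only against signatures visible in this diff (NewClient, Projects.List, OperatingSystems.List); it is not installer code from this PR, and the variable names are illustrative.

	package main

	import (
		"fmt"
		"log"

		"github.com/packethost/packngo"
	)

	func main() {
		// NewClient reads the PACKET_AUTH_TOKEN environment variable.
		c, err := packngo.NewClient()
		if err != nil {
			log.Fatal(err)
		}

		// List the projects visible to the token; List follows meta.next internally.
		projects, _, err := c.Projects.List(nil)
		if err != nil {
			log.Fatal(err)
		}
		for _, p := range projects {
			fmt.Printf("%s\t%s\n", p.ID, p.Name)
		}

		// List the operating systems that can be provisioned.
		oses, _, err := c.OperatingSystems.List()
		if err != nil {
			log.Fatal(err)
		}
		for _, o := range oses {
			fmt.Printf("%s (%s %s)\n", o.Slug, o.Distro, o.Version)
		}
	}
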
+ +// ProjectUpdateRequest type used to update an Equinix Metal project +type ProjectUpdateRequest struct { + Name *string `json:"name,omitempty"` + PaymentMethodID *string `json:"payment_method_id,omitempty"` + BackendTransfer *bool `json:"backend_transfer_enabled,omitempty"` +} + +func (p ProjectUpdateRequest) String() string { + return Stringify(p) +} + +// ProjectServiceOp implements ProjectService +type ProjectServiceOp struct { + client requestDoer +} + +// List returns the user's projects +func (s *ProjectServiceOp) List(listOpt *ListOptions) (projects []Project, resp *Response, err error) { + params := urlQuery(listOpt) + root := new(projectsRoot) + + path := fmt.Sprintf("%s?%s", projectBasePath, params) + + for { + resp, err = s.client.DoRequest("GET", path, nil, root) + if err != nil { + return nil, resp, err + } + + projects = append(projects, root.Projects...) + + if root.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = root.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + + return + } +} + +// Get returns a project by id +func (s *ProjectServiceOp) Get(projectID string, getOpt *GetOptions) (*Project, *Response, error) { + params := urlQuery(getOpt) + path := fmt.Sprintf("%s/%s?%s", projectBasePath, projectID, params) + project := new(Project) + resp, err := s.client.DoRequest("GET", path, nil, project) + if err != nil { + return nil, resp, err + } + return project, resp, err +} + +// Create creates a new project +func (s *ProjectServiceOp) Create(createRequest *ProjectCreateRequest) (*Project, *Response, error) { + project := new(Project) + + resp, err := s.client.DoRequest("POST", projectBasePath, createRequest, project) + if err != nil { + return nil, resp, err + } + + return project, resp, err +} + +// Update updates a project +func (s *ProjectServiceOp) Update(id string, updateRequest *ProjectUpdateRequest) (*Project, *Response, error) { + path := fmt.Sprintf("%s/%s", projectBasePath, id) + project := new(Project) + + resp, err := s.client.DoRequest("PATCH", path, updateRequest, project) + if err != nil { + return nil, resp, err + } + + return project, resp, err +} + +// Delete deletes a project +func (s *ProjectServiceOp) Delete(projectID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", projectBasePath, projectID) + + return s.client.DoRequest("DELETE", path, nil, nil) +} + +// ListBGPSessions returns all BGP Sessions associated with the project +func (s *ProjectServiceOp) ListBGPSessions(projectID string, listOpt *ListOptions) (bgpSessions []BGPSession, resp *Response, err error) { + params := urlQuery(listOpt) + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, projectID, bgpSessionBasePath, params) + + for { + subset := new(bgpSessionsRoot) + + resp, err = s.client.DoRequest("GET", path, nil, subset) + if err != nil { + return nil, resp, err + } + + bgpSessions = append(bgpSessions, subset.Sessions...) 
+ + if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = subset.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + + return + } +} + +// ListSSHKeys returns all SSH Keys associated with the project +func (s *ProjectServiceOp) ListSSHKeys(projectID string, searchOpt *SearchOptions) (sshKeys []SSHKey, resp *Response, err error) { + params := urlQuery(searchOpt) + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, projectID, sshKeyBasePath, params) + + subset := new(sshKeyRoot) + + resp, err = s.client.DoRequest("GET", path, nil, subset) + if err != nil { + return nil, resp, err + } + + sshKeys = append(sshKeys, subset.SSHKeys...) + + return +} + +// ListEvents returns list of project events +func (s *ProjectServiceOp) ListEvents(projectID string, listOpt *ListOptions) ([]Event, *Response, error) { + path := fmt.Sprintf("%s/%s%s", projectBasePath, projectID, eventBasePath) + + return listEvents(s.client, path, listOpt) +} diff --git a/vendor/github.com/packethost/packngo/rate.go b/vendor/github.com/packethost/packngo/rate.go new file mode 100644 index 00000000000..965967d4557 --- /dev/null +++ b/vendor/github.com/packethost/packngo/rate.go @@ -0,0 +1,12 @@ +package packngo + +// Rate provides the API request rate limit details +type Rate struct { + RequestLimit int `json:"request_limit"` + RequestsRemaining int `json:"requests_remaining"` + Reset Timestamp `json:"rate_reset"` +} + +func (r Rate) String() string { + return Stringify(r) +} diff --git a/vendor/github.com/packethost/packngo/spotmarket.go b/vendor/github.com/packethost/packngo/spotmarket.go new file mode 100644 index 00000000000..5dfb7d559b1 --- /dev/null +++ b/vendor/github.com/packethost/packngo/spotmarket.go @@ -0,0 +1,39 @@ +package packngo + +const spotMarketBasePath = "/market/spot/prices" + +// SpotMarketService expooses Spot Market methods +type SpotMarketService interface { + Prices() (PriceMap, *Response, error) +} + +// SpotMarketServiceOp implements SpotMarketService +type SpotMarketServiceOp struct { + client *Client +} + +// PriceMap is a map of [facility][plan]-> float Price +type PriceMap map[string]map[string]float64 + +// Prices gets current PriceMap from the API +func (s *SpotMarketServiceOp) Prices() (PriceMap, *Response, error) { + root := new(struct { + SMPs map[string]map[string]struct { + Price float64 `json:"price"` + } `json:"spot_market_prices"` + }) + + resp, err := s.client.DoRequest("GET", spotMarketBasePath, nil, root) + if err != nil { + return nil, resp, err + } + + prices := make(PriceMap) + for facility, planMap := range root.SMPs { + prices[facility] = map[string]float64{} + for plan, v := range planMap { + prices[facility][plan] = v.Price + } + } + return prices, resp, err +} diff --git a/vendor/github.com/packethost/packngo/spotmarketrequest.go b/vendor/github.com/packethost/packngo/spotmarketrequest.go new file mode 100644 index 00000000000..033cb6bda75 --- /dev/null +++ b/vendor/github.com/packethost/packngo/spotmarketrequest.go @@ -0,0 +1,115 @@ +package packngo + +import ( + "fmt" + "math" +) + +const spotMarketRequestBasePath = "/spot-market-requests" + +type SpotMarketRequestService interface { + List(string, *ListOptions) ([]SpotMarketRequest, *Response, error) + Create(*SpotMarketRequestCreateRequest, string) (*SpotMarketRequest, *Response, error) + Delete(string, bool) (*Response, error) + Get(string, *GetOptions) (*SpotMarketRequest, *Response, error) +} + +type SpotMarketRequestCreateRequest struct { + 
DevicesMax int `json:"devices_max"` + DevicesMin int `json:"devices_min"` + EndAt *Timestamp `json:"end_at,omitempty"` + FacilityIDs []string `json:"facilities"` + MaxBidPrice float64 `json:"max_bid_price"` + + Parameters SpotMarketRequestInstanceParameters `json:"instance_parameters"` +} + +type SpotMarketRequest struct { + SpotMarketRequestCreateRequest + ID string `json:"id"` + Devices []Device `json:"devices"` + Facilities []Facility `json:"facilities"` + Project Project `json:"project"` + Href string `json:"href"` + Plan Plan `json:"plan"` +} + +type SpotMarketRequestInstanceParameters struct { + AlwaysPXE bool `json:"always_pxe,omitempty"` + IPXEScriptURL string `json:"ipxe_script_url,omitempty"` + BillingCycle string `json:"billing_cycle"` + CustomData string `json:"customdata,omitempty"` + Description string `json:"description,omitempty"` + Features []string `json:"features,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hostnames []string `json:"hostnames,omitempty"` + Locked bool `json:"locked,omitempty"` + OperatingSystem string `json:"operating_system"` + Plan string `json:"plan"` + ProjectSSHKeys []string `json:"project_ssh_keys,omitempty"` + Tags []string `json:"tags"` + TerminationTime *Timestamp `json:"termination_time,omitempty"` + UserSSHKeys []string `json:"user_ssh_keys,omitempty"` + UserData string `json:"userdata"` +} + +type SpotMarketRequestServiceOp struct { + client *Client +} + +func roundPlus(f float64, places int) float64 { + shift := math.Pow(10, float64(places)) + return math.Floor(f*shift+.5) / shift +} + +func (s *SpotMarketRequestServiceOp) Create(cr *SpotMarketRequestCreateRequest, pID string) (*SpotMarketRequest, *Response, error) { + path := fmt.Sprintf("%s/%s%s?include=devices,project,plan", projectBasePath, pID, spotMarketRequestBasePath) + cr.MaxBidPrice = roundPlus(cr.MaxBidPrice, 2) + smr := new(SpotMarketRequest) + + resp, err := s.client.DoRequest("POST", path, cr, smr) + if err != nil { + return nil, resp, err + } + + return smr, resp, err +} + +func (s *SpotMarketRequestServiceOp) List(pID string, listOpt *ListOptions) ([]SpotMarketRequest, *Response, error) { + type smrRoot struct { + SMRs []SpotMarketRequest `json:"spot_market_requests"` + } + + params := urlQuery(listOpt) + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, pID, spotMarketRequestBasePath, params) + output := new(smrRoot) + + resp, err := s.client.DoRequest("GET", path, nil, output) + if err != nil { + return nil, nil, err + } + + return output.SMRs, resp, nil +} + +func (s *SpotMarketRequestServiceOp) Get(id string, getOpt *GetOptions) (*SpotMarketRequest, *Response, error) { + params := urlQuery(getOpt) + path := fmt.Sprintf("%s/%s?%s", spotMarketRequestBasePath, id, params) + smr := new(SpotMarketRequest) + + resp, err := s.client.DoRequest("GET", path, nil, &smr) + if err != nil { + return nil, resp, err + } + + return smr, resp, err +} + +func (s *SpotMarketRequestServiceOp) Delete(id string, forceDelete bool) (*Response, error) { + path := fmt.Sprintf("%s/%s", spotMarketRequestBasePath, id) + var params *map[string]bool + if forceDelete { + params = &map[string]bool{"force_termination": true} + } + return s.client.DoRequest("DELETE", path, params, nil) +} diff --git a/vendor/github.com/packethost/packngo/sshkeys.go b/vendor/github.com/packethost/packngo/sshkeys.go new file mode 100644 index 00000000000..9e9fece1364 --- /dev/null +++ b/vendor/github.com/packethost/packngo/sshkeys.go @@ -0,0 +1,140 @@ +package packngo + +import "fmt" + +const ( + 
sshKeyBasePath = "/ssh-keys" +) + +// SSHKeyService interface defines available device methods +type SSHKeyService interface { + List() ([]SSHKey, *Response, error) + ProjectList(string) ([]SSHKey, *Response, error) + Get(string, *GetOptions) (*SSHKey, *Response, error) + Create(*SSHKeyCreateRequest) (*SSHKey, *Response, error) + Update(string, *SSHKeyUpdateRequest) (*SSHKey, *Response, error) + Delete(string) (*Response, error) +} + +type sshKeyRoot struct { + SSHKeys []SSHKey `json:"ssh_keys"` +} + +// SSHKey represents a user's ssh key +type SSHKey struct { + ID string `json:"id"` + Label string `json:"label"` + Key string `json:"key"` + FingerPrint string `json:"fingerprint"` + Created string `json:"created_at"` + Updated string `json:"updated_at"` + Owner Href + URL string `json:"href,omitempty"` +} + +func (s SSHKey) String() string { + return Stringify(s) +} + +// SSHKeyCreateRequest type used to create an ssh key +type SSHKeyCreateRequest struct { + Label string `json:"label"` + Key string `json:"key"` + ProjectID string `json:"-"` +} + +func (s SSHKeyCreateRequest) String() string { + return Stringify(s) +} + +// SSHKeyUpdateRequest type used to update an ssh key +type SSHKeyUpdateRequest struct { + Label *string `json:"label,omitempty"` + Key *string `json:"key,omitempty"` +} + +func (s SSHKeyUpdateRequest) String() string { + return Stringify(s) +} + +// SSHKeyServiceOp implements SSHKeyService +type SSHKeyServiceOp struct { + client *Client +} + +func (s *SSHKeyServiceOp) list(url string) ([]SSHKey, *Response, error) { + root := new(sshKeyRoot) + + resp, err := s.client.DoRequest("GET", url, nil, root) + if err != nil { + return nil, resp, err + } + + return root.SSHKeys, resp, err +} + +// ProjectList lists ssh keys of a project +// Deprecated: Use ProjectServiceOp.ListSSHKeys +func (s *SSHKeyServiceOp) ProjectList(projectID string) ([]SSHKey, *Response, error) { + return s.list(fmt.Sprintf("%s/%s%s", projectBasePath, projectID, sshKeyBasePath)) + +} + +// List returns a user's ssh keys +func (s *SSHKeyServiceOp) List() ([]SSHKey, *Response, error) { + return s.list(sshKeyBasePath) +} + +// Get returns an ssh key by id +func (s *SSHKeyServiceOp) Get(sshKeyID string, getOpt *GetOptions) (*SSHKey, *Response, error) { + params := urlQuery(getOpt) + path := fmt.Sprintf("%s/%s?%s", sshKeyBasePath, sshKeyID, params) + sshKey := new(SSHKey) + + resp, err := s.client.DoRequest("GET", path, nil, sshKey) + if err != nil { + return nil, resp, err + } + + return sshKey, resp, err +} + +// Create creates a new ssh key +func (s *SSHKeyServiceOp) Create(createRequest *SSHKeyCreateRequest) (*SSHKey, *Response, error) { + path := sshKeyBasePath + if createRequest.ProjectID != "" { + path = fmt.Sprintf("%s/%s%s", projectBasePath, createRequest.ProjectID, sshKeyBasePath) + } + sshKey := new(SSHKey) + + resp, err := s.client.DoRequest("POST", path, createRequest, sshKey) + if err != nil { + return nil, resp, err + } + + return sshKey, resp, err +} + +// Update updates an ssh key +func (s *SSHKeyServiceOp) Update(id string, updateRequest *SSHKeyUpdateRequest) (*SSHKey, *Response, error) { + if updateRequest.Label == nil && updateRequest.Key == nil { + return nil, nil, fmt.Errorf("You must set either Label or Key string for SSH Key update") + } + path := fmt.Sprintf("%s/%s", sshKeyBasePath, id) + + sshKey := new(SSHKey) + + resp, err := s.client.DoRequest("PATCH", path, updateRequest, sshKey) + if err != nil { + return nil, resp, err + } + + return sshKey, resp, err +} + +// Delete deletes an ssh 
key +func (s *SSHKeyServiceOp) Delete(sshKeyID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", sshKeyBasePath, sshKeyID) + + return s.client.DoRequest("DELETE", path, nil, nil) +} diff --git a/vendor/github.com/packethost/packngo/timestamp.go b/vendor/github.com/packethost/packngo/timestamp.go new file mode 100644 index 00000000000..c3320ed62eb --- /dev/null +++ b/vendor/github.com/packethost/packngo/timestamp.go @@ -0,0 +1,35 @@ +package packngo + +import ( + "strconv" + "time" +) + +// Timestamp represents a time that can be unmarshalled from a JSON string +// formatted as either an RFC3339 or Unix timestamp. All +// exported methods of time.Time can be called on Timestamp. +type Timestamp struct { + time.Time +} + +func (t Timestamp) String() string { + return t.Time.String() +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// Time is expected in RFC3339 or Unix format. +func (t *Timestamp) UnmarshalJSON(data []byte) (err error) { + str := string(data) + i, err := strconv.ParseInt(str, 10, 64) + if err == nil { + t.Time = time.Unix(i, 0) + } else { + t.Time, err = time.Parse(`"`+time.RFC3339+`"`, str) + } + return +} + +// Equal reports whether t and u are equal based on time.Equal +func (t Timestamp) Equal(u Timestamp) bool { + return t.Time.Equal(u.Time) +} diff --git a/vendor/github.com/packethost/packngo/two_factor_auth.go b/vendor/github.com/packethost/packngo/two_factor_auth.go new file mode 100644 index 00000000000..5064b09fe3c --- /dev/null +++ b/vendor/github.com/packethost/packngo/two_factor_auth.go @@ -0,0 +1,56 @@ +package packngo + +const twoFactorAuthAppPath = "/user/otp/app" +const twoFactorAuthSmsPath = "/user/otp/sms" + +// TwoFactorAuthService interface defines available two factor authentication functions +type TwoFactorAuthService interface { + EnableApp(string) (*Response, error) + DisableApp(string) (*Response, error) + EnableSms(string) (*Response, error) + DisableSms(string) (*Response, error) + ReceiveSms() (*Response, error) + SeedApp() (string, *Response, error) +} + +// TwoFactorAuthServiceOp implements TwoFactorAuthService +type TwoFactorAuthServiceOp struct { + client *Client +} + +// EnableApp function enables two factor auth using authenticatior app +func (s *TwoFactorAuthServiceOp) EnableApp(token string) (resp *Response, err error) { + headers := map[string]string{"x-otp-token": token} + return s.client.DoRequestWithHeader("POST", headers, twoFactorAuthAppPath, nil, nil) +} + +// EnableSms function enables two factor auth using sms +func (s *TwoFactorAuthServiceOp) EnableSms(token string) (resp *Response, err error) { + headers := map[string]string{"x-otp-token": token} + return s.client.DoRequestWithHeader("POST", headers, twoFactorAuthSmsPath, nil, nil) +} + +// ReceiveSms orders the auth service to issue an SMS token +func (s *TwoFactorAuthServiceOp) ReceiveSms() (resp *Response, err error) { + return s.client.DoRequest("POST", twoFactorAuthSmsPath+"/receive", nil, nil) +} + +// DisableApp function disables two factor auth using +func (s *TwoFactorAuthServiceOp) DisableApp(token string) (resp *Response, err error) { + headers := map[string]string{"x-otp-token": token} + return s.client.DoRequestWithHeader("DELETE", headers, twoFactorAuthAppPath, nil, nil) +} + +// DisableSms function disables two factor auth using +func (s *TwoFactorAuthServiceOp) DisableSms(token string) (resp *Response, err error) { + headers := map[string]string{"x-otp-token": token} + return s.client.DoRequestWithHeader("DELETE", headers, 
twoFactorAuthSmsPath, nil, nil) +} + +// SeedApp orders the auth service to issue a token via google authenticator +func (s *TwoFactorAuthServiceOp) SeedApp() (otpURI string, resp *Response, err error) { + ret := &map[string]string{} + resp, err = s.client.DoRequest("POST", twoFactorAuthAppPath+"/receive", nil, ret) + + return (*ret)["otp_uri"], resp, err +} diff --git a/vendor/github.com/packethost/packngo/user.go b/vendor/github.com/packethost/packngo/user.go new file mode 100644 index 00000000000..63db99f9504 --- /dev/null +++ b/vendor/github.com/packethost/packngo/user.go @@ -0,0 +1,100 @@ +package packngo + +import "fmt" + +const usersBasePath = "/users" +const userBasePath = "/user" + +// UserService interface defines available user methods +type UserService interface { + List(*ListOptions) ([]User, *Response, error) + Get(string, *GetOptions) (*User, *Response, error) + Current() (*User, *Response, error) +} + +type usersRoot struct { + Users []User `json:"users"` + Meta meta `json:"meta"` +} + +// User represents an Equinix Metal user +type User struct { + ID string `json:"id"` + FirstName string `json:"first_name,omitempty"` + LastName string `json:"last_name,omitempty"` + FullName string `json:"full_name,omitempty"` + Email string `json:"email,omitempty"` + TwoFactor string `json:"two_factor_auth,omitempty"` + DefaultOrganizationID string `json:"default_organization_id,omitempty"` + AvatarURL string `json:"avatar_url,omitempty"` + Facebook string `json:"twitter,omitempty"` + Twitter string `json:"facebook,omitempty"` + LinkedIn string `json:"linkedin,omitempty"` + Created string `json:"created_at,omitempty"` + Updated string `json:"updated_at,omitempty"` + TimeZone string `json:"timezone,omitempty"` + Emails []Email `json:"emails,omitempty"` + PhoneNumber string `json:"phone_number,omitempty"` + URL string `json:"href,omitempty"` + VPN bool `json:"vpn"` +} + +func (u User) String() string { + return Stringify(u) +} + +// UserServiceOp implements UserService +type UserServiceOp struct { + client *Client +} + +// Get method gets a user by userID +func (s *UserServiceOp) List(listOpt *ListOptions) (users []User, resp *Response, err error) { + params := urlQuery(listOpt) + path := fmt.Sprintf("%s?%s", usersBasePath, params) + + for { + subset := new(usersRoot) + + resp, err = s.client.DoRequest("GET", path, nil, subset) + if err != nil { + return nil, resp, err + } + + users = append(users, subset.Users...) + + if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = subset.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + return + } +} + +// Returns the user object for the currently logged-in user. 
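// An illustrative sketch of typical client usage for the services above
// (hypothetical values; assumes the packngo Client fields Users and SSHKeys
// and a valid Equinix Metal API token):
//
//	c := packngo.NewClientWithAuth("openshift-installer", apiToken, nil)
//	me, _, err := c.Users.Current()
//	if err != nil {
//		// handle error
//	}
//	key, _, err := c.SSHKeys.Create(&packngo.SSHKeyCreateRequest{
//		Label:     "installer-key", // hypothetical label
//		Key:       sshPublicKey,    // OpenSSH-format public key string
//		ProjectID: projectID,       // scope the key to a project
//	})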
+func (s *UserServiceOp) Current() (*User, *Response, error) { + user := new(User) + + resp, err := s.client.DoRequest("GET", userBasePath, nil, user) + if err != nil { + return nil, resp, err + } + + return user, resp, err +} + +func (s *UserServiceOp) Get(userID string, getOpt *GetOptions) (*User, *Response, error) { + params := urlQuery(getOpt) + path := fmt.Sprintf("%s/%s?%s", usersBasePath, userID, params) + user := new(User) + + resp, err := s.client.DoRequest("GET", path, nil, user) + if err != nil { + return nil, resp, err + } + + return user, resp, err +} diff --git a/vendor/github.com/packethost/packngo/utils.go b/vendor/github.com/packethost/packngo/utils.go new file mode 100644 index 00000000000..42124ba4978 --- /dev/null +++ b/vendor/github.com/packethost/packngo/utils.go @@ -0,0 +1,156 @@ +package packngo + +import ( + "bytes" + "fmt" + "io" + "reflect" +) + +var ( + timestampType = reflect.TypeOf(Timestamp{}) + + // Facilities DEPRECATED Use Facilities.List + Facilities = []string{ + "yyz1", "nrt1", "atl1", "mrs1", "hkg1", "ams1", + "ewr1", "sin1", "dfw1", "lax1", "syd1", "sjc1", + "ord1", "iad1", "fra1", "sea1", "dfw2"} + + // FacilityFeatures DEPRECATED Use Facilities.List + FacilityFeatures = []string{ + "baremetal", "layer_2", "backend_transfer", "storage", "global_ipv4"} + + // UtilizationLevels DEPRECATED + UtilizationLevels = []string{"unavailable", "critical", "limited", "normal"} + + // DevicePlans DEPRECATED Use Plans.List + DevicePlans = []string{"c2.medium.x86", "g2.large.x86", + "m2.xlarge.x86", "x2.xlarge.x86", "baremetal_2a", "baremetal_2a2", + "baremetal_1", "baremetal_3", "baremetal_2", "baremetal_s", + "baremetal_0", "baremetal_1e", + } +) + +// Stringify creates a string representation of the provided message +// DEPRECATED This is used internally and should not be exported by packngo +func Stringify(message interface{}) string { + var buf bytes.Buffer + v := reflect.ValueOf(message) + // TODO(displague) errors here are not reported + _ = stringifyValue(&buf, v) + return buf.String() +} + +// StreamToString converts a reader to a string +// DEPRECATED This is unused and should not be exported by packngo +func StreamToString(stream io.Reader) (string, error) { + buf := new(bytes.Buffer) + if _, err := buf.ReadFrom(stream); err != nil { + return "", err + } + return buf.String(), nil +} + +// contains tells whether a contains x. 
+func contains(a []string, x string) bool { + for _, n := range a { + if x == n { + return true + } + } + return false +} + +// stringifyValue was graciously cargoculted from the goprotubuf library +func stringifyValue(w io.Writer, val reflect.Value) error { + if val.Kind() == reflect.Ptr && val.IsNil() { + _, err := w.Write([]byte("")) + return err + } + + v := reflect.Indirect(val) + + switch v.Kind() { + case reflect.String: + if _, err := fmt.Fprintf(w, `"%s"`, v); err != nil { + return err + } + case reflect.Slice: + if _, err := w.Write([]byte{'['}); err != nil { + return err + } + for i := 0; i < v.Len(); i++ { + if i > 0 { + if _, err := w.Write([]byte{' '}); err != nil { + return err + } + } + + if err := stringifyValue(w, v.Index(i)); err != nil { + return err + } + } + + if _, err := w.Write([]byte{']'}); err != nil { + return err + } + return nil + case reflect.Struct: + if v.Type().Name() != "" { + if _, err := w.Write([]byte(v.Type().String())); err != nil { + return err + } + } + + // special handling of Timestamp values + if v.Type() == timestampType { + _, err := fmt.Fprintf(w, "{%s}", v.Interface()) + return err + } + + if _, err := w.Write([]byte{'{'}); err != nil { + return err + } + + var sep bool + for i := 0; i < v.NumField(); i++ { + fv := v.Field(i) + if fv.Kind() == reflect.Ptr && fv.IsNil() { + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + continue + } + + if sep { + if _, err := w.Write([]byte(", ")); err != nil { + return err + } + } else { + sep = true + } + + if _, err := w.Write([]byte(v.Type().Field(i).Name)); err != nil { + return err + } + if _, err := w.Write([]byte{':'}); err != nil { + return err + } + + if err := stringifyValue(w, fv); err != nil { + return err + } + } + + if _, err := w.Write([]byte{'}'}); err != nil { + return err + } + default: + if v.CanInterface() { + if _, err := fmt.Fprint(w, v.Interface()); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/packethost/packngo/virtualnetworks.go b/vendor/github.com/packethost/packngo/virtualnetworks.go new file mode 100644 index 00000000000..c2b5d3d3809 --- /dev/null +++ b/vendor/github.com/packethost/packngo/virtualnetworks.go @@ -0,0 +1,92 @@ +package packngo + +import ( + "fmt" +) + +const virtualNetworkBasePath = "/virtual-networks" + +// DevicePortService handles operations on a port which belongs to a particular device +type ProjectVirtualNetworkService interface { + List(projectID string, listOpt *ListOptions) (*VirtualNetworkListResponse, *Response, error) + Create(*VirtualNetworkCreateRequest) (*VirtualNetwork, *Response, error) + Get(string, *GetOptions) (*VirtualNetwork, *Response, error) + Delete(virtualNetworkID string) (*Response, error) +} + +type VirtualNetwork struct { + ID string `json:"id"` + Description string `json:"description,omitempty"` + VXLAN int `json:"vxlan,omitempty"` + FacilityCode string `json:"facility_code,omitempty"` + CreatedAt string `json:"created_at,omitempty"` + Href string `json:"href"` + Project Project `json:"assigned_to"` +} + +type ProjectVirtualNetworkServiceOp struct { + client *Client +} + +type VirtualNetworkListResponse struct { + VirtualNetworks []VirtualNetwork `json:"virtual_networks"` +} + +func (i *ProjectVirtualNetworkServiceOp) List(projectID string, listOpt *ListOptions) (*VirtualNetworkListResponse, *Response, error) { + + params := urlQuery(listOpt) + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, projectID, virtualNetworkBasePath, params) + output := new(VirtualNetworkListResponse) 
+ + resp, err := i.client.DoRequest("GET", path, nil, output) + if err != nil { + return nil, nil, err + } + + return output, resp, nil +} + +type VirtualNetworkCreateRequest struct { + ProjectID string `json:"project_id"` + Description string `json:"description"` + Facility string `json:"facility"` +} + +func (i *ProjectVirtualNetworkServiceOp) Get(vlanID string, getOpt *GetOptions) (*VirtualNetwork, *Response, error) { + params := urlQuery(getOpt) + path := fmt.Sprintf("%s/%s?%s", virtualNetworkBasePath, vlanID, params) + vlan := new(VirtualNetwork) + + resp, err := i.client.DoRequest("GET", path, nil, vlan) + if err != nil { + return nil, resp, err + } + + return vlan, resp, err +} + +func (i *ProjectVirtualNetworkServiceOp) Create(input *VirtualNetworkCreateRequest) (*VirtualNetwork, *Response, error) { + // TODO: May need to add timestamp to output from 'post' request + // for the 'created_at' attribute of VirtualNetwork struct since + // API response doesn't include it + path := fmt.Sprintf("%s/%s%s", projectBasePath, input.ProjectID, virtualNetworkBasePath) + output := new(VirtualNetwork) + + resp, err := i.client.DoRequest("POST", path, input, output) + if err != nil { + return nil, nil, err + } + + return output, resp, nil +} + +func (i *ProjectVirtualNetworkServiceOp) Delete(virtualNetworkID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", virtualNetworkBasePath, virtualNetworkID) + + resp, err := i.client.DoRequest("DELETE", path, nil, nil) + if err != nil { + return nil, err + } + + return resp, nil +} diff --git a/vendor/github.com/packethost/packngo/volumes.go b/vendor/github.com/packethost/packngo/volumes.go new file mode 100644 index 00000000000..fb2feb83ee7 --- /dev/null +++ b/vendor/github.com/packethost/packngo/volumes.go @@ -0,0 +1,238 @@ +package packngo + +import ( + "fmt" +) + +const ( + volumeBasePath = "/storage" + attachmentsBasePath = "/attachments" +) + +// VolumeService interface defines available Volume methods +type VolumeService interface { + List(string, *ListOptions) ([]Volume, *Response, error) + Get(string, *GetOptions) (*Volume, *Response, error) + Update(string, *VolumeUpdateRequest) (*Volume, *Response, error) + Delete(string) (*Response, error) + Create(*VolumeCreateRequest, string) (*Volume, *Response, error) + Lock(string) (*Response, error) + Unlock(string) (*Response, error) +} + +// VolumeAttachmentService defines attachment methdods +type VolumeAttachmentService interface { + Get(string, *GetOptions) (*VolumeAttachment, *Response, error) + Create(string, string) (*VolumeAttachment, *Response, error) + Delete(string) (*Response, error) +} + +type volumesRoot struct { + Volumes []Volume `json:"volumes"` + Meta meta `json:"meta"` +} + +// Volume represents a volume +type Volume struct { + Attachments []*VolumeAttachment `json:"attachments,omitempty"` + BillingCycle string `json:"billing_cycle,omitempty"` + Created string `json:"created_at,omitempty"` + Description string `json:"description,omitempty"` + Facility *Facility `json:"facility,omitempty"` + Href string `json:"href,omitempty"` + ID string `json:"id"` + Locked bool `json:"locked,omitempty"` + Name string `json:"name,omitempty"` + Plan *Plan `json:"plan,omitempty"` + Project *Project `json:"project,omitempty"` + Size int `json:"size,omitempty"` + SnapshotPolicies []*SnapshotPolicy `json:"snapshot_policies,omitempty"` + State string `json:"state,omitempty"` + Updated string `json:"updated_at,omitempty"` +} + +// SnapshotPolicy used to execute actions on volume +type 
SnapshotPolicy struct { + ID string `json:"id"` + Href string `json:"href"` + SnapshotFrequency string `json:"snapshot_frequency,omitempty"` + SnapshotCount int `json:"snapshot_count,omitempty"` +} + +func (v Volume) String() string { + return Stringify(v) +} + +// VolumeCreateRequest type used to create an Equinix Metal volume +type VolumeCreateRequest struct { + BillingCycle string `json:"billing_cycle"` + Description string `json:"description,omitempty"` + Locked bool `json:"locked,omitempty"` + Size int `json:"size"` + PlanID string `json:"plan_id"` + FacilityID string `json:"facility_id"` + SnapshotPolicies []*SnapshotPolicy `json:"snapshot_policies,omitempty"` +} + +func (v VolumeCreateRequest) String() string { + return Stringify(v) +} + +// VolumeUpdateRequest type used to update an Equinix Metal volume +type VolumeUpdateRequest struct { + Description *string `json:"description,omitempty"` + PlanID *string `json:"plan_id,omitempty"` + Size *int `json:"size,omitempty"` + BillingCycle *string `json:"billing_cycle,omitempty"` +} + +// VolumeAttachment is a type from Equinix Metal API +type VolumeAttachment struct { + Href string `json:"href"` + ID string `json:"id"` + Volume Volume `json:"volume"` + Device Device `json:"device"` +} + +func (v VolumeUpdateRequest) String() string { + return Stringify(v) +} + +// VolumeAttachmentServiceOp implements VolumeService +type VolumeAttachmentServiceOp struct { + client *Client +} + +// VolumeServiceOp implements VolumeService +type VolumeServiceOp struct { + client *Client +} + +// List returns the volumes for a project +func (v *VolumeServiceOp) List(projectID string, listOpt *ListOptions) (volumes []Volume, resp *Response, err error) { + params := urlQuery(listOpt) + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, projectID, volumeBasePath, params) + + for { + subset := new(volumesRoot) + + resp, err = v.client.DoRequest("GET", path, nil, subset) + if err != nil { + return nil, resp, err + } + + volumes = append(volumes, subset.Volumes...) 
+ + if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = subset.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + + return + } +} + +// Get returns a volume by id +func (v *VolumeServiceOp) Get(volumeID string, getOpt *GetOptions) (*Volume, *Response, error) { + params := urlQuery(getOpt) + path := fmt.Sprintf("%s/%s?%s", volumeBasePath, volumeID, params) + volume := new(Volume) + + resp, err := v.client.DoRequest("GET", path, nil, volume) + if err != nil { + return nil, resp, err + } + + return volume, resp, err +} + +// Update updates a volume +func (v *VolumeServiceOp) Update(id string, updateRequest *VolumeUpdateRequest) (*Volume, *Response, error) { + path := fmt.Sprintf("%s/%s", volumeBasePath, id) + volume := new(Volume) + + resp, err := v.client.DoRequest("PATCH", path, updateRequest, volume) + if err != nil { + return nil, resp, err + } + + return volume, resp, err +} + +// Delete deletes a volume +func (v *VolumeServiceOp) Delete(volumeID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", volumeBasePath, volumeID) + + return v.client.DoRequest("DELETE", path, nil, nil) +} + +// Create creates a new volume for a project +func (v *VolumeServiceOp) Create(createRequest *VolumeCreateRequest, projectID string) (*Volume, *Response, error) { + url := fmt.Sprintf("%s/%s%s", projectBasePath, projectID, volumeBasePath) + volume := new(Volume) + + resp, err := v.client.DoRequest("POST", url, createRequest, volume) + if err != nil { + return nil, resp, err + } + + return volume, resp, err +} + +// Attachments + +// Create Attachment, i.e. attach volume to a device +func (v *VolumeAttachmentServiceOp) Create(volumeID, deviceID string) (*VolumeAttachment, *Response, error) { + url := fmt.Sprintf("%s/%s%s", volumeBasePath, volumeID, attachmentsBasePath) + volAttachParam := map[string]string{ + "device_id": deviceID, + } + volumeAttachment := new(VolumeAttachment) + + resp, err := v.client.DoRequest("POST", url, volAttachParam, volumeAttachment) + if err != nil { + return nil, resp, err + } + return volumeAttachment, resp, nil +} + +// Get gets attachment by id +func (v *VolumeAttachmentServiceOp) Get(attachmentID string, getOpt *GetOptions) (*VolumeAttachment, *Response, error) { + params := urlQuery(getOpt) + + path := fmt.Sprintf("%s%s/%s?%s", volumeBasePath, attachmentsBasePath, attachmentID, params) + volumeAttachment := new(VolumeAttachment) + + resp, err := v.client.DoRequest("GET", path, nil, volumeAttachment) + if err != nil { + return nil, resp, err + } + + return volumeAttachment, resp, nil +} + +// Delete deletes attachment by id +func (v *VolumeAttachmentServiceOp) Delete(attachmentID string) (*Response, error) { + path := fmt.Sprintf("%s%s/%s", volumeBasePath, attachmentsBasePath, attachmentID) + + return v.client.DoRequest("DELETE", path, nil, nil) +} + +// Lock sets a volume to "locked" +func (v *VolumeServiceOp) Lock(id string) (*Response, error) { + path := fmt.Sprintf("%s/%s", volumeBasePath, id) + action := lockType{Locked: true} + + return v.client.DoRequest("PATCH", path, action, nil) +} + +// Unlock sets a volume to "unlocked" +func (v *VolumeServiceOp) Unlock(id string) (*Response, error) { + path := fmt.Sprintf("%s/%s", volumeBasePath, id) + action := lockType{Locked: false} + + return v.client.DoRequest("PATCH", path, action, nil) +} diff --git a/vendor/github.com/packethost/packngo/vpn.go b/vendor/github.com/packethost/packngo/vpn.go new file mode 100644 index 00000000000..8aa56c544a1 
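// An illustrative sketch of creating and attaching a block-storage volume with
// the VolumeService and VolumeAttachmentService defined above (hypothetical
// plan and facility slugs; the Client fields Volumes and VolumeAttachments are
// assumed):
//
//	vol, _, err := c.Volumes.Create(&packngo.VolumeCreateRequest{
//		BillingCycle: "hourly",
//		PlanID:       "storage_1", // hypothetical storage plan
//		FacilityID:   "ewr1",
//		Size:         100,         // size in GB
//	}, projectID)
//	if err != nil {
//		// handle error
//	}
//	// attach the new volume to an existing device
//	_, _, err = c.VolumeAttachments.Create(vol.ID, deviceID)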
--- /dev/null +++ b/vendor/github.com/packethost/packngo/vpn.go @@ -0,0 +1,50 @@ +package packngo + +import "fmt" + +const vpnBasePath = "/user/vpn" + +// VPNConfig struct +type VPNConfig struct { + Config string `json:"config,omitempty"` +} + +// VPNService interface defines available VPN functions +type VPNService interface { + Enable() (*Response, error) + Disable() (*Response, error) + Get(code string, getOpt *GetOptions) (*VPNConfig, *Response, error) +} + +// VPNServiceOp implements VPNService +type VPNServiceOp struct { + client *Client +} + +// Enable VPN for current user +func (s *VPNServiceOp) Enable() (resp *Response, err error) { + return s.client.DoRequest("POST", vpnBasePath, nil, nil) +} + +// Disable VPN for current user +func (s *VPNServiceOp) Disable() (resp *Response, err error) { + return s.client.DoRequest("DELETE", vpnBasePath, nil, nil) + +} + +// Get returns the client vpn config for the currently logged-in user. +func (s *VPNServiceOp) Get(code string, getOpt *GetOptions) (config *VPNConfig, resp *Response, err error) { + params := urlQuery(getOpt) + config = &VPNConfig{} + path := fmt.Sprintf("%s?code=%s", vpnBasePath, code) + if params != "" { + path += params + } + + resp, err = s.client.DoRequest("GET", path, nil, config) + if err != nil { + return nil, resp, err + } + + return config, resp, err +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go new file mode 100644 index 00000000000..5070e72e28f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -0,0 +1,370 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + closeNotifier = 1 << iota + flusher + hijacker + readerFrom + pusher +) + +type delegator interface { + http.ResponseWriter + + Status() int + Written() int64 +} + +type responseWriterDelegator struct { + http.ResponseWriter + + status int + written int64 + wroteHeader bool + observeWriteHeader func(int) +} + +func (r *responseWriterDelegator) Status() int { + return r.status +} + +func (r *responseWriterDelegator) Written() int64 { + return r.written +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + if r.observeWriteHeader != nil && !r.wroteHeader { + // Only call observeWriteHeader for the 1st time. It's a bug if + // WriteHeader is called more than once, but we want to protect + // against it here. Note that we still delegate the WriteHeader + // to the original ResponseWriter to not mask the bug from it. + r.observeWriteHeader(code) + } + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. 
+ if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type closeNotifierDelegator struct{ *responseWriterDelegator } +type flusherDelegator struct{ *responseWriterDelegator } +type hijackerDelegator struct{ *responseWriterDelegator } +type readerFromDelegator struct{ *responseWriterDelegator } +type pusherDelegator struct{ *responseWriterDelegator } + +func (d closeNotifierDelegator) CloseNotify() <-chan bool { + //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to + //remove support from client_golang yet. + return d.ResponseWriter.(http.CloseNotifier).CloseNotify() +} +func (d flusherDelegator) Flush() { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + d.ResponseWriter.(http.Flusher).Flush() +} +func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return d.ResponseWriter.(http.Hijacker).Hijack() +} +func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) + d.written += n + return n, err +} +func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { + return d.ResponseWriter.(http.Pusher).Push(target, opts) +} + +var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) + +func init() { + // TODO(beorn7): Code generation would help here. + pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 + return d + } + pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 + return closeNotifierDelegator{d} + } + pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 + return flusherDelegator{d} + } + pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 + return struct { + *responseWriterDelegator + http.Flusher + http.CloseNotifier + }{d, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 + return hijackerDelegator{d} + } + pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 + return struct { + *responseWriterDelegator + http.Hijacker + http.CloseNotifier + }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + }{d, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 + return readerFromDelegator{d} + } + pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.CloseNotifier + }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 + return struct { + *responseWriterDelegator + io.ReaderFrom 
+ http.Flusher + }{d, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + }{d, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 + return pusherDelegator{d} + } + pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 + return struct { + *responseWriterDelegator + http.Pusher + http.CloseNotifier + }{d, pusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + }{d, pusherDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + }{d, pusherDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + }{d, pusherDelegator{d}, 
readerFromDelegator{d}} + } + pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } +} + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to + //remove support from client_golang yet. + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + if _, ok := w.(http.Pusher); ok { + id += pusher + } + + return pickDelegator[id](d) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go new file mode 100644 index 00000000000..5e1c4546ceb --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -0,0 +1,379 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promhttp provides tooling around HTTP servers and clients. +// +// First, the package allows the creation of http.Handler instances to expose +// Prometheus metrics via HTTP. promhttp.Handler acts on the +// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a +// custom registry or anything that implements the Gatherer interface. It also +// allows the creation of handlers that act differently on errors or allow to +// log errors. +// +// Second, the package provides tooling to instrument instances of http.Handler +// via middleware. Middleware wrappers follow the naming scheme +// InstrumentHandlerX, where X describes the intended use of the middleware. +// See each function's doc comment for specific details. +// +// Finally, the package allows for an http.RoundTripper to be instrumented via +// middleware. Middleware wrappers follow the naming scheme +// InstrumentRoundTripperX, where X describes the intended use of the +// middleware. See each function's doc comment for specific details. +package promhttp + +import ( + "compress/gzip" + "fmt" + "io" + "net/http" + "strings" + "sync" + "time" + + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + contentTypeHeader = "Content-Type" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var gzipPool = sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, +} + +// Handler returns an http.Handler for the prometheus.DefaultGatherer, using +// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has +// no error logging, and it applies compression if requested by the client. +// +// The returned http.Handler is already instrumented using the +// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you +// create multiple http.Handlers by separate calls of the Handler function, the +// metrics used for instrumentation will be shared between them, providing +// global scrape counts. +// +// This function is meant to cover the bulk of basic use cases. If you are doing +// anything that requires more customization (including using a non-default +// Gatherer, different instrumentation, and non-default HandlerOpts), use the +// HandlerFor function. See there for details. +func Handler() http.Handler { + return InstrumentMetricHandler( + prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), + ) +} + +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. 
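// As a minimal sketch of the common case described above, the default handler
// can be mounted directly, or HandlerFor can be used with a custom registry:
//
//	http.Handle("/metrics", promhttp.Handler())
//
//	reg := prometheus.NewRegistry()
//	reg.MustRegister(prometheus.NewGoCollector())
//	http.Handle("/custom-metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
//
//	log.Fatal(http.ListenAndServe(":9090", nil))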
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { + var ( + inFlightSem chan struct{} + errCnt = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_errors_total", + Help: "Total number of internal errors encountered by the promhttp metric handler.", + }, + []string{"cause"}, + ) + ) + + if opts.MaxRequestsInFlight > 0 { + inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) + } + if opts.Registry != nil { + // Initialize all possibilites that can occur below. + errCnt.WithLabelValues("gathering") + errCnt.WithLabelValues("encoding") + if err := opts.Registry.Register(errCnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + errCnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + } + + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { + if inFlightSem != nil { + select { + case inFlightSem <- struct{}{}: // All good, carry on. + defer func() { <-inFlightSem }() + default: + http.Error(rsp, fmt.Sprintf( + "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, + ), http.StatusServiceUnavailable) + return + } + } + mfs, err := reg.Gather() + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error gathering metrics:", err) + } + errCnt.WithLabelValues("gathering").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + if len(mfs) == 0 { + // Still report the error if no metrics have been gathered. + httpError(rsp, err) + return + } + case HTTPErrorOnError: + httpError(rsp, err) + return + } + } + + var contentType expfmt.Format + if opts.EnableOpenMetrics { + contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header) + } else { + contentType = expfmt.Negotiate(req.Header) + } + header := rsp.Header() + header.Set(contentTypeHeader, string(contentType)) + + w := io.Writer(rsp) + if !opts.DisableCompression && gzipAccepted(req.Header) { + header.Set(contentEncodingHeader, "gzip") + gz := gzipPool.Get().(*gzip.Writer) + defer gzipPool.Put(gz) + + gz.Reset(w) + defer gz.Close() + + w = gz + } + + enc := expfmt.NewEncoder(w, contentType) + + // handleError handles the error according to opts.ErrorHandling + // and returns true if we have to abort after the handling. + handleError := func(err error) bool { + if err == nil { + return false + } + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error encoding and sending metric family:", err) + } + errCnt.WithLabelValues("encoding").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case HTTPErrorOnError: + // We cannot really send an HTTP error at this + // point because we most likely have written + // something to rsp already. But at least we can + // stop sending. + return true + } + // Do nothing in all other cases, including ContinueOnError. + return false + } + + for _, mf := range mfs { + if handleError(enc.Encode(mf)) { + return + } + } + if closer, ok := enc.(expfmt.Closer); ok { + // This in particular takes care of the final "# EOF\n" line for OpenMetrics. + if handleError(closer.Close()) { + return + } + } + }) + + if opts.Timeout <= 0 { + return h + } + return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( + "Exceeded configured timeout of %v.\n", + opts.Timeout, + )) +} + +// InstrumentMetricHandler is usually used with an http.Handler returned by the +// HandlerFor function. 
It instruments the provided http.Handler with two +// metrics: A counter vector "promhttp_metric_handler_requests_total" to count +// scrapes partitioned by HTTP status code, and a gauge +// "promhttp_metric_handler_requests_in_flight" to track the number of +// simultaneous scrapes. This function idempotently registers collectors for +// both metrics with the provided Registerer. It panics if the registration +// fails. The provided metrics are useful to see how many scrapes hit the +// monitored target (which could be from different Prometheus servers or other +// scrapers), and how often they overlap (which would result in more than one +// scrape in flight at the same time). Note that the scrapes-in-flight gauge +// will contain the scrape by which it is exposed, while the scrape counter will +// only get incremented after the scrape is complete (as only then the status +// code is known). For tracking scrape durations, use the +// "scrape_duration_seconds" gauge created by the Prometheus server upon each +// scrape. +func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { + cnt := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_requests_total", + Help: "Total number of scrapes by HTTP status code.", + }, + []string{"code"}, + ) + // Initialize the most likely HTTP status codes. + cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + cnt.WithLabelValues("503") + if err := reg.Register(cnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + cnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + + gge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "promhttp_metric_handler_requests_in_flight", + Help: "Current number of scrapes being served.", + }) + if err := reg.Register(gge); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + gge = are.ExistingCollector.(prometheus.Gauge) + } else { + panic(err) + } + } + + return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) +} + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors. +type HandlerErrorHandling int + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. Note that HTTP + // errors cannot be served anymore once the beginning of a regular + // payload has been sent. Thus, in the (unlikely) case that encoding the + // payload into the negotiated wire format fails, serving the response + // will simply be aborted. Set an ErrorLog in HandlerOpts to detect + // those errors. + HTTPErrorOnError HandlerErrorHandling = iota + // Ignore errors and try to serve as many metrics as possible. However, + // if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. In this case, it is highly + // recommended to provide other means of detecting errors: By setting an + // ErrorLog in HandlerOpts, the errors are logged. By providing a + // Registry in HandlerOpts, the exposed metrics include an error counter + // "promhttp_metric_handler_errors_total", which can be used for + // alerts. + ContinueOnError + // Panic upon the first error encountered (useful for "crash only" apps). 
+ PanicOnError +) + +// Logger is the minimal interface HandlerOpts needs for logging. Note that +// log.Logger from the standard library implements this interface, and it is +// easy to implement by custom loggers, if they don't do so already anyway. +type Logger interface { + Println(v ...interface{}) +} + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts struct { + // ErrorLog specifies an optional logger for errors collecting and + // serving metrics. If nil, errors are not logged at all. + ErrorLog Logger + // ErrorHandling defines how errors are handled. Note that errors are + // logged regardless of the configured ErrorHandling provided ErrorLog + // is not nil. + ErrorHandling HandlerErrorHandling + // If Registry is not nil, it is used to register a metric + // "promhttp_metric_handler_errors_total", partitioned by "cause". A + // failed registration causes a panic. Note that this error counter is + // different from the instrumentation you get from the various + // InstrumentHandler... helpers. It counts errors that don't necessarily + // result in a non-2xx HTTP status code. There are two typical cases: + // (1) Encoding errors that only happen after streaming of the HTTP body + // has already started (and the status code 200 has been sent). This + // should only happen with custom collectors. (2) Collection errors with + // no effect on the HTTP status code because ErrorHandling is set to + // ContinueOnError. + Registry prometheus.Registerer + // If DisableCompression is true, the handler will never compress the + // response, even if requested by the client. + DisableCompression bool + // The number of concurrent HTTP requests is limited to + // MaxRequestsInFlight. Additional requests are responded to with 503 + // Service Unavailable and a suitable message in the body. If + // MaxRequestsInFlight is 0 or negative, no limit is applied. + MaxRequestsInFlight int + // If handling a request takes longer than Timeout, it is responded to + // with 503 ServiceUnavailable and a suitable Message. No timeout is + // applied if Timeout is 0 or negative. Note that with the current + // implementation, reaching the timeout simply ends the HTTP requests as + // described above (and even that only if sending of the body hasn't + // started yet), while the bulk work of gathering all the metrics keeps + // running in the background (with the eventual result to be thrown + // away). Until the implementation is improved, it is recommended to + // implement a separate timeout in potentially slow Collectors. + Timeout time.Duration + // If true, the experimental OpenMetrics encoding is added to the + // possible options during content negotiation. Note that Prometheus + // 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is + // the only way to transmit exemplars. However, the move to OpenMetrics + // is not completely transparent. Most notably, the values of "quantile" + // labels of Summaries and "le" labels of Histograms are formatted with + // a trailing ".0" if they would otherwise look like integer numbers + // (which changes the identity of the resulting series on the Prometheus + // server). + EnableOpenMetrics bool +} + +// gzipAccepted returns whether the client will accept gzip-encoded content. 
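// A sketch of serving metrics with non-default HandlerOpts, using only the
// fields documented above (values are illustrative):
//
//	handler := promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{
//		ErrorLog:            log.New(os.Stderr, "promhttp: ", log.LstdFlags),
//		ErrorHandling:       promhttp.ContinueOnError,
//		MaxRequestsInFlight: 3,
//		Timeout:             10 * time.Second,
//	})
//	http.Handle("/metrics", handler)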
+func gzipAccepted(header http.Header) bool { + a := header.Get(acceptEncodingHeader) + parts := strings.Split(a, ",") + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return true + } + } + return false +} + +// httpError removes any content-encoding header and then calls http.Error with +// the provided error and http.StatusInternalServerError. Error contents is +// supposed to be uncompressed plain text. Same as with a plain http.Error, this +// must not be called if the header or any payload has already been sent. +func httpError(rsp http.ResponseWriter, err error) { + rsp.Header().Del(contentEncodingHeader) + http.Error( + rsp, + "An error has occurred while serving metrics:\n\n"+err.Error(), + http.StatusInternalServerError, + ) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go new file mode 100644 index 00000000000..83c49b66a81 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -0,0 +1,219 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "crypto/tls" + "net/http" + "net/http/httptrace" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// The RoundTripperFunc type is an adapter to allow the use of ordinary +// functions as RoundTrippers. If f is a function with the appropriate +// signature, RountTripperFunc(f) is a RoundTripper that calls f. +type RoundTripperFunc func(req *http.Request) (*http.Response, error) + +// RoundTrip implements the RoundTripper interface. +func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return rt(r) +} + +// InstrumentRoundTripperInFlight is a middleware that wraps the provided +// http.RoundTripper. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.RoundTripper. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + gauge.Inc() + defer gauge.Dec() + return next.RoundTrip(r) + }) +} + +// InstrumentRoundTripperCounter is a middleware that wraps the provided +// http.RoundTripper to observe the request result with the provided CounterVec. +// The CounterVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. Partitioning of the CounterVec happens by HTTP status code +// and/or HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. 
+// +// If the wrapped RoundTripper panics or returns a non-nil error, the Counter +// is not incremented. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(counter) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + resp, err := next.RoundTrip(r) + if err == nil { + counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() + } + return resp, err + }) +} + +// InstrumentRoundTripperDuration is a middleware that wraps the provided +// http.RoundTripper to observe the request duration with the provided +// ObserverVec. The ObserverVec must have zero, one, or two non-const +// non-curried labels. For those, the only allowed label names are "code" and +// "method". The function panics otherwise. The Observe method of the Observer +// in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped RoundTripper panics or returns a non-nil error, no values are +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(obs) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + resp, err := next.RoundTrip(r) + if err == nil { + obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) + } + return resp, err + }) +} + +// InstrumentTrace is used to offer flexibility in instrumenting the available +// httptrace.ClientTrace hook functions. Each function is passed a float64 +// representing the time in seconds since the start of the http request. A user +// may choose to use separately buckets Histograms, or implement custom +// instance labels on a per function basis. +type InstrumentTrace struct { + GotConn func(float64) + PutIdleConn func(float64) + GotFirstResponseByte func(float64) + Got100Continue func(float64) + DNSStart func(float64) + DNSDone func(float64) + ConnectStart func(float64) + ConnectDone func(float64) + TLSHandshakeStart func(float64) + TLSHandshakeDone func(float64) + WroteHeaders func(float64) + Wait100Continue func(float64) + WroteRequest func(float64) +} + +// InstrumentRoundTripperTrace is a middleware that wraps the provided +// RoundTripper and reports times to hook functions provided in the +// InstrumentTrace struct. Hook functions that are not present in the provided +// InstrumentTrace struct are ignored. Times reported to the hook functions are +// time since the start of the request. Only with Go1.9+, those times are +// guaranteed to never be negative. (Earlier Go versions are not using a +// monotonic clock.) Note that partitioning of Histograms is expensive and +// should be used judiciously. +// +// For hook functions that receive an error as an argument, no observations are +// made in the event of a non-nil error value. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. 
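// A sketch of composing the client-side middlewares above around an
// http.Client (metric names are illustrative):
//
//	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
//		Name: "client_in_flight_requests",
//		Help: "Number of in-flight client requests.",
//	})
//	counter := prometheus.NewCounterVec(
//		prometheus.CounterOpts{
//			Name: "client_api_requests_total",
//			Help: "Client requests partitioned by status code and method.",
//		},
//		[]string{"code", "method"},
//	)
//	duration := prometheus.NewHistogramVec(
//		prometheus.HistogramOpts{
//			Name:    "client_request_duration_seconds",
//			Help:    "Client request latencies in seconds.",
//			Buckets: prometheus.DefBuckets,
//		},
//		[]string{"method"},
//	)
//	prometheus.MustRegister(inFlight, counter, duration)
//
//	client := &http.Client{
//		Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
//			promhttp.InstrumentRoundTripperCounter(counter,
//				promhttp.InstrumentRoundTripperDuration(duration, http.DefaultTransport))),
//	}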
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + + trace := &httptrace.ClientTrace{ + GotConn: func(_ httptrace.GotConnInfo) { + if it.GotConn != nil { + it.GotConn(time.Since(start).Seconds()) + } + }, + PutIdleConn: func(err error) { + if err != nil { + return + } + if it.PutIdleConn != nil { + it.PutIdleConn(time.Since(start).Seconds()) + } + }, + DNSStart: func(_ httptrace.DNSStartInfo) { + if it.DNSStart != nil { + it.DNSStart(time.Since(start).Seconds()) + } + }, + DNSDone: func(_ httptrace.DNSDoneInfo) { + if it.DNSDone != nil { + it.DNSDone(time.Since(start).Seconds()) + } + }, + ConnectStart: func(_, _ string) { + if it.ConnectStart != nil { + it.ConnectStart(time.Since(start).Seconds()) + } + }, + ConnectDone: func(_, _ string, err error) { + if err != nil { + return + } + if it.ConnectDone != nil { + it.ConnectDone(time.Since(start).Seconds()) + } + }, + GotFirstResponseByte: func() { + if it.GotFirstResponseByte != nil { + it.GotFirstResponseByte(time.Since(start).Seconds()) + } + }, + Got100Continue: func() { + if it.Got100Continue != nil { + it.Got100Continue(time.Since(start).Seconds()) + } + }, + TLSHandshakeStart: func() { + if it.TLSHandshakeStart != nil { + it.TLSHandshakeStart(time.Since(start).Seconds()) + } + }, + TLSHandshakeDone: func(_ tls.ConnectionState, err error) { + if err != nil { + return + } + if it.TLSHandshakeDone != nil { + it.TLSHandshakeDone(time.Since(start).Seconds()) + } + }, + WroteHeaders: func() { + if it.WroteHeaders != nil { + it.WroteHeaders(time.Since(start).Seconds()) + } + }, + Wait100Continue: func() { + if it.Wait100Continue != nil { + it.Wait100Continue(time.Since(start).Seconds()) + } + }, + WroteRequest: func(_ httptrace.WroteRequestInfo) { + if it.WroteRequest != nil { + it.WroteRequest(time.Since(start).Seconds()) + } + }, + } + r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) + + return next.RoundTrip(r) + }) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go new file mode 100644 index 00000000000..9db24380533 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -0,0 +1,447 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "errors" + "net/http" + "strconv" + "strings" + "time" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +// magicString is used for the hacky label test in checkLabels. Remove once fixed. +const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" + +// InstrumentHandlerInFlight is a middleware that wraps the provided +// http.Handler. 
It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.Handler. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + g.Inc() + defer g.Dec() + next.ServeHTTP(w, r) + }) +} + +// InstrumentHandlerDuration is a middleware that wraps the provided +// http.Handler to observe the request duration with the provided ObserverVec. +// The ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request duration in seconds. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + + obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + next.ServeHTTP(w, r) + obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) + }) +} + +// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler +// to observe the request result with the provided CounterVec. The CounterVec +// must have zero, one, or two non-const non-curried labels. For those, the only +// allowed label names are "code" and "method". The function panics +// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or +// HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, the Counter is not incremented. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(counter) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + counter.With(labels(code, method, r.Method, d.Status())).Inc() + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + counter.With(labels(code, method, r.Method, 0)).Inc() + }) +} + +// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided +// http.Handler to observe with the provided ObserverVec the request duration +// until the response headers are written. 
The ObserverVec must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. The Observe method of +// the Observer in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler panics before calling WriteHeader, no value is +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, func(status int) { + obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) + }) + next.ServeHTTP(d, r) + }) +} + +// InstrumentHandlerRequestSize is a middleware that wraps the provided +// http.Handler to observe the request size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) + }) +} + +// InstrumentHandlerResponseSize is a middleware that wraps the provided +// http.Handler to observe the response size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the response size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. 
Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { + code, method := checkLabels(obs) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) + }) +} + +func checkLabels(c prometheus.Collector) (code bool, method bool) { + // TODO(beorn7): Remove this hacky way to check for instance labels + // once Descriptors can have their dimensionality queried. + var ( + desc *prometheus.Desc + m prometheus.Metric + pm dto.Metric + lvs []string + ) + + // Get the Desc from the Collector. + descc := make(chan *prometheus.Desc, 1) + c.Describe(descc) + + select { + case desc = <-descc: + default: + panic("no description provided by collector") + } + select { + case <-descc: + panic("more than one description provided by collector") + default: + } + + close(descc) + + // Create a ConstMetric with the Desc. Since we don't know how many + // variable labels there are, try for as long as it needs. + for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { + m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) + } + + // Write out the metric into a proto message and look at the labels. + // If the value is not the magicString, it is a constLabel, which doesn't interest us. + // If the label is curried, it doesn't interest us. + // In all other cases, only "code" or "method" is allowed. + if err := m.Write(&pm); err != nil { + panic("error checking metric for labels") + } + for _, label := range pm.Label { + name, value := label.GetName(), label.GetValue() + if value != magicString || isLabelCurried(c, name) { + continue + } + switch name { + case "code": + code = true + case "method": + method = true + default: + panic("metric partitioned with non-supported labels") + } + } + return +} + +func isLabelCurried(c prometheus.Collector, label string) bool { + // This is even hackier than the label test above. + // We essentially try to curry again and see if it works. + // But for that, we need to type-convert to the two + // types we use here, ObserverVec or *CounterVec. + switch v := c.(type) { + case *prometheus.CounterVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + case prometheus.ObserverVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + default: + panic("unsupported metric vec type") + } + return true +} + +// emptyLabels is a one-time allocation for non-partitioned metrics to avoid +// unnecessary allocations on each request. 
+var emptyLabels = prometheus.Labels{} + +func labels(code, method bool, reqMethod string, status int) prometheus.Labels { + if !(code || method) { + return emptyLabels + } + labels := prometheus.Labels{} + + if code { + labels["code"] = sanitizeCode(status) + } + if method { + labels["method"] = sanitizeMethod(reqMethod) + } + + return labels +} + +func computeApproximateRequestSize(r *http.Request) int { + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. + + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + return s +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +// If the wrapped http.Handler has not set a status code, i.e. the value is +// currently 0, santizeCode will return 200, for consistency with behavior in +// the stdlib. +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200, 0: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/LICENSE b/vendor/github.com/terraform-providers/terraform-provider-dns/LICENSE new file mode 100644 index 00000000000..a612ad9813b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. 
Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. 
If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. 
If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/acceptance.sh b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/acceptance.sh new file mode 100644 index 00000000000..b42e196ca13 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/acceptance.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +set -eu +set -x + +# Test domains +export DNS_DOMAIN_FORWARD="example.com." +export DNS_DOMAIN_REVERSE="1.168.192.in-addr.arpa." 
+ +DOCKER_CONTAINER_NAME=tf_acc_dns + +cleanup_docker() { + docker stop "$DOCKER_CONTAINER_NAME" + docker rm "$DOCKER_CONTAINER_NAME" +} +failed() { + cleanup_docker + exit 1 +} + +# Run with no authentication + +export DNS_UPDATE_SERVER=127.0.0.1 +export DNS_UPDATE_PORT=55354 +docker run -d -p "$DNS_UPDATE_PORT:53/udp" \ + -e BIND_DOMAIN_FORWARD=${DNS_DOMAIN_FORWARD} \ + -e BIND_DOMAIN_REVERSE=${DNS_DOMAIN_REVERSE} \ + -e BIND_INSECURE=true \ + --name "$DOCKER_CONTAINER_NAME" drebes/bind || failed +GO111MODULE=on GOFLAGS=-mod=vendor make testacc TEST=./dns || failed +cleanup_docker + +# Run with authentication + +export DNS_UPDATE_KEYNAME=${DNS_DOMAIN_FORWARD} +export DNS_UPDATE_KEYALGORITHM="hmac-md5" +export DNS_UPDATE_KEYSECRET="c3VwZXJzZWNyZXQ=" +docker run -d -p "$DNS_UPDATE_PORT:53/udp" \ + -e BIND_DOMAIN_FORWARD=${DNS_DOMAIN_FORWARD} \ + -e BIND_DOMAIN_REVERSE=${DNS_DOMAIN_REVERSE} \ + -e BIND_KEY_NAME=${DNS_UPDATE_KEYNAME} \ + -e BIND_KEY_ALGORITHM=${DNS_UPDATE_KEYALGORITHM} \ + -e BIND_KEY_SECRET=${DNS_UPDATE_KEYSECRET} \ + --name "$DOCKER_CONTAINER_NAME" drebes/bind || failed +GO111MODULE=on GOFLAGS=-mod=vendor make testacc TEST=./dns || failed +cleanup_docker diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/config.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/config.go new file mode 100644 index 00000000000..89588a1495c --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/config.go @@ -0,0 +1,84 @@ +package dns + +import ( + "fmt" + "log" + "net" + "strconv" + "strings" + "time" + + "github.com/miekg/dns" +) + +type Config struct { + server string + port int + transport string + timeout time.Duration + retries int + keyname string + keyalgo string + keysecret string +} + +type DNSClient struct { + c *dns.Client + srv_addr string + transport string + retries int + keyname string + keysecret string + keyalgo string +} + +// Configures and returns a fully initialized DNSClient +func (c *Config) Client() (interface{}, error) { + log.Println("[INFO] Building DNSClient config structure") + + var client DNSClient + client.srv_addr = net.JoinHostPort(c.server, strconv.Itoa(c.port)) + authCfgOk := false + if (c.keyname == "" && c.keysecret == "" && c.keyalgo == "") || + (c.keyname != "" && c.keysecret != "" && c.keyalgo != "") { + authCfgOk = true + } + if !authCfgOk { + return nil, fmt.Errorf("Error configuring provider: when using authentication, \"key_name\", \"key_secret\" and \"key_algorithm\" should be non empty") + } + client.c = new(dns.Client) + client.transport = c.transport + client.c.Timeout = c.timeout + client.retries = c.retries + if c.keyname != "" { + if !dns.IsFqdn(c.keyname) { + return nil, fmt.Errorf("Error configuring provider: \"key_name\" should be fully-qualified") + } + keyname := strings.ToLower(c.keyname) + client.keyname = keyname + client.keysecret = c.keysecret + keyalgo, err := convertHMACAlgorithm(c.keyalgo) + if err != nil { + return nil, fmt.Errorf("Error configuring provider: %s", err) + } + client.keyalgo = keyalgo + client.c.TsigSecret = map[string]string{keyname: c.keysecret} + } + return &client, nil +} + +// Validates and converts HMAC algorithm +func convertHMACAlgorithm(name string) (string, error) { + switch name { + case "hmac-md5": + return dns.HmacMD5, nil + case "hmac-sha1": + return dns.HmacSHA1, nil + case "hmac-sha256": + return dns.HmacSHA256, nil + case "hmac-sha512": + return dns.HmacSHA512, nil + default: + return "", fmt.Errorf("Unknown HMAC 
algorithm: %s", name) + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_a_record_set.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_a_record_set.go new file mode 100644 index 00000000000..db234a5efa0 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_a_record_set.go @@ -0,0 +1,40 @@ +package dns + +import ( + "fmt" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceDnsARecordSet() *schema.Resource { + return &schema.Resource{ + Read: dataSourceDnsARecordSetRead, + Schema: map[string]*schema.Schema{ + "host": { + Type: schema.TypeString, + Required: true, + }, + "addrs": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + }, + } +} + +func dataSourceDnsARecordSetRead(d *schema.ResourceData, meta interface{}) error { + host := d.Get("host").(string) + + a, _, err := lookupIP(host) + if err != nil { + return fmt.Errorf("error looking up A records for %q: %s", host, err) + } + sort.Strings(a) + + d.Set("addrs", a) + d.SetId(host) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_aaaa_record_set.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_aaaa_record_set.go new file mode 100644 index 00000000000..530d1a25dce --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_aaaa_record_set.go @@ -0,0 +1,40 @@ +package dns + +import ( + "fmt" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceDnsAAAARecordSet() *schema.Resource { + return &schema.Resource{ + Read: dataSourceDnsAAAARecordSetRead, + Schema: map[string]*schema.Schema{ + "host": { + Type: schema.TypeString, + Required: true, + }, + "addrs": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + }, + } +} + +func dataSourceDnsAAAARecordSetRead(d *schema.ResourceData, meta interface{}) error { + host := d.Get("host").(string) + + _, aaaa, err := lookupIP(host) + if err != nil { + return fmt.Errorf("error looking up AAAA records for %q: %s", host, err) + } + sort.Strings(aaaa) + + d.Set("addrs", aaaa) + d.SetId(host) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_cname_record_set.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_cname_record_set.go new file mode 100644 index 00000000000..acff22252e8 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_cname_record_set.go @@ -0,0 +1,41 @@ +package dns + +import ( + "fmt" + "net" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceDnsCnameRecordSet() *schema.Resource { + return &schema.Resource{ + Read: dataSourceDnsCnameRecordSetRead, + + Schema: map[string]*schema.Schema{ + "host": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "cname": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceDnsCnameRecordSetRead(d *schema.ResourceData, meta interface{}) error { + host := d.Get("host").(string) + + cname, err := net.LookupCNAME(host) + if err != nil { + return fmt.Errorf("error looking up CNAME records for %q: %s", host, err) + } + + d.Set("cname", cname) + d.SetId(host) + + return nil +} diff --git 
a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_mx_record_set.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_mx_record_set.go new file mode 100644 index 00000000000..e23cb9eecd5 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_mx_record_set.go @@ -0,0 +1,72 @@ +package dns + +import ( + "fmt" + "net" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceDnsMXRecordSet() *schema.Resource { + return &schema.Resource{ + Read: dataSourceDnsMXRecordSetRead, + Schema: map[string]*schema.Schema{ + "domain": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "mx": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "preference": { + Type: schema.TypeInt, + Computed: true, + }, + "exchange": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Computed: true, + }, + }, + } +} + +func dataSourceDnsMXRecordSetRead(d *schema.ResourceData, meta interface{}) error { + domain := d.Get("domain").(string) + + records, err := net.LookupMX(domain) + if err != nil { + return fmt.Errorf("error looking up MX records for %q: %s", domain, err) + } + + // Sort by preference ascending, and host alphabetically + sort.Slice(records, func(i, j int) bool { + if records[i].Pref < records[j].Pref { + return true + } + if records[i].Pref > records[j].Pref { + return false + } + return records[i].Host < records[j].Host + }) + + mx := make([]map[string]interface{}, len(records)) + for i, record := range records { + mx[i] = map[string]interface{}{ + "preference": int(record.Pref), + "exchange": record.Host, + } + } + + if err = d.Set("mx", mx); err != nil { + return err + } + d.SetId(domain) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_ns_record_set.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_ns_record_set.go new file mode 100644 index 00000000000..cb4cc1a2d92 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_ns_record_set.go @@ -0,0 +1,49 @@ +package dns + +import ( + "fmt" + "net" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceDnsNSRecordSet() *schema.Resource { + return &schema.Resource{ + Read: dataSourceDnsNSRecordSetRead, + Schema: map[string]*schema.Schema{ + "host": { + Type: schema.TypeString, + Required: true, + }, + "nameservers": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + }, + } +} + +func dataSourceDnsNSRecordSetRead(d *schema.ResourceData, meta interface{}) error { + host := d.Get("host").(string) + + nsRecords, err := net.LookupNS(host) + if err != nil { + return fmt.Errorf("error looking up NS records for %q: %s", host, err) + } + + nameservers := make([]string, len(nsRecords)) + for i, record := range nsRecords { + nameservers[i] = record.Host + } + sort.Strings(nameservers) + + err = d.Set("nameservers", nameservers) + if err != nil { + return err + } + d.SetId(host) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_ptr_record_set.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_ptr_record_set.go new file mode 100644 index 00000000000..99d69ba00c1 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_ptr_record_set.go @@ -0,0 
+1,40 @@ +package dns + +import ( + "fmt" + "net" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceDnsPtrRecordSet() *schema.Resource { + return &schema.Resource{ + Read: dataSourceDnsPtrRecordSetRead, + Schema: map[string]*schema.Schema{ + "ip_address": { + Type: schema.TypeString, + Required: true, + }, + "ptr": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceDnsPtrRecordSetRead(d *schema.ResourceData, meta interface{}) error { + ipAddress := d.Get("ip_address").(string) + names, err := net.LookupAddr(ipAddress) + if err != nil { + return fmt.Errorf("error looking up PTR records for %q: %s", ipAddress, err) + } + if len(names) == 0 { + return fmt.Errorf("error looking up PTR records for %q: no records found", ipAddress) + } + + d.Set("ptr", names[0]) + d.SetId(ipAddress) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_srv_record_set.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_srv_record_set.go new file mode 100644 index 00000000000..cb3f2eb192d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_srv_record_set.go @@ -0,0 +1,96 @@ +package dns + +import ( + "fmt" + "net" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceDnsSRVRecordSet() *schema.Resource { + return &schema.Resource{ + Read: dataSourceDnsSRVRecordSetRead, + Schema: map[string]*schema.Schema{ + "service": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "srv": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "priority": { + Type: schema.TypeInt, + Computed: true, + }, + "weight": { + Type: schema.TypeInt, + Computed: true, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + }, + "target": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Computed: true, + }, + }, + } +} + +func dataSourceDnsSRVRecordSetRead(d *schema.ResourceData, meta interface{}) error { + service := d.Get("service").(string) + + _, records, err := net.LookupSRV("", "", service) + if err != nil { + return fmt.Errorf("error looking up SRV records for %q: %s", service, err) + } + + // Sort by priority ascending, weight descending, target + // alphabetically, and port ascending + sort.Slice(records, func(i, j int) bool { + if records[i].Priority < records[j].Priority { + return true + } + if records[i].Priority > records[j].Priority { + return false + } + if records[i].Weight > records[j].Weight { + return true + } + if records[i].Weight < records[j].Weight { + return false + } + if records[i].Target < records[j].Target { + return true + } + if records[i].Target > records[j].Target { + return false + } + return records[i].Port < records[j].Port + }) + + srv := make([]map[string]interface{}, len(records)) + for i, record := range records { + srv[i] = map[string]interface{}{ + "priority": int(record.Priority), + "weight": int(record.Weight), + "port": int(record.Port), + "target": record.Target, + } + } + + err = d.Set("srv", srv) + if err != nil { + return err + } + d.SetId(service) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_txt_record_set.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_txt_record_set.go new file mode 100644 index 00000000000..24d1547a75b --- /dev/null +++ 
b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/data_dns_txt_record_set.go @@ -0,0 +1,52 @@ +package dns + +import ( + "fmt" + "net" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceDnsTxtRecordSet() *schema.Resource { + return &schema.Resource{ + Read: dataSourceDnsTxtRecordSetRead, + + Schema: map[string]*schema.Schema{ + "host": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "record": { + Type: schema.TypeString, + Computed: true, + }, + + "records": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + }, + } +} + +func dataSourceDnsTxtRecordSetRead(d *schema.ResourceData, meta interface{}) error { + host := d.Get("host").(string) + + records, err := net.LookupTXT(host) + if err != nil { + return fmt.Errorf("error looking up TXT records for %q: %s", host, err) + } + + if len(records) > 0 { + d.Set("record", records[0]) + } else { + d.Set("record", "") + } + d.Set("records", records) + d.SetId(host) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/hash_ip_string.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/hash_ip_string.go new file mode 100644 index 00000000000..1785a986eb0 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/hash_ip_string.go @@ -0,0 +1,16 @@ +package dns + +import ( + "net" + + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" +) + +func hashIPString(v interface{}) int { + addr := v.(string) + ip := net.ParseIP(addr) + if ip == nil { + return hashcode.String(addr) + } + return hashcode.String(ip.String()) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/lookup.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/lookup.go new file mode 100644 index 00000000000..643c559d269 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/lookup.go @@ -0,0 +1,22 @@ +package dns + +import "net" + +func lookupIP(host string) ([]string, []string, error) { + records, err := net.LookupIP(host) + if err != nil { + return nil, nil, err + } + + a := make([]string, 0) + aaaa := make([]string, 0) + for _, ip := range records { + if ipv4 := ip.To4(); ipv4 != nil { + a = append(a, ipv4.String()) + } else { + aaaa = append(aaaa, ip.String()) + } + } + + return a, aaaa, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/provider.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/provider.go new file mode 100644 index 00000000000..646243a8592 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/provider.go @@ -0,0 +1,526 @@ +package dns + +import ( + "fmt" + "net" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/miekg/dns" +) + +const ( + defaultPort = 53 + defaultRetries = 3 + defaultTimeout = "0" + defaultTransport = "udp" +) + +// Provider returns a schema.Provider for DNS dynamic updates. 
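Provider() below is a standard terraform-plugin-sdk v1 provider constructor. The installer consumes it as vendored source, but for orientation, a provider with this signature is normally exposed to Terraform through the SDK's plugin entry point. A minimal sketch, assuming the standard plugin package from terraform-plugin-sdk v1 (this main package is illustrative and not part of this change):

    package main

    import (
    	"github.com/hashicorp/terraform-plugin-sdk/plugin"
    	"github.com/terraform-providers/terraform-provider-dns/dns"
    )

    func main() {
    	// Serve the provider over Terraform's plugin protocol; dns.Provider matches
    	// the plugin.ProviderFunc signature (func() terraform.ResourceProvider).
    	plugin.Serve(&plugin.ServeOpts{
    		ProviderFunc: dns.Provider,
    	})
    }

Configuration then comes from the optional provider "update" block or, when that block is absent, from the DNS_UPDATE_* environment variables read in configureProvider further below.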
+func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "update": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "server": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("DNS_UPDATE_SERVER", nil), + }, + "port": { + Type: schema.TypeInt, + Optional: true, + DefaultFunc: func() (interface{}, error) { + if envPortStr := os.Getenv("DNS_UPDATE_PORT"); envPortStr != "" { + port, err := strconv.Atoi(envPortStr) + if err != nil { + err = fmt.Errorf("invalid DNS_UPDATE_PORT environment variable: %s", err) + } + return port, err + } + + return defaultPort, nil + }, + }, + "transport": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("DNS_UPDATE_TRANSPORT", defaultTransport), + }, + "timeout": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("DNS_UPDATE_TIMEOUT", defaultTimeout), + }, + "retries": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + DefaultFunc: func() (interface{}, error) { + if env := os.Getenv("DNS_UPDATE_RETRIES"); env != "" { + retries, err := strconv.Atoi(env) + if err != nil { + err = fmt.Errorf("invalid DNS_UPDATE_RETRIES environment variable: %s", err) + } + return retries, err + } + + return defaultRetries, nil + }, + }, + "key_name": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("DNS_UPDATE_KEYNAME", nil), + }, + "key_algorithm": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("DNS_UPDATE_KEYALGORITHM", nil), + }, + "key_secret": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("DNS_UPDATE_KEYSECRET", nil), + }, + }, + }, + }, + }, + + DataSourcesMap: map[string]*schema.Resource{ + "dns_a_record_set": dataSourceDnsARecordSet(), + "dns_aaaa_record_set": dataSourceDnsAAAARecordSet(), + "dns_cname_record_set": dataSourceDnsCnameRecordSet(), + "dns_mx_record_set": dataSourceDnsMXRecordSet(), + "dns_ns_record_set": dataSourceDnsNSRecordSet(), + "dns_ptr_record_set": dataSourceDnsPtrRecordSet(), + "dns_srv_record_set": dataSourceDnsSRVRecordSet(), + "dns_txt_record_set": dataSourceDnsTxtRecordSet(), + }, + + ResourcesMap: map[string]*schema.Resource{ + "dns_a_record_set": resourceDnsARecordSet(), + "dns_aaaa_record_set": resourceDnsAAAARecordSet(), + "dns_cname_record": resourceDnsCnameRecord(), + "dns_mx_record_set": resourceDnsMXRecordSet(), + "dns_ns_record_set": resourceDnsNSRecordSet(), + "dns_ptr_record": resourceDnsPtrRecord(), + "dns_srv_record_set": resourceDnsSRVRecordSet(), + "dns_txt_record_set": resourceDnsTXTRecordSet(), + }, + + ConfigureFunc: configureProvider, + } +} + +func configureProvider(d *schema.ResourceData) (interface{}, error) { + + var server, transport, timeout, keyname, keyalgo, keysecret string + var port, retries int + var duration time.Duration + + // if the update block is missing, schema.EnvDefaultFunc is not called + if v, ok := d.GetOk("update"); ok { + update := v.([]interface{})[0].(map[string]interface{}) + if val, ok := update["port"]; ok { + port = int(val.(int)) + } + if val, ok := update["server"]; ok { + server = val.(string) + } + if val, ok := update["transport"]; ok { + transport = val.(string) + } + if val, ok := update["timeout"]; ok { + timeout = val.(string) + } + if val, ok := update["retries"]; ok { + retries = int(val.(int)) + } + if val, ok := 
update["key_name"]; ok { + keyname = val.(string) + } + if val, ok := update["key_algorithm"]; ok { + keyalgo = val.(string) + } + if val, ok := update["key_secret"]; ok { + keysecret = val.(string) + } + } else { + if len(os.Getenv("DNS_UPDATE_SERVER")) > 0 { + server = os.Getenv("DNS_UPDATE_SERVER") + } else { + return nil, nil + } + if len(os.Getenv("DNS_UPDATE_PORT")) > 0 { + var err error + portStr := os.Getenv("DNS_UPDATE_PORT") + port, err = strconv.Atoi(portStr) + if err != nil { + return nil, fmt.Errorf("invalid DNS_UPDATE_PORT environment variable: %s", err) + } + } else { + port = defaultPort + } + if len(os.Getenv("DNS_UPDATE_TRANSPORT")) > 0 { + transport = os.Getenv("DNS_UPDATE_TRANSPORT") + } else { + transport = defaultTransport + } + if len(os.Getenv("DNS_UPDATE_TIMEOUT")) > 0 { + timeout = os.Getenv("DNS_UPDATE_TIMEOUT") + } else { + timeout = defaultTimeout + } + if len(os.Getenv("DNS_UPDATE_RETRIES")) > 0 { + var err error + env := os.Getenv("DNS_UPDATE_RETRIES") + retries, err = strconv.Atoi(env) + if err != nil { + return nil, fmt.Errorf("invalid DNS_UPDATE_RETRIES environment variable: %s", err) + } + } else { + retries = defaultRetries + } + if len(os.Getenv("DNS_UPDATE_KEYNAME")) > 0 { + keyname = os.Getenv("DNS_UPDATE_KEYNAME") + } + if len(os.Getenv("DNS_UPDATE_KEYALGORITHM")) > 0 { + keyalgo = os.Getenv("DNS_UPDATE_KEYALGORITHM") + } + if len(os.Getenv("DNS_UPDATE_KEYSECRET")) > 0 { + keysecret = os.Getenv("DNS_UPDATE_KEYSECRET") + } + } + + if timeout != "" { + var err error + // Try parsing as a duration + duration, err = time.ParseDuration(timeout) + if err != nil { + // Failing that, convert to an integer and treat as seconds + seconds, err := strconv.Atoi(timeout) + if err != nil { + return nil, fmt.Errorf("invalid timeout: %s", timeout) + } + duration = time.Duration(seconds) * time.Second + } + if duration < 0 { + return nil, fmt.Errorf("timeout cannot be negative: %s", duration) + } + } + + config := Config{ + server: server, + port: port, + transport: transport, + timeout: duration, + retries: retries, + keyname: keyname, + keyalgo: keyalgo, + keysecret: keysecret, + } + + return config.Client() +} + +func getAVal(record interface{}) (string, int, error) { + + _, ok := record.(*dns.A) + if !ok { + return "", 0, fmt.Errorf("didn't get a A record") + } + + recstr := record.(*dns.A).String() + var name, class, typ, addr string + var ttl int + + _, err := fmt.Sscanf(recstr, "%s\t%d\t%s\t%s\t%s", &name, &ttl, &class, &typ, &addr) + if err != nil { + return "", 0, fmt.Errorf("Error parsing record: %s", err) + } + + return addr, ttl, nil +} + +func getNSVal(record interface{}) (string, int, error) { + + _, ok := record.(*dns.NS) + if !ok { + return "", 0, fmt.Errorf("didn't get a NS record") + } + + recstr := record.(*dns.NS).String() + var name, class, typ, nameserver string + var ttl int + + _, err := fmt.Sscanf(recstr, "%s\t%d\t%s\t%s\t%s", &name, &ttl, &class, &typ, &nameserver) + if err != nil { + return "", 0, fmt.Errorf("Error parsing record: %s", err) + } + + return nameserver, ttl, nil +} + +func getAAAAVal(record interface{}) (string, int, error) { + + _, ok := record.(*dns.AAAA) + if !ok { + return "", 0, fmt.Errorf("didn't get a AAAA record") + } + + recstr := record.(*dns.AAAA).String() + var name, class, typ, addr string + var ttl int + + _, err := fmt.Sscanf(recstr, "%s\t%d\t%s\t%s\t%s", &name, &ttl, &class, &typ, &addr) + if err != nil { + return "", 0, fmt.Errorf("Error parsing record: %s", err) + } + + return addr, ttl, nil +} + +func 
getCnameVal(record interface{}) (string, int, error) { + + _, ok := record.(*dns.CNAME) + if !ok { + return "", 0, fmt.Errorf("didn't get a CNAME record") + } + + recstr := record.(*dns.CNAME).String() + var name, class, typ, cname string + var ttl int + + _, err := fmt.Sscanf(recstr, "%s\t%d\t%s\t%s\t%s", &name, &ttl, &class, &typ, &cname) + if err != nil { + return "", 0, fmt.Errorf("Error parsing record: %s", err) + } + + return cname, ttl, nil +} + +func getPtrVal(record interface{}) (string, int, error) { + + _, ok := record.(*dns.PTR) + if !ok { + return "", 0, fmt.Errorf("didn't get a PTR record") + } + + recstr := record.(*dns.PTR).String() + var name, class, typ, ptr string + var ttl int + + _, err := fmt.Sscanf(recstr, "%s\t%d\t%s\t%s\t%s", &name, &ttl, &class, &typ, &ptr) + if err != nil { + return "", 0, fmt.Errorf("Error parsing record: %s", err) + } + + return ptr, ttl, nil +} + +func isTimeout(err error) bool { + + timeout, ok := err.(net.Error) + return ok && timeout.Timeout() +} + +func exchange(msg *dns.Msg, tsig bool, meta interface{}) (*dns.Msg, error) { + + c := meta.(*DNSClient).c + srv_addr := meta.(*DNSClient).srv_addr + keyname := meta.(*DNSClient).keyname + keyalgo := meta.(*DNSClient).keyalgo + c.Net = meta.(*DNSClient).transport + retries := meta.(*DNSClient).retries + retry_tcp := false + + msg.RecursionDesired = false + +Retry: + if tsig && keyname != "" { + msg.SetTsig(keyname, keyalgo, 300, time.Now().Unix()) + } + + r, _, err := c.Exchange(msg, srv_addr) + + switch err { + case dns.ErrTruncated: + if retry_tcp { + switch c.Net { + case "udp": + c.Net = "tcp" + case "udp4": + c.Net = "tcp4" + case "udp6": + c.Net = "tcp6" + default: + return nil, fmt.Errorf("Unknown transport: %s", c.Net) + } + } else { + msg.SetEdns0(dns.DefaultMsgSize, false) + retry_tcp = true + } + + // Reset retries counter on protocol change + retries = meta.(*DNSClient).retries + goto Retry + case nil: + if r.Rcode == dns.RcodeServerFailure && retries > 0 { + retries-- + goto Retry + } + default: + if isTimeout(err) && retries > 0 { + retries-- + goto Retry + } + } + + return r, err +} + +func resourceDnsImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + record := d.Id() + if !dns.IsFqdn(record) { + return nil, fmt.Errorf("Not a fully-qualified DNS name: %s", record) + } + + labels := dns.SplitDomainName(record) + + msg := new(dns.Msg) + + var zone *string + +Loop: + for l := range labels { + + msg.SetQuestion(dns.Fqdn(strings.Join(labels[l:], ".")), dns.TypeSOA) + + r, err := exchange(msg, true, meta) + if err != nil { + return nil, fmt.Errorf("Error querying DNS record: %s", err) + } + + switch r.Rcode { + case dns.RcodeSuccess: + + if len(r.Answer) == 0 { + continue + } + + for _, ans := range r.Answer { + switch t := ans.(type) { + case *dns.SOA: + zone = &t.Hdr.Name + case *dns.CNAME: + continue Loop + } + } + + break Loop + case dns.RcodeNameError: + continue + default: + return nil, fmt.Errorf("Error querying DNS record: %v (%s)", r.Rcode, dns.RcodeToString[r.Rcode]) + } + } + + if zone == nil { + return nil, fmt.Errorf("No SOA record in authority section in response for %s", record) + } + + common := dns.CompareDomainName(record, *zone) + if common == 0 { + return nil, fmt.Errorf("DNS record %s shares no common labels with zone %s", record, *zone) + } + + d.Set("zone", *zone) + if name := strings.Join(labels[:len(labels)-common], "."); name != "" { + d.Set("name", name) + } + + return []*schema.ResourceData{d}, nil +} + +func resourceFQDN(d 
*schema.ResourceData) string { + + fqdn := d.Get("zone").(string) + + if name, ok := d.GetOk("name"); ok { + fqdn = fmt.Sprintf("%s.%s", name.(string), fqdn) + } + + return fqdn +} + +func resourceDnsRead(d *schema.ResourceData, meta interface{}, rrType uint16) ([]dns.RR, error) { + + if meta != nil { + + fqdn := resourceFQDN(d) + + msg := new(dns.Msg) + msg.SetQuestion(fqdn, rrType) + + r, err := exchange(msg, true, meta) + if err != nil { + return nil, fmt.Errorf("Error querying DNS record: %s", err) + } + switch r.Rcode { + case dns.RcodeSuccess: + // NS records are returned slightly differently + if (rrType == dns.TypeNS && len(r.Ns) > 0) || len(r.Answer) > 0 { + break + } + fallthrough + case dns.RcodeNameError: + return nil, nil + default: + return nil, fmt.Errorf("Error querying DNS record: %v (%s)", r.Rcode, dns.RcodeToString[r.Rcode]) + } + + if rrType == dns.TypeNS { + return r.Ns, nil + } + return r.Answer, nil + } else { + return nil, fmt.Errorf("update server is not set") + } +} + +func resourceDnsDelete(d *schema.ResourceData, meta interface{}, rrType uint16) error { + + if meta != nil { + + fqdn := resourceFQDN(d) + + msg := new(dns.Msg) + + msg.SetUpdate(d.Get("zone").(string)) + + rr, _ := dns.NewRR(fmt.Sprintf("%s 0 %s", fqdn, dns.TypeToString[rrType])) + msg.RemoveRRset([]dns.RR{rr}) + + r, err := exchange(msg, true, meta) + if err != nil { + return fmt.Errorf("Error deleting DNS record: %s", err) + } + if r.Rcode != dns.RcodeSuccess { + return fmt.Errorf("Error deleting DNS record: %v (%s)", r.Rcode, dns.RcodeToString[r.Rcode]) + } + + return nil + } else { + return fmt.Errorf("update server is not set") + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_a_record_set.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_a_record_set.go new file mode 100644 index 00000000000..fe2ca1bc61a --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_a_record_set.go @@ -0,0 +1,137 @@ +package dns + +import ( + "fmt" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/miekg/dns" +) + +func resourceDnsARecordSet() *schema.Resource { + return &schema.Resource{ + Create: resourceDnsARecordSetCreate, + Read: resourceDnsARecordSetRead, + Update: resourceDnsARecordSetUpdate, + Delete: resourceDnsARecordSetDelete, + Importer: &schema.ResourceImporter{ + State: resourceDnsImport, + }, + + Schema: map[string]*schema.Schema{ + "zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateZone, + }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateName, + }, + "addresses": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: hashIPString, + }, + "ttl": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 3600, + }, + }, + } +} + +func resourceDnsARecordSetCreate(d *schema.ResourceData, meta interface{}) error { + + d.SetId(resourceFQDN(d)) + + return resourceDnsARecordSetUpdate(d, meta) +} + +func resourceDnsARecordSetRead(d *schema.ResourceData, meta interface{}) error { + + answers, err := resourceDnsRead(d, meta, dns.TypeA) + if err != nil { + return err + } + + if len(answers) > 0 { + + var ttl sort.IntSlice + + addresses := schema.NewSet(hashIPString, nil) + for _, record := range answers { + addr, t, err := getAVal(record) + if err != nil { + return fmt.Errorf("Error querying DNS record: 
%s", err) + } + addresses.Add(addr) + ttl = append(ttl, t) + } + sort.Sort(ttl) + + d.Set("addresses", addresses) + d.Set("ttl", ttl[0]) + } else { + d.SetId("") + } + + return nil +} + +func resourceDnsARecordSetUpdate(d *schema.ResourceData, meta interface{}) error { + + if meta != nil { + + ttl := d.Get("ttl").(int) + rec_fqdn := resourceFQDN(d) + + msg := new(dns.Msg) + + msg.SetUpdate(d.Get("zone").(string)) + + if d.HasChange("addresses") { + o, n := d.GetChange("addresses") + os := o.(*schema.Set) + ns := n.(*schema.Set) + remove := os.Difference(ns).List() + add := ns.Difference(os).List() + + // Loop through all the old addresses and remove them + for _, addr := range remove { + rr_remove, _ := dns.NewRR(fmt.Sprintf("%s %d A %s", rec_fqdn, ttl, addr.(string))) + msg.Remove([]dns.RR{rr_remove}) + } + // Loop through all the new addresses and insert them + for _, addr := range add { + rr_insert, _ := dns.NewRR(fmt.Sprintf("%s %d A %s", rec_fqdn, ttl, addr.(string))) + msg.Insert([]dns.RR{rr_insert}) + } + + r, err := exchange(msg, true, meta) + if err != nil { + d.SetId("") + return fmt.Errorf("Error updating DNS record: %s", err) + } + if r.Rcode != dns.RcodeSuccess { + d.SetId("") + return fmt.Errorf("Error updating DNS record: %v (%s)", r.Rcode, dns.RcodeToString[r.Rcode]) + } + } + + return resourceDnsARecordSetRead(d, meta) + } else { + return fmt.Errorf("update server is not set") + } +} + +func resourceDnsARecordSetDelete(d *schema.ResourceData, meta interface{}) error { + + return resourceDnsDelete(d, meta, dns.TypeA) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_aaaa_record_set.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_aaaa_record_set.go new file mode 100644 index 00000000000..aafa63d99b6 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_aaaa_record_set.go @@ -0,0 +1,138 @@ +package dns + +import ( + "fmt" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/miekg/dns" +) + +func resourceDnsAAAARecordSet() *schema.Resource { + return &schema.Resource{ + Create: resourceDnsAAAARecordSetCreate, + Read: resourceDnsAAAARecordSetRead, + Update: resourceDnsAAAARecordSetUpdate, + Delete: resourceDnsAAAARecordSetDelete, + Importer: &schema.ResourceImporter{ + State: resourceDnsImport, + }, + + Schema: map[string]*schema.Schema{ + "zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateZone, + }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateName, + }, + "addresses": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: hashIPString, + }, + "ttl": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 3600, + }, + }, + } +} + +func resourceDnsAAAARecordSetCreate(d *schema.ResourceData, meta interface{}) error { + + d.SetId(resourceFQDN(d)) + + return resourceDnsAAAARecordSetUpdate(d, meta) +} + +func resourceDnsAAAARecordSetRead(d *schema.ResourceData, meta interface{}) error { + + answers, err := resourceDnsRead(d, meta, dns.TypeAAAA) + if err != nil { + return err + } + + if len(answers) > 0 { + + var ttl sort.IntSlice + + addresses := schema.NewSet(hashIPString, nil) + for _, record := range answers { + addr, t, err := getAAAAVal(record) + if err != nil { + return fmt.Errorf("Error querying DNS record: %s", err) + } + addresses.Add(addr) + ttl = append(ttl, 
t) + } + sort.Sort(ttl) + + d.Set("addresses", addresses) + d.Set("ttl", ttl[0]) + } else { + d.SetId("") + } + + return nil +} + +func resourceDnsAAAARecordSetUpdate(d *schema.ResourceData, meta interface{}) error { + + if meta != nil { + + ttl := d.Get("ttl").(int) + + rec_fqdn := resourceFQDN(d) + + msg := new(dns.Msg) + + msg.SetUpdate(d.Get("zone").(string)) + + if d.HasChange("addresses") { + o, n := d.GetChange("addresses") + os := o.(*schema.Set) + ns := n.(*schema.Set) + remove := os.Difference(ns).List() + add := ns.Difference(os).List() + + // Loop through all the old addresses and remove them + for _, addr := range remove { + rr_remove, _ := dns.NewRR(fmt.Sprintf("%s %d AAAA %s", rec_fqdn, ttl, addr.(string))) + msg.Remove([]dns.RR{rr_remove}) + } + // Loop through all the new addresses and insert them + for _, addr := range add { + rr_insert, _ := dns.NewRR(fmt.Sprintf("%s %d AAAA %s", rec_fqdn, ttl, addr.(string))) + msg.Insert([]dns.RR{rr_insert}) + } + + r, err := exchange(msg, true, meta) + if err != nil { + d.SetId("") + return fmt.Errorf("Error updating DNS record: %s", err) + } + if r.Rcode != dns.RcodeSuccess { + d.SetId("") + return fmt.Errorf("Error updating DNS record: %v (%s)", r.Rcode, dns.RcodeToString[r.Rcode]) + } + } + + return resourceDnsAAAARecordSetRead(d, meta) + } else { + return fmt.Errorf("update server is not set") + } +} + +func resourceDnsAAAARecordSetDelete(d *schema.ResourceData, meta interface{}) error { + + return resourceDnsDelete(d, meta, dns.TypeAAAA) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_cname_record.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_cname_record.go new file mode 100644 index 00000000000..f989adcec1b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_cname_record.go @@ -0,0 +1,125 @@ +package dns + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/miekg/dns" +) + +func resourceDnsCnameRecord() *schema.Resource { + return &schema.Resource{ + Create: resourceDnsCnameRecordCreate, + Read: resourceDnsCnameRecordRead, + Update: resourceDnsCnameRecordUpdate, + Delete: resourceDnsCnameRecordDelete, + Importer: &schema.ResourceImporter{ + State: resourceDnsImport, + }, + + Schema: map[string]*schema.Schema{ + "zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateZone, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateName, + }, + "cname": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateZone, + }, + "ttl": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 3600, + }, + }, + } +} + +func resourceDnsCnameRecordCreate(d *schema.ResourceData, meta interface{}) error { + + d.SetId(resourceFQDN(d)) + + return resourceDnsCnameRecordUpdate(d, meta) +} + +func resourceDnsCnameRecordRead(d *schema.ResourceData, meta interface{}) error { + + answers, err := resourceDnsRead(d, meta, dns.TypeCNAME) + if err != nil { + return err + } + + if len(answers) > 0 { + + if len(answers) > 1 { + return fmt.Errorf("Error querying DNS record: multiple responses received") + } + record := answers[0] + cname, ttl, err := getCnameVal(record) + if err != nil { + return fmt.Errorf("Error querying DNS record: %s", err) + } + d.Set("cname", cname) + d.Set("ttl", ttl) + } else { + d.SetId("") + } + + return nil +} + +func resourceDnsCnameRecordUpdate(d 
*schema.ResourceData, meta interface{}) error { + + if meta != nil { + + ttl := d.Get("ttl").(int) + + rec_fqdn := resourceFQDN(d) + + msg := new(dns.Msg) + + msg.SetUpdate(d.Get("zone").(string)) + + if d.HasChange("cname") { + o, n := d.GetChange("cname") + + if o != "" { + rr_remove, _ := dns.NewRR(fmt.Sprintf("%s %d CNAME %s", rec_fqdn, ttl, o)) + msg.Remove([]dns.RR{rr_remove}) + } + if n != "" { + rr_insert, _ := dns.NewRR(fmt.Sprintf("%s %d CNAME %s", rec_fqdn, ttl, n)) + msg.Insert([]dns.RR{rr_insert}) + } + + r, err := exchange(msg, true, meta) + if err != nil { + d.SetId("") + return fmt.Errorf("Error updating DNS record: %s", err) + } + if r.Rcode != dns.RcodeSuccess { + d.SetId("") + return fmt.Errorf("Error updating DNS record: %v (%s)", r.Rcode, dns.RcodeToString[r.Rcode]) + } + } + + return resourceDnsCnameRecordRead(d, meta) + } else { + return fmt.Errorf("update server is not set") + } +} + +func resourceDnsCnameRecordDelete(d *schema.ResourceData, meta interface{}) error { + + return resourceDnsDelete(d, meta, dns.TypeCNAME) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_mx_record_set.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_mx_record_set.go new file mode 100644 index 00000000000..c80b8c62bea --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_mx_record_set.go @@ -0,0 +1,167 @@ +package dns + +import ( + "bytes" + "fmt" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/miekg/dns" +) + +func resourceDnsMXRecordSet() *schema.Resource { + return &schema.Resource{ + Create: resourceDnsMXRecordSetCreate, + Read: resourceDnsMXRecordSetRead, + Update: resourceDnsMXRecordSetUpdate, + Delete: resourceDnsMXRecordSetDelete, + Importer: &schema.ResourceImporter{ + State: resourceDnsImport, + }, + + Schema: map[string]*schema.Schema{ + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateZone, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateName, + }, + "mx": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "preference": { + Type: schema.TypeInt, + Required: true, + }, + "exchange": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateZone, + }, + }, + }, + Set: resourceDnsMXRecordSetHash, + }, + "ttl": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 3600, + }, + }, + } +} + +func resourceDnsMXRecordSetCreate(d *schema.ResourceData, meta interface{}) error { + + d.SetId(resourceFQDN(d)) + + return resourceDnsMXRecordSetUpdate(d, meta) +} + +func resourceDnsMXRecordSetRead(d *schema.ResourceData, meta interface{}) error { + + answers, err := resourceDnsRead(d, meta, dns.TypeMX) + if err != nil { + return err + } + + if len(answers) > 0 { + + var ttl sort.IntSlice + + mx := schema.NewSet(resourceDnsMXRecordSetHash, nil) + for _, record := range answers { + switch r := record.(type) { + case *dns.MX: + m := map[string]interface{}{ + "preference": int(r.Preference), + "exchange": r.Mx, + } + mx.Add(m) + ttl = append(ttl, int(r.Hdr.Ttl)) + default: + return fmt.Errorf("didn't get an MX record") + } + } + sort.Sort(ttl) + + d.Set("mx", mx) + d.Set("ttl", ttl[0]) + } else { + d.SetId("") + } + + return nil +} 
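Each record-set resource in this vendored provider follows the same pattern seen in the A, AAAA, CNAME, and MX files above: diff the old and new sets, build a dns.Msg with SetUpdate for the zone, queue Remove/Insert RRs, and hand the message to the provider's exchange helper (defined elsewhere in the vendored provider and not shown in this hunk, where it applies the configured update server and credentials). The standalone sketch below shows that RFC 2136 update flow directly against miekg/dns; the zone, record, and server address are placeholders, and a plain dns.Client stands in for the provider's exchange helper for illustration only.

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Build an RFC 2136 dynamic update for the zone, the same kind of message
	// the resource Update functions above assemble from the Terraform diff.
	msg := new(dns.Msg)
	msg.SetUpdate("example.com.") // zone must be fully qualified, as validateZone enforces

	// Queue one A record insert; the provider builds RRs the same way via dns.NewRR.
	rr, err := dns.NewRR("api.example.com. 300 A 192.0.2.10")
	if err != nil {
		log.Fatal(err)
	}
	msg.Insert([]dns.RR{rr})

	// Send the update to an authoritative server (placeholder address).
	c := new(dns.Client)
	r, _, err := c.Exchange(msg, "192.0.2.1:53")
	if err != nil {
		log.Fatal(err)
	}
	if r.Rcode != dns.RcodeSuccess {
		log.Fatalf("update refused: %s", dns.RcodeToString[r.Rcode])
	}
	fmt.Println("A record added")
}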
+ +func resourceDnsMXRecordSetUpdate(d *schema.ResourceData, meta interface{}) error { + + if meta != nil { + + ttl := d.Get("ttl").(int) + fqdn := resourceFQDN(d) + + msg := new(dns.Msg) + + msg.SetUpdate(d.Get("zone").(string)) + + if d.HasChange("mx") { + o, n := d.GetChange("mx") + os := o.(*schema.Set) + ns := n.(*schema.Set) + remove := os.Difference(ns).List() + add := ns.Difference(os).List() + + // Loop through all the old addresses and remove them + for _, mx := range remove { + m := mx.(map[string]interface{}) + rr_remove, _ := dns.NewRR(fmt.Sprintf("%s %d MX %d %s", fqdn, ttl, m["preference"], m["exchange"])) + msg.Remove([]dns.RR{rr_remove}) + } + // Loop through all the new addresses and insert them + for _, mx := range add { + m := mx.(map[string]interface{}) + rr_insert, _ := dns.NewRR(fmt.Sprintf("%s %d MX %d %s", fqdn, ttl, m["preference"], m["exchange"])) + msg.Insert([]dns.RR{rr_insert}) + } + + r, err := exchange(msg, true, meta) + if err != nil { + d.SetId("") + return fmt.Errorf("Error updating DNS record: %s", err) + } + if r.Rcode != dns.RcodeSuccess { + d.SetId("") + return fmt.Errorf("Error updating DNS record: %v (%s)", r.Rcode, dns.RcodeToString[r.Rcode]) + } + } + + return resourceDnsMXRecordSetRead(d, meta) + } else { + return fmt.Errorf("update server is not set") + } +} + +func resourceDnsMXRecordSetDelete(d *schema.ResourceData, meta interface{}) error { + + return resourceDnsDelete(d, meta, dns.TypeMX) +} + +func resourceDnsMXRecordSetHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%d-", m["preference"].(int))) + buf.WriteString(fmt.Sprintf("%s-", m["exchange"].(string))) + + return hashcode.String(buf.String()) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_ns_record_set.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_ns_record_set.go new file mode 100644 index 00000000000..3ad594ca4af --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_ns_record_set.go @@ -0,0 +1,142 @@ +package dns + +import ( + "fmt" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/miekg/dns" +) + +func resourceDnsNSRecordSet() *schema.Resource { + return &schema.Resource{ + Create: resourceDnsNSRecordSetCreate, + Read: resourceDnsNSRecordSetRead, + Update: resourceDnsNSRecordSetUpdate, + Delete: resourceDnsNSRecordSetDelete, + Importer: &schema.ResourceImporter{ + State: resourceDnsImport, + }, + + Schema: map[string]*schema.Schema{ + "zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateZone, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateName, + }, + "nameservers": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateZone, + }, + Set: schema.HashString, + }, + "ttl": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 3600, + }, + }, + } +} + +func resourceDnsNSRecordSetCreate(d *schema.ResourceData, meta interface{}) error { + + d.SetId(resourceFQDN(d)) + + return resourceDnsNSRecordSetUpdate(d, meta) +} + +func resourceDnsNSRecordSetRead(d *schema.ResourceData, meta interface{}) error { + + answers, err := resourceDnsRead(d, meta, dns.TypeNS) + if err != nil { + return err + } + + if len(answers) > 0 { + + var ttl sort.IntSlice + + nameservers := 
schema.NewSet(schema.HashString, nil) + for _, record := range answers { + nameserver, t, err := getNSVal(record) + if err != nil { + return fmt.Errorf("Error querying DNS record: %s", err) + } + nameservers.Add(nameserver) + ttl = append(ttl, t) + } + sort.Sort(ttl) + + d.Set("nameservers", nameservers) + d.Set("ttl", ttl[0]) + } else { + d.SetId("") + } + + return nil + +} + +func resourceDnsNSRecordSetUpdate(d *schema.ResourceData, meta interface{}) error { + + if meta != nil { + + ttl := d.Get("ttl").(int) + + rec_fqdn := resourceFQDN(d) + + msg := new(dns.Msg) + + msg.SetUpdate(d.Get("zone").(string)) + + if d.HasChange("nameservers") { + o, n := d.GetChange("nameservers") + os := o.(*schema.Set) + ns := n.(*schema.Set) + remove := os.Difference(ns).List() + add := ns.Difference(os).List() + + // Loop through all the old nameservers and remove them + for _, nameserver := range remove { + rr_remove, _ := dns.NewRR(fmt.Sprintf("%s %d NS %s", rec_fqdn, ttl, nameserver.(string))) + msg.Remove([]dns.RR{rr_remove}) + } + // Loop through all the new nameservers and insert them + for _, nameserver := range add { + rr_insert, _ := dns.NewRR(fmt.Sprintf("%s %d NS %s", rec_fqdn, ttl, nameserver.(string))) + msg.Insert([]dns.RR{rr_insert}) + } + + r, err := exchange(msg, true, meta) + if err != nil { + d.SetId("") + return fmt.Errorf("Error updating DNS record: %s", err) + } + if r.Rcode != dns.RcodeSuccess { + d.SetId("") + return fmt.Errorf("Error updating DNS record: %v (%s)", r.Rcode, dns.RcodeToString[r.Rcode]) + } + } + + return resourceDnsNSRecordSetRead(d, meta) + } else { + return fmt.Errorf("update server is not set") + } +} + +func resourceDnsNSRecordSetDelete(d *schema.ResourceData, meta interface{}) error { + + return resourceDnsDelete(d, meta, dns.TypeNS) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_ptr_record.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_ptr_record.go new file mode 100644 index 00000000000..3440ecb7486 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_ptr_record.go @@ -0,0 +1,125 @@ +package dns + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/miekg/dns" +) + +func resourceDnsPtrRecord() *schema.Resource { + return &schema.Resource{ + Create: resourceDnsPtrRecordCreate, + Read: resourceDnsPtrRecordRead, + Update: resourceDnsPtrRecordUpdate, + Delete: resourceDnsPtrRecordDelete, + Importer: &schema.ResourceImporter{ + State: resourceDnsImport, + }, + + Schema: map[string]*schema.Schema{ + "zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateZone, + }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateName, + }, + "ptr": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateZone, + }, + "ttl": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 3600, + }, + }, + } +} + +func resourceDnsPtrRecordCreate(d *schema.ResourceData, meta interface{}) error { + + d.SetId(resourceFQDN(d)) + + return resourceDnsPtrRecordUpdate(d, meta) +} + +func resourceDnsPtrRecordRead(d *schema.ResourceData, meta interface{}) error { + + answers, err := resourceDnsRead(d, meta, dns.TypePTR) + if err != nil { + return err + } + + if len(answers) > 0 { + + if len(answers) > 1 { + return fmt.Errorf("Error querying DNS record: multiple responses received") + } + record := answers[0] + ptr, 
ttl, err := getPtrVal(record) + if err != nil { + return fmt.Errorf("Error querying DNS record: %s", err) + } + d.Set("ptr", ptr) + d.Set("ttl", ttl) + } else { + d.SetId("") + } + + return nil +} + +func resourceDnsPtrRecordUpdate(d *schema.ResourceData, meta interface{}) error { + + if meta != nil { + + ttl := d.Get("ttl").(int) + + rec_fqdn := resourceFQDN(d) + + msg := new(dns.Msg) + + msg.SetUpdate(d.Get("zone").(string)) + + if d.HasChange("ptr") { + o, n := d.GetChange("ptr") + + if o != "" { + rr_remove, _ := dns.NewRR(fmt.Sprintf("%s %d PTR %s", rec_fqdn, ttl, o)) + msg.Remove([]dns.RR{rr_remove}) + } + if n != "" { + rr_insert, _ := dns.NewRR(fmt.Sprintf("%s %d PTR %s", rec_fqdn, ttl, n)) + msg.Insert([]dns.RR{rr_insert}) + } + + r, err := exchange(msg, true, meta) + if err != nil { + d.SetId("") + return fmt.Errorf("Error updating DNS record: %s", err) + } + if r.Rcode != dns.RcodeSuccess { + d.SetId("") + return fmt.Errorf("Error updating DNS record: %v (%s)", r.Rcode, dns.RcodeToString[r.Rcode]) + } + } + + return resourceDnsPtrRecordRead(d, meta) + } else { + return fmt.Errorf("update server is not set") + } +} + +func resourceDnsPtrRecordDelete(d *schema.ResourceData, meta interface{}) error { + + return resourceDnsDelete(d, meta, dns.TypePTR) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_srv_record_set.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_srv_record_set.go new file mode 100644 index 00000000000..ded15ecf826 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_srv_record_set.go @@ -0,0 +1,179 @@ +package dns + +import ( + "bytes" + "fmt" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/miekg/dns" +) + +func resourceDnsSRVRecordSet() *schema.Resource { + return &schema.Resource{ + Create: resourceDnsSRVRecordSetCreate, + Read: resourceDnsSRVRecordSetRead, + Update: resourceDnsSRVRecordSetUpdate, + Delete: resourceDnsSRVRecordSetDelete, + Importer: &schema.ResourceImporter{ + State: resourceDnsImport, + }, + + Schema: map[string]*schema.Schema{ + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateZone, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateName, + }, + "srv": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "priority": { + Type: schema.TypeInt, + Required: true, + }, + "weight": { + Type: schema.TypeInt, + Required: true, + }, + "port": { + Type: schema.TypeInt, + Required: true, + }, + "target": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateZone, + }, + }, + }, + Set: resourceDnsSRVRecordSetHash, + }, + "ttl": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 3600, + }, + }, + } +} + +func resourceDnsSRVRecordSetCreate(d *schema.ResourceData, meta interface{}) error { + + d.SetId(resourceFQDN(d)) + + return resourceDnsSRVRecordSetUpdate(d, meta) +} + +func resourceDnsSRVRecordSetRead(d *schema.ResourceData, meta interface{}) error { + + answers, err := resourceDnsRead(d, meta, dns.TypeSRV) + if err != nil { + return err + } + + if len(answers) > 0 { + + var ttl sort.IntSlice + + srv := schema.NewSet(resourceDnsSRVRecordSetHash, nil) + for _, record := range answers { + 
switch r := record.(type) { + case *dns.SRV: + s := map[string]interface{}{ + "priority": int(r.Priority), + "weight": int(r.Weight), + "port": int(r.Port), + "target": r.Target, + } + srv.Add(s) + ttl = append(ttl, int(r.Hdr.Ttl)) + default: + return fmt.Errorf("didn't get an SRV record") + } + } + sort.Sort(ttl) + + d.Set("srv", srv) + d.Set("ttl", ttl[0]) + } else { + d.SetId("") + } + + return nil +} + +func resourceDnsSRVRecordSetUpdate(d *schema.ResourceData, meta interface{}) error { + + if meta != nil { + + ttl := d.Get("ttl").(int) + fqdn := resourceFQDN(d) + + msg := new(dns.Msg) + + msg.SetUpdate(d.Get("zone").(string)) + + if d.HasChange("srv") { + o, n := d.GetChange("srv") + os := o.(*schema.Set) + ns := n.(*schema.Set) + remove := os.Difference(ns).List() + add := ns.Difference(os).List() + + // Loop through all the old addresses and remove them + for _, srv := range remove { + s := srv.(map[string]interface{}) + rr_remove, _ := dns.NewRR(fmt.Sprintf("%s %d SRV %d %d %d %s", fqdn, ttl, s["priority"], s["weight"], s["port"], s["target"])) + msg.Remove([]dns.RR{rr_remove}) + } + // Loop through all the new addresses and insert them + for _, srv := range add { + s := srv.(map[string]interface{}) + rr_insert, _ := dns.NewRR(fmt.Sprintf("%s %d SRV %d %d %d %s", fqdn, ttl, s["priority"], s["weight"], s["port"], s["target"])) + msg.Insert([]dns.RR{rr_insert}) + } + + r, err := exchange(msg, true, meta) + if err != nil { + d.SetId("") + return fmt.Errorf("Error updating DNS record: %s", err) + } + if r.Rcode != dns.RcodeSuccess { + d.SetId("") + return fmt.Errorf("Error updating DNS record: %v (%s)", r.Rcode, dns.RcodeToString[r.Rcode]) + } + } + + return resourceDnsSRVRecordSetRead(d, meta) + } else { + return fmt.Errorf("update server is not set") + } +} + +func resourceDnsSRVRecordSetDelete(d *schema.ResourceData, meta interface{}) error { + + return resourceDnsDelete(d, meta, dns.TypeSRV) +} + +func resourceDnsSRVRecordSetHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%d-", m["priority"].(int))) + buf.WriteString(fmt.Sprintf("%d-", m["weight"].(int))) + buf.WriteString(fmt.Sprintf("%d-", m["port"].(int))) + buf.WriteString(fmt.Sprintf("%s-", m["target"].(string))) + + return hashcode.String(buf.String()) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_txt_record_set.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_txt_record_set.go new file mode 100644 index 00000000000..acc742ce055 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/resource_dns_txt_record_set.go @@ -0,0 +1,139 @@ +package dns + +import ( + "fmt" + "sort" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/miekg/dns" +) + +func resourceDnsTXTRecordSet() *schema.Resource { + return &schema.Resource{ + Create: resourceDnsTXTRecordSetCreate, + Read: resourceDnsTXTRecordSetRead, + Update: resourceDnsTXTRecordSetUpdate, + Delete: resourceDnsTXTRecordSetDelete, + Importer: &schema.ResourceImporter{ + State: resourceDnsImport, + }, + + Schema: map[string]*schema.Schema{ + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateZone, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateName, + }, + "txt": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: 
&schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "ttl": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 3600, + }, + }, + } +} + +func resourceDnsTXTRecordSetCreate(d *schema.ResourceData, meta interface{}) error { + + d.SetId(resourceFQDN(d)) + + return resourceDnsTXTRecordSetUpdate(d, meta) +} + +func resourceDnsTXTRecordSetRead(d *schema.ResourceData, meta interface{}) error { + + answers, err := resourceDnsRead(d, meta, dns.TypeTXT) + if err != nil { + return err + } + + if len(answers) > 0 { + + var ttl sort.IntSlice + + txt := schema.NewSet(schema.HashString, nil) + for _, record := range answers { + switch r := record.(type) { + case *dns.TXT: + txt.Add(strings.Join(r.Txt, "")) + ttl = append(ttl, int(r.Hdr.Ttl)) + default: + return fmt.Errorf("didn't get an TXT record") + } + } + sort.Sort(ttl) + + d.Set("txt", txt) + d.Set("ttl", ttl[0]) + } else { + d.SetId("") + } + + return nil +} + +func resourceDnsTXTRecordSetUpdate(d *schema.ResourceData, meta interface{}) error { + + if meta != nil { + + ttl := d.Get("ttl").(int) + fqdn := resourceFQDN(d) + + msg := new(dns.Msg) + + msg.SetUpdate(d.Get("zone").(string)) + + if d.HasChange("txt") { + o, n := d.GetChange("txt") + os := o.(*schema.Set) + ns := n.(*schema.Set) + remove := os.Difference(ns).List() + add := ns.Difference(os).List() + + // Loop through all the old addresses and remove them + for _, txt := range remove { + rr_remove, _ := dns.NewRR(fmt.Sprintf("%s %d TXT \"%s\"", fqdn, ttl, txt)) + msg.Remove([]dns.RR{rr_remove}) + } + // Loop through all the new addresses and insert them + for _, txt := range add { + rr_insert, _ := dns.NewRR(fmt.Sprintf("%s %d TXT \"%s\"", fqdn, ttl, txt)) + msg.Insert([]dns.RR{rr_insert}) + } + + r, err := exchange(msg, true, meta) + if err != nil { + d.SetId("") + return fmt.Errorf("Error updating DNS record: %s", err) + } + if r.Rcode != dns.RcodeSuccess { + d.SetId("") + return fmt.Errorf("Error updating DNS record: %v (%s)", r.Rcode, dns.RcodeToString[r.Rcode]) + } + } + + return resourceDnsTXTRecordSetRead(d, meta) + } else { + return fmt.Errorf("update server is not set") + } +} + +func resourceDnsTXTRecordSetDelete(d *schema.ResourceData, meta interface{}) error { + + return resourceDnsDelete(d, meta, dns.TypeTXT) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/test_check_attr_string_array.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/test_check_attr_string_array.go new file mode 100644 index 00000000000..f6c7de167aa --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/test_check_attr_string_array.go @@ -0,0 +1,55 @@ +package dns + +import ( + "fmt" + "strconv" + + r "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +func testCheckAttrStringArray(name, key string, value []string) r.TestCheckFunc { + return func(s *terraform.State) error { + ms := s.RootModule() + rs, ok := ms.Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + is := rs.Primary + if is == nil { + return fmt.Errorf("No primary instance: %s", name) + } + + attrKey := fmt.Sprintf("%s.#", key) + count, ok := is.Attributes[attrKey] + if !ok { + return fmt.Errorf("Attributes not found for %s", attrKey) + } + + gotCount, _ := strconv.Atoi(count) + if gotCount != len(value) { + return fmt.Errorf("Mismatch array count for %s: got %s, wanted %d", key, count, len(value)) + } + + Next: + 
for i := 0; i < gotCount; i++ { + attrKey = fmt.Sprintf("%s.%d", key, i) + got, ok := is.Attributes[attrKey] + if !ok { + return fmt.Errorf("Missing array item for %s", attrKey) + } + for _, want := range value { + if got == want { + continue Next + } + } + return fmt.Errorf( + "Unexpected array item for %s: got %s", + attrKey, + got) + } + + return nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/test_check_attr_string_array_member.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/test_check_attr_string_array_member.go new file mode 100644 index 00000000000..0f81b86cfb5 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/test_check_attr_string_array_member.go @@ -0,0 +1,39 @@ +package dns + +import ( + "fmt" + + r "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +func testCheckAttrStringArrayMember(name, key string, value []string) r.TestCheckFunc { + return func(s *terraform.State) error { + ms := s.RootModule() + rs, ok := ms.Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + is := rs.Primary + if is == nil { + return fmt.Errorf("No primary instance: %s", name) + } + + got, ok := is.Attributes[key] + if !ok { + return fmt.Errorf("Attributes not found for %s", key) + } + + for _, want := range value { + if got == want { + return nil + } + } + + return fmt.Errorf( + "Unexpected value for %s: got %s", + key, + got) + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-dns/dns/validators.go b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/validators.go new file mode 100644 index 00000000000..6df4bd26591 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-dns/dns/validators.go @@ -0,0 +1,30 @@ +package dns + +import ( + "fmt" + "strings" + + "github.com/miekg/dns" +) + +func validateZone(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if strings.TrimSpace(value) != value { + errors = append(errors, fmt.Errorf("DNS zone name %q must not contain whitespace: %q", k, value)) + } + if !dns.IsFqdn(value) { + errors = append(errors, fmt.Errorf("DNS zone name %q must be fully qualified: %q", k, value)) + } + return +} + +func validateName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if strings.TrimSpace(value) != value || len(value) == 0 { + errors = append(errors, fmt.Errorf("DNS record name %q must not contain whitespace or be empty: %q", k, value)) + } + if dns.IsFqdn(value) { + errors = append(errors, fmt.Errorf("DNS record name %q must not be fully qualified: %q", k, value)) + } + return +} diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile index 457866cb1f2..b3ce3df3032 100644 --- a/vendor/go.opencensus.io/Makefile +++ b/vendor/go.opencensus.io/Makefile @@ -8,7 +8,7 @@ ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) GOTEST_OPT?=-v -race -timeout 30s GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic GOTEST=go test -GOFMT=gofmt +GOIMPORTS=goimports GOLINT=golint GOVET=go vet EMBEDMD=embedmd @@ -17,14 +17,14 @@ TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packag TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages" README_FILES := $(shell find . 
-name '*README.md' | sort | tr '\n' ' ') -.DEFAULT_GOAL := fmt-lint-vet-embedmd-test +.DEFAULT_GOAL := imports-lint-vet-embedmd-test -.PHONY: fmt-lint-vet-embedmd-test -fmt-lint-vet-embedmd-test: fmt lint vet embedmd test +.PHONY: imports-lint-vet-embedmd-test +imports-lint-vet-embedmd-test: imports lint vet embedmd test # TODO enable test-with-coverage in tavis .PHONY: travis-ci -travis-ci: fmt lint vet embedmd test test-386 +travis-ci: imports lint vet embedmd test test-386 all-pkgs: @echo $(ALL_PKGS) | tr ' ' '\n' | sort @@ -44,15 +44,15 @@ test-386: test-with-coverage: $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) -.PHONY: fmt -fmt: - @FMTOUT=`$(GOFMT) -s -l $(ALL_SRC) 2>&1`; \ - if [ "$$FMTOUT" ]; then \ - echo "$(GOFMT) FAILED => gofmt the following files:\n"; \ - echo "$$FMTOUT\n"; \ +.PHONY: imports +imports: + @IMPORTSOUT=`$(GOIMPORTS) -l $(ALL_SRC) 2>&1`; \ + if [ "$$IMPORTSOUT" ]; then \ + echo "$(GOIMPORTS) FAILED => goimports the following files:\n"; \ + echo "$$IMPORTSOUT\n"; \ exit 1; \ else \ - echo "Fmt finished successfully"; \ + echo "Imports finished successfully"; \ fi .PHONY: lint @@ -91,6 +91,7 @@ embedmd: .PHONY: install-tools install-tools: - go get -u golang.org/x/tools/cmd/cover go get -u golang.org/x/lint/golint + go get -u golang.org/x/tools/cmd/cover + go get -u golang.org/x/tools/cmd/goimports go get -u github.com/rakyll/embedmd diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client.go b/vendor/go.opencensus.io/plugin/ocgrpc/client.go index 28fddb84407..2063b6f76a1 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client.go @@ -16,8 +16,8 @@ package ocgrpc import ( "context" - "go.opencensus.io/trace" + "go.opencensus.io/trace" "google.golang.org/grpc/stats" ) diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/vendor/go.opencensus.io/plugin/ocgrpc/server.go index 15ada839d6f..8a53e097274 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server.go @@ -16,9 +16,10 @@ package ocgrpc import ( "context" - "go.opencensus.io/trace" "google.golang.org/grpc/stats" + + "go.opencensus.io/trace" ) // ServerHandler implements gRPC stats.Handler recording OpenCensus stats and diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go index fef58275662..61bc543d0a2 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go @@ -15,16 +15,16 @@ package ocgrpc import ( + "context" "strings" "google.golang.org/grpc/codes" - - "context" - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" ) const traceContextKey = "grpc-trace-bin" diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go index 2f1c7f0063e..9ad8852198d 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go @@ -68,7 +68,7 @@ func ParseTraceID(tid string) (trace.TraceID, bool) { return trace.TraceID{}, false } b, err := hex.DecodeString(tid) - if err != nil { + if err != nil || len(b) > 16 { return trace.TraceID{}, false } var traceID trace.TraceID @@ -90,7 +90,7 @@ func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) { 
return trace.SpanID{}, false } b, err := hex.DecodeString(sid) - if err != nil { + if err != nil || len(b) > 8 { return trace.SpanID{}, false } start := 8 - len(b) diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go index ad4691184df..2b97283462e 100644 --- a/vendor/go.opencensus.io/stats/record.go +++ b/vendor/go.opencensus.io/stats/record.go @@ -31,10 +31,19 @@ func init() { } } +// Recorder provides an interface for exporting measurement information from +// the static Record method by using the WithRecorder option. +type Recorder interface { + // Record records a set of measurements associated with the given tags and attachments. + // The second argument is a `[]Measurement`. + Record(*tag.Map, interface{}, map[string]interface{}) +} + type recordOptions struct { attachments metricdata.Attachments mutators []tag.Mutator measurements []Measurement + recorder Recorder } // WithAttachments applies provided exemplar attachments. @@ -58,6 +67,14 @@ func WithMeasurements(measurements ...Measurement) Options { } } +// WithRecorder records the measurements to the specified `Recorder`, rather +// than to the global metrics recorder. +func WithRecorder(meter Recorder) Options { + return func(ro *recordOptions) { + ro.recorder = meter + } +} + // Options apply changes to recordOptions. type Options func(*recordOptions) @@ -93,6 +110,9 @@ func RecordWithOptions(ctx context.Context, ros ...Options) error { return nil } recorder := internal.DefaultRecorder + if o.recorder != nil { + recorder = o.recorder.Record + } if recorder == nil { return nil } diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go index 7cb59718f5f..73ba11f5b6e 100644 --- a/vendor/go.opencensus.io/stats/view/export.go +++ b/vendor/go.opencensus.io/stats/view/export.go @@ -14,13 +14,6 @@ package view -import "sync" - -var ( - exportersMu sync.RWMutex // guards exporters - exporters = make(map[Exporter]struct{}) -) - // Exporter exports the collected records as view data. // // The ExportView method should return quickly; if an @@ -43,16 +36,10 @@ type Exporter interface { // // Binaries can register exporters, libraries shouldn't register exporters. func RegisterExporter(e Exporter) { - exportersMu.Lock() - defer exportersMu.Unlock() - - exporters[e] = struct{}{} + defaultWorker.RegisterExporter(e) } // UnregisterExporter unregisters an exporter. 
func UnregisterExporter(e Exporter) { - exportersMu.Lock() - defer exportersMu.Unlock() - - delete(exporters, e) + defaultWorker.UnregisterExporter(e) } diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go index 293c1646df2..5e1656a1f2b 100644 --- a/vendor/go.opencensus.io/stats/view/view_to_metric.go +++ b/vendor/go.opencensus.io/stats/view/view_to_metric.go @@ -18,6 +18,8 @@ package view import ( "time" + "go.opencensus.io/resource" + "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats" ) @@ -125,7 +127,7 @@ func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Ti } } -func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricdata.Metric { +func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time, startTime time.Time) *metricdata.Metric { if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { startTime = time.Time{} @@ -144,6 +146,7 @@ func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricda m := &metricdata.Metric{ Descriptor: *v.metricDescriptor, TimeSeries: ts, + Resource: r, } return m } diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index 2f3c018af0e..ab8bfd46d0b 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -20,6 +20,8 @@ import ( "sync" "time" + "go.opencensus.io/resource" + "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" "go.opencensus.io/stats" @@ -28,7 +30,7 @@ import ( ) func init() { - defaultWorker = newWorker() + defaultWorker = NewMeter().(*worker) go defaultWorker.start() internal.DefaultRecorder = record } @@ -47,8 +49,69 @@ type worker struct { c chan command quit, done chan bool mu sync.RWMutex + r *resource.Resource + + exportersMu sync.RWMutex + exporters map[Exporter]struct{} +} + +// Meter defines an interface which allows a single process to maintain +// multiple sets of metrics exports (intended for the advanced case where a +// single process wants to report metrics about multiple objects, such as +// multiple databases or HTTP services). +// +// Note that this is an advanced use case, and the static functions in this +// module should cover the common use cases. +type Meter interface { + stats.Recorder + // Find returns a registered view associated with this name. + // If no registered view is found, nil is returned. + Find(name string) *View + // Register begins collecting data for the given views. + // Once a view is registered, it reports data to the registered exporters. + Register(views ...*View) error + // Unregister the given views. Data will not longer be exported for these views + // after Unregister returns. + // It is not necessary to unregister from views you expect to collect for the + // duration of your program execution. + Unregister(views ...*View) + // SetReportingPeriod sets the interval between reporting aggregated views in + // the program. If duration is less than or equal to zero, it enables the + // default behavior. + // + // Note: each exporter makes different promises about what the lowest supported + // duration is. For example, the Stackdriver exporter recommends a value no + // lower than 1 minute. Consult each exporter per your needs. + SetReportingPeriod(time.Duration) + + // RegisterExporter registers an exporter. 
+ // Collected data will be reported via all the + // registered exporters. Once you no longer + // want data to be exported, invoke UnregisterExporter + // with the previously registered exporter. + // + // Binaries can register exporters, libraries shouldn't register exporters. + RegisterExporter(Exporter) + // UnregisterExporter unregisters an exporter. + UnregisterExporter(Exporter) + // SetResource may be used to set the Resource associated with this registry. + // This is intended to be used in cases where a single process exports metrics + // for multiple Resources, typically in a multi-tenant situation. + SetResource(*resource.Resource) + + // Start causes the Meter to start processing Record calls and aggregating + // statistics as well as exporting data. + Start() + // Stop causes the Meter to stop processing calls and terminate data export. + Stop() + + // RetrieveData gets a snapshot of the data collected for the the view registered + // with the given name. It is intended for testing only. + RetrieveData(viewName string) ([]*Row, error) } +var _ Meter = (*worker)(nil) + var defaultWorker *worker var defaultReportingDuration = 10 * time.Second @@ -56,11 +119,17 @@ var defaultReportingDuration = 10 * time.Second // Find returns a registered view associated with this name. // If no registered view is found, nil is returned. func Find(name string) (v *View) { + return defaultWorker.Find(name) +} + +// Find returns a registered view associated with this name. +// If no registered view is found, nil is returned. +func (w *worker) Find(name string) (v *View) { req := &getViewByNameReq{ name: name, c: make(chan *getViewByNameResp), } - defaultWorker.c <- req + w.c <- req resp := <-req.c return resp.v } @@ -68,11 +137,17 @@ func Find(name string) (v *View) { // Register begins collecting data for the given views. // Once a view is registered, it reports data to the registered exporters. func Register(views ...*View) error { + return defaultWorker.Register(views...) +} + +// Register begins collecting data for the given views. +// Once a view is registered, it reports data to the registered exporters. +func (w *worker) Register(views ...*View) error { req := ®isterViewReq{ views: views, err: make(chan error), } - defaultWorker.c <- req + w.c <- req return <-req.err } @@ -81,6 +156,14 @@ func Register(views ...*View) error { // It is not necessary to unregister from views you expect to collect for the // duration of your program execution. func Unregister(views ...*View) { + defaultWorker.Unregister(views...) +} + +// Unregister the given views. Data will not longer be exported for these views +// after Unregister returns. +// It is not necessary to unregister from views you expect to collect for the +// duration of your program execution. +func (w *worker) Unregister(views ...*View) { names := make([]string, len(views)) for i := range views { names[i] = views[i].Name @@ -89,31 +172,42 @@ func Unregister(views ...*View) { views: names, done: make(chan struct{}), } - defaultWorker.c <- req + w.c <- req <-req.done } // RetrieveData gets a snapshot of the data collected for the the view registered // with the given name. It is intended for testing only. func RetrieveData(viewName string) ([]*Row, error) { + return defaultWorker.RetrieveData(viewName) +} + +// RetrieveData gets a snapshot of the data collected for the the view registered +// with the given name. It is intended for testing only. 
+func (w *worker) RetrieveData(viewName string) ([]*Row, error) { req := &retrieveDataReq{ now: time.Now(), v: viewName, c: make(chan *retrieveDataResp), } - defaultWorker.c <- req + w.c <- req resp := <-req.c return resp.rows, resp.err } func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { + defaultWorker.Record(tags, ms, attachments) +} + +// Record records a set of measurements ms associated with the given tags and attachments. +func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { req := &recordReq{ tm: tags, ms: ms.([]stats.Measurement), attachments: attachments, t: time.Now(), } - defaultWorker.c <- req + w.c <- req } // SetReportingPeriod sets the interval between reporting aggregated views in @@ -124,17 +218,31 @@ func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { // duration is. For example, the Stackdriver exporter recommends a value no // lower than 1 minute. Consult each exporter per your needs. func SetReportingPeriod(d time.Duration) { + defaultWorker.SetReportingPeriod(d) +} + +// SetReportingPeriod sets the interval between reporting aggregated views in +// the program. If duration is less than or equal to zero, it enables the +// default behavior. +// +// Note: each exporter makes different promises about what the lowest supported +// duration is. For example, the Stackdriver exporter recommends a value no +// lower than 1 minute. Consult each exporter per your needs. +func (w *worker) SetReportingPeriod(d time.Duration) { // TODO(acetechnologist): ensure that the duration d is more than a certain // value. e.g. 1s req := &setReportingPeriodReq{ d: d, c: make(chan bool), } - defaultWorker.c <- req + w.c <- req <-req.c // don't return until the timer is set to the new duration. } -func newWorker() *worker { +// NewMeter constructs a Meter instance. You should only need to use this if +// you need to separate out Measurement recordings and View aggregations within +// a single process. +func NewMeter() Meter { return &worker{ measures: make(map[string]*measureRef), views: make(map[string]*viewInternal), @@ -143,9 +251,23 @@ func newWorker() *worker { c: make(chan command, 1024), quit: make(chan bool), done: make(chan bool), + + exporters: make(map[Exporter]struct{}), } } +// SetResource associates all data collected by this Meter with the specified +// resource. This resource is reported when using metricexport.ReadAndExport; +// it is not provided when used with ExportView/RegisterExporter, because that +// interface does not provide a means for reporting the Resource. 
+func (w *worker) SetResource(r *resource.Resource) { + w.r = r +} + +func (w *worker) Start() { + go w.start() +} + func (w *worker) start() { prodMgr := metricproducer.GlobalManager() prodMgr.AddProducer(w) @@ -155,7 +277,7 @@ func (w *worker) start() { case cmd := <-w.c: cmd.handleCommand(w) case <-w.timer.C: - w.reportUsage(time.Now()) + w.reportUsage() case <-w.quit: w.timer.Stop() close(w.c) @@ -165,7 +287,7 @@ func (w *worker) start() { } } -func (w *worker) stop() { +func (w *worker) Stop() { prodMgr := metricproducer.GlobalManager() prodMgr.DeleteProducer(w) @@ -202,44 +324,45 @@ func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { return x, nil } w.views[vi.view.Name] = vi + w.startTimes[vi] = time.Now() ref := w.getMeasureRef(vi.view.Measure.Name()) ref.views[vi] = struct{}{} return vi, nil } -func (w *worker) unregisterView(viewName string) { +func (w *worker) unregisterView(v *viewInternal) { w.mu.Lock() defer w.mu.Unlock() - delete(w.views, viewName) + delete(w.views, v.view.Name) + delete(w.startTimes, v) + if measure := w.measures[v.view.Measure.Name()]; measure != nil { + delete(measure.views, v) + } } -func (w *worker) reportView(v *viewInternal, now time.Time) { +func (w *worker) reportView(v *viewInternal) { if !v.isSubscribed() { return } rows := v.collectedRows() - _, ok := w.startTimes[v] - if !ok { - w.startTimes[v] = now - } viewData := &Data{ View: v.view, Start: w.startTimes[v], End: time.Now(), Rows: rows, } - exportersMu.Lock() - for e := range exporters { + w.exportersMu.Lock() + defer w.exportersMu.Unlock() + for e := range w.exporters { e.ExportView(viewData) } - exportersMu.Unlock() } -func (w *worker) reportUsage(now time.Time) { +func (w *worker) reportUsage() { w.mu.Lock() defer w.mu.Unlock() for _, v := range w.views { - w.reportView(v, now) + w.reportView(v) } } @@ -248,11 +371,6 @@ func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { return nil } - _, ok := w.startTimes[v] - if !ok { - w.startTimes[v] = now - } - var startTime time.Time if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { @@ -261,7 +379,7 @@ func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { startTime = w.startTimes[v] } - return viewToMetric(v, now, startTime) + return viewToMetric(v, w.r, now, startTime) } // Read reads all view data and returns them as metrics. @@ -279,3 +397,17 @@ func (w *worker) Read() []*metricdata.Metric { } return metrics } + +func (w *worker) RegisterExporter(e Exporter) { + w.exportersMu.Lock() + defer w.exportersMu.Unlock() + + w.exporters[e] = struct{}{} +} + +func (w *worker) UnregisterExporter(e Exporter) { + w.exportersMu.Lock() + defer w.exportersMu.Unlock() + + delete(w.exporters, e) +} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go index 0267e179aed..9ac4cc05992 100644 --- a/vendor/go.opencensus.io/stats/view/worker_commands.go +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -95,7 +95,7 @@ func (cmd *unregisterFromViewReq) handleCommand(w *worker) { } // Report pending data for this view before removing it. - w.reportView(vi, time.Now()) + w.reportView(vi) vi.unsubscribe() if !vi.isSubscribed() { @@ -103,7 +103,7 @@ func (cmd *unregisterFromViewReq) handleCommand(w *worker) { // The collected data can be cleared. 
vi.clearRows() } - w.unregisterView(name) + w.unregisterView(vi) } cmd.done <- struct{}{} } @@ -163,7 +163,7 @@ func (cmd *recordReq) handleCommand(w *worker) { } ref := w.getMeasureRef(m.Measure().Name()) for v := range ref.views { - v.addSample(cmd.tm, m.Value(), cmd.attachments, time.Now()) + v.addSample(cmd.tm, m.Value(), cmd.attachments, cmd.t) } } } diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go index dc7a295c773..908c2497ed5 100644 --- a/vendor/go.opencensus.io/trace/lrumap.go +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -44,7 +44,7 @@ func (lm lruMap) len() int { } func (lm lruMap) keys() []interface{} { - keys := []interface{}{} + keys := make([]interface{}, len(lm.cacheKeys)) for k := range lm.cacheKeys { keys = append(keys, k) } diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go index 3f8977b41b4..125e2cd9012 100644 --- a/vendor/go.opencensus.io/trace/trace.go +++ b/vendor/go.opencensus.io/trace/trace.go @@ -345,7 +345,7 @@ func (s *Span) SetStatus(status Status) { } func (s *Span) interfaceArrayToLinksArray() []Link { - linksArr := make([]Link, 0) + linksArr := make([]Link, 0, len(s.links.queue)) for _, value := range s.links.queue { linksArr = append(linksArr, value.(Link)) } @@ -353,7 +353,7 @@ func (s *Span) interfaceArrayToLinksArray() []Link { } func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { - messageEventArr := make([]MessageEvent, 0) + messageEventArr := make([]MessageEvent, 0, len(s.messageEvents.queue)) for _, value := range s.messageEvents.queue { messageEventArr = append(messageEventArr, value.(MessageEvent)) } @@ -361,7 +361,7 @@ func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { } func (s *Span) interfaceArrayToAnnotationArray() []Annotation { - annotationArr := make([]Annotation, 0) + annotationArr := make([]Annotation, 0, len(s.annotations.queue)) for _, value := range s.annotations.queue { annotationArr = append(annotationArr, value.(Annotation)) } @@ -369,7 +369,7 @@ func (s *Span) interfaceArrayToAnnotationArray() []Annotation { } func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { - attributes := make(map[string]interface{}) + attributes := make(map[string]interface{}, s.lruAttributes.len()) for _, key := range s.lruAttributes.keys() { value, ok := s.lruAttributes.get(key) if ok { @@ -420,7 +420,7 @@ func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...in var m map[string]interface{} s.mu.Lock() if len(attributes) != 0 { - m = make(map[string]interface{}) + m = make(map[string]interface{}, len(attributes)) copyAttributes(m, attributes) } s.annotations.add(Annotation{ @@ -436,7 +436,7 @@ func (s *Span) printStringInternal(attributes []Attribute, str string) { var a map[string]interface{} s.mu.Lock() if len(attributes) != 0 { - a = make(map[string]interface{}) + a = make(map[string]interface{}, len(attributes)) copyAttributes(a, attributes) } s.annotations.add(Annotation{ diff --git a/vendor/golang.org/x/net/bpf/asm.go b/vendor/golang.org/x/net/bpf/asm.go new file mode 100644 index 00000000000..15e21b18122 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/asm.go @@ -0,0 +1,41 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import "fmt" + +// Assemble converts insts into raw instructions suitable for loading +// into a BPF virtual machine. 
+// +// Currently, no optimization is attempted, the assembled program flow +// is exactly as provided. +func Assemble(insts []Instruction) ([]RawInstruction, error) { + ret := make([]RawInstruction, len(insts)) + var err error + for i, inst := range insts { + ret[i], err = inst.Assemble() + if err != nil { + return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err) + } + } + return ret, nil +} + +// Disassemble attempts to parse raw back into +// Instructions. Unrecognized RawInstructions are assumed to be an +// extension not implemented by this package, and are passed through +// unchanged to the output. The allDecoded value reports whether insts +// contains no RawInstructions. +func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) { + insts = make([]Instruction, len(raw)) + allDecoded = true + for i, r := range raw { + insts[i] = r.Disassemble() + if _, ok := insts[i].(RawInstruction); ok { + allDecoded = false + } + } + return insts, allDecoded +} diff --git a/vendor/golang.org/x/net/bpf/constants.go b/vendor/golang.org/x/net/bpf/constants.go new file mode 100644 index 00000000000..12f3ee835af --- /dev/null +++ b/vendor/golang.org/x/net/bpf/constants.go @@ -0,0 +1,222 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +// A Register is a register of the BPF virtual machine. +type Register uint16 + +const ( + // RegA is the accumulator register. RegA is always the + // destination register of ALU operations. + RegA Register = iota + // RegX is the indirection register, used by LoadIndirect + // operations. + RegX +) + +// An ALUOp is an arithmetic or logic operation. +type ALUOp uint16 + +// ALU binary operation types. +const ( + ALUOpAdd ALUOp = iota << 4 + ALUOpSub + ALUOpMul + ALUOpDiv + ALUOpOr + ALUOpAnd + ALUOpShiftLeft + ALUOpShiftRight + aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type. + ALUOpMod + ALUOpXor +) + +// A JumpTest is a comparison operator used in conditional jumps. +type JumpTest uint16 + +// Supported operators for conditional jumps. +// K can be RegX for JumpIfX +const ( + // K == A + JumpEqual JumpTest = iota + // K != A + JumpNotEqual + // K > A + JumpGreaterThan + // K < A + JumpLessThan + // K >= A + JumpGreaterOrEqual + // K <= A + JumpLessOrEqual + // K & A != 0 + JumpBitsSet + // K & A == 0 + JumpBitsNotSet +) + +// An Extension is a function call provided by the kernel that +// performs advanced operations that are expensive or impossible +// within the BPF virtual machine. +// +// Extensions are only implemented by the Linux kernel. +// +// TODO: should we prune this list? Some of these extensions seem +// either broken or near-impossible to use correctly, whereas other +// (len, random, ifindex) are quite useful. +type Extension int + +// Extension functions available in the Linux kernel. +const ( + // extOffset is the negative maximum number of instructions used + // to load instructions by overloading the K argument. + extOffset = -0x1000 + // ExtLen returns the length of the packet. + ExtLen Extension = 1 + // ExtProto returns the packet's L3 protocol type. + ExtProto Extension = 0 + // ExtType returns the packet's type (skb->pkt_type in the kernel) + // + // TODO: better documentation. How nice an API do we want to + // provide for these esoteric extensions? 
+ ExtType Extension = 4 + // ExtPayloadOffset returns the offset of the packet payload, or + // the first protocol header that the kernel does not know how to + // parse. + ExtPayloadOffset Extension = 52 + // ExtInterfaceIndex returns the index of the interface on which + // the packet was received. + ExtInterfaceIndex Extension = 8 + // ExtNetlinkAttr returns the netlink attribute of type X at + // offset A. + ExtNetlinkAttr Extension = 12 + // ExtNetlinkAttrNested returns the nested netlink attribute of + // type X at offset A. + ExtNetlinkAttrNested Extension = 16 + // ExtMark returns the packet's mark value. + ExtMark Extension = 20 + // ExtQueue returns the packet's assigned hardware queue. + ExtQueue Extension = 24 + // ExtLinkLayerType returns the packet's hardware address type + // (e.g. Ethernet, Infiniband). + ExtLinkLayerType Extension = 28 + // ExtRXHash returns the packets receive hash. + // + // TODO: figure out what this rxhash actually is. + ExtRXHash Extension = 32 + // ExtCPUID returns the ID of the CPU processing the current + // packet. + ExtCPUID Extension = 36 + // ExtVLANTag returns the packet's VLAN tag. + ExtVLANTag Extension = 44 + // ExtVLANTagPresent returns non-zero if the packet has a VLAN + // tag. + // + // TODO: I think this might be a lie: it reads bit 0x1000 of the + // VLAN header, which changed meaning in recent revisions of the + // spec - this extension may now return meaningless information. + ExtVLANTagPresent Extension = 48 + // ExtVLANProto returns 0x8100 if the frame has a VLAN header, + // 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some + // other value if no VLAN information is present. + ExtVLANProto Extension = 60 + // ExtRand returns a uniformly random uint32. + ExtRand Extension = 56 +) + +// The following gives names to various bit patterns used in opcode construction. 
+ +const ( + opMaskCls uint16 = 0x7 + // opClsLoad masks + opMaskLoadDest = 0x01 + opMaskLoadWidth = 0x18 + opMaskLoadMode = 0xe0 + // opClsALU & opClsJump + opMaskOperand = 0x08 + opMaskOperator = 0xf0 +) + +const ( + // +---------------+-----------------+---+---+---+ + // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 0 | + // +---------------+-----------------+---+---+---+ + opClsLoadA uint16 = iota + // +---------------+-----------------+---+---+---+ + // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 1 | + // +---------------+-----------------+---+---+---+ + opClsLoadX + // +---+---+---+---+---+---+---+---+ + // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | + // +---+---+---+---+---+---+---+---+ + opClsStoreA + // +---+---+---+---+---+---+---+---+ + // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | + // +---+---+---+---+---+---+---+---+ + opClsStoreX + // +---------------+-----------------+---+---+---+ + // | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 | + // +---------------+-----------------+---+---+---+ + opClsALU + // +-----------------------------+---+---+---+---+ + // | TestOperator (4b) | 0 | 1 | 0 | 1 | + // +-----------------------------+---+---+---+---+ + opClsJump + // +---+-------------------------+---+---+---+---+ + // | 0 | 0 | 0 | RetSrc (1b) | 0 | 1 | 1 | 0 | + // +---+-------------------------+---+---+---+---+ + opClsReturn + // +---+-------------------------+---+---+---+---+ + // | 0 | 0 | 0 | TXAorTAX (1b) | 0 | 1 | 1 | 1 | + // +---+-------------------------+---+---+---+---+ + opClsMisc +) + +const ( + opAddrModeImmediate uint16 = iota << 5 + opAddrModeAbsolute + opAddrModeIndirect + opAddrModeScratch + opAddrModePacketLen // actually an extension, not an addressing mode. + opAddrModeMemShift +) + +const ( + opLoadWidth4 uint16 = iota << 3 + opLoadWidth2 + opLoadWidth1 +) + +// Operand for ALU and Jump instructions +type opOperand uint16 + +// Supported operand sources. +const ( + opOperandConstant opOperand = iota << 3 + opOperandX +) + +// An jumpOp is a conditional jump condition. +type jumpOp uint16 + +// Supported jump conditions. +const ( + opJumpAlways jumpOp = iota << 4 + opJumpEqual + opJumpGT + opJumpGE + opJumpSet +) + +const ( + opRetSrcConstant uint16 = iota << 4 + opRetSrcA +) + +const ( + opMiscTAX = 0x00 + opMiscTXA = 0x80 +) diff --git a/vendor/golang.org/x/net/bpf/doc.go b/vendor/golang.org/x/net/bpf/doc.go new file mode 100644 index 00000000000..ae62feb5341 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/doc.go @@ -0,0 +1,82 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + +Package bpf implements marshaling and unmarshaling of programs for the +Berkeley Packet Filter virtual machine, and provides a Go implementation +of the virtual machine. + +BPF's main use is to specify a packet filter for network taps, so that +the kernel doesn't have to expensively copy every packet it sees to +userspace. However, it's been repurposed to other areas where running +user code in-kernel is needed. For example, Linux's seccomp uses BPF +to apply security policies to system calls. For simplicity, this +documentation refers only to packets, but other uses of BPF have their +own data payloads. + +BPF programs run in a restricted virtual machine. It has almost no +access to kernel functions, and while conditional branches are +allowed, they can only jump forwards, to guarantee that there are no +infinite loops. + +The virtual machine + +The BPF VM is an accumulator machine. 
Its main register, called +register A, is an implicit source and destination in all arithmetic +and logic operations. The machine also has 16 scratch registers for +temporary storage, and an indirection register (register X) for +indirect memory access. All registers are 32 bits wide. + +Each run of a BPF program is given one packet, which is placed in the +VM's read-only "main memory". LoadAbsolute and LoadIndirect +instructions can fetch up to 32 bits at a time into register A for +examination. + +The goal of a BPF program is to produce and return a verdict (uint32), +which tells the kernel what to do with the packet. In the context of +packet filtering, the returned value is the number of bytes of the +packet to forward to userspace, or 0 to ignore the packet. Other +contexts like seccomp define their own return values. + +In order to simplify programs, attempts to read past the end of the +packet terminate the program execution with a verdict of 0 (ignore +packet). This means that the vast majority of BPF programs don't need +to do any explicit bounds checking. + +In addition to the bytes of the packet, some BPF programs have access +to extensions, which are essentially calls to kernel utility +functions. Currently, the only extensions supported by this package +are the Linux packet filter extensions. + +Examples + +This packet filter selects all ARP packets. + + bpf.Assemble([]bpf.Instruction{ + // Load "EtherType" field from the ethernet header. + bpf.LoadAbsolute{Off: 12, Size: 2}, + // Skip over the next instruction if EtherType is not ARP. + bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1}, + // Verdict is "send up to 4k of the packet to userspace." + bpf.RetConstant{Val: 4096}, + // Verdict is "ignore packet." + bpf.RetConstant{Val: 0}, + }) + +This packet filter captures a random 1% sample of traffic. + + bpf.Assemble([]bpf.Instruction{ + // Get a 32-bit random number from the Linux kernel. + bpf.LoadExtension{Num: bpf.ExtRand}, + // 1% dice roll? + bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 2^32/100, SkipFalse: 1}, + // Capture. + bpf.RetConstant{Val: 4096}, + // Ignore. + bpf.RetConstant{Val: 0}, + }) + +*/ +package bpf // import "golang.org/x/net/bpf" diff --git a/vendor/golang.org/x/net/bpf/instructions.go b/vendor/golang.org/x/net/bpf/instructions.go new file mode 100644 index 00000000000..3cffcaa014e --- /dev/null +++ b/vendor/golang.org/x/net/bpf/instructions.go @@ -0,0 +1,726 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import "fmt" + +// An Instruction is one instruction executed by the BPF virtual +// machine. +type Instruction interface { + // Assemble assembles the Instruction into a RawInstruction. + Assemble() (RawInstruction, error) +} + +// A RawInstruction is a raw BPF virtual machine instruction. +type RawInstruction struct { + // Operation to execute. + Op uint16 + // For conditional jump instructions, the number of instructions + // to skip if the condition is true/false. + Jt uint8 + Jf uint8 + // Constant parameter. The meaning depends on the Op. + K uint32 +} + +// Assemble implements the Instruction Assemble method. +func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil } + +// Disassemble parses ri into an Instruction and returns it. If ri is +// not recognized by this package, ri itself is returned. 
+func (ri RawInstruction) Disassemble() Instruction { + switch ri.Op & opMaskCls { + case opClsLoadA, opClsLoadX: + reg := Register(ri.Op & opMaskLoadDest) + sz := 0 + switch ri.Op & opMaskLoadWidth { + case opLoadWidth4: + sz = 4 + case opLoadWidth2: + sz = 2 + case opLoadWidth1: + sz = 1 + default: + return ri + } + switch ri.Op & opMaskLoadMode { + case opAddrModeImmediate: + if sz != 4 { + return ri + } + return LoadConstant{Dst: reg, Val: ri.K} + case opAddrModeScratch: + if sz != 4 || ri.K > 15 { + return ri + } + return LoadScratch{Dst: reg, N: int(ri.K)} + case opAddrModeAbsolute: + if ri.K > extOffset+0xffffffff { + return LoadExtension{Num: Extension(-extOffset + ri.K)} + } + return LoadAbsolute{Size: sz, Off: ri.K} + case opAddrModeIndirect: + return LoadIndirect{Size: sz, Off: ri.K} + case opAddrModePacketLen: + if sz != 4 { + return ri + } + return LoadExtension{Num: ExtLen} + case opAddrModeMemShift: + return LoadMemShift{Off: ri.K} + default: + return ri + } + + case opClsStoreA: + if ri.Op != opClsStoreA || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegA, N: int(ri.K)} + + case opClsStoreX: + if ri.Op != opClsStoreX || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegX, N: int(ri.K)} + + case opClsALU: + switch op := ALUOp(ri.Op & opMaskOperator); op { + case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor: + switch operand := opOperand(ri.Op & opMaskOperand); operand { + case opOperandX: + return ALUOpX{Op: op} + case opOperandConstant: + return ALUOpConstant{Op: op, Val: ri.K} + default: + return ri + } + case aluOpNeg: + return NegateA{} + default: + return ri + } + + case opClsJump: + switch op := jumpOp(ri.Op & opMaskOperator); op { + case opJumpAlways: + return Jump{Skip: ri.K} + case opJumpEqual, opJumpGT, opJumpGE, opJumpSet: + cond, skipTrue, skipFalse := jumpOpToTest(op, ri.Jt, ri.Jf) + switch operand := opOperand(ri.Op & opMaskOperand); operand { + case opOperandX: + return JumpIfX{Cond: cond, SkipTrue: skipTrue, SkipFalse: skipFalse} + case opOperandConstant: + return JumpIf{Cond: cond, Val: ri.K, SkipTrue: skipTrue, SkipFalse: skipFalse} + default: + return ri + } + default: + return ri + } + + case opClsReturn: + switch ri.Op { + case opClsReturn | opRetSrcA: + return RetA{} + case opClsReturn | opRetSrcConstant: + return RetConstant{Val: ri.K} + default: + return ri + } + + case opClsMisc: + switch ri.Op { + case opClsMisc | opMiscTAX: + return TAX{} + case opClsMisc | opMiscTXA: + return TXA{} + default: + return ri + } + + default: + panic("unreachable") // switch is exhaustive on the bit pattern + } +} + +func jumpOpToTest(op jumpOp, skipTrue uint8, skipFalse uint8) (JumpTest, uint8, uint8) { + var test JumpTest + + // Decode "fake" jump conditions that don't appear in machine code + // Ensures the Assemble -> Disassemble stage recreates the same instructions + // See https://github.com/golang/go/issues/18470 + if skipTrue == 0 { + switch op { + case opJumpEqual: + test = JumpNotEqual + case opJumpGT: + test = JumpLessOrEqual + case opJumpGE: + test = JumpLessThan + case opJumpSet: + test = JumpBitsNotSet + } + + return test, skipFalse, 0 + } + + switch op { + case opJumpEqual: + test = JumpEqual + case opJumpGT: + test = JumpGreaterThan + case opJumpGE: + test = JumpGreaterOrEqual + case opJumpSet: + test = JumpBitsSet + } + + return test, skipTrue, skipFalse +} + +// LoadConstant loads Val into register Dst. 
+type LoadConstant struct { + Dst Register + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadConstant) Assemble() (RawInstruction, error) { + return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val) +} + +// String returns the instruction in assembler notation. +func (a LoadConstant) String() string { + switch a.Dst { + case RegA: + return fmt.Sprintf("ld #%d", a.Val) + case RegX: + return fmt.Sprintf("ldx #%d", a.Val) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadScratch loads scratch[N] into register Dst. +type LoadScratch struct { + Dst Register + N int // 0-15 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadScratch) Assemble() (RawInstruction, error) { + if a.N < 0 || a.N > 15 { + return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) + } + return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N)) +} + +// String returns the instruction in assembler notation. +func (a LoadScratch) String() string { + switch a.Dst { + case RegA: + return fmt.Sprintf("ld M[%d]", a.N) + case RegX: + return fmt.Sprintf("ldx M[%d]", a.N) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadAbsolute loads packet[Off:Off+Size] as an integer value into +// register A. +type LoadAbsolute struct { + Off uint32 + Size int // 1, 2 or 4 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadAbsolute) Assemble() (RawInstruction, error) { + return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off) +} + +// String returns the instruction in assembler notation. +func (a LoadAbsolute) String() string { + switch a.Size { + case 1: // byte + return fmt.Sprintf("ldb [%d]", a.Off) + case 2: // half word + return fmt.Sprintf("ldh [%d]", a.Off) + case 4: // word + if a.Off > extOffset+0xffffffff { + return LoadExtension{Num: Extension(a.Off + 0x1000)}.String() + } + return fmt.Sprintf("ld [%d]", a.Off) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value +// into register A. +type LoadIndirect struct { + Off uint32 + Size int // 1, 2 or 4 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadIndirect) Assemble() (RawInstruction, error) { + return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off) +} + +// String returns the instruction in assembler notation. +func (a LoadIndirect) String() string { + switch a.Size { + case 1: // byte + return fmt.Sprintf("ldb [x + %d]", a.Off) + case 2: // half word + return fmt.Sprintf("ldh [x + %d]", a.Off) + case 4: // word + return fmt.Sprintf("ld [x + %d]", a.Off) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadMemShift multiplies the first 4 bits of the byte at packet[Off] +// by 4 and stores the result in register X. +// +// This instruction is mainly useful to load into X the length of an +// IPv4 packet header in a single instruction, rather than have to do +// the arithmetic on the header's first byte by hand. +type LoadMemShift struct { + Off uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadMemShift) Assemble() (RawInstruction, error) { + return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off) +} + +// String returns the instruction in assembler notation. +func (a LoadMemShift) String() string { + return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off) +} + +// LoadExtension invokes a linux-specific extension and stores the +// result in register A. 
+type LoadExtension struct { + Num Extension +} + +// Assemble implements the Instruction Assemble method. +func (a LoadExtension) Assemble() (RawInstruction, error) { + if a.Num == ExtLen { + return assembleLoad(RegA, 4, opAddrModePacketLen, 0) + } + return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num)) +} + +// String returns the instruction in assembler notation. +func (a LoadExtension) String() string { + switch a.Num { + case ExtLen: + return "ld #len" + case ExtProto: + return "ld #proto" + case ExtType: + return "ld #type" + case ExtPayloadOffset: + return "ld #poff" + case ExtInterfaceIndex: + return "ld #ifidx" + case ExtNetlinkAttr: + return "ld #nla" + case ExtNetlinkAttrNested: + return "ld #nlan" + case ExtMark: + return "ld #mark" + case ExtQueue: + return "ld #queue" + case ExtLinkLayerType: + return "ld #hatype" + case ExtRXHash: + return "ld #rxhash" + case ExtCPUID: + return "ld #cpu" + case ExtVLANTag: + return "ld #vlan_tci" + case ExtVLANTagPresent: + return "ld #vlan_avail" + case ExtVLANProto: + return "ld #vlan_tpid" + case ExtRand: + return "ld #rand" + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// StoreScratch stores register Src into scratch[N]. +type StoreScratch struct { + Src Register + N int // 0-15 +} + +// Assemble implements the Instruction Assemble method. +func (a StoreScratch) Assemble() (RawInstruction, error) { + if a.N < 0 || a.N > 15 { + return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) + } + var op uint16 + switch a.Src { + case RegA: + op = opClsStoreA + case RegX: + op = opClsStoreX + default: + return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src) + } + + return RawInstruction{ + Op: op, + K: uint32(a.N), + }, nil +} + +// String returns the instruction in assembler notation. +func (a StoreScratch) String() string { + switch a.Src { + case RegA: + return fmt.Sprintf("st M[%d]", a.N) + case RegX: + return fmt.Sprintf("stx M[%d]", a.N) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// ALUOpConstant executes A = A Val. +type ALUOpConstant struct { + Op ALUOp + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a ALUOpConstant) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | uint16(opOperandConstant) | uint16(a.Op), + K: a.Val, + }, nil +} + +// String returns the instruction in assembler notation. +func (a ALUOpConstant) String() string { + switch a.Op { + case ALUOpAdd: + return fmt.Sprintf("add #%d", a.Val) + case ALUOpSub: + return fmt.Sprintf("sub #%d", a.Val) + case ALUOpMul: + return fmt.Sprintf("mul #%d", a.Val) + case ALUOpDiv: + return fmt.Sprintf("div #%d", a.Val) + case ALUOpMod: + return fmt.Sprintf("mod #%d", a.Val) + case ALUOpAnd: + return fmt.Sprintf("and #%d", a.Val) + case ALUOpOr: + return fmt.Sprintf("or #%d", a.Val) + case ALUOpXor: + return fmt.Sprintf("xor #%d", a.Val) + case ALUOpShiftLeft: + return fmt.Sprintf("lsh #%d", a.Val) + case ALUOpShiftRight: + return fmt.Sprintf("rsh #%d", a.Val) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// ALUOpX executes A = A X +type ALUOpX struct { + Op ALUOp +} + +// Assemble implements the Instruction Assemble method. +func (a ALUOpX) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | uint16(opOperandX) | uint16(a.Op), + }, nil +} + +// String returns the instruction in assembler notation. 
+func (a ALUOpX) String() string { + switch a.Op { + case ALUOpAdd: + return "add x" + case ALUOpSub: + return "sub x" + case ALUOpMul: + return "mul x" + case ALUOpDiv: + return "div x" + case ALUOpMod: + return "mod x" + case ALUOpAnd: + return "and x" + case ALUOpOr: + return "or x" + case ALUOpXor: + return "xor x" + case ALUOpShiftLeft: + return "lsh x" + case ALUOpShiftRight: + return "rsh x" + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// NegateA executes A = -A. +type NegateA struct{} + +// Assemble implements the Instruction Assemble method. +func (a NegateA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | uint16(aluOpNeg), + }, nil +} + +// String returns the instruction in assembler notation. +func (a NegateA) String() string { + return fmt.Sprintf("neg") +} + +// Jump skips the following Skip instructions in the program. +type Jump struct { + Skip uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a Jump) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsJump | uint16(opJumpAlways), + K: a.Skip, + }, nil +} + +// String returns the instruction in assembler notation. +func (a Jump) String() string { + return fmt.Sprintf("ja %d", a.Skip) +} + +// JumpIf skips the following Skip instructions in the program if A +// Val is true. +type JumpIf struct { + Cond JumpTest + Val uint32 + SkipTrue uint8 + SkipFalse uint8 +} + +// Assemble implements the Instruction Assemble method. +func (a JumpIf) Assemble() (RawInstruction, error) { + return jumpToRaw(a.Cond, opOperandConstant, a.Val, a.SkipTrue, a.SkipFalse) +} + +// String returns the instruction in assembler notation. +func (a JumpIf) String() string { + return jumpToString(a.Cond, fmt.Sprintf("#%d", a.Val), a.SkipTrue, a.SkipFalse) +} + +// JumpIfX skips the following Skip instructions in the program if A +// X is true. +type JumpIfX struct { + Cond JumpTest + SkipTrue uint8 + SkipFalse uint8 +} + +// Assemble implements the Instruction Assemble method. +func (a JumpIfX) Assemble() (RawInstruction, error) { + return jumpToRaw(a.Cond, opOperandX, 0, a.SkipTrue, a.SkipFalse) +} + +// String returns the instruction in assembler notation. 
+func (a JumpIfX) String() string { + return jumpToString(a.Cond, "x", a.SkipTrue, a.SkipFalse) +} + +// jumpToRaw assembles a jump instruction into a RawInstruction +func jumpToRaw(test JumpTest, operand opOperand, k uint32, skipTrue, skipFalse uint8) (RawInstruction, error) { + var ( + cond jumpOp + flip bool + ) + switch test { + case JumpEqual: + cond = opJumpEqual + case JumpNotEqual: + cond, flip = opJumpEqual, true + case JumpGreaterThan: + cond = opJumpGT + case JumpLessThan: + cond, flip = opJumpGE, true + case JumpGreaterOrEqual: + cond = opJumpGE + case JumpLessOrEqual: + cond, flip = opJumpGT, true + case JumpBitsSet: + cond = opJumpSet + case JumpBitsNotSet: + cond, flip = opJumpSet, true + default: + return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", test) + } + jt, jf := skipTrue, skipFalse + if flip { + jt, jf = jf, jt + } + return RawInstruction{ + Op: opClsJump | uint16(cond) | uint16(operand), + Jt: jt, + Jf: jf, + K: k, + }, nil +} + +// jumpToString converts a jump instruction to assembler notation +func jumpToString(cond JumpTest, operand string, skipTrue, skipFalse uint8) string { + switch cond { + // K == A + case JumpEqual: + return conditionalJump(operand, skipTrue, skipFalse, "jeq", "jneq") + // K != A + case JumpNotEqual: + return fmt.Sprintf("jneq %s,%d", operand, skipTrue) + // K > A + case JumpGreaterThan: + return conditionalJump(operand, skipTrue, skipFalse, "jgt", "jle") + // K < A + case JumpLessThan: + return fmt.Sprintf("jlt %s,%d", operand, skipTrue) + // K >= A + case JumpGreaterOrEqual: + return conditionalJump(operand, skipTrue, skipFalse, "jge", "jlt") + // K <= A + case JumpLessOrEqual: + return fmt.Sprintf("jle %s,%d", operand, skipTrue) + // K & A != 0 + case JumpBitsSet: + if skipFalse > 0 { + return fmt.Sprintf("jset %s,%d,%d", operand, skipTrue, skipFalse) + } + return fmt.Sprintf("jset %s,%d", operand, skipTrue) + // K & A == 0, there is no assembler instruction for JumpBitNotSet, use JumpBitSet and invert skips + case JumpBitsNotSet: + return jumpToString(JumpBitsSet, operand, skipFalse, skipTrue) + default: + return fmt.Sprintf("unknown JumpTest %#v", cond) + } +} + +func conditionalJump(operand string, skipTrue, skipFalse uint8, positiveJump, negativeJump string) string { + if skipTrue > 0 { + if skipFalse > 0 { + return fmt.Sprintf("%s %s,%d,%d", positiveJump, operand, skipTrue, skipFalse) + } + return fmt.Sprintf("%s %s,%d", positiveJump, operand, skipTrue) + } + return fmt.Sprintf("%s %s,%d", negativeJump, operand, skipFalse) +} + +// RetA exits the BPF program, returning the value of register A. +type RetA struct{} + +// Assemble implements the Instruction Assemble method. +func (a RetA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsReturn | opRetSrcA, + }, nil +} + +// String returns the instruction in assembler notation. +func (a RetA) String() string { + return fmt.Sprintf("ret a") +} + +// RetConstant exits the BPF program, returning a constant value. +type RetConstant struct { + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a RetConstant) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsReturn | opRetSrcConstant, + K: a.Val, + }, nil +} + +// String returns the instruction in assembler notation. +func (a RetConstant) String() string { + return fmt.Sprintf("ret #%d", a.Val) +} + +// TXA copies the value of register X to register A. +type TXA struct{} + +// Assemble implements the Instruction Assemble method. 
+func (a TXA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsMisc | opMiscTXA, + }, nil +} + +// String returns the instruction in assembler notation. +func (a TXA) String() string { + return fmt.Sprintf("txa") +} + +// TAX copies the value of register A to register X. +type TAX struct{} + +// Assemble implements the Instruction Assemble method. +func (a TAX) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsMisc | opMiscTAX, + }, nil +} + +// String returns the instruction in assembler notation. +func (a TAX) String() string { + return fmt.Sprintf("tax") +} + +func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) { + var ( + cls uint16 + sz uint16 + ) + switch dst { + case RegA: + cls = opClsLoadA + case RegX: + cls = opClsLoadX + default: + return RawInstruction{}, fmt.Errorf("invalid target register %v", dst) + } + switch loadSize { + case 1: + sz = opLoadWidth1 + case 2: + sz = opLoadWidth2 + case 4: + sz = opLoadWidth4 + default: + return RawInstruction{}, fmt.Errorf("invalid load byte length %d", sz) + } + return RawInstruction{ + Op: cls | sz | mode, + K: k, + }, nil +} diff --git a/vendor/golang.org/x/net/bpf/setter.go b/vendor/golang.org/x/net/bpf/setter.go new file mode 100644 index 00000000000..43e35f0ac24 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/setter.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +// A Setter is a type which can attach a compiled BPF filter to itself. +type Setter interface { + SetBPF(filter []RawInstruction) error +} diff --git a/vendor/golang.org/x/net/bpf/vm.go b/vendor/golang.org/x/net/bpf/vm.go new file mode 100644 index 00000000000..73f57f1f72e --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm.go @@ -0,0 +1,150 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import ( + "errors" + "fmt" +) + +// A VM is an emulated BPF virtual machine. +type VM struct { + filter []Instruction +} + +// NewVM returns a new VM using the input BPF program. 
+func NewVM(filter []Instruction) (*VM, error) { + if len(filter) == 0 { + return nil, errors.New("one or more Instructions must be specified") + } + + for i, ins := range filter { + check := len(filter) - (i + 1) + switch ins := ins.(type) { + // Check for out-of-bounds jumps in instructions + case Jump: + if check <= int(ins.Skip) { + return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip) + } + case JumpIf: + if check <= int(ins.SkipTrue) { + return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue) + } + if check <= int(ins.SkipFalse) { + return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse) + } + case JumpIfX: + if check <= int(ins.SkipTrue) { + return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue) + } + if check <= int(ins.SkipFalse) { + return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse) + } + // Check for division or modulus by zero + case ALUOpConstant: + if ins.Val != 0 { + break + } + + switch ins.Op { + case ALUOpDiv, ALUOpMod: + return nil, errors.New("cannot divide by zero using ALUOpConstant") + } + // Check for unknown extensions + case LoadExtension: + switch ins.Num { + case ExtLen: + default: + return nil, fmt.Errorf("extension %d not implemented", ins.Num) + } + } + } + + // Make sure last instruction is a return instruction + switch filter[len(filter)-1].(type) { + case RetA, RetConstant: + default: + return nil, errors.New("BPF program must end with RetA or RetConstant") + } + + // Though our VM works using disassembled instructions, we + // attempt to assemble the input filter anyway to ensure it is compatible + // with an operating system VM. + _, err := Assemble(filter) + + return &VM{ + filter: filter, + }, err +} + +// Run runs the VM's BPF program against the input bytes. +// Run returns the number of bytes accepted by the BPF program, and any errors +// which occurred while processing the program. 
+func (v *VM) Run(in []byte) (int, error) { + var ( + // Registers of the virtual machine + regA uint32 + regX uint32 + regScratch [16]uint32 + + // OK is true if the program should continue processing the next + // instruction, or false if not, causing the loop to break + ok = true + ) + + // TODO(mdlayher): implement: + // - NegateA: + // - would require a change from uint32 registers to int32 + // registers + + // TODO(mdlayher): add interop tests that check signedness of ALU + // operations against kernel implementation, and make sure Go + // implementation matches behavior + + for i := 0; i < len(v.filter) && ok; i++ { + ins := v.filter[i] + + switch ins := ins.(type) { + case ALUOpConstant: + regA = aluOpConstant(ins, regA) + case ALUOpX: + regA, ok = aluOpX(ins, regA, regX) + case Jump: + i += int(ins.Skip) + case JumpIf: + jump := jumpIf(ins, regA) + i += jump + case JumpIfX: + jump := jumpIfX(ins, regA, regX) + i += jump + case LoadAbsolute: + regA, ok = loadAbsolute(ins, in) + case LoadConstant: + regA, regX = loadConstant(ins, regA, regX) + case LoadExtension: + regA = loadExtension(ins, in) + case LoadIndirect: + regA, ok = loadIndirect(ins, in, regX) + case LoadMemShift: + regX, ok = loadMemShift(ins, in) + case LoadScratch: + regA, regX = loadScratch(ins, regScratch, regA, regX) + case RetA: + return int(regA), nil + case RetConstant: + return int(ins.Val), nil + case StoreScratch: + regScratch = storeScratch(ins, regScratch, regA, regX) + case TAX: + regX = regA + case TXA: + regA = regX + default: + return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins) + } + } + + return 0, nil +} diff --git a/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/golang.org/x/net/bpf/vm_instructions.go new file mode 100644 index 00000000000..cf8947c3327 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_instructions.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bpf + +import ( + "encoding/binary" + "fmt" +) + +func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 { + return aluOpCommon(ins.Op, regA, ins.Val) +} + +func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) { + // Guard against division or modulus by zero by terminating + // the program, as the OS BPF VM does + if regX == 0 { + switch ins.Op { + case ALUOpDiv, ALUOpMod: + return 0, false + } + } + + return aluOpCommon(ins.Op, regA, regX), true +} + +func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 { + switch op { + case ALUOpAdd: + return regA + value + case ALUOpSub: + return regA - value + case ALUOpMul: + return regA * value + case ALUOpDiv: + // Division by zero not permitted by NewVM and aluOpX checks + return regA / value + case ALUOpOr: + return regA | value + case ALUOpAnd: + return regA & value + case ALUOpShiftLeft: + return regA << value + case ALUOpShiftRight: + return regA >> value + case ALUOpMod: + // Modulus by zero not permitted by NewVM and aluOpX checks + return regA % value + case ALUOpXor: + return regA ^ value + default: + return regA + } +} + +func jumpIf(ins JumpIf, regA uint32) int { + return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, ins.Val) +} + +func jumpIfX(ins JumpIfX, regA uint32, regX uint32) int { + return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, regX) +} + +func jumpIfCommon(cond JumpTest, skipTrue, skipFalse uint8, regA uint32, value uint32) int { + var ok bool + + switch cond { + case JumpEqual: + ok = regA == value + case JumpNotEqual: + ok = regA != value + case JumpGreaterThan: + ok = regA > value + case JumpLessThan: + ok = regA < value + case JumpGreaterOrEqual: + ok = regA >= value + case JumpLessOrEqual: + ok = regA <= value + case JumpBitsSet: + ok = (regA & value) != 0 + case JumpBitsNotSet: + ok = (regA & value) == 0 + } + + if ok { + return int(skipTrue) + } + + return int(skipFalse) +} + +func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) { + offset := int(ins.Off) + size := int(ins.Size) + + return loadCommon(in, offset, size) +} + +func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) { + switch ins.Dst { + case RegA: + regA = ins.Val + case RegX: + regX = ins.Val + } + + return regA, regX +} + +func loadExtension(ins LoadExtension, in []byte) uint32 { + switch ins.Num { + case ExtLen: + return uint32(len(in)) + default: + panic(fmt.Sprintf("unimplemented extension: %d", ins.Num)) + } +} + +func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) { + offset := int(ins.Off) + int(regX) + size := int(ins.Size) + + return loadCommon(in, offset, size) +} + +func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) { + offset := int(ins.Off) + + // Size of LoadMemShift is always 1 byte + if !inBounds(len(in), offset, 1) { + return 0, false + } + + // Mask off high 4 bits and multiply low 4 bits by 4 + return uint32(in[offset]&0x0f) * 4, true +} + +func inBounds(inLen int, offset int, size int) bool { + return offset+size <= inLen +} + +func loadCommon(in []byte, offset int, size int) (uint32, bool) { + if !inBounds(len(in), offset, size) { + return 0, false + } + + switch size { + case 1: + return uint32(in[offset]), true + case 2: + return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true + case 4: + return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true + default: + panic(fmt.Sprintf("invalid load size: %d", size)) + } +} + +func loadScratch(ins LoadScratch, regScratch [16]uint32, 
regA uint32, regX uint32) (uint32, uint32) { + switch ins.Dst { + case RegA: + regA = regScratch[ins.N] + case RegX: + regX = regScratch[ins.N] + } + + return regA, regX +} + +func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 { + switch ins.Src { + case RegA: + regScratch[ins.N] = regA + case RegX: + regScratch[ins.N] = regX + } + + return regScratch +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 76a92e0ca6b..2482f7bf92f 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -2525,6 +2525,7 @@ func strSliceContains(ss []string, s string) bool { type erringRoundTripper struct{ err error } +func (rt erringRoundTripper) RoundTripErr() error { return rt.err } func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } // gzipReader wraps a response body so it can lazily diff --git a/vendor/golang.org/x/net/internal/iana/const.go b/vendor/golang.org/x/net/internal/iana/const.go new file mode 100644 index 00000000000..cea712fac04 --- /dev/null +++ b/vendor/golang.org/x/net/internal/iana/const.go @@ -0,0 +1,223 @@ +// go generate gen.go +// Code generated by the command above; DO NOT EDIT. + +// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA). +package iana // import "golang.org/x/net/internal/iana" + +// Differentiated Services Field Codepoints (DSCP), Updated: 2018-05-04 +const ( + DiffServCS0 = 0x00 // CS0 + DiffServCS1 = 0x20 // CS1 + DiffServCS2 = 0x40 // CS2 + DiffServCS3 = 0x60 // CS3 + DiffServCS4 = 0x80 // CS4 + DiffServCS5 = 0xa0 // CS5 + DiffServCS6 = 0xc0 // CS6 + DiffServCS7 = 0xe0 // CS7 + DiffServAF11 = 0x28 // AF11 + DiffServAF12 = 0x30 // AF12 + DiffServAF13 = 0x38 // AF13 + DiffServAF21 = 0x48 // AF21 + DiffServAF22 = 0x50 // AF22 + DiffServAF23 = 0x58 // AF23 + DiffServAF31 = 0x68 // AF31 + DiffServAF32 = 0x70 // AF32 + DiffServAF33 = 0x78 // AF33 + DiffServAF41 = 0x88 // AF41 + DiffServAF42 = 0x90 // AF42 + DiffServAF43 = 0x98 // AF43 + DiffServEF = 0xb8 // EF + DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT + NotECNTransport = 0x00 // Not-ECT (Not ECN-Capable Transport) + ECNTransport1 = 0x01 // ECT(1) (ECN-Capable Transport(1)) + ECNTransport0 = 0x02 // ECT(0) (ECN-Capable Transport(0)) + CongestionExperienced = 0x03 // CE (Congestion Experienced) +) + +// Protocol Numbers, Updated: 2017-10-13 +const ( + ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number + ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option + ProtocolICMP = 1 // Internet Control Message + ProtocolIGMP = 2 // Internet Group Management + ProtocolGGP = 3 // Gateway-to-Gateway + ProtocolIPv4 = 4 // IPv4 encapsulation + ProtocolST = 5 // Stream + ProtocolTCP = 6 // Transmission Control + ProtocolCBT = 7 // CBT + ProtocolEGP = 8 // Exterior Gateway Protocol + ProtocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP) + ProtocolBBNRCCMON = 10 // BBN RCC Monitoring + ProtocolNVPII = 11 // Network Voice Protocol + ProtocolPUP = 12 // PUP + ProtocolEMCON = 14 // EMCON + ProtocolXNET = 15 // Cross Net Debugger + ProtocolCHAOS = 16 // Chaos + ProtocolUDP = 17 // User Datagram + ProtocolMUX = 18 // Multiplexing + ProtocolDCNMEAS = 19 // DCN Measurement Subsystems + ProtocolHMP = 20 // Host Monitoring + ProtocolPRM = 21 // Packet Radio Measurement + ProtocolXNSIDP = 22 // XEROX NS IDP + ProtocolTRUNK1 = 23 // Trunk-1 + ProtocolTRUNK2 = 24 // Trunk-2 
+ ProtocolLEAF1 = 25 // Leaf-1 + ProtocolLEAF2 = 26 // Leaf-2 + ProtocolRDP = 27 // Reliable Data Protocol + ProtocolIRTP = 28 // Internet Reliable Transaction + ProtocolISOTP4 = 29 // ISO Transport Protocol Class 4 + ProtocolNETBLT = 30 // Bulk Data Transfer Protocol + ProtocolMFENSP = 31 // MFE Network Services Protocol + ProtocolMERITINP = 32 // MERIT Internodal Protocol + ProtocolDCCP = 33 // Datagram Congestion Control Protocol + Protocol3PC = 34 // Third Party Connect Protocol + ProtocolIDPR = 35 // Inter-Domain Policy Routing Protocol + ProtocolXTP = 36 // XTP + ProtocolDDP = 37 // Datagram Delivery Protocol + ProtocolIDPRCMTP = 38 // IDPR Control Message Transport Proto + ProtocolTPPP = 39 // TP++ Transport Protocol + ProtocolIL = 40 // IL Transport Protocol + ProtocolIPv6 = 41 // IPv6 encapsulation + ProtocolSDRP = 42 // Source Demand Routing Protocol + ProtocolIPv6Route = 43 // Routing Header for IPv6 + ProtocolIPv6Frag = 44 // Fragment Header for IPv6 + ProtocolIDRP = 45 // Inter-Domain Routing Protocol + ProtocolRSVP = 46 // Reservation Protocol + ProtocolGRE = 47 // Generic Routing Encapsulation + ProtocolDSR = 48 // Dynamic Source Routing Protocol + ProtocolBNA = 49 // BNA + ProtocolESP = 50 // Encap Security Payload + ProtocolAH = 51 // Authentication Header + ProtocolINLSP = 52 // Integrated Net Layer Security TUBA + ProtocolNARP = 54 // NBMA Address Resolution Protocol + ProtocolMOBILE = 55 // IP Mobility + ProtocolTLSP = 56 // Transport Layer Security Protocol using Kryptonet key management + ProtocolSKIP = 57 // SKIP + ProtocolIPv6ICMP = 58 // ICMP for IPv6 + ProtocolIPv6NoNxt = 59 // No Next Header for IPv6 + ProtocolIPv6Opts = 60 // Destination Options for IPv6 + ProtocolCFTP = 62 // CFTP + ProtocolSATEXPAK = 64 // SATNET and Backroom EXPAK + ProtocolKRYPTOLAN = 65 // Kryptolan + ProtocolRVD = 66 // MIT Remote Virtual Disk Protocol + ProtocolIPPC = 67 // Internet Pluribus Packet Core + ProtocolSATMON = 69 // SATNET Monitoring + ProtocolVISA = 70 // VISA Protocol + ProtocolIPCV = 71 // Internet Packet Core Utility + ProtocolCPNX = 72 // Computer Protocol Network Executive + ProtocolCPHB = 73 // Computer Protocol Heart Beat + ProtocolWSN = 74 // Wang Span Network + ProtocolPVP = 75 // Packet Video Protocol + ProtocolBRSATMON = 76 // Backroom SATNET Monitoring + ProtocolSUNND = 77 // SUN ND PROTOCOL-Temporary + ProtocolWBMON = 78 // WIDEBAND Monitoring + ProtocolWBEXPAK = 79 // WIDEBAND EXPAK + ProtocolISOIP = 80 // ISO Internet Protocol + ProtocolVMTP = 81 // VMTP + ProtocolSECUREVMTP = 82 // SECURE-VMTP + ProtocolVINES = 83 // VINES + ProtocolTTP = 84 // Transaction Transport Protocol + ProtocolIPTM = 84 // Internet Protocol Traffic Manager + ProtocolNSFNETIGP = 85 // NSFNET-IGP + ProtocolDGP = 86 // Dissimilar Gateway Protocol + ProtocolTCF = 87 // TCF + ProtocolEIGRP = 88 // EIGRP + ProtocolOSPFIGP = 89 // OSPFIGP + ProtocolSpriteRPC = 90 // Sprite RPC Protocol + ProtocolLARP = 91 // Locus Address Resolution Protocol + ProtocolMTP = 92 // Multicast Transport Protocol + ProtocolAX25 = 93 // AX.25 Frames + ProtocolIPIP = 94 // IP-within-IP Encapsulation Protocol + ProtocolSCCSP = 96 // Semaphore Communications Sec. Pro. 
+ ProtocolETHERIP = 97 // Ethernet-within-IP Encapsulation + ProtocolENCAP = 98 // Encapsulation Header + ProtocolGMTP = 100 // GMTP + ProtocolIFMP = 101 // Ipsilon Flow Management Protocol + ProtocolPNNI = 102 // PNNI over IP + ProtocolPIM = 103 // Protocol Independent Multicast + ProtocolARIS = 104 // ARIS + ProtocolSCPS = 105 // SCPS + ProtocolQNX = 106 // QNX + ProtocolAN = 107 // Active Networks + ProtocolIPComp = 108 // IP Payload Compression Protocol + ProtocolSNP = 109 // Sitara Networks Protocol + ProtocolCompaqPeer = 110 // Compaq Peer Protocol + ProtocolIPXinIP = 111 // IPX in IP + ProtocolVRRP = 112 // Virtual Router Redundancy Protocol + ProtocolPGM = 113 // PGM Reliable Transport Protocol + ProtocolL2TP = 115 // Layer Two Tunneling Protocol + ProtocolDDX = 116 // D-II Data Exchange (DDX) + ProtocolIATP = 117 // Interactive Agent Transfer Protocol + ProtocolSTP = 118 // Schedule Transfer Protocol + ProtocolSRP = 119 // SpectraLink Radio Protocol + ProtocolUTI = 120 // UTI + ProtocolSMP = 121 // Simple Message Protocol + ProtocolPTP = 123 // Performance Transparency Protocol + ProtocolISIS = 124 // ISIS over IPv4 + ProtocolFIRE = 125 // FIRE + ProtocolCRTP = 126 // Combat Radio Transport Protocol + ProtocolCRUDP = 127 // Combat Radio User Datagram + ProtocolSSCOPMCE = 128 // SSCOPMCE + ProtocolIPLT = 129 // IPLT + ProtocolSPS = 130 // Secure Packet Shield + ProtocolPIPE = 131 // Private IP Encapsulation within IP + ProtocolSCTP = 132 // Stream Control Transmission Protocol + ProtocolFC = 133 // Fibre Channel + ProtocolRSVPE2EIGNORE = 134 // RSVP-E2E-IGNORE + ProtocolMobilityHeader = 135 // Mobility Header + ProtocolUDPLite = 136 // UDPLite + ProtocolMPLSinIP = 137 // MPLS-in-IP + ProtocolMANET = 138 // MANET Protocols + ProtocolHIP = 139 // Host Identity Protocol + ProtocolShim6 = 140 // Shim6 Protocol + ProtocolWESP = 141 // Wrapped Encapsulating Security Payload + ProtocolROHC = 142 // Robust Header Compression + ProtocolReserved = 255 // Reserved +) + +// Address Family Numbers, Updated: 2018-04-02 +const ( + AddrFamilyIPv4 = 1 // IP (IP version 4) + AddrFamilyIPv6 = 2 // IP6 (IP version 6) + AddrFamilyNSAP = 3 // NSAP + AddrFamilyHDLC = 4 // HDLC (8-bit multidrop) + AddrFamilyBBN1822 = 5 // BBN 1822 + AddrFamily802 = 6 // 802 (includes all 802 media plus Ethernet "canonical format") + AddrFamilyE163 = 7 // E.163 + AddrFamilyE164 = 8 // E.164 (SMDS, Frame Relay, ATM) + AddrFamilyF69 = 9 // F.69 (Telex) + AddrFamilyX121 = 10 // X.121 (X.25, Frame Relay) + AddrFamilyIPX = 11 // IPX + AddrFamilyAppletalk = 12 // Appletalk + AddrFamilyDecnetIV = 13 // Decnet IV + AddrFamilyBanyanVines = 14 // Banyan Vines + AddrFamilyE164withSubaddress = 15 // E.164 with NSAP format subaddress + AddrFamilyDNS = 16 // DNS (Domain Name System) + AddrFamilyDistinguishedName = 17 // Distinguished Name + AddrFamilyASNumber = 18 // AS Number + AddrFamilyXTPoverIPv4 = 19 // XTP over IP version 4 + AddrFamilyXTPoverIPv6 = 20 // XTP over IP version 6 + AddrFamilyXTPnativemodeXTP = 21 // XTP native mode XTP + AddrFamilyFibreChannelWorldWidePortName = 22 // Fibre Channel World-Wide Port Name + AddrFamilyFibreChannelWorldWideNodeName = 23 // Fibre Channel World-Wide Node Name + AddrFamilyGWID = 24 // GWID + AddrFamilyL2VPN = 25 // AFI for L2VPN information + AddrFamilyMPLSTPSectionEndpointID = 26 // MPLS-TP Section Endpoint Identifier + AddrFamilyMPLSTPLSPEndpointID = 27 // MPLS-TP LSP Endpoint Identifier + AddrFamilyMPLSTPPseudowireEndpointID = 28 // MPLS-TP Pseudowire Endpoint Identifier + 
AddrFamilyMTIPv4 = 29 // MT IP: Multi-Topology IP version 4 + AddrFamilyMTIPv6 = 30 // MT IPv6: Multi-Topology IP version 6 + AddrFamilyEIGRPCommonServiceFamily = 16384 // EIGRP Common Service Family + AddrFamilyEIGRPIPv4ServiceFamily = 16385 // EIGRP IPv4 Service Family + AddrFamilyEIGRPIPv6ServiceFamily = 16386 // EIGRP IPv6 Service Family + AddrFamilyLISPCanonicalAddressFormat = 16387 // LISP Canonical Address Format (LCAF) + AddrFamilyBGPLS = 16388 // BGP-LS + AddrFamily48bitMAC = 16389 // 48-bit MAC + AddrFamily64bitMAC = 16390 // 64-bit MAC + AddrFamilyOUI = 16391 // OUI + AddrFamilyMACFinal24bits = 16392 // MAC/24 + AddrFamilyMACFinal40bits = 16393 // MAC/40 + AddrFamilyIPv6Initial64bits = 16394 // IPv6/64 + AddrFamilyRBridgePortID = 16395 // RBridge Port ID + AddrFamilyTRILLNickname = 16396 // TRILL Nickname +) diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr.go b/vendor/golang.org/x/net/internal/socket/cmsghdr.go new file mode 100644 index 00000000000..0a73e277e09 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +func (h *cmsghdr) len() int { return int(h.Len) } +func (h *cmsghdr) lvl() int { return int(h.Level) } +func (h *cmsghdr) typ() int { return int(h.Type) } diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go new file mode 100644 index 00000000000..14dbb3ad42d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go @@ -0,0 +1,13 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd netbsd openbsd + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go new file mode 100644 index 00000000000..bac66811ddd --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go new file mode 100644 index 00000000000..27be0efaca9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint64(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go new file mode 100644 index 00000000000..7dedd430eb0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go new file mode 100644 index 00000000000..e581011b055 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type cmsghdr struct{} + +const sizeofCmsghdr = 0 + +func (h *cmsghdr) len() int { return 0 } +func (h *cmsghdr) lvl() int { return 0 } +func (h *cmsghdr) typ() int { return 0 } + +func (h *cmsghdr) set(l, lvl, typ int) {} diff --git a/vendor/golang.org/x/net/internal/socket/empty.s b/vendor/golang.org/x/net/internal/socket/empty.s new file mode 100644 index 00000000000..bff0231c7d5 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/empty.s @@ -0,0 +1,7 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,go1.12 + +// This exists solely so we can linkname in symbols from syscall. diff --git a/vendor/golang.org/x/net/internal/socket/error_unix.go b/vendor/golang.org/x/net/internal/socket/error_unix.go new file mode 100644 index 00000000000..f14872d3d35 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/error_unix.go @@ -0,0 +1,31 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +import "syscall" + +var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. +func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.EAGAIN: + return errEAGAIN + case syscall.EINVAL: + return errEINVAL + case syscall.ENOENT: + return errENOENT + } + return errno +} diff --git a/vendor/golang.org/x/net/internal/socket/error_windows.go b/vendor/golang.org/x/net/internal/socket/error_windows.go new file mode 100644 index 00000000000..6a6379a8b07 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/error_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import "syscall" + +var ( + errERROR_IO_PENDING error = syscall.ERROR_IO_PENDING + errEINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. +func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.ERROR_IO_PENDING: + return errERROR_IO_PENDING + case syscall.EINVAL: + return errEINVAL + } + return errno +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_32bit.go b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go new file mode 100644 index 00000000000..05d6082d147 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go new file mode 100644 index 00000000000..dfeda752be2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x +// +build aix darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint64(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go new file mode 100644 index 00000000000..8d17a40c404 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*int8)(unsafe.Pointer(&b[0])) + v.Len = uint64(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_stub.go b/vendor/golang.org/x/net/internal/socket/iovec_stub.go new file mode 100644 index 00000000000..a746e90e307 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_stub.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type iovec struct{} + +func (v *iovec) set(b []byte) {} diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go new file mode 100644 index 00000000000..1a7f2792f25 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!linux,!netbsd + +package socket + +import "net" + +type mmsghdr struct{} + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go new file mode 100644 index 00000000000..f1100683a57 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go @@ -0,0 +1,42 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix linux netbsd + +package socket + +import "net" + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + for i := range hs { + vs := make([]iovec, len(ms[i].Buffers)) + var sa []byte + if parseFn != nil { + sa = make([]byte, sizeofSockaddrInet6) + } + if marshalFn != nil { + sa = marshalFn(ms[i].Addr) + } + hs[i].Hdr.pack(vs, ms[i].Buffers, ms[i].OOB, sa) + } + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + for i := range hs { + ms[i].N = int(hs[i].Len) + ms[i].NN = hs[i].Hdr.controllen() + ms[i].Flags = hs[i].Hdr.flags() + if parseFn != nil { + var err error + ms[i].Addr, err = parseFn(hs[i].Hdr.name(), hint) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go new file mode 100644 index 00000000000..77f44c1f129 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go @@ -0,0 +1,39 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd netbsd openbsd + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.Control = (*byte)(unsafe.Pointer(&oob[0])) + h.Controllen = uint32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go new file mode 100644 index 00000000000..c5562dd66ad --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd netbsd + +package socket + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = int32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go new file mode 100644 index 00000000000..5a38798cc0c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.setControl(oob) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go new file mode 100644 index 00000000000..a7a5987c883 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint32(l) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint32(len(b)) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go new file mode 100644 index 00000000000..e731833a262 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint64(l) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint64(len(b)) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go new file mode 100644 index 00000000000..71a69e2513a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go new file mode 100644 index 00000000000..6465b207324 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + if len(vs) > 0 { + h.Iov = &vs[0] + h.Iovlen = int32(len(vs)) + } + if len(oob) > 0 { + h.Accrights = (*int8)(unsafe.Pointer(&oob[0])) + h.Accrightslen = int32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) controllen() int { + return int(h.Accrightslen) +} + +func (h *msghdr) flags() int { + return int(NativeEndian.Uint32(h.Pad_cgo_2[:])) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_stub.go b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go new file mode 100644 index 00000000000..873490a7ae9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type msghdr struct{} + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) {} +func (h *msghdr) name() []byte { return nil } +func (h *msghdr) controllen() int { return 0 } +func (h *msghdr) flags() int { return 0 } diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go b/vendor/golang.org/x/net/internal/socket/norace.go similarity index 57% rename from vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go rename to vendor/golang.org/x/net/internal/socket/norace.go index b5130a6dd14..9519ffbba47 100644 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go +++ b/vendor/golang.org/x/net/internal/socket/norace.go @@ -2,9 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Code generated by generate-protos. DO NOT EDIT. +// +build !race -package fieldnum +package socket -// Field numbers for google.protobuf.Empty. -const () +func (m *Message) raceRead() { +} +func (m *Message) raceWrite() { +} diff --git a/vendor/golang.org/x/net/internal/socket/race.go b/vendor/golang.org/x/net/internal/socket/race.go new file mode 100644 index 00000000000..df60c62fff5 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/race.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build race + +package socket + +import ( + "runtime" + "unsafe" +) + +// This package reads and writes the Message buffers using a +// direct system call, which the race detector can't see. +// These functions tell the race detector what is going on during the syscall. 
+ +func (m *Message) raceRead() { + for _, b := range m.Buffers { + if len(b) > 0 { + runtime.RaceReadRange(unsafe.Pointer(&b[0]), len(b)) + } + } + if b := m.OOB; len(b) > 0 { + runtime.RaceReadRange(unsafe.Pointer(&b[0]), len(b)) + } +} +func (m *Message) raceWrite() { + for _, b := range m.Buffers { + if len(b) > 0 { + runtime.RaceWriteRange(unsafe.Pointer(&b[0]), len(b)) + } + } + if b := m.OOB; len(b) > 0 { + runtime.RaceWriteRange(unsafe.Pointer(&b[0]), len(b)) + } +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn.go b/vendor/golang.org/x/net/internal/socket/rawconn.go new file mode 100644 index 00000000000..b07b8900506 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn.go @@ -0,0 +1,64 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "errors" + "net" + "os" + "syscall" +) + +// A Conn represents a raw connection. +type Conn struct { + network string + c syscall.RawConn +} + +// NewConn returns a new raw connection. +func NewConn(c net.Conn) (*Conn, error) { + var err error + var cc Conn + switch c := c.(type) { + case *net.TCPConn: + cc.network = "tcp" + cc.c, err = c.SyscallConn() + case *net.UDPConn: + cc.network = "udp" + cc.c, err = c.SyscallConn() + case *net.IPConn: + cc.network = "ip" + cc.c, err = c.SyscallConn() + default: + return nil, errors.New("unknown connection type") + } + if err != nil { + return nil, err + } + return &cc, nil +} + +func (o *Option) get(c *Conn, b []byte) (int, error) { + var operr error + var n int + fn := func(s uintptr) { + n, operr = getsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return 0, err + } + return n, os.NewSyscallError("getsockopt", operr) +} + +func (o *Option) set(c *Conn, b []byte) error { + var operr error + fn := func(s uintptr) { + operr = setsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return err + } + return os.NewSyscallError("setsockopt", operr) +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go new file mode 100644 index 00000000000..d01fc4c7da2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go @@ -0,0 +1,79 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux + +package socket + +import ( + "net" + "os" + "syscall" +) + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + for i := range ms { + ms[i].raceWrite() + } + hs := make(mmsghdrs, len(ms)) + var parseFn func([]byte, string) (net.Addr, error) + if c.network != "tcp" { + parseFn = parseInetAddr + } + if err := hs.pack(ms, parseFn, nil); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("recvmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], parseFn, c.network); err != nil { + return n, err + } + return n, nil +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + for i := range ms { + ms[i].raceRead() + } + hs := make(mmsghdrs, len(ms)) + var marshalFn func(net.Addr) []byte + if c.network != "tcp" { + marshalFn = marshalInetAddr + } + if err := hs.pack(ms, nil, marshalFn); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("sendmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], nil, ""); err != nil { + return n, err + } + return n, nil +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go new file mode 100644 index 00000000000..d5ae3f8e143 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go @@ -0,0 +1,78 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "os" + "syscall" +) + +func (c *Conn) recvMsg(m *Message, flags int) error { + m.raceWrite() + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if c.network != "tcp" { + sa = make([]byte, sizeofSockaddrInet6) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("recvmsg", operr) + } + if c.network != "tcp" { + var err error + m.Addr, err = parseInetAddr(sa[:], c.network) + if err != nil { + return err + } + } + m.N = n + m.NN = h.controllen() + m.Flags = h.flags() + return nil +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + m.raceRead() + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if m.Addr != nil { + sa = marshalInetAddr(m.Addr) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("sendmsg", operr) + } + m.N = n + m.NN = len(m.OOB) + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go new file mode 100644 index 00000000000..fe5bb942ba6 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package socket + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + return 0, errNotImplemented +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + return 0, errNotImplemented +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go new file mode 100644 index 00000000000..b8cea6fe534 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +func (c *Conn) recvMsg(m *Message, flags int) error { + return errNotImplemented +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/internal/socket/socket.go b/vendor/golang.org/x/net/internal/socket/socket.go new file mode 100644 index 00000000000..23571b8d4dc --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/socket.go @@ -0,0 +1,288 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socket provides a portable interface for socket system +// calls. 
+package socket // import "golang.org/x/net/internal/socket" + +import ( + "errors" + "net" + "runtime" + "unsafe" +) + +var errNotImplemented = errors.New("not implemented on " + runtime.GOOS + "/" + runtime.GOARCH) + +// An Option represents a sticky socket option. +type Option struct { + Level int // level + Name int // name; must be equal or greater than 1 + Len int // length of value in bytes; must be equal or greater than 1 +} + +// Get reads a value for the option from the kernel. +// It returns the number of bytes written into b. +func (o *Option) Get(c *Conn, b []byte) (int, error) { + if o.Name < 1 || o.Len < 1 { + return 0, errors.New("invalid option") + } + if len(b) < o.Len { + return 0, errors.New("short buffer") + } + return o.get(c, b) +} + +// GetInt returns an integer value for the option. +// +// The Len field of Option must be either 1 or 4. +func (o *Option) GetInt(c *Conn) (int, error) { + if o.Len != 1 && o.Len != 4 { + return 0, errors.New("invalid option") + } + var b []byte + var bb [4]byte + if o.Len == 1 { + b = bb[:1] + } else { + b = bb[:4] + } + n, err := o.get(c, b) + if err != nil { + return 0, err + } + if n != o.Len { + return 0, errors.New("invalid option length") + } + if o.Len == 1 { + return int(b[0]), nil + } + return int(NativeEndian.Uint32(b[:4])), nil +} + +// Set writes the option and value to the kernel. +func (o *Option) Set(c *Conn, b []byte) error { + if o.Name < 1 || o.Len < 1 { + return errors.New("invalid option") + } + if len(b) < o.Len { + return errors.New("short buffer") + } + return o.set(c, b) +} + +// SetInt writes the option and value to the kernel. +// +// The Len field of Option must be either 1 or 4. +func (o *Option) SetInt(c *Conn, v int) error { + if o.Len != 1 && o.Len != 4 { + return errors.New("invalid option") + } + var b []byte + if o.Len == 1 { + b = []byte{byte(v)} + } else { + var bb [4]byte + NativeEndian.PutUint32(bb[:o.Len], uint32(v)) + b = bb[:4] + } + return o.set(c, b) +} + +func controlHeaderLen() int { + return roundup(sizeofCmsghdr) +} + +func controlMessageLen(dataLen int) int { + return roundup(sizeofCmsghdr) + dataLen +} + +// ControlMessageSpace returns the whole length of control message. +func ControlMessageSpace(dataLen int) int { + return roundup(sizeofCmsghdr) + roundup(dataLen) +} + +// A ControlMessage represents the head message in a stream of control +// messages. +// +// A control message comprises of a header, data and a few padding +// fields to conform to the interface to the kernel. +// +// See RFC 3542 for further information. +type ControlMessage []byte + +// Data returns the data field of the control message at the head on +// m. +func (m ControlMessage) Data(dataLen int) []byte { + l := controlHeaderLen() + if len(m) < l || len(m) < l+dataLen { + return nil + } + return m[l : l+dataLen] +} + +// Next returns the control message at the next on m. +// +// Next works only for standard control messages. +func (m ControlMessage) Next(dataLen int) ControlMessage { + l := ControlMessageSpace(dataLen) + if len(m) < l { + return nil + } + return m[l:] +} + +// MarshalHeader marshals the header fields of the control message at +// the head on m. +func (m ControlMessage) MarshalHeader(lvl, typ, dataLen int) error { + if len(m) < controlHeaderLen() { + return errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(dataLen), lvl, typ) + return nil +} + +// ParseHeader parses and returns the header fields of the control +// message at the head on m. 
+func (m ControlMessage) ParseHeader() (lvl, typ, dataLen int, err error) { + l := controlHeaderLen() + if len(m) < l { + return 0, 0, 0, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + return h.lvl(), h.typ(), int(uint64(h.len()) - uint64(l)), nil +} + +// Marshal marshals the control message at the head on m, and returns +// the next control message. +func (m ControlMessage) Marshal(lvl, typ int, data []byte) (ControlMessage, error) { + l := len(data) + if len(m) < ControlMessageSpace(l) { + return nil, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(l), lvl, typ) + if l > 0 { + copy(m.Data(l), data) + } + return m.Next(l), nil +} + +// Parse parses m as a single or multiple control messages. +// +// Parse works for both standard and compatible messages. +func (m ControlMessage) Parse() ([]ControlMessage, error) { + var ms []ControlMessage + for len(m) >= controlHeaderLen() { + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + l := h.len() + if l <= 0 { + return nil, errors.New("invalid header length") + } + if uint64(l) < uint64(controlHeaderLen()) { + return nil, errors.New("invalid message length") + } + if uint64(l) > uint64(len(m)) { + return nil, errors.New("short buffer") + } + // On message reception: + // + // |<- ControlMessageSpace --------------->| + // |<- controlMessageLen ---------->| | + // |<- controlHeaderLen ->| | | + // +---------------+------+---------+------+ + // | Header | PadH | Data | PadD | + // +---------------+------+---------+------+ + // + // On compatible message reception: + // + // | ... |<- controlMessageLen ----------->| + // | ... |<- controlHeaderLen ->| | + // +-----+---------------+------+----------+ + // | ... | Header | PadH | Data | + // +-----+---------------+------+----------+ + ms = append(ms, ControlMessage(m[:l])) + ll := l - controlHeaderLen() + if len(m) >= ControlMessageSpace(ll) { + m = m[ControlMessageSpace(ll):] + } else { + m = m[controlMessageLen(ll):] + } + } + return ms, nil +} + +// NewControlMessage returns a new stream of control messages. +func NewControlMessage(dataLen []int) ControlMessage { + var l int + for i := range dataLen { + l += ControlMessageSpace(dataLen[i]) + } + return make([]byte, l) +} + +// A Message represents an IO message. +type Message struct { + // When writing, the Buffers field must contain at least one + // byte to write. + // When reading, the Buffers field will always contain a byte + // to read. + Buffers [][]byte + + // OOB contains protocol-specific control or miscellaneous + // ancillary data known as out-of-band data. + OOB []byte + + // Addr specifies a destination address when writing. + // It can be nil when the underlying protocol of the raw + // connection uses connection-oriented communication. + // After a successful read, it may contain the source address + // on the received packet. + Addr net.Addr + + N int // # of bytes read or written from/to Buffers + NN int // # of bytes read or written from/to OOB + Flags int // protocol-specific information on the received message +} + +// RecvMsg wraps recvmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +func (c *Conn) RecvMsg(m *Message, flags int) error { + return c.recvMsg(m, flags) +} + +// SendMsg wraps sendmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. 
+func (c *Conn) SendMsg(m *Message, flags int) error { + return c.sendMsg(m, flags) +} + +// RecvMsgs wraps recvmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// Only Linux supports this. +func (c *Conn) RecvMsgs(ms []Message, flags int) (int, error) { + return c.recvMsgs(ms, flags) +} + +// SendMsgs wraps sendmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// Only Linux supports this. +func (c *Conn) SendMsgs(ms []Message, flags int) (int, error) { + return c.sendMsgs(ms, flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys.go b/vendor/golang.org/x/net/internal/socket/sys.go new file mode 100644 index 00000000000..ee492ba86b1 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "encoding/binary" + "unsafe" +) + +var ( + // NativeEndian is the machine native endian implementation of + // ByteOrder. + NativeEndian binary.ByteOrder + + kernelAlign int +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + NativeEndian = binary.LittleEndian + } else { + NativeEndian = binary.BigEndian + } + kernelAlign = probeProtocolStack() +} + +func roundup(l int) int { + return (l + kernelAlign - 1) &^ (kernelAlign - 1) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsd.go b/vendor/golang.org/x/net/internal/socket/sys_bsd.go new file mode 100644 index 00000000000..d432835b419 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_bsd.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd openbsd + +package socket + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go new file mode 100644 index 00000000000..b4f41b5522f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix freebsd netbsd openbsd + +package socket + +import ( + "runtime" + "unsafe" +) + +func probeProtocolStack() int { + if (runtime.GOOS == "netbsd" || runtime.GOOS == "openbsd") && runtime.GOARCH == "arm" { + return 8 + } + if runtime.GOOS == "aix" { + return 1 + } + var p uintptr + return int(unsafe.Sizeof(p)) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_const_unix.go b/vendor/golang.org/x/net/internal/socket/sys_const_unix.go new file mode 100644 index 00000000000..43797d6e535 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_const_unix.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +import "golang.org/x/sys/unix" + +const ( + sysAF_UNSPEC = unix.AF_UNSPEC + sysAF_INET = unix.AF_INET + sysAF_INET6 = unix.AF_INET6 + + sysSOCK_RAW = unix.SOCK_RAW +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_darwin.go b/vendor/golang.org/x/net/internal/socket/sys_darwin.go new file mode 100644 index 00000000000..b17d223bff2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_darwin.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func probeProtocolStack() int { return 4 } diff --git a/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go b/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go new file mode 100644 index 00000000000..ed0448fe985 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go @@ -0,0 +1,32 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "sync" + "syscall" + "unsafe" +) + +// See version list in https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/sys/param.h +var ( + osreldateOnce sync.Once + osreldate uint32 +) + +// First __DragonFly_version after September 2019 ABI changes +// http://lists.dragonflybsd.org/pipermail/users/2019-September/358280.html +const _dragonflyABIChangeVersion = 500705 + +func probeProtocolStack() int { + osreldateOnce.Do(func() { osreldate, _ = syscall.SysctlUint32("kern.osreldate") }) + var p uintptr + if int(unsafe.Sizeof(p)) == 8 && osreldate >= _dragonflyABIChangeVersion { + return int(unsafe.Sizeof(p)) + } + // 64-bit Dragonfly before the September 2019 ABI changes still requires + // 32-bit aligned access to network subsystem. + return 4 +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_go1_11_darwin.go b/vendor/golang.org/x/net/internal/socket/sys_go1_11_darwin.go new file mode 100644 index 00000000000..02d2b3cc835 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_go1_11_darwin.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.12 + +package socket + +import ( + "syscall" + "unsafe" +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_RECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_SENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linkname.go b/vendor/golang.org/x/net/internal/socket/sys_linkname.go new file mode 100644 index 00000000000..61c3f38a51b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linkname.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix go1.12,darwin + +package socket + +import ( + "syscall" + "unsafe" +) + +//go:linkname syscall_getsockopt syscall.getsockopt +func syscall_getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *uint32) error + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + err := syscall_getsockopt(int(s), level, name, unsafe.Pointer(&b[0]), &l) + return int(l), err +} + +//go:linkname syscall_setsockopt syscall.setsockopt +func syscall_setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) error + +func setsockopt(s uintptr, level, name int, b []byte) error { + return syscall_setsockopt(int(s), level, name, unsafe.Pointer(&b[0]), uintptr(len(b))) +} + +//go:linkname syscall_recvmsg syscall.recvmsg +func syscall_recvmsg(s int, msg *syscall.Msghdr, flags int) (n int, err error) + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return syscall_recvmsg(int(s), (*syscall.Msghdr)(unsafe.Pointer(h)), flags) +} + +//go:linkname syscall_sendmsg syscall.sendmsg +func syscall_sendmsg(s int, msg *syscall.Msghdr, flags int) (n int, err error) + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return syscall_sendmsg(int(s), (*syscall.Msghdr)(unsafe.Pointer(h)), flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux.go b/vendor/golang.org/x/net/internal/socket/sys_linux.go new file mode 100644 index 00000000000..1559521e038 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux,!s390x,!386 + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_386.go b/vendor/golang.org/x/net/internal/socket/sys_linux_386.go new file mode 100644 index 00000000000..235b2cc08a6 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_386.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 4 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_386.s b/vendor/golang.org/x/net/internal/socket/sys_linux_386.s new file mode 100644 index 00000000000..93e7d75ec03 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_386.s @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-36 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-36 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go new file mode 100644 index 00000000000..9decee2e59a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x12b + sysSENDMMSG = 0x133 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go b/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go new file mode 100644 index 00000000000..d753b436dff --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x16d + sysSENDMMSG = 0x176 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go new file mode 100644 index 00000000000..b670894366d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0xf3 + sysSENDMMSG = 0x10d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go new file mode 100644 index 00000000000..9c0d74014f3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go new file mode 100644 index 00000000000..071a4aba8b2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go new file mode 100644 index 00000000000..071a4aba8b2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go new file mode 100644 index 00000000000..9c0d74014f3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go new file mode 100644 index 00000000000..21c1e3f004a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go new file mode 100644 index 00000000000..21c1e3f004a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go b/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go similarity index 54% rename from vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go rename to vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go index 0e3f25aca1a..64f69f1dc55 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go @@ -2,10 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin,arm,!go1.12 +// +build riscv64 -package unix +package socket -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - return 0, ENOSYS -} +const ( + sysRECVMMSG = 0xf3 + sysSENDMMSG = 0x10d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go new file mode 100644 index 00000000000..327979efbb4 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 8 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s new file mode 100644 index 00000000000..06d75628c9b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-72 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-72 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_netbsd.go b/vendor/golang.org/x/net/internal/socket/sys_netbsd.go new file mode 100644 index 00000000000..431851c12e5 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_netbsd.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "syscall" + "unsafe" +) + +const ( + sysRECVMMSG = 0x1db + sysSENDMMSG = 0x1dc +) + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_posix.go b/vendor/golang.org/x/net/internal/socket/sys_posix.go new file mode 100644 index 00000000000..22eae809c9e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_posix.go @@ -0,0 +1,183 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "encoding/binary" + "errors" + "net" + "runtime" + "strconv" + "sync" + "time" +) + +func marshalInetAddr(a net.Addr) []byte { + switch a := a.(type) { + case *net.TCPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.UDPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.IPAddr: + return marshalSockaddr(a.IP, 0, a.Zone) + default: + return nil + } +} + +func marshalSockaddr(ip net.IP, port int, zone string) []byte { + if ip4 := ip.To4(); ip4 != nil { + b := make([]byte, sizeofSockaddrInet) + switch runtime.GOOS { + case "android", "illumos", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET)) + default: + b[0] = sizeofSockaddrInet + b[1] = sysAF_INET + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[4:8], ip4) + return b + } + if ip6 := ip.To16(); ip6 != nil && ip.To4() == nil { + b := make([]byte, sizeofSockaddrInet6) + switch runtime.GOOS { + case "android", "illumos", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET6)) + default: + b[0] = sizeofSockaddrInet6 + b[1] = sysAF_INET6 + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[8:24], ip6) + if zone != "" { + NativeEndian.PutUint32(b[24:28], uint32(zoneCache.index(zone))) + } + return b + } + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + if len(b) < 2 { + return nil, errors.New("invalid address") + } + var af int + switch runtime.GOOS { + case "android", "illumos", "linux", "solaris", "windows": + af = int(NativeEndian.Uint16(b[:2])) + default: + af = int(b[1]) + } + var ip net.IP + var zone string + if af == sysAF_INET { + if len(b) < sizeofSockaddrInet { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv4len) + copy(ip, b[4:8]) + } + if af == sysAF_INET6 { + if len(b) < sizeofSockaddrInet6 { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv6len) + copy(ip, b[8:24]) + if id := int(NativeEndian.Uint32(b[24:28])); id > 0 { + zone = zoneCache.name(id) + } + } + switch network { + case "tcp", "tcp4", "tcp6": + return &net.TCPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + case "udp", "udp4", "udp6": + return &net.UDPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + default: + return &net.IPAddr{IP: ip, Zone: zone}, nil + } +} + +// An ipv6ZoneCache represents a cache holding partial network +// 
interface information. It is used for reducing the cost of IPv6 +// addressing scope zone resolution. +// +// Multiple names sharing the index are managed by first-come +// first-served basis for consistency. +type ipv6ZoneCache struct { + sync.RWMutex // guard the following + lastFetched time.Time // last time routing information was fetched + toIndex map[string]int // interface name to its index + toName map[int]string // interface index to its name +} + +var zoneCache = ipv6ZoneCache{ + toIndex: make(map[string]int), + toName: make(map[int]string), +} + +// update refreshes the network interface information if the cache was last +// updated more than 1 minute ago, or if force is set. It returns whether the +// cache was updated. +func (zc *ipv6ZoneCache) update(ift []net.Interface, force bool) (updated bool) { + zc.Lock() + defer zc.Unlock() + now := time.Now() + if !force && zc.lastFetched.After(now.Add(-60*time.Second)) { + return false + } + zc.lastFetched = now + if len(ift) == 0 { + var err error + if ift, err = net.Interfaces(); err != nil { + return false + } + } + zc.toIndex = make(map[string]int, len(ift)) + zc.toName = make(map[int]string, len(ift)) + for _, ifi := range ift { + zc.toIndex[ifi.Name] = ifi.Index + if _, ok := zc.toName[ifi.Index]; !ok { + zc.toName[ifi.Index] = ifi.Name + } + } + return true +} + +func (zc *ipv6ZoneCache) name(zone int) string { + updated := zoneCache.update(nil, false) + zoneCache.RLock() + name, ok := zoneCache.toName[zone] + zoneCache.RUnlock() + if !ok && !updated { + zoneCache.update(nil, true) + zoneCache.RLock() + name, ok = zoneCache.toName[zone] + zoneCache.RUnlock() + } + if !ok { // last resort + name = strconv.Itoa(zone) + } + return name +} + +func (zc *ipv6ZoneCache) index(zone string) int { + updated := zoneCache.update(nil, false) + zoneCache.RLock() + index, ok := zoneCache.toIndex[zone] + zoneCache.RUnlock() + if !ok && !updated { + zoneCache.update(nil, true) + zoneCache.RLock() + index, ok = zoneCache.toIndex[zone] + zoneCache.RUnlock() + } + if !ok { // last resort + index, _ = strconv.Atoi(zone) + } + return index +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_solaris.go b/vendor/golang.org/x/net/internal/socket/sys_solaris.go new file mode 100644 index 00000000000..66b5547868c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_solaris.go @@ -0,0 +1,70 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "runtime" + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +//go:cgo_import_dynamic libc___xnet_getsockopt __xnet_getsockopt "libsocket.so" +//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" +//go:cgo_import_dynamic libc___xnet_recvmsg __xnet_recvmsg "libsocket.so" +//go:cgo_import_dynamic libc___xnet_sendmsg __xnet_sendmsg "libsocket.so" + +//go:linkname procGetsockopt libc___xnet_getsockopt +//go:linkname procSetsockopt libc_setsockopt +//go:linkname procRecvmsg libc___xnet_recvmsg +//go:linkname procSendmsg libc___xnet_sendmsg + +var ( + procGetsockopt uintptr + procSetsockopt uintptr + procRecvmsg uintptr + procSendmsg uintptr +) + +func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) +func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procGetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procRecvmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSendmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s b/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s new file mode 100644 index 00000000000..a18ac5ed755 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·sysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·sysvicall6(SB) + +TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSysvicall6(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_stub.go b/vendor/golang.org/x/net/internal/socket/sys_stub.go new file mode 100644 index 00000000000..0f617426281 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_stub.go @@ -0,0 +1,63 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +import ( + "net" + "runtime" + "unsafe" +) + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64p32", "mips64p32": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +func marshalInetAddr(ip net.IP, port int, zone string) []byte { + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + return nil, errNotImplemented +} + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + return 0, errNotImplemented +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return errNotImplemented +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_unix.go b/vendor/golang.org/x/net/internal/socket/sys_unix.go new file mode 100644 index 00000000000..0eb71283f52 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_unix.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly freebsd linux,!s390x,!386 netbsd openbsd + +package socket + +import ( + "syscall" + "unsafe" +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_RECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_SENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_windows.go b/vendor/golang.org/x/net/internal/socket/sys_windows.go new file mode 100644 index 00000000000..d556a446157 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_windows.go @@ -0,0 +1,71 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +const ( + sysAF_UNSPEC = windows.AF_UNSPEC + sysAF_INET = windows.AF_INET + sysAF_INET6 = windows.AF_INET6 + + sysSOCK_RAW = windows.SOCK_RAW +) + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + err := syscall.Getsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), (*int32)(unsafe.Pointer(&l))) + return int(l), err +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), int32(len(b))) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} diff --git a/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go new file mode 100644 index 00000000000..e740c8f024c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go @@ -0,0 +1,60 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_aix.go + +// Added for go1.11 compatibility +// +build aix + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go new file mode 100644 index 00000000000..083bda51c3c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_darwin.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go new file mode 100644 index 00000000000..55c6c9f5770 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_darwin.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go new file mode 100644 index 00000000000..083bda51c3c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_darwin.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go new file mode 100644 index 00000000000..55c6c9f5770 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_darwin.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go new file mode 100644 index 00000000000..8b7d161d7d5 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_dragonfly.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go new file mode 100644 index 00000000000..3e71ff57438 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_freebsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go new file mode 100644 index 00000000000..238d90de626 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go new file mode 100644 index 00000000000..3e71ff57438 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_freebsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go new file mode 100644 index 00000000000..238d90de626 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_freebsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go new file mode 100644 index 00000000000..d33025b70db --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go @@ -0,0 +1,54 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go new file mode 100644 index 00000000000..b20d2167749 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go @@ -0,0 +1,57 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go new file mode 100644 index 00000000000..1bb10a4289f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go @@ -0,0 +1,55 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go new file mode 100644 index 00000000000..7f6e8a7fa4d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go new file mode 100644 index 00000000000..1bb10a4289f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go @@ -0,0 +1,55 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go new file mode 100644 index 00000000000..7f6e8a7fa4d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go new file mode 100644 index 00000000000..7f6e8a7fa4d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go new file mode 100644 index 00000000000..1bb10a4289f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go @@ -0,0 +1,55 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go new file mode 100644 index 00000000000..7f6e8a7fa4d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go new file mode 100644 index 00000000000..7f6e8a7fa4d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go new file mode 100644 index 00000000000..f12a1d76824 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go @@ -0,0 +1,59 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +// +build riscv64 + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_0 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go new file mode 100644 index 00000000000..7f6e8a7fa4d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go new file mode 100644 index 00000000000..7e258cec29d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go @@ -0,0 +1,57 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_netbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go new file mode 100644 index 00000000000..b3f9c0d7e5a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go @@ -0,0 +1,60 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_netbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go new file mode 100644 index 00000000000..7e258cec29d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go @@ -0,0 +1,57 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_netbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go new file mode 100644 index 00000000000..da26ef019cd --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go @@ -0,0 +1,59 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_netbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go new file mode 100644 index 00000000000..73655a14c4c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go new file mode 100644 index 00000000000..0a4de80f235 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go new file mode 100644 index 00000000000..73655a14c4c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go new file mode 100644 index 00000000000..0a4de80f235 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go new file mode 100644 index 00000000000..353cd5fb4ec --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go @@ -0,0 +1,52 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_solaris.go + +package socket + +type iovec struct { + Base *int8 + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Accrights *int8 + Accrightslen int32 + Pad_cgo_2 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x20 +) diff --git a/vendor/golang.org/x/net/ipv4/batch.go b/vendor/golang.org/x/net/ipv4/batch.go new file mode 100644 index 00000000000..1a3a4fc0c10 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/batch.go @@ -0,0 +1,194 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "runtime" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. 
+ +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// RawConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +// +// Unlike the ReadFrom method, it doesn't strip the IPv4 header +// followed by option headers from the received IPv4 datagram when the +// underlying transport is net.IPConn. Each Buffers field of Message +// must be large enough to accommodate an IPv4 header and option +// headers. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + if compatFreeBSD32 && ms[0].NN > 0 { + adjustFreeBSD32(&ms[0]) + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. 
+func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *packetHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if compatFreeBSD32 && ms[0].NN > 0 { + adjustFreeBSD32(&ms[0]) + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *packetHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/golang.org/x/net/ipv4/control.go b/vendor/golang.org/x/net/ipv4/control.go new file mode 100644 index 00000000000..a2b02ca95b9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +type ControlFlags uint + +const ( + FlagTTL ControlFlags = 1 << iota // pass the TTL on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet +) + +// A ControlMessage represents per packet basis IP-level socket options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn or RawConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn or RawConn allows to send the options + // to the protocol stack. + // + TTL int // time-to-live, receiving only + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("ttl=%d src=%v dst=%v ifindex=%d", cm.TTL, cm.Src, cm.Dst, cm.IfIndex) +} + +// Marshal returns the binary encoding of cm. +func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var m socket.ControlMessage + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To4() != nil || cm.IfIndex > 0) { + m = socket.NewControlMessage([]int{ctlOpts[ctlPacketInfo].length}) + } + if len(m) > 0 { + ctlOpts[ctlPacketInfo].marshal(m, cm) + } + return m +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIP { + continue + } + switch { + case typ == ctlOpts[ctlTTL].name && l >= ctlOpts[ctlTTL].length: + ctlOpts[ctlTTL].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlDst].name && l >= ctlOpts[ctlDst].length: + ctlOpts[ctlDst].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlInterface].name && l >= ctlOpts[ctlInterface].length: + ctlOpts[ctlInterface].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. 
+func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTTL].length) + } + if ctlOpts[ctlPacketInfo].name > 0 { + if opt.isset(FlagSrc | FlagDst | FlagInterface) { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + } else { + if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlDst].length) + } + if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlInterface].length) + } + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + +// Ancillary data socket options +const ( + ctlTTL = iota // header field + ctlSrc // header field + ctlDst // header field + ctlInterface // inbound or outbound interface + ctlPacketInfo // inbound or outbound packet path + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. +type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv4/control_bsd.go b/vendor/golang.org/x/net/ipv4/control_bsd.go new file mode 100644 index 00000000000..69c4f553cda --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_bsd.go @@ -0,0 +1,41 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalDst(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVDSTADDR, net.IPv4len) + return m.Next(net.IPv4len) +} + +func parseDst(cm *ControlMessage, b []byte) { + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, b[:net.IPv4len]) +} + +func marshalInterface(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVIF, syscall.SizeofSockaddrDatalink) + return m.Next(syscall.SizeofSockaddrDatalink) +} + +func parseInterface(cm *ControlMessage, b []byte) { + var sadl syscall.SockaddrDatalink + copy((*[unsafe.Sizeof(sadl)]byte)(unsafe.Pointer(&sadl))[:], b) + cm.IfIndex = int(sadl.Index) +} diff --git a/vendor/golang.org/x/net/ipv4/control_pktinfo.go b/vendor/golang.org/x/net/ipv4/control_pktinfo.go new file mode 100644 index 00000000000..425338f35bf --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_pktinfo.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_PKTINFO, sizeofInetPktinfo) + if cm != nil { + pi := (*inetPktinfo)(unsafe.Pointer(&m.Data(sizeofInetPktinfo)[0])) + if ip := cm.Src.To4(); ip != nil { + copy(pi.Spec_dst[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInetPktinfo) +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*inetPktinfo)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(pi.Ifindex) + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, pi.Addr[:]) +} diff --git a/vendor/golang.org/x/net/ipv4/control_stub.go b/vendor/golang.org/x/net/ipv4/control_stub.go new file mode 100644 index 00000000000..a0c049d683a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_stub.go @@ -0,0 +1,13 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/control_unix.go b/vendor/golang.org/x/net/ipv4/control_unix.go new file mode 100644 index 00000000000..b27fa4903a9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_unix.go @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if so, ok := sockOpts[ssoReceiveTTL]; ok && cf&FlagTTL != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTTL) + } else { + opt.clear(FlagTTL) + } + } + if so, ok := sockOpts[ssoPacketInfo]; ok { + if cf&(FlagSrc|FlagDst|FlagInterface) != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & (FlagSrc | FlagDst | FlagInterface)) + } else { + opt.clear(cf & (FlagSrc | FlagDst | FlagInterface)) + } + } + } else { + if so, ok := sockOpts[ssoReceiveDst]; ok && cf&FlagDst != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagDst) + } else { + opt.clear(FlagDst) + } + } + if so, ok := sockOpts[ssoReceiveInterface]; ok && cf&FlagInterface != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagInterface) + } else { + opt.clear(FlagInterface) + } + } + } + return nil +} + +func marshalTTL(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVTTL, 1) + return m.Next(1) +} + +func parseTTL(cm *ControlMessage, b []byte) { + cm.TTL = int(*(*byte)(unsafe.Pointer(&b[:1][0]))) +} diff --git a/vendor/golang.org/x/net/ipv4/control_windows.go b/vendor/golang.org/x/net/ipv4/control_windows.go new file mode 100644 index 00000000000..82c6306421b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_windows.go @@ -0,0 +1,12 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/dgramopt.go b/vendor/golang.org/x/net/ipv4/dgramopt.go new file mode 100644 index 00000000000..c191c22aba4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/dgramopt.go @@ -0,0 +1,264 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + + "golang.org/x/net/bpf" +) + +// MulticastTTL returns the time-to-live field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastTTL() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetMulticastTTL sets the time-to-live field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastTTL(ttl int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, ttl) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. 
+func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, errInvalidConn + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errNotImplemented + } + return so.getMulticastInterface(c.Conn) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errNotImplemented + } + return so.setMulticastInterface(c.Conn, ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, errInvalidConn + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errNotImplemented + } + on, err := so.GetInt(c.Conn) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. 
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ICMPFilter returns an ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, errInvalidConn + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errNotImplemented + } + return so.getICMPFilter(c.Conn) +} + +// SetICMPFilter deploys the ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errNotImplemented + } + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errNotImplemented + } + return so.setBPF(c.Conn, filter) +} diff --git a/vendor/golang.org/x/net/ipv4/doc.go b/vendor/golang.org/x/net/ipv4/doc.go new file mode 100644 index 00000000000..24583497993 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/doc.go @@ -0,0 +1,244 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv4 implements IP-level socket options for the Internet +// Protocol version 4. +// +// The package provides IP-level socket options that allow +// manipulation of IPv4 facilities. +// +// The IPv4 protocol and basic host requirements for IPv4 are defined +// in RFC 791 and RFC 1122. +// Host extensions for multicasting and socket interface extensions +// for multicast source filters are defined in RFC 1112 and RFC 3678. +// IGMPv1, IGMPv2 and IGMPv3 are defined in RFC 1112, RFC 2236 and RFC +// 3376. +// Source-specific multicast is defined in RFC 4607. 
+// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn which are created as network connections +// that use the IPv4 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, Conn is used to set the type-of-service field on the +// IPv4 header for each packet. +// +// ln, err := net.Listen("tcp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. +// +// if err := ipv4.NewConn(c).SetTOS(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPConn which are created as network connections that use the +// IPv4 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups. +// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.IPv4(224, 0, 0, 250) +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups, starts listening to +// the groups on the specified network interfaces. Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv4 and Ethernet. +// +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might set per packet control message transmissions +// between the protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of PacketConn is used to enable control message +// transmissions. +// +// if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, cm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if cm.Dst.IsMulticast() { +// if cm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets. 
+// +// p.SetTOS(0x0) +// p.SetTTL(16) +// if _, err := p.WriteTo(data, nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// for _, ifi := range []*net.Interface{en0, en1} { +// if err := p.SetMulticastInterface(ifi); err != nil { +// // error handling +// } +// p.SetMulticastTTL(2) +// if _, err := p.WriteTo(data, nil, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn or RawConn may join multiple +// multicast groups. For example, a UDP listener with port 1024 might +// join two different groups across over two different network +// interfaces by using: +// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP. +// +// c1, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv4.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// p2 := ipv4.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. +// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn or RawConn on IGMPv3 supported +// platform is able to join source-specific multicast groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.IPv4(232, 7, 8, 9)} +// ssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)} +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode. 
+// +// exclsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 254)} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that it depends on each platform implementation what happens +// when an application which runs on IGMPv3 unsupported platform uses +// JoinSourceSpecificGroup and LeaveSourceSpecificGroup. +// In general the platform tries to fall back to conversations using +// IGMPv1 or IGMPv2 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. +package ipv4 // import "golang.org/x/net/ipv4" + +// BUG(mikio): This package is not implemented on JS, NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv4/endpoint.go b/vendor/golang.org/x/net/ipv4/endpoint.go new file mode 100644 index 00000000000..4a6d7a85ee6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/endpoint.go @@ -0,0 +1,186 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "time" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn and RawConn are +// not implemented. + +// A Conn represents a network endpoint that uses the IPv4 transport. +// It is used to control basic IP-level socket options such as TOS and +// TTL. +type Conn struct { + genericOpt +} + +type genericOpt struct { + *socket.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) + return &Conn{ + genericOpt: genericOpt{Conn: cc}, + } +} + +// A PacketConn represents a packet network endpoint that uses the +// IPv4 transport. It is used to control several IP-level socket +// options including multicasting. It also provides datagram based +// network I/O methods specific to the IPv4 and higher layer protocols +// such as UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + *socket.Conn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } + +// SetControlMessage sets the per packet IP-level socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.PacketConn.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.PacketConn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. 
+func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.PacketConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.PacketConn.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) + p := &PacketConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, + } + return p +} + +// A RawConn represents a packet network endpoint that uses the IPv4 +// transport. It is used to control several IP-level socket options +// including IPv4 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv4 and higher layer +// protocols that handle IPv4 datagram directly such as OSPF, GRE. +type RawConn struct { + genericOpt + dgramOpt + packetHandler +} + +// SetControlMessage sets the per packet IP-level socket options. +func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.packetHandler.ok() { + return errInvalidConn + } + return setControlMessage(c.dgramOpt.Conn, &c.packetHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *RawConn) SetDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return errInvalidConn + } + return c.packetHandler.IPConn.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *RawConn) SetReadDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return errInvalidConn + } + return c.packetHandler.IPConn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *RawConn) SetWriteDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return errInvalidConn + } + return c.packetHandler.IPConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *RawConn) Close() error { + if !c.packetHandler.ok() { + return errInvalidConn + } + return c.packetHandler.IPConn.Close() +} + +// NewRawConn returns a new RawConn using c as its underlying +// transport. +func NewRawConn(c net.PacketConn) (*RawConn, error) { + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + return nil, err + } + r := &RawConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + packetHandler: packetHandler{IPConn: c.(*net.IPConn), Conn: cc}, + } + so, ok := sockOpts[ssoHeaderPrepend] + if !ok { + return nil, errNotImplemented + } + if err := so.SetInt(r.dgramOpt.Conn, boolint(true)); err != nil { + return nil, err + } + return r, nil +} diff --git a/vendor/golang.org/x/net/ipv4/genericopt.go b/vendor/golang.org/x/net/ipv4/genericopt.go new file mode 100644 index 00000000000..51c12371eb4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/genericopt.go @@ -0,0 +1,55 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +// TOS returns the type-of-service field value for outgoing packets. 
+func (c *genericOpt) TOS() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoTOS] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetTOS sets the type-of-service field value for future outgoing +// packets. +func (c *genericOpt) SetTOS(tos int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoTOS] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, tos) +} + +// TTL returns the time-to-live field value for outgoing packets. +func (c *genericOpt) TTL() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoTTL] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetTTL sets the time-to-live field value for future outgoing +// packets. +func (c *genericOpt) SetTTL(ttl int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoTTL] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, ttl) +} diff --git a/vendor/golang.org/x/net/ipv4/header.go b/vendor/golang.org/x/net/ipv4/header.go new file mode 100644 index 00000000000..c271ca46cb9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/header.go @@ -0,0 +1,172 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "encoding/binary" + "fmt" + "net" + "runtime" + + "golang.org/x/net/internal/socket" +) + +const ( + Version = 4 // protocol version + HeaderLen = 20 // header length without extension headers +) + +type HeaderFlags int + +const ( + MoreFragments HeaderFlags = 1 << iota // more fragments flag + DontFragment // don't fragment flag +) + +// A Header represents an IPv4 header. +type Header struct { + Version int // protocol version + Len int // header length + TOS int // type-of-service + TotalLen int // packet total length + ID int // identification + Flags HeaderFlags // flags + FragOff int // fragment offset + TTL int // time-to-live + Protocol int // next protocol + Checksum int // checksum + Src net.IP // source address + Dst net.IP // destination address + Options []byte // options, extension headers +} + +func (h *Header) String() string { + if h == nil { + return "" + } + return fmt.Sprintf("ver=%d hdrlen=%d tos=%#x totallen=%d id=%#x flags=%#x fragoff=%#x ttl=%d proto=%d cksum=%#x src=%v dst=%v", h.Version, h.Len, h.TOS, h.TotalLen, h.ID, h.Flags, h.FragOff, h.TTL, h.Protocol, h.Checksum, h.Src, h.Dst) +} + +// Marshal returns the binary encoding of h. +// +// The returned slice is in the format used by a raw IP socket on the +// local system. +// This may differ from the wire format, depending on the system. 
+func (h *Header) Marshal() ([]byte, error) { + if h == nil { + return nil, errNilHeader + } + if h.Len < HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := HeaderLen + len(h.Options) + b := make([]byte, hdrlen) + b[0] = byte(Version<<4 | (hdrlen >> 2 & 0x0f)) + b[1] = byte(h.TOS) + flagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13) + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + case "freebsd": + if freebsdVersion < 1100000 { + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } else { + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } + default: + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } + binary.BigEndian.PutUint16(b[4:6], uint16(h.ID)) + b[8] = byte(h.TTL) + b[9] = byte(h.Protocol) + binary.BigEndian.PutUint16(b[10:12], uint16(h.Checksum)) + if ip := h.Src.To4(); ip != nil { + copy(b[12:16], ip[:net.IPv4len]) + } + if ip := h.Dst.To4(); ip != nil { + copy(b[16:20], ip[:net.IPv4len]) + } else { + return nil, errMissingAddress + } + if len(h.Options) > 0 { + copy(b[HeaderLen:], h.Options) + } + return b, nil +} + +// Parse parses b as an IPv4 header and stores the result in h. +// +// The provided b must be in the format used by a raw IP socket on the +// local system. +// This may differ from the wire format, depending on the system. +func (h *Header) Parse(b []byte) error { + if h == nil || b == nil { + return errNilHeader + } + if len(b) < HeaderLen { + return errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + if len(b) < hdrlen { + return errExtHeaderTooShort + } + h.Version = int(b[0] >> 4) + h.Len = hdrlen + h.TOS = int(b[1]) + h.ID = int(binary.BigEndian.Uint16(b[4:6])) + h.TTL = int(b[8]) + h.Protocol = int(b[9]) + h.Checksum = int(binary.BigEndian.Uint16(b[10:12])) + h.Src = net.IPv4(b[12], b[13], b[14], b[15]) + h.Dst = net.IPv4(b[16], b[17], b[18], b[19]) + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + hdrlen + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + case "freebsd": + if freebsdVersion < 1100000 { + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + if freebsdVersion < 1000000 { + h.TotalLen += hdrlen + } + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + } else { + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + default: + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + h.Flags = HeaderFlags(h.FragOff&0xe000) >> 13 + h.FragOff = h.FragOff & 0x1fff + optlen := hdrlen - HeaderLen + if optlen > 0 && len(b) >= hdrlen { + if cap(h.Options) < optlen { + h.Options = make([]byte, optlen) + } else { + h.Options = h.Options[:optlen] + } + copy(h.Options, b[HeaderLen:hdrlen]) + } + return nil +} + +// ParseHeader parses b as an IPv4 header. +// +// The provided b must be in the format used by a raw IP socket on the +// local system. +// This may differ from the wire format, depending on the system. 
+func ParseHeader(b []byte) (*Header, error) { + h := new(Header) + if err := h.Parse(b); err != nil { + return nil, err + } + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv4/helper.go b/vendor/golang.org/x/net/ipv4/helper.go new file mode 100644 index 00000000000..e845a7376ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/helper.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "errors" + "net" + "runtime" + + "golang.org/x/net/internal/socket" +) + +var ( + errInvalidConn = errors.New("invalid connection") + errMissingAddress = errors.New("missing address") + errNilHeader = errors.New("nil header") + errHeaderTooShort = errors.New("header too short") + errExtHeaderTooShort = errors.New("extension header too short") + errInvalidConnType = errors.New("invalid conn type") + errNotImplemented = errors.New("not implemented on " + runtime.GOOS + "/" + runtime.GOARCH) + + // See https://www.freebsd.org/doc/en/books/porters-handbook/versions.html. + freebsdVersion uint32 + compatFreeBSD32 bool // 386 emulation on amd64 +) + +// See golang.org/issue/30899. +func adjustFreeBSD32(m *socket.Message) { + // FreeBSD 12.0-RELEASE is affected by https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236737 + if 1200086 <= freebsdVersion && freebsdVersion < 1201000 { + l := (m.NN + 4 - 1) &^ (4 - 1) + if m.NN < l && l <= len(m.OOB) { + m.NN = l + } + } +} + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP4(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + } + return nil +} + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/golang.org/x/net/ipv4/iana.go b/vendor/golang.org/x/net/ipv4/iana.go new file mode 100644 index 00000000000..4375b4099b8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/iana.go @@ -0,0 +1,38 @@ +// go generate gen.go +// Code generated by the command above; DO NOT EDIT. 
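Header.Marshal and ParseHeader above are symmetric, so a header built in Go can be serialized and re-parsed. A minimal application-side round-trip sketch, with illustrative field values, could be:

package main

import (
	"fmt"
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	h := &ipv4.Header{
		Version:  ipv4.Version,
		Len:      ipv4.HeaderLen,
		TotalLen: ipv4.HeaderLen, // header only, no payload
		TTL:      64,
		Protocol: 17, // UDP
		Src:      net.IPv4(192, 0, 2, 1),
		Dst:      net.IPv4(192, 0, 2, 2),
	}
	b, err := h.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	parsed, err := ipv4.ParseHeader(b)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed) // ver=4 hdrlen=20 tos=0x0 ...
}

As the doc comments note, both functions use the local raw-socket representation rather than a fixed wire format, so the encoded bytes are platform-dependent.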
+ +package ipv4 + +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2018-02-26 +const ( + ICMPTypeEchoReply ICMPType = 0 // Echo Reply + ICMPTypeDestinationUnreachable ICMPType = 3 // Destination Unreachable + ICMPTypeRedirect ICMPType = 5 // Redirect + ICMPTypeEcho ICMPType = 8 // Echo + ICMPTypeRouterAdvertisement ICMPType = 9 // Router Advertisement + ICMPTypeRouterSolicitation ICMPType = 10 // Router Solicitation + ICMPTypeTimeExceeded ICMPType = 11 // Time Exceeded + ICMPTypeParameterProblem ICMPType = 12 // Parameter Problem + ICMPTypeTimestamp ICMPType = 13 // Timestamp + ICMPTypeTimestampReply ICMPType = 14 // Timestamp Reply + ICMPTypePhoturis ICMPType = 40 // Photuris + ICMPTypeExtendedEchoRequest ICMPType = 42 // Extended Echo Request + ICMPTypeExtendedEchoReply ICMPType = 43 // Extended Echo Reply +) + +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2018-02-26 +var icmpTypes = map[ICMPType]string{ + 0: "echo reply", + 3: "destination unreachable", + 5: "redirect", + 8: "echo", + 9: "router advertisement", + 10: "router solicitation", + 11: "time exceeded", + 12: "parameter problem", + 13: "timestamp", + 14: "timestamp reply", + 40: "photuris", + 42: "extended echo request", + 43: "extended echo reply", +} diff --git a/vendor/golang.org/x/net/ipv4/icmp.go b/vendor/golang.org/x/net/ipv4/icmp.go new file mode 100644 index 00000000000..9902bb3d2a5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "golang.org/x/net/internal/iana" + +// An ICMPType represents a type of ICMP message. +type ICMPType int + +func (typ ICMPType) String() string { + s, ok := icmpTypes[typ] + if !ok { + return "" + } + return s +} + +// Protocol returns the ICMPv4 protocol number. +func (typ ICMPType) Protocol() int { + return iana.ProtocolICMP +} + +// An ICMPFilter represents an ICMP message filter for incoming +// packets. The filter belongs to a packet delivery path on a host and +// it cannot interact with forwarding packets or tunnel-outer packets. +// +// Note: RFC 8200 defines a reasonable role model and it works not +// only for IPv6 but IPv4. A node means a device that implements IP. +// A router means a node that forwards IP packets not explicitly +// addressed to itself, and a host means a node that is not a router. +type ICMPFilter struct { + icmpFilter +} + +// Accept accepts incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Accept(typ ICMPType) { + f.accept(typ) +} + +// Block blocks incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Block(typ ICMPType) { + f.block(typ) +} + +// SetAll sets the filter action to the filter. +func (f *ICMPFilter) SetAll(block bool) { + f.setAll(block) +} + +// WillBlock reports whether the ICMP type will be blocked. +func (f *ICMPFilter) WillBlock(typ ICMPType) bool { + return f.willBlock(typ) +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_linux.go b/vendor/golang.org/x/net/ipv4/icmp_linux.go new file mode 100644 index 00000000000..6e1c5c80ad1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_linux.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
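The ICMPType constants and the ICMPFilter Accept/Block/SetAll methods above combine with dgramOpt.SetICMPFilter into an allow-list style filter. A minimal application-side sketch (Linux-only per the comments above, and it needs a raw ICMP socket, so root or CAP_NET_RAW; the wildcard listen address is a placeholder):

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("ip4:icmp", "0.0.0.0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv4.NewPacketConn(c)

	var f ipv4.ICMPFilter
	f.SetAll(true)                   // block every ICMP type ...
	f.Accept(ipv4.ICMPTypeEchoReply) // ... except echo replies

	// Only Linux implements this; elsewhere it returns errNotImplemented.
	if err := p.SetICMPFilter(&f); err != nil {
		log.Println(err)
	}
}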
+ +package ipv4 + +func (f *icmpFilter) accept(typ ICMPType) { + f.Data &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpFilter) block(typ ICMPType) { + f.Data |= 1 << (uint32(typ) & 31) +} + +func (f *icmpFilter) setAll(block bool) { + if block { + f.Data = 1<<32 - 1 + } else { + f.Data = 0 + } +} + +func (f *icmpFilter) willBlock(typ ICMPType) bool { + return f.Data&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_stub.go b/vendor/golang.org/x/net/ipv4/icmp_stub.go new file mode 100644 index 00000000000..21bb29ab366 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_stub.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv4 + +const sizeofICMPFilter = 0x0 + +type icmpFilter struct { +} + +func (f *icmpFilter) accept(typ ICMPType) { +} + +func (f *icmpFilter) block(typ ICMPType) { +} + +func (f *icmpFilter) setAll(block bool) { +} + +func (f *icmpFilter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv4/packet.go b/vendor/golang.org/x/net/ipv4/packet.go new file mode 100644 index 00000000000..7d784e06dd0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet.go @@ -0,0 +1,117 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadFrom and WriteTo methods of RawConn +// are not implemented. + +// A packetHandler represents the IPv4 datagram handler. +type packetHandler struct { + *net.IPConn + *socket.Conn + rawOpt +} + +func (c *packetHandler) ok() bool { return c != nil && c.IPConn != nil && c.Conn != nil } + +// ReadFrom reads an IPv4 datagram from the endpoint c, copying the +// datagram into b. It returns the received datagram as the IPv4 +// header h, the payload p and the control message cm. 
+func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + if !c.ok() { + return nil, nil, nil, errInvalidConn + } + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + if err := c.RecvMsg(&m, 0); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + var hs []byte + if hs, p, err = slicePacket(b[:m.N]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if m.NN > 0 { + if compatFreeBSD32 { + adjustFreeBSD32(&m) + } + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + } + if src, ok := m.Addr.(*net.IPAddr); ok && cm != nil { + cm.Src = src.IP + } + return +} + +func slicePacket(b []byte) (h, p []byte, err error) { + if len(b) < HeaderLen { + return nil, nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + return b[:hdrlen], b[hdrlen:], nil +} + +// WriteTo writes an IPv4 datagram through the endpoint c, copying the +// datagram from the IPv4 header h and the payload p. The control +// message cm allows the datagram path and the outgoing interface to be +// specified. Currently only Darwin and Linux support this. The cm +// may be nil if control of the outgoing datagram is not required. +// +// The IPv4 header h must contain appropriate fields that include: +// +// Version = +// Len = +// TOS = +// TotalLen = +// ID = platform sets an appropriate value if ID is zero +// FragOff = +// TTL = +// Protocol = +// Checksum = platform sets an appropriate value if Checksum is zero +// Src = platform sets an appropriate value if Src is nil +// Dst = +// Options = optional +func (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error { + if !c.ok() { + return errInvalidConn + } + m := socket.Message{ + OOB: cm.Marshal(), + } + wh, err := h.Marshal() + if err != nil { + return err + } + m.Buffers = [][]byte{wh, p} + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + m.Addr = dst + if err := c.SendMsg(&m, 0); err != nil { + return &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/payload.go b/vendor/golang.org/x/net/ipv4/payload.go new file mode 100644 index 00000000000..f95f811acd2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. + +// A payloadHandler represents the IPv4 datagram payload handler. 
+type payloadHandler struct { + net.PacketConn + *socket.Conn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go new file mode 100644 index 00000000000..e7614661d7b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -0,0 +1,84 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, errInvalidConn + } + c.rawOpt.RLock() + m := socket.Message{ + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + m.Buffers = [][]byte{b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + h := make([]byte, HeaderLen) + m.Buffers = [][]byte{h, b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + hdrlen := int(h[0]&0x0f) << 2 + if hdrlen > len(h) { + d := hdrlen - len(h) + copy(b, b[d:]) + m.N -= d + } else { + m.N -= hdrlen + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + if m.NN > 0 { + if compatFreeBSD32 { + adjustFreeBSD32(&m) + } + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP4(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, errInvalidConn + } + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err = c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go new file mode 100644 index 00000000000..1116256f245 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -0,0 +1,39 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package ipv4 + +import "net" + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, errInvalidConn + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, errInvalidConn + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt.go b/vendor/golang.org/x/net/ipv4/sockopt.go new file mode 100644 index 00000000000..22e90c0392c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt.go @@ -0,0 +1,44 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "golang.org/x/net/internal/socket" + +// Sticky socket options +const ( + ssoTOS = iota // header field for unicast packet + ssoTTL // header field for unicast packet + ssoMulticastTTL // header field for multicast packet + ssoMulticastInterface // outbound interface for multicast packet + ssoMulticastLoopback // loopback for multicast packet + ssoReceiveTTL // header field on received packet + ssoReceiveDst // header field on received packet + ssoReceiveInterface // inbound interface on received packet + ssoPacketInfo // incbound or outbound packet path + ssoHeaderPrepend // ipv4 header prepend + ssoStripHeader // strip ipv4 header + ssoICMPFilter // icmp filter + ssoJoinGroup // any-source multicast + ssoLeaveGroup // any-source multicast + ssoJoinSourceGroup // source-specific multicast + ssoLeaveSourceGroup // source-specific multicast + ssoBlockSourceGroup // any-source or source-specific multicast + ssoUnblockSourceGroup // any-source or source-specific multicast + ssoAttachFilter // attach BPF for filtering inbound traffic +) + +// Sticky socket option value types +const ( + ssoTypeIPMreq = iota + 1 + ssoTypeIPMreqn + ssoTypeGroupReq + ssoTypeGroupSourceReq +) + +// A sockOpt represents a binding for sticky socket option. +type sockOpt struct { + socket.Option + typ int // hint for option value type; optional +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/vendor/golang.org/x/net/ipv4/sockopt_posix.go new file mode 100644 index 00000000000..dea64519d8e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_posix.go @@ -0,0 +1,71 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
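The payloadHandler.ReadFrom variants above (with and without control-message support) surface per-packet metadata through a ControlMessage when the corresponding flags are enabled. A short application-side sketch, assuming the FlagTTL/FlagSrc control flags defined elsewhere in this vendor drop; the loopback address is a placeholder:

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv4.NewPacketConn(c)
	// Ask for TTL and source address on each received datagram; some
	// platforms reject some flags, which is why the error is only logged.
	if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagSrc, true); err != nil {
		log.Println(err)
	}

	b := make([]byte, 1500)
	n, cm, src, err := p.ReadFrom(b) // blocks until a datagram arrives
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes from %v, control message: %v", n, src, cm)
}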
+ +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + switch so.typ { + case ssoTypeIPMreqn: + return so.getIPMreqn(c) + default: + return so.getMulticastIf(c) + } +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + switch so.typ { + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, nil) + default: + return so.setMulticastIf(c, ifi) + } +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPFilter { + return nil, errNotImplemented + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPFilter]byte)(unsafe.Pointer(f))[:sizeofICMPFilter] + return so.Set(c, b) +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errNotImplemented + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_stub.go new file mode 100644 index 00000000000..37d4806b342 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errNotImplemented +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errNotImplemented +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/sys_aix.go b/vendor/golang.org/x/net/ipv4/sys_aix.go new file mode 100644 index 00000000000..3d1201e6d7c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_aix.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Added for go1.11 compatibility +// +build aix + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq.go b/vendor/golang.org/x/net/ipv4/sys_asmreq.go new file mode 100644 index 00000000000..76d670acaa9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq.go @@ -0,0 +1,122 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd netbsd openbsd solaris windows + +package ipv4 + +import ( + "errors" + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var errNoSuchInterface = errors.New("no such interface") + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + mreq := ipMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} + if err := setIPMreqInterface(&mreq, ifi); err != nil { + return err + } + b := (*[sizeofIPMreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPMreq] + return so.Set(c, b) +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + var b [4]byte + if _, err := so.Get(c, b[:]); err != nil { + return nil, err + } + ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + ip, err := netInterfaceToIP4(ifi) + if err != nil { + return err + } + var b [4]byte + copy(b[:], ip) + return so.Set(c, b[:]) +} + +func setIPMreqInterface(mreq *ipMreq, ifi *net.Interface) error { + if ifi == nil { + return nil + } + ifat, err := ifi.Addrs() + if err != nil { + return err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + } + } + return errNoSuchInterface +} + +func netIP4ToInterface(ip net.IP) (*net.Interface, error) { + ift, err := net.Interfaces() + if err != nil { + return nil, err + } + for _, ifi := range ift { + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + case *net.IPNet: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + } + } + } + return nil, errNoSuchInterface +} + +func netInterfaceToIP4(ifi *net.Interface) (net.IP, error) { + if ifi == nil { + return net.IPv4zero.To4(), nil + } + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + } + } + return nil, errNoSuchInterface +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go new file mode 100644 index 00000000000..6dc339ce67a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !aix,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go new file mode 100644 index 00000000000..1f24f69f3b0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + b := make([]byte, so.Len) + if _, err := so.Get(c, b); err != nil { + return nil, err + } + mreqn := (*ipMreqn)(unsafe.Pointer(&b[0])) + if mreqn.Ifindex == 0 { + return nil, nil + } + ifi, err := net.InterfaceByIndex(int(mreqn.Ifindex)) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreqn ipMreqn + if ifi != nil { + mreqn.Ifindex = int32(ifi.Index) + } + if grp != nil { + mreqn.Multiaddr = [4]byte{grp[0], grp[1], grp[2], grp[3]} + } + b := (*[sizeofIPMreqn]byte)(unsafe.Pointer(&mreqn))[:sizeofIPMreqn] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go new file mode 100644 index 00000000000..48ef55624ec --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf.go b/vendor/golang.org/x/net/ipv4/sys_bpf.go new file mode 100644 index 00000000000..5c03dce3b77 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bpf.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" + "golang.org/x/sys/unix" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := unix.SockFprog{ + Len: uint16(len(f)), + Filter: (*unix.SockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[unix.SizeofSockFprog]byte)(unsafe.Pointer(&prog))[:unix.SizeofSockFprog] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go new file mode 100644 index 00000000000..5c98642716b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv4 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bsd.go b/vendor/golang.org/x/net/ipv4/sys_bsd.go new file mode 100644 index 00000000000..58256dd9d6f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_darwin.go b/vendor/golang.org/x/net/ipv4/sys_darwin.go new file mode 100644 index 00000000000..ac213c73509 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_darwin.go @@ -0,0 +1,65 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoStripHeader: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_STRIPHDR, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_dragonfly.go b/vendor/golang.org/x/net/ipv4/sys_dragonfly.go new file mode 100644 index 00000000000..859764f33a5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_dragonfly.go @@ -0,0 +1,35 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_freebsd.go b/vendor/golang.org/x/net/ipv4/sys_freebsd.go new file mode 100644 index 00000000000..482873d9a15 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_freebsd.go @@ -0,0 +1,76 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "runtime" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func init() { + freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") + if freebsdVersion >= 1000000 { + sockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn} + } + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + compatFreeBSD32 = true + break + } + } + } +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_linux.go b/vendor/golang.org/x/net/ipv4/sys_linux.go new file mode 100644 index 00000000000..cf755c7fbaf --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_linux.go @@ -0,0 +1,60 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" + "golang.org/x/sys/unix" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_TTL, 1, marshalTTL, parseTTL}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_PKTINFO, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysICMP_FILTER, Len: sizeofICMPFilter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: unix.SOL_SOCKET, Name: unix.SO_ATTACH_FILTER, Len: unix.SizeofSockFprog}}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_solaris.go b/vendor/golang.org/x/net/ipv4/sys_solaris.go new file mode 100644 index 00000000000..832fef1e2e2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_solaris.go @@ -0,0 +1,57 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package ipv4
+
+import (
+ "net"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/net/internal/iana"
+ "golang.org/x/net/internal/socket"
+)
+
+var (
+ ctlOpts = [ctlMax]ctlOpt{
+ ctlTTL: {sysIP_RECVTTL, 4, marshalTTL, parseTTL},
+ ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo},
+ }
+
+ sockOpts = map[int]*sockOpt{
+ ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}},
+ ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}},
+ ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}},
+ ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}},
+ ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}},
+ ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}},
+ ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}},
+ ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}},
+ ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},
+ ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},
+ ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+ ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+ ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+ ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+ }
+)
+
+func (pi *inetPktinfo) setIfindex(i int) {
+ pi.Ifindex = uint32(i)
+}
+
+func (gr *groupReq) setGroup(grp net.IP) {
+ sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4))
+ sa.Family = syscall.AF_INET
+ copy(sa.Addr[:], grp)
+}
+
+func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {
+ sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4))
+ sa.Family = syscall.AF_INET
+ copy(sa.Addr[:], grp)
+ sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260))
+ sa.Family = syscall.AF_INET
+ copy(sa.Addr[:], src)
+} diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go new file mode 100644 index 00000000000..eeced7f3138 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go @@ -0,0 +1,52 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin freebsd linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if compatFreeBSD32 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if compatFreeBSD32 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go new file mode 100644 index 00000000000..c0921674b05 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux,!solaris + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/sys_stub.go b/vendor/golang.org/x/net/ipv4/sys_stub.go new file mode 100644 index 00000000000..b9c85b334fc --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv4/sys_windows.go b/vendor/golang.org/x/net/ipv4/sys_windows.go new file mode 100644 index 00000000000..b0913d539c3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_windows.go @@ -0,0 +1,67 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +const ( + // See ws2tcpip.h. 
+ sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_DONTFRAGMENT = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0xf + sysIP_DROP_SOURCE_MEMBERSHIP = 0x10 + sysIP_PKTINFO = 0x13 + + sizeofInetPktinfo = 0x8 + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc +) + +type inetPktinfo struct { + Addr [4]byte + Ifindex int32 +} + +type ipMreq struct { + Multiaddr [4]byte + Interface [4]byte +} + +type ipMreqSource struct { + Multiaddr [4]byte + Sourceaddr [4]byte + Interface [4]byte +} + +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms738586(v=vs.85).aspx +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go new file mode 100644 index 00000000000..c741d5c8ea9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go @@ -0,0 +1,33 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_aix.go + +// Added for go1.11 compatibility +// +build aix + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x20 + sysIP_RECVTTL = 0x22 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_darwin.go b/vendor/golang.org/x/net/ipv4/zsys_darwin.go new file mode 100644 index 00000000000..e05a251ba07 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_darwin.go @@ -0,0 +1,99 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_darwin.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_STRIPHDR = 0x17 + sysIP_RECVTTL = 0x18 + sysIP_BOUND_IF = 0x19 + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_MULTICAST_IFINDEX = 0x42 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go new file mode 100644 index 00000000000..6d65e9fcb81 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go @@ -0,0 +1,31 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_dragonfly.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x41 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go new file mode 100644 index 00000000000..136e2b8f1d6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go @@ -0,0 +1,93 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go new file mode 100644 index 00000000000..4f730f19ef3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go @@ -0,0 +1,95 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go new file mode 100644 index 00000000000..4f730f19ef3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go @@ -0,0 +1,95 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm64.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm64.go new file mode 100644 index 00000000000..ecebf327239 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm64.go @@ -0,0 +1,93 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]uint8 + X__ss_align int64 + X__ss_pad2 [112]uint8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_386.go b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go new file mode 100644 index 00000000000..1c7fdfa13af --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go @@ -0,0 +1,130 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go new file mode 100644 index 00000000000..a04e785187a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go @@ -0,0 +1,132 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go new file mode 100644 index 00000000000..1c7fdfa13af --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go @@ -0,0 +1,130 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go new file mode 100644 index 00000000000..a04e785187a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go @@ -0,0 +1,132 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go new file mode 100644 index 00000000000..1c7fdfa13af --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go @@ -0,0 +1,130 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go new file mode 100644 index 00000000000..a04e785187a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go @@ -0,0 +1,132 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go new file mode 100644 index 00000000000..a04e785187a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go @@ -0,0 +1,132 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go new file mode 100644 index 00000000000..1c7fdfa13af --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go @@ -0,0 +1,130 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go new file mode 100644 index 00000000000..3c5ea54731e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go @@ -0,0 +1,130 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go new file mode 100644 index 00000000000..a04e785187a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go @@ -0,0 +1,132 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go new file mode 100644 index 00000000000..a04e785187a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go @@ -0,0 +1,132 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go new file mode 100644 index 00000000000..e626134a8b5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go @@ -0,0 +1,134 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +// +build riscv64 + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go new file mode 100644 index 00000000000..a04e785187a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go @@ -0,0 +1,132 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_netbsd.go b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go new file mode 100644 index 00000000000..8cfc648ad7e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go @@ -0,0 +1,30 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_netbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x17 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_openbsd.go b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go new file mode 100644 index 00000000000..37629cb0ab5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go @@ -0,0 +1,30 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x1e + sysIP_RECVTTL = 0x1f + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_solaris.go b/vendor/golang.org/x/net/ipv4/zsys_solaris.go new file mode 100644 index 00000000000..cb80a308b0e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_solaris.go @@ -0,0 +1,100 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_solaris.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x9 + sysIP_RECVSLLA = 0xa + sysIP_RECVTTL = 0xb + + sysIP_MULTICAST_IF = 0x10 + sysIP_MULTICAST_TTL = 0x11 + sysIP_MULTICAST_LOOP = 0x12 + sysIP_ADD_MEMBERSHIP = 0x13 + sysIP_DROP_MEMBERSHIP = 0x14 + sysIP_BLOCK_SOURCE = 0x15 + sysIP_UNBLOCK_SOURCE = 0x16 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x17 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x18 + sysIP_NEXTHOP = 0x19 + + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + sysIP_DONTFRAG = 0x1b + + sysIP_BOUND_IF = 0x41 + sysIP_UNSPEC_SRC = 0x42 + sysIP_BROADCAST_TTL = 0x43 + sysIP_DHCPINIT_IF = 0x45 + + sysIP_REUSEADDR = 0x104 + sysIP_DONTROUTE = 0x105 + sysIP_BROADCAST = 0x106 + + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + 
Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} diff --git a/vendor/golang.org/x/net/ipv6/batch.go b/vendor/golang.org/x/net/ipv6/batch.go new file mode 100644 index 00000000000..2ccb9849c78 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/batch.go @@ -0,0 +1,116 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "runtime" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. 
+func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/golang.org/x/net/ipv6/control.go b/vendor/golang.org/x/net/ipv6/control.go new file mode 100644 index 00000000000..2da644413b4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control.go @@ -0,0 +1,187 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +// Note that RFC 3542 obsoletes RFC 2292 but OS X Snow Leopard and the +// former still support RFC 2292 only. Please be aware that almost +// all protocol implementations prohibit using a combination of RFC +// 2292 and RFC 3542 for some practical reasons. + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +// A ControlFlags represents per packet basis IP-level socket option +// control flags. +type ControlFlags uint + +const ( + FlagTrafficClass ControlFlags = 1 << iota // pass the traffic class on the received packet + FlagHopLimit // pass the hop limit on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet + FlagPathMTU // pass the path MTU on the received packet path +) + +const flagPacketInfo = FlagDst | FlagInterface + +// A ControlMessage represents per packet basis IP-level socket +// options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn allows to send the options to the + // protocol stack. + // + TrafficClass int // traffic class, must be 1 <= value <= 255 when specifying + HopLimit int // hop limit, must be 1 <= value <= 255 when specifying + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying + NextHop net.IP // next hop address, specifying only + MTU int // path MTU, receiving only +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("tclass=%#x hoplim=%d src=%v dst=%v ifindex=%d nexthop=%v mtu=%d", cm.TrafficClass, cm.HopLimit, cm.Src, cm.Dst, cm.IfIndex, cm.NextHop, cm.MTU) +} + +// Marshal returns the binary encoding of cm. 
+func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var l int + tclass := false + if ctlOpts[ctlTrafficClass].name > 0 && cm.TrafficClass > 0 { + tclass = true + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + hoplimit := false + if ctlOpts[ctlHopLimit].name > 0 && cm.HopLimit > 0 { + hoplimit = true + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + pktinfo := false + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To16() != nil && cm.Src.To4() == nil || cm.IfIndex > 0) { + pktinfo = true + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + nexthop := false + if ctlOpts[ctlNextHop].name > 0 && cm.NextHop.To16() != nil && cm.NextHop.To4() == nil { + nexthop = true + l += socket.ControlMessageSpace(ctlOpts[ctlNextHop].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + bb := b + if tclass { + bb = ctlOpts[ctlTrafficClass].marshal(bb, cm) + } + if hoplimit { + bb = ctlOpts[ctlHopLimit].marshal(bb, cm) + } + if pktinfo { + bb = ctlOpts[ctlPacketInfo].marshal(bb, cm) + } + if nexthop { + bb = ctlOpts[ctlNextHop].marshal(bb, cm) + } + } + return b +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIPv6 { + continue + } + switch { + case typ == ctlOpts[ctlTrafficClass].name && l >= ctlOpts[ctlTrafficClass].length: + ctlOpts[ctlTrafficClass].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlHopLimit].name && l >= ctlOpts[ctlHopLimit].length: + ctlOpts[ctlHopLimit].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPathMTU].name && l >= ctlOpts[ctlPathMTU].length: + ctlOpts[ctlPathMTU].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. +func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPathMTU].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + +// Ancillary data socket options +const ( + ctlTrafficClass = iota // header field + ctlHopLimit // header field + ctlPacketInfo // inbound or outbound packet path + ctlNextHop // nexthop + ctlPathMTU // path mtu + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. 
+type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go new file mode 100644 index 00000000000..9fd9eb15e3b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package ipv6 + +import ( + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshal2292HopLimit(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292HOPLIMIT, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) + } + return m.Next(4) +} + +func marshal2292PacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292PKTINFO, sizeofInet6Pktinfo) + if cm != nil { + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInet6Pktinfo) +} + +func marshal2292NextHop(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292NEXTHOP, sizeofSockaddrInet6) + if cm != nil { + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return m.Next(sizeofSockaddrInet6) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go new file mode 100644 index 00000000000..8c221b59895 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -0,0 +1,94 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalTrafficClass(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_TCLASS, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.TrafficClass)) + } + return m.Next(4) +} + +func parseTrafficClass(cm *ControlMessage, b []byte) { + cm.TrafficClass = int(socket.NativeEndian.Uint32(b[:4])) +} + +func marshalHopLimit(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_HOPLIMIT, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) + } + return m.Next(4) +} + +func parseHopLimit(cm *ControlMessage, b []byte) { + cm.HopLimit = int(socket.NativeEndian.Uint32(b[:4])) +} + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PKTINFO, sizeofInet6Pktinfo) + if cm != nil { + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInet6Pktinfo) +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*inet6Pktinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, pi.Addr[:]) + cm.IfIndex = int(pi.Ifindex) +} + +func marshalNextHop(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_NEXTHOP, sizeofSockaddrInet6) + if cm != nil { + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return m.Next(sizeofSockaddrInet6) +} + +func parseNextHop(cm *ControlMessage, b []byte) { +} + +func marshalPathMTU(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PATHMTU, sizeofIPv6Mtuinfo) + return m.Next(sizeofIPv6Mtuinfo) +} + +func parsePathMTU(cm *ControlMessage, b []byte) { + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, mi.Addr.Addr[:]) + cm.IfIndex = int(mi.Addr.Scope_id) + cm.MTU = int(mi.Mtu) +} diff --git a/vendor/golang.org/x/net/ipv6/control_stub.go b/vendor/golang.org/x/net/ipv6/control_stub.go new file mode 100644 index 00000000000..1d773cbcc8e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_stub.go @@ -0,0 +1,13 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/control_unix.go b/vendor/golang.org/x/net/ipv6/control_unix.go new file mode 100644 index 00000000000..0971a008bf4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_unix.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if so, ok := sockOpts[ssoReceiveTrafficClass]; ok && cf&FlagTrafficClass != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTrafficClass) + } else { + opt.clear(FlagTrafficClass) + } + } + if so, ok := sockOpts[ssoReceiveHopLimit]; ok && cf&FlagHopLimit != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagHopLimit) + } else { + opt.clear(FlagHopLimit) + } + } + if so, ok := sockOpts[ssoReceivePacketInfo]; ok && cf&flagPacketInfo != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & flagPacketInfo) + } else { + opt.clear(cf & flagPacketInfo) + } + } + if so, ok := sockOpts[ssoReceivePathMTU]; ok && cf&FlagPathMTU != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagPathMTU) + } else { + opt.clear(FlagPathMTU) + } + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/control_windows.go b/vendor/golang.org/x/net/ipv6/control_windows.go new file mode 100644 index 00000000000..8882d81934d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_windows.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/dgramopt.go b/vendor/golang.org/x/net/ipv6/dgramopt.go new file mode 100644 index 00000000000..1f422e71dc3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/dgramopt.go @@ -0,0 +1,301 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + + "golang.org/x/net/bpf" +) + +// MulticastHopLimit returns the hop limit field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastHopLimit() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetMulticastHopLimit sets the hop limit field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, hoplim) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. +func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, errInvalidConn + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errNotImplemented + } + return so.getMulticastInterface(c.Conn) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. 
+func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errNotImplemented + } + return so.setMulticastInterface(c.Conn, ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, errInvalidConn + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errNotImplemented + } + on, err := so.GetInt(c.Conn) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. 
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// Checksum reports whether the kernel will compute, store or verify a +// checksum for both incoming and outgoing packets. If on is true, it +// returns an offset in bytes into the data of where the checksum +// field is located. +func (c *dgramOpt) Checksum() (on bool, offset int, err error) { + if !c.ok() { + return false, 0, errInvalidConn + } + so, ok := sockOpts[ssoChecksum] + if !ok { + return false, 0, errNotImplemented + } + offset, err = so.GetInt(c.Conn) + if err != nil { + return false, 0, err + } + if offset < 0 { + return false, 0, nil + } + return true, offset, nil +} + +// SetChecksum enables the kernel checksum processing. If on is ture, +// the offset should be an offset in bytes into the data of where the +// checksum field is located. +func (c *dgramOpt) SetChecksum(on bool, offset int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoChecksum] + if !ok { + return errNotImplemented + } + if !on { + offset = -1 + } + return so.SetInt(c.Conn, offset) +} + +// ICMPFilter returns an ICMP filter. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, errInvalidConn + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errNotImplemented + } + return so.getICMPFilter(c.Conn) +} + +// SetICMPFilter deploys the ICMP filter. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errNotImplemented + } + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. 
+func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errNotImplemented + } + return so.setBPF(c.Conn, filter) +} diff --git a/vendor/golang.org/x/net/ipv6/doc.go b/vendor/golang.org/x/net/ipv6/doc.go new file mode 100644 index 00000000000..e0be9d50d70 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/doc.go @@ -0,0 +1,243 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv6 implements IP-level socket options for the Internet +// Protocol version 6. +// +// The package provides IP-level socket options that allow +// manipulation of IPv6 facilities. +// +// The IPv6 protocol is defined in RFC 8200. +// Socket interface extensions are defined in RFC 3493, RFC 3542 and +// RFC 3678. +// MLDv1 and MLDv2 are defined in RFC 2710 and RFC 3810. +// Source-specific multicast is defined in RFC 4607. +// +// On Darwin, this package requires OS X Mavericks version 10.9 or +// above, or equivalent. +// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn which are created as network connections +// that use the IPv6 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, Conn is used to set the traffic class field on the IPv6 +// header for each packet. +// +// ln, err := net.Listen("tcp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. +// +// if err := ipv6.NewConn(c).SetTrafficClass(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPConn which are created as network connections that use the +// IPv6 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups. +// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.ParseIP("ff02::114") +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups, starts listening to +// the groups on the specified network interfaces. Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv6 and Ethernet. 
+// +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might set per packet control message transmissions +// between the protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of PacketConn is used to enable control message +// transmissions. +// +// if err := p.SetControlMessage(ipv6.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, rcm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if rcm.Dst.IsMulticast() { +// if rcm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets. +// +// p.SetTrafficClass(0x0) +// p.SetHopLimit(16) +// if _, err := p.WriteTo(data[:n], nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// wcm := ipv6.ControlMessage{TrafficClass: 0xe0, HopLimit: 1} +// for _, ifi := range []*net.Interface{en0, en1} { +// wcm.IfIndex = ifi.Index +// if _, err := p.WriteTo(data[:n], &wcm, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn may join multiple multicast +// groups. For example, a UDP listener with port 1024 might join two +// different groups across over two different network interfaces by +// using: +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP. +// +// c1, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv6.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// p2 := ipv6.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. 
+// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff01::114")}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn on MLDv2 supported platform is +// able to join source-specific multicast groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.ParseIP("ff32::8000:9")} +// ssmsource := net.UDPAddr{IP: net.ParseIP("fe80::cafe")} +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode. +// +// exclsource := net.UDPAddr{IP: net.ParseIP("fe80::dead")} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that it depends on each platform implementation what happens +// when an application which runs on MLDv2 unsupported platform uses +// JoinSourceSpecificGroup and LeaveSourceSpecificGroup. +// In general the platform tries to fall back to conversations using +// MLDv1 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. +package ipv6 // import "golang.org/x/net/ipv6" + +// BUG(mikio): This package is not implemented on JS, NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv6/endpoint.go b/vendor/golang.org/x/net/ipv6/endpoint.go new file mode 100644 index 00000000000..f534a0bf38d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/endpoint.go @@ -0,0 +1,127 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "time" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn are not +// implemented. + +// A Conn represents a network endpoint that uses IPv6 transport. +// It allows to set basic IP-level socket options such as traffic +// class and hop limit. +type Conn struct { + genericOpt +} + +type genericOpt struct { + *socket.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// PathMTU returns a path MTU value for the destination associated +// with the endpoint. +func (c *Conn) PathMTU() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoPathMTU] + if !ok { + return 0, errNotImplemented + } + _, mtu, err := so.getMTUInfo(c.Conn) + if err != nil { + return 0, err + } + return mtu, nil +} + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) + return &Conn{ + genericOpt: genericOpt{Conn: cc}, + } +} + +// A PacketConn represents a packet network endpoint that uses IPv6 +// transport. 
It is used to control several IP-level socket options +// including IPv6 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv6 and higher layer +// protocols such as OSPF, GRE, and UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + *socket.Conn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } + +// SetControlMessage allows to receive the per packet basis IP-level +// socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) + return &PacketConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, + } +} diff --git a/vendor/golang.org/x/net/ipv6/genericopt.go b/vendor/golang.org/x/net/ipv6/genericopt.go new file mode 100644 index 00000000000..0326aed6def --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/genericopt.go @@ -0,0 +1,56 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +// TrafficClass returns the traffic class field value for outgoing +// packets. +func (c *genericOpt) TrafficClass() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetTrafficClass sets the traffic class field value for future +// outgoing packets. +func (c *genericOpt) SetTrafficClass(tclass int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, tclass) +} + +// HopLimit returns the hop limit field value for outgoing packets. +func (c *genericOpt) HopLimit() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoHopLimit] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetHopLimit sets the hop limit field value for future outgoing +// packets. 
+func (c *genericOpt) SetHopLimit(hoplim int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoHopLimit] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, hoplim) +} diff --git a/vendor/golang.org/x/net/ipv6/header.go b/vendor/golang.org/x/net/ipv6/header.go new file mode 100644 index 00000000000..e05cb08b21c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/header.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "encoding/binary" + "fmt" + "net" +) + +const ( + Version = 6 // protocol version + HeaderLen = 40 // header length +) + +// A Header represents an IPv6 base header. +type Header struct { + Version int // protocol version + TrafficClass int // traffic class + FlowLabel int // flow label + PayloadLen int // payload length + NextHeader int // next header + HopLimit int // hop limit + Src net.IP // source address + Dst net.IP // destination address +} + +func (h *Header) String() string { + if h == nil { + return "" + } + return fmt.Sprintf("ver=%d tclass=%#x flowlbl=%#x payloadlen=%d nxthdr=%d hoplim=%d src=%v dst=%v", h.Version, h.TrafficClass, h.FlowLabel, h.PayloadLen, h.NextHeader, h.HopLimit, h.Src, h.Dst) +} + +// ParseHeader parses b as an IPv6 base header. +func ParseHeader(b []byte) (*Header, error) { + if len(b) < HeaderLen { + return nil, errHeaderTooShort + } + h := &Header{ + Version: int(b[0]) >> 4, + TrafficClass: int(b[0]&0x0f)<<4 | int(b[1])>>4, + FlowLabel: int(b[1]&0x0f)<<16 | int(b[2])<<8 | int(b[3]), + PayloadLen: int(binary.BigEndian.Uint16(b[4:6])), + NextHeader: int(b[6]), + HopLimit: int(b[7]), + } + h.Src = make(net.IP, net.IPv6len) + copy(h.Src, b[8:24]) + h.Dst = make(net.IP, net.IPv6len) + copy(h.Dst, b[24:40]) + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv6/helper.go b/vendor/golang.org/x/net/ipv6/helper.go new file mode 100644 index 00000000000..c2d508f9c30 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/helper.go @@ -0,0 +1,58 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "errors" + "net" + "runtime" +) + +var ( + errInvalidConn = errors.New("invalid connection") + errMissingAddress = errors.New("missing address") + errHeaderTooShort = errors.New("header too short") + errInvalidConnType = errors.New("invalid conn type") + errNotImplemented = errors.New("not implemented on " + runtime.GOOS + "/" + runtime.GOARCH) +) + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP16(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + } + return nil +} + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/golang.org/x/net/ipv6/iana.go b/vendor/golang.org/x/net/ipv6/iana.go new file mode 100644 index 00000000000..32db1aa9496 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/iana.go @@ -0,0 +1,86 @@ +// go generate gen.go +// Code generated by the command above; DO NOT EDIT. 
+ +package ipv6 + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2018-03-09 +const ( + ICMPTypeDestinationUnreachable ICMPType = 1 // Destination Unreachable + ICMPTypePacketTooBig ICMPType = 2 // Packet Too Big + ICMPTypeTimeExceeded ICMPType = 3 // Time Exceeded + ICMPTypeParameterProblem ICMPType = 4 // Parameter Problem + ICMPTypeEchoRequest ICMPType = 128 // Echo Request + ICMPTypeEchoReply ICMPType = 129 // Echo Reply + ICMPTypeMulticastListenerQuery ICMPType = 130 // Multicast Listener Query + ICMPTypeMulticastListenerReport ICMPType = 131 // Multicast Listener Report + ICMPTypeMulticastListenerDone ICMPType = 132 // Multicast Listener Done + ICMPTypeRouterSolicitation ICMPType = 133 // Router Solicitation + ICMPTypeRouterAdvertisement ICMPType = 134 // Router Advertisement + ICMPTypeNeighborSolicitation ICMPType = 135 // Neighbor Solicitation + ICMPTypeNeighborAdvertisement ICMPType = 136 // Neighbor Advertisement + ICMPTypeRedirect ICMPType = 137 // Redirect Message + ICMPTypeRouterRenumbering ICMPType = 138 // Router Renumbering + ICMPTypeNodeInformationQuery ICMPType = 139 // ICMP Node Information Query + ICMPTypeNodeInformationResponse ICMPType = 140 // ICMP Node Information Response + ICMPTypeInverseNeighborDiscoverySolicitation ICMPType = 141 // Inverse Neighbor Discovery Solicitation Message + ICMPTypeInverseNeighborDiscoveryAdvertisement ICMPType = 142 // Inverse Neighbor Discovery Advertisement Message + ICMPTypeVersion2MulticastListenerReport ICMPType = 143 // Version 2 Multicast Listener Report + ICMPTypeHomeAgentAddressDiscoveryRequest ICMPType = 144 // Home Agent Address Discovery Request Message + ICMPTypeHomeAgentAddressDiscoveryReply ICMPType = 145 // Home Agent Address Discovery Reply Message + ICMPTypeMobilePrefixSolicitation ICMPType = 146 // Mobile Prefix Solicitation + ICMPTypeMobilePrefixAdvertisement ICMPType = 147 // Mobile Prefix Advertisement + ICMPTypeCertificationPathSolicitation ICMPType = 148 // Certification Path Solicitation Message + ICMPTypeCertificationPathAdvertisement ICMPType = 149 // Certification Path Advertisement Message + ICMPTypeMulticastRouterAdvertisement ICMPType = 151 // Multicast Router Advertisement + ICMPTypeMulticastRouterSolicitation ICMPType = 152 // Multicast Router Solicitation + ICMPTypeMulticastRouterTermination ICMPType = 153 // Multicast Router Termination + ICMPTypeFMIPv6 ICMPType = 154 // FMIPv6 Messages + ICMPTypeRPLControl ICMPType = 155 // RPL Control Message + ICMPTypeILNPv6LocatorUpdate ICMPType = 156 // ILNPv6 Locator Update Message + ICMPTypeDuplicateAddressRequest ICMPType = 157 // Duplicate Address Request + ICMPTypeDuplicateAddressConfirmation ICMPType = 158 // Duplicate Address Confirmation + ICMPTypeMPLControl ICMPType = 159 // MPL Control Message + ICMPTypeExtendedEchoRequest ICMPType = 160 // Extended Echo Request + ICMPTypeExtendedEchoReply ICMPType = 161 // Extended Echo Reply +) + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2018-03-09 +var icmpTypes = map[ICMPType]string{ + 1: "destination unreachable", + 2: "packet too big", + 3: "time exceeded", + 4: "parameter problem", + 128: "echo request", + 129: "echo reply", + 130: "multicast listener query", + 131: "multicast listener report", + 132: "multicast listener done", + 133: "router solicitation", + 134: "router advertisement", + 135: "neighbor solicitation", + 136: "neighbor advertisement", + 137: "redirect message", + 138: "router renumbering", + 139: "icmp node information query", + 
140: "icmp node information response", + 141: "inverse neighbor discovery solicitation message", + 142: "inverse neighbor discovery advertisement message", + 143: "version 2 multicast listener report", + 144: "home agent address discovery request message", + 145: "home agent address discovery reply message", + 146: "mobile prefix solicitation", + 147: "mobile prefix advertisement", + 148: "certification path solicitation message", + 149: "certification path advertisement message", + 151: "multicast router advertisement", + 152: "multicast router solicitation", + 153: "multicast router termination", + 154: "fmipv6 messages", + 155: "rpl control message", + 156: "ilnpv6 locator update message", + 157: "duplicate address request", + 158: "duplicate address confirmation", + 159: "mpl control message", + 160: "extended echo request", + 161: "extended echo reply", +} diff --git a/vendor/golang.org/x/net/ipv6/icmp.go b/vendor/golang.org/x/net/ipv6/icmp.go new file mode 100644 index 00000000000..b7f48e27b83 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "golang.org/x/net/internal/iana" + +// BUG(mikio): On Windows, methods related to ICMPFilter are not +// implemented. + +// An ICMPType represents a type of ICMP message. +type ICMPType int + +func (typ ICMPType) String() string { + s, ok := icmpTypes[typ] + if !ok { + return "" + } + return s +} + +// Protocol returns the ICMPv6 protocol number. +func (typ ICMPType) Protocol() int { + return iana.ProtocolIPv6ICMP +} + +// An ICMPFilter represents an ICMP message filter for incoming +// packets. The filter belongs to a packet delivery path on a host and +// it cannot interact with forwarding packets or tunnel-outer packets. +// +// Note: RFC 8200 defines a reasonable role model. A node means a +// device that implements IP. A router means a node that forwards IP +// packets not explicitly addressed to itself, and a host means a node +// that is not a router. +type ICMPFilter struct { + icmpv6Filter +} + +// Accept accepts incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Accept(typ ICMPType) { + f.accept(typ) +} + +// Block blocks incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Block(typ ICMPType) { + f.block(typ) +} + +// SetAll sets the filter action to the filter. +func (f *ICMPFilter) SetAll(block bool) { + f.setAll(block) +} + +// WillBlock reports whether the ICMP type will be blocked. +func (f *ICMPFilter) WillBlock(typ ICMPType) bool { + return f.willBlock(typ) +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/vendor/golang.org/x/net/ipv6/icmp_bsd.go new file mode 100644 index 00000000000..b03025cdccf --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_bsd.go @@ -0,0 +1,29 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd netbsd openbsd + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.Filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.Filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.Filt { + if block { + f.Filt[i] = 0 + } else { + f.Filt[i] = 1<<32 - 1 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.Filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_linux.go b/vendor/golang.org/x/net/ipv6/icmp_linux.go new file mode 100644 index 00000000000..647f6b44fff --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_linux.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.Data[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.Data[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.Data { + if block { + f.Data[i] = 1<<32 - 1 + } else { + f.Data[i] = 0 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.Data[typ>>5]&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_solaris.go b/vendor/golang.org/x/net/ipv6/icmp_solaris.go new file mode 100644 index 00000000000..7c23bb1cf6f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_solaris.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.X__icmp6_filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.X__icmp6_filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.X__icmp6_filt { + if block { + f.X__icmp6_filt[i] = 0 + } else { + f.X__icmp6_filt[i] = 1<<32 - 1 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.X__icmp6_filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_stub.go b/vendor/golang.org/x/net/ipv6/icmp_stub.go new file mode 100644 index 00000000000..370e51acd1f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +type icmpv6Filter struct { +} + +func (f *icmpv6Filter) accept(typ ICMPType) { +} + +func (f *icmpv6Filter) block(typ ICMPType) { +} + +func (f *icmpv6Filter) setAll(block bool) { +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_windows.go b/vendor/golang.org/x/net/ipv6/icmp_windows.go new file mode 100644 index 00000000000..443cd073676 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_windows.go @@ -0,0 +1,22 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) block(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) setAll(block bool) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + // TODO(mikio): implement this + return false +} diff --git a/vendor/golang.org/x/net/ipv6/payload.go b/vendor/golang.org/x/net/ipv6/payload.go new file mode 100644 index 00000000000..a8197f16958 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. + +// A payloadHandler represents the IPv6 datagram payload handler. +type payloadHandler struct { + net.PacketConn + *socket.Conn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go new file mode 100644 index 00000000000..284a04278ed --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -0,0 +1,70 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, errInvalidConn + } + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP16(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. 
+func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, errInvalidConn + } + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err = c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go new file mode 100644 index 00000000000..c5a4c967527 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -0,0 +1,38 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package ipv6 + +import "net" + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, errInvalidConn + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, errInvalidConn + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt.go b/vendor/golang.org/x/net/ipv6/sockopt.go new file mode 100644 index 00000000000..cc3907df385 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package ipv6
+
+import "golang.org/x/net/internal/socket"
+
+// Sticky socket options
+const (
+ ssoTrafficClass = iota // header field for unicast packet, RFC 3542
+ ssoHopLimit // header field for unicast packet, RFC 3493
+ ssoMulticastInterface // outbound interface for multicast packet, RFC 3493
+ ssoMulticastHopLimit // header field for multicast packet, RFC 3493
+ ssoMulticastLoopback // loopback for multicast packet, RFC 3493
+ ssoReceiveTrafficClass // header field on received packet, RFC 3542
+ ssoReceiveHopLimit // header field on received packet, RFC 2292 or 3542
+ ssoReceivePacketInfo // incoming or outbound packet path, RFC 2292 or 3542
+ ssoReceivePathMTU // path mtu, RFC 3542
+ ssoPathMTU // path mtu, RFC 3542
+ ssoChecksum // packet checksum, RFC 2292 or 3542
+ ssoICMPFilter // icmp filter, RFC 2292 or 3542
+ ssoJoinGroup // any-source multicast, RFC 3493
+ ssoLeaveGroup // any-source multicast, RFC 3493
+ ssoJoinSourceGroup // source-specific multicast
+ ssoLeaveSourceGroup // source-specific multicast
+ ssoBlockSourceGroup // any-source or source-specific multicast
+ ssoUnblockSourceGroup // any-source or source-specific multicast
+ ssoAttachFilter // attach BPF for filtering inbound traffic
+)
+
+// Sticky socket option value types
+const (
+ ssoTypeIPMreq = iota + 1
+ ssoTypeGroupReq
+ ssoTypeGroupSourceReq
+)
+
+// A sockOpt represents a binding for sticky socket option.
+type sockOpt struct {
+ socket.Option
+ typ int // hint for option value type; optional
+}
diff --git a/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/vendor/golang.org/x/net/ipv6/sockopt_posix.go
new file mode 100644
index 00000000000..824c623ccef
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sockopt_posix.go
@@ -0,0 +1,89 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package ipv6
+
+import (
+ "net"
+ "runtime"
+ "unsafe"
+
+ "golang.org/x/net/bpf"
+ "golang.org/x/net/internal/socket"
+)
+
+func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) {
+ n, err := so.GetInt(c)
+ if err != nil {
+ return nil, err
+ }
+ return net.InterfaceByIndex(n)
+}
+
+func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error {
+ var n int
+ if ifi != nil {
+ n = ifi.Index
+ }
+ return so.SetInt(c, n)
+}
+
+func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) {
+ b := make([]byte, so.Len)
+ n, err := so.Get(c, b)
+ if err != nil {
+ return nil, err
+ }
+ if n != sizeofICMPv6Filter {
+ return nil, errNotImplemented
+ }
+ return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil
+}
+
+func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error {
+ b := (*[sizeofICMPv6Filter]byte)(unsafe.Pointer(f))[:sizeofICMPv6Filter]
+ return so.Set(c, b)
+}
+
+func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) {
+ b := make([]byte, so.Len)
+ n, err := so.Get(c, b)
+ if err != nil {
+ return nil, 0, err
+ }
+ if n != sizeofIPv6Mtuinfo {
+ return nil, 0, errNotImplemented
+ }
+ mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0]))
+ if mi.Addr.Scope_id == 0 || runtime.GOOS == "aix" {
+ // AIX kernel might return a wrong address.
+ return nil, int(mi.Mtu), nil + } + ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id)) + if err != nil { + return nil, 0, err + } + return ifi, int(mi.Mtu), nil +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errNotImplemented + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_stub.go new file mode 100644 index 00000000000..0a87a93bbd4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -0,0 +1,46 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errNotImplemented +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errNotImplemented +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { + return nil, 0, errNotImplemented +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/sys_aix.go b/vendor/golang.org/x/net/ipv6/sys_aix.go new file mode 100644 index 00000000000..bce7091fb0b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_aix.go @@ -0,0 +1,77 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Added for go1.11 compatibility +// +build aix + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq.go b/vendor/golang.org/x/net/ipv6/sys_asmreq.go new file mode 100644 index 00000000000..8c3934c3eec --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq.go @@ -0,0 +1,24 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreq ipv6Mreq + copy(mreq.Multiaddr[:], grp) + if ifi != nil { + mreq.setIfindex(ifi.Index) + } + b := (*[sizeofIPv6Mreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPv6Mreq] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go new file mode 100644 index 00000000000..87ae4818144 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go @@ -0,0 +1,17 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf.go b/vendor/golang.org/x/net/ipv6/sys_bpf.go new file mode 100644 index 00000000000..90ef4dfaf43 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bpf.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package ipv6 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" + "golang.org/x/sys/unix" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := unix.SockFprog{ + Len: uint16(len(f)), + Filter: (*unix.SockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[unix.SizeofSockFprog]byte)(unsafe.Pointer(&prog))[:unix.SizeofSockFprog] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go new file mode 100644 index 00000000000..eb9f8316237 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv6 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bsd.go b/vendor/golang.org/x/net/ipv6/sys_bsd.go new file mode 100644 index 00000000000..e416eaa1fe4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bsd.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build dragonfly netbsd openbsd + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_darwin.go b/vendor/golang.org/x/net/ipv6/sys_darwin.go new file mode 100644 index 00000000000..12cc5cb2c1e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_darwin.go @@ -0,0 +1,78 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = 
sizeofSockaddrInet6
+ sa.Family = syscall.AF_INET6
+ copy(sa.Addr[:], grp)
+ sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132))
+ sa.Len = sizeofSockaddrInet6
+ sa.Family = syscall.AF_INET6
+ copy(sa.Addr[:], src)
+}
diff --git a/vendor/golang.org/x/net/ipv6/sys_freebsd.go b/vendor/golang.org/x/net/ipv6/sys_freebsd.go
new file mode 100644
index 00000000000..85a9f5d07de
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sys_freebsd.go
@@ -0,0 +1,92 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+ "net"
+ "runtime"
+ "strings"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/net/internal/iana"
+ "golang.org/x/net/internal/socket"
+)
+
+var (
+ ctlOpts = [ctlMax]ctlOpt{
+ ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass},
+ ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit},
+ ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo},
+ ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop},
+ ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU},
+ }
+
+ sockOpts = map[int]*sockOpt{
+ ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}},
+ ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}},
+ ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}},
+ ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}},
+ ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}},
+ ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}},
+ ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}},
+ ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}},
+ ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}},
+ ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}},
+ ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}},
+ ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}},
+ ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},
+ ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},
+ ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+ ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+ ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+ ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+ }
+)
+
+func init() {
+ if runtime.GOOS ==
"freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + compatFreeBSD32 = true + break + } + } + } +} + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_linux.go b/vendor/golang.org/x/net/ipv6/sys_linux.go new file mode 100644 index 00000000000..96e8093a307 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_linux.go @@ -0,0 +1,75 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" + "golang.org/x/sys/unix" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMPV6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: 
sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: unix.SOL_SOCKET, Name: unix.SO_ATTACH_FILTER, Len: unix.SizeofSockFprog}}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Ifindex = int32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_solaris.go b/vendor/golang.org/x/net/ipv6/sys_solaris.go new file mode 100644 index 00000000000..d348b5f6e45 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_solaris.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = 
(*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go new file mode 100644 index 00000000000..9b52e978cbd --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go @@ -0,0 +1,54 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin freebsd linux solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var compatFreeBSD32 bool // 386 emulation on amd64 + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if compatFreeBSD32 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if compatFreeBSD32 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go new file mode 100644 index 00000000000..d5bc1108c5f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!freebsd,!linux,!solaris + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/sys_stub.go b/vendor/golang.org/x/net/ipv6/sys_stub.go new file mode 100644 index 00000000000..4f252d09f6c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv6/sys_windows.go b/vendor/golang.org/x/net/ipv6/sys_windows.go new file mode 100644 index 00000000000..fc36b018bd2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_windows.go @@ -0,0 +1,75 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +const ( + // See ws2tcpip.h. + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PKTINFO = 0x13 + + sizeofSockaddrInet6 = 0x1c + + sizeofIPv6Mreq = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofICMPv6Filter = 0 +) + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type icmpv6Filter struct { + // TODO(mikio): implement this +} + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go new file mode 100644 index 00000000000..bf44e338bd8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go @@ -0,0 +1,103 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_aix.go + +// Added for go1.11 compatibility +// +build aix + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysICMP6_FILTER = 0x26 + + sysIPV6_CHECKSUM = 0x27 + sysIPV6_V6ONLY = 0x25 + + sysIPV6_RTHDRDSTOPTS = 0x37 + + sysIPV6_RECVPKTINFO = 0x23 + sysIPV6_RECVHOPLIMIT = 0x29 + sysIPV6_RECVRTHDR = 0x33 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_RECVDSTOPTS = 0x38 + + sysIPV6_USE_MIN_MTU = 0x2c + sysIPV6_RECVPATHMTU = 0x2f + sysIPV6_PATHMTU = 0x2e + + sysIPV6_PKTINFO = 0x21 + sysIPV6_HOPLIMIT = 0x28 + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x34 + sysIPV6_DSTOPTS = 0x36 + sysIPV6_RTHDR = 0x32 + + sysIPV6_RECVTCLASS = 0x2a + + sysIPV6_TCLASS = 0x2b + sysIPV6_DONTFRAG = 0x2d + + sizeofSockaddrStorage = 0x508 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x510 + sizeofGroupSourceReq = 0xa18 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + X__ss_len uint8 + Family uint8 + X__ss_pad1 [6]uint8 + X__ss_align int64 + X__ss_pad2 [1265]uint8 + Pad_cgo_0 [7]byte +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_darwin.go b/vendor/golang.org/x/net/ipv6/zsys_darwin.go new file mode 100644 index 00000000000..555744afd71 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_darwin.go @@ -0,0 +1,131 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_darwin.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + sysIPV6_2292PKTINFO = 0x13 + sysIPV6_2292HOPLIMIT = 0x14 + sysIPV6_2292NEXTHOP = 0x15 + sysIPV6_2292HOPOPTS = 0x16 + sysIPV6_2292DSTOPTS = 0x17 + sysIPV6_2292RTHDR = 0x18 + + sysIPV6_2292PKTOPTIONS = 0x19 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RECVTCLASS = 0x23 + sysIPV6_TCLASS = 0x24 + + sysIPV6_RTHDRDSTOPTS = 0x39 + + sysIPV6_RECVPKTINFO = 0x3d + + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_MSFILTER = 0x4a + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_BOUND_IF = 0x7d + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go new file mode 100644 index 00000000000..cf3cc1024ac --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go @@ -0,0 +1,88 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_dragonfly.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go new file mode 100644 index 00000000000..73f31b260ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go @@ -0,0 +1,122 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go new file mode 100644 index 00000000000..490ce7cf104 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go @@ -0,0 +1,124 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go new file mode 100644 index 00000000000..490ce7cf104 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go @@ -0,0 +1,124 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm64.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm64.go new file mode 100644 index 00000000000..47e99ac9d30 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm64.go @@ -0,0 +1,122 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]uint8 + X__ss_align int64 + X__ss_pad2 [112]uint8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_386.go b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go new file mode 100644 index 00000000000..bde4a8f8f5d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go @@ -0,0 +1,152 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go new file mode 100644 index 00000000000..992ac9ec5f2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go @@ -0,0 +1,154 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go new file mode 100644 index 00000000000..bde4a8f8f5d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go @@ -0,0 +1,152 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go new file mode 100644 index 00000000000..992ac9ec5f2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go @@ -0,0 +1,154 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go new file mode 100644 index 00000000000..bde4a8f8f5d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go @@ -0,0 +1,152 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go new file mode 100644 index 00000000000..992ac9ec5f2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go @@ -0,0 +1,154 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go new file mode 100644 index 00000000000..992ac9ec5f2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go @@ -0,0 +1,154 @@ +// Code generated by cmd/cgo -godefs; DO 
NOT EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go new file mode 100644 index 00000000000..bde4a8f8f5d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go @@ -0,0 +1,152 @@ +// Code generated by cmd/cgo -godefs; 
DO NOT EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go new file mode 100644 index 00000000000..66fd2361210 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go @@ -0,0 +1,152 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go new file mode 100644 index 00000000000..992ac9ec5f2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go @@ -0,0 +1,154 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go new file mode 100644 index 00000000000..992ac9ec5f2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go @@ -0,0 +1,154 @@ +// Code generated by cmd/cgo -godefs; DO NOT 
EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go new file mode 100644 index 00000000000..6083ddcedcb --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go @@ -0,0 +1,156 @@ +// Code generated by cmd/cgo -godefs; DO 
NOT EDIT. +// cgo -godefs defs_linux.go + +// +build riscv64 + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go new file mode 100644 index 00000000000..992ac9ec5f2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go @@ -0,0 +1,154 @@ +// Code generated by 
cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_netbsd.go b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go new file mode 100644 index 00000000000..e39571e0725 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go @@ -0,0 +1,84 @@ +// Code generated by cmd/cgo -godefs; 
DO NOT EDIT. +// cgo -godefs defs_netbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_openbsd.go b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go new file mode 100644 index 00000000000..cc1899a630c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go @@ -0,0 +1,93 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_openbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTH_LEVEL = 0x35 + sysIPV6_ESP_TRANS_LEVEL = 0x36 + sysIPV6_ESP_NETWORK_LEVEL = 0x37 + sysIPSEC6_OUTSA = 0x38 + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + sysIPV6_IPCOMP_LEVEL = 0x3c + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + sysIPV6_PIPEX = 0x3f + + sysIPV6_RTABLE = 0x1021 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_solaris.go b/vendor/golang.org/x/net/ipv6/zsys_solaris.go new file mode 100644 index 00000000000..690eef9341a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_solaris.go @@ -0,0 +1,131 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_solaris.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x5 + sysIPV6_MULTICAST_IF = 0x6 + sysIPV6_MULTICAST_HOPS = 0x7 + sysIPV6_MULTICAST_LOOP = 0x8 + sysIPV6_JOIN_GROUP = 0x9 + sysIPV6_LEAVE_GROUP = 0xa + + sysIPV6_PKTINFO = 0xb + + sysIPV6_HOPLIMIT = 0xc + sysIPV6_NEXTHOP = 0xd + sysIPV6_HOPOPTS = 0xe + sysIPV6_DSTOPTS = 0xf + + sysIPV6_RTHDR = 0x10 + sysIPV6_RTHDRDSTOPTS = 0x11 + + sysIPV6_RECVPKTINFO = 0x12 + sysIPV6_RECVHOPLIMIT = 0x13 + sysIPV6_RECVHOPOPTS = 0x14 + + sysIPV6_RECVRTHDR = 0x16 + + sysIPV6_RECVRTHDRDSTOPTS = 0x17 + + sysIPV6_CHECKSUM = 0x18 + sysIPV6_RECVTCLASS = 0x19 + sysIPV6_USE_MIN_MTU = 0x20 + sysIPV6_DONTFRAG = 0x21 + sysIPV6_SEC_OPT = 0x22 + sysIPV6_SRC_PREFERENCES = 0x23 + sysIPV6_RECVPATHMTU = 0x24 + sysIPV6_PATHMTU = 0x25 + sysIPV6_TCLASS = 0x26 + sysIPV6_V6ONLY = 0x27 + + sysIPV6_RECVDSTOPTS = 0x28 + + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sysIPV6_PREFER_SRC_HOME = 0x1 + sysIPV6_PREFER_SRC_COA = 0x2 + sysIPV6_PREFER_SRC_PUBLIC = 0x4 + sysIPV6_PREFER_SRC_TMP = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x10 + sysIPV6_PREFER_SRC_CGA = 0x20 + + sysIPV6_PREFER_SRC_MIPMASK = 0x3 + sysIPV6_PREFER_SRC_MIPDEFAULT = 0x1 + sysIPV6_PREFER_SRC_TMPMASK = 0xc + sysIPV6_PREFER_SRC_TMPDEFAULT = 0x4 + sysIPV6_PREFER_SRC_CGAMASK = 0x30 + sysIPV6_PREFER_SRC_CGADEFAULT = 0x10 + + sysIPV6_PREFER_SRC_MASK = 0x3f + + sysIPV6_PREFER_SRC_DEFAULT = 0x15 + + sysIPV6_BOUND_IF = 0x41 + sysIPV6_UNSPEC_SRC = 0x42 + + sysICMP6_FILTER = 0x1 + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet6 = 0x20 + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x24 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} + +type icmpv6Filter struct { + X__icmp6_filt [8]uint32 +} diff --git a/vendor/golang.org/x/oauth2/go.mod b/vendor/golang.org/x/oauth2/go.mod index b3457815528..2b13f0b34cb 100644 --- a/vendor/golang.org/x/oauth2/go.mod +++ b/vendor/golang.org/x/oauth2/go.mod @@ -3,8 +3,7 @@ module golang.org/x/oauth2 go 1.11 require ( - cloud.google.com/go v0.34.0 - golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e - golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect - google.golang.org/appengine v1.4.0 + cloud.google.com/go v0.65.0 + golang.org/x/net v0.0.0-20200822124328-c89045814202 + google.golang.org/appengine v1.6.6 ) diff --git a/vendor/golang.org/x/oauth2/go.sum b/vendor/golang.org/x/oauth2/go.sum index 6f0079b0d7f..eab5833c421 100644 --- a/vendor/golang.org/x/oauth2/go.sum +++ b/vendor/golang.org/x/oauth2/go.sum @@ -1,12 +1,361 @@ -cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= +cloud.google.com/go v0.26.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod 
h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 
h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= 
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= 
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf 
v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index e44deb75746..f77701fe868 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -6,6 +6,11 @@ // various CPU architectures. package cpu +import ( + "os" + "strings" +) + // Initialized reports whether the CPU features were initialized. // // For some GOOS/GOARCH combinations initialization of the CPU features depends @@ -24,26 +29,46 @@ type CacheLinePad struct{ _ [cacheLineSize]byte } // and HasAVX2 are only set if the OS supports XMM and YMM // registers in addition to the CPUID feature bit being set. var X86 struct { - _ CacheLinePad - HasAES bool // AES hardware implementation (AES NI) - HasADX bool // Multi-precision add-carry instruction extensions - HasAVX bool // Advanced vector extension - HasAVX2 bool // Advanced vector extension 2 - HasBMI1 bool // Bit manipulation instruction set 1 - HasBMI2 bool // Bit manipulation instruction set 2 - HasERMS bool // Enhanced REP for MOVSB and STOSB - HasFMA bool // Fused-multiply-add instructions - HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. - HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM - HasPOPCNT bool // Hamming weight instruction POPCNT. 
- HasRDRAND bool // RDRAND instruction (on-chip random number generator) - HasRDSEED bool // RDSEED instruction (on-chip random number generator) - HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) - HasSSE3 bool // Streaming SIMD extension 3 - HasSSSE3 bool // Supplemental streaming SIMD extension 3 - HasSSE41 bool // Streaming SIMD extension 4 and 4.1 - HasSSE42 bool // Streaming SIMD extension 4 and 4.2 - _ CacheLinePad + _ CacheLinePad + HasAES bool // AES hardware implementation (AES NI) + HasADX bool // Multi-precision add-carry instruction extensions + HasAVX bool // Advanced vector extension + HasAVX2 bool // Advanced vector extension 2 + HasAVX512 bool // Advanced vector extension 512 + HasAVX512F bool // Advanced vector extension 512 Foundation Instructions + HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions + HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions + HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions Instructions + HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions + HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions + HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions + HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add + HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions + HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision + HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision + HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions + HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations + HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions + HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions + HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions + HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 + HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms + HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions + HasBMI1 bool // Bit manipulation instruction set 1 + HasBMI2 bool // Bit manipulation instruction set 2 + HasERMS bool // Enhanced REP for MOVSB and STOSB + HasFMA bool // Fused-multiply-add instructions + HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. + HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM + HasPOPCNT bool // Hamming weight instruction POPCNT. 
+ HasRDRAND bool // RDRAND instruction (on-chip random number generator) + HasRDSEED bool // RDSEED instruction (on-chip random number generator) + HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + _ CacheLinePad } // ARM64 contains the supported CPU features of the @@ -169,3 +194,94 @@ var S390X struct { HasVXE bool // vector-enhancements facility 1 _ CacheLinePad } + +func init() { + archInit() + initOptions() + processOptions() +} + +// options contains the cpu debug options that can be used in GODEBUG. +// Options are arch dependent and are added by the arch specific initOptions functions. +// Features that are mandatory for the specific GOARCH should have the Required field set +// (e.g. SSE2 on amd64). +var options []option + +// Option names should be lower case. e.g. avx instead of AVX. +type option struct { + Name string + Feature *bool + Specified bool // whether feature value was specified in GODEBUG + Enable bool // whether feature should be enabled + Required bool // whether feature is mandatory and can not be disabled +} + +func processOptions() { + env := os.Getenv("GODEBUG") +field: + for env != "" { + field := "" + i := strings.IndexByte(env, ',') + if i < 0 { + field, env = env, "" + } else { + field, env = env[:i], env[i+1:] + } + if len(field) < 4 || field[:4] != "cpu." { + continue + } + i = strings.IndexByte(field, '=') + if i < 0 { + print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n") + continue + } + key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on" + + var enable bool + switch value { + case "on": + enable = true + case "off": + enable = false + default: + print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n") + continue field + } + + if key == "all" { + for i := range options { + options[i].Specified = true + options[i].Enable = enable || options[i].Required + } + continue field + } + + for i := range options { + if options[i].Name == key { + options[i].Specified = true + options[i].Enable = enable + continue field + } + } + + print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n") + } + + for _, o := range options { + if !o.Specified { + continue + } + + if o.Enable && !*o.Feature { + print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n") + continue + } + + if !o.Enable && o.Required { + print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n") + continue + } + + *o.Feature = o.Enable + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go index da298966872..464a209cf59 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_aix.go +++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -6,8 +6,6 @@ package cpu -const cacheLineSize = 128 - const ( // getsystemcfg constants _SC_IMPL = 2 @@ -15,7 +13,7 @@ const ( _IMPL_POWER9 = 0x20000 ) -func init() { +func archInit() { impl := getsystemcfg(_SC_IMPL) if impl&_IMPL_POWER8 != 0 { PPC64.IsPOWER8 = true diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go index 981af6818ca..301b752e9c5 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm.go @@ -38,3 +38,36 @@ const ( hwcap2_SHA2 = 1 << 3 hwcap2_CRC32 = 1 << 4 ) + +func initOptions() { + options = []option{ + {Name: 
"pmull", Feature: &ARM.HasPMULL}, + {Name: "sha1", Feature: &ARM.HasSHA1}, + {Name: "sha2", Feature: &ARM.HasSHA2}, + {Name: "swp", Feature: &ARM.HasSWP}, + {Name: "thumb", Feature: &ARM.HasTHUMB}, + {Name: "thumbee", Feature: &ARM.HasTHUMBEE}, + {Name: "tls", Feature: &ARM.HasTLS}, + {Name: "vfp", Feature: &ARM.HasVFP}, + {Name: "vfpd32", Feature: &ARM.HasVFPD32}, + {Name: "vfpv3", Feature: &ARM.HasVFPv3}, + {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16}, + {Name: "vfpv4", Feature: &ARM.HasVFPv4}, + {Name: "half", Feature: &ARM.HasHALF}, + {Name: "26bit", Feature: &ARM.Has26BIT}, + {Name: "fastmul", Feature: &ARM.HasFASTMUL}, + {Name: "fpa", Feature: &ARM.HasFPA}, + {Name: "edsp", Feature: &ARM.HasEDSP}, + {Name: "java", Feature: &ARM.HasJAVA}, + {Name: "iwmmxt", Feature: &ARM.HasIWMMXT}, + {Name: "crunch", Feature: &ARM.HasCRUNCH}, + {Name: "neon", Feature: &ARM.HasNEON}, + {Name: "idivt", Feature: &ARM.HasIDIVT}, + {Name: "idiva", Feature: &ARM.HasIDIVA}, + {Name: "lpae", Feature: &ARM.HasLPAE}, + {Name: "evtstrm", Feature: &ARM.HasEVTSTRM}, + {Name: "aes", Feature: &ARM.HasAES}, + {Name: "crc32", Feature: &ARM.HasCRC32}, + } + +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index 7bcb36c7bb5..2f64d3b3983 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -8,9 +8,38 @@ import "runtime" const cacheLineSize = 64 -func init() { +func initOptions() { + options = []option{ + {Name: "fp", Feature: &ARM64.HasFP}, + {Name: "asimd", Feature: &ARM64.HasASIMD}, + {Name: "evstrm", Feature: &ARM64.HasEVTSTRM}, + {Name: "aes", Feature: &ARM64.HasAES}, + {Name: "fphp", Feature: &ARM64.HasFPHP}, + {Name: "jscvt", Feature: &ARM64.HasJSCVT}, + {Name: "lrcpc", Feature: &ARM64.HasLRCPC}, + {Name: "pmull", Feature: &ARM64.HasPMULL}, + {Name: "sha1", Feature: &ARM64.HasSHA1}, + {Name: "sha2", Feature: &ARM64.HasSHA2}, + {Name: "sha3", Feature: &ARM64.HasSHA3}, + {Name: "sha512", Feature: &ARM64.HasSHA512}, + {Name: "sm3", Feature: &ARM64.HasSM3}, + {Name: "sm4", Feature: &ARM64.HasSM4}, + {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "crc32", Feature: &ARM64.HasCRC32}, + {Name: "atomics", Feature: &ARM64.HasATOMICS}, + {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, + {Name: "cpuid", Feature: &ARM64.HasCPUID}, + {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM}, + {Name: "fcma", Feature: &ARM64.HasFCMA}, + {Name: "dcpop", Feature: &ARM64.HasDCPOP}, + {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, + {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, + } +} + +func archInit() { switch runtime.GOOS { - case "android", "darwin", "netbsd": + case "android", "darwin", "ios", "netbsd", "openbsd": // Android and iOS don't seem to allow reading these registers. // // NetBSD: @@ -18,6 +47,9 @@ func init() { // It can be read via sysctl(3). Example for future implementers: // https://nxr.netbsd.org/xref/src/usr.sbin/cpuctl/arch/aarch64.c // + // OpenBSD: + // See https://golang.org/issue/31746 + // // Fake the minimal features expected by // TestARM64minimalFeatures. 
ARM64.HasASIMD = true diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go index fe139182c8d..6fc874f7fef 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -6,7 +6,7 @@ package cpu -func init() { +func archInit() { if err := readHWCAP(); err != nil { return } diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go index eb24e5073e7..5a418900538 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build linux // +build mips64 mips64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go index 6c8d975d40a..99f8a6399ef 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -7,8 +7,6 @@ package cpu -const cacheLineSize = 128 - // HWCAP/HWCAP2 bits. These are exposed by the kernel. const ( // ISA Level diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go index d579eaef404..b88d6b8f662 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -4,8 +4,6 @@ package cpu -const cacheLineSize = 256 - const ( // bit mask values from /usr/include/bits/hwcap.h hwcap_ZARCH = 2 diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go index 6165f121249..57b5b677de0 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -7,3 +7,9 @@ package cpu const cacheLineSize = 32 + +func initOptions() { + options = []option{ + {Name: "msa", Feature: &MIPS64X.HasMSA}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go index 1269eee88d0..cfc1946b7bb 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -7,3 +7,5 @@ package cpu const cacheLineSize = 32 + +func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go new file mode 100644 index 00000000000..b412efc1bd1 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go @@ -0,0 +1,9 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux,arm + +package cpu + +func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go new file mode 100644 index 00000000000..d28d675b5f1 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -0,0 +1,16 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ppc64 ppc64le + +package cpu + +const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "darn", Feature: &PPC64.HasDARN}, + {Name: "scv", Feature: &PPC64.HasSCV}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index efe2b7a8477..8b08de341b8 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -7,3 +7,5 @@ package cpu const cacheLineSize = 32 + +func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go new file mode 100644 index 00000000000..544cd621cee --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.go @@ -0,0 +1,30 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 256 + +func initOptions() { + options = []option{ + {Name: "zarch", Feature: &S390X.HasZARCH}, + {Name: "stfle", Feature: &S390X.HasSTFLE}, + {Name: "ldisp", Feature: &S390X.HasLDISP}, + {Name: "eimm", Feature: &S390X.HasEIMM}, + {Name: "dfp", Feature: &S390X.HasDFP}, + {Name: "etf3eh", Feature: &S390X.HasETF3EH}, + {Name: "msa", Feature: &S390X.HasMSA}, + {Name: "aes", Feature: &S390X.HasAES}, + {Name: "aescbc", Feature: &S390X.HasAESCBC}, + {Name: "aesctr", Feature: &S390X.HasAESCTR}, + {Name: "aesgcm", Feature: &S390X.HasAESGCM}, + {Name: "ghash", Feature: &S390X.HasGHASH}, + {Name: "sha1", Feature: &S390X.HasSHA1}, + {Name: "sha256", Feature: &S390X.HasSHA256}, + {Name: "sha3", Feature: &S390X.HasSHA3}, + {Name: "sha512", Feature: &S390X.HasSHA512}, + {Name: "vx", Feature: &S390X.HasVX}, + {Name: "vxe", Feature: &S390X.HasVXE}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go index 8681e876a95..5382f2a227a 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_wasm.go +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -11,3 +11,7 @@ package cpu // rules are good enough. 
const cacheLineSize = 0 + +func initOptions() {} + +func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index d70d317f5a4..48d42933195 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -6,9 +6,57 @@ package cpu +import "runtime" + const cacheLineSize = 64 -func init() { +func initOptions() { + options = []option{ + {Name: "adx", Feature: &X86.HasADX}, + {Name: "aes", Feature: &X86.HasAES}, + {Name: "avx", Feature: &X86.HasAVX}, + {Name: "avx2", Feature: &X86.HasAVX2}, + {Name: "avx512", Feature: &X86.HasAVX512}, + {Name: "avx512f", Feature: &X86.HasAVX512F}, + {Name: "avx512cd", Feature: &X86.HasAVX512CD}, + {Name: "avx512er", Feature: &X86.HasAVX512ER}, + {Name: "avx512pf", Feature: &X86.HasAVX512PF}, + {Name: "avx512vl", Feature: &X86.HasAVX512VL}, + {Name: "avx512bw", Feature: &X86.HasAVX512BW}, + {Name: "avx512dq", Feature: &X86.HasAVX512DQ}, + {Name: "avx512ifma", Feature: &X86.HasAVX512IFMA}, + {Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI}, + {Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW}, + {Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS}, + {Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ}, + {Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ}, + {Name: "avx512vnni", Feature: &X86.HasAVX512VNNI}, + {Name: "avx512gfni", Feature: &X86.HasAVX512GFNI}, + {Name: "avx512vaes", Feature: &X86.HasAVX512VAES}, + {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, + {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, + {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, + {Name: "bmi1", Feature: &X86.HasBMI1}, + {Name: "bmi2", Feature: &X86.HasBMI2}, + {Name: "erms", Feature: &X86.HasERMS}, + {Name: "fma", Feature: &X86.HasFMA}, + {Name: "osxsave", Feature: &X86.HasOSXSAVE}, + {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ}, + {Name: "popcnt", Feature: &X86.HasPOPCNT}, + {Name: "rdrand", Feature: &X86.HasRDRAND}, + {Name: "rdseed", Feature: &X86.HasRDSEED}, + {Name: "sse3", Feature: &X86.HasSSE3}, + {Name: "sse41", Feature: &X86.HasSSE41}, + {Name: "sse42", Feature: &X86.HasSSE42}, + {Name: "ssse3", Feature: &X86.HasSSSE3}, + + // These capabilities should always be enabled on amd64: + {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, + } +} + +func archInit() { + Initialized = true maxID, _, _, _ := cpuid(0, 0) @@ -31,12 +79,15 @@ func init() { X86.HasOSXSAVE = isSet(27, ecx1) X86.HasRDRAND = isSet(30, ecx1) - osSupportsAVX := false + var osSupportsAVX, osSupportsAVX512 bool // For XGETBV, OSXSAVE bit is required and sufficient. if X86.HasOSXSAVE { eax, _ := xgetbv() // Check if XMM and YMM registers have OS support. osSupportsAVX = isSet(1, eax) && isSet(2, eax) + + // Check if OPMASK and ZMM registers have OS support. 
+ osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) } X86.HasAVX = isSet(28, ecx1) && osSupportsAVX @@ -45,13 +96,38 @@ func init() { return } - _, ebx7, _, _ := cpuid(7, 0) + _, ebx7, ecx7, edx7 := cpuid(7, 0) X86.HasBMI1 = isSet(3, ebx7) X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX X86.HasBMI2 = isSet(8, ebx7) X86.HasERMS = isSet(9, ebx7) X86.HasRDSEED = isSet(18, ebx7) X86.HasADX = isSet(19, ebx7) + + X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension + if X86.HasAVX512 { + X86.HasAVX512F = true + X86.HasAVX512CD = isSet(28, ebx7) + X86.HasAVX512ER = isSet(27, ebx7) + X86.HasAVX512PF = isSet(26, ebx7) + X86.HasAVX512VL = isSet(31, ebx7) + X86.HasAVX512BW = isSet(30, ebx7) + X86.HasAVX512DQ = isSet(17, ebx7) + X86.HasAVX512IFMA = isSet(21, ebx7) + X86.HasAVX512VBMI = isSet(1, ecx7) + X86.HasAVX5124VNNIW = isSet(2, edx7) + X86.HasAVX5124FMAPS = isSet(3, edx7) + X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7) + X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7) + X86.HasAVX512VNNI = isSet(11, ecx7) + X86.HasAVX512GFNI = isSet(8, ecx7) + X86.HasAVX512VAES = isSet(9, ecx7) + X86.HasAVX512VBMI2 = isSet(6, ecx7) + X86.HasAVX512BITALG = isSet(12, ecx7) + + eax71, _, _, _ := cpuid(7, 1) + X86.HasAVX512BF16 = isSet(5, eax71) + } } func isSet(bitpos uint, value uint32) bool { diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s new file mode 100644 index 00000000000..567a4763c88 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for mips64, OpenBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/fcntl_darwin.go b/vendor/golang.org/x/sys/unix/fcntl_darwin.go index 5868a4a47b4..a9911c7c1d8 100644 --- a/vendor/golang.org/x/sys/unix/fcntl_darwin.go +++ b/vendor/golang.org/x/sys/unix/fcntl_darwin.go @@ -16,3 +16,9 @@ func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { _, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(lk)))) return err } + +// FcntlFstore performs a fcntl syscall for the F_PREALLOCATE command. +func FcntlFstore(fd uintptr, cmd int, fstore *Fstore_t) error { + _, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(fstore)))) + return err +} diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go index fc0e50e0372..8db48e5e062 100644 --- a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go +++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go @@ -1,9 +1,9 @@ -// +build linux,386 linux,arm linux,mips linux,mipsle - // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+// +build linux,386 linux,arm linux,mips linux,mipsle + package unix func init() { diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index cd6f5a6133f..86032c11ef3 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -12,10 +12,8 @@ import "syscall" // We can't use the gc-syntax .s files for gccgo. On the plus side // much of the functionality can be written directly in Go. -//extern gccgoRealSyscallNoError func realSyscallNoError(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r uintptr) -//extern gccgoRealSyscall func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr) func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) { diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index c44730c5e99..2cb1fefac64 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -21,6 +21,9 @@ struct ret { uintptr_t err; }; +struct ret gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9) + __asm__(GOSYM_PREFIX GOPKGPATH ".realSyscall"); + struct ret gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9) { @@ -32,6 +35,9 @@ gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintp return r; } +uintptr_t gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9) + __asm__(GOSYM_PREFIX GOPKGPATH ".realSyscallNoError"); + uintptr_t gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9) { diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go index 3559e5dcb29..5641678613c 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl.go @@ -20,6 +20,15 @@ func IoctlSetInt(fd int, req uint, value int) error { return ioctl(fd, req, uintptr(value)) } +// IoctlSetPointerInt performs an ioctl operation which sets an +// integer value on fd, using the specified request number. The ioctl +// argument is called with a pointer to the integer value, rather than +// passing the integer value directly. +func IoctlSetPointerInt(fd int, req uint, value int) error { + v := int32(value) + return ioctl(fd, req, uintptr(unsafe.Pointer(&v))) +} + // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // // To change fd's window size, the req argument should be TIOCSWINSZ. 
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index ece31e9dcdc..d257fac5057 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -73,26 +73,22 @@ aix_ppc64) darwin_386) mkerrors="$mkerrors -m32" mksyscall="go run mksyscall.go -l32" - mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" mkasm="go run mkasm_darwin.go" ;; darwin_amd64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" mkasm="go run mkasm_darwin.go" ;; darwin_arm) mkerrors="$mkerrors" mksyscall="go run mksyscall.go -l32" - mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" mkasm="go run mkasm_darwin.go" ;; darwin_arm64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" mkasm="go run mkasm_darwin.go" ;; @@ -184,6 +180,15 @@ openbsd_arm64) # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; +openbsd_mips64) + mkerrors="$mkerrors -m64" + mksyscall="go run mksyscall.go -openbsd" + mksysctl="go run mksysctl_openbsd.go" + mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" + # Let the type of C char be signed for making the bare syscall + # API consistent across platforms. + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; solaris_amd64) mksyscall="go run mksyscall_solaris.go" mkerrors="$mkerrors -m64" @@ -217,8 +222,6 @@ esac # aix/ppc64 script generates files instead of writing to stdin. 
echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ; elif [ "$GOOS" == "darwin" ]; then - # pre-1.12, direct syscalls - echo "$mksyscall -tags $GOOS,$GOARCH,!go1.12 $syscall_goos syscall_darwin_${GOARCH}.1_11.go $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.1_11.go"; # 1.12 and later, syscalls via libSystem echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; # 1.13 and later, syscalls via libSystem (including syscallPtr) diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 780e387e3f1..2363df8632c 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -58,6 +58,8 @@ includes_Darwin=' #define _DARWIN_USE_64_BIT_INODE #include #include +#include +#include #include #include #include @@ -93,6 +95,7 @@ includes_DragonFly=' #include #include #include +#include #include #include #include @@ -107,6 +110,7 @@ includes_FreeBSD=' #include #include #include +#include #include #include #include @@ -192,9 +196,12 @@ struct ltchars { #include #include #include +#include +#include #include #include #include +#include #include #include #include @@ -297,6 +304,7 @@ includes_NetBSD=' #include #include #include +#include #include #include #include @@ -325,6 +333,7 @@ includes_OpenBSD=' #include #include #include +#include #include #include #include @@ -365,6 +374,7 @@ includes_SunOS=' #include #include #include +#include #include #include #include @@ -489,6 +499,7 @@ ccflags="$@" $2 !~ "NLA_TYPE_MASK" && $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || + $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || $2 ~ /^TCGET/ || @@ -507,16 +518,21 @@ ccflags="$@" $2 ~ /^(CLOCK|TIMER)_/ || $2 ~ /^CAN_/ || $2 ~ /^CAP_/ || + $2 ~ /^CP_/ || + $2 ~ /^CPUSTATES$/ || + $2 ~ /^CTLIOCGINFO$/ || $2 ~ /^ALG_/ || + $2 ~ /^FI(CLONE|DEDUPERANGE)/ || $2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE)/ || - $2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|GETFLAGS)/ || + $2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|[GS]ETFLAGS)/ || $2 ~ /^FS_VERITY_/ || $2 ~ /^FSCRYPT_/ || + $2 ~ /^DM_/ || $2 ~ /^GRND_/ || $2 ~ /^RND/ || $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || - $2 ~ /^PERF_EVENT_IOC_/ || + $2 ~ /^PERF_/ || $2 ~ /^SECCOMP_MODE_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || @@ -535,7 +551,7 @@ ccflags="$@" $2 ~ /^XATTR_(CREATE|REPLACE|NO(DEFAULT|FOLLOW|SECURITY)|SHOWCOMPRESSION)/ || $2 ~ /^ATTR_(BIT_MAP_COUNT|(CMN|VOL|FILE)_)/ || $2 ~ /^FSOPT_/ || - $2 ~ /^WDIOC_/ || + $2 ~ /^WDIO[CFS]_/ || $2 ~ /^NFN/ || $2 ~ /^XDP_/ || $2 ~ /^RWF_/ || diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go index 7d08dae5baf..57a0021da55 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -20,7 +20,7 @@ func cmsgAlignOf(salen int) int { case "aix": // There is no alignment on AIX. salign = 1 - case "darwin", "illumos", "solaris": + case "darwin", "ios", "illumos", "solaris": // NOTE: It seems like 64-bit Darwin, Illumos and Solaris // kernels still require 32-bit aligned access to network // subsystem. @@ -32,6 +32,10 @@ func cmsgAlignOf(salen int) int { if runtime.GOARCH == "arm" { salign = 8 } + // NetBSD aarch64 requires 128-bit alignment. 
+ if runtime.GOOS == "netbsd" && runtime.GOARCH == "arm64" { + salign = 16 + } } return (salen + salign - 1) & ^(salign - 1) diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 9ad8a0d4a56..4408153822d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -19,6 +19,22 @@ import "unsafe" * Wrapped */ +func Access(path string, mode uint32) (err error) { + return Faccessat(AT_FDCWD, path, mode, 0) +} + +func Chmod(path string, mode uint32) (err error) { + return Fchmodat(AT_FDCWD, path, mode, 0) +} + +func Chown(path string, uid int, gid int) (err error) { + return Fchownat(AT_FDCWD, path, uid, gid, 0) +} + +func Creat(path string, mode uint32) (fd int, err error) { + return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode) +} + //sys utimes(path string, times *[2]Timeval) (err error) func Utimes(path string, tv []Timeval) error { if len(tv) != 2 { diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 68605db6248..123536a028c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -18,6 +18,21 @@ import ( "unsafe" ) +const ImplementsGetwd = true + +func Getwd() (string, error) { + var buf [PathMax]byte + _, err := Getcwd(buf[0:]) + if err != nil { + return "", err + } + n := clen(buf[:]) + if n < 1 { + return "", EINVAL + } + return string(buf[:n]), nil +} + /* * Wrapped */ @@ -272,7 +287,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { if err != nil { return } - if runtime.GOOS == "darwin" && len == 0 { + if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && len == 0 { // Accepted socket has no address. // This is likely due to a bug in xnu kernels, // where instead of ECONNABORTED error socket @@ -527,6 +542,23 @@ func SysctlClockinfo(name string) (*Clockinfo, error) { return &ci, nil } +func SysctlTimeval(name string) (*Timeval, error) { + mib, err := sysctlmib(name) + if err != nil { + return nil, err + } + + var tv Timeval + n := uintptr(unsafe.Sizeof(tv)) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&tv)), &n, nil, 0); err != nil { + return nil, err + } + if n != unsafe.Sizeof(tv) { + return nil, EIO + } + return &tv, nil +} + //sys utimes(path string, timeval *[2]Timeval) (err error) func Utimes(path string, tv []Timeval) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go index 6a15cba6110..b31ef035881 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go @@ -10,6 +10,8 @@ import ( "unsafe" ) +const _SYS_GETDIRENTRIES64 = 344 + func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // To implement this using libSystem we'd need syscall_syscallPtr for // fdopendir. 
However, syscallPtr was only added in Go 1.13, so we fall @@ -20,7 +22,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { } else { p = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(p), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + r0, _, e1 := Syscall6(_SYS_GETDIRENTRIES64, uintptr(fd), uintptr(p), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) n = int(r0) if e1 != 0 { return n, errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 0cf31acf02c..5b0e831f24f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -13,29 +13,11 @@ package unix import ( - "errors" + "runtime" "syscall" "unsafe" ) -const ImplementsGetwd = true - -func Getwd() (string, error) { - buf := make([]byte, 2048) - attrs, err := getAttrList(".", attrList{CommonAttr: attrCmnFullpath}, buf, 0) - if err == nil && len(attrs) == 1 && len(attrs[0]) >= 2 { - wd := string(attrs[0]) - // Sanity check that it's an absolute path and ends - // in a null byte, which we then strip. - if wd[0] == '/' && wd[len(wd)-1] == 0 { - return wd[:len(wd)-1], nil - } - } - // If pkg/os/getwd.go gets ENOTSUP, it will fall back to the - // slow algorithm. - return "", ENOTSUP -} - // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { Len uint8 @@ -49,6 +31,11 @@ type SockaddrDatalink struct { raw RawSockaddrDatalink } +// Some external packages rely on SYS___SYSCTL being defined to implement their +// own sysctl wrappers. Provide it here, even though direct syscalls are no +// longer supported on darwin. +const SYS___SYSCTL = 202 + // Translate "kern.hostname" to []_C_int{0,1,2,3}. func nametomib(name string) (mib []_C_int, err error) { const siz = unsafe.Sizeof(mib[0]) @@ -92,11 +79,6 @@ func direntNamlen(buf []byte) (uint64, bool) { func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } -const ( - attrBitMapCount = 5 - attrCmnFullpath = 0x08000000 -) - type attrList struct { bitmapCount uint16 _ uint16 @@ -107,54 +89,6 @@ type attrList struct { Forkattr uint32 } -func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) (attrs [][]byte, err error) { - if len(attrBuf) < 4 { - return nil, errors.New("attrBuf too small") - } - attrList.bitmapCount = attrBitMapCount - - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return nil, err - } - - if err := getattrlist(_p0, unsafe.Pointer(&attrList), unsafe.Pointer(&attrBuf[0]), uintptr(len(attrBuf)), int(options)); err != nil { - return nil, err - } - size := *(*uint32)(unsafe.Pointer(&attrBuf[0])) - - // dat is the section of attrBuf that contains valid data, - // without the 4 byte length header. All attribute offsets - // are relative to dat. 
- dat := attrBuf - if int(size) < len(attrBuf) { - dat = dat[:size] - } - dat = dat[4:] // remove length prefix - - for i := uint32(0); int(i) < len(dat); { - header := dat[i:] - if len(header) < 8 { - return attrs, errors.New("truncated attribute header") - } - datOff := *(*int32)(unsafe.Pointer(&header[0])) - attrLen := *(*uint32)(unsafe.Pointer(&header[4])) - if datOff < 0 || uint32(datOff)+attrLen > uint32(len(dat)) { - return attrs, errors.New("truncated results; attrBuf too small") - } - end := uint32(datOff) + attrLen - attrs = append(attrs, dat[datOff:end]) - i = end - if r := i % 4; r != 0 { - i += (4 - r) - } - } - return -} - -//sys getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) - //sysnb pipe() (r int, w int, err error) func Pipe(p []int) (err error) { @@ -324,6 +258,12 @@ func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(sig //sys ioctl(fd int, req uint, arg uintptr) (err error) +func IoctlCtlInfo(fd int, ctlInfo *CtlInfo) error { + err := ioctl(fd, CTLIOCGINFO, uintptr(unsafe.Pointer(ctlInfo))) + runtime.KeepAlive(ctlInfo) + return err +} + //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL func Uname(uname *Utsname) error { @@ -396,6 +336,8 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Chroot(path string) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) +//sys Clonefile(src string, dst string, flags int) (err error) +//sys Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) //sys Exchangedata(path1 string, path2 string, options int) (err error) @@ -407,10 +349,12 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) +//sys Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) +//sys Getcwd(buf []byte) (n int, err error) //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go deleted file mode 100644 index 6b223f91a5e..00000000000 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin,386,!go1.12 - -package unix - -//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index 2724e3a5128..6c1f4ab95b4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -6,11 +6,7 @@ package unix -import ( - "syscall" -) - -//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +import "syscall" func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: int32(sec), Nsec: int32(nsec)} @@ -44,14 +40,11 @@ func (cmsg *Cmsghdr) SetLen(length int) { func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) -// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions -// of darwin/386 the syscall is called sysctl instead of __sysctl. -const SYS___SYSCTL = SYS_SYSCTL - //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 //sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go deleted file mode 100644 index 68ebd6fab2a..00000000000 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin,amd64,!go1.12 - -package unix - -//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index ce2e0d24973..0582ae256ef 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -6,11 +6,7 @@ package unix -import ( - "syscall" -) - -//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +import "syscall" func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} @@ -44,14 +40,11 @@ func (cmsg *Cmsghdr) SetLen(length int) { func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) -// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions -// of darwin/amd64 the syscall is called sysctl instead of __sysctl. 
-const SYS___SYSCTL = SYS_SYSCTL - //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 //sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index fc17a3f232e..c6a9733b4cb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -4,9 +4,7 @@ package unix -import ( - "syscall" -) +import "syscall" func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ENOTSUP @@ -44,10 +42,6 @@ func (cmsg *Cmsghdr) SetLen(length int) { func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic -// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions -// of darwin/arm the syscall is called sysctl instead of __sysctl. -const SYS___SYSCTL = SYS_SYSCTL - //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) //sys Fstatfs(fd int, stat *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go deleted file mode 100644 index 01d450406be..00000000000 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin,arm64,!go1.12 - -package unix - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - return 0, ENOSYS -} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index 1e91ddf3257..253afa4de55 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -6,13 +6,7 @@ package unix -import ( - "syscall" -) - -func ptrace(request int, pid int, addr uintptr, data uintptr) error { - return ENOTSUP -} +import "syscall" func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} @@ -46,14 +40,11 @@ func (cmsg *Cmsghdr) SetLen(length int) { func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic -// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions -// of darwin/arm64 the syscall is called sysctl instead of __sysctl. 
-const SYS___SYSCTL = SYS_SYSCTL - //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) //sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT //sys Lstat(path string, stat *Stat_t) (err error) +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 8a195ae586c..bed7dcfec11 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -129,23 +129,8 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { return } -const ImplementsGetwd = true - //sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD -func Getwd() (string, error) { - var buf [PathMax]byte - _, err := Getcwd(buf[0:]) - if err != nil { - return "", err - } - n := clen(buf[:]) - if n < 1 { - return "", EINVAL - } - return string(buf[:n]), nil -} - func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { var _p0 unsafe.Pointer var bufsize uintptr diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 6932e7c2c1a..f6db02aff40 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -140,23 +140,8 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { return } -const ImplementsGetwd = true - //sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD -func Getwd() (string, error) { - var buf [PathMax]byte - _, err := Getcwd(buf[0:]) - if err != nil { - return "", err - } - n := clen(buf[:]) - if n < 1 { - return "", EINVAL - } - return string(buf[:n]), nil -} - func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { var ( _p0 unsafe.Pointer diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 99e62dcd828..bbc4f3ea543 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -24,7 +24,7 @@ func bytes2iovec(bs [][]byte) []Iovec { return iovecs } -//sys readv(fd int, iovs []Iovec) (n int, err error) +//sys readv(fd int, iovs []Iovec) (n int, err error) func Readv(fd int, iovs [][]byte) (n int, err error) { iovecs := bytes2iovec(iovs) @@ -32,7 +32,7 @@ func Readv(fd int, iovs [][]byte) (n int, err error) { return n, err } -//sys preadv(fd int, iovs []Iovec, off int64) (n int, err error) +//sys preadv(fd int, iovs []Iovec, off int64) (n int, err error) func Preadv(fd int, iovs [][]byte, off int64) (n int, err error) { iovecs := bytes2iovec(iovs) @@ -40,7 +40,7 @@ func Preadv(fd int, iovs [][]byte, off int64) (n int, err error) { return n, err } -//sys writev(fd int, iovs []Iovec) (n int, err error) +//sys writev(fd int, iovs []Iovec) (n int, err error) func Writev(fd int, iovs [][]byte) (n int, err error) { iovecs := bytes2iovec(iovs) @@ -48,10 +48,43 @@ func Writev(fd int, iovs [][]byte) (n int, err error) { return n, err } -//sys pwritev(fd int, iovs []Iovec, off int64) (n int, err error) +//sys pwritev(fd int, iovs []Iovec, off int64) (n int, err error) func Pwritev(fd int, iovs [][]byte, off int64) (n int, err error) { iovecs := bytes2iovec(iovs) n, err = pwritev(fd, iovecs, off) return n, err } + 
+//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) = libsocket.accept4 + +func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept4(fd, &rsa, &len, flags) + if err != nil { + return + } + if len > SizeofSockaddrAny { + panic("RawSockaddrAny too small") + } + sa, err = anyToSockaddr(fd, &rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) error { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err := pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index e50e4cb276c..84a9e5277ac 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -82,15 +82,6 @@ func IoctlRetInt(fd int, req uint) (int, error) { return int(ret), nil } -// IoctlSetPointerInt performs an ioctl operation which sets an -// integer value on fd, using the specified request number. The ioctl -// argument is called with a pointer to the integer value, rather than -// passing the integer value directly. -func IoctlSetPointerInt(fd int, req uint, value int) error { - v := int32(value) - return ioctl(fd, req, uintptr(unsafe.Pointer(&v))) -} - func IoctlSetRTCTime(fd int, value *RTCTime) error { err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) @@ -115,12 +106,53 @@ func IoctlGetRTCTime(fd int) (*RTCTime, error) { return &value, err } +// IoctlGetWatchdogInfo fetches information about a watchdog device from the +// Linux watchdog API. For more information, see: +// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. +func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) { + var value WatchdogInfo + err := ioctl(fd, WDIOC_GETSUPPORT, uintptr(unsafe.Pointer(&value))) + return &value, err +} + func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) { var value RTCWkAlrm err := ioctl(fd, RTC_WKALM_RD, uintptr(unsafe.Pointer(&value))) return &value, err } +// IoctlFileCloneRange performs an FICLONERANGE ioctl operation to clone the +// range of data conveyed in value to the file associated with the file +// descriptor destFd. See the ioctl_ficlonerange(2) man page for details. +func IoctlFileCloneRange(destFd int, value *FileCloneRange) error { + err := ioctl(destFd, FICLONERANGE, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + +// IoctlFileClone performs an FICLONE ioctl operation to clone the entire file +// associated with the file description srcFd to the file associated with the +// file descriptor destFd. See the ioctl_ficlone(2) man page for details. +func IoctlFileClone(destFd, srcFd int) error { + return ioctl(destFd, FICLONE, uintptr(srcFd)) +} + +// IoctlFileDedupeRange performs an FIDEDUPERANGE ioctl operation to share the +// range of data conveyed in value with the file associated with the file +// descriptor destFd. See the ioctl_fideduperange(2) man page for details. +func IoctlFileDedupeRange(destFd int, value *FileDedupeRange) error { + err := ioctl(destFd, FIDEDUPERANGE, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + +// IoctlWatchdogKeepalive issues a keepalive ioctl to a watchdog device. 
For +// more information, see: +// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. +func IoctlWatchdogKeepalive(fd int) error { + return ioctl(fd, WDIOC_KEEPALIVE, 0) +} + //sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) func Link(oldpath string, newpath string) (err error) { @@ -145,6 +177,12 @@ func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) return openat(dirfd, path, flags|O_LARGEFILE, mode) } +//sys openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) + +func Openat2(dirfd int, path string, how *OpenHow) (fd int, err error) { + return openat2(dirfd, path, how, SizeofOpenHow) +} + //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { @@ -885,6 +923,35 @@ func (sa *SockaddrL2TPIP6) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP6, nil } +// SockaddrIUCV implements the Sockaddr interface for AF_IUCV sockets. +type SockaddrIUCV struct { + UserID string + Name string + raw RawSockaddrIUCV +} + +func (sa *SockaddrIUCV) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Family = AF_IUCV + // These are EBCDIC encoded by the kernel, but we still need to pad them + // with blanks. Initializing with blanks allows the caller to feed in either + // a padded or an unpadded string. + for i := 0; i < 8; i++ { + sa.raw.Nodeid[i] = ' ' + sa.raw.User_id[i] = ' ' + sa.raw.Name[i] = ' ' + } + if len(sa.UserID) > 8 || len(sa.Name) > 8 { + return nil, 0, EINVAL + } + for i, b := range []byte(sa.UserID[:]) { + sa.raw.User_id[i] = int8(b) + } + for i, b := range []byte(sa.Name[:]) { + sa.raw.Name[i] = int8(b) + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrIUCV, nil +} + func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_NETLINK: @@ -1065,6 +1132,38 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { } return sa, nil + case AF_IUCV: + pp := (*RawSockaddrIUCV)(unsafe.Pointer(rsa)) + + var user [8]byte + var name [8]byte + + for i := 0; i < 8; i++ { + user[i] = byte(pp.User_id[i]) + name[i] = byte(pp.Name[i]) + } + + sa := &SockaddrIUCV{ + UserID: string(user[:]), + Name: string(name[:]), + } + return sa, nil + + case AF_CAN: + pp := (*RawSockaddrCAN)(unsafe.Pointer(rsa)) + sa := &SockaddrCAN{ + Ifindex: int(pp.Ifindex), + } + rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) + for i := 0; i < 4; i++ { + rx[i] = pp.Addr[i] + } + tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) + for i := 0; i < 4; i++ { + tx[i] = pp.Addr[i+4] + } + return sa, nil + } return nil, EAFNOSUPPORT } @@ -1965,10 +2064,15 @@ func isGroupMember(gid int) bool { } //sys faccessat(dirfd int, path string, mode uint32) (err error) +//sys Faccessat2(dirfd int, path string, mode uint32, flags int) (err error) func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - if flags & ^(AT_SYMLINK_NOFOLLOW|AT_EACCESS) != 0 { - return EINVAL + if flags == 0 { + return faccessat(dirfd, path, mode) + } + + if err := Faccessat2(dirfd, path, mode, flags); err != ENOSYS && err != EPERM { + return err } // The Linux kernel faccessat system call does not take any flags. @@ -1977,8 +2081,8 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { // Because people naturally expect syscall.Faccessat to act // like C faccessat, we do the same. 
- if flags == 0 { - return faccessat(dirfd, path, mode) + if flags & ^(AT_SYMLINK_NOFOLLOW|AT_EACCESS) != 0 { + return EINVAL } var st Stat_t @@ -2122,6 +2226,18 @@ func Klogset(typ int, arg int) (err error) { return nil } +// RemoteIovec is Iovec with the pointer replaced with an integer. +// It is used for ProcessVMReadv and ProcessVMWritev, where the pointer +// refers to a location in a different process' address space, which +// would confuse the Go garbage collector. +type RemoteIovec struct { + Base uintptr + Len int +} + +//sys ProcessVMReadv(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) = SYS_PROCESS_VM_READV +//sys ProcessVMWritev(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) = SYS_PROCESS_VM_WRITEV + /* * Unimplemented */ diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index 048d18e3c81..c97c2ee53e5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// TODO(rsc): Rewrite all nn(SP) references into name+(nn-8)(FP) -// so that go vet can check that they are correct. - // +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index e1913e2c934..496837b1e37 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -7,7 +7,6 @@ package unix import ( - "syscall" "unsafe" ) @@ -49,10 +48,6 @@ func Pipe2(p []int, flags int) (err error) { return } -// Underlying system call writes to newoffset via pointer. -// Implemented in assembly to avoid allocation. -func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno) - func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { newoffset, errno := seek(fd, offset, whence) if errno != 0 { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go new file mode 100644 index 00000000000..8c514c95ed4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go @@ -0,0 +1,13 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm,!gccgo,linux + +package unix + +import "syscall" + +// Underlying system call writes to newoffset via pointer. +// Implemented in assembly to avoid allocation. 
+func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 45b50a6105e..dbd5e03b627 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -141,23 +141,8 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { return } -const ImplementsGetwd = true - //sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD -func Getwd() (string, error) { - var buf [PathMax]byte - _, err := Getcwd(buf[0:]) - if err != nil { - return "", err - } - n := clen(buf[:]) - if n < 1 { - return "", EINVAL - } - return string(buf[:n]), nil -} - // TODO func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { return -1, ENOSYS diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index a266e92a9b1..2c1f46ea1ef 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -114,23 +114,8 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { return } -const ImplementsGetwd = true - //sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD -func Getwd() (string, error) { - var buf [PathMax]byte - _, err := Getcwd(buf[0:]) - if err != nil { - return "", err - } - n := clen(buf[:]) - if n < 1 { - return "", EINVAL - } - return string(buf[:n]), nil -} - func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go new file mode 100644 index 00000000000..30f285343ee --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go @@ -0,0 +1,35 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of OpenBSD the syscall is called sysctl instead of __sysctl. 
+const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 0e2a696ad36..fee6e995289 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -13,6 +13,7 @@ package unix import ( + "runtime" "syscall" "unsafe" ) @@ -553,8 +554,10 @@ func Minor(dev uint64) uint32 { //sys ioctl(fd int, req uint, arg uintptr) (err error) -func IoctlSetTermio(fd int, req uint, value *Termio) (err error) { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +func IoctlSetTermio(fd int, req uint, value *Termio) error { + err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err } func IoctlGetTermio(fd int, req uint) (*Termio, error) { diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go index 6217cdba57b..c8f9f7a1fb1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go @@ -232,6 +232,8 @@ const ( CLOCK_THREAD_CPUTIME_ID = 0x10 CLOCK_UPTIME_RAW = 0x8 CLOCK_UPTIME_RAW_APPROX = 0x9 + CLONE_NOFOLLOW = 0x1 + CLONE_NOOWNERCOPY = 0x2 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -249,6 +251,7 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTLIOCGINFO = 0xc0644e03 CTL_HW = 0x6 CTL_KERN = 0x1 CTL_MAXNAME = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index e3ff2ee3d48..7180064b2b8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -232,6 +232,8 @@ const ( CLOCK_THREAD_CPUTIME_ID = 0x10 CLOCK_UPTIME_RAW = 0x8 CLOCK_UPTIME_RAW_APPROX = 0x9 + CLONE_NOFOLLOW = 0x1 + CLONE_NOOWNERCOPY = 0x2 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -249,6 +251,7 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTLIOCGINFO = 0xc0644e03 CTL_HW = 0x6 CTL_KERN = 0x1 CTL_MAXNAME = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go index 3e417571a94..3b9ca758582 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go @@ -232,6 +232,8 @@ const ( CLOCK_THREAD_CPUTIME_ID = 0x10 CLOCK_UPTIME_RAW = 0x8 CLOCK_UPTIME_RAW_APPROX = 0x9 + CLONE_NOFOLLOW = 0x1 + CLONE_NOOWNERCOPY = 0x2 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -249,6 +251,7 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTLIOCGINFO = 0xc0644e03 CTL_HW = 0x6 CTL_KERN = 0x1 CTL_MAXNAME = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index cbd8ed18b97..4687c73ac16 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -232,6 +232,8 @@ const ( CLOCK_THREAD_CPUTIME_ID = 0x10 CLOCK_UPTIME_RAW = 0x8 CLOCK_UPTIME_RAW_APPROX = 0x9 + CLONE_NOFOLLOW = 0x1 + CLONE_NOOWNERCOPY = 0x2 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -249,6 +251,7 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTLIOCGINFO = 0xc0644e03 CTL_HW = 0x6 CTL_KERN = 0x1 CTL_MAXNAME = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index 6130471748a..f5e91b7abaa 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ 
-62,6 +62,7 @@ const ( B28800 = 0x7080 B300 = 0x12c B38400 = 0x9600 + B460800 = 0x70800 B4800 = 0x12c0 B50 = 0x32 B57600 = 0xe100 @@ -69,12 +70,15 @@ const ( B7200 = 0x1c20 B75 = 0x4b B76800 = 0x12c00 + B921600 = 0xe1000 B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427d BIOCFLUSH = 0x20004268 BIOCGBLEN = 0x40044266 BIOCGDLT = 0x4004426a BIOCGDLTLIST = 0xc0104279 BIOCGETIF = 0x4020426b + BIOCGFEEDBACK = 0x4004427c BIOCGHDRCMPLT = 0x40044274 BIOCGRSIG = 0x40044272 BIOCGRTIMEOUT = 0x4010426e @@ -88,6 +92,7 @@ const ( BIOCSETF = 0x80104267 BIOCSETIF = 0x8020426c BIOCSETWF = 0x8010427b + BIOCSFEEDBACK = 0x8004427d BIOCSHDRCMPLT = 0x80044275 BIOCSRSIG = 0x80044273 BIOCSRTIMEOUT = 0x8010426d @@ -125,6 +130,7 @@ const ( BPF_MINBUFSIZE = 0x20 BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 + BPF_MOD = 0x90 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 @@ -139,6 +145,7 @@ const ( BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XOR = 0xa0 BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 @@ -156,6 +163,12 @@ const ( CLOCK_UPTIME_FAST = 0x8 CLOCK_UPTIME_PRECISE = 0x7 CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x30000 CS5 = 0x0 @@ -175,6 +188,7 @@ const ( DLT_A429 = 0xb8 DLT_A653_ICM = 0xb9 DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde DLT_APPLE_IP_OVER_IEEE1394 = 0x8a DLT_ARCNET = 0x7 DLT_ARCNET_LINUX = 0x81 @@ -184,22 +198,33 @@ const ( DLT_AX25 = 0x3 DLT_AX25_KISS = 0xca DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_BREDR_BB = 0xff DLT_BLUETOOTH_HCI_H4 = 0xbb DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_BLUETOOTH_LE_LL = 0xfb + DLT_BLUETOOTH_LE_LL_WITH_PHDR = 0x100 + DLT_BLUETOOTH_LINUX_MONITOR = 0xfe DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 DLT_CHAOS = 0x5 DLT_CHDLC = 0x68 DLT_CISCO_IOS = 0x76 DLT_C_HDLC = 0x68 DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb DLT_ECONET = 0x73 DLT_EN10MB = 0x1 DLT_EN3MB = 0x2 DLT_ENC = 0x6d + DLT_EPON = 0x103 DLT_ERF = 0xc5 DLT_ERF_ETH = 0xaf DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 DLT_FDDI = 0xa DLT_FLEXRAY = 0xd2 DLT_FRELAY = 0x6b @@ -209,6 +234,8 @@ const ( DLT_GPF_F = 0xab DLT_GPF_T = 0xaa DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 DLT_HHDLC = 0x79 DLT_IBM_SN = 0x92 DLT_IBM_SP = 0x91 @@ -218,18 +245,28 @@ const ( DLT_IEEE802_11_RADIO_AVS = 0xa3 DLT_IEEE802_15_4 = 0xc3 DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 DLT_IEEE802_15_4_NONASK_PHY = 0xd7 DLT_IEEE802_16_MAC_CPS = 0xbc DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 DLT_IPMB = 0xc7 DLT_IPMB_LINUX = 0xd1 + DLT_IPMI_HPM_2 = 0x104 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 DLT_IP_OVER_FC = 0x7a + DLT_ISO_14443 = 0x108 DLT_JUNIPER_ATM1 = 0x89 DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee DLT_JUNIPER_CHDLC = 0xb5 DLT_JUNIPER_ES = 0x84 DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea DLT_JUNIPER_FRELAY = 0xb4 DLT_JUNIPER_GGSN = 0x85 DLT_JUNIPER_ISM = 0xc2 @@ -242,25 +279,40 @@ const ( DLT_JUNIPER_PPPOE = 0xa7 DLT_JUNIPER_PPPOE_ATM = 0xa8 DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 DLT_JUNIPER_ST = 0xc8 DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 DLT_LAPB_WITH_DIR = 0xcf DLT_LAPD = 0xcb DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 DLT_LINUX_IRDA = 0x90 DLT_LINUX_LAPD = 0xb1 DLT_LINUX_SLL = 0x71 DLT_LOOP = 0x6c DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0x109 + DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + 
DLT_MPLS = 0xdb DLT_MTP2 = 0x8c DLT_MTP2_WITH_PHDR = 0x8b DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NETLINK = 0xfd + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 DLT_NULL = 0x0 DLT_PCI_EXP = 0x7d DLT_PFLOG = 0x75 DLT_PFSYNC = 0x12 + DLT_PKTAP = 0x102 DLT_PPI = 0xc0 DLT_PPP = 0x9 DLT_PPP_BSDOS = 0x10 @@ -269,22 +321,51 @@ const ( DLT_PPP_SERIAL = 0x32 DLT_PPP_WITH_DIR = 0xcc DLT_PRISM_HEADER = 0x77 + DLT_PROFIBUS_DL = 0x101 DLT_PRONET = 0x4 DLT_RAIF1 = 0xc6 DLT_RAW = 0xc + DLT_RDS = 0x109 DLT_REDBACK_SMARTEDGE = 0x20 DLT_RIO = 0x7c + DLT_RTAC_SERIAL = 0xfa DLT_SCCP = 0x8e + DLT_SCTP = 0xf8 DLT_SITA = 0xc4 DLT_SLIP = 0x8 DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed DLT_SUNATM = 0x7b DLT_SYMANTEC_FIREWALL = 0x63 DLT_TZSP = 0x80 DLT_USB = 0xba + DLT_USBPCAP = 0xf9 + DLT_USB_FREEBSD = 0xba DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WATTSTOPPER_DLM = 0x107 + DLT_WIHART = 0xdf + DLT_WIRESHARK_UPPER_PDU = 0xfc DLT_X2E_SERIAL = 0xd5 DLT_X2E_XORAYA = 0xd6 + DLT_ZWAVE_R1_R2 = 0x105 + DLT_ZWAVE_R3 = 0x106 DT_BLK = 0x6 DT_CHR = 0x2 DT_DBF = 0xf @@ -323,10 +404,11 @@ const ( EV_EOF = 0x8000 EV_ERROR = 0x4000 EV_FLAG1 = 0x2000 + EV_HUP = 0x800 EV_NODATA = 0x1000 EV_ONESHOT = 0x10 EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTEXIT_LWP = 0x10000 @@ -365,8 +447,9 @@ const ( IFF_ALLMULTI = 0x200 IFF_ALTPHYS = 0x4000 IFF_BROADCAST = 0x2 - IFF_CANTCHANGE = 0x118e72 + IFF_CANTCHANGE = 0x318e72 IFF_DEBUG = 0x4 + IFF_IDIRECT = 0x200000 IFF_LINK0 = 0x1000 IFF_LINK1 = 0x2000 IFF_LINK2 = 0x4000 @@ -441,7 +524,6 @@ const ( IFT_EPLRS = 0x57 IFT_ESCON = 0x49 IFT_ETHER = 0x6 - IFT_FAITH = 0xf2 IFT_FAST = 0x7d IFT_FASTETHER = 0x3e IFT_FASTETHERFX = 0x45 @@ -614,6 +696,7 @@ const ( IN_CLASSD_NET = 0xf0000000 IN_CLASSD_NSHIFT = 0x1c IN_LOOPBACKNET = 0x7f + IN_RFC3021_MASK = 0xfffffffe IPPROTO_3PC = 0x22 IPPROTO_ADFS = 0x44 IPPROTO_AH = 0x33 @@ -735,7 +818,6 @@ const ( IPV6_DEFHLIM = 0x40 IPV6_DONTFRAG = 0x3e IPV6_DSTOPTS = 0x32 - IPV6_FAITH = 0x1d IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FRAGTTL = 0x78 @@ -747,7 +829,6 @@ const ( IPV6_HLIMDEC = 0x1 IPV6_HOPLIMIT = 0x2f IPV6_HOPOPTS = 0x31 - IPV6_IPSEC_POLICY = 0x1c IPV6_JOIN_GROUP = 0xc IPV6_LEAVE_GROUP = 0xd IPV6_MAXHLIM = 0xff @@ -795,16 +876,22 @@ const ( IP_DUMMYNET_DEL = 0x3d IP_DUMMYNET_FLUSH = 0x3e IP_DUMMYNET_GET = 0x40 - IP_FAITH = 0x16 IP_FW_ADD = 0x32 IP_FW_DEL = 0x33 IP_FW_FLUSH = 0x34 IP_FW_GET = 0x36 IP_FW_RESETLOG = 0x37 + IP_FW_TBL_ADD = 0x2a + IP_FW_TBL_CREATE = 0x28 + IP_FW_TBL_DEL = 0x2b + IP_FW_TBL_DESTROY = 0x29 + IP_FW_TBL_EXPIRE = 0x2f + IP_FW_TBL_FLUSH = 0x2c + IP_FW_TBL_GET = 0x2d + IP_FW_TBL_ZERO = 0x2e IP_FW_X = 0x31 IP_FW_ZERO = 0x35 IP_HDRINCL = 0x2 - IP_IPSEC_POLICY = 0x15 IP_MAXPACKET = 0xffff IP_MAX_MEMBERSHIPS = 0x14 IP_MF = 0x2000 @@ -1080,12 +1167,10 @@ const ( RTM_MISS = 0x7 RTM_NEWADDR = 0xc RTM_NEWMADDR = 0xf - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb RTM_RTTUNIT = 0xf4240 - RTM_VERSION = 0x6 + RTM_VERSION = 0x7 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 RTV_IWCAPSEGS = 0x400 @@ -1106,13 +1191,13 @@ const ( SHUT_RDWR = 0x2 SHUT_WR = 
0x1 SIOCADDMULTI = 0x80206931 - SIOCADDRT = 0x8040720a SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 SIOCALIFADDR = 0x8118691b SIOCATMARK = 0x40047307 SIOCDELMULTI = 0x80206932 - SIOCDELRT = 0x8040720b SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 SIOCDIFPHYADDR = 0x80206949 SIOCDLIFADDR = 0x8118691d SIOCGDRVSPEC = 0xc028697b @@ -1120,6 +1205,7 @@ const ( SIOCGETVIFCNT = 0xc028720f SIOCGHIWAT = 0x40047301 SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc0406929 SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0106924 @@ -1128,6 +1214,7 @@ const ( SIOCGIFFLAGS = 0xc0206911 SIOCGIFGENERIC = 0xc020693a SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 SIOCGIFINDEX = 0xc0206920 SIOCGIFMEDIA = 0xc0306938 SIOCGIFMETRIC = 0xc0206917 @@ -1194,6 +1281,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x2000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_SNDBUF = 0x1001 @@ -1233,6 +1321,9 @@ const ( S_IXGRP = 0x8 S_IXOTH = 0x1 S_IXUSR = 0x40 + TAB0 = 0x0 + TAB3 = 0x4 + TABDLY = 0x4 TCIFLUSH = 0x1 TCIOFF = 0x3 TCIOFLUSH = 0x3 @@ -1259,6 +1350,8 @@ const ( TCP_NOPUSH = 0x4 TCP_SIGNATURE_ENABLE = 0x10 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 TIOCCONS = 0x80047462 @@ -1272,7 +1365,6 @@ const ( TIOCGETD = 0x4004741a TIOCGPGRP = 0x40047477 TIOCGSID = 0x40047463 - TIOCGSIZE = 0x40087468 TIOCGWINSZ = 0x40087468 TIOCISPTMASTER = 0x20007455 TIOCMBIC = 0x8004746b @@ -1317,7 +1409,6 @@ const ( TIOCSETD = 0x8004741b TIOCSIG = 0x2000745f TIOCSPGRP = 0x80047476 - TIOCSSIZE = 0x80087467 TIOCSTART = 0x2000746e TIOCSTAT = 0x20007465 TIOCSTI = 0x80017472 @@ -1326,6 +1417,8 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 VCHECKPT = 0x13 VDISCARD = 0xf VDSUSP = 0xb @@ -1350,9 +1443,12 @@ const ( VWERASE = 0x4 WCONTINUED = 0x4 WCOREFLAG = 0x80 + WEXITED = 0x10 WLINUXCLONE = 0x80000000 WNOHANG = 0x1 - WSTOPPED = 0x7f + WNOWAIT = 0x8 + WSTOPPED = 0x2 + WTRAPPED = 0x20 WUNTRACED = 0x2 ) @@ -1452,11 +1548,6 @@ const ( ETIMEDOUT = syscall.Errno(0x3c) ETOOMANYREFS = syscall.Errno(0x3b) ETXTBSY = syscall.Errno(0x1a) - EUNUSED94 = syscall.Errno(0x5e) - EUNUSED95 = syscall.Errno(0x5f) - EUNUSED96 = syscall.Errno(0x60) - EUNUSED97 = syscall.Errno(0x61) - EUNUSED98 = syscall.Errno(0x62) EUSERS = syscall.Errno(0x44) EWOULDBLOCK = syscall.Errno(0x23) EXDEV = syscall.Errno(0x12) @@ -1600,12 +1691,7 @@ var errorList = [...]struct { {91, "ENOLINK", "link has been severed"}, {92, "EPROTO", "protocol error"}, {93, "ENOMEDIUM", "no medium found"}, - {94, "EUNUSED94", "unknown error: 94"}, - {95, "EUNUSED95", "unknown error: 95"}, - {96, "EUNUSED96", "unknown error: 96"}, - {97, "EUNUSED97", "unknown error: 97"}, - {98, "EUNUSED98", "unknown error: 98"}, - {99, "ELAST", "unknown error: 99"}, + {99, "EASYNC", "unknown error: 99"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 84824587346..3689c808481 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -339,6 +339,12 @@ const ( CLOCK_UPTIME_FAST = 0x8 CLOCK_UPTIME_PRECISE = 0x7 CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x30000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go 
b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 4acd101c3ee..b8f7c3c930a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -339,6 +339,12 @@ const ( CLOCK_UPTIME_FAST = 0x8 CLOCK_UPTIME_PRECISE = 0x7 CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x30000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index e4719873b9e..be14bb1a4cd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -339,6 +339,12 @@ const ( CLOCK_UPTIME_FAST = 0x8 CLOCK_UPTIME_PRECISE = 0x7 CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x30000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index 5e49769d96a..7ce9c0081a8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -339,6 +339,12 @@ const ( CLOCK_UPTIME_FAST = 0x8 CLOCK_UPTIME_PRECISE = 0x7 CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x30000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index f8bd50c11ba..2069fb861d1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -244,8 +244,66 @@ const ( CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d CAN_EFF_MASK = 0x1fffffff + CAN_ERR_ACK = 0x20 + CAN_ERR_BUSERROR = 0x80 + CAN_ERR_BUSOFF = 0x40 + CAN_ERR_CRTL = 0x4 + CAN_ERR_CRTL_ACTIVE = 0x40 + CAN_ERR_CRTL_RX_OVERFLOW = 0x1 + CAN_ERR_CRTL_RX_PASSIVE = 0x10 + CAN_ERR_CRTL_RX_WARNING = 0x4 + CAN_ERR_CRTL_TX_OVERFLOW = 0x2 + CAN_ERR_CRTL_TX_PASSIVE = 0x20 + CAN_ERR_CRTL_TX_WARNING = 0x8 + CAN_ERR_CRTL_UNSPEC = 0x0 + CAN_ERR_DLC = 0x8 CAN_ERR_FLAG = 0x20000000 + CAN_ERR_LOSTARB = 0x2 + CAN_ERR_LOSTARB_UNSPEC = 0x0 CAN_ERR_MASK = 0x1fffffff + CAN_ERR_PROT = 0x8 + CAN_ERR_PROT_ACTIVE = 0x40 + CAN_ERR_PROT_BIT = 0x1 + CAN_ERR_PROT_BIT0 = 0x8 + CAN_ERR_PROT_BIT1 = 0x10 + CAN_ERR_PROT_FORM = 0x2 + CAN_ERR_PROT_LOC_ACK = 0x19 + CAN_ERR_PROT_LOC_ACK_DEL = 0x1b + CAN_ERR_PROT_LOC_CRC_DEL = 0x18 + CAN_ERR_PROT_LOC_CRC_SEQ = 0x8 + CAN_ERR_PROT_LOC_DATA = 0xa + CAN_ERR_PROT_LOC_DLC = 0xb + CAN_ERR_PROT_LOC_EOF = 0x1a + CAN_ERR_PROT_LOC_ID04_00 = 0xe + CAN_ERR_PROT_LOC_ID12_05 = 0xf + CAN_ERR_PROT_LOC_ID17_13 = 0x7 + CAN_ERR_PROT_LOC_ID20_18 = 0x6 + CAN_ERR_PROT_LOC_ID28_21 = 0x2 + CAN_ERR_PROT_LOC_IDE = 0x5 + CAN_ERR_PROT_LOC_INTERM = 0x12 + CAN_ERR_PROT_LOC_RES0 = 0x9 + CAN_ERR_PROT_LOC_RES1 = 0xd + CAN_ERR_PROT_LOC_RTR = 0xc + CAN_ERR_PROT_LOC_SOF = 0x3 + CAN_ERR_PROT_LOC_SRTR = 0x4 + CAN_ERR_PROT_LOC_UNSPEC = 0x0 + CAN_ERR_PROT_OVERLOAD = 0x20 + CAN_ERR_PROT_STUFF = 0x4 + CAN_ERR_PROT_TX = 0x80 + CAN_ERR_PROT_UNSPEC = 0x0 + CAN_ERR_RESTARTED = 0x100 + CAN_ERR_TRX = 0x10 + CAN_ERR_TRX_CANH_NO_WIRE = 0x4 + CAN_ERR_TRX_CANH_SHORT_TO_BAT = 0x5 + CAN_ERR_TRX_CANH_SHORT_TO_GND = 0x7 + CAN_ERR_TRX_CANH_SHORT_TO_VCC = 0x6 + CAN_ERR_TRX_CANL_NO_WIRE = 0x40 + CAN_ERR_TRX_CANL_SHORT_TO_BAT = 0x50 + CAN_ERR_TRX_CANL_SHORT_TO_CANH = 0x80 + CAN_ERR_TRX_CANL_SHORT_TO_GND = 0x70 + 
CAN_ERR_TRX_CANL_SHORT_TO_VCC = 0x60 + CAN_ERR_TRX_UNSPEC = 0x0 + CAN_ERR_TX_TIMEOUT = 0x1 CAN_INV_FILTER = 0x20000000 CAN_ISOTP = 0x6 CAN_J1939 = 0x7 @@ -265,6 +323,8 @@ const ( CAP_AUDIT_READ = 0x25 CAP_AUDIT_WRITE = 0x1d CAP_BLOCK_SUSPEND = 0x24 + CAP_BPF = 0x27 + CAP_CHECKPOINT_RESTORE = 0x28 CAP_CHOWN = 0x0 CAP_DAC_OVERRIDE = 0x1 CAP_DAC_READ_SEARCH = 0x2 @@ -273,7 +333,7 @@ const ( CAP_IPC_LOCK = 0xe CAP_IPC_OWNER = 0xf CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 + CAP_LAST_CAP = 0x28 CAP_LEASE = 0x1c CAP_LINUX_IMMUTABLE = 0x9 CAP_MAC_ADMIN = 0x21 @@ -283,6 +343,7 @@ const ( CAP_NET_BIND_SERVICE = 0xa CAP_NET_BROADCAST = 0xb CAP_NET_RAW = 0xd + CAP_PERFMON = 0x26 CAP_SETFCAP = 0x1f CAP_SETGID = 0x6 CAP_SETPCAP = 0x8 @@ -372,8 +433,54 @@ const ( DEVLINK_GENL_NAME = "devlink" DEVLINK_GENL_VERSION = 0x1 DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14 + DEVMEM_MAGIC = 0x454d444d DEVPTS_SUPER_MAGIC = 0x1cd1 DMA_BUF_MAGIC = 0x444d4142 + DM_ACTIVE_PRESENT_FLAG = 0x20 + DM_BUFFER_FULL_FLAG = 0x100 + DM_CONTROL_NODE = "control" + DM_DATA_OUT_FLAG = 0x10000 + DM_DEFERRED_REMOVE = 0x20000 + DM_DEV_ARM_POLL = 0xc138fd10 + DM_DEV_CREATE = 0xc138fd03 + DM_DEV_REMOVE = 0xc138fd04 + DM_DEV_RENAME = 0xc138fd05 + DM_DEV_SET_GEOMETRY = 0xc138fd0f + DM_DEV_STATUS = 0xc138fd07 + DM_DEV_SUSPEND = 0xc138fd06 + DM_DEV_WAIT = 0xc138fd08 + DM_DIR = "mapper" + DM_GET_TARGET_VERSION = 0xc138fd11 + DM_INACTIVE_PRESENT_FLAG = 0x40 + DM_INTERNAL_SUSPEND_FLAG = 0x40000 + DM_IOCTL = 0xfd + DM_LIST_DEVICES = 0xc138fd02 + DM_LIST_VERSIONS = 0xc138fd0d + DM_MAX_TYPE_NAME = 0x10 + DM_NAME_LEN = 0x80 + DM_NOFLUSH_FLAG = 0x800 + DM_PERSISTENT_DEV_FLAG = 0x8 + DM_QUERY_INACTIVE_TABLE_FLAG = 0x1000 + DM_READONLY_FLAG = 0x1 + DM_REMOVE_ALL = 0xc138fd01 + DM_SECURE_DATA_FLAG = 0x8000 + DM_SKIP_BDGET_FLAG = 0x200 + DM_SKIP_LOCKFS_FLAG = 0x400 + DM_STATUS_TABLE_FLAG = 0x10 + DM_SUSPEND_FLAG = 0x2 + DM_TABLE_CLEAR = 0xc138fd0a + DM_TABLE_DEPS = 0xc138fd0b + DM_TABLE_LOAD = 0xc138fd09 + DM_TABLE_STATUS = 0xc138fd0c + DM_TARGET_MSG = 0xc138fd0e + DM_UEVENT_GENERATED_FLAG = 0x2000 + DM_UUID_FLAG = 0x4000 + DM_UUID_LEN = 0x81 + DM_VERSION = 0xc138fd00 + DM_VERSION_EXTRA = "-ioctl (2020-02-27)" + DM_VERSION_MAJOR = 0x4 + DM_VERSION_MINOR = 0x2a + DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -475,6 +582,7 @@ const ( ETH_P_MOBITEX = 0x15 ETH_P_MPLS_MC = 0x8848 ETH_P_MPLS_UC = 0x8847 + ETH_P_MRP = 0x88e3 ETH_P_MVRP = 0x88f5 ETH_P_NCSI = 0x88f8 ETH_P_NSH = 0x894f @@ -543,8 +651,8 @@ const ( FAN_DELETE = 0x200 FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 - FAN_DIR_MODIFY = 0x80000 FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_DFID = 0x3 FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 @@ -572,13 +680,17 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_DFID_NAME = 0xc00 + FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FID = 0x200 + FAN_REPORT_NAME = 0x800 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 + FIDEDUPERANGE = 0xc0189436 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 @@ -602,8 +714,9 @@ const ( FSCRYPT_POLICY_FLAGS_PAD_4 = 0x0 FSCRYPT_POLICY_FLAGS_PAD_8 = 0x1 FSCRYPT_POLICY_FLAGS_PAD_MASK = 0x3 - FSCRYPT_POLICY_FLAGS_VALID = 0xf + FSCRYPT_POLICY_FLAGS_VALID = 0x1f FSCRYPT_POLICY_FLAG_DIRECT_KEY = 0x4 + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32 = 0x10 FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 = 0x8 FSCRYPT_POLICY_V1 = 
0x0 FSCRYPT_POLICY_V2 = 0x2 @@ -632,7 +745,7 @@ const ( FS_POLICY_FLAGS_PAD_4 = 0x0 FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0xf + FS_POLICY_FLAGS_VALID = 0x1f FS_VERITY_FL = 0x100000 FS_VERITY_HASH_ALG_SHA256 = 0x1 FS_VERITY_HASH_ALG_SHA512 = 0x2 @@ -834,6 +947,7 @@ const ( IPPROTO_EGP = 0x8 IPPROTO_ENCAP = 0x62 IPPROTO_ESP = 0x32 + IPPROTO_ETHERNET = 0x8f IPPROTO_FRAGMENT = 0x2c IPPROTO_GRE = 0x2f IPPROTO_HOPOPTS = 0x0 @@ -847,6 +961,7 @@ const ( IPPROTO_L2TP = 0x73 IPPROTO_MH = 0x87 IPPROTO_MPLS = 0x89 + IPPROTO_MPTCP = 0x106 IPPROTO_MTP = 0x5c IPPROTO_NONE = 0x3b IPPROTO_PIM = 0x67 @@ -1016,6 +1131,7 @@ const ( KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 KEYCTL_CAPS0_PUBLIC_KEY = 0x8 KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NOTIFICATIONS = 0x4 KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 @@ -1053,6 +1169,7 @@ const ( KEYCTL_SUPPORTS_VERIFY = 0x8 KEYCTL_UNLINK = 0x9 KEYCTL_UPDATE = 0x2 + KEYCTL_WATCH_KEY = 0x20 KEY_REQKEY_DEFL_DEFAULT = 0x0 KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 KEY_REQKEY_DEFL_NO_CHANGE = -0x1 @@ -1096,6 +1213,8 @@ const ( LOOP_SET_FD = 0x4c00 LOOP_SET_STATUS = 0x4c02 LOOP_SET_STATUS64 = 0x4c04 + LOOP_SET_STATUS_CLEARABLE_FLAGS = 0x4 + LOOP_SET_STATUS_SETTABLE_FLAGS = 0xc LO_KEY_SIZE = 0x20 LO_NAME_SIZE = 0x40 MADV_COLD = 0x14 @@ -1393,6 +1512,92 @@ const ( PARITY_DEFAULT = 0x0 PARITY_NONE = 0x1 PARMRK = 0x8 + PERF_ATTR_SIZE_VER0 = 0x40 + PERF_ATTR_SIZE_VER1 = 0x48 + PERF_ATTR_SIZE_VER2 = 0x50 + PERF_ATTR_SIZE_VER3 = 0x60 + PERF_ATTR_SIZE_VER4 = 0x68 + PERF_ATTR_SIZE_VER5 = 0x70 + PERF_ATTR_SIZE_VER6 = 0x78 + PERF_AUX_FLAG_COLLISION = 0x8 + PERF_AUX_FLAG_OVERWRITE = 0x2 + PERF_AUX_FLAG_PARTIAL = 0x4 + PERF_AUX_FLAG_TRUNCATED = 0x1 + PERF_FLAG_FD_CLOEXEC = 0x8 + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 + PERF_MAX_CONTEXTS_PER_STACK = 0x8 + PERF_MAX_STACK_DEPTH = 0x7f + PERF_MEM_LOCK_LOCKED = 0x2 + PERF_MEM_LOCK_NA = 0x1 + PERF_MEM_LOCK_SHIFT = 0x18 + PERF_MEM_LVLNUM_ANY_CACHE = 0xb + PERF_MEM_LVLNUM_L1 = 0x1 + PERF_MEM_LVLNUM_L2 = 0x2 + PERF_MEM_LVLNUM_L3 = 0x3 + PERF_MEM_LVLNUM_L4 = 0x4 + PERF_MEM_LVLNUM_LFB = 0xc + PERF_MEM_LVLNUM_NA = 0xf + PERF_MEM_LVLNUM_PMEM = 0xe + PERF_MEM_LVLNUM_RAM = 0xd + PERF_MEM_LVLNUM_SHIFT = 0x21 + PERF_MEM_LVL_HIT = 0x2 + PERF_MEM_LVL_IO = 0x1000 + PERF_MEM_LVL_L1 = 0x8 + PERF_MEM_LVL_L2 = 0x20 + PERF_MEM_LVL_L3 = 0x40 + PERF_MEM_LVL_LFB = 0x10 + PERF_MEM_LVL_LOC_RAM = 0x80 + PERF_MEM_LVL_MISS = 0x4 + PERF_MEM_LVL_NA = 0x1 + PERF_MEM_LVL_REM_CCE1 = 0x400 + PERF_MEM_LVL_REM_CCE2 = 0x800 + PERF_MEM_LVL_REM_RAM1 = 0x100 + PERF_MEM_LVL_REM_RAM2 = 0x200 + PERF_MEM_LVL_SHIFT = 0x5 + PERF_MEM_LVL_UNC = 0x2000 + PERF_MEM_OP_EXEC = 0x10 + PERF_MEM_OP_LOAD = 0x2 + PERF_MEM_OP_NA = 0x1 + PERF_MEM_OP_PFETCH = 0x8 + PERF_MEM_OP_SHIFT = 0x0 + PERF_MEM_OP_STORE = 0x4 + PERF_MEM_REMOTE_REMOTE = 0x1 + PERF_MEM_REMOTE_SHIFT = 0x25 + PERF_MEM_SNOOPX_FWD = 0x1 + PERF_MEM_SNOOPX_SHIFT = 0x25 + PERF_MEM_SNOOP_HIT = 0x4 + PERF_MEM_SNOOP_HITM = 0x10 + PERF_MEM_SNOOP_MISS = 0x8 + PERF_MEM_SNOOP_NA = 0x1 + PERF_MEM_SNOOP_NONE = 0x2 + PERF_MEM_SNOOP_SHIFT = 0x13 + PERF_MEM_TLB_HIT = 0x2 + PERF_MEM_TLB_L1 = 0x8 + PERF_MEM_TLB_L2 = 0x10 + PERF_MEM_TLB_MISS = 0x4 + PERF_MEM_TLB_NA = 0x1 + PERF_MEM_TLB_OS = 0x40 + PERF_MEM_TLB_SHIFT = 0x1a + PERF_MEM_TLB_WK = 0x20 + PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER = 0x1 + PERF_RECORD_MISC_COMM_EXEC = 0x2000 + PERF_RECORD_MISC_CPUMODE_MASK = 0x7 + PERF_RECORD_MISC_CPUMODE_UNKNOWN = 0x0 + 
PERF_RECORD_MISC_EXACT_IP = 0x4000 + PERF_RECORD_MISC_EXT_RESERVED = 0x8000 + PERF_RECORD_MISC_FORK_EXEC = 0x2000 + PERF_RECORD_MISC_GUEST_KERNEL = 0x4 + PERF_RECORD_MISC_GUEST_USER = 0x5 + PERF_RECORD_MISC_HYPERVISOR = 0x3 + PERF_RECORD_MISC_KERNEL = 0x1 + PERF_RECORD_MISC_MMAP_DATA = 0x2000 + PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT = 0x1000 + PERF_RECORD_MISC_SWITCH_OUT = 0x2000 + PERF_RECORD_MISC_SWITCH_OUT_PREEMPT = 0x4000 + PERF_RECORD_MISC_USER = 0x2 + PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PIPEFS_MAGIC = 0x50495045 PPC_CMM_MAGIC = 0xc7571590 PPPIOCGNPMODE = 0xc008744c @@ -1779,6 +1984,7 @@ const ( RTPROT_EIGRP = 0xc0 RTPROT_GATED = 0x8 RTPROT_ISIS = 0xbb + RTPROT_KEEPALIVED = 0x12 RTPROT_KERNEL = 0x2 RTPROT_MROUTED = 0x11 RTPROT_MRT = 0xa @@ -1929,6 +2135,7 @@ const ( SOL_ATM = 0x108 SOL_CAIF = 0x116 SOL_CAN_BASE = 0x64 + SOL_CAN_RAW = 0x65 SOL_DCCP = 0x10d SOL_DECNET = 0x105 SOL_ICMPV6 = 0x3a @@ -1968,6 +2175,7 @@ const ( SO_EE_ORIGIN_TXSTATUS = 0x4 SO_EE_ORIGIN_TXTIME = 0x6 SO_EE_ORIGIN_ZEROCOPY = 0x5 + SO_EE_RFC4884_FLAG_INVALID = 0x1 SO_GET_FILTER = 0x1a SO_NO_CHECK = 0xb SO_PEERNAME = 0x1c @@ -1992,8 +2200,10 @@ const ( STATX_ATTR_APPEND = 0x20 STATX_ATTR_AUTOMOUNT = 0x1000 STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_DAX = 0x2000 STATX_ATTR_ENCRYPTED = 0x800 STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_MOUNT_ROOT = 0x2000 STATX_ATTR_NODUMP = 0x40 STATX_ATTR_VERITY = 0x100000 STATX_BASIC_STATS = 0x7ff @@ -2002,6 +2212,7 @@ const ( STATX_CTIME = 0x80 STATX_GID = 0x10 STATX_INO = 0x100 + STATX_MNT_ID = 0x1000 STATX_MODE = 0x2 STATX_MTIME = 0x40 STATX_NLINK = 0x4 @@ -2238,6 +2449,23 @@ const ( WCONTINUED = 0x8 WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 + WDIOF_ALARMONLY = 0x400 + WDIOF_CARDRESET = 0x20 + WDIOF_EXTERN1 = 0x4 + WDIOF_EXTERN2 = 0x8 + WDIOF_FANFAULT = 0x2 + WDIOF_KEEPALIVEPING = 0x8000 + WDIOF_MAGICCLOSE = 0x100 + WDIOF_OVERHEAT = 0x1 + WDIOF_POWEROVER = 0x40 + WDIOF_POWERUNDER = 0x10 + WDIOF_PRETIMEOUT = 0x200 + WDIOF_SETTIMEOUT = 0x80 + WDIOF_UNKNOWN = -0x1 + WDIOS_DISABLECARD = 0x1 + WDIOS_ENABLECARD = 0x2 + WDIOS_TEMPPANIC = 0x4 + WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 WIN_ACKMEDIACHANGE = 0xdb WIN_CHECKPOWERMODE1 = 0xe5 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 8d207b041ef..dd282c08b7f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -71,6 +71,8 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d FLUSHO = 0x1000 FP_XSTATE_MAGIC2 = 0x46505845 FS_IOC_ENABLE_VERITY = 0x40806685 @@ -78,6 +80,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40046602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0xc F_GETLK64 = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c4bf9cb80f0..82fc93c7bbc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -71,6 +71,8 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d FLUSHO = 0x1000 FP_XSTATE_MAGIC2 = 0x46505845 FS_IOC_ENABLE_VERITY = 0x40806685 @@ -78,6 +80,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS 
= 0x40086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0x5 F_GETLK64 = 0x5 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 0cab0522e64..fe7094f2763 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d FLUSHO = 0x1000 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80046601 FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40046602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0xc F_GETLK64 = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 370d0a7f591..3b6cc58803b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -73,6 +73,8 @@ const ( EXTRA_MAGIC = 0x45585401 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d FLUSHO = 0x1000 FPSIMD_MAGIC = 0x46508001 FS_IOC_ENABLE_VERITY = 0x40806685 @@ -80,6 +82,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0x5 F_GETLK64 = 0x5 @@ -191,6 +194,7 @@ const ( PPPIOCSRASYNCMAP = 0x40047454 PPPIOCSXASYNCMAP = 0x4020744f PPPIOCXFERUNIT = 0x744e + PROT_BTI = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index fbf2f3174e7..ce3d9ae1561 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x2000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40046601 FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80046602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x21 F_GETLK64 = 0x21 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 25e74b30a90..7a85215ce52 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x2000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40086601 FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0xe F_GETLK64 = 0xe diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 4ecc0bca344..07d4cc1bd5f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x80049409 + FICLONERANGE = 
0x8020940d FLUSHO = 0x2000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40086601 FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0xe F_GETLK64 = 0xe diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index dfb8f88a7ed..d4842ba1c2a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x2000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40046601 FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80046602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x21 F_GETLK64 = 0x21 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 72d8dad5b85..941e20dacec 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000000 FF1 = 0x4000 FFDLY = 0x4000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x800000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40086601 FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x5 F_GETLK64 = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index ca0e7b52628..63d3bc56627 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000000 FF1 = 0x4000 FFDLY = 0x4000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x800000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40086601 FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x5 F_GETLK64 = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 147511a9744..490bee1ab1b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d FLUSHO = 0x1000 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80086601 FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0x5 F_GETLK64 = 0x5 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 517349dafa4..467b8218e80 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE 
= 0x4020940d FLUSHO = 0x1000 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80086601 FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0x5 F_GETLK64 = 0x5 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 094822465b1..79fbafbcf6c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -75,12 +75,15 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x1000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40086601 FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x7 F_GETLK64 = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 96b9b8ab300..20f3a5799fd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -158,6 +158,12 @@ const ( CLONE_SIGHAND = 0x800 CLONE_VFORK = 0x4000 CLONE_VM = 0x100 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index ed522a84e84..90b8fcd29c5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -158,6 +158,12 @@ const ( CLONE_SIGHAND = 0x800 CLONE_VFORK = 0x4000 CLONE_VM = 0x100 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index c8d36fe998e..c5c03993b67 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -150,6 +150,12 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go index f1c146a74c9..14dd3c1d1ee 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go @@ -158,6 +158,12 @@ const ( CLONE_SIGHAND = 0x800 CLONE_VFORK = 0x4000 CLONE_VM = 0x100 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index 5402bd55ce1..c865a10df44 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -146,6 +146,13 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index ffaf2d2f9f2..9db6b2fb6e2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -153,6 +153,13 @@ const ( CLOCK_REALTIME = 0x0 CLOCK_THREAD_CPUTIME_ID = 0x4 CLOCK_UPTIME = 0x5 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 7aa796a642b..7072526a640 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -146,6 +146,13 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index 1792d3f13e6..ac5efbe5ac7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -156,6 +156,13 @@ const ( CLOCK_REALTIME = 0x0 CLOCK_THREAD_CPUTIME_ID = 0x4 CLOCK_UPTIME = 0x5 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go new file mode 100644 index 00000000000..a74639a46fb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -0,0 +1,1862 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build mips64,openbsd + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_BLUETOOTH = 0x20 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_ENCAP = 0x1c + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_KEY = 0x1e + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x1d + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ALTWERASE = 0x200 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRFILT = 0x4004427c + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc010427b + BIOCGETIF = 0x4020426b + BIOCGFILDROP = 0x40044278 + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044273 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x20004276 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDIRFILT = 0x8004427d + BIOCSDLT = 0x8004427a + BIOCSETF = 0x80104267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x80104277 + BIOCSFILDROP = 0x80044279 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044272 + BIOCSRTIMEOUT = 0x8010426d + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIRECTION_IN = 0x1 + BPF_DIRECTION_OUT = 0x2 + BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x200000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 + CREAD = 0x800 + CRTSCTS = 0x10000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0xff + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DIOCADDQUEUE = 0xc110445d + DIOCADDRULE = 0xcd604404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcd60441a + DIOCCLRIFFLAG = 0xc028445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0e04412 + DIOCCLRSTATUS = 0xc0284416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 
0xc1204460 + DIOCGETQUEUE = 0xc110445f + DIOCGETQUEUES = 0xc110445e + DIOCGETRULE = 0xcd604407 + DIOCGETRULES = 0xcd604406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0104454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0104419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0284457 + DIOCKILLSRCNODES = 0xc080445b + DIOCKILLSTATES = 0xc0e04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f + DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc4504443 + DIOCRADDTABLES = 0xc450443d + DIOCRCLRADDRS = 0xc4504442 + DIOCRCLRASTATS = 0xc4504448 + DIOCRCLRTABLES = 0xc450443c + DIOCRCLRTSTATS = 0xc4504441 + DIOCRDELADDRS = 0xc4504444 + DIOCRDELTABLES = 0xc450443e + DIOCRGETADDRS = 0xc4504446 + DIOCRGETASTATS = 0xc4504447 + DIOCRGETTABLES = 0xc450443f + DIOCRGETTSTATS = 0xc4504440 + DIOCRINADEFINE = 0xc450444d + DIOCRSETADDRS = 0xc4504445 + DIOCRSETTFLAGS = 0xc450444a + DIOCRTSTADDRS = 0xc4504449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0284459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0284414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc0104451 + DIOCXCOMMIT = 0xc0104452 + DIOCXROLLBACK = 0xc0104453 + DLT_ARCNET = 0x7 + DLT_ATM_RFC1483 = 0xb + DLT_AX25 = 0x3 + DLT_CHAOS = 0x5 + DLT_C_HDLC = 0x68 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0xd + DLT_FDDI = 0xa + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_LOOP = 0xc + DLT_MPLS = 0xdb + DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_SERIAL = 0x32 + DLT_PRONET = 0x4 + DLT_RAW = 0xe + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMT_TAGOVF = 0x1 + EMUL_ENABLED = 0x1 + EMUL_NATIVE = 0x2 + ENDRUNDISC = 0x9 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_AOE = 0x88a2 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 
0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LLDP = 0x88cc + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PAE = 0x888e + ETHERTYPE_PBB = 0x88e7 + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_QINQ = 0x88a8 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOW = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e 
+ ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_ALIGN = 0x2 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b + ETHER_MAX_LEN = 0x5ee + ETHER_MIN_LEN = 0x40 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x8 + EVFILT_TIMER = -0x7 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xa + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETOWN = 0x5 + F_ISATTY = 0xb + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8e52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BLUETOOTH = 0xf8 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf7 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DUMMY = 
0xf1 + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf3 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MBIM = 0xfa + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFLOW = 0xf9 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf2 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 
0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_HOST = 0x1 + IN_RFC3021_NET = 0xfffffffe + IN_RFC3021_NSHIFT = 0x1f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x103 + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_AUTH_LEVEL = 0x35 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_ESP_NETWORK_LEVEL = 0x37 + IPV6_ESP_TRANS_LEVEL = 0x36 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPCOMP_LEVEL = 0x3c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_OPTIONS = 0x1 + IPV6_PATHMTU = 0x2c + IPV6_PIPEX = 0x3f + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVDSTPORT = 0x40 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTABLE = 0x1021 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_AUTH_LEVEL = 0x14 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_ESP_NETWORK_LEVEL = 0x16 + IP_ESP_TRANS_LEVEL = 0x15 + IP_HDRINCL = 0x2 + IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 + IP_IPSECFLOWINFO = 0x24 + IP_IPSEC_LOCAL_AUTH = 0x1b + IP_IPSEC_LOCAL_CRED = 0x19 
+ IP_IPSEC_LOCAL_ID = 0x17 + IP_IPSEC_REMOTE_AUTH = 0x1c + IP_IPSEC_REMOTE_CRED = 0x1a + IP_IPSEC_REMOTE_ID = 0x18 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0xfff + IP_MF = 0x2000 + IP_MINTTL = 0x20 + IP_MIN_MEMBERSHIPS = 0xf + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PIPEX = 0x22 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVDSTPORT = 0x21 + IP_RECVIF = 0x1e + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRTABLE = 0x23 + IP_RECVTTL = 0x1f + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IUCLC = 0x1000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LCNT_OVERLOAD_FLUSH = 0x6 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FLAGMASK = 0xfff7 + MAP_HASSEMAPHORE = 0x0 + MAP_INHERIT = 0x0 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_INHERIT_ZERO = 0x3 + MAP_NOEXTEND = 0x0 + MAP_NORESERVE = 0x0 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x0 + MAP_SHARED = 0x1 + MAP_STACK = 0x4000 + MAP_TRYFIXED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_DOOMED = 0x8000000 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_NOATIME = 0x8000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 + MNT_NOSUID = 0x8 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 + MNT_SYNCHRONOUS = 0x2 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0x400ffff + MNT_WAIT = 0x1 + MNT_WANTRDWR = 0x2000000 + MNT_WXALLOWED = 0x800 + MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_MCAST = 0x200 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x4 + MS_SYNC = 0x2 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x7 + NET_RT_STATS = 0x4 + NET_RT_TABLE = 0x5 + NFDBITS = 0x20 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EOF = 0x2 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRUNCATE = 0x80 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OLCUC = 0x20 + ONLCR = 0x2 + ONLRET = 0x80 + ONOCR = 0x40 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x10000 + O_CREAT = 0x200 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x80 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 
+ O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x80 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PF_FLUSH = 0x1 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb + RTAX_BRD = 0x7 + RTAX_DNS = 0xc + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_LABEL = 0xa + RTAX_MAX = 0xf + RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe + RTAX_SRC = 0x8 + RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd + RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 + RTA_BRD = 0x80 + RTA_DNS = 0x1000 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_LABEL = 0x400 + RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 + RTA_SRC = 0x100 + RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 + RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 + RTF_CLONED = 0x10000 + RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x110fc08 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MPATH = 0x40000 + RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 + RTF_PERMANENT_ARP = 0x2000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x2000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_USETRAILERS = 0x8000 + RTM_80211INFO = 0x15 + RTM_ADD = 0x1 + RTM_BFD = 0x12 + RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DESYNC = 0x10 + RTM_GET = 0x4 + RTM_IFANNOUNCE = 0xf + RTM_IFINFO = 0xe + RTM_INVALIDATE = 0x11 + RTM_LOSING = 0x5 + RTM_MAXSIZE = 0x800 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff + RT_TABLEID_MAX = 0xff + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x4 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCATMARK = 0x40047307 + SIOCBRDGADD = 0x8060693c + SIOCBRDGADDL = 0x80606949 + SIOCBRDGADDS = 0x80606941 + SIOCBRDGARL = 0x808c694d + SIOCBRDGDADDR = 0x81286947 + SIOCBRDGDEL = 0x8060693d + SIOCBRDGDELS = 0x80606942 + SIOCBRDGFLUSH = 0x80606948 + SIOCBRDGFRL = 0x808c694e + SIOCBRDGGCACHE = 0xc0186941 + SIOCBRDGGFD = 0xc0186952 + SIOCBRDGGHT = 0xc0186951 + SIOCBRDGGIFFLGS = 0xc060693e + SIOCBRDGGMA = 0xc0186953 + SIOCBRDGGPARAM = 0xc0406958 + SIOCBRDGGPRI = 0xc0186950 + SIOCBRDGGRL = 0xc030694f + SIOCBRDGGTO = 0xc0186946 + SIOCBRDGIFS = 0xc0606942 + SIOCBRDGRTS = 0xc0206943 + SIOCBRDGSADDR = 0xc1286944 + SIOCBRDGSCACHE = 0x80186940 + SIOCBRDGSFD = 0x80186952 + SIOCBRDGSHT = 0x80186951 + SIOCBRDGSIFCOST = 0x80606955 + SIOCBRDGSIFFLGS = 0x8060693f + SIOCBRDGSIFPRIO = 0x80606954 + SIOCBRDGSIFPROT = 0x8060694a + SIOCBRDGSMA = 0x80186953 + 
SIOCBRDGSPRI = 0x80186950 + SIOCBRDGSPROTO = 0x8018695a + SIOCBRDGSTO = 0x80186945 + SIOCBRDGSTXHC = 0x80186959 + SIOCDELLABEL = 0x80206997 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPARENT = 0x802069b4 + SIOCDIFPHYADDR = 0x80206949 + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af + SIOCGETKALIVE = 0xc01869a4 + SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae + SIOCGETPFLOW = 0xc02069fe + SIOCGETPFSYNC = 0xc02069f8 + SIOCGETSGCNT = 0xc0207534 + SIOCGETVIFCNT = 0xc0287533 + SIOCGETVLAN = 0xc0206990 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0xc020691b + SIOCGIFDESCR = 0xc0206981 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGATTR = 0xc028698b + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc028698d + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFHARDMTU = 0xc02069a5 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0406938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc020697e + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 + SIOCGIFPRIORITY = 0xc020699c + SIOCGIFRDOMAIN = 0xc02069a0 + SIOCGIFRTLABEL = 0xc0206983 + SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 + SIOCGIFXFLAGS = 0xc020699e + SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 + SIOCGLIFPHYRTABLE = 0xc02069a2 + SIOCGLIFPHYTTL = 0xc02069a9 + SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db + SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 + SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 + SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac + SIOCIFCREATE = 0x8020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSETKALIVE = 0x801869a3 + SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad + SIOCSETPFLOW = 0x802069fd + SIOCSETPFSYNC = 0x802069f7 + SIOCSETVLAN = 0x8020698f + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFDESCR = 0x80206980 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGATTR = 0x8028698c + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020691f + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x8020697f + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 + SIOCSIFPRIORITY = 0x8020699b + SIOCSIFRDOMAIN = 0x8020699f + SIOCSIFRTLABEL = 0x80206982 + SIOCSIFXFLAGS = 0x8020699d + SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 + SIOCSLIFPHYRTABLE = 0x802069a1 + SIOCSLIFPHYTTL = 0x802069a8 + SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db + SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf + SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 + SIOCSVNETID = 0x802069a6 + SIOCSWGDPID = 0xc018695b + SIOCSWGMAXFLOW = 0xc0186960 + SIOCSWGMAXGROUP = 0xc018695d + SIOCSWSDPID = 0x8018695c + SIOCSWSPORTNO = 0xc060695f + SOCK_CLOEXEC = 0x8000 + SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 + SOCK_NONBLOCK = 0x4000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BINDANY = 
0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NETPROC = 0x1020 + SO_OOBINLINE = 0x100 + SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RTABLE = 0x1021 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SPLICE = 0x1023 + SO_TIMESTAMP = 0x800 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCP_MAXBURST = 0x4 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x3 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x4 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOPUSH = 0x10 + TCP_SACKHOLE_LIMIT = 0x80 + TCP_SACK_ENABLE = 0x8 + TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_PPS = 0x10 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGTSTAMP = 0x4010745b + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMODG = 0x4004746a + TIOCMODS = 0x8004746d + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x8004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTOP = 0x2000746f + TIOCSTSTAMP = 0x8008745a + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b + TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + 
VM_VTEXTMIN = 0x8 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALTSIG = 0x4 + WCONTINUED = 0x8 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WUNTRACED = 0x2 + XCASE = 0x1000000 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x58) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x59) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EIPSEC = syscall.Errno(0x52) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x5f) + ELOOP = syscall.Errno(0x3e) + EMEDIUMTYPE = syscall.Errno(0x56) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x53) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOMEDIUM = syscall.Errno(0x55) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5a) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x5b) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = 
syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, 
"ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disk quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC program not available"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIPSEC", "IPsec processing failure"}, + {83, "ENOATTR", "attribute not found"}, + {84, "EILSEQ", "illegal byte sequence"}, + {85, "ENOMEDIUM", "no medium found"}, + {86, "EMEDIUMTYPE", "wrong medium type"}, + {87, "EOVERFLOW", "value too large to be stored in data type"}, + {88, "ECANCELED", "operation canceled"}, + {89, "EIDRM", "identifier removed"}, + {90, "ENOMSG", "no message of desired type"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "thread AST"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 46e054ccb0e..5312c36cc82 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -192,6 +192,12 @@ const ( CSTOPB = 0x40 CSUSP = 0x1a CSWTCH = 0x1a + DIOC = 0x6400 + DIOCGETB = 
0x6402 + DIOCGETC = 0x6401 + DIOCGETP = 0x6408 + DIOCSETE = 0x6403 + DIOCSETP = 0x6409 DLT_AIRONET_HEADER = 0x78 DLT_APPLE_IP_OVER_IEEE1394 = 0x8a DLT_ARCNET = 0x7 @@ -290,6 +296,7 @@ const ( FF0 = 0x0 FF1 = 0x8000 FFDLY = 0x8000 + FIORDCHK = 0x6603 FLUSHALL = 0x1 FLUSHDATA = 0x0 FLUSHO = 0x2000 @@ -645,6 +652,14 @@ const ( MAP_SHARED = 0x1 MAP_TEXT = 0x400 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x29 + MCAST_JOIN_SOURCE_GROUP = 0x2d + MCAST_LEAVE_GROUP = 0x2a + MCAST_LEAVE_SOURCE_GROUP = 0x2e + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MSG_CTRUNC = 0x10 @@ -653,6 +668,7 @@ const ( MSG_DUPCTRL = 0x800 MSG_EOR = 0x8 MSG_MAXIOVLEN = 0x10 + MSG_NOSIGNAL = 0x200 MSG_NOTIFICATION = 0x100 MSG_OOB = 0x1 MSG_PEEK = 0x2 @@ -687,6 +703,7 @@ const ( O_APPEND = 0x8 O_CLOEXEC = 0x800000 O_CREAT = 0x100 + O_DIRECTORY = 0x1000000 O_DSYNC = 0x40 O_EXCL = 0x400 O_EXEC = 0x400000 @@ -725,7 +742,7 @@ const ( RLIMIT_FSIZE = 0x1 RLIMIT_NOFILE = 0x5 RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x3 + RLIM_INFINITY = 0xfffffffffffffffd RTAX_AUTHOR = 0x6 RTAX_BRD = 0x7 RTAX_DST = 0x0 @@ -1047,6 +1064,7 @@ const ( TCOON = 0x1 TCP_ABORT_THRESHOLD = 0x11 TCP_ANONPRIVBIND = 0x20 + TCP_CONGESTION = 0x25 TCP_CONN_ABORT_THRESHOLD = 0x13 TCP_CONN_NOTIFY_THRESHOLD = 0x12 TCP_CORK = 0x18 @@ -1076,6 +1094,8 @@ const ( TCSETSF = 0x5410 TCSETSW = 0x540f TCXONC = 0x5406 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOC = 0x5400 TIOCCBRK = 0x747a TIOCCDTR = 0x7478 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go deleted file mode 100644 index 23e94d3663c..00000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go +++ /dev/null @@ -1,1809 +0,0 @@ -// go run mksyscall.go -l32 -tags darwin,386,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_386.1_11.go syscall_darwin_386.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build darwin,386,!go1.12 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimes(path string, timeval *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, behav 
int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe() (r int, w int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - r = int(r0) - w = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = 
BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func removexattr(path string, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fremovexattr(fd int, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { - r0, _, e1 := Syscall6(SYS_FLISTXATTR, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := Syscall6(SYS_SETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - 
val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { - _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Access(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chflags(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chmod(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func 
Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) - nfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exchangedata(path1 string, path2 string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path1) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(path2) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, 
uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), uintptr(length>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdtablesize() (size int) { - r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) - size = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) - egid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) - gid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) - pgrp = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) 
- sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Issetugid() (tainted bool) { - r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) - tainted = bool(r0 != 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lchown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Link(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkfifo(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT - -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pathconf(path string, name int) (val int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlink(path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else 
{ - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rename(from string, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat(fromfd int, from string, tofd int, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Revoke(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rmdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) - newoffset = int64(int64(r1)<<32 | int64(r0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setegid(egid int) (err error) { - _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setlogin(name string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setprivexec(flag int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlink(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Truncate(path string, length int64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 
uintptr(length>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Undelete(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlink(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) - ret = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, 
uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(buf), uintptr(size), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statfs(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index e2ffb3bed33..6eb45798323 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -490,21 +490,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getattrlist_trampoline() - -//go:linkname libc_getattrlist 
libc_getattrlist -//go:cgo_import_dynamic libc_getattrlist getattrlist "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (r int, w int, err error) { r0, r1, e1 := syscall_rawSyscall(funcPC(libc_pipe_trampoline), 0, 0, 0) r = int(r0) @@ -958,6 +943,56 @@ func libc_close_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Clonefile(src string, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(src) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall(funcPC(libc_clonefile_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clonefile_trampoline() + +//go:linkname libc_clonefile libc_clonefile +//go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(src) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(funcPC(libc_clonefileat_trampoline), uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clonefileat_trampoline() + +//go:linkname libc_clonefileat libc_clonefileat +//go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(fd int) (nfd int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) nfd = int(r0) @@ -1146,6 +1181,26 @@ func libc_fchownat_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(funcPC(libc_fclonefileat_trampoline), uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fclonefileat_trampoline() + +//go:linkname libc_fclonefileat libc_fclonefileat +//go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -1207,6 +1262,28 @@ func libc_ftruncate_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_getcwd_trampoline() + +//go:linkname libc_getcwd libc_getcwd +//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdtablesize() (size int) { r0, _, _ := 
syscall_syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) size = int(r0) @@ -2357,21 +2434,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { @@ -2458,6 +2520,21 @@ func libc_lstat64_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Stat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s index 6836a41290e..1c53979a101 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s @@ -60,8 +60,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0 @@ -110,6 +108,10 @@ TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) +TEXT ·libc_clonefile_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clonefile(SB) +TEXT ·libc_clonefileat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clonefileat(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 JMP libc_dup(SB) TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 @@ -132,6 +134,8 @@ TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) +TEXT ·libc_fclonefileat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fclonefileat(SB) TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 JMP libc_flock(SB) TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 @@ -140,6 +144,8 @@ TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) +TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 @@ -164,6 +170,8 @@ TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) +TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) 
TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 @@ -264,10 +272,6 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 - JMP libc_gettimeofday(SB) TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 @@ -278,6 +282,8 @@ TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_lstat64(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_stat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_stat64(SB) TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go deleted file mode 100644 index 102561730ab..00000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go +++ /dev/null @@ -1,1809 +0,0 @@ -// go run mksyscall.go -tags darwin,amd64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.1_11.go syscall_darwin_amd64.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build darwin,amd64,!go1.12 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func 
kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimes(path string, timeval *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT - -func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe() (r int, w int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - r = int(r0) - w = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func removexattr(path string, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fremovexattr(fd int, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return 
-} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { - r0, _, e1 := Syscall6(SYS_FLISTXATTR, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := Syscall6(SYS_SETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { - _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Access(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func 
Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chflags(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chmod(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) - nfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exchangedata(path1 string, path2 string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path1) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(path2) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT - -func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdtablesize() (size int) { - r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) - size = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) - egid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) - gid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) - pgrp = int(r0) - 
return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Issetugid() (tainted bool) { - r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) - tainted = bool(r0 != 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lchown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Link(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkfifo(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pathconf(path string, name int) (val int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), 
uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlink(path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rename(from string, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat(fromfd int, from string, tofd int, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Revoke(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rmdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) - newoffset = int64(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err 
error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setegid(egid int) (err error) { - _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setlogin(name string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setprivexec(flag int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlink(path string, link string) (err error) { - var _p0 *byte - _p0, err = 
BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Truncate(path string, length int64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Undelete(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlink(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) - ret = 
uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(buf), uintptr(size), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - 
if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statfs(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index c67e336e2ab..889c14059e9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -490,21 +490,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getattrlist_trampoline() - -//go:linkname libc_getattrlist libc_getattrlist -//go:cgo_import_dynamic libc_getattrlist getattrlist "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (r int, w int, err error) { r0, r1, e1 := syscall_rawSyscall(funcPC(libc_pipe_trampoline), 0, 0, 0) r = int(r0) @@ -958,6 +943,56 @@ func libc_close_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Clonefile(src string, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(src) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall(funcPC(libc_clonefile_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clonefile_trampoline() + +//go:linkname libc_clonefile libc_clonefile +//go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(src) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(funcPC(libc_clonefileat_trampoline), uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clonefileat_trampoline() + +//go:linkname libc_clonefileat libc_clonefileat +//go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(fd int) (nfd int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) nfd = int(r0) @@ -1146,6 +1181,26 @@ func libc_fchownat_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(funcPC(libc_fclonefileat_trampoline), uintptr(srcDirfd), uintptr(dstDirfd), 
uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fclonefileat_trampoline() + +//go:linkname libc_fclonefileat libc_fclonefileat +//go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -1207,6 +1262,28 @@ func libc_ftruncate_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_getcwd_trampoline() + +//go:linkname libc_getcwd libc_getcwd +//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdtablesize() (size int) { r0, _, _ := syscall_syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) size = int(r0) @@ -2357,21 +2434,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { @@ -2458,6 +2520,21 @@ func libc_lstat64_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Stat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index a3fdf099d0e..c77bd6e20bd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -60,8 +60,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0 @@ -110,6 +108,10 @@ TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) TEXT 
·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) +TEXT ·libc_clonefile_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clonefile(SB) +TEXT ·libc_clonefileat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clonefileat(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 JMP libc_dup(SB) TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 @@ -132,6 +134,8 @@ TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) +TEXT ·libc_fclonefileat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fclonefileat(SB) TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 JMP libc_flock(SB) TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 @@ -140,6 +144,8 @@ TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) +TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 @@ -164,6 +170,8 @@ TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) +TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 @@ -264,10 +272,6 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 - JMP libc_gettimeofday(SB) TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 @@ -278,6 +282,8 @@ TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_lstat64(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_stat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_stat64(SB) TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go deleted file mode 100644 index d34e6df2fe0..00000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go +++ /dev/null @@ -1,1782 +0,0 @@ -// go run mksyscall.go -l32 -tags darwin,arm,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm.1_11.go syscall_darwin_arm.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build darwin,arm,!go1.12 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimes(path string, timeval *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, behav 
int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe() (r int, w int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - r = int(r0) - w = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = 
BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func removexattr(path string, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fremovexattr(fd int, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { - r0, _, e1 := Syscall6(SYS_FLISTXATTR, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := Syscall6(SYS_SETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - 
val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { - _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Access(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chflags(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chmod(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func 
Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) - nfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exchangedata(path1 string, path2 string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path1) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(path2) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, 
uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), uintptr(length>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdtablesize() (size int) { - r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) - size = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) - egid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) - gid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) - pgrp = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) 
- sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Issetugid() (tainted bool) { - r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) - tainted = bool(r0 != 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lchown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Link(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkfifo(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT - -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pathconf(path string, name int) (val int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlink(path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else 
{ - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rename(from string, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat(fromfd int, from string, tofd int, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Revoke(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rmdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) - newoffset = int64(int64(r1)<<32 | int64(r0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setegid(egid int) (err error) { - _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setlogin(name string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setprivexec(flag int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlink(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Truncate(path string, length int64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 
uintptr(length>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Undelete(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlink(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) - ret = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - 
-func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(buf), uintptr(size), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statfs(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index b759757a77a..d6b5249c2f2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -490,21 +490,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getattrlist_trampoline() - -//go:linkname libc_getattrlist libc_getattrlist -//go:cgo_import_dynamic libc_getattrlist getattrlist "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (r int, w int, err error) { r0, r1, e1 := syscall_rawSyscall(funcPC(libc_pipe_trampoline), 0, 0, 0) r = int(r0) @@ -958,6 +943,56 @@ func libc_close_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Clonefile(src string, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(src) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall(funcPC(libc_clonefile_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + if e1 != 
0 { + err = errnoErr(e1) + } + return +} + +func libc_clonefile_trampoline() + +//go:linkname libc_clonefile libc_clonefile +//go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(src) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(funcPC(libc_clonefileat_trampoline), uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clonefileat_trampoline() + +//go:linkname libc_clonefileat libc_clonefileat +//go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(fd int) (nfd int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) nfd = int(r0) @@ -1146,6 +1181,26 @@ func libc_fchownat_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(funcPC(libc_fclonefileat_trampoline), uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fclonefileat_trampoline() + +//go:linkname libc_fclonefileat libc_fclonefileat +//go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -1207,6 +1262,28 @@ func libc_ftruncate_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_getcwd_trampoline() + +//go:linkname libc_getcwd libc_getcwd +//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdtablesize() (size int) { r0, _, _ := syscall_syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) size = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s index b67f518fa30..5eec5f1d953 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s @@ -60,8 +60,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0 @@ -110,6 +108,10 @@ TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) TEXT 
·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) +TEXT ·libc_clonefile_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clonefile(SB) +TEXT ·libc_clonefileat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clonefileat(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 JMP libc_dup(SB) TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 @@ -132,6 +134,8 @@ TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) +TEXT ·libc_fclonefileat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fclonefileat(SB) TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 JMP libc_flock(SB) TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 @@ -140,6 +144,8 @@ TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) +TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 @@ -164,6 +170,8 @@ TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) +TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 @@ -264,8 +272,6 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 - JMP libc_gettimeofday(SB) TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index b2886126003..23b65a5301a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -490,21 +490,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getattrlist_trampoline() - -//go:linkname libc_getattrlist libc_getattrlist -//go:cgo_import_dynamic libc_getattrlist getattrlist "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (r int, w int, err error) { r0, r1, e1 := syscall_rawSyscall(funcPC(libc_pipe_trampoline), 0, 0, 0) r = int(r0) @@ -958,6 +943,56 @@ func libc_close_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Clonefile(src string, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(src) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall(funcPC(libc_clonefile_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clonefile_trampoline() + +//go:linkname libc_clonefile libc_clonefile +//go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT + +func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(src) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(funcPC(libc_clonefileat_trampoline), uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clonefileat_trampoline() + +//go:linkname libc_clonefileat libc_clonefileat +//go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(fd int) (nfd int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) nfd = int(r0) @@ -1146,6 +1181,26 @@ func libc_fchownat_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(funcPC(libc_fclonefileat_trampoline), uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fclonefileat_trampoline() + +//go:linkname libc_fclonefileat libc_fclonefileat +//go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -1207,6 +1262,28 @@ func libc_ftruncate_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_getcwd_trampoline() + +//go:linkname libc_getcwd libc_getcwd +//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdtablesize() (size int) { r0, _, _ := syscall_syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) size = int(r0) @@ -2443,6 +2520,21 @@ func libc_lstat_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Stat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 40cce1bb282..53c402bf68b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s 
@@ -60,8 +60,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0 @@ -110,6 +108,10 @@ TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) +TEXT ·libc_clonefile_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clonefile(SB) +TEXT ·libc_clonefileat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clonefileat(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 JMP libc_dup(SB) TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 @@ -132,6 +134,8 @@ TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) +TEXT ·libc_fclonefileat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fclonefileat(SB) TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 JMP libc_flock(SB) TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 @@ -140,6 +144,8 @@ TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) +TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 @@ -164,6 +170,8 @@ TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) +TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 @@ -264,8 +272,6 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 - JMP libc_gettimeofday(SB) TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0 @@ -276,6 +282,8 @@ TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat(SB) TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0 JMP libc_stat(SB) TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index fe1fdd78d70..aebfe511ad5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 
*byte _p0, err = BytePtrFromString(path) @@ -439,6 +423,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index 92efa1da3cd..d3af083f4e7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -13,17 +13,23 @@ import ( //go:cgo_import_dynamic libc_preadv preadv "libc.so" //go:cgo_import_dynamic libc_writev writev "libc.so" //go:cgo_import_dynamic libc_pwritev pwritev "libc.so" +//go:cgo_import_dynamic libc_accept4 accept4 "libsocket.so" +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" //go:linkname procreadv libc_readv //go:linkname procpreadv libc_preadv //go:linkname procwritev libc_writev //go:linkname procpwritev libc_pwritev +//go:linkname procaccept4 libc_accept4 +//go:linkname procpipe2 libc_pipe2 var ( procreadv, procpreadv, procwritev, - procpwritev syscallFunc + procpwritev, + procaccept4, + procpipe2 syscallFunc ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -85,3 +91,24 @@ func pwritev(fd int, iovs []Iovec, off int64) (n int, err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept4)), 4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procpipe2)), 2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index df217825f06..2fbbbe5a898 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -83,6 +83,22 @@ func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(open_how)), uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := 
Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -1821,6 +1837,21 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Faccessat2(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(pathname) @@ -1847,6 +1878,52 @@ func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ProcessVMReadv(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) { + var _p0 unsafe.Pointer + if len(localIov) > 0 { + _p0 = unsafe.Pointer(&localIov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 unsafe.Pointer + if len(remoteIov) > 0 { + _p1 = unsafe.Pointer(&remoteIov[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PROCESS_VM_READV, uintptr(pid), uintptr(_p0), uintptr(len(localIov)), uintptr(_p1), uintptr(len(remoteIov)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ProcessVMWritev(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) { + var _p0 unsafe.Pointer + if len(localIov) > 0 { + _p0 = unsafe.Pointer(&localIov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 unsafe.Pointer + if len(remoteIov) > 0 { + _p1 = unsafe.Pointer(&remoteIov[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PROCESS_VM_WRITEV, uintptr(pid), uintptr(_p0), uintptr(len(localIov)), uintptr(_p1), uintptr(len(remoteIov)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go similarity index 86% rename from vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go rename to vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index 8d39a09f721..ec6bd5bb73a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1,7 +1,7 @@ -// go run mksyscall.go -tags darwin,arm64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.1_11.go syscall_darwin_arm64.go +// go run mksyscall.go -openbsd -tags openbsd,mips64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_mips64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
-// +build darwin,arm64,!go1.12 +// +build openbsd,mips64 package unix @@ -350,8 +350,8 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ -360,154 +360,15 @@ func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (r int, w int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - r = int(r0) - w = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func removexattr(path string, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } 
- return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fremovexattr(fd int, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { - r0, _, e1 := Syscall6(SYS_FLISTXATTR, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := Syscall6(SYS_SETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -516,19 +377,15 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -554,7 +411,7 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } @@ -563,8 +420,9 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { - _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), 
uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -704,18 +562,8 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Exchangedata(path1 string, path2 string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path1) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(path2) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } @@ -837,8 +685,8 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -847,8 +695,33 @@ func Fsync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -857,9 +730,11 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdtablesize() (size int) { - r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) - size = int(r0) +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -945,6 +820,17 @@ func Getrlimit(which int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getrtable() (rtable int, err error) { + r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + rtable = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { @@ -966,8 +852,8 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Gettimeofday(tp *Timeval) (err error) { - _, _, e1 
:= RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -985,13 +871,23 @@ func Getuid() (uid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) tainted = bool(r0 != 0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Kqueue() (fd int, err error) { r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) fd = int(r0) @@ -1068,6 +964,21 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1113,6 +1024,21 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1128,6 +1054,31 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1183,7 +1134,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1200,7 +1151,7 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = 
unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1342,7 +1293,7 @@ func Rmdir(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1364,7 +1315,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1428,8 +1379,8 @@ func Setpriority(which int, who int, prio int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setprivexec(flag int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1438,8 +1389,8 @@ func Setprivexec(flag int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1448,8 +1399,18 @@ func Setregid(rgid int, egid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } @@ -1468,6 +1429,16 @@ func Setrlimit(which int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setrtable(rtable int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) @@ -1499,6 +1470,36 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err 
error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1555,7 +1556,7 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) if e1 != 0 { err = errnoErr(e1) } @@ -1572,21 +1573,6 @@ func Umask(newmask int) (oldmask int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Undelete(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Unlink(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1650,7 +1636,7 @@ func write(fd int, p []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1692,89 +1678,13 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(buf), uintptr(size), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statfs(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go new file mode 100644 index 00000000000..aca34b34933 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -0,0 +1,279 @@ +// go run mksysctl_openbsd.go +// Code generated by the command above; DO NOT EDIT. + +// +build mips64,openbsd + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.ncpuonline", []_C_int{6, 25}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", []_C_int{6, 16}}, + {"kern.allowdt", []_C_int{1, 65}}, + {"kern.allowkmem", []_C_int{1, 52}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cpustats", []_C_int{1, 85}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, + {"kern.hostid", []_C_int{1, 11}}, + {"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + 
{"kern.maxlocksperuid", []_C_int{1, 70}}, + {"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nselcoll", []_C_int{1, 43}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 2}}, + {"kern.osrevision", []_C_int{1, 3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + {"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", []_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.timeout_stats", []_C_int{1, 87}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.utc_offset", []_C_int{1, 88}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + 
{"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + {"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 
24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, + {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, + {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go deleted file mode 100644 index f33614532f9..00000000000 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go +++ /dev/null @@ -1,436 +0,0 @@ -// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.13.sdk/usr/include/sys/syscall.h -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build 386,darwin - -package unix - -const ( - SYS_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAIT4 = 7 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_CHDIR = 12 - SYS_FCHDIR = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_CHOWN = 16 - SYS_GETFSSTAT = 18 - SYS_GETPID = 20 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_GETEUID = 25 - SYS_PTRACE = 26 - SYS_RECVMSG = 27 - SYS_SENDMSG = 28 - SYS_RECVFROM = 29 - SYS_ACCEPT = 30 - SYS_GETPEERNAME = 31 - SYS_GETSOCKNAME = 32 - SYS_ACCESS = 33 - SYS_CHFLAGS = 34 - SYS_FCHFLAGS = 35 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_GETPPID = 39 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_GETEGID = 43 - SYS_SIGACTION = 46 - SYS_GETGID = 47 - SYS_SIGPROCMASK = 48 - SYS_GETLOGIN = 49 - SYS_SETLOGIN = 50 - SYS_ACCT = 51 - SYS_SIGPENDING = 52 - SYS_SIGALTSTACK = 53 - SYS_IOCTL = 54 - SYS_REBOOT = 55 - SYS_REVOKE = 56 - SYS_SYMLINK = 57 - SYS_READLINK = 58 - SYS_EXECVE = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_MSYNC = 65 - SYS_VFORK = 66 - SYS_MUNMAP = 73 - SYS_MPROTECT = 74 - SYS_MADVISE = 75 - SYS_MINCORE = 78 - SYS_GETGROUPS = 79 - SYS_SETGROUPS = 80 - SYS_GETPGRP = 81 - SYS_SETPGID = 82 - SYS_SETITIMER = 83 - SYS_SWAPON = 85 - SYS_GETITIMER = 86 - SYS_GETDTABLESIZE = 89 - SYS_DUP2 = 90 - SYS_FCNTL = 92 - SYS_SELECT = 93 - SYS_FSYNC = 95 - SYS_SETPRIORITY = 96 - SYS_SOCKET = 97 - SYS_CONNECT = 98 - SYS_GETPRIORITY = 100 - SYS_BIND = 104 - SYS_SETSOCKOPT = 105 - SYS_LISTEN = 106 - SYS_SIGSUSPEND = 111 - SYS_GETTIMEOFDAY = 116 - SYS_GETRUSAGE = 117 - SYS_GETSOCKOPT = 118 - SYS_READV = 120 - SYS_WRITEV = 121 - SYS_SETTIMEOFDAY = 122 - SYS_FCHOWN = 123 - SYS_FCHMOD = 124 - SYS_SETREUID = 126 - SYS_SETREGID = 127 - SYS_RENAME = 128 - SYS_FLOCK = 131 - SYS_MKFIFO = 132 - SYS_SENDTO = 133 - SYS_SHUTDOWN = 134 - SYS_SOCKETPAIR = 135 - SYS_MKDIR = 136 - SYS_RMDIR = 137 - SYS_UTIMES = 138 - SYS_FUTIMES = 139 - SYS_ADJTIME = 140 - SYS_GETHOSTUUID = 142 - SYS_SETSID = 147 - SYS_GETPGID = 151 - SYS_SETPRIVEXEC = 152 - SYS_PREAD = 153 - SYS_PWRITE = 154 - SYS_NFSSVC = 155 - SYS_STATFS = 157 - SYS_FSTATFS = 158 - SYS_UNMOUNT = 159 - SYS_GETFH = 161 - SYS_QUOTACTL = 165 - SYS_MOUNT = 167 - SYS_CSOPS = 169 - SYS_CSOPS_AUDITTOKEN = 170 - SYS_WAITID = 173 - SYS_KDEBUG_TYPEFILTER = 177 - SYS_KDEBUG_TRACE_STRING = 178 - SYS_KDEBUG_TRACE64 = 179 - SYS_KDEBUG_TRACE = 180 - SYS_SETGID = 181 - SYS_SETEGID = 182 - SYS_SETEUID = 183 - SYS_SIGRETURN = 184 - SYS_THREAD_SELFCOUNTS = 186 - SYS_FDATASYNC = 187 - SYS_STAT = 188 - SYS_FSTAT = 189 - SYS_LSTAT = 190 - SYS_PATHCONF = 191 - SYS_FPATHCONF = 192 - SYS_GETRLIMIT = 194 - SYS_SETRLIMIT = 195 - SYS_GETDIRENTRIES = 196 - SYS_MMAP = 197 - SYS_LSEEK = 199 - SYS_TRUNCATE = 200 - SYS_FTRUNCATE = 201 - SYS_SYSCTL = 202 - SYS_MLOCK = 203 - SYS_MUNLOCK = 204 - SYS_UNDELETE = 205 - SYS_OPEN_DPROTECTED_NP = 216 - SYS_GETATTRLIST = 220 - SYS_SETATTRLIST = 221 - SYS_GETDIRENTRIESATTR = 222 - SYS_EXCHANGEDATA = 223 - SYS_SEARCHFS = 225 - SYS_DELETE = 226 - SYS_COPYFILE = 227 - SYS_FGETATTRLIST = 228 - SYS_FSETATTRLIST = 229 - SYS_POLL = 230 - SYS_WATCHEVENT = 231 - SYS_WAITEVENT = 232 - SYS_MODWATCH = 233 - SYS_GETXATTR = 234 - SYS_FGETXATTR = 235 - SYS_SETXATTR = 236 - SYS_FSETXATTR = 237 - SYS_REMOVEXATTR = 238 - SYS_FREMOVEXATTR = 239 - SYS_LISTXATTR = 240 - SYS_FLISTXATTR = 241 - SYS_FSCTL = 242 - SYS_INITGROUPS = 243 - SYS_POSIX_SPAWN = 244 - SYS_FFSCTL = 245 - SYS_NFSCLNT = 247 - SYS_FHOPEN = 248 - SYS_MINHERIT = 250 - SYS_SEMSYS = 251 - SYS_MSGSYS = 252 - SYS_SHMSYS = 253 - SYS_SEMCTL = 254 - 
SYS_SEMGET = 255 - SYS_SEMOP = 256 - SYS_MSGCTL = 258 - SYS_MSGGET = 259 - SYS_MSGSND = 260 - SYS_MSGRCV = 261 - SYS_SHMAT = 262 - SYS_SHMCTL = 263 - SYS_SHMDT = 264 - SYS_SHMGET = 265 - SYS_SHM_OPEN = 266 - SYS_SHM_UNLINK = 267 - SYS_SEM_OPEN = 268 - SYS_SEM_CLOSE = 269 - SYS_SEM_UNLINK = 270 - SYS_SEM_WAIT = 271 - SYS_SEM_TRYWAIT = 272 - SYS_SEM_POST = 273 - SYS_SYSCTLBYNAME = 274 - SYS_OPEN_EXTENDED = 277 - SYS_UMASK_EXTENDED = 278 - SYS_STAT_EXTENDED = 279 - SYS_LSTAT_EXTENDED = 280 - SYS_FSTAT_EXTENDED = 281 - SYS_CHMOD_EXTENDED = 282 - SYS_FCHMOD_EXTENDED = 283 - SYS_ACCESS_EXTENDED = 284 - SYS_SETTID = 285 - SYS_GETTID = 286 - SYS_SETSGROUPS = 287 - SYS_GETSGROUPS = 288 - SYS_SETWGROUPS = 289 - SYS_GETWGROUPS = 290 - SYS_MKFIFO_EXTENDED = 291 - SYS_MKDIR_EXTENDED = 292 - SYS_IDENTITYSVC = 293 - SYS_SHARED_REGION_CHECK_NP = 294 - SYS_VM_PRESSURE_MONITOR = 296 - SYS_PSYNCH_RW_LONGRDLOCK = 297 - SYS_PSYNCH_RW_YIELDWRLOCK = 298 - SYS_PSYNCH_RW_DOWNGRADE = 299 - SYS_PSYNCH_RW_UPGRADE = 300 - SYS_PSYNCH_MUTEXWAIT = 301 - SYS_PSYNCH_MUTEXDROP = 302 - SYS_PSYNCH_CVBROAD = 303 - SYS_PSYNCH_CVSIGNAL = 304 - SYS_PSYNCH_CVWAIT = 305 - SYS_PSYNCH_RW_RDLOCK = 306 - SYS_PSYNCH_RW_WRLOCK = 307 - SYS_PSYNCH_RW_UNLOCK = 308 - SYS_PSYNCH_RW_UNLOCK2 = 309 - SYS_GETSID = 310 - SYS_SETTID_WITH_PID = 311 - SYS_PSYNCH_CVCLRPREPOST = 312 - SYS_AIO_FSYNC = 313 - SYS_AIO_RETURN = 314 - SYS_AIO_SUSPEND = 315 - SYS_AIO_CANCEL = 316 - SYS_AIO_ERROR = 317 - SYS_AIO_READ = 318 - SYS_AIO_WRITE = 319 - SYS_LIO_LISTIO = 320 - SYS_IOPOLICYSYS = 322 - SYS_PROCESS_POLICY = 323 - SYS_MLOCKALL = 324 - SYS_MUNLOCKALL = 325 - SYS_ISSETUGID = 327 - SYS___PTHREAD_KILL = 328 - SYS___PTHREAD_SIGMASK = 329 - SYS___SIGWAIT = 330 - SYS___DISABLE_THREADSIGNAL = 331 - SYS___PTHREAD_MARKCANCEL = 332 - SYS___PTHREAD_CANCELED = 333 - SYS___SEMWAIT_SIGNAL = 334 - SYS_PROC_INFO = 336 - SYS_SENDFILE = 337 - SYS_STAT64 = 338 - SYS_FSTAT64 = 339 - SYS_LSTAT64 = 340 - SYS_STAT64_EXTENDED = 341 - SYS_LSTAT64_EXTENDED = 342 - SYS_FSTAT64_EXTENDED = 343 - SYS_GETDIRENTRIES64 = 344 - SYS_STATFS64 = 345 - SYS_FSTATFS64 = 346 - SYS_GETFSSTAT64 = 347 - SYS___PTHREAD_CHDIR = 348 - SYS___PTHREAD_FCHDIR = 349 - SYS_AUDIT = 350 - SYS_AUDITON = 351 - SYS_GETAUID = 353 - SYS_SETAUID = 354 - SYS_GETAUDIT_ADDR = 357 - SYS_SETAUDIT_ADDR = 358 - SYS_AUDITCTL = 359 - SYS_BSDTHREAD_CREATE = 360 - SYS_BSDTHREAD_TERMINATE = 361 - SYS_KQUEUE = 362 - SYS_KEVENT = 363 - SYS_LCHOWN = 364 - SYS_BSDTHREAD_REGISTER = 366 - SYS_WORKQ_OPEN = 367 - SYS_WORKQ_KERNRETURN = 368 - SYS_KEVENT64 = 369 - SYS___OLD_SEMWAIT_SIGNAL = 370 - SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 - SYS_THREAD_SELFID = 372 - SYS_LEDGER = 373 - SYS_KEVENT_QOS = 374 - SYS_KEVENT_ID = 375 - SYS___MAC_EXECVE = 380 - SYS___MAC_SYSCALL = 381 - SYS___MAC_GET_FILE = 382 - SYS___MAC_SET_FILE = 383 - SYS___MAC_GET_LINK = 384 - SYS___MAC_SET_LINK = 385 - SYS___MAC_GET_PROC = 386 - SYS___MAC_SET_PROC = 387 - SYS___MAC_GET_FD = 388 - SYS___MAC_SET_FD = 389 - SYS___MAC_GET_PID = 390 - SYS_PSELECT = 394 - SYS_PSELECT_NOCANCEL = 395 - SYS_READ_NOCANCEL = 396 - SYS_WRITE_NOCANCEL = 397 - SYS_OPEN_NOCANCEL = 398 - SYS_CLOSE_NOCANCEL = 399 - SYS_WAIT4_NOCANCEL = 400 - SYS_RECVMSG_NOCANCEL = 401 - SYS_SENDMSG_NOCANCEL = 402 - SYS_RECVFROM_NOCANCEL = 403 - SYS_ACCEPT_NOCANCEL = 404 - SYS_MSYNC_NOCANCEL = 405 - SYS_FCNTL_NOCANCEL = 406 - SYS_SELECT_NOCANCEL = 407 - SYS_FSYNC_NOCANCEL = 408 - SYS_CONNECT_NOCANCEL = 409 - SYS_SIGSUSPEND_NOCANCEL = 410 - SYS_READV_NOCANCEL = 411 - SYS_WRITEV_NOCANCEL = 412 - 
SYS_SENDTO_NOCANCEL = 413 - SYS_PREAD_NOCANCEL = 414 - SYS_PWRITE_NOCANCEL = 415 - SYS_WAITID_NOCANCEL = 416 - SYS_POLL_NOCANCEL = 417 - SYS_MSGSND_NOCANCEL = 418 - SYS_MSGRCV_NOCANCEL = 419 - SYS_SEM_WAIT_NOCANCEL = 420 - SYS_AIO_SUSPEND_NOCANCEL = 421 - SYS___SIGWAIT_NOCANCEL = 422 - SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 - SYS___MAC_MOUNT = 424 - SYS___MAC_GET_MOUNT = 425 - SYS___MAC_GETFSSTAT = 426 - SYS_FSGETPATH = 427 - SYS_AUDIT_SESSION_SELF = 428 - SYS_AUDIT_SESSION_JOIN = 429 - SYS_FILEPORT_MAKEPORT = 430 - SYS_FILEPORT_MAKEFD = 431 - SYS_AUDIT_SESSION_PORT = 432 - SYS_PID_SUSPEND = 433 - SYS_PID_RESUME = 434 - SYS_PID_HIBERNATE = 435 - SYS_PID_SHUTDOWN_SOCKETS = 436 - SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 - SYS_KAS_INFO = 439 - SYS_MEMORYSTATUS_CONTROL = 440 - SYS_GUARDED_OPEN_NP = 441 - SYS_GUARDED_CLOSE_NP = 442 - SYS_GUARDED_KQUEUE_NP = 443 - SYS_CHANGE_FDGUARD_NP = 444 - SYS_USRCTL = 445 - SYS_PROC_RLIMIT_CONTROL = 446 - SYS_CONNECTX = 447 - SYS_DISCONNECTX = 448 - SYS_PEELOFF = 449 - SYS_SOCKET_DELEGATE = 450 - SYS_TELEMETRY = 451 - SYS_PROC_UUID_POLICY = 452 - SYS_MEMORYSTATUS_GET_LEVEL = 453 - SYS_SYSTEM_OVERRIDE = 454 - SYS_VFS_PURGE = 455 - SYS_SFI_CTL = 456 - SYS_SFI_PIDCTL = 457 - SYS_COALITION = 458 - SYS_COALITION_INFO = 459 - SYS_NECP_MATCH_POLICY = 460 - SYS_GETATTRLISTBULK = 461 - SYS_CLONEFILEAT = 462 - SYS_OPENAT = 463 - SYS_OPENAT_NOCANCEL = 464 - SYS_RENAMEAT = 465 - SYS_FACCESSAT = 466 - SYS_FCHMODAT = 467 - SYS_FCHOWNAT = 468 - SYS_FSTATAT = 469 - SYS_FSTATAT64 = 470 - SYS_LINKAT = 471 - SYS_UNLINKAT = 472 - SYS_READLINKAT = 473 - SYS_SYMLINKAT = 474 - SYS_MKDIRAT = 475 - SYS_GETATTRLISTAT = 476 - SYS_PROC_TRACE_LOG = 477 - SYS_BSDTHREAD_CTL = 478 - SYS_OPENBYID_NP = 479 - SYS_RECVMSG_X = 480 - SYS_SENDMSG_X = 481 - SYS_THREAD_SELFUSAGE = 482 - SYS_CSRCTL = 483 - SYS_GUARDED_OPEN_DPROTECTED_NP = 484 - SYS_GUARDED_WRITE_NP = 485 - SYS_GUARDED_PWRITE_NP = 486 - SYS_GUARDED_WRITEV_NP = 487 - SYS_RENAMEATX_NP = 488 - SYS_MREMAP_ENCRYPTED = 489 - SYS_NETAGENT_TRIGGER = 490 - SYS_STACK_SNAPSHOT_WITH_CONFIG = 491 - SYS_MICROSTACKSHOT = 492 - SYS_GRAB_PGO_DATA = 493 - SYS_PERSONA = 494 - SYS_WORK_INTERVAL_CTL = 499 - SYS_GETENTROPY = 500 - SYS_NECP_OPEN = 501 - SYS_NECP_CLIENT_ACTION = 502 - SYS___NEXUS_OPEN = 503 - SYS___NEXUS_REGISTER = 504 - SYS___NEXUS_DEREGISTER = 505 - SYS___NEXUS_CREATE = 506 - SYS___NEXUS_DESTROY = 507 - SYS___NEXUS_GET_OPT = 508 - SYS___NEXUS_SET_OPT = 509 - SYS___CHANNEL_OPEN = 510 - SYS___CHANNEL_GET_INFO = 511 - SYS___CHANNEL_SYNC = 512 - SYS___CHANNEL_GET_OPT = 513 - SYS___CHANNEL_SET_OPT = 514 - SYS_ULOCK_WAIT = 515 - SYS_ULOCK_WAKE = 516 - SYS_FCLONEFILEAT = 517 - SYS_FS_SNAPSHOT = 518 - SYS_TERMINATE_WITH_PAYLOAD = 520 - SYS_ABORT_WITH_PAYLOAD = 521 - SYS_NECP_SESSION_OPEN = 522 - SYS_NECP_SESSION_ACTION = 523 - SYS_SETATTRLISTAT = 524 - SYS_NET_QOS_GUIDELINE = 525 - SYS_FMOUNT = 526 - SYS_NTP_ADJTIME = 527 - SYS_NTP_GETTIME = 528 - SYS_OS_FAULT_WITH_PAYLOAD = 529 - SYS_MAXSYSCALL = 530 - SYS_INVALID = 63 -) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go deleted file mode 100644 index 654dd3da3be..00000000000 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go +++ /dev/null @@ -1,438 +0,0 @@ -// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.sdk/usr/include/sys/syscall.h -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build amd64,darwin - -package unix - -const ( - SYS_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAIT4 = 7 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_CHDIR = 12 - SYS_FCHDIR = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_CHOWN = 16 - SYS_GETFSSTAT = 18 - SYS_GETPID = 20 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_GETEUID = 25 - SYS_PTRACE = 26 - SYS_RECVMSG = 27 - SYS_SENDMSG = 28 - SYS_RECVFROM = 29 - SYS_ACCEPT = 30 - SYS_GETPEERNAME = 31 - SYS_GETSOCKNAME = 32 - SYS_ACCESS = 33 - SYS_CHFLAGS = 34 - SYS_FCHFLAGS = 35 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_GETPPID = 39 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_GETEGID = 43 - SYS_SIGACTION = 46 - SYS_GETGID = 47 - SYS_SIGPROCMASK = 48 - SYS_GETLOGIN = 49 - SYS_SETLOGIN = 50 - SYS_ACCT = 51 - SYS_SIGPENDING = 52 - SYS_SIGALTSTACK = 53 - SYS_IOCTL = 54 - SYS_REBOOT = 55 - SYS_REVOKE = 56 - SYS_SYMLINK = 57 - SYS_READLINK = 58 - SYS_EXECVE = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_MSYNC = 65 - SYS_VFORK = 66 - SYS_MUNMAP = 73 - SYS_MPROTECT = 74 - SYS_MADVISE = 75 - SYS_MINCORE = 78 - SYS_GETGROUPS = 79 - SYS_SETGROUPS = 80 - SYS_GETPGRP = 81 - SYS_SETPGID = 82 - SYS_SETITIMER = 83 - SYS_SWAPON = 85 - SYS_GETITIMER = 86 - SYS_GETDTABLESIZE = 89 - SYS_DUP2 = 90 - SYS_FCNTL = 92 - SYS_SELECT = 93 - SYS_FSYNC = 95 - SYS_SETPRIORITY = 96 - SYS_SOCKET = 97 - SYS_CONNECT = 98 - SYS_GETPRIORITY = 100 - SYS_BIND = 104 - SYS_SETSOCKOPT = 105 - SYS_LISTEN = 106 - SYS_SIGSUSPEND = 111 - SYS_GETTIMEOFDAY = 116 - SYS_GETRUSAGE = 117 - SYS_GETSOCKOPT = 118 - SYS_READV = 120 - SYS_WRITEV = 121 - SYS_SETTIMEOFDAY = 122 - SYS_FCHOWN = 123 - SYS_FCHMOD = 124 - SYS_SETREUID = 126 - SYS_SETREGID = 127 - SYS_RENAME = 128 - SYS_FLOCK = 131 - SYS_MKFIFO = 132 - SYS_SENDTO = 133 - SYS_SHUTDOWN = 134 - SYS_SOCKETPAIR = 135 - SYS_MKDIR = 136 - SYS_RMDIR = 137 - SYS_UTIMES = 138 - SYS_FUTIMES = 139 - SYS_ADJTIME = 140 - SYS_GETHOSTUUID = 142 - SYS_SETSID = 147 - SYS_GETPGID = 151 - SYS_SETPRIVEXEC = 152 - SYS_PREAD = 153 - SYS_PWRITE = 154 - SYS_NFSSVC = 155 - SYS_STATFS = 157 - SYS_FSTATFS = 158 - SYS_UNMOUNT = 159 - SYS_GETFH = 161 - SYS_QUOTACTL = 165 - SYS_MOUNT = 167 - SYS_CSOPS = 169 - SYS_CSOPS_AUDITTOKEN = 170 - SYS_WAITID = 173 - SYS_KDEBUG_TYPEFILTER = 177 - SYS_KDEBUG_TRACE_STRING = 178 - SYS_KDEBUG_TRACE64 = 179 - SYS_KDEBUG_TRACE = 180 - SYS_SETGID = 181 - SYS_SETEGID = 182 - SYS_SETEUID = 183 - SYS_SIGRETURN = 184 - SYS_THREAD_SELFCOUNTS = 186 - SYS_FDATASYNC = 187 - SYS_STAT = 188 - SYS_FSTAT = 189 - SYS_LSTAT = 190 - SYS_PATHCONF = 191 - SYS_FPATHCONF = 192 - SYS_GETRLIMIT = 194 - SYS_SETRLIMIT = 195 - SYS_GETDIRENTRIES = 196 - SYS_MMAP = 197 - SYS_LSEEK = 199 - SYS_TRUNCATE = 200 - SYS_FTRUNCATE = 201 - SYS_SYSCTL = 202 - SYS_MLOCK = 203 - SYS_MUNLOCK = 204 - SYS_UNDELETE = 205 - SYS_OPEN_DPROTECTED_NP = 216 - SYS_GETATTRLIST = 220 - SYS_SETATTRLIST = 221 - SYS_GETDIRENTRIESATTR = 222 - SYS_EXCHANGEDATA = 223 - SYS_SEARCHFS = 225 - SYS_DELETE = 226 - SYS_COPYFILE = 227 - SYS_FGETATTRLIST = 228 - SYS_FSETATTRLIST = 229 - SYS_POLL = 230 - SYS_WATCHEVENT = 231 - SYS_WAITEVENT = 232 - SYS_MODWATCH = 233 - SYS_GETXATTR = 234 - SYS_FGETXATTR = 235 - SYS_SETXATTR = 236 - SYS_FSETXATTR = 237 - SYS_REMOVEXATTR = 238 - SYS_FREMOVEXATTR = 239 - SYS_LISTXATTR = 240 - SYS_FLISTXATTR = 241 - SYS_FSCTL = 242 - SYS_INITGROUPS = 243 - SYS_POSIX_SPAWN = 244 - SYS_FFSCTL = 245 - SYS_NFSCLNT = 247 - SYS_FHOPEN = 248 - SYS_MINHERIT = 250 - SYS_SEMSYS = 251 - SYS_MSGSYS = 252 - SYS_SHMSYS = 253 - SYS_SEMCTL = 254 
- SYS_SEMGET = 255 - SYS_SEMOP = 256 - SYS_MSGCTL = 258 - SYS_MSGGET = 259 - SYS_MSGSND = 260 - SYS_MSGRCV = 261 - SYS_SHMAT = 262 - SYS_SHMCTL = 263 - SYS_SHMDT = 264 - SYS_SHMGET = 265 - SYS_SHM_OPEN = 266 - SYS_SHM_UNLINK = 267 - SYS_SEM_OPEN = 268 - SYS_SEM_CLOSE = 269 - SYS_SEM_UNLINK = 270 - SYS_SEM_WAIT = 271 - SYS_SEM_TRYWAIT = 272 - SYS_SEM_POST = 273 - SYS_SYSCTLBYNAME = 274 - SYS_OPEN_EXTENDED = 277 - SYS_UMASK_EXTENDED = 278 - SYS_STAT_EXTENDED = 279 - SYS_LSTAT_EXTENDED = 280 - SYS_FSTAT_EXTENDED = 281 - SYS_CHMOD_EXTENDED = 282 - SYS_FCHMOD_EXTENDED = 283 - SYS_ACCESS_EXTENDED = 284 - SYS_SETTID = 285 - SYS_GETTID = 286 - SYS_SETSGROUPS = 287 - SYS_GETSGROUPS = 288 - SYS_SETWGROUPS = 289 - SYS_GETWGROUPS = 290 - SYS_MKFIFO_EXTENDED = 291 - SYS_MKDIR_EXTENDED = 292 - SYS_IDENTITYSVC = 293 - SYS_SHARED_REGION_CHECK_NP = 294 - SYS_VM_PRESSURE_MONITOR = 296 - SYS_PSYNCH_RW_LONGRDLOCK = 297 - SYS_PSYNCH_RW_YIELDWRLOCK = 298 - SYS_PSYNCH_RW_DOWNGRADE = 299 - SYS_PSYNCH_RW_UPGRADE = 300 - SYS_PSYNCH_MUTEXWAIT = 301 - SYS_PSYNCH_MUTEXDROP = 302 - SYS_PSYNCH_CVBROAD = 303 - SYS_PSYNCH_CVSIGNAL = 304 - SYS_PSYNCH_CVWAIT = 305 - SYS_PSYNCH_RW_RDLOCK = 306 - SYS_PSYNCH_RW_WRLOCK = 307 - SYS_PSYNCH_RW_UNLOCK = 308 - SYS_PSYNCH_RW_UNLOCK2 = 309 - SYS_GETSID = 310 - SYS_SETTID_WITH_PID = 311 - SYS_PSYNCH_CVCLRPREPOST = 312 - SYS_AIO_FSYNC = 313 - SYS_AIO_RETURN = 314 - SYS_AIO_SUSPEND = 315 - SYS_AIO_CANCEL = 316 - SYS_AIO_ERROR = 317 - SYS_AIO_READ = 318 - SYS_AIO_WRITE = 319 - SYS_LIO_LISTIO = 320 - SYS_IOPOLICYSYS = 322 - SYS_PROCESS_POLICY = 323 - SYS_MLOCKALL = 324 - SYS_MUNLOCKALL = 325 - SYS_ISSETUGID = 327 - SYS___PTHREAD_KILL = 328 - SYS___PTHREAD_SIGMASK = 329 - SYS___SIGWAIT = 330 - SYS___DISABLE_THREADSIGNAL = 331 - SYS___PTHREAD_MARKCANCEL = 332 - SYS___PTHREAD_CANCELED = 333 - SYS___SEMWAIT_SIGNAL = 334 - SYS_PROC_INFO = 336 - SYS_SENDFILE = 337 - SYS_STAT64 = 338 - SYS_FSTAT64 = 339 - SYS_LSTAT64 = 340 - SYS_STAT64_EXTENDED = 341 - SYS_LSTAT64_EXTENDED = 342 - SYS_FSTAT64_EXTENDED = 343 - SYS_GETDIRENTRIES64 = 344 - SYS_STATFS64 = 345 - SYS_FSTATFS64 = 346 - SYS_GETFSSTAT64 = 347 - SYS___PTHREAD_CHDIR = 348 - SYS___PTHREAD_FCHDIR = 349 - SYS_AUDIT = 350 - SYS_AUDITON = 351 - SYS_GETAUID = 353 - SYS_SETAUID = 354 - SYS_GETAUDIT_ADDR = 357 - SYS_SETAUDIT_ADDR = 358 - SYS_AUDITCTL = 359 - SYS_BSDTHREAD_CREATE = 360 - SYS_BSDTHREAD_TERMINATE = 361 - SYS_KQUEUE = 362 - SYS_KEVENT = 363 - SYS_LCHOWN = 364 - SYS_BSDTHREAD_REGISTER = 366 - SYS_WORKQ_OPEN = 367 - SYS_WORKQ_KERNRETURN = 368 - SYS_KEVENT64 = 369 - SYS___OLD_SEMWAIT_SIGNAL = 370 - SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 - SYS_THREAD_SELFID = 372 - SYS_LEDGER = 373 - SYS_KEVENT_QOS = 374 - SYS_KEVENT_ID = 375 - SYS___MAC_EXECVE = 380 - SYS___MAC_SYSCALL = 381 - SYS___MAC_GET_FILE = 382 - SYS___MAC_SET_FILE = 383 - SYS___MAC_GET_LINK = 384 - SYS___MAC_SET_LINK = 385 - SYS___MAC_GET_PROC = 386 - SYS___MAC_SET_PROC = 387 - SYS___MAC_GET_FD = 388 - SYS___MAC_SET_FD = 389 - SYS___MAC_GET_PID = 390 - SYS_PSELECT = 394 - SYS_PSELECT_NOCANCEL = 395 - SYS_READ_NOCANCEL = 396 - SYS_WRITE_NOCANCEL = 397 - SYS_OPEN_NOCANCEL = 398 - SYS_CLOSE_NOCANCEL = 399 - SYS_WAIT4_NOCANCEL = 400 - SYS_RECVMSG_NOCANCEL = 401 - SYS_SENDMSG_NOCANCEL = 402 - SYS_RECVFROM_NOCANCEL = 403 - SYS_ACCEPT_NOCANCEL = 404 - SYS_MSYNC_NOCANCEL = 405 - SYS_FCNTL_NOCANCEL = 406 - SYS_SELECT_NOCANCEL = 407 - SYS_FSYNC_NOCANCEL = 408 - SYS_CONNECT_NOCANCEL = 409 - SYS_SIGSUSPEND_NOCANCEL = 410 - SYS_READV_NOCANCEL = 411 - SYS_WRITEV_NOCANCEL = 412 - 
SYS_SENDTO_NOCANCEL = 413 - SYS_PREAD_NOCANCEL = 414 - SYS_PWRITE_NOCANCEL = 415 - SYS_WAITID_NOCANCEL = 416 - SYS_POLL_NOCANCEL = 417 - SYS_MSGSND_NOCANCEL = 418 - SYS_MSGRCV_NOCANCEL = 419 - SYS_SEM_WAIT_NOCANCEL = 420 - SYS_AIO_SUSPEND_NOCANCEL = 421 - SYS___SIGWAIT_NOCANCEL = 422 - SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 - SYS___MAC_MOUNT = 424 - SYS___MAC_GET_MOUNT = 425 - SYS___MAC_GETFSSTAT = 426 - SYS_FSGETPATH = 427 - SYS_AUDIT_SESSION_SELF = 428 - SYS_AUDIT_SESSION_JOIN = 429 - SYS_FILEPORT_MAKEPORT = 430 - SYS_FILEPORT_MAKEFD = 431 - SYS_AUDIT_SESSION_PORT = 432 - SYS_PID_SUSPEND = 433 - SYS_PID_RESUME = 434 - SYS_PID_HIBERNATE = 435 - SYS_PID_SHUTDOWN_SOCKETS = 436 - SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 - SYS_KAS_INFO = 439 - SYS_MEMORYSTATUS_CONTROL = 440 - SYS_GUARDED_OPEN_NP = 441 - SYS_GUARDED_CLOSE_NP = 442 - SYS_GUARDED_KQUEUE_NP = 443 - SYS_CHANGE_FDGUARD_NP = 444 - SYS_USRCTL = 445 - SYS_PROC_RLIMIT_CONTROL = 446 - SYS_CONNECTX = 447 - SYS_DISCONNECTX = 448 - SYS_PEELOFF = 449 - SYS_SOCKET_DELEGATE = 450 - SYS_TELEMETRY = 451 - SYS_PROC_UUID_POLICY = 452 - SYS_MEMORYSTATUS_GET_LEVEL = 453 - SYS_SYSTEM_OVERRIDE = 454 - SYS_VFS_PURGE = 455 - SYS_SFI_CTL = 456 - SYS_SFI_PIDCTL = 457 - SYS_COALITION = 458 - SYS_COALITION_INFO = 459 - SYS_NECP_MATCH_POLICY = 460 - SYS_GETATTRLISTBULK = 461 - SYS_CLONEFILEAT = 462 - SYS_OPENAT = 463 - SYS_OPENAT_NOCANCEL = 464 - SYS_RENAMEAT = 465 - SYS_FACCESSAT = 466 - SYS_FCHMODAT = 467 - SYS_FCHOWNAT = 468 - SYS_FSTATAT = 469 - SYS_FSTATAT64 = 470 - SYS_LINKAT = 471 - SYS_UNLINKAT = 472 - SYS_READLINKAT = 473 - SYS_SYMLINKAT = 474 - SYS_MKDIRAT = 475 - SYS_GETATTRLISTAT = 476 - SYS_PROC_TRACE_LOG = 477 - SYS_BSDTHREAD_CTL = 478 - SYS_OPENBYID_NP = 479 - SYS_RECVMSG_X = 480 - SYS_SENDMSG_X = 481 - SYS_THREAD_SELFUSAGE = 482 - SYS_CSRCTL = 483 - SYS_GUARDED_OPEN_DPROTECTED_NP = 484 - SYS_GUARDED_WRITE_NP = 485 - SYS_GUARDED_PWRITE_NP = 486 - SYS_GUARDED_WRITEV_NP = 487 - SYS_RENAMEATX_NP = 488 - SYS_MREMAP_ENCRYPTED = 489 - SYS_NETAGENT_TRIGGER = 490 - SYS_STACK_SNAPSHOT_WITH_CONFIG = 491 - SYS_MICROSTACKSHOT = 492 - SYS_GRAB_PGO_DATA = 493 - SYS_PERSONA = 494 - SYS_WORK_INTERVAL_CTL = 499 - SYS_GETENTROPY = 500 - SYS_NECP_OPEN = 501 - SYS_NECP_CLIENT_ACTION = 502 - SYS___NEXUS_OPEN = 503 - SYS___NEXUS_REGISTER = 504 - SYS___NEXUS_DEREGISTER = 505 - SYS___NEXUS_CREATE = 506 - SYS___NEXUS_DESTROY = 507 - SYS___NEXUS_GET_OPT = 508 - SYS___NEXUS_SET_OPT = 509 - SYS___CHANNEL_OPEN = 510 - SYS___CHANNEL_GET_INFO = 511 - SYS___CHANNEL_SYNC = 512 - SYS___CHANNEL_GET_OPT = 513 - SYS___CHANNEL_SET_OPT = 514 - SYS_ULOCK_WAIT = 515 - SYS_ULOCK_WAKE = 516 - SYS_FCLONEFILEAT = 517 - SYS_FS_SNAPSHOT = 518 - SYS_TERMINATE_WITH_PAYLOAD = 520 - SYS_ABORT_WITH_PAYLOAD = 521 - SYS_NECP_SESSION_OPEN = 522 - SYS_NECP_SESSION_ACTION = 523 - SYS_SETATTRLISTAT = 524 - SYS_NET_QOS_GUIDELINE = 525 - SYS_FMOUNT = 526 - SYS_NTP_ADJTIME = 527 - SYS_NTP_GETTIME = 528 - SYS_OS_FAULT_WITH_PAYLOAD = 529 - SYS_KQUEUE_WORKLOOP_CTL = 530 - SYS___MACH_BRIDGE_REMOTE_TIME = 531 - SYS_MAXSYSCALL = 532 - SYS_INVALID = 63 -) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go deleted file mode 100644 index 103a72ed1c0..00000000000 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go +++ /dev/null @@ -1,436 +0,0 @@ -// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS11.1.sdk/usr/include/sys/syscall.h -// Code generated by the command above; see 
README.md. DO NOT EDIT. - -// +build arm,darwin - -package unix - -const ( - SYS_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAIT4 = 7 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_CHDIR = 12 - SYS_FCHDIR = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_CHOWN = 16 - SYS_GETFSSTAT = 18 - SYS_GETPID = 20 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_GETEUID = 25 - SYS_PTRACE = 26 - SYS_RECVMSG = 27 - SYS_SENDMSG = 28 - SYS_RECVFROM = 29 - SYS_ACCEPT = 30 - SYS_GETPEERNAME = 31 - SYS_GETSOCKNAME = 32 - SYS_ACCESS = 33 - SYS_CHFLAGS = 34 - SYS_FCHFLAGS = 35 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_GETPPID = 39 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_GETEGID = 43 - SYS_SIGACTION = 46 - SYS_GETGID = 47 - SYS_SIGPROCMASK = 48 - SYS_GETLOGIN = 49 - SYS_SETLOGIN = 50 - SYS_ACCT = 51 - SYS_SIGPENDING = 52 - SYS_SIGALTSTACK = 53 - SYS_IOCTL = 54 - SYS_REBOOT = 55 - SYS_REVOKE = 56 - SYS_SYMLINK = 57 - SYS_READLINK = 58 - SYS_EXECVE = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_MSYNC = 65 - SYS_VFORK = 66 - SYS_MUNMAP = 73 - SYS_MPROTECT = 74 - SYS_MADVISE = 75 - SYS_MINCORE = 78 - SYS_GETGROUPS = 79 - SYS_SETGROUPS = 80 - SYS_GETPGRP = 81 - SYS_SETPGID = 82 - SYS_SETITIMER = 83 - SYS_SWAPON = 85 - SYS_GETITIMER = 86 - SYS_GETDTABLESIZE = 89 - SYS_DUP2 = 90 - SYS_FCNTL = 92 - SYS_SELECT = 93 - SYS_FSYNC = 95 - SYS_SETPRIORITY = 96 - SYS_SOCKET = 97 - SYS_CONNECT = 98 - SYS_GETPRIORITY = 100 - SYS_BIND = 104 - SYS_SETSOCKOPT = 105 - SYS_LISTEN = 106 - SYS_SIGSUSPEND = 111 - SYS_GETTIMEOFDAY = 116 - SYS_GETRUSAGE = 117 - SYS_GETSOCKOPT = 118 - SYS_READV = 120 - SYS_WRITEV = 121 - SYS_SETTIMEOFDAY = 122 - SYS_FCHOWN = 123 - SYS_FCHMOD = 124 - SYS_SETREUID = 126 - SYS_SETREGID = 127 - SYS_RENAME = 128 - SYS_FLOCK = 131 - SYS_MKFIFO = 132 - SYS_SENDTO = 133 - SYS_SHUTDOWN = 134 - SYS_SOCKETPAIR = 135 - SYS_MKDIR = 136 - SYS_RMDIR = 137 - SYS_UTIMES = 138 - SYS_FUTIMES = 139 - SYS_ADJTIME = 140 - SYS_GETHOSTUUID = 142 - SYS_SETSID = 147 - SYS_GETPGID = 151 - SYS_SETPRIVEXEC = 152 - SYS_PREAD = 153 - SYS_PWRITE = 154 - SYS_NFSSVC = 155 - SYS_STATFS = 157 - SYS_FSTATFS = 158 - SYS_UNMOUNT = 159 - SYS_GETFH = 161 - SYS_QUOTACTL = 165 - SYS_MOUNT = 167 - SYS_CSOPS = 169 - SYS_CSOPS_AUDITTOKEN = 170 - SYS_WAITID = 173 - SYS_KDEBUG_TYPEFILTER = 177 - SYS_KDEBUG_TRACE_STRING = 178 - SYS_KDEBUG_TRACE64 = 179 - SYS_KDEBUG_TRACE = 180 - SYS_SETGID = 181 - SYS_SETEGID = 182 - SYS_SETEUID = 183 - SYS_SIGRETURN = 184 - SYS_THREAD_SELFCOUNTS = 186 - SYS_FDATASYNC = 187 - SYS_STAT = 188 - SYS_FSTAT = 189 - SYS_LSTAT = 190 - SYS_PATHCONF = 191 - SYS_FPATHCONF = 192 - SYS_GETRLIMIT = 194 - SYS_SETRLIMIT = 195 - SYS_GETDIRENTRIES = 196 - SYS_MMAP = 197 - SYS_LSEEK = 199 - SYS_TRUNCATE = 200 - SYS_FTRUNCATE = 201 - SYS_SYSCTL = 202 - SYS_MLOCK = 203 - SYS_MUNLOCK = 204 - SYS_UNDELETE = 205 - SYS_OPEN_DPROTECTED_NP = 216 - SYS_GETATTRLIST = 220 - SYS_SETATTRLIST = 221 - SYS_GETDIRENTRIESATTR = 222 - SYS_EXCHANGEDATA = 223 - SYS_SEARCHFS = 225 - SYS_DELETE = 226 - SYS_COPYFILE = 227 - SYS_FGETATTRLIST = 228 - SYS_FSETATTRLIST = 229 - SYS_POLL = 230 - SYS_WATCHEVENT = 231 - SYS_WAITEVENT = 232 - SYS_MODWATCH = 233 - SYS_GETXATTR = 234 - SYS_FGETXATTR = 235 - SYS_SETXATTR = 236 - SYS_FSETXATTR = 237 - SYS_REMOVEXATTR = 238 - SYS_FREMOVEXATTR = 239 - SYS_LISTXATTR = 240 - SYS_FLISTXATTR = 241 - SYS_FSCTL = 242 - SYS_INITGROUPS = 243 - SYS_POSIX_SPAWN = 244 - SYS_FFSCTL = 245 - SYS_NFSCLNT = 247 - SYS_FHOPEN = 248 - SYS_MINHERIT = 250 - SYS_SEMSYS = 251 - SYS_MSGSYS = 252 - SYS_SHMSYS = 
253 - SYS_SEMCTL = 254 - SYS_SEMGET = 255 - SYS_SEMOP = 256 - SYS_MSGCTL = 258 - SYS_MSGGET = 259 - SYS_MSGSND = 260 - SYS_MSGRCV = 261 - SYS_SHMAT = 262 - SYS_SHMCTL = 263 - SYS_SHMDT = 264 - SYS_SHMGET = 265 - SYS_SHM_OPEN = 266 - SYS_SHM_UNLINK = 267 - SYS_SEM_OPEN = 268 - SYS_SEM_CLOSE = 269 - SYS_SEM_UNLINK = 270 - SYS_SEM_WAIT = 271 - SYS_SEM_TRYWAIT = 272 - SYS_SEM_POST = 273 - SYS_SYSCTLBYNAME = 274 - SYS_OPEN_EXTENDED = 277 - SYS_UMASK_EXTENDED = 278 - SYS_STAT_EXTENDED = 279 - SYS_LSTAT_EXTENDED = 280 - SYS_FSTAT_EXTENDED = 281 - SYS_CHMOD_EXTENDED = 282 - SYS_FCHMOD_EXTENDED = 283 - SYS_ACCESS_EXTENDED = 284 - SYS_SETTID = 285 - SYS_GETTID = 286 - SYS_SETSGROUPS = 287 - SYS_GETSGROUPS = 288 - SYS_SETWGROUPS = 289 - SYS_GETWGROUPS = 290 - SYS_MKFIFO_EXTENDED = 291 - SYS_MKDIR_EXTENDED = 292 - SYS_IDENTITYSVC = 293 - SYS_SHARED_REGION_CHECK_NP = 294 - SYS_VM_PRESSURE_MONITOR = 296 - SYS_PSYNCH_RW_LONGRDLOCK = 297 - SYS_PSYNCH_RW_YIELDWRLOCK = 298 - SYS_PSYNCH_RW_DOWNGRADE = 299 - SYS_PSYNCH_RW_UPGRADE = 300 - SYS_PSYNCH_MUTEXWAIT = 301 - SYS_PSYNCH_MUTEXDROP = 302 - SYS_PSYNCH_CVBROAD = 303 - SYS_PSYNCH_CVSIGNAL = 304 - SYS_PSYNCH_CVWAIT = 305 - SYS_PSYNCH_RW_RDLOCK = 306 - SYS_PSYNCH_RW_WRLOCK = 307 - SYS_PSYNCH_RW_UNLOCK = 308 - SYS_PSYNCH_RW_UNLOCK2 = 309 - SYS_GETSID = 310 - SYS_SETTID_WITH_PID = 311 - SYS_PSYNCH_CVCLRPREPOST = 312 - SYS_AIO_FSYNC = 313 - SYS_AIO_RETURN = 314 - SYS_AIO_SUSPEND = 315 - SYS_AIO_CANCEL = 316 - SYS_AIO_ERROR = 317 - SYS_AIO_READ = 318 - SYS_AIO_WRITE = 319 - SYS_LIO_LISTIO = 320 - SYS_IOPOLICYSYS = 322 - SYS_PROCESS_POLICY = 323 - SYS_MLOCKALL = 324 - SYS_MUNLOCKALL = 325 - SYS_ISSETUGID = 327 - SYS___PTHREAD_KILL = 328 - SYS___PTHREAD_SIGMASK = 329 - SYS___SIGWAIT = 330 - SYS___DISABLE_THREADSIGNAL = 331 - SYS___PTHREAD_MARKCANCEL = 332 - SYS___PTHREAD_CANCELED = 333 - SYS___SEMWAIT_SIGNAL = 334 - SYS_PROC_INFO = 336 - SYS_SENDFILE = 337 - SYS_STAT64 = 338 - SYS_FSTAT64 = 339 - SYS_LSTAT64 = 340 - SYS_STAT64_EXTENDED = 341 - SYS_LSTAT64_EXTENDED = 342 - SYS_FSTAT64_EXTENDED = 343 - SYS_GETDIRENTRIES64 = 344 - SYS_STATFS64 = 345 - SYS_FSTATFS64 = 346 - SYS_GETFSSTAT64 = 347 - SYS___PTHREAD_CHDIR = 348 - SYS___PTHREAD_FCHDIR = 349 - SYS_AUDIT = 350 - SYS_AUDITON = 351 - SYS_GETAUID = 353 - SYS_SETAUID = 354 - SYS_GETAUDIT_ADDR = 357 - SYS_SETAUDIT_ADDR = 358 - SYS_AUDITCTL = 359 - SYS_BSDTHREAD_CREATE = 360 - SYS_BSDTHREAD_TERMINATE = 361 - SYS_KQUEUE = 362 - SYS_KEVENT = 363 - SYS_LCHOWN = 364 - SYS_BSDTHREAD_REGISTER = 366 - SYS_WORKQ_OPEN = 367 - SYS_WORKQ_KERNRETURN = 368 - SYS_KEVENT64 = 369 - SYS___OLD_SEMWAIT_SIGNAL = 370 - SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 - SYS_THREAD_SELFID = 372 - SYS_LEDGER = 373 - SYS_KEVENT_QOS = 374 - SYS_KEVENT_ID = 375 - SYS___MAC_EXECVE = 380 - SYS___MAC_SYSCALL = 381 - SYS___MAC_GET_FILE = 382 - SYS___MAC_SET_FILE = 383 - SYS___MAC_GET_LINK = 384 - SYS___MAC_SET_LINK = 385 - SYS___MAC_GET_PROC = 386 - SYS___MAC_SET_PROC = 387 - SYS___MAC_GET_FD = 388 - SYS___MAC_SET_FD = 389 - SYS___MAC_GET_PID = 390 - SYS_PSELECT = 394 - SYS_PSELECT_NOCANCEL = 395 - SYS_READ_NOCANCEL = 396 - SYS_WRITE_NOCANCEL = 397 - SYS_OPEN_NOCANCEL = 398 - SYS_CLOSE_NOCANCEL = 399 - SYS_WAIT4_NOCANCEL = 400 - SYS_RECVMSG_NOCANCEL = 401 - SYS_SENDMSG_NOCANCEL = 402 - SYS_RECVFROM_NOCANCEL = 403 - SYS_ACCEPT_NOCANCEL = 404 - SYS_MSYNC_NOCANCEL = 405 - SYS_FCNTL_NOCANCEL = 406 - SYS_SELECT_NOCANCEL = 407 - SYS_FSYNC_NOCANCEL = 408 - SYS_CONNECT_NOCANCEL = 409 - SYS_SIGSUSPEND_NOCANCEL = 410 - SYS_READV_NOCANCEL = 411 - SYS_WRITEV_NOCANCEL 
= 412 - SYS_SENDTO_NOCANCEL = 413 - SYS_PREAD_NOCANCEL = 414 - SYS_PWRITE_NOCANCEL = 415 - SYS_WAITID_NOCANCEL = 416 - SYS_POLL_NOCANCEL = 417 - SYS_MSGSND_NOCANCEL = 418 - SYS_MSGRCV_NOCANCEL = 419 - SYS_SEM_WAIT_NOCANCEL = 420 - SYS_AIO_SUSPEND_NOCANCEL = 421 - SYS___SIGWAIT_NOCANCEL = 422 - SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 - SYS___MAC_MOUNT = 424 - SYS___MAC_GET_MOUNT = 425 - SYS___MAC_GETFSSTAT = 426 - SYS_FSGETPATH = 427 - SYS_AUDIT_SESSION_SELF = 428 - SYS_AUDIT_SESSION_JOIN = 429 - SYS_FILEPORT_MAKEPORT = 430 - SYS_FILEPORT_MAKEFD = 431 - SYS_AUDIT_SESSION_PORT = 432 - SYS_PID_SUSPEND = 433 - SYS_PID_RESUME = 434 - SYS_PID_HIBERNATE = 435 - SYS_PID_SHUTDOWN_SOCKETS = 436 - SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 - SYS_KAS_INFO = 439 - SYS_MEMORYSTATUS_CONTROL = 440 - SYS_GUARDED_OPEN_NP = 441 - SYS_GUARDED_CLOSE_NP = 442 - SYS_GUARDED_KQUEUE_NP = 443 - SYS_CHANGE_FDGUARD_NP = 444 - SYS_USRCTL = 445 - SYS_PROC_RLIMIT_CONTROL = 446 - SYS_CONNECTX = 447 - SYS_DISCONNECTX = 448 - SYS_PEELOFF = 449 - SYS_SOCKET_DELEGATE = 450 - SYS_TELEMETRY = 451 - SYS_PROC_UUID_POLICY = 452 - SYS_MEMORYSTATUS_GET_LEVEL = 453 - SYS_SYSTEM_OVERRIDE = 454 - SYS_VFS_PURGE = 455 - SYS_SFI_CTL = 456 - SYS_SFI_PIDCTL = 457 - SYS_COALITION = 458 - SYS_COALITION_INFO = 459 - SYS_NECP_MATCH_POLICY = 460 - SYS_GETATTRLISTBULK = 461 - SYS_CLONEFILEAT = 462 - SYS_OPENAT = 463 - SYS_OPENAT_NOCANCEL = 464 - SYS_RENAMEAT = 465 - SYS_FACCESSAT = 466 - SYS_FCHMODAT = 467 - SYS_FCHOWNAT = 468 - SYS_FSTATAT = 469 - SYS_FSTATAT64 = 470 - SYS_LINKAT = 471 - SYS_UNLINKAT = 472 - SYS_READLINKAT = 473 - SYS_SYMLINKAT = 474 - SYS_MKDIRAT = 475 - SYS_GETATTRLISTAT = 476 - SYS_PROC_TRACE_LOG = 477 - SYS_BSDTHREAD_CTL = 478 - SYS_OPENBYID_NP = 479 - SYS_RECVMSG_X = 480 - SYS_SENDMSG_X = 481 - SYS_THREAD_SELFUSAGE = 482 - SYS_CSRCTL = 483 - SYS_GUARDED_OPEN_DPROTECTED_NP = 484 - SYS_GUARDED_WRITE_NP = 485 - SYS_GUARDED_PWRITE_NP = 486 - SYS_GUARDED_WRITEV_NP = 487 - SYS_RENAMEATX_NP = 488 - SYS_MREMAP_ENCRYPTED = 489 - SYS_NETAGENT_TRIGGER = 490 - SYS_STACK_SNAPSHOT_WITH_CONFIG = 491 - SYS_MICROSTACKSHOT = 492 - SYS_GRAB_PGO_DATA = 493 - SYS_PERSONA = 494 - SYS_WORK_INTERVAL_CTL = 499 - SYS_GETENTROPY = 500 - SYS_NECP_OPEN = 501 - SYS_NECP_CLIENT_ACTION = 502 - SYS___NEXUS_OPEN = 503 - SYS___NEXUS_REGISTER = 504 - SYS___NEXUS_DEREGISTER = 505 - SYS___NEXUS_CREATE = 506 - SYS___NEXUS_DESTROY = 507 - SYS___NEXUS_GET_OPT = 508 - SYS___NEXUS_SET_OPT = 509 - SYS___CHANNEL_OPEN = 510 - SYS___CHANNEL_GET_INFO = 511 - SYS___CHANNEL_SYNC = 512 - SYS___CHANNEL_GET_OPT = 513 - SYS___CHANNEL_SET_OPT = 514 - SYS_ULOCK_WAIT = 515 - SYS_ULOCK_WAKE = 516 - SYS_FCLONEFILEAT = 517 - SYS_FS_SNAPSHOT = 518 - SYS_TERMINATE_WITH_PAYLOAD = 520 - SYS_ABORT_WITH_PAYLOAD = 521 - SYS_NECP_SESSION_OPEN = 522 - SYS_NECP_SESSION_ACTION = 523 - SYS_SETATTRLISTAT = 524 - SYS_NET_QOS_GUIDELINE = 525 - SYS_FMOUNT = 526 - SYS_NTP_ADJTIME = 527 - SYS_NTP_GETTIME = 528 - SYS_OS_FAULT_WITH_PAYLOAD = 529 - SYS_MAXSYSCALL = 530 - SYS_INVALID = 63 -) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go deleted file mode 100644 index 7ab2130b967..00000000000 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go +++ /dev/null @@ -1,436 +0,0 @@ -// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS11.1.sdk/usr/include/sys/syscall.h -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build arm64,darwin - -package unix - -const ( - SYS_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAIT4 = 7 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_CHDIR = 12 - SYS_FCHDIR = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_CHOWN = 16 - SYS_GETFSSTAT = 18 - SYS_GETPID = 20 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_GETEUID = 25 - SYS_PTRACE = 26 - SYS_RECVMSG = 27 - SYS_SENDMSG = 28 - SYS_RECVFROM = 29 - SYS_ACCEPT = 30 - SYS_GETPEERNAME = 31 - SYS_GETSOCKNAME = 32 - SYS_ACCESS = 33 - SYS_CHFLAGS = 34 - SYS_FCHFLAGS = 35 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_GETPPID = 39 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_GETEGID = 43 - SYS_SIGACTION = 46 - SYS_GETGID = 47 - SYS_SIGPROCMASK = 48 - SYS_GETLOGIN = 49 - SYS_SETLOGIN = 50 - SYS_ACCT = 51 - SYS_SIGPENDING = 52 - SYS_SIGALTSTACK = 53 - SYS_IOCTL = 54 - SYS_REBOOT = 55 - SYS_REVOKE = 56 - SYS_SYMLINK = 57 - SYS_READLINK = 58 - SYS_EXECVE = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_MSYNC = 65 - SYS_VFORK = 66 - SYS_MUNMAP = 73 - SYS_MPROTECT = 74 - SYS_MADVISE = 75 - SYS_MINCORE = 78 - SYS_GETGROUPS = 79 - SYS_SETGROUPS = 80 - SYS_GETPGRP = 81 - SYS_SETPGID = 82 - SYS_SETITIMER = 83 - SYS_SWAPON = 85 - SYS_GETITIMER = 86 - SYS_GETDTABLESIZE = 89 - SYS_DUP2 = 90 - SYS_FCNTL = 92 - SYS_SELECT = 93 - SYS_FSYNC = 95 - SYS_SETPRIORITY = 96 - SYS_SOCKET = 97 - SYS_CONNECT = 98 - SYS_GETPRIORITY = 100 - SYS_BIND = 104 - SYS_SETSOCKOPT = 105 - SYS_LISTEN = 106 - SYS_SIGSUSPEND = 111 - SYS_GETTIMEOFDAY = 116 - SYS_GETRUSAGE = 117 - SYS_GETSOCKOPT = 118 - SYS_READV = 120 - SYS_WRITEV = 121 - SYS_SETTIMEOFDAY = 122 - SYS_FCHOWN = 123 - SYS_FCHMOD = 124 - SYS_SETREUID = 126 - SYS_SETREGID = 127 - SYS_RENAME = 128 - SYS_FLOCK = 131 - SYS_MKFIFO = 132 - SYS_SENDTO = 133 - SYS_SHUTDOWN = 134 - SYS_SOCKETPAIR = 135 - SYS_MKDIR = 136 - SYS_RMDIR = 137 - SYS_UTIMES = 138 - SYS_FUTIMES = 139 - SYS_ADJTIME = 140 - SYS_GETHOSTUUID = 142 - SYS_SETSID = 147 - SYS_GETPGID = 151 - SYS_SETPRIVEXEC = 152 - SYS_PREAD = 153 - SYS_PWRITE = 154 - SYS_NFSSVC = 155 - SYS_STATFS = 157 - SYS_FSTATFS = 158 - SYS_UNMOUNT = 159 - SYS_GETFH = 161 - SYS_QUOTACTL = 165 - SYS_MOUNT = 167 - SYS_CSOPS = 169 - SYS_CSOPS_AUDITTOKEN = 170 - SYS_WAITID = 173 - SYS_KDEBUG_TYPEFILTER = 177 - SYS_KDEBUG_TRACE_STRING = 178 - SYS_KDEBUG_TRACE64 = 179 - SYS_KDEBUG_TRACE = 180 - SYS_SETGID = 181 - SYS_SETEGID = 182 - SYS_SETEUID = 183 - SYS_SIGRETURN = 184 - SYS_THREAD_SELFCOUNTS = 186 - SYS_FDATASYNC = 187 - SYS_STAT = 188 - SYS_FSTAT = 189 - SYS_LSTAT = 190 - SYS_PATHCONF = 191 - SYS_FPATHCONF = 192 - SYS_GETRLIMIT = 194 - SYS_SETRLIMIT = 195 - SYS_GETDIRENTRIES = 196 - SYS_MMAP = 197 - SYS_LSEEK = 199 - SYS_TRUNCATE = 200 - SYS_FTRUNCATE = 201 - SYS_SYSCTL = 202 - SYS_MLOCK = 203 - SYS_MUNLOCK = 204 - SYS_UNDELETE = 205 - SYS_OPEN_DPROTECTED_NP = 216 - SYS_GETATTRLIST = 220 - SYS_SETATTRLIST = 221 - SYS_GETDIRENTRIESATTR = 222 - SYS_EXCHANGEDATA = 223 - SYS_SEARCHFS = 225 - SYS_DELETE = 226 - SYS_COPYFILE = 227 - SYS_FGETATTRLIST = 228 - SYS_FSETATTRLIST = 229 - SYS_POLL = 230 - SYS_WATCHEVENT = 231 - SYS_WAITEVENT = 232 - SYS_MODWATCH = 233 - SYS_GETXATTR = 234 - SYS_FGETXATTR = 235 - SYS_SETXATTR = 236 - SYS_FSETXATTR = 237 - SYS_REMOVEXATTR = 238 - SYS_FREMOVEXATTR = 239 - SYS_LISTXATTR = 240 - SYS_FLISTXATTR = 241 - SYS_FSCTL = 242 - SYS_INITGROUPS = 243 - SYS_POSIX_SPAWN = 244 - SYS_FFSCTL = 245 - SYS_NFSCLNT = 247 - SYS_FHOPEN = 248 - SYS_MINHERIT = 250 - SYS_SEMSYS = 251 - SYS_MSGSYS = 252 - SYS_SHMSYS = 253 - SYS_SEMCTL = 254 
- SYS_SEMGET = 255 - SYS_SEMOP = 256 - SYS_MSGCTL = 258 - SYS_MSGGET = 259 - SYS_MSGSND = 260 - SYS_MSGRCV = 261 - SYS_SHMAT = 262 - SYS_SHMCTL = 263 - SYS_SHMDT = 264 - SYS_SHMGET = 265 - SYS_SHM_OPEN = 266 - SYS_SHM_UNLINK = 267 - SYS_SEM_OPEN = 268 - SYS_SEM_CLOSE = 269 - SYS_SEM_UNLINK = 270 - SYS_SEM_WAIT = 271 - SYS_SEM_TRYWAIT = 272 - SYS_SEM_POST = 273 - SYS_SYSCTLBYNAME = 274 - SYS_OPEN_EXTENDED = 277 - SYS_UMASK_EXTENDED = 278 - SYS_STAT_EXTENDED = 279 - SYS_LSTAT_EXTENDED = 280 - SYS_FSTAT_EXTENDED = 281 - SYS_CHMOD_EXTENDED = 282 - SYS_FCHMOD_EXTENDED = 283 - SYS_ACCESS_EXTENDED = 284 - SYS_SETTID = 285 - SYS_GETTID = 286 - SYS_SETSGROUPS = 287 - SYS_GETSGROUPS = 288 - SYS_SETWGROUPS = 289 - SYS_GETWGROUPS = 290 - SYS_MKFIFO_EXTENDED = 291 - SYS_MKDIR_EXTENDED = 292 - SYS_IDENTITYSVC = 293 - SYS_SHARED_REGION_CHECK_NP = 294 - SYS_VM_PRESSURE_MONITOR = 296 - SYS_PSYNCH_RW_LONGRDLOCK = 297 - SYS_PSYNCH_RW_YIELDWRLOCK = 298 - SYS_PSYNCH_RW_DOWNGRADE = 299 - SYS_PSYNCH_RW_UPGRADE = 300 - SYS_PSYNCH_MUTEXWAIT = 301 - SYS_PSYNCH_MUTEXDROP = 302 - SYS_PSYNCH_CVBROAD = 303 - SYS_PSYNCH_CVSIGNAL = 304 - SYS_PSYNCH_CVWAIT = 305 - SYS_PSYNCH_RW_RDLOCK = 306 - SYS_PSYNCH_RW_WRLOCK = 307 - SYS_PSYNCH_RW_UNLOCK = 308 - SYS_PSYNCH_RW_UNLOCK2 = 309 - SYS_GETSID = 310 - SYS_SETTID_WITH_PID = 311 - SYS_PSYNCH_CVCLRPREPOST = 312 - SYS_AIO_FSYNC = 313 - SYS_AIO_RETURN = 314 - SYS_AIO_SUSPEND = 315 - SYS_AIO_CANCEL = 316 - SYS_AIO_ERROR = 317 - SYS_AIO_READ = 318 - SYS_AIO_WRITE = 319 - SYS_LIO_LISTIO = 320 - SYS_IOPOLICYSYS = 322 - SYS_PROCESS_POLICY = 323 - SYS_MLOCKALL = 324 - SYS_MUNLOCKALL = 325 - SYS_ISSETUGID = 327 - SYS___PTHREAD_KILL = 328 - SYS___PTHREAD_SIGMASK = 329 - SYS___SIGWAIT = 330 - SYS___DISABLE_THREADSIGNAL = 331 - SYS___PTHREAD_MARKCANCEL = 332 - SYS___PTHREAD_CANCELED = 333 - SYS___SEMWAIT_SIGNAL = 334 - SYS_PROC_INFO = 336 - SYS_SENDFILE = 337 - SYS_STAT64 = 338 - SYS_FSTAT64 = 339 - SYS_LSTAT64 = 340 - SYS_STAT64_EXTENDED = 341 - SYS_LSTAT64_EXTENDED = 342 - SYS_FSTAT64_EXTENDED = 343 - SYS_GETDIRENTRIES64 = 344 - SYS_STATFS64 = 345 - SYS_FSTATFS64 = 346 - SYS_GETFSSTAT64 = 347 - SYS___PTHREAD_CHDIR = 348 - SYS___PTHREAD_FCHDIR = 349 - SYS_AUDIT = 350 - SYS_AUDITON = 351 - SYS_GETAUID = 353 - SYS_SETAUID = 354 - SYS_GETAUDIT_ADDR = 357 - SYS_SETAUDIT_ADDR = 358 - SYS_AUDITCTL = 359 - SYS_BSDTHREAD_CREATE = 360 - SYS_BSDTHREAD_TERMINATE = 361 - SYS_KQUEUE = 362 - SYS_KEVENT = 363 - SYS_LCHOWN = 364 - SYS_BSDTHREAD_REGISTER = 366 - SYS_WORKQ_OPEN = 367 - SYS_WORKQ_KERNRETURN = 368 - SYS_KEVENT64 = 369 - SYS___OLD_SEMWAIT_SIGNAL = 370 - SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 - SYS_THREAD_SELFID = 372 - SYS_LEDGER = 373 - SYS_KEVENT_QOS = 374 - SYS_KEVENT_ID = 375 - SYS___MAC_EXECVE = 380 - SYS___MAC_SYSCALL = 381 - SYS___MAC_GET_FILE = 382 - SYS___MAC_SET_FILE = 383 - SYS___MAC_GET_LINK = 384 - SYS___MAC_SET_LINK = 385 - SYS___MAC_GET_PROC = 386 - SYS___MAC_SET_PROC = 387 - SYS___MAC_GET_FD = 388 - SYS___MAC_SET_FD = 389 - SYS___MAC_GET_PID = 390 - SYS_PSELECT = 394 - SYS_PSELECT_NOCANCEL = 395 - SYS_READ_NOCANCEL = 396 - SYS_WRITE_NOCANCEL = 397 - SYS_OPEN_NOCANCEL = 398 - SYS_CLOSE_NOCANCEL = 399 - SYS_WAIT4_NOCANCEL = 400 - SYS_RECVMSG_NOCANCEL = 401 - SYS_SENDMSG_NOCANCEL = 402 - SYS_RECVFROM_NOCANCEL = 403 - SYS_ACCEPT_NOCANCEL = 404 - SYS_MSYNC_NOCANCEL = 405 - SYS_FCNTL_NOCANCEL = 406 - SYS_SELECT_NOCANCEL = 407 - SYS_FSYNC_NOCANCEL = 408 - SYS_CONNECT_NOCANCEL = 409 - SYS_SIGSUSPEND_NOCANCEL = 410 - SYS_READV_NOCANCEL = 411 - SYS_WRITEV_NOCANCEL = 412 - 
SYS_SENDTO_NOCANCEL = 413 - SYS_PREAD_NOCANCEL = 414 - SYS_PWRITE_NOCANCEL = 415 - SYS_WAITID_NOCANCEL = 416 - SYS_POLL_NOCANCEL = 417 - SYS_MSGSND_NOCANCEL = 418 - SYS_MSGRCV_NOCANCEL = 419 - SYS_SEM_WAIT_NOCANCEL = 420 - SYS_AIO_SUSPEND_NOCANCEL = 421 - SYS___SIGWAIT_NOCANCEL = 422 - SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 - SYS___MAC_MOUNT = 424 - SYS___MAC_GET_MOUNT = 425 - SYS___MAC_GETFSSTAT = 426 - SYS_FSGETPATH = 427 - SYS_AUDIT_SESSION_SELF = 428 - SYS_AUDIT_SESSION_JOIN = 429 - SYS_FILEPORT_MAKEPORT = 430 - SYS_FILEPORT_MAKEFD = 431 - SYS_AUDIT_SESSION_PORT = 432 - SYS_PID_SUSPEND = 433 - SYS_PID_RESUME = 434 - SYS_PID_HIBERNATE = 435 - SYS_PID_SHUTDOWN_SOCKETS = 436 - SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 - SYS_KAS_INFO = 439 - SYS_MEMORYSTATUS_CONTROL = 440 - SYS_GUARDED_OPEN_NP = 441 - SYS_GUARDED_CLOSE_NP = 442 - SYS_GUARDED_KQUEUE_NP = 443 - SYS_CHANGE_FDGUARD_NP = 444 - SYS_USRCTL = 445 - SYS_PROC_RLIMIT_CONTROL = 446 - SYS_CONNECTX = 447 - SYS_DISCONNECTX = 448 - SYS_PEELOFF = 449 - SYS_SOCKET_DELEGATE = 450 - SYS_TELEMETRY = 451 - SYS_PROC_UUID_POLICY = 452 - SYS_MEMORYSTATUS_GET_LEVEL = 453 - SYS_SYSTEM_OVERRIDE = 454 - SYS_VFS_PURGE = 455 - SYS_SFI_CTL = 456 - SYS_SFI_PIDCTL = 457 - SYS_COALITION = 458 - SYS_COALITION_INFO = 459 - SYS_NECP_MATCH_POLICY = 460 - SYS_GETATTRLISTBULK = 461 - SYS_CLONEFILEAT = 462 - SYS_OPENAT = 463 - SYS_OPENAT_NOCANCEL = 464 - SYS_RENAMEAT = 465 - SYS_FACCESSAT = 466 - SYS_FCHMODAT = 467 - SYS_FCHOWNAT = 468 - SYS_FSTATAT = 469 - SYS_FSTATAT64 = 470 - SYS_LINKAT = 471 - SYS_UNLINKAT = 472 - SYS_READLINKAT = 473 - SYS_SYMLINKAT = 474 - SYS_MKDIRAT = 475 - SYS_GETATTRLISTAT = 476 - SYS_PROC_TRACE_LOG = 477 - SYS_BSDTHREAD_CTL = 478 - SYS_OPENBYID_NP = 479 - SYS_RECVMSG_X = 480 - SYS_SENDMSG_X = 481 - SYS_THREAD_SELFUSAGE = 482 - SYS_CSRCTL = 483 - SYS_GUARDED_OPEN_DPROTECTED_NP = 484 - SYS_GUARDED_WRITE_NP = 485 - SYS_GUARDED_PWRITE_NP = 486 - SYS_GUARDED_WRITEV_NP = 487 - SYS_RENAMEATX_NP = 488 - SYS_MREMAP_ENCRYPTED = 489 - SYS_NETAGENT_TRIGGER = 490 - SYS_STACK_SNAPSHOT_WITH_CONFIG = 491 - SYS_MICROSTACKSHOT = 492 - SYS_GRAB_PGO_DATA = 493 - SYS_PERSONA = 494 - SYS_WORK_INTERVAL_CTL = 499 - SYS_GETENTROPY = 500 - SYS_NECP_OPEN = 501 - SYS_NECP_CLIENT_ACTION = 502 - SYS___NEXUS_OPEN = 503 - SYS___NEXUS_REGISTER = 504 - SYS___NEXUS_DEREGISTER = 505 - SYS___NEXUS_CREATE = 506 - SYS___NEXUS_DESTROY = 507 - SYS___NEXUS_GET_OPT = 508 - SYS___NEXUS_SET_OPT = 509 - SYS___CHANNEL_OPEN = 510 - SYS___CHANNEL_GET_INFO = 511 - SYS___CHANNEL_SYNC = 512 - SYS___CHANNEL_GET_OPT = 513 - SYS___CHANNEL_SET_OPT = 514 - SYS_ULOCK_WAIT = 515 - SYS_ULOCK_WAKE = 516 - SYS_FCLONEFILEAT = 517 - SYS_FS_SNAPSHOT = 518 - SYS_TERMINATE_WITH_PAYLOAD = 520 - SYS_ABORT_WITH_PAYLOAD = 521 - SYS_NECP_SESSION_OPEN = 522 - SYS_NECP_SESSION_ACTION = 523 - SYS_SETATTRLISTAT = 524 - SYS_NET_QOS_GUIDELINE = 525 - SYS_FMOUNT = 526 - SYS_NTP_ADJTIME = 527 - SYS_NTP_GETTIME = 528 - SYS_OS_FAULT_WITH_PAYLOAD = 529 - SYS_MAXSYSCALL = 530 - SYS_INVALID = 63 -) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go index 464c9a9832d..9912c6ee3d6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go @@ -6,129 +6,125 @@ package unix const ( - // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int - SYS_EXIT = 1 // { void exit(int rval); } - SYS_FORK = 2 // { int fork(void); } - SYS_READ = 3 // { ssize_t read(int fd, void *buf, 
size_t nbyte); } - SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); } - SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } - SYS_CLOSE = 6 // { int close(int fd); } - SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, struct rusage *rusage); } wait4 wait_args int - SYS_LINK = 9 // { int link(char *path, char *link); } - SYS_UNLINK = 10 // { int unlink(char *path); } - SYS_CHDIR = 12 // { int chdir(char *path); } - SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } - SYS_CHMOD = 15 // { int chmod(char *path, int mode); } - SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int - SYS_GETFSSTAT = 18 // { int getfsstat(struct statfs *buf, long bufsize, int flags); } - SYS_GETPID = 20 // { pid_t getpid(void); } - SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } - SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } - SYS_SETUID = 23 // { int setuid(uid_t uid); } - SYS_GETUID = 24 // { uid_t getuid(void); } - SYS_GETEUID = 25 // { uid_t geteuid(void); } - SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, int data); } - SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); } - SYS_SENDMSG = 28 // { int sendmsg(int s, caddr_t msg, int flags); } - SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, int flags, caddr_t from, int *fromlenaddr); } - SYS_ACCEPT = 30 // { int accept(int s, caddr_t name, int *anamelen); } - SYS_GETPEERNAME = 31 // { int getpeername(int fdes, caddr_t asa, int *alen); } - SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, caddr_t asa, int *alen); } - SYS_ACCESS = 33 // { int access(char *path, int flags); } - SYS_CHFLAGS = 34 // { int chflags(char *path, int flags); } - SYS_FCHFLAGS = 35 // { int fchflags(int fd, int flags); } - SYS_SYNC = 36 // { int sync(void); } - SYS_KILL = 37 // { int kill(int pid, int signum); } - SYS_GETPPID = 39 // { pid_t getppid(void); } - SYS_DUP = 41 // { int dup(int fd); } - SYS_PIPE = 42 // { int pipe(void); } - SYS_GETEGID = 43 // { gid_t getegid(void); } - SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } - SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } - SYS_GETGID = 47 // { gid_t getgid(void); } - SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); } - SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } - SYS_ACCT = 51 // { int acct(char *path); } - SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); } - SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); } - SYS_REBOOT = 55 // { int reboot(int opt); } - SYS_REVOKE = 56 // { int revoke(char *path); } - SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } - SYS_READLINK = 58 // { int readlink(char *path, char *buf, int count); } - SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int - SYS_CHROOT = 61 // { int chroot(char *path); } - SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } - SYS_VFORK = 66 // { pid_t vfork(void); } - SYS_SBRK = 69 // { int sbrk(int incr); } - SYS_SSTK = 70 // { int sstk(int incr); } - SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } - SYS_MADVISE = 75 // { int 
madvise(void *addr, size_t len, int behav); } - SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } - SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } - SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); } - SYS_GETPGRP = 81 // { int getpgrp(void); } - SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } - SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, struct itimerval *oitv); } - SYS_SWAPON = 85 // { int swapon(char *name); } - SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); } - SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } - SYS_DUP2 = 90 // { int dup2(int from, int to); } - SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } - SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } - SYS_FSYNC = 95 // { int fsync(int fd); } - SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); } - SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); } - SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); } - SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } - SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); } - SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, caddr_t val, int valsize); } - SYS_LISTEN = 106 // { int listen(int s, int backlog); } - SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, struct timezone *tzp); } - SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); } - SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, caddr_t val, int *avalsize); } - SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); } - SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, u_int iovcnt); } - SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, struct timezone *tzp); } - SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } - SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } - SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } - SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } - SYS_RENAME = 128 // { int rename(char *from, char *to); } - SYS_FLOCK = 131 // { int flock(int fd, int how); } - SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } - SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen); } - SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } - SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, int *rsv); } - SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } - SYS_RMDIR = 137 // { int rmdir(char *path); } - SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); } - SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, struct timeval *olddelta); } - SYS_SETSID = 147 // { int setsid(void); } - SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, caddr_t arg); } - SYS_STATFS = 157 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 158 // { int fstatfs(int fd, struct statfs *buf); } - SYS_GETFH = 161 // { int getfh(char *fname, struct fhandle *fhp); } - SYS_GETDOMAINNAME = 162 // { int getdomainname(char *domainname, int len); } - SYS_SETDOMAINNAME = 163 // { int setdomainname(char *domainname, int len); } - SYS_UNAME = 164 // { int uname(struct utsname *name); } - SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } - SYS_RTPRIO = 166 // { int rtprio(int 
function, pid_t pid, struct rtprio *rtp); } - SYS_EXTPREAD = 173 // { ssize_t extpread(int fd, void *buf, size_t nbyte, int flags, off_t offset); } - SYS_EXTPWRITE = 174 // { ssize_t extpwrite(int fd, const void *buf, size_t nbyte, int flags, off_t offset); } - SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } - SYS_SETGID = 181 // { int setgid(gid_t gid); } - SYS_SETEGID = 182 // { int setegid(gid_t egid); } - SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } - SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } - SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int - SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_MMAP = 197 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); } - // SYS_NOSYS = 198; // { int nosys(void); } __syscall __syscall_args int + SYS_EXIT = 1 // { void exit(int rval); } + SYS_FORK = 2 // { int fork(void); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } + SYS_CLOSE = 6 // { int close(int fd); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, struct rusage *rusage); } wait4 wait_args int + // SYS_NOSYS = 8; // { int nosys(void); } __nosys nosys_args int + SYS_LINK = 9 // { int link(char *path, char *link); } + SYS_UNLINK = 10 // { int unlink(char *path); } + SYS_CHDIR = 12 // { int chdir(char *path); } + SYS_FCHDIR = 13 // { int fchdir(int fd); } + SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } + SYS_CHMOD = 15 // { int chmod(char *path, int mode); } + SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } + SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_GETFSSTAT = 18 // { int getfsstat(struct statfs *buf, long bufsize, int flags); } + SYS_GETPID = 20 // { pid_t getpid(void); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } + SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } + SYS_SETUID = 23 // { int setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t getuid(void); } + SYS_GETEUID = 25 // { uid_t geteuid(void); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { int sendmsg(int s, caddr_t msg, int flags); } + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, int flags, caddr_t from, int *fromlenaddr); } + SYS_ACCEPT = 30 // { int accept(int s, caddr_t name, int *anamelen); } + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, caddr_t asa, int *alen); } + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, caddr_t asa, int *alen); } + SYS_ACCESS = 33 // { int access(char *path, int flags); } + SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { int sync(void); } + SYS_KILL = 37 // { int kill(int pid, int signum); } + SYS_GETPPID = 39 // { pid_t getppid(void); } + SYS_DUP = 41 // { int dup(int fd); } + SYS_PIPE = 42 // { int pipe(void); } + SYS_GETEGID = 43 // { gid_t getegid(void); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, u_long offset, u_int scale); } + SYS_KTRACE = 45 // { 
int ktrace(const char *fname, int ops, int facs, int pid); } + SYS_GETGID = 47 // { gid_t getgid(void); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, size_t namelen); } + SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } + SYS_ACCT = 51 // { int acct(char *path); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); } + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); } + SYS_REBOOT = 55 // { int reboot(int opt); } + SYS_REVOKE = 56 // { int revoke(char *path); } + SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } + SYS_READLINK = 58 // { int readlink(char *path, char *buf, int count); } + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_CHROOT = 61 // { int chroot(char *path); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } + SYS_VFORK = 66 // { pid_t vfork(void); } + SYS_SBRK = 69 // { caddr_t sbrk(size_t incr); } + SYS_SSTK = 70 // { int sstk(size_t incr); } + SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); } + SYS_GETPGRP = 81 // { int getpgrp(void); } + SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, struct itimerval *oitv); } + SYS_SWAPON = 85 // { int swapon(char *name); } + SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); } + SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } + SYS_DUP2 = 90 // { int dup2(int from, int to); } + SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_FSYNC = 95 // { int fsync(int fd); } + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); } + SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); } + SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } + SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); } + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, caddr_t val, int valsize); } + SYS_LISTEN = 106 // { int listen(int s, int backlog); } + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); } + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, caddr_t val, int *avalsize); } + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, struct timezone *tzp); } + SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } + SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } + SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } + SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } + SYS_RENAME = 128 // { int rename(char *from, char *to); } + SYS_FLOCK = 131 // { int flock(int fd, int how); } + SYS_MKFIFO 
= 132 // { int mkfifo(char *path, int mode); } + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen); } + SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } + SYS_RMDIR = 137 // { int rmdir(char *path); } + SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); } + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, struct timeval *olddelta); } + SYS_SETSID = 147 // { int setsid(void); } + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, caddr_t arg); } + SYS_STATFS = 157 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 158 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFH = 161 // { int getfh(char *fname, struct fhandle *fhp); } + SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, struct rtprio *rtp); } + SYS_EXTPREAD = 173 // { ssize_t extpread(int fd, void *buf, size_t nbyte, int flags, off_t offset); } + SYS_EXTPWRITE = 174 // { ssize_t extpwrite(int fd, const void *buf, size_t nbyte, int flags, off_t offset); } + SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int setgid(gid_t gid); } + SYS_SETEGID = 182 // { int setegid(gid_t egid); } + SYS_SETEUID = 183 // { int seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } + SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int + SYS_MMAP = 197 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); } SYS_LSEEK = 199 // { off_t lseek(int fd, int pad, off_t offset, int whence); } SYS_TRUNCATE = 200 // { int truncate(char *path, int pad, off_t length); } SYS_FTRUNCATE = 201 // { int ftruncate(int fd, int pad, off_t length); } @@ -161,8 +157,8 @@ const ( SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_EXTPREADV = 289 // { ssize_t extpreadv(int fd, struct iovec *iovp, u_int iovcnt, int flags, off_t offset); } - SYS_EXTPWRITEV = 290 // { ssize_t extpwritev(int fd, struct iovec *iovp,u_int iovcnt, int flags, off_t offset); } + SYS_EXTPREADV = 289 // { ssize_t extpreadv(int fd, const struct iovec *iovp, int iovcnt, int flags, off_t offset); } + SYS_EXTPWRITEV = 290 // { ssize_t extpwritev(int fd, const struct iovec *iovp, int iovcnt, int flags, off_t offset); } SYS_FHSTATFS = 297 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } SYS_MODNEXT = 300 // { int modnext(int modid); } @@ -225,7 +221,7 @@ const ( SYS_KQUEUE = 362 // { int kqueue(void); } SYS_KEVENT = 363 // { int kevent(int fd, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } SYS_KENV = 390 // { int kenv(int what, const char *name, char *value, int len); } - SYS_LCHFLAGS = 391 // { int lchflags(char *path, int flags); } + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, u_long flags); } SYS_UUIDGEN = 392 // { 
int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_VARSYM_SET = 450 // { int varsym_set(int level, const char *name, const char *data); } @@ -302,7 +298,7 @@ const ( SYS_VMM_GUEST_CTL = 534 // { int vmm_guest_ctl(int op, struct vmm_guest_options *options); } SYS_VMM_GUEST_SYNC_ADDR = 535 // { int vmm_guest_sync_addr(long *dstaddr, long *srcaddr); } SYS_PROCCTL = 536 // { int procctl(idtype_t idtype, id_t id, int cmd, void *data); } - SYS_CHFLAGSAT = 537 // { int chflagsat(int fd, const char *path, int flags, int atflags);} + SYS_CHFLAGSAT = 537 // { int chflagsat(int fd, const char *path, u_long flags, int atflags);} SYS_PIPE2 = 538 // { int pipe2(int *fildes, int flags); } SYS_UTIMENSAT = 539 // { int utimensat(int fd, const char *path, const struct timespec *ts, int flags); } SYS_FUTIMENS = 540 // { int futimens(int fd, const struct timespec *ts); } @@ -312,4 +308,9 @@ const ( SYS_LWP_SETAFFINITY = 544 // { int lwp_setaffinity(pid_t pid, lwpid_t tid, const cpumask_t *mask); } SYS_LWP_GETAFFINITY = 545 // { int lwp_getaffinity(pid_t pid, lwpid_t tid, cpumask_t *mask); } SYS_LWP_CREATE2 = 546 // { int lwp_create2(struct lwp_params *params, const cpumask_t *mask); } + SYS_GETCPUCLOCKID = 547 // { int getcpuclockid(pid_t pid, lwpid_t lwp_id, clockid_t *clock_id); } + SYS_WAIT6 = 548 // { int wait6(idtype_t idtype, id_t id, int *status, int options, struct __wrusage *wrusage, siginfo_t *info); } + SYS_LWP_GETNAME = 549 // { int lwp_getname(lwpid_t tid, char *name, size_t len); } + SYS_GETRANDOM = 550 // { ssize_t getrandom(void *buf, size_t len, unsigned flags); } + SYS___REALPATH = 551 // { ssize_t __realpath(const char *path, char *buf, size_t len); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 54559a8956d..0f5a3f6970a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -431,6 +431,8 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 054a741b7fc..36d5219ef82 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -353,6 +353,8 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 307f2ba12ec..3622ba14b4e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -395,6 +395,8 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index e9404dd545f..6193c3dc07c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -298,6 +298,8 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + 
SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 68bb6d29b8d..640b974345f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -416,6 +416,8 @@ const ( SYS_FSPICK = 4433 SYS_PIDFD_OPEN = 4434 SYS_CLONE3 = 4435 + SYS_CLOSE_RANGE = 4436 SYS_OPENAT2 = 4437 SYS_PIDFD_GETFD = 4438 + SYS_FACCESSAT2 = 4439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 4e5251185f4..3467fbb5ff1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -346,6 +346,8 @@ const ( SYS_FSPICK = 5433 SYS_PIDFD_OPEN = 5434 SYS_CLONE3 = 5435 + SYS_CLOSE_RANGE = 5436 SYS_OPENAT2 = 5437 SYS_PIDFD_GETFD = 5438 + SYS_FACCESSAT2 = 5439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 4d9aa3003b9..0fc38d5a72f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -346,6 +346,8 @@ const ( SYS_FSPICK = 5433 SYS_PIDFD_OPEN = 5434 SYS_CLONE3 = 5435 + SYS_CLOSE_RANGE = 5436 SYS_OPENAT2 = 5437 SYS_PIDFD_GETFD = 5438 + SYS_FACCESSAT2 = 5439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 64af0707d5a..999fd55bccb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -416,6 +416,8 @@ const ( SYS_FSPICK = 4433 SYS_PIDFD_OPEN = 4434 SYS_CLONE3 = 4435 + SYS_CLOSE_RANGE = 4436 SYS_OPENAT2 = 4437 SYS_PIDFD_GETFD = 4438 + SYS_FACCESSAT2 = 4439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index cc3c067ba31..1df0d799355 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -395,6 +395,8 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 4050ff98361..4db39cca4da 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -395,6 +395,8 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 529abb6a7f4..e6927401446 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -297,6 +297,8 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 27665001092..a585aec4e79 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -360,6 +360,8 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 
+ SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 4dc82bb2492..d047e567afc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -374,6 +374,8 @@ const ( SYS_FSMOUNT = 432 SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go new file mode 100644 index 00000000000..5c08d573b3e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -0,0 +1,220 @@ +// go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build mips64,openbsd + +package unix + +const ( + SYS_EXIT = 1 // { void sys_exit(int rval); } + SYS_FORK = 2 // { int sys_fork(void); } + SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int sys_open(const char *path, int flags, ... mode_t mode); } + SYS_CLOSE = 6 // { int sys_close(int fd); } + SYS_GETENTROPY = 7 // { int sys_getentropy(void *buf, size_t nbyte); } + SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, size_t psize); } + SYS_LINK = 9 // { int sys_link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int sys_unlink(const char *path); } + SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage); } + SYS_CHDIR = 12 // { int sys_chdir(const char *path); } + SYS_FCHDIR = 13 // { int sys_fchdir(int fd); } + SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, dev_t dev); } + SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, gid_t gid); } + SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break + SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); } + SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, struct rusage *rusage); } + SYS_GETPID = 20 // { pid_t sys_getpid(void); } + SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, int flags, void *data); } + SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int sys_setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t sys_getuid(void); } + SYS_GETEUID = 25 // { uid_t sys_geteuid(void); } + SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, const struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); } + SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, socklen_t *anamelen); } + SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_ACCESS = 33 // { int sys_access(const char *path, int amode); } + SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); } + 
SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); } + SYS_SYNC = 36 // { void sys_sync(void); } + SYS_MSYSCALL = 37 // { int sys_msyscall(void *addr, size_t len); } + SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); } + SYS_GETPPID = 39 // { pid_t sys_getppid(void); } + SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); } + SYS_DUP = 41 // { int sys_dup(int fd); } + SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, struct stat *buf, int flag); } + SYS_GETEGID = 43 // { gid_t sys_getegid(void); } + SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, u_long offset, u_int scale); } + SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, int facs, pid_t pid); } + SYS_SIGACTION = 46 // { int sys_sigaction(int signum, const struct sigaction *nsa, struct sigaction *osa); } + SYS_GETGID = 47 // { gid_t sys_getgid(void); } + SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); } + SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); } + SYS_ACCT = 51 // { int sys_acct(const char *path); } + SYS_SIGPENDING = 52 // { int sys_sigpending(void); } + SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); } + SYS_IOCTL = 54 // { int sys_ioctl(int fd, u_long com, ... void *data); } + SYS_REBOOT = 55 // { int sys_reboot(int opt); } + SYS_REVOKE = 56 // { int sys_revoke(const char *path); } + SYS_SYMLINK = 57 // { int sys_symlink(const char *path, const char *link); } + SYS_READLINK = 58 // { ssize_t sys_readlink(const char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int sys_execve(const char *path, char * const *argp, char * const *envp); } + SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); } + SYS_CHROOT = 61 // { int sys_chroot(const char *path); } + SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, int flags); } + SYS_STATFS = 63 // { int sys_statfs(const char *path, struct statfs *buf); } + SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, struct statfs *buf); } + SYS_VFORK = 66 // { int sys_vfork(void); } + SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, const struct timezone *tzp); } + SYS_SETITIMER = 69 // { int sys_setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); } + SYS_GETITIMER = 70 // { int sys_getitimer(int which, struct itimerval *itv); } + SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_KEVENT = 72 // { int sys_kevent(int fd, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, int behav); } + SYS_UTIMES = 76 // { int sys_utimes(const char *path, const struct timeval *tptr); } + SYS_FUTIMES = 77 // { int sys_futimes(int fd, const struct timeval *tptr); } + SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, const gid_t *gidset); } + SYS_GETPGRP = 81 // { int sys_getpgrp(void); } + SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, pid_t pgid); } + SYS_FUTEX = 83 // { int sys_futex(uint32_t 
*f, int op, int val, const struct timespec *timeout, uint32_t *g); } + SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, const struct timespec *times, int flag); } + SYS_FUTIMENS = 85 // { int sys_futimens(int fd, const struct timespec *times); } + SYS_KBIND = 86 // { int sys_kbind(const struct __kbind *param, size_t psize, int64_t proc_cookie); } + SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_DUP2 = 90 // { int sys_dup2(int from, int to); } + SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... void *arg); } + SYS_ACCEPT4 = 93 // { int sys_accept4(int s, struct sockaddr *name, socklen_t *anamelen, int flags); } + SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, clockid_t clock_id, const struct timespec *tp, void *lock, const int *abort); } + SYS_FSYNC = 95 // { int sys_fsync(int fd); } + SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); } + SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); } + SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); } + SYS_PIPE2 = 101 // { int sys_pipe2(int *fdp, int flags); } + SYS_DUP3 = 102 // { int sys_dup3(int from, int to, int flags); } + SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); } + SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, const void *val, socklen_t valsize); } + SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); } + SYS_CHFLAGSAT = 107 // { int sys_chflagsat(int fd, const char *path, u_int flags, int atflags); } + SYS_PLEDGE = 108 // { int sys_pledge(const char *promises, const char *execpromises); } + SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); } + SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); } + SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } + SYS_SENDSYSLOG = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, int flags); } + SYS_UNVEIL = 114 // { int sys_unveil(const char *path, const char *permissions); } + SYS___REALPATH = 115 // { int sys___realpath(const char *pathname, char *resolved); } + SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); } + SYS_THRKILL = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); } + SYS_READV = 120 // { ssize_t sys_readv(int fd, const struct iovec *iovp, int iovcnt); } + SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, const struct iovec *iovp, int iovcnt); } + SYS_KILL = 122 // { int sys_kill(int pid, int signum); } + SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); } + 
SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int sys_flock(int fd, int how); } + SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); } + SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 // { int sys_rmdir(const char *path); } + SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, struct timeval *olddelta); } + SYS_GETLOGIN_R = 141 // { int sys_getlogin_r(char *namebuf, u_int namelen); } + SYS_SETSID = 147 // { int sys_setsid(void); } + SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, int uid, char *arg); } + SYS_NFSSVC = 155 // { int sys_nfssvc(int flag, void *argp); } + SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); } + SYS___TMPFD = 164 // { int sys___tmpfd(int flags); } + SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } + SYS_PWRITE = 174 // { ssize_t sys_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } + SYS_SETGID = 181 // { int sys_setgid(gid_t gid); } + SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); } + SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); } + SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); } + SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); } + SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, struct rlimit *rlp); } + SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, const struct rlimit *rlp); } + SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, int whence); } + SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, off_t length); } + SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); } + SYS_SYSCTL = 202 // { int sys_sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); } + SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); } + SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, size_t len); } + SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); } + SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); } + SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, int inherit); } + SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_ISSETUGID = 253 // { int sys_issetugid(void); } + SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); } + SYS_GETSID 
= 255 // { pid_t sys_getsid(pid_t pid); } + SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); } + SYS_PIPE = 263 // { int sys_pipe(int *fdp); } + SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); } + SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_KQUEUE = 269 // { int sys_kqueue(void); } + SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); } + SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); } + SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } + SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); } + SYS_MQUERY = 286 // { void *sys_mquery(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); } + SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, struct sigaltstack *oss); } + SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); } + SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, struct stat *sb); } + SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, union semun *arg); } + SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); } + SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); } + SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, int n); } + SYS___THREXIT = 302 // { void sys___threxit(pid_t *notdead); } + SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, siginfo_t *info, const struct timespec *timeout); } + SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); } + SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, int64_t *oldfreq); } + SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); } + SYS_GETRTABLE = 311 // { int sys_getrtable(void); } + SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, int amode, int flag); } + SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, uid_t uid, gid_t gid, int flag); } + SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, const char *path2, int flag); } + SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, mode_t mode); } + SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, mode_t mode); } + SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, mode_t mode, dev_t dev); } + SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, ... 
mode_t mode); } + SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, char *buf, size_t count); } + SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, int tofd, const char *to); } + SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, const char *link); } + SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, int flag); } + SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); } + SYS___GET_TCB = 330 // { void *sys___get_tcb(void); } +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go index 9f47b87c507..9ea0293aa8b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go @@ -92,9 +92,9 @@ type Statfs_t struct { Type uint32 Flags uint32 Fssubtype uint32 - Fstypename [16]int8 - Mntonname [1024]int8 - Mntfromname [1024]int8 + Fstypename [16]byte + Mntonname [1024]byte + Mntfromname [1024]byte Reserved [8]uint32 } @@ -145,6 +145,10 @@ type Dirent struct { _ [3]byte } +const ( + PathMax = 0x400 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 @@ -301,7 +305,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -344,7 +347,6 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Metric int32 } @@ -365,7 +367,6 @@ type IfmaMsghdr2 struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Refcount int32 } @@ -374,7 +375,6 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte Flags int32 Addrs int32 Pid int32 @@ -396,7 +396,8 @@ type RtMetrics struct { Rtt uint32 Rttvar uint32 Pksent uint32 - Filler [4]uint32 + State uint32 + Filler [3]uint32 } const ( @@ -497,3 +498,8 @@ type Clockinfo struct { Stathz int32 Profhz int32 } + +type CtlInfo struct { + Id uint32 + Name [96]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 966798a8709..255e6cbb655 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -70,7 +70,6 @@ type Stat_t struct { Uid uint32 Gid uint32 Rdev int32 - _ [4]byte Atim Timespec Mtim Timespec Ctim Timespec @@ -97,10 +96,11 @@ type Statfs_t struct { Type uint32 Flags uint32 Fssubtype uint32 - Fstypename [16]int8 - Mntonname [1024]int8 - Mntfromname [1024]int8 - Reserved [8]uint32 + Fstypename [16]byte + Mntonname [1024]byte + Mntfromname [1024]byte + Flags_ext uint32 + Reserved [7]uint32 } type Flock_t struct { @@ -133,8 +133,7 @@ type Fbootstraptransfer_t struct { type Log2phys_t struct { Flags uint32 - _ [8]byte - _ [8]byte + _ [16]byte } type Fsid struct { @@ -151,6 +150,10 @@ type Dirent struct { _ [3]byte } +const ( + PathMax = 0x400 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 @@ -221,10 +224,8 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen int32 - _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -309,7 +310,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -352,7 +352,6 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Metric int32 } @@ -373,7 +372,6 @@ type IfmaMsghdr2 struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Refcount int32 } @@ -382,7 +380,6 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte Flags int32 Addrs int32 Pid int32 @@ -404,7 +401,8 @@ type 
RtMetrics struct { Rtt uint32 Rttvar uint32 Pksent uint32 - Filler [4]uint32 + State uint32 + Filler [3]uint32 } const ( @@ -427,7 +425,6 @@ type BpfStat struct { type BpfProgram struct { Len uint32 - _ [4]byte Insns *BpfInsn } @@ -452,7 +449,6 @@ type Termios struct { Cflag uint64 Lflag uint64 Cc [20]uint8 - _ [4]byte Ispeed uint64 Ospeed uint64 } @@ -507,3 +503,8 @@ type Clockinfo struct { Stathz int32 Profhz int32 } + +type CtlInfo struct { + Id uint32 + Name [96]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go index 4fe4c9cd73e..e21c828504f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go @@ -1,6 +1,5 @@ -// NOTE: cgo can't generate struct Stat_t and struct Statfs_t yet -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_darwin.go +// cgo -godefs types_darwin.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. // +build arm,darwin @@ -31,7 +30,7 @@ type Timeval struct { Usec int32 } -type Timeval32 [0]byte +type Timeval32 struct{} type Rusage struct { Utime Timeval @@ -93,9 +92,9 @@ type Statfs_t struct { Type uint32 Flags uint32 Fssubtype uint32 - Fstypename [16]int8 - Mntonname [1024]int8 - Mntfromname [1024]int8 + Fstypename [16]byte + Mntonname [1024]byte + Mntfromname [1024]byte Reserved [8]uint32 } @@ -146,6 +145,10 @@ type Dirent struct { _ [3]byte } +const ( + PathMax = 0x400 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 @@ -302,7 +305,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -345,7 +347,6 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Metric int32 } @@ -366,7 +367,6 @@ type IfmaMsghdr2 struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Refcount int32 } @@ -375,7 +375,6 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte Flags int32 Addrs int32 Pid int32 @@ -397,7 +396,8 @@ type RtMetrics struct { Rtt uint32 Rttvar uint32 Pksent uint32 - Filler [4]uint32 + State uint32 + Filler [3]uint32 } const ( @@ -498,3 +498,8 @@ type Clockinfo struct { Stathz int32 Profhz int32 } + +type CtlInfo struct { + Id uint32 + Name [96]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 21999e4b0a2..5eff2c1c444 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -70,7 +70,6 @@ type Stat_t struct { Uid uint32 Gid uint32 Rdev int32 - _ [4]byte Atim Timespec Mtim Timespec Ctim Timespec @@ -97,10 +96,11 @@ type Statfs_t struct { Type uint32 Flags uint32 Fssubtype uint32 - Fstypename [16]int8 - Mntonname [1024]int8 - Mntfromname [1024]int8 - Reserved [8]uint32 + Fstypename [16]byte + Mntonname [1024]byte + Mntfromname [1024]byte + Flags_ext uint32 + Reserved [7]uint32 } type Flock_t struct { @@ -133,8 +133,7 @@ type Fbootstraptransfer_t struct { type Log2phys_t struct { Flags uint32 - _ [8]byte - _ [8]byte + _ [16]byte } type Fsid struct { @@ -151,6 +150,10 @@ type Dirent struct { _ [3]byte } +const ( + PathMax = 0x400 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 @@ -221,10 +224,8 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen int32 - _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -309,7 +310,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - 
_ [2]byte Data IfData } @@ -352,7 +352,6 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Metric int32 } @@ -373,7 +372,6 @@ type IfmaMsghdr2 struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Refcount int32 } @@ -382,7 +380,6 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte Flags int32 Addrs int32 Pid int32 @@ -404,7 +401,8 @@ type RtMetrics struct { Rtt uint32 Rttvar uint32 Pksent uint32 - Filler [4]uint32 + State uint32 + Filler [3]uint32 } const ( @@ -427,7 +425,6 @@ type BpfStat struct { type BpfProgram struct { Len uint32 - _ [4]byte Insns *BpfInsn } @@ -452,7 +449,6 @@ type Termios struct { Cflag uint64 Lflag uint64 Cc [20]uint8 - _ [4]byte Ispeed uint64 Ospeed uint64 } @@ -507,3 +503,8 @@ type Clockinfo struct { Stathz int32 Profhz int32 } + +type CtlInfo struct { + Id uint32 + Name [96]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index 71ea1d6d23f..c4772df23bf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -70,11 +70,11 @@ type Stat_t struct { Ctim Timespec Size int64 Blocks int64 - Blksize uint32 + _ uint32 Flags uint32 Gen uint32 Lspare int32 - Qspare1 int64 + Blksize int64 Qspare2 int64 } @@ -91,17 +91,15 @@ type Statfs_t struct { Owner uint32 Type int32 Flags int32 - _ [4]byte Syncwrites int64 Asyncwrites int64 - Fstypename [16]int8 - Mntonname [80]int8 + Fstypename [16]byte + Mntonname [80]byte Syncreads int64 Asyncreads int64 Spares1 int16 - Mntfromname [80]int8 + Mntfromname [80]byte Spares2 int16 - _ [4]byte Spare [2]int64 } @@ -202,10 +200,8 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen int32 - _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -269,7 +265,7 @@ type FdSet struct { const ( SizeofIfMsghdr = 0xb0 SizeofIfData = 0xa0 - SizeofIfaMsghdr = 0x14 + SizeofIfaMsghdr = 0x18 SizeofIfmaMsghdr = 0x10 SizeofIfAnnounceMsghdr = 0x18 SizeofRtMsghdr = 0x98 @@ -280,10 +276,9 @@ type IfMsghdr struct { Msglen uint16 Version uint8 Type uint8 - Addrs int32 - Flags int32 Index uint16 - _ [2]byte + Flags int32 + Addrs int32 Data IfData } @@ -294,7 +289,6 @@ type IfData struct { Hdrlen uint8 Recvquota uint8 Xmitquota uint8 - _ [2]byte Mtu uint64 Metric uint64 Link_state uint64 @@ -316,24 +310,23 @@ type IfData struct { } type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - _ [2]byte - Metric int32 + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Addrflags int32 + Metric int32 } type IfmaMsghdr struct { Msglen uint16 Version uint8 Type uint8 - Addrs int32 - Flags int32 Index uint16 - _ [2]byte + Flags int32 + Addrs int32 } type IfAnnounceMsghdr struct { @@ -350,7 +343,6 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte Flags int32 Addrs int32 Pid int32 @@ -374,7 +366,6 @@ type RtMetrics struct { Hopcount uint64 Mssopt uint16 Pad uint16 - _ [4]byte Msl uint64 Iwmaxsegs uint64 Iwcapsegs uint64 @@ -400,7 +391,6 @@ type BpfStat struct { type BpfProgram struct { Len uint32 - _ [4]byte Insns *BpfInsn } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 27d67ac8f57..773fc321b7f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -67,13 +67,30 @@ type 
Statx_t struct { Rdev_minor uint32 Dev_major uint32 Dev_minor uint32 - _ [14]uint64 + Mnt_id uint64 + _ uint64 + _ [12]uint64 } type Fsid struct { Val [2]int32 } +type FileCloneRange struct { + Src_fd int64 + Src_offset uint64 + Src_length uint64 + Dest_offset uint64 +} + +type FileDedupeRange struct { + Src_offset uint64 + Src_length uint64 + Dest_count uint16 + Reserved1 uint16 + Reserved2 uint32 +} + type FscryptPolicy struct { Version uint8 Contents_encryption_mode uint8 @@ -138,6 +155,48 @@ type FscryptGetKeyStatusArg struct { _ [13]uint32 } +type DmIoctl struct { + Version [3]uint32 + Data_size uint32 + Data_start uint32 + Target_count uint32 + Open_count int32 + Flags uint32 + Event_nr uint32 + _ uint32 + Dev uint64 + Name [128]byte + Uuid [129]byte + Data [7]byte +} + +type DmTargetSpec struct { + Sector_start uint64 + Length uint64 + Status int32 + Next uint32 + Target_type [16]byte +} + +type DmTargetDeps struct { + Count uint32 + _ uint32 +} + +type DmTargetVersions struct { + Next uint32 + Version [3]uint32 +} + +type DmTargetMsg struct { + Sector uint64 +} + +const ( + SizeofDmIoctl = 0x138 + SizeofDmTargetSpec = 0x28 +) + type KeyctlDHParams struct { Private int32 Prime int32 @@ -266,6 +325,15 @@ type RawSockaddrL2TPIP6 struct { Conn_id uint32 } +type RawSockaddrIUCV struct { + Family uint16 + Port uint16 + Addr uint32 + Nodeid [8]int8 + User_id [8]int8 + Name [8]int8 +} + type _Socklen uint32 type Linger struct { @@ -378,6 +446,7 @@ const ( SizeofSockaddrTIPC = 0x10 SizeofSockaddrL2TPIP = 0x10 SizeofSockaddrL2TPIP6 = 0x20 + SizeofSockaddrIUCV = 0x20 SizeofLinger = 0x8 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc @@ -485,7 +554,11 @@ const ( IFLA_NEW_IFINDEX = 0x31 IFLA_MIN_MTU = 0x32 IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x36 + IFLA_PROP_LIST = 0x34 + IFLA_ALT_IFNAME = 0x35 + IFLA_PERM_ADDRESS = 0x36 + IFLA_PROTO_DOWN_REASON = 0x37 + IFLA_MAX = 0x37 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 IFLA_INFO_XSTATS = 0x3 @@ -671,6 +744,8 @@ type InotifyEvent struct { const SizeofInotifyEvent = 0x10 +const SI_LOAD_SHIFT = 0x10 + type Utsname struct { Sysname [65]byte Nodename [65]byte @@ -696,6 +771,22 @@ const ( AT_EACCESS = 0x200 ) +type OpenHow struct { + Flags uint64 + Mode uint64 + Resolve uint64 +} + +const SizeofOpenHow = 0x18 + +const ( + RESOLVE_BENEATH = 0x8 + RESOLVE_IN_ROOT = 0x10 + RESOLVE_NO_MAGICLINKS = 0x2 + RESOLVE_NO_SYMLINKS = 0x4 + RESOLVE_NO_XDEV = 0x1 +) + type PollFd struct { Fd int32 Events int16 @@ -736,8 +827,6 @@ type SignalfdSiginfo struct { _ [28]uint8 } -const PERF_IOC_FLAG_GROUP = 0x1 - type Winsize struct { Row uint16 Col uint16 @@ -861,7 +950,10 @@ type PerfEventMmapPage struct { Time_offset uint64 Time_zero uint64 Size uint32 - _ [948]uint8 + _ uint32 + Time_cycles uint64 + Time_mask uint64 + _ [928]uint8 Data_head uint64 Data_tail uint64 Data_offset uint64 @@ -903,13 +995,13 @@ const ( ) const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + PERF_TYPE_MAX = 0x6 PERF_COUNT_HW_CPU_CYCLES = 0x0 PERF_COUNT_HW_INSTRUCTIONS = 0x1 PERF_COUNT_HW_CACHE_REFERENCES = 0x2 @@ -920,99 +1012,163 @@ const ( PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - 
PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + PERF_COUNT_HW_MAX = 0xa + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + PERF_COUNT_HW_CACHE_MAX = 0x7 + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + PERF_COUNT_HW_CACHE_OP_MAX = 0x3 + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + PERF_COUNT_HW_CACHE_RESULT_MAX = 0x2 + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + PERF_COUNT_SW_BPF_OUTPUT = 0xa + PERF_COUNT_SW_MAX = 0xb + 
PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + PERF_SAMPLE_REGS_USER = 0x1000 + PERF_SAMPLE_STACK_USER = 0x2000 + PERF_SAMPLE_WEIGHT = 0x4000 + PERF_SAMPLE_DATA_SRC = 0x8000 + PERF_SAMPLE_IDENTIFIER = 0x10000 + PERF_SAMPLE_TRANSACTION = 0x20000 + PERF_SAMPLE_REGS_INTR = 0x40000 + PERF_SAMPLE_PHYS_ADDR = 0x80000 + PERF_SAMPLE_AUX = 0x100000 + PERF_SAMPLE_CGROUP = 0x200000 + PERF_SAMPLE_MAX = 0x400000 + PERF_SAMPLE_BRANCH_USER_SHIFT = 0x0 + PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 0x1 + PERF_SAMPLE_BRANCH_HV_SHIFT = 0x2 + PERF_SAMPLE_BRANCH_ANY_SHIFT = 0x3 + PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 0x4 + PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 0x5 + PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 0x6 + PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 0x7 + PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 0x8 + PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 0x9 + PERF_SAMPLE_BRANCH_COND_SHIFT = 0xa + PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 0xb + PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 0xc + PERF_SAMPLE_BRANCH_CALL_SHIFT = 0xd + PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 0xe + PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 0xf + PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 0x10 + PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 0x11 + PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x12 + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 + PERF_SAMPLE_BRANCH_IN_TX = 0x100 + PERF_SAMPLE_BRANCH_NO_TX = 0x200 + PERF_SAMPLE_BRANCH_COND = 0x400 + PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 + PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 + PERF_SAMPLE_BRANCH_CALL = 0x2000 + PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 + PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 + PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 + PERF_SAMPLE_BRANCH_HW_INDEX = 0x20000 + PERF_SAMPLE_BRANCH_MAX = 0x40000 + PERF_BR_UNKNOWN = 0x0 + PERF_BR_COND = 0x1 + PERF_BR_UNCOND = 0x2 + PERF_BR_IND = 0x3 + PERF_BR_CALL = 0x4 + PERF_BR_IND_CALL = 0x5 + PERF_BR_RET = 0x6 + PERF_BR_SYSCALL = 0x7 + PERF_BR_SYSRET = 0x8 + PERF_BR_COND_CALL = 0x9 + PERF_BR_COND_RET = 0xa + PERF_BR_MAX = 0xb + PERF_SAMPLE_REGS_ABI_NONE = 0x0 + PERF_SAMPLE_REGS_ABI_32 = 0x1 + PERF_SAMPLE_REGS_ABI_64 = 0x2 + PERF_TXN_ELISION = 0x1 + PERF_TXN_TRANSACTION = 0x2 + PERF_TXN_SYNC = 0x4 + PERF_TXN_ASYNC = 0x8 + PERF_TXN_RETRY = 0x10 + PERF_TXN_CONFLICT = 0x20 + PERF_TXN_CAPACITY_WRITE = 0x40 + PERF_TXN_CAPACITY_READ = 0x80 + PERF_TXN_MAX = 0x100 + PERF_TXN_ABORT_MASK = -0x100000000 + PERF_TXN_ABORT_SHIFT = 0x20 + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + PERF_FORMAT_MAX = 0x10 + PERF_IOC_FLAG_GROUP = 0x1 + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP2 = 0xa + PERF_RECORD_AUX = 0xb + PERF_RECORD_ITRACE_START = 0xc + PERF_RECORD_LOST_SAMPLES = 0xd + PERF_RECORD_SWITCH = 0xe + PERF_RECORD_SWITCH_CPU_WIDE = 0xf + PERF_RECORD_NAMESPACES = 0x10 + PERF_RECORD_KSYMBOL = 0x11 + PERF_RECORD_BPF_EVENT = 0x12 + PERF_RECORD_CGROUP = 0x13 + 
PERF_RECORD_TEXT_POKE = 0x14 + PERF_RECORD_MAX = 0x15 + PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0x0 + PERF_RECORD_KSYMBOL_TYPE_BPF = 0x1 + PERF_RECORD_KSYMBOL_TYPE_OOL = 0x2 + PERF_RECORD_KSYMBOL_TYPE_MAX = 0x3 + PERF_BPF_EVENT_UNKNOWN = 0x0 + PERF_BPF_EVENT_PROG_LOAD = 0x1 + PERF_BPF_EVENT_PROG_UNLOAD = 0x2 + PERF_BPF_EVENT_MAX = 0x3 + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + PERF_CONTEXT_MAX = -0xfff ) type TCPMD5Sig struct { @@ -1318,7 +1474,7 @@ const ( NFT_MSG_DELOBJ = 0x14 NFT_MSG_GETOBJ_RESET = 0x15 NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_UNSPEC = 0x0 NFTA_LIST_ELEM = 0x1 NFTA_HOOK_UNSPEC = 0x0 NFTA_HOOK_HOOKNUM = 0x1 @@ -1689,6 +1845,21 @@ const ( NFT_NG_RANDOM = 0x1 ) +const ( + NFTA_TARGET_UNSPEC = 0x0 + NFTA_TARGET_NAME = 0x1 + NFTA_TARGET_REV = 0x2 + NFTA_TARGET_INFO = 0x3 + NFTA_MATCH_UNSPEC = 0x0 + NFTA_MATCH_NAME = 0x1 + NFTA_MATCH_REV = 0x2 + NFTA_MATCH_INFO = 0x3 + NFTA_COMPAT_UNSPEC = 0x0 + NFTA_COMPAT_NAME = 0x1 + NFTA_COMPAT_REV = 0x2 + NFTA_COMPAT_TYPE = 0x3 +) + type RTCTime struct { Sec int32 Min int32 @@ -1742,9 +1913,12 @@ type XDPMmapOffsets struct { } type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 + Rx_dropped uint64 + Rx_invalid_descs uint64 + Tx_invalid_descs uint64 + Rx_ring_full uint64 + Rx_fill_ring_empty_descs uint64 + Tx_ring_empty_descs uint64 } type XDPDesc struct { @@ -1912,6 +2086,10 @@ const ( BPF_MAP_DELETE_BATCH = 0x1b BPF_LINK_CREATE = 0x1c BPF_LINK_UPDATE = 0x1d + BPF_LINK_GET_FD_BY_ID = 0x1e + BPF_LINK_GET_NEXT_ID = 0x1f + BPF_ENABLE_STATS = 0x20 + BPF_ITER_CREATE = 0x21 BPF_MAP_TYPE_UNSPEC = 0x0 BPF_MAP_TYPE_HASH = 0x1 BPF_MAP_TYPE_ARRAY = 0x2 @@ -1939,6 +2117,7 @@ const ( BPF_MAP_TYPE_SK_STORAGE = 0x18 BPF_MAP_TYPE_DEVMAP_HASH = 0x19 BPF_MAP_TYPE_STRUCT_OPS = 0x1a + BPF_MAP_TYPE_RINGBUF = 0x1b BPF_PROG_TYPE_UNSPEC = 0x0 BPF_PROG_TYPE_SOCKET_FILTER = 0x1 BPF_PROG_TYPE_KPROBE = 0x2 @@ -1997,6 +2176,18 @@ const ( BPF_TRACE_FEXIT = 0x19 BPF_MODIFY_RETURN = 0x1a BPF_LSM_MAC = 0x1b + BPF_TRACE_ITER = 0x1c + BPF_CGROUP_INET4_GETPEERNAME = 0x1d + BPF_CGROUP_INET6_GETPEERNAME = 0x1e + BPF_CGROUP_INET4_GETSOCKNAME = 0x1f + BPF_CGROUP_INET6_GETSOCKNAME = 0x20 + BPF_XDP_DEVMAP = 0x21 + BPF_LINK_TYPE_UNSPEC = 0x0 + BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1 + BPF_LINK_TYPE_TRACING = 0x2 + BPF_LINK_TYPE_CGROUP = 0x3 + BPF_LINK_TYPE_ITER = 0x4 + BPF_LINK_TYPE_NETNS = 0x5 BPF_ANY = 0x0 BPF_NOEXIST = 0x1 BPF_EXIST = 0x2 @@ -2012,6 +2203,7 @@ const ( BPF_F_WRONLY_PROG = 0x100 BPF_F_CLONE = 0x200 BPF_F_MMAPABLE = 0x400 + BPF_STATS_RUN_TIME = 0x0 BPF_STACK_BUILD_ID_EMPTY = 0x0 BPF_STACK_BUILD_ID_VALID = 0x1 BPF_STACK_BUILD_ID_IP = 0x2 @@ -2035,16 +2227,30 @@ const ( BPF_F_CURRENT_CPU = 0xffffffff BPF_F_CTXLEN_MASK = 0xfffff00000000 BPF_F_CURRENT_NETNS = -0x1 + BPF_CSUM_LEVEL_QUERY = 0x0 + BPF_CSUM_LEVEL_INC = 0x1 + BPF_CSUM_LEVEL_DEC = 0x2 + BPF_CSUM_LEVEL_RESET = 0x3 BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_F_GET_BRANCH_RECORDS_SIZE = 0x1 + BPF_RB_NO_WAKEUP = 0x1 + BPF_RB_FORCE_WAKEUP = 0x2 + BPF_RB_AVAIL_DATA = 0x0 + BPF_RB_RING_SIZE = 0x1 + BPF_RB_CONS_POS = 0x2 + 
BPF_RB_PROD_POS = 0x3 + BPF_RINGBUF_BUSY_BIT = 0x80000000 + BPF_RINGBUF_DISCARD_BIT = 0x40000000 + BPF_RINGBUF_HDR_SZ = 0x8 BPF_ADJ_ROOM_NET = 0x0 BPF_ADJ_ROOM_MAC = 0x1 BPF_HDR_START_MAC = 0x0 @@ -2359,7 +2565,7 @@ const ( DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE = 0x3c DEVLINK_ATTR_PAD = 0x3d DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 0x3e - DEVLINK_ATTR_MAX = 0x90 + DEVLINK_ATTR_MAX = 0x94 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -2417,3 +2623,18 @@ const ( NHA_GROUPS = 0x9 NHA_MASTER = 0xa ) + +const ( + CAN_RAW_FILTER = 0x1 + CAN_RAW_ERR_FILTER = 0x2 + CAN_RAW_LOOPBACK = 0x3 + CAN_RAW_RECV_OWN_MSGS = 0x4 + CAN_RAW_FD_FRAMES = 0x5 + CAN_RAW_JOIN_FILTERS = 0x6 +) + +type WatchdogInfo struct { + Options uint32 + Version uint32 + Identity [32]uint8 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 761b67c8643..73509d896a2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -117,6 +117,11 @@ type Flock_t struct { Pid int32 } +type DmNameList struct { + Dev uint64 + Next uint32 +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 201fb3482de..45eb8738b0d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -117,6 +117,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 8051b56108f..8f6b453aba5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -121,6 +121,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index a936f21692f..b1e0c24f192 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -118,6 +118,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index aaca03dd7db..fb802c3ec9b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -120,6 +120,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 2e7f3b8ca48..30abcf3bb8e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -118,6 +118,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 16add5a2575..99761aa9a78 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -118,6 +118,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 4ed2c8e54c4..293690348f6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -120,6 +120,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 74151909976..0ca856e559b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -119,6 +119,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 046c2debd4f..f50f6482eee 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -119,6 +119,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 0f2f61a6ad1..4d3ac8d7b40 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -118,6 +118,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index cca1b6be270..349f483a80e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -117,6 +117,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x6 FADV_NOREUSE = 0x7 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 33a73bf183b..80c73beaa15 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -121,6 +121,13 @@ type Flock_t struct { _ [2]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go new file mode 100644 index 00000000000..992a1f8c018 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -0,0 +1,565 @@ +// cgo -godefs -- -fsigned-char types_openbsd.go | go run 
mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build mips64,openbsd + +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + _ Timespec +} + +type Statfs_t struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]int8 + F_mntonname [90]int8 + F_mntfromname [90]int8 + F_mntfromspec [90]int8 + _ [2]byte + Mount_info [160]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + _ [4]uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [24]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x20 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + 
PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]uint32 +} + +const ( + SizeofIfMsghdr = 0xa8 + SizeofIfData = 0x90 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x1a + SizeofRtMsghdr = 0x60 + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Xflags int32 + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Mtu uint32 + Metric uint32 + Rdomain uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Capabilities uint32 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Metric int32 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + What uint16 + Name [16]int8 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Priority uint8 + Mpls uint8 + Addrs int32 + Flags int32 + Fmask int32 + Pid int32 + Seq int32 + Errno int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Pksent uint64 + Expire int64 + Locks uint32 + Mtu uint32 + Refcnt uint32 + Hopcount uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pad uint32 +} + +type Mclpool struct{} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [2]byte +} + +type BpfTimeval struct { + Sec uint32 + Usec uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x4 + AT_SYMLINK_NOFOLLOW = 0x2 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Sigset_t uint32 + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} + +const SizeofUvmexp = 0x158 + +type Uvmexp struct { + Pagesize int32 + Pagemask int32 + Pageshift int32 + Npages int32 + Free int32 + Active int32 + Inactive int32 + Paging int32 + Wired int32 + Zeropages int32 + Reserve_pagedaemon int32 + Reserve_kernel int32 + Unused01 int32 + Vnodepages int32 + Vtextpages int32 + Freemin int32 + Freetarg int32 + Inactarg int32 + Wiredmax int32 + Anonmin int32 + Vtextmin int32 + Vnodemin int32 + Anonminpct int32 + Vtextminpct int32 + 
Vnodeminpct int32 + Nswapdev int32 + Swpages int32 + Swpginuse int32 + Swpgonly int32 + Nswget int32 + Nanon int32 + Unused05 int32 + Unused06 int32 + Faults int32 + Traps int32 + Intrs int32 + Swtch int32 + Softs int32 + Syscalls int32 + Pageins int32 + Unused07 int32 + Unused08 int32 + Pgswapin int32 + Pgswapout int32 + Forks int32 + Forks_ppwait int32 + Forks_sharevm int32 + Pga_zerohit int32 + Pga_zeromiss int32 + Unused09 int32 + Fltnoram int32 + Fltnoanon int32 + Fltnoamap int32 + Fltpgwait int32 + Fltpgrele int32 + Fltrelck int32 + Fltrelckok int32 + Fltanget int32 + Fltanretry int32 + Fltamcopy int32 + Fltnamap int32 + Fltnomap int32 + Fltlget int32 + Fltget int32 + Flt_anon int32 + Flt_acow int32 + Flt_obj int32 + Flt_prcopy int32 + Flt_przero int32 + Pdwoke int32 + Pdrevs int32 + Pdswout int32 + Pdfreed int32 + Pdscans int32 + Pdanscan int32 + Pdobscan int32 + Pdreact int32 + Pdbusy int32 + Pdpageouts int32 + Pdpending int32 + Pddeact int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 + Fpswtch int32 + Kmapent int32 +} + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Tickadj int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 23ed9fe51d4..db817f3ba82 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -88,7 +88,6 @@ type Stat_t struct { Mtim Timespec Ctim Timespec Blksize int32 - _ [4]byte Blocks int64 Fstype [16]int8 } @@ -96,7 +95,6 @@ type Stat_t struct { type Flock_t struct { Type int16 Whence int16 - _ [4]byte Start int64 Len int64 Sysid int32 @@ -138,12 +136,12 @@ type RawSockaddrInet4 struct { } type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 - X__sin6_src_id uint32 + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + _ uint32 } type RawSockaddrUnix struct { @@ -196,10 +194,8 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen int32 - _ [4]byte Accrights *int8 Accrightslen int32 _ [4]byte @@ -228,7 +224,7 @@ type IPv6MTUInfo struct { } type ICMPv6Filter struct { - X__icmp6_filt [8]uint32 + Filt [8]uint32 } const ( @@ -291,7 +287,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -299,7 +294,6 @@ type IfData struct { Type uint8 Addrlen uint8 Hdrlen uint8 - _ [1]byte Mtu uint32 Metric uint32 Baudrate uint32 @@ -324,7 +318,6 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Metric int32 } @@ -333,7 +326,6 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte Flags int32 Addrs int32 Pid int32 @@ -371,15 +363,14 @@ type BpfVersion struct { } type BpfStat struct { - Recv uint64 - Drop uint64 - Capt uint64 - Padding [13]uint64 + Recv uint64 + Drop uint64 + Capt uint64 + _ [13]uint64 } type BpfProgram struct { Len uint32 - _ [4]byte Insns *BpfInsn } diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index 847e00bc990..f54ff90aacd 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -65,6 +65,7 @@ const ( SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 32 SERVICE_ACCEPT_POWEREVENT = 64 SERVICE_ACCEPT_SESSIONCHANGE = 128 + SERVICE_ACCEPT_PRESHUTDOWN = 256 SERVICE_CONTROL_STOP = 1 
SERVICE_CONTROL_PAUSE = 2 @@ -80,6 +81,7 @@ const ( SERVICE_CONTROL_HARDWAREPROFILECHANGE = 12 SERVICE_CONTROL_POWEREVENT = 13 SERVICE_CONTROL_SESSIONCHANGE = 14 + SERVICE_CONTROL_PRESHUTDOWN = 15 SERVICE_ACTIVE = 1 SERVICE_INACTIVE = 2 diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 62cf70e9f67..bbd075dfec2 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -270,9 +270,11 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW //sys RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW //sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId +//sys ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) = kernel32.ProcessIdToSessionId //sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo +//sys SetConsoleCursorPosition(console Handle, position Coord) (err error) = kernel32.SetConsoleCursorPosition //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot @@ -303,6 +305,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys ResumeThread(thread Handle) (ret uint32, err error) [failretval==0xffffffff] = kernel32.ResumeThread //sys SetPriorityClass(process Handle, priorityClass uint32) (err error) = kernel32.SetPriorityClass //sys GetPriorityClass(process Handle) (ret uint32, err error) = kernel32.GetPriorityClass +//sys QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) = kernel32.QueryInformationJobObject //sys SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) //sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) //sys GetProcessId(process Handle) (id uint32, err error) @@ -387,11 +390,7 @@ func GetProcAddressByOrdinal(module Handle, ordinal uintptr) (proc uintptr, err r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), ordinal, 0) proc = uintptr(r0) if proc == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } @@ -1088,11 +1087,7 @@ func WSASendMsg(fd Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlap } r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err 
= syscall.EINVAL - } + err = errnoErr(e1) } return err } @@ -1104,11 +1099,7 @@ func WSARecvMsg(fd Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *Overl } r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0) if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return err } diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 809fff0b497..da1652e74b0 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1584,18 +1584,6 @@ const ( JOB_OBJECT_LIMIT_WORKINGSET = 0x00000001 ) -type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { - PerProcessUserTimeLimit int64 - PerJobUserTimeLimit int64 - LimitFlags uint32 - MinimumWorkingSetSize uintptr - MaximumWorkingSetSize uintptr - ActiveProcessLimit uint32 - Affinity uintptr - PriorityClass uint32 - SchedulingClass uint32 -} - type IO_COUNTERS struct { ReadOperationCount uint64 WriteOperationCount uint64 diff --git a/vendor/golang.org/x/sys/windows/types_windows_386.go b/vendor/golang.org/x/sys/windows/types_windows_386.go index fe0ddd03160..8bce3e2fc1b 100644 --- a/vendor/golang.org/x/sys/windows/types_windows_386.go +++ b/vendor/golang.org/x/sys/windows/types_windows_386.go @@ -20,3 +20,16 @@ type Servent struct { Port uint16 Proto *byte } + +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 + _ uint32 // pad to 8 byte boundary +} diff --git a/vendor/golang.org/x/sys/windows/types_windows_amd64.go b/vendor/golang.org/x/sys/windows/types_windows_amd64.go index 7e154c2df2d..fdddc0c70ab 100644 --- a/vendor/golang.org/x/sys/windows/types_windows_amd64.go +++ b/vendor/golang.org/x/sys/windows/types_windows_amd64.go @@ -20,3 +20,15 @@ type Servent struct { Proto *byte Port uint16 } + +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 +} diff --git a/vendor/golang.org/x/sys/windows/types_windows_arm.go b/vendor/golang.org/x/sys/windows/types_windows_arm.go index 74571e3600b..321872c3e04 100644 --- a/vendor/golang.org/x/sys/windows/types_windows_arm.go +++ b/vendor/golang.org/x/sys/windows/types_windows_arm.go @@ -20,3 +20,16 @@ type Servent struct { Port uint16 Proto *byte } + +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 + _ uint32 // pad to 8 byte boundary +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 8a562feed0d..a25f09676de 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -24,7 +24,7 @@ var ( func errnoErr(e syscall.Errno) error { 
switch e { case 0: - return nil + return syscall.EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } @@ -36,2065 +36,1595 @@ func errnoErr(e syscall.Errno) error { var ( modadvapi32 = NewLazySystemDLL("advapi32.dll") + modcrypt32 = NewLazySystemDLL("crypt32.dll") + moddnsapi = NewLazySystemDLL("dnsapi.dll") + modiphlpapi = NewLazySystemDLL("iphlpapi.dll") modkernel32 = NewLazySystemDLL("kernel32.dll") - modshell32 = NewLazySystemDLL("shell32.dll") - moduserenv = NewLazySystemDLL("userenv.dll") modmswsock = NewLazySystemDLL("mswsock.dll") - modcrypt32 = NewLazySystemDLL("crypt32.dll") - moduser32 = NewLazySystemDLL("user32.dll") - modole32 = NewLazySystemDLL("ole32.dll") + modnetapi32 = NewLazySystemDLL("netapi32.dll") modntdll = NewLazySystemDLL("ntdll.dll") + modole32 = NewLazySystemDLL("ole32.dll") modpsapi = NewLazySystemDLL("psapi.dll") - modws2_32 = NewLazySystemDLL("ws2_32.dll") - moddnsapi = NewLazySystemDLL("dnsapi.dll") - modiphlpapi = NewLazySystemDLL("iphlpapi.dll") modsecur32 = NewLazySystemDLL("secur32.dll") - modnetapi32 = NewLazySystemDLL("netapi32.dll") + modshell32 = NewLazySystemDLL("shell32.dll") + moduser32 = NewLazySystemDLL("user32.dll") + moduserenv = NewLazySystemDLL("userenv.dll") + modws2_32 = NewLazySystemDLL("ws2_32.dll") modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") - procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") - procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") - procReportEventW = modadvapi32.NewProc("ReportEventW") - procOpenSCManagerW = modadvapi32.NewProc("OpenSCManagerW") + procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") + procBuildSecurityDescriptorW = modadvapi32.NewProc("BuildSecurityDescriptorW") + procChangeServiceConfig2W = modadvapi32.NewProc("ChangeServiceConfig2W") + procChangeServiceConfigW = modadvapi32.NewProc("ChangeServiceConfigW") + procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") procCloseServiceHandle = modadvapi32.NewProc("CloseServiceHandle") + procControlService = modadvapi32.NewProc("ControlService") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procCopySid = modadvapi32.NewProc("CopySid") procCreateServiceW = modadvapi32.NewProc("CreateServiceW") - procOpenServiceW = modadvapi32.NewProc("OpenServiceW") + procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid") + procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") + procCryptGenRandom = modadvapi32.NewProc("CryptGenRandom") + procCryptReleaseContext = modadvapi32.NewProc("CryptReleaseContext") procDeleteService = modadvapi32.NewProc("DeleteService") - procStartServiceW = modadvapi32.NewProc("StartServiceW") - procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") - procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") - procControlService = modadvapi32.NewProc("ControlService") - procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") - procSetServiceStatus = 
modadvapi32.NewProc("SetServiceStatus") - procChangeServiceConfigW = modadvapi32.NewProc("ChangeServiceConfigW") - procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") - procChangeServiceConfig2W = modadvapi32.NewProc("ChangeServiceConfig2W") - procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") + procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") + procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") - procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") + procEqualSid = modadvapi32.NewProc("EqualSid") + procFreeSid = modadvapi32.NewProc("FreeSid") + procGetLengthSid = modadvapi32.NewProc("GetLengthSid") + procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") + procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") + procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") + procGetSecurityDescriptorGroup = modadvapi32.NewProc("GetSecurityDescriptorGroup") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procGetSecurityDescriptorOwner = modadvapi32.NewProc("GetSecurityDescriptorOwner") + procGetSecurityDescriptorRMControl = modadvapi32.NewProc("GetSecurityDescriptorRMControl") + procGetSecurityDescriptorSacl = modadvapi32.NewProc("GetSecurityDescriptorSacl") + procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") + procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority") + procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority") + procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount") + procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procInitializeSecurityDescriptor = modadvapi32.NewProc("InitializeSecurityDescriptor") + procInitiateSystemShutdownExW = modadvapi32.NewProc("InitiateSystemShutdownExW") + procIsValidSecurityDescriptor = modadvapi32.NewProc("IsValidSecurityDescriptor") + procIsValidSid = modadvapi32.NewProc("IsValidSid") + procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procMakeAbsoluteSD = modadvapi32.NewProc("MakeAbsoluteSD") + procMakeSelfRelativeSD = modadvapi32.NewProc("MakeSelfRelativeSD") procNotifyServiceStatusChangeW = modadvapi32.NewProc("NotifyServiceStatusChangeW") - procGetLastError = modkernel32.NewProc("GetLastError") - procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") - procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") - procFreeLibrary = modkernel32.NewProc("FreeLibrary") - procGetProcAddress = modkernel32.NewProc("GetProcAddress") - procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") - procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") - procGetVersion = modkernel32.NewProc("GetVersion") - procFormatMessageW = modkernel32.NewProc("FormatMessageW") - procExitProcess = modkernel32.NewProc("ExitProcess") - procIsWow64Process = modkernel32.NewProc("IsWow64Process") - procCreateFileW = modkernel32.NewProc("CreateFileW") - procReadFile = modkernel32.NewProc("ReadFile") - procWriteFile = modkernel32.NewProc("WriteFile") - procGetOverlappedResult = 
modkernel32.NewProc("GetOverlappedResult") - procSetFilePointer = modkernel32.NewProc("SetFilePointer") - procCloseHandle = modkernel32.NewProc("CloseHandle") - procGetStdHandle = modkernel32.NewProc("GetStdHandle") - procSetStdHandle = modkernel32.NewProc("SetStdHandle") - procFindFirstFileW = modkernel32.NewProc("FindFirstFileW") - procFindNextFileW = modkernel32.NewProc("FindNextFileW") - procFindClose = modkernel32.NewProc("FindClose") - procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") - procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") - procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") - procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") - procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") - procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") - procDeleteFileW = modkernel32.NewProc("DeleteFileW") - procMoveFileW = modkernel32.NewProc("MoveFileW") - procMoveFileExW = modkernel32.NewProc("MoveFileExW") - procLockFileEx = modkernel32.NewProc("LockFileEx") - procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") - procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") - procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") - procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") - procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime") - procGetSystemTimePreciseAsFileTime = modkernel32.NewProc("GetSystemTimePreciseAsFileTime") - procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procPostQueuedCompletionStatus = modkernel32.NewProc("PostQueuedCompletionStatus") - procCancelIo = modkernel32.NewProc("CancelIo") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procCreateProcessW = modkernel32.NewProc("CreateProcessW") - procOpenProcess = modkernel32.NewProc("OpenProcess") - procShellExecuteW = modshell32.NewProc("ShellExecuteW") - procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") - procTerminateProcess = modkernel32.NewProc("TerminateProcess") - procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") - procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") - procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") - procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") - procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") - procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") - procGetTempPathW = modkernel32.NewProc("GetTempPathW") - procCreatePipe = modkernel32.NewProc("CreatePipe") - procGetFileType = modkernel32.NewProc("GetFileType") - procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") - procCryptReleaseContext = modadvapi32.NewProc("CryptReleaseContext") - procCryptGenRandom = modadvapi32.NewProc("CryptGenRandom") - procGetEnvironmentStringsW = modkernel32.NewProc("GetEnvironmentStringsW") - procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") - procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") - procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") - procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") - procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") - procGetTickCount64 = 
modkernel32.NewProc("GetTickCount64") - procSetFileTime = modkernel32.NewProc("SetFileTime") - procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") - procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") - procGetFileAttributesExW = modkernel32.NewProc("GetFileAttributesExW") - procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") - procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") - procLocalFree = modkernel32.NewProc("LocalFree") - procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") - procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") - procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") - procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW") - procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW") - procCreateFileMappingW = modkernel32.NewProc("CreateFileMappingW") - procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") - procUnmapViewOfFile = modkernel32.NewProc("UnmapViewOfFile") - procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") - procVirtualLock = modkernel32.NewProc("VirtualLock") - procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") - procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") - procVirtualFree = modkernel32.NewProc("VirtualFree") - procVirtualProtect = modkernel32.NewProc("VirtualProtect") - procTransmitFile = modmswsock.NewProc("TransmitFile") - procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") - procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") - procCertOpenStore = modcrypt32.NewProc("CertOpenStore") - procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") + procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") + procOpenSCManagerW = modadvapi32.NewProc("OpenSCManagerW") + procOpenServiceW = modadvapi32.NewProc("OpenServiceW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") + procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") + procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") + procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") + procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") + procRegCloseKey = modadvapi32.NewProc("RegCloseKey") + procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") + procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") + procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") + procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") + procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") + procReportEventW = modadvapi32.NewProc("ReportEventW") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") + procSetNamedSecurityInfoW = modadvapi32.NewProc("SetNamedSecurityInfoW") + procSetSecurityDescriptorControl = modadvapi32.NewProc("SetSecurityDescriptorControl") + procSetSecurityDescriptorDacl = modadvapi32.NewProc("SetSecurityDescriptorDacl") + procSetSecurityDescriptorGroup = modadvapi32.NewProc("SetSecurityDescriptorGroup") + procSetSecurityDescriptorOwner = modadvapi32.NewProc("SetSecurityDescriptorOwner") + procSetSecurityDescriptorRMControl = modadvapi32.NewProc("SetSecurityDescriptorRMControl") + procSetSecurityDescriptorSacl = modadvapi32.NewProc("SetSecurityDescriptorSacl") + procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") + procSetServiceStatus 
= modadvapi32.NewProc("SetServiceStatus") + procSetThreadToken = modadvapi32.NewProc("SetThreadToken") + procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") + procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") + procStartServiceW = modadvapi32.NewProc("StartServiceW") procCertAddCertificateContextToStore = modcrypt32.NewProc("CertAddCertificateContextToStore") procCertCloseStore = modcrypt32.NewProc("CertCloseStore") - procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") - procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") procCertCreateCertificateContext = modcrypt32.NewProc("CertCreateCertificateContext") + procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") + procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext") + procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") + procCertOpenStore = modcrypt32.NewProc("CertOpenStore") + procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") procCertVerifyCertificateChainPolicy = modcrypt32.NewProc("CertVerifyCertificateChainPolicy") - procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") - procRegCloseKey = modadvapi32.NewProc("RegCloseKey") - procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") - procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") - procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") - procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") - procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") - procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") - procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") - procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") - procReadConsoleW = modkernel32.NewProc("ReadConsoleW") - procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") - procProcess32FirstW = modkernel32.NewProc("Process32FirstW") - procProcess32NextW = modkernel32.NewProc("Process32NextW") - procThread32First = modkernel32.NewProc("Thread32First") - procThread32Next = modkernel32.NewProc("Thread32Next") - procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") - procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") - procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") - procGetCurrentThreadId = modkernel32.NewProc("GetCurrentThreadId") - procCreateEventW = modkernel32.NewProc("CreateEventW") + procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") + procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") + procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") + procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") + procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") + procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") + procCancelIo = modkernel32.NewProc("CancelIo") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCloseHandle = modkernel32.NewProc("CloseHandle") + procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") procCreateEventExW = modkernel32.NewProc("CreateEventExW") - procOpenEventW = modkernel32.NewProc("OpenEventW") - procSetEvent = modkernel32.NewProc("SetEvent") - procResetEvent = modkernel32.NewProc("ResetEvent") - procPulseEvent = modkernel32.NewProc("PulseEvent") - 
procCreateMutexW = modkernel32.NewProc("CreateMutexW") - procCreateMutexExW = modkernel32.NewProc("CreateMutexExW") - procOpenMutexW = modkernel32.NewProc("OpenMutexW") - procReleaseMutex = modkernel32.NewProc("ReleaseMutex") - procSleepEx = modkernel32.NewProc("SleepEx") + procCreateEventW = modkernel32.NewProc("CreateEventW") + procCreateFileMappingW = modkernel32.NewProc("CreateFileMappingW") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW") - procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") - procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") - procSetErrorMode = modkernel32.NewProc("SetErrorMode") - procResumeThread = modkernel32.NewProc("ResumeThread") - procSetPriorityClass = modkernel32.NewProc("SetPriorityClass") - procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") - procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") - procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") - procGetProcessId = modkernel32.NewProc("GetProcessId") - procOpenThread = modkernel32.NewProc("OpenThread") - procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost") - procGetProcessWorkingSetSizeEx = modkernel32.NewProc("GetProcessWorkingSetSizeEx") - procSetProcessWorkingSetSizeEx = modkernel32.NewProc("SetProcessWorkingSetSizeEx") + procCreateMutexExW = modkernel32.NewProc("CreateMutexExW") + procCreateMutexW = modkernel32.NewProc("CreateMutexW") + procCreatePipe = modkernel32.NewProc("CreatePipe") + procCreateProcessW = modkernel32.NewProc("CreateProcessW") + procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") + procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") + procDeleteFileW = modkernel32.NewProc("DeleteFileW") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") - procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") + procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procExitProcess = modkernel32.NewProc("ExitProcess") + procFindClose = modkernel32.NewProc("FindClose") + procFindFirstFileW = modkernel32.NewProc("FindFirstFileW") procFindFirstVolumeMountPointW = modkernel32.NewProc("FindFirstVolumeMountPointW") - procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW") + procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") + procFindNextFileW = modkernel32.NewProc("FindNextFileW") procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") + procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW") procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") + procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") + procFormatMessageW = modkernel32.NewProc("FormatMessageW") + procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") + procFreeLibrary = modkernel32.NewProc("FreeLibrary") + procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") + procGetACP = modkernel32.NewProc("GetACP") + procGetCommandLineW = 
modkernel32.NewProc("GetCommandLineW") + procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") + procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") + procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") + procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") + procGetCurrentThreadId = modkernel32.NewProc("GetCurrentThreadId") procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW") - procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") + procGetEnvironmentStringsW = modkernel32.NewProc("GetEnvironmentStringsW") + procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") + procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") + procGetFileAttributesExW = modkernel32.NewProc("GetFileAttributesExW") + procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") + procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = modkernel32.NewProc("GetFileType") + procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") + procGetLastError = modkernel32.NewProc("GetLastError") procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") - procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") + procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") + procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW") + procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") + procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") + procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") + procGetProcAddress = modkernel32.NewProc("GetProcAddress") + procGetProcessId = modkernel32.NewProc("GetProcessId") + procGetProcessPreferredUILanguages = modkernel32.NewProc("GetProcessPreferredUILanguages") + procGetProcessShutdownParameters = modkernel32.NewProc("GetProcessShutdownParameters") + procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") + procGetProcessWorkingSetSizeEx = modkernel32.NewProc("GetProcessWorkingSetSizeEx") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW") + procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") + procGetStdHandle = modkernel32.NewProc("GetStdHandle") + procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") + procGetSystemPreferredUILanguages = modkernel32.NewProc("GetSystemPreferredUILanguages") + procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime") + procGetSystemTimePreciseAsFileTime = modkernel32.NewProc("GetSystemTimePreciseAsFileTime") + procGetSystemWindowsDirectoryW = modkernel32.NewProc("GetSystemWindowsDirectoryW") + procGetTempPathW = modkernel32.NewProc("GetTempPathW") + procGetThreadPreferredUILanguages = modkernel32.NewProc("GetThreadPreferredUILanguages") + procGetTickCount64 = modkernel32.NewProc("GetTickCount64") + procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation") + procGetUserPreferredUILanguages = modkernel32.NewProc("GetUserPreferredUILanguages") + procGetVersion = 
modkernel32.NewProc("GetVersion") procGetVolumeInformationByHandleW = modkernel32.NewProc("GetVolumeInformationByHandleW") + procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") procGetVolumeNameForVolumeMountPointW = modkernel32.NewProc("GetVolumeNameForVolumeMountPointW") procGetVolumePathNameW = modkernel32.NewProc("GetVolumePathNameW") procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW") + procGetWindowsDirectoryW = modkernel32.NewProc("GetWindowsDirectoryW") + procIsWow64Process = modkernel32.NewProc("IsWow64Process") + procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") + procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") + procLocalFree = modkernel32.NewProc("LocalFree") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") + procMoveFileExW = modkernel32.NewProc("MoveFileExW") + procMoveFileW = modkernel32.NewProc("MoveFileW") + procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") + procOpenEventW = modkernel32.NewProc("OpenEventW") + procOpenMutexW = modkernel32.NewProc("OpenMutexW") + procOpenProcess = modkernel32.NewProc("OpenProcess") + procOpenThread = modkernel32.NewProc("OpenThread") + procPostQueuedCompletionStatus = modkernel32.NewProc("PostQueuedCompletionStatus") + procProcess32FirstW = modkernel32.NewProc("Process32FirstW") + procProcess32NextW = modkernel32.NewProc("Process32NextW") + procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") + procPulseEvent = modkernel32.NewProc("PulseEvent") procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") + procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") + procReadConsoleW = modkernel32.NewProc("ReadConsoleW") + procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") + procReadFile = modkernel32.NewProc("ReadFile") + procReleaseMutex = modkernel32.NewProc("ReleaseMutex") + procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") + procResetEvent = modkernel32.NewProc("ResetEvent") + procResumeThread = modkernel32.NewProc("ResumeThread") + procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") + procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") + procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") + procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") + procSetErrorMode = modkernel32.NewProc("SetErrorMode") + procSetEvent = modkernel32.NewProc("SetEvent") + procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procSetFilePointer = modkernel32.NewProc("SetFilePointer") + procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") + procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") + procSetPriorityClass = modkernel32.NewProc("SetPriorityClass") + procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost") + procSetProcessShutdownParameters = modkernel32.NewProc("SetProcessShutdownParameters") + procSetProcessWorkingSetSizeEx = modkernel32.NewProc("SetProcessWorkingSetSizeEx") + procSetStdHandle = modkernel32.NewProc("SetStdHandle") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = 
modkernel32.NewProc("SetVolumeMountPointW") - procMessageBoxW = moduser32.NewProc("MessageBoxW") - procExitWindowsEx = moduser32.NewProc("ExitWindowsEx") - procInitiateSystemShutdownExW = modadvapi32.NewProc("InitiateSystemShutdownExW") - procSetProcessShutdownParameters = modkernel32.NewProc("SetProcessShutdownParameters") - procGetProcessShutdownParameters = modkernel32.NewProc("GetProcessShutdownParameters") + procSleepEx = modkernel32.NewProc("SleepEx") + procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") + procTerminateProcess = modkernel32.NewProc("TerminateProcess") + procThread32First = modkernel32.NewProc("Thread32First") + procThread32Next = modkernel32.NewProc("Thread32Next") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") + procUnmapViewOfFile = modkernel32.NewProc("UnmapViewOfFile") + procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") + procVirtualFree = modkernel32.NewProc("VirtualFree") + procVirtualLock = modkernel32.NewProc("VirtualLock") + procVirtualProtect = modkernel32.NewProc("VirtualProtect") + procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") + procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") + procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") + procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") + procWriteFile = modkernel32.NewProc("WriteFile") + procAcceptEx = modmswsock.NewProc("AcceptEx") + procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") + procTransmitFile = modmswsock.NewProc("TransmitFile") + procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") + procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") + procRtlGetNtVersionNumbers = modntdll.NewProc("RtlGetNtVersionNumbers") + procRtlGetVersion = modntdll.NewProc("RtlGetVersion") procCLSIDFromString = modole32.NewProc("CLSIDFromString") - procStringFromGUID2 = modole32.NewProc("StringFromGUID2") procCoCreateGuid = modole32.NewProc("CoCreateGuid") procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") - procRtlGetVersion = modntdll.NewProc("RtlGetVersion") - procRtlGetNtVersionNumbers = modntdll.NewProc("RtlGetNtVersionNumbers") - procGetProcessPreferredUILanguages = modkernel32.NewProc("GetProcessPreferredUILanguages") - procGetThreadPreferredUILanguages = modkernel32.NewProc("GetThreadPreferredUILanguages") - procGetUserPreferredUILanguages = modkernel32.NewProc("GetUserPreferredUILanguages") - procGetSystemPreferredUILanguages = modkernel32.NewProc("GetSystemPreferredUILanguages") + procStringFromGUID2 = modole32.NewProc("StringFromGUID2") procEnumProcesses = modpsapi.NewProc("EnumProcesses") - procWSAStartup = modws2_32.NewProc("WSAStartup") + procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") + procTranslateNameW = modsecur32.NewProc("TranslateNameW") + procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") + procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") + procShellExecuteW = modshell32.NewProc("ShellExecuteW") + procExitWindowsEx = moduser32.NewProc("ExitWindowsEx") + procMessageBoxW = moduser32.NewProc("MessageBoxW") + procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") + procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") + procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") + procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") + procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") procWSACleanup 
= modws2_32.NewProc("WSACleanup") + procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") procWSAIoctl = modws2_32.NewProc("WSAIoctl") - procsocket = modws2_32.NewProc("socket") - procsendto = modws2_32.NewProc("sendto") - procrecvfrom = modws2_32.NewProc("recvfrom") - procsetsockopt = modws2_32.NewProc("setsockopt") - procgetsockopt = modws2_32.NewProc("getsockopt") - procbind = modws2_32.NewProc("bind") - procconnect = modws2_32.NewProc("connect") - procgetsockname = modws2_32.NewProc("getsockname") - procgetpeername = modws2_32.NewProc("getpeername") - proclisten = modws2_32.NewProc("listen") - procshutdown = modws2_32.NewProc("shutdown") - procclosesocket = modws2_32.NewProc("closesocket") - procAcceptEx = modmswsock.NewProc("AcceptEx") - procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") procWSARecv = modws2_32.NewProc("WSARecv") - procWSASend = modws2_32.NewProc("WSASend") procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") + procWSASend = modws2_32.NewProc("WSASend") procWSASendTo = modws2_32.NewProc("WSASendTo") - procgethostbyname = modws2_32.NewProc("gethostbyname") - procgetservbyname = modws2_32.NewProc("getservbyname") - procntohs = modws2_32.NewProc("ntohs") - procgetprotobyname = modws2_32.NewProc("getprotobyname") - procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") - procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") - procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") - procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") - procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") - procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") - procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") - procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") - procGetACP = modkernel32.NewProc("GetACP") - procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") - procTranslateNameW = modsecur32.NewProc("TranslateNameW") - procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") - procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") - procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") - procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") - procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") - procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") - procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") - procGetLengthSid = modadvapi32.NewProc("GetLengthSid") - procCopySid = modadvapi32.NewProc("CopySid") - procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") - procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid") - procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") - procFreeSid = modadvapi32.NewProc("FreeSid") - procEqualSid = modadvapi32.NewProc("EqualSid") - procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority") - procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount") - procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority") - procIsValidSid = modadvapi32.NewProc("IsValidSid") - procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") - procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - 
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procSetThreadToken = modadvapi32.NewProc("SetThreadToken") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") - procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") - procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") - procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") - procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") - procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") - procGetWindowsDirectoryW = modkernel32.NewProc("GetWindowsDirectoryW") - procGetSystemWindowsDirectoryW = modkernel32.NewProc("GetSystemWindowsDirectoryW") - procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") - procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") - procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") - procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") - procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") - procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") - procSetNamedSecurityInfoW = modadvapi32.NewProc("SetNamedSecurityInfoW") - procBuildSecurityDescriptorW = modadvapi32.NewProc("BuildSecurityDescriptorW") - procInitializeSecurityDescriptor = modadvapi32.NewProc("InitializeSecurityDescriptor") - procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") - procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") - procGetSecurityDescriptorSacl = modadvapi32.NewProc("GetSecurityDescriptorSacl") - procGetSecurityDescriptorOwner = modadvapi32.NewProc("GetSecurityDescriptorOwner") - procGetSecurityDescriptorGroup = modadvapi32.NewProc("GetSecurityDescriptorGroup") - procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") - procGetSecurityDescriptorRMControl = modadvapi32.NewProc("GetSecurityDescriptorRMControl") - procIsValidSecurityDescriptor = modadvapi32.NewProc("IsValidSecurityDescriptor") - procSetSecurityDescriptorControl = modadvapi32.NewProc("SetSecurityDescriptorControl") - procSetSecurityDescriptorDacl = modadvapi32.NewProc("SetSecurityDescriptorDacl") - procSetSecurityDescriptorSacl = modadvapi32.NewProc("SetSecurityDescriptorSacl") - procSetSecurityDescriptorOwner = modadvapi32.NewProc("SetSecurityDescriptorOwner") - procSetSecurityDescriptorGroup = modadvapi32.NewProc("SetSecurityDescriptorGroup") - procSetSecurityDescriptorRMControl = modadvapi32.NewProc("SetSecurityDescriptorRMControl") - procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") - procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") - procMakeAbsoluteSD = modadvapi32.NewProc("MakeAbsoluteSD") - procMakeSelfRelativeSD = modadvapi32.NewProc("MakeSelfRelativeSD") - procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") + procWSAStartup = modws2_32.NewProc("WSAStartup") + procbind = modws2_32.NewProc("bind") + procclosesocket = modws2_32.NewProc("closesocket") + procconnect = modws2_32.NewProc("connect") + procgethostbyname = modws2_32.NewProc("gethostbyname") + procgetpeername = modws2_32.NewProc("getpeername") + procgetprotobyname = 
modws2_32.NewProc("getprotobyname") + procgetservbyname = modws2_32.NewProc("getservbyname") + procgetsockname = modws2_32.NewProc("getsockname") + procgetsockopt = modws2_32.NewProc("getsockopt") + proclisten = modws2_32.NewProc("listen") + procntohs = modws2_32.NewProc("ntohs") + procrecvfrom = modws2_32.NewProc("recvfrom") + procsendto = modws2_32.NewProc("sendto") + procsetsockopt = modws2_32.NewProc("setsockopt") + procshutdown = modws2_32.NewProc("shutdown") + procsocket = modws2_32.NewProc("socket") + procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") + procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") + procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") ) -func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) { + var _p0 uint32 + if resetToDefault { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + if r1 == 0 { + err = errnoErr(e1) } return } -func DeregisterEventSource(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) +func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) { + var _p0 uint32 + if disableAllPrivileges { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) +func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, 
uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func CloseServiceHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) +func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { + r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { + r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { + r1, _, e1 := 
syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + if r1 == 0 { + err = errnoErr(e1) } return } -func DeleteService(service Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) +func CloseServiceHandle(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) +func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) +func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) +func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return } - return + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) } -func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { + r1, _, e1 := 
syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) +func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) +func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) +func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), 
uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) +func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) +func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { + r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) +func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { - r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) - if r0 != 0 { - ret = syscall.Errno(r0) +func DeleteService(service Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetLastError() (lasterr error) { - r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) - if r0 != 0 { - lasterr = syscall.Errno(r0) +func DeregisterEventSource(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func LoadLibrary(libname string) (handle Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(libname) - if err != nil { - return +func 
DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { + r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) + if r1 == 0 { + err = errnoErr(e1) } - return _LoadLibrary(_p0) + return } -func _LoadLibrary(libname *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { + r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(libname) - if err != nil { - return - } - return _LoadLibraryEx(_p0, zero, flags) +func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { + r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + isEqual = r0 != 0 + return } -func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func FreeSid(sid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + if r1 != 0 { + err = errnoErr(e1) } return } -func FreeLibrary(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func GetLengthSid(sid *SID) (len uint32) { + r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + len = uint32(r0) return } -func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(procname) - if err != nil { +func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + var _p0 *uint16 + _p0, ret = syscall.UTF16PtrFromString(objectName) + if ret != nil { return } - return _GetProcAddress(module, _p0) -} - -func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { - r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) - proc = uintptr(r0) - if proc == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = 
syscall.EINVAL - } - } - return + return _getNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl, sd) } -func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) +func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetVersion() (ver uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) - ver = uint32(r0) - if ver == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) { + var _p0 uint32 + if *daclPresent { + _p0 = 1 } - return -} - -func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) { - var _p0 *uint16 - if len(buf) > 0 { - _p0 = &buf[0] + var _p1 uint32 + if *daclDefaulted { + _p1 = 1 } - r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + *daclPresent = _p0 != 0 + *daclDefaulted = _p1 != 0 + if r1 == 0 { + err = errnoErr(e1) } return } -func ExitProcess(exitcode uint32) { - syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) - return -} - -func IsWow64Process(handle Handle, isWow64 *bool) (err error) { +func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) { var _p0 uint32 - if *isWow64 { + if *groupDefaulted { _p0 = 1 - } else { - _p0 = 0 } - r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) - *isWow64 = _p0 != 0 + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, 
uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) + *groupDefaulted = _p0 != 0 if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + len = uint32(r0) return } -func ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] +func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) { + var _p0 uint32 + if *ownerDefaulted { + _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) + *ownerDefaulted = _p0 != 0 if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] - } - r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { +func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) { var _p0 uint32 - if wait { + if *saclPresent { _p0 = 1 - } else { - _p0 = 0 } - r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) + var _p1 uint32 + if *saclDefaulted { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + *saclPresent = _p0 != 0 + *saclDefaulted = _p1 != 0 if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { - r0, _, e1 := 
syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) - newlowoffset = uint32(r0) - if newlowoffset == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func CloseHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { + r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) return } -func GetStdHandle(stdhandle uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { + r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) + subAuthority = (*uint32)(unsafe.Pointer(r0)) return } -func SetStdHandle(stdhandle uint32, handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func getSidSubAuthorityCount(sid *SID) (count *uint8) { + r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + count = (*uint8)(unsafe.Pointer(r0)) return } -func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func findNextFile1(handle Handle, data *win32finddata1) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) +func ImpersonateSelf(impersonationlevel uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func FindClose(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, 
uintptr(handle), 0, 0) +func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { + r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) +func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) { + var _p0 uint32 + if forceAppsClosed { + _p0 = 1 + } + var _p1 uint32 + if rebootAfterShutdown { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { + r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + isValid = r0 != 0 return } -func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func isValidSid(sid *SID) (isValid bool) { + r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + isValid = r0 != 0 return } -func SetCurrentDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { + r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) + isWellKnown = r0 != 0 return } -func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { - r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) +func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func 
RemoveDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) +func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func DeleteFile(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) +func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func MoveFile(from *uint16, to *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) +func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) +func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { + r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), 
uintptr(unsafe.Pointer(notifier))) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) +func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetComputerName(buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func SetEndOfFile(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) +func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetSystemTimeAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) +func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func GetSystemTimePreciseAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) +func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func GetTimeZoneInformation(tzi 
*Timezoneinformation) (rc uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) - rc = uint32(r0) - if rc == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) +func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func RegCloseKey(key Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) } return } -func CancelIo(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) } return } -func CancelIoEx(s Handle, o *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = 
syscall.EINVAL - } +func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) } return } -func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { - var _p0 uint32 - if inheritHandles { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + if r0 != 0 { + regerrno = syscall.Errno(r0) } return } -func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 +func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + if r0 != 0 { + regerrno = syscall.Errno(r0) } - r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + return +} + +func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) handle = Handle(r0) if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { - r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) - if r1 <= 32 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = 
syscall.EINVAL - } +func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func RevertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { - r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) +func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { + r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) if r0 != 0 { ret = syscall.Errno(r0) } return } -func TerminateProcess(handle Handle, exitcode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + var _p0 *uint16 + _p0, ret = syscall.UTF16PtrFromString(objectName) + if ret != nil { + return + } + return _SetNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl) +} + +func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) +func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetStartupInfo(startupInfo *StartupInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) +func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) { + var _p0 uint32 + if daclPresent { + _p0 = 1 + } + var _p1 uint32 + if daclDefaulted { + _p1 = 1 + } + r1, _, e1 := 
syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) +func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) { + var _p0 uint32 + if groupDefaulted { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { +func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) { var _p0 uint32 - if bInheritHandle { + if ownerDefaulted { _p0 = 1 - } else { - _p0 = 0 } - r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { - r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) - event = uint32(r0) - if event == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { + syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) return } -func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { +func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) { var _p0 uint32 - if waitAll { + if saclPresent { _p0 = 1 - } else { - _p0 = 0 } - r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) - event = uint32(r0) - if event == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + var _p1 uint32 + if saclDefaulted { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), 
uintptr(unsafe.Pointer(buf)), 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) { + syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) return } -func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) +func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetFileType(filehandle Handle) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetThreadToken(thread *Handle, token Token) (err error) { + r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) +func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) +func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { - r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) +func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetEnvironmentStrings() (envs *uint16, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) - 
envs = (*uint16)(unsafe.Pointer(r0)) - if envs == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { + r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func FreeEnvironmentStrings(envs *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) +func CertCloseStore(store Handle, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { + r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + err = errnoErr(e1) } return } -func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { + r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + err = errnoErr(e1) } return } -func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { - var _p0 uint32 - if inheritExisting { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func CertFreeCertificateChain(ctx *CertChainContext) { + syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) return } -func DestroyEnvironmentBlock(block *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) +func CertFreeCertificateContext(ctx *CertContext) (err error) { + r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func getTickCount64() (ms uint64) { - r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) - ms = uint64(r0) +func CertGetCertificateChain(engine Handle, leaf *CertContext, 
time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { + r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func GetFileAttributes(name *uint16) (attrs uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - attrs = uint32(r0) - if attrs == INVALID_FILE_ATTRIBUTES { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { + r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + store = Handle(r0) + if store == 0 { + err = errnoErr(e1) } return } -func SetFileAttributes(name *uint16, attrs uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) +func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { + r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { + r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + same = r0 != 0 + return +} + +func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { + var _p0 *uint16 + _p0, status = syscall.UTF16PtrFromString(name) + if status != nil { + return + } + return _DnsQuery(_p0, qtype, options, extra, qrs, pr) +} + +func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { + r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), 
uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + if r0 != 0 { + status = syscall.Errno(r0) } return } -func GetCommandLine() (cmd *uint16) { - r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) - cmd = (*uint16)(unsafe.Pointer(r0)) +func DnsRecordListFree(rl *DNSRecord, freetype uint32) { + syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) return } -func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { - r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) - argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) - if argv == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { + r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) } return } -func LocalFree(hmem Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) - handle = Handle(r0) - if handle != 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { + r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) } return } -func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) +func GetIfEntry(pIfRow *MibIfRow) (errcode error) { + r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func AssignProcessToJobObject(job Handle, process Handle) (err error) { + r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func FlushFileBuffers(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) +func CancelIo(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CancelIoEx(s Handle, o *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), 
uintptr(unsafe.Pointer(buf)), uintptr(buflen)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CloseHandle(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { + r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) +func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) handle = Handle(r0) if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) - addr = uintptr(r0) - if addr == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func UnmapViewOfFile(addr uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func FlushViewOfFile(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, 
templatefile Handle) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func VirtualLock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) + if r1&0xff == 0 { + err = errnoErr(e1) } return } -func VirtualUnlock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) - value = uintptr(r0) - if value == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if initialOwner { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } 
-func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) +func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { +func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { var _p0 uint32 - if watchSubTree { + if inheritHandles { _p0 = 1 - } else { - _p0 = 0 } - r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) - store = Handle(r0) - if store == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) + if r1&0xff == 0 { + err = errnoErr(e1) } return } -func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) +func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) handle = Handle(r0) if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context 
*CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) - context = (*CertContext)(unsafe.Pointer(r0)) - if context == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) + if r1 == 0 { + err = errnoErr(e1) } return } -func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { - r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) +func DeleteFile(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertCloseStore(store Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) +func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) +func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertFreeCertificateChain(ctx *CertChainContext) { - syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) +func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { + var _p0 uint32 + if bInheritHandle { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, 
encodedLen uint32) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) - context = (*CertContext)(unsafe.Pointer(r0)) - if context == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func ExitProcess(exitcode uint32) { + syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) return } -func CertFreeCertificateContext(ctx *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) +func FindClose(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { - r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) +func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func RegCloseKey(key Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) +func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), 
uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) - if r0 != 0 { - regerrno = syscall.Errno(r0) +func findNextFile1(handle Handle, data *win32finddata1) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) +func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + if r1 == 0 { + err = errnoErr(e1) } return } -func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) - if r0 != 0 { - regerrno = syscall.Errno(r0) +func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetCurrentProcessId() (pid uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) - pid = uint32(r0) +func FindVolumeClose(findVolume Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func GetConsoleMode(console Handle, mode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) +func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetConsoleMode(console Handle, mode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) +func FlushFileBuffers(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) +func FlushViewOfFile(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) if r1 == 0 { - if e1 != 0 { - err = 
errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) { + var _p0 *uint16 + if len(buf) > 0 { + _p0 = &buf[0] + } + r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) +func FreeEnvironmentStrings(envs *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func FreeLibrary(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) +func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) +func GetACP() (acp uint32) { + r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + acp = uint32(r0) + return +} + +func GetCommandLine() (cmd *uint16) { + r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) + cmd = (*uint16)(unsafe.Pointer(r0)) + return +} + +func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), 
uintptr(unsafe.Pointer(threadEntry)), 0) +func GetComputerName(buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) +func GetConsoleMode(console Handle, mode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) +func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func GetCurrentProcessId() (pid uint32) { + r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) + pid = uint32(r0) return } @@ -2104,226 +1634,157 @@ func GetCurrentThreadId() (id uint32) { return } -func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { + r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, 
uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetDriveType(rootPathName *uint16) (driveType uint32) { + r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + driveType = uint32(r0) return } -func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetEnvironmentStrings() (envs *uint16, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) + envs = (*uint16)(unsafe.Pointer(r0)) + if envs == nil { + err = errnoErr(e1) } return } -func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func SetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) +func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ResetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) +func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { + r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func PulseEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetFileAttributes(name *uint16) (attrs uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + attrs = uint32(r0) + if attrs == INVALID_FILE_ATTRIBUTES { + err = errnoErr(e1) } return } -func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if initialOwner { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 
{ - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { + r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetFileType(filehandle Handle) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func ReleaseMutex(mutex Handle) (err error) { - r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { - var _p0 uint32 - if alertable { - _p0 = 1 - } else { - _p0 = 0 +func GetLastError() (lasterr error) { + r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) + if r0 != 0 { + lasterr = syscall.Errno(r0) } - r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) - ret = uint32(r0) return } -func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func AssignProcessToJobObject(job Handle, process Handle) (err error) { - r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { 
- err = syscall.EINVAL - } +func GetLogicalDrives() (drivesBitMask uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) + drivesBitMask = uint32(r0) + if drivesBitMask == 0 { + err = errnoErr(e1) } return } -func TerminateJobObject(job Handle, exitCode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func SetErrorMode(mode uint32) (ret uint32) { - r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) - ret = uint32(r0) +func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } return } -func ResumeThread(thread Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) - ret = uint32(r0) - if ret == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { + r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) + if r1 == 0 { + err = errnoErr(e1) } return } -func SetPriorityClass(process Handle, priorityClass uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) +func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } @@ -2332,36 +1793,25 @@ func GetPriorityClass(process Handle) (ret uint32, err error) { r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) ret = uint32(r0) if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { - r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) - ret = int(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(procname) + if err != nil { + return } - return + return _GetProcAddress(module, _p0) } -func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 
2, uintptr(ctrlEvent), uintptr(processGroupID), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) + proc = uintptr(r0) + if proc == 0 { + err = errnoErr(e1) } return } @@ -2370,1701 +1820,1236 @@ func GetProcessId(process Handle) (id uint32, err error) { r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) id = uint32(r0) if id == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetProcessPriorityBoost(process Handle, disable bool) (err error) { - var _p0 uint32 - if disable { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) { - syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0) - return -} - -func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) +func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) +func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) +func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { + r1, _, e1 := 
syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) { + syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0) return } -func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) +func GetStartupInfo(startupInfo *StartupInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func FindVolumeClose(findVolume Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetStdHandle(stdhandle uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } 
-func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + err = errnoErr(e1) } return } -func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) +func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetDriveType(rootPathName *uint16) (driveType uint32) { - r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) - driveType = uint32(r0) +func GetSystemTimeAsFileTime(time *Filetime) { + syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) return } -func GetLogicalDrives() (drivesBitMask uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) - drivesBitMask = uint32(r0) - if drivesBitMask == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func GetSystemTimePreciseAsFileTime(time *Filetime) { + syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) return } -func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + err = errnoErr(e1) } return } -func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetTempPath(buflen uint32, buf *uint16) (n 
uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) +func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func getTickCount64() (ms uint64) { + r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + ms = uint64(r0) return } -func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) + rc = uint32(r0) + if rc == 0xffffffff { + err = errnoErr(e1) } return } -func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) +func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = 
syscall.EINVAL - } +func GetVersion() (ver uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) + ver = uint32(r0) + if ver == 0 { + err = errnoErr(e1) } return } -func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) +func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) +func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { - r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) - ret = int32(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) + if r1 == 0 { + err = errnoErr(e1) } return } -func ExitWindowsEx(flags uint32, reason uint32) (err error) { - r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) +func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, 
rebootAfterShutdown bool, reason uint32) (err error) { - var _p0 uint32 - if forceAppsClosed { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if rebootAfterShutdown { - _p1 = 1 - } else { - _p1 = 0 - } - r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) +func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + err = errnoErr(e1) } return } -func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) +func IsWow64Process(handle Handle, isWow64 *bool) (err error) { + var _p0 uint32 + if *isWow64 { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) + *isWow64 = _p0 != 0 if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return } - return -} - -func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { - r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) - chars = int32(r0) - return + return _LoadLibraryEx(_p0, zero, flags) } -func coCreateGuid(pguid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func CoTaskMemFree(address unsafe.Pointer) { - syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) - return +func LoadLibrary(libname string) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return + } + return _LoadLibrary(_p0) } 
-func rtlGetVersion(info *OsVersionInfoEx) (ret error) { - r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func _LoadLibrary(libname *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { - syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) +func LocalFree(hmem Handle) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) + handle = Handle(r0) + if handle != 0 { + err = errnoErr(e1) + } return } -func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) +func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) + addr = uintptr(r0) + if addr == 0 { + err = errnoErr(e1) } return } -func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) +func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) +func MoveFile(from *uint16, to *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), 
uintptr(unsafe.Pointer(to)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { - var _p0 *uint32 - if len(processIds) > 0 { - _p0 = &processIds[0] - } - r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(processIds)), uintptr(unsafe.Pointer(bytesReturned))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { + r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + nwrite = int32(r0) + if nwrite == 0 { + err = errnoErr(e1) } return } -func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { - r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) - if r0 != 0 { - sockerr = syscall.Errno(r0) +func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func WSACleanup() (err error) { - r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) +func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + if handle 
== 0 { + err = errnoErr(e1) } return } -func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] - } - r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] - } - r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int32(r0) - if n == -1 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { - r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { - r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func PulseEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { + r0, _, e1 := 
syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { + r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func listen(s Handle, backlog int32) (err error) { - r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { + var _p0 uint32 + if watchSubTree { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func shutdown(s Handle, how int32) (err error) { - r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func Closesocket(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ReleaseMutex(mutex Handle) (err error) { + r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped 
*Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) +func RemoveDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { - syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) +func ResetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ResumeThread(thread Handle) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) + ret = uint32(r0) + if ret == 0xffffffff { + err = errnoErr(e1) } return } -func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetConsoleCursorPosition(console Handle, position Coord) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(*((*uint32)(unsafe.Pointer(&position)))), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetConsoleMode(console Handle, mode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped 
*Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetCurrentDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetHostByName(name string) (h *Hostent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return +func SetEndOfFile(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } - return _GetHostByName(_p0) + return } -func _GetHostByName(name *byte) (h *Hostent, err error) { - r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - h = (*Hostent)(unsafe.Pointer(r0)) - if h == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetServByName(name string, proto string) (s *Servent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return - } - var _p1 *byte - _p1, err = syscall.BytePtrFromString(proto) - if err != nil { - return +func SetErrorMode(mode uint32) (ret uint32) { + r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) + ret = uint32(r0) + return +} + +func SetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } - return _GetServByName(_p0, _p1) + return } -func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { - r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) - s = (*Servent)(unsafe.Pointer(r0)) - if s == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetFileAttributes(name *uint16, attrs uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func Ntohs(netshort uint16) (u uint16) { - r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) - u = uint16(r0) +func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func GetProtoByName(name string) (p *Protoent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return +func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { + r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + newlowoffset = uint32(r0) + if 
newlowoffset == 0xffffffff { + err = errnoErr(e1) } - return _GetProtoByName(_p0) + return } -func _GetProtoByName(name *byte) (p *Protoent, err error) { - r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - p = (*Protoent)(unsafe.Pointer(r0)) - if p == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - var _p0 *uint16 - _p0, status = syscall.UTF16PtrFromString(name) - if status != nil { - return +func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) + if r1 == 0 { + err = errnoErr(e1) } - return _DnsQuery(_p0, qtype, options, extra, qrs, pr) + return } -func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) - if r0 != 0 { - status = syscall.Errno(r0) +func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { + r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) + ret = int(r0) + if ret == 0 { + err = errnoErr(e1) } return } -func DnsRecordListFree(rl *DNSRecord, freetype uint32) { - syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) +func SetPriorityClass(process Handle, priorityClass uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { - r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) - same = r0 != 0 +func SetProcessPriorityBoost(process Handle, disable bool) (err error) { + var _p0 uint32 + if disable { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { - r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) - if r0 != 0 { - sockerr = syscall.Errno(r0) +func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func FreeAddrInfoW(addrinfo *AddrinfoW) { - 
syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) +func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func GetIfEntry(pIfRow *MibIfRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) - if r0 != 0 { - errcode = syscall.Errno(r0) +func SetStdHandle(stdhandle uint32, handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) - if r0 != 0 { - errcode = syscall.Errno(r0) +func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) +func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { - r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) - n = int32(r0) - if n == -1 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { + var _p0 uint32 + if alertable { + _p0 = 1 } + r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) + ret = uint32(r0) return } -func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { - r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) - if r0 != 0 { - errcode = syscall.Errno(r0) +func TerminateJobObject(job Handle, exitCode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetACP() (acp uint32) { - r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) - acp = uint32(r0) +func TerminateProcess(handle Handle, exitcode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func MultiByteToWideChar(codePage uint32, 
dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { - r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) - nwrite = int32(r0) - if nwrite == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { - r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) - if r0 != 0 { - neterr = syscall.Errno(r0) +func UnmapViewOfFile(addr uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { - r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) - if r0 != 0 { - neterr = syscall.Errno(r0) +func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + value = uintptr(r0) + if value == 0 { + err = errnoErr(e1) } return } -func NetApiBufferFree(buf *byte) (neterr error) { - r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) - if r0 != 0 { - neterr = syscall.Errno(r0) +func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) + if r1 
== 0 { + err = errnoErr(e1) } return } -func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) +func VirtualLock(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) +func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) +func VirtualUnlock(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { + var _p0 uint32 + if waitAll { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) + event = uint32(r0) + if event == 0xffffffff { + err = errnoErr(e1) } return } -func GetLengthSid(sid *SID) (len uint32) { - r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - len = uint32(r0) +func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { + r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) + event = uint32(r0) + if event == 0xffffffff { + err = errnoErr(e1) + } return } -func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) +func WriteConsole(console 
Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { + r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) +func WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) +func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { - r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) - isWellKnown = r0 != 0 +func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { + syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) return } -func FreeSid(sid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - if r1 != 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) + if r1 == 0 { + err 
= errnoErr(e1) } return } -func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { - r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) - isEqual = r0 != 0 +func NetApiBufferFree(buf *byte) (neterr error) { + r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } return } -func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { - r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) +func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { + r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) + if r0 != 0 { + neterr = syscall.Errno(r0) + } return } -func getSidSubAuthorityCount(sid *SID) (count *uint8) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - count = (*uint8)(unsafe.Pointer(r0)) +func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { + r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } return } -func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) - subAuthority = (*uint32)(unsafe.Pointer(r0)) +func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { + syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) return } -func isValidSid(sid *SID) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - isValid = r0 != 0 +func rtlGetVersion(info *OsVersionInfoEx) (ret error) { + r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } return } -func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { - r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { + r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func coCreateGuid(pguid *GUID) (ret error) { + r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func OpenThreadToken(thread Handle, access uint32, openAsSelf 
bool, token *Token) (err error) { - var _p0 uint32 - if openAsSelf { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func CoTaskMemFree(address unsafe.Pointer) { + syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) return } -func ImpersonateSelf(impersonationlevel uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { + r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) + chars = int32(r0) return } -func RevertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) +func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { + var _p0 *uint32 + if len(processIds) > 0 { + _p0 = &processIds[0] + } + r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(processIds)), uintptr(unsafe.Pointer(bytesReturned))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetThreadToken(thread *Handle, token Token) (err error) { - r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) + if r1&0xff == 0 { + err = errnoErr(e1) } return } -func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) + if r1&0xff == 0 { + err = errnoErr(e1) } return } -func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) { - var _p0 uint32 - if disableAllPrivileges { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { + r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, 
uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) + argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) + if argv == nil { + err = errnoErr(e1) } return } -func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) { - var _p0 uint32 - if resetToDefault { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { + r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { + r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) + if r1 <= 32 { + err = errnoErr(e1) } return } -func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) +func ExitWindowsEx(flags uint32, reason uint32) (err error) { + r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { - r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { + r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) + ret = int32(r0) + if ret == 0 { + err = errnoErr(e1) } return } -func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) +func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { + var _p0 uint32 + if inheritExisting { + _p0 = 1 + } + 
r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) - len = uint32(r0) - if len == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func DestroyEnvironmentBlock(block *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) - len = uint32(r0) - if len == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) + if r1 == 0 { + err = errnoErr(e1) } return } -func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) - len = uint32(r0) - if len == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func FreeAddrInfoW(addrinfo *AddrinfoW) { + syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) return } -func WTSQueryUserToken(session uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { + r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) + if r0 != 0 { + sockerr = syscall.Errno(r0) } return } -func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func WSACleanup() (err error) { + r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) + if r1 == socket_error { + err = errnoErr(e1) } return } -func WTSFreeMemory(ptr uintptr) { - syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) +func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { + r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) + n = int32(r0) + if n == -1 { + err = errnoErr(e1) + } return } -func getSecurityInfo(handle Handle, objectType 
SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { + r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) + if r1 == socket_error { + err = errnoErr(e1) } return } -func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) { - syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) +func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } return } -func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - var _p0 *uint16 - _p0, ret = syscall.UTF16PtrFromString(objectName) - if ret != nil { - return +func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + err = errnoErr(e1) } - return _getNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl, sd) + return } -func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, 
uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) } return } -func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - var _p0 *uint16 - _p0, ret = syscall.UTF16PtrFromString(objectName) - if ret != nil { - return +func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + err = errnoErr(e1) } - return _SetNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl) + return } -func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) +func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { + r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) if r0 != 0 { - ret = syscall.Errno(r0) + sockerr = syscall.Errno(r0) } return } -func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) - if r0 != 0 { - ret = syscall.Errno(r0) +func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socket_error { + err = errnoErr(e1) } return } -func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { - r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func Closesocket(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) } return } -func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, 
uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socket_error { + err = errnoErr(e1) } return } -func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) { - var _p0 uint32 - if *daclPresent { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if *daclDefaulted { - _p1 = 1 - } else { - _p1 = 0 - } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) - *daclPresent = _p0 != 0 - *daclDefaulted = _p1 != 0 - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetHostByName(name string) (h *Hostent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return } - return + return _GetHostByName(_p0) } -func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) { - var _p0 uint32 - if *saclPresent { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if *saclDefaulted { - _p1 = 1 - } else { - _p1 = 0 - } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) - *saclPresent = _p0 != 0 - *saclDefaulted = _p1 != 0 - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _GetHostByName(name *byte) (h *Hostent, err error) { + r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + h = (*Hostent)(unsafe.Pointer(r0)) + if h == nil { + err = errnoErr(e1) } return } -func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) { - var _p0 uint32 - if *ownerDefaulted { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) - *ownerDefaulted = _p0 != 0 - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + err = errnoErr(e1) } return } -func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) { - var _p0 uint32 - if *groupDefaulted { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) - *groupDefaulted = _p0 != 0 - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetProtoByName(name string) (p *Protoent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return } - return + return _GetProtoByName(_p0) } -func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { - r0, _, _ := 
syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) - len = uint32(r0) +func _GetProtoByName(name *byte) (p *Protoent, err error) { + r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + p = (*Protoent)(unsafe.Pointer(r0)) + if p == nil { + err = errnoErr(e1) + } return } -func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func GetServByName(name string, proto string) (s *Servent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return } - return + var _p1 *byte + _p1, err = syscall.BytePtrFromString(proto) + if err != nil { + return + } + return _GetServByName(_p0, _p1) } -func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) - isValid = r0 != 0 +func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { + r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + s = (*Servent)(unsafe.Pointer(r0)) + if s == nil { + err = errnoErr(e1) + } return } -func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + err = errnoErr(e1) } return } -func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) { - var _p0 uint32 - if daclPresent { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if daclDefaulted { - _p1 = 1 - } else { - _p1 = 0 - } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { + r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + if r1 == socket_error { + err = errnoErr(e1) } return } -func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) { - var _p0 uint32 - if saclPresent { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if saclDefaulted { - _p1 = 1 - } else { - _p1 = 0 - } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func listen(s Handle, backlog int32) (err error) { + r1, _, e1 := 
syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + if r1 == socket_error { + err = errnoErr(e1) } return } -func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) { - var _p0 uint32 - if ownerDefaulted { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func Ntohs(netshort uint16) (u uint16) { + r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + u = uint16(r0) return } -func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) { - var _p0 uint32 - if groupDefaulted { - _p0 = 1 - } else { - _p0 = 0 +func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int32(r0) + if n == -1 { + err = errnoErr(e1) } return } -func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { - syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) +func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) + if r1 == socket_error { + err = errnoErr(e1) + } return } -func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(str) - if err != nil { - return +func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { + r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + if r1 == socket_error { + err = errnoErr(e1) } - return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) + return } -func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func shutdown(s Handle, how int32) (err error) { + r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + if r1 == socket_error { + err = errnoErr(e1) } return } -func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen 
*uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) +func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func WTSFreeMemory(ptr uintptr) { + syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) return } -func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { - r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func WTSQueryUserToken(session uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports.go b/vendor/golang.org/x/tools/cmd/goimports/goimports.go index c195caaf8bd..27708972d1e 100644 --- a/vendor/golang.org/x/tools/cmd/goimports/goimports.go +++ b/vendor/golang.org/x/tools/cmd/goimports/goimports.go @@ -10,7 +10,6 @@ import ( "errors" "flag" "fmt" - "go/build" "go/scanner" "io" "io/ioutil" @@ -22,6 +21,7 @@ import ( "runtime/pprof" "strings" + "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/imports" ) @@ -43,14 +43,8 @@ var ( TabIndent: true, Comments: true, Fragment: true, - // This environment, and its caches, will be reused for the whole run. 
Env: &imports.ProcessEnv{ - GOPATH: build.Default.GOPATH, - GOROOT: build.Default.GOROOT, - GOFLAGS: os.Getenv("GOFLAGS"), - GO111MODULE: os.Getenv("GO111MODULE"), - GOPROXY: os.Getenv("GOPROXY"), - GOSUMDB: os.Getenv("GOSUMDB"), + GocmdRunner: &gocommand.Runner{}, }, } exitCode = 0 @@ -58,7 +52,7 @@ var ( func init() { flag.BoolVar(&options.AllErrors, "e", false, "report all errors (not just the first 10 on different lines)") - flag.StringVar(&options.Env.LocalPrefix, "local", "", "put imports beginning with this string after 3rd-party packages; comma-separated list") + flag.StringVar(&options.LocalPrefix, "local", "", "put imports beginning with this string after 3rd-party packages; comma-separated list") flag.BoolVar(&options.FormatOnly, "format-only", false, "if true, don't fix imports and only format. In this mode, goimports is effectively gofmt, with the addition that imports are grouped into sections.") } diff --git a/vendor/golang.org/x/tools/go/analysis/analysis.go b/vendor/golang.org/x/tools/go/analysis/analysis.go deleted file mode 100644 index 8c9977355c9..00000000000 --- a/vendor/golang.org/x/tools/go/analysis/analysis.go +++ /dev/null @@ -1,237 +0,0 @@ -package analysis - -import ( - "flag" - "fmt" - "go/ast" - "go/token" - "go/types" - "reflect" - - "golang.org/x/tools/internal/analysisinternal" -) - -// An Analyzer describes an analysis function and its options. -type Analyzer struct { - // The Name of the analyzer must be a valid Go identifier - // as it may appear in command-line flags, URLs, and so on. - Name string - - // Doc is the documentation for the analyzer. - // The part before the first "\n\n" is the title - // (no capital or period, max ~60 letters). - Doc string - - // Flags defines any flags accepted by the analyzer. - // The manner in which these flags are exposed to the user - // depends on the driver which runs the analyzer. - Flags flag.FlagSet - - // Run applies the analyzer to a package. - // It returns an error if the analyzer failed. - // - // On success, the Run function may return a result - // computed by the Analyzer; its type must match ResultType. - // The driver makes this result available as an input to - // another Analyzer that depends directly on this one (see - // Requires) when it analyzes the same package. - // - // To pass analysis results between packages (and thus - // potentially between address spaces), use Facts, which are - // serializable. - Run func(*Pass) (interface{}, error) - - // RunDespiteErrors allows the driver to invoke - // the Run method of this analyzer even on a - // package that contains parse or type errors. - RunDespiteErrors bool - - // Requires is a set of analyzers that must run successfully - // before this one on a given package. This analyzer may inspect - // the outputs produced by each analyzer in Requires. - // The graph over analyzers implied by Requires edges must be acyclic. - // - // Requires establishes a "horizontal" dependency between - // analysis passes (different analyzers, same package). - Requires []*Analyzer - - // ResultType is the type of the optional result of the Run function. - ResultType reflect.Type - - // FactTypes indicates that this analyzer imports and exports - // Facts of the specified concrete types. - // An analyzer that uses facts may assume that its import - // dependencies have been similarly analyzed before it runs. - // Facts must be pointers. - // - // FactTypes establishes a "vertical" dependency between - // analysis passes (same analyzer, different packages). 
- FactTypes []Fact -} - -func (a *Analyzer) String() string { return a.Name } - -func init() { - // Set the analysisinternal functions to be able to pass type errors - // to the Pass type without modifying the go/analysis API. - analysisinternal.SetTypeErrors = func(p interface{}, errors []types.Error) { - p.(*Pass).typeErrors = errors - } - analysisinternal.GetTypeErrors = func(p interface{}) []types.Error { - return p.(*Pass).typeErrors - } -} - -// A Pass provides information to the Run function that -// applies a specific analyzer to a single Go package. -// -// It forms the interface between the analysis logic and the driver -// program, and has both input and an output components. -// -// As in a compiler, one pass may depend on the result computed by another. -// -// The Run function should not call any of the Pass functions concurrently. -type Pass struct { - Analyzer *Analyzer // the identity of the current analyzer - - // syntax and type information - Fset *token.FileSet // file position information - Files []*ast.File // the abstract syntax tree of each file - OtherFiles []string // names of non-Go files of this package - Pkg *types.Package // type information about the package - TypesInfo *types.Info // type information about the syntax trees - TypesSizes types.Sizes // function for computing sizes of types - - // Report reports a Diagnostic, a finding about a specific location - // in the analyzed source code such as a potential mistake. - // It may be called by the Run function. - Report func(Diagnostic) - - // ResultOf provides the inputs to this analysis pass, which are - // the corresponding results of its prerequisite analyzers. - // The map keys are the elements of Analysis.Required, - // and the type of each corresponding value is the required - // analysis's ResultType. - ResultOf map[*Analyzer]interface{} - - // -- facts -- - - // ImportObjectFact retrieves a fact associated with obj. - // Given a value ptr of type *T, where *T satisfies Fact, - // ImportObjectFact copies the value to *ptr. - // - // ImportObjectFact panics if called after the pass is complete. - // ImportObjectFact is not concurrency-safe. - ImportObjectFact func(obj types.Object, fact Fact) bool - - // ImportPackageFact retrieves a fact associated with package pkg, - // which must be this package or one of its dependencies. - // See comments for ImportObjectFact. - ImportPackageFact func(pkg *types.Package, fact Fact) bool - - // ExportObjectFact associates a fact of type *T with the obj, - // replacing any previous fact of that type. - // - // ExportObjectFact panics if it is called after the pass is - // complete, or if obj does not belong to the package being analyzed. - // ExportObjectFact is not concurrency-safe. - ExportObjectFact func(obj types.Object, fact Fact) - - // ExportPackageFact associates a fact with the current package. - // See comments for ExportObjectFact. - ExportPackageFact func(fact Fact) - - // AllPackageFacts returns a new slice containing all package facts of the analysis's FactTypes - // in unspecified order. - // WARNING: This is an experimental API and may change in the future. - AllPackageFacts func() []PackageFact - - // AllObjectFacts returns a new slice containing all object facts of the analysis's FactTypes - // in unspecified order. - // WARNING: This is an experimental API and may change in the future. - AllObjectFacts func() []ObjectFact - - // typeErrors contains types.Errors that are associated with the pkg. 
- typeErrors []types.Error - - /* Further fields may be added in future. */ - // For example, suggested or applied refactorings. -} - -// PackageFact is a package together with an associated fact. -// WARNING: This is an experimental API and may change in the future. -type PackageFact struct { - Package *types.Package - Fact Fact -} - -// ObjectFact is an object together with an associated fact. -// WARNING: This is an experimental API and may change in the future. -type ObjectFact struct { - Object types.Object - Fact Fact -} - -// Reportf is a helper function that reports a Diagnostic using the -// specified position and formatted error message. -func (pass *Pass) Reportf(pos token.Pos, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - pass.Report(Diagnostic{Pos: pos, Message: msg}) -} - -// The Range interface provides a range. It's equivalent to and satisfied by -// ast.Node. -type Range interface { - Pos() token.Pos // position of first character belonging to the node - End() token.Pos // position of first character immediately after the node -} - -// ReportRangef is a helper function that reports a Diagnostic using the -// range provided. ast.Node values can be passed in as the range because -// they satisfy the Range interface. -func (pass *Pass) ReportRangef(rng Range, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - pass.Report(Diagnostic{Pos: rng.Pos(), End: rng.End(), Message: msg}) -} - -func (pass *Pass) String() string { - return fmt.Sprintf("%s@%s", pass.Analyzer.Name, pass.Pkg.Path()) -} - -// A Fact is an intermediate fact produced during analysis. -// -// Each fact is associated with a named declaration (a types.Object) or -// with a package as a whole. A single object or package may have -// multiple associated facts, but only one of any particular fact type. -// -// A Fact represents a predicate such as "never returns", but does not -// represent the subject of the predicate such as "function F" or "package P". -// -// Facts may be produced in one analysis pass and consumed by another -// analysis pass even if these are in different address spaces. -// If package P imports Q, all facts about Q produced during -// analysis of that package will be available during later analysis of P. -// Facts are analogous to type export data in a build system: -// just as export data enables separate compilation of several passes, -// facts enable "separate analysis". -// -// Each pass (a, p) starts with the set of facts produced by the -// same analyzer a applied to the packages directly imported by p. -// The analysis may add facts to the set, and they may be exported in turn. -// An analysis's Run function may retrieve facts by calling -// Pass.Import{Object,Package}Fact and update them using -// Pass.Export{Object,Package}Fact. -// -// A fact is logically private to its Analysis. To pass values -// between different analyzers, use the results mechanism; -// see Analyzer.Requires, Analyzer.ResultType, and Pass.ResultOf. -// -// A Fact type must be a pointer. -// Facts are encoded and decoded using encoding/gob. -// A Fact may implement the GobEncoder/GobDecoder interfaces -// to customize its encoding. Fact encoding should not fail. -// -// A Fact should not be modified once exported. 
-type Fact interface { - AFact() // dummy method to avoid type errors -} diff --git a/vendor/golang.org/x/tools/go/analysis/diagnostic.go b/vendor/golang.org/x/tools/go/analysis/diagnostic.go deleted file mode 100644 index 57eaf6faa2a..00000000000 --- a/vendor/golang.org/x/tools/go/analysis/diagnostic.go +++ /dev/null @@ -1,61 +0,0 @@ -package analysis - -import "go/token" - -// A Diagnostic is a message associated with a source location or range. -// -// An Analyzer may return a variety of diagnostics; the optional Category, -// which should be a constant, may be used to classify them. -// It is primarily intended to make it easy to look up documentation. -// -// If End is provided, the diagnostic is specified to apply to the range between -// Pos and End. -type Diagnostic struct { - Pos token.Pos - End token.Pos // optional - Category string // optional - Message string - - // SuggestedFixes contains suggested fixes for a diagnostic which can be used to perform - // edits to a file that address the diagnostic. - // TODO(matloob): Should multiple SuggestedFixes be allowed for a diagnostic? - // Diagnostics should not contain SuggestedFixes that overlap. - // Experimental: This API is experimental and may change in the future. - SuggestedFixes []SuggestedFix // optional - - // Experimental: This API is experimental and may change in the future. - Related []RelatedInformation // optional -} - -// RelatedInformation contains information related to a diagnostic. -// For example, a diagnostic that flags duplicated declarations of a -// variable may include one RelatedInformation per existing -// declaration. -type RelatedInformation struct { - Pos token.Pos - End token.Pos - Message string -} - -// A SuggestedFix is a code change associated with a Diagnostic that a user can choose -// to apply to their code. Usually the SuggestedFix is meant to fix the issue flagged -// by the diagnostic. -// TextEdits for a SuggestedFix should not overlap. TextEdits for a SuggestedFix -// should not contain edits for other packages. -// Experimental: This API is experimental and may change in the future. -type SuggestedFix struct { - // A description for this suggested fix to be shown to a user deciding - // whether to accept it. - Message string - TextEdits []TextEdit -} - -// A TextEdit represents the replacement of the code between Pos and End with the new text. -// Each TextEdit should apply to a single file. End should not be earlier in the file than Pos. -// Experimental: This API is experimental and may change in the future. -type TextEdit struct { - // For a pure insertion, End can either be set to Pos or token.NoPos. - Pos token.Pos - End token.Pos - NewText []byte -} diff --git a/vendor/golang.org/x/tools/go/analysis/doc.go b/vendor/golang.org/x/tools/go/analysis/doc.go deleted file mode 100644 index fb17a0e4154..00000000000 --- a/vendor/golang.org/x/tools/go/analysis/doc.go +++ /dev/null @@ -1,310 +0,0 @@ -/* - -Package analysis defines the interface between a modular static -analysis and an analysis driver program. - - -Background - -A static analysis is a function that inspects a package of Go code and -reports a set of diagnostics (typically mistakes in the code), and -perhaps produces other results as well, such as suggested refactorings -or other facts. An analysis that reports mistakes is informally called a -"checker". For example, the printf checker reports mistakes in -fmt.Printf format strings. 
- -A "modular" analysis is one that inspects one package at a time but can -save information from a lower-level package and use it when inspecting a -higher-level package, analogous to separate compilation in a toolchain. -The printf checker is modular: when it discovers that a function such as -log.Fatalf delegates to fmt.Printf, it records this fact, and checks -calls to that function too, including calls made from another package. - -By implementing a common interface, checkers from a variety of sources -can be easily selected, incorporated, and reused in a wide range of -driver programs including command-line tools (such as vet), text editors and -IDEs, build and test systems (such as go build, Bazel, or Buck), test -frameworks, code review tools, code-base indexers (such as SourceGraph), -documentation viewers (such as godoc), batch pipelines for large code -bases, and so on. - - -Analyzer - -The primary type in the API is Analyzer. An Analyzer statically -describes an analysis function: its name, documentation, flags, -relationship to other analyzers, and of course, its logic. - -To define an analysis, a user declares a (logically constant) variable -of type Analyzer. Here is a typical example from one of the analyzers in -the go/analysis/passes/ subdirectory: - - package unusedresult - - var Analyzer = &analysis.Analyzer{ - Name: "unusedresult", - Doc: "check for unused results of calls to some functions", - Run: run, - ... - } - - func run(pass *analysis.Pass) (interface{}, error) { - ... - } - -An analysis driver is a program such as vet that runs a set of -analyses and prints the diagnostics that they report. -The driver program must import the list of Analyzers it needs. -Typically each Analyzer resides in a separate package. -To add a new Analyzer to an existing driver, add another item to the list: - - import ( "unusedresult"; "nilness"; "printf" ) - - var analyses = []*analysis.Analyzer{ - unusedresult.Analyzer, - nilness.Analyzer, - printf.Analyzer, - } - -A driver may use the name, flags, and documentation to provide on-line -help that describes the analyses it performs. -The doc comment contains a brief one-line summary, -optionally followed by paragraphs of explanation. - -The Analyzer type has more fields besides those shown above: - - type Analyzer struct { - Name string - Doc string - Flags flag.FlagSet - Run func(*Pass) (interface{}, error) - RunDespiteErrors bool - ResultType reflect.Type - Requires []*Analyzer - FactTypes []Fact - } - -The Flags field declares a set of named (global) flag variables that -control analysis behavior. Unlike vet, analysis flags are not declared -directly in the command line FlagSet; it is up to the driver to set the -flag variables. A driver for a single analysis, a, might expose its flag -f directly on the command line as -f, whereas a driver for multiple -analyses might prefix the flag name by the analysis name (-a.f) to avoid -ambiguity. An IDE might expose the flags through a graphical interface, -and a batch pipeline might configure them from a config file. -See the "findcall" analyzer for an example of flags in action. - -The RunDespiteErrors flag indicates whether the analysis is equipped to -handle ill-typed code. If not, the driver will skip the analysis if -there were parse or type errors. -The optional ResultType field specifies the type of the result value -computed by this analysis and made available to other analyses. 
-The Requires field specifies a list of analyses upon which -this one depends and whose results it may access, and it constrains the -order in which a driver may run analyses. -The FactTypes field is discussed in the section on Modularity. -The analysis package provides a Validate function to perform basic -sanity checks on an Analyzer, such as that its Requires graph is -acyclic, its fact and result types are unique, and so on. - -Finally, the Run field contains a function to be called by the driver to -execute the analysis on a single package. The driver passes it an -instance of the Pass type. - - -Pass - -A Pass describes a single unit of work: the application of a particular -Analyzer to a particular package of Go code. -The Pass provides information to the Analyzer's Run function about the -package being analyzed, and provides operations to the Run function for -reporting diagnostics and other information back to the driver. - - type Pass struct { - Fset *token.FileSet - Files []*ast.File - OtherFiles []string - Pkg *types.Package - TypesInfo *types.Info - ResultOf map[*Analyzer]interface{} - Report func(Diagnostic) - ... - } - -The Fset, Files, Pkg, and TypesInfo fields provide the syntax trees, -type information, and source positions for a single package of Go code. - -The OtherFiles field provides the names, but not the contents, of non-Go -files such as assembly that are part of this package. See the "asmdecl" -or "buildtags" analyzers for examples of loading non-Go files and reporting -diagnostics against them. - -The ResultOf field provides the results computed by the analyzers -required by this one, as expressed in its Analyzer.Requires field. The -driver runs the required analyzers first and makes their results -available in this map. Each Analyzer must return a value of the type -described in its Analyzer.ResultType field. -For example, the "ctrlflow" analyzer returns a *ctrlflow.CFGs, which -provides a control-flow graph for each function in the package (see -golang.org/x/tools/go/cfg); the "inspect" analyzer returns a value that -enables other Analyzers to traverse the syntax trees of the package more -efficiently; and the "buildssa" analyzer constructs an SSA-form -intermediate representation. -Each of these Analyzers extends the capabilities of later Analyzers -without adding a dependency to the core API, so an analysis tool pays -only for the extensions it needs. - -The Report function emits a diagnostic, a message associated with a -source position. For most analyses, diagnostics are their primary -result. -For convenience, Pass provides a helper method, Reportf, to report a new -diagnostic by formatting a string. -Diagnostic is defined as: - - type Diagnostic struct { - Pos token.Pos - Category string // optional - Message string - } - -The optional Category field is a short identifier that classifies the -kind of message when an analysis produces several kinds of diagnostic. - -Many analyses want to associate diagnostics with a severity level. -Because Diagnostic does not have a severity level field, an Analyzer's -diagnostics effectively all have the same severity level. To separate which -diagnostics are high severity and which are low severity, expose multiple -Analyzers instead. Analyzers should also be separated when their -diagnostics belong in different groups, or could be tagged differently -before being shown to the end user. Analyzers should document their severity -level to help downstream tools surface diagnostics properly. 
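The doc.go text removed above describes the vendored go/analysis Analyzer/Pass/Report flow. Purely as an illustrative sketch of that API (the analyzer name "noprintln" and the check it performs are hypothetical, not part of this change), a complete analyzer looks roughly like this:

    package noprintln

    import (
        "go/ast"

        "golang.org/x/tools/go/analysis"
    )

    // Analyzer ties the metadata and the Run function together, as the
    // removed documentation above describes. The name and check are
    // hypothetical examples only.
    var Analyzer = &analysis.Analyzer{
        Name: "noprintln",
        Doc:  "report calls to the predeclared println function",
        Run:  run,
    }

    func run(pass *analysis.Pass) (interface{}, error) {
        for _, file := range pass.Files {
            ast.Inspect(file, func(n ast.Node) bool {
                call, ok := n.(*ast.CallExpr)
                if !ok {
                    return true
                }
                if id, ok := call.Fun.(*ast.Ident); ok && id.Name == "println" {
                    // Report emits a Diagnostic; Reportf is the formatting helper.
                    pass.Reportf(call.Pos(), "call to println")
                }
                return true
            })
        }
        return nil, nil
    }

A driver such as vet imports Analyzer values like this one and runs them over each package, collecting the reported diagnostics.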
- -Most Analyzers inspect typed Go syntax trees, but a few, such as asmdecl -and buildtag, inspect the raw text of Go source files or even non-Go -files such as assembly. To report a diagnostic against a line of a -raw text file, use the following sequence: - - content, err := ioutil.ReadFile(filename) - if err != nil { ... } - tf := fset.AddFile(filename, -1, len(content)) - tf.SetLinesForContent(content) - ... - pass.Reportf(tf.LineStart(line), "oops") - - -Modular analysis with Facts - -To improve efficiency and scalability, large programs are routinely -built using separate compilation: units of the program are compiled -separately, and recompiled only when one of their dependencies changes; -independent modules may be compiled in parallel. The same technique may -be applied to static analyses, for the same benefits. Such analyses are -described as "modular". - -A compiler’s type checker is an example of a modular static analysis. -Many other checkers we would like to apply to Go programs can be -understood as alternative or non-standard type systems. For example, -vet's printf checker infers whether a function has the "printf wrapper" -type, and it applies stricter checks to calls of such functions. In -addition, it records which functions are printf wrappers for use by -later analysis passes to identify other printf wrappers by induction. -A result such as “f is a printf wrapper” that is not interesting by -itself but serves as a stepping stone to an interesting result (such as -a diagnostic) is called a "fact". - -The analysis API allows an analysis to define new types of facts, to -associate facts of these types with objects (named entities) declared -within the current package, or with the package as a whole, and to query -for an existing fact of a given type associated with an object or -package. - -An Analyzer that uses facts must declare their types: - - var Analyzer = &analysis.Analyzer{ - Name: "printf", - FactTypes: []analysis.Fact{new(isWrapper)}, - ... - } - - type isWrapper struct{} // => *types.Func f “is a printf wrapper” - -The driver program ensures that facts for a pass’s dependencies are -generated before analyzing the package and is responsible for propagating -facts from one package to another, possibly across address spaces. -Consequently, Facts must be serializable. The API requires that drivers -use the gob encoding, an efficient, robust, self-describing binary -protocol. A fact type may implement the GobEncoder/GobDecoder interfaces -if the default encoding is unsuitable. Facts should be stateless. - -The Pass type has functions to import and export facts, -associated either with an object or with a package: - - type Pass struct { - ... - ExportObjectFact func(types.Object, Fact) - ImportObjectFact func(types.Object, Fact) bool - - ExportPackageFact func(fact Fact) - ImportPackageFact func(*types.Package, Fact) bool - } - -An Analyzer may only export facts associated with the current package or -its objects, though it may import facts from any package or object that -is an import dependency of the current package. - -Conceptually, ExportObjectFact(obj, fact) inserts fact into a hidden map keyed by -the pair (obj, TypeOf(fact)), and the ImportObjectFact function -retrieves the entry from this map and copies its value into the variable -pointed to by fact. This scheme assumes that the concrete type of fact -is a pointer; this assumption is checked by the Validate function. -See the "printf" analyzer for an example of object facts in action. 
- -Some driver implementations (such as those based on Bazel and Blaze) do -not currently apply analyzers to packages of the standard library. -Therefore, for best results, analyzer authors should not rely on -analysis facts being available for standard packages. -For example, although the printf checker is capable of deducing during -analysis of the log package that log.Printf is a printf wrapper, -this fact is built in to the analyzer so that it correctly checks -calls to log.Printf even when run in a driver that does not apply -it to standard packages. We would like to remove this limitation in future. - - -Testing an Analyzer - -The analysistest subpackage provides utilities for testing an Analyzer. -In a few lines of code, it is possible to run an analyzer on a package -of testdata files and check that it reported all the expected -diagnostics and facts (and no more). Expectations are expressed using -"// want ..." comments in the input code. - - -Standalone commands - -Analyzers are provided in the form of packages that a driver program is -expected to import. The vet command imports a set of several analyzers, -but users may wish to define their own analysis commands that perform -additional checks. To simplify the task of creating an analysis command, -either for a single analyzer or for a whole suite, we provide the -singlechecker and multichecker subpackages. - -The singlechecker package provides the main function for a command that -runs one analyzer. By convention, each analyzer such as -go/passes/findcall should be accompanied by a singlechecker-based -command such as go/analysis/passes/findcall/cmd/findcall, defined in its -entirety as: - - package main - - import ( - "golang.org/x/tools/go/analysis/passes/findcall" - "golang.org/x/tools/go/analysis/singlechecker" - ) - - func main() { singlechecker.Main(findcall.Analyzer) } - -A tool that provides multiple analyzers can use multichecker in a -similar way, giving it the list of Analyzers. - -*/ -package analysis diff --git a/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go deleted file mode 100644 index 2856df137c5..00000000000 --- a/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package inspect defines an Analyzer that provides an AST inspector -// (golang.org/x/tools/go/ast/inspect.Inspect) for the syntax trees of a -// package. It is only a building block for other analyzers. -// -// Example of use in another analysis: -// -// import ( -// "golang.org/x/tools/go/analysis" -// "golang.org/x/tools/go/analysis/passes/inspect" -// "golang.org/x/tools/go/ast/inspector" -// ) -// -// var Analyzer = &analysis.Analyzer{ -// ... -// Requires: []*analysis.Analyzer{inspect.Analyzer}, -// } -// -// func run(pass *analysis.Pass) (interface{}, error) { -// inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) -// inspect.Preorder(nil, func(n ast.Node) { -// ... 
-// }) -// return nil -// } -// -package inspect - -import ( - "reflect" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/ast/inspector" -) - -var Analyzer = &analysis.Analyzer{ - Name: "inspect", - Doc: "optimize AST traversal for later passes", - Run: run, - RunDespiteErrors: true, - ResultType: reflect.TypeOf(new(inspector.Inspector)), -} - -func run(pass *analysis.Pass) (interface{}, error) { - return inspector.New(pass.Files), nil -} diff --git a/vendor/golang.org/x/tools/go/analysis/validate.go b/vendor/golang.org/x/tools/go/analysis/validate.go deleted file mode 100644 index be98143461e..00000000000 --- a/vendor/golang.org/x/tools/go/analysis/validate.go +++ /dev/null @@ -1,97 +0,0 @@ -package analysis - -import ( - "fmt" - "reflect" - "unicode" -) - -// Validate reports an error if any of the analyzers are misconfigured. -// Checks include: -// that the name is a valid identifier; -// that the Requires graph is acyclic; -// that analyzer fact types are unique; -// that each fact type is a pointer. -func Validate(analyzers []*Analyzer) error { - // Map each fact type to its sole generating analyzer. - factTypes := make(map[reflect.Type]*Analyzer) - - // Traverse the Requires graph, depth first. - const ( - white = iota - grey - black - finished - ) - color := make(map[*Analyzer]uint8) - var visit func(a *Analyzer) error - visit = func(a *Analyzer) error { - if a == nil { - return fmt.Errorf("nil *Analyzer") - } - if color[a] == white { - color[a] = grey - - // names - if !validIdent(a.Name) { - return fmt.Errorf("invalid analyzer name %q", a) - } - - if a.Doc == "" { - return fmt.Errorf("analyzer %q is undocumented", a) - } - - // fact types - for _, f := range a.FactTypes { - if f == nil { - return fmt.Errorf("analyzer %s has nil FactType", a) - } - t := reflect.TypeOf(f) - if prev := factTypes[t]; prev != nil { - return fmt.Errorf("fact type %s registered by two analyzers: %v, %v", - t, a, prev) - } - if t.Kind() != reflect.Ptr { - return fmt.Errorf("%s: fact type %s is not a pointer", a, t) - } - factTypes[t] = a - } - - // recursion - for i, req := range a.Requires { - if err := visit(req); err != nil { - return fmt.Errorf("%s.Requires[%d]: %v", a.Name, i, err) - } - } - color[a] = black - } - - return nil - } - for _, a := range analyzers { - if err := visit(a); err != nil { - return err - } - } - - // Reject duplicates among analyzers. - // Precondition: color[a] == black. - // Postcondition: color[a] == finished. - for _, a := range analyzers { - if color[a] == finished { - return fmt.Errorf("duplicate analyzer: %s", a.Name) - } - color[a] = finished - } - - return nil -} - -func validIdent(name string) bool { - for i, r := range name { - if !(r == '_' || unicode.IsLetter(r) || i > 0 && unicode.IsDigit(r)) { - return false - } - } - return name != "" -} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go deleted file mode 100644 index af5e17feeea..00000000000 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package inspector provides helper functions for traversal over the -// syntax trees of a package, including node filtering by type, and -// materialization of the traversal stack. 
-// -// During construction, the inspector does a complete traversal and -// builds a list of push/pop events and their node type. Subsequent -// method calls that request a traversal scan this list, rather than walk -// the AST, and perform type filtering using efficient bit sets. -// -// Experiments suggest the inspector's traversals are about 2.5x faster -// than ast.Inspect, but it may take around 5 traversals for this -// benefit to amortize the inspector's construction cost. -// If efficiency is the primary concern, do not use Inspector for -// one-off traversals. -package inspector - -// There are four orthogonal features in a traversal: -// 1 type filtering -// 2 pruning -// 3 postorder calls to f -// 4 stack -// Rather than offer all of them in the API, -// only a few combinations are exposed: -// - Preorder is the fastest and has fewest features, -// but is the most commonly needed traversal. -// - Nodes and WithStack both provide pruning and postorder calls, -// even though few clients need it, because supporting two versions -// is not justified. -// More combinations could be supported by expressing them as -// wrappers around a more generic traversal, but this was measured -// and found to degrade performance significantly (30%). - -import ( - "go/ast" -) - -// An Inspector provides methods for inspecting -// (traversing) the syntax trees of a package. -type Inspector struct { - events []event -} - -// New returns an Inspector for the specified syntax trees. -func New(files []*ast.File) *Inspector { - return &Inspector{traverse(files)} -} - -// An event represents a push or a pop -// of an ast.Node during a traversal. -type event struct { - node ast.Node - typ uint64 // typeOf(node) - index int // 1 + index of corresponding pop event, or 0 if this is a pop -} - -// Preorder visits all the nodes of the files supplied to New in -// depth-first order. It calls f(n) for each node n before it visits -// n's children. -// -// The types argument, if non-empty, enables type-based filtering of -// events. The function f if is called only for nodes whose type -// matches an element of the types slice. -func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { - // Because it avoids postorder calls to f, and the pruning - // check, Preorder is almost twice as fast as Nodes. The two - // features seem to contribute similar slowdowns (~1.4x each). - - mask := maskOf(types) - for i := 0; i < len(in.events); { - ev := in.events[i] - if ev.typ&mask != 0 { - if ev.index > 0 { - f(ev.node) - } - } - i++ - } -} - -// Nodes visits the nodes of the files supplied to New in depth-first -// order. It calls f(n, true) for each node n before it visits n's -// children. If f returns true, Nodes invokes f recursively for each -// of the non-nil children of the node, followed by a call of -// f(n, false). -// -// The types argument, if non-empty, enables type-based filtering of -// events. The function f if is called only for nodes whose type -// matches an element of the types slice. 
-func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) { - mask := maskOf(types) - for i := 0; i < len(in.events); { - ev := in.events[i] - if ev.typ&mask != 0 { - if ev.index > 0 { - // push - if !f(ev.node, true) { - i = ev.index // jump to corresponding pop + 1 - continue - } - } else { - // pop - f(ev.node, false) - } - } - i++ - } -} - -// WithStack visits nodes in a similar manner to Nodes, but it -// supplies each call to f an additional argument, the current -// traversal stack. The stack's first element is the outermost node, -// an *ast.File; its last is the innermost, n. -func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) { - mask := maskOf(types) - var stack []ast.Node - for i := 0; i < len(in.events); { - ev := in.events[i] - if ev.index > 0 { - // push - stack = append(stack, ev.node) - if ev.typ&mask != 0 { - if !f(ev.node, true, stack) { - i = ev.index - stack = stack[:len(stack)-1] - continue - } - } - } else { - // pop - if ev.typ&mask != 0 { - f(ev.node, false, stack) - } - stack = stack[:len(stack)-1] - } - i++ - } -} - -// traverse builds the table of events representing a traversal. -func traverse(files []*ast.File) []event { - // Preallocate approximate number of events - // based on source file extent. - // This makes traverse faster by 4x (!). - var extent int - for _, f := range files { - extent += int(f.End() - f.Pos()) - } - // This estimate is based on the net/http package. - capacity := extent * 33 / 100 - if capacity > 1e6 { - capacity = 1e6 // impose some reasonable maximum - } - events := make([]event, 0, capacity) - - var stack []event - for _, f := range files { - ast.Inspect(f, func(n ast.Node) bool { - if n != nil { - // push - ev := event{ - node: n, - typ: typeOf(n), - index: len(events), // push event temporarily holds own index - } - stack = append(stack, ev) - events = append(events, ev) - } else { - // pop - ev := stack[len(stack)-1] - stack = stack[:len(stack)-1] - - events[ev.index].index = len(events) + 1 // make push refer to pop - - ev.index = 0 // turn ev into a pop event - events = append(events, ev) - } - return true - }) - } - - return events -} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go deleted file mode 100644 index d61301b133d..00000000000 --- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ /dev/null @@ -1,216 +0,0 @@ -package inspector - -// This file defines func typeOf(ast.Node) uint64. -// -// The initial map-based implementation was too slow; -// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196 - -import "go/ast" - -const ( - nArrayType = iota - nAssignStmt - nBadDecl - nBadExpr - nBadStmt - nBasicLit - nBinaryExpr - nBlockStmt - nBranchStmt - nCallExpr - nCaseClause - nChanType - nCommClause - nComment - nCommentGroup - nCompositeLit - nDeclStmt - nDeferStmt - nEllipsis - nEmptyStmt - nExprStmt - nField - nFieldList - nFile - nForStmt - nFuncDecl - nFuncLit - nFuncType - nGenDecl - nGoStmt - nIdent - nIfStmt - nImportSpec - nIncDecStmt - nIndexExpr - nInterfaceType - nKeyValueExpr - nLabeledStmt - nMapType - nPackage - nParenExpr - nRangeStmt - nReturnStmt - nSelectStmt - nSelectorExpr - nSendStmt - nSliceExpr - nStarExpr - nStructType - nSwitchStmt - nTypeAssertExpr - nTypeSpec - nTypeSwitchStmt - nUnaryExpr - nValueSpec -) - -// typeOf returns a distinct single-bit value that represents the type of n. 
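The Inspector removed above precomputes push/pop events so that repeated, type-filtered traversals avoid re-walking the AST. A brief usage sketch, with a placeholder file name and source literal:

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
        "go/token"

        "golang.org/x/tools/go/ast/inspector"
    )

    func main() {
        fset := token.NewFileSet()
        src := "package p\nfunc f() { g(); g() }\n"
        file, err := parser.ParseFile(fset, "p.go", src, 0)
        if err != nil {
            panic(err)
        }
        // Build the event table once; reuse it for many filtered traversals.
        insp := inspector.New([]*ast.File{file})
        // Visit only *ast.CallExpr nodes, in preorder.
        insp.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node) {
            call := n.(*ast.CallExpr)
            fmt.Println("call at", fset.Position(call.Pos()))
        })
    }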
-// -// Various implementations were benchmarked with BenchmarkNewInspector: -// GOGC=off -// - type switch 4.9-5.5ms 2.1ms -// - binary search over a sorted list of types 5.5-5.9ms 2.5ms -// - linear scan, frequency-ordered list 5.9-6.1ms 2.7ms -// - linear scan, unordered list 6.4ms 2.7ms -// - hash table 6.5ms 3.1ms -// A perfect hash seemed like overkill. -// -// The compiler's switch statement is the clear winner -// as it produces a binary tree in code, -// with constant conditions and good branch prediction. -// (Sadly it is the most verbose in source code.) -// Binary search suffered from poor branch prediction. -// -func typeOf(n ast.Node) uint64 { - // Fast path: nearly half of all nodes are identifiers. - if _, ok := n.(*ast.Ident); ok { - return 1 << nIdent - } - - // These cases include all nodes encountered by ast.Inspect. - switch n.(type) { - case *ast.ArrayType: - return 1 << nArrayType - case *ast.AssignStmt: - return 1 << nAssignStmt - case *ast.BadDecl: - return 1 << nBadDecl - case *ast.BadExpr: - return 1 << nBadExpr - case *ast.BadStmt: - return 1 << nBadStmt - case *ast.BasicLit: - return 1 << nBasicLit - case *ast.BinaryExpr: - return 1 << nBinaryExpr - case *ast.BlockStmt: - return 1 << nBlockStmt - case *ast.BranchStmt: - return 1 << nBranchStmt - case *ast.CallExpr: - return 1 << nCallExpr - case *ast.CaseClause: - return 1 << nCaseClause - case *ast.ChanType: - return 1 << nChanType - case *ast.CommClause: - return 1 << nCommClause - case *ast.Comment: - return 1 << nComment - case *ast.CommentGroup: - return 1 << nCommentGroup - case *ast.CompositeLit: - return 1 << nCompositeLit - case *ast.DeclStmt: - return 1 << nDeclStmt - case *ast.DeferStmt: - return 1 << nDeferStmt - case *ast.Ellipsis: - return 1 << nEllipsis - case *ast.EmptyStmt: - return 1 << nEmptyStmt - case *ast.ExprStmt: - return 1 << nExprStmt - case *ast.Field: - return 1 << nField - case *ast.FieldList: - return 1 << nFieldList - case *ast.File: - return 1 << nFile - case *ast.ForStmt: - return 1 << nForStmt - case *ast.FuncDecl: - return 1 << nFuncDecl - case *ast.FuncLit: - return 1 << nFuncLit - case *ast.FuncType: - return 1 << nFuncType - case *ast.GenDecl: - return 1 << nGenDecl - case *ast.GoStmt: - return 1 << nGoStmt - case *ast.Ident: - return 1 << nIdent - case *ast.IfStmt: - return 1 << nIfStmt - case *ast.ImportSpec: - return 1 << nImportSpec - case *ast.IncDecStmt: - return 1 << nIncDecStmt - case *ast.IndexExpr: - return 1 << nIndexExpr - case *ast.InterfaceType: - return 1 << nInterfaceType - case *ast.KeyValueExpr: - return 1 << nKeyValueExpr - case *ast.LabeledStmt: - return 1 << nLabeledStmt - case *ast.MapType: - return 1 << nMapType - case *ast.Package: - return 1 << nPackage - case *ast.ParenExpr: - return 1 << nParenExpr - case *ast.RangeStmt: - return 1 << nRangeStmt - case *ast.ReturnStmt: - return 1 << nReturnStmt - case *ast.SelectStmt: - return 1 << nSelectStmt - case *ast.SelectorExpr: - return 1 << nSelectorExpr - case *ast.SendStmt: - return 1 << nSendStmt - case *ast.SliceExpr: - return 1 << nSliceExpr - case *ast.StarExpr: - return 1 << nStarExpr - case *ast.StructType: - return 1 << nStructType - case *ast.SwitchStmt: - return 1 << nSwitchStmt - case *ast.TypeAssertExpr: - return 1 << nTypeAssertExpr - case *ast.TypeSpec: - return 1 << nTypeSpec - case *ast.TypeSwitchStmt: - return 1 << nTypeSwitchStmt - case *ast.UnaryExpr: - return 1 << nUnaryExpr - case *ast.ValueSpec: - return 1 << nValueSpec - } - return 0 -} - -func maskOf(nodes []ast.Node) uint64 { - 
if nodes == nil { - return 1<<64 - 1 // match all node types - } - var mask uint64 - for _, n := range nodes { - mask |= typeOf(n) - } - return mask -} diff --git a/vendor/golang.org/x/tools/go/buildutil/allpackages.go b/vendor/golang.org/x/tools/go/buildutil/allpackages.go deleted file mode 100644 index c0cb03e7bee..00000000000 --- a/vendor/golang.org/x/tools/go/buildutil/allpackages.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package buildutil provides utilities related to the go/build -// package in the standard library. -// -// All I/O is done via the build.Context file system interface, which must -// be concurrency-safe. -package buildutil // import "golang.org/x/tools/go/buildutil" - -import ( - "go/build" - "os" - "path/filepath" - "sort" - "strings" - "sync" -) - -// AllPackages returns the package path of each Go package in any source -// directory of the specified build context (e.g. $GOROOT or an element -// of $GOPATH). Errors are ignored. The results are sorted. -// All package paths are canonical, and thus may contain "/vendor/". -// -// The result may include import paths for directories that contain no -// *.go files, such as "archive" (in $GOROOT/src). -// -// All I/O is done via the build.Context file system interface, -// which must be concurrency-safe. -// -func AllPackages(ctxt *build.Context) []string { - var list []string - ForEachPackage(ctxt, func(pkg string, _ error) { - list = append(list, pkg) - }) - sort.Strings(list) - return list -} - -// ForEachPackage calls the found function with the package path of -// each Go package it finds in any source directory of the specified -// build context (e.g. $GOROOT or an element of $GOPATH). -// All package paths are canonical, and thus may contain "/vendor/". -// -// If the package directory exists but could not be read, the second -// argument to the found function provides the error. -// -// All I/O is done via the build.Context file system interface, -// which must be concurrency-safe. -// -func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) { - ch := make(chan item) - - var wg sync.WaitGroup - for _, root := range ctxt.SrcDirs() { - root := root - wg.Add(1) - go func() { - allPackages(ctxt, root, ch) - wg.Done() - }() - } - go func() { - wg.Wait() - close(ch) - }() - - // All calls to found occur in the caller's goroutine. - for i := range ch { - found(i.importPath, i.err) - } -} - -type item struct { - importPath string - err error // (optional) -} - -// We use a process-wide counting semaphore to limit -// the number of parallel calls to ReadDir. -var ioLimit = make(chan bool, 20) - -func allPackages(ctxt *build.Context, root string, ch chan<- item) { - root = filepath.Clean(root) + string(os.PathSeparator) - - var wg sync.WaitGroup - - var walkDir func(dir string) - walkDir = func(dir string) { - // Avoid .foo, _foo, and testdata directory trees. - base := filepath.Base(dir) - if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" { - return - } - - pkg := filepath.ToSlash(strings.TrimPrefix(dir, root)) - - // Prune search if we encounter any of these import paths. 
- switch pkg { - case "builtin": - return - } - - ioLimit <- true - files, err := ReadDir(ctxt, dir) - <-ioLimit - if pkg != "" || err != nil { - ch <- item{pkg, err} - } - for _, fi := range files { - fi := fi - if fi.IsDir() { - wg.Add(1) - go func() { - walkDir(filepath.Join(dir, fi.Name())) - wg.Done() - }() - } - } - } - - walkDir(root) - wg.Wait() -} - -// ExpandPatterns returns the set of packages matched by patterns, -// which may have the following forms: -// -// golang.org/x/tools/cmd/guru # a single package -// golang.org/x/tools/... # all packages beneath dir -// ... # the entire workspace. -// -// Order is significant: a pattern preceded by '-' removes matching -// packages from the set. For example, these patterns match all encoding -// packages except encoding/xml: -// -// encoding/... -encoding/xml -// -// A trailing slash in a pattern is ignored. (Path components of Go -// package names are separated by slash, not the platform's path separator.) -// -func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool { - // TODO(adonovan): support other features of 'go list': - // - "std"/"cmd"/"all" meta-packages - // - "..." not at the end of a pattern - // - relative patterns using "./" or "../" prefix - - pkgs := make(map[string]bool) - doPkg := func(pkg string, neg bool) { - if neg { - delete(pkgs, pkg) - } else { - pkgs[pkg] = true - } - } - - // Scan entire workspace if wildcards are present. - // TODO(adonovan): opt: scan only the necessary subtrees of the workspace. - var all []string - for _, arg := range patterns { - if strings.HasSuffix(arg, "...") { - all = AllPackages(ctxt) - break - } - } - - for _, arg := range patterns { - if arg == "" { - continue - } - - neg := arg[0] == '-' - if neg { - arg = arg[1:] - } - - if arg == "..." { - // ... matches all packages - for _, pkg := range all { - doPkg(pkg, neg) - } - } else if dir := strings.TrimSuffix(arg, "/..."); dir != arg { - // dir/... matches all packages beneath dir - for _, pkg := range all { - if strings.HasPrefix(pkg, dir) && - (len(pkg) == len(dir) || pkg[len(dir)] == '/') { - doPkg(pkg, neg) - } - } - } else { - // single package - doPkg(strings.TrimSuffix(arg, "/"), neg) - } - } - - return pkgs -} diff --git a/vendor/golang.org/x/tools/go/buildutil/fakecontext.go b/vendor/golang.org/x/tools/go/buildutil/fakecontext.go deleted file mode 100644 index 8b7f066739f..00000000000 --- a/vendor/golang.org/x/tools/go/buildutil/fakecontext.go +++ /dev/null @@ -1,109 +0,0 @@ -package buildutil - -import ( - "fmt" - "go/build" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "sort" - "strings" - "time" -) - -// FakeContext returns a build.Context for the fake file tree specified -// by pkgs, which maps package import paths to a mapping from file base -// names to contents. -// -// The fake Context has a GOROOT of "/go" and no GOPATH, and overrides -// the necessary file access methods to read from memory instead of the -// real file system. -// -// Unlike a real file tree, the fake one has only two levels---packages -// and files---so ReadDir("/go/src/") returns all packages under -// /go/src/ including, for instance, "math" and "math/big". -// ReadDir("/go/src/math/big") would return all the files in the -// "math/big" package. 
-// -func FakeContext(pkgs map[string]map[string]string) *build.Context { - clean := func(filename string) string { - f := path.Clean(filepath.ToSlash(filename)) - // Removing "/go/src" while respecting segment - // boundaries has this unfortunate corner case: - if f == "/go/src" { - return "" - } - return strings.TrimPrefix(f, "/go/src/") - } - - ctxt := build.Default // copy - ctxt.GOROOT = "/go" - ctxt.GOPATH = "" - ctxt.Compiler = "gc" - ctxt.IsDir = func(dir string) bool { - dir = clean(dir) - if dir == "" { - return true // needed by (*build.Context).SrcDirs - } - return pkgs[dir] != nil - } - ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) { - dir = clean(dir) - var fis []os.FileInfo - if dir == "" { - // enumerate packages - for importPath := range pkgs { - fis = append(fis, fakeDirInfo(importPath)) - } - } else { - // enumerate files of package - for basename := range pkgs[dir] { - fis = append(fis, fakeFileInfo(basename)) - } - } - sort.Sort(byName(fis)) - return fis, nil - } - ctxt.OpenFile = func(filename string) (io.ReadCloser, error) { - filename = clean(filename) - dir, base := path.Split(filename) - content, ok := pkgs[path.Clean(dir)][base] - if !ok { - return nil, fmt.Errorf("file not found: %s", filename) - } - return ioutil.NopCloser(strings.NewReader(content)), nil - } - ctxt.IsAbsPath = func(path string) bool { - path = filepath.ToSlash(path) - // Don't rely on the default (filepath.Path) since on - // Windows, it reports virtual paths as non-absolute. - return strings.HasPrefix(path, "/") - } - return &ctxt -} - -type byName []os.FileInfo - -func (s byName) Len() int { return len(s) } -func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } - -type fakeFileInfo string - -func (fi fakeFileInfo) Name() string { return string(fi) } -func (fakeFileInfo) Sys() interface{} { return nil } -func (fakeFileInfo) ModTime() time.Time { return time.Time{} } -func (fakeFileInfo) IsDir() bool { return false } -func (fakeFileInfo) Size() int64 { return 0 } -func (fakeFileInfo) Mode() os.FileMode { return 0644 } - -type fakeDirInfo string - -func (fd fakeDirInfo) Name() string { return string(fd) } -func (fakeDirInfo) Sys() interface{} { return nil } -func (fakeDirInfo) ModTime() time.Time { return time.Time{} } -func (fakeDirInfo) IsDir() bool { return true } -func (fakeDirInfo) Size() int64 { return 0 } -func (fakeDirInfo) Mode() os.FileMode { return 0755 } diff --git a/vendor/golang.org/x/tools/go/buildutil/overlay.go b/vendor/golang.org/x/tools/go/buildutil/overlay.go deleted file mode 100644 index 8e239086bd4..00000000000 --- a/vendor/golang.org/x/tools/go/buildutil/overlay.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package buildutil - -import ( - "bufio" - "bytes" - "fmt" - "go/build" - "io" - "io/ioutil" - "path/filepath" - "strconv" - "strings" -) - -// OverlayContext overlays a build.Context with additional files from -// a map. Files in the map take precedence over other files. -// -// In addition to plain string comparison, two file names are -// considered equal if their base names match and their directory -// components point at the same directory on the file system. That is, -// symbolic links are followed for directories, but not files. 
-// -// A common use case for OverlayContext is to allow editors to pass in -// a set of unsaved, modified files. -// -// Currently, only the Context.OpenFile function will respect the -// overlay. This may change in the future. -func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Context { - // TODO(dominikh): Implement IsDir, HasSubdir and ReadDir - - rc := func(data []byte) (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewBuffer(data)), nil - } - - copy := *orig // make a copy - ctxt := &copy - ctxt.OpenFile = func(path string) (io.ReadCloser, error) { - // Fast path: names match exactly. - if content, ok := overlay[path]; ok { - return rc(content) - } - - // Slow path: check for same file under a different - // alias, perhaps due to a symbolic link. - for filename, content := range overlay { - if sameFile(path, filename) { - return rc(content) - } - } - - return OpenFile(orig, path) - } - return ctxt -} - -// ParseOverlayArchive parses an archive containing Go files and their -// contents. The result is intended to be used with OverlayContext. -// -// -// Archive format -// -// The archive consists of a series of files. Each file consists of a -// name, a decimal file size and the file contents, separated by -// newlines. No newline follows after the file contents. -func ParseOverlayArchive(archive io.Reader) (map[string][]byte, error) { - overlay := make(map[string][]byte) - r := bufio.NewReader(archive) - for { - // Read file name. - filename, err := r.ReadString('\n') - if err != nil { - if err == io.EOF { - break // OK - } - return nil, fmt.Errorf("reading archive file name: %v", err) - } - filename = filepath.Clean(strings.TrimSpace(filename)) - - // Read file size. - sz, err := r.ReadString('\n') - if err != nil { - return nil, fmt.Errorf("reading size of archive file %s: %v", filename, err) - } - sz = strings.TrimSpace(sz) - size, err := strconv.ParseUint(sz, 10, 32) - if err != nil { - return nil, fmt.Errorf("parsing size of archive file %s: %v", filename, err) - } - - // Read file content. - content := make([]byte, size) - if _, err := io.ReadFull(r, content); err != nil { - return nil, fmt.Errorf("reading archive file %s: %v", filename, err) - } - overlay[filename] = content - } - - return overlay, nil -} diff --git a/vendor/golang.org/x/tools/go/buildutil/tags.go b/vendor/golang.org/x/tools/go/buildutil/tags.go deleted file mode 100644 index 486606f3768..00000000000 --- a/vendor/golang.org/x/tools/go/buildutil/tags.go +++ /dev/null @@ -1,75 +0,0 @@ -package buildutil - -// This logic was copied from stringsFlag from $GOROOT/src/cmd/go/build.go. - -import "fmt" - -const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " + - "For more information about build tags, see the description of " + - "build constraints in the documentation for the go/build package" - -// TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses -// a flag value in the same manner as go build's -tags flag and -// populates a []string slice. -// -// See $GOROOT/src/go/build/doc.go for description of build tags. -// See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag. 
-// -// Example: -// flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc) -type TagsFlag []string - -func (v *TagsFlag) Set(s string) error { - var err error - *v, err = splitQuotedFields(s) - if *v == nil { - *v = []string{} - } - return err -} - -func (v *TagsFlag) Get() interface{} { return *v } - -func splitQuotedFields(s string) ([]string, error) { - // Split fields allowing '' or "" around elements. - // Quotes further inside the string do not count. - var f []string - for len(s) > 0 { - for len(s) > 0 && isSpaceByte(s[0]) { - s = s[1:] - } - if len(s) == 0 { - break - } - // Accepted quoted string. No unescaping inside. - if s[0] == '"' || s[0] == '\'' { - quote := s[0] - s = s[1:] - i := 0 - for i < len(s) && s[i] != quote { - i++ - } - if i >= len(s) { - return nil, fmt.Errorf("unterminated %c string", quote) - } - f = append(f, s[:i]) - s = s[i+1:] - continue - } - i := 0 - for i < len(s) && !isSpaceByte(s[i]) { - i++ - } - f = append(f, s[:i]) - s = s[i:] - } - return f, nil -} - -func (v *TagsFlag) String() string { - return "" -} - -func isSpaceByte(c byte) bool { - return c == ' ' || c == '\t' || c == '\n' || c == '\r' -} diff --git a/vendor/golang.org/x/tools/go/buildutil/util.go b/vendor/golang.org/x/tools/go/buildutil/util.go deleted file mode 100644 index fc923d7a702..00000000000 --- a/vendor/golang.org/x/tools/go/buildutil/util.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package buildutil - -import ( - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/token" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "strings" -) - -// ParseFile behaves like parser.ParseFile, -// but uses the build context's file system interface, if any. -// -// If file is not absolute (as defined by IsAbsPath), the (dir, file) -// components are joined using JoinPath; dir must be absolute. -// -// The displayPath function, if provided, is used to transform the -// filename that will be attached to the ASTs. -// -// TODO(adonovan): call this from go/loader.parseFiles when the tree thaws. -// -func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) { - if !IsAbsPath(ctxt, file) { - file = JoinPath(ctxt, dir, file) - } - rd, err := OpenFile(ctxt, file) - if err != nil { - return nil, err - } - defer rd.Close() // ignore error - if displayPath != nil { - file = displayPath(file) - } - return parser.ParseFile(fset, file, rd, mode) -} - -// ContainingPackage returns the package containing filename. -// -// If filename is not absolute, it is interpreted relative to working directory dir. -// All I/O is via the build context's file system interface, if any. -// -// The '...Files []string' fields of the resulting build.Package are not -// populated (build.FindOnly mode). -// -func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) { - if !IsAbsPath(ctxt, filename) { - filename = JoinPath(ctxt, dir, filename) - } - - // We must not assume the file tree uses - // "/" always, - // `\` always, - // or os.PathSeparator (which varies by platform), - // but to make any progress, we are forced to assume that - // paths will not use `\` unless the PathSeparator - // is also `\`, thus we can rely on filepath.ToSlash for some sanity. 
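Stepping back to the tags.go removal above: TagsFlag is the piece most callers touched directly. A small usage sketch, illustrative only and mirroring the example in the deleted doc comment:

package main

import (
	"flag"
	"fmt"
	"go/build"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	// TagsFlag is a []string that implements flag.Value, so it can populate
	// build.Default.BuildTags straight from a -tags argument.
	flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
	flag.Parse()

	// Running the tool with: -tags "integration 'extra'"
	// leaves build.Default.BuildTags as []string{"integration", "extra"}.
	fmt.Println(build.Default.BuildTags)
}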
- - dirSlash := path.Dir(filepath.ToSlash(filename)) + "/" - - // We assume that no source root (GOPATH[i] or GOROOT) contains any other. - for _, srcdir := range ctxt.SrcDirs() { - srcdirSlash := filepath.ToSlash(srcdir) + "/" - if importPath, ok := HasSubdir(ctxt, srcdirSlash, dirSlash); ok { - return ctxt.Import(importPath, dir, build.FindOnly) - } - } - - return nil, fmt.Errorf("can't find package containing %s", filename) -} - -// -- Effective methods of file system interface ------------------------- - -// (go/build.Context defines these as methods, but does not export them.) - -// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses -// the local file system to answer the question. -func HasSubdir(ctxt *build.Context, root, dir string) (rel string, ok bool) { - if f := ctxt.HasSubdir; f != nil { - return f(root, dir) - } - - // Try using paths we received. - if rel, ok = hasSubdir(root, dir); ok { - return - } - - // Try expanding symlinks and comparing - // expanded against unexpanded and - // expanded against expanded. - rootSym, _ := filepath.EvalSymlinks(root) - dirSym, _ := filepath.EvalSymlinks(dir) - - if rel, ok = hasSubdir(rootSym, dir); ok { - return - } - if rel, ok = hasSubdir(root, dirSym); ok { - return - } - return hasSubdir(rootSym, dirSym) -} - -func hasSubdir(root, dir string) (rel string, ok bool) { - const sep = string(filepath.Separator) - root = filepath.Clean(root) - if !strings.HasSuffix(root, sep) { - root += sep - } - - dir = filepath.Clean(dir) - if !strings.HasPrefix(dir, root) { - return "", false - } - - return filepath.ToSlash(dir[len(root):]), true -} - -// FileExists returns true if the specified file exists, -// using the build context's file system interface. -func FileExists(ctxt *build.Context, path string) bool { - if ctxt.OpenFile != nil { - r, err := ctxt.OpenFile(path) - if err != nil { - return false - } - r.Close() // ignore error - return true - } - _, err := os.Stat(path) - return err == nil -} - -// OpenFile behaves like os.Open, -// but uses the build context's file system interface, if any. -func OpenFile(ctxt *build.Context, path string) (io.ReadCloser, error) { - if ctxt.OpenFile != nil { - return ctxt.OpenFile(path) - } - return os.Open(path) -} - -// IsAbsPath behaves like filepath.IsAbs, -// but uses the build context's file system interface, if any. -func IsAbsPath(ctxt *build.Context, path string) bool { - if ctxt.IsAbsPath != nil { - return ctxt.IsAbsPath(path) - } - return filepath.IsAbs(path) -} - -// JoinPath behaves like filepath.Join, -// but uses the build context's file system interface, if any. -func JoinPath(ctxt *build.Context, path ...string) string { - if ctxt.JoinPath != nil { - return ctxt.JoinPath(path...) - } - return filepath.Join(path...) -} - -// IsDir behaves like os.Stat plus IsDir, -// but uses the build context's file system interface, if any. -func IsDir(ctxt *build.Context, path string) bool { - if ctxt.IsDir != nil { - return ctxt.IsDir(path) - } - fi, err := os.Stat(path) - return err == nil && fi.IsDir() -} - -// ReadDir behaves like ioutil.ReadDir, -// but uses the build context's file system interface, if any. -func ReadDir(ctxt *build.Context, path string) ([]os.FileInfo, error) { - if ctxt.ReadDir != nil { - return ctxt.ReadDir(path) - } - return ioutil.ReadDir(path) -} - -// SplitPathList behaves like filepath.SplitList, -// but uses the build context's file system interface, if any. 
-func SplitPathList(ctxt *build.Context, s string) []string { - if ctxt.SplitPathList != nil { - return ctxt.SplitPathList(s) - } - return filepath.SplitList(s) -} - -// sameFile returns true if x and y have the same basename and denote -// the same file. -// -func sameFile(x, y string) bool { - if path.Clean(x) == path.Clean(y) { - return true - } - if filepath.Base(x) == filepath.Base(y) { // (optimisation) - if xi, err := os.Stat(x); err == nil { - if yi, err := os.Stat(y); err == nil { - return os.SameFile(xi, yi) - } - } - } - return false -} diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go deleted file mode 100644 index 5db8b309676..00000000000 --- a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cgo handles cgo preprocessing of files containing `import "C"`. -// -// DESIGN -// -// The approach taken is to run the cgo processor on the package's -// CgoFiles and parse the output, faking the filenames of the -// resulting ASTs so that the synthetic file containing the C types is -// called "C" (e.g. "~/go/src/net/C") and the preprocessed files -// have their original names (e.g. "~/go/src/net/cgo_unix.go"), -// not the names of the actual temporary files. -// -// The advantage of this approach is its fidelity to 'go build'. The -// downside is that the token.Position.Offset for each AST node is -// incorrect, being an offset within the temporary file. Line numbers -// should still be correct because of the //line comments. -// -// The logic of this file is mostly plundered from the 'go build' -// tool, which also invokes the cgo preprocessor. -// -// -// REJECTED ALTERNATIVE -// -// An alternative approach that we explored is to extend go/types' -// Importer mechanism to provide the identity of the importing package -// so that each time `import "C"` appears it resolves to a different -// synthetic package containing just the objects needed in that case. -// The loader would invoke cgo but parse only the cgo_types.go file -// defining the package-level objects, discarding the other files -// resulting from preprocessing. -// -// The benefit of this approach would have been that source-level -// syntax information would correspond exactly to the original cgo -// file, with no preprocessing involved, making source tools like -// godoc, guru, and eg happy. However, the approach was rejected -// due to the additional complexity it would impose on go/types. (It -// made for a beautiful demo, though.) -// -// cgo files, despite their *.go extension, are not legal Go source -// files per the specification since they may refer to unexported -// members of package "C" such as C.int. Also, a function such as -// C.getpwent has in effect two types, one matching its C type and one -// which additionally returns (errno C.int). The cgo preprocessor -// uses name mangling to distinguish these two functions in the -// processed code, but go/types would need to duplicate this logic in -// its handling of function calls, analogous to the treatment of map -// lookups in which y=m[k] and y,ok=m[k] are both legal. 
- -package cgo - -import ( - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/token" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "regexp" - "strings" -) - -// ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses -// the output and returns the resulting ASTs. -// -func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) { - tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C") - if err != nil { - return nil, err - } - defer os.RemoveAll(tmpdir) - - pkgdir := bp.Dir - if DisplayPath != nil { - pkgdir = DisplayPath(pkgdir) - } - - cgoFiles, cgoDisplayFiles, err := Run(bp, pkgdir, tmpdir, false) - if err != nil { - return nil, err - } - var files []*ast.File - for i := range cgoFiles { - rd, err := os.Open(cgoFiles[i]) - if err != nil { - return nil, err - } - display := filepath.Join(bp.Dir, cgoDisplayFiles[i]) - f, err := parser.ParseFile(fset, display, rd, mode) - rd.Close() - if err != nil { - return nil, err - } - files = append(files, f) - } - return files, nil -} - -var cgoRe = regexp.MustCompile(`[/\\:]`) - -// Run invokes the cgo preprocessor on bp.CgoFiles and returns two -// lists of files: the resulting processed files (in temporary -// directory tmpdir) and the corresponding names of the unprocessed files. -// -// Run is adapted from (*builder).cgo in -// $GOROOT/src/cmd/go/build.go, but these features are unsupported: -// Objective C, CGOPKGPATH, CGO_FLAGS. -// -// If useabs is set to true, absolute paths of the bp.CgoFiles will be passed in -// to the cgo preprocessor. This in turn will set the // line comments -// referring to those files to use absolute paths. This is needed for -// go/packages using the legacy go list support so it is able to find -// the original files. -func Run(bp *build.Package, pkgdir, tmpdir string, useabs bool) (files, displayFiles []string, err error) { - cgoCPPFLAGS, _, _, _ := cflags(bp, true) - _, cgoexeCFLAGS, _, _ := cflags(bp, false) - - if len(bp.CgoPkgConfig) > 0 { - pcCFLAGS, err := pkgConfigFlags(bp) - if err != nil { - return nil, nil, err - } - cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...) - } - - // Allows including _cgo_export.h from .[ch] files in the package. - cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir) - - // _cgo_gotypes.go (displayed "C") contains the type definitions. - files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go")) - displayFiles = append(displayFiles, "C") - for _, fn := range bp.CgoFiles { - // "foo.cgo1.go" (displayed "foo.go") is the processed Go source. - f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_") - files = append(files, filepath.Join(tmpdir, f+"cgo1.go")) - displayFiles = append(displayFiles, fn) - } - - var cgoflags []string - if bp.Goroot && bp.ImportPath == "runtime/cgo" { - cgoflags = append(cgoflags, "-import_runtime_cgo=false") - } - if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" { - cgoflags = append(cgoflags, "-import_syscall=false") - } - - var cgoFiles []string = bp.CgoFiles - if useabs { - cgoFiles = make([]string, len(bp.CgoFiles)) - for i := range cgoFiles { - cgoFiles[i] = filepath.Join(pkgdir, bp.CgoFiles[i]) - } - } - - args := stringList( - "go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--", - cgoCPPFLAGS, cgoexeCFLAGS, cgoFiles, - ) - if false { - log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir) - } - cmd := exec.Command(args[0], args[1:]...) 
- cmd.Dir = pkgdir - cmd.Stdout = os.Stderr - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err) - } - - return files, displayFiles, nil -} - -// -- unmodified from 'go build' --------------------------------------- - -// Return the flags to use when invoking the C or C++ compilers, or cgo. -func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) { - var defaults string - if def { - defaults = "-g -O2" - } - - cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS) - cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS) - cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS) - ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS) - return -} - -// envList returns the value of the given environment variable broken -// into fields, using the default value when the variable is empty. -func envList(key, def string) []string { - v := os.Getenv(key) - if v == "" { - v = def - } - return strings.Fields(v) -} - -// stringList's arguments should be a sequence of string or []string values. -// stringList flattens them into a single []string. -func stringList(args ...interface{}) []string { - var x []string - for _, arg := range args { - switch arg := arg.(type) { - case []string: - x = append(x, arg...) - case string: - x = append(x, arg) - default: - panic("stringList: invalid argument") - } - } - return x -} diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go deleted file mode 100644 index b5bb95a63e5..00000000000 --- a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cgo - -import ( - "errors" - "fmt" - "go/build" - "os/exec" - "strings" -) - -// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints. -func pkgConfig(mode string, pkgs []string) (flags []string, err error) { - cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...) - out, err := cmd.CombinedOutput() - if err != nil { - s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err) - if len(out) > 0 { - s = fmt.Sprintf("%s: %s", s, out) - } - return nil, errors.New(s) - } - if len(out) > 0 { - flags = strings.Fields(string(out)) - } - return -} - -// pkgConfigFlags calls pkg-config if needed and returns the cflags -// needed to build the package. -func pkgConfigFlags(p *build.Package) (cflags []string, err error) { - if len(p.CgoPkgConfig) == 0 { - return nil, nil - } - return pkgConfig("--cflags", p.CgoPkgConfig) -} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go index 8dcd8bbb71a..e8cba6b2375 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go @@ -491,7 +491,7 @@ func (p *parser) parseMapType(parent *types.Package) types.Type { // // For unqualified and anonymous names, the returned package is the parent // package unless parent == nil, in which case the returned package is the -// package being imported. (The parent package is not nil if the the name +// package being imported. 
(The parent package is not nil if the name // is an unqualified struct field or interface method name belonging to a // type declared in another package.) // diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go index dc6177c122d..35bc6a4127a 100644 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -39,7 +39,12 @@ func GetSizes(ctx context.Context, buildFlags, env []string, gocmdRunner *gocomm } if tool == "off" { - return GetSizesGolist(ctx, buildFlags, env, gocmdRunner, dir) + inv := gocommand.Invocation{ + BuildFlags: buildFlags, + Env: env, + WorkingDir: dir, + } + return GetSizesGolist(ctx, inv, gocmdRunner) } req, err := json.Marshal(struct { @@ -75,26 +80,17 @@ func GetSizes(ctx context.Context, buildFlags, env []string, gocmdRunner *gocomm return response.Sizes, nil } -func GetSizesGolist(ctx context.Context, buildFlags, env []string, gocmdRunner *gocommand.Runner, dir string) (types.Sizes, error) { - inv := gocommand.Invocation{ - Verb: "list", - Args: []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}, - Env: env, - BuildFlags: buildFlags, - WorkingDir: dir, - } +func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (types.Sizes, error) { + inv.Verb = "list" + inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) var goarch, compiler string if rawErr != nil { if strings.Contains(rawErr.Error(), "cannot find main module") { // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. // TODO(matloob): Is this a problem in practice? - inv := gocommand.Invocation{ - Verb: "env", - Args: []string{"GOARCH"}, - Env: env, - WorkingDir: dir, - } + inv.Verb = "env" + inv.Args = []string{"GOARCH"} envout, enverr := gocmdRunner.Run(ctx, inv) if enverr != nil { return nil, enverr diff --git a/vendor/golang.org/x/tools/go/loader/doc.go b/vendor/golang.org/x/tools/go/loader/doc.go deleted file mode 100644 index c5aa31c1a02..00000000000 --- a/vendor/golang.org/x/tools/go/loader/doc.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package loader loads a complete Go program from source code, parsing -// and type-checking the initial packages plus their transitive closure -// of dependencies. The ASTs and the derived facts are retained for -// later use. -// -// Deprecated: This is an older API and does not have support -// for modules. Use golang.org/x/tools/go/packages instead. -// -// The package defines two primary types: Config, which specifies a -// set of initial packages to load and various other options; and -// Program, which is the result of successfully loading the packages -// specified by a configuration. -// -// The configuration can be set directly, but *Config provides various -// convenience methods to simplify the common cases, each of which can -// be called any number of times. Finally, these are followed by a -// call to Load() to actually load and type-check the program. -// -// var conf loader.Config -// -// // Use the command-line arguments to specify -// // a set of initial packages to load from source. -// // See FromArgsUsage for help. 
-// rest, err := conf.FromArgs(os.Args[1:], wantTests) -// -// // Parse the specified files and create an ad hoc package with path "foo". -// // All files must have the same 'package' declaration. -// conf.CreateFromFilenames("foo", "foo.go", "bar.go") -// -// // Create an ad hoc package with path "foo" from -// // the specified already-parsed files. -// // All ASTs must have the same 'package' declaration. -// conf.CreateFromFiles("foo", parsedFiles) -// -// // Add "runtime" to the set of packages to be loaded. -// conf.Import("runtime") -// -// // Adds "fmt" and "fmt_test" to the set of packages -// // to be loaded. "fmt" will include *_test.go files. -// conf.ImportWithTests("fmt") -// -// // Finally, load all the packages specified by the configuration. -// prog, err := conf.Load() -// -// See examples_test.go for examples of API usage. -// -// -// CONCEPTS AND TERMINOLOGY -// -// The WORKSPACE is the set of packages accessible to the loader. The -// workspace is defined by Config.Build, a *build.Context. The -// default context treats subdirectories of $GOROOT and $GOPATH as -// packages, but this behavior may be overridden. -// -// An AD HOC package is one specified as a set of source files on the -// command line. In the simplest case, it may consist of a single file -// such as $GOROOT/src/net/http/triv.go. -// -// EXTERNAL TEST packages are those comprised of a set of *_test.go -// files all with the same 'package foo_test' declaration, all in the -// same directory. (go/build.Package calls these files XTestFiles.) -// -// An IMPORTABLE package is one that can be referred to by some import -// spec. Every importable package is uniquely identified by its -// PACKAGE PATH or just PATH, a string such as "fmt", "encoding/json", -// or "cmd/vendor/golang.org/x/arch/x86/x86asm". A package path -// typically denotes a subdirectory of the workspace. -// -// An import declaration uses an IMPORT PATH to refer to a package. -// Most import declarations use the package path as the import path. -// -// Due to VENDORING (https://golang.org/s/go15vendor), the -// interpretation of an import path may depend on the directory in which -// it appears. To resolve an import path to a package path, go/build -// must search the enclosing directories for a subdirectory named -// "vendor". -// -// ad hoc packages and external test packages are NON-IMPORTABLE. The -// path of an ad hoc package is inferred from the package -// declarations of its files and is therefore not a unique package key. -// For example, Config.CreatePkgs may specify two initial ad hoc -// packages, both with path "main". -// -// An AUGMENTED package is an importable package P plus all the -// *_test.go files with same 'package foo' declaration as P. -// (go/build.Package calls these files TestFiles.) -// -// The INITIAL packages are those specified in the configuration. A -// DEPENDENCY is a package loaded to satisfy an import in an initial -// package or another dependency. -// -package loader - -// IMPLEMENTATION NOTES -// -// 'go test', in-package test files, and import cycles -// --------------------------------------------------- -// -// An external test package may depend upon members of the augmented -// package that are not in the unaugmented package, such as functions -// that expose internals. (See bufio/export_test.go for an example.) -// So, the loader must ensure that for each external test package -// it loads, it also augments the corresponding non-test package. 
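The deleted doc.go walks through this API in comment form; as a concrete companion (illustrative only, GOPATH-era, not taken from the patch), the same flow as a runnable program:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/loader"
)

func main() {
	var conf loader.Config

	// Load "fmt" plus its in-package tests, and all transitive dependencies.
	conf.ImportWithTests("fmt")

	prog, err := conf.Load()
	if err != nil {
		log.Fatal(err)
	}

	for _, info := range prog.InitialPackages() {
		fmt.Printf("%s: %d files, %d errors\n",
			info.Pkg.Path(), len(info.Files), len(info.Errors))
	}
}

As the deprecation notice above says, new code should use golang.org/x/tools/go/packages instead.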
-// -// The import graph over n unaugmented packages must be acyclic; the -// import graph over n-1 unaugmented packages plus one augmented -// package must also be acyclic. ('go test' relies on this.) But the -// import graph over n augmented packages may contain cycles. -// -// First, all the (unaugmented) non-test packages and their -// dependencies are imported in the usual way; the loader reports an -// error if it detects an import cycle. -// -// Then, each package P for which testing is desired is augmented by -// the list P' of its in-package test files, by calling -// (*types.Checker).Files. This arrangement ensures that P' may -// reference definitions within P, but P may not reference definitions -// within P'. Furthermore, P' may import any other package, including -// ones that depend upon P, without an import cycle error. -// -// Consider two packages A and B, both of which have lists of -// in-package test files we'll call A' and B', and which have the -// following import graph edges: -// B imports A -// B' imports A -// A' imports B -// This last edge would be expected to create an error were it not -// for the special type-checking discipline above. -// Cycles of size greater than two are possible. For example: -// compress/bzip2/bzip2_test.go (package bzip2) imports "io/ioutil" -// io/ioutil/tempfile_test.go (package ioutil) imports "regexp" -// regexp/exec_test.go (package regexp) imports "compress/bzip2" -// -// -// Concurrency -// ----------- -// -// Let us define the import dependency graph as follows. Each node is a -// list of files passed to (Checker).Files at once. Many of these lists -// are the production code of an importable Go package, so those nodes -// are labelled by the package's path. The remaining nodes are -// ad hoc packages and lists of in-package *_test.go files that augment -// an importable package; those nodes have no label. -// -// The edges of the graph represent import statements appearing within a -// file. An edge connects a node (a list of files) to the node it -// imports, which is importable and thus always labelled. -// -// Loading is controlled by this dependency graph. -// -// To reduce I/O latency, we start loading a package's dependencies -// asynchronously as soon as we've parsed its files and enumerated its -// imports (scanImports). This performs a preorder traversal of the -// import dependency graph. -// -// To exploit hardware parallelism, we type-check unrelated packages in -// parallel, where "unrelated" means not ordered by the partial order of -// the import dependency graph. -// -// We use a concurrency-safe non-blocking cache (importer.imported) to -// record the results of type-checking, whether success or failure. An -// entry is created in this cache by startLoad the first time the -// package is imported. The first goroutine to request an entry becomes -// responsible for completing the task and broadcasting completion to -// subsequent requestors, which block until then. -// -// Type checking occurs in (parallel) postorder: we cannot type-check a -// set of files until we have loaded and type-checked all of their -// immediate dependencies (and thus all of their transitive -// dependencies). If the input were guaranteed free of import cycles, -// this would be trivial: we could simply wait for completion of the -// dependencies and then invoke the typechecker. 
-// -// But as we saw in the 'go test' section above, some cycles in the -// import graph over packages are actually legal, so long as the -// cycle-forming edge originates in the in-package test files that -// augment the package. This explains why the nodes of the import -// dependency graph are not packages, but lists of files: the unlabelled -// nodes avoid the cycles. Consider packages A and B where B imports A -// and A's in-package tests AT import B. The naively constructed import -// graph over packages would contain a cycle (A+AT) --> B --> (A+AT) but -// the graph over lists of files is AT --> B --> A, where AT is an -// unlabelled node. -// -// Awaiting completion of the dependencies in a cyclic graph would -// deadlock, so we must materialize the import dependency graph (as -// importer.graph) and check whether each import edge forms a cycle. If -// x imports y, and the graph already contains a path from y to x, then -// there is an import cycle, in which case the processing of x must not -// wait for the completion of processing of y. -// -// When the type-checker makes a callback (doImport) to the loader for a -// given import edge, there are two possible cases. In the normal case, -// the dependency has already been completely type-checked; doImport -// does a cache lookup and returns it. In the cyclic case, the entry in -// the cache is still necessarily incomplete, indicating a cycle. We -// perform the cycle check again to obtain the error message, and return -// the error. -// -// The result of using concurrency is about a 2.5x speedup for stdlib_test. diff --git a/vendor/golang.org/x/tools/go/loader/loader.go b/vendor/golang.org/x/tools/go/loader/loader.go deleted file mode 100644 index bc12ca33d1a..00000000000 --- a/vendor/golang.org/x/tools/go/loader/loader.go +++ /dev/null @@ -1,1086 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package loader - -// See doc.go for package documentation and implementation notes. - -import ( - "errors" - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/token" - "go/types" - "os" - "path/filepath" - "sort" - "strings" - "sync" - "time" - - "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/internal/cgo" -) - -var ignoreVendor build.ImportMode - -const trace = false // show timing info for type-checking - -// Config specifies the configuration for loading a whole program from -// Go source code. -// The zero value for Config is a ready-to-use default configuration. -type Config struct { - // Fset is the file set for the parser to use when loading the - // program. If nil, it may be lazily initialized by any - // method of Config. - Fset *token.FileSet - - // ParserMode specifies the mode to be used by the parser when - // loading source packages. - ParserMode parser.Mode - - // TypeChecker contains options relating to the type checker. - // - // The supplied IgnoreFuncBodies is not used; the effective - // value comes from the TypeCheckFuncBodies func below. - // The supplied Import function is not used either. - TypeChecker types.Config - - // TypeCheckFuncBodies is a predicate over package paths. - // A package for which the predicate is false will - // have its package-level declarations type checked, but not - // its function bodies; this can be used to quickly load - // dependencies from source. If nil, all func bodies are type - // checked. 
- TypeCheckFuncBodies func(path string) bool - - // If Build is non-nil, it is used to locate source packages. - // Otherwise &build.Default is used. - // - // By default, cgo is invoked to preprocess Go files that - // import the fake package "C". This behaviour can be - // disabled by setting CGO_ENABLED=0 in the environment prior - // to startup, or by setting Build.CgoEnabled=false. - Build *build.Context - - // The current directory, used for resolving relative package - // references such as "./go/loader". If empty, os.Getwd will be - // used instead. - Cwd string - - // If DisplayPath is non-nil, it is used to transform each - // file name obtained from Build.Import(). This can be used - // to prevent a virtualized build.Config's file names from - // leaking into the user interface. - DisplayPath func(path string) string - - // If AllowErrors is true, Load will return a Program even - // if some of the its packages contained I/O, parser or type - // errors; such errors are accessible via PackageInfo.Errors. If - // false, Load will fail if any package had an error. - AllowErrors bool - - // CreatePkgs specifies a list of non-importable initial - // packages to create. The resulting packages will appear in - // the corresponding elements of the Program.Created slice. - CreatePkgs []PkgSpec - - // ImportPkgs specifies a set of initial packages to load. - // The map keys are package paths. - // - // The map value indicates whether to load tests. If true, Load - // will add and type-check two lists of files to the package: - // non-test files followed by in-package *_test.go files. In - // addition, it will append the external test package (if any) - // to Program.Created. - ImportPkgs map[string]bool - - // FindPackage is called during Load to create the build.Package - // for a given import path from a given directory. - // If FindPackage is nil, (*build.Context).Import is used. - // A client may use this hook to adapt to a proprietary build - // system that does not follow the "go build" layout - // conventions, for example. - // - // It must be safe to call concurrently from multiple goroutines. - FindPackage func(ctxt *build.Context, importPath, fromDir string, mode build.ImportMode) (*build.Package, error) - - // AfterTypeCheck is called immediately after a list of files - // has been type-checked and appended to info.Files. - // - // This optional hook function is the earliest opportunity for - // the client to observe the output of the type checker, - // which may be useful to reduce analysis latency when loading - // a large program. - // - // The function is permitted to modify info.Info, for instance - // to clear data structures that are no longer needed, which can - // dramatically reduce peak memory consumption. - // - // The function may be called twice for the same PackageInfo: - // once for the files of the package and again for the - // in-package test files. - // - // It must be safe to call concurrently from multiple goroutines. - AfterTypeCheck func(info *PackageInfo, files []*ast.File) -} - -// A PkgSpec specifies a non-importable package to be created by Load. -// Files are processed first, but typically only one of Files and -// Filenames is provided. The path needn't be globally unique. -// -// For vendoring purposes, the package's directory is the one that -// contains the first file. 
-type PkgSpec struct { - Path string // package path ("" => use package declaration) - Files []*ast.File // ASTs of already-parsed files - Filenames []string // names of files to be parsed -} - -// A Program is a Go program loaded from source as specified by a Config. -type Program struct { - Fset *token.FileSet // the file set for this program - - // Created[i] contains the initial package whose ASTs or - // filenames were supplied by Config.CreatePkgs[i], followed by - // the external test package, if any, of each package in - // Config.ImportPkgs ordered by ImportPath. - // - // NOTE: these files must not import "C". Cgo preprocessing is - // only performed on imported packages, not ad hoc packages. - // - // TODO(adonovan): we need to copy and adapt the logic of - // goFilesPackage (from $GOROOT/src/cmd/go/build.go) and make - // Config.Import and Config.Create methods return the same kind - // of entity, essentially a build.Package. - // Perhaps we can even reuse that type directly. - Created []*PackageInfo - - // Imported contains the initially imported packages, - // as specified by Config.ImportPkgs. - Imported map[string]*PackageInfo - - // AllPackages contains the PackageInfo of every package - // encountered by Load: all initial packages and all - // dependencies, including incomplete ones. - AllPackages map[*types.Package]*PackageInfo - - // importMap is the canonical mapping of package paths to - // packages. It contains all Imported initial packages, but not - // Created ones, and all imported dependencies. - importMap map[string]*types.Package -} - -// PackageInfo holds the ASTs and facts derived by the type-checker -// for a single package. -// -// Not mutated once exposed via the API. -// -type PackageInfo struct { - Pkg *types.Package - Importable bool // true if 'import "Pkg.Path()"' would resolve to this - TransitivelyErrorFree bool // true if Pkg and all its dependencies are free of errors - Files []*ast.File // syntax trees for the package's files - Errors []error // non-nil if the package had errors - types.Info // type-checker deductions. - dir string // package directory - - checker *types.Checker // transient type-checker state - errorFunc func(error) -} - -func (info *PackageInfo) String() string { return info.Pkg.Path() } - -func (info *PackageInfo) appendError(err error) { - if info.errorFunc != nil { - info.errorFunc(err) - } else { - fmt.Fprintln(os.Stderr, err) - } - info.Errors = append(info.Errors, err) -} - -func (conf *Config) fset() *token.FileSet { - if conf.Fset == nil { - conf.Fset = token.NewFileSet() - } - return conf.Fset -} - -// ParseFile is a convenience function (intended for testing) that invokes -// the parser using the Config's FileSet, which is initialized if nil. -// -// src specifies the parser input as a string, []byte, or io.Reader, and -// filename is its apparent name. If src is nil, the contents of -// filename are read from the file system. -// -func (conf *Config) ParseFile(filename string, src interface{}) (*ast.File, error) { - // TODO(adonovan): use conf.build() etc like parseFiles does. - return parser.ParseFile(conf.fset(), filename, src, conf.ParserMode) -} - -// FromArgsUsage is a partial usage message that applications calling -// FromArgs may wish to include in their -help output. -const FromArgsUsage = ` - is a list of arguments denoting a set of initial packages. -It may take one of two forms: - -1. A list of *.go source files. - - All of the specified files are loaded, parsed and type-checked - as a single package. 
All the files must belong to the same directory. - -2. A list of import paths, each denoting a package. - - The package's directory is found relative to the $GOROOT and - $GOPATH using similar logic to 'go build', and the *.go files in - that directory are loaded, parsed and type-checked as a single - package. - - In addition, all *_test.go files in the directory are then loaded - and parsed. Those files whose package declaration equals that of - the non-*_test.go files are included in the primary package. Test - files whose package declaration ends with "_test" are type-checked - as another package, the 'external' test package, so that a single - import path may denote two packages. (Whether this behaviour is - enabled is tool-specific, and may depend on additional flags.) - -A '--' argument terminates the list of packages. -` - -// FromArgs interprets args as a set of initial packages to load from -// source and updates the configuration. It returns the list of -// unconsumed arguments. -// -// It is intended for use in command-line interfaces that require a -// set of initial packages to be specified; see FromArgsUsage message -// for details. -// -// Only superficial errors are reported at this stage; errors dependent -// on I/O are detected during Load. -// -func (conf *Config) FromArgs(args []string, xtest bool) ([]string, error) { - var rest []string - for i, arg := range args { - if arg == "--" { - rest = args[i+1:] - args = args[:i] - break // consume "--" and return the remaining args - } - } - - if len(args) > 0 && strings.HasSuffix(args[0], ".go") { - // Assume args is a list of a *.go files - // denoting a single ad hoc package. - for _, arg := range args { - if !strings.HasSuffix(arg, ".go") { - return nil, fmt.Errorf("named files must be .go files: %s", arg) - } - } - conf.CreateFromFilenames("", args...) - } else { - // Assume args are directories each denoting a - // package and (perhaps) an external test, iff xtest. - for _, arg := range args { - if xtest { - conf.ImportWithTests(arg) - } else { - conf.Import(arg) - } - } - } - - return rest, nil -} - -// CreateFromFilenames is a convenience function that adds -// a conf.CreatePkgs entry to create a package of the specified *.go -// files. -// -func (conf *Config) CreateFromFilenames(path string, filenames ...string) { - conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Filenames: filenames}) -} - -// CreateFromFiles is a convenience function that adds a conf.CreatePkgs -// entry to create package of the specified path and parsed files. -// -func (conf *Config) CreateFromFiles(path string, files ...*ast.File) { - conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Files: files}) -} - -// ImportWithTests is a convenience function that adds path to -// ImportPkgs, the set of initial source packages located relative to -// $GOPATH. The package will be augmented by any *_test.go files in -// its directory that contain a "package x" (not "package x_test") -// declaration. -// -// In addition, if any *_test.go files contain a "package x_test" -// declaration, an additional package comprising just those files will -// be added to CreatePkgs. -// -func (conf *Config) ImportWithTests(path string) { conf.addImport(path, true) } - -// Import is a convenience function that adds path to ImportPkgs, the -// set of initial packages that will be imported from source. 
-// -func (conf *Config) Import(path string) { conf.addImport(path, false) } - -func (conf *Config) addImport(path string, tests bool) { - if path == "C" { - return // ignore; not a real package - } - if conf.ImportPkgs == nil { - conf.ImportPkgs = make(map[string]bool) - } - conf.ImportPkgs[path] = conf.ImportPkgs[path] || tests -} - -// PathEnclosingInterval returns the PackageInfo and ast.Node that -// contain source interval [start, end), and all the node's ancestors -// up to the AST root. It searches all ast.Files of all packages in prog. -// exact is defined as for astutil.PathEnclosingInterval. -// -// The zero value is returned if not found. -// -func (prog *Program) PathEnclosingInterval(start, end token.Pos) (pkg *PackageInfo, path []ast.Node, exact bool) { - for _, info := range prog.AllPackages { - for _, f := range info.Files { - if f.Pos() == token.NoPos { - // This can happen if the parser saw - // too many errors and bailed out. - // (Use parser.AllErrors to prevent that.) - continue - } - if !tokenFileContainsPos(prog.Fset.File(f.Pos()), start) { - continue - } - if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil { - return info, path, exact - } - } - } - return nil, nil, false -} - -// InitialPackages returns a new slice containing the set of initial -// packages (Created + Imported) in unspecified order. -// -func (prog *Program) InitialPackages() []*PackageInfo { - infos := make([]*PackageInfo, 0, len(prog.Created)+len(prog.Imported)) - infos = append(infos, prog.Created...) - for _, info := range prog.Imported { - infos = append(infos, info) - } - return infos -} - -// Package returns the ASTs and results of type checking for the -// specified package. -func (prog *Program) Package(path string) *PackageInfo { - if info, ok := prog.AllPackages[prog.importMap[path]]; ok { - return info - } - for _, info := range prog.Created { - if path == info.Pkg.Path() { - return info - } - } - return nil -} - -// ---------- Implementation ---------- - -// importer holds the working state of the algorithm. -type importer struct { - conf *Config // the client configuration - start time.Time // for logging - - progMu sync.Mutex // guards prog - prog *Program // the resulting program - - // findpkg is a memoization of FindPackage. - findpkgMu sync.Mutex // guards findpkg - findpkg map[findpkgKey]*findpkgValue - - importedMu sync.Mutex // guards imported - imported map[string]*importInfo // all imported packages (incl. failures) by import path - - // import dependency graph: graph[x][y] => x imports y - // - // Since non-importable packages cannot be cyclic, we ignore - // their imports, thus we only need the subgraph over importable - // packages. Nodes are identified by their import paths. - graphMu sync.Mutex - graph map[string]map[string]bool -} - -type findpkgKey struct { - importPath string - fromDir string - mode build.ImportMode -} - -type findpkgValue struct { - ready chan struct{} // closed to broadcast readiness - bp *build.Package - err error -} - -// importInfo tracks the success or failure of a single import. -// -// Upon completion, exactly one of info and err is non-nil: -// info on successful creation of a package, err otherwise. -// A successful package may still contain type errors. -// -type importInfo struct { - path string // import path - info *PackageInfo // results of typechecking (including errors) - complete chan struct{} // closed to broadcast that info is set. -} - -// awaitCompletion blocks until ii is complete, -// i.e. 
the info field is safe to inspect. -func (ii *importInfo) awaitCompletion() { - <-ii.complete // wait for close -} - -// Complete marks ii as complete. -// Its info and err fields will not be subsequently updated. -func (ii *importInfo) Complete(info *PackageInfo) { - if info == nil { - panic("info == nil") - } - ii.info = info - close(ii.complete) -} - -type importError struct { - path string // import path - err error // reason for failure to create a package -} - -// Load creates the initial packages specified by conf.{Create,Import}Pkgs, -// loading their dependencies packages as needed. -// -// On success, Load returns a Program containing a PackageInfo for -// each package. On failure, it returns an error. -// -// If AllowErrors is true, Load will return a Program even if some -// packages contained I/O, parser or type errors, or if dependencies -// were missing. (Such errors are accessible via PackageInfo.Errors. If -// false, Load will fail if any package had an error. -// -// It is an error if no packages were loaded. -// -func (conf *Config) Load() (*Program, error) { - // Create a simple default error handler for parse/type errors. - if conf.TypeChecker.Error == nil { - conf.TypeChecker.Error = func(e error) { fmt.Fprintln(os.Stderr, e) } - } - - // Set default working directory for relative package references. - if conf.Cwd == "" { - var err error - conf.Cwd, err = os.Getwd() - if err != nil { - return nil, err - } - } - - // Install default FindPackage hook using go/build logic. - if conf.FindPackage == nil { - conf.FindPackage = (*build.Context).Import - } - - prog := &Program{ - Fset: conf.fset(), - Imported: make(map[string]*PackageInfo), - importMap: make(map[string]*types.Package), - AllPackages: make(map[*types.Package]*PackageInfo), - } - - imp := importer{ - conf: conf, - prog: prog, - findpkg: make(map[findpkgKey]*findpkgValue), - imported: make(map[string]*importInfo), - start: time.Now(), - graph: make(map[string]map[string]bool), - } - - // -- loading proper (concurrent phase) -------------------------------- - - var errpkgs []string // packages that contained errors - - // Load the initially imported packages and their dependencies, - // in parallel. - // No vendor check on packages imported from the command line. - infos, importErrors := imp.importAll("", conf.Cwd, conf.ImportPkgs, ignoreVendor) - for _, ie := range importErrors { - conf.TypeChecker.Error(ie.err) // failed to create package - errpkgs = append(errpkgs, ie.path) - } - for _, info := range infos { - prog.Imported[info.Pkg.Path()] = info - } - - // Augment the designated initial packages by their tests. - // Dependencies are loaded in parallel. - var xtestPkgs []*build.Package - for importPath, augment := range conf.ImportPkgs { - if !augment { - continue - } - - // No vendor check on packages imported from command line. - bp, err := imp.findPackage(importPath, conf.Cwd, ignoreVendor) - if err != nil { - // Package not found, or can't even parse package declaration. - // Already reported by previous loop; ignore it. - continue - } - - // Needs external test package? - if len(bp.XTestGoFiles) > 0 { - xtestPkgs = append(xtestPkgs, bp) - } - - // Consult the cache using the canonical package path. - path := bp.ImportPath - imp.importedMu.Lock() // (unnecessary, we're sequential here) - ii, ok := imp.imported[path] - // Paranoid checks added due to issue #11012. - if !ok { - // Unreachable. 
- // The previous loop called importAll and thus - // startLoad for each path in ImportPkgs, which - // populates imp.imported[path] with a non-zero value. - panic(fmt.Sprintf("imported[%q] not found", path)) - } - if ii == nil { - // Unreachable. - // The ii values in this loop are the same as in - // the previous loop, which enforced the invariant - // that at least one of ii.err and ii.info is non-nil. - panic(fmt.Sprintf("imported[%q] == nil", path)) - } - if ii.info == nil { - // Unreachable. - // awaitCompletion has the postcondition - // ii.info != nil. - panic(fmt.Sprintf("imported[%q].info = nil", path)) - } - info := ii.info - imp.importedMu.Unlock() - - // Parse the in-package test files. - files, errs := imp.conf.parsePackageFiles(bp, 't') - for _, err := range errs { - info.appendError(err) - } - - // The test files augmenting package P cannot be imported, - // but may import packages that import P, - // so we must disable the cycle check. - imp.addFiles(info, files, false) - } - - createPkg := func(path, dir string, files []*ast.File, errs []error) { - info := imp.newPackageInfo(path, dir) - for _, err := range errs { - info.appendError(err) - } - - // Ad hoc packages are non-importable, - // so no cycle check is needed. - // addFiles loads dependencies in parallel. - imp.addFiles(info, files, false) - prog.Created = append(prog.Created, info) - } - - // Create packages specified by conf.CreatePkgs. - for _, cp := range conf.CreatePkgs { - files, errs := parseFiles(conf.fset(), conf.build(), nil, conf.Cwd, cp.Filenames, conf.ParserMode) - files = append(files, cp.Files...) - - path := cp.Path - if path == "" { - if len(files) > 0 { - path = files[0].Name.Name - } else { - path = "(unnamed)" - } - } - - dir := conf.Cwd - if len(files) > 0 && files[0].Pos().IsValid() { - dir = filepath.Dir(conf.fset().File(files[0].Pos()).Name()) - } - createPkg(path, dir, files, errs) - } - - // Create external test packages. - sort.Sort(byImportPath(xtestPkgs)) - for _, bp := range xtestPkgs { - files, errs := imp.conf.parsePackageFiles(bp, 'x') - createPkg(bp.ImportPath+"_test", bp.Dir, files, errs) - } - - // -- finishing up (sequential) ---------------------------------------- - - if len(prog.Imported)+len(prog.Created) == 0 { - return nil, errors.New("no initial packages were loaded") - } - - // Create infos for indirectly imported packages. - // e.g. incomplete packages without syntax, loaded from export data. - for _, obj := range prog.importMap { - info := prog.AllPackages[obj] - if info == nil { - prog.AllPackages[obj] = &PackageInfo{Pkg: obj, Importable: true} - } else { - // finished - info.checker = nil - info.errorFunc = nil - } - } - - if !conf.AllowErrors { - // Report errors in indirectly imported packages. 
- for _, info := range prog.AllPackages { - if len(info.Errors) > 0 { - errpkgs = append(errpkgs, info.Pkg.Path()) - } - } - if errpkgs != nil { - var more string - if len(errpkgs) > 3 { - more = fmt.Sprintf(" and %d more", len(errpkgs)-3) - errpkgs = errpkgs[:3] - } - return nil, fmt.Errorf("couldn't load packages due to errors: %s%s", - strings.Join(errpkgs, ", "), more) - } - } - - markErrorFreePackages(prog.AllPackages) - - return prog, nil -} - -type byImportPath []*build.Package - -func (b byImportPath) Len() int { return len(b) } -func (b byImportPath) Less(i, j int) bool { return b[i].ImportPath < b[j].ImportPath } -func (b byImportPath) Swap(i, j int) { b[i], b[j] = b[j], b[i] } - -// markErrorFreePackages sets the TransitivelyErrorFree flag on all -// applicable packages. -func markErrorFreePackages(allPackages map[*types.Package]*PackageInfo) { - // Build the transpose of the import graph. - importedBy := make(map[*types.Package]map[*types.Package]bool) - for P := range allPackages { - for _, Q := range P.Imports() { - clients, ok := importedBy[Q] - if !ok { - clients = make(map[*types.Package]bool) - importedBy[Q] = clients - } - clients[P] = true - } - } - - // Find all packages reachable from some error package. - reachable := make(map[*types.Package]bool) - var visit func(*types.Package) - visit = func(p *types.Package) { - if !reachable[p] { - reachable[p] = true - for q := range importedBy[p] { - visit(q) - } - } - } - for _, info := range allPackages { - if len(info.Errors) > 0 { - visit(info.Pkg) - } - } - - // Mark the others as "transitively error-free". - for _, info := range allPackages { - if !reachable[info.Pkg] { - info.TransitivelyErrorFree = true - } - } -} - -// build returns the effective build context. -func (conf *Config) build() *build.Context { - if conf.Build != nil { - return conf.Build - } - return &build.Default -} - -// parsePackageFiles enumerates the files belonging to package path, -// then loads, parses and returns them, plus a list of I/O or parse -// errors that were encountered. -// -// 'which' indicates which files to include: -// 'g': include non-test *.go source files (GoFiles + processed CgoFiles) -// 't': include in-package *_test.go source files (TestGoFiles) -// 'x': include external *_test.go source files. (XTestGoFiles) -// -func (conf *Config) parsePackageFiles(bp *build.Package, which rune) ([]*ast.File, []error) { - if bp.ImportPath == "unsafe" { - return nil, nil - } - var filenames []string - switch which { - case 'g': - filenames = bp.GoFiles - case 't': - filenames = bp.TestGoFiles - case 'x': - filenames = bp.XTestGoFiles - default: - panic(which) - } - - files, errs := parseFiles(conf.fset(), conf.build(), conf.DisplayPath, bp.Dir, filenames, conf.ParserMode) - - // Preprocess CgoFiles and parse the outputs (sequentially). - if which == 'g' && bp.CgoFiles != nil { - cgofiles, err := cgo.ProcessFiles(bp, conf.fset(), conf.DisplayPath, conf.ParserMode) - if err != nil { - errs = append(errs, err) - } else { - files = append(files, cgofiles...) - } - } - - return files, errs -} - -// doImport imports the package denoted by path. -// It implements the types.Importer signature. -// -// It returns an error if a package could not be created -// (e.g. go/build or parse error), but type errors are reported via -// the types.Config.Error callback (the first of which is also saved -// in the package's PackageInfo). -// -// Idempotent. 
-// -func (imp *importer) doImport(from *PackageInfo, to string) (*types.Package, error) { - if to == "C" { - // This should be unreachable, but ad hoc packages are - // not currently subject to cgo preprocessing. - // See https://golang.org/issue/11627. - return nil, fmt.Errorf(`the loader doesn't cgo-process ad hoc packages like %q; see Go issue 11627`, - from.Pkg.Path()) - } - - bp, err := imp.findPackage(to, from.dir, 0) - if err != nil { - return nil, err - } - - // The standard unsafe package is handled specially, - // and has no PackageInfo. - if bp.ImportPath == "unsafe" { - return types.Unsafe, nil - } - - // Look for the package in the cache using its canonical path. - path := bp.ImportPath - imp.importedMu.Lock() - ii := imp.imported[path] - imp.importedMu.Unlock() - if ii == nil { - panic("internal error: unexpected import: " + path) - } - if ii.info != nil { - return ii.info.Pkg, nil - } - - // Import of incomplete package: this indicates a cycle. - fromPath := from.Pkg.Path() - if cycle := imp.findPath(path, fromPath); cycle != nil { - // Normalize cycle: start from alphabetically largest node. - pos, start := -1, "" - for i, s := range cycle { - if pos < 0 || s > start { - pos, start = i, s - } - } - cycle = append(cycle, cycle[:pos]...)[pos:] // rotate cycle to start from largest - cycle = append(cycle, cycle[0]) // add start node to end to show cycliness - return nil, fmt.Errorf("import cycle: %s", strings.Join(cycle, " -> ")) - } - - panic("internal error: import of incomplete (yet acyclic) package: " + fromPath) -} - -// findPackage locates the package denoted by the importPath in the -// specified directory. -func (imp *importer) findPackage(importPath, fromDir string, mode build.ImportMode) (*build.Package, error) { - // We use a non-blocking duplicate-suppressing cache (gopl.io §9.7) - // to avoid holding the lock around FindPackage. - key := findpkgKey{importPath, fromDir, mode} - imp.findpkgMu.Lock() - v, ok := imp.findpkg[key] - if ok { - // cache hit - imp.findpkgMu.Unlock() - - <-v.ready // wait for entry to become ready - } else { - // Cache miss: this goroutine becomes responsible for - // populating the map entry and broadcasting its readiness. - v = &findpkgValue{ready: make(chan struct{})} - imp.findpkg[key] = v - imp.findpkgMu.Unlock() - - ioLimit <- true - v.bp, v.err = imp.conf.FindPackage(imp.conf.build(), importPath, fromDir, mode) - <-ioLimit - - if _, ok := v.err.(*build.NoGoError); ok { - v.err = nil // empty directory is not an error - } - - close(v.ready) // broadcast ready condition - } - return v.bp, v.err -} - -// importAll loads, parses, and type-checks the specified packages in -// parallel and returns their completed importInfos in unspecified order. -// -// fromPath is the package path of the importing package, if it is -// importable, "" otherwise. It is used for cycle detection. -// -// fromDir is the directory containing the import declaration that -// caused these imports. -// -func (imp *importer) importAll(fromPath, fromDir string, imports map[string]bool, mode build.ImportMode) (infos []*PackageInfo, errors []importError) { - // TODO(adonovan): opt: do the loop in parallel once - // findPackage is non-blocking. 
- var pending []*importInfo - for importPath := range imports { - bp, err := imp.findPackage(importPath, fromDir, mode) - if err != nil { - errors = append(errors, importError{ - path: importPath, - err: err, - }) - continue - } - pending = append(pending, imp.startLoad(bp)) - } - - if fromPath != "" { - // We're loading a set of imports. - // - // We must record graph edges from the importing package - // to its dependencies, and check for cycles. - imp.graphMu.Lock() - deps, ok := imp.graph[fromPath] - if !ok { - deps = make(map[string]bool) - imp.graph[fromPath] = deps - } - for _, ii := range pending { - deps[ii.path] = true - } - imp.graphMu.Unlock() - } - - for _, ii := range pending { - if fromPath != "" { - if cycle := imp.findPath(ii.path, fromPath); cycle != nil { - // Cycle-forming import: we must not await its - // completion since it would deadlock. - // - // We don't record the error in ii since - // the error is really associated with the - // cycle-forming edge, not the package itself. - // (Also it would complicate the - // invariants of importPath completion.) - if trace { - fmt.Fprintf(os.Stderr, "import cycle: %q\n", cycle) - } - continue - } - } - ii.awaitCompletion() - infos = append(infos, ii.info) - } - - return infos, errors -} - -// findPath returns an arbitrary path from 'from' to 'to' in the import -// graph, or nil if there was none. -func (imp *importer) findPath(from, to string) []string { - imp.graphMu.Lock() - defer imp.graphMu.Unlock() - - seen := make(map[string]bool) - var search func(stack []string, importPath string) []string - search = func(stack []string, importPath string) []string { - if !seen[importPath] { - seen[importPath] = true - stack = append(stack, importPath) - if importPath == to { - return stack - } - for x := range imp.graph[importPath] { - if p := search(stack, x); p != nil { - return p - } - } - } - return nil - } - return search(make([]string, 0, 20), from) -} - -// startLoad initiates the loading, parsing and type-checking of the -// specified package and its dependencies, if it has not already begun. -// -// It returns an importInfo, not necessarily in a completed state. The -// caller must call awaitCompletion() before accessing its info field. -// -// startLoad is concurrency-safe and idempotent. -// -func (imp *importer) startLoad(bp *build.Package) *importInfo { - path := bp.ImportPath - imp.importedMu.Lock() - ii, ok := imp.imported[path] - if !ok { - ii = &importInfo{path: path, complete: make(chan struct{})} - imp.imported[path] = ii - go func() { - info := imp.load(bp) - ii.Complete(info) - }() - } - imp.importedMu.Unlock() - - return ii -} - -// load implements package loading by parsing Go source files -// located by go/build. -func (imp *importer) load(bp *build.Package) *PackageInfo { - info := imp.newPackageInfo(bp.ImportPath, bp.Dir) - info.Importable = true - files, errs := imp.conf.parsePackageFiles(bp, 'g') - for _, err := range errs { - info.appendError(err) - } - - imp.addFiles(info, files, true) - - imp.progMu.Lock() - imp.prog.importMap[bp.ImportPath] = info.Pkg - imp.progMu.Unlock() - - return info -} - -// addFiles adds and type-checks the specified files to info, loading -// their dependencies if needed. The order of files determines the -// package initialization order. It may be called multiple times on the -// same package. Errors are appended to the info.Errors field. -// -// cycleCheck determines whether the imports within files create -// dependency edges that should be checked for potential cycles. 
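// Illustrative aside, not part of the vendored diff: findPath above is a plain
// depth-first search over the recorded import edges; doImport turns a hit into an
// "import cycle: a -> b -> a" error. A standalone version of the same search over a
// made-up graph:

package main

import (
	"fmt"
	"strings"
)

// findPath returns some path from 'from' to 'to' in graph, or nil if none exists.
func findPath(graph map[string]map[string]bool, from, to string) []string {
	seen := map[string]bool{}
	var search func(stack []string, node string) []string
	search = func(stack []string, node string) []string {
		if seen[node] {
			return nil
		}
		seen[node] = true
		stack = append(stack, node)
		if node == to {
			return stack
		}
		for next := range graph[node] {
			if p := search(stack, next); p != nil {
				return p
			}
		}
		return nil
	}
	return search(nil, from)
}

func main() {
	graph := map[string]map[string]bool{
		"a": {"b": true},
		"b": {"c": true},
		"c": {"a": true}, // closes the cycle
	}
	// "b" imports "c", "c" imports "a", "a" imports "b": report it the way doImport would.
	if p := findPath(graph, "b", "a"); p != nil {
		fmt.Println("import cycle:", strings.Join(append(p, p[0]), " -> "))
	}
}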
-// -func (imp *importer) addFiles(info *PackageInfo, files []*ast.File, cycleCheck bool) { - // Ensure the dependencies are loaded, in parallel. - var fromPath string - if cycleCheck { - fromPath = info.Pkg.Path() - } - // TODO(adonovan): opt: make the caller do scanImports. - // Callers with a build.Package can skip it. - imp.importAll(fromPath, info.dir, scanImports(files), 0) - - if trace { - fmt.Fprintf(os.Stderr, "%s: start %q (%d)\n", - time.Since(imp.start), info.Pkg.Path(), len(files)) - } - - // Don't call checker.Files on Unsafe, even with zero files, - // because it would mutate the package, which is a global. - if info.Pkg == types.Unsafe { - if len(files) > 0 { - panic(`"unsafe" package contains unexpected files`) - } - } else { - // Ignore the returned (first) error since we - // already collect them all in the PackageInfo. - info.checker.Files(files) - info.Files = append(info.Files, files...) - } - - if imp.conf.AfterTypeCheck != nil { - imp.conf.AfterTypeCheck(info, files) - } - - if trace { - fmt.Fprintf(os.Stderr, "%s: stop %q\n", - time.Since(imp.start), info.Pkg.Path()) - } -} - -func (imp *importer) newPackageInfo(path, dir string) *PackageInfo { - var pkg *types.Package - if path == "unsafe" { - pkg = types.Unsafe - } else { - pkg = types.NewPackage(path, "") - } - info := &PackageInfo{ - Pkg: pkg, - Info: types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - }, - errorFunc: imp.conf.TypeChecker.Error, - dir: dir, - } - - // Copy the types.Config so we can vary it across PackageInfos. - tc := imp.conf.TypeChecker - tc.IgnoreFuncBodies = false - if f := imp.conf.TypeCheckFuncBodies; f != nil { - tc.IgnoreFuncBodies = !f(path) - } - tc.Importer = closure{imp, info} - tc.Error = info.appendError // appendError wraps the user's Error function - - info.checker = types.NewChecker(&tc, imp.conf.fset(), pkg, &info.Info) - imp.progMu.Lock() - imp.prog.AllPackages[pkg] = info - imp.progMu.Unlock() - return info -} - -type closure struct { - imp *importer - info *PackageInfo -} - -func (c closure) Import(to string) (*types.Package, error) { return c.imp.doImport(c.info, to) } diff --git a/vendor/golang.org/x/tools/go/loader/util.go b/vendor/golang.org/x/tools/go/loader/util.go deleted file mode 100644 index 7f38dd74077..00000000000 --- a/vendor/golang.org/x/tools/go/loader/util.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package loader - -import ( - "go/ast" - "go/build" - "go/parser" - "go/token" - "io" - "os" - "strconv" - "sync" - - "golang.org/x/tools/go/buildutil" -) - -// We use a counting semaphore to limit -// the number of parallel I/O calls per process. -var ioLimit = make(chan bool, 10) - -// parseFiles parses the Go source files within directory dir and -// returns the ASTs of the ones that could be at least partially parsed, -// along with a list of I/O and parse errors encountered. -// -// I/O is done via ctxt, which may specify a virtual file system. -// displayPath is used to transform the filenames attached to the ASTs. 
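// Illustrative aside, not part of the vendored diff: addFiles/newPackageInfo above are
// a thin wrapper over go/types -- one types.Checker per package, with the Importer and
// Error callback varied per package. Used directly, the underlying API looks roughly
// like this (a minimal sketch that type-checks one in-memory file with the default
// importer; the package path is made up):

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	src := `package demo
import "fmt"
func Hello() { fmt.Println("hi") }`

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}

	info := &types.Info{Defs: map[*ast.Ident]types.Object{}}
	conf := types.Config{
		Importer: importer.Default(),
		Error:    func(err error) { fmt.Println("type error:", err) }, // collect every error
	}
	pkg := types.NewPackage("example.com/demo", "")
	checker := types.NewChecker(&conf, fset, pkg, info)

	// Files reports only the first error; the Error callback above sees them all.
	_ = checker.Files([]*ast.File{f})

	fmt.Println("checked:", pkg.Path(), "complete:", pkg.Complete())
}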
-// -func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, files []string, mode parser.Mode) ([]*ast.File, []error) { - if displayPath == nil { - displayPath = func(path string) string { return path } - } - var wg sync.WaitGroup - n := len(files) - parsed := make([]*ast.File, n) - errors := make([]error, n) - for i, file := range files { - if !buildutil.IsAbsPath(ctxt, file) { - file = buildutil.JoinPath(ctxt, dir, file) - } - wg.Add(1) - go func(i int, file string) { - ioLimit <- true // wait - defer func() { - wg.Done() - <-ioLimit // signal - }() - var rd io.ReadCloser - var err error - if ctxt.OpenFile != nil { - rd, err = ctxt.OpenFile(file) - } else { - rd, err = os.Open(file) - } - if err != nil { - errors[i] = err // open failed - return - } - - // ParseFile may return both an AST and an error. - parsed[i], errors[i] = parser.ParseFile(fset, displayPath(file), rd, mode) - rd.Close() - }(i, file) - } - wg.Wait() - - // Eliminate nils, preserving order. - var o int - for _, f := range parsed { - if f != nil { - parsed[o] = f - o++ - } - } - parsed = parsed[:o] - - o = 0 - for _, err := range errors { - if err != nil { - errors[o] = err - o++ - } - } - errors = errors[:o] - - return parsed, errors -} - -// scanImports returns the set of all import paths from all -// import specs in the specified files. -func scanImports(files []*ast.File) map[string]bool { - imports := make(map[string]bool) - for _, f := range files { - for _, decl := range f.Decls { - if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT { - for _, spec := range decl.Specs { - spec := spec.(*ast.ImportSpec) - - // NB: do not assume the program is well-formed! - path, err := strconv.Unquote(spec.Path.Value) - if err != nil { - continue // quietly ignore the error - } - if path == "C" { - continue // skip pseudopackage - } - imports[path] = true - } - } - } - } - return imports -} - -// ---------- Internal helpers ---------- - -// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos) -func tokenFileContainsPos(f *token.File, pos token.Pos) bool { - p := int(pos) - base := f.Base() - return base <= p && p < base+f.Size() -} diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 6e91391ce2b..2696cfb49e4 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -89,6 +89,10 @@ type golistState struct { rootDirsError error rootDirs map[string]string + goVersionOnce sync.Once + goVersionError error + goVersion string // third field of 'go version' + // vendorDirs caches the (non)existence of vendor directories. vendorDirs map[string]bool } @@ -135,6 +139,12 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { response := newDeduper() + state := &golistState{ + cfg: cfg, + ctx: ctx, + vendorDirs: map[string]bool{}, + } + // Fill in response.Sizes asynchronously if necessary. var sizeserr error var sizeswg sync.WaitGroup @@ -142,19 +152,13 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { sizeswg.Add(1) go func() { var sizes types.Sizes - sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, cfg.BuildFlags, cfg.Env, cfg.gocmdRunner, cfg.Dir) + sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) // types.SizesFor always returns nil or a *types.StdSizes. 
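// Illustrative aside, not part of the vendored diff: ioLimit above is a counting
// semaphore built from a buffered channel -- send to acquire a slot, receive to
// release it -- so only a bounded number of files are opened and parsed at once.
// The idiom on its own (the sleep stands in for opening and parsing a file):

package main

import (
	"fmt"
	"sync"
	"time"
)

var ioLimit = make(chan bool, 2) // at most 2 concurrent "I/O" operations

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			ioLimit <- true              // acquire (blocks while 2 are already in flight)
			defer func() { <-ioLimit }() // release
			time.Sleep(50 * time.Millisecond)
			fmt.Println("parsed file", i)
		}(i)
	}
	wg.Wait()
}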
response.dr.Sizes, _ = sizes.(*types.StdSizes) sizeswg.Done() }() } - state := &golistState{ - cfg: cfg, - ctx: ctx, - vendorDirs: map[string]bool{}, - } - // Determine files requested in contains patterns var containFiles []string restPatterns := make([]string, 0, len(patterns)) @@ -242,6 +246,21 @@ extractQueries: } } } + // Add root for any package that matches a pattern. This applies only to + // packages that are modified by overlays, since they are not added as + // roots automatically. + for _, pattern := range restPatterns { + match := matchPattern(pattern) + for _, pkgID := range modifiedPkgs { + pkg, ok := response.seenPackages[pkgID] + if !ok { + continue + } + if match(pkg.PkgPath) { + response.addRoot(pkg.ID) + } + } + } sizeswg.Wait() if sizeserr != nil { @@ -362,32 +381,34 @@ func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, // Fields must match go list; // see $GOROOT/src/cmd/go/internal/load/pkg.go. type jsonPackage struct { - ImportPath string - Dir string - Name string - Export string - GoFiles []string - CompiledGoFiles []string - CFiles []string - CgoFiles []string - CXXFiles []string - MFiles []string - HFiles []string - FFiles []string - SFiles []string - SwigFiles []string - SwigCXXFiles []string - SysoFiles []string - Imports []string - ImportMap map[string]string - Deps []string - Module *Module - TestGoFiles []string - TestImports []string - XTestGoFiles []string - XTestImports []string - ForTest string // q in a "p [q.test]" package, else "" - DepOnly bool + ImportPath string + Dir string + Name string + Export string + GoFiles []string + CompiledGoFiles []string + IgnoredGoFiles []string + IgnoredOtherFiles []string + CFiles []string + CgoFiles []string + CXXFiles []string + MFiles []string + HFiles []string + FFiles []string + SFiles []string + SwigFiles []string + SwigCXXFiles []string + SysoFiles []string + Imports []string + ImportMap map[string]string + Deps []string + Module *Module + TestGoFiles []string + TestImports []string + XTestGoFiles []string + XTestImports []string + ForTest string // q in a "p [q.test]" package, else "" + DepOnly bool Error *jsonPackageError } @@ -539,6 +560,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), + IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), forTest: p.ForTest, Module: p.Module, } @@ -635,6 +657,39 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse pkg.CompiledGoFiles = pkg.GoFiles } + // Temporary work-around for golang/go#39986. Parse filenames out of + // error messages. This happens if there are unrecoverable syntax + // errors in the source, so we can't match on a specific error message. 
+ if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) { + addFilenameFromPos := func(pos string) bool { + split := strings.Split(pos, ":") + if len(split) < 1 { + return false + } + filename := strings.TrimSpace(split[0]) + if filename == "" { + return false + } + if !filepath.IsAbs(filename) { + filename = filepath.Join(state.cfg.Dir, filename) + } + info, _ := os.Stat(filename) + if info == nil { + return false + } + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename) + pkg.GoFiles = append(pkg.GoFiles, filename) + return true + } + found := addFilenameFromPos(err.Pos) + // In some cases, go list only reports the error position in the + // error text, not the error position. One such case is when the + // file's package name is a keyword (see golang.org/issue/39763). + if !found { + addFilenameFromPos(err.Err) + } + } + if p.Error != nil { msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363. // Address golang.org/issue/35964 by appending import stack to error message. @@ -664,7 +719,60 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse return &response, nil } -// getPkgPath finds the package path of a directory if it's relative to a root directory. +func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { + if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 { + return false + } + + goV, err := state.getGoVersion() + if err != nil { + return false + } + + // On Go 1.14 and earlier, only add filenames from errors if the import stack is empty. + // The import stack behaves differently for these versions than newer Go versions. + if strings.HasPrefix(goV, "go1.13") || strings.HasPrefix(goV, "go1.14") { + return len(p.Error.ImportStack) == 0 + } + + // On Go 1.15 and later, only parse filenames out of error if there's no import stack, + // or the current package is at the top of the import stack. This is not guaranteed + // to work perfectly, but should avoid some cases where files in errors don't belong to this + // package. + return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath +} + +func (state *golistState) getGoVersion() (string, error) { + state.goVersionOnce.Do(func() { + var b *bytes.Buffer + // Invoke go version. Don't use invokeGo because it will supply build flags, and + // go version doesn't expect build flags. + inv := gocommand.Invocation{ + Verb: "version", + Env: state.cfg.Env, + Logf: state.cfg.Logf, + } + gocmdRunner := state.cfg.gocmdRunner + if gocmdRunner == nil { + gocmdRunner = &gocommand.Runner{} + } + b, _, _, state.goVersionError = gocmdRunner.RunRaw(state.cfg.Context, inv) + if state.goVersionError != nil { + return + } + + sp := strings.Split(b.String(), " ") + if len(sp) < 3 { + state.goVersionError = fmt.Errorf("go version output: expected 'go version ', got '%s'", b.String()) + return + } + state.goVersion = sp[2] + }) + return state.goVersion, state.goVersionError +} + +// getPkgPath finds the package path of a directory if it's relative to a root +// directory. func (state *golistState) getPkgPath(dir string) (string, bool, error) { absDir, err := filepath.Abs(dir) if err != nil { @@ -731,18 +839,26 @@ func golistargs(cfg *Config, words []string) []string { return fullargs } -// invokeGo returns the stdout of a go command invocation. -func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { +// cfgInvocation returns an Invocation that reflects cfg's settings. 
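// Illustrative aside, not part of the vendored diff: getGoVersion above runs
// `go version` once (memoized with sync.Once) and keeps the third whitespace-separated
// field, e.g. "go1.15.2" out of "go version go1.15.2 linux/amd64". The parsing step in
// isolation (the sample output string is made up):

package main

import (
	"fmt"
	"strings"
)

func goVersionField(out string) (string, error) {
	fields := strings.Fields(out) // e.g. ["go", "version", "go1.15.2", "linux/amd64"]
	if len(fields) < 3 {
		return "", fmt.Errorf("unexpected 'go version' output: %q", out)
	}
	return fields[2], nil
}

func main() {
	v, err := goVersionField("go version go1.15.2 linux/amd64")
	fmt.Println(v, err) // go1.15.2 <nil>
}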
+func (state *golistState) cfgInvocation() gocommand.Invocation { cfg := state.cfg - - inv := gocommand.Invocation{ - Verb: verb, - Args: args, + return gocommand.Invocation{ BuildFlags: cfg.BuildFlags, + ModFile: cfg.modFile, + ModFlag: cfg.modFlag, Env: cfg.Env, Logf: cfg.Logf, WorkingDir: cfg.Dir, } +} + +// invokeGo returns the stdout of a go command invocation. +func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { + cfg := state.cfg + + inv := state.cfgInvocation() + inv.Verb = verb + inv.Args = args gocmdRunner := cfg.gocmdRunner if gocmdRunner == nil { gocmdRunner = &gocommand.Runner{} diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index b82c90d7c66..de2c1dc5793 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -1,3 +1,7 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package packages import ( @@ -8,9 +12,12 @@ import ( "log" "os" "path/filepath" + "regexp" "sort" "strconv" "strings" + + "golang.org/x/tools/internal/gocommand" ) // processGolistOverlay provides rudimentary support for adding @@ -89,9 +96,19 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif // because the file is generated in another directory. testVariantOf = p continue nextPackage + } else if !isTestFile && hasTestFiles(p) { + // We're examining a test variant, but the overlaid file is + // a non-test file. Because the overlay implementation + // (currently) only adds a file to one package, skip this + // package, so that we can add the file to the production + // variant of the package. (https://golang.org/issue/36857 + // tracks handling overlays on both the production and test + // variant of a package). + continue nextPackage } - // We must have already seen the package of which this is a test variant. if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { + // We have already seen the production version of the + // for which p is a test variant. if hasTestFiles(p) { testVariantOf = pkg } @@ -102,8 +119,11 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif } } } - // The overlay could have included an entirely new package. - if pkg == nil { + // The overlay could have included an entirely new package or an + // ad-hoc package. An ad-hoc package is one that we have manually + // constructed from inadequate `go list` results for a file= query. + // It will have the ID command-line-arguments. + if pkg == nil || pkg.ID == "command-line-arguments" { // Try to find the module or gopath dir the file is contained in. // Then for modules, add the module opath to the beginning. pkgPath, ok, err := state.getPkgPath(dir) @@ -113,42 +133,55 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif if !ok { break } + var forTest string // only set for x tests isXTest := strings.HasSuffix(pkgName, "_test") if isXTest { + forTest = pkgPath pkgPath += "_test" } id := pkgPath - if isTestFile && !isXTest { - id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) - } - // Try to reclaim a package with the same ID, if it exists in the response. 
- for _, p := range response.dr.Packages { - if reclaimPackage(p, id, opath, contents) { - pkg = p - break + if isTestFile { + if isXTest { + id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest) + } else { + id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) } } - // Otherwise, create a new package. - if pkg == nil { - pkg = &Package{ - PkgPath: pkgPath, - ID: id, - Name: pkgName, - Imports: make(map[string]*Package), + if pkg != nil { + // TODO(rstambler): We should change the package's path and ID + // here. The only issue is that this messes with the roots. + } else { + // Try to reclaim a package with the same ID, if it exists in the response. + for _, p := range response.dr.Packages { + if reclaimPackage(p, id, opath, contents) { + pkg = p + break + } } - response.addPackage(pkg) - havePkgs[pkg.PkgPath] = id - // Add the production package's sources for a test variant. - if isTestFile && !isXTest && testVariantOf != nil { - pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) - // Add the package under test and its imports to the test variant. - pkg.forTest = testVariantOf.PkgPath - for k, v := range testVariantOf.Imports { - pkg.Imports[k] = &Package{ID: v.ID} + // Otherwise, create a new package. + if pkg == nil { + pkg = &Package{ + PkgPath: pkgPath, + ID: id, + Name: pkgName, + Imports: make(map[string]*Package), + } + response.addPackage(pkg) + havePkgs[pkg.PkgPath] = id + // Add the production package's sources for a test variant. + if isTestFile && !isXTest && testVariantOf != nil { + pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) + // Add the package under test and its imports to the test variant. + pkg.forTest = testVariantOf.PkgPath + for k, v := range testVariantOf.Imports { + pkg.Imports[k] = &Package{ID: v.ID} + } + } + if isXTest { + pkg.forTest = forTest } } - // TODO(rstambler): Handle forTest for x_tests. } } if !fileExists { @@ -224,7 +257,7 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif return modifiedPkgs, needPkgs, err } -// resolveImport finds the the ID of a package given its import path. +// resolveImport finds the ID of a package given its import path. // In particular, it will find the right vendored copy when in GOPATH mode. func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) { env, err := state.getEnv() @@ -299,24 +332,25 @@ func (state *golistState) determineRootDirs() (map[string]string, error) { } func (state *golistState) determineRootDirsModules() (map[string]string, error) { - // This will only return the root directory for the main module. - // For now we only support overlays in main modules. + // List all of the modules--the first will be the directory for the main + // module. Any replaced modules will also need to be treated as roots. // Editing files in the module cache isn't a great idea, so we don't - // plan to ever support that, but editing files in replaced modules - // is something we may want to support. To do that, we'll want to - // do a go list -m to determine the replaced module's module path and - // directory, and then a go list -m {{with .Replace}}{{.Dir}}{{end}} - // from the main module to determine if that module is actually a replacement. - // See bcmills's comment here: https://github.com/golang/go/issues/37629#issuecomment-594179751 - // for more information. 
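// Illustrative aside, not part of the vendored diff: the rewritten
// determineRootDirsModules just below streams the output of `go list -m -json all`,
// treating the first module (the main module) and any module with a Replace directive
// as root directories. Decoding that stream is a json.Decoder loop over concatenated
// JSON objects; a standalone sketch with fabricated input:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type moduleJSON struct {
	Path    string
	Dir     string
	Replace *moduleJSON
}

func main() {
	// `go list -m -json all` prints one JSON object per module, back to back.
	out := `{"Path":"example.com/app","Dir":"/src/app"}
{"Path":"example.com/dep","Dir":"/go/pkg/mod/example.com/dep@v1.0.0"}
{"Path":"example.com/local","Dir":"/src/local","Replace":{"Path":"../local","Dir":"/src/local"}}`

	roots := map[string]string{}
	i := 0
	for dec := json.NewDecoder(strings.NewReader(out)); dec.More(); i++ {
		var m moduleJSON
		if err := dec.Decode(&m); err != nil {
			panic(err)
		}
		if i == 0 || m.Replace != nil { // main module, or a replaced module
			roots[m.Dir] = m.Path
		}
	}
	fmt.Println(roots) // map[/src/app:example.com/app /src/local:example.com/local]
}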
- out, err := state.invokeGo("list", "-m", "-json") + // plan to ever support that. + out, err := state.invokeGo("list", "-m", "-json", "all") if err != nil { - return nil, err + // 'go list all' will fail if we're outside of a module and + // GO111MODULE=on. Try falling back without 'all'. + var innerErr error + out, innerErr = state.invokeGo("list", "-m", "-json") + if innerErr != nil { + return nil, err + } } - m := map[string]string{} - type jsonMod struct{ Path, Dir string } + roots := map[string]string{} + modules := map[string]string{} + var i int for dec := json.NewDecoder(out); dec.More(); { - mod := new(jsonMod) + mod := new(gocommand.ModuleJSON) if err := dec.Decode(mod); err != nil { return nil, err } @@ -326,10 +360,15 @@ func (state *golistState) determineRootDirsModules() (map[string]string, error) if err != nil { return nil, err } - m[absDir] = mod.Path + modules[absDir] = mod.Path + // The first result is the main module. + if i == 0 || mod.Replace != nil && mod.Replace.Path != "" { + roots[absDir] = mod.Path + } } + i++ } - return m, nil + return roots, nil } func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { @@ -455,3 +494,79 @@ func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) p.Name = newName } } + +// This function is copy-pasted from +// https://github.com/golang/go/blob/9706f510a5e2754595d716bd64be8375997311fb/src/cmd/go/internal/search/search.go#L360. +// It should be deleted when we remove support for overlays from go/packages. +// +// NOTE: This does not handle any ./... or ./ style queries, as this function +// doesn't know the working directory. +// +// matchPattern(pattern)(name) reports whether +// name matches pattern. Pattern is a limited glob +// pattern in which '...' means 'any string' and there +// is no other special syntax. +// Unfortunately, there are two special cases. Quoting "go help packages": +// +// First, /... at the end of the pattern can match an empty string, +// so that net/... matches both net and packages in its subdirectories, like net/http. +// Second, any slash-separated pattern element containing a wildcard never +// participates in a match of the "vendor" element in the path of a vendored +// package, so that ./... does not match packages in subdirectories of +// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. +// Note, however, that a directory named vendor that itself contains code +// is not a vendored package: cmd/vendor would be a command named vendor, +// and the pattern cmd/... matches it. +func matchPattern(pattern string) func(name string) bool { + // Convert pattern to regular expression. + // The strategy for the trailing /... is to nest it in an explicit ? expression. + // The strategy for the vendor exclusion is to change the unmatchable + // vendor strings to a disallowed code point (vendorChar) and to use + // "(anything but that codepoint)*" as the implementation of the ... wildcard. + // This is a bit complicated but the obvious alternative, + // namely a hand-written search like in most shell glob matchers, + // is too easy to make accidentally exponential. + // Using package regexp guarantees linear-time matching. 
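// Illustrative aside, not part of the vendored diff: matchPattern, defined just below,
// compiles the limited "..." glob into an anchored regexp with the two special cases
// described above (a trailing /... may match the empty string, and wildcards never
// match a "vendor" path element). Dropped into the same package, a scratch helper
// (exampleMatchPattern is hypothetical, not part of the patch) shows a few hand-checked
// inputs:

func exampleMatchPattern() {
	match := matchPattern("net/...")

	for _, name := range []string{"net", "net/http", "network", "net/vendor/foo"} {
		fmt.Printf("%-16s %v\n", name, match(name))
	}
	// net              true   (the trailing /... matches the empty string)
	// net/http         true
	// network          false
	// net/vendor/foo   false  (wildcards never match a "vendor" element)
}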
+ + const vendorChar = "\x00" + + if strings.Contains(pattern, vendorChar) { + return func(name string) bool { return false } + } + + re := regexp.QuoteMeta(pattern) + re = replaceVendor(re, vendorChar) + switch { + case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): + re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` + case re == vendorChar+`/\.\.\.`: + re = `(/vendor|/` + vendorChar + `/\.\.\.)` + case strings.HasSuffix(re, `/\.\.\.`): + re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` + } + re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`) + + reg := regexp.MustCompile(`^` + re + `$`) + + return func(name string) bool { + if strings.Contains(name, vendorChar) { + return false + } + return reg.MatchString(replaceVendor(name, vendorChar)) + } +} + +// replaceVendor returns the result of replacing +// non-trailing vendor path elements in x with repl. +func replaceVendor(x, repl string) string { + if !strings.Contains(x, "vendor") { + return x + } + elem := strings.Split(x, "/") + for i := 0; i < len(elem)-1; i++ { + if elem[i] == "vendor" { + elem[i] = repl + } + } + return strings.Join(elem, "/") +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 04053f1e7d4..38475e8712a 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -144,6 +144,12 @@ type Config struct { // the build system's query tool. BuildFlags []string + // modFile will be used for -modfile in go command invocations. + modFile string + + // modFlag will be used for -modfile in go command invocations. + modFlag string + // Fset provides source position information for syntax trees and types. // If Fset is nil, Load will use a new fileset, but preserve Fset's value. Fset *token.FileSet @@ -289,6 +295,11 @@ type Package struct { // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. OtherFiles []string + // IgnoredFiles lists source files that are not part of the package + // using the current build configuration but that might be part of + // the package using other build configurations. + IgnoredFiles []string + // ExportFile is the absolute path to a file containing type // information for the package as provided by the build system. 
ExportFile string @@ -361,6 +372,12 @@ func init() { packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) { config.(*Config).gocmdRunner = runner } + packagesinternal.SetModFile = func(config interface{}, value string) { + config.(*Config).modFile = value + } + packagesinternal.SetModFlag = func(config interface{}, value string) { + config.(*Config).modFlag = value + } packagesinternal.TypecheckCgo = int(typecheckCgo) } @@ -404,6 +421,7 @@ type flatPackage struct { GoFiles []string `json:",omitempty"` CompiledGoFiles []string `json:",omitempty"` OtherFiles []string `json:",omitempty"` + IgnoredFiles []string `json:",omitempty"` ExportFile string `json:",omitempty"` Imports map[string]string `json:",omitempty"` } @@ -426,6 +444,7 @@ func (p *Package) MarshalJSON() ([]byte, error) { GoFiles: p.GoFiles, CompiledGoFiles: p.CompiledGoFiles, OtherFiles: p.OtherFiles, + IgnoredFiles: p.IgnoredFiles, ExportFile: p.ExportFile, } if len(p.Imports) > 0 { @@ -712,7 +731,8 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { result[i] = lpkg.Package } for i := range ld.pkgs { - // Clear all unrequested fields, for extra de-Hyrum-ization. + // Clear all unrequested fields, + // to catch programs that use more than they request. if ld.requestedMode&NeedName == 0 { ld.pkgs[i].Name = "" ld.pkgs[i].PkgPath = "" @@ -720,6 +740,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { if ld.requestedMode&NeedFiles == 0 { ld.pkgs[i].GoFiles = nil ld.pkgs[i].OtherFiles = nil + ld.pkgs[i].IgnoredFiles = nil } if ld.requestedMode&NeedCompiledGoFiles == 0 { ld.pkgs[i].CompiledGoFiles = nil diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go deleted file mode 100644 index cffd7acbee7..00000000000 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ /dev/null @@ -1,524 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package objectpath defines a naming scheme for types.Objects -// (that is, named entities in Go programs) relative to their enclosing -// package. -// -// Type-checker objects are canonical, so they are usually identified by -// their address in memory (a pointer), but a pointer has meaning only -// within one address space. By contrast, objectpath names allow the -// identity of an object to be sent from one program to another, -// establishing a correspondence between types.Object variables that are -// distinct but logically equivalent. -// -// A single object may have multiple paths. In this example, -// type A struct{ X int } -// type B A -// the field X has two paths due to its membership of both A and B. -// The For(obj) function always returns one of these paths, arbitrarily -// but consistently. -package objectpath - -import ( - "fmt" - "strconv" - "strings" - - "go/types" -) - -// A Path is an opaque name that identifies a types.Object -// relative to its package. Conceptually, the name consists of a -// sequence of destructuring operations applied to the package scope -// to obtain the original object. -// The name does not include the package itself. -type Path string - -// Encoding -// -// An object path is a textual and (with training) human-readable encoding -// of a sequence of destructuring operators, starting from a types.Package. 
-// The sequences represent a path through the package/object/type graph. -// We classify these operators by their type: -// -// PO package->object Package.Scope.Lookup -// OT object->type Object.Type -// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] -// TO type->object Type.{At,Field,Method,Obj} [AFMO] -// -// All valid paths start with a package and end at an object -// and thus may be defined by the regular language: -// -// objectpath = PO (OT TT* TO)* -// -// The concrete encoding follows directly: -// - The only PO operator is Package.Scope.Lookup, which requires an identifier. -// - The only OT operator is Object.Type, -// which we encode as '.' because dot cannot appear in an identifier. -// - The TT operators are encoded as [EKPRU]. -// - The OT operators are encoded as [AFMO]; -// three of these (At,Field,Method) require an integer operand, -// which is encoded as a string of decimal digits. -// These indices are stable across different representations -// of the same package, even source and export data. -// -// In the example below, -// -// package p -// -// type T interface { -// f() (a string, b struct{ X int }) -// } -// -// field X has the path "T.UM0.RA1.F0", -// representing the following sequence of operations: -// -// p.Lookup("T") T -// .Type().Underlying().Method(0). f -// .Type().Results().At(1) b -// .Type().Field(0) X -// -// The encoding is not maximally compact---every R or P is -// followed by an A, for example---but this simplifies the -// encoder and decoder. -// -const ( - // object->type operators - opType = '.' // .Type() (Object) - - // type->type operators - opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) - opKey = 'K' // .Key() (Map) - opParams = 'P' // .Params() (Signature) - opResults = 'R' // .Results() (Signature) - opUnderlying = 'U' // .Underlying() (Named) - - // type->object operators - opAt = 'A' // .At(i) (Tuple) - opField = 'F' // .Field(i) (Struct) - opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) - opObj = 'O' // .Obj() (Named) -) - -// The For function returns the path to an object relative to its package, -// or an error if the object is not accessible from the package's Scope. -// -// The For function guarantees to return a path only for the following objects: -// - package-level types -// - exported package-level non-types -// - methods -// - parameter and result variables -// - struct fields -// These objects are sufficient to define the API of their package. -// The objects described by a package's export data are drawn from this set. -// -// For does not return a path for predeclared names, imported package -// names, local names, and unexported package-level names (except -// types). -// -// Example: given this definition, -// -// package p -// -// type T interface { -// f() (a string, b struct{ X int }) -// } -// -// For(X) would return a path that denotes the following sequence of operations: -// -// p.Scope().Lookup("T") (TypeName T) -// .Type().Underlying().Method(0). (method Func f) -// .Type().Results().At(1) (field Var b) -// .Type().Field(0) (field Var X) -// -// where p is the package (*types.Package) to which X belongs. -func For(obj types.Object) (Path, error) { - pkg := obj.Pkg() - - // This table lists the cases of interest. 
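// Illustrative aside, not part of the vendored diff: the encoding above can be
// exercised end to end with For and Object (imported here from the package's
// non-vendored home, golang.org/x/tools/go/types/objectpath, since this hunk only
// drops the unused vendored copy). The sketch type-checks a tiny package and
// round-trips the path of a struct field; the expected path in the comment is
// illustrative:

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

func main() {
	src := `package p
type T struct{ X int }`

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{Defs: map[*ast.Ident]types.Object{}}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("example.com/p", fset, []*ast.File{f}, info)
	if err != nil {
		panic(err)
	}

	// The field X, located through the type checker.
	x := pkg.Scope().Lookup("T").Type().Underlying().(*types.Struct).Field(0)

	path, err := objectpath.For(x)
	fmt.Println(path, err) // e.g. "T.UF0": Lookup("T") .Type().Underlying() .Field(0)

	obj, err := objectpath.Object(pkg, path)
	fmt.Println(obj, err, obj == types.Object(x)) // decodes back to the same *types.Var
}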
- // - // Object Action - // ------ ------ - // nil reject - // builtin reject - // pkgname reject - // label reject - // var - // package-level accept - // func param/result accept - // local reject - // struct field accept - // const - // package-level accept - // local reject - // func - // package-level accept - // init functions reject - // concrete method accept - // interface method accept - // type - // package-level accept - // local reject - // - // The only accessible package-level objects are members of pkg itself. - // - // The cases are handled in four steps: - // - // 1. reject nil and builtin - // 2. accept package-level objects - // 3. reject obviously invalid objects - // 4. search the API for the path to the param/result/field/method. - - // 1. reference to nil or builtin? - if pkg == nil { - return "", fmt.Errorf("predeclared %s has no path", obj) - } - scope := pkg.Scope() - - // 2. package-level object? - if scope.Lookup(obj.Name()) == obj { - // Only exported objects (and non-exported types) have a path. - // Non-exported types may be referenced by other objects. - if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { - return "", fmt.Errorf("no path for non-exported %v", obj) - } - return Path(obj.Name()), nil - } - - // 3. Not a package-level object. - // Reject obviously non-viable cases. - switch obj := obj.(type) { - case *types.Const, // Only package-level constants have a path. - *types.TypeName, // Only package-level types have a path. - *types.Label, // Labels are function-local. - *types.PkgName: // PkgNames are file-local. - return "", fmt.Errorf("no path for %v", obj) - - case *types.Var: - // Could be: - // - a field (obj.IsField()) - // - a func parameter or result - // - a local var. - // Sadly there is no way to distinguish - // a param/result from a local - // so we must proceed to the find. - - case *types.Func: - // A func, if not package-level, must be a method. - if recv := obj.Type().(*types.Signature).Recv(); recv == nil { - return "", fmt.Errorf("func is not a method: %v", obj) - } - // TODO(adonovan): opt: if the method is concrete, - // do a specialized version of the rest of this function so - // that it's O(1) not O(|scope|). Basically 'find' is needed - // only for struct fields and interface methods. - - default: - panic(obj) - } - - // 4. Search the API for the path to the var (field/param/result) or method. - - // First inspect package-level named types. - // In the presence of path aliases, these give - // the best paths because non-types may - // refer to types, but not the reverse. - empty := make([]byte, 0, 48) // initial space - names := scope.Names() - for _, name := range names { - o := scope.Lookup(name) - tname, ok := o.(*types.TypeName) - if !ok { - continue // handle non-types in second pass - } - - path := append(empty, name...) - path = append(path, opType) - - T := o.Type() - - if tname.IsAlias() { - // type alias - if r := find(obj, T, path); r != nil { - return Path(r), nil - } - } else { - // defined (named) type - if r := find(obj, T.Underlying(), append(path, opUnderlying)); r != nil { - return Path(r), nil - } - } - } - - // Then inspect everything else: - // non-types, and declared methods of defined types. - for _, name := range names { - o := scope.Lookup(name) - path := append(empty, name...) 
- if _, ok := o.(*types.TypeName); !ok { - if o.Exported() { - // exported non-type (const, var, func) - if r := find(obj, o.Type(), append(path, opType)); r != nil { - return Path(r), nil - } - } - continue - } - - // Inspect declared methods of defined types. - if T, ok := o.Type().(*types.Named); ok { - path = append(path, opType) - for i := 0; i < T.NumMethods(); i++ { - m := T.Method(i) - path2 := appendOpArg(path, opMethod, i) - if m == obj { - return Path(path2), nil // found declared method - } - if r := find(obj, m.Type(), append(path2, opType)); r != nil { - return Path(r), nil - } - } - } - } - - return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) -} - -func appendOpArg(path []byte, op byte, arg int) []byte { - path = append(path, op) - path = strconv.AppendInt(path, int64(arg), 10) - return path -} - -// find finds obj within type T, returning the path to it, or nil if not found. -func find(obj types.Object, T types.Type, path []byte) []byte { - switch T := T.(type) { - case *types.Basic, *types.Named: - // Named types belonging to pkg were handled already, - // so T must belong to another package. No path. - return nil - case *types.Pointer: - return find(obj, T.Elem(), append(path, opElem)) - case *types.Slice: - return find(obj, T.Elem(), append(path, opElem)) - case *types.Array: - return find(obj, T.Elem(), append(path, opElem)) - case *types.Chan: - return find(obj, T.Elem(), append(path, opElem)) - case *types.Map: - if r := find(obj, T.Key(), append(path, opKey)); r != nil { - return r - } - return find(obj, T.Elem(), append(path, opElem)) - case *types.Signature: - if r := find(obj, T.Params(), append(path, opParams)); r != nil { - return r - } - return find(obj, T.Results(), append(path, opResults)) - case *types.Struct: - for i := 0; i < T.NumFields(); i++ { - f := T.Field(i) - path2 := appendOpArg(path, opField, i) - if f == obj { - return path2 // found field var - } - if r := find(obj, f.Type(), append(path2, opType)); r != nil { - return r - } - } - return nil - case *types.Tuple: - for i := 0; i < T.Len(); i++ { - v := T.At(i) - path2 := appendOpArg(path, opAt, i) - if v == obj { - return path2 // found param/result var - } - if r := find(obj, v.Type(), append(path2, opType)); r != nil { - return r - } - } - return nil - case *types.Interface: - for i := 0; i < T.NumMethods(); i++ { - m := T.Method(i) - path2 := appendOpArg(path, opMethod, i) - if m == obj { - return path2 // found interface method - } - if r := find(obj, m.Type(), append(path2, opType)); r != nil { - return r - } - } - return nil - } - panic(T) -} - -// Object returns the object denoted by path p within the package pkg. -func Object(pkg *types.Package, p Path) (types.Object, error) { - if p == "" { - return nil, fmt.Errorf("empty path") - } - - pathstr := string(p) - var pkgobj, suffix string - if dot := strings.IndexByte(pathstr, opType); dot < 0 { - pkgobj = pathstr - } else { - pkgobj = pathstr[:dot] - suffix = pathstr[dot:] // suffix starts with "." - } - - obj := pkg.Scope().Lookup(pkgobj) - if obj == nil { - return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) - } - - // abstraction of *types.{Pointer,Slice,Array,Chan,Map} - type hasElem interface { - Elem() types.Type - } - // abstraction of *types.{Interface,Named} - type hasMethods interface { - Method(int) *types.Func - NumMethods() int - } - - // The loop state is the pair (t, obj), - // exactly one of which is non-nil, initially obj. - // All suffixes start with '.' 
(the only object->type operation), - // followed by optional type->type operations, - // then a type->object operation. - // The cycle then repeats. - var t types.Type - for suffix != "" { - code := suffix[0] - suffix = suffix[1:] - - // Codes [AFM] have an integer operand. - var index int - switch code { - case opAt, opField, opMethod: - rest := strings.TrimLeft(suffix, "0123456789") - numerals := suffix[:len(suffix)-len(rest)] - suffix = rest - i, err := strconv.Atoi(numerals) - if err != nil { - return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) - } - index = int(i) - case opObj: - // no operand - default: - // The suffix must end with a type->object operation. - if suffix == "" { - return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) - } - } - - if code == opType { - if t != nil { - return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) - } - t = obj.Type() - obj = nil - continue - } - - if t == nil { - return nil, fmt.Errorf("invalid path: code %q in object context", code) - } - - // Inv: t != nil, obj == nil - - switch code { - case opElem: - hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) - } - t = hasElem.Elem() - - case opKey: - mapType, ok := t.(*types.Map) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) - } - t = mapType.Key() - - case opParams: - sig, ok := t.(*types.Signature) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) - } - t = sig.Params() - - case opResults: - sig, ok := t.(*types.Signature) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) - } - t = sig.Results() - - case opUnderlying: - named, ok := t.(*types.Named) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t) - } - t = named.Underlying() - - case opAt: - tuple, ok := t.(*types.Tuple) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %s, want tuple)", code, t, t) - } - if n := tuple.Len(); index >= n { - return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) - } - obj = tuple.At(index) - t = nil - - case opField: - structType, ok := t.(*types.Struct) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) - } - if n := structType.NumFields(); index >= n { - return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) - } - obj = structType.Field(index) - t = nil - - case opMethod: - hasMethods, ok := t.(hasMethods) // Interface or Named - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %s, want interface or named)", code, t, t) - } - if n := hasMethods.NumMethods(); index >= n { - return nil, fmt.Errorf("method index %d out of range [0-%d)", index, n) - } - obj = hasMethods.Method(index) - t = nil - - case opObj: - named, ok := t.(*types.Named) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t) - } - obj = named.Obj() - t = nil - - default: - return nil, fmt.Errorf("invalid path: unknown code %q", code) - } - } - - if obj.Pkg() != pkg { - return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) - } - - return obj, nil // success -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go 
deleted file mode 100644 index 38f596daf9e..00000000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/callee.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeutil - -import ( - "go/ast" - "go/types" - - "golang.org/x/tools/go/ast/astutil" -) - -// Callee returns the named target of a function call, if any: -// a function, method, builtin, or variable. -func Callee(info *types.Info, call *ast.CallExpr) types.Object { - var obj types.Object - switch fun := astutil.Unparen(call.Fun).(type) { - case *ast.Ident: - obj = info.Uses[fun] // type, var, builtin, or declared func - case *ast.SelectorExpr: - if sel, ok := info.Selections[fun]; ok { - obj = sel.Obj() // method or field - } else { - obj = info.Uses[fun.Sel] // qualified identifier? - } - } - if _, ok := obj.(*types.TypeName); ok { - return nil // T(x) is a conversion, not a call - } - return obj -} - -// StaticCallee returns the target (function or method) of a static -// function call, if any. It returns nil for calls to builtins. -func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { - if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { - return f - } - return nil -} - -func interfaceMethod(f *types.Func) bool { - recv := f.Type().(*types.Signature).Recv() - return recv != nil && types.IsInterface(recv.Type()) -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go deleted file mode 100644 index 9c441dba9c0..00000000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/imports.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeutil - -import "go/types" - -// Dependencies returns all dependencies of the specified packages. -// -// Dependent packages appear in topological order: if package P imports -// package Q, Q appears earlier than P in the result. -// The algorithm follows import statements in the order they -// appear in the source code, so the result is a total order. -// -func Dependencies(pkgs ...*types.Package) []*types.Package { - var result []*types.Package - seen := make(map[*types.Package]bool) - var visit func(pkgs []*types.Package) - visit = func(pkgs []*types.Package) { - for _, p := range pkgs { - if !seen[p] { - seen[p] = true - visit(p.Imports()) - result = append(result, p) - } - } - } - visit(pkgs) - return result -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go deleted file mode 100644 index c7f75450064..00000000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package typeutil defines various utilities for types, such as Map, -// a mapping from types.Type to interface{} values. -package typeutil // import "golang.org/x/tools/go/types/typeutil" - -import ( - "bytes" - "fmt" - "go/types" - "reflect" -) - -// Map is a hash-table-based mapping from types (types.Type) to -// arbitrary interface{} values. The concrete types that implement -// the Type interface are pointers. 
Since they are not canonicalized, -// == cannot be used to check for equivalence, and thus we cannot -// simply use a Go map. -// -// Just as with map[K]V, a nil *Map is a valid empty map. -// -// Not thread-safe. -// -type Map struct { - hasher Hasher // shared by many Maps - table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused - length int // number of map entries -} - -// entry is an entry (key/value association) in a hash bucket. -type entry struct { - key types.Type - value interface{} -} - -// SetHasher sets the hasher used by Map. -// -// All Hashers are functionally equivalent but contain internal state -// used to cache the results of hashing previously seen types. -// -// A single Hasher created by MakeHasher() may be shared among many -// Maps. This is recommended if the instances have many keys in -// common, as it will amortize the cost of hash computation. -// -// A Hasher may grow without bound as new types are seen. Even when a -// type is deleted from the map, the Hasher never shrinks, since other -// types in the map may reference the deleted type indirectly. -// -// Hashers are not thread-safe, and read-only operations such as -// Map.Lookup require updates to the hasher, so a full Mutex lock (not a -// read-lock) is require around all Map operations if a shared -// hasher is accessed from multiple threads. -// -// If SetHasher is not called, the Map will create a private hasher at -// the first call to Insert. -// -func (m *Map) SetHasher(hasher Hasher) { - m.hasher = hasher -} - -// Delete removes the entry with the given key, if any. -// It returns true if the entry was found. -// -func (m *Map) Delete(key types.Type) bool { - if m != nil && m.table != nil { - hash := m.hasher.Hash(key) - bucket := m.table[hash] - for i, e := range bucket { - if e.key != nil && types.Identical(key, e.key) { - // We can't compact the bucket as it - // would disturb iterators. - bucket[i] = entry{} - m.length-- - return true - } - } - } - return false -} - -// At returns the map entry for the given key. -// The result is nil if the entry is not present. -// -func (m *Map) At(key types.Type) interface{} { - if m != nil && m.table != nil { - for _, e := range m.table[m.hasher.Hash(key)] { - if e.key != nil && types.Identical(key, e.key) { - return e.value - } - } - } - return nil -} - -// Set sets the map entry for key to val, -// and returns the previous entry, if any. -func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) { - if m.table != nil { - hash := m.hasher.Hash(key) - bucket := m.table[hash] - var hole *entry - for i, e := range bucket { - if e.key == nil { - hole = &bucket[i] - } else if types.Identical(key, e.key) { - prev = e.value - bucket[i].value = value - return - } - } - - if hole != nil { - *hole = entry{key, value} // overwrite deleted entry - } else { - m.table[hash] = append(bucket, entry{key, value}) - } - } else { - if m.hasher.memo == nil { - m.hasher = MakeHasher() - } - hash := m.hasher.Hash(key) - m.table = map[uint32][]entry{hash: {entry{key, value}}} - } - - m.length++ - return -} - -// Len returns the number of map entries. -func (m *Map) Len() int { - if m != nil { - return m.length - } - return 0 -} - -// Iterate calls function f on each entry in the map in unspecified order. 
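// Illustrative aside, not part of the vendored diff: typeutil.Map above exists because
// two types.Type values can be Identical without being ==, so a plain Go map keyed on
// types.Type misses structurally equal keys. A small sketch of the difference, importing
// the package from its upstream location (this hunk only removes the unused vendored copy):

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	// Two distinct but structurally identical types: []int and []int.
	a := types.NewSlice(types.Typ[types.Int])
	b := types.NewSlice(types.Typ[types.Int])
	fmt.Println(types.Identical(a, b), a == b) // true false

	// A plain map keyed on types.Type sees two different keys...
	plain := map[types.Type]string{a: "[]int"}
	fmt.Println(plain[b] == "") // true: the lookup with b misses

	// ...while typeutil.Map hashes by structure and sees one.
	var m typeutil.Map
	m.Set(a, "[]int")
	fmt.Println(m.At(b)) // []int
}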
-// -// If f should mutate the map, Iterate provides the same guarantees as -// Go maps: if f deletes a map entry that Iterate has not yet reached, -// f will not be invoked for it, but if f inserts a map entry that -// Iterate has not yet reached, whether or not f will be invoked for -// it is unspecified. -// -func (m *Map) Iterate(f func(key types.Type, value interface{})) { - if m != nil { - for _, bucket := range m.table { - for _, e := range bucket { - if e.key != nil { - f(e.key, e.value) - } - } - } - } -} - -// Keys returns a new slice containing the set of map keys. -// The order is unspecified. -func (m *Map) Keys() []types.Type { - keys := make([]types.Type, 0, m.Len()) - m.Iterate(func(key types.Type, _ interface{}) { - keys = append(keys, key) - }) - return keys -} - -func (m *Map) toString(values bool) string { - if m == nil { - return "{}" - } - var buf bytes.Buffer - fmt.Fprint(&buf, "{") - sep := "" - m.Iterate(func(key types.Type, value interface{}) { - fmt.Fprint(&buf, sep) - sep = ", " - fmt.Fprint(&buf, key) - if values { - fmt.Fprintf(&buf, ": %q", value) - } - }) - fmt.Fprint(&buf, "}") - return buf.String() -} - -// String returns a string representation of the map's entries. -// Values are printed using fmt.Sprintf("%v", v). -// Order is unspecified. -// -func (m *Map) String() string { - return m.toString(true) -} - -// KeysString returns a string representation of the map's key set. -// Order is unspecified. -// -func (m *Map) KeysString() string { - return m.toString(false) -} - -//////////////////////////////////////////////////////////////////////// -// Hasher - -// A Hasher maps each type to its hash value. -// For efficiency, a hasher uses memoization; thus its memory -// footprint grows monotonically over time. -// Hashers are not thread-safe. -// Hashers have reference semantics. -// Call MakeHasher to create a Hasher. -type Hasher struct { - memo map[types.Type]uint32 -} - -// MakeHasher returns a new Hasher instance. -func MakeHasher() Hasher { - return Hasher{make(map[types.Type]uint32)} -} - -// Hash computes a hash value for the given type t such that -// Identical(t, t') => Hash(t) == Hash(t'). -func (h Hasher) Hash(t types.Type) uint32 { - hash, ok := h.memo[t] - if !ok { - hash = h.hashFor(t) - h.memo[t] = hash - } - return hash -} - -// hashString computes the Fowler–Noll–Vo hash of s. -func hashString(s string) uint32 { - var h uint32 - for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -// hashFor computes the hash of t. -func (h Hasher) hashFor(t types.Type) uint32 { - // See Identical for rationale. - switch t := t.(type) { - case *types.Basic: - return uint32(t.Kind()) - - case *types.Array: - return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) - - case *types.Slice: - return 9049 + 2*h.Hash(t.Elem()) - - case *types.Struct: - var hash uint32 = 9059 - for i, n := 0, t.NumFields(); i < n; i++ { - f := t.Field(i) - if f.Anonymous() { - hash += 8861 - } - hash += hashString(t.Tag(i)) - hash += hashString(f.Name()) // (ignore f.Pkg) - hash += h.Hash(f.Type()) - } - return hash - - case *types.Pointer: - return 9067 + 2*h.Hash(t.Elem()) - - case *types.Signature: - var hash uint32 = 9091 - if t.Variadic() { - hash *= 8863 - } - return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) - - case *types.Interface: - var hash uint32 = 9103 - for i, n := 0, t.NumMethods(); i < n; i++ { - // See go/types.identicalMethods for rationale. - // Method order is not significant. - // Ignore m.Pkg(). 
- m := t.Method(i) - hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type()) - } - return hash - - case *types.Map: - return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) - - case *types.Chan: - return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) - - case *types.Named: - // Not safe with a copying GC; objects may move. - return uint32(reflect.ValueOf(t.Obj()).Pointer()) - - case *types.Tuple: - return h.hashTuple(t) - } - panic(t) -} - -func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { - // See go/types.identicalTypes for rationale. - n := tuple.Len() - var hash uint32 = 9137 + 2*uint32(n) - for i := 0; i < n; i++ { - hash += 3 * h.Hash(tuple.At(i).Type()) - } - return hash -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go deleted file mode 100644 index 32084610f49..00000000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file implements a cache of method sets. - -package typeutil - -import ( - "go/types" - "sync" -) - -// A MethodSetCache records the method set of each type T for which -// MethodSet(T) is called so that repeat queries are fast. -// The zero value is a ready-to-use cache instance. -type MethodSetCache struct { - mu sync.Mutex - named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N - others map[types.Type]*types.MethodSet // all other types -} - -// MethodSet returns the method set of type T. It is thread-safe. -// -// If cache is nil, this function is equivalent to types.NewMethodSet(T). -// Utility functions can thus expose an optional *MethodSetCache -// parameter to clients that care about performance. -// -func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { - if cache == nil { - return types.NewMethodSet(T) - } - cache.mu.Lock() - defer cache.mu.Unlock() - - switch T := T.(type) { - case *types.Named: - return cache.lookupNamed(T).value - - case *types.Pointer: - if N, ok := T.Elem().(*types.Named); ok { - return cache.lookupNamed(N).pointer - } - } - - // all other types - // (The map uses pointer equivalence, not type identity.) - mset := cache.others[T] - if mset == nil { - mset = types.NewMethodSet(T) - if cache.others == nil { - cache.others = make(map[types.Type]*types.MethodSet) - } - cache.others[T] = mset - } - return mset -} - -func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { - if cache.named == nil { - cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) - } - // Avoid recomputing mset(*T) for each distinct Pointer - // instance whose underlying type is a named type. - msets, ok := cache.named[named] - if !ok { - msets.value = types.NewMethodSet(named) - msets.pointer = types.NewMethodSet(types.NewPointer(named)) - cache.named[named] = msets - } - return msets -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go deleted file mode 100644 index 9849c24cef3..00000000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/ui.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package typeutil - -// This file defines utilities for user interfaces that display types. - -import "go/types" - -// IntuitiveMethodSet returns the intuitive method set of a type T, -// which is the set of methods you can call on an addressable value of -// that type. -// -// The result always contains MethodSet(T), and is exactly MethodSet(T) -// for interface types and for pointer-to-concrete types. -// For all other concrete types T, the result additionally -// contains each method belonging to *T if there is no identically -// named method on T itself. -// -// This corresponds to user intuition about method sets; -// this function is intended only for user interfaces. -// -// The order of the result is as for types.MethodSet(T). -// -func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { - isPointerToConcrete := func(T types.Type) bool { - ptr, ok := T.(*types.Pointer) - return ok && !types.IsInterface(ptr.Elem()) - } - - var result []*types.Selection - mset := msets.MethodSet(T) - if types.IsInterface(T) || isPointerToConcrete(T) { - for i, n := 0, mset.Len(); i < n; i++ { - result = append(result, mset.At(i)) - } - } else { - // T is some other concrete type. - // Report methods of T and *T, preferring those of T. - pmset := msets.MethodSet(types.NewPointer(T)) - for i, n := 0, pmset.Len(); i < n; i++ { - meth := pmset.At(i) - if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { - meth = m - } - result = append(result, meth) - } - - } - return result -} diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go deleted file mode 100644 index 26586810c7f..00000000000 --- a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package analysisinternal exposes internal-only fields from go/analysis. -package analysisinternal - -import ( - "bytes" - "fmt" - "go/ast" - "go/token" - "go/types" - "strings" - - "golang.org/x/tools/go/ast/astutil" -) - -func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { - // Get the end position for the type error. - offset, end := fset.PositionFor(start, false).Offset, start - if offset >= len(src) { - return end - } - if width := bytes.IndexAny(src[offset:], " \n,():;[]+-*"); width > 0 { - end = start + token.Pos(width) - } - return end -} - -func ZeroValue(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { - under := typ - if n, ok := typ.(*types.Named); ok { - under = n.Underlying() - } - switch u := under.(type) { - case *types.Basic: - switch { - case u.Info()&types.IsNumeric != 0: - return &ast.BasicLit{Kind: token.INT, Value: "0"} - case u.Info()&types.IsBoolean != 0: - return &ast.Ident{Name: "false"} - case u.Info()&types.IsString != 0: - return &ast.BasicLit{Kind: token.STRING, Value: `""`} - default: - panic("unknown basic type") - } - case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice: - return ast.NewIdent("nil") - case *types.Struct: - texpr := typeExpr(fset, f, pkg, typ) // typ because we want the name here. 
- if texpr == nil { - return nil - } - return &ast.CompositeLit{ - Type: texpr, - } - case *types.Array: - texpr := typeExpr(fset, f, pkg, u.Elem()) - if texpr == nil { - return nil - } - return &ast.CompositeLit{ - Type: &ast.ArrayType{ - Elt: texpr, - Len: &ast.BasicLit{Kind: token.INT, Value: fmt.Sprintf("%v", u.Len())}, - }, - } - } - return nil -} - -func typeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { - switch t := typ.(type) { - case *types.Basic: - switch t.Kind() { - case types.UnsafePointer: - return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")} - default: - return ast.NewIdent(t.Name()) - } - case *types.Named: - if t.Obj().Pkg() == pkg { - return ast.NewIdent(t.Obj().Name()) - } - pkgName := t.Obj().Pkg().Name() - // If the file already imports the package under another name, use that. - for _, group := range astutil.Imports(fset, f) { - for _, cand := range group { - if strings.Trim(cand.Path.Value, `"`) == t.Obj().Pkg().Path() { - if cand.Name != nil && cand.Name.Name != "" { - pkgName = cand.Name.Name - } - } - } - } - if pkgName == "." { - return ast.NewIdent(t.Obj().Name()) - } - return &ast.SelectorExpr{ - X: ast.NewIdent(pkgName), - Sel: ast.NewIdent(t.Obj().Name()), - } - default: - return nil // TODO: anonymous structs, but who does that - } -} - -var GetTypeErrors = func(p interface{}) []types.Error { return nil } -var SetTypeErrors = func(p interface{}, errors []types.Error) {} - -type TypeErrorPass string - -const ( - NoNewVars TypeErrorPass = "nonewvars" - NoResultValues TypeErrorPass = "noresultvalues" - UndeclaredName TypeErrorPass = "undeclaredname" -) diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index ff4f9d5aa0f..b5c061b0125 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -23,9 +23,24 @@ import ( // An Runner will run go command invocations and serialize // them if it sees a concurrency error. type Runner struct { - // LoadMu guards packages.Load calls and associated state. - loadMu sync.Mutex - serializeLoads int + // once guards the runner initialization. + once sync.Once + + // inFlight tracks available workers. + inFlight chan struct{} + + // serialized guards the ability to run a go command serially, + // to avoid deadlocks when claiming workers. + serialized chan struct{} +} + +const maxInFlight = 10 + +func (runner *Runner) initialize() { + runner.once.Do(func() { + runner.inFlight = make(chan struct{}, maxInFlight) + runner.serialized = make(chan struct{}, 1) + }) } // 1.13: go: updates to go.mod needed, but contents have changed @@ -35,7 +50,7 @@ var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed // Run is a convenience wrapper around RunRaw. // It returns only stdout and a "friendly" error. func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { - stdout, _, friendly, _ := runner.runRaw(ctx, inv) + stdout, _, friendly, _ := runner.RunRaw(ctx, inv) return stdout, friendly } @@ -49,55 +64,65 @@ func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stde // RunRaw runs the invocation, serializing requests only if they fight over // go.mod changes. func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { - return runner.runRaw(ctx, inv) -} + // Make sure the runner is always initialized. 
+ runner.initialize() -func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { - runner.loadMu.Lock() - runner.serializeLoads++ + // First, try to run the go command concurrently. + stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) - defer func() { - runner.serializeLoads-- - runner.loadMu.Unlock() - }() + // If we encounter a load concurrency error, we need to retry serially. + if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { + return stdout, stderr, friendlyErr, err + } + event.Error(ctx, "Load concurrency error, will retry serially", err) - return inv.runWithFriendlyError(ctx, stdout, stderr) + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) + return stdout, stderr, friendlyErr, err } -func (runner *Runner) runRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { - // We want to run invocations concurrently as much as possible. However, - // if go.mod updates are needed, only one can make them and the others will - // fail. We need to retry in those cases, but we don't want to thrash so - // badly we never recover. To avoid that, once we've seen one concurrency - // error, start serializing everything until the backlog has cleared out. - runner.loadMu.Lock() - var locked bool // If true, we hold the mutex and have incremented. - if runner.serializeLoads == 0 { - runner.loadMu.Unlock() - } else { - locked = true - runner.serializeLoads++ +func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + // Wait for 1 worker to become available. + select { + case <-ctx.Done(): + return nil, nil, nil, ctx.Err() + case runner.inFlight <- struct{}{}: + defer func() { <-runner.inFlight }() } - defer func() { - if locked { - locked = false - runner.serializeLoads-- - runner.loadMu.Unlock() - } - }() - for { - stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} - friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr) - if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { - return stdout, stderr, friendlyErr, err - } - event.Error(ctx, "Load concurrency error, will retry serially", err) - if !locked { - runner.loadMu.Lock() - runner.serializeLoads++ + stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} + friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { + // Make sure the runner is always initialized. + runner.initialize() + + // Acquire the serialization lock. This avoids deadlocks between two + // runPiped commands. + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.serialized <- struct{}{}: + defer func() { <-runner.serialized }() + } + + // Wait for all in-progress go commands to return before proceeding, + // to avoid load concurrency errors. + for i := 0; i < maxInFlight; i++ { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.inFlight <- struct{}{}: + // Make sure we always "return" any workers we took. + defer func() { <-runner.inFlight }() } } + + return inv.runWithFriendlyError(ctx, stdout, stderr) } // An Invocation represents a call to the go command. 
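The hunk above replaces the Runner's mutex-and-counter scheme with two channels: inFlight caps concurrent go invocations at maxInFlight, and serialized lets runPiped drain every worker slot so it runs alone. A self-contained sketch of that pattern (an illustration of the idea, not code from this PR):

package main

import (
	"context"
	"fmt"
	"sync"
)

const maxInFlight = 10

type limiter struct {
	once       sync.Once
	inFlight   chan struct{} // counting semaphore: one token per running job
	serialized chan struct{} // held while an exclusive job is queued or running
}

func (l *limiter) init() {
	l.once.Do(func() {
		l.inFlight = make(chan struct{}, maxInFlight)
		l.serialized = make(chan struct{}, 1)
	})
}

// runConcurrent claims a single worker slot, as Runner.runConcurrent does above.
func (l *limiter) runConcurrent(ctx context.Context, f func()) error {
	l.init()
	select {
	case <-ctx.Done():
		return ctx.Err()
	case l.inFlight <- struct{}{}:
		defer func() { <-l.inFlight }()
	}
	f()
	return nil
}

// runExclusive takes the serialization slot, then claims every worker slot so
// nothing else can be running while f executes, mirroring Runner.runPiped.
func (l *limiter) runExclusive(ctx context.Context, f func()) error {
	l.init()
	select {
	case <-ctx.Done():
		return ctx.Err()
	case l.serialized <- struct{}{}:
		defer func() { <-l.serialized }()
	}
	for i := 0; i < maxInFlight; i++ {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case l.inFlight <- struct{}{}:
			defer func() { <-l.inFlight }() // release every claimed slot on return
		}
	}
	f()
	return nil
}

func main() {
	var l limiter
	_ = l.runConcurrent(context.Background(), func() { fmt.Println("concurrent go invocation") })
	_ = l.runExclusive(context.Background(), func() { fmt.Println("exclusive go invocation") })
}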
@@ -105,6 +130,8 @@ type Invocation struct { Verb string Args []string BuildFlags []string + ModFlag string + ModFile string Env []string WorkingDir string Logf func(format string, args ...interface{}) @@ -133,17 +160,35 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { } goArgs := []string{i.Verb} + + appendModFile := func() { + if i.ModFile != "" { + goArgs = append(goArgs, "-modfile="+i.ModFile) + } + } + appendModFlag := func() { + if i.ModFlag != "" { + goArgs = append(goArgs, "-mod="+i.ModFlag) + } + } + switch i.Verb { + case "env", "version": + goArgs = append(goArgs, i.Args...) case "mod": - // mod needs the sub-verb before build flags. + // mod needs the sub-verb before flags. goArgs = append(goArgs, i.Args[0]) - goArgs = append(goArgs, i.BuildFlags...) + appendModFile() goArgs = append(goArgs, i.Args[1:]...) - case "env": - // env doesn't take build flags. + case "get": + goArgs = append(goArgs, i.BuildFlags...) + appendModFile() goArgs = append(goArgs, i.Args...) - default: + + default: // notably list and build. goArgs = append(goArgs, i.BuildFlags...) + appendModFile() + appendModFlag() goArgs = append(goArgs, i.Args...) } cmd := exec.Command("go", goArgs...) @@ -160,7 +205,6 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) cmd.Dir = i.WorkingDir } - defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) return runCmdContext(ctx, cmd) diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 390cb9db795..925ff53560a 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -10,7 +10,6 @@ import ( "bufio" "bytes" "fmt" - "go/build" "io/ioutil" "log" "os" @@ -47,16 +46,6 @@ type Root struct { Type RootType } -// SrcDirsRoots returns the roots from build.Default.SrcDirs(). Not modules-compatible. -func SrcDirsRoots(ctx *build.Context) []Root { - var roots []Root - roots = append(roots, Root{filepath.Join(ctx.GOROOT, "src"), RootGOROOT}) - for _, p := range filepath.SplitList(ctx.GOPATH) { - roots = append(roots, Root{filepath.Join(p, "src"), RootGOPATH}) - } - return roots -} - // Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. // For each package found, add will be called (concurrently) with the absolute // paths of the containing source directory and the package directory. diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 36292d79a7c..d859617b774 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -7,6 +7,7 @@ package imports import ( "bytes" "context" + "encoding/json" "fmt" "go/ast" "go/build" @@ -31,25 +32,25 @@ import ( // importToGroup is a list of functions which map from an import path to // a group number. 
-var importToGroup = []func(env *ProcessEnv, importPath string) (num int, ok bool){ - func(env *ProcessEnv, importPath string) (num int, ok bool) { - if env.LocalPrefix == "" { +var importToGroup = []func(localPrefix, importPath string) (num int, ok bool){ + func(localPrefix, importPath string) (num int, ok bool) { + if localPrefix == "" { return } - for _, p := range strings.Split(env.LocalPrefix, ",") { + for _, p := range strings.Split(localPrefix, ",") { if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath { return 3, true } } return }, - func(_ *ProcessEnv, importPath string) (num int, ok bool) { + func(_, importPath string) (num int, ok bool) { if strings.HasPrefix(importPath, "appengine") { return 2, true } return }, - func(_ *ProcessEnv, importPath string) (num int, ok bool) { + func(_, importPath string) (num int, ok bool) { firstComponent := strings.Split(importPath, "/")[0] if strings.Contains(firstComponent, ".") { return 1, true @@ -58,9 +59,9 @@ var importToGroup = []func(env *ProcessEnv, importPath string) (num int, ok bool }, } -func importGroup(env *ProcessEnv, importPath string) int { +func importGroup(localPrefix, importPath string) int { for _, fn := range importToGroup { - if n, ok := fn(env, importPath); ok { + if n, ok := fn(localPrefix, importPath); ok { return n } } @@ -82,7 +83,7 @@ type ImportFix struct { IdentName string // FixType is the type of fix this is (AddImport, DeleteImport, SetImportName). FixType ImportFixType - Relevance int // see pkg + Relevance float64 // see pkg } // An ImportInfo represents a single import statement. @@ -277,7 +278,12 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error { unknown = append(unknown, imp.ImportPath) } - names, err := p.env.GetResolver().loadPackageNames(unknown, p.srcDir) + resolver, err := p.env.GetResolver() + if err != nil { + return err + } + + names, err := resolver.loadPackageNames(unknown, p.srcDir) if err != nil { return err } @@ -567,7 +573,9 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv return fixes, nil } - addStdlibCandidates(p, p.missingRefs) + if err := addStdlibCandidates(p, p.missingRefs); err != nil { + return nil, err + } p.assumeSiblingImportsValid() if fixes, done := p.fix(); done { return fixes, nil @@ -584,9 +592,9 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv return fixes, nil } -// Highest relevance, used for the standard library. Chosen arbitrarily to -// match pre-existing gopls code. -const MaxRelevance = 7 +// MaxRelevance is the highest relevance, used for the standard library. +// Chosen arbitrarily to match pre-existing gopls code. +const MaxRelevance = 7.0 // getCandidatePkgs works with the passed callback to find all acceptable packages. // It deduplicates by import path, and uses a cached stdlib rather than reading @@ -595,22 +603,28 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena notSelf := func(p *pkg) bool { return p.packageName != filePkg || p.dir != filepath.Dir(filename) } + goenv, err := env.goEnv() + if err != nil { + return err + } + + var mu sync.Mutex // to guard asynchronous access to dupCheck + dupCheck := map[string]struct{}{} + // Start off with the standard library. 
for importPath, exports := range stdlib { p := &pkg{ - dir: filepath.Join(env.GOROOT, "src", importPath), + dir: filepath.Join(goenv["GOROOT"], "src", importPath), importPathShort: importPath, packageName: path.Base(importPath), relevance: MaxRelevance, } - if notSelf(p) && wrappedCallback.packageNameLoaded(p) { + dupCheck[importPath] = struct{}{} + if notSelf(p) && wrappedCallback.dirFound(p) && wrappedCallback.packageNameLoaded(p) { wrappedCallback.exportsLoaded(p, exports) } } - var mu sync.Mutex - dupCheck := map[string]struct{}{} - scanFilter := &scanCallback{ rootFound: func(root gopathwalk.Root) bool { // Exclude goroot results -- getting them is relatively expensive, not cached, @@ -639,15 +653,23 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena wrappedCallback.exportsLoaded(pkg, exports) }, } - return env.GetResolver().scan(ctx, scanFilter) + resolver, err := env.GetResolver() + if err != nil { + return err + } + return resolver.scan(ctx, scanFilter) } -func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) map[string]int { - result := make(map[string]int) +func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map[string]float64, error) { + result := make(map[string]float64) + resolver, err := env.GetResolver() + if err != nil { + return nil, err + } for _, path := range paths { - result[path] = env.GetResolver().scoreImportPath(ctx, path) + result[path] = resolver.scoreImportPath(ctx, path) } - return result + return result, nil } func PrimeCache(ctx context.Context, env *ProcessEnv) error { @@ -673,8 +695,9 @@ func candidateImportName(pkg *pkg) string { return "" } -// getAllCandidates gets all of the candidates to be imported, regardless of if they are needed. -func getAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { +// GetAllCandidates calls wrapped for each package whose name starts with +// searchPrefix, and can be imported from filename with the package name filePkg. +func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { callback := &scanCallback{ rootFound: func(gopathwalk.Root) bool { return true @@ -707,13 +730,43 @@ func getAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix return getCandidatePkgs(ctx, callback, filename, filePkg, env) } +// GetImportPaths calls wrapped for each package whose import path starts with +// searchPrefix, and can be imported from filename with the package name filePkg. +func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + if !canUse(filename, pkg.dir) { + return false + } + return strings.HasPrefix(pkg.importPathShort, searchPrefix) + }, + packageNameLoaded: func(pkg *pkg) bool { + wrapped(ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }) + return false + }, + } + return getCandidatePkgs(ctx, callback, filename, filePkg, env) +} + // A PackageExport is a package and its exports. 
type PackageExport struct { Fix *ImportFix Exports []string } -func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error { +// GetPackageExports returns all known packages with name pkg and their exports. +func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error { callback := &scanCallback{ rootFound: func(gopathwalk.Root) bool { return true @@ -743,85 +796,154 @@ func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchP return getCandidatePkgs(ctx, callback, filename, filePkg, env) } +var RequiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB"} + // ProcessEnv contains environment variables and settings that affect the use of // the go command, the go/build package, etc. type ProcessEnv struct { - LocalPrefix string - GocmdRunner *gocommand.Runner BuildFlags []string + ModFlag string + ModFile string + + // Env overrides the OS environment, and can be used to specify + // GOPROXY, GO111MODULE, etc. PATH cannot be set here, because + // exec.Command will not honor it. + // Specifying all of RequiredGoEnvVars avoids a call to `go env`. + Env map[string]string - // If non-empty, these will be used instead of the - // process-wide values. - GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS, GOSUMDB string - WorkingDir string + WorkingDir string // If Logf is non-nil, debug logging is enabled through this function. Logf func(format string, args ...interface{}) + initialized bool + resolver Resolver } +func (e *ProcessEnv) goEnv() (map[string]string, error) { + if err := e.init(); err != nil { + return nil, err + } + return e.Env, nil +} + +func (e *ProcessEnv) matchFile(dir, name string) (bool, error) { + bctx, err := e.buildContext() + if err != nil { + return false, err + } + return bctx.MatchFile(dir, name) +} + // CopyConfig copies the env's configuration into a new env. func (e *ProcessEnv) CopyConfig() *ProcessEnv { - copy := *e - copy.resolver = nil - return &copy + copy := &ProcessEnv{ + GocmdRunner: e.GocmdRunner, + initialized: e.initialized, + BuildFlags: e.BuildFlags, + Logf: e.Logf, + WorkingDir: e.WorkingDir, + resolver: nil, + Env: map[string]string{}, + } + for k, v := range e.Env { + copy.Env[k] = v + } + return copy } -func (e *ProcessEnv) env() []string { - env := os.Environ() - add := func(k, v string) { - if v != "" { - env = append(env, k+"="+v) +func (e *ProcessEnv) init() error { + if e.initialized { + return nil + } + + foundAllRequired := true + for _, k := range RequiredGoEnvVars { + if _, ok := e.Env[k]; !ok { + foundAllRequired = false + break } } - add("GOPATH", e.GOPATH) - add("GOROOT", e.GOROOT) - add("GO111MODULE", e.GO111MODULE) - add("GOPROXY", e.GOPROXY) - add("GOFLAGS", e.GOFLAGS) - add("GOSUMDB", e.GOSUMDB) - if e.WorkingDir != "" { - add("PWD", e.WorkingDir) + if foundAllRequired { + e.initialized = true + return nil + } + + if e.Env == nil { + e.Env = map[string]string{} + } + + goEnv := map[string]string{} + stdout, err := e.invokeGo(context.TODO(), "env", append([]string{"-json"}, RequiredGoEnvVars...)...)
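The init method above populates ProcessEnv.Env with a single `go env -json` call when not all of RequiredGoEnvVars are supplied. A standalone sketch of that call shape (assumed example; it only needs the go tool on PATH and uses a shortened variable list):

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

func main() {
	vars := []string{"GO111MODULE", "GOMOD", "GOPATH", "GOROOT"}
	// `go env -json VAR...` prints a single JSON object of string values.
	out, err := exec.Command("go", append([]string{"env", "-json"}, vars...)...).Output()
	if err != nil {
		fmt.Println("go env failed:", err)
		return
	}
	env := map[string]string{}
	if err := json.Unmarshal(out, &env); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("GOROOT =", env["GOROOT"])
	fmt.Println("GOMOD  =", env["GOMOD"])
}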
+ if err != nil { + return err + } + if err := json.Unmarshal(stdout.Bytes(), &goEnv); err != nil { + return err + } + for k, v := range goEnv { + e.Env[k] = v + } + e.initialized = true + return nil +} + +func (e *ProcessEnv) env() []string { + var env []string // the gocommand package will prepend os.Environ. + for k, v := range e.Env { + env = append(env, k+"="+v) } return env } -func (e *ProcessEnv) GetResolver() Resolver { +func (e *ProcessEnv) GetResolver() (Resolver, error) { if e.resolver != nil { - return e.resolver + return e.resolver, nil + } + if err := e.init(); err != nil { + return nil, err } - out, err := e.invokeGo(context.TODO(), "env", "GOMOD") - if err != nil || len(bytes.TrimSpace(out.Bytes())) == 0 { + if len(e.Env["GOMOD"]) == 0 { e.resolver = newGopathResolver(e) - return e.resolver + return e.resolver, nil } e.resolver = newModuleResolver(e) - return e.resolver + return e.resolver, nil } -func (e *ProcessEnv) buildContext() *build.Context { +func (e *ProcessEnv) buildContext() (*build.Context, error) { ctx := build.Default - ctx.GOROOT = e.GOROOT - ctx.GOPATH = e.GOPATH + goenv, err := e.goEnv() + if err != nil { + return nil, err + } + ctx.GOROOT = goenv["GOROOT"] + ctx.GOPATH = goenv["GOPATH"] // As of Go 1.14, build.Context has a Dir field // (see golang.org/issue/34860). // Populate it only if present. rc := reflect.ValueOf(&ctx).Elem() dir := rc.FieldByName("Dir") - if !dir.IsValid() { - // Working drafts of Go 1.14 named the field "WorkingDir" instead. - // TODO(bcmills): Remove this case after the Go 1.14 beta has been released. - dir = rc.FieldByName("WorkingDir") - } if dir.IsValid() && dir.Kind() == reflect.String { dir.SetString(e.WorkingDir) } - return &ctx + // Since Go 1.11, go/build.Context.Import may invoke 'go list' depending on + // the value in GO111MODULE in the process's environment. We always want to + // run in GOPATH mode when calling Import, so we need to prevent this from + // happening. In Go 1.16, GO111MODULE defaults to "on", so this problem comes + // up more frequently. + // + // HACK: setting any of the Context I/O hooks prevents Import from invoking + // 'go list', regardless of GO111MODULE. This is undocumented, but it's + // unlikely to change before GOPATH support is removed. + ctx.ReadDir = ioutil.ReadDir + + return &ctx, nil } func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) (*bytes.Buffer, error) { @@ -836,10 +958,14 @@ func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) return e.GocmdRunner.Run(ctx, inv) } -func addStdlibCandidates(pass *pass, refs references) { +func addStdlibCandidates(pass *pass, refs references) error { + goenv, err := pass.env.goEnv() + if err != nil { + return err + } add := func(pkg string) { // Prevent self-imports. - if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.env.GOROOT, "src", pkg) == pass.srcDir { + if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir { return } exports := copyExports(stdlib[pkg]) @@ -860,6 +986,7 @@ func addStdlibCandidates(pass *pass, refs references) { } } } + return nil } // A Resolver does the build-system-specific parts of goimports. @@ -872,7 +999,7 @@ type Resolver interface { // loadExports may be called concurrently. loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) // scoreImportPath returns the relevance for an import path. 
- scoreImportPath(ctx context.Context, path string) int + scoreImportPath(ctx context.Context, path string) float64 ClearForNewScan() } @@ -924,10 +1051,13 @@ func addExternalCandidates(pass *pass, refs references, filename string) error { return false // We'll do our own loading after we sort. }, } - err := pass.env.GetResolver().scan(context.Background(), callback) + resolver, err := pass.env.GetResolver() if err != nil { return err } + if err = resolver.scan(context.Background(), callback); err != nil { + return err + } // Search for imports matching potential package references. type result struct { @@ -1053,21 +1183,24 @@ func (r *gopathResolver) ClearForNewScan() { func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { names := map[string]string{} + bctx, err := r.env.buildContext() + if err != nil { + return nil, err + } for _, path := range importPaths { - names[path] = importPathToName(r.env, path, srcDir) + names[path] = importPathToName(bctx, path, srcDir) } return names, nil } // importPathToName finds out the actual package name, as declared in its .go files. -// If there's a problem, it returns "". -func importPathToName(env *ProcessEnv, importPath, srcDir string) (packageName string) { +func importPathToName(bctx *build.Context, importPath, srcDir string) string { // Fast path for standard library without going to disk. if _, ok := stdlib[importPath]; ok { return path.Base(importPath) // stdlib packages always match their paths. } - buildPkg, err := env.buildContext().Import(importPath, srcDir, build.FindOnly) + buildPkg, err := bctx.Import(importPath, srcDir, build.FindOnly) if err != nil { return "" } @@ -1131,10 +1264,10 @@ func packageDirToName(dir string) (packageName string, err error) { } type pkg struct { - dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") - importPathShort string // vendorless import path ("net/http", "a/b") - packageName string // package name loaded from source if requested - relevance int // a weakly-defined score of how relevant a package is. 0 is most relevant. + dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") + importPathShort string // vendorless import path ("net/http", "a/b") + packageName string // package name loaded from source if requested + relevance float64 // a weakly-defined score of how relevant a package is. 0 is most relevant. } type pkgDistance struct { @@ -1228,8 +1361,18 @@ func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error } stop := r.cache.ScanAndListen(ctx, processDir) defer stop() + + goenv, err := r.env.goEnv() + if err != nil { + return err + } + var roots []gopathwalk.Root + roots = append(roots, gopathwalk.Root{filepath.Join(goenv["GOROOT"], "src"), gopathwalk.RootGOROOT}) + for _, p := range filepath.SplitList(goenv["GOPATH"]) { + roots = append(roots, gopathwalk.Root{filepath.Join(p, "src"), gopathwalk.RootGOPATH}) + } // The callback is not necessarily safe to use in the goroutine below. Process roots eagerly. - roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), callback.rootFound) + roots = filterRoots(roots, callback.rootFound) // We can't cancel walks, because we need them to finish to have a usable // cache. Instead, run them in a separate goroutine and detach. 
scanDone := make(chan struct{}) @@ -1250,7 +1393,7 @@ func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error return nil } -func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) int { +func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) float64 { if _, ok := stdlib[path]; ok { return MaxRelevance } @@ -1289,8 +1432,6 @@ func VendorlessPath(ipath string) string { } func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) { - var exports []string - // Look for non-test, buildable .go files which could provide exports. all, err := ioutil.ReadDir(dir) if err != nil { @@ -1302,7 +1443,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) { continue } - match, err := env.buildContext().MatchFile(dir, fi.Name()) + match, err := env.matchFile(dir, fi.Name()) if err != nil || !match { continue } @@ -1314,6 +1455,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl } var pkgName string + var exports []string fset := token.NewFileSet() for _, fi := range files { select { @@ -1368,6 +1510,10 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) } } + resolver, err := pass.env.GetResolver() + if err != nil { + return nil, err + } // Collect exports for packages with matching names. rescv := make([]chan *pkg, len(candidates)) @@ -1406,7 +1552,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa } // If we're an x_test, load the package under test's test variant. includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir - _, exports, err := pass.env.GetResolver().loadExports(ctx, c.pkg, includeTest) + _, exports, err := resolver.loadExports(ctx, c.pkg, includeTest) if err != nil { if pass.env.Logf != nil { pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index f43d6b22e54..2815edc33d7 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -11,29 +11,29 @@ package imports import ( "bufio" "bytes" - "context" "fmt" "go/ast" - "go/build" "go/format" "go/parser" "go/printer" "go/token" "io" - "io/ioutil" - "os" "regexp" "strconv" "strings" "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/internal/gocommand" ) // Options is golang.org/x/tools/imports.Options with extra internal-only options. type Options struct { Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state. + // LocalPrefix is a comma-separated string of import path prefixes, which, if + // set, instructs Process to sort the import paths with the given prefixes + // into another group after 3rd-party packages. 
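The new LocalPrefix option is threaded into importGroup (see the importToGroup changes earlier in fix.go): group 0 is the standard library, 1 is third-party paths whose first element contains a dot, 2 is appengine, and 3 is anything matching a comma-separated LocalPrefix entry. A small sketch of that rule with sample results (illustrative only, not code from this PR):

package main

import (
	"fmt"
	"strings"
)

func importGroup(localPrefix, importPath string) int {
	for _, p := range strings.Split(localPrefix, ",") {
		if p != "" && (strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath) {
			return 3 // local packages sort last
		}
	}
	if strings.HasPrefix(importPath, "appengine") {
		return 2
	}
	if strings.Contains(strings.Split(importPath, "/")[0], ".") {
		return 1 // a dotted first element marks third-party code
	}
	return 0 // standard library
}

func main() {
	local := "github.com/openshift/installer"
	fmt.Println(importGroup(local, "fmt"))                                      // 0
	fmt.Println(importGroup(local, "github.com/pkg/errors"))                    // 1
	fmt.Println(importGroup(local, "github.com/openshift/installer/pkg/types")) // 3
}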
+ LocalPrefix string + Fragment bool // Accept fragment of a source file (no package statement) AllErrors bool // Report all errors (not just the first 10 on different lines) @@ -44,13 +44,8 @@ type Options struct { FormatOnly bool // Disable the insertion and deletion of imports } -// Process implements golang.org/x/tools/imports.Process with explicit context in env. +// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env. func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) { - src, opt, err = initialize(filename, src, opt) - if err != nil { - return nil, err - } - fileSet := token.NewFileSet() file, adjust, err := parse(fileSet, filename, src, opt) if err != nil { @@ -66,16 +61,12 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e } // FixImports returns a list of fixes to the imports that, when applied, -// will leave the imports in the same state as Process. +// will leave the imports in the same state as Process. src and opt must +// be specified. // // Note that filename's directory influences which imports can be chosen, // so it is important that filename be accurate. func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { - src, opt, err = initialize(filename, src, opt) - if err != nil { - return nil, err - } - fileSet := token.NewFileSet() file, _, err := parse(fileSet, filename, src, opt) if err != nil { @@ -86,13 +77,9 @@ func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, } // ApplyFixes applies all of the fixes to the file and formats it. extraMode -// is added in when parsing the file. +// is added in when parsing the file. src and opts must be specified, but no +// env is needed. func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) { - src, opt, err = initialize(filename, src, opt) - if err != nil { - return nil, err - } - // Don't use parse() -- we don't care about fragments or statement lists // here, and we need to work with unparseable files. fileSet := token.NewFileSet() @@ -116,63 +103,9 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e return formatFile(fileSet, file, src, nil, opt) } -// GetAllCandidates gets all of the packages starting with prefix that can be -// imported by filename, sorted by import path. -func GetAllCandidates(ctx context.Context, callback func(ImportFix), searchPrefix, filename, filePkg string, opt *Options) error { - _, opt, err := initialize(filename, []byte{}, opt) - if err != nil { - return err - } - return getAllCandidates(ctx, callback, searchPrefix, filename, filePkg, opt.Env) -} - -// GetPackageExports returns all known packages with name pkg and their exports. -func GetPackageExports(ctx context.Context, callback func(PackageExport), searchPkg, filename, filePkg string, opt *Options) error { - _, opt, err := initialize(filename, []byte{}, opt) - if err != nil { - return err - } - return getPackageExports(ctx, callback, searchPkg, filename, filePkg, opt.Env) -} - -// initialize sets the values for opt and src. -// If they are provided, they are not changed. Otherwise opt is set to the -// default values and src is read from the file system. -func initialize(filename string, src []byte, opt *Options) ([]byte, *Options, error) { - // Use defaults if opt is nil. 
- if opt == nil { - opt = &Options{Comments: true, TabIndent: true, TabWidth: 8} - } - - // Set the env if the user has not provided it. - if opt.Env == nil { - opt.Env = &ProcessEnv{ - GOPATH: build.Default.GOPATH, - GOROOT: build.Default.GOROOT, - GOFLAGS: os.Getenv("GOFLAGS"), - GO111MODULE: os.Getenv("GO111MODULE"), - GOPROXY: os.Getenv("GOPROXY"), - GOSUMDB: os.Getenv("GOSUMDB"), - } - } - // Set the gocmdRunner if the user has not provided it. - if opt.Env.GocmdRunner == nil { - opt.Env.GocmdRunner = &gocommand.Runner{} - } - if src == nil { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, nil, err - } - src = b - } - - return src, opt, nil -} - func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) { - mergeImports(opt.Env, fileSet, file) - sortImports(opt.Env, fileSet, file) + mergeImports(fileSet, file) + sortImports(opt.LocalPrefix, fileSet, file) imps := astutil.Imports(fileSet, file) var spacesBefore []string // import paths we need spaces before for _, impSection := range imps { @@ -183,7 +116,7 @@ func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func( lastGroup := -1 for _, importSpec := range impSection { importPath, _ := strconv.Unquote(importSpec.Path.Value) - groupNum := importGroup(opt.Env, importPath) + groupNum := importGroup(opt.LocalPrefix, importPath) if groupNum != lastGroup && lastGroup != -1 { spacesBefore = append(spacesBefore, importPath) } diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 4e816e8bcf1..8a83613c572 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -53,8 +53,14 @@ func (r *ModuleResolver) init() error { return nil } + goenv, err := r.env.goEnv() + if err != nil { + return err + } inv := gocommand.Invocation{ BuildFlags: r.env.BuildFlags, + ModFlag: r.env.ModFlag, + ModFile: r.env.ModFile, Env: r.env.env(), Logf: r.env.Logf, WorkingDir: r.env.WorkingDir, @@ -79,7 +85,11 @@ func (r *ModuleResolver) init() error { r.initAllMods() } - r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod") + if gmc := r.env.Env["GOMODCACHE"]; gmc != "" { + r.moduleCacheDir = gmc + } else { + r.moduleCacheDir = filepath.Join(filepath.SplitList(goenv["GOPATH"])[0], "/pkg/mod") + } sort.Slice(r.modsByModPath, func(i, j int) bool { count := func(x int) int { @@ -95,7 +105,7 @@ func (r *ModuleResolver) init() error { }) r.roots = []gopathwalk.Root{ - {filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT}, + {filepath.Join(goenv["GOROOT"], "/src"), gopathwalk.RootGOROOT}, } if r.main != nil { r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule}) @@ -236,7 +246,7 @@ func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, // files in that directory. If not, it could be provided by an // outer module. See #29736. 
for _, fi := range pkgFiles { - if ok, _ := r.env.buildContext().MatchFile(pkgDir, fi.Name()); ok { + if ok, _ := r.env.matchFile(pkgDir, fi.Name()); ok { return m, pkgDir } } @@ -479,7 +489,7 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error return nil } -func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) int { +func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) float64 { if _, ok := stdlib[path]; ok { return MaxRelevance } @@ -487,17 +497,31 @@ func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) int { return modRelevance(mod) } -func modRelevance(mod *gocommand.ModuleJSON) int { +func modRelevance(mod *gocommand.ModuleJSON) float64 { + var relevance float64 switch { case mod == nil: // out of scope return MaxRelevance - 4 case mod.Indirect: - return MaxRelevance - 3 + relevance = MaxRelevance - 3 case !mod.Main: - return MaxRelevance - 2 + relevance = MaxRelevance - 2 default: - return MaxRelevance - 1 // main module ties with stdlib + relevance = MaxRelevance - 1 // main module ties with stdlib } + + _, versionString, ok := module.SplitPathVersion(mod.Path) + if ok { + index := strings.Index(versionString, "v") + if index == -1 { + return relevance + } + if versionNumber, err := strconv.ParseFloat(versionString[index+1:], 64); err == nil { + relevance += versionNumber / 1000 + } + } + + return relevance } // canonicalize gets the result of canonicalizing the packages using the results diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/internal/imports/sortimports.go index 226279471d3..be8ffa25fec 100644 --- a/vendor/golang.org/x/tools/internal/imports/sortimports.go +++ b/vendor/golang.org/x/tools/internal/imports/sortimports.go @@ -15,7 +15,7 @@ import ( // sortImports sorts runs of consecutive import lines in import blocks in f. // It also removes duplicate imports when it is possible to do so without data loss. -func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { +func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) { for i, d := range f.Decls { d, ok := d.(*ast.GenDecl) if !ok || d.Tok != token.IMPORT { @@ -40,11 +40,11 @@ func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { for j, s := range d.Specs { if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line { // j begins a new run. End this one. - specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:j])...) + specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:j])...) i = j } } - specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:])...) + specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:])...) d.Specs = specs // Deduping can leave a blank line before the rparen; clean that up. @@ -60,7 +60,7 @@ func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { // mergeImports merges all the import declarations into the first one. // Taken from golang.org/x/tools/ast/astutil. 
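modRelevance above now adds a small fractional bump based on the module path's major-version suffix, so for example a /v8 module scores slightly above its /v2 counterpart within the same tier. A worked sketch of just that bump (assumed example; golang.org/x/mod/module provides SplitPathVersion, as referenced by the vendored code):

package main

import (
	"fmt"
	"strconv"
	"strings"

	"golang.org/x/mod/module"
)

// versionBump returns the fractional relevance added for a /vN suffix.
func versionBump(modPath string) float64 {
	_, pathMajor, ok := module.SplitPathVersion(modPath)
	if !ok {
		return 0
	}
	i := strings.Index(pathMajor, "v")
	if i == -1 {
		return 0 // no major-version suffix
	}
	n, err := strconv.ParseFloat(pathMajor[i+1:], 64)
	if err != nil {
		return 0
	}
	return n / 1000
}

func main() {
	fmt.Println(versionBump("github.com/go-redis/redis"))    // 0
	fmt.Println(versionBump("github.com/go-redis/redis/v8")) // 0.008
}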
-func mergeImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { +func mergeImports(fset *token.FileSet, f *ast.File) { if len(f.Decls) <= 1 { return } @@ -142,7 +142,7 @@ type posSpan struct { End token.Pos } -func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec { +func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec { // Can't short-circuit here even if specs are already sorted, // since they might yet need deduplication. // A lone import, however, may be safely ignored. @@ -191,7 +191,7 @@ func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Sp // Reassign the import paths to have the same position sequence. // Reassign each comment to abut the end of its spec. // Sort the comments by new position. - sort.Sort(byImportSpec{env, specs}) + sort.Sort(byImportSpec{localPrefix, specs}) // Dedup. Thanks to our sorting, we can just consider // adjacent pairs of imports. @@ -245,8 +245,8 @@ func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Sp } type byImportSpec struct { - env *ProcessEnv - specs []ast.Spec // slice of *ast.ImportSpec + localPrefix string + specs []ast.Spec // slice of *ast.ImportSpec } func (x byImportSpec) Len() int { return len(x.specs) } @@ -255,8 +255,8 @@ func (x byImportSpec) Less(i, j int) bool { ipath := importPath(x.specs[i]) jpath := importPath(x.specs[j]) - igroup := importGroup(x.env, ipath) - jgroup := importGroup(x.env, jpath) + igroup := importGroup(x.localPrefix, ipath) + jgroup := importGroup(x.localPrefix, jpath) if igroup != jgroup { return igroup < jgroup } diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go index 16252111ff2..7b573b9830b 100644 --- a/vendor/golang.org/x/tools/internal/imports/zstdlib.go +++ b/vendor/golang.org/x/tools/internal/imports/zstdlib.go @@ -56,6 +56,7 @@ var stdlib = map[string][]string{ }, "bufio": []string{ "ErrAdvanceTooFar", + "ErrBadReadCount", "ErrBufferFull", "ErrFinalToken", "ErrInvalidUnreadByte", @@ -303,7 +304,9 @@ var stdlib = map[string][]string{ "PrivateKey", "PublicKey", "Sign", + "SignASN1", "Verify", + "VerifyASN1", }, "crypto/ed25519": []string{ "GenerateKey", @@ -322,11 +325,13 @@ var stdlib = map[string][]string{ "CurveParams", "GenerateKey", "Marshal", + "MarshalCompressed", "P224", "P256", "P384", "P521", "Unmarshal", + "UnmarshalCompressed", }, "crypto/hmac": []string{ "Equal", @@ -432,6 +437,7 @@ var stdlib = map[string][]string{ "CurveP521", "Dial", "DialWithDialer", + "Dialer", "ECDSAWithP256AndSHA256", "ECDSAWithP384AndSHA384", "ECDSAWithP521AndSHA512", @@ -507,6 +513,7 @@ var stdlib = map[string][]string{ "ConstraintViolationError", "CreateCertificate", "CreateCertificateRequest", + "CreateRevocationList", "DSA", "DSAWithSHA1", "DSAWithSHA256", @@ -581,6 +588,7 @@ var stdlib = map[string][]string{ "PublicKeyAlgorithm", "PureEd25519", "RSA", + "RevocationList", "SHA1WithRSA", "SHA256WithRSA", "SHA256WithRSAPSS", @@ -694,6 +702,7 @@ var stdlib = map[string][]string{ "String", "Tx", "TxOptions", + "Validator", "Value", "ValueConverter", "Valuer", @@ -2349,6 +2358,27 @@ var stdlib = map[string][]string{ "IMAGE_DIRECTORY_ENTRY_RESOURCE", "IMAGE_DIRECTORY_ENTRY_SECURITY", "IMAGE_DIRECTORY_ENTRY_TLS", + "IMAGE_DLLCHARACTERISTICS_APPCONTAINER", + "IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", + "IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", + "IMAGE_DLLCHARACTERISTICS_GUARD_CF", + 
"IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", + "IMAGE_DLLCHARACTERISTICS_NO_BIND", + "IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", + "IMAGE_DLLCHARACTERISTICS_NO_SEH", + "IMAGE_DLLCHARACTERISTICS_NX_COMPAT", + "IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", + "IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", + "IMAGE_FILE_32BIT_MACHINE", + "IMAGE_FILE_AGGRESIVE_WS_TRIM", + "IMAGE_FILE_BYTES_REVERSED_HI", + "IMAGE_FILE_BYTES_REVERSED_LO", + "IMAGE_FILE_DEBUG_STRIPPED", + "IMAGE_FILE_DLL", + "IMAGE_FILE_EXECUTABLE_IMAGE", + "IMAGE_FILE_LARGE_ADDRESS_AWARE", + "IMAGE_FILE_LINE_NUMS_STRIPPED", + "IMAGE_FILE_LOCAL_SYMS_STRIPPED", "IMAGE_FILE_MACHINE_AM33", "IMAGE_FILE_MACHINE_AMD64", "IMAGE_FILE_MACHINE_ARM", @@ -2371,6 +2401,25 @@ var stdlib = map[string][]string{ "IMAGE_FILE_MACHINE_THUMB", "IMAGE_FILE_MACHINE_UNKNOWN", "IMAGE_FILE_MACHINE_WCEMIPSV2", + "IMAGE_FILE_NET_RUN_FROM_SWAP", + "IMAGE_FILE_RELOCS_STRIPPED", + "IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", + "IMAGE_FILE_SYSTEM", + "IMAGE_FILE_UP_SYSTEM_ONLY", + "IMAGE_SUBSYSTEM_EFI_APPLICATION", + "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", + "IMAGE_SUBSYSTEM_EFI_ROM", + "IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", + "IMAGE_SUBSYSTEM_NATIVE", + "IMAGE_SUBSYSTEM_NATIVE_WINDOWS", + "IMAGE_SUBSYSTEM_OS2_CUI", + "IMAGE_SUBSYSTEM_POSIX_CUI", + "IMAGE_SUBSYSTEM_UNKNOWN", + "IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", + "IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", + "IMAGE_SUBSYSTEM_WINDOWS_CUI", + "IMAGE_SUBSYSTEM_WINDOWS_GUI", + "IMAGE_SUBSYSTEM_XBOX", "ImportDirectory", "NewFile", "Open", @@ -4188,6 +4237,7 @@ var stdlib = map[string][]string{ "DevNull", "Environ", "ErrClosed", + "ErrDeadlineExceeded", "ErrExist", "ErrInvalid", "ErrNoDeadline", @@ -4646,6 +4696,7 @@ var stdlib = map[string][]string{ "ErrRange", "ErrSyntax", "FormatBool", + "FormatComplex", "FormatFloat", "FormatInt", "FormatUint", @@ -4655,6 +4706,7 @@ var stdlib = map[string][]string{ "Itoa", "NumError", "ParseBool", + "ParseComplex", "ParseFloat", "ParseInt", "ParseUint", diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index 2c4527f2436..1335a5eed8a 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -12,3 +12,6 @@ var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} var TypecheckCgo int + +var SetModFlag = func(config interface{}, value string) {} +var SetModFile = func(config interface{}, value string) {} diff --git a/vendor/gomodules.xyz/jsonpatch/v2/LICENSE b/vendor/gomodules.xyz/jsonpatch/v2/LICENSE new file mode 100644 index 00000000000..8f71f43fee3 --- /dev/null +++ b/vendor/gomodules.xyz/jsonpatch/v2/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
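The files that follow vendor gomodules.xyz/jsonpatch/v2, pulled in as a dependency of the installer. The package computes RFC 6902 JSON Patch operations by structurally diffing two JSON documents (see the CreatePatch doc comment in jsonpatch.go below). As a minimal usage sketch of that entry point — the sample documents and the expected operations are illustrative only, not taken from this PR; CreatePatch and Operation.Json are the exported names visible in the vendored source:

```go
package main

import (
	"fmt"

	jsonpatch "gomodules.xyz/jsonpatch/v2"
)

func main() {
	// Two versions of the same JSON document (illustrative values).
	original := []byte(`{"name":"bootstrap","replicas":1}`)
	modified := []byte(`{"name":"bootstrap","replicas":3,"facility":"ny5"}`)

	// CreatePatch diffs original against modified and returns the
	// RFC 6902 operations that transform the former into the latter.
	ops, err := jsonpatch.CreatePatch(original, modified)
	if err != nil {
		panic(err)
	}
	for _, op := range ops {
		// Each Operation marshals to {"op":...,"path":...,"value":...}.
		fmt.Println(op.Json())
	}
	// Expected operations (order may vary, since map keys are iterated in
	// random order): a "replace" at /replicas and an "add" at /facility.
}
```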
+ diff --git a/vendor/gomodules.xyz/jsonpatch/v2/go.mod b/vendor/gomodules.xyz/jsonpatch/v2/go.mod new file mode 100644 index 00000000000..b5eaf830ebc --- /dev/null +++ b/vendor/gomodules.xyz/jsonpatch/v2/go.mod @@ -0,0 +1,9 @@ +module gomodules.xyz/jsonpatch/v2 + +go 1.12 + +require ( + github.com/evanphx/json-patch v4.5.0+incompatible + github.com/pkg/errors v0.8.1 // indirect + github.com/stretchr/testify v1.3.0 +) diff --git a/vendor/gomodules.xyz/jsonpatch/v2/go.sum b/vendor/gomodules.xyz/jsonpatch/v2/go.sum new file mode 100644 index 00000000000..d8f9ffe1c94 --- /dev/null +++ b/vendor/gomodules.xyz/jsonpatch/v2/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go new file mode 100644 index 00000000000..e7cb7d6da50 --- /dev/null +++ b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go @@ -0,0 +1,336 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strings" +) + +var errBadJSONDoc = fmt.Errorf("invalid JSON Document") + +type JsonPatchOperation = Operation + +type Operation struct { + Operation string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value,omitempty"` +} + +func (j *Operation) Json() string { + b, _ := json.Marshal(j) + return string(b) +} + +func (j *Operation) MarshalJSON() ([]byte, error) { + var b bytes.Buffer + b.WriteString("{") + b.WriteString(fmt.Sprintf(`"op":"%s"`, j.Operation)) + b.WriteString(fmt.Sprintf(`,"path":"%s"`, j.Path)) + // Consider omitting Value for non-nullable operations. + if j.Value != nil || j.Operation == "replace" || j.Operation == "add" { + v, err := json.Marshal(j.Value) + if err != nil { + return nil, err + } + b.WriteString(`,"value":`) + b.Write(v) + } + b.WriteString("}") + return b.Bytes(), nil +} + +type ByPath []Operation + +func (a ByPath) Len() int { return len(a) } +func (a ByPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a ByPath) Less(i, j int) bool { return a[i].Path < a[j].Path } + +func NewPatch(operation, path string, value interface{}) Operation { + return Operation{Operation: operation, Path: path, Value: value} +} + +// CreatePatch creates a patch as specified in http://jsonpatch.com/ +// +// 'a' is original, 'b' is the modified document. Both are to be given as json encoded content. +// The function will return an array of JsonPatchOperations +// +// An error will be returned if any of the two documents are invalid. 
+func CreatePatch(a, b []byte) ([]Operation, error) { + var aI interface{} + var bI interface{} + err := json.Unmarshal(a, &aI) + if err != nil { + return nil, errBadJSONDoc + } + err = json.Unmarshal(b, &bI) + if err != nil { + return nil, errBadJSONDoc + } + return handleValues(aI, bI, "", []Operation{}) +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt, ok := bv.(string) + if ok && bt == at { + return true + } + case float64: + bt, ok := bv.(float64) + if ok && bt == at { + return true + } + case bool: + bt, ok := bv.(bool) + if ok && bt == at { + return true + } + case map[string]interface{}: + bt, ok := bv.(map[string]interface{}) + if !ok { + return false + } + for key := range at { + if !matchesValue(at[key], bt[key]) { + return false + } + } + for key := range bt { + if !matchesValue(at[key], bt[key]) { + return false + } + } + return true + case []interface{}: + bt, ok := bv.([]interface{}) + if !ok { + return false + } + if len(bt) != len(at) { + return false + } + for key := range at { + if !matchesValue(at[key], bt[key]) { + return false + } + } + for key := range bt { + if !matchesValue(at[key], bt[key]) { + return false + } + } + return true + } + return false +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. +// TODO decode support: +// var rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") + +var rfc6901Encoder = strings.NewReplacer("~", "~0", "/", "~1") + +func makePath(path string, newPart interface{}) string { + key := rfc6901Encoder.Replace(fmt.Sprintf("%v", newPart)) + if path == "" { + return "/" + key + } + if strings.HasSuffix(path, "/") { + return path + key + } + return path + "/" + key +} + +// diff returns the (recursive) difference between a and b as an array of JsonPatchOperations. 
+func diff(a, b map[string]interface{}, path string, patch []Operation) ([]Operation, error) { + for key, bv := range b { + p := makePath(path, key) + av, ok := a[key] + // value was added + if !ok { + patch = append(patch, NewPatch("add", p, bv)) + continue + } + // Types are the same, compare values + var err error + patch, err = handleValues(av, bv, p, patch) + if err != nil { + return nil, err + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + p := makePath(path, key) + + patch = append(patch, NewPatch("remove", p, nil)) + } + } + return patch, nil +} + +func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, error) { + { + at := reflect.TypeOf(av) + bt := reflect.TypeOf(bv) + if at == nil && bt == nil { + // do nothing + return patch, nil + } else if at == nil && bt != nil { + return append(patch, NewPatch("add", p, bv)), nil + } else if at != bt { + // If types have changed, replace completely (preserves null in destination) + return append(patch, NewPatch("replace", p, bv)), nil + } + } + + var err error + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + patch, err = diff(at, bt, p, patch) + if err != nil { + return nil, err + } + case string, float64, bool: + if !matchesValue(av, bv) { + patch = append(patch, NewPatch("replace", p, bv)) + } + case []interface{}: + bt := bv.([]interface{}) + if isSimpleArray(at) && isSimpleArray(bt) { + patch = append(patch, compareEditDistance(at, bt, p)...) + } else { + n := min(len(at), len(bt)) + for i := len(at) - 1; i >= n; i-- { + patch = append(patch, NewPatch("remove", makePath(p, i), nil)) + } + for i := n; i < len(bt); i++ { + patch = append(patch, NewPatch("add", makePath(p, i), bt[i])) + } + for i := 0; i < n; i++ { + var err error + patch, err = handleValues(at[i], bt[i], makePath(p, i), patch) + if err != nil { + return nil, err + } + } + } + default: + panic(fmt.Sprintf("Unknown type:%T ", av)) + } + return patch, nil +} + +func isBasicType(a interface{}) bool { + switch a.(type) { + case string, float64, bool: + default: + return false + } + return true +} + +func isSimpleArray(a []interface{}) bool { + for i := range a { + switch a[i].(type) { + case string, float64, bool: + default: + val := reflect.ValueOf(a[i]) + if val.Kind() == reflect.Map { + for _, k := range val.MapKeys() { + av := val.MapIndex(k) + if av.Kind() == reflect.Ptr || av.Kind() == reflect.Interface { + if av.IsNil() { + continue + } + av = av.Elem() + } + if av.Kind() != reflect.String && av.Kind() != reflect.Float64 && av.Kind() != reflect.Bool { + return false + } + } + return true + } + return false + } + } + return true +} + +// https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm +// Adapted from https://github.com/texttheater/golang-levenshtein +func compareEditDistance(s, t []interface{}, p string) []Operation { + m := len(s) + n := len(t) + + d := make([][]int, m+1) + for i := 0; i <= m; i++ { + d[i] = make([]int, n+1) + d[i][0] = i + } + for j := 0; j <= n; j++ { + d[0][j] = j + } + + for j := 1; j <= n; j++ { + for i := 1; i <= m; i++ { + if reflect.DeepEqual(s[i-1], t[j-1]) { + d[i][j] = d[i-1][j-1] // no op required + } else { + del := d[i-1][j] + 1 + add := d[i][j-1] + 1 + rep := d[i-1][j-1] + 1 + d[i][j] = min(rep, min(add, del)) + } + } + } + + return backtrace(s, t, p, m, n, d) +} + +func min(x int, y int) int { + if y < x { + return y + } + return x +} + +func backtrace(s, t []interface{}, p string, i int, j 
int, matrix [][]int) []Operation { + if i > 0 && matrix[i-1][j]+1 == matrix[i][j] { + op := NewPatch("remove", makePath(p, i-1), nil) + return append([]Operation{op}, backtrace(s, t, p, i-1, j, matrix)...) + } + if j > 0 && matrix[i][j-1]+1 == matrix[i][j] { + op := NewPatch("add", makePath(p, i), t[j-1]) + return append([]Operation{op}, backtrace(s, t, p, i, j-1, matrix)...) + } + if i > 0 && j > 0 && matrix[i-1][j-1]+1 == matrix[i][j] { + if isBasicType(s[0]) { + op := NewPatch("replace", makePath(p, i-1), t[j-1]) + return append([]Operation{op}, backtrace(s, t, p, i-1, j-1, matrix)...) + } + + p2, _ := handleValues(s[i-1], t[j-1], makePath(p, i-1), []Operation{}) + return append(p2, backtrace(s, t, p, i-1, j-1, matrix)...) + } + if i > 0 && j > 0 && matrix[i-1][j-1] == matrix[i][j] { + return backtrace(s, t, p, i-1, j-1, matrix) + } + return []Operation{} +} diff --git a/vendor/google.golang.org/api/appengine/v1/appengine-api.json b/vendor/google.golang.org/api/appengine/v1/appengine-api.json index c17be443fc6..9b254f27648 100644 --- a/vendor/google.golang.org/api/appengine/v1/appengine-api.json +++ b/vendor/google.golang.org/api/appengine/v1/appengine-api.json @@ -112,7 +112,7 @@ "apps": { "methods": { "create": { - "description": "Creates an App Engine application for a Google Cloud Platform project. Required fields:\nid - The ID of the target Cloud Platform project.\nlocation - The region (https://cloud.google.com/appengine/docs/locations) where you want the App Engine application located.For more information about App Engine applications, see Managing Projects, Applications, and Billing (https://cloud.google.com/appengine/docs/standard/python/console/).", + "description": "Creates an App Engine application for a Google Cloud Platform project. Required fields: id - The ID of the target Cloud Platform project. location - The region (https://cloud.google.com/appengine/docs/locations) where you want the App Engine application located.For more information about App Engine applications, see Managing Projects, Applications, and Billing (https://cloud.google.com/appengine/docs/standard/python/console/).", "flatPath": "v1/apps", "httpMethod": "POST", "id": "appengine.apps.create", @@ -156,7 +156,7 @@ ] }, "patch": { - "description": "Updates the specified Application resource. You can update the following fields:\nauth_domain - Google authentication domain for controlling user access to the application.\ndefault_cookie_expiration - Cookie expiration policy for the application.", + "description": "Updates the specified Application resource. You can update the following fields: auth_domain - Google authentication domain for controlling user access to the application. default_cookie_expiration - Cookie expiration policy for the application.", "flatPath": "v1/apps/{appsId}", "httpMethod": "PATCH", "id": "appengine.apps.patch", @@ -305,6 +305,10 @@ "BASIC_CERTIFICATE", "FULL_CERTIFICATE" ], + "enumDescriptions": [ + "Basic certificate information, including applicable domains and expiration date.", + "The information from BASIC_CERTIFICATE, plus detailed information on the domain mappings that have this certificate mapped." + ], "location": "query", "type": "string" } @@ -351,6 +355,10 @@ "BASIC_CERTIFICATE", "FULL_CERTIFICATE" ], + "enumDescriptions": [ + "Basic certificate information, including applicable domains and expiration date.", + "The information from BASIC_CERTIFICATE, plus detailed information on the domain mappings that have this certificate mapped." 
+ ], "location": "query", "type": "string" } @@ -472,6 +480,11 @@ "STRICT", "OVERRIDE" ], + "enumDescriptions": [ + "Strategy unspecified. Defaults to STRICT.", + "Overrides not allowed. If a mapping already exists for the specified domain, the request will return an ALREADY_EXISTS (409).", + "Overrides allowed. If a mapping already exists for the specified domain, the request will overwrite it. Note that this might stop another Google product from serving. For example, if the domain is mapped to another App Engine application, that app will no longer serve from that domain." + ], "location": "query", "type": "string" } @@ -1255,6 +1268,10 @@ "BASIC", "FULL" ], + "enumDescriptions": [ + "Basic version information including scaling and inbound services, but not detailed deployment information.", + "The information from BASIC, plus detailed information about the deployment. This format is required when creating resources, but is not returned in Get or List by default." + ], "location": "query", "type": "string" } @@ -1308,6 +1325,10 @@ "BASIC", "FULL" ], + "enumDescriptions": [ + "Basic version information including scaling and inbound services, but not detailed deployment information.", + "The information from BASIC, plus detailed information about the deployment. This format is required when creating resources, but is not returned in Get or List by default." + ], "location": "query", "type": "string" } @@ -1323,7 +1344,7 @@ ] }, "patch": { - "description": "Updates the specified Version resource. You can specify the following fields depending on the App Engine environment and type of scaling that the version resource uses:Standard environment\ninstance_class (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.instance_class)automatic scaling in the standard environment:\nautomatic_scaling.min_idle_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling)\nautomatic_scaling.max_idle_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling)\nautomaticScaling.standard_scheduler_settings.max_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StandardSchedulerSettings)\nautomaticScaling.standard_scheduler_settings.min_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StandardSchedulerSettings)\nautomaticScaling.standard_scheduler_settings.target_cpu_utilization (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StandardSchedulerSettings)\nautomaticScaling.standard_scheduler_settings.target_throughput_utilization (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StandardSchedulerSettings)basic scaling or manual scaling in the standard environment:\nserving_status (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.serving_status)Flexible environment\nserving_status (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.serving_status)automatic scaling in the flexible environment:\nautomatic_scaling.min_total_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling)\nautomatic_scaling.max_total_instances 
(https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling)\nautomatic_scaling.cool_down_period_sec (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling)\nautomatic_scaling.cpu_utilization.target_utilization (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling)", + "description": "Updates the specified Version resource. You can specify the following fields depending on the App Engine environment and type of scaling that the version resource uses:Standard environment instance_class (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.instance_class)automatic scaling in the standard environment: automatic_scaling.min_idle_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling) automatic_scaling.max_idle_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling) automaticScaling.standard_scheduler_settings.max_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StandardSchedulerSettings) automaticScaling.standard_scheduler_settings.min_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StandardSchedulerSettings) automaticScaling.standard_scheduler_settings.target_cpu_utilization (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StandardSchedulerSettings) automaticScaling.standard_scheduler_settings.target_throughput_utilization (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StandardSchedulerSettings)basic scaling or manual scaling in the standard environment: serving_status (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.serving_status) manual_scaling.instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#manualscaling)Flexible environment serving_status (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.serving_status)automatic scaling in the flexible environment: automatic_scaling.min_total_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling) automatic_scaling.max_total_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling) automatic_scaling.cool_down_period_sec (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling) automatic_scaling.cpu_utilization.target_utilization (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling)manual scaling in the flexible environment: manual_scaling.instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#manualscaling)", "flatPath": "v1/apps/{appsId}/services/{servicesId}/versions/{versionsId}", "httpMethod": "PATCH", "id": "appengine.apps.services.versions.patch", @@ -1573,7 +1594,7 @@ } } }, - "revision": "20200403", + "revision": "20200914", "rootUrl": 
"https://appengine.googleapis.com/", "schemas": { "ApiConfigHandler": { @@ -1662,6 +1683,22 @@ "description": "Google Cloud Storage bucket that can be used for storing files associated with this application. This bucket is associated with the application and can be used by the gcloud deployment commands.@OutputOnly", "type": "string" }, + "databaseType": { + "description": "The type of the Cloud Firestore or Cloud Datastore database associated with this application.", + "enum": [ + "DATABASE_TYPE_UNSPECIFIED", + "CLOUD_DATASTORE", + "CLOUD_FIRESTORE", + "CLOUD_DATASTORE_COMPATIBILITY" + ], + "enumDescriptions": [ + "Database type is unspecified.", + "Cloud Datastore", + "Cloud Firestore Native", + "Cloud Firestore in Datastore Mode" + ], + "type": "string" + }, "defaultBucket": { "description": "Google Cloud Storage bucket that can be used by this application to store content.@OutputOnly", "type": "string" @@ -1907,11 +1944,11 @@ "id": "CertificateRawData", "properties": { "privateKey": { - "description": "Unencrypted PEM encoded RSA private key. This field is set once on certificate creation and then encrypted. The key size must be 2048 bits or fewer. Must include the header and footer. Example: \u003cpre\u003e -----BEGIN RSA PRIVATE KEY----- \u003cunencrypted_key_value\u003e -----END RSA PRIVATE KEY----- \u003c/pre\u003e @InputOnly", + "description": "Unencrypted PEM encoded RSA private key. This field is set once on certificate creation and then encrypted. The key size must be 2048 bits or fewer. Must include the header and footer. Example: -----BEGIN RSA PRIVATE KEY----- -----END RSA PRIVATE KEY----- @InputOnly", "type": "string" }, "publicCertificate": { - "description": "PEM encoded x.509 public key certificate. This field is set once on certificate creation. Must include the header and footer. Example: \u003cpre\u003e -----BEGIN CERTIFICATE----- \u003ccertificate_value\u003e -----END CERTIFICATE----- \u003c/pre\u003e", + "description": "PEM encoded x.509 public key certificate. This field is set once on certificate creation. Must include the header and footer. Example: -----BEGIN CERTIFICATE----- -----END CERTIFICATE----- ", "type": "string" } }, @@ -1999,7 +2036,7 @@ "id": "DebugInstanceRequest", "properties": { "sshKey": { - "description": "Public SSH key to add to the instance. Examples:\n[USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME]\n[USERNAME]:ssh-rsa [KEY_VALUE] google-ssh {\"userName\":\"[USERNAME]\",\"expireOn\":\"[EXPIRE_TIME]\"}For more information, see Adding and Removing SSH Keys (https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys).", + "description": "Public SSH key to add to the instance. Examples: [USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME] [USERNAME]:ssh-rsa [KEY_VALUE] google-ssh {\"userName\":\"[USERNAME]\",\"expireOn\":\"[EXPIRE_TIME]\"}For more information, see Adding and Removing SSH Keys (https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys).", "type": "string" } }, @@ -2085,7 +2122,7 @@ "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. 
A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}.", "id": "Empty", "properties": {}, "type": "object" @@ -2195,7 +2232,7 @@ "type": "string" }, "sourceUrl": { - "description": "URL source to use to fetch this file. Must be a URL to a resource in Google Cloud Storage in the form 'http(s)://storage.googleapis.com/\u003cbucket\u003e/\u003cobject\u003e'.", + "description": "URL source to use to fetch this file. Must be a URL to a resource in Google Cloud Storage in the form 'http(s)://storage.googleapis.com//'.", "type": "string" } }, @@ -2229,7 +2266,7 @@ "type": "integer" }, "sourceRange": { - "description": "IP address or range, defined using CIDR notation, of requests that this rule applies to. You can use the wildcard character \"*\" to match all IPs equivalent to \"0/0\" and \"::/0\" together. Examples: 192.168.1.1 or 192.168.0.0/16 or 2001:db8::/32 or 2001:0db8:0000:0042:0000:8a2e:0370:7334.\u003cp\u003eTruncation will be silently performed on addresses which are not properly truncated. For example, 1.2.3.4/24 is accepted as the same address as 1.2.3.0/24. Similarly, for IPv6, 2001:db8::1/32 is accepted as the same address as 2001:db8::/32.", + "description": "IP address or range, defined using CIDR notation, of requests that this rule applies to. You can use the wildcard character \"*\" to match all IPs equivalent to \"0/0\" and \"::/0\" together. Examples: 192.168.1.1 or 192.168.0.0/16 or 2001:db8::/32 or 2001:0db8:0000:0042:0000:8a2e:0370:7334. Truncation will be silently performed on addresses which are not properly truncated. For example, 1.2.3.4/24 is accepted as the same address as 1.2.3.0/24. Similarly, for IPv6, 2001:db8::1/32 is accepted as the same address as 2001:db8::/32.", "type": "string" } }, @@ -2303,11 +2340,12 @@ "id": "Instance", "properties": { "appEngineRelease": { - "description": "App Engine release this instance is running on.@OutputOnly", + "description": "Output only. App Engine release this instance is running on.", + "readOnly": true, "type": "string" }, "availability": { - "description": "Availability of the instance.@OutputOnly", + "description": "Output only. Availability of the instance.", "enum": [ "UNSPECIFIED", "RESIDENT", @@ -2318,68 +2356,83 @@ "", "" ], + "readOnly": true, "type": "string" }, "averageLatency": { - "description": "Average latency (ms) over the last minute.@OutputOnly", + "description": "Output only. Average latency (ms) over the last minute.", "format": "int32", + "readOnly": true, "type": "integer" }, "errors": { - "description": "Number of errors since this instance was started.@OutputOnly", + "description": "Output only. Number of errors since this instance was started.", "format": "int32", + "readOnly": true, "type": "integer" }, "id": { - "description": "Relative name of the instance within the version. Example: instance-1.@OutputOnly", + "description": "Output only. Relative name of the instance within the version. Example: instance-1.", + "readOnly": true, "type": "string" }, "memoryUsage": { - "description": "Total memory in use (bytes).@OutputOnly", + "description": "Output only. Total memory in use (bytes).", "format": "int64", + "readOnly": true, "type": "string" }, "name": { - "description": "Full path to the Instance resource in the API. 
Example: apps/myapp/services/default/versions/v1/instances/instance-1.@OutputOnly", + "description": "Output only. Full path to the Instance resource in the API. Example: apps/myapp/services/default/versions/v1/instances/instance-1.", + "readOnly": true, "type": "string" }, "qps": { - "description": "Average queries per second (QPS) over the last minute.@OutputOnly", + "description": "Output only. Average queries per second (QPS) over the last minute.", "format": "float", + "readOnly": true, "type": "number" }, "requests": { - "description": "Number of requests since this instance was started.@OutputOnly", + "description": "Output only. Number of requests since this instance was started.", "format": "int32", + "readOnly": true, "type": "integer" }, "startTime": { - "description": "Time that this instance was started.@OutputOnly", + "description": "Output only. Time that this instance was started.@OutputOnly", "format": "google-datetime", + "readOnly": true, "type": "string" }, "vmDebugEnabled": { - "description": "Whether this instance is in debug mode. Only applicable for instances in App Engine flexible environment.@OutputOnly", + "description": "Output only. Whether this instance is in debug mode. Only applicable for instances in App Engine flexible environment.", + "readOnly": true, "type": "boolean" }, "vmId": { - "description": "Virtual machine ID of this instance. Only applicable for instances in App Engine flexible environment.@OutputOnly", + "description": "Output only. Virtual machine ID of this instance. Only applicable for instances in App Engine flexible environment.", + "readOnly": true, "type": "string" }, "vmIp": { - "description": "The IP address of this instance. Only applicable for instances in App Engine flexible environment.@OutputOnly", + "description": "Output only. The IP address of this instance. Only applicable for instances in App Engine flexible environment.", + "readOnly": true, "type": "string" }, "vmName": { - "description": "Name of the virtual machine where this instance lives. Only applicable for instances in App Engine flexible environment.@OutputOnly", + "description": "Output only. Name of the virtual machine where this instance lives. Only applicable for instances in App Engine flexible environment.", + "readOnly": true, "type": "string" }, "vmStatus": { - "description": "Status of the virtual machine where this instance lives. Only applicable for instances in App Engine flexible environment.@OutputOnly", + "description": "Output only. Status of the virtual machine where this instance lives. Only applicable for instances in App Engine flexible environment.", + "readOnly": true, "type": "string" }, "vmZoneName": { - "description": "Zone where the virtual machine is located. Only applicable for instances in App Engine flexible environment.@OutputOnly", + "description": "Output only. Zone where the virtual machine is located. Only applicable for instances in App Engine flexible environment.", + "readOnly": true, "type": "string" } }, @@ -2614,7 +2667,7 @@ "additionalProperties": { "type": "string" }, - "description": "Cross-service attributes for the location. For example\n{\"cloud.googleapis.com/region\": \"us-east1\"}\n", + "description": "Cross-service attributes for the location. For example {\"cloud.googleapis.com/region\": \"us-east1\"} ", "type": "object" }, "locationId": { @@ -2721,7 +2774,30 @@ "type": "boolean" }, "subnetworkName": { - "description": "Google Cloud Platform sub-network where the virtual machines are created. 
Specify the short name, not the resource path.If a subnetwork name is specified, a network name will also be required unless it is for the default network.\nIf the network that the instance is being created in is a Legacy network, then the IP address is allocated from the IPv4Range.\nIf the network that the instance is being created in is an auto Subnet Mode Network, then only network name should be specified (not the subnetwork_name) and the IP address is created from the IPCidrRange of the subnetwork that exists in that zone for that network.\nIf the network that the instance is being created in is a custom Subnet Mode Network, then the subnetwork_name must be specified and the IP address is created from the IPCidrRange of the subnetwork.If specified, the subnetwork must exist in the same region as the App Engine flexible environment application.", + "description": "Google Cloud Platform sub-network where the virtual machines are created. Specify the short name, not the resource path.If a subnetwork name is specified, a network name will also be required unless it is for the default network. If the network that the instance is being created in is a Legacy network, then the IP address is allocated from the IPv4Range. If the network that the instance is being created in is an auto Subnet Mode Network, then only network name should be specified (not the subnetwork_name) and the IP address is created from the IPCidrRange of the subnetwork that exists in that zone for that network. If the network that the instance is being created in is a custom Subnet Mode Network, then the subnetwork_name must be specified and the IP address is created from the IPCidrRange of the subnetwork.If specified, the subnetwork must exist in the same region as the App Engine flexible environment application.", + "type": "string" + } + }, + "type": "object" + }, + "NetworkSettings": { + "description": "A NetworkSettings resource is a container for ingress settings for a version or service.", + "id": "NetworkSettings", + "properties": { + "ingressTrafficAllowed": { + "description": "The ingress settings for version or service.", + "enum": [ + "INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED", + "INGRESS_TRAFFIC_ALLOWED_ALL", + "INGRESS_TRAFFIC_ALLOWED_INTERNAL_ONLY", + "INGRESS_TRAFFIC_ALLOWED_INTERNAL_AND_LB" + ], + "enumDescriptions": [ + "Unspecified", + "Allow HTTP traffic from public and private sources.", + "Allow HTTP traffic from only private VPC sources.", + "Allow HTTP traffic from private VPC sources and through load balancers." + ], "type": "string" } }, @@ -3026,6 +3102,10 @@ "format": "double", "type": "number" }, + "kmsKeyReference": { + "description": "The name of the encryption key that is stored in Google Cloud KMS. Only should be used by Cloud Composer to encrypt the vm disk", + "type": "string" + }, "memoryGb": { "description": "Memory (GB) needed.", "format": "double", @@ -3064,6 +3144,10 @@ "description": "Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly", "type": "string" }, + "networkSettings": { + "$ref": "NetworkSettings", + "description": "Ingress settings for this service. Will apply to all versions." + }, "split": { "$ref": "TrafficSplit", "description": "Mapping that defines fractional HTTP traffic diversion to different versions within the service." @@ -3342,7 +3426,7 @@ }, "automaticScaling": { "$ref": "AutomaticScaling", - "description": "Automatic scaling is based on request rate, response latencies, and other application metrics." 
+ "description": "Automatic scaling is based on request rate, response latencies, and other application metrics. Instances are dynamically created and destroyed as needed in order to handle traffic." }, "basicScaling": { "$ref": "BasicScaling", @@ -3355,6 +3439,13 @@ "description": "Metadata settings that are supplied to this version to enable beta runtime features.", "type": "object" }, + "buildEnvVariables": { + "additionalProperties": { + "type": "string" + }, + "description": "Environment variables available to the build environment.Only returned in GET requests if view=FULL is set.", + "type": "object" + }, "createTime": { "description": "Time that this version was created.@OutputOnly", "format": "google-datetime", @@ -3421,17 +3512,6 @@ }, "inboundServices": { "description": "Before an application can receive email or XMPP messages, the application must be configured to enable the service.", - "enumDescriptions": [ - "Not specified.", - "Allows an application to receive mail.", - "Allows an application to receive email-bound notifications.", - "Allows an application to receive error stanzas.", - "Allows an application to receive instant messages.", - "Allows an application to receive user subscription POSTs.", - "Allows an application to receive a user's chat presence.", - "Registers an application for notifications when a client connects or disconnects from a channel.", - "Enables warmup requests." - ], "items": { "enum": [ "INBOUND_SERVICE_UNSPECIFIED", @@ -3444,12 +3524,23 @@ "INBOUND_SERVICE_CHANNEL_PRESENCE", "INBOUND_SERVICE_WARMUP" ], + "enumDescriptions": [ + "Not specified.", + "Allows an application to receive mail.", + "Allows an application to receive email-bound notifications.", + "Allows an application to receive error stanzas.", + "Allows an application to receive instant messages.", + "Allows an application to receive user subscription POSTs.", + "Allows an application to receive a user's chat presence.", + "Registers an application for notifications when a client connects or disconnects from a channel.", + "Enables warmup requests." + ], "type": "string" }, "type": "array" }, "instanceClass": { - "description": "Instance class that is used to run this version. Valid values are:\nAutomaticScaling: F1, F2, F4, F4_1G\nManualScaling or BasicScaling: B1, B2, B4, B8, B4_1GDefaults to F1 for AutomaticScaling and B1 for ManualScaling or BasicScaling.", + "description": "Instance class that is used to run this version. Valid values are: AutomaticScaling: F1, F2, F4, F4_1G ManualScaling or BasicScaling: B1, B2, B4, B8, B4_1GDefaults to F1 for AutomaticScaling and B1 for ManualScaling or BasicScaling.", "type": "string" }, "libraries": { @@ -3465,7 +3556,7 @@ }, "manualScaling": { "$ref": "ManualScaling", - "description": "A service with manual scaling runs continuously, allowing you to perform complex initialization and rely on the state of its memory over time." + "description": "A service with manual scaling runs continuously, allowing you to perform complex initialization and rely on the state of its memory over time. Manually scaled versions are sometimes referred to as \"backends\"." }, "name": { "description": "Full path to the Version resource in the API. Example: apps/myapp/services/default/versions/v1.@OutputOnly", @@ -3492,7 +3583,7 @@ "type": "string" }, "runtimeApiVersion": { - "description": "The version of the API in the given runtime environment. 
Please see the app.yaml reference for valid values at https://cloud.google.com/appengine/docs/standard/\u003clanguage\u003e/config/appref", + "description": "The version of the API in the given runtime environment. Please see the app.yaml reference for valid values at https://cloud.google.com/appengine/docs/standard//config/appref", "type": "string" }, "runtimeChannel": { @@ -3584,7 +3675,7 @@ "type": "integer" }, "sourceUrl": { - "description": "URL of the zip file to deploy from. Must be a URL to a resource in Google Cloud Storage in the form 'http(s)://storage.googleapis.com/\u003cbucket\u003e/\u003cobject\u003e'.", + "description": "URL of the zip file to deploy from. Must be a URL to a resource in Google Cloud Storage in the form 'http(s)://storage.googleapis.com//'.", "type": "string" } }, diff --git a/vendor/google.golang.org/api/appengine/v1/appengine-gen.go b/vendor/google.golang.org/api/appengine/v1/appengine-gen.go index 171bea24e81..f7c44337bbe 100644 --- a/vendor/google.golang.org/api/appengine/v1/appengine-gen.go +++ b/vendor/google.golang.org/api/appengine/v1/appengine-gen.go @@ -79,6 +79,7 @@ const apiId = "appengine:v1" const apiName = "appengine" const apiVersion = "v1" const basePath = "https://appengine.googleapis.com/" +const mtlsBasePath = "https://appengine.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -102,6 +103,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*APIService, // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -397,6 +399,16 @@ type Application struct { // commands.@OutputOnly CodeBucket string `json:"codeBucket,omitempty"` + // DatabaseType: The type of the Cloud Firestore or Cloud Datastore + // database associated with this application. + // + // Possible values: + // "DATABASE_TYPE_UNSPECIFIED" - Database type is unspecified. + // "CLOUD_DATASTORE" - Cloud Datastore + // "CLOUD_FIRESTORE" - Cloud Firestore Native + // "CLOUD_DATASTORE_COMPATIBILITY" - Cloud Firestore in Datastore Mode + DatabaseType string `json:"databaseType,omitempty"` + // DefaultBucket: Google Cloud Storage bucket that can be used by this // application to store content.@OutputOnly DefaultBucket string `json:"defaultBucket,omitempty"` @@ -782,15 +794,14 @@ type CertificateRawData struct { // PrivateKey: Unencrypted PEM encoded RSA private key. This field is // set once on certificate creation and then encrypted. The key size // must be 2048 bits or fewer. Must include the header and footer. - // Example:
 -----BEGIN RSA PRIVATE KEY-----
-	//  -----END RSA PRIVATE KEY----- 
- // @InputOnly + // Example: -----BEGIN RSA PRIVATE KEY----- -----END RSA PRIVATE + // KEY----- @InputOnly PrivateKey string `json:"privateKey,omitempty"` // PublicCertificate: PEM encoded x.509 public key certificate. This // field is set once on certificate creation. Must include the header - // and footer. Example:
 -----BEGIN CERTIFICATE-----
-	//  -----END CERTIFICATE----- 
+ // and footer. Example: -----BEGIN CERTIFICATE----- -----END + // CERTIFICATE----- PublicCertificate string `json:"publicCertificate,omitempty"` // ForceSendFields is a list of field names (e.g. "PrivateKey") to @@ -1033,10 +1044,9 @@ func (s *CreateVersionMetadataV1Beta) MarshalJSON() ([]byte, error) { // DebugInstanceRequest: Request message for Instances.DebugInstance. type DebugInstanceRequest struct { - // SshKey: Public SSH key to add to the instance. - // Examples: - // [USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME] - // [USERNAME]:ssh-rsa [KEY_VALUE] google-ssh + // SshKey: Public SSH key to add to the instance. Examples: + // [USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME] [USERNAME]:ssh-rsa + // [KEY_VALUE] google-ssh // {"userName":"[USERNAME]","expireOn":"[EXPIRE_TIME]"}For more // information, see Adding and Removing SSH Keys // (https://cloud.google.com/compute/docs/instances/adding-removing-ssh-k @@ -1202,12 +1212,9 @@ func (s *DomainMapping) MarshalJSON() ([]byte, error) { // Empty: A generic empty message that you can re-use to avoid defining // duplicated empty messages in your APIs. A typical example is to use // it as the request or the response type of an API method. For -// instance: -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// The JSON representation for Empty is empty JSON object {}. +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for Empty is empty +// JSON object {}. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -1405,7 +1412,7 @@ type FileInfo struct { // SourceUrl: URL source to use to fetch this file. Must be a URL to a // resource in Google Cloud Storage in the form - // 'http(s)://storage.googleapis.com//'. + // 'http(s)://storage.googleapis.com//'. SourceUrl string `json:"sourceUrl,omitempty"` // ForceSendFields is a list of field names (e.g. "MimeType") to @@ -1456,12 +1463,12 @@ type FirewallRule struct { // SourceRange: IP address or range, defined using CIDR notation, of // requests that this rule applies to. You can use the wildcard // character "*" to match all IPs equivalent to "0/0" and "::/0" - // together. Examples: 192.168.1.1 or 192.168.0.0/16 or 2001:db8::/32 - // or 2001:0db8:0000:0042:0000:8a2e:0370:7334.
Truncation will be - // silently performed on addresses which are not properly truncated. For - // example, 1.2.3.4/24 is accepted as the same address as 1.2.3.0/24. - // Similarly, for IPv6, 2001:db8::1/32 is accepted as the same address - // as 2001:db8::/32. + // together. Examples: 192.168.1.1 or 192.168.0.0/16 or 2001:db8::/32 or + // 2001:0db8:0000:0042:0000:8a2e:0370:7334. Truncation will be silently + // performed on addresses which are not properly truncated. For example, + // 1.2.3.4/24 is accepted as the same address as 1.2.3.0/24. Similarly, + // for IPv6, 2001:db8::1/32 is accepted as the same address as + // 2001:db8::/32. SourceRange string `json:"sourceRange,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1590,11 +1597,11 @@ func (s *IdentityAwareProxy) MarshalJSON() ([]byte, error) { // Instance: An Instance resource is the computing unit that App Engine // uses to automatically scale an application. type Instance struct { - // AppEngineRelease: App Engine release this instance is running - // on.@OutputOnly + // AppEngineRelease: Output only. App Engine release this instance is + // running on. AppEngineRelease string `json:"appEngineRelease,omitempty"` - // Availability: Availability of the instance.@OutputOnly + // Availability: Output only. Availability of the instance. // // Possible values: // "UNSPECIFIED" @@ -1602,61 +1609,62 @@ type Instance struct { // "DYNAMIC" Availability string `json:"availability,omitempty"` - // AverageLatency: Average latency (ms) over the last minute.@OutputOnly + // AverageLatency: Output only. Average latency (ms) over the last + // minute. AverageLatency int64 `json:"averageLatency,omitempty"` - // Errors: Number of errors since this instance was started.@OutputOnly + // Errors: Output only. Number of errors since this instance was + // started. Errors int64 `json:"errors,omitempty"` - // Id: Relative name of the instance within the version. Example: - // instance-1.@OutputOnly + // Id: Output only. Relative name of the instance within the version. + // Example: instance-1. Id string `json:"id,omitempty"` - // MemoryUsage: Total memory in use (bytes).@OutputOnly + // MemoryUsage: Output only. Total memory in use (bytes). MemoryUsage int64 `json:"memoryUsage,omitempty,string"` - // Name: Full path to the Instance resource in the API. Example: - // apps/myapp/services/default/versions/v1/instances/instance-1.@OutputOn - // ly + // Name: Output only. Full path to the Instance resource in the API. + // Example: + // apps/myapp/services/default/versions/v1/instances/instance-1. Name string `json:"name,omitempty"` - // Qps: Average queries per second (QPS) over the last - // minute.@OutputOnly + // Qps: Output only. Average queries per second (QPS) over the last + // minute. Qps float64 `json:"qps,omitempty"` - // Requests: Number of requests since this instance was - // started.@OutputOnly + // Requests: Output only. Number of requests since this instance was + // started. Requests int64 `json:"requests,omitempty"` - // StartTime: Time that this instance was started.@OutputOnly + // StartTime: Output only. Time that this instance was + // started.@OutputOnly StartTime string `json:"startTime,omitempty"` - // VmDebugEnabled: Whether this instance is in debug mode. Only - // applicable for instances in App Engine flexible - // environment.@OutputOnly + // VmDebugEnabled: Output only. Whether this instance is in debug mode. + // Only applicable for instances in App Engine flexible environment. 
VmDebugEnabled bool `json:"vmDebugEnabled,omitempty"` - // VmId: Virtual machine ID of this instance. Only applicable for - // instances in App Engine flexible environment.@OutputOnly + // VmId: Output only. Virtual machine ID of this instance. Only + // applicable for instances in App Engine flexible environment. VmId string `json:"vmId,omitempty"` - // VmIp: The IP address of this instance. Only applicable for instances - // in App Engine flexible environment.@OutputOnly + // VmIp: Output only. The IP address of this instance. Only applicable + // for instances in App Engine flexible environment. VmIp string `json:"vmIp,omitempty"` - // VmName: Name of the virtual machine where this instance lives. Only - // applicable for instances in App Engine flexible - // environment.@OutputOnly + // VmName: Output only. Name of the virtual machine where this instance + // lives. Only applicable for instances in App Engine flexible + // environment. VmName string `json:"vmName,omitempty"` - // VmStatus: Status of the virtual machine where this instance lives. - // Only applicable for instances in App Engine flexible - // environment.@OutputOnly + // VmStatus: Output only. Status of the virtual machine where this + // instance lives. Only applicable for instances in App Engine flexible + // environment. VmStatus string `json:"vmStatus,omitempty"` - // VmZoneName: Zone where the virtual machine is located. Only - // applicable for instances in App Engine flexible - // environment.@OutputOnly + // VmZoneName: Output only. Zone where the virtual machine is located. + // Only applicable for instances in App Engine flexible environment. VmZoneName string `json:"vmZoneName,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2122,10 +2130,8 @@ type Location struct { // city name. For example, "Tokyo". DisplayName string `json:"displayName,omitempty"` - // Labels: Cross-service attributes for the location. For - // example + // Labels: Cross-service attributes for the location. For example // {"cloud.googleapis.com/region": "us-east1"} - // Labels map[string]string `json:"labels,omitempty"` // LocationId: The canonical id for this location. For example: @@ -2323,19 +2329,18 @@ type Network struct { // SubnetworkName: Google Cloud Platform sub-network where the virtual // machines are created. Specify the short name, not the resource // path.If a subnetwork name is specified, a network name will also be - // required unless it is for the default network. - // If the network that the instance is being created in is a Legacy - // network, then the IP address is allocated from the IPv4Range. - // If the network that the instance is being created in is an auto - // Subnet Mode Network, then only network name should be specified (not - // the subnetwork_name) and the IP address is created from the - // IPCidrRange of the subnetwork that exists in that zone for that - // network. - // If the network that the instance is being created in is a custom - // Subnet Mode Network, then the subnetwork_name must be specified and - // the IP address is created from the IPCidrRange of the subnetwork.If - // specified, the subnetwork must exist in the same region as the App - // Engine flexible environment application. + // required unless it is for the default network. If the network that + // the instance is being created in is a Legacy network, then the IP + // address is allocated from the IPv4Range. 
If the network that the + // instance is being created in is an auto Subnet Mode Network, then + // only network name should be specified (not the subnetwork_name) and + // the IP address is created from the IPCidrRange of the subnetwork that + // exists in that zone for that network. If the network that the + // instance is being created in is a custom Subnet Mode Network, then + // the subnetwork_name must be specified and the IP address is created + // from the IPCidrRange of the subnetwork.If specified, the subnetwork + // must exist in the same region as the App Engine flexible environment + // application. SubnetworkName string `json:"subnetworkName,omitempty"` // ForceSendFields is a list of field names (e.g. "ForwardedPorts") to @@ -2362,6 +2367,46 @@ func (s *Network) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NetworkSettings: A NetworkSettings resource is a container for +// ingress settings for a version or service. +type NetworkSettings struct { + // IngressTrafficAllowed: The ingress settings for version or service. + // + // Possible values: + // "INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED" - Unspecified + // "INGRESS_TRAFFIC_ALLOWED_ALL" - Allow HTTP traffic from public and + // private sources. + // "INGRESS_TRAFFIC_ALLOWED_INTERNAL_ONLY" - Allow HTTP traffic from + // only private VPC sources. + // "INGRESS_TRAFFIC_ALLOWED_INTERNAL_AND_LB" - Allow HTTP traffic from + // private VPC sources and through load balancers. + IngressTrafficAllowed string `json:"ingressTrafficAllowed,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "IngressTrafficAllowed") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IngressTrafficAllowed") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NetworkSettings) MarshalJSON() ([]byte, error) { + type NoMethod NetworkSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // NetworkUtilization: Target scaling by network usage. Only applicable // in the App Engine flexible environment. type NetworkUtilization struct { @@ -2771,6 +2816,11 @@ type Resources struct { // DiskGb: Disk size (GB) needed. DiskGb float64 `json:"diskGb,omitempty"` + // KmsKeyReference: The name of the encryption key that is stored in + // Google Cloud KMS. Only should be used by Cloud Composer to encrypt + // the vm disk + KmsKeyReference string `json:"kmsKeyReference,omitempty"` + // MemoryGb: Memory (GB) needed. MemoryGb float64 `json:"memoryGb,omitempty"` @@ -2863,6 +2913,10 @@ type Service struct { // apps/myapp/services/default.@OutputOnly Name string `json:"name,omitempty"` + // NetworkSettings: Ingress settings for this service. Will apply to all + // versions. 
+ NetworkSettings *NetworkSettings `json:"networkSettings,omitempty"` + // Split: Mapping that defines fractional HTTP traffic diversion to // different versions within the service. Split *TrafficSplit `json:"split,omitempty"` @@ -3326,7 +3380,9 @@ type Version struct { ApiConfig *ApiConfigHandler `json:"apiConfig,omitempty"` // AutomaticScaling: Automatic scaling is based on request rate, - // response latencies, and other application metrics. + // response latencies, and other application metrics. Instances are + // dynamically created and destroyed as needed in order to handle + // traffic. AutomaticScaling *AutomaticScaling `json:"automaticScaling,omitempty"` // BasicScaling: A service with basic scaling will create an instance @@ -3339,6 +3395,10 @@ type Version struct { // enable beta runtime features. BetaSettings map[string]string `json:"betaSettings,omitempty"` + // BuildEnvVariables: Environment variables available to the build + // environment.Only returned in GET requests if view=FULL is set. + BuildEnvVariables map[string]string `json:"buildEnvVariables,omitempty"` + // CreateTime: Time that this version was created.@OutputOnly CreateTime string `json:"createTime,omitempty"` @@ -3423,10 +3483,9 @@ type Version struct { InboundServices []string `json:"inboundServices,omitempty"` // InstanceClass: Instance class that is used to run this version. Valid - // values are: - // AutomaticScaling: F1, F2, F4, F4_1G - // ManualScaling or BasicScaling: B1, B2, B4, B8, B4_1GDefaults to F1 - // for AutomaticScaling and B1 for ManualScaling or BasicScaling. + // values are: AutomaticScaling: F1, F2, F4, F4_1G ManualScaling or + // BasicScaling: B1, B2, B4, B8, B4_1GDefaults to F1 for + // AutomaticScaling and B1 for ManualScaling or BasicScaling. InstanceClass string `json:"instanceClass,omitempty"` // Libraries: Configuration for third-party Python runtime libraries @@ -3441,7 +3500,8 @@ type Version struct { // ManualScaling: A service with manual scaling runs continuously, // allowing you to perform complex initialization and rely on the state - // of its memory over time. + // of its memory over time. Manually scaled versions are sometimes + // referred to as "backends". ManualScaling *ManualScaling `json:"manualScaling,omitempty"` // Name: Full path to the Version resource in the API. Example: @@ -3471,7 +3531,7 @@ type Version struct { // RuntimeApiVersion: The version of the API in the given runtime // environment. Please see the app.yaml reference for valid values at - // https://cloud.google.com/appengine/docs/standard//config/appref + // https://cloud.google.com/appengine/docs/standard//config/appref RuntimeApiVersion string `json:"runtimeApiVersion,omitempty"` // RuntimeChannel: The channel of the runtime to use. Only available for @@ -3630,7 +3690,7 @@ type ZipInfo struct { // SourceUrl: URL of the zip file to deploy from. Must be a URL to a // resource in Google Cloud Storage in the form - // 'http(s)://storage.googleapis.com//'. + // 'http(s)://storage.googleapis.com//'. SourceUrl string `json:"sourceUrl,omitempty"` // ForceSendFields is a list of field names (e.g. "FilesCount") to @@ -3667,9 +3727,8 @@ type AppsCreateCall struct { } // Create: Creates an App Engine application for a Google Cloud Platform -// project. Required fields: -// id - The ID of the target Cloud Platform project. -// location - The region +// project. Required fields: id - The ID of the target Cloud Platform +// project. 
location - The region // (https://cloud.google.com/appengine/docs/locations) where you want // the App Engine application located.For more information about App // Engine applications, see Managing Projects, Applications, and Billing @@ -3707,7 +3766,7 @@ func (c *AppsCreateCall) Header() http.Header { func (c *AppsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3768,7 +3827,7 @@ func (c *AppsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Creates an App Engine application for a Google Cloud Platform project. Required fields:\nid - The ID of the target Cloud Platform project.\nlocation - The region (https://cloud.google.com/appengine/docs/locations) where you want the App Engine application located.For more information about App Engine applications, see Managing Projects, Applications, and Billing (https://cloud.google.com/appengine/docs/standard/python/console/).", + // "description": "Creates an App Engine application for a Google Cloud Platform project. Required fields: id - The ID of the target Cloud Platform project. location - The region (https://cloud.google.com/appengine/docs/locations) where you want the App Engine application located.For more information about App Engine applications, see Managing Projects, Applications, and Billing (https://cloud.google.com/appengine/docs/standard/python/console/).", // "flatPath": "v1/apps", // "httpMethod": "POST", // "id": "appengine.apps.create", @@ -3843,7 +3902,7 @@ func (c *AppsGetCall) Header() http.Header { func (c *AppsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3945,11 +4004,9 @@ type AppsPatchCall struct { } // Patch: Updates the specified Application resource. You can update the -// following fields: -// auth_domain - Google authentication domain for controlling user -// access to the application. -// default_cookie_expiration - Cookie expiration policy for the -// application. +// following fields: auth_domain - Google authentication domain for +// controlling user access to the application. default_cookie_expiration +// - Cookie expiration policy for the application. func (r *AppsService) Patch(appsId string, application *Application) *AppsPatchCall { c := &AppsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.appsId = appsId @@ -3991,7 +4048,7 @@ func (c *AppsPatchCall) Header() http.Header { func (c *AppsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4055,7 +4112,7 @@ func (c *AppsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Updates the specified Application resource. 
You can update the following fields:\nauth_domain - Google authentication domain for controlling user access to the application.\ndefault_cookie_expiration - Cookie expiration policy for the application.", + // "description": "Updates the specified Application resource. You can update the following fields: auth_domain - Google authentication domain for controlling user access to the application. default_cookie_expiration - Cookie expiration policy for the application.", // "flatPath": "v1/apps/{appsId}", // "httpMethod": "PATCH", // "id": "appengine.apps.patch", @@ -4144,7 +4201,7 @@ func (c *AppsRepairCall) Header() http.Header { func (c *AppsRepairCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4283,7 +4340,7 @@ func (c *AppsAuthorizedCertificatesCreateCall) Header() http.Header { func (c *AppsAuthorizedCertificatesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4422,7 +4479,7 @@ func (c *AppsAuthorizedCertificatesDeleteCall) Header() http.Header { func (c *AppsAuthorizedCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4539,8 +4596,11 @@ func (r *AppsAuthorizedCertificatesService) Get(appsId string, authorizedCertifi // returned in the GET response. // // Possible values: -// "BASIC_CERTIFICATE" -// "FULL_CERTIFICATE" +// "BASIC_CERTIFICATE" - Basic certificate information, including +// applicable domains and expiration date. +// "FULL_CERTIFICATE" - The information from BASIC_CERTIFICATE, plus +// detailed information on the domain mappings that have this +// certificate mapped. func (c *AppsAuthorizedCertificatesGetCall) View(view string) *AppsAuthorizedCertificatesGetCall { c.urlParams_.Set("view", view) return c @@ -4583,7 +4643,7 @@ func (c *AppsAuthorizedCertificatesGetCall) Header() http.Header { func (c *AppsAuthorizedCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4673,6 +4733,10 @@ func (c *AppsAuthorizedCertificatesGetCall) Do(opts ...googleapi.CallOption) (*A // "BASIC_CERTIFICATE", // "FULL_CERTIFICATE" // ], + // "enumDescriptions": [ + // "Basic certificate information, including applicable domains and expiration date.", + // "The information from BASIC_CERTIFICATE, plus detailed information on the domain mappings that have this certificate mapped." + // ], // "location": "query", // "type": "string" // } @@ -4727,8 +4791,11 @@ func (c *AppsAuthorizedCertificatesListCall) PageToken(pageToken string) *AppsAu // returned in the LIST response. 
// // Possible values: -// "BASIC_CERTIFICATE" -// "FULL_CERTIFICATE" +// "BASIC_CERTIFICATE" - Basic certificate information, including +// applicable domains and expiration date. +// "FULL_CERTIFICATE" - The information from BASIC_CERTIFICATE, plus +// detailed information on the domain mappings that have this +// certificate mapped. func (c *AppsAuthorizedCertificatesListCall) View(view string) *AppsAuthorizedCertificatesListCall { c.urlParams_.Set("view", view) return c @@ -4771,7 +4838,7 @@ func (c *AppsAuthorizedCertificatesListCall) Header() http.Header { func (c *AppsAuthorizedCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4865,6 +4932,10 @@ func (c *AppsAuthorizedCertificatesListCall) Do(opts ...googleapi.CallOption) (* // "BASIC_CERTIFICATE", // "FULL_CERTIFICATE" // ], + // "enumDescriptions": [ + // "Basic certificate information, including applicable domains and expiration date.", + // "The information from BASIC_CERTIFICATE, plus detailed information on the domain mappings that have this certificate mapped." + // ], // "location": "query", // "type": "string" // } @@ -4963,7 +5034,7 @@ func (c *AppsAuthorizedCertificatesPatchCall) Header() http.Header { func (c *AppsAuthorizedCertificatesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5139,7 +5210,7 @@ func (c *AppsAuthorizedDomainsListCall) Header() http.Header { func (c *AppsAuthorizedDomainsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5288,9 +5359,16 @@ func (r *AppsDomainMappingsService) Create(appsId string, domainmapping *DomainM // this domain. By default, overrides are rejected. // // Possible values: -// "UNSPECIFIED_DOMAIN_OVERRIDE_STRATEGY" -// "STRICT" -// "OVERRIDE" +// "UNSPECIFIED_DOMAIN_OVERRIDE_STRATEGY" - Strategy unspecified. +// Defaults to STRICT. +// "STRICT" - Overrides not allowed. If a mapping already exists for +// the specified domain, the request will return an ALREADY_EXISTS +// (409). +// "OVERRIDE" - Overrides allowed. If a mapping already exists for the +// specified domain, the request will overwrite it. Note that this might +// stop another Google product from serving. For example, if the domain +// is mapped to another App Engine application, that app will no longer +// serve from that domain. 
func (c *AppsDomainMappingsCreateCall) OverrideStrategy(overrideStrategy string) *AppsDomainMappingsCreateCall { c.urlParams_.Set("overrideStrategy", overrideStrategy) return c @@ -5323,7 +5401,7 @@ func (c *AppsDomainMappingsCreateCall) Header() http.Header { func (c *AppsDomainMappingsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5408,6 +5486,11 @@ func (c *AppsDomainMappingsCreateCall) Do(opts ...googleapi.CallOption) (*Operat // "STRICT", // "OVERRIDE" // ], + // "enumDescriptions": [ + // "Strategy unspecified. Defaults to STRICT.", + // "Overrides not allowed. If a mapping already exists for the specified domain, the request will return an ALREADY_EXISTS (409).", + // "Overrides allowed. If a mapping already exists for the specified domain, the request will overwrite it. Note that this might stop another Google product from serving. For example, if the domain is mapped to another App Engine application, that app will no longer serve from that domain." + // ], // "location": "query", // "type": "string" // } @@ -5474,7 +5557,7 @@ func (c *AppsDomainMappingsDeleteCall) Header() http.Header { func (c *AppsDomainMappingsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5624,7 +5707,7 @@ func (c *AppsDomainMappingsGetCall) Header() http.Header { func (c *AppsDomainMappingsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5791,7 +5874,7 @@ func (c *AppsDomainMappingsListCall) Header() http.Header { func (c *AppsDomainMappingsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5972,7 +6055,7 @@ func (c *AppsDomainMappingsPatchCall) Header() http.Header { func (c *AppsDomainMappingsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6129,7 +6212,7 @@ func (c *AppsFirewallIngressRulesBatchUpdateCall) Header() http.Header { func (c *AppsFirewallIngressRulesBatchUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6268,7 +6351,7 @@ func (c *AppsFirewallIngressRulesCreateCall) Header() http.Header { func 
(c *AppsFirewallIngressRulesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6407,7 +6490,7 @@ func (c *AppsFirewallIngressRulesDeleteCall) Header() http.Header { func (c *AppsFirewallIngressRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6557,7 +6640,7 @@ func (c *AppsFirewallIngressRulesGetCall) Header() http.Header { func (c *AppsFirewallIngressRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6733,7 +6816,7 @@ func (c *AppsFirewallIngressRulesListCall) Header() http.Header { func (c *AppsFirewallIngressRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6915,7 +6998,7 @@ func (c *AppsFirewallIngressRulesPatchCall) Header() http.Header { func (c *AppsFirewallIngressRulesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7079,7 +7162,7 @@ func (c *AppsLocationsGetCall) Header() http.Header { func (c *AppsLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7254,7 +7337,7 @@ func (c *AppsLocationsListCall) Header() http.Header { func (c *AppsLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7440,7 +7523,7 @@ func (c *AppsOperationsGetCall) Header() http.Header { func (c *AppsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7623,7 +7706,7 @@ func (c *AppsOperationsListCall) Header() http.Header { func (c *AppsOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7796,7 +7879,7 @@ func (c *AppsServicesDeleteCall) Header() http.Header { func (c *AppsServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7946,7 +8029,7 @@ func (c *AppsServicesGetCall) Header() http.Header { func (c *AppsServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8113,7 +8196,7 @@ func (c *AppsServicesListCall) Header() http.Header { func (c *AppsServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8310,7 +8393,7 @@ func (c *AppsServicesPatchCall) Header() http.Header { func (c *AppsServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8470,7 +8553,7 @@ func (c *AppsServicesVersionsCreateCall) Header() http.Header { func (c *AppsServicesVersionsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8619,7 +8702,7 @@ func (c *AppsServicesVersionsDeleteCall) Header() http.Header { func (c *AppsServicesVersionsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8748,8 +8831,11 @@ func (r *AppsServicesVersionsService) Get(appsId string, servicesId string, vers // returned in the Get response. // // Possible values: -// "BASIC" -// "FULL" +// "BASIC" - Basic version information including scaling and inbound +// services, but not detailed deployment information. +// "FULL" - The information from BASIC, plus detailed information +// about the deployment. This format is required when creating +// resources, but is not returned in Get or List by default. 
func (c *AppsServicesVersionsGetCall) View(view string) *AppsServicesVersionsGetCall { c.urlParams_.Set("view", view) return c @@ -8792,7 +8878,7 @@ func (c *AppsServicesVersionsGetCall) Header() http.Header { func (c *AppsServicesVersionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8890,6 +8976,10 @@ func (c *AppsServicesVersionsGetCall) Do(opts ...googleapi.CallOption) (*Version // "BASIC", // "FULL" // ], + // "enumDescriptions": [ + // "Basic version information including scaling and inbound services, but not detailed deployment information.", + // "The information from BASIC, plus detailed information about the deployment. This format is required when creating resources, but is not returned in Get or List by default." + // ], // "location": "query", // "type": "string" // } @@ -8945,8 +9035,11 @@ func (c *AppsServicesVersionsListCall) PageToken(pageToken string) *AppsServices // returned in the List response. // // Possible values: -// "BASIC" -// "FULL" +// "BASIC" - Basic version information including scaling and inbound +// services, but not detailed deployment information. +// "FULL" - The information from BASIC, plus detailed information +// about the deployment. This format is required when creating +// resources, but is not returned in Get or List by default. func (c *AppsServicesVersionsListCall) View(view string) *AppsServicesVersionsListCall { c.urlParams_.Set("view", view) return c @@ -8989,7 +9082,7 @@ func (c *AppsServicesVersionsListCall) Header() http.Header { func (c *AppsServicesVersionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9090,6 +9183,10 @@ func (c *AppsServicesVersionsListCall) Do(opts ...googleapi.CallOption) (*ListVe // "BASIC", // "FULL" // ], + // "enumDescriptions": [ + // "Basic version information including scaling and inbound services, but not detailed deployment information.", + // "The information from BASIC, plus detailed information about the deployment. This format is required when creating resources, but is not returned in Get or List by default." + // ], // "location": "query", // "type": "string" // } @@ -9143,59 +9240,53 @@ type AppsServicesVersionsPatchCall struct { // Patch: Updates the specified Version resource. 
You can specify the // following fields depending on the App Engine environment and type of -// scaling that the version resource uses:Standard -// environment +// scaling that the version resource uses:Standard environment // instance_class // (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/a // pps.services.versions#Version.FIELDS.instance_class)automatic scaling -// in the standard environment: -// automatic_scaling.min_idle_instances +// in the standard environment: automatic_scaling.min_idle_instances // (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/a // pps.services.versions#Version.FIELDS.automatic_scaling) -// automatic_scal -// ing.max_idle_instances +// automatic_scaling.max_idle_instances // (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/a // pps.services.versions#Version.FIELDS.automatic_scaling) -// automaticScali -// ng.standard_scheduler_settings.max_instances +// automaticScaling.standard_scheduler_settings.max_instances // (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/a // pps.services.versions#StandardSchedulerSettings) -// automaticScaling.stan -// dard_scheduler_settings.min_instances +// automaticScaling.standard_scheduler_settings.min_instances // (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/a // pps.services.versions#StandardSchedulerSettings) -// automaticScaling.stan -// dard_scheduler_settings.target_cpu_utilization +// automaticScaling.standard_scheduler_settings.target_cpu_utilization // (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/a // pps.services.versions#StandardSchedulerSettings) -// automaticScaling.stan -// dard_scheduler_settings.target_throughput_utilization +// automaticScaling.standard_scheduler_settings.target_throughput_utiliza +// tion // (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/a // pps.services.versions#StandardSchedulerSettings)basic scaling or -// manual scaling in the standard environment: -// serving_status +// manual scaling in the standard environment: serving_status +// (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/a +// pps.services.versions#Version.FIELDS.serving_status) +// manual_scaling.instances // (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/a -// pps.services.versions#Version.FIELDS.serving_status)Flexible -// environment +// pps.services.versions#manualscaling)Flexible environment // serving_status // (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/a // pps.services.versions#Version.FIELDS.serving_status)automatic scaling -// in the flexible environment: -// automatic_scaling.min_total_instances +// in the flexible environment: automatic_scaling.min_total_instances // (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/a // pps.services.versions#Version.FIELDS.automatic_scaling) -// automatic_scal -// ing.max_total_instances +// automatic_scaling.max_total_instances // (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/a // pps.services.versions#Version.FIELDS.automatic_scaling) -// automatic_scal -// ing.cool_down_period_sec +// automatic_scaling.cool_down_period_sec // (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/a // pps.services.versions#Version.FIELDS.automatic_scaling) -// automatic_scal -// ing.cpu_utilization.target_utilization +// automatic_scaling.cpu_utilization.target_utilization // 
(https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/a -// pps.services.versions#Version.FIELDS.automatic_scaling) +// pps.services.versions#Version.FIELDS.automatic_scaling)manual scaling +// in the flexible environment: manual_scaling.instances +// (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/a +// pps.services.versions#manualscaling) func (r *AppsServicesVersionsService) Patch(appsId string, servicesId string, versionsId string, version *Version) *AppsServicesVersionsPatchCall { c := &AppsServicesVersionsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.appsId = appsId @@ -9239,7 +9330,7 @@ func (c *AppsServicesVersionsPatchCall) Header() http.Header { func (c *AppsServicesVersionsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9305,7 +9396,7 @@ func (c *AppsServicesVersionsPatchCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Updates the specified Version resource. You can specify the following fields depending on the App Engine environment and type of scaling that the version resource uses:Standard environment\ninstance_class (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.instance_class)automatic scaling in the standard environment:\nautomatic_scaling.min_idle_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling)\nautomatic_scaling.max_idle_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling)\nautomaticScaling.standard_scheduler_settings.max_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StandardSchedulerSettings)\nautomaticScaling.standard_scheduler_settings.min_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StandardSchedulerSettings)\nautomaticScaling.standard_scheduler_settings.target_cpu_utilization (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StandardSchedulerSettings)\nautomaticScaling.standard_scheduler_settings.target_throughput_utilization (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StandardSchedulerSettings)basic scaling or manual scaling in the standard environment:\nserving_status (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.serving_status)Flexible environment\nserving_status (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.serving_status)automatic scaling in the flexible environment:\nautomatic_scaling.min_total_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling)\nautomatic_scaling.max_total_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling)\nautomatic_scaling.cool_down_period_sec 
(https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling)\nautomatic_scaling.cpu_utilization.target_utilization (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling)", + // "description": "Updates the specified Version resource. You can specify the following fields depending on the App Engine environment and type of scaling that the version resource uses:Standard environment instance_class (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.instance_class)automatic scaling in the standard environment: automatic_scaling.min_idle_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling) automatic_scaling.max_idle_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling) automaticScaling.standard_scheduler_settings.max_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StandardSchedulerSettings) automaticScaling.standard_scheduler_settings.min_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StandardSchedulerSettings) automaticScaling.standard_scheduler_settings.target_cpu_utilization (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StandardSchedulerSettings) automaticScaling.standard_scheduler_settings.target_throughput_utilization (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StandardSchedulerSettings)basic scaling or manual scaling in the standard environment: serving_status (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.serving_status) manual_scaling.instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#manualscaling)Flexible environment serving_status (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.serving_status)automatic scaling in the flexible environment: automatic_scaling.min_total_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling) automatic_scaling.max_total_instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling) automatic_scaling.cool_down_period_sec (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling) automatic_scaling.cpu_utilization.target_utilization (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling)manual scaling in the flexible environment: manual_scaling.instances (https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#manualscaling)", // "flatPath": "v1/apps/{appsId}/services/{servicesId}/versions/{versionsId}", // "httpMethod": "PATCH", // "id": "appengine.apps.services.versions.patch", @@ -9412,7 +9503,7 @@ func (c *AppsServicesVersionsInstancesDebugCall) Header() http.Header { func (c *AppsServicesVersionsInstancesDebugCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9590,7 +9681,7 @@ func (c *AppsServicesVersionsInstancesDeleteCall) Header() http.Header { func (c *AppsServicesVersionsInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9760,7 +9851,7 @@ func (c *AppsServicesVersionsInstancesGetCall) Header() http.Header { func (c *AppsServicesVersionsInstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9950,7 +10041,7 @@ func (c *AppsServicesVersionsInstancesListCall) Header() http.Header { func (c *AppsServicesVersionsInstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json b/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json index 680027ae9fb..3cd3fbbfd44 100644 --- a/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json +++ b/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json @@ -691,7 +691,7 @@ ] }, "list": { - "description": "Lists all models in the specified dataset. Requires the READER dataset\nrole.", + "description": "Lists all models in the specified dataset. Requires the READER dataset role.", "flatPath": "projects/{projectsId}/datasets/{datasetsId}/models", "httpMethod": "GET", "id": "bigquery.models.list", @@ -708,13 +708,13 @@ "type": "string" }, "maxResults": { - "description": "The maximum number of results to return in a single response page.\nLeverage the page tokens to iterate through the entire collection.", + "description": "The maximum number of results to return in a single response page. Leverage the page tokens to iterate through the entire collection.", "format": "uint32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token, returned by a previous call to request the next page of\nresults", + "description": "Page token, returned by a previous call to request the next page of results", "location": "query", "type": "string" }, @@ -909,7 +909,7 @@ "type": "string" }, "readMask": { - "description": "If set, only the Routine fields in the field mask are returned in the\nresponse. If unset, all Routine fields are returned.", + "description": "If set, only the Routine fields in the field mask are returned in the response. If unset, all Routine fields are returned.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -971,7 +971,7 @@ ] }, "list": { - "description": "Lists all routines in the specified dataset. Requires the READER dataset\nrole.", + "description": "Lists all routines in the specified dataset. 
Requires the READER dataset role.", "flatPath": "projects/{projectsId}/datasets/{datasetsId}/routines", "httpMethod": "GET", "id": "bigquery.routines.list", @@ -988,18 +988,18 @@ "type": "string" }, "filter": { - "description": "If set, then only the Routines matching this filter are returned.\nThe current supported form is either \"routine_type:\u003cRoutineType\u003e\" or\n\"routineType:\u003cRoutineType\u003e\", where \u003cRoutineType\u003e is a RoutineType enum.\nExample: \"routineType:SCALAR_FUNCTION\".", + "description": "If set, then only the Routines matching this filter are returned. The current supported form is either \"routine_type:\" or \"routineType:\", where is a RoutineType enum. Example: \"routineType:SCALAR_FUNCTION\".", "location": "query", "type": "string" }, "maxResults": { - "description": "The maximum number of results to return in a single response page.\nLeverage the page tokens to iterate through the entire collection.", + "description": "The maximum number of results to return in a single response page. Leverage the page tokens to iterate through the entire collection.", "format": "uint32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token, returned by a previous call, to request the next page of\nresults", + "description": "Page token, returned by a previous call, to request the next page of results", "location": "query", "type": "string" }, @@ -1011,7 +1011,7 @@ "type": "string" }, "readMask": { - "description": "If set, then only the Routine fields in the field mask, as well as\nproject_id, dataset_id and routine_id, are returned in the response.\nIf unset, then the following Routine fields are returned:\netag, project_id, dataset_id, routine_id, routine_type, creation_time,\nlast_modified_time, and language.", + "description": "If set, then only the Routine fields in the field mask, as well as project_id, dataset_id and routine_id, are returned in the response. If unset, then the following Routine fields are returned: etag, project_id, dataset_id, routine_id, routine_type, creation_time, last_modified_time, and language.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -1029,7 +1029,7 @@ ] }, "update": { - "description": "Updates information in an existing routine. The update method replaces the\nentire Routine resource.", + "description": "Updates information in an existing routine. The update method replaces the entire Routine resource.", "flatPath": "projects/{projectsId}/datasets/{datasetsId}/routines/{routinesId}", "httpMethod": "PUT", "id": "bigquery.routines.update", @@ -1266,7 +1266,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "projects/{projectsId}/datasets/{datasetsId}/tables/{tablesId}:getIamPolicy", "httpMethod": "POST", "id": "bigquery.tables.getIamPolicy", @@ -1275,7 +1275,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. 
See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/datasets/[^/]+/tables/[^/]+$", "required": true, @@ -1416,7 +1416,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", "flatPath": "projects/{projectsId}/datasets/{datasetsId}/tables/{tablesId}:setIamPolicy", "httpMethod": "POST", "id": "bigquery.tables.setIamPolicy", @@ -1425,7 +1425,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/datasets/[^/]+/tables/[^/]+$", "required": true, @@ -1445,7 +1445,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", "flatPath": "projects/{projectsId}/datasets/{datasetsId}/tables/{tablesId}:testIamPermissions", "httpMethod": "POST", "id": "bigquery.tables.testIamPermissions", @@ -1454,7 +1454,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/datasets/[^/]+/tables/[^/]+$", "required": true, @@ -1519,20 +1519,20 @@ } } }, - "revision": "20200429", + "revision": "20200925", "rootUrl": "https://bigquery.googleapis.com/", "schemas": { "AggregateClassificationMetrics": { - "description": "Aggregate metrics for classification/classifier models. For multi-class\nmodels, the metrics are either macro-averaged or micro-averaged. When\nmacro-averaged, the metrics are calculated for each label and then an\nunweighted average is taken of those values. When micro-averaged, the\nmetric is calculated globally by counting the total number of correctly\npredicted rows.", + "description": "Aggregate metrics for classification/classifier models. For multi-class models, the metrics are either macro-averaged or micro-averaged. When macro-averaged, the metrics are calculated for each label and then an unweighted average is taken of those values. 
When micro-averaged, the metric is calculated globally by counting the total number of correctly predicted rows.", "id": "AggregateClassificationMetrics", "properties": { "accuracy": { - "description": "Accuracy is the fraction of predictions given the correct label. For\nmulticlass this is a micro-averaged metric.", + "description": "Accuracy is the fraction of predictions given the correct label. For multiclass this is a micro-averaged metric.", "format": "double", "type": "number" }, "f1Score": { - "description": "The F1 score is an average of recall and precision. For multiclass\nthis is a macro-averaged metric.", + "description": "The F1 score is an average of recall and precision. For multiclass this is a macro-averaged metric.", "format": "double", "type": "number" }, @@ -1542,22 +1542,22 @@ "type": "number" }, "precision": { - "description": "Precision is the fraction of actual positive predictions that had\npositive actual labels. For multiclass this is a macro-averaged\nmetric treating each class as a binary classifier.", + "description": "Precision is the fraction of actual positive predictions that had positive actual labels. For multiclass this is a macro-averaged metric treating each class as a binary classifier.", "format": "double", "type": "number" }, "recall": { - "description": "Recall is the fraction of actual positive labels that were given a\npositive prediction. For multiclass this is a macro-averaged metric.", + "description": "Recall is the fraction of actual positive labels that were given a positive prediction. For multiclass this is a macro-averaged metric.", "format": "double", "type": "number" }, "rocAuc": { - "description": "Area Under a ROC Curve. For multiclass this is a macro-averaged\nmetric.", + "description": "Area Under a ROC Curve. For multiclass this is a macro-averaged metric.", "format": "double", "type": "number" }, "threshold": { - "description": "Threshold at which the metrics are computed. For binary\nclassification models this is the positive class threshold.\nFor multi-class classfication models this is the confidence\nthreshold.", + "description": "Threshold at which the metrics are computed. For binary classification models this is the positive class threshold. For multi-class classfication models this is the confidence threshold.", "format": "double", "type": "number" } @@ -1577,8 +1577,8 @@ ], "enumDescriptions": [ "", - "The argument is a variable with fully specified type, which can be a\nstruct or an array, but not a table.", - "The argument is any type, including struct or array, but not a table.\nTo be added: FIXED_TABLE, ANY_TABLE" + "The argument is a variable with fully specified type, which can be a struct or an array, but not a table.", + "The argument is any type, including struct or array, but not a table. To be added: FIXED_TABLE, ANY_TABLE" ], "type": "string" }, @@ -1587,7 +1587,7 @@ "description": "Required unless argument_kind = ANY_TYPE." }, "mode": { - "description": "Optional. Specifies whether the argument is input or output.\nCan be set for procedures only.", + "description": "Optional. Specifies whether the argument is input or output. 
Can be set for procedures only.", "enum": [ "MODE_UNSPECIFIED", "IN", @@ -1659,6 +1659,73 @@ }, "type": "object" }, + "ArimaForecastingMetrics": { + "description": "Model evaluation metrics for ARIMA forecasting models.", + "id": "ArimaForecastingMetrics", + "properties": { + "arimaFittingMetrics": { + "description": "Arima model fitting metrics.", + "items": { + "$ref": "ArimaFittingMetrics" + }, + "type": "array" + }, + "arimaSingleModelForecastingMetrics": { + "description": "Repeated as there can be many metric sets (one for each model) in auto-arima and the large-scale case.", + "items": { + "$ref": "ArimaSingleModelForecastingMetrics" + }, + "type": "array" + }, + "hasDrift": { + "description": "Whether Arima model fitted with drift or not. It is always false when d is not 1.", + "items": { + "type": "boolean" + }, + "type": "array" + }, + "nonSeasonalOrder": { + "description": "Non-seasonal order.", + "items": { + "$ref": "ArimaOrder" + }, + "type": "array" + }, + "seasonalPeriods": { + "description": "Seasonal periods. Repeated because multiple periods are supported for one time series.", + "items": { + "enum": [ + "SEASONAL_PERIOD_TYPE_UNSPECIFIED", + "NO_SEASONALITY", + "DAILY", + "WEEKLY", + "MONTHLY", + "QUARTERLY", + "YEARLY" + ], + "enumDescriptions": [ + "", + "No seasonality", + "Daily period, 24 hours.", + "Weekly period, 7 days.", + "Monthly period, 30 days or irregular.", + "Quarterly period, 90 days or irregular.", + "Yearly period, 365 days or irregular." + ], + "type": "string" + }, + "type": "array" + }, + "timeSeriesId": { + "description": "Id to differentiate different time series for the large-scale case.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "ArimaModelInfo": { "description": "Arima model information.", "id": "ArimaModelInfo", @@ -1672,7 +1739,7 @@ "description": "Arima fitting metrics." }, "hasDrift": { - "description": "Whether Arima model fitted with drift or not. It is always false\nwhen d is not 1.", + "description": "Whether Arima model fitted with drift or not. It is always false when d is not 1.", "type": "boolean" }, "nonSeasonalOrder": { @@ -1680,16 +1747,7 @@ "description": "Non-seasonal order." }, "seasonalPeriods": { - "description": "Seasonal periods. Repeated because multiple periods are supported\nfor one time series.", - "enumDescriptions": [ - "", - "No seasonality", - "Daily period, 24 hours.", - "Weekly period, 7 days.", - "Monthly period, 30 days or irregular.", - "Quarterly period, 90 days or irregular.", - "Yearly period, 365 days or irregular." - ], + "description": "Seasonal periods. Repeated because multiple periods are supported for one time series.", "items": { "enum": [ "SEASONAL_PERIOD_TYPE_UNSPECIFIED", @@ -1700,6 +1758,15 @@ "QUARTERLY", "YEARLY" ], + "enumDescriptions": [ + "", + "No seasonality", + "Daily period, 24 hours.", + "Weekly period, 7 days.", + "Monthly period, 30 days or irregular.", + "Quarterly period, 90 days or irregular.", + "Yearly period, 365 days or irregular." + ], "type": "string" }, "type": "array" @@ -1734,27 +1801,18 @@ "type": "object" }, "ArimaResult": { - "description": "(Auto-)arima fitting result. Wrap everything in ArimaResult for easier\nrefactoring if we want to use model-specific iteration results.", + "description": "(Auto-)arima fitting result. 
Wrap everything in ArimaResult for easier refactoring if we want to use model-specific iteration results.", "id": "ArimaResult", "properties": { "arimaModelInfo": { - "description": "This message is repeated because there are multiple arima models\nfitted in auto-arima. For non-auto-arima model, its size is one.", + "description": "This message is repeated because there are multiple arima models fitted in auto-arima. For non-auto-arima model, its size is one.", "items": { "$ref": "ArimaModelInfo" }, "type": "array" }, "seasonalPeriods": { - "description": "Seasonal periods. Repeated because multiple periods are supported for\none time series.", - "enumDescriptions": [ - "", - "No seasonality", - "Daily period, 24 hours.", - "Weekly period, 7 days.", - "Monthly period, 30 days or irregular.", - "Quarterly period, 90 days or irregular.", - "Yearly period, 365 days or irregular." - ], + "description": "Seasonal periods. Repeated because multiple periods are supported for one time series.", "items": { "enum": [ "SEASONAL_PERIOD_TYPE_UNSPECIFIED", @@ -1765,6 +1823,15 @@ "QUARTERLY", "YEARLY" ], + "enumDescriptions": [ + "", + "No seasonality", + "Daily period, 24 hours.", + "Weekly period, 7 days.", + "Monthly period, 30 days or irregular.", + "Quarterly period, 90 days or irregular.", + "Yearly period, 365 days or irregular." + ], "type": "string" }, "type": "array" @@ -1772,8 +1839,56 @@ }, "type": "object" }, + "ArimaSingleModelForecastingMetrics": { + "description": "Model evaluation metrics for a single ARIMA forecasting model.", + "id": "ArimaSingleModelForecastingMetrics", + "properties": { + "arimaFittingMetrics": { + "$ref": "ArimaFittingMetrics", + "description": "Arima fitting metrics." + }, + "hasDrift": { + "description": "Is arima model fitted with drift or not. It is always false when d is not 1.", + "type": "boolean" + }, + "nonSeasonalOrder": { + "$ref": "ArimaOrder", + "description": "Non-seasonal order." + }, + "seasonalPeriods": { + "description": "Seasonal periods. Repeated because multiple periods are supported for one time series.", + "items": { + "enum": [ + "SEASONAL_PERIOD_TYPE_UNSPECIFIED", + "NO_SEASONALITY", + "DAILY", + "WEEKLY", + "MONTHLY", + "QUARTERLY", + "YEARLY" + ], + "enumDescriptions": [ + "", + "No seasonality", + "Daily period, 24 hours.", + "Weekly period, 7 days.", + "Monthly period, 30 days or irregular.", + "Quarterly period, 90 days or irregular.", + "Yearly period, 365 days or irregular." 
+ ], + "type": "string" + }, + "type": "array" + }, + "timeSeriesId": { + "description": "The id to indicate different time series.", + "type": "string" + } + }, + "type": "object" + }, "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"sampleservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:aliya@example.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts jose@example.com from DATA_READ logging, and\naliya@example.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -1784,18 +1899,18 @@ "type": "array" }, "service": { - "description": "Specifies a service that will be enabled for audit logging.\nFor example, `storage.googleapis.com`, `cloudsql.googleapis.com`.\n`allServices` is a special value that covers all services.", + "description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. 
`allServices` is a special value that covers all services.", "type": "string" } }, "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\njose@example.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions. Example: { \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { - "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", + "description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.", "items": { "type": "string" }, @@ -1972,12 +2087,12 @@ "type": "number" }, "precision": { - "description": "The fraction of actual positive predictions that had positive actual\nlabels.", + "description": "The fraction of actual positive predictions that had positive actual labels.", "format": "double", "type": "number" }, "recall": { - "description": "The fraction of actual positive labels that were given a positive\nprediction.", + "description": "The fraction of actual positive labels that were given a positive prediction.", "format": "double", "type": "number" }, @@ -2000,17 +2115,17 @@ "properties": { "condition": { "$ref": "Expr", - "description": "The condition that is associated with this binding.\n\nIf the condition evaluates to `true`, then this binding applies to the\ncurrent request.\n\nIf the condition evaluates to `false`, then this binding does not apply to\nthe current request. However, a different role binding might grant the same\nrole to one or more of the members in this binding.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies)." + "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. 
For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a user that has been recently deleted. For\n example, `alice@example.com?uid=123456789012345678901`. If the user is\n recovered, this value reverts to `user:{emailid}` and the recovered user\n retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus\n unique identifier) representing a service account that has been recently\n deleted. For example,\n `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.\n If the service account is undeleted, this value reverts to\n `serviceAccount:{emailid}` and the undeleted service account retains the\n role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a Google group that has been recently\n deleted. For example, `admins@example.com?uid=123456789012345678901`. If\n the group is recovered, this value reverts to `group:{emailid}` and the\n recovered group retains the role in the binding.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. 
", "items": { "type": "string" }, "type": "array" }, "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", "type": "string" } }, @@ -2113,7 +2228,7 @@ "id": "CategoricalValue", "properties": { "categoryCounts": { - "description": "Counts of all categories for the categorical feature. If there are\nmore than ten categories, we return top ten (by count) and return\none more CategoryCount with category \"_OTHER_\" and count as\naggregate counts of remaining categories.", + "description": "Counts of all categories for the categorical feature. If there are more than ten categories, we return top ten (by count) and return one more CategoryCount with category \"_OTHER_\" and count as aggregate counts of remaining categories.", "items": { "$ref": "CategoryCount" }, @@ -2131,7 +2246,7 @@ "type": "string" }, "count": { - "description": "The count of training samples matching the category within the\ncluster.", + "description": "The count of training samples matching the category within the cluster.", "format": "int64", "type": "string" } @@ -2172,7 +2287,7 @@ "type": "string" }, "clusterRadius": { - "description": "Cluster radius, the average distance from centroid\nto each point assigned to the cluster.", + "description": "Cluster radius, the average distance from centroid to each point assigned to the cluster.", "format": "double", "type": "number" }, @@ -2226,7 +2341,7 @@ "id": "ConfusionMatrix", "properties": { "confidenceThreshold": { - "description": "Confidence threshold used when computing the entries of the\nconfusion matrix.", + "description": "Confidence threshold used when computing the entries of the confusion matrix.", "format": "double", "type": "number" }, @@ -2288,7 +2403,7 @@ "type": "object" }, "DataSplitResult": { - "description": "Data split result. This contains references to the training and evaluation\ndata tables that were used to train the model.", + "description": "Data split result. This contains references to the training and evaluation data tables that were used to train the model.", "id": "DataSplitResult", "properties": { "evaluationTable": { @@ -2401,6 +2516,10 @@ "description": "The geographic location where the dataset should reside. The default value is US. See details at https://cloud.google.com/bigquery/docs/locations.", "type": "string" }, + "satisfiesPZS": { + "description": "[Output-only] Reserved for future use.", + "type": "boolean" + }, "selfLink": { "description": "[Output-only] A URL that can be used to access the resource again. You can use this URL in Get or Update requests to the resource.", "type": "string" @@ -2529,7 +2648,7 @@ "type": "string" }, "predictedLabel": { - "description": "The predicted label. For confidence_threshold \u003e 0, we will\nalso add an entry indicating the number of items under the\nconfidence threshold.", + "description": "The predicted label. For confidence_threshold \u003e 0, we will also add an entry indicating the number of items under the confidence threshold.", "type": "string" } }, @@ -2558,9 +2677,13 @@ "type": "object" }, "EvaluationMetrics": { - "description": "Evaluation metrics of a model. These are either computed on all training\ndata or just the eval data based on whether eval data was used during\ntraining. These are not present for imported models.", + "description": "Evaluation metrics of a model. 
These are either computed on all training data or just the eval data based on whether eval data was used during training. These are not present for imported models.", "id": "EvaluationMetrics", "properties": { + "arimaForecastingMetrics": { + "$ref": "ArimaForecastingMetrics", + "description": "Populated for ARIMA models." + }, "binaryClassificationMetrics": { "$ref": "BinaryClassificationMetrics", "description": "Populated for binary classification/classifier models." @@ -2575,11 +2698,11 @@ }, "rankingMetrics": { "$ref": "RankingMetrics", - "description": "[Alpha] Populated for implicit feedback type matrix factorization\nmodels." + "description": "Populated for implicit feedback type matrix factorization models." }, "regressionMetrics": { "$ref": "RegressionMetrics", - "description": "Populated for regression models and explicit feedback type matrix\nfactorization models." + "description": "Populated for regression models and explicit feedback type matrix factorization models." } }, "type": "object" @@ -2761,23 +2884,23 @@ "type": "object" }, "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL)\nsyntax. CEL is a C-like expression language. The syntax and semantics of CEL\nare documented at https://github.com/google/cel-spec.\n\nExample (Comparison):\n\n title: \"Summary size limit\"\n description: \"Determines if a summary is less than 100 chars\"\n expression: \"document.summary.size() \u003c 100\"\n\nExample (Equality):\n\n title: \"Requestor is owner\"\n description: \"Determines if requestor is the document owner\"\n expression: \"document.owner == request.auth.claims.email\"\n\nExample (Logic):\n\n title: \"Public documents\"\n description: \"Determine whether the document should be publicly visible\"\n expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\n\nExample (Data Manipulation):\n\n title: \"Notification string\"\n description: \"Create a notification string with a timestamp.\"\n expression: \"'New message received at ' + string(document.create_time)\"\n\nThe exact variables and functions that may be referenced within an expression\nare determined by the service that evaluates it. See the service\ndocumentation for additional information.", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { - "description": "Optional. Description of the expression. 
This is a longer text which\ndescribes the expression, e.g. when hovered over it in a UI.", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, "expression": { - "description": "Textual representation of an expression in Common Expression Language\nsyntax.", + "description": "Textual representation of an expression in Common Expression Language syntax.", "type": "string" }, "location": { - "description": "Optional. String indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", "type": "string" }, "title": { - "description": "Optional. Title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", "type": "string" } }, @@ -2798,6 +2921,10 @@ "description": "[Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.", "type": "string" }, + "connectionId": { + "description": "[Optional, Trusted Tester] Connection for external data source.", + "type": "string" + }, "csvOptions": { "$ref": "CsvOptions", "description": "Additional properties to set if sourceFormat is set to CSV." @@ -2850,7 +2977,7 @@ "type": "string" }, "numericalValue": { - "description": "The numerical feature value. This is the centroid value for this\nfeature.", + "description": "The numerical feature value. This is the centroid value for this feature.", "format": "double", "type": "number" } @@ -2863,7 +2990,7 @@ "properties": { "options": { "$ref": "GetPolicyOptions", - "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to\n`GetIamPolicy`." + "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to `GetIamPolicy`." } }, "type": "object" @@ -2873,7 +3000,7 @@ "id": "GetPolicyOptions", "properties": { "requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. 
To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -2978,11 +3105,15 @@ "id": "HivePartitioningOptions", "properties": { "mode": { - "description": "[Optional, Trusted Tester] When set, what mode of hive partitioning to use when reading data. Two modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet.", + "description": "[Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet.", "type": "string" }, + "requirePartitionFilter": { + "description": "[Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail.", + "type": "boolean" + }, "sourceUriPrefix": { - "description": "[Optional, Trusted Tester] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter).", + "description": "[Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter).", "type": "string" } }, @@ -3138,7 +3269,7 @@ "type": "string" }, "destinationFormat": { - "description": "[Optional] The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON or AVRO for tables and ML_TF_SAVED_MODEL or ML_XGBOOST_BOOSTER for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is ML_TF_SAVED_MODEL.", + "description": "[Optional] The exported file format. 
Possible values include CSV, NEWLINE_DELIMITED_JSON, PARQUET or AVRO for tables and ML_TF_SAVED_MODEL or ML_XGBOOST_BOOSTER for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is ML_TF_SAVED_MODEL.", "type": "string" }, "destinationUri": { @@ -3199,6 +3330,13 @@ "description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion.", "type": "string" }, + "decimalTargetTypes": { + "description": "[Trusted Tester] Defines the list of possible SQL data types to which the source decimal values are converted. This list and the precision and the scale parameters of the decimal field determine the target type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the specified list and if it supports the precision and the scale. STRING supports all precision and scale values. If none of the listed types supports the precision and the scale, the type supporting the widest range in the specified list is picked, and if a value exceeds the supported range when reading the data, an error will be thrown. For example: suppose decimal_target_type = [\"NUMERIC\", \"BIGNUMERIC\"]. Then if (precision,scale) is: * (38,9) -\u003e NUMERIC; * (39,9) -\u003e BIGNUMERIC (NUMERIC cannot hold 30 integer digits); * (38,10) -\u003e BIGNUMERIC (NUMERIC cannot hold 10 fractional digits); * (76,38) -\u003e BIGNUMERIC; * (77,38) -\u003e BIGNUMERIC (error if value exeeds supported range). For duplicated types in this field, only one will be considered and the rest will be ignored. The order of the types in this field is ignored. For example, [\"BIGNUMERIC\", \"NUMERIC\"] is the same as [\"NUMERIC\", \"BIGNUMERIC\"] and NUMERIC always takes precedence over BIGNUMERIC.", + "items": { + "type": "string" + }, + "type": "array" + }, "destinationEncryptionConfiguration": { "$ref": "EncryptionConfiguration", "description": "Custom encryption configuration (e.g., Cloud KMS keys)." @@ -3434,10 +3572,18 @@ "$ref": "EncryptionConfiguration", "description": "Custom encryption configuration (e.g., Cloud KMS keys)." }, + "destinationExpirationTime": { + "description": "[Optional] The time when the destination table expires. Expired tables will be deleted and their storage reclaimed.", + "type": "any" + }, "destinationTable": { "$ref": "TableReference", "description": "[Required] The destination table" }, + "operationType": { + "description": "[Optional] Supported operation types in table copy job.", + "type": "string" + }, "sourceTable": { "$ref": "TableReference", "description": "[Pick one] Source table to copy." @@ -3639,6 +3785,10 @@ "description": "[Output-only] Slot-milliseconds for the job.", "format": "int64", "type": "string" + }, + "transactionInfoTemplate": { + "$ref": "TransactionInfo", + "description": "[Output-only] [Alpha] Information of the multi-statement transaction if this job is part of one." } }, "type": "object" @@ -3875,7 +4025,7 @@ "id": "ListModelsResponse", "properties": { "models": { - "description": "Models in the requested dataset. 
Only the following fields are populated:\nmodel_reference, model_type, creation_time, last_modified_time and\nlabels.", + "description": "Models in the requested dataset. Only the following fields are populated: model_reference, model_type, creation_time, last_modified_time and labels.", "items": { "$ref": "Model" }, @@ -3896,7 +4046,7 @@ "type": "string" }, "routines": { - "description": "Routines in the requested dataset. Unless read_mask is set in the request,\nonly the following fields are populated:\netag, project_id, dataset_id, routine_id, routine_type, creation_time,\nlast_modified_time, and language.", + "description": "Routines in the requested dataset. Unless read_mask is set in the request, only the following fields are populated: etag, project_id, dataset_id, routine_id, routine_type, creation_time, last_modified_time, and language.", "items": { "$ref": "Routine" }, @@ -3906,11 +4056,11 @@ "type": "object" }, "LocationMetadata": { - "description": "BigQuery-specific metadata about a location. This will be set on\ngoogle.cloud.location.Location.metadata in Cloud Location API\nresponses.", + "description": "BigQuery-specific metadata about a location. This will be set on google.cloud.location.Location.metadata in Cloud Location API responses.", "id": "LocationMetadata", "properties": { "legacyLocationId": { - "description": "The legacy BigQuery location ID, e.g. “EU” for the “europe” location.\nThis is for any API consumers that need the legacy “US” and “EU” locations.", + "description": "The legacy BigQuery location ID, e.g. “EU” for the “europe” location. This is for any API consumers that need the legacy “US” and “EU” locations.", "type": "string" } }, @@ -3946,6 +4096,7 @@ "creationTime": { "description": "Output only. The time when this model was created, in millisecs since the epoch.", "format": "int64", + "readOnly": true, "type": "string" }, "description": { @@ -3954,14 +4105,15 @@ }, "encryptionConfiguration": { "$ref": "EncryptionConfiguration", - "description": "Custom encryption configuration (e.g., Cloud KMS keys). This shows the\nencryption configuration of the model data while stored in BigQuery\nstorage. This field can be used with PatchModel to update encryption key\nfor an already encrypted model." + "description": "Custom encryption configuration (e.g., Cloud KMS keys). This shows the encryption configuration of the model data while stored in BigQuery storage. This field can be used with PatchModel to update encryption key for an already encrypted model." }, "etag": { "description": "Output only. A hash of this resource.", + "readOnly": true, "type": "string" }, "expirationTime": { - "description": "Optional. The time when this model expires, in milliseconds since the epoch.\nIf not present, the model will persist indefinitely. Expired models\nwill be deleted and their storage reclaimed. The defaultTableExpirationMs\nproperty of the encapsulating dataset can be used to set a default\nexpirationTime on newly created models.", + "description": "Optional. The time when this model expires, in milliseconds since the epoch. If not present, the model will persist indefinitely. Expired models will be deleted and their storage reclaimed. 
The defaultTableExpirationMs property of the encapsulating dataset can be used to set a default expirationTime on newly created models.", "format": "int64", "type": "string" }, @@ -3970,6 +4122,7 @@ "items": { "$ref": "StandardSqlField" }, + "readOnly": true, "type": "array" }, "friendlyName": { @@ -3977,26 +4130,29 @@ "type": "string" }, "labelColumns": { - "description": "Output only. Label columns that were used to train this model.\nThe output of the model will have a \"predicted_\" prefix to these columns.", + "description": "Output only. Label columns that were used to train this model. The output of the model will have a \"predicted_\" prefix to these columns.", "items": { "$ref": "StandardSqlField" }, + "readOnly": true, "type": "array" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "The labels associated with this model. You can use these to organize\nand group your models. Label keys and values can be no longer\nthan 63 characters, can only contain lowercase letters, numeric\ncharacters, underscores and dashes. International characters are allowed.\nLabel values are optional. Label keys must start with a letter and each\nlabel in the list must have a different key.", + "description": "The labels associated with this model. You can use these to organize and group your models. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key.", "type": "object" }, "lastModifiedTime": { "description": "Output only. The time when this model was last modified, in millisecs since the epoch.", "format": "int64", + "readOnly": true, "type": "string" }, "location": { - "description": "Output only. The geographic location where the model resides. This value\nis inherited from the dataset.", + "description": "Output only. The geographic location where the model resides. This value is inherited from the dataset.", + "readOnly": true, "type": "string" }, "modelReference": { @@ -4016,6 +4172,7 @@ "DNN_REGRESSOR", "BOOSTED_TREE_REGRESSOR", "BOOSTED_TREE_CLASSIFIER", + "ARIMA", "AUTOML_REGRESSOR", "AUTOML_CLASSIFIER" ], @@ -4025,14 +4182,16 @@ "Logistic regression based classification model.", "K-means clustering model.", "Matrix factorization model.", - "DNN classifier model.", + "[Beta] DNN classifier model.", "[Beta] An imported TensorFlow model.", - "DNN regressor model.", - "Boosted tree regressor model.", - "Boosted tree classifier model.", - "AutoML Tables regression model.", - "AutoML Tables classification model." + "[Beta] DNN regressor model.", + "[Beta] Boosted tree regressor model.", + "[Beta] Boosted tree classifier model.", + "[Beta] ARIMA model.", + "[Beta] AutoML Tables regression model.", + "[Beta] AutoML Tables classification model." ], + "readOnly": true, "type": "string" }, "trainingRuns": { @@ -4040,6 +4199,7 @@ "items": { "$ref": "TrainingRun" }, + "readOnly": true, "type": "array" } }, @@ -4113,7 +4273,7 @@ "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access\ncontrols for Google Cloud resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). 
A `role` is a named list of\npermissions; each `role` can be an IAM predefined role or a user-created\ncustom role.\n\nFor some types of Google Cloud resources, a `binding` can also specify a\n`condition`, which is a logical expression that allows access to a resource\nonly if the expression evaluates to `true`. A condition can add constraints\nbased on attributes of the request, the resource, or both. To learn which\nresources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).\n\n**JSON example:**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\n \"user:eve@example.com\"\n ],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n }\n\n**YAML example:**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n - etag: BwWWja0YfJA=\n - version: 3\n\nFor a description of IAM and its features, see the\n[IAM documentation](https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "auditConfigs": { @@ -4124,19 +4284,19 @@ "type": "array" }, "bindings": { - "description": "Associates a list of `members` to a `role`. Optionally, may specify a\n`condition` that determines how and when the `bindings` are applied. Each\nof the `bindings` must contain at least one member.", + "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", "format": "byte", "type": "string" }, "version": { - "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. 
Requests that specify an invalid value\nare rejected.\n\nAny operation that affects conditional role bindings must specify version\n`3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding\n* Adding a conditional role binding to a policy\n* Changing a conditional role binding in a policy\n* Removing any role binding, with or without a condition, from a policy\n that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may\nspecify any valid version or leave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -4308,6 +4468,13 @@ "description": "The resource type of the request.", "type": "string" }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "The labels associated with this job. You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key.", + "type": "object" + }, "location": { "description": "The geographic location where the job should run. See details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location.", "type": "string" @@ -4317,6 +4484,11 @@ "format": "uint32", "type": "integer" }, + "maximumBytesBilled": { + "description": "[Optional] Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.", + "format": "int64", + "type": "string" + }, "parameterMode": { "description": "Standard SQL only. Set to POSITIONAL to use positional (?) 
query parameters or to NAMED to use named (@myparam) query parameters in this query.", "type": "string" @@ -4341,6 +4513,10 @@ }, "type": "array" }, + "requestId": { + "description": "A unique user provided identifier to ensure idempotent behavior for queries. Note that this is different from the job_id. It has the following properties: 1. It is case-sensitive, limited to up to 36 ASCII characters. A UUID is recommended. 2. Read only queries can ignore this token since they are nullipotent by definition. 3. For the purposes of idempotency ensured by the request_id, a request is considered duplicate of another only if they have the same request_id and are actually duplicates. When determining whether a request is a duplicate of the previous request, all parameters in the request that may affect the behavior are considered. For example, query, connection_properties, query_parameters, use_legacy_sql are parameters that affect the result and are considered when determining whether a request is a duplicate, but properties like timeout_ms don't affect the result and are thus not considered. Dry run query requests are never considered duplicate of another request. 4. When a duplicate mutating query request is detected, it returns: a. the results of the mutation if it completes successfully within the timeout. b. the running operation if it is still in progress at the end of the timeout. 5. Its lifetime is limited to 15 minutes. In other words, if two requests are sent with the same request_id, but more than 15 minutes apart, idempotency is not guaranteed.", + "type": "string" + }, "timeoutMs": { "description": "[Optional] How long to wait for the query to complete, in milliseconds, before the request times out and returns. Note that this is only a timeout for the request, not the query. If the query takes longer to run than the timeout value, the call returns without any results and with the 'jobComplete' flag set to false. You can call GetQueryResults() to wait for the query to complete and read the results. 
The default value is 10000 milliseconds (10 seconds).", "format": "uint32", @@ -4482,26 +4658,26 @@ "type": "object" }, "RankingMetrics": { - "description": "Evaluation metrics used by weighted-ALS models specified by\nfeedback_type=implicit.", + "description": "Evaluation metrics used by weighted-ALS models specified by feedback_type=implicit.", "id": "RankingMetrics", "properties": { "averageRank": { - "description": "Determines the goodness of a ranking by computing the percentile rank\nfrom the predicted confidence and dividing it by the original rank.", + "description": "Determines the goodness of a ranking by computing the percentile rank from the predicted confidence and dividing it by the original rank.", "format": "double", "type": "number" }, "meanAveragePrecision": { - "description": "Calculates a precision per user for all the items by ranking them and\nthen averages all the precisions across all the users.", + "description": "Calculates a precision per user for all the items by ranking them and then averages all the precisions across all the users.", "format": "double", "type": "number" }, "meanSquaredError": { - "description": "Similar to the mean squared error computed in regression and explicit\nrecommendation models except instead of computing the rating directly,\nthe output from evaluate is computed against a preference which is 1 or 0\ndepending on if the rating exists or not.", + "description": "Similar to the mean squared error computed in regression and explicit recommendation models except instead of computing the rating directly, the output from evaluate is computed against a preference which is 1 or 0 depending on if the rating exists or not.", "format": "double", "type": "number" }, "normalizedDiscountedCumulativeGain": { - "description": "A metric to determine the goodness of a ranking calculated from the\npredicted confidence by comparing it to an ideal rank measured by the\noriginal ratings.", + "description": "A metric to determine the goodness of a ranking calculated from the predicted confidence by comparing it to an ideal rank measured by the original ratings.", "format": "double", "type": "number" } @@ -4509,7 +4685,7 @@ "type": "object" }, "RegressionMetrics": { - "description": "Evaluation metrics for regression and explicit feedback type matrix\nfactorization models.", + "description": "Evaluation metrics for regression and explicit feedback type matrix factorization models.", "id": "RegressionMetrics", "properties": { "meanAbsoluteError": { @@ -4552,24 +4728,40 @@ "type": "array" }, "creationTime": { - "description": "Output only. The time when this routine was created, in milliseconds since\nthe epoch.", + "description": "Output only. The time when this routine was created, in milliseconds since the epoch.", "format": "int64", + "readOnly": true, "type": "string" }, "definitionBody": { - "description": "Required. The body of the routine.\n\nFor functions, this is the expression in the AS clause.\n\nIf language=SQL, it is the substring inside (but excluding) the\nparentheses. 
For example, for the function created with the following\nstatement:\n\n`CREATE FUNCTION JoinLines(x string, y string) as (concat(x, \"\\n\", y))`\n\nThe definition_body is `concat(x, \"\\n\", y)` (\\n is not replaced with\nlinebreak).\n\nIf language=JAVASCRIPT, it is the evaluated string in the AS clause.\nFor example, for the function created with the following statement:\n\n`CREATE FUNCTION f() RETURNS STRING LANGUAGE js AS 'return \"\\n\";\\n'`\n\nThe definition_body is\n\n`return \"\\n\";\\n`\n\nNote that both \\n are replaced with linebreaks.", + "description": "Required. The body of the routine. For functions, this is the expression in the AS clause. If language=SQL, it is the substring inside (but excluding) the parentheses. For example, for the function created with the following statement: `CREATE FUNCTION JoinLines(x string, y string) as (concat(x, \"\\n\", y))` The definition_body is `concat(x, \"\\n\", y)` (\\n is not replaced with linebreak). If language=JAVASCRIPT, it is the evaluated string in the AS clause. For example, for the function created with the following statement: `CREATE FUNCTION f() RETURNS STRING LANGUAGE js AS 'return \"\\n\";\\n'` The definition_body is `return \"\\n\";\\n` Note that both \\n are replaced with linebreaks.", "type": "string" }, "description": { "description": "Optional. [Experimental] The description of the routine if defined.", "type": "string" }, + "determinismLevel": { + "description": "Optional. [Experimental] The determinism level of the JavaScript UDF if defined.", + "enum": [ + "DETERMINISM_LEVEL_UNSPECIFIED", + "DETERMINISTIC", + "NOT_DETERMINISTIC" + ], + "enumDescriptions": [ + "The determinism of the UDF is unspecified.", + "The UDF is deterministic, meaning that 2 function calls with the same inputs always produce the same result, even across 2 query runs.", + "The UDF is not deterministic." + ], + "type": "string" + }, "etag": { "description": "Output only. A hash of this resource.", + "readOnly": true, "type": "string" }, "importedLibraries": { - "description": "Optional. If language = \"JAVASCRIPT\", this field stores the path of the\nimported JAVASCRIPT libraries.", + "description": "Optional. If language = \"JAVASCRIPT\", this field stores the path of the imported JAVASCRIPT libraries.", "items": { "type": "string" }, @@ -4590,13 +4782,14 @@ "type": "string" }, "lastModifiedTime": { - "description": "Output only. The time when this routine was last modified, in milliseconds\nsince the epoch.", + "description": "Output only. The time when this routine was last modified, in milliseconds since the epoch.", "format": "int64", + "readOnly": true, "type": "string" }, "returnType": { "$ref": "StandardSqlDataType", - "description": "Optional if language = \"SQL\"; required otherwise.\n\nIf absent, the return type is inferred from definition_body at query time\nin each query that references this routine. 
If present, then the evaluated\nresult will be cast to the specified returned type at query time.\n\nFor example, for the functions created with the following statements:\n\n* `CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);`\n\n* `CREATE FUNCTION Increment(x FLOAT64) AS (Add(x, 1));`\n\n* `CREATE FUNCTION Decrement(x FLOAT64) RETURNS FLOAT64 AS (Add(x, -1));`\n\nThe return_type is `{type_kind: \"FLOAT64\"}` for `Add` and `Decrement`, and\nis absent for `Increment` (inferred as FLOAT64 at query time).\n\nSuppose the function `Add` is replaced by\n `CREATE OR REPLACE FUNCTION Add(x INT64, y INT64) AS (x + y);`\n\nThen the inferred return type of `Increment` is automatically changed to\nINT64 at query time, while the return type of `Decrement` remains FLOAT64." + "description": "Optional if language = \"SQL\"; required otherwise. If absent, the return type is inferred from definition_body at query time in each query that references this routine. If present, then the evaluated result will be cast to the specified returned type at query time. For example, for the functions created with the following statements: * `CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);` * `CREATE FUNCTION Increment(x FLOAT64) AS (Add(x, 1));` * `CREATE FUNCTION Decrement(x FLOAT64) RETURNS FLOAT64 AS (Add(x, -1));` The return_type is `{type_kind: \"FLOAT64\"}` for `Add` and `Decrement`, and is absent for `Increment` (inferred as FLOAT64 at query time). Suppose the function `Add` is replaced by `CREATE OR REPLACE FUNCTION Add(x INT64, y INT64) AS (x + y);` Then the inferred return type of `Increment` is automatically changed to INT64 at query time, while the return type of `Decrement` remains FLOAT64." }, "routineReference": { "$ref": "RoutineReference", @@ -4744,18 +4937,33 @@ "properties": { "policy": { "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." }, "updateMask": { - "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. If no mask is provided, the\nfollowing default mask is used:\n\n`paths: \"bindings, etag\"`", + "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: \"bindings, etag\"`", "format": "google-fieldmask", "type": "string" } }, "type": "object" }, + "SnapshotDefinition": { + "id": "SnapshotDefinition", + "properties": { + "baseTableReference": { + "$ref": "TableReference", + "description": "[Required] Reference describing the ID of the table that is snapshotted." 
+ }, + "snapshotTime": { + "description": "[Required] The time at which the base table was snapshot.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, "StandardSqlDataType": { - "description": "The type of a variable, e.g., a function argument.\nExamples:\nINT64: {type_kind=\"INT64\"}\nARRAY\u003cSTRING\u003e: {type_kind=\"ARRAY\", array_element_type=\"STRING\"}\nSTRUCT\u003cx STRING, y ARRAY\u003cDATE\u003e\u003e:\n {type_kind=\"STRUCT\",\n struct_type={fields=[\n {name=\"x\", type={type_kind=\"STRING\"}},\n {name=\"y\", type={type_kind=\"ARRAY\", array_element_type=\"DATE\"}}\n ]}}", + "description": "The type of a variable, e.g., a function argument. Examples: INT64: {type_kind=\"INT64\"} ARRAY: {type_kind=\"ARRAY\", array_element_type=\"STRING\"} STRUCT\u003e: {type_kind=\"STRUCT\", struct_type={fields=[ {name=\"x\", type={type_kind=\"STRING\"}}, {name=\"y\", type={type_kind=\"ARRAY\", array_element_type=\"DATE\"}} ]}}", "id": "StandardSqlDataType", "properties": { "arrayElementType": { @@ -4767,7 +4975,7 @@ "description": "The fields of this struct, in order, if type_kind = \"STRUCT\"." }, "typeKind": { - "description": "Required. The top level type of this field.\nCan be any standard SQL data type (e.g., \"INT64\", \"DATE\", \"ARRAY\").", + "description": "Required. The top level type of this field. Can be any standard SQL data type (e.g., \"INT64\", \"DATE\", \"ARRAY\").", "enum": [ "TYPE_KIND_UNSPECIFIED", "INT64", @@ -4781,6 +4989,7 @@ "DATETIME", "GEOGRAPHY", "NUMERIC", + "BIGNUMERIC", "ARRAY", "STRUCT" ], @@ -4791,14 +5000,15 @@ "Encoded as a number, or string \"NaN\", \"Infinity\" or \"-Infinity\".", "Encoded as a string value.", "Encoded as a base64 string per RFC 4648, section 4.", - "Encoded as an RFC 3339 timestamp with mandatory \"Z\" time zone string:\n1985-04-12T23:20:50.52Z", + "Encoded as an RFC 3339 timestamp with mandatory \"Z\" time zone string: 1985-04-12T23:20:50.52Z", "Encoded as RFC 3339 full-date format string: 1985-04-12", "Encoded as RFC 3339 partial-time format string: 23:20:50.52", "Encoded as RFC 3339 full-date \"T\" partial-time: 1985-04-12T23:20:50.52", "Encoded as WKT", "Encoded as a decimal string.", + "Encoded as a decimal string.", "Encoded as a list with types matching Type.array_type.", - "Encoded as a list with fields of type Type.struct_type[i]. List is used\nbecause a JSON object cannot have duplicate field names." + "Encoded as a list with fields of type Type.struct_type[i]. List is used because a JSON object cannot have duplicate field names." ], "type": "string" } @@ -4815,7 +5025,7 @@ }, "type": { "$ref": "StandardSqlDataType", - "description": "Optional. The type of this parameter. Absent if not explicitly\nspecified (e.g., CREATE FUNCTION statement can omit the return type;\nin this case the output parameter does not have this \"type\" field)." + "description": "Optional. The type of this parameter. Absent if not explicitly specified (e.g., CREATE FUNCTION statement can omit the return type; in this case the output parameter does not have this \"type\" field)." } }, "type": "object" @@ -4960,6 +5170,10 @@ "description": "[Output-only] A URL that can be used to access this resource again.", "type": "string" }, + "snapshotDefinition": { + "$ref": "SnapshotDefinition", + "description": "[Output-only] Snapshot definition." + }, "streamingBuffer": { "$ref": "Streamingbuffer", "description": "[Output-only] Contains information regarding this table's streaming buffer, if one is present. 
This field will be absent if the table is not being streamed to or if there is no data in the streaming buffer." @@ -4973,7 +5187,7 @@ "description": "Time-based partitioning specification for this table. Only one of timePartitioning and rangePartitioning should be specified." }, "type": { - "description": "[Output-only] Describes the table type. The following values are supported: TABLE: A normal BigQuery table. VIEW: A virtual table defined by a SQL query. [TrustedTester] MATERIALIZED_VIEW: SQL query whose result is persisted. EXTERNAL: A table that references data stored in an external storage system, such as Google Cloud Storage. The default value is TABLE.", + "description": "[Output-only] Describes the table type. The following values are supported: TABLE: A normal BigQuery table. VIEW: A virtual table defined by a SQL query. [TrustedTester] SNAPSHOT: An immutable, read-only table that is a copy of another table. [TrustedTester] MATERIALIZED_VIEW: SQL query whose result is persisted. EXTERNAL: A table that references data stored in an external storage system, such as Google Cloud Storage. The default value is TABLE.", "type": "string" }, "view": { @@ -5306,7 +5520,7 @@ "id": "TestIamPermissionsRequest", "properties": { "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "items": { "type": "string" }, @@ -5320,7 +5534,7 @@ "id": "TestIamPermissionsResponse", "properties": { "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { "type": "string" }, @@ -5345,7 +5559,7 @@ "type": "boolean" }, "type": { - "description": "[Required] The only type supported is DAY, which will generate one partition per day.", + "description": "[Required] The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively. When the type is not specified, the default behavior is DAY.", "type": "string" } }, @@ -5354,17 +5568,50 @@ "TrainingOptions": { "id": "TrainingOptions", "properties": { + "autoArima": { + "description": "Whether to enable auto ARIMA or not.", + "type": "boolean" + }, + "autoArimaMaxOrder": { + "description": "The max value of non-seasonal p and q.", + "format": "int64", + "type": "string" + }, "batchSize": { "description": "Batch size for dnn models.", "format": "int64", "type": "string" }, + "dataFrequency": { + "description": "The data frequency of a time series.", + "enum": [ + "DATA_FREQUENCY_UNSPECIFIED", + "AUTO_FREQUENCY", + "YEARLY", + "QUARTERLY", + "MONTHLY", + "WEEKLY", + "DAILY", + "HOURLY" + ], + "enumDescriptions": [ + "", + "Automatically inferred from timestamps.", + "Yearly data.", + "Quarterly data.", + "Monthly data.", + "Weekly data.", + "Daily data.", + "Hourly data." + ], + "type": "string" + }, "dataSplitColumn": { - "description": "The column to split data with. This column won't be used as a\nfeature.\n1. When data_split_method is CUSTOM, the corresponding column should\nbe boolean. 
The rows with true value tag are eval data, and the false\nare training data.\n2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION\nrows (from smallest to largest) in the corresponding column are used\nas training data, and the rest are eval data. It respects the order\nin Orderable data types:\nhttps://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties", + "description": "The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties", "type": "string" }, "dataSplitEvalFraction": { - "description": "The fraction of evaluation data over the whole input data. The rest\nof data will be used as training data. The format should be double.\nAccurate to two decimal places.\nDefault value is 0.2.", + "description": "The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2.", "format": "double", "type": "number" }, @@ -5384,7 +5631,7 @@ "Splits data with the user provided tags.", "Splits data sequentially.", "Data split will be skipped.", - "Splits data automatically: Uses NO_SPLIT if the data size is small.\nOtherwise uses RANDOM." + "Splits data automatically: Uses NO_SPLIT if the data size is small. Otherwise uses RANDOM." ], "type": "string" }, @@ -5408,11 +5655,11 @@ "type": "number" }, "earlyStop": { - "description": "Whether to stop early when the loss doesn't improve significantly\nany more (compared to min_relative_progress). Used only for iterative\ntraining algorithms.", + "description": "Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms.", "type": "boolean" }, "feedbackType": { - "description": "Feedback type that specifies which algorithm to run for matrix\nfactorization.", + "description": "Feedback type that specifies which algorithm to run for matrix factorization.", "enum": [ "FEEDBACK_TYPE_UNSPECIFIED", "IMPLICIT", @@ -5433,8 +5680,163 @@ }, "type": "array" }, + "holidayRegion": { + "description": "The geographical region based on which the holidays are considered in time series modeling. 
If a valid value is specified, then holiday effects modeling is enabled.", + "enum": [ + "HOLIDAY_REGION_UNSPECIFIED", + "GLOBAL", + "NA", + "JAPAC", + "EMEA", + "LAC", + "AE", + "AR", + "AT", + "AU", + "BE", + "BR", + "CA", + "CH", + "CL", + "CN", + "CO", + "CS", + "CZ", + "DE", + "DK", + "DZ", + "EC", + "EE", + "EG", + "ES", + "FI", + "FR", + "GB", + "GR", + "HK", + "HU", + "ID", + "IE", + "IL", + "IN", + "IR", + "IT", + "JP", + "KR", + "LV", + "MA", + "MX", + "MY", + "NG", + "NL", + "NO", + "NZ", + "PE", + "PH", + "PK", + "PL", + "PT", + "RO", + "RS", + "RU", + "SA", + "SE", + "SG", + "SI", + "SK", + "TH", + "TR", + "TW", + "UA", + "US", + "VE", + "VN", + "ZA" + ], + "enumDescriptions": [ + "Holiday region unspecified.", + "Global.", + "North America.", + "Japan and Asia Pacific: Korea, Greater China, India, Australia, and New Zealand.", + "Europe, the Middle East and Africa.", + "Latin America and the Caribbean.", + "United Arab Emirates", + "Argentina", + "Austria", + "Australia", + "Belgium", + "Brazil", + "Canada", + "Switzerland", + "Chile", + "China", + "Colombia", + "Czechoslovakia", + "Czech Republic", + "Germany", + "Denmark", + "Algeria", + "Ecuador", + "Estonia", + "Egypt", + "Spain", + "Finland", + "France", + "Great Britain (United Kingdom)", + "Greece", + "Hong Kong", + "Hungary", + "Indonesia", + "Ireland", + "Israel", + "India", + "Iran", + "Italy", + "Japan", + "Korea (South)", + "Latvia", + "Morocco", + "Mexico", + "Malaysia", + "Nigeria", + "Netherlands", + "Norway", + "New Zealand", + "Peru", + "Philippines", + "Pakistan", + "Poland", + "Portugal", + "Romania", + "Serbia", + "Russian Federation", + "Saudi Arabia", + "Sweden", + "Singapore", + "Slovenia", + "Slovakia", + "Thailand", + "Turkey", + "Taiwan", + "Ukraine", + "United States", + "Venezuela", + "Viet Nam", + "South Africa" + ], + "type": "string" + }, + "horizon": { + "description": "The number of periods ahead that need to be forecasted.", + "format": "int64", + "type": "string" + }, + "includeDrift": { + "description": "Include drift when fitting an ARIMA model.", + "type": "boolean" + }, "initialLearnRate": { - "description": "Specifies the initial learning rate for the line search learn rate\nstrategy.", + "description": "Specifies the initial learning rate for the line search learn rate strategy.", "format": "double", "type": "number" }, @@ -5450,7 +5852,7 @@ "type": "string" }, "kmeansInitializationColumn": { - "description": "The column used to provide the initial centroids for kmeans algorithm\nwhen kmeans_initialization_method is CUSTOM.", + "description": "The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM.", "type": "string" }, "kmeansInitializationMethod": { @@ -5464,7 +5866,7 @@ "enumDescriptions": [ "", "Initializes the centroids randomly.", - "Initializes the centroids using data specified in\nkmeans_initialization_column.", + "Initializes the centroids using data specified in kmeans_initialization_column.", "Initializes with kmeans++." ], "type": "string" @@ -5484,7 +5886,7 @@ "format": "double", "type": "number" }, - "description": "Weights associated with each label class, for rebalancing the\ntraining data. Only applicable for classification models.", + "description": "Weights associated with each label class, for rebalancing the training data. 
Only applicable for classification models.", "type": "object" }, "learnRate": { @@ -5521,7 +5923,7 @@ "type": "string" }, "maxIterations": { - "description": "The maximum number of iterations in training. Used only for iterative\ntraining algorithms.", + "description": "The maximum number of iterations in training. Used only for iterative training algorithms.", "format": "int64", "type": "string" }, @@ -5531,7 +5933,7 @@ "type": "string" }, "minRelativeProgress": { - "description": "When early_stop is true, stops training when accuracy improvement is\nless than 'min_relative_progress'. Used only for iterative training\nalgorithms.", + "description": "When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms.", "format": "double", "type": "number" }, @@ -5541,9 +5943,13 @@ "type": "number" }, "modelUri": { - "description": "[Beta] Google Cloud Storage URI from which the model was imported. Only\napplicable for imported models.", + "description": "[Beta] Google Cloud Storage URI from which the model was imported. Only applicable for imported models.", "type": "string" }, + "nonSeasonalOrder": { + "$ref": "ArimaOrder", + "description": "A specification of the non-seasonal part of the ARIMA model: the three components (p, d, q) are the AR order, the degree of differencing, and the MA order." + }, "numClusters": { "description": "Number of clusters for clustering models.", "format": "int64", @@ -5568,17 +5974,33 @@ ], "type": "string" }, + "preserveInputStructs": { + "description": "Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b.", + "type": "boolean" + }, "subsample": { - "description": "Subsample fraction of the training data to grow tree to prevent\noverfitting for boosted tree models.", + "description": "Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models.", "format": "double", "type": "number" }, + "timeSeriesDataColumn": { + "description": "Column to be designated as time series data for ARIMA model.", + "type": "string" + }, + "timeSeriesIdColumn": { + "description": "The id column that will be used to indicate different time series to forecast in parallel.", + "type": "string" + }, + "timeSeriesTimestampColumn": { + "description": "Column to be designated as time series timestamp for ARIMA model.", + "type": "string" + }, "userColumn": { "description": "User column specified for matrix factorization models.", "type": "string" }, "walsAlpha": { - "description": "Hyperparameter for matrix factoration when implicit feedback type is\nspecified.", + "description": "Hyperparameter for matrix factoration when implicit feedback type is specified.", "format": "double", "type": "number" }, @@ -5595,11 +6017,11 @@ "properties": { "dataSplitResult": { "$ref": "DataSplitResult", - "description": "Data split result of the training run. Only set when the input data is\nactually split." + "description": "Data split result of the training run. Only set when the input data is actually split." }, "evaluationMetrics": { "$ref": "EvaluationMetrics", - "description": "The evaluation metrics over training/eval data that were computed at the\nend of training." + "description": "The evaluation metrics over training/eval data that were computed at the end of training." 
}, "results": { "description": "Output of each iteration run, results.size() \u003c= max_iterations.", @@ -5615,12 +6037,23 @@ }, "trainingOptions": { "$ref": "TrainingOptions", - "description": "Options that were used for this training run, includes\nuser specified and default options that were used." + "description": "Options that were used for this training run, includes user specified and default options that were used." + } + }, + "type": "object" + }, + "TransactionInfo": { + "id": "TransactionInfo", + "properties": { + "transactionId": { + "description": "[Output-only] // [Alpha] Id of the transaction.", + "type": "string" } }, "type": "object" }, "UserDefinedFunctionResource": { + "description": "This is used for defining User Defined Function (UDF) resources only when using legacy SQL. Users of Standard SQL should leverage either DDL (e.g. CREATE [TEMPORARY] FUNCTION ... ) or the Routines API to define UDF resources. For additional information on migrating, see: https://cloud.google.com/bigquery/docs/reference/standard-sql/migrating-from-legacy-sql#differences_in_user-defined_javascript_functions", "id": "UserDefinedFunctionResource", "properties": { "inlineCode": { diff --git a/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go b/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go index 806fd7d8819..5d2deaeeba5 100644 --- a/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go +++ b/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go @@ -249,25 +249,19 @@ type TablesService struct { } // AggregateClassificationMetrics: Aggregate metrics for -// classification/classifier models. For multi-class -// models, the metrics are either macro-averaged or micro-averaged. -// When -// macro-averaged, the metrics are calculated for each label and then -// an -// unweighted average is taken of those values. When micro-averaged, -// the -// metric is calculated globally by counting the total number of -// correctly +// classification/classifier models. For multi-class models, the metrics +// are either macro-averaged or micro-averaged. When macro-averaged, the +// metrics are calculated for each label and then an unweighted average +// is taken of those values. When micro-averaged, the metric is +// calculated globally by counting the total number of correctly // predicted rows. type AggregateClassificationMetrics struct { // Accuracy: Accuracy is the fraction of predictions given the correct - // label. For - // multiclass this is a micro-averaged metric. + // label. For multiclass this is a micro-averaged metric. Accuracy float64 `json:"accuracy,omitempty"` // F1Score: The F1 score is an average of recall and precision. For - // multiclass - // this is a macro-averaged metric. + // multiclass this is a macro-averaged metric. F1Score float64 `json:"f1Score,omitempty"` // LogLoss: Logarithmic Loss. For multiclass this is a macro-averaged @@ -275,28 +269,22 @@ type AggregateClassificationMetrics struct { LogLoss float64 `json:"logLoss,omitempty"` // Precision: Precision is the fraction of actual positive predictions - // that had - // positive actual labels. For multiclass this is a - // macro-averaged - // metric treating each class as a binary classifier. + // that had positive actual labels. For multiclass this is a + // macro-averaged metric treating each class as a binary classifier. Precision float64 `json:"precision,omitempty"` // Recall: Recall is the fraction of actual positive labels that were - // given a - // positive prediction. 
For multiclass this is a macro-averaged metric. + // given a positive prediction. For multiclass this is a macro-averaged + // metric. Recall float64 `json:"recall,omitempty"` // RocAuc: Area Under a ROC Curve. For multiclass this is a - // macro-averaged - // metric. + // macro-averaged metric. RocAuc float64 `json:"rocAuc,omitempty"` - // Threshold: Threshold at which the metrics are computed. For - // binary - // classification models this is the positive class threshold. - // For multi-class classfication models this is the - // confidence - // threshold. + // Threshold: Threshold at which the metrics are computed. For binary + // classification models this is the positive class threshold. For + // multi-class classfication models this is the confidence threshold. Threshold float64 `json:"threshold,omitempty"` // ForceSendFields is a list of field names (e.g. "Accuracy") to @@ -355,18 +343,15 @@ type Argument struct { // Possible values: // "ARGUMENT_KIND_UNSPECIFIED" // "FIXED_TYPE" - The argument is a variable with fully specified - // type, which can be a - // struct or an array, but not a table. + // type, which can be a struct or an array, but not a table. // "ANY_TYPE" - The argument is any type, including struct or array, - // but not a table. - // To be added: FIXED_TABLE, ANY_TABLE + // but not a table. To be added: FIXED_TABLE, ANY_TABLE ArgumentKind string `json:"argumentKind,omitempty"` // DataType: Required unless argument_kind = ANY_TYPE. DataType *StandardSqlDataType `json:"dataType,omitempty"` - // Mode: Optional. Specifies whether the argument is input or - // output. + // Mode: Optional. Specifies whether the argument is input or output. // Can be set for procedures only. // // Possible values: @@ -508,6 +493,65 @@ func (s *ArimaFittingMetrics) UnmarshalJSON(data []byte) error { return nil } +// ArimaForecastingMetrics: Model evaluation metrics for ARIMA +// forecasting models. +type ArimaForecastingMetrics struct { + // ArimaFittingMetrics: Arima model fitting metrics. + ArimaFittingMetrics []*ArimaFittingMetrics `json:"arimaFittingMetrics,omitempty"` + + // ArimaSingleModelForecastingMetrics: Repeated as there can be many + // metric sets (one for each model) in auto-arima and the large-scale + // case. + ArimaSingleModelForecastingMetrics []*ArimaSingleModelForecastingMetrics `json:"arimaSingleModelForecastingMetrics,omitempty"` + + // HasDrift: Whether Arima model fitted with drift or not. It is always + // false when d is not 1. + HasDrift []bool `json:"hasDrift,omitempty"` + + // NonSeasonalOrder: Non-seasonal order. + NonSeasonalOrder []*ArimaOrder `json:"nonSeasonalOrder,omitempty"` + + // SeasonalPeriods: Seasonal periods. Repeated because multiple periods + // are supported for one time series. + // + // Possible values: + // "SEASONAL_PERIOD_TYPE_UNSPECIFIED" + // "NO_SEASONALITY" - No seasonality + // "DAILY" - Daily period, 24 hours. + // "WEEKLY" - Weekly period, 7 days. + // "MONTHLY" - Monthly period, 30 days or irregular. + // "QUARTERLY" - Quarterly period, 90 days or irregular. + // "YEARLY" - Yearly period, 365 days or irregular. + SeasonalPeriods []string `json:"seasonalPeriods,omitempty"` + + // TimeSeriesId: Id to differentiate different time series for the + // large-scale case. + TimeSeriesId []string `json:"timeSeriesId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ArimaFittingMetrics") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ArimaFittingMetrics") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ArimaForecastingMetrics) MarshalJSON() ([]byte, error) { + type NoMethod ArimaForecastingMetrics + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ArimaModelInfo: Arima model information. type ArimaModelInfo struct { // ArimaCoefficients: Arima coefficients. @@ -517,16 +561,14 @@ type ArimaModelInfo struct { ArimaFittingMetrics *ArimaFittingMetrics `json:"arimaFittingMetrics,omitempty"` // HasDrift: Whether Arima model fitted with drift or not. It is always - // false - // when d is not 1. + // false when d is not 1. HasDrift bool `json:"hasDrift,omitempty"` // NonSeasonalOrder: Non-seasonal order. NonSeasonalOrder *ArimaOrder `json:"nonSeasonalOrder,omitempty"` // SeasonalPeriods: Seasonal periods. Repeated because multiple periods - // are supported - // for one time series. + // are supported for one time series. // // Possible values: // "SEASONAL_PERIOD_TYPE_UNSPECIFIED" @@ -601,17 +643,16 @@ func (s *ArimaOrder) MarshalJSON() ([]byte, error) { } // ArimaResult: (Auto-)arima fitting result. Wrap everything in -// ArimaResult for easier -// refactoring if we want to use model-specific iteration results. +// ArimaResult for easier refactoring if we want to use model-specific +// iteration results. type ArimaResult struct { // ArimaModelInfo: This message is repeated because there are multiple - // arima models - // fitted in auto-arima. For non-auto-arima model, its size is one. + // arima models fitted in auto-arima. For non-auto-arima model, its size + // is one. ArimaModelInfo []*ArimaModelInfo `json:"arimaModelInfo,omitempty"` // SeasonalPeriods: Seasonal periods. Repeated because multiple periods - // are supported for - // one time series. + // are supported for one time series. // // Possible values: // "SEASONAL_PERIOD_TYPE_UNSPECIFIED" @@ -647,72 +688,84 @@ func (s *ArimaResult) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AuditConfig: Specifies the audit configuration for a service. -// The configuration determines which permission types are logged, and -// what -// identities, if any, are exempted from logging. -// An AuditConfig must have one or more AuditLogConfigs. -// -// If there are AuditConfigs for both `allServices` and a specific -// service, -// the union of the two AuditConfigs is used for that service: the -// log_types -// specified in each AuditConfig are enabled, and the exempted_members -// in each -// AuditLogConfig are exempted. 
-// -// Example Policy with multiple AuditConfigs: -// -// { -// "audit_configs": [ -// { -// "service": "allServices" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// }, -// { -// "log_type": "ADMIN_READ", -// } -// ] -// }, -// { -// "service": "sampleservice.googleapis.com" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// }, -// { -// "log_type": "DATA_WRITE", -// "exempted_members": [ -// "user:aliya@example.com" -// ] -// } -// ] -// } -// ] -// } -// -// For sampleservice, this policy enables DATA_READ, DATA_WRITE and -// ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, -// and -// aliya@example.com from DATA_WRITE logging. +// ArimaSingleModelForecastingMetrics: Model evaluation metrics for a +// single ARIMA forecasting model. +type ArimaSingleModelForecastingMetrics struct { + // ArimaFittingMetrics: Arima fitting metrics. + ArimaFittingMetrics *ArimaFittingMetrics `json:"arimaFittingMetrics,omitempty"` + + // HasDrift: Is arima model fitted with drift or not. It is always false + // when d is not 1. + HasDrift bool `json:"hasDrift,omitempty"` + + // NonSeasonalOrder: Non-seasonal order. + NonSeasonalOrder *ArimaOrder `json:"nonSeasonalOrder,omitempty"` + + // SeasonalPeriods: Seasonal periods. Repeated because multiple periods + // are supported for one time series. + // + // Possible values: + // "SEASONAL_PERIOD_TYPE_UNSPECIFIED" + // "NO_SEASONALITY" - No seasonality + // "DAILY" - Daily period, 24 hours. + // "WEEKLY" - Weekly period, 7 days. + // "MONTHLY" - Monthly period, 30 days or irregular. + // "QUARTERLY" - Quarterly period, 90 days or irregular. + // "YEARLY" - Yearly period, 365 days or irregular. + SeasonalPeriods []string `json:"seasonalPeriods,omitempty"` + + // TimeSeriesId: The id to indicate different time series. + TimeSeriesId string `json:"timeSeriesId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ArimaFittingMetrics") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ArimaFittingMetrics") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ArimaSingleModelForecastingMetrics) MarshalJSON() ([]byte, error) { + type NoMethod ArimaSingleModelForecastingMetrics + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AuditConfig: Specifies the audit configuration for a service. The +// configuration determines which permission types are logged, and what +// identities, if any, are exempted from logging. An AuditConfig must +// have one or more AuditLogConfigs. 
If there are AuditConfigs for both +// `allServices` and a specific service, the union of the two +// AuditConfigs is used for that service: the log_types specified in +// each AuditConfig are enabled, and the exempted_members in each +// AuditLogConfig are exempted. Example Policy with multiple +// AuditConfigs: { "audit_configs": [ { "service": "allServices", +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": +// [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { +// "log_type": "ADMIN_READ" } ] }, { "service": +// "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": +// "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ +// "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy +// enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts +// jose@example.com from DATA_READ logging, and aliya@example.com from +// DATA_WRITE logging. type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of // permission. AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` - // Service: Specifies a service that will be enabled for audit - // logging. - // For example, `storage.googleapis.com`, - // `cloudsql.googleapis.com`. + // Service: Specifies a service that will be enabled for audit logging. + // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. // `allServices` is a special value that covers all services. Service string `json:"service,omitempty"` @@ -741,31 +794,15 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { } // AuditLogConfig: Provides the configuration for logging a type of -// permissions. -// Example: -// -// { -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// } -// ] -// } -// -// This enables 'DATA_READ' and 'DATA_WRITE' logging, while -// exempting -// jose@example.com from DATA_READ logging. +// permissions. Example: { "audit_log_configs": [ { "log_type": +// "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { +// "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and +// 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ +// logging. type AuditLogConfig struct { // ExemptedMembers: Specifies the identities that do not cause logging - // for this type of - // permission. - // Follows the same format of Binding.members. + // for this type of permission. Follows the same format of + // Binding.members. ExemptedMembers []string `json:"exemptedMembers,omitempty"` // LogType: The log type that this config enables. @@ -1067,13 +1104,11 @@ type BinaryConfusionMatrix struct { PositiveClassThreshold float64 `json:"positiveClassThreshold,omitempty"` // Precision: The fraction of actual positive predictions that had - // positive actual - // labels. + // positive actual labels. Precision float64 `json:"precision,omitempty"` // Recall: The fraction of actual positive labels that were given a - // positive - // prediction. + // positive prediction. Recall float64 `json:"recall,omitempty"` // TrueNegatives: Number of true samples predicted as false. @@ -1129,95 +1164,53 @@ func (s *BinaryConfusionMatrix) UnmarshalJSON(data []byte) error { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: The condition that is associated with this binding. - // - // If the condition evaluates to `true`, then this binding applies to - // the - // current request. 
- // - // If the condition evaluates to `false`, then this binding does not - // apply to - // the current request. However, a different role binding might grant - // the same - // role to one or more of the members in this binding. - // - // To learn which resources support conditions in their IAM policies, - // see - // the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/r - // esource-policies). + // Condition: The condition that is associated with this binding. If the + // condition evaluates to `true`, then this binding applies to the + // current request. If the condition evaluates to `false`, then this + // binding does not apply to the current request. However, a different + // role binding might grant the same role to one or more of the members + // in this binding. To learn which resources support conditions in their + // IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud - // Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@example.com` . - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. - // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a user that has been recently deleted. - // For - // example, `alice@example.com?uid=123456789012345678901`. If the - // user is - // recovered, this value reverts to `user:{emailid}` and the - // recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address - // (plus - // unique identifier) representing a service account that has been - // recently - // deleted. For example, - // + // Platform resource. `members` can have the following values: * + // `allUsers`: A special identifier that represents anyone who is on the + // internet; with or without a Google account. * + // `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. * + // `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . * + // `serviceAccount:{emailid}`: An email address that represents a + // service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An + // email address that represents a Google group. For example, + // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An + // email address (plus unique identifier) representing a user that has + // been recently deleted. For example, + // `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered + // user retains the role in the binding. 
* + // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account - // retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a Google group that has been recently - // deleted. For example, - // `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` - // and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. - // - // + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains + // the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: + // An email address (plus unique identifier) representing a Google group + // that has been recently deleted. For example, + // `admins@example.com?uid=123456789012345678901`. If the group is + // recovered, this value reverts to `group:{emailid}` and the recovered + // group retains the role in the binding. * `domain:{domain}`: The G + // Suite domain (primary) that represents all the users of that domain. + // For example, `google.com` or `example.com`. Members []string `json:"members,omitempty"` - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Role: Role that is assigned to `members`. For example, + // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` // ForceSendFields is a list of field names (e.g. "Condition") to @@ -1430,10 +1423,9 @@ func (s *BqmlTrainingRunTrainingOptions) UnmarshalJSON(data []byte) error { // CategoricalValue: Representative value of a categorical feature. type CategoricalValue struct { // CategoryCounts: Counts of all categories for the categorical feature. - // If there are - // more than ten categories, we return top ten (by count) and return - // one more CategoryCount with category "_OTHER_" and count as - // aggregate counts of remaining categories. + // If there are more than ten categories, we return top ten (by count) + // and return one more CategoryCount with category "_OTHER_" and count + // as aggregate counts of remaining categories. CategoryCounts []*CategoryCount `json:"categoryCounts,omitempty"` // ForceSendFields is a list of field names (e.g. "CategoryCounts") to @@ -1466,8 +1458,7 @@ type CategoryCount struct { // Category: The name of category. Category string `json:"category,omitempty"` - // Count: The count of training samples matching the category within - // the + // Count: The count of training samples matching the category within the // cluster. Count int64 `json:"count,omitempty,string"` @@ -1534,8 +1525,8 @@ type ClusterInfo struct { // CentroidId: Centroid id. CentroidId int64 `json:"centroidId,omitempty,string"` - // ClusterRadius: Cluster radius, the average distance from centroid - // to each point assigned to the cluster. + // ClusterRadius: Cluster radius, the average distance from centroid to + // each point assigned to the cluster. 
ClusterRadius float64 `json:"clusterRadius,omitempty"` // ClusterSize: Cluster size, the total number of points assigned to the @@ -1665,8 +1656,7 @@ func (s *ClusteringMetrics) UnmarshalJSON(data []byte) error { // models. type ConfusionMatrix struct { // ConfidenceThreshold: Confidence threshold used when computing the - // entries of the - // confusion matrix. + // entries of the confusion matrix. ConfidenceThreshold float64 `json:"confidenceThreshold,omitempty"` // Rows: One row per actual label. @@ -1818,8 +1808,8 @@ func (s *CsvOptions) MarshalJSON() ([]byte, error) { } // DataSplitResult: Data split result. This contains references to the -// training and evaluation -// data tables that were used to train the model. +// training and evaluation data tables that were used to train the +// model. type DataSplitResult struct { // EvaluationTable: Table reference of the evaluation data after split. EvaluationTable *TableReference `json:"evaluationTable,omitempty"` @@ -1934,6 +1924,9 @@ type Dataset struct { // https://cloud.google.com/bigquery/docs/locations. Location string `json:"location,omitempty"` + // SatisfiesPZS: [Output-only] Reserved for future use. + SatisfiesPZS bool `json:"satisfiesPZS,omitempty"` + // SelfLink: [Output-only] A URL that can be used to access the resource // again. You can use this URL in Get or Update requests to the // resource. @@ -2234,8 +2227,7 @@ type Entry struct { ItemCount int64 `json:"itemCount,omitempty,string"` // PredictedLabel: The predicted label. For confidence_threshold > 0, we - // will - // also add an entry indicating the number of items under the + // will also add an entry indicating the number of items under the // confidence threshold. PredictedLabel string `json:"predictedLabel,omitempty"` @@ -2300,11 +2292,13 @@ func (s *ErrorProto) MarshalJSON() ([]byte, error) { } // EvaluationMetrics: Evaluation metrics of a model. These are either -// computed on all training -// data or just the eval data based on whether eval data was used -// during -// training. These are not present for imported models. +// computed on all training data or just the eval data based on whether +// eval data was used during training. These are not present for +// imported models. type EvaluationMetrics struct { + // ArimaForecastingMetrics: Populated for ARIMA models. + ArimaForecastingMetrics *ArimaForecastingMetrics `json:"arimaForecastingMetrics,omitempty"` + // BinaryClassificationMetrics: Populated for binary // classification/classifier models. BinaryClassificationMetrics *BinaryClassificationMetrics `json:"binaryClassificationMetrics,omitempty"` @@ -2316,18 +2310,16 @@ type EvaluationMetrics struct { // classification/classifier models. MultiClassClassificationMetrics *MultiClassClassificationMetrics `json:"multiClassClassificationMetrics,omitempty"` - // RankingMetrics: [Alpha] Populated for implicit feedback type matrix - // factorization - // models. + // RankingMetrics: Populated for implicit feedback type matrix + // factorization models. RankingMetrics *RankingMetrics `json:"rankingMetrics,omitempty"` // RegressionMetrics: Populated for regression models and explicit - // feedback type matrix - // factorization models. + // feedback type matrix factorization models. RegressionMetrics *RegressionMetrics `json:"regressionMetrics,omitempty"` // ForceSendFields is a list of field names (e.g. - // "BinaryClassificationMetrics") to unconditionally include in API + // "ArimaForecastingMetrics") to unconditionally include in API // requests. 
By default, fields with empty values are omitted from API // requests. However, any non-pointer, non-interface field appearing in // ForceSendFields will be sent to the server regardless of whether the @@ -2335,13 +2327,13 @@ type EvaluationMetrics struct { // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. - // "BinaryClassificationMetrics") to include in API requests with the - // JSON null value. By default, fields with empty values are omitted - // from API requests. However, any field with an empty value appearing - // in NullFields will be sent to the server as null. It is an error if a - // field in this list has a non-empty value. This may be used to include - // null fields in Patch requests. + // NullFields is a list of field names (e.g. "ArimaForecastingMetrics") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -2540,65 +2532,40 @@ func (s *ExplainQueryStep) MarshalJSON() ([]byte, error) { } // Expr: Represents a textual expression in the Common Expression -// Language (CEL) -// syntax. CEL is a C-like expression language. The syntax and semantics -// of CEL -// are documented at https://github.com/google/cel-spec. -// -// Example (Comparison): -// -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" -// -// Example (Equality): -// -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == -// request.auth.claims.email" -// -// Example (Logic): -// -// title: "Public documents" -// description: "Determine whether the document should be publicly -// visible" -// expression: "document.type != 'private' && document.type != -// 'internal'" -// -// Example (Data Manipulation): -// -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + -// string(document.create_time)" -// -// The exact variables and functions that may be referenced within an -// expression -// are determined by the service that evaluates it. See the -// service -// documentation for additional information. +// Language (CEL) syntax. CEL is a C-like expression language. The +// syntax and semantics of CEL are documented at +// https://github.com/google/cel-spec. Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" +// description: "Determine whether the document should be publicly +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." 
+// expression: "'New message received at ' + +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. type Expr struct { // Description: Optional. Description of the expression. This is a - // longer text which - // describes the expression, e.g. when hovered over it in a UI. + // longer text which describes the expression, e.g. when hovered over it + // in a UI. Description string `json:"description,omitempty"` // Expression: Textual representation of an expression in Common - // Expression Language - // syntax. + // Expression Language syntax. Expression string `json:"expression,omitempty"` // Location: Optional. String indicating the location of the expression - // for error - // reporting, e.g. a file name and a position in the file. + // for error reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` // Title: Optional. Title for the expression, i.e. a short string - // describing - // its purpose. This can be used e.g. in UIs which allow to enter - // the - // expression. + // describing its purpose. This can be used e.g. in UIs which allow to + // enter the expression. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -2639,6 +2606,10 @@ type ExternalDataConfiguration struct { // Datastore backups and Avro formats. Compression string `json:"compression,omitempty"` + // ConnectionId: [Optional, Trusted Tester] Connection for external data + // source. + ConnectionId string `json:"connectionId,omitempty"` + // CsvOptions: Additional properties to set if sourceFormat is set to // CSV. CsvOptions *CsvOptions `json:"csvOptions,omitempty"` @@ -2729,8 +2700,7 @@ type FeatureValue struct { FeatureColumn string `json:"featureColumn,omitempty"` // NumericalValue: The numerical feature value. This is the centroid - // value for this - // feature. + // value for this feature. NumericalValue float64 `json:"numericalValue,omitempty"` // ForceSendFields is a list of field names (e.g. "CategoricalValue") to @@ -2774,8 +2744,7 @@ func (s *FeatureValue) UnmarshalJSON(data []byte) error { // GetIamPolicyRequest: Request message for `GetIamPolicy` method. type GetIamPolicyRequest struct { // Options: OPTIONAL: A `GetPolicyOptions` object for specifying options - // to - // `GetIamPolicy`. + // to `GetIamPolicy`. Options *GetPolicyOptions `json:"options,omitempty"` // ForceSendFields is a list of field names (e.g. "Options") to @@ -2804,24 +2773,14 @@ func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) { // GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. type GetPolicyOptions struct { // RequestedPolicyVersion: Optional. The policy format version to be - // returned. - // - // Valid values are 0, 1, and 3. Requests specifying an invalid value - // will be - // rejected. - // - // Requests for policies with any conditional bindings must specify - // version 3. - // Policies without any conditional bindings may specify any valid value - // or - // leave the field unset. - // - // To learn which resources support conditions in their IAM policies, - // see - // the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/r - // esource-policies). + // returned. Valid values are 0, 1, and 3. Requests specifying an + // invalid value will be rejected. 
Requests for policies with any + // conditional bindings must specify version 3. Policies without any + // conditional bindings may specify any valid value or leave the field + // unset. To learn which resources support conditions in their IAM + // policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -3012,21 +2971,29 @@ func (s *GoogleSheetsOptions) MarshalJSON() ([]byte, error) { } type HivePartitioningOptions struct { - // Mode: [Optional, Trusted Tester] When set, what mode of hive - // partitioning to use when reading data. Two modes are supported. (1) - // AUTO: automatically infer partition key name(s) and type(s). (2) - // STRINGS: automatically infer partition key name(s). All types are - // interpreted as strings. Not all storage formats support hive - // partitioning. Requesting hive partitioning on an unsupported format - // will lead to an error. Currently supported types include: AVRO, CSV, - // JSON, ORC and Parquet. + // Mode: [Optional] When set, what mode of hive partitioning to use when + // reading data. The following modes are supported. (1) AUTO: + // automatically infer partition key name(s) and type(s). (2) STRINGS: + // automatically infer partition key name(s). All types are interpreted + // as strings. (3) CUSTOM: partition key schema is encoded in the source + // URI prefix. Not all storage formats support hive partitioning. + // Requesting hive partitioning on an unsupported format will lead to an + // error. Currently supported types include: AVRO, CSV, JSON, ORC and + // Parquet. Mode string `json:"mode,omitempty"` - // SourceUriPrefix: [Optional, Trusted Tester] When hive partition - // detection is requested, a common prefix for all source uris should be - // supplied. The prefix must end immediately before the partition key - // encoding begins. For example, consider files following this data - // layout. + // RequirePartitionFilter: [Optional] If set to true, queries over this + // table require a partition filter that can be used for partition + // elimination to be specified. Note that this field should only be true + // when creating a permanent external table or querying a temporary + // external table. Hive-partitioned loads with requirePartitionFilter + // explicitly set to true will fail. + RequirePartitionFilter bool `json:"requirePartitionFilter,omitempty"` + + // SourceUriPrefix: [Optional] When hive partition detection is + // requested, a common prefix for all source uris should be supplied. + // The prefix must end immediately before the partition key encoding + // begins. For example, consider files following this data layout. // gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro // gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro // When hive partitioning is requested with either AUTO or STRINGS @@ -3282,10 +3249,11 @@ type JobConfigurationExtract struct { Compression string `json:"compression,omitempty"` // DestinationFormat: [Optional] The exported file format. Possible - // values include CSV, NEWLINE_DELIMITED_JSON or AVRO for tables and - // ML_TF_SAVED_MODEL or ML_XGBOOST_BOOSTER for models. The default value - // for tables is CSV. Tables with nested or repeated fields cannot be - // exported as CSV. The default value for models is ML_TF_SAVED_MODEL. 
+ // values include CSV, NEWLINE_DELIMITED_JSON, PARQUET or AVRO for + // tables and ML_TF_SAVED_MODEL or ML_XGBOOST_BOOSTER for models. The + // default value for tables is CSV. Tables with nested or repeated + // fields cannot be exported as CSV. The default value for models is + // ML_TF_SAVED_MODEL. DestinationFormat string `json:"destinationFormat,omitempty"` // DestinationUri: [Pick one] DEPRECATED: Use destinationUris instead, @@ -3375,6 +3343,28 @@ type JobConfigurationLoad struct { // one atomic update upon job completion. CreateDisposition string `json:"createDisposition,omitempty"` + // DecimalTargetTypes: [Trusted Tester] Defines the list of possible SQL + // data types to which the source decimal values are converted. This + // list and the precision and the scale parameters of the decimal field + // determine the target type. In the order of NUMERIC, BIGNUMERIC, and + // STRING, a type is picked if it is in the specified list and if it + // supports the precision and the scale. STRING supports all precision + // and scale values. If none of the listed types supports the precision + // and the scale, the type supporting the widest range in the specified + // list is picked, and if a value exceeds the supported range when + // reading the data, an error will be thrown. For example: suppose + // decimal_target_type = ["NUMERIC", "BIGNUMERIC"]. Then if + // (precision,scale) is: * (38,9) -> NUMERIC; * (39,9) -> BIGNUMERIC + // (NUMERIC cannot hold 30 integer digits); * (38,10) -> BIGNUMERIC + // (NUMERIC cannot hold 10 fractional digits); * (76,38) -> BIGNUMERIC; + // * (77,38) -> BIGNUMERIC (error if value exeeds supported range). For + // duplicated types in this field, only one will be considered and the + // rest will be ignored. The order of the types in this field is + // ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as + // ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over + // BIGNUMERIC. + DecimalTargetTypes []string `json:"decimalTargetTypes,omitempty"` + // DestinationEncryptionConfiguration: Custom encryption configuration // (e.g., Cloud KMS keys). DestinationEncryptionConfiguration *EncryptionConfiguration `json:"destinationEncryptionConfiguration,omitempty"` @@ -3737,9 +3727,18 @@ type JobConfigurationTableCopy struct { // (e.g., Cloud KMS keys). DestinationEncryptionConfiguration *EncryptionConfiguration `json:"destinationEncryptionConfiguration,omitempty"` + // DestinationExpirationTime: [Optional] The time when the destination + // table expires. Expired tables will be deleted and their storage + // reclaimed. + DestinationExpirationTime interface{} `json:"destinationExpirationTime,omitempty"` + // DestinationTable: [Required] The destination table DestinationTable *TableReference `json:"destinationTable,omitempty"` + // OperationType: [Optional] Supported operation types in table copy + // job. + OperationType string `json:"operationType,omitempty"` + // SourceTable: [Pick one] Source table to copy. SourceTable *TableReference `json:"sourceTable,omitempty"` @@ -3978,6 +3977,10 @@ type JobStatistics struct { // TotalSlotMs: [Output-only] Slot-milliseconds for the job. TotalSlotMs int64 `json:"totalSlotMs,omitempty,string"` + // TransactionInfoTemplate: [Output-only] [Alpha] Information of the + // multi-statement transaction if this job is part of one. + TransactionInfoTemplate *TransactionInfo `json:"transactionInfoTemplate,omitempty"` + // ForceSendFields is a list of field names (e.g. 
"CompletionRatio") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -4341,10 +4344,8 @@ type JsonValue interface{} type ListModelsResponse struct { // Models: Models in the requested dataset. Only the following fields - // are populated: - // model_reference, model_type, creation_time, last_modified_time - // and - // labels. + // are populated: model_reference, model_type, creation_time, + // last_modified_time and labels. Models []*Model `json:"models,omitempty"` // NextPageToken: A token to request the next page of results. @@ -4382,10 +4383,8 @@ type ListRoutinesResponse struct { NextPageToken string `json:"nextPageToken,omitempty"` // Routines: Routines in the requested dataset. Unless read_mask is set - // in the request, - // only the following fields are populated: - // etag, project_id, dataset_id, routine_id, routine_type, - // creation_time, + // in the request, only the following fields are populated: etag, + // project_id, dataset_id, routine_id, routine_type, creation_time, // last_modified_time, and language. Routines []*Routine `json:"routines,omitempty"` @@ -4417,15 +4416,12 @@ func (s *ListRoutinesResponse) MarshalJSON() ([]byte, error) { } // LocationMetadata: BigQuery-specific metadata about a location. This -// will be set on -// google.cloud.location.Location.metadata in Cloud Location -// API -// responses. +// will be set on google.cloud.location.Location.metadata in Cloud +// Location API responses. type LocationMetadata struct { // LegacyLocationId: The legacy BigQuery location ID, e.g. “EU” for - // the “europe” location. - // This is for any API consumers that need the legacy “US” and - // “EU” locations. + // the “europe” location. This is for any API consumers that need + // the legacy “US” and “EU” locations. LegacyLocationId string `json:"legacyLocationId,omitempty"` // ForceSendFields is a list of field names (e.g. "LegacyLocationId") to @@ -4502,26 +4498,20 @@ type Model struct { Description string `json:"description,omitempty"` // EncryptionConfiguration: Custom encryption configuration (e.g., Cloud - // KMS keys). This shows the - // encryption configuration of the model data while stored in - // BigQuery - // storage. This field can be used with PatchModel to update encryption - // key - // for an already encrypted model. + // KMS keys). This shows the encryption configuration of the model data + // while stored in BigQuery storage. This field can be used with + // PatchModel to update encryption key for an already encrypted model. EncryptionConfiguration *EncryptionConfiguration `json:"encryptionConfiguration,omitempty"` // Etag: Output only. A hash of this resource. Etag string `json:"etag,omitempty"` // ExpirationTime: Optional. The time when this model expires, in - // milliseconds since the epoch. - // If not present, the model will persist indefinitely. Expired - // models - // will be deleted and their storage reclaimed. The - // defaultTableExpirationMs - // property of the encapsulating dataset can be used to set a - // default - // expirationTime on newly created models. + // milliseconds since the epoch. If not present, the model will persist + // indefinitely. Expired models will be deleted and their storage + // reclaimed. The defaultTableExpirationMs property of the encapsulating + // dataset can be used to set a default expirationTime on newly created + // models. 
ExpirationTime int64 `json:"expirationTime,omitempty,string"` // FeatureColumns: Output only. Input feature columns that were used to @@ -4532,21 +4522,16 @@ type Model struct { FriendlyName string `json:"friendlyName,omitempty"` // LabelColumns: Output only. Label columns that were used to train this - // model. - // The output of the model will have a "predicted_" prefix to these - // columns. + // model. The output of the model will have a "predicted_" prefix to + // these columns. LabelColumns []*StandardSqlField `json:"labelColumns,omitempty"` // Labels: The labels associated with this model. You can use these to - // organize - // and group your models. Label keys and values can be no longer - // than 63 characters, can only contain lowercase letters, - // numeric - // characters, underscores and dashes. International characters are - // allowed. - // Label values are optional. Label keys must start with a letter and - // each - // label in the list must have a different key. + // organize and group your models. Label keys and values can be no + // longer than 63 characters, can only contain lowercase letters, + // numeric characters, underscores and dashes. International characters + // are allowed. Label values are optional. Label keys must start with a + // letter and each label in the list must have a different key. Labels map[string]string `json:"labels,omitempty"` // LastModifiedTime: Output only. The time when this model was last @@ -4554,8 +4539,7 @@ type Model struct { LastModifiedTime int64 `json:"lastModifiedTime,omitempty,string"` // Location: Output only. The geographic location where the model - // resides. This value - // is inherited from the dataset. + // resides. This value is inherited from the dataset. Location string `json:"location,omitempty"` // ModelReference: Required. Unique identifier for this model. @@ -4570,13 +4554,14 @@ type Model struct { // model. // "KMEANS" - K-means clustering model. // "MATRIX_FACTORIZATION" - Matrix factorization model. - // "DNN_CLASSIFIER" - DNN classifier model. + // "DNN_CLASSIFIER" - [Beta] DNN classifier model. // "TENSORFLOW" - [Beta] An imported TensorFlow model. - // "DNN_REGRESSOR" - DNN regressor model. - // "BOOSTED_TREE_REGRESSOR" - Boosted tree regressor model. - // "BOOSTED_TREE_CLASSIFIER" - Boosted tree classifier model. - // "AUTOML_REGRESSOR" - AutoML Tables regression model. - // "AUTOML_CLASSIFIER" - AutoML Tables classification model. + // "DNN_REGRESSOR" - [Beta] DNN regressor model. + // "BOOSTED_TREE_REGRESSOR" - [Beta] Boosted tree regressor model. + // "BOOSTED_TREE_CLASSIFIER" - [Beta] Boosted tree classifier model. + // "ARIMA" - [Beta] ARIMA model. + // "AUTOML_REGRESSOR" - [Beta] AutoML Tables regression model. + // "AUTOML_CLASSIFIER" - [Beta] AutoML Tables classification model. ModelType string `json:"modelType,omitempty"` // TrainingRuns: Output only. Information for all training runs in @@ -4750,154 +4735,77 @@ func (s *MultiClassClassificationMetrics) MarshalJSON() ([]byte, error) { } // Policy: An Identity and Access Management (IAM) policy, which -// specifies access -// controls for Google Cloud resources. -// -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or -// more -// `members` to a single `role`. Members can be user accounts, service -// accounts, +// specifies access controls for Google Cloud resources. A `Policy` is a +// collection of `bindings`. A `binding` binds one or more `members` to +// a single `role`. 
Members can be user accounts, service accounts, // Google groups, and domains (such as G Suite). A `role` is a named -// list of -// permissions; each `role` can be an IAM predefined role or a -// user-created -// custom role. -// -// For some types of Google Cloud resources, a `binding` can also -// specify a -// `condition`, which is a logical expression that allows access to a -// resource -// only if the expression evaluates to `true`. A condition can add -// constraints -// based on attributes of the request, the resource, or both. To learn -// which -// resources support conditions in their IAM policies, see the -// [IAM +// list of permissions; each `role` can be an IAM predefined role or a +// user-created custom role. For some types of Google Cloud resources, a +// `binding` can also specify a `condition`, which is a logical +// expression that allows access to a resource only if the expression +// evaluates to `true`. A condition can add constraints based on +// attributes of the request, the resource, or both. To learn which +// resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p -// olicies). -// -// **JSON example:** -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": [ -// "user:eve@example.com" -// ], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// -// **YAML example:** -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// - etag: BwWWja0YfJA= -// - version: 3 -// -// For a description of IAM and its features, see the -// [IAM documentation](https://cloud.google.com/iam/docs/). +// olicies). 
**JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: +// 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). type Policy struct { // AuditConfigs: Specifies cloud audit logging configuration for this // policy. AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` // Bindings: Associates a list of `members` to a `role`. Optionally, may - // specify a - // `condition` that determines how and when the `bindings` are applied. - // Each - // of the `bindings` must contain at least one member. + // specify a `condition` that determines how and when the `bindings` are + // applied. Each of the `bindings` must contain at least one member. Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. - // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. **Important:** If you use IAM + // Conditions, you must include the `etag` field whenever you call + // `setIamPolicy`. If you omit this field, then IAM allows you to + // overwrite a version `3` policy with a version `1` policy, and all of // the conditions in the version `3` policy are lost. Etag string `json:"etag,omitempty"` - // Version: Specifies the format of the policy. - // - // Valid values are `0`, `1`, and `3`. 
Requests that specify an invalid - // value - // are rejected. - // + // Version: Specifies the format of the policy. Valid values are `0`, + // `1`, and `3`. Requests that specify an invalid value are rejected. // Any operation that affects conditional role bindings must specify - // version - // `3`. This requirement applies to the following operations: - // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a - // policy - // that includes conditions - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of - // the conditions in the version `3` policy are lost. - // - // If a policy does not include any conditions, operations on that - // policy may - // specify any valid version or leave the field unset. - // - // To learn which resources support conditions in their IAM policies, - // see the - // [IAM + // version `3`. This requirement applies to the following operations: * + // Getting a policy that includes a conditional role binding * Adding a + // conditional role binding to a policy * Changing a conditional role + // binding in a policy * Removing any role binding, with or without a + // condition, from a policy that includes conditions **Important:** If + // you use IAM Conditions, you must include the `etag` field whenever + // you call `setIamPolicy`. If you omit this field, then IAM allows you + // to overwrite a version `3` policy with a version `1` policy, and all + // of the conditions in the version `3` policy are lost. If a policy + // does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. To learn which + // resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p // olicies). Version int64 `json:"version,omitempty"` @@ -5194,6 +5102,14 @@ type QueryRequest struct { // Kind: The resource type of the request. Kind string `json:"kind,omitempty"` + // Labels: The labels associated with this job. You can use these to + // organize and group your jobs. Label keys and values can be no longer + // than 63 characters, can only contain lowercase letters, numeric + // characters, underscores and dashes. International characters are + // allowed. Label values are optional. Label keys must start with a + // letter and each label in the list must have a different key. + Labels map[string]string `json:"labels,omitempty"` + // Location: The geographic location where the job should run. See // details at // https://cloud.google.com/bigquery/docs/locations#specifying_your_location. @@ -5207,6 +5123,12 @@ type QueryRequest struct { // only the byte limit applies. MaxResults int64 `json:"maxResults,omitempty"` + // MaximumBytesBilled: [Optional] Limits the bytes billed for this job. + // Queries that will have bytes billed beyond this limit will fail + // (without incurring a charge). If unspecified, this will be set to + // your project default. + MaximumBytesBilled int64 `json:"maximumBytesBilled,omitempty,string"` + // ParameterMode: Standard SQL only. Set to POSITIONAL to use positional // (?) 
query parameters or to NAMED to use named (@myparam) query // parameters in this query. @@ -5223,6 +5145,29 @@ type QueryRequest struct { // QueryParameters: Query parameters for Standard SQL queries. QueryParameters []*QueryParameter `json:"queryParameters,omitempty"` + // RequestId: A unique user provided identifier to ensure idempotent + // behavior for queries. Note that this is different from the job_id. It + // has the following properties: 1. It is case-sensitive, limited to up + // to 36 ASCII characters. A UUID is recommended. 2. Read only queries + // can ignore this token since they are nullipotent by definition. 3. + // For the purposes of idempotency ensured by the request_id, a request + // is considered duplicate of another only if they have the same + // request_id and are actually duplicates. When determining whether a + // request is a duplicate of the previous request, all parameters in the + // request that may affect the behavior are considered. For example, + // query, connection_properties, query_parameters, use_legacy_sql are + // parameters that affect the result and are considered when determining + // whether a request is a duplicate, but properties like timeout_ms + // don't affect the result and are thus not considered. Dry run query + // requests are never considered duplicate of another request. 4. When a + // duplicate mutating query request is detected, it returns: a. the + // results of the mutation if it completes successfully within the + // timeout. b. the running operation if it is still in progress at the + // end of the timeout. 5. Its lifetime is limited to 15 minutes. In + // other words, if two requests are sent with the same request_id, but + // more than 15 minutes apart, idempotency is not guaranteed. + RequestId string `json:"requestId,omitempty"` + // TimeoutMs: [Optional] How long to wait for the query to complete, in // milliseconds, before the request times out and returns. Note that // this is only a timeout for the request, not the query. If the query @@ -5467,33 +5412,28 @@ func (s *RangePartitioningRange) MarshalJSON() ([]byte, error) { } // RankingMetrics: Evaluation metrics used by weighted-ALS models -// specified by -// feedback_type=implicit. +// specified by feedback_type=implicit. type RankingMetrics struct { // AverageRank: Determines the goodness of a ranking by computing the - // percentile rank - // from the predicted confidence and dividing it by the original rank. + // percentile rank from the predicted confidence and dividing it by the + // original rank. AverageRank float64 `json:"averageRank,omitempty"` // MeanAveragePrecision: Calculates a precision per user for all the - // items by ranking them and - // then averages all the precisions across all the users. + // items by ranking them and then averages all the precisions across all + // the users. MeanAveragePrecision float64 `json:"meanAveragePrecision,omitempty"` // MeanSquaredError: Similar to the mean squared error computed in - // regression and explicit - // recommendation models except instead of computing the rating - // directly, - // the output from evaluate is computed against a preference which is 1 - // or 0 - // depending on if the rating exists or not. + // regression and explicit recommendation models except instead of + // computing the rating directly, the output from evaluate is computed + // against a preference which is 1 or 0 depending on if the rating + // exists or not. 
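[Editorial illustration, not part of the generated file or of this change.] The QueryRequest hunks above add three caller-visible fields: Labels, MaximumBytesBilled, and RequestId. A hedged sketch of how a caller of the regenerated google.golang.org/api/bigquery/v2 package might set them; the project ID, label values, byte limit, and request ID are placeholders.

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	bigquery "google.golang.org/api/bigquery/v2"
    )

    func main() {
    	ctx := context.Background()
    	// NewService uses Application Default Credentials by default.
    	svc, err := bigquery.NewService(ctx)
    	if err != nil {
    		log.Fatal(err)
    	}
    	req := &bigquery.QueryRequest{
    		Query: "SELECT 1",
    		// Fields added in this revision of the generated surface:
    		Labels:             map[string]string{"team": "analytics"}, // organize/group jobs
    		MaximumBytesBilled: 1 << 30,                                // fail instead of billing more than ~1 GiB
    		RequestId:          "00000000-0000-0000-0000-000000000000", // idempotency token; a UUID is recommended
    	}
    	resp, err := svc.Jobs.Query("my-project", req).Do()
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("job complete:", resp.JobComplete)
    }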
MeanSquaredError float64 `json:"meanSquaredError,omitempty"` // NormalizedDiscountedCumulativeGain: A metric to determine the - // goodness of a ranking calculated from the - // predicted confidence by comparing it to an ideal rank measured by - // the - // original ratings. + // goodness of a ranking calculated from the predicted confidence by + // comparing it to an ideal rank measured by the original ratings. NormalizedDiscountedCumulativeGain float64 `json:"normalizedDiscountedCumulativeGain,omitempty"` // ForceSendFields is a list of field names (e.g. "AverageRank") to @@ -5540,8 +5480,7 @@ func (s *RankingMetrics) UnmarshalJSON(data []byte) error { } // RegressionMetrics: Evaluation metrics for regression and explicit -// feedback type matrix -// factorization models. +// feedback type matrix factorization models. type RegressionMetrics struct { // MeanAbsoluteError: Mean absolute error. MeanAbsoluteError float64 `json:"meanAbsoluteError,omitempty"` @@ -5610,52 +5549,43 @@ type Routine struct { Arguments []*Argument `json:"arguments,omitempty"` // CreationTime: Output only. The time when this routine was created, in - // milliseconds since - // the epoch. + // milliseconds since the epoch. CreationTime int64 `json:"creationTime,omitempty,string"` - // DefinitionBody: Required. The body of the routine. - // - // For functions, this is the expression in the AS clause. - // - // If language=SQL, it is the substring inside (but excluding) - // the - // parentheses. For example, for the function created with the - // following - // statement: - // - // `CREATE FUNCTION JoinLines(x string, y string) as (concat(x, "\n", - // y))` - // - // The definition_body is `concat(x, "\n", y)` (\n is not replaced - // with - // linebreak). - // - // If language=JAVASCRIPT, it is the evaluated string in the AS - // clause. - // For example, for the function created with the following - // statement: - // - // `CREATE FUNCTION f() RETURNS STRING LANGUAGE js AS 'return - // "\n";\n'` - // - // The definition_body is - // - // `return "\n";\n` - // - // Note that both \n are replaced with linebreaks. + // DefinitionBody: Required. The body of the routine. For functions, + // this is the expression in the AS clause. If language=SQL, it is the + // substring inside (but excluding) the parentheses. For example, for + // the function created with the following statement: `CREATE FUNCTION + // JoinLines(x string, y string) as (concat(x, "\n", y))` The + // definition_body is `concat(x, "\n", y)` (\n is not replaced with + // linebreak). If language=JAVASCRIPT, it is the evaluated string in the + // AS clause. For example, for the function created with the following + // statement: `CREATE FUNCTION f() RETURNS STRING LANGUAGE js AS 'return + // "\n";\n'` The definition_body is `return "\n";\n` Note that both \n + // are replaced with linebreaks. DefinitionBody string `json:"definitionBody,omitempty"` // Description: Optional. [Experimental] The description of the routine // if defined. Description string `json:"description,omitempty"` + // DeterminismLevel: Optional. [Experimental] The determinism level of + // the JavaScript UDF if defined. + // + // Possible values: + // "DETERMINISM_LEVEL_UNSPECIFIED" - The determinism of the UDF is + // unspecified. + // "DETERMINISTIC" - The UDF is deterministic, meaning that 2 function + // calls with the same inputs always produce the same result, even + // across 2 query runs. + // "NOT_DETERMINISTIC" - The UDF is not deterministic. 
+ DeterminismLevel string `json:"determinismLevel,omitempty"` + // Etag: Output only. A hash of this resource. Etag string `json:"etag,omitempty"` // ImportedLibraries: Optional. If language = "JAVASCRIPT", this field - // stores the path of the - // imported JAVASCRIPT libraries. + // stores the path of the imported JAVASCRIPT libraries. ImportedLibraries []string `json:"importedLibraries,omitempty"` // Language: Optional. Defaults to "SQL". @@ -5667,41 +5597,22 @@ type Routine struct { Language string `json:"language,omitempty"` // LastModifiedTime: Output only. The time when this routine was last - // modified, in milliseconds - // since the epoch. + // modified, in milliseconds since the epoch. LastModifiedTime int64 `json:"lastModifiedTime,omitempty,string"` - // ReturnType: Optional if language = "SQL"; required otherwise. - // - // If absent, the return type is inferred from definition_body at query - // time - // in each query that references this routine. If present, then the - // evaluated - // result will be cast to the specified returned type at query - // time. - // - // For example, for the functions created with the following - // statements: - // - // * `CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + - // y);` - // - // * `CREATE FUNCTION Increment(x FLOAT64) AS (Add(x, 1));` - // - // * `CREATE FUNCTION Decrement(x FLOAT64) RETURNS FLOAT64 AS (Add(x, - // -1));` - // - // The return_type is `{type_kind: "FLOAT64"}` for `Add` and - // `Decrement`, and - // is absent for `Increment` (inferred as FLOAT64 at query - // time). - // - // Suppose the function `Add` is replaced by - // `CREATE OR REPLACE FUNCTION Add(x INT64, y INT64) AS (x + - // y);` - // - // Then the inferred return type of `Increment` is automatically changed - // to + // ReturnType: Optional if language = "SQL"; required otherwise. If + // absent, the return type is inferred from definition_body at query + // time in each query that references this routine. If present, then the + // evaluated result will be cast to the specified returned type at query + // time. For example, for the functions created with the following + // statements: * `CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS + // FLOAT64 AS (x + y);` * `CREATE FUNCTION Increment(x FLOAT64) AS + // (Add(x, 1));` * `CREATE FUNCTION Decrement(x FLOAT64) RETURNS FLOAT64 + // AS (Add(x, -1));` The return_type is `{type_kind: "FLOAT64"}` for + // `Add` and `Decrement`, and is absent for `Increment` (inferred as + // FLOAT64 at query time). Suppose the function `Add` is replaced by + // `CREATE OR REPLACE FUNCTION Add(x INT64, y INT64) AS (x + y);` Then + // the inferred return type of `Increment` is automatically changed to // INT64 at query time, while the return type of `Decrement` remains // FLOAT64. ReturnType *StandardSqlDataType `json:"returnType,omitempty"` @@ -5963,20 +5874,15 @@ func (s *ScriptStatistics) MarshalJSON() ([]byte, error) { // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. + // `resource`. The size of the policy is limited to a few 10s of KB. An + // empty policy is a valid policy but certain Cloud Platform services + // (such as Projects) might reject them. 
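[Editorial illustration, not part of the generated file or of this change.] The Routine hunks above introduce the DeterminismLevel field alongside the reflowed DefinitionBody and ReturnType documentation. A hypothetical sketch of inserting a JavaScript UDF that sets the new field, assuming a *bigquery.Service named svc built as in the earlier sketch; project, dataset, and routine IDs are placeholders.

    func insertRoutine(svc *bigquery.Service) error {
    	routine := &bigquery.Routine{
    		RoutineReference: &bigquery.RoutineReference{
    			ProjectId: "my-project",
    			DatasetId: "my_dataset",
    			RoutineId: "double_it",
    		},
    		RoutineType:      "SCALAR_FUNCTION",
    		Language:         "JAVASCRIPT",
    		Arguments:        []*bigquery.Argument{{Name: "x", DataType: &bigquery.StandardSqlDataType{TypeKind: "FLOAT64"}}},
    		ReturnType:       &bigquery.StandardSqlDataType{TypeKind: "FLOAT64"},
    		DefinitionBody:   "return x * 2;",
    		DeterminismLevel: "DETERMINISTIC", // new field: same inputs always produce the same result
    	}
    	_, err := svc.Routines.Insert("my-project", "my_dataset", routine).Do()
    	return err
    }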
Policy *Policy `json:"policy,omitempty"` // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the - // policy to modify. Only - // the fields in the mask will be modified. If no mask is provided, - // the - // following default mask is used: - // - // `paths: "bindings, etag" + // policy to modify. Only the fields in the mask will be modified. If no + // mask is provided, the following default mask is used: `paths: + // "bindings, etag" UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. "Policy") to @@ -6002,18 +5908,45 @@ func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SnapshotDefinition struct { + // BaseTableReference: [Required] Reference describing the ID of the + // table that is snapshotted. + BaseTableReference *TableReference `json:"baseTableReference,omitempty"` + + // SnapshotTime: [Required] The time at which the base table was + // snapshot. + SnapshotTime string `json:"snapshotTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BaseTableReference") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BaseTableReference") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SnapshotDefinition) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotDefinition + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // StandardSqlDataType: The type of a variable, e.g., a function -// argument. -// Examples: -// INT64: {type_kind="INT64"} -// ARRAY: {type_kind="ARRAY", -// array_element_type="STRING"} -// STRUCT>: -// {type_kind="STRUCT", -// struct_type={fields=[ -// {name="x", type={type_kind="STRING"}}, -// {name="y", type={type_kind="ARRAY", array_element_type="DATE"}} -// ]}} +// argument. Examples: INT64: {type_kind="INT64"} ARRAY: +// {type_kind="ARRAY", array_element_type="STRING"} STRUCT>: +// {type_kind="STRUCT", struct_type={fields=[ {name="x", +// type={type_kind="STRING"}}, {name="y", type={type_kind="ARRAY", +// array_element_type="DATE"}} ]}} type StandardSqlDataType struct { // ArrayElementType: The type of the array's elements, if type_kind = // "ARRAY". @@ -6023,8 +5956,8 @@ type StandardSqlDataType struct { // "STRUCT". StructType *StandardSqlStructType `json:"structType,omitempty"` - // TypeKind: Required. The top level type of this field. - // Can be any standard SQL data type (e.g., "INT64", "DATE", "ARRAY"). + // TypeKind: Required. The top level type of this field. Can be any + // standard SQL data type (e.g., "INT64", "DATE", "ARRAY"). // // Possible values: // "TYPE_KIND_UNSPECIFIED" - Invalid type. @@ -6035,8 +5968,7 @@ type StandardSqlDataType struct { // "STRING" - Encoded as a string value. 
// "BYTES" - Encoded as a base64 string per RFC 4648, section 4. // "TIMESTAMP" - Encoded as an RFC 3339 timestamp with mandatory "Z" - // time zone string: - // 1985-04-12T23:20:50.52Z + // time zone string: 1985-04-12T23:20:50.52Z // "DATE" - Encoded as RFC 3339 full-date format string: 1985-04-12 // "TIME" - Encoded as RFC 3339 partial-time format string: // 23:20:50.52 @@ -6044,10 +5976,11 @@ type StandardSqlDataType struct { // 1985-04-12T23:20:50.52 // "GEOGRAPHY" - Encoded as WKT // "NUMERIC" - Encoded as a decimal string. + // "BIGNUMERIC" - Encoded as a decimal string. // "ARRAY" - Encoded as a list with types matching Type.array_type. // "STRUCT" - Encoded as a list with fields of type - // Type.struct_type[i]. List is used - // because a JSON object cannot have duplicate field names. + // Type.struct_type[i]. List is used because a JSON object cannot have + // duplicate field names. TypeKind string `json:"typeKind,omitempty"` // ForceSendFields is a list of field names (e.g. "ArrayElementType") to @@ -6080,10 +6013,8 @@ type StandardSqlField struct { // fields. Name string `json:"name,omitempty"` - // Type: Optional. The type of this parameter. Absent if not - // explicitly - // specified (e.g., CREATE FUNCTION statement can omit the return - // type; + // Type: Optional. The type of this parameter. Absent if not explicitly + // specified (e.g., CREATE FUNCTION statement can omit the return type; // in this case the output parameter does not have this "type" field). Type *StandardSqlDataType `json:"type,omitempty"` @@ -6279,6 +6210,9 @@ type Table struct { // resource again. SelfLink string `json:"selfLink,omitempty"` + // SnapshotDefinition: [Output-only] Snapshot definition. + SnapshotDefinition *SnapshotDefinition `json:"snapshotDefinition,omitempty"` + // StreamingBuffer: [Output-only] Contains information regarding this // table's streaming buffer, if one is present. This field will be // absent if the table is not being streamed to or if there is no data @@ -6295,10 +6229,11 @@ type Table struct { // Type: [Output-only] Describes the table type. The following values // are supported: TABLE: A normal BigQuery table. VIEW: A virtual table - // defined by a SQL query. [TrustedTester] MATERIALIZED_VIEW: SQL query - // whose result is persisted. EXTERNAL: A table that references data - // stored in an external storage system, such as Google Cloud Storage. - // The default value is TABLE. + // defined by a SQL query. [TrustedTester] SNAPSHOT: An immutable, + // read-only table that is a copy of another table. [TrustedTester] + // MATERIALIZED_VIEW: SQL query whose result is persisted. EXTERNAL: A + // table that references data stored in an external storage system, such + // as Google Cloud Storage. The default value is TABLE. Type string `json:"type,omitempty"` // View: [Optional] The view definition. @@ -6898,11 +6833,8 @@ func (s *TableSchema) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. - // Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For - // more - // information see - // [IAM + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. For more information see [IAM // Overview](https://cloud.google.com/iam/docs/overview#permissions). Permissions []string `json:"permissions,omitempty"` @@ -6933,8 +6865,7 @@ func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { // method. 
type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. + // the caller is allowed. Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -6980,8 +6911,10 @@ type TimePartitioning struct { RequirePartitionFilter bool `json:"requirePartitionFilter,omitempty"` - // Type: [Required] The only type supported is DAY, which will generate - // one partition per day. + // Type: [Required] The supported types are DAY, HOUR, MONTH, and YEAR, + // which will generate one partition per day, hour, month, and year, + // respectively. When the type is not specified, the default behavior is + // DAY. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "ExpirationMs") to @@ -7008,35 +6941,43 @@ func (s *TimePartitioning) MarshalJSON() ([]byte, error) { } type TrainingOptions struct { + // AutoArima: Whether to enable auto ARIMA or not. + AutoArima bool `json:"autoArima,omitempty"` + + // AutoArimaMaxOrder: The max value of non-seasonal p and q. + AutoArimaMaxOrder int64 `json:"autoArimaMaxOrder,omitempty,string"` + // BatchSize: Batch size for dnn models. BatchSize int64 `json:"batchSize,omitempty,string"` + // DataFrequency: The data frequency of a time series. + // + // Possible values: + // "DATA_FREQUENCY_UNSPECIFIED" + // "AUTO_FREQUENCY" - Automatically inferred from timestamps. + // "YEARLY" - Yearly data. + // "QUARTERLY" - Quarterly data. + // "MONTHLY" - Monthly data. + // "WEEKLY" - Weekly data. + // "DAILY" - Daily data. + // "HOURLY" - Hourly data. + DataFrequency string `json:"dataFrequency,omitempty"` + // DataSplitColumn: The column to split data with. This column won't be - // used as a - // feature. - // 1. When data_split_method is CUSTOM, the corresponding column - // should - // be boolean. The rows with true value tag are eval data, and the - // false - // are training data. - // 2. When data_split_method is SEQ, the first - // DATA_SPLIT_EVAL_FRACTION - // rows (from smallest to largest) in the corresponding column are - // used - // as training data, and the rest are eval data. It respects the - // order - // in Orderable data - // types: - // https://cloud.google.com/bigquery/docs/reference/standard-sql/d - // ata-types#data-type-properties + // used as a feature. 1. When data_split_method is CUSTOM, the + // corresponding column should be boolean. The rows with true value tag + // are eval data, and the false are training data. 2. When + // data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows + // (from smallest to largest) in the corresponding column are used as + // training data, and the rest are eval data. It respects the order in + // Orderable data types: + // https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties DataSplitColumn string `json:"dataSplitColumn,omitempty"` // DataSplitEvalFraction: The fraction of evaluation data over the whole - // input data. The rest - // of data will be used as training data. The format should be - // double. - // Accurate to two decimal places. - // Default value is 0.2. + // input data. The rest of data will be used as training data. The + // format should be double. Accurate to two decimal places. Default + // value is 0.2. 
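[Editorial illustration, not part of the generated file or of this change.] TimePartitioning.Type above now documents HOUR, MONTH, and YEAR in addition to DAY, with DAY remaining the default when the type is unset. A hedged sketch of creating a monthly-partitioned table, with svc as before and placeholder identifiers and schema.

    func createMonthlyPartitionedTable(svc *bigquery.Service) error {
    	tbl := &bigquery.Table{
    		TableReference: &bigquery.TableReference{
    			ProjectId: "my-project",
    			DatasetId: "my_dataset",
    			TableId:   "events",
    		},
    		Schema: &bigquery.TableSchema{Fields: []*bigquery.TableFieldSchema{
    			{Name: "ts", Type: "TIMESTAMP"},
    			{Name: "payload", Type: "STRING"},
    		}},
    		TimePartitioning: &bigquery.TimePartitioning{
    			Field: "ts",
    			Type:  "MONTH", // newly documented alongside DAY, HOUR, and YEAR
    		},
    	}
    	_, err := svc.Tables.Insert("my-project", "my_dataset", tbl).Do()
    	return err
    }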
DataSplitEvalFraction float64 `json:"dataSplitEvalFraction,omitempty"` // DataSplitMethod: The data split type for training and evaluation, @@ -7049,8 +6990,7 @@ type TrainingOptions struct { // "SEQUENTIAL" - Splits data sequentially. // "NO_SPLIT" - Data split will be skipped. // "AUTO_SPLIT" - Splits data automatically: Uses NO_SPLIT if the data - // size is small. - // Otherwise uses RANDOM. + // size is small. Otherwise uses RANDOM. DataSplitMethod string `json:"dataSplitMethod,omitempty"` // DistanceType: Distance type for clustering models. @@ -7065,15 +7005,12 @@ type TrainingOptions struct { Dropout float64 `json:"dropout,omitempty"` // EarlyStop: Whether to stop early when the loss doesn't improve - // significantly - // any more (compared to min_relative_progress). Used only for - // iterative - // training algorithms. + // significantly any more (compared to min_relative_progress). Used only + // for iterative training algorithms. EarlyStop bool `json:"earlyStop,omitempty"` // FeedbackType: Feedback type that specifies which algorithm to run for - // matrix - // factorization. + // matrix factorization. // // Possible values: // "FEEDBACK_TYPE_UNSPECIFIED" @@ -7084,9 +7021,91 @@ type TrainingOptions struct { // HiddenUnits: Hidden units for dnn models. HiddenUnits googleapi.Int64s `json:"hiddenUnits,omitempty"` + // HolidayRegion: The geographical region based on which the holidays + // are considered in time series modeling. If a valid value is + // specified, then holiday effects modeling is enabled. + // + // Possible values: + // "HOLIDAY_REGION_UNSPECIFIED" - Holiday region unspecified. + // "GLOBAL" - Global. + // "NA" - North America. + // "JAPAC" - Japan and Asia Pacific: Korea, Greater China, India, + // Australia, and New Zealand. + // "EMEA" - Europe, the Middle East and Africa. + // "LAC" - Latin America and the Caribbean. + // "AE" - United Arab Emirates + // "AR" - Argentina + // "AT" - Austria + // "AU" - Australia + // "BE" - Belgium + // "BR" - Brazil + // "CA" - Canada + // "CH" - Switzerland + // "CL" - Chile + // "CN" - China + // "CO" - Colombia + // "CS" - Czechoslovakia + // "CZ" - Czech Republic + // "DE" - Germany + // "DK" - Denmark + // "DZ" - Algeria + // "EC" - Ecuador + // "EE" - Estonia + // "EG" - Egypt + // "ES" - Spain + // "FI" - Finland + // "FR" - France + // "GB" - Great Britain (United Kingdom) + // "GR" - Greece + // "HK" - Hong Kong + // "HU" - Hungary + // "ID" - Indonesia + // "IE" - Ireland + // "IL" - Israel + // "IN" - India + // "IR" - Iran + // "IT" - Italy + // "JP" - Japan + // "KR" - Korea (South) + // "LV" - Latvia + // "MA" - Morocco + // "MX" - Mexico + // "MY" - Malaysia + // "NG" - Nigeria + // "NL" - Netherlands + // "NO" - Norway + // "NZ" - New Zealand + // "PE" - Peru + // "PH" - Philippines + // "PK" - Pakistan + // "PL" - Poland + // "PT" - Portugal + // "RO" - Romania + // "RS" - Serbia + // "RU" - Russian Federation + // "SA" - Saudi Arabia + // "SE" - Sweden + // "SG" - Singapore + // "SI" - Slovenia + // "SK" - Slovakia + // "TH" - Thailand + // "TR" - Turkey + // "TW" - Taiwan + // "UA" - Ukraine + // "US" - United States + // "VE" - Venezuela + // "VN" - Viet Nam + // "ZA" - South Africa + HolidayRegion string `json:"holidayRegion,omitempty"` + + // Horizon: The number of periods ahead that need to be forecasted. + Horizon int64 `json:"horizon,omitempty,string"` + + // IncludeDrift: Include drift when fitting an ARIMA model. 
+ IncludeDrift bool `json:"includeDrift,omitempty"` + // InitialLearnRate: Specifies the initial learning rate for the line - // search learn rate - // strategy. + // search learn rate strategy. InitialLearnRate float64 `json:"initialLearnRate,omitempty"` // InputLabelColumns: Name of input label columns in training data. @@ -7096,8 +7115,8 @@ type TrainingOptions struct { ItemColumn string `json:"itemColumn,omitempty"` // KmeansInitializationColumn: The column used to provide the initial - // centroids for kmeans algorithm - // when kmeans_initialization_method is CUSTOM. + // centroids for kmeans algorithm when kmeans_initialization_method is + // CUSTOM. KmeansInitializationColumn string `json:"kmeansInitializationColumn,omitempty"` // KmeansInitializationMethod: The method used to initialize the @@ -7106,8 +7125,7 @@ type TrainingOptions struct { // Possible values: // "KMEANS_INITIALIZATION_METHOD_UNSPECIFIED" // "RANDOM" - Initializes the centroids randomly. - // "CUSTOM" - Initializes the centroids using data specified - // in + // "CUSTOM" - Initializes the centroids using data specified in // kmeans_initialization_column. // "KMEANS_PLUS_PLUS" - Initializes with kmeans++. KmeansInitializationMethod string `json:"kmeansInitializationMethod,omitempty"` @@ -7119,8 +7137,8 @@ type TrainingOptions struct { L2Regularization float64 `json:"l2Regularization,omitempty"` // LabelClassWeights: Weights associated with each label class, for - // rebalancing the - // training data. Only applicable for classification models. + // rebalancing the training data. Only applicable for classification + // models. LabelClassWeights map[string]float64 `json:"labelClassWeights,omitempty"` // LearnRate: Learning rate in training. Used only for iterative @@ -7146,28 +7164,29 @@ type TrainingOptions struct { LossType string `json:"lossType,omitempty"` // MaxIterations: The maximum number of iterations in training. Used - // only for iterative - // training algorithms. + // only for iterative training algorithms. MaxIterations int64 `json:"maxIterations,omitempty,string"` // MaxTreeDepth: Maximum depth of a tree for boosted tree models. MaxTreeDepth int64 `json:"maxTreeDepth,omitempty,string"` // MinRelativeProgress: When early_stop is true, stops training when - // accuracy improvement is - // less than 'min_relative_progress'. Used only for iterative - // training - // algorithms. + // accuracy improvement is less than 'min_relative_progress'. Used only + // for iterative training algorithms. MinRelativeProgress float64 `json:"minRelativeProgress,omitempty"` // MinSplitLoss: Minimum split loss for boosted tree models. MinSplitLoss float64 `json:"minSplitLoss,omitempty"` // ModelUri: [Beta] Google Cloud Storage URI from which the model was - // imported. Only - // applicable for imported models. + // imported. Only applicable for imported models. ModelUri string `json:"modelUri,omitempty"` + // NonSeasonalOrder: A specification of the non-seasonal part of the + // ARIMA model: the three components (p, d, q) are the AR order, the + // degree of differencing, and the MA order. + NonSeasonalOrder *ArimaOrder `json:"nonSeasonalOrder,omitempty"` + // NumClusters: Number of clusters for clustering models. NumClusters int64 `json:"numClusters,omitempty,string"` @@ -7185,23 +7204,39 @@ type TrainingOptions struct { // regression problem. OptimizationStrategy string `json:"optimizationStrategy,omitempty"` + // PreserveInputStructs: Whether to preserve the input structs in output + // feature names. 
Suppose there is a struct A with field b. When false + // (default), the output feature name is A_b. When true, the output + // feature name is A.b. + PreserveInputStructs bool `json:"preserveInputStructs,omitempty"` + // Subsample: Subsample fraction of the training data to grow tree to - // prevent - // overfitting for boosted tree models. + // prevent overfitting for boosted tree models. Subsample float64 `json:"subsample,omitempty"` + // TimeSeriesDataColumn: Column to be designated as time series data for + // ARIMA model. + TimeSeriesDataColumn string `json:"timeSeriesDataColumn,omitempty"` + + // TimeSeriesIdColumn: The id column that will be used to indicate + // different time series to forecast in parallel. + TimeSeriesIdColumn string `json:"timeSeriesIdColumn,omitempty"` + + // TimeSeriesTimestampColumn: Column to be designated as time series + // timestamp for ARIMA model. + TimeSeriesTimestampColumn string `json:"timeSeriesTimestampColumn,omitempty"` + // UserColumn: User column specified for matrix factorization models. UserColumn string `json:"userColumn,omitempty"` // WalsAlpha: Hyperparameter for matrix factoration when implicit - // feedback type is - // specified. + // feedback type is specified. WalsAlpha float64 `json:"walsAlpha,omitempty"` // WarmStart: Whether to train a model from the last checkpoint. WarmStart bool `json:"warmStart,omitempty"` - // ForceSendFields is a list of field names (e.g. "BatchSize") to + // ForceSendFields is a list of field names (e.g. "AutoArima") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -7209,7 +7244,7 @@ type TrainingOptions struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BatchSize") to include in + // NullFields is a list of field names (e.g. "AutoArima") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -7260,13 +7295,11 @@ func (s *TrainingOptions) UnmarshalJSON(data []byte) error { // model. type TrainingRun struct { // DataSplitResult: Data split result of the training run. Only set when - // the input data is - // actually split. + // the input data is actually split. DataSplitResult *DataSplitResult `json:"dataSplitResult,omitempty"` // EvaluationMetrics: The evaluation metrics over training/eval data - // that were computed at the - // end of training. + // that were computed at the end of training. EvaluationMetrics *EvaluationMetrics `json:"evaluationMetrics,omitempty"` // Results: Output of each iteration run, results.size() <= @@ -7277,8 +7310,7 @@ type TrainingRun struct { StartTime string `json:"startTime,omitempty"` // TrainingOptions: Options that were used for this training run, - // includes - // user specified and default options that were used. + // includes user specified and default options that were used. TrainingOptions *TrainingOptions `json:"trainingOptions,omitempty"` // ForceSendFields is a list of field names (e.g. 
"DataSplitResult") to @@ -7305,6 +7337,39 @@ func (s *TrainingRun) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type TransactionInfo struct { + // TransactionId: [Output-only] // [Alpha] Id of the transaction. + TransactionId string `json:"transactionId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TransactionId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TransactionId") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TransactionInfo) MarshalJSON() ([]byte, error) { + type NoMethod TransactionInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UserDefinedFunctionResource: This is used for defining User Defined +// Function (UDF) resources only when using legacy SQL. Users of +// Standard SQL should leverage either DDL (e.g. CREATE [TEMPORARY] +// FUNCTION ... ) or the Routines API to define UDF resources. For +// additional information on migrating, see: +// https://cloud.google.com/bigquery/docs/reference/standard-sql/migrating-from-legacy-sql#differences_in_user-defined_javascript_functions type UserDefinedFunctionResource struct { // InlineCode: [Pick one] An inline resource that contains code for a // user-defined function (UDF). 
Providing a inline code resource is @@ -7434,7 +7499,7 @@ func (c *DatasetsDeleteCall) Header() http.Header { func (c *DatasetsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7561,7 +7626,7 @@ func (c *DatasetsGetCall) Header() http.Header { func (c *DatasetsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7705,7 +7770,7 @@ func (c *DatasetsInsertCall) Header() http.Header { func (c *DatasetsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7885,7 +7950,7 @@ func (c *DatasetsListCall) Header() http.Header { func (c *DatasetsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8068,7 +8133,7 @@ func (c *DatasetsPatchCall) Header() http.Header { func (c *DatasetsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8219,7 +8284,7 @@ func (c *DatasetsUpdateCall) Header() http.Header { func (c *DatasetsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8377,7 +8442,7 @@ func (c *JobsCancelCall) Header() http.Header { func (c *JobsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8542,7 +8607,7 @@ func (c *JobsGetCall) Header() http.Header { func (c *JobsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8741,7 +8806,7 @@ func (c *JobsGetQueryResultsCall) Header() http.Header { func (c *JobsGetQueryResultsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") 
+ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8975,7 +9040,7 @@ func (c *JobsInsertCall) Header() http.Header { func (c *JobsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9242,7 +9307,7 @@ func (c *JobsListCall) Header() http.Header { func (c *JobsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9462,7 +9527,7 @@ func (c *JobsQueryCall) Header() http.Header { func (c *JobsQueryCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9605,7 +9670,7 @@ func (c *ModelsDeleteCall) Header() http.Header { func (c *ModelsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9741,7 +9806,7 @@ func (c *ModelsGetCall) Header() http.Header { func (c *ModelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9864,8 +9929,7 @@ type ModelsListCall struct { } // List: Lists all models in the specified dataset. Requires the READER -// dataset -// role. +// dataset role. func (r *ModelsService) List(projectId string, datasetId string) *ModelsListCall { c := &ModelsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -9874,16 +9938,15 @@ func (r *ModelsService) List(projectId string, datasetId string) *ModelsListCall } // MaxResults sets the optional parameter "maxResults": The maximum -// number of results to return in a single response page. -// Leverage the page tokens to iterate through the entire collection. +// number of results to return in a single response page. Leverage the +// page tokens to iterate through the entire collection. 
func (c *ModelsListCall) MaxResults(maxResults int64) *ModelsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } // PageToken sets the optional parameter "pageToken": Page token, -// returned by a previous call to request the next page of -// results +// returned by a previous call to request the next page of results func (c *ModelsListCall) PageToken(pageToken string) *ModelsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -9926,7 +9989,7 @@ func (c *ModelsListCall) Header() http.Header { func (c *ModelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9989,7 +10052,7 @@ func (c *ModelsListCall) Do(opts ...googleapi.CallOption) (*ListModelsResponse, } return ret, nil // { - // "description": "Lists all models in the specified dataset. Requires the READER dataset\nrole.", + // "description": "Lists all models in the specified dataset. Requires the READER dataset role.", // "flatPath": "projects/{projectsId}/datasets/{datasetsId}/models", // "httpMethod": "GET", // "id": "bigquery.models.list", @@ -10006,13 +10069,13 @@ func (c *ModelsListCall) Do(opts ...googleapi.CallOption) (*ListModelsResponse, // "type": "string" // }, // "maxResults": { - // "description": "The maximum number of results to return in a single response page.\nLeverage the page tokens to iterate through the entire collection.", + // "description": "The maximum number of results to return in a single response page. Leverage the page tokens to iterate through the entire collection.", // "format": "uint32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Page token, returned by a previous call to request the next page of\nresults", + // "description": "Page token, returned by a previous call to request the next page of results", // "location": "query", // "type": "string" // }, @@ -10109,7 +10172,7 @@ func (c *ModelsPatchCall) Header() http.Header { func (c *ModelsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10278,7 +10341,7 @@ func (c *ProjectsGetServiceAccountCall) Header() http.Header { func (c *ProjectsGetServiceAccountCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10436,7 +10499,7 @@ func (c *ProjectsListCall) Header() http.Header { func (c *ProjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10594,7 +10657,7 @@ func (c *RoutinesDeleteCall) Header() http.Header { func (c *RoutinesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := 
make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10694,8 +10757,8 @@ func (r *RoutinesService) Get(projectId string, datasetId string, routineId stri } // ReadMask sets the optional parameter "readMask": If set, only the -// Routine fields in the field mask are returned in the -// response. If unset, all Routine fields are returned. +// Routine fields in the field mask are returned in the response. If +// unset, all Routine fields are returned. func (c *RoutinesGetCall) ReadMask(readMask string) *RoutinesGetCall { c.urlParams_.Set("readMask", readMask) return c @@ -10738,7 +10801,7 @@ func (c *RoutinesGetCall) Header() http.Header { func (c *RoutinesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10827,7 +10890,7 @@ func (c *RoutinesGetCall) Do(opts ...googleapi.CallOption) (*Routine, error) { // "type": "string" // }, // "readMask": { - // "description": "If set, only the Routine fields in the field mask are returned in the\nresponse. If unset, all Routine fields are returned.", + // "description": "If set, only the Routine fields in the field mask are returned in the response. If unset, all Routine fields are returned.", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -10902,7 +10965,7 @@ func (c *RoutinesInsertCall) Header() http.Header { func (c *RoutinesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11019,8 +11082,7 @@ type RoutinesListCall struct { } // List: Lists all routines in the specified dataset. Requires the -// READER dataset -// role. +// READER dataset role. func (r *RoutinesService) List(projectId string, datasetId string) *RoutinesListCall { c := &RoutinesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -11029,40 +11091,34 @@ func (r *RoutinesService) List(projectId string, datasetId string) *RoutinesList } // Filter sets the optional parameter "filter": If set, then only the -// Routines matching this filter are returned. -// The current supported form is either "routine_type:" -// or -// "routineType:", where is a RoutineType -// enum. -// Example: "routineType:SCALAR_FUNCTION". +// Routines matching this filter are returned. The current supported +// form is either "routine_type:" or "routineType:", where is a +// RoutineType enum. Example: "routineType:SCALAR_FUNCTION". func (c *RoutinesListCall) Filter(filter string) *RoutinesListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum -// number of results to return in a single response page. -// Leverage the page tokens to iterate through the entire collection. +// number of results to return in a single response page. Leverage the +// page tokens to iterate through the entire collection. 
func (c *RoutinesListCall) MaxResults(maxResults int64) *RoutinesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } // PageToken sets the optional parameter "pageToken": Page token, -// returned by a previous call, to request the next page of -// results +// returned by a previous call, to request the next page of results func (c *RoutinesListCall) PageToken(pageToken string) *RoutinesListCall { c.urlParams_.Set("pageToken", pageToken) return c } // ReadMask sets the optional parameter "readMask": If set, then only -// the Routine fields in the field mask, as well as -// project_id, dataset_id and routine_id, are returned in the -// response. -// If unset, then the following Routine fields are returned: -// etag, project_id, dataset_id, routine_id, routine_type, -// creation_time, +// the Routine fields in the field mask, as well as project_id, +// dataset_id and routine_id, are returned in the response. If unset, +// then the following Routine fields are returned: etag, project_id, +// dataset_id, routine_id, routine_type, creation_time, // last_modified_time, and language. func (c *RoutinesListCall) ReadMask(readMask string) *RoutinesListCall { c.urlParams_.Set("readMask", readMask) @@ -11106,7 +11162,7 @@ func (c *RoutinesListCall) Header() http.Header { func (c *RoutinesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11169,7 +11225,7 @@ func (c *RoutinesListCall) Do(opts ...googleapi.CallOption) (*ListRoutinesRespon } return ret, nil // { - // "description": "Lists all routines in the specified dataset. Requires the READER dataset\nrole.", + // "description": "Lists all routines in the specified dataset. Requires the READER dataset role.", // "flatPath": "projects/{projectsId}/datasets/{datasetsId}/routines", // "httpMethod": "GET", // "id": "bigquery.routines.list", @@ -11186,18 +11242,18 @@ func (c *RoutinesListCall) Do(opts ...googleapi.CallOption) (*ListRoutinesRespon // "type": "string" // }, // "filter": { - // "description": "If set, then only the Routines matching this filter are returned.\nThe current supported form is either \"routine_type:\u003cRoutineType\u003e\" or\n\"routineType:\u003cRoutineType\u003e\", where \u003cRoutineType\u003e is a RoutineType enum.\nExample: \"routineType:SCALAR_FUNCTION\".", + // "description": "If set, then only the Routines matching this filter are returned. The current supported form is either \"routine_type:\" or \"routineType:\", where is a RoutineType enum. Example: \"routineType:SCALAR_FUNCTION\".", // "location": "query", // "type": "string" // }, // "maxResults": { - // "description": "The maximum number of results to return in a single response page.\nLeverage the page tokens to iterate through the entire collection.", + // "description": "The maximum number of results to return in a single response page. 
Leverage the page tokens to iterate through the entire collection.", // "format": "uint32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Page token, returned by a previous call, to request the next page of\nresults", + // "description": "Page token, returned by a previous call, to request the next page of results", // "location": "query", // "type": "string" // }, @@ -11209,7 +11265,7 @@ func (c *RoutinesListCall) Do(opts ...googleapi.CallOption) (*ListRoutinesRespon // "type": "string" // }, // "readMask": { - // "description": "If set, then only the Routine fields in the field mask, as well as\nproject_id, dataset_id and routine_id, are returned in the response.\nIf unset, then the following Routine fields are returned:\netag, project_id, dataset_id, routine_id, routine_type, creation_time,\nlast_modified_time, and language.", + // "description": "If set, then only the Routine fields in the field mask, as well as project_id, dataset_id and routine_id, are returned in the response. If unset, then the following Routine fields are returned: etag, project_id, dataset_id, routine_id, routine_type, creation_time, last_modified_time, and language.", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -11264,8 +11320,7 @@ type RoutinesUpdateCall struct { } // Update: Updates information in an existing routine. The update method -// replaces the -// entire Routine resource. +// replaces the entire Routine resource. func (r *RoutinesService) Update(projectId string, datasetId string, routineId string, routine *Routine) *RoutinesUpdateCall { c := &RoutinesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -11302,7 +11357,7 @@ func (c *RoutinesUpdateCall) Header() http.Header { func (c *RoutinesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11368,7 +11423,7 @@ func (c *RoutinesUpdateCall) Do(opts ...googleapi.CallOption) (*Routine, error) } return ret, nil // { - // "description": "Updates information in an existing routine. The update method replaces the\nentire Routine resource.", + // "description": "Updates information in an existing routine. 
The update method replaces the entire Routine resource.", // "flatPath": "projects/{projectsId}/datasets/{datasetsId}/routines/{routinesId}", // "httpMethod": "PUT", // "id": "bigquery.routines.update", @@ -11466,7 +11521,7 @@ func (c *TabledataInsertAllCall) Header() http.Header { func (c *TabledataInsertAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11665,7 +11720,7 @@ func (c *TabledataListCall) Header() http.Header { func (c *TabledataListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11863,7 +11918,7 @@ func (c *TablesDeleteCall) Header() http.Header { func (c *TablesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12005,7 +12060,7 @@ func (c *TablesGetCall) Header() http.Header { func (c *TablesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12127,9 +12182,8 @@ type TablesGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy // set. func (r *TablesService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *TablesGetIamPolicyCall { c := &TablesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -12165,7 +12219,7 @@ func (c *TablesGetIamPolicyCall) Header() http.Header { func (c *TablesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12229,7 +12283,7 @@ func (c *TablesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } return ret, nil // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "description": "Gets the access control policy for a resource. 
Returns an empty policy if the resource exists and does not have a policy set.", // "flatPath": "projects/{projectsId}/datasets/{datasetsId}/tables/{tablesId}:getIamPolicy", // "httpMethod": "POST", // "id": "bigquery.tables.getIamPolicy", @@ -12238,7 +12292,7 @@ func (c *TablesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/datasets/[^/]+/tables/[^/]+$", // "required": true, @@ -12310,7 +12364,7 @@ func (c *TablesInsertCall) Header() http.Header { func (c *TablesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12483,7 +12537,7 @@ func (c *TablesListCall) Header() http.Header { func (c *TablesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12666,7 +12720,7 @@ func (c *TablesPatchCall) Header() http.Header { func (c *TablesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12787,11 +12841,8 @@ type TablesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -// -// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` -// errors. +// resource. Replaces any existing policy. Can return `NOT_FOUND`, +// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. func (r *TablesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *TablesSetIamPolicyCall { c := &TablesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -12826,7 +12877,7 @@ func (c *TablesSetIamPolicyCall) Header() http.Header { func (c *TablesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12890,7 +12941,7 @@ func (c *TablesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. 
Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", // "flatPath": "projects/{projectsId}/datasets/{datasetsId}/tables/{tablesId}:setIamPolicy", // "httpMethod": "POST", // "id": "bigquery.tables.setIamPolicy", @@ -12899,7 +12950,7 @@ func (c *TablesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/datasets/[^/]+/tables/[^/]+$", // "required": true, @@ -12933,16 +12984,11 @@ type TablesTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a `NOT_FOUND` error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. This +// operation may "fail open" without warning. func (r *TablesService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *TablesTestIamPermissionsCall { c := &TablesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -12977,7 +13023,7 @@ func (c *TablesTestIamPermissionsCall) Header() http.Header { func (c *TablesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13041,7 +13087,7 @@ func (c *TablesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIa } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. 
This operation may \"fail open\" without warning.", // "flatPath": "projects/{projectsId}/datasets/{datasetsId}/tables/{tablesId}:testIamPermissions", // "httpMethod": "POST", // "id": "bigquery.tables.testIamPermissions", @@ -13050,7 +13096,7 @@ func (c *TablesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIa // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/datasets/[^/]+/tables/[^/]+$", // "required": true, @@ -13126,7 +13172,7 @@ func (c *TablesUpdateCall) Header() http.Header { func (c *TablesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-api.json b/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-api.json index 77b54e070f9..afe2a852567 100644 --- a/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-api.json +++ b/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-api.json @@ -132,7 +132,7 @@ "operations": { "methods": { "cancel": { - "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", "flatPath": "v2/operations/{operationsId}:cancel", "httpMethod": "POST", "id": "bigtableadmin.operations.cancel", @@ -162,7 +162,7 @@ ] }, "delete": { - "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. 
If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", "flatPath": "v2/operations/{operationsId}", "httpMethod": "DELETE", "id": "bigtableadmin.operations.delete", @@ -192,7 +192,7 @@ ] }, "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v2/operations/{operationsId}", "httpMethod": "GET", "id": "bigtableadmin.operations.get", @@ -229,7 +229,7 @@ "operations": { "methods": { "list": { - "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", "flatPath": "v2/operations/projects/{projectsId}/operations", "httpMethod": "GET", "id": "bigtableadmin.operations.projects.operations.list", @@ -295,7 +295,7 @@ ], "parameters": { "parent": { - "description": "Required. The unique name of the project in which to create the new instance.\nValues are of the form `projects/{project}`.", + "description": "Required. The unique name of the project in which to create the new instance. Values are of the form `projects/{project}`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -328,7 +328,7 @@ ], "parameters": { "name": { - "description": "Required. The unique name of the instance to be deleted.\nValues are of the form `projects/{project}/instances/{instance}`.", + "description": "Required. The unique name of the instance to be deleted. Values are of the form `projects/{project}/instances/{instance}`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -358,7 +358,7 @@ ], "parameters": { "name": { - "description": "Required. The unique name of the requested instance. Values are of the form\n`projects/{project}/instances/{instance}`.", + "description": "Required. The unique name of the requested instance. Values are of the form `projects/{project}/instances/{instance}`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -380,7 +380,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for an instance resource. 
Returns an empty\npolicy if an instance exists but does not have a policy set.", + "description": "Gets the access control policy for an instance resource. Returns an empty policy if an instance exists but does not have a policy set.", "flatPath": "v2/projects/{projectsId}/instances/{instancesId}:getIamPolicy", "httpMethod": "POST", "id": "bigtableadmin.projects.instances.getIamPolicy", @@ -389,7 +389,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -427,7 +427,7 @@ "type": "string" }, "parent": { - "description": "Required. The unique name of the project for which a list of instances is requested.\nValues are of the form `projects/{project}`.", + "description": "Required. The unique name of the project for which a list of instances is requested. Values are of the form `projects/{project}`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -449,7 +449,7 @@ ] }, "partialUpdateInstance": { - "description": "Partially updates an instance within a project. This method can modify all\nfields of an Instance and is the preferred way to update an Instance.", + "description": "Partially updates an instance within a project. This method can modify all fields of an Instance and is the preferred way to update an Instance.", "flatPath": "v2/projects/{projectsId}/instances/{instancesId}", "httpMethod": "PATCH", "id": "bigtableadmin.projects.instances.partialUpdateInstance", @@ -458,14 +458,14 @@ ], "parameters": { "name": { - "description": "Required. (`OutputOnly`)\nThe unique name of the instance. Values are of the form\n`projects/{project}/instances/a-z+[a-z0-9]`.", + "description": "The unique name of the instance. Values are of the form `projects/{project}/instances/a-z+[a-z0-9]`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, "type": "string" }, "updateMask": { - "description": "Required. The subset of Instance fields which should be replaced.\nMust be explicitly set.", + "description": "Required. The subset of Instance fields which should be replaced. Must be explicitly set.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -488,7 +488,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on an instance resource. Replaces any\nexisting policy.", + "description": "Sets the access control policy on an instance resource. Replaces any existing policy.", "flatPath": "v2/projects/{projectsId}/instances/{instancesId}:setIamPolicy", "httpMethod": "POST", "id": "bigtableadmin.projects.instances.setIamPolicy", @@ -497,7 +497,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. 
See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -530,7 +530,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -554,7 +554,7 @@ ] }, "update": { - "description": "Updates an instance within a project. This method updates only the display\nname and type for an Instance. To update other Instance properties, such as\nlabels, use PartialUpdateInstance.", + "description": "Updates an instance within a project. This method updates only the display name and type for an Instance. To update other Instance properties, such as labels, use PartialUpdateInstance.", "flatPath": "v2/projects/{projectsId}/instances/{instancesId}", "httpMethod": "PUT", "id": "bigtableadmin.projects.instances.update", @@ -563,7 +563,7 @@ ], "parameters": { "name": { - "description": "Required. (`OutputOnly`)\nThe unique name of the instance. Values are of the form\n`projects/{project}/instances/a-z+[a-z0-9]`.", + "description": "The unique name of the instance. Values are of the form `projects/{project}/instances/a-z+[a-z0-9]`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -600,7 +600,7 @@ ], "parameters": { "appProfileId": { - "description": "Required. The ID to be used when referring to the new app profile within its\ninstance, e.g., just `myprofile` rather than\n`projects/myproject/instances/myinstance/appProfiles/myprofile`.", + "description": "Required. The ID to be used when referring to the new app profile within its instance, e.g., just `myprofile` rather than `projects/myproject/instances/myinstance/appProfiles/myprofile`.", "location": "query", "type": "string" }, @@ -610,7 +610,7 @@ "type": "boolean" }, "parent": { - "description": "Required. The unique name of the instance in which to create the new app profile.\nValues are of the form\n`projects/{project}/instances/{instance}`.", + "description": "Required. The unique name of the instance in which to create the new app profile. Values are of the form `projects/{project}/instances/{instance}`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -648,7 +648,7 @@ "type": "boolean" }, "name": { - "description": "Required. The unique name of the app profile to be deleted. Values are of the form\n`projects/{project}/instances/{instance}/appProfiles/{app_profile}`.", + "description": "Required. The unique name of the app profile to be deleted. Values are of the form `projects/{project}/instances/{instance}/appProfiles/{app_profile}`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/appProfiles/[^/]+$", "required": true, @@ -678,7 +678,7 @@ ], "parameters": { "name": { - "description": "Required. The unique name of the requested app profile. Values are of the form\n`projects/{project}/instances/{instance}/appProfiles/{app_profile}`.", + "description": "Required. The unique name of the requested app profile. 
Values are of the form `projects/{project}/instances/{instance}/appProfiles/{app_profile}`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/appProfiles/[^/]+$", "required": true, @@ -709,7 +709,7 @@ ], "parameters": { "pageSize": { - "description": "Maximum number of results per page.\n\nA page_size of zero lets the server choose the number of items to return.\nA page_size which is strictly positive will return at most that many items.\nA negative page_size will cause an error.\n\nFollowing the first request, subsequent paginated calls are not required\nto pass a page_size. If a page_size is set in subsequent calls, it must\nmatch the page_size given in the first request.", + "description": "Maximum number of results per page. A page_size of zero lets the server choose the number of items to return. A page_size which is strictly positive will return at most that many items. A negative page_size will cause an error. Following the first request, subsequent paginated calls are not required to pass a page_size. If a page_size is set in subsequent calls, it must match the page_size given in the first request.", "format": "int32", "location": "query", "type": "integer" @@ -720,7 +720,7 @@ "type": "string" }, "parent": { - "description": "Required. The unique name of the instance for which a list of app profiles is\nrequested. Values are of the form\n`projects/{project}/instances/{instance}`.\nUse `{instance} = '-'` to list AppProfiles for all Instances in a project,\ne.g., `projects/myproject/instances/-`.", + "description": "Required. The unique name of the instance for which a list of app profiles is requested. Values are of the form `projects/{project}/instances/{instance}`. Use `{instance} = '-'` to list AppProfiles for all Instances in a project, e.g., `projects/myproject/instances/-`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -755,14 +755,14 @@ "type": "boolean" }, "name": { - "description": "(`OutputOnly`)\nThe unique name of the app profile. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/appProfiles/_a-zA-Z0-9*`.", + "description": "The unique name of the app profile. Values are of the form `projects/{project}/instances/{instance}/appProfiles/_a-zA-Z0-9*`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/appProfiles/[^/]+$", "required": true, "type": "string" }, "updateMask": { - "description": "Required. The subset of app profile fields which should be replaced.\nIf unset, all fields will be replaced.", + "description": "Required. The subset of app profile fields which should be replaced. If unset, all fields will be replaced.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -798,12 +798,12 @@ ], "parameters": { "clusterId": { - "description": "Required. The ID to be used when referring to the new cluster within its instance,\ne.g., just `mycluster` rather than\n`projects/myproject/instances/myinstance/clusters/mycluster`.", + "description": "Required. The ID to be used when referring to the new cluster within its instance, e.g., just `mycluster` rather than `projects/myproject/instances/myinstance/clusters/mycluster`.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The unique name of the instance in which to create the new cluster.\nValues are of the form\n`projects/{project}/instances/{instance}`.", + "description": "Required. The unique name of the instance in which to create the new cluster. 
Values are of the form `projects/{project}/instances/{instance}`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -836,7 +836,7 @@ ], "parameters": { "name": { - "description": "Required. The unique name of the cluster to be deleted. Values are of the form\n`projects/{project}/instances/{instance}/clusters/{cluster}`.", + "description": "Required. The unique name of the cluster to be deleted. Values are of the form `projects/{project}/instances/{instance}/clusters/{cluster}`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+$", "required": true, @@ -866,7 +866,7 @@ ], "parameters": { "name": { - "description": "Required. The unique name of the requested cluster. Values are of the form\n`projects/{project}/instances/{instance}/clusters/{cluster}`.", + "description": "Required. The unique name of the requested cluster. Values are of the form `projects/{project}/instances/{instance}/clusters/{cluster}`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+$", "required": true, @@ -902,7 +902,7 @@ "type": "string" }, "parent": { - "description": "Required. The unique name of the instance for which a list of clusters is requested.\nValues are of the form `projects/{project}/instances/{instance}`.\nUse `{instance} = '-'` to list Clusters for all Instances in a project,\ne.g., `projects/myproject/instances/-`.", + "description": "Required. The unique name of the instance for which a list of clusters is requested. Values are of the form `projects/{project}/instances/{instance}`. Use `{instance} = '-'` to list Clusters for all Instances in a project, e.g., `projects/myproject/instances/-`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -933,7 +933,7 @@ ], "parameters": { "name": { - "description": "Required. (`OutputOnly`)\nThe unique name of the cluster. Values are of the form\n`projects/{project}/instances/{instance}/clusters/a-z*`.", + "description": "The unique name of the cluster. Values are of the form `projects/{project}/instances/{instance}/clusters/a-z*`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+$", "required": true, @@ -960,8 +960,103 @@ "resources": { "backups": { "methods": { + "create": { + "description": "Starts creating a new Cloud Bigtable Backup. The returned backup long-running operation can be used to track creation of the backup. The metadata field type is CreateBackupMetadata. The response field type is Backup, if successful. Cancelling the returned operation will stop the creation and delete the backup.", + "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/clusters/{clustersId}/backups", + "httpMethod": "POST", + "id": "bigtableadmin.projects.instances.clusters.backups.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "backupId": { + "description": "Required. The id of the backup to be created. The `backup_id` along with the parent `parent` are combined as {parent}/backups/{backup_id} to create the full backup name, of the form: `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}`. This string must be between 1 and 50 characters in length and match the regex _a-zA-Z0-9*.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. This must be one of the clusters in the instance in which this table is located. The backup will be stored in this cluster. 
Values are of the form `projects/{project}/instances/{instance}/clusters/{cluster}`.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/backups", + "request": { + "$ref": "Backup" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a pending or completed Cloud Bigtable backup.", + "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/clusters/{clustersId}/backups/{backupsId}", + "httpMethod": "DELETE", + "id": "bigtableadmin.projects.instances.clusters.backups.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. Name of the backup to delete. Values are of the form `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+/backups/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets metadata on a pending or completed Cloud Bigtable Backup.", + "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/clusters/{clustersId}/backups/{backupsId}", + "httpMethod": "GET", + "id": "bigtableadmin.projects.instances.clusters.backups.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. Name of the backup. Values are of the form `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+/backups/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Backup" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "getIamPolicy": { - "description": "Gets the access control policy for a Table resource.\nReturns an empty policy if the resource exists but does not have a policy\nset.", + "description": "Gets the access control policy for a Table resource. Returns an empty policy if the resource exists but does not have a policy set.", "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/clusters/{clustersId}/backups/{backupsId}:getIamPolicy", "httpMethod": "POST", "id": "bigtableadmin.projects.instances.clusters.backups.getIamPolicy", @@ -970,7 +1065,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. 
See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+/backups/[^/]+$", "required": true, @@ -992,8 +1087,96 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "list": { + "description": "Lists Cloud Bigtable backups. Returns both completed and pending backups.", + "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/clusters/{clustersId}/backups", + "httpMethod": "GET", + "id": "bigtableadmin.projects.instances.clusters.backups.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters backups listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be \u003c, \u003e, \u003c=, \u003e=, !=, =, or :. Colon ':' represents a HAS operator which is roughly synonymous with equality. Filter rules are case insensitive. The fields eligible for filtering are: * `name` * `source_table` * `state` * `start_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * `end_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * `size_bytes` To filter on multiple expressions, provide each separate expression within parentheses. By default, each expression is an AND expression. However, you can include AND, OR, and NOT expressions explicitly. Some examples of using filters are: * `name:\"exact\"` --\u003e The backup's name is the string \"exact\". * `name:howl` --\u003e The backup's name contains the string \"howl\". * `source_table:prod` --\u003e The source_table's name contains the string \"prod\". * `state:CREATING` --\u003e The backup is pending creation. * `state:READY` --\u003e The backup is fully created and ready for use. * `(name:howl) AND (start_time \u003c \\\"2018-03-28T14:50:00Z\\\")` --\u003e The backup name contains the string \"howl\" and start_time of the backup is before 2018-03-28T14:50:00Z. * `size_bytes \u003e 10000000000` --\u003e The backup's size is greater than 10GB", + "location": "query", + "type": "string" + }, + "orderBy": { + "description": "An expression for specifying the sort order of the results of the request. The string value should specify one or more fields in Backup. The full syntax is described at https://aip.dev/132#ordering. Fields supported are: * name * source_table * expire_time * start_time * end_time * size_bytes * state For example, \"start_time\". The default sorting order is ascending. To specify descending order for the field, a suffix \" desc\" should be appended to the field name. For example, \"start_time desc\". Redundant space characters in the syntax are insigificant. If order_by is empty, results will be sorted by `start_time` in descending order starting from the most recently created backup.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Number of backups to be returned in the response. If 0 or less, defaults to the server's maximum allowed page size.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "If non-empty, `page_token` should contain a next_page_token from a previous ListBackupsResponse to the same `parent` and with the same `filter`.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. 
The cluster to list backups from. Values are of the form `projects/{project}/instances/{instance}/clusters/{cluster}`. Use `{cluster} = '-'` to list backups for all clusters in an instance, e.g., `projects/{project}/instances/{instance}/clusters/-`.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/backups", + "response": { + "$ref": "ListBackupsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates a pending or completed Cloud Bigtable Backup.", + "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/clusters/{clustersId}/backups/{backupsId}", + "httpMethod": "PATCH", + "id": "bigtableadmin.projects.instances.clusters.backups.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "A globally unique identifier for the backup which cannot be changed. Values are of the form `projects/{project}/instances/{instance}/clusters/{cluster}/ backups/_a-zA-Z0-9*` The final segment of the name must be between 1 and 50 characters in length. The backup is stored in the cluster identified by the prefix of the backup name of the form `projects/{project}/instances/{instance}/clusters/{cluster}`.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+/backups/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. A mask specifying which fields (e.g. `expire_time`) in the Backup resource should be updated. This mask is relative to the Backup resource, not to the request message. The field mask must always be specified; this prevents any future fields from being erased accidentally by clients that do not know about them.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "Backup" + }, + "response": { + "$ref": "Backup" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "setIamPolicy": { - "description": "Sets the access control policy on a Table resource.\nReplaces any existing policy.", + "description": "Sets the access control policy on a Table resource. Replaces any existing policy.", "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/clusters/{clustersId}/backups/{backupsId}:setIamPolicy", "httpMethod": "POST", "id": "bigtableadmin.projects.instances.clusters.backups.setIamPolicy", @@ -1002,7 +1185,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. 
See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+/backups/[^/]+$", "required": true, @@ -1034,7 +1217,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+/backups/[^/]+$", "required": true, @@ -1063,7 +1246,7 @@ "tables": { "methods": { "checkConsistency": { - "description": "Checks replication consistency based on a consistency token, that is, if\nreplication has caught up based on the conditions specified in the token\nand the check request.", + "description": "Checks replication consistency based on a consistency token, that is, if replication has caught up based on the conditions specified in the token and the check request.", "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}:checkConsistency", "httpMethod": "POST", "id": "bigtableadmin.projects.instances.tables.checkConsistency", @@ -1072,7 +1255,7 @@ ], "parameters": { "name": { - "description": "Required. The unique name of the Table for which to check replication consistency.\nValues are of the form\n`projects/{project}/instances/{instance}/tables/{table}`.", + "description": "Required. The unique name of the Table for which to check replication consistency. Values are of the form `projects/{project}/instances/{instance}/tables/{table}`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", "required": true, @@ -1095,7 +1278,7 @@ ] }, "create": { - "description": "Creates a new table in the specified instance.\nThe table can be created with a full set of initial column families,\nspecified in the request.", + "description": "Creates a new table in the specified instance. The table can be created with a full set of initial column families, specified in the request.", "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables", "httpMethod": "POST", "id": "bigtableadmin.projects.instances.tables.create", @@ -1104,7 +1287,7 @@ ], "parameters": { "parent": { - "description": "Required. The unique name of the instance in which to create the table.\nValues are of the form `projects/{project}/instances/{instance}`.", + "description": "Required. The unique name of the instance in which to create the table. Values are of the form `projects/{project}/instances/{instance}`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -1136,7 +1319,7 @@ ], "parameters": { "name": { - "description": "Required. The unique name of the table to be deleted.\nValues are of the form\n`projects/{project}/instances/{instance}/tables/{table}`.", + "description": "Required. The unique name of the table to be deleted. Values are of the form `projects/{project}/instances/{instance}/tables/{table}`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", "required": true, @@ -1156,7 +1339,7 @@ ] }, "dropRowRange": { - "description": "Permanently drop/delete a row range from a specified table. 
The request can\nspecify whether to delete all rows in a table, or only those that match a\nparticular prefix.", + "description": "Permanently drop/delete a row range from a specified table. The request can specify whether to delete all rows in a table, or only those that match a particular prefix.", "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}:dropRowRange", "httpMethod": "POST", "id": "bigtableadmin.projects.instances.tables.dropRowRange", @@ -1165,7 +1348,7 @@ ], "parameters": { "name": { - "description": "Required. The unique name of the table on which to drop a range of rows.\nValues are of the form\n`projects/{project}/instances/{instance}/tables/{table}`.", + "description": "Required. The unique name of the table on which to drop a range of rows. Values are of the form `projects/{project}/instances/{instance}/tables/{table}`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", "required": true, @@ -1188,7 +1371,7 @@ ] }, "generateConsistencyToken": { - "description": "Generates a consistency token for a Table, which can be used in\nCheckConsistency to check whether mutations to the table that finished\nbefore this call started have been replicated. The tokens will be available\nfor 90 days.", + "description": "Generates a consistency token for a Table, which can be used in CheckConsistency to check whether mutations to the table that finished before this call started have been replicated. The tokens will be available for 90 days.", "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}:generateConsistencyToken", "httpMethod": "POST", "id": "bigtableadmin.projects.instances.tables.generateConsistencyToken", @@ -1197,7 +1380,7 @@ ], "parameters": { "name": { - "description": "Required. The unique name of the Table for which to create a consistency token.\nValues are of the form\n`projects/{project}/instances/{instance}/tables/{table}`.", + "description": "Required. The unique name of the Table for which to create a consistency token. Values are of the form `projects/{project}/instances/{instance}/tables/{table}`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", "required": true, @@ -1229,14 +1412,14 @@ ], "parameters": { "name": { - "description": "Required. The unique name of the requested table.\nValues are of the form\n`projects/{project}/instances/{instance}/tables/{table}`.", + "description": "Required. The unique name of the requested table. Values are of the form `projects/{project}/instances/{instance}/tables/{table}`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", "required": true, "type": "string" }, "view": { - "description": "The view to be applied to the returned table's fields.\nDefaults to `SCHEMA_VIEW` if unspecified.", + "description": "The view to be applied to the returned table's fields. Defaults to `SCHEMA_VIEW` if unspecified.", "enum": [ "VIEW_UNSPECIFIED", "NAME_ONLY", @@ -1244,6 +1427,13 @@ "REPLICATION_VIEW", "FULL" ], + "enumDescriptions": [ + "Uses the default view for each method as documented in its request.", + "Only populates `name`.", + "Only populates `name` and fields related to the table's schema.", + "Only populates `name` and fields related to the table's replication state.", + "Populates all fields." 
+ ], "location": "query", "type": "string" } @@ -1262,7 +1452,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a Table resource.\nReturns an empty policy if the resource exists but does not have a policy\nset.", + "description": "Gets the access control policy for a Table resource. Returns an empty policy if the resource exists but does not have a policy set.", "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}:getIamPolicy", "httpMethod": "POST", "id": "bigtableadmin.projects.instances.tables.getIamPolicy", @@ -1271,7 +1461,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", "required": true, @@ -1303,7 +1493,7 @@ ], "parameters": { "pageSize": { - "description": "Maximum number of results per page.\n\nA page_size of zero lets the server choose the number of items to return.\nA page_size which is strictly positive will return at most that many items.\nA negative page_size will cause an error.\n\nFollowing the first request, subsequent paginated calls are not required\nto pass a page_size. If a page_size is set in subsequent calls, it must\nmatch the page_size given in the first request.", + "description": "Maximum number of results per page. A page_size of zero lets the server choose the number of items to return. A page_size which is strictly positive will return at most that many items. A negative page_size will cause an error. Following the first request, subsequent paginated calls are not required to pass a page_size. If a page_size is set in subsequent calls, it must match the page_size given in the first request.", "format": "int32", "location": "query", "type": "integer" @@ -1314,14 +1504,14 @@ "type": "string" }, "parent": { - "description": "Required. The unique name of the instance for which tables should be listed.\nValues are of the form `projects/{project}/instances/{instance}`.", + "description": "Required. The unique name of the instance for which tables should be listed. Values are of the form `projects/{project}/instances/{instance}`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, "type": "string" }, "view": { - "description": "The view to be applied to the returned tables' fields.\nOnly NAME_ONLY view (default) and REPLICATION_VIEW are supported.", + "description": "The view to be applied to the returned tables' fields. Only NAME_ONLY view (default) and REPLICATION_VIEW are supported.", "enum": [ "VIEW_UNSPECIFIED", "NAME_ONLY", @@ -1329,6 +1519,13 @@ "REPLICATION_VIEW", "FULL" ], + "enumDescriptions": [ + "Uses the default view for each method as documented in its request.", + "Only populates `name`.", + "Only populates `name` and fields related to the table's schema.", + "Only populates `name` and fields related to the table's replication state.", + "Populates all fields." 
+ ], "location": "query", "type": "string" } @@ -1347,7 +1544,7 @@ ] }, "modifyColumnFamilies": { - "description": "Performs a series of column family modifications on the specified table.\nEither all or none of the modifications will occur before this method\nreturns, but data requests received prior to that point may see a table\nwhere only some modifications have taken effect.", + "description": "Performs a series of column family modifications on the specified table. Either all or none of the modifications will occur before this method returns, but data requests received prior to that point may see a table where only some modifications have taken effect.", "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}:modifyColumnFamilies", "httpMethod": "POST", "id": "bigtableadmin.projects.instances.tables.modifyColumnFamilies", @@ -1356,7 +1553,7 @@ ], "parameters": { "name": { - "description": "Required. The unique name of the table whose families should be modified.\nValues are of the form\n`projects/{project}/instances/{instance}/tables/{table}`.", + "description": "Required. The unique name of the table whose families should be modified. Values are of the form `projects/{project}/instances/{instance}/tables/{table}`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", "required": true, @@ -1378,8 +1575,40 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "restore": { + "description": "Create a new table by restoring from a completed backup. The new table must be in the same instance as the instance containing the backup. The returned table long-running operation can be used to track the progress of the operation, and to cancel it. The metadata field type is RestoreTableMetadata. The response type is Table, if successful.", + "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables:restore", + "httpMethod": "POST", + "id": "bigtableadmin.projects.instances.tables.restore", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The name of the instance in which to create the restored table. This instance must be the parent of the source backup. Values are of the form `projects//instances/`.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/tables:restore", + "request": { + "$ref": "RestoreTableRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "setIamPolicy": { - "description": "Sets the access control policy on a Table resource.\nReplaces any existing policy.", + "description": "Sets the access control policy on a Table resource. Replaces any existing policy.", "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}:setIamPolicy", "httpMethod": "POST", "id": "bigtableadmin.projects.instances.tables.setIamPolicy", @@ -1388,7 +1617,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. 
See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", "required": true, @@ -1420,7 +1649,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", "required": true, @@ -1531,19 +1760,19 @@ } } }, - "revision": "20200424", + "revision": "20200821", "rootUrl": "https://bigtableadmin.googleapis.com/", "schemas": { "AppProfile": { - "description": "A configuration object describing how Cloud Bigtable should treat traffic\nfrom a particular end user application.", + "description": "A configuration object describing how Cloud Bigtable should treat traffic from a particular end user application.", "id": "AppProfile", "properties": { "description": { - "description": "Optional long form description of the use case for this AppProfile.", + "description": "Long form description of the use case for this AppProfile.", "type": "string" }, "etag": { - "description": "Strongly validated etag for optimistic concurrency control. Preserve the\nvalue returned from `GetAppProfile` when calling `UpdateAppProfile` to\nfail the request if there has been a modification in the mean time. The\n`update_mask` of the request need not include `etag` for this protection\nto apply.\nSee [Wikipedia](https://en.wikipedia.org/wiki/HTTP_ETag) and\n[RFC 7232](https://tools.ietf.org/html/rfc7232#section-2.3) for more\ndetails.", + "description": "Strongly validated etag for optimistic concurrency control. Preserve the value returned from `GetAppProfile` when calling `UpdateAppProfile` to fail the request if there has been a modification in the mean time. The `update_mask` of the request need not include `etag` for this protection to apply. See [Wikipedia](https://en.wikipedia.org/wiki/HTTP_ETag) and [RFC 7232](https://tools.ietf.org/html/rfc7232#section-2.3) for more details.", "type": "string" }, "multiClusterRoutingUseAny": { @@ -1551,7 +1780,7 @@ "description": "Use a multi-cluster routing policy." }, "name": { - "description": "(`OutputOnly`)\nThe unique name of the app profile. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/appProfiles/_a-zA-Z0-9*`.", + "description": "The unique name of the app profile. 
Values are of the form `projects/{project}/instances/{instance}/appProfiles/_a-zA-Z0-9*`.", "type": "string" }, "singleClusterRouting": { @@ -1562,7 +1791,7 @@ "type": "object" }, "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"sampleservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:aliya@example.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts jose@example.com from DATA_READ logging, and\naliya@example.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -1573,18 +1802,18 @@ "type": "array" }, "service": { - "description": "Specifies a service that will be enabled for audit logging.\nFor example, `storage.googleapis.com`, `cloudsql.googleapis.com`.\n`allServices` is a special value that covers all services.", + "description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. 
`allServices` is a special value that covers all services.", "type": "string" } }, "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\njose@example.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions. Example: { \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { - "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", + "description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.", "items": { "type": "string" }, @@ -1609,30 +1838,112 @@ }, "type": "object" }, + "Backup": { + "description": "A backup of a Cloud Bigtable table.", + "id": "Backup", + "properties": { + "endTime": { + "description": "Output only. `end_time` is the time that the backup was finished. The row data in the backup will be no newer than this timestamp.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "expireTime": { + "description": "Required. The expiration time of the backup, with microseconds granularity that must be at least 6 hours and at most 30 days from the time the request is received. Once the `expire_time` has passed, Cloud Bigtable will delete the backup and free the resources used by the backup.", + "format": "google-datetime", + "type": "string" + }, + "name": { + "description": "A globally unique identifier for the backup which cannot be changed. Values are of the form `projects/{project}/instances/{instance}/clusters/{cluster}/ backups/_a-zA-Z0-9*` The final segment of the name must be between 1 and 50 characters in length. The backup is stored in the cluster identified by the prefix of the backup name of the form `projects/{project}/instances/{instance}/clusters/{cluster}`.", + "type": "string" + }, + "sizeBytes": { + "description": "Output only. Size of the backup in bytes.", + "format": "int64", + "readOnly": true, + "type": "string" + }, + "sourceTable": { + "description": "Required. Immutable. Name of the table from which this backup was created. This needs to be in the same instance as the backup. Values are of the form `projects/{project}/instances/{instance}/tables/{source_table}`.", + "type": "string" + }, + "startTime": { + "description": "Output only. `start_time` is the time that the backup was started (i.e. approximately the time the CreateBackup request is received). The row data in this backup will be no older than this timestamp.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "state": { + "description": "Output only. The current state of the backup.", + "enum": [ + "STATE_UNSPECIFIED", + "CREATING", + "READY" + ], + "enumDescriptions": [ + "Not specified.", + "The pending backup is still being created. 
Operations on the backup may fail with `FAILED_PRECONDITION` in this state.", + "The backup is complete and ready for use." + ], + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "BackupInfo": { + "description": "Information about a backup.", + "id": "BackupInfo", + "properties": { + "backup": { + "description": "Output only. Name of the backup.", + "readOnly": true, + "type": "string" + }, + "endTime": { + "description": "Output only. This time that the backup was finished. Row data in the backup will be no newer than this timestamp.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "sourceTable": { + "description": "Output only. Name of the table the backup was created from.", + "readOnly": true, + "type": "string" + }, + "startTime": { + "description": "Output only. The time that the backup was started. Row data in the backup will be no older than this timestamp.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "Binding": { "description": "Associates `members` with a `role`.", "id": "Binding", "properties": { "condition": { "$ref": "Expr", - "description": "The condition that is associated with this binding.\nNOTE: An unsatisfied condition will not allow user access via current\nbinding. Different bindings, including their conditions, are examined\nindependently." + "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a user that has been recently deleted. For\n example, `alice@example.com?uid=123456789012345678901`. If the user is\n recovered, this value reverts to `user:{emailid}` and the recovered user\n retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus\n unique identifier) representing a service account that has been recently\n deleted. 
For example,\n `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.\n If the service account is undeleted, this value reverts to\n `serviceAccount:{emailid}` and the undeleted service account retains the\n role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a Google group that has been recently\n deleted. For example, `admins@example.com?uid=123456789012345678901`. If\n the group is recovered, this value reverts to `group:{emailid}` and the\n recovered group retains the role in the binding.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", "items": { "type": "string" }, "type": "array" }, "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "description": "Role that is assigned to `members`. 
For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", "type": "string" } }, "type": "object" }, "CheckConsistencyRequest": { - "description": "Request message for\ngoogle.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency", + "description": "Request message for google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency", "id": "CheckConsistencyRequest", "properties": { "consistencyToken": { @@ -1643,22 +1954,22 @@ "type": "object" }, "CheckConsistencyResponse": { - "description": "Response message for\ngoogle.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency", + "description": "Response message for google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency", "id": "CheckConsistencyResponse", "properties": { "consistent": { - "description": "True only if the token is consistent. A token is consistent if replication\nhas caught up with the restrictions specified in the request.", + "description": "True only if the token is consistent. A token is consistent if replication has caught up with the restrictions specified in the request.", "type": "boolean" } }, "type": "object" }, "Cluster": { - "description": "A resizable group of nodes in a particular cloud location, capable\nof serving all Tables in the parent\nInstance.", + "description": "A resizable group of nodes in a particular cloud location, capable of serving all Tables in the parent Instance.", "id": "Cluster", "properties": { "defaultStorageType": { - "description": "(`CreationOnly`)\nThe type of storage used by this cluster to serve its\nparent instance's tables, unless explicitly overridden.", + "description": "Immutable. The type of storage used by this cluster to serve its parent instance's tables, unless explicitly overridden.", "enum": [ "STORAGE_TYPE_UNSPECIFIED", "SSD", @@ -1672,20 +1983,20 @@ "type": "string" }, "location": { - "description": "(`CreationOnly`)\nThe location where this cluster's nodes and storage reside. For best\nperformance, clients should be located as close as possible to this\ncluster. Currently only zones are supported, so values should be of the\nform `projects/{project}/locations/{zone}`.", + "description": "Immutable. The location where this cluster's nodes and storage reside. For best performance, clients should be located as close as possible to this cluster. Currently only zones are supported, so values should be of the form `projects/{project}/locations/{zone}`.", "type": "string" }, "name": { - "description": "Required. (`OutputOnly`)\nThe unique name of the cluster. Values are of the form\n`projects/{project}/instances/{instance}/clusters/a-z*`.", + "description": "The unique name of the cluster. Values are of the form `projects/{project}/instances/{instance}/clusters/a-z*`.", "type": "string" }, "serveNodes": { - "description": "Required. The number of nodes allocated to this cluster. More nodes enable higher\nthroughput and more consistent performance.", + "description": "Required. The number of nodes allocated to this cluster. More nodes enable higher throughput and more consistent performance.", "format": "int32", "type": "integer" }, "state": { - "description": "(`OutputOnly`)\nThe current state of the cluster.", + "description": "Output only. 
The current state of the cluster.", "enum": [ "STATE_NOT_KNOWN", "READY", @@ -1696,10 +2007,11 @@ "enumDescriptions": [ "The state of the cluster could not be determined.", "The cluster has been successfully created and is ready to serve requests.", - "The cluster is currently being created, and may be destroyed\nif the creation process encounters an error.\nA cluster may not be able to serve requests while being created.", - "The cluster is currently being resized, and may revert to its previous\nnode count if the process encounters an error.\nA cluster is still capable of serving requests while being resized,\nbut may exhibit performance as if its number of allocated nodes is\nbetween the starting and requested states.", - "The cluster has no backing nodes. The data (tables) still\nexist, but no operations can be performed on the cluster." + "The cluster is currently being created, and may be destroyed if the creation process encounters an error. A cluster may not be able to serve requests while being created.", + "The cluster is currently being resized, and may revert to its previous node count if the process encounters an error. A cluster is still capable of serving requests while being resized, but may exhibit performance as if its number of allocated nodes is between the starting and requested states.", + "The cluster has no backing nodes. The data (tables) still exist, but no operations can be performed on the cluster." ], + "readOnly": true, "type": "string" } }, @@ -1716,15 +2028,18 @@ "INITIALIZING", "PLANNED_MAINTENANCE", "UNPLANNED_MAINTENANCE", - "READY" + "READY", + "READY_OPTIMIZING" ], "enumDescriptions": [ "The replication state of the table is unknown in this cluster.", - "The cluster was recently created, and the table must finish copying\nover pre-existing data from other clusters before it can begin\nreceiving live replication updates and serving Data API requests.", - "The table is temporarily unable to serve Data API requests from this\ncluster due to planned internal maintenance.", - "The table is temporarily unable to serve Data API requests from this\ncluster due to unplanned or emergency maintenance.", - "The table can serve Data API requests from this cluster. Depending on\nreplication delay, reads may not immediately reflect the state of the\ntable in other clusters." + "The cluster was recently created, and the table must finish copying over pre-existing data from other clusters before it can begin receiving live replication updates and serving Data API requests.", + "The table is temporarily unable to serve Data API requests from this cluster due to planned internal maintenance.", + "The table is temporarily unable to serve Data API requests from this cluster due to unplanned or emergency maintenance.", + "The table can serve Data API requests from this cluster. Depending on replication delay, reads may not immediately reflect the state of the table in other clusters.", + "The table is fully created and ready for use after a restore, and is being optimized for performance. When optimizations are complete, the table will transition to `READY` state." ], + "readOnly": true, "type": "string" } }, @@ -1736,7 +2051,32 @@ "properties": { "gcRule": { "$ref": "GcRule", - "description": "Garbage collection rule specified as a protobuf.\nMust serialize to at most 500 bytes.\n\nNOTE: Garbage collection executes opportunistically in the background, and\nso it's possible for reads to return a cell even if it matches the active\nGC expression for its family." 
+ "description": "Garbage collection rule specified as a protobuf. Must serialize to at most 500 bytes. NOTE: Garbage collection executes opportunistically in the background, and so it's possible for reads to return a cell even if it matches the active GC expression for its family." + } + }, + "type": "object" + }, + "CreateBackupMetadata": { + "description": "Metadata type for the operation returned by CreateBackup.", + "id": "CreateBackupMetadata", + "properties": { + "endTime": { + "description": "If set, the time at which this operation finished or was cancelled.", + "format": "google-datetime", + "type": "string" + }, + "name": { + "description": "The name of the backup being created.", + "type": "string" + }, + "sourceTable": { + "description": "The name of the table the backup is created from.", + "type": "string" + }, + "startTime": { + "description": "The time at which this operation started.", + "format": "google-datetime", + "type": "string" } }, "type": "object" @@ -1763,7 +2103,7 @@ "additionalProperties": { "$ref": "TableProgress" }, - "description": "Keys: the full `name` of each table that existed in the instance when\nCreateCluster was first called, i.e.\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/tables/\u003ctable\u003e`. Any table added\nto the instance by a later API call will be created in the new cluster by\nthat API call, not this one.\n\nValues: information on how much of a table's data has been copied to the\nnewly-created cluster so far.", + "description": "Keys: the full `name` of each table that existed in the instance when CreateCluster was first called, i.e. `projects//instances//tables/`. Any table added to the instance by a later API call will be created in the new cluster by that API call, not this one. Values: information on how much of a table's data has been copied to the newly-created cluster so far.", "type": "object" } }, @@ -1775,14 +2115,14 @@ "properties": { "cluster": { "$ref": "Cluster", - "description": "Required. The cluster to be created.\nFields marked `OutputOnly` must be left blank." + "description": "Required. The cluster to be created. Fields marked `OutputOnly` must be left blank." }, "clusterId": { - "description": "Required. The ID to be used when referring to the new cluster within its instance,\ne.g., just `mycluster` rather than\n`projects/myproject/instances/myinstance/clusters/mycluster`.", + "description": "Required. The ID to be used when referring to the new cluster within its instance, e.g., just `mycluster` rather than `projects/myproject/instances/myinstance/clusters/mycluster`.", "type": "string" }, "parent": { - "description": "Required. The unique name of the instance in which to create the new cluster.\nValues are of the form\n`projects/{project}/instances/{instance}`.", + "description": "Required. The unique name of the instance in which to create the new cluster. Values are of the form `projects/{project}/instances/{instance}`.", "type": "string" } }, @@ -1817,30 +2157,30 @@ "additionalProperties": { "$ref": "Cluster" }, - "description": "Required. The clusters to be created within the instance, mapped by desired\ncluster ID, e.g., just `mycluster` rather than\n`projects/myproject/instances/myinstance/clusters/mycluster`.\nFields marked `OutputOnly` must be left blank.\nCurrently, at most four clusters can be specified.", + "description": "Required. 
The clusters to be created within the instance, mapped by desired cluster ID, e.g., just `mycluster` rather than `projects/myproject/instances/myinstance/clusters/mycluster`. Fields marked `OutputOnly` must be left blank. Currently, at most four clusters can be specified.", "type": "object" }, "instance": { "$ref": "Instance", - "description": "Required. The instance to create.\nFields marked `OutputOnly` must be left blank." + "description": "Required. The instance to create. Fields marked `OutputOnly` must be left blank." }, "instanceId": { - "description": "Required. The ID to be used when referring to the new instance within its project,\ne.g., just `myinstance` rather than\n`projects/myproject/instances/myinstance`.", + "description": "Required. The ID to be used when referring to the new instance within its project, e.g., just `myinstance` rather than `projects/myproject/instances/myinstance`.", "type": "string" }, "parent": { - "description": "Required. The unique name of the project in which to create the new instance.\nValues are of the form `projects/{project}`.", + "description": "Required. The unique name of the project in which to create the new instance. Values are of the form `projects/{project}`.", "type": "string" } }, "type": "object" }, "CreateTableRequest": { - "description": "Request message for\ngoogle.bigtable.admin.v2.BigtableTableAdmin.CreateTable", + "description": "Request message for google.bigtable.admin.v2.BigtableTableAdmin.CreateTable", "id": "CreateTableRequest", "properties": { "initialSplits": { - "description": "The optional list of row keys that will be used to initially split the\ntable into several tablets (tablets are similar to HBase regions).\nGiven two split keys, `s1` and `s2`, three tablets will be created,\nspanning the key ranges: `[, s1), [s1, s2), [s2, )`.\n\nExample:\n\n* Row keys := `[\"a\", \"apple\", \"custom\", \"customer_1\", \"customer_2\",`\n `\"other\", \"zz\"]`\n* initial_split_keys := `[\"apple\", \"customer_1\", \"customer_2\", \"other\"]`\n* Key assignment:\n - Tablet 1 `[, apple) =\u003e {\"a\"}.`\n - Tablet 2 `[apple, customer_1) =\u003e {\"apple\", \"custom\"}.`\n - Tablet 3 `[customer_1, customer_2) =\u003e {\"customer_1\"}.`\n - Tablet 4 `[customer_2, other) =\u003e {\"customer_2\"}.`\n - Tablet 5 `[other, ) =\u003e {\"other\", \"zz\"}.`", + "description": "The optional list of row keys that will be used to initially split the table into several tablets (tablets are similar to HBase regions). Given two split keys, `s1` and `s2`, three tablets will be created, spanning the key ranges: `[, s1), [s1, s2), [s2, )`. Example: * Row keys := `[\"a\", \"apple\", \"custom\", \"customer_1\", \"customer_2\",` `\"other\", \"zz\"]` * initial_split_keys := `[\"apple\", \"customer_1\", \"customer_2\", \"other\"]` * Key assignment: - Tablet 1 `[, apple) =\u003e {\"a\"}.` - Tablet 2 `[apple, customer_1) =\u003e {\"apple\", \"custom\"}.` - Tablet 3 `[customer_1, customer_2) =\u003e {\"customer_1\"}.` - Tablet 4 `[customer_2, other) =\u003e {\"customer_2\"}.` - Tablet 5 `[other, ) =\u003e {\"other\", \"zz\"}.`", "items": { "$ref": "Split" }, @@ -1851,14 +2191,14 @@ "description": "Required. The Table to create." }, "tableId": { - "description": "Required. The name by which the new table should be referred to within the parent\ninstance, e.g., `foobar` rather than `{parent}/tables/foobar`.\nMaximum 50 characters.", + "description": "Required. 
The name by which the new table should be referred to within the parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. Maximum 50 characters.", "type": "string" } }, "type": "object" }, "DropRowRangeRequest": { - "description": "Request message for\ngoogle.bigtable.admin.v2.BigtableTableAdmin.DropRowRange", + "description": "Request message for google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange", "id": "DropRowRangeRequest", "properties": { "deleteAllDataFromTable": { @@ -1866,7 +2206,7 @@ "type": "boolean" }, "rowKeyPrefix": { - "description": "Delete all rows that start with this row key prefix. Prefix cannot be\nzero length.", + "description": "Delete all rows that start with this row key prefix. Prefix cannot be zero length.", "format": "byte", "type": "string" } @@ -1874,29 +2214,57 @@ "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" }, "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL)\nsyntax. CEL is a C-like expression language. The syntax and semantics of CEL\nare documented at https://github.com/google/cel-spec.\n\nExample (Comparison):\n\n title: \"Summary size limit\"\n description: \"Determines if a summary is less than 100 chars\"\n expression: \"document.summary.size() \u003c 100\"\n\nExample (Equality):\n\n title: \"Requestor is owner\"\n description: \"Determines if requestor is the document owner\"\n expression: \"document.owner == request.auth.claims.email\"\n\nExample (Logic):\n\n title: \"Public documents\"\n description: \"Determine whether the document should be publicly visible\"\n expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\n\nExample (Data Manipulation):\n\n title: \"Notification string\"\n description: \"Create a notification string with a timestamp.\"\n expression: \"'New message received at ' + string(document.create_time)\"\n\nThe exact variables and functions that may be referenced within an expression\nare determined by the service that evaluates it. See the service\ndocumentation for additional information.", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. 
Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { - "description": "Optional. Description of the expression. This is a longer text which\ndescribes the expression, e.g. when hovered over it in a UI.", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, "expression": { - "description": "Textual representation of an expression in Common Expression Language\nsyntax.", + "description": "Textual representation of an expression in Common Expression Language syntax.", "type": "string" }, "location": { - "description": "Optional. String indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", "type": "string" }, "title": { - "description": "Optional. Title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", + "type": "string" + } + }, + "type": "object" + }, + "FailureTrace": { + "description": "Added to the error payload.", + "id": "FailureTrace", + "properties": { + "frames": { + "items": { + "$ref": "Frame" + }, + "type": "array" + } + }, + "type": "object" + }, + "Frame": { + "id": "Frame", + "properties": { + "targetName": { + "type": "string" + }, + "workflowGuid": { + "type": "string" + }, + "zoneId": { "type": "string" } }, @@ -1911,7 +2279,7 @@ "description": "Delete cells that would be deleted by every nested rule." }, "maxAge": { - "description": "Delete cells in a column older than the given age.\nValues must be at least one millisecond, and will be truncated to\nmicrosecond granularity.", + "description": "Delete cells in a column older than the given age. 
Values must be at least one millisecond, and will be truncated to microsecond granularity.", "format": "google-duration", "type": "string" }, @@ -1928,13 +2296,13 @@ "type": "object" }, "GenerateConsistencyTokenRequest": { - "description": "Request message for\ngoogle.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken", + "description": "Request message for google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken", "id": "GenerateConsistencyTokenRequest", "properties": {}, "type": "object" }, "GenerateConsistencyTokenResponse": { - "description": "Response message for\ngoogle.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken", + "description": "Response message for google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken", "id": "GenerateConsistencyTokenResponse", "properties": { "consistencyToken": { @@ -1950,7 +2318,7 @@ "properties": { "options": { "$ref": "GetPolicyOptions", - "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to\n`GetIamPolicy`. This field is only used by Cloud IAM." + "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to `GetIamPolicy`." } }, "type": "object" @@ -1960,7 +2328,7 @@ "id": "GetPolicyOptions", "properties": { "requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -1968,26 +2336,26 @@ "type": "object" }, "Instance": { - "description": "A collection of Bigtable Tables and\nthe resources that serve them.\nAll tables in an instance are served from all\nClusters in the instance.", + "description": "A collection of Bigtable Tables and the resources that serve them. All tables in an instance are served from all Clusters in the instance.", "id": "Instance", "properties": { "displayName": { - "description": "Required. The descriptive name for this instance as it appears in UIs.\nCan be changed at any time, but should be kept globally unique\nto avoid confusion.", + "description": "Required. The descriptive name for this instance as it appears in UIs. Can be changed at any time, but should be kept globally unique to avoid confusion.", "type": "string" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "Required. Labels are a flexible and lightweight mechanism for organizing cloud\nresources into groups that reflect a customer's organizational needs and\ndeployment strategies. 
They can be used to filter resources and aggregate\nmetrics.\n\n* Label keys must be between 1 and 63 characters long and must conform to\n the regular expression: `\\p{Ll}\\p{Lo}{0,62}`.\n* Label values must be between 0 and 63 characters long and must conform to\n the regular expression: `[\\p{Ll}\\p{Lo}\\p{N}_-]{0,63}`.\n* No more than 64 labels can be associated with a given resource.\n* Keys and values must both be under 128 bytes.", + "description": "Required. Labels are a flexible and lightweight mechanism for organizing cloud resources into groups that reflect a customer's organizational needs and deployment strategies. They can be used to filter resources and aggregate metrics. * Label keys must be between 1 and 63 characters long and must conform to the regular expression: `\\p{Ll}\\p{Lo}{0,62}`. * Label values must be between 0 and 63 characters long and must conform to the regular expression: `[\\p{Ll}\\p{Lo}\\p{N}_-]{0,63}`. * No more than 64 labels can be associated with a given resource. * Keys and values must both be under 128 bytes.", "type": "object" }, "name": { - "description": "Required. (`OutputOnly`)\nThe unique name of the instance. Values are of the form\n`projects/{project}/instances/a-z+[a-z0-9]`.", + "description": "The unique name of the instance. Values are of the form `projects/{project}/instances/a-z+[a-z0-9]`.", "type": "string" }, "state": { - "description": "(`OutputOnly`)\nThe current state of the instance.", + "description": "Output only. The current state of the instance.", "enum": [ "STATE_NOT_KNOWN", "READY", @@ -1995,9 +2363,10 @@ ], "enumDescriptions": [ "The state of the instance could not be determined.", - "The instance has been successfully created and can serve requests\nto its tables.", - "The instance is currently being created, and may be destroyed\nif the creation process encounters an error." + "The instance has been successfully created and can serve requests to its tables.", + "The instance is currently being created, and may be destroyed if the creation process encounters an error." ], + "readOnly": true, "type": "string" }, "type": { @@ -2008,9 +2377,9 @@ "DEVELOPMENT" ], "enumDescriptions": [ - "The type of the instance is unspecified. If set when creating an\ninstance, a `PRODUCTION` instance will be created. If set when updating\nan instance, the type will be left unchanged.", - "An instance meant for production use. `serve_nodes` must be set\non the cluster.", - "DEPRECATED: Prefer PRODUCTION for all use cases, as it no longer enforces\na higher minimum node count than DEVELOPMENT." + "The type of the instance is unspecified. If set when creating an instance, a `PRODUCTION` instance will be created. If set when updating an instance, the type will be left unchanged.", + "An instance meant for production use. `serve_nodes` must be set on the cluster.", + "DEPRECATED: Prefer PRODUCTION for all use cases, as it no longer enforces a higher minimum node count than DEVELOPMENT." ], "type": "string" } @@ -2043,14 +2412,32 @@ "type": "array" }, "failedLocations": { - "description": "Locations from which AppProfile information could not be retrieved,\ndue to an outage or some other transient condition.\nAppProfiles from these locations may be missing from `app_profiles`.\nValues are of the form `projects/\u003cproject\u003e/locations/\u003czone_id\u003e`", + "description": "Locations from which AppProfile information could not be retrieved, due to an outage or some other transient condition. 
AppProfiles from these locations may be missing from `app_profiles`. Values are of the form `projects//locations/`", "items": { "type": "string" }, "type": "array" }, "nextPageToken": { - "description": "Set if not all app profiles could be returned in a single response.\nPass this value to `page_token` in another request to get the next\npage of results.", + "description": "Set if not all app profiles could be returned in a single response. Pass this value to `page_token` in another request to get the next page of results.", + "type": "string" + } + }, + "type": "object" + }, + "ListBackupsResponse": { + "description": "The response for ListBackups.", + "id": "ListBackupsResponse", + "properties": { + "backups": { + "description": "The list of matching backups.", + "items": { + "$ref": "Backup" + }, + "type": "array" + }, + "nextPageToken": { + "description": "`next_page_token` can be sent in a subsequent ListBackups call to fetch more of the matching backups.", "type": "string" } }, @@ -2068,7 +2455,7 @@ "type": "array" }, "failedLocations": { - "description": "Locations from which Cluster information could not be retrieved,\ndue to an outage or some other transient condition.\nClusters from these locations may be missing from `clusters`,\nor may only have partial information returned.\nValues are of the form `projects/\u003cproject\u003e/locations/\u003czone_id\u003e`", + "description": "Locations from which Cluster information could not be retrieved, due to an outage or some other transient condition. Clusters from these locations may be missing from `clusters`, or may only have partial information returned. Values are of the form `projects//locations/`", "items": { "type": "string" }, @@ -2086,7 +2473,7 @@ "id": "ListInstancesResponse", "properties": { "failedLocations": { - "description": "Locations from which Instance information could not be retrieved,\ndue to an outage or some other transient condition.\nInstances whose Clusters are all in one of the failed locations\nmay be missing from `instances`, and Instances with at least one\nCluster in a failed location may only have partial information returned.\nValues are of the form `projects/\u003cproject\u003e/locations/\u003czone_id\u003e`", + "description": "Locations from which Instance information could not be retrieved, due to an outage or some other transient condition. Instances whose Clusters are all in one of the failed locations may be missing from `instances`, and Instances with at least one Cluster in a failed location may only have partial information returned. Values are of the form `projects//locations/`", "items": { "type": "string" }, @@ -2143,11 +2530,11 @@ "type": "object" }, "ListTablesResponse": { - "description": "Response message for\ngoogle.bigtable.admin.v2.BigtableTableAdmin.ListTables", + "description": "Response message for google.bigtable.admin.v2.BigtableTableAdmin.ListTables", "id": "ListTablesResponse", "properties": { "nextPageToken": { - "description": "Set if not all tables could be returned in a single response.\nPass this value to `page_token` in another request to get the next\npage of results.", + "description": "Set if not all tables could be returned in a single response. 
Pass this value to `page_token` in another request to get the next page of results.", "type": "string" }, "tables": { @@ -2165,14 +2552,14 @@ "id": "Location", "properties": { "displayName": { - "description": "The friendly name for this location, typically a nearby city name.\nFor example, \"Tokyo\".", + "description": "The friendly name for this location, typically a nearby city name. For example, \"Tokyo\".", "type": "string" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "Cross-service attributes for the location. For example\n\n {\"cloud.googleapis.com/region\": \"us-east1\"}", + "description": "Cross-service attributes for the location. For example {\"cloud.googleapis.com/region\": \"us-east1\"}", "type": "object" }, "locationId": { @@ -2184,11 +2571,11 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata. For example the available capacity at the given\nlocation.", + "description": "Service-specific metadata. For example the available capacity at the given location.", "type": "object" }, "name": { - "description": "Resource name for the location, which may vary between implementations.\nFor example: `\"projects/example-project/locations/us-east1\"`", + "description": "Resource name for the location, which may vary between implementations. For example: `\"projects/example-project/locations/us-east1\"`", "type": "string" } }, @@ -2200,10 +2587,10 @@ "properties": { "create": { "$ref": "ColumnFamily", - "description": "Create a new column family with the specified schema, or fail if\none already exists with the given ID." + "description": "Create a new column family with the specified schema, or fail if one already exists with the given ID." }, "drop": { - "description": "Drop (delete) the column family with the given ID, or fail if no such\nfamily exists.", + "description": "Drop (delete) the column family with the given ID, or fail if no such family exists.", "type": "boolean" }, "id": { @@ -2212,17 +2599,17 @@ }, "update": { "$ref": "ColumnFamily", - "description": "Update an existing column family to the specified schema, or fail\nif no column family exists with the given ID." + "description": "Update an existing column family to the specified schema, or fail if no column family exists with the given ID." } }, "type": "object" }, "ModifyColumnFamiliesRequest": { - "description": "Request message for\ngoogle.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies", + "description": "Request message for google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies", "id": "ModifyColumnFamiliesRequest", "properties": { "modifications": { - "description": "Required. Modifications to be atomically applied to the specified table's families.\nEntries are applied in order, meaning that earlier modifications can be\nmasked by later ones (in the case of repeated updates to the same family,\nfor example).", + "description": "Required. Modifications to be atomically applied to the specified table's families. Entries are applied in order, meaning that earlier modifications can be masked by later ones (in the case of repeated updates to the same family, for example).", "items": { "$ref": "Modification" }, @@ -2232,17 +2619,17 @@ "type": "object" }, "MultiClusterRoutingUseAny": { - "description": "Read/write requests are routed to the nearest cluster in the instance, and\nwill fail over to the nearest cluster that is available in the event of\ntransient errors or delays. 
Clusters in a region are considered\nequidistant. Choosing this option sacrifices read-your-writes consistency\nto improve availability.", + "description": "Read/write requests are routed to the nearest cluster in the instance, and will fail over to the nearest cluster that is available in the event of transient errors or delays. Clusters in a region are considered equidistant. Choosing this option sacrifices read-your-writes consistency to improve availability.", "id": "MultiClusterRoutingUseAny", "properties": {}, "type": "object" }, "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", "properties": { "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", "type": "boolean" }, "error": { @@ -2254,11 +2641,11 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", "type": "object" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, "response": { @@ -2266,12 +2653,49 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. 
For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", "type": "object" } }, "type": "object" }, + "OperationProgress": { + "description": "Encapsulates progress related information for a Cloud Bigtable long running operation.", + "id": "OperationProgress", + "properties": { + "endTime": { + "description": "If set, the time at which this operation failed or was completed successfully.", + "format": "google-datetime", + "type": "string" + }, + "progressPercent": { + "description": "Percent completion of the operation. Values are between 0 and 100 inclusive.", + "format": "int32", + "type": "integer" + }, + "startTime": { + "description": "Time the request was received.", + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, + "OptimizeRestoredTableMetadata": { + "description": "Metadata type for the long-running operation used to track the progress of optimizations performed on a newly restored table. This long-running operation is automatically created by the system after the successful completion of a table restore, and cannot be cancelled.", + "id": "OptimizeRestoredTableMetadata", + "properties": { + "name": { + "description": "Name of the restored table being optimized.", + "type": "string" + }, + "progress": { + "$ref": "OperationProgress", + "description": "The progress of the post-restore optimizations." + } + }, + "type": "object" + }, "PartialUpdateInstanceRequest": { "description": "Request message for BigtableInstanceAdmin.PartialUpdateInstance.", "id": "PartialUpdateInstanceRequest", @@ -2281,7 +2705,7 @@ "description": "Required. The Instance which will (partially) replace the current value." }, "updateMask": { - "description": "Required. The subset of Instance fields which should be replaced.\nMust be explicitly set.", + "description": "Required. The subset of Instance fields which should be replaced. Must be explicitly set.", "format": "google-fieldmask", "type": "string" } @@ -2289,7 +2713,7 @@ "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access\ncontrols for Google Cloud resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). A `role` is a named list of\npermissions; each `role` can be an IAM predefined role or a user-created\ncustom role.\n\nOptionally, a `binding` can specify a `condition`, which is a logical\nexpression that allows access to a resource only if the expression evaluates\nto `true`. 
A condition can add constraints based on attributes of the\nrequest, the resource, or both.\n\n**JSON example:**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\"user:eve@example.com\"],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n }\n\n**YAML example:**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n - etag: BwWWja0YfJA=\n - version: 3\n\nFor a description of IAM and its features, see the\n[IAM documentation](https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "auditConfigs": { @@ -2300,35 +2724,107 @@ "type": "array" }, "bindings": { - "description": "Associates a list of `members` to a `role`. Optionally, may specify a\n`condition` that determines how and when the `bindings` are applied. Each\nof the `bindings` must contain at least one member.", + "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", "format": "byte", "type": "string" }, "version": { - "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. 
Requests that specify an invalid value\nare rejected.\n\nAny operation that affects conditional role bindings must specify version\n`3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding\n* Adding a conditional role binding to a policy\n* Changing a conditional role binding in a policy\n* Removing any role binding, with or without a condition, from a policy\n that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may\nspecify any valid version or leave the field unset.", + "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } }, "type": "object" }, + "RestoreInfo": { + "description": "Information about a table restore.", + "id": "RestoreInfo", + "properties": { + "backupInfo": { + "$ref": "BackupInfo", + "description": "Information about the backup used to restore the table. The backup may no longer exist." + }, + "sourceType": { + "description": "The type of the restore source.", + "enum": [ + "RESTORE_SOURCE_TYPE_UNSPECIFIED", + "BACKUP" + ], + "enumDescriptions": [ + "No restore associated.", + "A backup was used as the source of the restore." + ], + "type": "string" + } + }, + "type": "object" + }, + "RestoreTableMetadata": { + "description": "Metadata type for the long-running operation returned by RestoreTable.", + "id": "RestoreTableMetadata", + "properties": { + "backupInfo": { + "$ref": "BackupInfo" + }, + "name": { + "description": "Name of the table being created and restored to.", + "type": "string" + }, + "optimizeTableOperationName": { + "description": "If exists, the name of the long-running operation that will be used to track the post-restore optimization process to optimize the performance of the restored table. The metadata type of the long-running operation is OptimizeRestoreTableMetadata. The response type is Empty. This long-running operation may be automatically created by the system if applicable after the RestoreTable long-running operation completes successfully. 
This operation may not be created if the table is already optimized or the restore was not successful.", + "type": "string" + }, + "progress": { + "$ref": "OperationProgress", + "description": "The progress of the RestoreTable operation." + }, + "sourceType": { + "description": "The type of the restore source.", + "enum": [ + "RESTORE_SOURCE_TYPE_UNSPECIFIED", + "BACKUP" + ], + "enumDescriptions": [ + "No restore associated.", + "A backup was used as the source of the restore." + ], + "type": "string" + } + }, + "type": "object" + }, + "RestoreTableRequest": { + "description": "The request for RestoreTable.", + "id": "RestoreTableRequest", + "properties": { + "backup": { + "description": "Name of the backup from which to restore. Values are of the form `projects//instances//clusters//backups/`.", + "type": "string" + }, + "tableId": { + "description": "Required. The id of the table to create and restore to. This table must not already exist. The `table_id` appended to `parent` forms the full table name of the form `projects//instances//tables/`.", + "type": "string" + } + }, + "type": "object" + }, "SetIamPolicyRequest": { "description": "Request message for `SetIamPolicy` method.", "id": "SetIamPolicyRequest", "properties": { "policy": { "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." }, "updateMask": { - "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. If no mask is provided, the\nfollowing default mask is used:\npaths: \"bindings, etag\"\nThis field is only used by Cloud IAM.", + "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: \"bindings, etag\"`", "format": "google-fieldmask", "type": "string" } @@ -2336,11 +2832,11 @@ "type": "object" }, "SingleClusterRouting": { - "description": "Unconditionally routes all read/write requests to a specific cluster.\nThis option preserves read-your-writes consistency but does not improve\navailability.", + "description": "Unconditionally routes all read/write requests to a specific cluster. This option preserves read-your-writes consistency but does not improve availability.", "id": "SingleClusterRouting", "properties": { "allowTransactionalWrites": { - "description": "Whether or not `CheckAndMutateRow` and `ReadModifyWriteRow` requests are\nallowed by this app profile. It is unsafe to send these requests to\nthe same table/row/column in multiple clusters.", + "description": "Whether or not `CheckAndMutateRow` and `ReadModifyWriteRow` requests are allowed by this app profile. It is unsafe to send these requests to the same table/row/column in multiple clusters.", "type": "boolean" }, "clusterId": { @@ -2363,7 +2859,7 @@ "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). 
Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -2372,7 +2868,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -2383,45 +2879,51 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, "type": "object" }, "Table": { - "description": "A collection of user data indexed by row, column, and timestamp.\nEach table is served using the resources of its parent cluster.", + "description": "A collection of user data indexed by row, column, and timestamp. Each table is served using the resources of its parent cluster.", "id": "Table", "properties": { "clusterStates": { "additionalProperties": { "$ref": "ClusterState" }, - "description": "Output only. Map from cluster ID to per-cluster table state.\nIf it could not be determined whether or not the table has data in a\nparticular cluster (for example, if its zone is unavailable), then\nthere will be an entry for the cluster with UNKNOWN `replication_status`.\nViews: `REPLICATION_VIEW`, `FULL`", + "description": "Output only. Map from cluster ID to per-cluster table state. If it could not be determined whether or not the table has data in a particular cluster (for example, if its zone is unavailable), then there will be an entry for the cluster with UNKNOWN `replication_status`. Views: `REPLICATION_VIEW`, `FULL`", + "readOnly": true, "type": "object" }, "columnFamilies": { "additionalProperties": { "$ref": "ColumnFamily" }, - "description": "(`CreationOnly`)\nThe column families configured for this table, mapped by column family ID.\nViews: `SCHEMA_VIEW`, `FULL`", + "description": "The column families configured for this table, mapped by column family ID. Views: `SCHEMA_VIEW`, `FULL`", "type": "object" }, "granularity": { - "description": "(`CreationOnly`)\nThe granularity (i.e. `MILLIS`) at which timestamps are stored in\nthis table. Timestamps not matching the granularity will be rejected.\nIf unspecified at creation time, the value will be set to `MILLIS`.\nViews: `SCHEMA_VIEW`, `FULL`.", + "description": "Immutable. The granularity (i.e. `MILLIS`) at which timestamps are stored in this table. 
Timestamps not matching the granularity will be rejected. If unspecified at creation time, the value will be set to `MILLIS`. Views: `SCHEMA_VIEW`, `FULL`.", "enum": [ "TIMESTAMP_GRANULARITY_UNSPECIFIED", "MILLIS" ], "enumDescriptions": [ - "The user did not specify a granularity. Should not be returned.\nWhen specified during table creation, MILLIS will be used.", + "The user did not specify a granularity. Should not be returned. When specified during table creation, MILLIS will be used.", "The table keeps data versioned at a granularity of 1ms." ], "type": "string" }, "name": { - "description": "Output only. The unique name of the table. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/tables/_a-zA-Z0-9*`.\nViews: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL`", + "description": "The unique name of the table. Values are of the form `projects/{project}/instances/{instance}/tables/_a-zA-Z0-9*`. Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL`", "type": "string" + }, + "restoreInfo": { + "$ref": "RestoreInfo", + "description": "Output only. If this table was restored from another data source (e.g. a backup), this field will be populated with information about the restore.", + "readOnly": true } }, "type": "object" @@ -2431,7 +2933,7 @@ "id": "TableProgress", "properties": { "estimatedCopiedBytes": { - "description": "Estimate of the number of bytes copied so far for this table.\nThis will eventually reach 'estimated_size_bytes' unless the table copy\nis CANCELLED.", + "description": "Estimate of the number of bytes copied so far for this table. This will eventually reach 'estimated_size_bytes' unless the table copy is CANCELLED.", "format": "int64", "type": "string" }, @@ -2453,7 +2955,7 @@ "The table has not yet begun copying to the new cluster.", "The table is actively being copied to the new cluster.", "The table has been fully copied to the new cluster.", - "The table was deleted before it finished copying to the new cluster.\nNote that tables deleted after completion will stay marked as\nCOMPLETED, not CANCELLED." + "The table was deleted before it finished copying to the new cluster. Note that tables deleted after completion will stay marked as COMPLETED, not CANCELLED." ], "type": "string" } @@ -2465,7 +2967,7 @@ "id": "TestIamPermissionsRequest", "properties": { "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. 
For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "items": { "type": "string" }, @@ -2479,7 +2981,7 @@ "id": "TestIamPermissionsResponse", "properties": { "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { "type": "string" }, diff --git a/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-gen.go b/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-gen.go index 5635077cc65..94004ff280c 100644 --- a/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-gen.go +++ b/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-gen.go @@ -79,6 +79,7 @@ const apiId = "bigtableadmin:v2" const apiName = "bigtableadmin" const apiVersion = "v2" const basePath = "https://bigtableadmin.googleapis.com/" +const mtlsBasePath = "https://bigtableadmin.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -126,6 +127,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -287,36 +289,27 @@ type ProjectsLocationsService struct { } // AppProfile: A configuration object describing how Cloud Bigtable -// should treat traffic -// from a particular end user application. +// should treat traffic from a particular end user application. type AppProfile struct { - // Description: Optional long form description of the use case for this + // Description: Long form description of the use case for this // AppProfile. Description string `json:"description,omitempty"` // Etag: Strongly validated etag for optimistic concurrency control. - // Preserve the - // value returned from `GetAppProfile` when calling `UpdateAppProfile` - // to - // fail the request if there has been a modification in the mean time. - // The - // `update_mask` of the request need not include `etag` for this - // protection - // to apply. - // See [Wikipedia](https://en.wikipedia.org/wiki/HTTP_ETag) and - // [RFC 7232](https://tools.ietf.org/html/rfc7232#section-2.3) for - // more + // Preserve the value returned from `GetAppProfile` when calling + // `UpdateAppProfile` to fail the request if there has been a + // modification in the mean time. The `update_mask` of the request need + // not include `etag` for this protection to apply. See + // [Wikipedia](https://en.wikipedia.org/wiki/HTTP_ETag) and [RFC + // 7232](https://tools.ietf.org/html/rfc7232#section-2.3) for more // details. Etag string `json:"etag,omitempty"` // MultiClusterRoutingUseAny: Use a multi-cluster routing policy. MultiClusterRoutingUseAny *MultiClusterRoutingUseAny `json:"multiClusterRoutingUseAny,omitempty"` - // Name: (`OutputOnly`) - // The unique name of the app profile. Values are of the - // form - // `projects//instances//appProfiles/_a-zA-Z0-9*` - // . + // Name: The unique name of the app profile. Values are of the form + // `projects/{project}/instances/{instance}/appProfiles/_a-zA-Z0-9*`. Name string `json:"name,omitempty"` // SingleClusterRouting: Use a single-cluster routing policy. 
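The regenerated descriptions above spell out the etag-based read-modify-write cycle for `getIamPolicy`/`setIamPolicy`, and the `NewService` hunk wires in a default mTLS endpoint. A minimal sketch of that flow against this generated client, assuming an illustrative instance resource name and role (both hypothetical, not taken from this diff), might look like:

package main

import (
	"context"
	"log"

	bigtableadmin "google.golang.org/api/bigtableadmin/v2"
)

func main() {
	ctx := context.Background()

	// NewService now also registers the default mTLS endpoint added in this change.
	svc, err := bigtableadmin.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical instance resource name.
	resource := "projects/my-project/instances/my-instance"

	// Read the current policy; the returned etag is carried in the Policy struct
	// and must be echoed back on update so concurrent modifications are rejected
	// instead of silently overwritten.
	policy, err := svc.Projects.Instances.GetIamPolicy(resource, &bigtableadmin.GetIamPolicyRequest{}).Do()
	if err != nil {
		log.Fatal(err)
	}

	policy.Bindings = append(policy.Bindings, &bigtableadmin.Binding{
		Role:    "roles/bigtable.user", // illustrative role
		Members: []string{"user:eve@example.com"},
	})

	// Write back with the etag preserved (read-modify-write cycle).
	if _, err := svc.Projects.Instances.SetIamPolicy(resource, &bigtableadmin.SetIamPolicyRequest{Policy: policy}).Do(); err != nil {
		log.Fatal(err)
	}
}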
@@ -349,72 +342,31 @@ func (s *AppProfile) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AuditConfig: Specifies the audit configuration for a service. -// The configuration determines which permission types are logged, and -// what -// identities, if any, are exempted from logging. -// An AuditConfig must have one or more AuditLogConfigs. -// -// If there are AuditConfigs for both `allServices` and a specific -// service, -// the union of the two AuditConfigs is used for that service: the -// log_types -// specified in each AuditConfig are enabled, and the exempted_members -// in each -// AuditLogConfig are exempted. -// -// Example Policy with multiple AuditConfigs: -// -// { -// "audit_configs": [ -// { -// "service": "allServices" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// }, -// { -// "log_type": "ADMIN_READ", -// } -// ] -// }, -// { -// "service": "sampleservice.googleapis.com" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// }, -// { -// "log_type": "DATA_WRITE", -// "exempted_members": [ -// "user:aliya@example.com" -// ] -// } -// ] -// } -// ] -// } -// -// For sampleservice, this policy enables DATA_READ, DATA_WRITE and -// ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, -// and -// aliya@example.com from DATA_WRITE logging. +// AuditConfig: Specifies the audit configuration for a service. The +// configuration determines which permission types are logged, and what +// identities, if any, are exempted from logging. An AuditConfig must +// have one or more AuditLogConfigs. If there are AuditConfigs for both +// `allServices` and a specific service, the union of the two +// AuditConfigs is used for that service: the log_types specified in +// each AuditConfig are enabled, and the exempted_members in each +// AuditLogConfig are exempted. Example Policy with multiple +// AuditConfigs: { "audit_configs": [ { "service": "allServices", +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": +// [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { +// "log_type": "ADMIN_READ" } ] }, { "service": +// "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": +// "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ +// "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy +// enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts +// jose@example.com from DATA_READ logging, and aliya@example.com from +// DATA_WRITE logging. type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of // permission. AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` - // Service: Specifies a service that will be enabled for audit - // logging. - // For example, `storage.googleapis.com`, - // `cloudsql.googleapis.com`. + // Service: Specifies a service that will be enabled for audit logging. + // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. // `allServices` is a special value that covers all services. Service string `json:"service,omitempty"` @@ -443,31 +395,15 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { } // AuditLogConfig: Provides the configuration for logging a type of -// permissions. 
-// Example: -// -// { -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// } -// ] -// } -// -// This enables 'DATA_READ' and 'DATA_WRITE' logging, while -// exempting -// jose@example.com from DATA_READ logging. +// permissions. Example: { "audit_log_configs": [ { "log_type": +// "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { +// "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and +// 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ +// logging. type AuditLogConfig struct { // ExemptedMembers: Specifies the identities that do not cause logging - // for this type of - // permission. - // Follows the same format of Binding.members. + // for this type of permission. Follows the same format of + // Binding.members. ExemptedMembers []string `json:"exemptedMembers,omitempty"` // LogType: The log type that this config enables. @@ -503,85 +439,169 @@ func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Backup: A backup of a Cloud Bigtable table. +type Backup struct { + // EndTime: Output only. `end_time` is the time that the backup was + // finished. The row data in the backup will be no newer than this + // timestamp. + EndTime string `json:"endTime,omitempty"` + + // ExpireTime: Required. The expiration time of the backup, with + // microseconds granularity that must be at least 6 hours and at most 30 + // days from the time the request is received. Once the `expire_time` + // has passed, Cloud Bigtable will delete the backup and free the + // resources used by the backup. + ExpireTime string `json:"expireTime,omitempty"` + + // Name: A globally unique identifier for the backup which cannot be + // changed. Values are of the form + // `projects/{project}/instances/{instance}/clusters/{cluster}/ + // backups/_a-zA-Z0-9*` The final segment of the name must be between 1 + // and 50 characters in length. The backup is stored in the cluster + // identified by the prefix of the backup name of the form + // `projects/{project}/instances/{instance}/clusters/{cluster}`. + Name string `json:"name,omitempty"` + + // SizeBytes: Output only. Size of the backup in bytes. + SizeBytes int64 `json:"sizeBytes,omitempty,string"` + + // SourceTable: Required. Immutable. Name of the table from which this + // backup was created. This needs to be in the same instance as the + // backup. Values are of the form + // `projects/{project}/instances/{instance}/tables/{source_table}`. + SourceTable string `json:"sourceTable,omitempty"` + + // StartTime: Output only. `start_time` is the time that the backup was + // started (i.e. approximately the time the CreateBackup request is + // received). The row data in this backup will be no older than this + // timestamp. + StartTime string `json:"startTime,omitempty"` + + // State: Output only. The current state of the backup. + // + // Possible values: + // "STATE_UNSPECIFIED" - Not specified. + // "CREATING" - The pending backup is still being created. Operations + // on the backup may fail with `FAILED_PRECONDITION` in this state. + // "READY" - The backup is complete and ready for use. + State string `json:"state,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. 
"EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Backup) MarshalJSON() ([]byte, error) { + type NoMethod Backup + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackupInfo: Information about a backup. +type BackupInfo struct { + // Backup: Output only. Name of the backup. + Backup string `json:"backup,omitempty"` + + // EndTime: Output only. This time that the backup was finished. Row + // data in the backup will be no newer than this timestamp. + EndTime string `json:"endTime,omitempty"` + + // SourceTable: Output only. Name of the table the backup was created + // from. + SourceTable string `json:"sourceTable,omitempty"` + + // StartTime: Output only. The time that the backup was started. Row + // data in the backup will be no older than this timestamp. + StartTime string `json:"startTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Backup") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Backup") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackupInfo) MarshalJSON() ([]byte, error) { + type NoMethod BackupInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: The condition that is associated with this binding. - // NOTE: An unsatisfied condition will not allow user access via - // current - // binding. Different bindings, including their conditions, are - // examined - // independently. + // Condition: The condition that is associated with this binding. If the + // condition evaluates to `true`, then this binding applies to the + // current request. If the condition evaluates to `false`, then this + // binding does not apply to the current request. However, a different + // role binding might grant the same role to one or more of the members + // in this binding. 
To learn which resources support conditions in their + // IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud - // Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@example.com` . - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. - // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a user that has been recently deleted. - // For - // example, `alice@example.com?uid=123456789012345678901`. If the - // user is - // recovered, this value reverts to `user:{emailid}` and the - // recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address - // (plus - // unique identifier) representing a service account that has been - // recently - // deleted. For example, - // + // Platform resource. `members` can have the following values: * + // `allUsers`: A special identifier that represents anyone who is on the + // internet; with or without a Google account. * + // `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. * + // `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . * + // `serviceAccount:{emailid}`: An email address that represents a + // service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An + // email address that represents a Google group. For example, + // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An + // email address (plus unique identifier) representing a user that has + // been recently deleted. For example, + // `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered + // user retains the role in the binding. * + // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account - // retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a Google group that has been recently - // deleted. For example, - // `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` - // and the - // recovered group retains the role in the binding. 
- // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. - // - // + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains + // the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: + // An email address (plus unique identifier) representing a Google group + // that has been recently deleted. For example, + // `admins@example.com?uid=123456789012345678901`. If the group is + // recovered, this value reverts to `group:{emailid}` and the recovered + // group retains the role in the binding. * `domain:{domain}`: The G + // Suite domain (primary) that represents all the users of that domain. + // For example, `google.com` or `example.com`. Members []string `json:"members,omitempty"` - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Role: Role that is assigned to `members`. For example, + // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` // ForceSendFields is a list of field names (e.g. "Condition") to @@ -607,8 +627,7 @@ func (s *Binding) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// CheckConsistencyRequest: Request message -// for +// CheckConsistencyRequest: Request message for // google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency type CheckConsistencyRequest struct { // ConsistencyToken: Required. The token created using @@ -639,13 +658,12 @@ func (s *CheckConsistencyRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// CheckConsistencyResponse: Response message -// for +// CheckConsistencyResponse: Response message for // google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency type CheckConsistencyResponse struct { // Consistent: True only if the token is consistent. A token is - // consistent if replication - // has caught up with the restrictions specified in the request. + // consistent if replication has caught up with the restrictions + // specified in the request. Consistent bool `json:"consistent,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -676,13 +694,11 @@ func (s *CheckConsistencyResponse) MarshalJSON() ([]byte, error) { } // Cluster: A resizable group of nodes in a particular cloud location, -// capable -// of serving all Tables in the parent -// Instance. +// capable of serving all Tables in the parent Instance. type Cluster struct { - // DefaultStorageType: (`CreationOnly`) - // The type of storage used by this cluster to serve its - // parent instance's tables, unless explicitly overridden. + // DefaultStorageType: Immutable. The type of storage used by this + // cluster to serve its parent instance's tables, unless explicitly + // overridden. // // Possible values: // "STORAGE_TYPE_UNSPECIFIED" - The user did not specify a storage @@ -691,29 +707,22 @@ type Cluster struct { // "HDD" - Magnetic drive (HDD) storage should be used. DefaultStorageType string `json:"defaultStorageType,omitempty"` - // Location: (`CreationOnly`) - // The location where this cluster's nodes and storage reside. For - // best - // performance, clients should be located as close as possible to - // this - // cluster. 
Currently only zones are supported, so values should be of - // the - // form `projects/{project}/locations/{zone}`. + // Location: Immutable. The location where this cluster's nodes and + // storage reside. For best performance, clients should be located as + // close as possible to this cluster. Currently only zones are + // supported, so values should be of the form + // `projects/{project}/locations/{zone}`. Location string `json:"location,omitempty"` - // Name: Required. (`OutputOnly`) - // The unique name of the cluster. Values are of the - // form + // Name: The unique name of the cluster. Values are of the form // `projects/{project}/instances/{instance}/clusters/a-z*`. Name string `json:"name,omitempty"` // ServeNodes: Required. The number of nodes allocated to this cluster. - // More nodes enable higher - // throughput and more consistent performance. + // More nodes enable higher throughput and more consistent performance. ServeNodes int64 `json:"serveNodes,omitempty"` - // State: (`OutputOnly`) - // The current state of the cluster. + // State: Output only. The current state of the cluster. // // Possible values: // "STATE_NOT_KNOWN" - The state of the cluster could not be @@ -721,20 +730,15 @@ type Cluster struct { // "READY" - The cluster has been successfully created and is ready to // serve requests. // "CREATING" - The cluster is currently being created, and may be - // destroyed - // if the creation process encounters an error. - // A cluster may not be able to serve requests while being created. + // destroyed if the creation process encounters an error. A cluster may + // not be able to serve requests while being created. // "RESIZING" - The cluster is currently being resized, and may revert - // to its previous - // node count if the process encounters an error. - // A cluster is still capable of serving requests while being - // resized, - // but may exhibit performance as if its number of allocated nodes - // is + // to its previous node count if the process encounters an error. A + // cluster is still capable of serving requests while being resized, but + // may exhibit performance as if its number of allocated nodes is // between the starting and requested states. // "DISABLED" - The cluster has no backing nodes. The data (tables) - // still - // exist, but no operations can be performed on the cluster. + // still exist, but no operations can be performed on the cluster. State string `json:"state,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -774,21 +778,22 @@ type ClusterState struct { // "STATE_NOT_KNOWN" - The replication state of the table is unknown // in this cluster. // "INITIALIZING" - The cluster was recently created, and the table - // must finish copying - // over pre-existing data from other clusters before it can - // begin - // receiving live replication updates and serving Data API requests. + // must finish copying over pre-existing data from other clusters before + // it can begin receiving live replication updates and serving Data API + // requests. // "PLANNED_MAINTENANCE" - The table is temporarily unable to serve - // Data API requests from this - // cluster due to planned internal maintenance. + // Data API requests from this cluster due to planned internal + // maintenance. // "UNPLANNED_MAINTENANCE" - The table is temporarily unable to serve - // Data API requests from this - // cluster due to unplanned or emergency maintenance. 
+ // Data API requests from this cluster due to unplanned or emergency + // maintenance. // "READY" - The table can serve Data API requests from this cluster. - // Depending on - // replication delay, reads may not immediately reflect the state of - // the - // table in other clusters. + // Depending on replication delay, reads may not immediately reflect the + // state of the table in other clusters. + // "READY_OPTIMIZING" - The table is fully created and ready for use + // after a restore, and is being optimized for performance. When + // optimizations are complete, the table will transition to `READY` + // state. ReplicationState string `json:"replicationState,omitempty"` // ForceSendFields is a list of field names (e.g. "ReplicationState") to @@ -818,14 +823,11 @@ func (s *ClusterState) MarshalJSON() ([]byte, error) { // ColumnFamily: A set of columns within a table which share a common // configuration. type ColumnFamily struct { - // GcRule: Garbage collection rule specified as a protobuf. - // Must serialize to at most 500 bytes. - // - // NOTE: Garbage collection executes opportunistically in the - // background, and - // so it's possible for reads to return a cell even if it matches the - // active - // GC expression for its family. + // GcRule: Garbage collection rule specified as a protobuf. Must + // serialize to at most 500 bytes. NOTE: Garbage collection executes + // opportunistically in the background, and so it's possible for reads + // to return a cell even if it matches the active GC expression for its + // family. GcRule *GcRule `json:"gcRule,omitempty"` // ForceSendFields is a list of field names (e.g. "GcRule") to @@ -851,6 +853,45 @@ func (s *ColumnFamily) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// CreateBackupMetadata: Metadata type for the operation returned by +// CreateBackup. +type CreateBackupMetadata struct { + // EndTime: If set, the time at which this operation finished or was + // cancelled. + EndTime string `json:"endTime,omitempty"` + + // Name: The name of the backup being created. + Name string `json:"name,omitempty"` + + // SourceTable: The name of the table the backup is created from. + SourceTable string `json:"sourceTable,omitempty"` + + // StartTime: The time at which this operation started. + StartTime string `json:"startTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *CreateBackupMetadata) MarshalJSON() ([]byte, error) { + type NoMethod CreateBackupMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // CreateClusterMetadata: The metadata for the Operation returned by // CreateCluster. type CreateClusterMetadata struct { @@ -866,18 +907,11 @@ type CreateClusterMetadata struct { RequestTime string `json:"requestTime,omitempty"` // Tables: Keys: the full `name` of each table that existed in the - // instance when - // CreateCluster was first called, - // i.e. - // `projects//instances//tables/`. Any table - // added - // to the instance by a later API call will be created in the new - // cluster by - // that API call, not this one. - // - // Values: information on how much of a table's data has been copied to - // the - // newly-created cluster so far. + // instance when CreateCluster was first called, i.e. + // `projects//instances//tables/`. Any table added to the instance by a + // later API call will be created in the new cluster by that API call, + // not this one. Values: information on how much of a table's data has + // been copied to the newly-created cluster so far. Tables map[string]TableProgress `json:"tables,omitempty"` // ForceSendFields is a list of field names (e.g. "FinishTime") to @@ -906,20 +940,17 @@ func (s *CreateClusterMetadata) MarshalJSON() ([]byte, error) { // CreateClusterRequest: Request message for // BigtableInstanceAdmin.CreateCluster. type CreateClusterRequest struct { - // Cluster: Required. The cluster to be created. - // Fields marked `OutputOnly` must be left blank. + // Cluster: Required. The cluster to be created. Fields marked + // `OutputOnly` must be left blank. Cluster *Cluster `json:"cluster,omitempty"` // ClusterId: Required. The ID to be used when referring to the new - // cluster within its instance, - // e.g., just `mycluster` rather - // than + // cluster within its instance, e.g., just `mycluster` rather than // `projects/myproject/instances/myinstance/clusters/mycluster`. ClusterId string `json:"clusterId,omitempty"` // Parent: Required. The unique name of the instance in which to create - // the new cluster. - // Values are of the form + // the new cluster. Values are of the form // `projects/{project}/instances/{instance}`. Parent string `json:"parent,omitempty"` @@ -987,29 +1018,23 @@ func (s *CreateInstanceMetadata) MarshalJSON() ([]byte, error) { // BigtableInstanceAdmin.CreateInstance. type CreateInstanceRequest struct { // Clusters: Required. The clusters to be created within the instance, - // mapped by desired - // cluster ID, e.g., just `mycluster` rather - // than - // `projects/myproject/instances/myinstance/clusters/mycluster`. - // Fie - // lds marked `OutputOnly` must be left blank. - // Currently, at most four clusters can be specified. + // mapped by desired cluster ID, e.g., just `mycluster` rather than + // `projects/myproject/instances/myinstance/clusters/mycluster`. Fields + // marked `OutputOnly` must be left blank. Currently, at most four + // clusters can be specified. Clusters map[string]Cluster `json:"clusters,omitempty"` - // Instance: Required. The instance to create. - // Fields marked `OutputOnly` must be left blank. + // Instance: Required. The instance to create. Fields marked + // `OutputOnly` must be left blank. Instance *Instance `json:"instance,omitempty"` // InstanceId: Required. 
The ID to be used when referring to the new - // instance within its project, - // e.g., just `myinstance` rather - // than + // instance within its project, e.g., just `myinstance` rather than // `projects/myproject/instances/myinstance`. InstanceId string `json:"instanceId,omitempty"` // Parent: Required. The unique name of the project in which to create - // the new instance. - // Values are of the form `projects/{project}`. + // the new instance. Values are of the form `projects/{project}`. Parent string `json:"parent,omitempty"` // ForceSendFields is a list of field names (e.g. "Clusters") to @@ -1035,40 +1060,28 @@ func (s *CreateInstanceRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// CreateTableRequest: Request message -// for +// CreateTableRequest: Request message for // google.bigtable.admin.v2.BigtableTableAdmin.CreateTable type CreateTableRequest struct { // InitialSplits: The optional list of row keys that will be used to - // initially split the - // table into several tablets (tablets are similar to HBase - // regions). - // Given two split keys, `s1` and `s2`, three tablets will be - // created, - // spanning the key ranges: `[, s1), [s1, s2), [s2, )`. - // - // Example: - // - // * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` - // "other", "zz"]` - // * initial_split_keys := `["apple", "customer_1", "customer_2", - // "other"]` - // * Key assignment: - // - Tablet 1 `[, apple) => {"a"}.` - // - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - // - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - // - Tablet 4 `[customer_2, other) => {"customer_2"}.` - // - Tablet 5 `[other, ) => {"other", "zz"}.` + // initially split the table into several tablets (tablets are similar + // to HBase regions). Given two split keys, `s1` and `s2`, three tablets + // will be created, spanning the key ranges: `[, s1), [s1, s2), [s2, )`. + // Example: * Row keys := `["a", "apple", "custom", "customer_1", + // "customer_2",` "other", "zz"]` * initial_split_keys := `["apple", + // "customer_1", "customer_2", "other"]` * Key assignment: - Tablet 1 + // `[, apple) => {"a"}.` - Tablet 2 `[apple, customer_1) => {"apple", + // "custom"}.` - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` + // - Tablet 4 `[customer_2, other) => {"customer_2"}.` - Tablet 5 + // `[other, ) => {"other", "zz"}.` InitialSplits []*Split `json:"initialSplits,omitempty"` // Table: Required. The Table to create. Table *Table `json:"table,omitempty"` // TableId: Required. The name by which the new table should be referred - // to within the parent - // instance, e.g., `foobar` rather than - // `{parent}/tables/foobar`. - // Maximum 50 characters. + // to within the parent instance, e.g., `foobar` rather than + // `{parent}/tables/foobar`. Maximum 50 characters. TableId string `json:"tableId,omitempty"` // ForceSendFields is a list of field names (e.g. "InitialSplits") to @@ -1094,8 +1107,7 @@ func (s *CreateTableRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// DropRowRangeRequest: Request message -// for +// DropRowRangeRequest: Request message for // google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange type DropRowRangeRequest struct { // DeleteAllDataFromTable: Delete all rows in the table. 
Setting this to @@ -1103,8 +1115,7 @@ type DropRowRangeRequest struct { DeleteAllDataFromTable bool `json:"deleteAllDataFromTable,omitempty"` // RowKeyPrefix: Delete all rows that start with this row key prefix. - // Prefix cannot be - // zero length. + // Prefix cannot be zero length. RowKeyPrefix string `json:"rowKeyPrefix,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1133,17 +1144,11 @@ func (s *DropRowRangeRequest) MarshalJSON() ([]byte, error) { } // Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -1151,65 +1156,40 @@ type Empty struct { } // Expr: Represents a textual expression in the Common Expression -// Language (CEL) -// syntax. CEL is a C-like expression language. The syntax and semantics -// of CEL -// are documented at https://github.com/google/cel-spec. -// -// Example (Comparison): -// -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" -// -// Example (Equality): -// -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == -// request.auth.claims.email" -// -// Example (Logic): -// -// title: "Public documents" -// description: "Determine whether the document should be publicly -// visible" -// expression: "document.type != 'private' && document.type != -// 'internal'" -// -// Example (Data Manipulation): -// -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + -// string(document.create_time)" -// -// The exact variables and functions that may be referenced within an -// expression -// are determined by the service that evaluates it. See the -// service -// documentation for additional information. +// Language (CEL) syntax. CEL is a C-like expression language. The +// syntax and semantics of CEL are documented at +// https://github.com/google/cel-spec. Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" +// description: "Determine whether the document should be publicly +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." 
+// expression: "'New message received at ' + +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. type Expr struct { // Description: Optional. Description of the expression. This is a - // longer text which - // describes the expression, e.g. when hovered over it in a UI. + // longer text which describes the expression, e.g. when hovered over it + // in a UI. Description string `json:"description,omitempty"` // Expression: Textual representation of an expression in Common - // Expression Language - // syntax. + // Expression Language syntax. Expression string `json:"expression,omitempty"` // Location: Optional. String indicating the location of the expression - // for error - // reporting, e.g. a file name and a position in the file. + // for error reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` // Title: Optional. Title for the expression, i.e. a short string - // describing - // its purpose. This can be used e.g. in UIs which allow to enter - // the - // expression. + // describing its purpose. This can be used e.g. in UIs which allow to + // enter the expression. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -1235,6 +1215,63 @@ func (s *Expr) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// FailureTrace: Added to the error payload. +type FailureTrace struct { + Frames []*Frame `json:"frames,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Frames") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Frames") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FailureTrace) MarshalJSON() ([]byte, error) { + type NoMethod FailureTrace + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Frame struct { + TargetName string `json:"targetName,omitempty"` + + WorkflowGuid string `json:"workflowGuid,omitempty"` + + ZoneId string `json:"zoneId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetName") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Frame) MarshalJSON() ([]byte, error) { + type NoMethod Frame + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // GcRule: Rule for determining which cells to delete during garbage // collection. type GcRule struct { @@ -1242,9 +1279,8 @@ type GcRule struct { // rule. Intersection *Intersection `json:"intersection,omitempty"` - // MaxAge: Delete cells in a column older than the given age. - // Values must be at least one millisecond, and will be truncated - // to + // MaxAge: Delete cells in a column older than the given age. Values + // must be at least one millisecond, and will be truncated to // microsecond granularity. MaxAge string `json:"maxAge,omitempty"` @@ -1278,17 +1314,13 @@ func (s *GcRule) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// GenerateConsistencyTokenRequest: Request message -// for -// google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyTok -// en +// GenerateConsistencyTokenRequest: Request message for +// google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken type GenerateConsistencyTokenRequest struct { } -// GenerateConsistencyTokenResponse: Response message -// for -// google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyTok -// en +// GenerateConsistencyTokenResponse: Response message for +// google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken type GenerateConsistencyTokenResponse struct { // ConsistencyToken: The generated consistency token. ConsistencyToken string `json:"consistencyToken,omitempty"` @@ -1324,8 +1356,7 @@ func (s *GenerateConsistencyTokenResponse) MarshalJSON() ([]byte, error) { // GetIamPolicyRequest: Request message for `GetIamPolicy` method. type GetIamPolicyRequest struct { // Options: OPTIONAL: A `GetPolicyOptions` object for specifying options - // to - // `GetIamPolicy`. This field is only used by Cloud IAM. + // to `GetIamPolicy`. Options *GetPolicyOptions `json:"options,omitempty"` // ForceSendFields is a list of field names (e.g. "Options") to @@ -1354,17 +1385,14 @@ func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) { // GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. type GetPolicyOptions struct { // RequestedPolicyVersion: Optional. The policy format version to be - // returned. - // - // Valid values are 0, 1, and 3. Requests specifying an invalid value - // will be - // rejected. - // - // Requests for policies with any conditional bindings must specify - // version 3. - // Policies without any conditional bindings may specify any valid value - // or - // leave the field unset. + // returned. Valid values are 0, 1, and 3. Requests specifying an + // invalid value will be rejected. Requests for policies with any + // conditional bindings must specify version 3. Policies without any + // conditional bindings may specify any valid value or leave the field + // unset. To learn which resources support conditions in their IAM + // policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). 
RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1392,69 +1420,53 @@ func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Instance: A collection of Bigtable Tables and -// the resources that serve them. -// All tables in an instance are served from all -// Clusters in the instance. +// Instance: A collection of Bigtable Tables and the resources that +// serve them. All tables in an instance are served from all Clusters in +// the instance. type Instance struct { // DisplayName: Required. The descriptive name for this instance as it - // appears in UIs. - // Can be changed at any time, but should be kept globally unique - // to avoid confusion. + // appears in UIs. Can be changed at any time, but should be kept + // globally unique to avoid confusion. DisplayName string `json:"displayName,omitempty"` // Labels: Required. Labels are a flexible and lightweight mechanism for - // organizing cloud - // resources into groups that reflect a customer's organizational needs - // and - // deployment strategies. They can be used to filter resources and - // aggregate - // metrics. - // - // * Label keys must be between 1 and 63 characters long and must - // conform to - // the regular expression: `\p{Ll}\p{Lo}{0,62}`. - // * Label values must be between 0 and 63 characters long and must - // conform to - // the regular expression: `[\p{Ll}\p{Lo}\p{N}_-]{0,63}`. - // * No more than 64 labels can be associated with a given resource. - // * Keys and values must both be under 128 bytes. + // organizing cloud resources into groups that reflect a customer's + // organizational needs and deployment strategies. They can be used to + // filter resources and aggregate metrics. * Label keys must be between + // 1 and 63 characters long and must conform to the regular expression: + // `\p{Ll}\p{Lo}{0,62}`. * Label values must be between 0 and 63 + // characters long and must conform to the regular expression: + // `[\p{Ll}\p{Lo}\p{N}_-]{0,63}`. * No more than 64 labels can be + // associated with a given resource. * Keys and values must both be + // under 128 bytes. Labels map[string]string `json:"labels,omitempty"` - // Name: Required. (`OutputOnly`) - // The unique name of the instance. Values are of the - // form + // Name: The unique name of the instance. Values are of the form // `projects/{project}/instances/a-z+[a-z0-9]`. Name string `json:"name,omitempty"` - // State: (`OutputOnly`) - // The current state of the instance. + // State: Output only. The current state of the instance. // // Possible values: // "STATE_NOT_KNOWN" - The state of the instance could not be // determined. // "READY" - The instance has been successfully created and can serve - // requests - // to its tables. + // requests to its tables. // "CREATING" - The instance is currently being created, and may be - // destroyed - // if the creation process encounters an error. + // destroyed if the creation process encounters an error. State string `json:"state,omitempty"` // Type: Required. The type of the instance. Defaults to `PRODUCTION`. // // Possible values: // "TYPE_UNSPECIFIED" - The type of the instance is unspecified. If - // set when creating an - // instance, a `PRODUCTION` instance will be created. If set when - // updating - // an instance, the type will be left unchanged. + // set when creating an instance, a `PRODUCTION` instance will be + // created. 
If set when updating an instance, the type will be left + // unchanged. // "PRODUCTION" - An instance meant for production use. `serve_nodes` - // must be set - // on the cluster. + // must be set on the cluster. // "DEVELOPMENT" - DEPRECATED: Prefer PRODUCTION for all use cases, as - // it no longer enforces - // a higher minimum node count than DEVELOPMENT. + // it no longer enforces a higher minimum node count than DEVELOPMENT. Type string `json:"type,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1521,18 +1533,14 @@ type ListAppProfilesResponse struct { AppProfiles []*AppProfile `json:"appProfiles,omitempty"` // FailedLocations: Locations from which AppProfile information could - // not be retrieved, - // due to an outage or some other transient condition. - // AppProfiles from these locations may be missing from - // `app_profiles`. - // Values are of the form `projects//locations/` + // not be retrieved, due to an outage or some other transient condition. + // AppProfiles from these locations may be missing from `app_profiles`. + // Values are of the form `projects//locations/` FailedLocations []string `json:"failedLocations,omitempty"` // NextPageToken: Set if not all app profiles could be returned in a - // single response. - // Pass this value to `page_token` in another request to get the - // next - // page of results. + // single response. Pass this value to `page_token` in another request + // to get the next page of results. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1562,6 +1570,42 @@ func (s *ListAppProfilesResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ListBackupsResponse: The response for ListBackups. +type ListBackupsResponse struct { + // Backups: The list of matching backups. + Backups []*Backup `json:"backups,omitempty"` + + // NextPageToken: `next_page_token` can be sent in a subsequent + // ListBackups call to fetch more of the matching backups. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Backups") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Backups") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListBackupsResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListBackupsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ListClustersResponse: Response message for // BigtableInstanceAdmin.ListClusters. 
type ListClustersResponse struct { @@ -1569,11 +1613,10 @@ type ListClustersResponse struct { Clusters []*Cluster `json:"clusters,omitempty"` // FailedLocations: Locations from which Cluster information could not - // be retrieved, - // due to an outage or some other transient condition. - // Clusters from these locations may be missing from `clusters`, - // or may only have partial information returned. - // Values are of the form `projects//locations/` + // be retrieved, due to an outage or some other transient condition. + // Clusters from these locations may be missing from `clusters`, or may + // only have partial information returned. Values are of the form + // `projects//locations/` FailedLocations []string `json:"failedLocations,omitempty"` // NextPageToken: DEPRECATED: This field is unused and ignored. @@ -1610,14 +1653,11 @@ func (s *ListClustersResponse) MarshalJSON() ([]byte, error) { // BigtableInstanceAdmin.ListInstances. type ListInstancesResponse struct { // FailedLocations: Locations from which Instance information could not - // be retrieved, - // due to an outage or some other transient condition. - // Instances whose Clusters are all in one of the failed locations - // may be missing from `instances`, and Instances with at least - // one - // Cluster in a failed location may only have partial information - // returned. - // Values are of the form `projects//locations/` + // be retrieved, due to an outage or some other transient condition. + // Instances whose Clusters are all in one of the failed locations may + // be missing from `instances`, and Instances with at least one Cluster + // in a failed location may only have partial information returned. + // Values are of the form `projects//locations/` FailedLocations []string `json:"failedLocations,omitempty"` // Instances: The list of requested instances. @@ -1728,15 +1768,12 @@ func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ListTablesResponse: Response message -// for +// ListTablesResponse: Response message for // google.bigtable.admin.v2.BigtableTableAdmin.ListTables type ListTablesResponse struct { // NextPageToken: Set if not all tables could be returned in a single - // response. - // Pass this value to `page_token` in another request to get the - // next - // page of results. + // response. Pass this value to `page_token` in another request to get + // the next page of results. NextPageToken string `json:"nextPageToken,omitempty"` // Tables: The tables present in the requested instance. @@ -1772,13 +1809,11 @@ func (s *ListTablesResponse) MarshalJSON() ([]byte, error) { // Location: A resource that represents Google Cloud Platform location. type Location struct { // DisplayName: The friendly name for this location, typically a nearby - // city name. - // For example, "Tokyo". + // city name. For example, "Tokyo". DisplayName string `json:"displayName,omitempty"` // Labels: Cross-service attributes for the location. For example - // - // {"cloud.googleapis.com/region": "us-east1"} + // {"cloud.googleapis.com/region": "us-east1"} Labels map[string]string `json:"labels,omitempty"` // LocationId: The canonical id for this location. For example: @@ -1786,13 +1821,12 @@ type Location struct { LocationId string `json:"locationId,omitempty"` // Metadata: Service-specific metadata. For example the available - // capacity at the given - // location. + // capacity at the given location. 
Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: Resource name for the location, which may vary between - // implementations. - // For example: "projects/example-project/locations/us-east1" + // implementations. For example: + // "projects/example-project/locations/us-east1" Name string `json:"name,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1826,21 +1860,18 @@ func (s *Location) MarshalJSON() ([]byte, error) { // family. type Modification struct { // Create: Create a new column family with the specified schema, or fail - // if - // one already exists with the given ID. + // if one already exists with the given ID. Create *ColumnFamily `json:"create,omitempty"` // Drop: Drop (delete) the column family with the given ID, or fail if - // no such - // family exists. + // no such family exists. Drop bool `json:"drop,omitempty"` // Id: The ID of the column family to be modified. Id string `json:"id,omitempty"` // Update: Update an existing column family to the specified schema, or - // fail - // if no column family exists with the given ID. + // fail if no column family exists with the given ID. Update *ColumnFamily `json:"update,omitempty"` // ForceSendFields is a list of field names (e.g. "Create") to @@ -1866,17 +1897,13 @@ func (s *Modification) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ModifyColumnFamiliesRequest: Request message -// for +// ModifyColumnFamiliesRequest: Request message for // google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies type ModifyColumnFamiliesRequest struct { // Modifications: Required. Modifications to be atomically applied to - // the specified table's families. - // Entries are applied in order, meaning that earlier modifications can - // be - // masked by later ones (in the case of repeated updates to the same - // family, - // for example). + // the specified table's families. Entries are applied in order, meaning + // that earlier modifications can be masked by later ones (in the case + // of repeated updates to the same family, for example). Modifications []*Modification `json:"modifications,omitempty"` // ForceSendFields is a list of field names (e.g. "Modifications") to @@ -1903,64 +1930,46 @@ func (s *ModifyColumnFamiliesRequest) MarshalJSON() ([]byte, error) { } // MultiClusterRoutingUseAny: Read/write requests are routed to the -// nearest cluster in the instance, and -// will fail over to the nearest cluster that is available in the event -// of -// transient errors or delays. Clusters in a region are -// considered -// equidistant. Choosing this option sacrifices read-your-writes -// consistency -// to improve availability. +// nearest cluster in the instance, and will fail over to the nearest +// cluster that is available in the event of transient errors or delays. +// Clusters in a region are considered equidistant. Choosing this option +// sacrifices read-your-writes consistency to improve availability. type MultiClusterRoutingUseAny struct { } // Operation: This resource represents a long-running operation that is -// the result of a -// network API call. +// the result of a network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in - // progress. - // If `true`, the operation is completed, and either `error` or - // `response` is - // available. + // progress. If `true`, the operation is completed, and either `error` + // or `response` is available. 
Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. Error *Status `json:"error,omitempty"` - // Metadata: Service-specific metadata associated with the operation. - // It typically - // contains progress information and common metadata such as create - // time. - // Some services might not provide such metadata. Any method that - // returns a - // long-running operation should document the metadata type, if any. + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same - // service that - // originally returns it. If you use the default HTTP mapping, - // the - // `name` should be a resource name ending with + // service that originally returns it. If you use the default HTTP + // mapping, the `name` should be a resource name ending with // `operations/{unique_id}`. Name string `json:"name,omitempty"` - // Response: The normal response of the operation in case of success. - // If the original - // method returns no data on success, such as `Delete`, the response - // is - // `google.protobuf.Empty`. If the original method is - // standard - // `Get`/`Create`/`Update`, the response should be the resource. For - // other - // methods, the response should have the type `XxxResponse`, where - // `Xxx` - // is the original method name. For example, if the original method - // name - // is `TakeSnapshot()`, the inferred response type - // is - // `TakeSnapshotResponse`. + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as `Delete`, the + // response is `google.protobuf.Empty`. If the original method is + // standard `Get`/`Create`/`Update`, the response should be the + // resource. For other methods, the response should have the type + // `XxxResponse`, where `Xxx` is the original method name. For example, + // if the original method name is `TakeSnapshot()`, the inferred + // response type is `TakeSnapshotResponse`. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1990,6 +1999,78 @@ func (s *Operation) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// OperationProgress: Encapsulates progress related information for a +// Cloud Bigtable long running operation. +type OperationProgress struct { + // EndTime: If set, the time at which this operation failed or was + // completed successfully. + EndTime string `json:"endTime,omitempty"` + + // ProgressPercent: Percent completion of the operation. Values are + // between 0 and 100 inclusive. + ProgressPercent int64 `json:"progressPercent,omitempty"` + + // StartTime: Time the request was received. + StartTime string `json:"startTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationProgress) MarshalJSON() ([]byte, error) { + type NoMethod OperationProgress + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OptimizeRestoredTableMetadata: Metadata type for the long-running +// operation used to track the progress of optimizations performed on a +// newly restored table. This long-running operation is automatically +// created by the system after the successful completion of a table +// restore, and cannot be cancelled. +type OptimizeRestoredTableMetadata struct { + // Name: Name of the restored table being optimized. + Name string `json:"name,omitempty"` + + // Progress: The progress of the post-restore optimizations. + Progress *OperationProgress `json:"progress,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OptimizeRestoredTableMetadata) MarshalJSON() ([]byte, error) { + type NoMethod OptimizeRestoredTableMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // PartialUpdateInstanceRequest: Request message for // BigtableInstanceAdmin.PartialUpdateInstance. type PartialUpdateInstanceRequest struct { @@ -1998,8 +2079,7 @@ type PartialUpdateInstanceRequest struct { Instance *Instance `json:"instance,omitempty"` // UpdateMask: Required. The subset of Instance fields which should be - // replaced. - // Must be explicitly set. + // replaced. Must be explicitly set. UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. "Instance") to @@ -2026,143 +2106,79 @@ func (s *PartialUpdateInstanceRequest) MarshalJSON() ([]byte, error) { } // Policy: An Identity and Access Management (IAM) policy, which -// specifies access -// controls for Google Cloud resources. -// -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or -// more -// `members` to a single `role`. Members can be user accounts, service -// accounts, +// specifies access controls for Google Cloud resources. A `Policy` is a +// collection of `bindings`. 
A `binding` binds one or more `members` to +// a single `role`. Members can be user accounts, service accounts, // Google groups, and domains (such as G Suite). A `role` is a named -// list of -// permissions; each `role` can be an IAM predefined role or a -// user-created -// custom role. -// -// Optionally, a `binding` can specify a `condition`, which is a -// logical +// list of permissions; each `role` can be an IAM predefined role or a +// user-created custom role. For some types of Google Cloud resources, a +// `binding` can also specify a `condition`, which is a logical // expression that allows access to a resource only if the expression -// evaluates -// to `true`. A condition can add constraints based on attributes of -// the -// request, the resource, or both. -// -// **JSON example:** -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": ["user:eve@example.com"], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// -// **YAML example:** -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// - etag: BwWWja0YfJA= -// - version: 3 -// -// For a description of IAM and its features, see the -// [IAM documentation](https://cloud.google.com/iam/docs/). +// evaluates to `true`. A condition can add constraints based on +// attributes of the request, the resource, or both. To learn which +// resources support conditions in their IAM policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). 
**JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: +// 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). type Policy struct { // AuditConfigs: Specifies cloud audit logging configuration for this // policy. AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` // Bindings: Associates a list of `members` to a `role`. Optionally, may - // specify a - // `condition` that determines how and when the `bindings` are applied. - // Each - // of the `bindings` must contain at least one member. + // specify a `condition` that determines how and when the `bindings` are + // applied. Each of the `bindings` must contain at least one member. Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. - // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. **Important:** If you use IAM + // Conditions, you must include the `etag` field whenever you call + // `setIamPolicy`. If you omit this field, then IAM allows you to + // overwrite a version `3` policy with a version `1` policy, and all of // the conditions in the version `3` policy are lost. Etag string `json:"etag,omitempty"` - // Version: Specifies the format of the policy. - // - // Valid values are `0`, `1`, and `3`. 
Requests that specify an invalid - // value - // are rejected. - // + // Version: Specifies the format of the policy. Valid values are `0`, + // `1`, and `3`. Requests that specify an invalid value are rejected. // Any operation that affects conditional role bindings must specify - // version - // `3`. This requirement applies to the following operations: - // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a - // policy - // that includes conditions - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of - // the conditions in the version `3` policy are lost. - // - // If a policy does not include any conditions, operations on that - // policy may - // specify any valid version or leave the field unset. + // version `3`. This requirement applies to the following operations: * + // Getting a policy that includes a conditional role binding * Adding a + // conditional role binding to a policy * Changing a conditional role + // binding in a policy * Removing any role binding, with or without a + // condition, from a policy that includes conditions **Important:** If + // you use IAM Conditions, you must include the `etag` field whenever + // you call `setIamPolicy`. If you omit this field, then IAM allows you + // to overwrite a version `3` policy with a version `1` policy, and all + // of the conditions in the version `3` policy are lost. If a policy + // does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. To learn which + // resources support conditions in their IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Version int64 `json:"version,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2192,23 +2208,140 @@ func (s *Policy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SetIamPolicyRequest: Request message for `SetIamPolicy` method. -type SetIamPolicyRequest struct { - // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. - Policy *Policy `json:"policy,omitempty"` +// RestoreInfo: Information about a table restore. +type RestoreInfo struct { + // BackupInfo: Information about the backup used to restore the table. + // The backup may no longer exist. + BackupInfo *BackupInfo `json:"backupInfo,omitempty"` + + // SourceType: The type of the restore source. + // + // Possible values: + // "RESTORE_SOURCE_TYPE_UNSPECIFIED" - No restore associated. + // "BACKUP" - A backup was used as the source of the restore. + SourceType string `json:"sourceType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BackupInfo") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackupInfo") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RestoreInfo) MarshalJSON() ([]byte, error) { + type NoMethod RestoreInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RestoreTableMetadata: Metadata type for the long-running operation +// returned by RestoreTable. +type RestoreTableMetadata struct { + BackupInfo *BackupInfo `json:"backupInfo,omitempty"` + + // Name: Name of the table being created and restored to. + Name string `json:"name,omitempty"` + + // OptimizeTableOperationName: If exists, the name of the long-running + // operation that will be used to track the post-restore optimization + // process to optimize the performance of the restored table. The + // metadata type of the long-running operation is + // OptimizeRestoreTableMetadata. The response type is Empty. This + // long-running operation may be automatically created by the system if + // applicable after the RestoreTable long-running operation completes + // successfully. This operation may not be created if the table is + // already optimized or the restore was not successful. + OptimizeTableOperationName string `json:"optimizeTableOperationName,omitempty"` + + // Progress: The progress of the RestoreTable operation. + Progress *OperationProgress `json:"progress,omitempty"` + + // SourceType: The type of the restore source. + // + // Possible values: + // "RESTORE_SOURCE_TYPE_UNSPECIFIED" - No restore associated. + // "BACKUP" - A backup was used as the source of the restore. + SourceType string `json:"sourceType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BackupInfo") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackupInfo") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RestoreTableMetadata) MarshalJSON() ([]byte, error) { + type NoMethod RestoreTableMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RestoreTableRequest: The request for RestoreTable. +type RestoreTableRequest struct { + // Backup: Name of the backup from which to restore. 
Values are of the + // form `projects//instances//clusters//backups/`. + Backup string `json:"backup,omitempty"` + + // TableId: Required. The id of the table to create and restore to. This + // table must not already exist. The `table_id` appended to `parent` + // forms the full table name of the form `projects//instances//tables/`. + TableId string `json:"tableId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Backup") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Backup") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RestoreTableRequest) MarshalJSON() ([]byte, error) { + type NoMethod RestoreTableRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SetIamPolicyRequest: Request message for `SetIamPolicy` method. +type SetIamPolicyRequest struct { + // Policy: REQUIRED: The complete policy to be applied to the + // `resource`. The size of the policy is limited to a few 10s of KB. An + // empty policy is a valid policy but certain Cloud Platform services + // (such as Projects) might reject them. + Policy *Policy `json:"policy,omitempty"` // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the - // policy to modify. Only - // the fields in the mask will be modified. If no mask is provided, - // the - // following default mask is used: - // paths: "bindings, etag" - // This field is only used by Cloud IAM. + // policy to modify. Only the fields in the mask will be modified. If no + // mask is provided, the following default mask is used: `paths: + // "bindings, etag" UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. "Policy") to @@ -2235,16 +2368,13 @@ func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { } // SingleClusterRouting: Unconditionally routes all read/write requests -// to a specific cluster. -// This option preserves read-your-writes consistency but does not -// improve -// availability. +// to a specific cluster. This option preserves read-your-writes +// consistency but does not improve availability. type SingleClusterRouting struct { // AllowTransactionalWrites: Whether or not `CheckAndMutateRow` and - // `ReadModifyWriteRow` requests are - // allowed by this app profile. It is unsafe to send these requests - // to - // the same table/row/column in multiple clusters. + // `ReadModifyWriteRow` requests are allowed by this app profile. It is + // unsafe to send these requests to the same table/row/column in + // multiple clusters. AllowTransactionalWrites bool `json:"allowTransactionalWrites,omitempty"` // ClusterId: The cluster to which read/write requests should be routed. 
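The ForceSendFields/NullFields comments on the newly added types above (Backup, RestoreTableRequest, and friends) describe how the generated MarshalJSON, which delegates to gensupport.MarshalJSON, serializes empty fields. A minimal sketch of that behavior, assuming the vendored google.golang.org/api/bigtableadmin/v2 package is importable and using an illustrative backup name (not taken from this change):

package main

import (
	"encoding/json"
	"fmt"

	bigtableadmin "google.golang.org/api/bigtableadmin/v2"
)

func main() {
	// By default, empty fields are omitted from the request body.
	req := &bigtableadmin.RestoreTableRequest{
		Backup: "projects/p/instances/i/clusters/c/backups/b", // illustrative name
	}
	b, err := json.Marshal(req) // invokes the generated MarshalJSON
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"backup":"projects/p/instances/i/clusters/c/backups/b"}

	// ForceSendFields makes an otherwise-empty field explicit in the payload.
	req.ForceSendFields = []string{"TableId"}
	b, _ = json.Marshal(req)
	fmt.Println(string(b)) // ..., "tableId":""

	// NullFields sends the (empty) field as JSON null instead.
	req.ForceSendFields = nil
	req.NullFields = []string{"TableId"}
	b, _ = json.Marshal(req)
	fmt.Println(string(b)) // ..., "tableId":null
}

The same pattern applies to every generated struct in this file; only fields named in ForceSendFields or NullFields override the default omit-empty behavior, which matters when issuing Patch-style requests.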
@@ -2304,32 +2434,24 @@ func (s *Split) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. -// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. -// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -2356,49 +2478,42 @@ func (s *Status) MarshalJSON() ([]byte, error) { } // Table: A collection of user data indexed by row, column, and -// timestamp. -// Each table is served using the resources of its parent cluster. +// timestamp. Each table is served using the resources of its parent +// cluster. type Table struct { // ClusterStates: Output only. Map from cluster ID to per-cluster table - // state. - // If it could not be determined whether or not the table has data in - // a - // particular cluster (for example, if its zone is unavailable), - // then - // there will be an entry for the cluster with UNKNOWN - // `replication_status`. - // Views: `REPLICATION_VIEW`, `FULL` + // state. If it could not be determined whether or not the table has + // data in a particular cluster (for example, if its zone is + // unavailable), then there will be an entry for the cluster with + // UNKNOWN `replication_status`. Views: `REPLICATION_VIEW`, `FULL` ClusterStates map[string]ClusterState `json:"clusterStates,omitempty"` - // ColumnFamilies: (`CreationOnly`) - // The column families configured for this table, mapped by column - // family ID. - // Views: `SCHEMA_VIEW`, `FULL` + // ColumnFamilies: The column families configured for this table, mapped + // by column family ID. Views: `SCHEMA_VIEW`, `FULL` ColumnFamilies map[string]ColumnFamily `json:"columnFamilies,omitempty"` - // Granularity: (`CreationOnly`) - // The granularity (i.e. `MILLIS`) at which timestamps are stored - // in - // this table. Timestamps not matching the granularity will be - // rejected. - // If unspecified at creation time, the value will be set to - // `MILLIS`. 
- // Views: `SCHEMA_VIEW`, `FULL`. + // Granularity: Immutable. The granularity (i.e. `MILLIS`) at which + // timestamps are stored in this table. Timestamps not matching the + // granularity will be rejected. If unspecified at creation time, the + // value will be set to `MILLIS`. Views: `SCHEMA_VIEW`, `FULL`. // // Possible values: // "TIMESTAMP_GRANULARITY_UNSPECIFIED" - The user did not specify a - // granularity. Should not be returned. - // When specified during table creation, MILLIS will be used. + // granularity. Should not be returned. When specified during table + // creation, MILLIS will be used. // "MILLIS" - The table keeps data versioned at a granularity of 1ms. Granularity string `json:"granularity,omitempty"` - // Name: Output only. The unique name of the table. Values are of the - // form - // `projects//instances//tables/_a-zA-Z0-9*`. - // Vie - // ws: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL` + // Name: The unique name of the table. Values are of the form + // `projects/{project}/instances/{instance}/tables/_a-zA-Z0-9*`. Views: + // `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL` Name string `json:"name,omitempty"` + // RestoreInfo: Output only. If this table was restored from another + // data source (e.g. a backup), this field will be populated with + // information about the restore. + RestoreInfo *RestoreInfo `json:"restoreInfo,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -2430,10 +2545,8 @@ func (s *Table) MarshalJSON() ([]byte, error) { // cluster. type TableProgress struct { // EstimatedCopiedBytes: Estimate of the number of bytes copied so far - // for this table. - // This will eventually reach 'estimated_size_bytes' unless the table - // copy - // is CANCELLED. + // for this table. This will eventually reach 'estimated_size_bytes' + // unless the table copy is CANCELLED. EstimatedCopiedBytes int64 `json:"estimatedCopiedBytes,omitempty,string"` // EstimatedSizeBytes: Estimate of the size of the table to be copied. @@ -2445,10 +2558,8 @@ type TableProgress struct { // "COPYING" - The table is actively being copied to the new cluster. // "COMPLETED" - The table has been fully copied to the new cluster. // "CANCELLED" - The table was deleted before it finished copying to - // the new cluster. - // Note that tables deleted after completion will stay marked - // as - // COMPLETED, not CANCELLED. + // the new cluster. Note that tables deleted after completion will stay + // marked as COMPLETED, not CANCELLED. State string `json:"state,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -2480,11 +2591,8 @@ func (s *TableProgress) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. - // Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For - // more - // information see - // [IAM + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. For more information see [IAM // Overview](https://cloud.google.com/iam/docs/overview#permissions). Permissions []string `json:"permissions,omitempty"` @@ -2515,8 +2623,7 @@ func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. + // the caller is allowed. 
Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2664,23 +2771,15 @@ type OperationsCancelCall struct { } // Cancel: Starts asynchronous cancellation on a long-running operation. -// The server -// makes a best effort to cancel the operation, but success is -// not -// guaranteed. If the server doesn't support this method, it -// returns -// `google.rpc.Code.UNIMPLEMENTED`. Clients can -// use -// Operations.GetOperation or -// other methods to check whether the cancellation succeeded or whether -// the -// operation completed despite cancellation. On successful -// cancellation, -// the operation is not deleted; instead, it becomes an operation -// with -// an Operation.error value with a google.rpc.Status.code of -// 1, -// corresponding to `Code.CANCELLED`. +// The server makes a best effort to cancel the operation, but success +// is not guaranteed. If the server doesn't support this method, it +// returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use +// Operations.GetOperation or other methods to check whether the +// cancellation succeeded or whether the operation completed despite +// cancellation. On successful cancellation, the operation is not +// deleted; instead, it becomes an operation with an Operation.error +// value with a google.rpc.Status.code of 1, corresponding to +// `Code.CANCELLED`. func (r *OperationsService) Cancel(name string) *OperationsCancelCall { c := &OperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2714,7 +2813,7 @@ func (c *OperationsCancelCall) Header() http.Header { func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2773,7 +2872,7 @@ func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) } return ret, nil // { - // "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. 
On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", // "flatPath": "v2/operations/{operationsId}:cancel", // "httpMethod": "POST", // "id": "bigtableadmin.operations.cancel", @@ -2816,12 +2915,9 @@ type OperationsDeleteCall struct { } // Delete: Deletes a long-running operation. This method indicates that -// the client is -// no longer interested in the operation result. It does not cancel -// the -// operation. If the server doesn't support this method, it -// returns -// `google.rpc.Code.UNIMPLEMENTED`. +// the client is no longer interested in the operation result. It does +// not cancel the operation. If the server doesn't support this method, +// it returns `google.rpc.Code.UNIMPLEMENTED`. func (r *OperationsService) Delete(name string) *OperationsDeleteCall { c := &OperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2855,7 +2951,7 @@ func (c *OperationsDeleteCall) Header() http.Header { func (c *OperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2914,7 +3010,7 @@ func (c *OperationsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) } return ret, nil // { - // "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + // "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", // "flatPath": "v2/operations/{operationsId}", // "httpMethod": "DELETE", // "id": "bigtableadmin.operations.delete", @@ -2957,11 +3053,9 @@ type OperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this -// method to poll the operation result at intervals as recommended by -// the API -// service. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. func (r *OperationsService) Get(name string) *OperationsGetCall { c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3005,7 +3099,7 @@ func (c *OperationsGetCall) Header() http.Header { func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3067,7 +3161,7 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. 
Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", // "flatPath": "v2/operations/{operationsId}", // "httpMethod": "GET", // "id": "bigtableadmin.operations.get", @@ -3112,22 +3206,15 @@ type OperationsProjectsOperationsListCall struct { } // List: Lists operations that match the specified filter in the -// request. If the -// server doesn't support this method, it returns -// `UNIMPLEMENTED`. -// -// NOTE: the `name` binding allows API services to override the -// binding -// to use different resource name schemes, such as `users/*/operations`. -// To -// override the binding, API services can add a binding such -// as -// "/v1/{name=users/*}/operations" to their service configuration. -// For backwards compatibility, the default name includes the -// operations -// collection id, however overriding users must ensure the name -// binding -// is the parent resource, without the operations collection id. +// request. If the server doesn't support this method, it returns +// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to +// override the binding to use different resource name schemes, such as +// `users/*/operations`. To override the binding, API services can add a +// binding such as "/v1/{name=users/*}/operations" to their service +// configuration. For backwards compatibility, the default name includes +// the operations collection id, however overriding users must ensure +// the name binding is the parent resource, without the operations +// collection id. func (r *OperationsProjectsOperationsService) List(name string) *OperationsProjectsOperationsListCall { c := &OperationsProjectsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3192,7 +3279,7 @@ func (c *OperationsProjectsOperationsListCall) Header() http.Header { func (c *OperationsProjectsOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3254,7 +3341,7 @@ func (c *OperationsProjectsOperationsListCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. 
To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", // "flatPath": "v2/operations/projects/{projectsId}/operations", // "httpMethod": "GET", // "id": "bigtableadmin.operations.projects.operations.list", @@ -3370,7 +3457,7 @@ func (c *ProjectsInstancesCreateCall) Header() http.Header { func (c *ProjectsInstancesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3443,7 +3530,7 @@ func (c *ProjectsInstancesCreateCall) Do(opts ...googleapi.CallOption) (*Operati // ], // "parameters": { // "parent": { - // "description": "Required. The unique name of the project in which to create the new instance.\nValues are of the form `projects/{project}`.", + // "description": "Required. The unique name of the project in which to create the new instance. Values are of the form `projects/{project}`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -3513,7 +3600,7 @@ func (c *ProjectsInstancesDeleteCall) Header() http.Header { func (c *ProjectsInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3581,7 +3668,7 @@ func (c *ProjectsInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, // ], // "parameters": { // "name": { - // "description": "Required. The unique name of the instance to be deleted.\nValues are of the form `projects/{project}/instances/{instance}`.", + // "description": "Required. The unique name of the instance to be deleted. Values are of the form `projects/{project}/instances/{instance}`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -3659,7 +3746,7 @@ func (c *ProjectsInstancesGetCall) Header() http.Header { func (c *ProjectsInstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3730,7 +3817,7 @@ func (c *ProjectsInstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, // ], // "parameters": { // "name": { - // "description": "Required. The unique name of the requested instance. Values are of the form\n`projects/{project}/instances/{instance}`.", + // "description": "Required. The unique name of the requested instance. Values are of the form `projects/{project}/instances/{instance}`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -3766,8 +3853,8 @@ type ProjectsInstancesGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for an instance -// resource. Returns an empty -// policy if an instance exists but does not have a policy set. 
+// resource. Returns an empty policy if an instance exists but does not +// have a policy set. func (r *ProjectsInstancesService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsInstancesGetIamPolicyCall { c := &ProjectsInstancesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -3802,7 +3889,7 @@ func (c *ProjectsInstancesGetIamPolicyCall) Header() http.Header { func (c *ProjectsInstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3866,7 +3953,7 @@ func (c *ProjectsInstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Gets the access control policy for an instance resource. Returns an empty\npolicy if an instance exists but does not have a policy set.", + // "description": "Gets the access control policy for an instance resource. Returns an empty policy if an instance exists but does not have a policy set.", // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}:getIamPolicy", // "httpMethod": "POST", // "id": "bigtableadmin.projects.instances.getIamPolicy", @@ -3875,7 +3962,7 @@ func (c *ProjectsInstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -3963,7 +4050,7 @@ func (c *ProjectsInstancesListCall) Header() http.Header { func (c *ProjectsInstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4039,7 +4126,7 @@ func (c *ProjectsInstancesListCall) Do(opts ...googleapi.CallOption) (*ListInsta // "type": "string" // }, // "parent": { - // "description": "Required. The unique name of the project for which a list of instances is requested.\nValues are of the form `projects/{project}`.", + // "description": "Required. The unique name of the project for which a list of instances is requested. Values are of the form `projects/{project}`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -4096,8 +4183,8 @@ type ProjectsInstancesPartialUpdateInstanceCall struct { } // PartialUpdateInstance: Partially updates an instance within a -// project. This method can modify all -// fields of an Instance and is the preferred way to update an Instance. +// project. This method can modify all fields of an Instance and is the +// preferred way to update an Instance. 
func (r *ProjectsInstancesService) PartialUpdateInstance(name string, instance *Instance) *ProjectsInstancesPartialUpdateInstanceCall { c := &ProjectsInstancesPartialUpdateInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4106,8 +4193,8 @@ func (r *ProjectsInstancesService) PartialUpdateInstance(name string, instance * } // UpdateMask sets the optional parameter "updateMask": Required. The -// subset of Instance fields which should be replaced. -// Must be explicitly set. +// subset of Instance fields which should be replaced. Must be +// explicitly set. func (c *ProjectsInstancesPartialUpdateInstanceCall) UpdateMask(updateMask string) *ProjectsInstancesPartialUpdateInstanceCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -4140,7 +4227,7 @@ func (c *ProjectsInstancesPartialUpdateInstanceCall) Header() http.Header { func (c *ProjectsInstancesPartialUpdateInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4204,7 +4291,7 @@ func (c *ProjectsInstancesPartialUpdateInstanceCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Partially updates an instance within a project. This method can modify all\nfields of an Instance and is the preferred way to update an Instance.", + // "description": "Partially updates an instance within a project. This method can modify all fields of an Instance and is the preferred way to update an Instance.", // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}", // "httpMethod": "PATCH", // "id": "bigtableadmin.projects.instances.partialUpdateInstance", @@ -4213,14 +4300,14 @@ func (c *ProjectsInstancesPartialUpdateInstanceCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "name": { - // "description": "Required. (`OutputOnly`)\nThe unique name of the instance. Values are of the form\n`projects/{project}/instances/a-z+[a-z0-9]`.", + // "description": "The unique name of the instance. Values are of the form `projects/{project}/instances/a-z+[a-z0-9]`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "Required. The subset of Instance fields which should be replaced.\nMust be explicitly set.", + // "description": "Required. The subset of Instance fields which should be replaced. Must be explicitly set.", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -4257,8 +4344,7 @@ type ProjectsInstancesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on an instance resource. -// Replaces any -// existing policy. +// Replaces any existing policy. 
func (r *ProjectsInstancesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsInstancesSetIamPolicyCall { c := &ProjectsInstancesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4293,7 +4379,7 @@ func (c *ProjectsInstancesSetIamPolicyCall) Header() http.Header { func (c *ProjectsInstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4357,7 +4443,7 @@ func (c *ProjectsInstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Sets the access control policy on an instance resource. Replaces any\nexisting policy.", + // "description": "Sets the access control policy on an instance resource. Replaces any existing policy.", // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}:setIamPolicy", // "httpMethod": "POST", // "id": "bigtableadmin.projects.instances.setIamPolicy", @@ -4366,7 +4452,7 @@ func (c *ProjectsInstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -4439,7 +4525,7 @@ func (c *ProjectsInstancesTestIamPermissionsCall) Header() http.Header { func (c *ProjectsInstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4512,7 +4598,7 @@ func (c *ProjectsInstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -4550,10 +4636,8 @@ type ProjectsInstancesUpdateCall struct { } // Update: Updates an instance within a project. This method updates -// only the display -// name and type for an Instance. To update other Instance properties, -// such as -// labels, use PartialUpdateInstance. +// only the display name and type for an Instance. To update other +// Instance properties, such as labels, use PartialUpdateInstance. 
func (r *ProjectsInstancesService) Update(name string, instance *Instance) *ProjectsInstancesUpdateCall { c := &ProjectsInstancesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4588,7 +4672,7 @@ func (c *ProjectsInstancesUpdateCall) Header() http.Header { func (c *ProjectsInstancesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4652,7 +4736,7 @@ func (c *ProjectsInstancesUpdateCall) Do(opts ...googleapi.CallOption) (*Instanc } return ret, nil // { - // "description": "Updates an instance within a project. This method updates only the display\nname and type for an Instance. To update other Instance properties, such as\nlabels, use PartialUpdateInstance.", + // "description": "Updates an instance within a project. This method updates only the display name and type for an Instance. To update other Instance properties, such as labels, use PartialUpdateInstance.", // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}", // "httpMethod": "PUT", // "id": "bigtableadmin.projects.instances.update", @@ -4661,7 +4745,7 @@ func (c *ProjectsInstancesUpdateCall) Do(opts ...googleapi.CallOption) (*Instanc // ], // "parameters": { // "name": { - // "description": "Required. (`OutputOnly`)\nThe unique name of the instance. Values are of the form\n`projects/{project}/instances/a-z+[a-z0-9]`.", + // "description": "The unique name of the instance. Values are of the form `projects/{project}/instances/a-z+[a-z0-9]`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -4707,10 +4791,8 @@ func (r *ProjectsInstancesAppProfilesService) Create(parent string, appprofile * } // AppProfileId sets the optional parameter "appProfileId": Required. -// The ID to be used when referring to the new app profile within -// its -// instance, e.g., just `myprofile` rather -// than +// The ID to be used when referring to the new app profile within its +// instance, e.g., just `myprofile` rather than // `projects/myproject/instances/myinstance/appProfiles/myprofile`. func (c *ProjectsInstancesAppProfilesCreateCall) AppProfileId(appProfileId string) *ProjectsInstancesAppProfilesCreateCall { c.urlParams_.Set("appProfileId", appProfileId) @@ -4751,7 +4833,7 @@ func (c *ProjectsInstancesAppProfilesCreateCall) Header() http.Header { func (c *ProjectsInstancesAppProfilesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4824,7 +4906,7 @@ func (c *ProjectsInstancesAppProfilesCreateCall) Do(opts ...googleapi.CallOption // ], // "parameters": { // "appProfileId": { - // "description": "Required. The ID to be used when referring to the new app profile within its\ninstance, e.g., just `myprofile` rather than\n`projects/myproject/instances/myinstance/appProfiles/myprofile`.", + // "description": "Required. 
The ID to be used when referring to the new app profile within its instance, e.g., just `myprofile` rather than `projects/myproject/instances/myinstance/appProfiles/myprofile`.", // "location": "query", // "type": "string" // }, @@ -4834,7 +4916,7 @@ func (c *ProjectsInstancesAppProfilesCreateCall) Do(opts ...googleapi.CallOption // "type": "boolean" // }, // "parent": { - // "description": "Required. The unique name of the instance in which to create the new app profile.\nValues are of the form\n`projects/{project}/instances/{instance}`.", + // "description": "Required. The unique name of the instance in which to create the new app profile. Values are of the form `projects/{project}/instances/{instance}`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -4912,7 +4994,7 @@ func (c *ProjectsInstancesAppProfilesDeleteCall) Header() http.Header { func (c *ProjectsInstancesAppProfilesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4985,7 +5067,7 @@ func (c *ProjectsInstancesAppProfilesDeleteCall) Do(opts ...googleapi.CallOption // "type": "boolean" // }, // "name": { - // "description": "Required. The unique name of the app profile to be deleted. Values are of the form\n`projects/{project}/instances/{instance}/appProfiles/{app_profile}`.", + // "description": "Required. The unique name of the app profile to be deleted. Values are of the form `projects/{project}/instances/{instance}/appProfiles/{app_profile}`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/appProfiles/[^/]+$", // "required": true, @@ -5063,7 +5145,7 @@ func (c *ProjectsInstancesAppProfilesGetCall) Header() http.Header { func (c *ProjectsInstancesAppProfilesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5134,7 +5216,7 @@ func (c *ProjectsInstancesAppProfilesGetCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "name": { - // "description": "Required. The unique name of the requested app profile. Values are of the form\n`projects/{project}/instances/{instance}/appProfiles/{app_profile}`.", + // "description": "Required. The unique name of the requested app profile. Values are of the form `projects/{project}/instances/{instance}/appProfiles/{app_profile}`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/appProfiles/[^/]+$", // "required": true, @@ -5177,19 +5259,12 @@ func (r *ProjectsInstancesAppProfilesService) List(parent string) *ProjectsInsta } // PageSize sets the optional parameter "pageSize": Maximum number of -// results per page. -// -// A page_size of zero lets the server choose the number of items to -// return. -// A page_size which is strictly positive will return at most that many -// items. -// A negative page_size will cause an error. -// -// Following the first request, subsequent paginated calls are not -// required -// to pass a page_size. If a page_size is set in subsequent calls, it -// must -// match the page_size given in the first request. 
+// results per page. A page_size of zero lets the server choose the +// number of items to return. A page_size which is strictly positive +// will return at most that many items. A negative page_size will cause +// an error. Following the first request, subsequent paginated calls are +// not required to pass a page_size. If a page_size is set in subsequent +// calls, it must match the page_size given in the first request. func (c *ProjectsInstancesAppProfilesListCall) PageSize(pageSize int64) *ProjectsInstancesAppProfilesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c @@ -5239,7 +5314,7 @@ func (c *ProjectsInstancesAppProfilesListCall) Header() http.Header { func (c *ProjectsInstancesAppProfilesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5310,7 +5385,7 @@ func (c *ProjectsInstancesAppProfilesListCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "pageSize": { - // "description": "Maximum number of results per page.\n\nA page_size of zero lets the server choose the number of items to return.\nA page_size which is strictly positive will return at most that many items.\nA negative page_size will cause an error.\n\nFollowing the first request, subsequent paginated calls are not required\nto pass a page_size. If a page_size is set in subsequent calls, it must\nmatch the page_size given in the first request.", + // "description": "Maximum number of results per page. A page_size of zero lets the server choose the number of items to return. A page_size which is strictly positive will return at most that many items. A negative page_size will cause an error. Following the first request, subsequent paginated calls are not required to pass a page_size. If a page_size is set in subsequent calls, it must match the page_size given in the first request.", // "format": "int32", // "location": "query", // "type": "integer" @@ -5321,7 +5396,7 @@ func (c *ProjectsInstancesAppProfilesListCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "parent": { - // "description": "Required. The unique name of the instance for which a list of app profiles is\nrequested. Values are of the form\n`projects/{project}/instances/{instance}`.\nUse `{instance} = '-'` to list AppProfiles for all Instances in a project,\ne.g., `projects/myproject/instances/-`.", + // "description": "Required. The unique name of the instance for which a list of app profiles is requested. Values are of the form `projects/{project}/instances/{instance}`. Use `{instance} = '-'` to list AppProfiles for all Instances in a project, e.g., `projects/myproject/instances/-`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -5392,8 +5467,8 @@ func (c *ProjectsInstancesAppProfilesPatchCall) IgnoreWarnings(ignoreWarnings bo } // UpdateMask sets the optional parameter "updateMask": Required. The -// subset of app profile fields which should be replaced. -// If unset, all fields will be replaced. +// subset of app profile fields which should be replaced. If unset, all +// fields will be replaced. 
func (c *ProjectsInstancesAppProfilesPatchCall) UpdateMask(updateMask string) *ProjectsInstancesAppProfilesPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -5426,7 +5501,7 @@ func (c *ProjectsInstancesAppProfilesPatchCall) Header() http.Header { func (c *ProjectsInstancesAppProfilesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5504,14 +5579,14 @@ func (c *ProjectsInstancesAppProfilesPatchCall) Do(opts ...googleapi.CallOption) // "type": "boolean" // }, // "name": { - // "description": "(`OutputOnly`)\nThe unique name of the app profile. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/appProfiles/_a-zA-Z0-9*`.", + // "description": "The unique name of the app profile. Values are of the form `projects/{project}/instances/{instance}/appProfiles/_a-zA-Z0-9*`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/appProfiles/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "Required. The subset of app profile fields which should be replaced.\nIf unset, all fields will be replaced.", + // "description": "Required. The subset of app profile fields which should be replaced. If unset, all fields will be replaced.", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -5556,10 +5631,8 @@ func (r *ProjectsInstancesClustersService) Create(parent string, cluster *Cluste } // ClusterId sets the optional parameter "clusterId": Required. The ID -// to be used when referring to the new cluster within its -// instance, -// e.g., just `mycluster` rather -// than +// to be used when referring to the new cluster within its instance, +// e.g., just `mycluster` rather than // `projects/myproject/instances/myinstance/clusters/mycluster`. func (c *ProjectsInstancesClustersCreateCall) ClusterId(clusterId string) *ProjectsInstancesClustersCreateCall { c.urlParams_.Set("clusterId", clusterId) @@ -5593,7 +5666,7 @@ func (c *ProjectsInstancesClustersCreateCall) Header() http.Header { func (c *ProjectsInstancesClustersCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5666,12 +5739,12 @@ func (c *ProjectsInstancesClustersCreateCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "clusterId": { - // "description": "Required. The ID to be used when referring to the new cluster within its instance,\ne.g., just `mycluster` rather than\n`projects/myproject/instances/myinstance/clusters/mycluster`.", + // "description": "Required. The ID to be used when referring to the new cluster within its instance, e.g., just `mycluster` rather than `projects/myproject/instances/myinstance/clusters/mycluster`.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The unique name of the instance in which to create the new cluster.\nValues are of the form\n`projects/{project}/instances/{instance}`.", + // "description": "Required. The unique name of the instance in which to create the new cluster. 
Values are of the form `projects/{project}/instances/{instance}`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -5741,7 +5814,7 @@ func (c *ProjectsInstancesClustersDeleteCall) Header() http.Header { func (c *ProjectsInstancesClustersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5809,7 +5882,7 @@ func (c *ProjectsInstancesClustersDeleteCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "name": { - // "description": "Required. The unique name of the cluster to be deleted. Values are of the form\n`projects/{project}/instances/{instance}/clusters/{cluster}`.", + // "description": "Required. The unique name of the cluster to be deleted. Values are of the form `projects/{project}/instances/{instance}/clusters/{cluster}`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+$", // "required": true, @@ -5887,7 +5960,7 @@ func (c *ProjectsInstancesClustersGetCall) Header() http.Header { func (c *ProjectsInstancesClustersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5958,7 +6031,7 @@ func (c *ProjectsInstancesClustersGetCall) Do(opts ...googleapi.CallOption) (*Cl // ], // "parameters": { // "name": { - // "description": "Required. The unique name of the requested cluster. Values are of the form\n`projects/{project}/instances/{instance}/clusters/{cluster}`.", + // "description": "Required. The unique name of the requested cluster. Values are of the form `projects/{project}/instances/{instance}/clusters/{cluster}`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+$", // "required": true, @@ -6044,7 +6117,7 @@ func (c *ProjectsInstancesClustersListCall) Header() http.Header { func (c *ProjectsInstancesClustersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6120,7 +6193,7 @@ func (c *ProjectsInstancesClustersListCall) Do(opts ...googleapi.CallOption) (*L // "type": "string" // }, // "parent": { - // "description": "Required. The unique name of the instance for which a list of clusters is requested.\nValues are of the form `projects/{project}/instances/{instance}`.\nUse `{instance} = '-'` to list Clusters for all Instances in a project,\ne.g., `projects/myproject/instances/-`.", + // "description": "Required. The unique name of the instance for which a list of clusters is requested. Values are of the form `projects/{project}/instances/{instance}`. 
Use `{instance} = '-'` to list Clusters for all Instances in a project, e.g., `projects/myproject/instances/-`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -6211,7 +6284,7 @@ func (c *ProjectsInstancesClustersUpdateCall) Header() http.Header { func (c *ProjectsInstancesClustersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6221,30 +6294,837 @@ func (c *ProjectsInstancesClustersUpdateCall) doRequest(alt string) (*http.Respo if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigtableadmin.projects.instances.clusters.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesClustersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a cluster within an instance.", + // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/clusters/{clustersId}", + // "httpMethod": "PUT", + // "id": "bigtableadmin.projects.instances.clusters.update", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The unique name of the cluster. 
Values are of the form `projects/{project}/instances/{instance}/clusters/a-z*`.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "request": { + // "$ref": "Cluster" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigtable.admin", + // "https://www.googleapis.com/auth/bigtable.admin.cluster", + // "https://www.googleapis.com/auth/bigtable.admin.instance", + // "https://www.googleapis.com/auth/cloud-bigtable.admin", + // "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "bigtableadmin.projects.instances.clusters.backups.create": + +type ProjectsInstancesClustersBackupsCreateCall struct { + s *Service + parent string + backup *Backup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Starts creating a new Cloud Bigtable Backup. The returned +// backup long-running operation can be used to track creation of the +// backup. The metadata field type is CreateBackupMetadata. The response +// field type is Backup, if successful. Cancelling the returned +// operation will stop the creation and delete the backup. +func (r *ProjectsInstancesClustersBackupsService) Create(parent string, backup *Backup) *ProjectsInstancesClustersBackupsCreateCall { + c := &ProjectsInstancesClustersBackupsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.backup = backup + return c +} + +// BackupId sets the optional parameter "backupId": Required. The id of +// the backup to be created. The `backup_id` along with the parent +// `parent` are combined as {parent}/backups/{backup_id} to create the +// full backup name, of the form: +// `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{b +// ackup_id}`. This string must be between 1 and 50 characters in length +// and match the regex _a-zA-Z0-9*. +func (c *ProjectsInstancesClustersBackupsCreateCall) BackupId(backupId string) *ProjectsInstancesClustersBackupsCreateCall { + c.urlParams_.Set("backupId", backupId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesClustersBackupsCreateCall) Fields(s ...googleapi.Field) *ProjectsInstancesClustersBackupsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesClustersBackupsCreateCall) Context(ctx context.Context) *ProjectsInstancesClustersBackupsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsInstancesClustersBackupsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesClustersBackupsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backup) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/backups") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigtableadmin.projects.instances.clusters.backups.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesClustersBackupsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Starts creating a new Cloud Bigtable Backup. The returned backup long-running operation can be used to track creation of the backup. The metadata field type is CreateBackupMetadata. The response field type is Backup, if successful. Cancelling the returned operation will stop the creation and delete the backup.", + // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/clusters/{clustersId}/backups", + // "httpMethod": "POST", + // "id": "bigtableadmin.projects.instances.clusters.backups.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "backupId": { + // "description": "Required. The id of the backup to be created. The `backup_id` along with the parent `parent` are combined as {parent}/backups/{backup_id} to create the full backup name, of the form: `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}`. This string must be between 1 and 50 characters in length and match the regex _a-zA-Z0-9*.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. This must be one of the clusters in the instance in which this table is located. 
The backup will be stored in this cluster. Values are of the form `projects/{project}/instances/{instance}/clusters/{cluster}`.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/backups", + // "request": { + // "$ref": "Backup" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigtable.admin", + // "https://www.googleapis.com/auth/bigtable.admin.table", + // "https://www.googleapis.com/auth/cloud-bigtable.admin", + // "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "bigtableadmin.projects.instances.clusters.backups.delete": + +type ProjectsInstancesClustersBackupsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a pending or completed Cloud Bigtable backup. +func (r *ProjectsInstancesClustersBackupsService) Delete(name string) *ProjectsInstancesClustersBackupsDeleteCall { + c := &ProjectsInstancesClustersBackupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesClustersBackupsDeleteCall) Fields(s ...googleapi.Field) *ProjectsInstancesClustersBackupsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesClustersBackupsDeleteCall) Context(ctx context.Context) *ProjectsInstancesClustersBackupsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesClustersBackupsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesClustersBackupsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigtableadmin.projects.instances.clusters.backups.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *ProjectsInstancesClustersBackupsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a pending or completed Cloud Bigtable backup.", + // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/clusters/{clustersId}/backups/{backupsId}", + // "httpMethod": "DELETE", + // "id": "bigtableadmin.projects.instances.clusters.backups.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. Name of the backup to delete. Values are of the form `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+/backups/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigtable.admin", + // "https://www.googleapis.com/auth/bigtable.admin.table", + // "https://www.googleapis.com/auth/cloud-bigtable.admin", + // "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "bigtableadmin.projects.instances.clusters.backups.get": + +type ProjectsInstancesClustersBackupsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets metadata on a pending or completed Cloud Bigtable Backup. +func (r *ProjectsInstancesClustersBackupsService) Get(name string) *ProjectsInstancesClustersBackupsGetCall { + c := &ProjectsInstancesClustersBackupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesClustersBackupsGetCall) Fields(s ...googleapi.Field) *ProjectsInstancesClustersBackupsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInstancesClustersBackupsGetCall) IfNoneMatch(entityTag string) *ProjectsInstancesClustersBackupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsInstancesClustersBackupsGetCall) Context(ctx context.Context) *ProjectsInstancesClustersBackupsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesClustersBackupsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesClustersBackupsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigtableadmin.projects.instances.clusters.backups.get" call. +// Exactly one of *Backup or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Backup.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesClustersBackupsGetCall) Do(opts ...googleapi.CallOption) (*Backup, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Backup{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets metadata on a pending or completed Cloud Bigtable Backup.", + // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/clusters/{clustersId}/backups/{backupsId}", + // "httpMethod": "GET", + // "id": "bigtableadmin.projects.instances.clusters.backups.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. Name of the backup. 
Values are of the form `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+/backups/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "Backup" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigtable.admin", + // "https://www.googleapis.com/auth/bigtable.admin.table", + // "https://www.googleapis.com/auth/cloud-bigtable.admin", + // "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "bigtableadmin.projects.instances.clusters.backups.getIamPolicy": + +type ProjectsInstancesClustersBackupsGetIamPolicyCall struct { + s *Service + resource string + getiampolicyrequest *GetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a Table resource. +// Returns an empty policy if the resource exists but does not have a +// policy set. +func (r *ProjectsInstancesClustersBackupsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsInstancesClustersBackupsGetIamPolicyCall { + c := &ProjectsInstancesClustersBackupsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.getiampolicyrequest = getiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesClustersBackupsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsInstancesClustersBackupsGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesClustersBackupsGetIamPolicyCall) Context(ctx context.Context) *ProjectsInstancesClustersBackupsGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesClustersBackupsGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesClustersBackupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigtableadmin.projects.instances.clusters.backups.getIamPolicy" call. 
+// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesClustersBackupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a Table resource. Returns an empty policy if the resource exists but does not have a policy set.", + // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/clusters/{clustersId}/backups/{backupsId}:getIamPolicy", + // "httpMethod": "POST", + // "id": "bigtableadmin.projects.instances.clusters.backups.getIamPolicy", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+/backups/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+resource}:getIamPolicy", + // "request": { + // "$ref": "GetIamPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigtable.admin", + // "https://www.googleapis.com/auth/bigtable.admin.table", + // "https://www.googleapis.com/auth/cloud-bigtable.admin", + // "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "bigtableadmin.projects.instances.clusters.backups.list": + +type ProjectsInstancesClustersBackupsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists Cloud Bigtable backups. Returns both completed and +// pending backups. +func (r *ProjectsInstancesClustersBackupsService) List(parent string) *ProjectsInstancesClustersBackupsListCall { + c := &ProjectsInstancesClustersBackupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters backups listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be <, >, <=, >=, !=, =, or :. +// Colon ':' represents a HAS operator which is roughly synonymous with +// equality. Filter rules are case insensitive. 
The fields eligible for +// filtering are: * `name` * `source_table` * `state` * `start_time` +// (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * `end_time` (and +// values are of the format YYYY-MM-DDTHH:MM:SSZ) * `expire_time` (and +// values are of the format YYYY-MM-DDTHH:MM:SSZ) * `size_bytes` To +// filter on multiple expressions, provide each separate expression +// within parentheses. By default, each expression is an AND expression. +// However, you can include AND, OR, and NOT expressions explicitly. +// Some examples of using filters are: * `name:"exact" --> The backup's +// name is the string "exact". * `name:howl` --> The backup's name +// contains the string "howl". * `source_table:prod` --> The +// source_table's name contains the string "prod". * `state:CREATING` +// --> The backup is pending creation. * `state:READY` --> The backup is +// fully created and ready for use. * `(name:howl) AND (start_time < +// \"2018-03-28T14:50:00Z\")` --> The backup name contains the string +// "howl" and start_time of the backup is before 2018-03-28T14:50:00Z. * +// `size_bytes > 10000000000` --> The backup's size is greater than 10GB +func (c *ProjectsInstancesClustersBackupsListCall) Filter(filter string) *ProjectsInstancesClustersBackupsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// OrderBy sets the optional parameter "orderBy": An expression for +// specifying the sort order of the results of the request. The string +// value should specify one or more fields in Backup. The full syntax is +// described at https://aip.dev/132#ordering. Fields supported are: * +// name * source_table * expire_time * start_time * end_time * +// size_bytes * state For example, "start_time". The default sorting +// order is ascending. To specify descending order for the field, a +// suffix " desc" should be appended to the field name. For example, +// "start_time desc". Redundant space characters in the syntax are +// insigificant. If order_by is empty, results will be sorted by +// `start_time` in descending order starting from the most recently +// created backup. +func (c *ProjectsInstancesClustersBackupsListCall) OrderBy(orderBy string) *ProjectsInstancesClustersBackupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageSize sets the optional parameter "pageSize": Number of backups to +// be returned in the response. If 0 or less, defaults to the server's +// maximum allowed page size. +func (c *ProjectsInstancesClustersBackupsListCall) PageSize(pageSize int64) *ProjectsInstancesClustersBackupsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If non-empty, +// `page_token` should contain a next_page_token from a previous +// ListBackupsResponse to the same `parent` and with the same `filter`. +func (c *ProjectsInstancesClustersBackupsListCall) PageToken(pageToken string) *ProjectsInstancesClustersBackupsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesClustersBackupsListCall) Fields(s ...googleapi.Field) *ProjectsInstancesClustersBackupsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInstancesClustersBackupsListCall) IfNoneMatch(entityTag string) *ProjectsInstancesClustersBackupsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesClustersBackupsListCall) Context(ctx context.Context) *ProjectsInstancesClustersBackupsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesClustersBackupsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesClustersBackupsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/backups") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "bigtableadmin.projects.instances.clusters.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsInstancesClustersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "bigtableadmin.projects.instances.clusters.backups.list" call. +// Exactly one of *ListBackupsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListBackupsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsInstancesClustersBackupsListCall) Do(opts ...googleapi.CallOption) (*ListBackupsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6263,7 +7143,7 @@ func (c *ProjectsInstancesClustersUpdateCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &ListBackupsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6275,68 +7155,113 @@ func (c *ProjectsInstancesClustersUpdateCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Updates a cluster within an instance.", - // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/clusters/{clustersId}", - // "httpMethod": "PUT", - // "id": "bigtableadmin.projects.instances.clusters.update", + // "description": "Lists Cloud Bigtable backups. Returns both completed and pending backups.", + // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/clusters/{clustersId}/backups", + // "httpMethod": "GET", + // "id": "bigtableadmin.projects.instances.clusters.backups.list", // "parameterOrder": [ - // "name" + // "parent" // ], // "parameters": { - // "name": { - // "description": "Required. (`OutputOnly`)\nThe unique name of the cluster. Values are of the form\n`projects/{project}/instances/{instance}/clusters/a-z*`.", + // "filter": { + // "description": "A filter expression that filters backups listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be \u003c, \u003e, \u003c=, \u003e=, !=, =, or :. Colon ':' represents a HAS operator which is roughly synonymous with equality. Filter rules are case insensitive. The fields eligible for filtering are: * `name` * `source_table` * `state` * `start_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * `end_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * `size_bytes` To filter on multiple expressions, provide each separate expression within parentheses. By default, each expression is an AND expression. However, you can include AND, OR, and NOT expressions explicitly. Some examples of using filters are: * `name:\"exact\"` --\u003e The backup's name is the string \"exact\". * `name:howl` --\u003e The backup's name contains the string \"howl\". * `source_table:prod` --\u003e The source_table's name contains the string \"prod\". * `state:CREATING` --\u003e The backup is pending creation. * `state:READY` --\u003e The backup is fully created and ready for use. * `(name:howl) AND (start_time \u003c \\\"2018-03-28T14:50:00Z\\\")` --\u003e The backup name contains the string \"howl\" and start_time of the backup is before 2018-03-28T14:50:00Z. * `size_bytes \u003e 10000000000` --\u003e The backup's size is greater than 10GB", + // "location": "query", + // "type": "string" + // }, + // "orderBy": { + // "description": "An expression for specifying the sort order of the results of the request. The string value should specify one or more fields in Backup. The full syntax is described at https://aip.dev/132#ordering. Fields supported are: * name * source_table * expire_time * start_time * end_time * size_bytes * state For example, \"start_time\". The default sorting order is ascending. To specify descending order for the field, a suffix \" desc\" should be appended to the field name. For example, \"start_time desc\". 
Redundant space characters in the syntax are insigificant. If order_by is empty, results will be sorted by `start_time` in descending order starting from the most recently created backup.", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "Number of backups to be returned in the response. If 0 or less, defaults to the server's maximum allowed page size.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "If non-empty, `page_token` should contain a next_page_token from a previous ListBackupsResponse to the same `parent` and with the same `filter`.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The cluster to list backups from. Values are of the form `projects/{project}/instances/{instance}/clusters/{cluster}`. Use `{cluster} = '-'` to list backups for all clusters in an instance, e.g., `projects/{project}/instances/{instance}/clusters/-`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+name}", - // "request": { - // "$ref": "Cluster" - // }, + // "path": "v2/{+parent}/backups", // "response": { - // "$ref": "Operation" + // "$ref": "ListBackupsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigtable.admin", - // "https://www.googleapis.com/auth/bigtable.admin.cluster", - // "https://www.googleapis.com/auth/bigtable.admin.instance", + // "https://www.googleapis.com/auth/bigtable.admin.table", // "https://www.googleapis.com/auth/cloud-bigtable.admin", - // "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + // "https://www.googleapis.com/auth/cloud-bigtable.admin.table", // "https://www.googleapis.com/auth/cloud-platform" // ] // } } -// method id "bigtableadmin.projects.instances.clusters.backups.getIamPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsInstancesClustersBackupsListCall) Pages(ctx context.Context, f func(*ListBackupsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ProjectsInstancesClustersBackupsGetIamPolicyCall struct { - s *Service - resource string - getiampolicyrequest *GetIamPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "bigtableadmin.projects.instances.clusters.backups.patch": + +type ProjectsInstancesClustersBackupsPatchCall struct { + s *Service + nameid string + backup *Backup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a Table -// resource. -// Returns an empty policy if the resource exists but does not have a -// policy -// set. 
-func (r *ProjectsInstancesClustersBackupsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsInstancesClustersBackupsGetIamPolicyCall { - c := &ProjectsInstancesClustersBackupsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.getiampolicyrequest = getiampolicyrequest +// Patch: Updates a pending or completed Cloud Bigtable Backup. +func (r *ProjectsInstancesClustersBackupsService) Patch(nameid string, backup *Backup) *ProjectsInstancesClustersBackupsPatchCall { + c := &ProjectsInstancesClustersBackupsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.nameid = nameid + c.backup = backup + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. A mask +// specifying which fields (e.g. `expire_time`) in the Backup resource +// should be updated. This mask is relative to the Backup resource, not +// to the request message. The field mask must always be specified; this +// prevents any future fields from being erased accidentally by clients +// that do not know about them. +func (c *ProjectsInstancesClustersBackupsPatchCall) UpdateMask(updateMask string) *ProjectsInstancesClustersBackupsPatchCall { + c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsInstancesClustersBackupsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsInstancesClustersBackupsGetIamPolicyCall { +func (c *ProjectsInstancesClustersBackupsPatchCall) Fields(s ...googleapi.Field) *ProjectsInstancesClustersBackupsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6344,56 +7269,56 @@ func (c *ProjectsInstancesClustersBackupsGetIamPolicyCall) Fields(s ...googleapi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsInstancesClustersBackupsGetIamPolicyCall) Context(ctx context.Context) *ProjectsInstancesClustersBackupsGetIamPolicyCall { +func (c *ProjectsInstancesClustersBackupsPatchCall) Context(ctx context.Context) *ProjectsInstancesClustersBackupsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsInstancesClustersBackupsGetIamPolicyCall) Header() http.Header { +func (c *ProjectsInstancesClustersBackupsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsInstancesClustersBackupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsInstancesClustersBackupsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, + "name": c.nameid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "bigtableadmin.projects.instances.clusters.backups.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// Do executes the "bigtableadmin.projects.instances.clusters.backups.patch" call. +// Exactly one of *Backup or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) +// *Backup.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ProjectsInstancesClustersBackupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *ProjectsInstancesClustersBackupsPatchCall) Do(opts ...googleapi.CallOption) (*Backup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6412,7 +7337,7 @@ func (c *ProjectsInstancesClustersBackupsGetIamPolicyCall) Do(opts ...googleapi. if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Backup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6424,28 +7349,34 @@ func (c *ProjectsInstancesClustersBackupsGetIamPolicyCall) Do(opts ...googleapi. 
} return ret, nil // { - // "description": "Gets the access control policy for a Table resource.\nReturns an empty policy if the resource exists but does not have a policy\nset.", - // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/clusters/{clustersId}/backups/{backupsId}:getIamPolicy", - // "httpMethod": "POST", - // "id": "bigtableadmin.projects.instances.clusters.backups.getIamPolicy", + // "description": "Updates a pending or completed Cloud Bigtable Backup.", + // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/clusters/{clustersId}/backups/{backupsId}", + // "httpMethod": "PATCH", + // "id": "bigtableadmin.projects.instances.clusters.backups.patch", // "parameterOrder": [ - // "resource" + // "name" // ], // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "name": { + // "description": "A globally unique identifier for the backup which cannot be changed. Values are of the form `projects/{project}/instances/{instance}/clusters/{cluster}/ backups/_a-zA-Z0-9*` The final segment of the name must be between 1 and 50 characters in length. The backup is stored in the cluster identified by the prefix of the backup name of the form `projects/{project}/instances/{instance}/clusters/{cluster}`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+/backups/[^/]+$", // "required": true, // "type": "string" + // }, + // "updateMask": { + // "description": "Required. A mask specifying which fields (e.g. `expire_time`) in the Backup resource should be updated. This mask is relative to the Backup resource, not to the request message. The field mask must always be specified; this prevents any future fields from being erased accidentally by clients that do not know about them.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, - // "path": "v2/{+resource}:getIamPolicy", + // "path": "v2/{+name}", // "request": { - // "$ref": "GetIamPolicyRequest" + // "$ref": "Backup" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Backup" // }, // "scopes": [ // "https://www.googleapis.com/auth/bigtable.admin", @@ -6469,8 +7400,7 @@ type ProjectsInstancesClustersBackupsSetIamPolicyCall struct { header_ http.Header } -// SetIamPolicy: Sets the access control policy on a Table -// resource. +// SetIamPolicy: Sets the access control policy on a Table resource. // Replaces any existing policy. func (r *ProjectsInstancesClustersBackupsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsInstancesClustersBackupsSetIamPolicyCall { c := &ProjectsInstancesClustersBackupsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -6506,7 +7436,7 @@ func (c *ProjectsInstancesClustersBackupsSetIamPolicyCall) Header() http.Header func (c *ProjectsInstancesClustersBackupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6570,7 +7500,7 @@ func (c *ProjectsInstancesClustersBackupsSetIamPolicyCall) Do(opts ...googleapi. 
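// ---------------------------------------------------------------------------
// Editor's aside (illustrative only, not part of the vendored file): a minimal
// sketch of how the backup List and Patch calls introduced in this update
// might be used from the generated client. The project/instance/cluster/backup
// names are placeholders, and the Backups/ExpireTime field names are assumed
// from the method documentation above rather than verified against a build.
// ---------------------------------------------------------------------------
package main

import (
	"context"
	"fmt"
	"log"

	bigtableadmin "google.golang.org/api/bigtableadmin/v2"
)

func main() {
	ctx := context.Background()
	svc, err := bigtableadmin.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// List READY backups across all clusters of an instance, newest first,
	// letting Pages handle the page_token bookkeeping.
	parent := "projects/my-project/instances/my-instance/clusters/-"
	err = svc.Projects.Instances.Clusters.Backups.List(parent).
		Filter("state:READY").
		OrderBy("start_time desc").
		Pages(ctx, func(page *bigtableadmin.ListBackupsResponse) error {
			for _, b := range page.Backups {
				fmt.Println(b.Name, b.ExpireTime)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}

	// Extend one backup's expiration; updateMask must name the field being
	// changed, as required by the Patch documentation above.
	name := "projects/my-project/instances/my-instance/clusters/my-cluster/backups/my-backup"
	if _, err := svc.Projects.Instances.Clusters.Backups.Patch(name, &bigtableadmin.Backup{
		ExpireTime: "2021-06-01T00:00:00Z",
	}).UpdateMask("expire_time").Do(); err != nil {
		log.Fatal(err)
	}
}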
} return ret, nil // { - // "description": "Sets the access control policy on a Table resource.\nReplaces any existing policy.", + // "description": "Sets the access control policy on a Table resource. Replaces any existing policy.", // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/clusters/{clustersId}/backups/{backupsId}:setIamPolicy", // "httpMethod": "POST", // "id": "bigtableadmin.projects.instances.clusters.backups.setIamPolicy", @@ -6579,7 +7509,7 @@ func (c *ProjectsInstancesClustersBackupsSetIamPolicyCall) Do(opts ...googleapi. // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+/backups/[^/]+$", // "required": true, @@ -6651,7 +7581,7 @@ func (c *ProjectsInstancesClustersBackupsTestIamPermissionsCall) Header() http.H func (c *ProjectsInstancesClustersBackupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6724,7 +7654,7 @@ func (c *ProjectsInstancesClustersBackupsTestIamPermissionsCall) Do(opts ...goog // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/clusters/[^/]+/backups/[^/]+$", // "required": true, @@ -6761,10 +7691,8 @@ type ProjectsInstancesTablesCheckConsistencyCall struct { } // CheckConsistency: Checks replication consistency based on a -// consistency token, that is, if -// replication has caught up based on the conditions specified in the -// token -// and the check request. +// consistency token, that is, if replication has caught up based on the +// conditions specified in the token and the check request. 
func (r *ProjectsInstancesTablesService) CheckConsistency(name string, checkconsistencyrequest *CheckConsistencyRequest) *ProjectsInstancesTablesCheckConsistencyCall { c := &ProjectsInstancesTablesCheckConsistencyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -6799,7 +7727,7 @@ func (c *ProjectsInstancesTablesCheckConsistencyCall) Header() http.Header { func (c *ProjectsInstancesTablesCheckConsistencyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6863,7 +7791,7 @@ func (c *ProjectsInstancesTablesCheckConsistencyCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Checks replication consistency based on a consistency token, that is, if\nreplication has caught up based on the conditions specified in the token\nand the check request.", + // "description": "Checks replication consistency based on a consistency token, that is, if replication has caught up based on the conditions specified in the token and the check request.", // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}:checkConsistency", // "httpMethod": "POST", // "id": "bigtableadmin.projects.instances.tables.checkConsistency", @@ -6872,7 +7800,7 @@ func (c *ProjectsInstancesTablesCheckConsistencyCall) Do(opts ...googleapi.CallO // ], // "parameters": { // "name": { - // "description": "Required. The unique name of the Table for which to check replication consistency.\nValues are of the form\n`projects/{project}/instances/{instance}/tables/{table}`.", + // "description": "Required. The unique name of the Table for which to check replication consistency. Values are of the form `projects/{project}/instances/{instance}/tables/{table}`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", // "required": true, @@ -6908,10 +7836,9 @@ type ProjectsInstancesTablesCreateCall struct { header_ http.Header } -// Create: Creates a new table in the specified instance. -// The table can be created with a full set of initial column -// families, -// specified in the request. +// Create: Creates a new table in the specified instance. The table can +// be created with a full set of initial column families, specified in +// the request. func (r *ProjectsInstancesTablesService) Create(parent string, createtablerequest *CreateTableRequest) *ProjectsInstancesTablesCreateCall { c := &ProjectsInstancesTablesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6946,7 +7873,7 @@ func (c *ProjectsInstancesTablesCreateCall) Header() http.Header { func (c *ProjectsInstancesTablesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7010,7 +7937,7 @@ func (c *ProjectsInstancesTablesCreateCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Creates a new table in the specified instance.\nThe table can be created with a full set of initial column families,\nspecified in the request.", + // "description": "Creates a new table in the specified instance. 
The table can be created with a full set of initial column families, specified in the request.", // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables", // "httpMethod": "POST", // "id": "bigtableadmin.projects.instances.tables.create", @@ -7019,7 +7946,7 @@ func (c *ProjectsInstancesTablesCreateCall) Do(opts ...googleapi.CallOption) (*T // ], // "parameters": { // "parent": { - // "description": "Required. The unique name of the instance in which to create the table.\nValues are of the form `projects/{project}/instances/{instance}`.", + // "description": "Required. The unique name of the instance in which to create the table. Values are of the form `projects/{project}/instances/{instance}`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -7088,7 +8015,7 @@ func (c *ProjectsInstancesTablesDeleteCall) Header() http.Header { func (c *ProjectsInstancesTablesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7156,7 +8083,7 @@ func (c *ProjectsInstancesTablesDeleteCall) Do(opts ...googleapi.CallOption) (*E // ], // "parameters": { // "name": { - // "description": "Required. The unique name of the table to be deleted.\nValues are of the form\n`projects/{project}/instances/{instance}/tables/{table}`.", + // "description": "Required. The unique name of the table to be deleted. Values are of the form `projects/{project}/instances/{instance}/tables/{table}`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", // "required": true, @@ -7190,10 +8117,8 @@ type ProjectsInstancesTablesDropRowRangeCall struct { } // DropRowRange: Permanently drop/delete a row range from a specified -// table. The request can -// specify whether to delete all rows in a table, or only those that -// match a -// particular prefix. +// table. The request can specify whether to delete all rows in a table, +// or only those that match a particular prefix. func (r *ProjectsInstancesTablesService) DropRowRange(name string, droprowrangerequest *DropRowRangeRequest) *ProjectsInstancesTablesDropRowRangeCall { c := &ProjectsInstancesTablesDropRowRangeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -7228,7 +8153,7 @@ func (c *ProjectsInstancesTablesDropRowRangeCall) Header() http.Header { func (c *ProjectsInstancesTablesDropRowRangeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7292,7 +8217,7 @@ func (c *ProjectsInstancesTablesDropRowRangeCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Permanently drop/delete a row range from a specified table. The request can\nspecify whether to delete all rows in a table, or only those that match a\nparticular prefix.", + // "description": "Permanently drop/delete a row range from a specified table. 
The request can specify whether to delete all rows in a table, or only those that match a particular prefix.", // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}:dropRowRange", // "httpMethod": "POST", // "id": "bigtableadmin.projects.instances.tables.dropRowRange", @@ -7301,7 +8226,7 @@ func (c *ProjectsInstancesTablesDropRowRangeCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "name": { - // "description": "Required. The unique name of the table on which to drop a range of rows.\nValues are of the form\n`projects/{project}/instances/{instance}/tables/{table}`.", + // "description": "Required. The unique name of the table on which to drop a range of rows. Values are of the form `projects/{project}/instances/{instance}/tables/{table}`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", // "required": true, @@ -7338,12 +8263,9 @@ type ProjectsInstancesTablesGenerateConsistencyTokenCall struct { } // GenerateConsistencyToken: Generates a consistency token for a Table, -// which can be used in -// CheckConsistency to check whether mutations to the table that -// finished -// before this call started have been replicated. The tokens will be -// available -// for 90 days. +// which can be used in CheckConsistency to check whether mutations to +// the table that finished before this call started have been +// replicated. The tokens will be available for 90 days. func (r *ProjectsInstancesTablesService) GenerateConsistencyToken(name string, generateconsistencytokenrequest *GenerateConsistencyTokenRequest) *ProjectsInstancesTablesGenerateConsistencyTokenCall { c := &ProjectsInstancesTablesGenerateConsistencyTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -7378,7 +8300,7 @@ func (c *ProjectsInstancesTablesGenerateConsistencyTokenCall) Header() http.Head func (c *ProjectsInstancesTablesGenerateConsistencyTokenCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7442,7 +8364,7 @@ func (c *ProjectsInstancesTablesGenerateConsistencyTokenCall) Do(opts ...googlea } return ret, nil // { - // "description": "Generates a consistency token for a Table, which can be used in\nCheckConsistency to check whether mutations to the table that finished\nbefore this call started have been replicated. The tokens will be available\nfor 90 days.", + // "description": "Generates a consistency token for a Table, which can be used in CheckConsistency to check whether mutations to the table that finished before this call started have been replicated. The tokens will be available for 90 days.", // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}:generateConsistencyToken", // "httpMethod": "POST", // "id": "bigtableadmin.projects.instances.tables.generateConsistencyToken", @@ -7451,7 +8373,7 @@ func (c *ProjectsInstancesTablesGenerateConsistencyTokenCall) Do(opts ...googlea // ], // "parameters": { // "name": { - // "description": "Required. The unique name of the Table for which to create a consistency token.\nValues are of the form\n`projects/{project}/instances/{instance}/tables/{table}`.", + // "description": "Required. The unique name of the Table for which to create a consistency token. 
Values are of the form `projects/{project}/instances/{instance}/tables/{table}`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", // "required": true, @@ -7495,15 +8417,18 @@ func (r *ProjectsInstancesTablesService) Get(name string) *ProjectsInstancesTabl } // View sets the optional parameter "view": The view to be applied to -// the returned table's fields. -// Defaults to `SCHEMA_VIEW` if unspecified. +// the returned table's fields. Defaults to `SCHEMA_VIEW` if +// unspecified. // // Possible values: -// "VIEW_UNSPECIFIED" -// "NAME_ONLY" -// "SCHEMA_VIEW" -// "REPLICATION_VIEW" -// "FULL" +// "VIEW_UNSPECIFIED" - Uses the default view for each method as +// documented in its request. +// "NAME_ONLY" - Only populates `name`. +// "SCHEMA_VIEW" - Only populates `name` and fields related to the +// table's schema. +// "REPLICATION_VIEW" - Only populates `name` and fields related to +// the table's replication state. +// "FULL" - Populates all fields. func (c *ProjectsInstancesTablesGetCall) View(view string) *ProjectsInstancesTablesGetCall { c.urlParams_.Set("view", view) return c @@ -7546,7 +8471,7 @@ func (c *ProjectsInstancesTablesGetCall) Header() http.Header { func (c *ProjectsInstancesTablesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7617,14 +8542,14 @@ func (c *ProjectsInstancesTablesGetCall) Do(opts ...googleapi.CallOption) (*Tabl // ], // "parameters": { // "name": { - // "description": "Required. The unique name of the requested table.\nValues are of the form\n`projects/{project}/instances/{instance}/tables/{table}`.", + // "description": "Required. The unique name of the requested table. Values are of the form `projects/{project}/instances/{instance}/tables/{table}`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", // "required": true, // "type": "string" // }, // "view": { - // "description": "The view to be applied to the returned table's fields.\nDefaults to `SCHEMA_VIEW` if unspecified.", + // "description": "The view to be applied to the returned table's fields. Defaults to `SCHEMA_VIEW` if unspecified.", // "enum": [ // "VIEW_UNSPECIFIED", // "NAME_ONLY", @@ -7632,6 +8557,13 @@ func (c *ProjectsInstancesTablesGetCall) Do(opts ...googleapi.CallOption) (*Tabl // "REPLICATION_VIEW", // "FULL" // ], + // "enumDescriptions": [ + // "Uses the default view for each method as documented in its request.", + // "Only populates `name`.", + // "Only populates `name` and fields related to the table's schema.", + // "Only populates `name` and fields related to the table's replication state.", + // "Populates all fields." + // ], // "location": "query", // "type": "string" // } @@ -7663,11 +8595,9 @@ type ProjectsInstancesTablesGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a Table -// resource. +// GetIamPolicy: Gets the access control policy for a Table resource. // Returns an empty policy if the resource exists but does not have a -// policy -// set. +// policy set. 
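// ---------------------------------------------------------------------------
// Editor's aside (illustrative only, not part of the vendored file): a hedged
// sketch of the consistency-token flow the comments above describe: generate a
// token for a table, then poll CheckConsistency until replication has caught
// up. The table name is a placeholder; the ConsistencyToken/Consistent field
// names are assumed from the API docs.
// ---------------------------------------------------------------------------
package main

import (
	"context"
	"log"
	"time"

	bigtableadmin "google.golang.org/api/bigtableadmin/v2"
)

func waitForReplication(ctx context.Context, svc *bigtableadmin.Service, table string) error {
	gen, err := svc.Projects.Instances.Tables.
		GenerateConsistencyToken(table, &bigtableadmin.GenerateConsistencyTokenRequest{}).Do()
	if err != nil {
		return err
	}
	for {
		res, err := svc.Projects.Instances.Tables.
			CheckConsistency(table, &bigtableadmin.CheckConsistencyRequest{
				ConsistencyToken: gen.ConsistencyToken,
			}).Do()
		if err != nil {
			return err
		}
		if res.Consistent {
			return nil
		}
		time.Sleep(5 * time.Second) // tokens stay valid for 90 days, so slow polling is fine
	}
}

func main() {
	ctx := context.Background()
	svc, err := bigtableadmin.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if err := waitForReplication(ctx, svc,
		"projects/my-project/instances/my-instance/tables/my-table"); err != nil {
		log.Fatal(err)
	}
}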
func (r *ProjectsInstancesTablesService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsInstancesTablesGetIamPolicyCall { c := &ProjectsInstancesTablesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7702,7 +8632,7 @@ func (c *ProjectsInstancesTablesGetIamPolicyCall) Header() http.Header { func (c *ProjectsInstancesTablesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7766,7 +8696,7 @@ func (c *ProjectsInstancesTablesGetIamPolicyCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Gets the access control policy for a Table resource.\nReturns an empty policy if the resource exists but does not have a policy\nset.", + // "description": "Gets the access control policy for a Table resource. Returns an empty policy if the resource exists but does not have a policy set.", // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}:getIamPolicy", // "httpMethod": "POST", // "id": "bigtableadmin.projects.instances.tables.getIamPolicy", @@ -7775,7 +8705,7 @@ func (c *ProjectsInstancesTablesGetIamPolicyCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", // "required": true, @@ -7819,19 +8749,12 @@ func (r *ProjectsInstancesTablesService) List(parent string) *ProjectsInstancesT } // PageSize sets the optional parameter "pageSize": Maximum number of -// results per page. -// -// A page_size of zero lets the server choose the number of items to -// return. -// A page_size which is strictly positive will return at most that many -// items. -// A negative page_size will cause an error. -// -// Following the first request, subsequent paginated calls are not -// required -// to pass a page_size. If a page_size is set in subsequent calls, it -// must -// match the page_size given in the first request. +// results per page. A page_size of zero lets the server choose the +// number of items to return. A page_size which is strictly positive +// will return at most that many items. A negative page_size will cause +// an error. Following the first request, subsequent paginated calls are +// not required to pass a page_size. If a page_size is set in subsequent +// calls, it must match the page_size given in the first request. func (c *ProjectsInstancesTablesListCall) PageSize(pageSize int64) *ProjectsInstancesTablesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c @@ -7845,15 +8768,18 @@ func (c *ProjectsInstancesTablesListCall) PageToken(pageToken string) *ProjectsI } // View sets the optional parameter "view": The view to be applied to -// the returned tables' fields. -// Only NAME_ONLY view (default) and REPLICATION_VIEW are supported. +// the returned tables' fields. Only NAME_ONLY view (default) and +// REPLICATION_VIEW are supported. 
// // Possible values: -// "VIEW_UNSPECIFIED" -// "NAME_ONLY" -// "SCHEMA_VIEW" -// "REPLICATION_VIEW" -// "FULL" +// "VIEW_UNSPECIFIED" - Uses the default view for each method as +// documented in its request. +// "NAME_ONLY" - Only populates `name`. +// "SCHEMA_VIEW" - Only populates `name` and fields related to the +// table's schema. +// "REPLICATION_VIEW" - Only populates `name` and fields related to +// the table's replication state. +// "FULL" - Populates all fields. func (c *ProjectsInstancesTablesListCall) View(view string) *ProjectsInstancesTablesListCall { c.urlParams_.Set("view", view) return c @@ -7896,7 +8822,7 @@ func (c *ProjectsInstancesTablesListCall) Header() http.Header { func (c *ProjectsInstancesTablesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7967,7 +8893,7 @@ func (c *ProjectsInstancesTablesListCall) Do(opts ...googleapi.CallOption) (*Lis // ], // "parameters": { // "pageSize": { - // "description": "Maximum number of results per page.\n\nA page_size of zero lets the server choose the number of items to return.\nA page_size which is strictly positive will return at most that many items.\nA negative page_size will cause an error.\n\nFollowing the first request, subsequent paginated calls are not required\nto pass a page_size. If a page_size is set in subsequent calls, it must\nmatch the page_size given in the first request.", + // "description": "Maximum number of results per page. A page_size of zero lets the server choose the number of items to return. A page_size which is strictly positive will return at most that many items. A negative page_size will cause an error. Following the first request, subsequent paginated calls are not required to pass a page_size. If a page_size is set in subsequent calls, it must match the page_size given in the first request.", // "format": "int32", // "location": "query", // "type": "integer" @@ -7978,14 +8904,14 @@ func (c *ProjectsInstancesTablesListCall) Do(opts ...googleapi.CallOption) (*Lis // "type": "string" // }, // "parent": { - // "description": "Required. The unique name of the instance for which tables should be listed.\nValues are of the form `projects/{project}/instances/{instance}`.", + // "description": "Required. The unique name of the instance for which tables should be listed. Values are of the form `projects/{project}/instances/{instance}`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, // "type": "string" // }, // "view": { - // "description": "The view to be applied to the returned tables' fields.\nOnly NAME_ONLY view (default) and REPLICATION_VIEW are supported.", + // "description": "The view to be applied to the returned tables' fields. 
Only NAME_ONLY view (default) and REPLICATION_VIEW are supported.", // "enum": [ // "VIEW_UNSPECIFIED", // "NAME_ONLY", @@ -7993,6 +8919,13 @@ func (c *ProjectsInstancesTablesListCall) Do(opts ...googleapi.CallOption) (*Lis // "REPLICATION_VIEW", // "FULL" // ], + // "enumDescriptions": [ + // "Uses the default view for each method as documented in its request.", + // "Only populates `name`.", + // "Only populates `name` and fields related to the table's schema.", + // "Only populates `name` and fields related to the table's replication state.", + // "Populates all fields." + // ], // "location": "query", // "type": "string" // } @@ -8046,12 +8979,10 @@ type ProjectsInstancesTablesModifyColumnFamiliesCall struct { } // ModifyColumnFamilies: Performs a series of column family -// modifications on the specified table. -// Either all or none of the modifications will occur before this -// method -// returns, but data requests received prior to that point may see a -// table -// where only some modifications have taken effect. +// modifications on the specified table. Either all or none of the +// modifications will occur before this method returns, but data +// requests received prior to that point may see a table where only some +// modifications have taken effect. func (r *ProjectsInstancesTablesService) ModifyColumnFamilies(name string, modifycolumnfamiliesrequest *ModifyColumnFamiliesRequest) *ProjectsInstancesTablesModifyColumnFamiliesCall { c := &ProjectsInstancesTablesModifyColumnFamiliesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -8086,7 +9017,7 @@ func (c *ProjectsInstancesTablesModifyColumnFamiliesCall) Header() http.Header { func (c *ProjectsInstancesTablesModifyColumnFamiliesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8150,7 +9081,7 @@ func (c *ProjectsInstancesTablesModifyColumnFamiliesCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Performs a series of column family modifications on the specified table.\nEither all or none of the modifications will occur before this method\nreturns, but data requests received prior to that point may see a table\nwhere only some modifications have taken effect.", + // "description": "Performs a series of column family modifications on the specified table. Either all or none of the modifications will occur before this method returns, but data requests received prior to that point may see a table where only some modifications have taken effect.", // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}:modifyColumnFamilies", // "httpMethod": "POST", // "id": "bigtableadmin.projects.instances.tables.modifyColumnFamilies", @@ -8159,7 +9090,7 @@ func (c *ProjectsInstancesTablesModifyColumnFamiliesCall) Do(opts ...googleapi.C // ], // "parameters": { // "name": { - // "description": "Required. The unique name of the table whose families should be modified.\nValues are of the form\n`projects/{project}/instances/{instance}/tables/{table}`.", + // "description": "Required. The unique name of the table whose families should be modified. 
Values are of the form `projects/{project}/instances/{instance}/tables/{table}`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", // "required": true, @@ -8184,6 +9115,155 @@ func (c *ProjectsInstancesTablesModifyColumnFamiliesCall) Do(opts ...googleapi.C } +// method id "bigtableadmin.projects.instances.tables.restore": + +type ProjectsInstancesTablesRestoreCall struct { + s *Service + parent string + restoretablerequest *RestoreTableRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Restore: Create a new table by restoring from a completed backup. The +// new table must be in the same instance as the instance containing the +// backup. The returned table long-running operation can be used to +// track the progress of the operation, and to cancel it. The metadata +// field type is RestoreTableMetadata. The response type is Table, if +// successful. +func (r *ProjectsInstancesTablesService) Restore(parent string, restoretablerequest *RestoreTableRequest) *ProjectsInstancesTablesRestoreCall { + c := &ProjectsInstancesTablesRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.restoretablerequest = restoretablerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesTablesRestoreCall) Fields(s ...googleapi.Field) *ProjectsInstancesTablesRestoreCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesTablesRestoreCall) Context(ctx context.Context) *ProjectsInstancesTablesRestoreCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesTablesRestoreCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesTablesRestoreCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.restoretablerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/tables:restore") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigtableadmin.projects.instances.tables.restore" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesTablesRestoreCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Create a new table by restoring from a completed backup. The new table must be in the same instance as the instance containing the backup. The returned table long-running operation can be used to track the progress of the operation, and to cancel it. The metadata field type is RestoreTableMetadata. The response type is Table, if successful.", + // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables:restore", + // "httpMethod": "POST", + // "id": "bigtableadmin.projects.instances.tables.restore", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The name of the instance in which to create the restored table. This instance must be the parent of the source backup. Values are of the form `projects//instances/`.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/tables:restore", + // "request": { + // "$ref": "RestoreTableRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigtable.admin", + // "https://www.googleapis.com/auth/bigtable.admin.table", + // "https://www.googleapis.com/auth/cloud-bigtable.admin", + // "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + // method id "bigtableadmin.projects.instances.tables.setIamPolicy": type ProjectsInstancesTablesSetIamPolicyCall struct { @@ -8195,8 +9275,7 @@ type ProjectsInstancesTablesSetIamPolicyCall struct { header_ http.Header } -// SetIamPolicy: Sets the access control policy on a Table -// resource. +// SetIamPolicy: Sets the access control policy on a Table resource. // Replaces any existing policy. 
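// ---------------------------------------------------------------------------
// Editor's aside (illustrative only, not part of the vendored file): a sketch
// of the new tables.restore call, which creates a table from a completed
// backup and returns a long-running Operation. The resource names are
// placeholders, and polling the operation through svc.Operations.Get is an
// assumption based on the usual generated-client layout, not on this diff.
// ---------------------------------------------------------------------------
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	bigtableadmin "google.golang.org/api/bigtableadmin/v2"
)

func main() {
	ctx := context.Background()
	svc, err := bigtableadmin.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	parent := "projects/my-project/instances/my-instance"
	op, err := svc.Projects.Instances.Tables.Restore(parent, &bigtableadmin.RestoreTableRequest{
		TableId: "restored-table", // final segment of the new table name
		Backup:  "projects/my-project/instances/my-instance/clusters/my-cluster/backups/my-backup",
	}).Do()
	if err != nil {
		log.Fatal(err)
	}

	// The metadata type is RestoreTableMetadata and the response is a Table;
	// here we only wait for the operation to finish.
	for !op.Done {
		time.Sleep(10 * time.Second)
		if op, err = svc.Operations.Get(op.Name).Do(); err != nil {
			log.Fatal(err)
		}
	}
	if op.Error != nil {
		log.Fatalf("restore failed: %v", op.Error.Message)
	}
	fmt.Println("restored table is ready")
}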
func (r *ProjectsInstancesTablesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsInstancesTablesSetIamPolicyCall { c := &ProjectsInstancesTablesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -8232,7 +9311,7 @@ func (c *ProjectsInstancesTablesSetIamPolicyCall) Header() http.Header { func (c *ProjectsInstancesTablesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8296,7 +9375,7 @@ func (c *ProjectsInstancesTablesSetIamPolicyCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Sets the access control policy on a Table resource.\nReplaces any existing policy.", + // "description": "Sets the access control policy on a Table resource. Replaces any existing policy.", // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}:setIamPolicy", // "httpMethod": "POST", // "id": "bigtableadmin.projects.instances.tables.setIamPolicy", @@ -8305,7 +9384,7 @@ func (c *ProjectsInstancesTablesSetIamPolicyCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", // "required": true, @@ -8377,7 +9456,7 @@ func (c *ProjectsInstancesTablesTestIamPermissionsCall) Header() http.Header { func (c *ProjectsInstancesTablesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8450,7 +9529,7 @@ func (c *ProjectsInstancesTablesTestIamPermissionsCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. 
See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+$", // "required": true, @@ -8530,7 +9609,7 @@ func (c *ProjectsLocationsGetCall) Header() http.Header { func (c *ProjectsLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8702,7 +9781,7 @@ func (c *ProjectsLocationsListCall) Header() http.Header { func (c *ProjectsLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-api.json b/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-api.json index 17c43dac929..dfdf15a323e 100644 --- a/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-api.json +++ b/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-api.json @@ -2,6 +2,12 @@ "auth": { "oauth2": { "scopes": { + "https://www.googleapis.com/auth/cloud-billing": { + "description": "View and manage your Google Cloud Platform billing accounts" + }, + "https://www.googleapis.com/auth/cloud-billing.readonly": { + "description": "View your Google Cloud Platform billing accounts" + }, "https://www.googleapis.com/auth/cloud-platform": { "description": "View and manage your data across Google Cloud Platform services" } @@ -12,7 +18,7 @@ "baseUrl": "https://cloudbilling.googleapis.com/", "batchPath": "batch", "canonicalName": "Cloudbilling", - "description": "Allows developers to manage billing for their Google Cloud Platform projects\n programmatically.", + "description": "Allows developers to manage billing for their Google Cloud Platform projects programmatically.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/billing/", "fullyEncodeReservedExpansion": true, @@ -108,7 +114,7 @@ "billingAccounts": { "methods": { "create": { - "description": "Creates a billing account.\nThis method can only be used to create\n[billing subaccounts](https://cloud.google.com/billing/docs/concepts)\nby GCP resellers.\nWhen creating a subaccount, the current authenticated user must have the\n`billing.accounts.update` IAM permission on the master account, which is\ntypically given to billing account\n[administrators](https://cloud.google.com/billing/docs/how-to/billing-access).\nThis method will return an error if the master account has not been\nprovisioned as a reseller account.", + "description": "Creates a billing account. This method can only be used to create [billing subaccounts](https://cloud.google.com/billing/docs/concepts) by Google Cloud resellers. When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the master account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). 
This method will return an error if the master account has not been provisioned as a reseller account.", "flatPath": "v1/billingAccounts", "httpMethod": "POST", "id": "cloudbilling.billingAccounts.create", @@ -122,11 +128,12 @@ "$ref": "BillingAccount" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", "https://www.googleapis.com/auth/cloud-platform" ] }, "get": { - "description": "Gets information about a billing account. The current authenticated user\nmust be a [viewer of the billing\naccount](https://cloud.google.com/billing/docs/how-to/billing-access).", + "description": "Gets information about a billing account. The current authenticated user must be a [viewer of the billing account](https://cloud.google.com/billing/docs/how-to/billing-access).", "flatPath": "v1/billingAccounts/{billingAccountsId}", "httpMethod": "GET", "id": "cloudbilling.billingAccounts.get", @@ -135,7 +142,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the billing account to retrieve. For example,\n`billingAccounts/012345-567890-ABCDEF`.", + "description": "Required. The resource name of the billing account to retrieve. For example, `billingAccounts/012345-567890-ABCDEF`.", "location": "path", "pattern": "^billingAccounts/[^/]+$", "required": true, @@ -147,11 +154,13 @@ "$ref": "BillingAccount" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-billing.readonly", "https://www.googleapis.com/auth/cloud-platform" ] }, "getIamPolicy": { - "description": "Gets the access control policy for a billing account.\nThe caller must have the `billing.accounts.getIamPolicy` permission on the\naccount, which is often given to billing account\n[viewers](https://cloud.google.com/billing/docs/how-to/billing-access).", + "description": "Gets the access control policy for a billing account. The caller must have the `billing.accounts.getIamPolicy` permission on the account, which is often given to billing account [viewers](https://cloud.google.com/billing/docs/how-to/billing-access).", "flatPath": "v1/billingAccounts/{billingAccountsId}:getIamPolicy", "httpMethod": "GET", "id": "cloudbilling.billingAccounts.getIamPolicy", @@ -160,13 +169,13 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. 
To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" }, "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^billingAccounts/[^/]+$", "required": true, @@ -178,29 +187,31 @@ "$ref": "Policy" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-billing.readonly", "https://www.googleapis.com/auth/cloud-platform" ] }, "list": { - "description": "Lists the billing accounts that the current authenticated user has\npermission to\n[view](https://cloud.google.com/billing/docs/how-to/billing-access).", + "description": "Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).", "flatPath": "v1/billingAccounts", "httpMethod": "GET", "id": "cloudbilling.billingAccounts.list", "parameterOrder": [], "parameters": { "filter": { - "description": "Options for how to filter the returned billing accounts.\nCurrently this only supports filtering for\n[subaccounts](https://cloud.google.com/billing/docs/concepts) under a\nsingle provided reseller billing account.\n(e.g. \"master_billing_account=billingAccounts/012345-678901-ABCDEF\").\nBoolean algebra and other fields are not currently supported.", + "description": "Options for how to filter the returned billing accounts. Currently this only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided reseller billing account. (e.g. \"master_billing_account=billingAccounts/012345-678901-ABCDEF\"). Boolean algebra and other fields are not currently supported.", "location": "query", "type": "string" }, "pageSize": { - "description": "Requested page size. The maximum page size is 100; this is also the\ndefault.", + "description": "Requested page size. The maximum page size is 100; this is also the default.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "A token identifying a page of results to return. This should be a\n`next_page_token` value returned from a previous `ListBillingAccounts`\ncall. If unspecified, the first page of results is returned.", + "description": "A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned.", "location": "query", "type": "string" } @@ -210,11 +221,13 @@ "$ref": "ListBillingAccountsResponse" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-billing.readonly", "https://www.googleapis.com/auth/cloud-platform" ] }, "patch": { - "description": "Updates a billing account's fields.\nCurrently the only field that can be edited is `display_name`.\nThe current authenticated user must have the `billing.accounts.update`\nIAM permission, which is typically given to the\n[administrator](https://cloud.google.com/billing/docs/how-to/billing-access)\nof the billing account.", + "description": "Updates a billing account's fields. 
Currently the only field that can be edited is `display_name`. The current authenticated user must have the `billing.accounts.update` IAM permission, which is typically given to the [administrator](https://cloud.google.com/billing/docs/how-to/billing-access) of the billing account.", "flatPath": "v1/billingAccounts/{billingAccountsId}", "httpMethod": "PATCH", "id": "cloudbilling.billingAccounts.patch", @@ -230,7 +243,7 @@ "type": "string" }, "updateMask": { - "description": "The update mask applied to the resource.\nOnly \"display_name\" is currently supported.", + "description": "The update mask applied to the resource. Only \"display_name\" is currently supported.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -244,11 +257,12 @@ "$ref": "BillingAccount" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", "https://www.googleapis.com/auth/cloud-platform" ] }, "setIamPolicy": { - "description": "Sets the access control policy for a billing account. Replaces any existing\npolicy.\nThe caller must have the `billing.accounts.setIamPolicy` permission on the\naccount, which is often given to billing account\n[administrators](https://cloud.google.com/billing/docs/how-to/billing-access).", + "description": "Sets the access control policy for a billing account. Replaces any existing policy. The caller must have the `billing.accounts.setIamPolicy` permission on the account, which is often given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access).", "flatPath": "v1/billingAccounts/{billingAccountsId}:setIamPolicy", "httpMethod": "POST", "id": "cloudbilling.billingAccounts.setIamPolicy", @@ -257,7 +271,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^billingAccounts/[^/]+$", "required": true, @@ -272,11 +286,12 @@ "$ref": "Policy" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", "https://www.googleapis.com/auth/cloud-platform" ] }, "testIamPermissions": { - "description": "Tests the access control policy for a billing account. This method takes\nthe resource and a set of permissions as input and returns the subset of\nthe input permissions that the caller is allowed for that resource.", + "description": "Tests the access control policy for a billing account. This method takes the resource and a set of permissions as input and returns the subset of the input permissions that the caller is allowed for that resource.", "flatPath": "v1/billingAccounts/{billingAccountsId}:testIamPermissions", "httpMethod": "POST", "id": "cloudbilling.billingAccounts.testIamPermissions", @@ -285,7 +300,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. 
See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^billingAccounts/[^/]+$", "required": true, @@ -300,6 +315,8 @@ "$ref": "TestIamPermissionsResponse" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-billing.readonly", "https://www.googleapis.com/auth/cloud-platform" ] } @@ -308,7 +325,7 @@ "projects": { "methods": { "list": { - "description": "Lists the projects associated with a billing account. The current\nauthenticated user must have the `billing.resourceAssociations.list` IAM\npermission, which is often given to billing account\n[viewers](https://cloud.google.com/billing/docs/how-to/billing-access).", + "description": "Lists the projects associated with a billing account. The current authenticated user must have the `billing.resourceAssociations.list` IAM permission, which is often given to billing account [viewers](https://cloud.google.com/billing/docs/how-to/billing-access).", "flatPath": "v1/billingAccounts/{billingAccountsId}/projects", "httpMethod": "GET", "id": "cloudbilling.billingAccounts.projects.list", @@ -317,20 +334,20 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the billing account associated with the projects that\nyou want to list. For example, `billingAccounts/012345-567890-ABCDEF`.", + "description": "Required. The resource name of the billing account associated with the projects that you want to list. For example, `billingAccounts/012345-567890-ABCDEF`.", "location": "path", "pattern": "^billingAccounts/[^/]+$", "required": true, "type": "string" }, "pageSize": { - "description": "Requested page size. The maximum page size is 100; this is also the\ndefault.", + "description": "Requested page size. The maximum page size is 100; this is also the default.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "A token identifying a page of results to be returned. This should be a\n`next_page_token` value returned from a previous `ListProjectBillingInfo`\ncall. If unspecified, the first page of results is returned.", + "description": "A token identifying a page of results to be returned. This should be a `next_page_token` value returned from a previous `ListProjectBillingInfo` call. If unspecified, the first page of results is returned.", "location": "query", "type": "string" } @@ -340,6 +357,8 @@ "$ref": "ListProjectBillingInfoResponse" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-billing.readonly", "https://www.googleapis.com/auth/cloud-platform" ] } @@ -350,7 +369,7 @@ "projects": { "methods": { "getBillingInfo": { - "description": "Gets the billing information for a project. The current authenticated user\nmust have [permission to view the\nproject](https://cloud.google.com/docs/permissions-overview#h.bgs0oxofvnoo\n).", + "description": "Gets the billing information for a project. The current authenticated user must have [permission to view the project](https://cloud.google.com/docs/permissions-overview#h.bgs0oxofvnoo ).", "flatPath": "v1/projects/{projectsId}/billingInfo", "httpMethod": "GET", "id": "cloudbilling.projects.getBillingInfo", @@ -359,7 +378,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the project for which billing information is\nretrieved. For example, `projects/tokyo-rain-123`.", + "description": "Required. 
The resource name of the project for which billing information is retrieved. For example, `projects/tokyo-rain-123`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -371,11 +390,13 @@ "$ref": "ProjectBillingInfo" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-billing.readonly", "https://www.googleapis.com/auth/cloud-platform" ] }, "updateBillingInfo": { - "description": "Sets or updates the billing account associated with a project. You specify\nthe new billing account by setting the `billing_account_name` in the\n`ProjectBillingInfo` resource to the resource name of a billing account.\nAssociating a project with an open billing account enables billing on the\nproject and allows charges for resource usage. If the project already had a\nbilling account, this method changes the billing account used for resource\nusage charges.\n\n*Note:* Incurred charges that have not yet been reported in the transaction\nhistory of the GCP Console might be billed to the new billing\naccount, even if the charge occurred before the new billing account was\nassigned to the project.\n\nThe current authenticated user must have ownership privileges for both the\n[project](https://cloud.google.com/docs/permissions-overview#h.bgs0oxofvnoo\n) and the [billing\naccount](https://cloud.google.com/billing/docs/how-to/billing-access).\n\nYou can disable billing on the project by setting the\n`billing_account_name` field to empty. This action disassociates the\ncurrent billing account from the project. Any billable activity of your\nin-use services will stop, and your application could stop functioning as\nexpected. Any unbilled charges to date will be billed to the previously\nassociated account. The current authenticated user must be either an owner\nof the project or an owner of the billing account for the project.\n\nNote that associating a project with a *closed* billing account will have\nmuch the same effect as disabling billing on the project: any paid\nresources used by the project will be shut down. Thus, unless you wish to\ndisable billing, you should always call this method with the name of an\n*open* billing account.", + "description": "Sets or updates the billing account associated with a project. You specify the new billing account by setting the `billing_account_name` in the `ProjectBillingInfo` resource to the resource name of a billing account. Associating a project with an open billing account enables billing on the project and allows charges for resource usage. If the project already had a billing account, this method changes the billing account used for resource usage charges. *Note:* Incurred charges that have not yet been reported in the transaction history of the Google Cloud Console might be billed to the new billing account, even if the charge occurred before the new billing account was assigned to the project. The current authenticated user must have ownership privileges for both the [project](https://cloud.google.com/docs/permissions-overview#h.bgs0oxofvnoo ) and the [billing account](https://cloud.google.com/billing/docs/how-to/billing-access). You can disable billing on the project by setting the `billing_account_name` field to empty. This action disassociates the current billing account from the project. Any billable activity of your in-use services will stop, and your application could stop functioning as expected. Any unbilled charges to date will be billed to the previously associated account. 
The current authenticated user must be either an owner of the project or an owner of the billing account for the project. Note that associating a project with a *closed* billing account will have much the same effect as disabling billing on the project: any paid resources used by the project will be shut down. Thus, unless you wish to disable billing, you should always call this method with the name of an *open* billing account.", "flatPath": "v1/projects/{projectsId}/billingInfo", "httpMethod": "PUT", "id": "cloudbilling.projects.updateBillingInfo", @@ -384,7 +405,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the project associated with the billing information\nthat you want to update. For example, `projects/tokyo-rain-123`.", + "description": "Required. The resource name of the project associated with the billing information that you want to update. For example, `projects/tokyo-rain-123`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -399,6 +420,7 @@ "$ref": "ProjectBillingInfo" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", "https://www.googleapis.com/auth/cloud-platform" ] } @@ -420,7 +442,7 @@ "type": "integer" }, "pageToken": { - "description": "A token identifying a page of results to return. This should be a\n`next_page_token` value returned from a previous `ListServices`\ncall. If unspecified, the first page of results is returned.", + "description": "A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListServices` call. If unspecified, the first page of results is returned.", "location": "query", "type": "string" } @@ -430,6 +452,8 @@ "$ref": "ListServicesResponse" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-billing.readonly", "https://www.googleapis.com/auth/cloud-platform" ] } @@ -447,12 +471,12 @@ ], "parameters": { "currencyCode": { - "description": "The ISO 4217 currency code for the pricing info in the response proto.\nWill use the conversion rate as of start_time.\nOptional. If not specified USD will be used.", + "description": "The ISO 4217 currency code for the pricing info in the response proto. Will use the conversion rate as of start_time. Optional. If not specified USD will be used.", "location": "query", "type": "string" }, "endTime": { - "description": "Optional exclusive end time of the time range for which the pricing\nversions will be returned. Timestamps in the future are not allowed.\nThe time range has to be within a single calendar month in\nAmerica/Los_Angeles timezone. Time range as a whole is optional. If not\nspecified, the latest pricing will be returned (up to 12 hours old at\nmost).", + "description": "Optional exclusive end time of the time range for which the pricing versions will be returned. Timestamps in the future are not allowed. The time range has to be within a single calendar month in America/Los_Angeles timezone. Time range as a whole is optional. If not specified, the latest pricing will be returned (up to 12 hours old at most).", "format": "google-datetime", "location": "query", "type": "string" @@ -464,19 +488,19 @@ "type": "integer" }, "pageToken": { - "description": "A token identifying a page of results to return. This should be a\n`next_page_token` value returned from a previous `ListSkus`\ncall. If unspecified, the first page of results is returned.", + "description": "A token identifying a page of results to return. 
This should be a `next_page_token` value returned from a previous `ListSkus` call. If unspecified, the first page of results is returned.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The name of the service.\nExample: \"services/DA34-426B-A397\"", + "description": "Required. The name of the service. Example: \"services/DA34-426B-A397\"", "location": "path", "pattern": "^services/[^/]+$", "required": true, "type": "string" }, "startTime": { - "description": "Optional inclusive start time of the time range for which the pricing\nversions will be returned. Timestamps in the future are not allowed.\nThe time range has to be within a single calendar month in\nAmerica/Los_Angeles timezone. Time range as a whole is optional. If not\nspecified, the latest pricing will be returned (up to 12 hours old at\nmost).", + "description": "Optional inclusive start time of the time range for which the pricing versions will be returned. Timestamps in the future are not allowed. The time range has to be within a single calendar month in America/Los_Angeles timezone. Time range as a whole is optional. If not specified, the latest pricing will be returned (up to 12 hours old at most).", "format": "google-datetime", "location": "query", "type": "string" @@ -487,6 +511,8 @@ "$ref": "ListSkusResponse" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-billing.readonly", "https://www.googleapis.com/auth/cloud-platform" ] } @@ -495,7 +521,7 @@ } } }, - "revision": "20200503", + "revision": "20200912", "rootUrl": "https://cloudbilling.googleapis.com/", "schemas": { "AggregationInfo": { @@ -503,7 +529,7 @@ "id": "AggregationInfo", "properties": { "aggregationCount": { - "description": "The number of intervals to aggregate over.\nExample: If aggregation_level is \"DAILY\" and aggregation_count is 14,\naggregation will be over 14 days.", + "description": "The number of intervals to aggregate over. Example: If aggregation_level is \"DAILY\" and aggregation_count is 14, aggregation will be over 14 days.", "format": "int32", "type": "integer" }, @@ -537,7 +563,7 @@ "type": "object" }, "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"sampleservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:aliya@example.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts jose@example.com from DATA_READ logging, and\naliya@example.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service. 
The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -548,18 +574,18 @@ "type": "array" }, "service": { - "description": "Specifies a service that will be enabled for audit logging.\nFor example, `storage.googleapis.com`, `cloudsql.googleapis.com`.\n`allServices` is a special value that covers all services.", + "description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.", "type": "string" } }, "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\njose@example.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions. Example: { \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { - "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", + "description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.", "items": { "type": "string" }, @@ -585,23 +611,25 @@ "type": "object" }, "BillingAccount": { - "description": "A billing account in [GCP Console](https://console.cloud.google.com/).\nYou can assign a billing account to one or more projects.", + "description": "A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects.", "id": "BillingAccount", "properties": { "displayName": { - "description": "The display name given to the billing account, such as `My Billing\nAccount`. This name is displayed in the GCP Console.", + "description": "The display name given to the billing account, such as `My Billing Account`. 
This name is displayed in the Google Cloud Console.", "type": "string" }, "masterBillingAccount": { - "description": "If this account is a\n[subaccount](https://cloud.google.com/billing/docs/concepts), then this\nwill be the resource name of the master billing account that it is being\nresold through.\nOtherwise this will be empty.", + "description": "If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the master billing account that it is being resold through. Otherwise this will be empty.", "type": "string" }, "name": { - "description": "The resource name of the billing account. The resource name has the form\n`billingAccounts/{billing_account_id}`. For example,\n`billingAccounts/012345-567890-ABCDEF` would be the resource name for\nbilling account `012345-567890-ABCDEF`.", + "description": "Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`.", + "readOnly": true, "type": "string" }, "open": { - "description": "Output only. True if the billing account is open, and will therefore be charged for any\nusage on associated projects. False if the billing account is closed, and\ntherefore projects associated with it will be unable to use paid services.", + "description": "Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services.", + "readOnly": true, "type": "boolean" } }, @@ -613,17 +641,17 @@ "properties": { "condition": { "$ref": "Expr", - "description": "The condition that is associated with this binding.\n\nIf the condition evaluates to `true`, then this binding applies to the\ncurrent request.\n\nIf the condition evaluates to `false`, then this binding does not apply to\nthe current request. However, a different role binding might grant the same\nrole to one or more of the members in this binding.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies)." + "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. 
For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a user that has been recently deleted. For\n example, `alice@example.com?uid=123456789012345678901`. If the user is\n recovered, this value reverts to `user:{emailid}` and the recovered user\n retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus\n unique identifier) representing a service account that has been recently\n deleted. For example,\n `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.\n If the service account is undeleted, this value reverts to\n `serviceAccount:{emailid}` and the undeleted service account retains the\n role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a Google group that has been recently\n deleted. For example, `admins@example.com?uid=123456789012345678901`. If\n the group is recovered, this value reverts to `group:{emailid}` and the\n recovered group retains the role in the binding.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. 
", "items": { "type": "string" }, "type": "array" }, "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", "type": "string" } }, @@ -634,11 +662,11 @@ "id": "Category", "properties": { "resourceFamily": { - "description": "The type of product the SKU refers to.\nExample: \"Compute\", \"Storage\", \"Network\", \"ApplicationServices\" etc.", + "description": "The type of product the SKU refers to. Example: \"Compute\", \"Storage\", \"Network\", \"ApplicationServices\" etc.", "type": "string" }, "resourceGroup": { - "description": "A group classification for related SKUs.\nExample: \"RAM\", \"GPU\", \"Prediction\", \"Ops\", \"GoogleEgress\" etc.", + "description": "A group classification for related SKUs. Example: \"RAM\", \"GPU\", \"Prediction\", \"Ops\", \"GoogleEgress\" etc.", "type": "string" }, "serviceDisplayName": { @@ -646,30 +674,60 @@ "type": "string" }, "usageType": { - "description": "Represents how the SKU is consumed.\nExample: \"OnDemand\", \"Preemptible\", \"Commit1Mo\", \"Commit1Yr\" etc.", + "description": "Represents how the SKU is consumed. Example: \"OnDemand\", \"Preemptible\", \"Commit1Mo\", \"Commit1Yr\" etc.", "type": "string" } }, "type": "object" }, "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL)\nsyntax. CEL is a C-like expression language. The syntax and semantics of CEL\nare documented at https://github.com/google/cel-spec.\n\nExample (Comparison):\n\n title: \"Summary size limit\"\n description: \"Determines if a summary is less than 100 chars\"\n expression: \"document.summary.size() \u003c 100\"\n\nExample (Equality):\n\n title: \"Requestor is owner\"\n description: \"Determines if requestor is the document owner\"\n expression: \"document.owner == request.auth.claims.email\"\n\nExample (Logic):\n\n title: \"Public documents\"\n description: \"Determine whether the document should be publicly visible\"\n expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\n\nExample (Data Manipulation):\n\n title: \"Notification string\"\n description: \"Create a notification string with a timestamp.\"\n expression: \"'New message received at ' + string(document.create_time)\"\n\nThe exact variables and functions that may be referenced within an expression\nare determined by the service that evaluates it. See the service\ndocumentation for additional information.", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. 
Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { - "description": "Optional. Description of the expression. This is a longer text which\ndescribes the expression, e.g. when hovered over it in a UI.", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, "expression": { - "description": "Textual representation of an expression in Common Expression Language\nsyntax.", + "description": "Textual representation of an expression in Common Expression Language syntax.", "type": "string" }, "location": { - "description": "Optional. String indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", "type": "string" }, "title": { - "description": "Optional. Title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", + "type": "string" + } + }, + "type": "object" + }, + "GeoTaxonomy": { + "description": "Encapsulates the geographic taxonomy data for a sku.", + "id": "GeoTaxonomy", + "properties": { + "regions": { + "description": "The list of regions associated with a sku. Empty for Global skus, which are associated with all Google Cloud regions.", + "items": { + "type": "string" + }, + "type": "array" + }, + "type": { + "description": "The type of Geo Taxonomy: GLOBAL, REGIONAL, or MULTI_REGIONAL.", + "enum": [ + "TYPE_UNSPECIFIED", + "GLOBAL", + "REGIONAL", + "MULTI_REGIONAL" + ], + "enumDescriptions": [ + "The type is not specified.", + "The sku is global in nature, e.g. a license sku. Global skus are available in all regions, and so have an empty region list.", + "The sku is available in a specific region, e.g. \"us-west2\".", + "The sku is associated with multiple regions, e.g. \"us-west2\" and \"us-east1\"." + ], "type": "string" } }, @@ -687,7 +745,7 @@ "type": "array" }, "nextPageToken": { - "description": "A token to retrieve the next page of results. To retrieve the next page,\ncall `ListBillingAccounts` again with the `page_token` field set to this\nvalue. This field is empty if there are no more results to retrieve.", + "description": "A token to retrieve the next page of results. 
To retrieve the next page, call `ListBillingAccounts` again with the `page_token` field set to this value. This field is empty if there are no more results to retrieve.", "type": "string" } }, @@ -698,11 +756,11 @@ "id": "ListProjectBillingInfoResponse", "properties": { "nextPageToken": { - "description": "A token to retrieve the next page of results. To retrieve the next page,\ncall `ListProjectBillingInfo` again with the `page_token` field set to this\nvalue. This field is empty if there are no more results to retrieve.", + "description": "A token to retrieve the next page of results. To retrieve the next page, call `ListProjectBillingInfo` again with the `page_token` field set to this value. This field is empty if there are no more results to retrieve.", "type": "string" }, "projectBillingInfo": { - "description": "A list of `ProjectBillingInfo` resources representing the projects\nassociated with the billing account.", + "description": "A list of `ProjectBillingInfo` resources representing the projects associated with the billing account.", "items": { "$ref": "ProjectBillingInfo" }, @@ -716,7 +774,7 @@ "id": "ListServicesResponse", "properties": { "nextPageToken": { - "description": "A token to retrieve the next page of results. To retrieve the next page,\ncall `ListServices` again with the `page_token` field set to this\nvalue. This field is empty if there are no more results to retrieve.", + "description": "A token to retrieve the next page of results. To retrieve the next page, call `ListServices` again with the `page_token` field set to this value. This field is empty if there are no more results to retrieve.", "type": "string" }, "services": { @@ -734,7 +792,7 @@ "id": "ListSkusResponse", "properties": { "nextPageToken": { - "description": "A token to retrieve the next page of results. To retrieve the next page,\ncall `ListSkus` again with the `page_token` field set to this\nvalue. This field is empty if there are no more results to retrieve.", + "description": "A token to retrieve the next page of results. To retrieve the next page, call `ListSkus` again with the `page_token` field set to this value. This field is empty if there are no more results to retrieve.", "type": "string" }, "skus": { @@ -756,12 +814,12 @@ "type": "string" }, "nanos": { - "description": "Number of nano (10^-9) units of the amount.\nThe value must be between -999,999,999 and +999,999,999 inclusive.\nIf `units` is positive, `nanos` must be positive or zero.\nIf `units` is zero, `nanos` can be positive, zero, or negative.\nIf `units` is negative, `nanos` must be negative or zero.\nFor example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.", + "description": "Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.", "format": "int32", "type": "integer" }, "units": { - "description": "The whole units of the amount.\nFor example if `currencyCode` is `\"USD\"`, then 1 unit is one US dollar.", + "description": "The whole units of the amount. 
For example if `currencyCode` is `\"USD\"`, then 1 unit is one US dollar.", "format": "int64", "type": "string" } @@ -769,7 +827,7 @@ "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access\ncontrols for Google Cloud resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). A `role` is a named list of\npermissions; each `role` can be an IAM predefined role or a user-created\ncustom role.\n\nFor some types of Google Cloud resources, a `binding` can also specify a\n`condition`, which is a logical expression that allows access to a resource\nonly if the expression evaluates to `true`. A condition can add constraints\nbased on attributes of the request, the resource, or both. To learn which\nresources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).\n\n**JSON example:**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\n \"user:eve@example.com\"\n ],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n }\n\n**YAML example:**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n - etag: BwWWja0YfJA=\n - version: 3\n\nFor a description of IAM and its features, see the\n[IAM documentation](https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "auditConfigs": { @@ -780,19 +838,19 @@ "type": "array" }, "bindings": { - "description": "Associates a list of `members` to a `role`. Optionally, may specify a\n`condition` that determines how and when the `bindings` are applied. Each\nof the `bindings` must contain at least one member.", + "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", "format": "byte", "type": "string" }, "version": { - "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. 
Requests that specify an invalid value\nare rejected.\n\nAny operation that affects conditional role bindings must specify version\n`3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding\n* Adding a conditional role binding to a policy\n* Changing a conditional role binding in a policy\n* Removing any role binding, with or without a condition, from a policy\n that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may\nspecify any valid version or leave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -800,40 +858,40 @@ "type": "object" }, "PricingExpression": { - "description": "Expresses a mathematical pricing formula. For Example:-\n\n`usage_unit: GBy`\n`tiered_rates:`\n `[start_usage_amount: 20, unit_price: $10]`\n `[start_usage_amount: 100, unit_price: $5]`\n\nThe above expresses a pricing formula where the first 20GB is free, the\nnext 80GB is priced at $10 per GB followed by $5 per GB for additional\nusage.", + "description": "Expresses a mathematical pricing formula. For Example:- `usage_unit: GBy` `tiered_rates:` `[start_usage_amount: 20, unit_price: $10]` `[start_usage_amount: 100, unit_price: $5]` The above expresses a pricing formula where the first 20GB is free, the next 80GB is priced at $10 per GB followed by $5 per GB for additional usage.", "id": "PricingExpression", "properties": { "baseUnit": { - "description": "The base unit for the SKU which is the unit used in usage exports.\nExample: \"By\"", + "description": "The base unit for the SKU which is the unit used in usage exports. 
Example: \"By\"", "type": "string" }, "baseUnitConversionFactor": { - "description": "Conversion factor for converting from price per usage_unit to price per\nbase_unit, and start_usage_amount to start_usage_amount in base_unit.\nunit_price / base_unit_conversion_factor = price per base_unit.\nstart_usage_amount * base_unit_conversion_factor = start_usage_amount in\nbase_unit.", + "description": "Conversion factor for converting from price per usage_unit to price per base_unit, and start_usage_amount to start_usage_amount in base_unit. unit_price / base_unit_conversion_factor = price per base_unit. start_usage_amount * base_unit_conversion_factor = start_usage_amount in base_unit.", "format": "double", "type": "number" }, "baseUnitDescription": { - "description": "The base unit in human readable form.\nExample: \"byte\".", + "description": "The base unit in human readable form. Example: \"byte\".", "type": "string" }, "displayQuantity": { - "description": "The recommended quantity of units for displaying pricing info. When\ndisplaying pricing info it is recommended to display:\n(unit_price * display_quantity) per display_quantity usage_unit.\nThis field does not affect the pricing formula and is for display purposes\nonly.\nExample: If the unit_price is \"0.0001 USD\", the usage_unit is \"GB\" and\nthe display_quantity is \"1000\" then the recommended way of displaying the\npricing info is \"0.10 USD per 1000 GB\"", + "description": "The recommended quantity of units for displaying pricing info. When displaying pricing info it is recommended to display: (unit_price * display_quantity) per display_quantity usage_unit. This field does not affect the pricing formula and is for display purposes only. Example: If the unit_price is \"0.0001 USD\", the usage_unit is \"GB\" and the display_quantity is \"1000\" then the recommended way of displaying the pricing info is \"0.10 USD per 1000 GB\"", "format": "double", "type": "number" }, "tieredRates": { - "description": "The list of tiered rates for this pricing. The total cost is computed by\napplying each of the tiered rates on usage. This repeated list is sorted\nby ascending order of start_usage_amount.", + "description": "The list of tiered rates for this pricing. The total cost is computed by applying each of the tiered rates on usage. This repeated list is sorted by ascending order of start_usage_amount.", "items": { "$ref": "TierRate" }, "type": "array" }, "usageUnit": { - "description": "The short hand for unit of usage this pricing is specified in.\nExample: usage_unit of \"GiBy\" means that usage is specified in \"Gibi Byte\".", + "description": "The short hand for unit of usage this pricing is specified in. Example: usage_unit of \"GiBy\" means that usage is specified in \"Gibi Byte\".", "type": "string" }, "usageUnitDescription": { - "description": "The unit of usage in human readable form.\nExample: \"gibi byte\".", + "description": "The unit of usage in human readable form. Example: \"gibi byte\".", "type": "string" } }, @@ -845,15 +903,15 @@ "properties": { "aggregationInfo": { "$ref": "AggregationInfo", - "description": "Aggregation Info. This can be left unspecified if the pricing expression\ndoesn't require aggregation." + "description": "Aggregation Info. This can be left unspecified if the pricing expression doesn't require aggregation." }, "currencyConversionRate": { - "description": "Conversion rate used for currency conversion, from USD to the currency\nspecified in the request. 
This includes any surcharge collected for billing\nin non USD currency. If a currency is not specified in the request this\ndefaults to 1.0.\nExample: USD * currency_conversion_rate = JPY", + "description": "Conversion rate used for currency conversion, from USD to the currency specified in the request. This includes any surcharge collected for billing in non USD currency. If a currency is not specified in the request this defaults to 1.0. Example: USD * currency_conversion_rate = JPY", "format": "double", "type": "number" }, "effectiveTime": { - "description": "The timestamp from which this pricing was effective within the requested\ntime range. This is guaranteed to be greater than or equal to the\nstart_time field in the request and less than the end_time field in the\nrequest. If a time range was not specified in the request this field will\nbe equivalent to a time within the last 12 hours, indicating the latest\npricing info.", + "description": "The timestamp from which this pricing was effective within the requested time range. This is guaranteed to be greater than or equal to the start_time field in the request and less than the end_time field in the request. If a time range was not specified in the request this field will be equivalent to a time within the last 12 hours, indicating the latest pricing info.", "format": "google-datetime", "type": "string" }, @@ -862,30 +920,30 @@ "description": "Expresses the pricing formula. See `PricingExpression` for an example." }, "summary": { - "description": "An optional human readable summary of the pricing information, has a\nmaximum length of 256 characters.", + "description": "An optional human readable summary of the pricing information, has a maximum length of 256 characters.", "type": "string" } }, "type": "object" }, "ProjectBillingInfo": { - "description": "Encapsulation of billing information for a GCP Console project. A project\nhas at most one associated billing account at a time (but a billing account\ncan be assigned to multiple projects).", + "description": "Encapsulation of billing information for a Google Cloud Console project. A project has at most one associated billing account at a time (but a billing account can be assigned to multiple projects).", "id": "ProjectBillingInfo", "properties": { "billingAccountName": { - "description": "The resource name of the billing account associated with the project, if\nany. For example, `billingAccounts/012345-567890-ABCDEF`.", + "description": "The resource name of the billing account associated with the project, if any. For example, `billingAccounts/012345-567890-ABCDEF`.", "type": "string" }, "billingEnabled": { - "description": "True if the project is associated with an open billing account, to which\nusage on the project is charged. False if the project is associated with a\nclosed billing account, or no billing account at all, and therefore cannot\nuse paid services. This field is read-only.", + "description": "True if the project is associated with an open billing account, to which usage on the project is charged. False if the project is associated with a closed billing account, or no billing account at all, and therefore cannot use paid services. This field is read-only.", "type": "boolean" }, "name": { - "description": "The resource name for the `ProjectBillingInfo`; has the form\n`projects/{project_id}/billingInfo`. For example, the resource name for the\nbilling information for project `tokyo-rain-123` would be\n`projects/tokyo-rain-123/billingInfo`. 
This field is read-only.", + "description": "The resource name for the `ProjectBillingInfo`; has the form `projects/{project_id}/billingInfo`. For example, the resource name for the billing information for project `tokyo-rain-123` would be `projects/tokyo-rain-123/billingInfo`. This field is read-only.", "type": "string" }, "projectId": { - "description": "The ID of the project that this `ProjectBillingInfo` represents, such as\n`tokyo-rain-123`. This is a convenience field so that you don't need to\nparse the `name` field to obtain a project ID. This field is read-only.", + "description": "The ID of the project that this `ProjectBillingInfo` represents, such as `tokyo-rain-123`. This is a convenience field so that you don't need to parse the `name` field to obtain a project ID. This field is read-only.", "type": "string" } }, @@ -896,7 +954,7 @@ "id": "Service", "properties": { "businessEntityName": { - "description": "The business under which the service is offered.\nEx. \"businessEntities/GCP\", \"businessEntities/Maps\"", + "description": "The business under which the service is offered. Ex. \"businessEntities/GCP\", \"businessEntities/Maps\"", "type": "string" }, "displayName": { @@ -904,11 +962,11 @@ "type": "string" }, "name": { - "description": "The resource name for the service.\nExample: \"services/DA34-426B-A397\"", + "description": "The resource name for the service. Example: \"services/DA34-426B-A397\"", "type": "string" }, "serviceId": { - "description": "The identifier for the service.\nExample: \"DA34-426B-A397\"", + "description": "The identifier for the service. Example: \"DA34-426B-A397\"", "type": "string" } }, @@ -920,10 +978,10 @@ "properties": { "policy": { "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." }, "updateMask": { - "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. If no mask is provided, the\nfollowing default mask is used:\n\n`paths: \"bindings, etag\"`", + "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: \"bindings, etag\"`", "format": "google-fieldmask", "type": "string" } @@ -939,11 +997,15 @@ "description": "The category hierarchy of this SKU, purely for organizational purpose." }, "description": { - "description": "A human readable description of the SKU, has a maximum length of 256\ncharacters.", + "description": "A human readable description of the SKU, has a maximum length of 256 characters.", "type": "string" }, + "geoTaxonomy": { + "$ref": "GeoTaxonomy", + "description": "The geographic taxonomy for this sku." + }, "name": { - "description": "The resource name for the SKU.\nExample: \"services/DA34-426B-A397/skus/AA95-CD31-42FE\"", + "description": "The resource name for the SKU. 
Example: \"services/DA34-426B-A397/skus/AA95-CD31-42FE\"", "type": "string" }, "pricingInfo": { @@ -954,18 +1016,18 @@ "type": "array" }, "serviceProviderName": { - "description": "Identifies the service provider.\nThis is 'Google' for first party services in Google Cloud Platform.", + "description": "Identifies the service provider. This is 'Google' for first party services in Google Cloud Platform.", "type": "string" }, "serviceRegions": { - "description": "List of service regions this SKU is offered at.\nExample: \"asia-east1\"\nService regions can be found at https://cloud.google.com/about/locations/", + "description": "List of service regions this SKU is offered at. Example: \"asia-east1\" Service regions can be found at https://cloud.google.com/about/locations/", "items": { "type": "string" }, "type": "array" }, "skuId": { - "description": "The identifier for the SKU.\nExample: \"AA95-CD31-42FE\"", + "description": "The identifier for the SKU. Example: \"AA95-CD31-42FE\"", "type": "string" } }, @@ -976,7 +1038,7 @@ "id": "TestIamPermissionsRequest", "properties": { "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "items": { "type": "string" }, @@ -990,7 +1052,7 @@ "id": "TestIamPermissionsResponse", "properties": { "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { "type": "string" }, @@ -1004,13 +1066,13 @@ "id": "TierRate", "properties": { "startUsageAmount": { - "description": "Usage is priced at this rate only after this amount.\nExample: start_usage_amount of 10 indicates that the usage will be priced\nat the unit_price after the first 10 usage_units.", + "description": "Usage is priced at this rate only after this amount. Example: start_usage_amount of 10 indicates that the usage will be priced at the unit_price after the first 10 usage_units.", "format": "double", "type": "number" }, "unitPrice": { "$ref": "Money", - "description": "The price per unit of usage.\nExample: unit_price of amount $10 indicates that each unit will cost $10." + "description": "The price per unit of usage. Example: unit_price of amount $10 indicates that each unit will cost $10." } }, "type": "object" diff --git a/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-gen.go b/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-gen.go index d485303edfe..8168661cfb6 100644 --- a/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-gen.go +++ b/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-gen.go @@ -23,6 +23,10 @@ // // Other authentication options // +// By default, all available scopes (see "Constants") are used to authenticate. 
To restrict scopes, use option.WithScopes: +// +// cloudbillingService, err := cloudbilling.NewService(ctx, option.WithScopes(cloudbilling.CloudPlatformScope)) +// // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // // cloudbillingService, err := cloudbilling.NewService(ctx, option.WithAPIKey("AIza...")) @@ -75,9 +79,16 @@ const apiId = "cloudbilling:v1" const apiName = "cloudbilling" const apiVersion = "v1" const basePath = "https://cloudbilling.googleapis.com/" +const mtlsBasePath = "https://cloudbilling.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( + // View and manage your Google Cloud Platform billing accounts + CloudBillingScope = "https://www.googleapis.com/auth/cloud-billing" + + // View your Google Cloud Platform billing accounts + CloudBillingReadonlyScope = "https://www.googleapis.com/auth/cloud-billing.readonly" + // View and manage your data across Google Cloud Platform services CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" ) @@ -85,11 +96,14 @@ const ( // NewService creates a new APIService. func NewService(ctx context.Context, opts ...option.ClientOption) (*APIService, error) { scopesOption := option.WithScopes( + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-billing.readonly", "https://www.googleapis.com/auth/cloud-platform", ) // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -193,9 +207,8 @@ type ServicesSkusService struct { // AggregationInfo: Represents the aggregation level and interval for // pricing of a single SKU. type AggregationInfo struct { - // AggregationCount: The number of intervals to aggregate over. - // Example: If aggregation_level is "DAILY" and aggregation_count is - // 14, + // AggregationCount: The number of intervals to aggregate over. Example: + // If aggregation_level is "DAILY" and aggregation_count is 14, // aggregation will be over 14 days. AggregationCount int64 `json:"aggregationCount,omitempty"` @@ -235,72 +248,31 @@ func (s *AggregationInfo) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AuditConfig: Specifies the audit configuration for a service. -// The configuration determines which permission types are logged, and -// what -// identities, if any, are exempted from logging. -// An AuditConfig must have one or more AuditLogConfigs. -// -// If there are AuditConfigs for both `allServices` and a specific -// service, -// the union of the two AuditConfigs is used for that service: the -// log_types -// specified in each AuditConfig are enabled, and the exempted_members -// in each -// AuditLogConfig are exempted. 
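The hunks above add dedicated cloud-billing OAuth scopes, an mTLS base path, and the option.WithScopes note to the generated client. A minimal sketch, assuming application default credentials, of constructing the service restricted to the new read-only billing scope and listing the accounts the caller can see:

```go
package main

import (
	"context"
	"log"

	cloudbilling "google.golang.org/api/cloudbilling/v1"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// Without option.WithScopes the client requests every scope declared in the
	// package constants (cloud-billing, cloud-billing.readonly, cloud-platform).
	// Here we narrow it to the read-only billing scope added in this bump.
	svc, err := cloudbilling.NewService(ctx,
		option.WithScopes(cloudbilling.CloudBillingReadonlyScope))
	if err != nil {
		log.Fatalf("creating cloudbilling client: %v", err)
	}

	// List the billing accounts visible to the authenticated caller.
	resp, err := svc.BillingAccounts.List().Do()
	if err != nil {
		log.Fatalf("listing billing accounts: %v", err)
	}
	for _, acct := range resp.BillingAccounts {
		log.Printf("%s (%q) open=%v", acct.Name, acct.DisplayName, acct.Open)
	}
}
```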
-// -// Example Policy with multiple AuditConfigs: -// -// { -// "audit_configs": [ -// { -// "service": "allServices" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// }, -// { -// "log_type": "ADMIN_READ", -// } -// ] -// }, -// { -// "service": "sampleservice.googleapis.com" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// }, -// { -// "log_type": "DATA_WRITE", -// "exempted_members": [ -// "user:aliya@example.com" -// ] -// } -// ] -// } -// ] -// } -// -// For sampleservice, this policy enables DATA_READ, DATA_WRITE and -// ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, -// and -// aliya@example.com from DATA_WRITE logging. +// AuditConfig: Specifies the audit configuration for a service. The +// configuration determines which permission types are logged, and what +// identities, if any, are exempted from logging. An AuditConfig must +// have one or more AuditLogConfigs. If there are AuditConfigs for both +// `allServices` and a specific service, the union of the two +// AuditConfigs is used for that service: the log_types specified in +// each AuditConfig are enabled, and the exempted_members in each +// AuditLogConfig are exempted. Example Policy with multiple +// AuditConfigs: { "audit_configs": [ { "service": "allServices", +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": +// [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { +// "log_type": "ADMIN_READ" } ] }, { "service": +// "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": +// "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ +// "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy +// enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts +// jose@example.com from DATA_READ logging, and aliya@example.com from +// DATA_WRITE logging. type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of // permission. AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` - // Service: Specifies a service that will be enabled for audit - // logging. - // For example, `storage.googleapis.com`, - // `cloudsql.googleapis.com`. + // Service: Specifies a service that will be enabled for audit logging. + // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. // `allServices` is a special value that covers all services. Service string `json:"service,omitempty"` @@ -329,31 +301,15 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { } // AuditLogConfig: Provides the configuration for logging a type of -// permissions. -// Example: -// -// { -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// } -// ] -// } -// -// This enables 'DATA_READ' and 'DATA_WRITE' logging, while -// exempting -// jose@example.com from DATA_READ logging. +// permissions. Example: { "audit_log_configs": [ { "log_type": +// "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { +// "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and +// 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ +// logging. type AuditLogConfig struct { // ExemptedMembers: Specifies the identities that do not cause logging - // for this type of - // permission. - // Follows the same format of Binding.members. 
+ // for this type of permission. Follows the same format of + // Binding.members. ExemptedMembers []string `json:"exemptedMembers,omitempty"` // LogType: The log type that this config enables. @@ -389,40 +345,31 @@ func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// BillingAccount: A billing account in [GCP -// Console](https://console.cloud.google.com/). -// You can assign a billing account to one or more projects. +// BillingAccount: A billing account in the [Google Cloud +// Console](https://console.cloud.google.com/). You can assign a billing +// account to one or more projects. type BillingAccount struct { // DisplayName: The display name given to the billing account, such as - // `My Billing - // Account`. This name is displayed in the GCP Console. + // `My Billing Account`. This name is displayed in the Google Cloud + // Console. DisplayName string `json:"displayName,omitempty"` - // MasterBillingAccount: If this account is - // a + // MasterBillingAccount: If this account is a // [subaccount](https://cloud.google.com/billing/docs/concepts), then - // this - // will be the resource name of the master billing account that it is - // being - // resold through. - // Otherwise this will be empty. + // this will be the resource name of the master billing account that it + // is being resold through. Otherwise this will be empty. MasterBillingAccount string `json:"masterBillingAccount,omitempty"` - // Name: The resource name of the billing account. The resource name has - // the form - // `billingAccounts/{billing_account_id}`. For - // example, - // `billingAccounts/012345-567890-ABCDEF` would be the resource name - // for - // billing account `012345-567890-ABCDEF`. + // Name: Output only. The resource name of the billing account. The + // resource name has the form `billingAccounts/{billing_account_id}`. + // For example, `billingAccounts/012345-567890-ABCDEF` would be the + // resource name for billing account `012345-567890-ABCDEF`. Name string `json:"name,omitempty"` // Open: Output only. True if the billing account is open, and will - // therefore be charged for any - // usage on associated projects. False if the billing account is closed, - // and - // therefore projects associated with it will be unable to use paid - // services. + // therefore be charged for any usage on associated projects. False if + // the billing account is closed, and therefore projects associated with + // it will be unable to use paid services. Open bool `json:"open,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -454,95 +401,53 @@ func (s *BillingAccount) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: The condition that is associated with this binding. - // - // If the condition evaluates to `true`, then this binding applies to - // the - // current request. - // - // If the condition evaluates to `false`, then this binding does not - // apply to - // the current request. However, a different role binding might grant - // the same - // role to one or more of the members in this binding. - // - // To learn which resources support conditions in their IAM policies, - // see - // the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/r - // esource-policies). + // Condition: The condition that is associated with this binding. 
If the + // condition evaluates to `true`, then this binding applies to the + // current request. If the condition evaluates to `false`, then this + // binding does not apply to the current request. However, a different + // role binding might grant the same role to one or more of the members + // in this binding. To learn which resources support conditions in their + // IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud - // Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@example.com` . - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. - // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a user that has been recently deleted. - // For - // example, `alice@example.com?uid=123456789012345678901`. If the - // user is - // recovered, this value reverts to `user:{emailid}` and the - // recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address - // (plus - // unique identifier) representing a service account that has been - // recently - // deleted. For example, - // + // Platform resource. `members` can have the following values: * + // `allUsers`: A special identifier that represents anyone who is on the + // internet; with or without a Google account. * + // `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. * + // `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . * + // `serviceAccount:{emailid}`: An email address that represents a + // service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An + // email address that represents a Google group. For example, + // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An + // email address (plus unique identifier) representing a user that has + // been recently deleted. For example, + // `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered + // user retains the role in the binding. * + // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account - // retains the - // role in the binding. 
- // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a Google group that has been recently - // deleted. For example, - // `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` - // and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. - // - // + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains + // the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: + // An email address (plus unique identifier) representing a Google group + // that has been recently deleted. For example, + // `admins@example.com?uid=123456789012345678901`. If the group is + // recovered, this value reverts to `group:{emailid}` and the recovered + // group retains the role in the binding. * `domain:{domain}`: The G + // Suite domain (primary) that represents all the users of that domain. + // For example, `google.com` or `example.com`. Members []string `json:"members,omitempty"` - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Role: Role that is assigned to `members`. For example, + // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` // ForceSendFields is a list of field names (e.g. "Condition") to @@ -570,20 +475,20 @@ func (s *Binding) MarshalJSON() ([]byte, error) { // Category: Represents the category hierarchy of a SKU. type Category struct { - // ResourceFamily: The type of product the SKU refers to. - // Example: "Compute", "Storage", "Network", "ApplicationServices" etc. + // ResourceFamily: The type of product the SKU refers to. Example: + // "Compute", "Storage", "Network", "ApplicationServices" etc. ResourceFamily string `json:"resourceFamily,omitempty"` - // ResourceGroup: A group classification for related SKUs. - // Example: "RAM", "GPU", "Prediction", "Ops", "GoogleEgress" etc. + // ResourceGroup: A group classification for related SKUs. Example: + // "RAM", "GPU", "Prediction", "Ops", "GoogleEgress" etc. ResourceGroup string `json:"resourceGroup,omitempty"` // ServiceDisplayName: The display name of the service this SKU belongs // to. ServiceDisplayName string `json:"serviceDisplayName,omitempty"` - // UsageType: Represents how the SKU is consumed. - // Example: "OnDemand", "Preemptible", "Commit1Mo", "Commit1Yr" etc. + // UsageType: Represents how the SKU is consumed. Example: "OnDemand", + // "Preemptible", "Commit1Mo", "Commit1Yr" etc. UsageType string `json:"usageType,omitempty"` // ForceSendFields is a list of field names (e.g. "ResourceFamily") to @@ -611,65 +516,40 @@ func (s *Category) MarshalJSON() ([]byte, error) { } // Expr: Represents a textual expression in the Common Expression -// Language (CEL) -// syntax. CEL is a C-like expression language. The syntax and semantics -// of CEL -// are documented at https://github.com/google/cel-spec. 
-// -// Example (Comparison): -// -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" -// -// Example (Equality): -// -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == -// request.auth.claims.email" -// -// Example (Logic): -// -// title: "Public documents" -// description: "Determine whether the document should be publicly -// visible" -// expression: "document.type != 'private' && document.type != -// 'internal'" -// -// Example (Data Manipulation): -// -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + -// string(document.create_time)" -// -// The exact variables and functions that may be referenced within an -// expression -// are determined by the service that evaluates it. See the -// service -// documentation for additional information. +// Language (CEL) syntax. CEL is a C-like expression language. The +// syntax and semantics of CEL are documented at +// https://github.com/google/cel-spec. Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" +// description: "Determine whether the document should be publicly +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. type Expr struct { // Description: Optional. Description of the expression. This is a - // longer text which - // describes the expression, e.g. when hovered over it in a UI. + // longer text which describes the expression, e.g. when hovered over it + // in a UI. Description string `json:"description,omitempty"` // Expression: Textual representation of an expression in Common - // Expression Language - // syntax. + // Expression Language syntax. Expression string `json:"expression,omitempty"` // Location: Optional. String indicating the location of the expression - // for error - // reporting, e.g. a file name and a position in the file. + // for error reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` // Title: Optional. Title for the expression, i.e. a short string - // describing - // its purpose. This can be used e.g. in UIs which allow to enter - // the - // expression. + // describing its purpose. This can be used e.g. in UIs which allow to + // enter the expression. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -695,6 +575,47 @@ func (s *Expr) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GeoTaxonomy: Encapsulates the geographic taxonomy data for a sku. 
+type GeoTaxonomy struct { + // Regions: The list of regions associated with a sku. Empty for Global + // skus, which are associated with all Google Cloud regions. + Regions []string `json:"regions,omitempty"` + + // Type: The type of Geo Taxonomy: GLOBAL, REGIONAL, or MULTI_REGIONAL. + // + // Possible values: + // "TYPE_UNSPECIFIED" - The type is not specified. + // "GLOBAL" - The sku is global in nature, e.g. a license sku. Global + // skus are available in all regions, and so have an empty region list. + // "REGIONAL" - The sku is available in a specific region, e.g. + // "us-west2". + // "MULTI_REGIONAL" - The sku is associated with multiple regions, + // e.g. "us-west2" and "us-east1". + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Regions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Regions") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GeoTaxonomy) MarshalJSON() ([]byte, error) { + type NoMethod GeoTaxonomy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ListBillingAccountsResponse: Response message for // `ListBillingAccounts`. type ListBillingAccountsResponse struct { @@ -702,10 +623,9 @@ type ListBillingAccountsResponse struct { BillingAccounts []*BillingAccount `json:"billingAccounts,omitempty"` // NextPageToken: A token to retrieve the next page of results. To - // retrieve the next page, - // call `ListBillingAccounts` again with the `page_token` field set to - // this - // value. This field is empty if there are no more results to retrieve. + // retrieve the next page, call `ListBillingAccounts` again with the + // `page_token` field set to this value. This field is empty if there + // are no more results to retrieve. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -740,15 +660,13 @@ func (s *ListBillingAccountsResponse) MarshalJSON() ([]byte, error) { // `ListProjectBillingInfoResponse`. type ListProjectBillingInfoResponse struct { // NextPageToken: A token to retrieve the next page of results. To - // retrieve the next page, - // call `ListProjectBillingInfo` again with the `page_token` field set - // to this - // value. This field is empty if there are no more results to retrieve. + // retrieve the next page, call `ListProjectBillingInfo` again with the + // `page_token` field set to this value. This field is empty if there + // are no more results to retrieve. NextPageToken string `json:"nextPageToken,omitempty"` // ProjectBillingInfo: A list of `ProjectBillingInfo` resources - // representing the projects - // associated with the billing account. + // representing the projects associated with the billing account. 
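The new GeoTaxonomy type (and the geoTaxonomy field on Sku) lets callers distinguish global, regional, and multi-regional SKUs. A hedged sketch of filtering a service's SKUs by region; the service name is the placeholder from the surrounding docs, and Pages is the standard pagination helper on generated list calls:

```go
package main

import (
	"context"
	"fmt"
	"log"

	cloudbilling "google.golang.org/api/cloudbilling/v1"
)

// listRegionalSkus prints the SKUs of a service that are offered in the given
// region, using the Sku.GeoTaxonomy field introduced by this vendor bump.
// serviceName is a placeholder such as "services/DA34-426B-A397".
func listRegionalSkus(ctx context.Context, svc *cloudbilling.APIService, serviceName, region string) error {
	return svc.Services.Skus.List(serviceName).Pages(ctx, func(page *cloudbilling.ListSkusResponse) error {
		for _, sku := range page.Skus {
			gt := sku.GeoTaxonomy
			if gt == nil || gt.Type == "GLOBAL" {
				continue // GLOBAL SKUs carry an empty region list
			}
			for _, r := range gt.Regions {
				if r == region {
					fmt.Printf("%s: %s\n", sku.SkuId, sku.Description)
					break
				}
			}
		}
		return nil
	})
}

func main() {
	ctx := context.Background()
	svc, err := cloudbilling.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if err := listRegionalSkus(ctx, svc, "services/DA34-426B-A397", "us-west2"); err != nil {
		log.Fatal(err)
	}
}
```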
ProjectBillingInfo []*ProjectBillingInfo `json:"projectBillingInfo,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -781,10 +699,9 @@ func (s *ListProjectBillingInfoResponse) MarshalJSON() ([]byte, error) { // ListServicesResponse: Response message for `ListServices`. type ListServicesResponse struct { // NextPageToken: A token to retrieve the next page of results. To - // retrieve the next page, - // call `ListServices` again with the `page_token` field set to - // this - // value. This field is empty if there are no more results to retrieve. + // retrieve the next page, call `ListServices` again with the + // `page_token` field set to this value. This field is empty if there + // are no more results to retrieve. NextPageToken string `json:"nextPageToken,omitempty"` // Services: A list of services. @@ -820,9 +737,9 @@ func (s *ListServicesResponse) MarshalJSON() ([]byte, error) { // ListSkusResponse: Response message for `ListSkus`. type ListSkusResponse struct { // NextPageToken: A token to retrieve the next page of results. To - // retrieve the next page, - // call `ListSkus` again with the `page_token` field set to this - // value. This field is empty if there are no more results to retrieve. + // retrieve the next page, call `ListSkus` again with the `page_token` + // field set to this value. This field is empty if there are no more + // results to retrieve. NextPageToken string `json:"nextPageToken,omitempty"` // Skus: The list of public SKUs of the given service. @@ -860,18 +777,16 @@ type Money struct { // CurrencyCode: The 3-letter currency code defined in ISO 4217. CurrencyCode string `json:"currencyCode,omitempty"` - // Nanos: Number of nano (10^-9) units of the amount. - // The value must be between -999,999,999 and +999,999,999 inclusive. - // If `units` is positive, `nanos` must be positive or zero. - // If `units` is zero, `nanos` can be positive, zero, or negative. - // If `units` is negative, `nanos` must be negative or zero. - // For example $-1.75 is represented as `units`=-1 and - // `nanos`=-750,000,000. + // Nanos: Number of nano (10^-9) units of the amount. The value must be + // between -999,999,999 and +999,999,999 inclusive. If `units` is + // positive, `nanos` must be positive or zero. If `units` is zero, + // `nanos` can be positive, zero, or negative. If `units` is negative, + // `nanos` must be negative or zero. For example $-1.75 is represented + // as `units`=-1 and `nanos`=-750,000,000. Nanos int64 `json:"nanos,omitempty"` - // Units: The whole units of the amount. - // For example if `currencyCode` is "USD", then 1 unit is one US - // dollar. + // Units: The whole units of the amount. For example if `currencyCode` + // is "USD", then 1 unit is one US dollar. Units int64 `json:"units,omitempty,string"` // ForceSendFields is a list of field names (e.g. "CurrencyCode") to @@ -898,154 +813,77 @@ func (s *Money) MarshalJSON() ([]byte, error) { } // Policy: An Identity and Access Management (IAM) policy, which -// specifies access -// controls for Google Cloud resources. -// -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or -// more -// `members` to a single `role`. Members can be user accounts, service -// accounts, +// specifies access controls for Google Cloud resources. A `Policy` is a +// collection of `bindings`. A `binding` binds one or more `members` to +// a single `role`. Members can be user accounts, service accounts, // Google groups, and domains (such as G Suite). 
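Money splits an amount into whole units plus nanos, so consumers of this client typically need a small conversion helper for display. A sketch built on the semantics in the field docs above (the $-1.75 example is units=-1, nanos=-750,000,000); float64 is fine for display but not for exact accounting:

```go
package main

import (
	"fmt"

	cloudbilling "google.golang.org/api/cloudbilling/v1"
)

// toFloat converts a cloudbilling Money value into a float64 amount.
// Adequate for display; use integer math if exact accounting is needed.
func toFloat(m *cloudbilling.Money) float64 {
	if m == nil {
		return 0
	}
	return float64(m.Units) + float64(m.Nanos)/1e9
}

func main() {
	// The example from the field docs: $-1.75 is units=-1, nanos=-750,000,000.
	m := &cloudbilling.Money{CurrencyCode: "USD", Units: -1, Nanos: -750000000}
	fmt.Printf("%.2f %s\n", toFloat(m), m.CurrencyCode) // -1.75 USD
}
```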
A `role` is a named -// list of -// permissions; each `role` can be an IAM predefined role or a -// user-created -// custom role. -// -// For some types of Google Cloud resources, a `binding` can also -// specify a -// `condition`, which is a logical expression that allows access to a -// resource -// only if the expression evaluates to `true`. A condition can add -// constraints -// based on attributes of the request, the resource, or both. To learn -// which -// resources support conditions in their IAM policies, see the -// [IAM +// list of permissions; each `role` can be an IAM predefined role or a +// user-created custom role. For some types of Google Cloud resources, a +// `binding` can also specify a `condition`, which is a logical +// expression that allows access to a resource only if the expression +// evaluates to `true`. A condition can add constraints based on +// attributes of the request, the resource, or both. To learn which +// resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p -// olicies). -// -// **JSON example:** -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": [ -// "user:eve@example.com" -// ], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// -// **YAML example:** -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// - etag: BwWWja0YfJA= -// - version: 3 -// -// For a description of IAM and its features, see the -// [IAM documentation](https://cloud.google.com/iam/docs/). +// olicies). 
**JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: +// 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). type Policy struct { // AuditConfigs: Specifies cloud audit logging configuration for this // policy. AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` // Bindings: Associates a list of `members` to a `role`. Optionally, may - // specify a - // `condition` that determines how and when the `bindings` are applied. - // Each - // of the `bindings` must contain at least one member. + // specify a `condition` that determines how and when the `bindings` are + // applied. Each of the `bindings` must contain at least one member. Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. - // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. **Important:** If you use IAM + // Conditions, you must include the `etag` field whenever you call + // `setIamPolicy`. If you omit this field, then IAM allows you to + // overwrite a version `3` policy with a version `1` policy, and all of // the conditions in the version `3` policy are lost. Etag string `json:"etag,omitempty"` - // Version: Specifies the format of the policy. - // - // Valid values are `0`, `1`, and `3`. 
Requests that specify an invalid - // value - // are rejected. - // + // Version: Specifies the format of the policy. Valid values are `0`, + // `1`, and `3`. Requests that specify an invalid value are rejected. // Any operation that affects conditional role bindings must specify - // version - // `3`. This requirement applies to the following operations: - // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a - // policy - // that includes conditions - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of - // the conditions in the version `3` policy are lost. - // - // If a policy does not include any conditions, operations on that - // policy may - // specify any valid version or leave the field unset. - // - // To learn which resources support conditions in their IAM policies, - // see the - // [IAM + // version `3`. This requirement applies to the following operations: * + // Getting a policy that includes a conditional role binding * Adding a + // conditional role binding to a policy * Changing a conditional role + // binding in a policy * Removing any role binding, with or without a + // condition, from a policy that includes conditions **Important:** If + // you use IAM Conditions, you must include the `etag` field whenever + // you call `setIamPolicy`. If you omit this field, then IAM allows you + // to overwrite a version `3` policy with a version `1` policy, and all + // of the conditions in the version `3` policy are lost. If a policy + // does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. To learn which + // resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p // olicies). Version int64 `json:"version,omitempty"` @@ -1078,68 +916,48 @@ func (s *Policy) MarshalJSON() ([]byte, error) { } // PricingExpression: Expresses a mathematical pricing formula. For -// Example:- -// -// `usage_unit: GBy` -// `tiered_rates:` -// `[start_usage_amount: 20, unit_price: $10]` -// `[start_usage_amount: 100, unit_price: $5]` -// -// The above expresses a pricing formula where the first 20GB is free, -// the +// Example:- `usage_unit: GBy` `tiered_rates:` `[start_usage_amount: 20, +// unit_price: $10]` `[start_usage_amount: 100, unit_price: $5]` The +// above expresses a pricing formula where the first 20GB is free, the // next 80GB is priced at $10 per GB followed by $5 per GB for -// additional -// usage. +// additional usage. type PricingExpression struct { // BaseUnit: The base unit for the SKU which is the unit used in usage - // exports. - // Example: "By" + // exports. Example: "By" BaseUnit string `json:"baseUnit,omitempty"` // BaseUnitConversionFactor: Conversion factor for converting from price - // per usage_unit to price per - // base_unit, and start_usage_amount to start_usage_amount in - // base_unit. - // unit_price / base_unit_conversion_factor = price per - // base_unit. - // start_usage_amount * base_unit_conversion_factor = start_usage_amount - // in - // base_unit. 
+ // per usage_unit to price per base_unit, and start_usage_amount to + // start_usage_amount in base_unit. unit_price / + // base_unit_conversion_factor = price per base_unit. start_usage_amount + // * base_unit_conversion_factor = start_usage_amount in base_unit. BaseUnitConversionFactor float64 `json:"baseUnitConversionFactor,omitempty"` - // BaseUnitDescription: The base unit in human readable form. - // Example: "byte". + // BaseUnitDescription: The base unit in human readable form. Example: + // "byte". BaseUnitDescription string `json:"baseUnitDescription,omitempty"` // DisplayQuantity: The recommended quantity of units for displaying - // pricing info. When - // displaying pricing info it is recommended to display: - // (unit_price * display_quantity) per display_quantity usage_unit. - // This field does not affect the pricing formula and is for display - // purposes - // only. - // Example: If the unit_price is "0.0001 USD", the usage_unit is "GB" - // and - // the display_quantity is "1000" then the recommended way of displaying - // the - // pricing info is "0.10 USD per 1000 GB" + // pricing info. When displaying pricing info it is recommended to + // display: (unit_price * display_quantity) per display_quantity + // usage_unit. This field does not affect the pricing formula and is for + // display purposes only. Example: If the unit_price is "0.0001 USD", + // the usage_unit is "GB" and the display_quantity is "1000" then the + // recommended way of displaying the pricing info is "0.10 USD per 1000 + // GB" DisplayQuantity float64 `json:"displayQuantity,omitempty"` // TieredRates: The list of tiered rates for this pricing. The total - // cost is computed by - // applying each of the tiered rates on usage. This repeated list is - // sorted - // by ascending order of start_usage_amount. + // cost is computed by applying each of the tiered rates on usage. This + // repeated list is sorted by ascending order of start_usage_amount. TieredRates []*TierRate `json:"tieredRates,omitempty"` // UsageUnit: The short hand for unit of usage this pricing is specified - // in. - // Example: usage_unit of "GiBy" means that usage is specified in "Gibi - // Byte". + // in. Example: usage_unit of "GiBy" means that usage is specified in + // "Gibi Byte". UsageUnit string `json:"usageUnit,omitempty"` - // UsageUnitDescription: The unit of usage in human readable - // form. + // UsageUnitDescription: The unit of usage in human readable form. // Example: "gibi byte". UsageUnitDescription string `json:"usageUnitDescription,omitempty"` @@ -1186,31 +1004,22 @@ func (s *PricingExpression) UnmarshalJSON(data []byte) error { // point of time. type PricingInfo struct { // AggregationInfo: Aggregation Info. This can be left unspecified if - // the pricing expression - // doesn't require aggregation. + // the pricing expression doesn't require aggregation. AggregationInfo *AggregationInfo `json:"aggregationInfo,omitempty"` // CurrencyConversionRate: Conversion rate used for currency conversion, - // from USD to the currency - // specified in the request. This includes any surcharge collected for - // billing - // in non USD currency. If a currency is not specified in the request - // this - // defaults to 1.0. - // Example: USD * currency_conversion_rate = JPY + // from USD to the currency specified in the request. This includes any + // surcharge collected for billing in non USD currency. If a currency is + // not specified in the request this defaults to 1.0. 
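The PricingExpression docs above describe tiered pricing only in prose (first 20 GBy free, the next 80 at $10, then $5 per unit). A hedged sketch of how a consumer of this package might evaluate that formula, assuming tiered_rates arrive sorted ascending by start_usage_amount as the docs state; the explicit zero-price first tier is an assumption to keep the example self-contained:

```go
package main

import (
	"fmt"

	cloudbilling "google.golang.org/api/cloudbilling/v1"
)

// unitPrice converts a TierRate's Money into a float64 (display-only precision).
func unitPrice(m *cloudbilling.Money) float64 {
	if m == nil {
		return 0
	}
	return float64(m.Units) + float64(m.Nanos)/1e9
}

// tieredCost applies each tier's rate to the usage above its start_usage_amount,
// mirroring the formula described in the PricingExpression docs.
func tieredCost(pe *cloudbilling.PricingExpression, usage float64) float64 {
	total := 0.0
	rates := pe.TieredRates // sorted ascending by StartUsageAmount per the docs
	for i, r := range rates {
		if usage <= r.StartUsageAmount {
			break
		}
		end := usage
		if i+1 < len(rates) && rates[i+1].StartUsageAmount < usage {
			end = rates[i+1].StartUsageAmount
		}
		total += (end - r.StartUsageAmount) * unitPrice(r.UnitPrice)
	}
	return total
}

func main() {
	// The example from the docs: first 20 GBy free, next 80 at $10, then $5.
	pe := &cloudbilling.PricingExpression{
		UsageUnit: "GBy",
		TieredRates: []*cloudbilling.TierRate{
			{StartUsageAmount: 0, UnitPrice: &cloudbilling.Money{CurrencyCode: "USD", Units: 0}},
			{StartUsageAmount: 20, UnitPrice: &cloudbilling.Money{CurrencyCode: "USD", Units: 10}},
			{StartUsageAmount: 100, UnitPrice: &cloudbilling.Money{CurrencyCode: "USD", Units: 5}},
		},
	}
	// 20*0 + 80*10 + 50*5 = 1050
	fmt.Printf("cost for 150 %s: $%.2f\n", pe.UsageUnit, tieredCost(pe, 150))
}
```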
Example: USD * + // currency_conversion_rate = JPY CurrencyConversionRate float64 `json:"currencyConversionRate,omitempty"` // EffectiveTime: The timestamp from which this pricing was effective - // within the requested - // time range. This is guaranteed to be greater than or equal to - // the - // start_time field in the request and less than the end_time field in - // the - // request. If a time range was not specified in the request this field - // will - // be equivalent to a time within the last 12 hours, indicating the - // latest - // pricing info. + // within the requested time range. This is guaranteed to be greater + // than or equal to the start_time field in the request and less than + // the end_time field in the request. If a time range was not specified + // in the request this field will be equivalent to a time within the + // last 12 hours, indicating the latest pricing info. EffectiveTime string `json:"effectiveTime,omitempty"` // PricingExpression: Expresses the pricing formula. See @@ -1218,8 +1027,7 @@ type PricingInfo struct { PricingExpression *PricingExpression `json:"pricingExpression,omitempty"` // Summary: An optional human readable summary of the pricing - // information, has a - // maximum length of 256 characters. + // information, has a maximum length of 256 characters. Summary string `json:"summary,omitempty"` // ForceSendFields is a list of field names (e.g. "AggregationInfo") to @@ -1260,41 +1068,33 @@ func (s *PricingInfo) UnmarshalJSON(data []byte) error { return nil } -// ProjectBillingInfo: Encapsulation of billing information for a GCP -// Console project. A project -// has at most one associated billing account at a time (but a billing -// account -// can be assigned to multiple projects). +// ProjectBillingInfo: Encapsulation of billing information for a Google +// Cloud Console project. A project has at most one associated billing +// account at a time (but a billing account can be assigned to multiple +// projects). type ProjectBillingInfo struct { // BillingAccountName: The resource name of the billing account - // associated with the project, if - // any. For example, `billingAccounts/012345-567890-ABCDEF`. + // associated with the project, if any. For example, + // `billingAccounts/012345-567890-ABCDEF`. BillingAccountName string `json:"billingAccountName,omitempty"` // BillingEnabled: True if the project is associated with an open - // billing account, to which - // usage on the project is charged. False if the project is associated - // with a - // closed billing account, or no billing account at all, and therefore - // cannot - // use paid services. This field is read-only. + // billing account, to which usage on the project is charged. False if + // the project is associated with a closed billing account, or no + // billing account at all, and therefore cannot use paid services. This + // field is read-only. BillingEnabled bool `json:"billingEnabled,omitempty"` - // Name: The resource name for the `ProjectBillingInfo`; has the - // form + // Name: The resource name for the `ProjectBillingInfo`; has the form // `projects/{project_id}/billingInfo`. For example, the resource name - // for the - // billing information for project `tokyo-rain-123` would - // be + // for the billing information for project `tokyo-rain-123` would be // `projects/tokyo-rain-123/billingInfo`. This field is read-only. Name string `json:"name,omitempty"` // ProjectId: The ID of the project that this `ProjectBillingInfo` - // represents, such as - // `tokyo-rain-123`. 
This is a convenience field so that you don't need - // to - // parse the `name` field to obtain a project ID. This field is - // read-only. + // represents, such as `tokyo-rain-123`. This is a convenience field so + // that you don't need to parse the `name` field to obtain a project ID. + // This field is read-only. ProjectId string `json:"projectId,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1327,20 +1127,18 @@ func (s *ProjectBillingInfo) MarshalJSON() ([]byte, error) { // Service: Encapsulates a single service in Google Cloud Platform. type Service struct { - // BusinessEntityName: The business under which the service is - // offered. + // BusinessEntityName: The business under which the service is offered. // Ex. "businessEntities/GCP", "businessEntities/Maps" BusinessEntityName string `json:"businessEntityName,omitempty"` // DisplayName: A human readable display name for this service. DisplayName string `json:"displayName,omitempty"` - // Name: The resource name for the service. - // Example: "services/DA34-426B-A397" + // Name: The resource name for the service. Example: + // "services/DA34-426B-A397" Name string `json:"name,omitempty"` - // ServiceId: The identifier for the service. - // Example: "DA34-426B-A397" + // ServiceId: The identifier for the service. Example: "DA34-426B-A397" ServiceId string `json:"serviceId,omitempty"` // ForceSendFields is a list of field names (e.g. "BusinessEntityName") @@ -1370,20 +1168,15 @@ func (s *Service) MarshalJSON() ([]byte, error) { // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. + // `resource`. The size of the policy is limited to a few 10s of KB. An + // empty policy is a valid policy but certain Cloud Platform services + // (such as Projects) might reject them. Policy *Policy `json:"policy,omitempty"` // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the - // policy to modify. Only - // the fields in the mask will be modified. If no mask is provided, - // the - // following default mask is used: - // - // `paths: "bindings, etag" + // policy to modify. Only the fields in the mask will be modified. If no + // mask is provided, the following default mask is used: `paths: + // "bindings, etag" UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. "Policy") to @@ -1416,31 +1209,30 @@ type Sku struct { Category *Category `json:"category,omitempty"` // Description: A human readable description of the SKU, has a maximum - // length of 256 - // characters. + // length of 256 characters. Description string `json:"description,omitempty"` - // Name: The resource name for the SKU. - // Example: "services/DA34-426B-A397/skus/AA95-CD31-42FE" + // GeoTaxonomy: The geographic taxonomy for this sku. + GeoTaxonomy *GeoTaxonomy `json:"geoTaxonomy,omitempty"` + + // Name: The resource name for the SKU. Example: + // "services/DA34-426B-A397/skus/AA95-CD31-42FE" Name string `json:"name,omitempty"` // PricingInfo: A timeline of pricing info for this SKU in chronological // order. PricingInfo []*PricingInfo `json:"pricingInfo,omitempty"` - // ServiceProviderName: Identifies the service provider. 
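ProjectBillingInfo is read-only metadata tying a project to a billing account. As a sketch, reading it for the `tokyo-rain-123` placeholder project from the field docs; the Projects.GetBillingInfo call is assumed from the same generated package (it is not part of this hunk):

```go
package main

import (
	"context"
	"log"

	cloudbilling "google.golang.org/api/cloudbilling/v1"
)

func main() {
	ctx := context.Background()
	svc, err := cloudbilling.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// "projects/tokyo-rain-123" is the placeholder project resource name
	// used throughout the ProjectBillingInfo descriptions.
	info, err := svc.Projects.GetBillingInfo("projects/tokyo-rain-123").Context(ctx).Do()
	if err != nil {
		log.Fatalf("getBillingInfo: %v", err)
	}
	if !info.BillingEnabled {
		log.Printf("%s has no open billing account", info.ProjectId)
		return
	}
	log.Printf("%s is billed to %s", info.ProjectId, info.BillingAccountName)
}
```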
- // This is 'Google' for first party services in Google Cloud Platform. + // ServiceProviderName: Identifies the service provider. This is + // 'Google' for first party services in Google Cloud Platform. ServiceProviderName string `json:"serviceProviderName,omitempty"` - // ServiceRegions: List of service regions this SKU is offered - // at. - // Example: "asia-east1" - // Service regions can be found at + // ServiceRegions: List of service regions this SKU is offered at. + // Example: "asia-east1" Service regions can be found at // https://cloud.google.com/about/locations/ ServiceRegions []string `json:"serviceRegions,omitempty"` - // SkuId: The identifier for the SKU. - // Example: "AA95-CD31-42FE" + // SkuId: The identifier for the SKU. Example: "AA95-CD31-42FE" SkuId string `json:"skuId,omitempty"` // ForceSendFields is a list of field names (e.g. "Category") to @@ -1470,11 +1262,8 @@ func (s *Sku) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. - // Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For - // more - // information see - // [IAM + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. For more information see [IAM // Overview](https://cloud.google.com/iam/docs/overview#permissions). Permissions []string `json:"permissions,omitempty"` @@ -1505,8 +1294,7 @@ func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. + // the caller is allowed. Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1540,15 +1328,12 @@ func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { // corresponding price. type TierRate struct { // StartUsageAmount: Usage is priced at this rate only after this - // amount. - // Example: start_usage_amount of 10 indicates that the usage will be - // priced - // at the unit_price after the first 10 usage_units. + // amount. Example: start_usage_amount of 10 indicates that the usage + // will be priced at the unit_price after the first 10 usage_units. StartUsageAmount float64 `json:"startUsageAmount,omitempty"` - // UnitPrice: The price per unit of usage. - // Example: unit_price of amount $10 indicates that each unit will cost - // $10. + // UnitPrice: The price per unit of usage. Example: unit_price of amount + // $10 indicates that each unit will cost $10. UnitPrice *Money `json:"unitPrice,omitempty"` // ForceSendFields is a list of field names (e.g. "StartUsageAmount") to @@ -1599,22 +1384,16 @@ type BillingAccountsCreateCall struct { header_ http.Header } -// Create: Creates a billing account. -// This method can only be used to create -// [billing -// subaccounts](https://cloud.google.com/billing/docs/concepts) -// by GCP resellers. -// When creating a subaccount, the current authenticated user must have -// the -// `billing.accounts.update` IAM permission on the master account, which -// is -// typically given to billing +// Create: Creates a billing account. This method can only be used to +// create [billing +// subaccounts](https://cloud.google.com/billing/docs/concepts) by +// Google Cloud resellers. 
When creating a subaccount, the current +// authenticated user must have the `billing.accounts.update` IAM +// permission on the master account, which is typically given to billing // account -// [administrators](https://cloud.google.com/billing/docs/how-to/ -// billing-access). -// This method will return an error if the master account has not -// been -// provisioned as a reseller account. +// [administrators](https://cloud.google.com/billing/docs/how-to/billing- +// access). This method will return an error if the master account has +// not been provisioned as a reseller account. func (r *BillingAccountsService) Create(billingaccount *BillingAccount) *BillingAccountsCreateCall { c := &BillingAccountsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.billingaccount = billingaccount @@ -1648,7 +1427,7 @@ func (c *BillingAccountsCreateCall) Header() http.Header { func (c *BillingAccountsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1709,7 +1488,7 @@ func (c *BillingAccountsCreateCall) Do(opts ...googleapi.CallOption) (*BillingAc } return ret, nil // { - // "description": "Creates a billing account.\nThis method can only be used to create\n[billing subaccounts](https://cloud.google.com/billing/docs/concepts)\nby GCP resellers.\nWhen creating a subaccount, the current authenticated user must have the\n`billing.accounts.update` IAM permission on the master account, which is\ntypically given to billing account\n[administrators](https://cloud.google.com/billing/docs/how-to/billing-access).\nThis method will return an error if the master account has not been\nprovisioned as a reseller account.", + // "description": "Creates a billing account. This method can only be used to create [billing subaccounts](https://cloud.google.com/billing/docs/concepts) by Google Cloud resellers. When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the master account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the master account has not been provisioned as a reseller account.", // "flatPath": "v1/billingAccounts", // "httpMethod": "POST", // "id": "cloudbilling.billingAccounts.create", @@ -1723,6 +1502,7 @@ func (c *BillingAccountsCreateCall) Do(opts ...googleapi.CallOption) (*BillingAc // "$ref": "BillingAccount" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-billing", // "https://www.googleapis.com/auth/cloud-platform" // ] // } @@ -1741,11 +1521,8 @@ type BillingAccountsGetCall struct { } // Get: Gets information about a billing account. The current -// authenticated user -// must be a [viewer of the -// billing -// account](https://cloud.google.com/billing/docs/how-to/billing- -// access). +// authenticated user must be a [viewer of the billing +// account](https://cloud.google.com/billing/docs/how-to/billing-access). 
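As an illustrative aside: given the BillingAccountsService.Get signature documented just above, fetching a billing account from this vendored client might look like the minimal sketch below. The account name is the placeholder used in the generated docs, and Application Default Credentials are assumed to be configured.

package main

import (
	"context"
	"fmt"
	"log"

	cloudbilling "google.golang.org/api/cloudbilling/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials (assumed configured).
	svc, err := cloudbilling.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Resource name format is taken from the generated docs; the value is a placeholder.
	acct, err := svc.BillingAccounts.Get("billingAccounts/012345-567890-ABCDEF").Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s (open=%v)\n", acct.DisplayName, acct.Open)
}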
func (r *BillingAccountsService) Get(name string) *BillingAccountsGetCall { c := &BillingAccountsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -1789,7 +1566,7 @@ func (c *BillingAccountsGetCall) Header() http.Header { func (c *BillingAccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1851,7 +1628,7 @@ func (c *BillingAccountsGetCall) Do(opts ...googleapi.CallOption) (*BillingAccou } return ret, nil // { - // "description": "Gets information about a billing account. The current authenticated user\nmust be a [viewer of the billing\naccount](https://cloud.google.com/billing/docs/how-to/billing-access).", + // "description": "Gets information about a billing account. The current authenticated user must be a [viewer of the billing account](https://cloud.google.com/billing/docs/how-to/billing-access).", // "flatPath": "v1/billingAccounts/{billingAccountsId}", // "httpMethod": "GET", // "id": "cloudbilling.billingAccounts.get", @@ -1860,7 +1637,7 @@ func (c *BillingAccountsGetCall) Do(opts ...googleapi.CallOption) (*BillingAccou // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the billing account to retrieve. For example,\n`billingAccounts/012345-567890-ABCDEF`.", + // "description": "Required. The resource name of the billing account to retrieve. For example, `billingAccounts/012345-567890-ABCDEF`.", // "location": "path", // "pattern": "^billingAccounts/[^/]+$", // "required": true, @@ -1872,6 +1649,8 @@ func (c *BillingAccountsGetCall) Do(opts ...googleapi.CallOption) (*BillingAccou // "$ref": "BillingAccount" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-billing", + // "https://www.googleapis.com/auth/cloud-billing.readonly", // "https://www.googleapis.com/auth/cloud-platform" // ] // } @@ -1889,14 +1668,11 @@ type BillingAccountsGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a billing -// account. +// GetIamPolicy: Gets the access control policy for a billing account. // The caller must have the `billing.accounts.getIamPolicy` permission -// on the -// account, which is often given to billing -// account -// [viewers](https://cloud.google.com/billing/docs/how-to/billing -// -access). +// on the account, which is often given to billing account +// [viewers](https://cloud.google.com/billing/docs/how-to/billing-access) +// . func (r *BillingAccountsService) GetIamPolicy(resource string) *BillingAccountsGetIamPolicyCall { c := &BillingAccountsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -1905,24 +1681,14 @@ func (r *BillingAccountsService) GetIamPolicy(resource string) *BillingAccountsG // OptionsRequestedPolicyVersion sets the optional parameter // "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. -// -// Requests for policies with any conditional bindings must specify -// version 3. -// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. 
-// -// To learn which resources support conditions in their IAM policies, -// see -// the -// [IAM -// documentation](https://cloud.google.com/iam/help/conditions/r -// esource-policies). +// returned. Valid values are 0, 1, and 3. Requests specifying an +// invalid value will be rejected. Requests for policies with any +// conditional bindings must specify version 3. Policies without any +// conditional bindings may specify any valid value or leave the field +// unset. To learn which resources support conditions in their IAM +// policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). func (c *BillingAccountsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *BillingAccountsGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -1965,7 +1731,7 @@ func (c *BillingAccountsGetIamPolicyCall) Header() http.Header { func (c *BillingAccountsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2027,7 +1793,7 @@ func (c *BillingAccountsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Pol } return ret, nil // { - // "description": "Gets the access control policy for a billing account.\nThe caller must have the `billing.accounts.getIamPolicy` permission on the\naccount, which is often given to billing account\n[viewers](https://cloud.google.com/billing/docs/how-to/billing-access).", + // "description": "Gets the access control policy for a billing account. The caller must have the `billing.accounts.getIamPolicy` permission on the account, which is often given to billing account [viewers](https://cloud.google.com/billing/docs/how-to/billing-access).", // "flatPath": "v1/billingAccounts/{billingAccountsId}:getIamPolicy", // "httpMethod": "GET", // "id": "cloudbilling.billingAccounts.getIamPolicy", @@ -2036,13 +1802,13 @@ func (c *BillingAccountsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Pol // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + // "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. 
To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" // }, // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^billingAccounts/[^/]+$", // "required": true, @@ -2054,6 +1820,8 @@ func (c *BillingAccountsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Pol // "$ref": "Policy" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-billing", + // "https://www.googleapis.com/auth/cloud-billing.readonly", // "https://www.googleapis.com/auth/cloud-platform" // ] // } @@ -2071,46 +1839,37 @@ type BillingAccountsListCall struct { } // List: Lists the billing accounts that the current authenticated user -// has -// permission -// to -// [view](https://cloud.google.com/billing/docs/how-to/billing-access) -// . +// has permission to +// [view](https://cloud.google.com/billing/docs/how-to/billing-access). func (r *BillingAccountsService) List() *BillingAccountsListCall { c := &BillingAccountsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c } // Filter sets the optional parameter "filter": Options for how to -// filter the returned billing accounts. -// Currently this only supports filtering -// for -// [subaccounts](https://cloud.google.com/billing/docs/concepts) under -// a -// single provided reseller billing account. -// (e.g. +// filter the returned billing accounts. Currently this only supports +// filtering for +// [subaccounts](https://cloud.google.com/billing/docs/concepts) under a +// single provided reseller billing account. (e.g. // "master_billing_account=billingAccounts/012345-678901-ABCDEF"). -// Boolea -// n algebra and other fields are not currently supported. +// Boolean algebra and other fields are not currently supported. func (c *BillingAccountsListCall) Filter(filter string) *BillingAccountsListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": Requested page size. -// The maximum page size is 100; this is also the -// default. +// The maximum page size is 100; this is also the default. func (c *BillingAccountsListCall) PageSize(pageSize int64) *BillingAccountsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": A token -// identifying a page of results to return. This should be -// a +// identifying a page of results to return. This should be a // `next_page_token` value returned from a previous -// `ListBillingAccounts` -// call. If unspecified, the first page of results is returned. +// `ListBillingAccounts` call. If unspecified, the first page of results +// is returned. 
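As a hedged sketch of the List call whose Filter, PageSize and PageToken setters appear above: the generated list calls also expose a Pages helper that walks every page, so listing reseller subaccounts might look like the following. The filter string is the example from the generated docs and the reseller account name is a placeholder; imports are as in the earlier sketch.

func listSubaccounts(ctx context.Context, svc *cloudbilling.APIService) error {
	return svc.BillingAccounts.List().
		// Filter syntax copied from the generated docs; only subaccount filtering is supported.
		Filter(`master_billing_account=billingAccounts/012345-678901-ABCDEF`).
		PageSize(100). // 100 is the documented maximum and default page size.
		Pages(ctx, func(page *cloudbilling.ListBillingAccountsResponse) error {
			for _, a := range page.BillingAccounts {
				fmt.Println(a.Name, a.DisplayName)
			}
			return nil
		})
}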
func (c *BillingAccountsListCall) PageToken(pageToken string) *BillingAccountsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -2153,7 +1912,7 @@ func (c *BillingAccountsListCall) Header() http.Header { func (c *BillingAccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2212,25 +1971,25 @@ func (c *BillingAccountsListCall) Do(opts ...googleapi.CallOption) (*ListBilling } return ret, nil // { - // "description": "Lists the billing accounts that the current authenticated user has\npermission to\n[view](https://cloud.google.com/billing/docs/how-to/billing-access).", + // "description": "Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).", // "flatPath": "v1/billingAccounts", // "httpMethod": "GET", // "id": "cloudbilling.billingAccounts.list", // "parameterOrder": [], // "parameters": { // "filter": { - // "description": "Options for how to filter the returned billing accounts.\nCurrently this only supports filtering for\n[subaccounts](https://cloud.google.com/billing/docs/concepts) under a\nsingle provided reseller billing account.\n(e.g. \"master_billing_account=billingAccounts/012345-678901-ABCDEF\").\nBoolean algebra and other fields are not currently supported.", + // "description": "Options for how to filter the returned billing accounts. Currently this only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided reseller billing account. (e.g. \"master_billing_account=billingAccounts/012345-678901-ABCDEF\"). Boolean algebra and other fields are not currently supported.", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Requested page size. The maximum page size is 100; this is also the\ndefault.", + // "description": "Requested page size. The maximum page size is 100; this is also the default.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "A token identifying a page of results to return. This should be a\n`next_page_token` value returned from a previous `ListBillingAccounts`\ncall. If unspecified, the first page of results is returned.", + // "description": "A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned.", // "location": "query", // "type": "string" // } @@ -2240,6 +1999,8 @@ func (c *BillingAccountsListCall) Do(opts ...googleapi.CallOption) (*ListBilling // "$ref": "ListBillingAccountsResponse" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-billing", + // "https://www.googleapis.com/auth/cloud-billing.readonly", // "https://www.googleapis.com/auth/cloud-platform" // ] // } @@ -2278,15 +2039,12 @@ type BillingAccountsPatchCall struct { header_ http.Header } -// Patch: Updates a billing account's fields. -// Currently the only field that can be edited is `display_name`. 
-// The current authenticated user must have the -// `billing.accounts.update` -// IAM permission, which is typically given to -// the -// [administrator](https://cloud.google.com/billing/docs/how-to/billi -// ng-access) -// of the billing account. +// Patch: Updates a billing account's fields. Currently the only field +// that can be edited is `display_name`. The current authenticated user +// must have the `billing.accounts.update` IAM permission, which is +// typically given to the +// [administrator](https://cloud.google.com/billing/docs/how-to/billing-a +// ccess) of the billing account. func (r *BillingAccountsService) Patch(name string, billingaccount *BillingAccount) *BillingAccountsPatchCall { c := &BillingAccountsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2295,8 +2053,7 @@ func (r *BillingAccountsService) Patch(name string, billingaccount *BillingAccou } // UpdateMask sets the optional parameter "updateMask": The update mask -// applied to the resource. -// Only "display_name" is currently supported. +// applied to the resource. Only "display_name" is currently supported. func (c *BillingAccountsPatchCall) UpdateMask(updateMask string) *BillingAccountsPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -2329,7 +2086,7 @@ func (c *BillingAccountsPatchCall) Header() http.Header { func (c *BillingAccountsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2393,7 +2150,7 @@ func (c *BillingAccountsPatchCall) Do(opts ...googleapi.CallOption) (*BillingAcc } return ret, nil // { - // "description": "Updates a billing account's fields.\nCurrently the only field that can be edited is `display_name`.\nThe current authenticated user must have the `billing.accounts.update`\nIAM permission, which is typically given to the\n[administrator](https://cloud.google.com/billing/docs/how-to/billing-access)\nof the billing account.", + // "description": "Updates a billing account's fields. Currently the only field that can be edited is `display_name`. The current authenticated user must have the `billing.accounts.update` IAM permission, which is typically given to the [administrator](https://cloud.google.com/billing/docs/how-to/billing-access) of the billing account.", // "flatPath": "v1/billingAccounts/{billingAccountsId}", // "httpMethod": "PATCH", // "id": "cloudbilling.billingAccounts.patch", @@ -2409,7 +2166,7 @@ func (c *BillingAccountsPatchCall) Do(opts ...googleapi.CallOption) (*BillingAcc // "type": "string" // }, // "updateMask": { - // "description": "The update mask applied to the resource.\nOnly \"display_name\" is currently supported.", + // "description": "The update mask applied to the resource. Only \"display_name\" is currently supported.", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -2423,6 +2180,7 @@ func (c *BillingAccountsPatchCall) Do(opts ...googleapi.CallOption) (*BillingAcc // "$ref": "BillingAccount" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-billing", // "https://www.googleapis.com/auth/cloud-platform" // ] // } @@ -2441,14 +2199,11 @@ type BillingAccountsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy for a billing account. -// Replaces any existing -// policy. 
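A small illustrative sketch of the Patch call documented above, which per the generated docs can only edit display_name and therefore sends an update mask naming just that field. Account name and display name are placeholders; the client is constructed as in the earlier sketch.

func renameBillingAccount(ctx context.Context, svc *cloudbilling.APIService) error {
	// Only display_name is editable, so the update mask names that single field.
	_, err := svc.BillingAccounts.Patch(
		"billingAccounts/012345-567890-ABCDEF",
		&cloudbilling.BillingAccount{DisplayName: "My new display name"},
	).UpdateMask("display_name").Context(ctx).Do()
	return err
}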
-// The caller must have the `billing.accounts.setIamPolicy` permission -// on the -// account, which is often given to billing -// account -// [administrators](https://cloud.google.com/billing/docs/how-to/ -// billing-access). +// Replaces any existing policy. The caller must have the +// `billing.accounts.setIamPolicy` permission on the account, which is +// often given to billing account +// [administrators](https://cloud.google.com/billing/docs/how-to/billing- +// access). func (r *BillingAccountsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *BillingAccountsSetIamPolicyCall { c := &BillingAccountsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2483,7 +2238,7 @@ func (c *BillingAccountsSetIamPolicyCall) Header() http.Header { func (c *BillingAccountsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2547,7 +2302,7 @@ func (c *BillingAccountsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Pol } return ret, nil // { - // "description": "Sets the access control policy for a billing account. Replaces any existing\npolicy.\nThe caller must have the `billing.accounts.setIamPolicy` permission on the\naccount, which is often given to billing account\n[administrators](https://cloud.google.com/billing/docs/how-to/billing-access).", + // "description": "Sets the access control policy for a billing account. Replaces any existing policy. The caller must have the `billing.accounts.setIamPolicy` permission on the account, which is often given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access).", // "flatPath": "v1/billingAccounts/{billingAccountsId}:setIamPolicy", // "httpMethod": "POST", // "id": "cloudbilling.billingAccounts.setIamPolicy", @@ -2556,7 +2311,7 @@ func (c *BillingAccountsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Pol // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^billingAccounts/[^/]+$", // "required": true, @@ -2571,6 +2326,7 @@ func (c *BillingAccountsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Pol // "$ref": "Policy" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-billing", // "https://www.googleapis.com/auth/cloud-platform" // ] // } @@ -2589,10 +2345,9 @@ type BillingAccountsTestIamPermissionsCall struct { } // TestIamPermissions: Tests the access control policy for a billing -// account. This method takes -// the resource and a set of permissions as input and returns the subset -// of -// the input permissions that the caller is allowed for that resource. +// account. This method takes the resource and a set of permissions as +// input and returns the subset of the input permissions that the caller +// is allowed for that resource. 
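Putting the three IAM calls above together, a read-modify-write of a billing account policy might look like the sketch below. Requesting policy version 3 follows the OptionsRequestedPolicyVersion comment; the role, member and permission strings are illustrative placeholders, not values mandated by this client.

func grantBillingViewer(ctx context.Context, svc *cloudbilling.APIService, account, member string) error {
	// Read the current policy, asking for the version that can carry conditional bindings.
	policy, err := svc.BillingAccounts.GetIamPolicy(account).
		OptionsRequestedPolicyVersion(3).Context(ctx).Do()
	if err != nil {
		return err
	}
	// Add a binding and write the whole policy back (SetIamPolicy replaces any existing policy).
	policy.Bindings = append(policy.Bindings, &cloudbilling.Binding{
		Role:    "roles/billing.viewer",
		Members: []string{member},
	})
	if _, err := svc.BillingAccounts.SetIamPolicy(account,
		&cloudbilling.SetIamPolicyRequest{Policy: policy}).Context(ctx).Do(); err != nil {
		return err
	}
	// Optionally check which of a set of permissions the caller itself holds.
	resp, err := svc.BillingAccounts.TestIamPermissions(account,
		&cloudbilling.TestIamPermissionsRequest{
			Permissions: []string{"billing.accounts.get", "billing.accounts.getIamPolicy"},
		}).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("caller holds:", resp.Permissions)
	return nil
}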
func (r *BillingAccountsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *BillingAccountsTestIamPermissionsCall { c := &BillingAccountsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2627,7 +2382,7 @@ func (c *BillingAccountsTestIamPermissionsCall) Header() http.Header { func (c *BillingAccountsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2691,7 +2446,7 @@ func (c *BillingAccountsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Tests the access control policy for a billing account. This method takes\nthe resource and a set of permissions as input and returns the subset of\nthe input permissions that the caller is allowed for that resource.", + // "description": "Tests the access control policy for a billing account. This method takes the resource and a set of permissions as input and returns the subset of the input permissions that the caller is allowed for that resource.", // "flatPath": "v1/billingAccounts/{billingAccountsId}:testIamPermissions", // "httpMethod": "POST", // "id": "cloudbilling.billingAccounts.testIamPermissions", @@ -2700,7 +2455,7 @@ func (c *BillingAccountsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^billingAccounts/[^/]+$", // "required": true, @@ -2715,6 +2470,8 @@ func (c *BillingAccountsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // "$ref": "TestIamPermissionsResponse" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-billing", + // "https://www.googleapis.com/auth/cloud-billing.readonly", // "https://www.googleapis.com/auth/cloud-platform" // ] // } @@ -2733,13 +2490,11 @@ type BillingAccountsProjectsListCall struct { } // List: Lists the projects associated with a billing account. The -// current -// authenticated user must have the `billing.resourceAssociations.list` -// IAM -// permission, which is often given to billing -// account -// [viewers](https://cloud.google.com/billing/docs/how-to/billing -// -access). +// current authenticated user must have the +// `billing.resourceAssociations.list` IAM permission, which is often +// given to billing account +// [viewers](https://cloud.google.com/billing/docs/how-to/billing-access) +// . func (r *BillingAccountsProjectsService) List(name string) *BillingAccountsProjectsListCall { c := &BillingAccountsProjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2747,19 +2502,17 @@ func (r *BillingAccountsProjectsService) List(name string) *BillingAccountsProje } // PageSize sets the optional parameter "pageSize": Requested page size. -// The maximum page size is 100; this is also the -// default. +// The maximum page size is 100; this is also the default. 
func (c *BillingAccountsProjectsListCall) PageSize(pageSize int64) *BillingAccountsProjectsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": A token -// identifying a page of results to be returned. This should be -// a +// identifying a page of results to be returned. This should be a // `next_page_token` value returned from a previous -// `ListProjectBillingInfo` -// call. If unspecified, the first page of results is returned. +// `ListProjectBillingInfo` call. If unspecified, the first page of +// results is returned. func (c *BillingAccountsProjectsListCall) PageToken(pageToken string) *BillingAccountsProjectsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -2802,7 +2555,7 @@ func (c *BillingAccountsProjectsListCall) Header() http.Header { func (c *BillingAccountsProjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2864,7 +2617,7 @@ func (c *BillingAccountsProjectsListCall) Do(opts ...googleapi.CallOption) (*Lis } return ret, nil // { - // "description": "Lists the projects associated with a billing account. The current\nauthenticated user must have the `billing.resourceAssociations.list` IAM\npermission, which is often given to billing account\n[viewers](https://cloud.google.com/billing/docs/how-to/billing-access).", + // "description": "Lists the projects associated with a billing account. The current authenticated user must have the `billing.resourceAssociations.list` IAM permission, which is often given to billing account [viewers](https://cloud.google.com/billing/docs/how-to/billing-access).", // "flatPath": "v1/billingAccounts/{billingAccountsId}/projects", // "httpMethod": "GET", // "id": "cloudbilling.billingAccounts.projects.list", @@ -2873,20 +2626,20 @@ func (c *BillingAccountsProjectsListCall) Do(opts ...googleapi.CallOption) (*Lis // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the billing account associated with the projects that\nyou want to list. For example, `billingAccounts/012345-567890-ABCDEF`.", + // "description": "Required. The resource name of the billing account associated with the projects that you want to list. For example, `billingAccounts/012345-567890-ABCDEF`.", // "location": "path", // "pattern": "^billingAccounts/[^/]+$", // "required": true, // "type": "string" // }, // "pageSize": { - // "description": "Requested page size. The maximum page size is 100; this is also the\ndefault.", + // "description": "Requested page size. The maximum page size is 100; this is also the default.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "A token identifying a page of results to be returned. This should be a\n`next_page_token` value returned from a previous `ListProjectBillingInfo`\ncall. If unspecified, the first page of results is returned.", + // "description": "A token identifying a page of results to be returned. This should be a `next_page_token` value returned from a previous `ListProjectBillingInfo` call. 
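As a brief sketch of the BillingAccounts.Projects.List call documented above, walking every page of projects attached to a billing account might look like this; the response and field names follow the ProjectBillingInfo schema earlier in this file, and the account argument is whatever "billingAccounts/..." name the caller holds.

func listBilledProjects(ctx context.Context, svc *cloudbilling.APIService, account string) error {
	return svc.BillingAccounts.Projects.List(account).
		PageSize(100). // documented maximum and default
		Pages(ctx, func(page *cloudbilling.ListProjectBillingInfoResponse) error {
			for _, p := range page.ProjectBillingInfo {
				fmt.Println(p.ProjectId, p.BillingEnabled)
			}
			return nil
		})
}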
If unspecified, the first page of results is returned.", // "location": "query", // "type": "string" // } @@ -2896,6 +2649,8 @@ func (c *BillingAccountsProjectsListCall) Do(opts ...googleapi.CallOption) (*Lis // "$ref": "ListProjectBillingInfoResponse" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-billing", + // "https://www.googleapis.com/auth/cloud-billing.readonly", // "https://www.googleapis.com/auth/cloud-platform" // ] // } @@ -2935,12 +2690,9 @@ type ProjectsGetBillingInfoCall struct { } // GetBillingInfo: Gets the billing information for a project. The -// current authenticated user -// must have [permission to view -// the -// project](https://cloud.google.com/docs/permissions-overview#h.bgs0 -// oxofvnoo -// ). +// current authenticated user must have [permission to view the +// project](https://cloud.google.com/docs/permissions-overview#h.bgs0oxof +// vnoo ). func (r *ProjectsService) GetBillingInfo(name string) *ProjectsGetBillingInfoCall { c := &ProjectsGetBillingInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2984,7 +2736,7 @@ func (c *ProjectsGetBillingInfoCall) Header() http.Header { func (c *ProjectsGetBillingInfoCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3046,7 +2798,7 @@ func (c *ProjectsGetBillingInfoCall) Do(opts ...googleapi.CallOption) (*ProjectB } return ret, nil // { - // "description": "Gets the billing information for a project. The current authenticated user\nmust have [permission to view the\nproject](https://cloud.google.com/docs/permissions-overview#h.bgs0oxofvnoo\n).", + // "description": "Gets the billing information for a project. The current authenticated user must have [permission to view the project](https://cloud.google.com/docs/permissions-overview#h.bgs0oxofvnoo ).", // "flatPath": "v1/projects/{projectsId}/billingInfo", // "httpMethod": "GET", // "id": "cloudbilling.projects.getBillingInfo", @@ -3055,7 +2807,7 @@ func (c *ProjectsGetBillingInfoCall) Do(opts ...googleapi.CallOption) (*ProjectB // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the project for which billing information is\nretrieved. For example, `projects/tokyo-rain-123`.", + // "description": "Required. The resource name of the project for which billing information is retrieved. For example, `projects/tokyo-rain-123`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -3067,6 +2819,8 @@ func (c *ProjectsGetBillingInfoCall) Do(opts ...googleapi.CallOption) (*ProjectB // "$ref": "ProjectBillingInfo" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-billing", + // "https://www.googleapis.com/auth/cloud-billing.readonly", // "https://www.googleapis.com/auth/cloud-platform" // ] // } @@ -3085,61 +2839,33 @@ type ProjectsUpdateBillingInfoCall struct { } // UpdateBillingInfo: Sets or updates the billing account associated -// with a project. You specify -// the new billing account by setting the `billing_account_name` in -// the -// `ProjectBillingInfo` resource to the resource name of a billing -// account. -// Associating a project with an open billing account enables billing on -// the -// project and allows charges for resource usage. 
If the project already -// had a -// billing account, this method changes the billing account used for -// resource -// usage charges. -// -// *Note:* Incurred charges that have not yet been reported in the -// transaction -// history of the GCP Console might be billed to the new -// billing -// account, even if the charge occurred before the new billing account -// was -// assigned to the project. -// -// The current authenticated user must have ownership privileges for -// both -// the -// [project](https://cloud.google.com/docs/permissions-overview#h.bgs -// 0oxofvnoo -// ) and the -// [billing -// account](https://cloud.google.com/billing/docs/how-to/billing -// -access). -// -// You can disable billing on the project by setting -// the -// `billing_account_name` field to empty. This action disassociates +// with a project. You specify the new billing account by setting the +// `billing_account_name` in the `ProjectBillingInfo` resource to the +// resource name of a billing account. Associating a project with an +// open billing account enables billing on the project and allows +// charges for resource usage. If the project already had a billing +// account, this method changes the billing account used for resource +// usage charges. *Note:* Incurred charges that have not yet been +// reported in the transaction history of the Google Cloud Console might +// be billed to the new billing account, even if the charge occurred +// before the new billing account was assigned to the project. The +// current authenticated user must have ownership privileges for both // the +// [project](https://cloud.google.com/docs/permissions-overview#h.bgs0oxo +// fvnoo ) and the [billing +// account](https://cloud.google.com/billing/docs/how-to/billing-access). +// You can disable billing on the project by setting the +// `billing_account_name` field to empty. This action disassociates the // current billing account from the project. Any billable activity of -// your -// in-use services will stop, and your application could stop -// functioning as -// expected. Any unbilled charges to date will be billed to the -// previously -// associated account. The current authenticated user must be either an -// owner -// of the project or an owner of the billing account for the -// project. -// -// Note that associating a project with a *closed* billing account will -// have -// much the same effect as disabling billing on the project: any -// paid -// resources used by the project will be shut down. Thus, unless you -// wish to -// disable billing, you should always call this method with the name of -// an -// *open* billing account. +// your in-use services will stop, and your application could stop +// functioning as expected. Any unbilled charges to date will be billed +// to the previously associated account. The current authenticated user +// must be either an owner of the project or an owner of the billing +// account for the project. Note that associating a project with a +// *closed* billing account will have much the same effect as disabling +// billing on the project: any paid resources used by the project will +// be shut down. Thus, unless you wish to disable billing, you should +// always call this method with the name of an *open* billing account. 
func (r *ProjectsService) UpdateBillingInfo(name string, projectbillinginfo *ProjectBillingInfo) *ProjectsUpdateBillingInfoCall { c := &ProjectsUpdateBillingInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3174,7 +2900,7 @@ func (c *ProjectsUpdateBillingInfoCall) Header() http.Header { func (c *ProjectsUpdateBillingInfoCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3238,7 +2964,7 @@ func (c *ProjectsUpdateBillingInfoCall) Do(opts ...googleapi.CallOption) (*Proje } return ret, nil // { - // "description": "Sets or updates the billing account associated with a project. You specify\nthe new billing account by setting the `billing_account_name` in the\n`ProjectBillingInfo` resource to the resource name of a billing account.\nAssociating a project with an open billing account enables billing on the\nproject and allows charges for resource usage. If the project already had a\nbilling account, this method changes the billing account used for resource\nusage charges.\n\n*Note:* Incurred charges that have not yet been reported in the transaction\nhistory of the GCP Console might be billed to the new billing\naccount, even if the charge occurred before the new billing account was\nassigned to the project.\n\nThe current authenticated user must have ownership privileges for both the\n[project](https://cloud.google.com/docs/permissions-overview#h.bgs0oxofvnoo\n) and the [billing\naccount](https://cloud.google.com/billing/docs/how-to/billing-access).\n\nYou can disable billing on the project by setting the\n`billing_account_name` field to empty. This action disassociates the\ncurrent billing account from the project. Any billable activity of your\nin-use services will stop, and your application could stop functioning as\nexpected. Any unbilled charges to date will be billed to the previously\nassociated account. The current authenticated user must be either an owner\nof the project or an owner of the billing account for the project.\n\nNote that associating a project with a *closed* billing account will have\nmuch the same effect as disabling billing on the project: any paid\nresources used by the project will be shut down. Thus, unless you wish to\ndisable billing, you should always call this method with the name of an\n*open* billing account.", + // "description": "Sets or updates the billing account associated with a project. You specify the new billing account by setting the `billing_account_name` in the `ProjectBillingInfo` resource to the resource name of a billing account. Associating a project with an open billing account enables billing on the project and allows charges for resource usage. If the project already had a billing account, this method changes the billing account used for resource usage charges. *Note:* Incurred charges that have not yet been reported in the transaction history of the Google Cloud Console might be billed to the new billing account, even if the charge occurred before the new billing account was assigned to the project. The current authenticated user must have ownership privileges for both the [project](https://cloud.google.com/docs/permissions-overview#h.bgs0oxofvnoo ) and the [billing account](https://cloud.google.com/billing/docs/how-to/billing-access). 
You can disable billing on the project by setting the `billing_account_name` field to empty. This action disassociates the current billing account from the project. Any billable activity of your in-use services will stop, and your application could stop functioning as expected. Any unbilled charges to date will be billed to the previously associated account. The current authenticated user must be either an owner of the project or an owner of the billing account for the project. Note that associating a project with a *closed* billing account will have much the same effect as disabling billing on the project: any paid resources used by the project will be shut down. Thus, unless you wish to disable billing, you should always call this method with the name of an *open* billing account.", // "flatPath": "v1/projects/{projectsId}/billingInfo", // "httpMethod": "PUT", // "id": "cloudbilling.projects.updateBillingInfo", @@ -3247,7 +2973,7 @@ func (c *ProjectsUpdateBillingInfoCall) Do(opts ...googleapi.CallOption) (*Proje // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the project associated with the billing information\nthat you want to update. For example, `projects/tokyo-rain-123`.", + // "description": "Required. The resource name of the project associated with the billing information that you want to update. For example, `projects/tokyo-rain-123`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -3262,6 +2988,7 @@ func (c *ProjectsUpdateBillingInfoCall) Do(opts ...googleapi.CallOption) (*Proje // "$ref": "ProjectBillingInfo" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-billing", // "https://www.googleapis.com/auth/cloud-platform" // ] // } @@ -3292,10 +3019,9 @@ func (c *ServicesListCall) PageSize(pageSize int64) *ServicesListCall { } // PageToken sets the optional parameter "pageToken": A token -// identifying a page of results to return. This should be -// a -// `next_page_token` value returned from a previous `ListServices` -// call. If unspecified, the first page of results is returned. +// identifying a page of results to return. This should be a +// `next_page_token` value returned from a previous `ListServices` call. +// If unspecified, the first page of results is returned. func (c *ServicesListCall) PageToken(pageToken string) *ServicesListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -3338,7 +3064,7 @@ func (c *ServicesListCall) Header() http.Header { func (c *ServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3410,7 +3136,7 @@ func (c *ServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesRespon // "type": "integer" // }, // "pageToken": { - // "description": "A token identifying a page of results to return. This should be a\n`next_page_token` value returned from a previous `ListServices`\ncall. If unspecified, the first page of results is returned.", + // "description": "A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListServices` call. 
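The GetBillingInfo/UpdateBillingInfo pair documented above amounts to reading a project's ProjectBillingInfo and writing it back with a new billing_account_name. A minimal sketch, reusing the client from the earlier example and the placeholder resource names from the generated docs (setting BillingAccountName to the empty string instead would disable billing, as described above):

func linkProjectToBillingAccount(ctx context.Context, svc *cloudbilling.APIService) error {
	// Read the current billing association for the project.
	info, err := svc.Projects.GetBillingInfo("projects/tokyo-rain-123").Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("currently billed to:", info.BillingAccountName)
	// Point the project at an (open) billing account.
	_, err = svc.Projects.UpdateBillingInfo("projects/tokyo-rain-123",
		&cloudbilling.ProjectBillingInfo{
			BillingAccountName: "billingAccounts/012345-567890-ABCDEF",
		}).Context(ctx).Do()
	return err
}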
If unspecified, the first page of results is returned.", // "location": "query", // "type": "string" // } @@ -3420,6 +3146,8 @@ func (c *ServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesRespon // "$ref": "ListServicesResponse" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-billing", + // "https://www.googleapis.com/auth/cloud-billing.readonly", // "https://www.googleapis.com/auth/cloud-platform" // ] // } @@ -3466,25 +3194,20 @@ func (r *ServicesSkusService) List(parent string) *ServicesSkusListCall { } // CurrencyCode sets the optional parameter "currencyCode": The ISO 4217 -// currency code for the pricing info in the response proto. -// Will use the conversion rate as of start_time. -// If not specified USD will be used. +// currency code for the pricing info in the response proto. Will use +// the conversion rate as of start_time. If not specified USD will be +// used. func (c *ServicesSkusListCall) CurrencyCode(currencyCode string) *ServicesSkusListCall { c.urlParams_.Set("currencyCode", currencyCode) return c } // EndTime sets the optional parameter "endTime": Optional exclusive end -// time of the time range for which the pricing -// versions will be returned. Timestamps in the future are not -// allowed. -// The time range has to be within a single calendar month -// in -// America/Los_Angeles timezone. Time range as a whole is optional. If -// not -// specified, the latest pricing will be returned (up to 12 hours old -// at -// most). +// time of the time range for which the pricing versions will be +// returned. Timestamps in the future are not allowed. The time range +// has to be within a single calendar month in America/Los_Angeles +// timezone. Time range as a whole is optional. If not specified, the +// latest pricing will be returned (up to 12 hours old at most). func (c *ServicesSkusListCall) EndTime(endTime string) *ServicesSkusListCall { c.urlParams_.Set("endTime", endTime) return c @@ -3498,26 +3221,20 @@ func (c *ServicesSkusListCall) PageSize(pageSize int64) *ServicesSkusListCall { } // PageToken sets the optional parameter "pageToken": A token -// identifying a page of results to return. This should be -// a -// `next_page_token` value returned from a previous `ListSkus` -// call. If unspecified, the first page of results is returned. +// identifying a page of results to return. This should be a +// `next_page_token` value returned from a previous `ListSkus` call. If +// unspecified, the first page of results is returned. func (c *ServicesSkusListCall) PageToken(pageToken string) *ServicesSkusListCall { c.urlParams_.Set("pageToken", pageToken) return c } // StartTime sets the optional parameter "startTime": Optional inclusive -// start time of the time range for which the pricing -// versions will be returned. Timestamps in the future are not -// allowed. -// The time range has to be within a single calendar month -// in -// America/Los_Angeles timezone. Time range as a whole is optional. If -// not -// specified, the latest pricing will be returned (up to 12 hours old -// at -// most). +// start time of the time range for which the pricing versions will be +// returned. Timestamps in the future are not allowed. The time range +// has to be within a single calendar month in America/Los_Angeles +// timezone. Time range as a whole is optional. If not specified, the +// latest pricing will be returned (up to 12 hours old at most). 
func (c *ServicesSkusListCall) StartTime(startTime string) *ServicesSkusListCall { c.urlParams_.Set("startTime", startTime) return c @@ -3560,7 +3277,7 @@ func (c *ServicesSkusListCall) Header() http.Header { func (c *ServicesSkusListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3631,12 +3348,12 @@ func (c *ServicesSkusListCall) Do(opts ...googleapi.CallOption) (*ListSkusRespon // ], // "parameters": { // "currencyCode": { - // "description": "The ISO 4217 currency code for the pricing info in the response proto.\nWill use the conversion rate as of start_time.\nOptional. If not specified USD will be used.", + // "description": "The ISO 4217 currency code for the pricing info in the response proto. Will use the conversion rate as of start_time. Optional. If not specified USD will be used.", // "location": "query", // "type": "string" // }, // "endTime": { - // "description": "Optional exclusive end time of the time range for which the pricing\nversions will be returned. Timestamps in the future are not allowed.\nThe time range has to be within a single calendar month in\nAmerica/Los_Angeles timezone. Time range as a whole is optional. If not\nspecified, the latest pricing will be returned (up to 12 hours old at\nmost).", + // "description": "Optional exclusive end time of the time range for which the pricing versions will be returned. Timestamps in the future are not allowed. The time range has to be within a single calendar month in America/Los_Angeles timezone. Time range as a whole is optional. If not specified, the latest pricing will be returned (up to 12 hours old at most).", // "format": "google-datetime", // "location": "query", // "type": "string" @@ -3648,19 +3365,19 @@ func (c *ServicesSkusListCall) Do(opts ...googleapi.CallOption) (*ListSkusRespon // "type": "integer" // }, // "pageToken": { - // "description": "A token identifying a page of results to return. This should be a\n`next_page_token` value returned from a previous `ListSkus`\ncall. If unspecified, the first page of results is returned.", + // "description": "A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListSkus` call. If unspecified, the first page of results is returned.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The name of the service.\nExample: \"services/DA34-426B-A397\"", + // "description": "Required. The name of the service. Example: \"services/DA34-426B-A397\"", // "location": "path", // "pattern": "^services/[^/]+$", // "required": true, // "type": "string" // }, // "startTime": { - // "description": "Optional inclusive start time of the time range for which the pricing\nversions will be returned. Timestamps in the future are not allowed.\nThe time range has to be within a single calendar month in\nAmerica/Los_Angeles timezone. Time range as a whole is optional. If not\nspecified, the latest pricing will be returned (up to 12 hours old at\nmost).", + // "description": "Optional inclusive start time of the time range for which the pricing versions will be returned. Timestamps in the future are not allowed. The time range has to be within a single calendar month in America/Los_Angeles timezone. 
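A hedged sketch of the SKU listing call whose CurrencyCode, StartTime/EndTime and paging setters are documented above: the parent value is the example from the generated docs, the currency code is optional (USD is the default), and the time-range setters are simply omitted here so the latest pricing is returned.

func dumpServiceSkus(ctx context.Context, svc *cloudbilling.APIService) error {
	return svc.Services.Skus.List("services/DA34-426B-A397").
		CurrencyCode("USD").
		Pages(ctx, func(page *cloudbilling.ListSkusResponse) error {
			for _, sku := range page.Skus {
				fmt.Println(sku.SkuId, sku.Description, sku.ServiceRegions)
			}
			return nil
		})
}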
Time range as a whole is optional. If not specified, the latest pricing will be returned (up to 12 hours old at most).", // "format": "google-datetime", // "location": "query", // "type": "string" @@ -3671,6 +3388,8 @@ func (c *ServicesSkusListCall) Do(opts ...googleapi.CallOption) (*ListSkusRespon // "$ref": "ListSkusResponse" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-billing", + // "https://www.googleapis.com/auth/cloud-billing.readonly", // "https://www.googleapis.com/auth/cloud-platform" // ] // } diff --git a/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json b/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json index 598f255c181..366ffe78279 100644 --- a/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json +++ b/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json @@ -108,7 +108,7 @@ "operations": { "methods": { "cancel": { - "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", "flatPath": "v1/operations/{operationsId}:cancel", "httpMethod": "POST", "id": "cloudbuild.operations.cancel", @@ -136,7 +136,7 @@ ] }, "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v1/operations/{operationsId}", "httpMethod": "GET", "id": "cloudbuild.operations.get", @@ -159,47 +159,6 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] - }, - "list": { - "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. 
To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", - "flatPath": "v1/operations", - "httpMethod": "GET", - "id": "cloudbuild.operations.list", - "parameterOrder": [ - "name" - ], - "parameters": { - "filter": { - "description": "The standard list filter.", - "location": "query", - "type": "string" - }, - "name": { - "description": "The name of the operation's parent resource.", - "location": "path", - "pattern": "^operations$", - "required": true, - "type": "string" - }, - "pageSize": { - "description": "The standard list page size.", - "format": "int32", - "location": "query", - "type": "integer" - }, - "pageToken": { - "description": "The standard list page token.", - "location": "query", - "type": "string" - } - }, - "path": "v1/{+name}", - "response": { - "$ref": "ListOperationsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] } } }, @@ -242,7 +201,7 @@ ] }, "create": { - "description": "Starts a build with the specified configuration.\n\nThis method returns a long-running `Operation`, which includes the build\nID. Pass the build ID to `GetBuild` to determine the build status (such as\n`SUCCESS` or `FAILURE`).", + "description": "Starts a build with the specified configuration. This method returns a long-running `Operation`, which includes the build ID. Pass the build ID to `GetBuild` to determine the build status (such as `SUCCESS` or `FAILURE`).", "flatPath": "v1/projects/{projectId}/builds", "httpMethod": "POST", "id": "cloudbuild.projects.builds.create", @@ -250,6 +209,11 @@ "projectId" ], "parameters": { + "parent": { + "description": "The parent resource where this build will be created. Format: `projects/{project}/locations/{location}`", + "location": "query", + "type": "string" + }, "projectId": { "description": "Required. ID of the project.", "location": "path", @@ -269,7 +233,7 @@ ] }, "get": { - "description": "Returns information about a previously requested build.\n\nThe `Build` that is returned includes its status (such as `SUCCESS`,\n`FAILURE`, or `WORKING`), and timing information.", + "description": "Returns information about a previously requested build. The `Build` that is returned includes its status (such as `SUCCESS`, `FAILURE`, or `WORKING`), and timing information.", "flatPath": "v1/projects/{projectId}/builds/{id}", "httpMethod": "GET", "id": "cloudbuild.projects.builds.get", @@ -284,6 +248,11 @@ "required": true, "type": "string" }, + "name": { + "description": "The name of the `Build` to retrieve. Format: `projects/{project}/locations/{location}/builds/{build}`", + "location": "query", + "type": "string" + }, "projectId": { "description": "Required. ID of the project.", "location": "path", @@ -300,7 +269,7 @@ ] }, "list": { - "description": "Lists previously requested builds.\n\nPreviously requested builds may still be in-progress, or may have finished\nsuccessfully or unsuccessfully.", + "description": "Lists previously requested builds. 
Previously requested builds may still be in-progress, or may have finished successfully or unsuccessfully.", "flatPath": "v1/projects/{projectId}/builds", "httpMethod": "GET", "id": "cloudbuild.projects.builds.list", @@ -324,6 +293,11 @@ "location": "query", "type": "string" }, + "parent": { + "description": "The parent of the collection of `Builds`. Format: `projects/{project}/locations/location`", + "location": "query", + "type": "string" + }, "projectId": { "description": "Required. ID of the project.", "location": "path", @@ -340,7 +314,7 @@ ] }, "retry": { - "description": "Creates a new build based on the specified build.\n\nThis method creates a new build using the original build request, which may\nor may not result in an identical build.\n\nFor triggered builds:\n\n* Triggered builds resolve to a precise revision; therefore a retry of a\ntriggered build will result in a build that uses the same revision.\n\nFor non-triggered builds that specify `RepoSource`:\n\n* If the original build built from the tip of a branch, the retried build\nwill build from the tip of that branch, which may not be the same revision\nas the original build.\n* If the original build specified a commit sha or revision ID, the retried\nbuild will use the identical source.\n\nFor builds that specify `StorageSource`:\n\n* If the original build pulled source from Google Cloud Storage without\nspecifying the generation of the object, the new build will use the current\nobject, which may be different from the original build source.\n* If the original build pulled source from Cloud Storage and specified the\ngeneration of the object, the new build will attempt to use the same\nobject, which may or may not be available depending on the bucket's\nlifecycle management settings.", + "description": "Creates a new build based on the specified build. This method creates a new build using the original build request, which may or may not result in an identical build. For triggered builds: * Triggered builds resolve to a precise revision; therefore a retry of a triggered build will result in a build that uses the same revision. For non-triggered builds that specify `RepoSource`: * If the original build built from the tip of a branch, the retried build will build from the tip of that branch, which may not be the same revision as the original build. * If the original build specified a commit sha or revision ID, the retried build will use the identical source. For builds that specify `StorageSource`: * If the original build pulled source from Google Cloud Storage without specifying the generation of the object, the new build will use the current object, which may be different from the original build source. * If the original build pulled source from Cloud Storage and specified the generation of the object, the new build will attempt to use the same object, which may or may not be available depending on the bucket's lifecycle management settings.", "flatPath": "v1/projects/{projectId}/builds/{id}:retry", "httpMethod": "POST", "id": "cloudbuild.projects.builds.retry", @@ -375,10 +349,245 @@ } } }, + "locations": { + "resources": { + "builds": { + "methods": { + "cancel": { + "description": "Cancels a build in progress.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/builds/{buildsId}:cancel", + "httpMethod": "POST", + "id": "cloudbuild.projects.locations.builds.cancel", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the `Build` to retrieve. 
Format: `projects/{project}/locations/{location}/builds/{build}`", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/builds/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:cancel", + "request": { + "$ref": "CancelBuildRequest" + }, + "response": { + "$ref": "Build" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "create": { + "description": "Starts a build with the specified configuration. This method returns a long-running `Operation`, which includes the build ID. Pass the build ID to `GetBuild` to determine the build status (such as `SUCCESS` or `FAILURE`).", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/builds", + "httpMethod": "POST", + "id": "cloudbuild.projects.locations.builds.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The parent resource where this build will be created. Format: `projects/{project}/locations/{location}`", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Required. ID of the project.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+parent}/builds", + "request": { + "$ref": "Build" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Returns information about a previously requested build. The `Build` that is returned includes its status (such as `SUCCESS`, `FAILURE`, or `WORKING`), and timing information.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/builds/{buildsId}", + "httpMethod": "GET", + "id": "cloudbuild.projects.locations.builds.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "id": { + "description": "Required. ID of the build.", + "location": "query", + "type": "string" + }, + "name": { + "description": "The name of the `Build` to retrieve. Format: `projects/{project}/locations/{location}/builds/{build}`", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/builds/[^/]+$", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Required. ID of the project.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Build" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists previously requested builds. Previously requested builds may still be in-progress, or may have finished successfully or unsuccessfully.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/builds", + "httpMethod": "GET", + "id": "cloudbuild.projects.locations.builds.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "The raw filter text to constrain the results.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Number of results to return in the list.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Token to provide to skip to a particular spot in the list.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "The parent of the collection of `Builds`. Format: `projects/{project}/locations/location`", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Required. 
ID of the project.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+parent}/builds", + "response": { + "$ref": "ListBuildsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "retry": { + "description": "Creates a new build based on the specified build. This method creates a new build using the original build request, which may or may not result in an identical build. For triggered builds: * Triggered builds resolve to a precise revision; therefore a retry of a triggered build will result in a build that uses the same revision. For non-triggered builds that specify `RepoSource`: * If the original build built from the tip of a branch, the retried build will build from the tip of that branch, which may not be the same revision as the original build. * If the original build specified a commit sha or revision ID, the retried build will use the identical source. For builds that specify `StorageSource`: * If the original build pulled source from Google Cloud Storage without specifying the generation of the object, the new build will use the current object, which may be different from the original build source. * If the original build pulled source from Cloud Storage and specified the generation of the object, the new build will attempt to use the same object, which may or may not be available depending on the bucket's lifecycle management settings.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/builds/{buildsId}:retry", + "httpMethod": "POST", + "id": "cloudbuild.projects.locations.builds.retry", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the `Build` to retry. Format: `projects/{project}/locations/{location}/builds/{build}`", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/builds/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:retry", + "request": { + "$ref": "RetryBuildRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "operations": { + "methods": { + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel", + "httpMethod": "POST", + "id": "cloudbuild.projects.locations.operations.cancel", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource to be cancelled.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:cancel", + "request": { + "$ref": "CancelOperationRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "cloudbuild.projects.locations.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + }, "triggers": { "methods": { "create": { - "description": "Creates a new `BuildTrigger`.\n\nThis API is experimental.", + "description": "Creates a new `BuildTrigger`. This API is experimental.", "flatPath": "v1/projects/{projectId}/triggers", "httpMethod": "POST", "id": "cloudbuild.projects.triggers.create", @@ -405,7 +614,7 @@ ] }, "delete": { - "description": "Deletes a `BuildTrigger` by its project ID and trigger ID.\n\nThis API is experimental.", + "description": "Deletes a `BuildTrigger` by its project ID and trigger ID. This API is experimental.", "flatPath": "v1/projects/{projectId}/triggers/{triggerId}", "httpMethod": "DELETE", "id": "cloudbuild.projects.triggers.delete", @@ -436,7 +645,7 @@ ] }, "get": { - "description": "Returns information about a `BuildTrigger`.\n\nThis API is experimental.", + "description": "Returns information about a `BuildTrigger`. This API is experimental.", "flatPath": "v1/projects/{projectId}/triggers/{triggerId}", "httpMethod": "GET", "id": "cloudbuild.projects.triggers.get", @@ -467,7 +676,7 @@ ] }, "list": { - "description": "Lists existing `BuildTrigger`s.\n\nThis API is experimental.", + "description": "Lists existing `BuildTrigger`s. This API is experimental.", "flatPath": "v1/projects/{projectId}/triggers", "httpMethod": "GET", "id": "cloudbuild.projects.triggers.list", @@ -502,7 +711,7 @@ ] }, "patch": { - "description": "Updates a `BuildTrigger` by its project ID and trigger ID.\n\nThis API is experimental.", + "description": "Updates a `BuildTrigger` by its project ID and trigger ID. This API is experimental.", "flatPath": "v1/projects/{projectId}/triggers/{triggerId}", "httpMethod": "PATCH", "id": "cloudbuild.projects.triggers.patch", @@ -574,15 +783,15 @@ } } }, - "revision": "20200506", + "revision": "20200921", "rootUrl": "https://cloudbuild.googleapis.com/", "schemas": { "ArtifactObjects": { - "description": "Files in the workspace to upload to Cloud Storage upon successful\ncompletion of all build steps.", + "description": "Files in the workspace to upload to Cloud Storage upon successful completion of all build steps.", "id": "ArtifactObjects", "properties": { "location": { - "description": "Cloud Storage bucket and optional object path, in the form\n\"gs://bucket/path/to/somewhere/\". (see [Bucket Name\nRequirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)).\n\nFiles in the workspace matching any path pattern will be uploaded to\nCloud Storage with this location as a prefix.", + "description": "Cloud Storage bucket and optional object path, in the form \"gs://bucket/path/to/somewhere/\". (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). 
Files in the workspace matching any path pattern will be uploaded to Cloud Storage with this location as a prefix.", "type": "string" }, "paths": { @@ -594,13 +803,14 @@ }, "timing": { "$ref": "TimeSpan", - "description": "Output only. Stores timing information for pushing all artifact objects." + "description": "Output only. Stores timing information for pushing all artifact objects.", + "readOnly": true } }, "type": "object" }, "ArtifactResult": { - "description": "An artifact that was uploaded during a build. This\nis a single record in the artifact manifest JSON file.", + "description": "An artifact that was uploaded during a build. This is a single record in the artifact manifest JSON file.", "id": "ArtifactResult", "properties": { "fileHash": { @@ -611,18 +821,18 @@ "type": "array" }, "location": { - "description": "The path of an artifact in a Google Cloud Storage bucket, with the\ngeneration number. For example,\n`gs://mybucket/path/to/output.jar#generation`.", + "description": "The path of an artifact in a Google Cloud Storage bucket, with the generation number. For example, `gs://mybucket/path/to/output.jar#generation`.", "type": "string" } }, "type": "object" }, "Artifacts": { - "description": "Artifacts produced by a build that should be uploaded upon\nsuccessful completion of all build steps.", + "description": "Artifacts produced by a build that should be uploaded upon successful completion of all build steps.", "id": "Artifacts", "properties": { "images": { - "description": "A list of images to be pushed upon the successful completion of all build\nsteps.\n\nThe images will be pushed using the builder service account's credentials.\n\nThe digests of the pushed images will be stored in the Build resource's\nresults field.\n\nIf any of the images fail to be pushed, the build is marked FAILURE.", + "description": "A list of images to be pushed upon the successful completion of all build steps. The images will be pushed using the builder service account's credentials. The digests of the pushed images will be stored in the Build resource's results field. If any of the images fail to be pushed, the build is marked FAILURE.", "items": { "type": "string" }, @@ -630,39 +840,43 @@ }, "objects": { "$ref": "ArtifactObjects", - "description": "A list of objects to be uploaded to Cloud Storage upon successful\ncompletion of all build steps.\n\nFiles in the workspace matching specified paths globs will be uploaded to\nthe specified Cloud Storage location using the builder service account's\ncredentials.\n\nThe location and generation of the uploaded objects will be stored in the\nBuild resource's results field.\n\nIf any objects fail to be pushed, the build is marked FAILURE." + "description": "A list of objects to be uploaded to Cloud Storage upon successful completion of all build steps. Files in the workspace matching specified paths globs will be uploaded to the specified Cloud Storage location using the builder service account's credentials. The location and generation of the uploaded objects will be stored in the Build resource's results field. If any objects fail to be pushed, the build is marked FAILURE." 
} }, "type": "object" }, "Build": { - "description": "A build resource in the Cloud Build API.\n\nAt a high level, a `Build` describes where to find source code, how to build\nit (for example, the builder image to run on the source), and where to store\nthe built artifacts.\n\nFields can include the following variables, which will be expanded when the\nbuild is created:\n\n- $PROJECT_ID: the project ID of the build.\n- $BUILD_ID: the autogenerated ID of the build.\n- $REPO_NAME: the source repository name specified by RepoSource.\n- $BRANCH_NAME: the branch name specified by RepoSource.\n- $TAG_NAME: the tag name specified by RepoSource.\n- $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or\n resolved from the specified branch or tag.\n- $SHORT_SHA: first 7 characters of $REVISION_ID or $COMMIT_SHA.", + "description": "A build resource in the Cloud Build API. At a high level, a `Build` describes where to find source code, how to build it (for example, the builder image to run on the source), and where to store the built artifacts. Fields can include the following variables, which will be expanded when the build is created: - $PROJECT_ID: the project ID of the build. - $BUILD_ID: the autogenerated ID of the build. - $REPO_NAME: the source repository name specified by RepoSource. - $BRANCH_NAME: the branch name specified by RepoSource. - $TAG_NAME: the tag name specified by RepoSource. - $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or resolved from the specified branch or tag. - $SHORT_SHA: first 7 characters of $REVISION_ID or $COMMIT_SHA.", "id": "Build", "properties": { "artifacts": { "$ref": "Artifacts", - "description": "Artifacts produced by the build that should be uploaded upon\nsuccessful completion of all build steps." + "description": "Artifacts produced by the build that should be uploaded upon successful completion of all build steps." }, "buildTriggerId": { - "description": "Output only. The ID of the `BuildTrigger` that triggered this build, if it\nwas triggered automatically.", + "description": "Output only. The ID of the `BuildTrigger` that triggered this build, if it was triggered automatically.", + "readOnly": true, "type": "string" }, "createTime": { "description": "Output only. Time at which the request to create the build was received.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "finishTime": { - "description": "Output only. Time at which execution of the build was finished.\n\nThe difference between finish_time and start_time is the duration of the\nbuild's execution.", + "description": "Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "id": { "description": "Output only. Unique identifier of the build.", + "readOnly": true, "type": "string" }, "images": { - "description": "A list of images to be pushed upon the successful completion of all build\nsteps.\n\nThe images are pushed using the builder service account's credentials.\n\nThe digests of the pushed images will be stored in the `Build` resource's\nresults field.\n\nIf any of the images fail to be pushed, the build status is marked\n`FAILURE`.", + "description": "A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. 
The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`.", "items": { "type": "string" }, @@ -670,10 +884,16 @@ }, "logUrl": { "description": "Output only. URL to logs for this build in Google Cloud Console.", + "readOnly": true, "type": "string" }, "logsBucket": { - "description": "Google Cloud Storage bucket where logs should be written (see\n[Bucket Name\nRequirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)).\nLogs file names will be of the format `${logs_bucket}/log-${build_id}.txt`.", + "description": "Google Cloud Storage bucket where logs should be written (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). Logs file names will be of the format `${logs_bucket}/log-${build_id}.txt`.", + "type": "string" + }, + "name": { + "description": "Output only. The 'Build' name with format: `projects/{project}/locations/{location}/builds/{build}`, where {build} is a unique identifier generated by the service.", + "readOnly": true, "type": "string" }, "options": { @@ -682,16 +902,18 @@ }, "projectId": { "description": "Output only. ID of the project.", + "readOnly": true, "type": "string" }, "queueTtl": { - "description": "TTL in queue for this build. If provided and the build is enqueued longer\nthan this value, the build will expire and the build status will be\n`EXPIRED`.\n\nThe TTL starts ticking from create_time.", + "description": "TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time.", "format": "google-duration", "type": "string" }, "results": { "$ref": "Results", - "description": "Output only. Results of the build." + "description": "Output only. Results of the build.", + "readOnly": true }, "secrets": { "description": "Secrets to decrypt using Cloud Key Management Service.", @@ -700,17 +922,23 @@ }, "type": "array" }, + "serviceAccount": { + "description": "IAM service account whose credentials will be used at build runtime. Must be of the format `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. ACCOUNT can be email address or uniqueId of the service account. This field is in alpha and is not publicly available.", + "type": "string" + }, "source": { "$ref": "Source", "description": "The location of the source files to build." }, "sourceProvenance": { "$ref": "SourceProvenance", - "description": "Output only. A permanent fixed identifier for source." + "description": "Output only. A permanent fixed identifier for source.", + "readOnly": true }, "startTime": { "description": "Output only. Time at which execution of the build was started.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "status": { @@ -737,10 +965,12 @@ "Build or step was canceled by a user.", "Build was enqueued for longer than the value of `queue_ttl`." ], + "readOnly": true, "type": "string" }, "statusDetail": { "description": "Output only. Customer-readable message about the current status.", + "readOnly": true, "type": "string" }, "steps": { @@ -765,7 +995,7 @@ "type": "array" }, "timeout": { - "description": "Amount of time that this build should be allowed to run, to second\ngranularity. 
If this amount of time elapses, work on the build will cease\nand the build status will be `TIMEOUT`.\n\nDefault time is ten minutes.", + "description": "Amount of time that this build should be allowed to run, to second granularity. If this amount of time elapses, work on the build will cease and the build status will be `TIMEOUT`. `timeout` starts ticking from `startTime`. Default time is ten minutes.", "format": "google-duration", "type": "string" }, @@ -773,7 +1003,8 @@ "additionalProperties": { "$ref": "TimeSpan" }, - "description": "Output only. Stores timing information for phases of the build. Valid keys\nare:\n\n* BUILD: time to execute all build steps\n* PUSH: time to push all specified images.\n* FETCHSOURCE: time to fetch source.\n\nIf the build does not specify source or images,\nthese keys will not be included.", + "description": "Output only. Stores timing information for phases of the build. Valid keys are: * BUILD: time to execute all build steps * PUSH: time to push all specified images. * FETCHSOURCE: time to fetch source. If the build does not specify source or images, these keys will not be included.", + "readOnly": true, "type": "object" } }, @@ -795,19 +1026,23 @@ "id": "BuildOptions", "properties": { "diskSizeGb": { - "description": "Requested disk size for the VM that runs the build. Note that this is *NOT*\n\"disk free\"; some of the space will be used by the operating system and\nbuild utilities. Also note that this is the minimum disk size that will be\nallocated for the build -- the build may run with a larger disk than\nrequested. At present, the maximum disk size is 1000GB; builds that request\nmore than the maximum are rejected with an error.", + "description": "Requested disk size for the VM that runs the build. Note that this is *NOT* \"disk free\"; some of the space will be used by the operating system and build utilities. Also note that this is the minimum disk size that will be allocated for the build -- the build may run with a larger disk than requested. At present, the maximum disk size is 1000GB; builds that request more than the maximum are rejected with an error.", "format": "int64", "type": "string" }, + "dynamicSubstitutions": { + "description": "Option to specify whether or not to apply bash style string operations to the substitutions. NOTE: this is always enabled for triggered builds and cannot be overridden in the build configuration file.", + "type": "boolean" + }, "env": { - "description": "A list of global environment variable definitions that will exist for all\nbuild steps in this build. If a variable is defined in both globally and in\na build step, the variable will use the build step value.\n\nThe elements are of the form \"KEY=VALUE\" for the environment variable \"KEY\"\nbeing given the value \"VALUE\".", + "description": "A list of global environment variable definitions that will exist for all build steps in this build. If a variable is defined in both globally and in a build step, the variable will use the build step value. 
The elements are of the form \"KEY=VALUE\" for the environment variable \"KEY\" being given the value \"VALUE\".", "items": { "type": "string" }, "type": "array" }, "logStreamingOption": { - "description": "Option to define build log streaming behavior to Google Cloud\nStorage.", + "description": "Option to define build log streaming behavior to Google Cloud Storage.", "enum": [ "STREAM_DEFAULT", "STREAM_ON", @@ -816,21 +1051,27 @@ "enumDescriptions": [ "Service may automatically determine build log streaming behavior.", "Build logs should be streamed to Google Cloud Storage.", - "Build logs should not be streamed to Google Cloud Storage; they will be\nwritten when the build is completed." + "Build logs should not be streamed to Google Cloud Storage; they will be written when the build is completed." ], "type": "string" }, "logging": { - "description": "Option to specify the logging mode, which determines where the logs are\nstored.", + "description": "Option to specify the logging mode, which determines if and where build logs are stored.", "enum": [ "LOGGING_UNSPECIFIED", "LEGACY", - "GCS_ONLY" + "GCS_ONLY", + "STACKDRIVER_ONLY", + "CLOUD_LOGGING_ONLY", + "NONE" ], "enumDescriptions": [ - "The service determines the logging mode. The default is `LEGACY`. Do not\nrely on the default logging behavior as it may change in the future.", - "Stackdriver logging and Cloud Storage logging are enabled.", - "Only Cloud Storage logging is enabled." + "The service determines the logging mode. The default is `LEGACY`. Do not rely on the default logging behavior as it may change in the future.", + "Cloud Logging and Cloud Storage logging are enabled.", + "Only Cloud Storage logging is enabled.", + "This option is the same as CLOUD_LOGGING_ONLY.", + "Only Cloud Logging is enabled. Note that logs for both the Cloud Console UI and Cloud SDK are based on Cloud Storage logs, so neither will provide logs if this option is chosen.", + "Turn off all logging. No build logs will be captured. Next ID: 6" ], "type": "string" }, @@ -861,7 +1102,7 @@ "type": "string" }, "secretEnv": { - "description": "A list of global environment variables, which are encrypted using a Cloud\nKey Management Service crypto key. These values must be specified in the\nbuild's `Secret`. These variables will be available to all build steps\nin this build.", + "description": "A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build.", "items": { "type": "string" }, @@ -869,42 +1110,42 @@ }, "sourceProvenanceHash": { "description": "Requested hash for SourceProvenance.", - "enumDescriptions": [ - "No hash requested.", - "Use a sha256 hash.", - "Use a md5 hash." - ], "items": { "enum": [ "NONE", "SHA256", "MD5" ], + "enumDescriptions": [ + "No hash requested.", + "Use a sha256 hash.", + "Use a md5 hash." + ], "type": "string" }, "type": "array" }, "substitutionOption": { - "description": "Option to specify behavior when there is an error in the substitution\nchecks.", + "description": "Option to specify behavior when there is an error in the substitution checks. 
NOTE: this is always set to ALLOW_LOOSE for triggered builds and cannot be overridden in the build configuration file.", "enum": [ "MUST_MATCH", "ALLOW_LOOSE" ], "enumDescriptions": [ - "Fails the build if error in substitutions checks, like missing\na substitution in the template or in the map.", + "Fails the build if error in substitutions checks, like missing a substitution in the template or in the map.", "Do not fail the build if error in substitutions checks." ], "type": "string" }, "volumes": { - "description": "Global list of volumes to mount for ALL build steps\n\nEach volume is created as an empty volume prior to starting the build\nprocess. Upon completion of the build, volumes and their contents are\ndiscarded. Global volume names and paths cannot conflict with the volumes\ndefined a build step.\n\nUsing a global volume in a build with only one step is not valid as\nit is indicative of a build request with an incorrect configuration.", + "description": "Global list of volumes to mount for ALL build steps Each volume is created as an empty volume prior to starting the build process. Upon completion of the build, volumes and their contents are discarded. Global volume names and paths cannot conflict with the volumes defined a build step. Using a global volume in a build with only one step is not valid as it is indicative of a build request with an incorrect configuration.", "items": { "$ref": "Volume" }, "type": "array" }, "workerPool": { - "description": "Option to specify a `WorkerPool` for the build.\nFormat: projects/{project}/workerPools/{workerPool}\n\nThis field is experimental.", + "description": "Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is experimental.", "type": "string" } }, @@ -915,48 +1156,49 @@ "id": "BuildStep", "properties": { "args": { - "description": "A list of arguments that will be presented to the step when it is started.\n\nIf the image used to run the step's container has an entrypoint, the `args`\nare used as arguments to that entrypoint. If the image does not define\nan entrypoint, the first element in args is used as the entrypoint,\nand the remainder will be used as arguments.", + "description": "A list of arguments that will be presented to the step when it is started. If the image used to run the step's container has an entrypoint, the `args` are used as arguments to that entrypoint. If the image does not define an entrypoint, the first element in args is used as the entrypoint, and the remainder will be used as arguments.", "items": { "type": "string" }, "type": "array" }, "dir": { - "description": "Working directory to use when running this step's container.\n\nIf this value is a relative path, it is relative to the build's working\ndirectory. If this value is absolute, it may be outside the build's working\ndirectory, in which case the contents of the path may not be persisted\nacross build step executions, unless a `volume` for that path is specified.\n\nIf the build specifies a `RepoSource` with `dir` and a step with a `dir`,\nwhich specifies an absolute path, the `RepoSource` `dir` is ignored for\nthe step's execution.", + "description": "Working directory to use when running this step's container. If this value is a relative path, it is relative to the build's working directory. 
If this value is absolute, it may be outside the build's working directory, in which case the contents of the path may not be persisted across build step executions, unless a `volume` for that path is specified. If the build specifies a `RepoSource` with `dir` and a step with a `dir`, which specifies an absolute path, the `RepoSource` `dir` is ignored for the step's execution.", "type": "string" }, "entrypoint": { - "description": "Entrypoint to be used instead of the build step image's default entrypoint.\nIf unset, the image's default entrypoint is used.", + "description": "Entrypoint to be used instead of the build step image's default entrypoint. If unset, the image's default entrypoint is used.", "type": "string" }, "env": { - "description": "A list of environment variable definitions to be used when running a step.\n\nThe elements are of the form \"KEY=VALUE\" for the environment variable \"KEY\"\nbeing given the value \"VALUE\".", + "description": "A list of environment variable definitions to be used when running a step. The elements are of the form \"KEY=VALUE\" for the environment variable \"KEY\" being given the value \"VALUE\".", "items": { "type": "string" }, "type": "array" }, "id": { - "description": "Unique identifier for this build step, used in `wait_for` to\nreference this build step as a dependency.", + "description": "Unique identifier for this build step, used in `wait_for` to reference this build step as a dependency.", "type": "string" }, "name": { - "description": "Required. The name of the container image that will run this particular\nbuild step.\n\nIf the image is available in the host's Docker daemon's cache, it\nwill be run directly. If not, the host will attempt to pull the image\nfirst, using the builder service account's credentials if necessary.\n\nThe Docker daemon's cache will already have the latest versions of all of\nthe officially supported build steps\n([https://github.com/GoogleCloudPlatform/cloud-builders](https://github.com/GoogleCloudPlatform/cloud-builders)).\nThe Docker daemon will also have cached many of the layers for some popular\nimages, like \"ubuntu\", \"debian\", but they will be refreshed at the time you\nattempt to use them.\n\nIf you built an image in a previous build step, it will be stored in the\nhost's Docker daemon's cache and is available to use as the name for a\nlater build step.", + "description": "Required. The name of the container image that will run this particular build step. If the image is available in the host's Docker daemon's cache, it will be run directly. If not, the host will attempt to pull the image first, using the builder service account's credentials if necessary. The Docker daemon's cache will already have the latest versions of all of the officially supported build steps ([https://github.com/GoogleCloudPlatform/cloud-builders](https://github.com/GoogleCloudPlatform/cloud-builders)). The Docker daemon will also have cached many of the layers for some popular images, like \"ubuntu\", \"debian\", but they will be refreshed at the time you attempt to use them. If you built an image in a previous build step, it will be stored in the host's Docker daemon's cache and is available to use as the name for a later build step.", "type": "string" }, "pullTiming": { "$ref": "TimeSpan", - "description": "Output only. Stores timing information for pulling this build step's\nbuilder image only." + "description": "Output only. 
Stores timing information for pulling this build step's builder image only.", + "readOnly": true }, "secretEnv": { - "description": "A list of environment variables which are encrypted using a Cloud Key\nManagement Service crypto key. These values must be specified in the\nbuild's `Secret`.", + "description": "A list of environment variables which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`.", "items": { "type": "string" }, "type": "array" }, "status": { - "description": "Output only. Status of the build step. At this time, build step status is\nonly updated on build completion; step status is not updated in real-time\nas the build progresses.", + "description": "Output only. Status of the build step. At this time, build step status is only updated on build completion; step status is not updated in real-time as the build progresses.", "enum": [ "STATUS_UNKNOWN", "QUEUED", @@ -979,26 +1221,28 @@ "Build or step was canceled by a user.", "Build was enqueued for longer than the value of `queue_ttl`." ], + "readOnly": true, "type": "string" }, "timeout": { - "description": "Time limit for executing this build step. If not defined, the step has no\ntime limit and will be allowed to continue to run until either it completes\nor the build itself times out.", + "description": "Time limit for executing this build step. If not defined, the step has no time limit and will be allowed to continue to run until either it completes or the build itself times out.", "format": "google-duration", "type": "string" }, "timing": { "$ref": "TimeSpan", - "description": "Output only. Stores timing information for executing this build step." + "description": "Output only. Stores timing information for executing this build step.", + "readOnly": true }, "volumes": { - "description": "List of volumes to mount into the build step.\n\nEach volume is created as an empty volume prior to execution of the\nbuild step. Upon completion of the build, volumes and their contents are\ndiscarded.\n\nUsing a named volume in only one step is not valid as it is indicative\nof a build request with an incorrect configuration.", + "description": "List of volumes to mount into the build step. Each volume is created as an empty volume prior to execution of the build step. Upon completion of the build, volumes and their contents are discarded. Using a named volume in only one step is not valid as it is indicative of a build request with an incorrect configuration.", "items": { "$ref": "Volume" }, "type": "array" }, "waitFor": { - "description": "The ID(s) of the step(s) that this build step depends on.\nThis build step will not start until all the build steps in `wait_for`\nhave completed successfully. If `wait_for` is empty, this build step will\nstart when all previous build steps in the `Build.Steps` list have\ncompleted successfully.", + "description": "The ID(s) of the step(s) that this build step depends on. This build step will not start until all the build steps in `wait_for` have completed successfully. 
If `wait_for` is empty, this build step will start when all previous build steps in the `Build.Steps` list have completed successfully.", "items": { "type": "string" }, @@ -1008,7 +1252,7 @@ "type": "object" }, "BuildTrigger": { - "description": "Configuration for an automated build in response to source repository\nchanges.", + "description": "Configuration for an automated build in response to source repository changes.", "id": "BuildTrigger", "properties": { "build": { @@ -1018,6 +1262,7 @@ "createTime": { "description": "Output only. Time when the trigger was created.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "description": { @@ -1025,44 +1270,45 @@ "type": "string" }, "disabled": { - "description": "If true, the trigger will never result in a build.", + "description": "If true, the trigger will never automatically execute a build.", "type": "boolean" }, "filename": { - "description": "Path, from the source root, to a file whose contents is used for the\ntemplate.", + "description": "Path, from the source root, to a file whose contents is used for the template.", "type": "string" }, "github": { "$ref": "GitHubEventsConfig", - "description": "GitHubEventsConfig describes the configuration of a trigger that creates\na build whenever a GitHub event is received.\n\nMutually exclusive with `trigger_template`." + "description": "GitHubEventsConfig describes the configuration of a trigger that creates a build whenever a GitHub event is received. Mutually exclusive with `trigger_template`." }, "id": { "description": "Output only. Unique identifier of the trigger.", + "readOnly": true, "type": "string" }, "ignoredFiles": { - "description": "ignored_files and included_files are file glob matches using\nhttps://golang.org/pkg/path/filepath/#Match extended with support for \"**\".\n\nIf ignored_files and changed files are both empty, then they are\nnot used to determine whether or not to trigger a build.\n\nIf ignored_files is not empty, then we ignore any files that match\nany of the ignored_file globs. If the change has no files that are\noutside of the ignored_files globs, then we do not trigger a build.", + "description": "ignored_files and included_files are file glob matches using https://golang.org/pkg/path/filepath/#Match extended with support for \"**\". If ignored_files and changed files are both empty, then they are not used to determine whether or not to trigger a build. If ignored_files is not empty, then we ignore any files that match any of the ignored_file globs. If the change has no files that are outside of the ignored_files globs, then we do not trigger a build.", "items": { "type": "string" }, "type": "array" }, "includedFiles": { - "description": "If any of the files altered in the commit pass the ignored_files\nfilter and included_files is empty, then as far as this filter is\nconcerned, we should trigger the build.\n\nIf any of the files altered in the commit pass the ignored_files\nfilter and included_files is not empty, then we make sure that at\nleast one of those files matches a included_files glob. If not,\nthen we do not trigger a build.", + "description": "If any of the files altered in the commit pass the ignored_files filter and included_files is empty, then as far as this filter is concerned, we should trigger the build. If any of the files altered in the commit pass the ignored_files filter and included_files is not empty, then we make sure that at least one of those files matches a included_files glob. 
If not, then we do not trigger a build.", "items": { "type": "string" }, "type": "array" }, "name": { - "description": "User-assigned name of the trigger. Must be unique within the project.\nTrigger names must meet the following requirements:\n\n+ They must contain only alphanumeric characters and dashes.\n+ They can be 1-64 characters long.\n+ They must begin and end with an alphanumeric character.", + "description": "User-assigned name of the trigger. Must be unique within the project. Trigger names must meet the following requirements: + They must contain only alphanumeric characters and dashes. + They can be 1-64 characters long. + They must begin and end with an alphanumeric character.", "type": "string" }, "substitutions": { "additionalProperties": { "type": "string" }, - "description": "Substitutions for Build resource. The keys must match the following\nregular expression: `^_[A-Z0-9_]+$`.The keys cannot conflict with the\nkeys in bindings.", + "description": "Substitutions for Build resource. The keys must match the following regular expression: `^_[A-Z0-9_]+$`.", "type": "object" }, "tags": { @@ -1074,7 +1320,7 @@ }, "triggerTemplate": { "$ref": "RepoSource", - "description": "Template describing the types of source changes to trigger a build.\n\nBranch and tag names in trigger templates are interpreted as regular\nexpressions. Any branch or tag change that matches that regular expression\nwill trigger a build.\n\nMutually exclusive with `github`." + "description": "Template describing the types of source changes to trigger a build. Branch and tag names in trigger templates are interpreted as regular expressions. Any branch or tag change that matches that regular expression will trigger a build. Mutually exclusive with `github`." } }, "type": "object" @@ -1088,12 +1334,13 @@ "type": "string" }, "name": { - "description": "Name used to push the container image to Google Container Registry, as\npresented to `docker push`.", + "description": "Name used to push the container image to Google Container Registry, as presented to `docker push`.", "type": "string" }, "pushTiming": { "$ref": "TimeSpan", - "description": "Output only. Stores timing information for pushing the specified image." + "description": "Output only. Stores timing information for pushing the specified image.", + "readOnly": true } }, "type": "object" @@ -1101,7 +1348,20 @@ "CancelBuildRequest": { "description": "Request to cancel an ongoing build.", "id": "CancelBuildRequest", - "properties": {}, + "properties": { + "id": { + "description": "Required. ID of the build.", + "type": "string" + }, + "name": { + "description": "The name of the `Build` to retrieve. Format: `projects/{project}/locations/{location}/builds/{build}`", + "type": "string" + }, + "projectId": { + "description": "Required. ID of the project.", + "type": "string" + } + }, "type": "object" }, "CancelOperationRequest": { @@ -1111,13 +1371,13 @@ "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. 
For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" }, "FileHashes": { - "description": "Container message for hashes of byte content of files, used in\nSourceProvenance messages to verify integrity of source input to the build.", + "description": "Container message for hashes of byte content of files, used in SourceProvenance messages to verify integrity of source input to the build.", "id": "FileHashes", "properties": { "fileHash": { @@ -1131,7 +1391,7 @@ "type": "object" }, "GitHubEventsConfig": { - "description": "GitHubEventsConfig describes the configuration of a trigger that creates a\nbuild whenever a GitHub event is received.\n\nThis message is experimental.", + "description": "GitHubEventsConfig describes the configuration of a trigger that creates a build whenever a GitHub event is received. This message is experimental.", "id": "GitHubEventsConfig", "properties": { "installationId": { @@ -1140,11 +1400,11 @@ "type": "string" }, "name": { - "description": "Name of the repository. For example: The name for\nhttps://github.com/googlecloudplatform/cloud-builders is \"cloud-builders\".", + "description": "Name of the repository. For example: The name for https://github.com/googlecloudplatform/cloud-builders is \"cloud-builders\".", "type": "string" }, "owner": { - "description": "Owner of the repository. For example: The owner for\nhttps://github.com/googlecloudplatform/cloud-builders is\n\"googlecloudplatform\".", + "description": "Owner of the repository. For example: The owner for https://github.com/googlecloudplatform/cloud-builders is \"googlecloudplatform\".", "type": "string" }, "pullRequest": { @@ -1158,6 +1418,17 @@ }, "type": "object" }, + "HTTPDelivery": { + "description": "HTTPDelivery is the delivery configuration for an HTTP notification.", + "id": "HTTPDelivery", + "properties": { + "uri": { + "description": "The URI to which JSON-containing HTTP POST requests should be sent.", + "type": "string" + } + }, + "type": "object" + }, "Hash": { "description": "Container message for hash values.", "id": "Hash", @@ -1220,18 +1491,113 @@ }, "type": "object" }, - "ListOperationsResponse": { - "description": "The response message for Operations.ListOperations.", - "id": "ListOperationsResponse", + "Notification": { + "description": "Notification is the container which holds the data that is relevant to this particular notification.", + "id": "Notification", "properties": { - "nextPageToken": { - "description": "The standard List next-page token.", + "filter": { + "description": "The filter string to use for notification filtering. Currently, this is assumed to be a CEL program. See https://opensource.google/projects/cel for more.", "type": "string" }, - "operations": { - "description": "A list of operations that matches the specified filter in the request.", + "httpDelivery": { + "$ref": "HTTPDelivery", + "description": "Configuration for HTTP delivery." + }, + "slackDelivery": { + "$ref": "SlackDelivery", + "description": "Configuration for Slack delivery." + }, + "smtpDelivery": { + "$ref": "SMTPDelivery", + "description": "Configuration for SMTP (email) delivery." 
+ }, + "structDelivery": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "Escape hatch for users to supply custom delivery configs.", + "type": "object" + } + }, + "type": "object" + }, + "NotifierConfig": { + "description": "NotifierConfig is the top-level configuration message.", + "id": "NotifierConfig", + "properties": { + "apiVersion": { + "description": "The API version of this configuration format.", + "type": "string" + }, + "kind": { + "description": "The type of notifier to use (e.g. SMTPNotifier).", + "type": "string" + }, + "metadata": { + "$ref": "NotifierMetadata", + "description": "Metadata for referring to/handling/deploying this notifier." + }, + "spec": { + "$ref": "NotifierSpec", + "description": "The actual configuration for this notifier." + } + }, + "type": "object" + }, + "NotifierMetadata": { + "description": "NotifierMetadata contains the data which can be used to reference or describe this notifier.", + "id": "NotifierMetadata", + "properties": { + "name": { + "description": "The human-readable and user-given name for the notifier. For example: \"repo-merge-email-notifier\".", + "type": "string" + }, + "notifier": { + "description": "The string representing the name and version of notifier to deploy. Expected to be of the form of \"/:\". For example: \"gcr.io/my-project/notifiers/smtp:1.2.34\".", + "type": "string" + } + }, + "type": "object" + }, + "NotifierSecret": { + "description": "NotifierSecret is the container that maps a secret name (reference) to its Google Cloud Secret Manager resource path.", + "id": "NotifierSecret", + "properties": { + "name": { + "description": "Name is the local name of the secret, such as the verbatim string \"my-smtp-password\".", + "type": "string" + }, + "value": { + "description": "Value is interpreted to be a resource path for fetching the actual (versioned) secret data for this secret. For example, this would be a Google Cloud Secret Manager secret version resource path like: \"projects/my-project/secrets/my-secret/versions/latest\".", + "type": "string" + } + }, + "type": "object" + }, + "NotifierSecretRef": { + "description": "NotifierSecretRef contains the reference to a secret stored in the corresponding NotifierSpec.", + "id": "NotifierSecretRef", + "properties": { + "secretRef": { + "description": "The value of `secret_ref` should be a `name` that is registered in a `Secret` in the `secrets` list of the `Spec`.", + "type": "string" + } + }, + "type": "object" + }, + "NotifierSpec": { + "description": "NotifierSpec is the configuration container for notifications.", + "id": "NotifierSpec", + "properties": { + "notification": { + "$ref": "Notification", + "description": "The configuration of this particular notifier." 
+ }, + "secrets": { + "description": "Configurations for secret resources used by this particular notifier.", "items": { - "$ref": "Operation" + "$ref": "NotifierSecret" }, "type": "array" } @@ -1239,11 +1605,11 @@ "type": "object" }, "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", "properties": { "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", "type": "boolean" }, "error": { @@ -1255,11 +1621,11 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", "type": "object" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, "response": { @@ -1267,29 +1633,31 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. 
For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", "type": "object" } }, "type": "object" }, "PullRequestFilter": { - "description": "PullRequestFilter contains filter properties for matching GitHub Pull\nRequests.", + "description": "PullRequestFilter contains filter properties for matching GitHub Pull Requests.", "id": "PullRequestFilter", "properties": { "branch": { - "description": "Regex of branches to match.\n\nThe syntax of the regular expressions accepted is the syntax accepted by\nRE2 and described at https://github.com/google/re2/wiki/Syntax", + "description": "Regex of branches to match. The syntax of the regular expressions accepted is the syntax accepted by RE2 and described at https://github.com/google/re2/wiki/Syntax", "type": "string" }, "commentControl": { - "description": "Configure builds to run only when a repository owner or collaborator\ncomments `/gcbrun`.", + "description": "Configure builds to run whether a repository owner or collaborator need to comment `/gcbrun`.", "enum": [ "COMMENTS_DISABLED", - "COMMENTS_ENABLED" + "COMMENTS_ENABLED", + "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY" ], "enumDescriptions": [ "Do not require comments on Pull Requests before builds are triggered.", - "Enforce that repository owners or collaborators must comment on Pull\nRequests before builds are triggered." + "Enforce that repository owners or collaborators must comment on Pull Requests before builds are triggered.", + "Enforce that repository owners or collaborators must comment on external contributors' Pull Requests before builds are triggered." ], "type": "string" }, @@ -1305,15 +1673,15 @@ "id": "PushFilter", "properties": { "branch": { - "description": "Regexes matching branches to build.\n\nThe syntax of the regular expressions accepted is the syntax accepted by\nRE2 and described at https://github.com/google/re2/wiki/Syntax", + "description": "Regexes matching branches to build. The syntax of the regular expressions accepted is the syntax accepted by RE2 and described at https://github.com/google/re2/wiki/Syntax", "type": "string" }, "invertRegex": { - "description": "When true, only trigger a build if the revision regex does NOT match the\ngit_ref regex.", + "description": "When true, only trigger a build if the revision regex does NOT match the git_ref regex.", "type": "boolean" }, "tag": { - "description": "Regexes matching tags to build.\n\nThe syntax of the regular expressions accepted is the syntax accepted by\nRE2 and described at https://github.com/google/re2/wiki/Syntax", + "description": "Regexes matching tags to build. The syntax of the regular expressions accepted is the syntax accepted by RE2 and described at https://github.com/google/re2/wiki/Syntax", "type": "string" } }, @@ -1324,7 +1692,7 @@ "id": "RepoSource", "properties": { "branchName": { - "description": "Regex matching branches to build.\n\nThe syntax of the regular expressions accepted is the syntax accepted by\nRE2 and described at https://github.com/google/re2/wiki/Syntax", + "description": "Regex matching branches to build. The syntax of the regular expressions accepted is the syntax accepted by RE2 and described at https://github.com/google/re2/wiki/Syntax", "type": "string" }, "commitSha": { @@ -1332,15 +1700,15 @@ "type": "string" }, "dir": { - "description": "Directory, relative to the source root, in which to run the build.\n\nThis must be a relative path. 
If a step's `dir` is specified and is an\nabsolute path, this value is ignored for that step's execution.", + "description": "Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution.", "type": "string" }, "invertRegex": { - "description": "Only trigger a build if the revision regex does NOT match the revision\nregex.", + "description": "Only trigger a build if the revision regex does NOT match the revision regex.", "type": "boolean" }, "projectId": { - "description": "ID of the project that owns the Cloud Source Repository. If omitted, the\nproject ID requesting the build is assumed.", + "description": "ID of the project that owns the Cloud Source Repository. If omitted, the project ID requesting the build is assumed.", "type": "string" }, "repoName": { @@ -1351,11 +1719,11 @@ "additionalProperties": { "type": "string" }, - "description": "Substitutions to use in a triggered build.\nShould only be used with RunBuildTrigger", + "description": "Substitutions to use in a triggered build. Should only be used with RunBuildTrigger", "type": "object" }, "tagName": { - "description": "Regex matching tags to build.\n\nThe syntax of the regular expressions accepted is the syntax accepted by\nRE2 and described at https://github.com/google/re2/wiki/Syntax", + "description": "Regex matching tags to build. The syntax of the regular expressions accepted is the syntax accepted by RE2 and described at https://github.com/google/re2/wiki/Syntax", "type": "string" } }, @@ -1374,14 +1742,14 @@ "description": "Time to push all non-container artifacts." }, "buildStepImages": { - "description": "List of build step digests, in the order corresponding to build step\nindices.", + "description": "List of build step digests, in the order corresponding to build step indices.", "items": { "type": "string" }, "type": "array" }, "buildStepOutputs": { - "description": "List of build step outputs, produced by builder images, in the order\ncorresponding to build step indices.\n\n[Cloud Builders](https://cloud.google.com/cloud-build/docs/cloud-builders)\ncan produce this output by writing to `$BUILDER_OUTPUT/output`.\nOnly the first 4KB of data is stored.", + "description": "List of build step outputs, produced by builder images, in the order corresponding to build step indices. [Cloud Builders](https://cloud.google.com/cloud-build/docs/cloud-builders) can produce this output by writing to `$BUILDER_OUTPUT/output`. Only the first 4KB of data is stored.", "items": { "format": "byte", "type": "string" @@ -1406,11 +1774,58 @@ "RetryBuildRequest": { "description": "Specifies a build to retry.", "id": "RetryBuildRequest", - "properties": {}, + "properties": { + "id": { + "description": "Required. Build ID of the original build.", + "type": "string" + }, + "name": { + "description": "The name of the `Build` to retry. Format: `projects/{project}/locations/{location}/builds/{build}`", + "type": "string" + }, + "projectId": { + "description": "Required. ID of the project.", + "type": "string" + } + }, + "type": "object" + }, + "SMTPDelivery": { + "description": "SMTPDelivery is the delivery configuration for an SMTP (email) notification.", + "id": "SMTPDelivery", + "properties": { + "fromAddress": { + "description": "This is the SMTP account/email that appears in the `From:` of the email. 
If empty, it is assumed to be sender.", + "type": "string" + }, + "password": { + "$ref": "NotifierSecretRef", + "description": "The SMTP sender's password." + }, + "port": { + "description": "The SMTP port of the server.", + "type": "string" + }, + "recipientAddresses": { + "description": "This is the list of addresses to which we send the email (i.e. in the `To:` of the email).", + "items": { + "type": "string" + }, + "type": "array" + }, + "senderAddress": { + "description": "This is the SMTP account/email that is used to send the message.", + "type": "string" + }, + "server": { + "description": "The address of the SMTP server.", + "type": "string" + } + }, "type": "object" }, "Secret": { - "description": "Pairs a set of secret environment variables containing encrypted\nvalues with the Cloud KMS key to use to decrypt the value.", + "description": "Pairs a set of secret environment variables containing encrypted values with the Cloud KMS key to use to decrypt the value.", "id": "Secret", "properties": { "kmsKeyName": { @@ -1422,19 +1837,30 @@ "format": "byte", "type": "string" }, - "description": "Map of environment variable name to its encrypted value.\n\nSecret environment variables must be unique across all of a build's\nsecrets, and must be used by at least one build step. Values can be at most\n64 KB in size. There can be at most 100 secret values across all of a\nbuild's secrets.", + "description": "Map of environment variable name to its encrypted value. Secret environment variables must be unique across all of a build's secrets, and must be used by at least one build step. Values can be at most 64 KB in size. There can be at most 100 secret values across all of a build's secrets.", "type": "object" } }, "type": "object" }, + "SlackDelivery": { + "description": "SlackDelivery is the delivery configuration for delivering Slack messages via webhooks. See Slack webhook documentation at: https://api.slack.com/messaging/webhooks.", + "id": "SlackDelivery", + "properties": { + "webhookUri": { + "$ref": "NotifierSecretRef", + "description": "The secret reference for the Slack webhook URI for sending messages to a channel." + } + }, + "type": "object" + }, "Source": { "description": "Location of the source in a supported storage service.", "id": "Source", "properties": { "repoSource": { "$ref": "RepoSource", - "description": "If provided, get the source from this location in a Cloud Source\nRepository." + "description": "If provided, get the source from this location in a Cloud Source Repository." }, "storageSource": { "$ref": "StorageSource", @@ -1444,29 +1870,30 @@ "type": "object" }, "SourceProvenance": { - "description": "Provenance of the source. Ways to find the original source, or verify that\nsome source was used for this build.", + "description": "Provenance of the source. Ways to find the original source, or verify that some source was used for this build.", "id": "SourceProvenance", "properties": { "fileHashes": { "additionalProperties": { "$ref": "FileHashes" }, - "description": "Output only. Hash(es) of the build source, which can be used to verify that\nthe original source integrity was maintained in the build. 
Note that\n`FileHashes` will only be populated if `BuildOptions` has requested a\n`SourceProvenanceHash`.\n\nThe keys to this map are file paths used as build source and the values\ncontain the hash values for those files.\n\nIf the build source came in a single package such as a gzipped tarfile\n(`.tar.gz`), the `FileHash` will be for the single path to that file.", + "description": "Output only. Hash(es) of the build source, which can be used to verify that the original source integrity was maintained in the build. Note that `FileHashes` will only be populated if `BuildOptions` has requested a `SourceProvenanceHash`. The keys to this map are file paths used as build source and the values contain the hash values for those files. If the build source came in a single package such as a gzipped tarfile (`.tar.gz`), the `FileHash` will be for the single path to that file.", + "readOnly": true, "type": "object" }, "resolvedRepoSource": { "$ref": "RepoSource", - "description": "A copy of the build's `source.repo_source`, if exists, with any\nrevisions resolved." + "description": "A copy of the build's `source.repo_source`, if exists, with any revisions resolved." }, "resolvedStorageSource": { "$ref": "StorageSource", - "description": "A copy of the build's `source.storage_source`, if exists, with any\ngenerations resolved." + "description": "A copy of the build's `source.storage_source`, if exists, with any generations resolved." } }, "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -1475,7 +1902,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -1486,7 +1913,7 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "description": "A developer-facing error message, which should be in English. 
Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, @@ -1497,16 +1924,16 @@ "id": "StorageSource", "properties": { "bucket": { - "description": "Google Cloud Storage bucket containing the source (see\n[Bucket Name\nRequirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)).", + "description": "Google Cloud Storage bucket containing the source (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)).", "type": "string" }, "generation": { - "description": "Google Cloud Storage generation for the object. If the generation is\nomitted, the latest generation will be used.", + "description": "Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used.", "format": "int64", "type": "string" }, "object": { - "description": "Google Cloud Storage object containing the source.\n\nThis object must be a gzipped archive file (`.tar.gz`) containing source to\nbuild.", + "description": "Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build.", "type": "string" } }, @@ -1530,15 +1957,15 @@ "type": "object" }, "Volume": { - "description": "Volume describes a Docker container volume which is mounted into build steps\nin order to persist files across build step execution.", + "description": "Volume describes a Docker container volume which is mounted into build steps in order to persist files across build step execution.", "id": "Volume", "properties": { "name": { - "description": "Name of the volume to mount.\n\nVolume names must be unique per build step and must be valid names for\nDocker volumes. Each named volume must be used by at least two build steps.", + "description": "Name of the volume to mount. Volume names must be unique per build step and must be valid names for Docker volumes. Each named volume must be used by at least two build steps.", "type": "string" }, "path": { - "description": "Path at which to mount the volume.\n\nPaths must be absolute and cannot conflict with other volume paths on the\nsame build step or with certain reserved volume paths.", + "description": "Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths.", "type": "string" } }, diff --git a/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go b/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go index 3c645f02556..82fa3e825a4 100644 --- a/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go +++ b/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go @@ -75,6 +75,7 @@ const apiId = "cloudbuild:v1" const apiName = "cloudbuild" const apiVersion = "v1" const basePath = "https://cloudbuild.googleapis.com/" +const mtlsBasePath = "https://cloudbuild.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -90,6 +91,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) 
if err != nil { return nil, err @@ -148,6 +150,7 @@ type OperationsService struct { func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.Builds = NewProjectsBuildsService(s) + rs.Locations = NewProjectsLocationsService(s) rs.Triggers = NewProjectsTriggersService(s) return rs } @@ -157,6 +160,8 @@ type ProjectsService struct { Builds *ProjectsBuildsService + Locations *ProjectsLocationsService + Triggers *ProjectsTriggersService } @@ -169,6 +174,39 @@ type ProjectsBuildsService struct { s *Service } +func NewProjectsLocationsService(s *Service) *ProjectsLocationsService { + rs := &ProjectsLocationsService{s: s} + rs.Builds = NewProjectsLocationsBuildsService(s) + rs.Operations = NewProjectsLocationsOperationsService(s) + return rs +} + +type ProjectsLocationsService struct { + s *Service + + Builds *ProjectsLocationsBuildsService + + Operations *ProjectsLocationsOperationsService +} + +func NewProjectsLocationsBuildsService(s *Service) *ProjectsLocationsBuildsService { + rs := &ProjectsLocationsBuildsService{s: s} + return rs +} + +type ProjectsLocationsBuildsService struct { + s *Service +} + +func NewProjectsLocationsOperationsService(s *Service) *ProjectsLocationsOperationsService { + rs := &ProjectsLocationsOperationsService{s: s} + return rs +} + +type ProjectsLocationsOperationsService struct { + s *Service +} + func NewProjectsTriggersService(s *Service) *ProjectsTriggersService { rs := &ProjectsTriggersService{s: s} return rs @@ -179,19 +217,13 @@ type ProjectsTriggersService struct { } // ArtifactObjects: Files in the workspace to upload to Cloud Storage -// upon successful -// completion of all build steps. +// upon successful completion of all build steps. type ArtifactObjects struct { - // Location: Cloud Storage bucket and optional object path, in the - // form - // "gs://bucket/path/to/somewhere/". (see [Bucket - // Name - // Requirements](https://cloud.google.com/storage/docs/bucket-naming - // #requirements)). - // - // Files in the workspace matching any path pattern will be uploaded - // to - // Cloud Storage with this location as a prefix. + // Location: Cloud Storage bucket and optional object path, in the form + // "gs://bucket/path/to/somewhere/". (see [Bucket Name + // Requirements](https://cloud.google.com/storage/docs/bucket-naming#requ + // irements)). Files in the workspace matching any path pattern will be + // uploaded to Cloud Storage with this location as a prefix. Location string `json:"location,omitempty"` // Paths: Path globs used to match files in the build's workspace. @@ -224,16 +256,14 @@ func (s *ArtifactObjects) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ArtifactResult: An artifact that was uploaded during a build. This -// is a single record in the artifact manifest JSON file. +// ArtifactResult: An artifact that was uploaded during a build. This is +// a single record in the artifact manifest JSON file. type ArtifactResult struct { // FileHash: The file hash of the artifact. FileHash []*FileHashes `json:"fileHash,omitempty"` // Location: The path of an artifact in a Google Cloud Storage bucket, - // with the - // generation number. For - // example, + // with the generation number. For example, // `gs://mybucket/path/to/output.jar#generation`. 
Location string `json:"location,omitempty"` @@ -260,39 +290,23 @@ func (s *ArtifactResult) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Artifacts: Artifacts produced by a build that should be uploaded -// upon +// Artifacts: Artifacts produced by a build that should be uploaded upon // successful completion of all build steps. type Artifacts struct { // Images: A list of images to be pushed upon the successful completion - // of all build - // steps. - // - // The images will be pushed using the builder service account's - // credentials. - // - // The digests of the pushed images will be stored in the Build - // resource's - // results field. - // - // If any of the images fail to be pushed, the build is marked FAILURE. + // of all build steps. The images will be pushed using the builder + // service account's credentials. The digests of the pushed images will + // be stored in the Build resource's results field. If any of the images + // fail to be pushed, the build is marked FAILURE. Images []string `json:"images,omitempty"` // Objects: A list of objects to be uploaded to Cloud Storage upon - // successful - // completion of all build steps. - // - // Files in the workspace matching specified paths globs will be - // uploaded to - // the specified Cloud Storage location using the builder service - // account's - // credentials. - // - // The location and generation of the uploaded objects will be stored in - // the - // Build resource's results field. - // - // If any objects fail to be pushed, the build is marked FAILURE. + // successful completion of all build steps. Files in the workspace + // matching specified paths globs will be uploaded to the specified + // Cloud Storage location using the builder service account's + // credentials. The location and generation of the uploaded objects will + // be stored in the Build resource's results field. If any objects fail + // to be pushed, the build is marked FAILURE. Objects *ArtifactObjects `json:"objects,omitempty"` // ForceSendFields is a list of field names (e.g. "Images") to @@ -318,36 +332,25 @@ func (s *Artifacts) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Build: A build resource in the Cloud Build API. -// -// At a high level, a `Build` describes where to find source code, how -// to build -// it (for example, the builder image to run on the source), and where -// to store -// the built artifacts. -// -// Fields can include the following variables, which will be expanded -// when the -// build is created: -// -// - $PROJECT_ID: the project ID of the build. -// - $BUILD_ID: the autogenerated ID of the build. -// - $REPO_NAME: the source repository name specified by RepoSource. -// - $BRANCH_NAME: the branch name specified by RepoSource. -// - $TAG_NAME: the tag name specified by RepoSource. -// - $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource -// or -// resolved from the specified branch or tag. -// - $SHORT_SHA: first 7 characters of $REVISION_ID or $COMMIT_SHA. +// Build: A build resource in the Cloud Build API. At a high level, a +// `Build` describes where to find source code, how to build it (for +// example, the builder image to run on the source), and where to store +// the built artifacts. Fields can include the following variables, +// which will be expanded when the build is created: - $PROJECT_ID: the +// project ID of the build. 
- $BUILD_ID: the autogenerated ID of the +// build. - $REPO_NAME: the source repository name specified by +// RepoSource. - $BRANCH_NAME: the branch name specified by RepoSource. +// - $TAG_NAME: the tag name specified by RepoSource. - $REVISION_ID or +// $COMMIT_SHA: the commit SHA specified by RepoSource or resolved from +// the specified branch or tag. - $SHORT_SHA: first 7 characters of +// $REVISION_ID or $COMMIT_SHA. type Build struct { // Artifacts: Artifacts produced by the build that should be uploaded - // upon - // successful completion of all build steps. + // upon successful completion of all build steps. Artifacts *Artifacts `json:"artifacts,omitempty"` // BuildTriggerId: Output only. The ID of the `BuildTrigger` that - // triggered this build, if it - // was triggered automatically. + // triggered this build, if it was triggered automatically. BuildTriggerId string `json:"buildTriggerId,omitempty"` // CreateTime: Output only. Time at which the request to create the @@ -355,30 +358,18 @@ type Build struct { CreateTime string `json:"createTime,omitempty"` // FinishTime: Output only. Time at which execution of the build was - // finished. - // - // The difference between finish_time and start_time is the duration of - // the - // build's execution. + // finished. The difference between finish_time and start_time is the + // duration of the build's execution. FinishTime string `json:"finishTime,omitempty"` // Id: Output only. Unique identifier of the build. Id string `json:"id,omitempty"` // Images: A list of images to be pushed upon the successful completion - // of all build - // steps. - // - // The images are pushed using the builder service account's - // credentials. - // - // The digests of the pushed images will be stored in the `Build` - // resource's - // results field. - // - // If any of the images fail to be pushed, the build status is - // marked - // `FAILURE`. + // of all build steps. The images are pushed using the builder service + // account's credentials. The digests of the pushed images will be + // stored in the `Build` resource's results field. If any of the images + // fail to be pushed, the build status is marked `FAILURE`. Images []string `json:"images,omitempty"` // LogUrl: Output only. URL to logs for this build in Google Cloud @@ -386,15 +377,17 @@ type Build struct { LogUrl string `json:"logUrl,omitempty"` // LogsBucket: Google Cloud Storage bucket where logs should be written - // (see - // [Bucket - // Name - // Requirements](https://cloud.google.com/storage/docs/bucket-naming - // #requirements)). - // Logs file names will be of the format + // (see [Bucket Name + // Requirements](https://cloud.google.com/storage/docs/bucket-naming#requ + // irements)). Logs file names will be of the format // `${logs_bucket}/log-${build_id}.txt`. LogsBucket string `json:"logsBucket,omitempty"` + // Name: Output only. The 'Build' name with format: + // `projects/{project}/locations/{location}/builds/{build}`, where + // {build} is a unique identifier generated by the service. + Name string `json:"name,omitempty"` + // Options: Special options for this build. Options *BuildOptions `json:"options,omitempty"` @@ -402,12 +395,8 @@ type Build struct { ProjectId string `json:"projectId,omitempty"` // QueueTtl: TTL in queue for this build. If provided and the build is - // enqueued longer - // than this value, the build will expire and the build status will - // be - // `EXPIRED`. - // - // The TTL starts ticking from create_time. 
+ // enqueued longer than this value, the build will expire and the build + // status will be `EXPIRED`. The TTL starts ticking from create_time. QueueTtl string `json:"queueTtl,omitempty"` // Results: Output only. Results of the build. @@ -416,6 +405,13 @@ type Build struct { // Secrets: Secrets to decrypt using Cloud Key Management Service. Secrets []*Secret `json:"secrets,omitempty"` + // ServiceAccount: IAM service account whose credentials will be used at + // build runtime. Must be of the format + // `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. ACCOUNT can be + // email address or uniqueId of the service account. This field is in + // alpha and is not publicly available. + ServiceAccount string `json:"serviceAccount,omitempty"` + // Source: The location of the source files to build. Source *Source `json:"source,omitempty"` @@ -456,24 +452,16 @@ type Build struct { Tags []string `json:"tags,omitempty"` // Timeout: Amount of time that this build should be allowed to run, to - // second - // granularity. If this amount of time elapses, work on the build will - // cease - // and the build status will be `TIMEOUT`. - // - // Default time is ten minutes. + // second granularity. If this amount of time elapses, work on the build + // will cease and the build status will be `TIMEOUT`. `timeout` starts + // ticking from `startTime`. Default time is ten minutes. Timeout string `json:"timeout,omitempty"` // Timing: Output only. Stores timing information for phases of the - // build. Valid keys - // are: - // - // * BUILD: time to execute all build steps - // * PUSH: time to push all specified images. - // * FETCHSOURCE: time to fetch source. - // - // If the build does not specify source or images, - // these keys will not be included. + // build. Valid keys are: * BUILD: time to execute all build steps * + // PUSH: time to push all specified images. * FETCHSOURCE: time to fetch + // source. If the build does not specify source or images, these keys + // will not be included. Timing map[string]TimeSpan `json:"timing,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -535,32 +523,29 @@ func (s *BuildOperationMetadata) MarshalJSON() ([]byte, error) { // builds. type BuildOptions struct { // DiskSizeGb: Requested disk size for the VM that runs the build. Note - // that this is *NOT* - // "disk free"; some of the space will be used by the operating system - // and - // build utilities. Also note that this is the minimum disk size that - // will be - // allocated for the build -- the build may run with a larger disk - // than - // requested. At present, the maximum disk size is 1000GB; builds that - // request - // more than the maximum are rejected with an error. + // that this is *NOT* "disk free"; some of the space will be used by the + // operating system and build utilities. Also note that this is the + // minimum disk size that will be allocated for the build -- the build + // may run with a larger disk than requested. At present, the maximum + // disk size is 1000GB; builds that request more than the maximum are + // rejected with an error. DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` + // DynamicSubstitutions: Option to specify whether or not to apply bash + // style string operations to the substitutions. NOTE: this is always + // enabled for triggered builds and cannot be overridden in the build + // configuration file. 
+ DynamicSubstitutions bool `json:"dynamicSubstitutions,omitempty"` + // Env: A list of global environment variable definitions that will - // exist for all - // build steps in this build. If a variable is defined in both globally - // and in - // a build step, the variable will use the build step value. - // - // The elements are of the form "KEY=VALUE" for the environment variable - // "KEY" - // being given the value "VALUE". + // exist for all build steps in this build. If a variable is defined in + // both globally and in a build step, the variable will use the build + // step value. The elements are of the form "KEY=VALUE" for the + // environment variable "KEY" being given the value "VALUE". Env []string `json:"env,omitempty"` // LogStreamingOption: Option to define build log streaming behavior to - // Google Cloud - // Storage. + // Google Cloud Storage. // // Possible values: // "STREAM_DEFAULT" - Service may automatically determine build log @@ -568,21 +553,24 @@ type BuildOptions struct { // "STREAM_ON" - Build logs should be streamed to Google Cloud // Storage. // "STREAM_OFF" - Build logs should not be streamed to Google Cloud - // Storage; they will be - // written when the build is completed. + // Storage; they will be written when the build is completed. LogStreamingOption string `json:"logStreamingOption,omitempty"` - // Logging: Option to specify the logging mode, which determines where - // the logs are - // stored. + // Logging: Option to specify the logging mode, which determines if and + // where build logs are stored. // // Possible values: // "LOGGING_UNSPECIFIED" - The service determines the logging mode. - // The default is `LEGACY`. Do not - // rely on the default logging behavior as it may change in the future. - // "LEGACY" - Stackdriver logging and Cloud Storage logging are - // enabled. + // The default is `LEGACY`. Do not rely on the default logging behavior + // as it may change in the future. + // "LEGACY" - Cloud Logging and Cloud Storage logging are enabled. // "GCS_ONLY" - Only Cloud Storage logging is enabled. + // "STACKDRIVER_ONLY" - This option is the same as CLOUD_LOGGING_ONLY. + // "CLOUD_LOGGING_ONLY" - Only Cloud Logging is enabled. Note that + // logs for both the Cloud Console UI and Cloud SDK are based on Cloud + // Storage logs, so neither will provide logs if this option is chosen. + // "NONE" - Turn off all logging. No build logs will be captured. Next + // ID: 6 Logging string `json:"logging,omitempty"` // MachineType: Compute Engine machine type on which to run the build. @@ -601,12 +589,9 @@ type BuildOptions struct { RequestedVerifyOption string `json:"requestedVerifyOption,omitempty"` // SecretEnv: A list of global environment variables, which are - // encrypted using a Cloud - // Key Management Service crypto key. These values must be specified in - // the - // build's `Secret`. These variables will be available to all build - // steps - // in this build. + // encrypted using a Cloud Key Management Service crypto key. These + // values must be specified in the build's `Secret`. These variables + // will be available to all build steps in this build. SecretEnv []string `json:"secretEnv,omitempty"` // SourceProvenanceHash: Requested hash for SourceProvenance. @@ -618,36 +603,29 @@ type BuildOptions struct { SourceProvenanceHash []string `json:"sourceProvenanceHash,omitempty"` // SubstitutionOption: Option to specify behavior when there is an error - // in the substitution - // checks. + // in the substitution checks. 
NOTE: this is always set to ALLOW_LOOSE + // for triggered builds and cannot be overridden in the build + // configuration file. // // Possible values: // "MUST_MATCH" - Fails the build if error in substitutions checks, - // like missing - // a substitution in the template or in the map. + // like missing a substitution in the template or in the map. // "ALLOW_LOOSE" - Do not fail the build if error in substitutions // checks. SubstitutionOption string `json:"substitutionOption,omitempty"` - // Volumes: Global list of volumes to mount for ALL build steps - // - // Each volume is created as an empty volume prior to starting the - // build - // process. Upon completion of the build, volumes and their contents - // are + // Volumes: Global list of volumes to mount for ALL build steps Each + // volume is created as an empty volume prior to starting the build + // process. Upon completion of the build, volumes and their contents are // discarded. Global volume names and paths cannot conflict with the - // volumes - // defined a build step. - // - // Using a global volume in a build with only one step is not valid - // as - // it is indicative of a build request with an incorrect configuration. + // volumes defined a build step. Using a global volume in a build with + // only one step is not valid as it is indicative of a build request + // with an incorrect configuration. Volumes []*Volume `json:"volumes,omitempty"` - // WorkerPool: Option to specify a `WorkerPool` for the build. - // Format: projects/{project}/workerPools/{workerPool} - // - // This field is experimental. + // WorkerPool: Option to specify a `WorkerPool` for the build. Format: + // projects/{project}/locations/{location}/workerPools/{workerPool} This + // field is experimental. WorkerPool string `json:"workerPool,omitempty"` // ForceSendFields is a list of field names (e.g. "DiskSizeGb") to @@ -676,100 +654,62 @@ func (s *BuildOptions) MarshalJSON() ([]byte, error) { // BuildStep: A step in the build pipeline. type BuildStep struct { // Args: A list of arguments that will be presented to the step when it - // is started. - // - // If the image used to run the step's container has an entrypoint, the - // `args` - // are used as arguments to that entrypoint. If the image does not - // define - // an entrypoint, the first element in args is used as the - // entrypoint, - // and the remainder will be used as arguments. + // is started. If the image used to run the step's container has an + // entrypoint, the `args` are used as arguments to that entrypoint. If + // the image does not define an entrypoint, the first element in args is + // used as the entrypoint, and the remainder will be used as arguments. Args []string `json:"args,omitempty"` - // Dir: Working directory to use when running this step's container. - // - // If this value is a relative path, it is relative to the build's - // working + // Dir: Working directory to use when running this step's container. If + // this value is a relative path, it is relative to the build's working // directory. If this value is absolute, it may be outside the build's - // working - // directory, in which case the contents of the path may not be - // persisted - // across build step executions, unless a `volume` for that path is - // specified. - // - // If the build specifies a `RepoSource` with `dir` and a step with a - // `dir`, - // which specifies an absolute path, the `RepoSource` `dir` is ignored - // for - // the step's execution. 
+ // working directory, in which case the contents of the path may not be + // persisted across build step executions, unless a `volume` for that + // path is specified. If the build specifies a `RepoSource` with `dir` + // and a step with a `dir`, which specifies an absolute path, the + // `RepoSource` `dir` is ignored for the step's execution. Dir string `json:"dir,omitempty"` // Entrypoint: Entrypoint to be used instead of the build step image's - // default entrypoint. - // If unset, the image's default entrypoint is used. + // default entrypoint. If unset, the image's default entrypoint is used. Entrypoint string `json:"entrypoint,omitempty"` // Env: A list of environment variable definitions to be used when - // running a step. - // - // The elements are of the form "KEY=VALUE" for the environment variable - // "KEY" - // being given the value "VALUE". + // running a step. The elements are of the form "KEY=VALUE" for the + // environment variable "KEY" being given the value "VALUE". Env []string `json:"env,omitempty"` - // Id: Unique identifier for this build step, used in `wait_for` - // to + // Id: Unique identifier for this build step, used in `wait_for` to // reference this build step as a dependency. Id string `json:"id,omitempty"` // Name: Required. The name of the container image that will run this - // particular - // build step. - // - // If the image is available in the host's Docker daemon's cache, - // it - // will be run directly. If not, the host will attempt to pull the - // image - // first, using the builder service account's credentials if - // necessary. - // - // The Docker daemon's cache will already have the latest versions of - // all of - // the officially supported build - // steps - // ([https://github.com/GoogleCloudPlatform/cloud-builders](https:/ - // /github.com/GoogleCloudPlatform/cloud-builders)). - // The Docker daemon will also have cached many of the layers for some - // popular - // images, like "ubuntu", "debian", but they will be refreshed at the - // time you - // attempt to use them. - // - // If you built an image in a previous build step, it will be stored in - // the - // host's Docker daemon's cache and is available to use as the name for - // a - // later build step. + // particular build step. If the image is available in the host's Docker + // daemon's cache, it will be run directly. If not, the host will + // attempt to pull the image first, using the builder service account's + // credentials if necessary. The Docker daemon's cache will already have + // the latest versions of all of the officially supported build steps + // ([https://github.com/GoogleCloudPlatform/cloud-builders](https://githu + // b.com/GoogleCloudPlatform/cloud-builders)). The Docker daemon will + // also have cached many of the layers for some popular images, like + // "ubuntu", "debian", but they will be refreshed at the time you + // attempt to use them. If you built an image in a previous build step, + // it will be stored in the host's Docker daemon's cache and is + // available to use as the name for a later build step. Name string `json:"name,omitempty"` // PullTiming: Output only. Stores timing information for pulling this - // build step's - // builder image only. + // build step's builder image only. PullTiming *TimeSpan `json:"pullTiming,omitempty"` // SecretEnv: A list of environment variables which are encrypted using - // a Cloud Key - // Management Service crypto key. These values must be specified in - // the - // build's `Secret`. 
+ // a Cloud Key Management Service crypto key. These values must be + // specified in the build's `Secret`. SecretEnv []string `json:"secretEnv,omitempty"` // Status: Output only. Status of the build step. At this time, build - // step status is - // only updated on build completion; step status is not updated in - // real-time - // as the build progresses. + // step status is only updated on build completion; step status is not + // updated in real-time as the build progresses. // // Possible values: // "STATUS_UNKNOWN" - Status of the build is unknown. @@ -785,38 +725,26 @@ type BuildStep struct { Status string `json:"status,omitempty"` // Timeout: Time limit for executing this build step. If not defined, - // the step has no - // time limit and will be allowed to continue to run until either it - // completes - // or the build itself times out. + // the step has no time limit and will be allowed to continue to run + // until either it completes or the build itself times out. Timeout string `json:"timeout,omitempty"` // Timing: Output only. Stores timing information for executing this // build step. Timing *TimeSpan `json:"timing,omitempty"` - // Volumes: List of volumes to mount into the build step. - // - // Each volume is created as an empty volume prior to execution of - // the - // build step. Upon completion of the build, volumes and their contents - // are - // discarded. - // + // Volumes: List of volumes to mount into the build step. Each volume is + // created as an empty volume prior to execution of the build step. Upon + // completion of the build, volumes and their contents are discarded. // Using a named volume in only one step is not valid as it is - // indicative - // of a build request with an incorrect configuration. + // indicative of a build request with an incorrect configuration. Volumes []*Volume `json:"volumes,omitempty"` - // WaitFor: The ID(s) of the step(s) that this build step depends - // on. + // WaitFor: The ID(s) of the step(s) that this build step depends on. // This build step will not start until all the build steps in - // `wait_for` - // have completed successfully. If `wait_for` is empty, this build step - // will - // start when all previous build steps in the `Build.Steps` list - // have - // completed successfully. + // `wait_for` have completed successfully. If `wait_for` is empty, this + // build step will start when all previous build steps in the + // `Build.Steps` list have completed successfully. WaitFor []string `json:"waitFor,omitempty"` // ForceSendFields is a list of field names (e.g. "Args") to @@ -843,8 +771,7 @@ func (s *BuildStep) MarshalJSON() ([]byte, error) { } // BuildTrigger: Configuration for an automated build in response to -// source repository -// changes. +// source repository changes. type BuildTrigger struct { // Build: Contents of the build template. Build *Build `json:"build,omitempty"` @@ -855,82 +782,60 @@ type BuildTrigger struct { // Description: Human-readable description of this trigger. Description string `json:"description,omitempty"` - // Disabled: If true, the trigger will never result in a build. + // Disabled: If true, the trigger will never automatically execute a + // build. Disabled bool `json:"disabled,omitempty"` // Filename: Path, from the source root, to a file whose contents is - // used for the - // template. + // used for the template. 
Filename string `json:"filename,omitempty"` // Github: GitHubEventsConfig describes the configuration of a trigger - // that creates - // a build whenever a GitHub event is received. - // - // Mutually exclusive with `trigger_template`. + // that creates a build whenever a GitHub event is received. Mutually + // exclusive with `trigger_template`. Github *GitHubEventsConfig `json:"github,omitempty"` // Id: Output only. Unique identifier of the trigger. Id string `json:"id,omitempty"` // IgnoredFiles: ignored_files and included_files are file glob matches - // using - // https://golang.org/pkg/path/filepath/#Match extended with support for - // "**". - // - // If ignored_files and changed files are both empty, then they are - // not used to determine whether or not to trigger a build. - // - // If ignored_files is not empty, then we ignore any files that - // match - // any of the ignored_file globs. If the change has no files that - // are - // outside of the ignored_files globs, then we do not trigger a build. + // using https://golang.org/pkg/path/filepath/#Match extended with + // support for "**". If ignored_files and changed files are both empty, + // then they are not used to determine whether or not to trigger a + // build. If ignored_files is not empty, then we ignore any files that + // match any of the ignored_file globs. If the change has no files that + // are outside of the ignored_files globs, then we do not trigger a + // build. IgnoredFiles []string `json:"ignoredFiles,omitempty"` // IncludedFiles: If any of the files altered in the commit pass the - // ignored_files - // filter and included_files is empty, then as far as this filter - // is - // concerned, we should trigger the build. - // - // If any of the files altered in the commit pass the - // ignored_files - // filter and included_files is not empty, then we make sure that - // at - // least one of those files matches a included_files glob. If not, - // then we do not trigger a build. + // ignored_files filter and included_files is empty, then as far as this + // filter is concerned, we should trigger the build. If any of the files + // altered in the commit pass the ignored_files filter and + // included_files is not empty, then we make sure that at least one of + // those files matches a included_files glob. If not, then we do not + // trigger a build. IncludedFiles []string `json:"includedFiles,omitempty"` // Name: User-assigned name of the trigger. Must be unique within the - // project. - // Trigger names must meet the following requirements: - // - // + They must contain only alphanumeric characters and dashes. - // + They can be 1-64 characters long. - // + They must begin and end with an alphanumeric character. + // project. Trigger names must meet the following requirements: + They + // must contain only alphanumeric characters and dashes. + They can be + // 1-64 characters long. + They must begin and end with an alphanumeric + // character. Name string `json:"name,omitempty"` // Substitutions: Substitutions for Build resource. The keys must match - // the following - // regular expression: `^_[A-Z0-9_]+$`.The keys cannot conflict with - // the - // keys in bindings. + // the following regular expression: `^_[A-Z0-9_]+$`. Substitutions map[string]string `json:"substitutions,omitempty"` // Tags: Tags for annotation of a `BuildTrigger` Tags []string `json:"tags,omitempty"` // TriggerTemplate: Template describing the types of source changes to - // trigger a build. 
- // - // Branch and tag names in trigger templates are interpreted as - // regular - // expressions. Any branch or tag change that matches that regular - // expression - // will trigger a build. - // - // Mutually exclusive with `github`. + // trigger a build. Branch and tag names in trigger templates are + // interpreted as regular expressions. Any branch or tag change that + // matches that regular expression will trigger a build. Mutually + // exclusive with `github`. TriggerTemplate *RepoSource `json:"triggerTemplate,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -966,8 +871,7 @@ type BuiltImage struct { Digest string `json:"digest,omitempty"` // Name: Name used to push the container image to Google Container - // Registry, as - // presented to `docker push`. + // Registry, as presented to `docker push`. Name string `json:"name,omitempty"` // PushTiming: Output only. Stores timing information for pushing the @@ -999,6 +903,37 @@ func (s *BuiltImage) MarshalJSON() ([]byte, error) { // CancelBuildRequest: Request to cancel an ongoing build. type CancelBuildRequest struct { + // Id: Required. ID of the build. + Id string `json:"id,omitempty"` + + // Name: The name of the `Build` to retrieve. Format: + // `projects/{project}/locations/{location}/builds/{build}` + Name string `json:"name,omitempty"` + + // ProjectId: Required. ID of the project. + ProjectId string `json:"projectId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CancelBuildRequest) MarshalJSON() ([]byte, error) { + type NoMethod CancelBuildRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CancelOperationRequest: The request message for @@ -1007,17 +942,11 @@ type CancelOperationRequest struct { } // Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. 
@@ -1025,9 +954,8 @@ type Empty struct { } // FileHashes: Container message for hashes of byte content of files, -// used in -// SourceProvenance messages to verify integrity of source input to the -// build. +// used in SourceProvenance messages to verify integrity of source input +// to the build. type FileHashes struct { // FileHash: Collection of file hashes. FileHash []*Hash `json:"fileHash,omitempty"` @@ -1056,24 +984,19 @@ func (s *FileHashes) MarshalJSON() ([]byte, error) { } // GitHubEventsConfig: GitHubEventsConfig describes the configuration of -// a trigger that creates a -// build whenever a GitHub event is received. -// +// a trigger that creates a build whenever a GitHub event is received. // This message is experimental. type GitHubEventsConfig struct { // InstallationId: The installationID that emits the GitHub event. InstallationId int64 `json:"installationId,omitempty,string"` - // Name: Name of the repository. For example: The name - // for + // Name: Name of the repository. For example: The name for // https://github.com/googlecloudplatform/cloud-builders is // "cloud-builders". Name string `json:"name,omitempty"` - // Owner: Owner of the repository. For example: The owner - // for - // https://github.com/googlecloudplatform/cloud-builders - // is + // Owner: Owner of the repository. For example: The owner for + // https://github.com/googlecloudplatform/cloud-builders is // "googlecloudplatform". Owner string `json:"owner,omitempty"` @@ -1107,6 +1030,36 @@ func (s *GitHubEventsConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HTTPDelivery: HTTPDelivery is the delivery configuration for an HTTP +// notification. +type HTTPDelivery struct { + // Uri: The URI to which JSON-containing HTTP POST requests should be + // sent. + Uri string `json:"uri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Uri") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Uri") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HTTPDelivery) MarshalJSON() ([]byte, error) { + type NoMethod HTTPDelivery + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Hash: Container message for hash values. type Hash struct { // Type: The type of hash that was performed. @@ -1215,21 +1168,28 @@ func (s *ListBuildsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ListOperationsResponse: The response message for -// Operations.ListOperations. -type ListOperationsResponse struct { - // NextPageToken: The standard List next-page token. 
- NextPageToken string `json:"nextPageToken,omitempty"` +// Notification: Notification is the container which holds the data that +// is relevant to this particular notification. +type Notification struct { + // Filter: The filter string to use for notification filtering. + // Currently, this is assumed to be a CEL program. See + // https://opensource.google/projects/cel for more. + Filter string `json:"filter,omitempty"` - // Operations: A list of operations that matches the specified filter in - // the request. - Operations []*Operation `json:"operations,omitempty"` + // HttpDelivery: Configuration for HTTP delivery. + HttpDelivery *HTTPDelivery `json:"httpDelivery,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // SlackDelivery: Configuration for Slack delivery. + SlackDelivery *SlackDelivery `json:"slackDelivery,omitempty"` - // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // SmtpDelivery: Configuration for SMTP (email) delivery. + SmtpDelivery *SMTPDelivery `json:"smtpDelivery,omitempty"` + + // StructDelivery: Escape hatch for users to supply custom delivery + // configs. + StructDelivery googleapi.RawMessage `json:"structDelivery,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Filter") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -1237,7 +1197,179 @@ type ListOperationsResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NextPageToken") to include + // NullFields is a list of field names (e.g. "Filter") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Notification) MarshalJSON() ([]byte, error) { + type NoMethod Notification + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NotifierConfig: NotifierConfig is the top-level configuration +// message. +type NotifierConfig struct { + // ApiVersion: The API version of this configuration format. + ApiVersion string `json:"apiVersion,omitempty"` + + // Kind: The type of notifier to use (e.g. SMTPNotifier). + Kind string `json:"kind,omitempty"` + + // Metadata: Metadata for referring to/handling/deploying this notifier. + Metadata *NotifierMetadata `json:"metadata,omitempty"` + + // Spec: The actual configuration for this notifier. + Spec *NotifierSpec `json:"spec,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ApiVersion") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"ApiVersion") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NotifierConfig) MarshalJSON() ([]byte, error) { + type NoMethod NotifierConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NotifierMetadata: NotifierMetadata contains the data which can be +// used to reference or describe this notifier. +type NotifierMetadata struct { + // Name: The human-readable and user-given name for the notifier. For + // example: "repo-merge-email-notifier". + Name string `json:"name,omitempty"` + + // Notifier: The string representing the name and version of notifier to + // deploy. Expected to be of the form of "/:". For example: + // "gcr.io/my-project/notifiers/smtp:1.2.34". + Notifier string `json:"notifier,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NotifierMetadata) MarshalJSON() ([]byte, error) { + type NoMethod NotifierMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NotifierSecret: NotifierSecret is the container that maps a secret +// name (reference) to its Google Cloud Secret Manager resource path. +type NotifierSecret struct { + // Name: Name is the local name of the secret, such as the verbatim + // string "my-smtp-password". + Name string `json:"name,omitempty"` + + // Value: Value is interpreted to be a resource path for fetching the + // actual (versioned) secret data for this secret. For example, this + // would be a Google Cloud Secret Manager secret version resource path + // like: "projects/my-project/secrets/my-secret/versions/latest". + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NotifierSecret) MarshalJSON() ([]byte, error) { + type NoMethod NotifierSecret + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NotifierSecretRef: NotifierSecretRef contains the reference to a +// secret stored in the corresponding NotifierSpec. +type NotifierSecretRef struct { + // SecretRef: The value of `secret_ref` should be a `name` that is + // registered in a `Secret` in the `secrets` list of the `Spec`. + SecretRef string `json:"secretRef,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SecretRef") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SecretRef") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NotifierSecretRef) MarshalJSON() ([]byte, error) { + type NoMethod NotifierSecretRef + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NotifierSpec: NotifierSpec is the configuration container for +// notifications. +type NotifierSpec struct { + // Notification: The configuration of this particular notifier. + Notification *Notification `json:"notification,omitempty"` + + // Secrets: Configurations for secret resources used by this particular + // notifier. + Secrets []*NotifierSecret `json:"secrets,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Notification") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Notification") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -1246,59 +1378,45 @@ type ListOperationsResponse struct { NullFields []string `json:"-"` } -func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { - type NoMethod ListOperationsResponse +func (s *NotifierSpec) MarshalJSON() ([]byte, error) { + type NoMethod NotifierSpec raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is -// the result of a -// network API call. 
+// the result of a network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in - // progress. - // If `true`, the operation is completed, and either `error` or - // `response` is - // available. + // progress. If `true`, the operation is completed, and either `error` + // or `response` is available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. Error *Status `json:"error,omitempty"` - // Metadata: Service-specific metadata associated with the operation. - // It typically - // contains progress information and common metadata such as create - // time. - // Some services might not provide such metadata. Any method that - // returns a - // long-running operation should document the metadata type, if any. + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same - // service that - // originally returns it. If you use the default HTTP mapping, - // the - // `name` should be a resource name ending with + // service that originally returns it. If you use the default HTTP + // mapping, the `name` should be a resource name ending with // `operations/{unique_id}`. Name string `json:"name,omitempty"` - // Response: The normal response of the operation in case of success. - // If the original - // method returns no data on success, such as `Delete`, the response - // is - // `google.protobuf.Empty`. If the original method is - // standard - // `Get`/`Create`/`Update`, the response should be the resource. For - // other - // methods, the response should have the type `XxxResponse`, where - // `Xxx` - // is the original method name. For example, if the original method - // name - // is `TakeSnapshot()`, the inferred response type - // is - // `TakeSnapshotResponse`. + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as `Delete`, the + // response is `google.protobuf.Empty`. If the original method is + // standard `Get`/`Create`/`Update`, the response should be the + // resource. For other methods, the response should have the type + // `XxxResponse`, where `Xxx` is the original method name. For example, + // if the original method name is `TakeSnapshot()`, the inferred + // response type is `TakeSnapshotResponse`. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1329,26 +1447,25 @@ func (s *Operation) MarshalJSON() ([]byte, error) { } // PullRequestFilter: PullRequestFilter contains filter properties for -// matching GitHub Pull -// Requests. +// matching GitHub Pull Requests. type PullRequestFilter struct { - // Branch: Regex of branches to match. - // - // The syntax of the regular expressions accepted is the syntax accepted - // by - // RE2 and described at https://github.com/google/re2/wiki/Syntax + // Branch: Regex of branches to match. 
The syntax of the regular + // expressions accepted is the syntax accepted by RE2 and described at + // https://github.com/google/re2/wiki/Syntax Branch string `json:"branch,omitempty"` - // CommentControl: Configure builds to run only when a repository owner - // or collaborator - // comments `/gcbrun`. + // CommentControl: Configure builds to run whether a repository owner or + // collaborator is required to comment `/gcbrun`. // // Possible values: // "COMMENTS_DISABLED" - Do not require comments on Pull Requests // before builds are triggered. // "COMMENTS_ENABLED" - Enforce that repository owners or - // collaborators must comment on Pull - // Requests before builds are triggered. + // collaborators must comment on Pull Requests before builds are + // triggered. + // "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY" - Enforce that + // repository owners or collaborators must comment on external + // contributors' Pull Requests before builds are triggered. CommentControl string `json:"commentControl,omitempty"` // InvertRegex: If true, branches that do NOT match the git_ref will @@ -1381,23 +1498,18 @@ func (s *PullRequestFilter) MarshalJSON() ([]byte, error) { // PushFilter: Push contains filter properties for matching GitHub git // pushes. type PushFilter struct { - // Branch: Regexes matching branches to build. - // - // The syntax of the regular expressions accepted is the syntax accepted - // by - // RE2 and described at https://github.com/google/re2/wiki/Syntax + // Branch: Regexes matching branches to build. The syntax of the regular + // expressions accepted is the syntax accepted by RE2 and described at + // https://github.com/google/re2/wiki/Syntax Branch string `json:"branch,omitempty"` // InvertRegex: When true, only trigger a build if the revision regex - // does NOT match the - // git_ref regex. + // does NOT match the git_ref regex. InvertRegex bool `json:"invertRegex,omitempty"` - // Tag: Regexes matching tags to build. - // - // The syntax of the regular expressions accepted is the syntax accepted - // by - // RE2 and described at https://github.com/google/re2/wiki/Syntax + // Tag: Regexes matching tags to build. The syntax of the regular + // expressions accepted is the syntax accepted by RE2 and described at + // https://github.com/google/re2/wiki/Syntax Tag string `json:"tag,omitempty"` // ForceSendFields is a list of field names (e.g. "Branch") to @@ -1426,46 +1538,38 @@ func (s *PushFilter) MarshalJSON() ([]byte, error) { // RepoSource: Location of the source in a Google Cloud Source // Repository. type RepoSource struct { - // BranchName: Regex matching branches to build. - // - // The syntax of the regular expressions accepted is the syntax accepted - // by - // RE2 and described at https://github.com/google/re2/wiki/Syntax + // BranchName: Regex matching branches to build. The syntax of the + // regular expressions accepted is the syntax accepted by RE2 and + // described at https://github.com/google/re2/wiki/Syntax BranchName string `json:"branchName,omitempty"` // CommitSha: Explicit commit SHA to build. CommitSha string `json:"commitSha,omitempty"` // Dir: Directory, relative to the source root, in which to run the - // build. - // - // This must be a relative path. If a step's `dir` is specified and is - // an - // absolute path, this value is ignored for that step's execution. + // build. This must be a relative path. If a step's `dir` is specified + // and is an absolute path, this value is ignored for that step's + // execution. 
Dir string `json:"dir,omitempty"` // InvertRegex: Only trigger a build if the revision regex does NOT - // match the revision - // regex. + // match the revision regex. InvertRegex bool `json:"invertRegex,omitempty"` // ProjectId: ID of the project that owns the Cloud Source Repository. - // If omitted, the - // project ID requesting the build is assumed. + // If omitted, the project ID requesting the build is assumed. ProjectId string `json:"projectId,omitempty"` // RepoName: Required. Name of the Cloud Source Repository. RepoName string `json:"repoName,omitempty"` - // Substitutions: Substitutions to use in a triggered build. - // Should only be used with RunBuildTrigger + // Substitutions: Substitutions to use in a triggered build. Should only + // be used with RunBuildTrigger Substitutions map[string]string `json:"substitutions,omitempty"` - // TagName: Regex matching tags to build. - // - // The syntax of the regular expressions accepted is the syntax accepted - // by - // RE2 and described at https://github.com/google/re2/wiki/Syntax + // TagName: Regex matching tags to build. The syntax of the regular + // expressions accepted is the syntax accepted by RE2 and described at + // https://github.com/google/re2/wiki/Syntax TagName string `json:"tagName,omitempty"` // ForceSendFields is a list of field names (e.g. "BranchName") to @@ -1501,19 +1605,14 @@ type Results struct { ArtifactTiming *TimeSpan `json:"artifactTiming,omitempty"` // BuildStepImages: List of build step digests, in the order - // corresponding to build step - // indices. + // corresponding to build step indices. BuildStepImages []string `json:"buildStepImages,omitempty"` // BuildStepOutputs: List of build step outputs, produced by builder - // images, in the order - // corresponding to build step indices. - // - // [Cloud + // images, in the order corresponding to build step indices. [Cloud // Builders](https://cloud.google.com/cloud-build/docs/cloud-builders) - // ca - // n produce this output by writing to `$BUILDER_OUTPUT/output`. - // Only the first 4KB of data is stored. + // can produce this output by writing to `$BUILDER_OUTPUT/output`. Only + // the first 4KB of data is stored. BuildStepOutputs []string `json:"buildStepOutputs,omitempty"` // Images: Container images that were built as a part of the build. @@ -1549,25 +1648,97 @@ func (s *Results) MarshalJSON() ([]byte, error) { // RetryBuildRequest: Specifies a build to retry. type RetryBuildRequest struct { -} + // Id: Required. Build ID of the original build. + Id string `json:"id,omitempty"` -// Secret: Pairs a set of secret environment variables containing -// encrypted -// values with the Cloud KMS key to use to decrypt the value. -type Secret struct { - // KmsKeyName: Cloud KMS key name to use to decrypt these envs. - KmsKeyName string `json:"kmsKeyName,omitempty"` + // Name: The name of the `Build` to retry. Format: + // `projects/{project}/locations/{location}/builds/{build}` + Name string `json:"name,omitempty"` - // SecretEnv: Map of environment variable name to its encrypted - // value. - // - // Secret environment variables must be unique across all of a - // build's + // ProjectId: Required. ID of the project. + ProjectId string `json:"projectId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RetryBuildRequest) MarshalJSON() ([]byte, error) { + type NoMethod RetryBuildRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SMTPDelivery: SMTPDelivery is the delivery configuration for an SMTP +// (email) notification. +type SMTPDelivery struct { + // FromAddress: This is the SMTP account/email that appears in the + // `From:` of the email. If empty, it is assumed to be sender. + FromAddress string `json:"fromAddress,omitempty"` + + // Password: The SMTP sender's password. + Password *NotifierSecretRef `json:"password,omitempty"` + + // Port: The SMTP port of the server. + Port string `json:"port,omitempty"` + + // RecipientAddresses: This is the list of addresses to which we send + // the email (i.e. in the `To:` of the email). + RecipientAddresses []string `json:"recipientAddresses,omitempty"` + + // SenderAddress: This is the SMTP account/email that is used to send + // the message. + SenderAddress string `json:"senderAddress,omitempty"` + + // Server: The address of the SMTP server. + Server string `json:"server,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FromAddress") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FromAddress") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SMTPDelivery) MarshalJSON() ([]byte, error) { + type NoMethod SMTPDelivery + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Secret: Pairs a set of secret environment variables containing +// encrypted values with the Cloud KMS key to use to decrypt the value. +type Secret struct { + // KmsKeyName: Cloud KMS key name to use to decrypt these envs. + KmsKeyName string `json:"kmsKeyName,omitempty"` + + // SecretEnv: Map of environment variable name to its encrypted value. + // Secret environment variables must be unique across all of a build's // secrets, and must be used by at least one build step. Values can be - // at most - // 64 KB in size. 
There can be at most 100 secret values across all of - // a - // build's secrets. + // at most 64 KB in size. There can be at most 100 secret values across + // all of a build's secrets. SecretEnv map[string]string `json:"secretEnv,omitempty"` // ForceSendFields is a list of field names (e.g. "KmsKeyName") to @@ -1593,11 +1764,41 @@ func (s *Secret) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// SlackDelivery: SlackDelivery is the delivery configuration for +// delivering Slack messages via webhooks. See Slack webhook +// documentation at: https://api.slack.com/messaging/webhooks. +type SlackDelivery struct { + // WebhookUri: The secret reference for the Slack webhook URI for + // sending messages to a channel. + WebhookUri *NotifierSecretRef `json:"webhookUri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "WebhookUri") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "WebhookUri") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SlackDelivery) MarshalJSON() ([]byte, error) { + type NoMethod SlackDelivery + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Source: Location of the source in a supported storage service. type Source struct { // RepoSource: If provided, get the source from this location in a Cloud - // Source - // Repository. + // Source Repository. RepoSource *RepoSource `json:"repoSource,omitempty"` // StorageSource: If provided, get the source from this location in @@ -1628,34 +1829,24 @@ func (s *Source) MarshalJSON() ([]byte, error) { } // SourceProvenance: Provenance of the source. Ways to find the original -// source, or verify that -// some source was used for this build. +// source, or verify that some source was used for this build. type SourceProvenance struct { // FileHashes: Output only. Hash(es) of the build source, which can be - // used to verify that - // the original source integrity was maintained in the build. Note - // that - // `FileHashes` will only be populated if `BuildOptions` has requested - // a - // `SourceProvenanceHash`. - // - // The keys to this map are file paths used as build source and the - // values - // contain the hash values for those files. - // - // If the build source came in a single package such as a gzipped - // tarfile - // (`.tar.gz`), the `FileHash` will be for the single path to that file. + // used to verify that the original source integrity was maintained in + // the build. Note that `FileHashes` will only be populated if + // `BuildOptions` has requested a `SourceProvenanceHash`. The keys to + // this map are file paths used as build source and the values contain + // the hash values for those files. 
If the build source came in a single + // package such as a gzipped tarfile (`.tar.gz`), the `FileHash` will be + // for the single path to that file. FileHashes map[string]FileHashes `json:"fileHashes,omitempty"` // ResolvedRepoSource: A copy of the build's `source.repo_source`, if - // exists, with any - // revisions resolved. + // exists, with any revisions resolved. ResolvedRepoSource *RepoSource `json:"resolvedRepoSource,omitempty"` // ResolvedStorageSource: A copy of the build's `source.storage_source`, - // if exists, with any - // generations resolved. + // if exists, with any generations resolved. ResolvedStorageSource *StorageSource `json:"resolvedStorageSource,omitempty"` // ForceSendFields is a list of field names (e.g. "FileHashes") to @@ -1682,32 +1873,24 @@ func (s *SourceProvenance) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. -// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. -// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -1736,24 +1919,19 @@ func (s *Status) MarshalJSON() ([]byte, error) { // StorageSource: Location of the source in an archive file in Google // Cloud Storage. type StorageSource struct { - // Bucket: Google Cloud Storage bucket containing the source - // (see - // [Bucket - // Name - // Requirements](https://cloud.google.com/storage/docs/bucket-naming - // #requirements)). + // Bucket: Google Cloud Storage bucket containing the source (see + // [Bucket Name + // Requirements](https://cloud.google.com/storage/docs/bucket-naming#requ + // irements)). Bucket string `json:"bucket,omitempty"` // Generation: Google Cloud Storage generation for the object. If the - // generation is - // omitted, the latest generation will be used. + // generation is omitted, the latest generation will be used. Generation int64 `json:"generation,omitempty,string"` - // Object: Google Cloud Storage object containing the source. 
- // - // This object must be a gzipped archive file (`.tar.gz`) containing - // source to - // build. + // Object: Google Cloud Storage object containing the source. This + // object must be a gzipped archive file (`.tar.gz`) containing source + // to build. Object string `json:"object,omitempty"` // ForceSendFields is a list of field names (e.g. "Bucket") to @@ -1811,22 +1989,17 @@ func (s *TimeSpan) MarshalJSON() ([]byte, error) { } // Volume: Volume describes a Docker container volume which is mounted -// into build steps -// in order to persist files across build step execution. +// into build steps in order to persist files across build step +// execution. type Volume struct { - // Name: Name of the volume to mount. - // - // Volume names must be unique per build step and must be valid names - // for - // Docker volumes. Each named volume must be used by at least two build - // steps. + // Name: Name of the volume to mount. Volume names must be unique per + // build step and must be valid names for Docker volumes. Each named + // volume must be used by at least two build steps. Name string `json:"name,omitempty"` - // Path: Path at which to mount the volume. - // - // Paths must be absolute and cannot conflict with other volume paths on - // the - // same build step or with certain reserved volume paths. + // Path: Path at which to mount the volume. Paths must be absolute and + // cannot conflict with other volume paths on the same build step or + // with certain reserved volume paths. Path string `json:"path,omitempty"` // ForceSendFields is a list of field names (e.g. "Name") to @@ -1864,23 +2037,15 @@ type OperationsCancelCall struct { } // Cancel: Starts asynchronous cancellation on a long-running operation. -// The server -// makes a best effort to cancel the operation, but success is -// not -// guaranteed. If the server doesn't support this method, it -// returns -// `google.rpc.Code.UNIMPLEMENTED`. Clients can -// use -// Operations.GetOperation or -// other methods to check whether the cancellation succeeded or whether -// the -// operation completed despite cancellation. On successful -// cancellation, -// the operation is not deleted; instead, it becomes an operation -// with -// an Operation.error value with a google.rpc.Status.code of -// 1, -// corresponding to `Code.CANCELLED`. +// The server makes a best effort to cancel the operation, but success +// is not guaranteed. If the server doesn't support this method, it +// returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use +// Operations.GetOperation or other methods to check whether the +// cancellation succeeded or whether the operation completed despite +// cancellation. On successful cancellation, the operation is not +// deleted; instead, it becomes an operation with an Operation.error +// value with a google.rpc.Status.code of 1, corresponding to +// `Code.CANCELLED`. 
func (r *OperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *OperationsCancelCall { c := &OperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -1915,7 +2080,7 @@ func (c *OperationsCancelCall) Header() http.Header { func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1979,7 +2144,7 @@ func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) } return ret, nil // { - // "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", // "flatPath": "v1/operations/{operationsId}:cancel", // "httpMethod": "POST", // "id": "cloudbuild.operations.cancel", @@ -2020,11 +2185,9 @@ type OperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this -// method to poll the operation result at intervals as recommended by -// the API -// service. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. func (r *OperationsService) Get(name string) *OperationsGetCall { c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2068,7 +2231,7 @@ func (c *OperationsGetCall) Header() http.Header { func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2130,7 +2293,7 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", // "flatPath": "v1/operations/{operationsId}", // "httpMethod": "GET", // "id": "cloudbuild.operations.get", @@ -2157,224 +2320,6 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "cloudbuild.operations.list": - -type OperationsListCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists operations that match the specified filter in the -// request. If the -// server doesn't support this method, it returns -// `UNIMPLEMENTED`. -// -// NOTE: the `name` binding allows API services to override the -// binding -// to use different resource name schemes, such as `users/*/operations`. -// To -// override the binding, API services can add a binding such -// as -// "/v1/{name=users/*}/operations" to their service configuration. -// For backwards compatibility, the default name includes the -// operations -// collection id, however overriding users must ensure the name -// binding -// is the parent resource, without the operations collection id. -func (r *OperationsService) List(name string) *OperationsListCall { - c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Filter sets the optional parameter "filter": The standard list -// filter. -func (c *OperationsListCall) Filter(filter string) *OperationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// PageSize sets the optional parameter "pageSize": The standard list -// page size. -func (c *OperationsListCall) PageSize(pageSize int64) *OperationsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": The standard list -// page token. -func (c *OperationsListCall) PageToken(pageToken string) *OperationsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *OperationsListCall) Fields(s ...googleapi.Field) *OperationsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *OperationsListCall) IfNoneMatch(entityTag string) *OperationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *OperationsListCall) Context(ctx context.Context) *OperationsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *OperationsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudbuild.operations.list" call. -// Exactly one of *ListOperationsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListOperationsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListOperationsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. 
To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", - // "flatPath": "v1/operations", - // "httpMethod": "GET", - // "id": "cloudbuild.operations.list", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "filter": { - // "description": "The standard list filter.", - // "location": "query", - // "type": "string" - // }, - // "name": { - // "description": "The name of the operation's parent resource.", - // "location": "path", - // "pattern": "^operations$", - // "required": true, - // "type": "string" - // }, - // "pageSize": { - // "description": "The standard list page size.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "The standard list page token.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "response": { - // "$ref": "ListOperationsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *OperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - // method id "cloudbuild.projects.builds.cancel": type ProjectsBuildsCancelCall struct { @@ -2423,7 +2368,7 @@ func (c *ProjectsBuildsCancelCall) Header() http.Header { func (c *ProjectsBuildsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2535,12 +2480,9 @@ type ProjectsBuildsCreateCall struct { header_ http.Header } -// Create: Starts a build with the specified configuration. -// -// This method returns a long-running `Operation`, which includes the -// build -// ID. Pass the build ID to `GetBuild` to determine the build status -// (such as +// Create: Starts a build with the specified configuration. This method +// returns a long-running `Operation`, which includes the build ID. Pass +// the build ID to `GetBuild` to determine the build status (such as // `SUCCESS` or `FAILURE`). func (r *ProjectsBuildsService) Create(projectId string, build *Build) *ProjectsBuildsCreateCall { c := &ProjectsBuildsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -2549,6 +2491,14 @@ func (r *ProjectsBuildsService) Create(projectId string, build *Build) *Projects return c } +// Parent sets the optional parameter "parent": The parent resource +// where this build will be created. 
Format: +// `projects/{project}/locations/{location}` +func (c *ProjectsBuildsCreateCall) Parent(parent string) *ProjectsBuildsCreateCall { + c.urlParams_.Set("parent", parent) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -2576,7 +2526,7 @@ func (c *ProjectsBuildsCreateCall) Header() http.Header { func (c *ProjectsBuildsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2640,7 +2590,7 @@ func (c *ProjectsBuildsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Starts a build with the specified configuration.\n\nThis method returns a long-running `Operation`, which includes the build\nID. Pass the build ID to `GetBuild` to determine the build status (such as\n`SUCCESS` or `FAILURE`).", + // "description": "Starts a build with the specified configuration. This method returns a long-running `Operation`, which includes the build ID. Pass the build ID to `GetBuild` to determine the build status (such as `SUCCESS` or `FAILURE`).", // "flatPath": "v1/projects/{projectId}/builds", // "httpMethod": "POST", // "id": "cloudbuild.projects.builds.create", @@ -2648,6 +2598,11 @@ func (c *ProjectsBuildsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, // "projectId" // ], // "parameters": { + // "parent": { + // "description": "The parent resource where this build will be created. Format: `projects/{project}/locations/{location}`", + // "location": "query", + // "type": "string" + // }, // "projectId": { // "description": "Required. ID of the project.", // "location": "path", @@ -2681,10 +2636,8 @@ type ProjectsBuildsGetCall struct { header_ http.Header } -// Get: Returns information about a previously requested build. -// -// The `Build` that is returned includes its status (such as -// `SUCCESS`, +// Get: Returns information about a previously requested build. The +// `Build` that is returned includes its status (such as `SUCCESS`, // `FAILURE`, or `WORKING`), and timing information. func (r *ProjectsBuildsService) Get(projectId string, id string) *ProjectsBuildsGetCall { c := &ProjectsBuildsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -2693,7 +2646,15 @@ func (r *ProjectsBuildsService) Get(projectId string, id string) *ProjectsBuilds return c } -// Fields allows partial responses to be retrieved. See +// Name sets the optional parameter "name": The name of the `Build` to +// retrieve. Format: +// `projects/{project}/locations/{location}/builds/{build}` +func (c *ProjectsBuildsGetCall) Name(name string) *ProjectsBuildsGetCall { + c.urlParams_.Set("name", name) + return c +} + +// Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *ProjectsBuildsGetCall) Fields(s ...googleapi.Field) *ProjectsBuildsGetCall { @@ -2730,7 +2691,7 @@ func (c *ProjectsBuildsGetCall) Header() http.Header { func (c *ProjectsBuildsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2781,7 +2742,1234 @@ func (c *ProjectsBuildsGetCall) Do(opts ...googleapi.CallOption) (*Build, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Build{ + ret := &Build{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns information about a previously requested build. The `Build` that is returned includes its status (such as `SUCCESS`, `FAILURE`, or `WORKING`), and timing information.", + // "flatPath": "v1/projects/{projectId}/builds/{id}", + // "httpMethod": "GET", + // "id": "cloudbuild.projects.builds.get", + // "parameterOrder": [ + // "projectId", + // "id" + // ], + // "parameters": { + // "id": { + // "description": "Required. ID of the build.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "name": { + // "description": "The name of the `Build` to retrieve. Format: `projects/{project}/locations/{location}/builds/{build}`", + // "location": "query", + // "type": "string" + // }, + // "projectId": { + // "description": "Required. ID of the project.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/builds/{id}", + // "response": { + // "$ref": "Build" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "cloudbuild.projects.builds.list": + +type ProjectsBuildsListCall struct { + s *Service + projectId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists previously requested builds. Previously requested builds +// may still be in-progress, or may have finished successfully or +// unsuccessfully. +func (r *ProjectsBuildsService) List(projectId string) *ProjectsBuildsListCall { + c := &ProjectsBuildsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + return c +} + +// Filter sets the optional parameter "filter": The raw filter text to +// constrain the results. +func (c *ProjectsBuildsListCall) Filter(filter string) *ProjectsBuildsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": Number of results to +// return in the list. +func (c *ProjectsBuildsListCall) PageSize(pageSize int64) *ProjectsBuildsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Token to provide +// to skip to a particular spot in the list. +func (c *ProjectsBuildsListCall) PageToken(pageToken string) *ProjectsBuildsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Parent sets the optional parameter "parent": The parent of the +// collection of `Builds`. 
Format: +// `projects/{project}/locations/location` +func (c *ProjectsBuildsListCall) Parent(parent string) *ProjectsBuildsListCall { + c.urlParams_.Set("parent", parent) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsBuildsListCall) Fields(s ...googleapi.Field) *ProjectsBuildsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsBuildsListCall) IfNoneMatch(entityTag string) *ProjectsBuildsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsBuildsListCall) Context(ctx context.Context) *ProjectsBuildsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsBuildsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsBuildsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/builds") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudbuild.projects.builds.list" call. +// Exactly one of *ListBuildsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListBuildsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsBuildsListCall) Do(opts ...googleapi.CallOption) (*ListBuildsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListBuildsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists previously requested builds. Previously requested builds may still be in-progress, or may have finished successfully or unsuccessfully.", + // "flatPath": "v1/projects/{projectId}/builds", + // "httpMethod": "GET", + // "id": "cloudbuild.projects.builds.list", + // "parameterOrder": [ + // "projectId" + // ], + // "parameters": { + // "filter": { + // "description": "The raw filter text to constrain the results.", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "Number of results to return in the list.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Token to provide to skip to a particular spot in the list.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "The parent of the collection of `Builds`. Format: `projects/{project}/locations/location`", + // "location": "query", + // "type": "string" + // }, + // "projectId": { + // "description": "Required. ID of the project.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/builds", + // "response": { + // "$ref": "ListBuildsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsBuildsListCall) Pages(ctx context.Context, f func(*ListBuildsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "cloudbuild.projects.builds.retry": + +type ProjectsBuildsRetryCall struct { + s *Service + projectId string + id string + retrybuildrequest *RetryBuildRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Retry: Creates a new build based on the specified build. This method +// creates a new build using the original build request, which may or +// may not result in an identical build. For triggered builds: * +// Triggered builds resolve to a precise revision; therefore a retry of +// a triggered build will result in a build that uses the same revision. +// For non-triggered builds that specify `RepoSource`: * If the original +// build built from the tip of a branch, the retried build will build +// from the tip of that branch, which may not be the same revision as +// the original build. 
* If the original build specified a commit sha or +// revision ID, the retried build will use the identical source. For +// builds that specify `StorageSource`: * If the original build pulled +// source from Google Cloud Storage without specifying the generation of +// the object, the new build will use the current object, which may be +// different from the original build source. * If the original build +// pulled source from Cloud Storage and specified the generation of the +// object, the new build will attempt to use the same object, which may +// or may not be available depending on the bucket's lifecycle +// management settings. +func (r *ProjectsBuildsService) Retry(projectId string, id string, retrybuildrequest *RetryBuildRequest) *ProjectsBuildsRetryCall { + c := &ProjectsBuildsRetryCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.id = id + c.retrybuildrequest = retrybuildrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsBuildsRetryCall) Fields(s ...googleapi.Field) *ProjectsBuildsRetryCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsBuildsRetryCall) Context(ctx context.Context) *ProjectsBuildsRetryCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsBuildsRetryCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsBuildsRetryCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.retrybuildrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/builds/{id}:retry") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "id": c.id, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudbuild.projects.builds.retry" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsBuildsRetryCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new build based on the specified build. This method creates a new build using the original build request, which may or may not result in an identical build. For triggered builds: * Triggered builds resolve to a precise revision; therefore a retry of a triggered build will result in a build that uses the same revision. For non-triggered builds that specify `RepoSource`: * If the original build built from the tip of a branch, the retried build will build from the tip of that branch, which may not be the same revision as the original build. * If the original build specified a commit sha or revision ID, the retried build will use the identical source. For builds that specify `StorageSource`: * If the original build pulled source from Google Cloud Storage without specifying the generation of the object, the new build will use the current object, which may be different from the original build source. * If the original build pulled source from Cloud Storage and specified the generation of the object, the new build will attempt to use the same object, which may or may not be available depending on the bucket's lifecycle management settings.", + // "flatPath": "v1/projects/{projectId}/builds/{id}:retry", + // "httpMethod": "POST", + // "id": "cloudbuild.projects.builds.retry", + // "parameterOrder": [ + // "projectId", + // "id" + // ], + // "parameters": { + // "id": { + // "description": "Required. Build ID of the original build.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Required. ID of the project.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/builds/{id}:retry", + // "request": { + // "$ref": "RetryBuildRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "cloudbuild.projects.locations.builds.cancel": + +type ProjectsLocationsBuildsCancelCall struct { + s *Service + name string + cancelbuildrequest *CancelBuildRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Cancel: Cancels a build in progress. +func (r *ProjectsLocationsBuildsService) Cancel(name string, cancelbuildrequest *CancelBuildRequest) *ProjectsLocationsBuildsCancelCall { + c := &ProjectsLocationsBuildsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.cancelbuildrequest = cancelbuildrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsLocationsBuildsCancelCall) Fields(s ...googleapi.Field) *ProjectsLocationsBuildsCancelCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsBuildsCancelCall) Context(ctx context.Context) *ProjectsLocationsBuildsCancelCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsBuildsCancelCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBuildsCancelCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.cancelbuildrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:cancel") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudbuild.projects.locations.builds.cancel" call. +// Exactly one of *Build or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Build.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsLocationsBuildsCancelCall) Do(opts ...googleapi.CallOption) (*Build, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Build{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Cancels a build in progress.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/builds/{buildsId}:cancel", + // "httpMethod": "POST", + // "id": "cloudbuild.projects.locations.builds.cancel", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the `Build` to retrieve. 
Format: `projects/{project}/locations/{location}/builds/{build}`", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/builds/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:cancel", + // "request": { + // "$ref": "CancelBuildRequest" + // }, + // "response": { + // "$ref": "Build" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "cloudbuild.projects.locations.builds.create": + +type ProjectsLocationsBuildsCreateCall struct { + s *Service + parent string + build *Build + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Starts a build with the specified configuration. This method +// returns a long-running `Operation`, which includes the build ID. Pass +// the build ID to `GetBuild` to determine the build status (such as +// `SUCCESS` or `FAILURE`). +func (r *ProjectsLocationsBuildsService) Create(parent string, build *Build) *ProjectsLocationsBuildsCreateCall { + c := &ProjectsLocationsBuildsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.build = build + return c +} + +// ProjectId sets the optional parameter "projectId": Required. ID of +// the project. +func (c *ProjectsLocationsBuildsCreateCall) ProjectId(projectId string) *ProjectsLocationsBuildsCreateCall { + c.urlParams_.Set("projectId", projectId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsBuildsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsBuildsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsBuildsCreateCall) Context(ctx context.Context) *ProjectsLocationsBuildsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsBuildsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBuildsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.build) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/builds") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudbuild.projects.locations.builds.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBuildsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Starts a build with the specified configuration. This method returns a long-running `Operation`, which includes the build ID. Pass the build ID to `GetBuild` to determine the build status (such as `SUCCESS` or `FAILURE`).", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/builds", + // "httpMethod": "POST", + // "id": "cloudbuild.projects.locations.builds.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The parent resource where this build will be created. Format: `projects/{project}/locations/{location}`", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Required. ID of the project.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/builds", + // "request": { + // "$ref": "Build" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "cloudbuild.projects.locations.builds.get": + +type ProjectsLocationsBuildsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns information about a previously requested build. The +// `Build` that is returned includes its status (such as `SUCCESS`, +// `FAILURE`, or `WORKING`), and timing information. +func (r *ProjectsLocationsBuildsService) Get(name string) *ProjectsLocationsBuildsGetCall { + c := &ProjectsLocationsBuildsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Id sets the optional parameter "id": Required. ID of the build. +func (c *ProjectsLocationsBuildsGetCall) Id(id string) *ProjectsLocationsBuildsGetCall { + c.urlParams_.Set("id", id) + return c +} + +// ProjectId sets the optional parameter "projectId": Required. ID of +// the project. +func (c *ProjectsLocationsBuildsGetCall) ProjectId(projectId string) *ProjectsLocationsBuildsGetCall { + c.urlParams_.Set("projectId", projectId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsLocationsBuildsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsBuildsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsBuildsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsBuildsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsBuildsGetCall) Context(ctx context.Context) *ProjectsLocationsBuildsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsBuildsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBuildsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudbuild.projects.locations.builds.get" call. +// Exactly one of *Build or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Build.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsLocationsBuildsGetCall) Do(opts ...googleapi.CallOption) (*Build, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Build{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns information about a previously requested build. 
The `Build` that is returned includes its status (such as `SUCCESS`, `FAILURE`, or `WORKING`), and timing information.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/builds/{buildsId}", + // "httpMethod": "GET", + // "id": "cloudbuild.projects.locations.builds.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "id": { + // "description": "Required. ID of the build.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "The name of the `Build` to retrieve. Format: `projects/{project}/locations/{location}/builds/{build}`", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/builds/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Required. ID of the project.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Build" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "cloudbuild.projects.locations.builds.list": + +type ProjectsLocationsBuildsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists previously requested builds. Previously requested builds +// may still be in-progress, or may have finished successfully or +// unsuccessfully. +func (r *ProjectsLocationsBuildsService) List(parent string) *ProjectsLocationsBuildsListCall { + c := &ProjectsLocationsBuildsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Filter sets the optional parameter "filter": The raw filter text to +// constrain the results. +func (c *ProjectsLocationsBuildsListCall) Filter(filter string) *ProjectsLocationsBuildsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": Number of results to +// return in the list. +func (c *ProjectsLocationsBuildsListCall) PageSize(pageSize int64) *ProjectsLocationsBuildsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Token to provide +// to skip to a particular spot in the list. +func (c *ProjectsLocationsBuildsListCall) PageToken(pageToken string) *ProjectsLocationsBuildsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ProjectId sets the optional parameter "projectId": Required. ID of +// the project. +func (c *ProjectsLocationsBuildsListCall) ProjectId(projectId string) *ProjectsLocationsBuildsListCall { + c.urlParams_.Set("projectId", projectId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsBuildsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsBuildsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *ProjectsLocationsBuildsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsBuildsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsBuildsListCall) Context(ctx context.Context) *ProjectsLocationsBuildsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsBuildsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBuildsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/builds") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudbuild.projects.locations.builds.list" call. +// Exactly one of *ListBuildsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListBuildsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsBuildsListCall) Do(opts ...googleapi.CallOption) (*ListBuildsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListBuildsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists previously requested builds. 
Previously requested builds may still be in-progress, or may have finished successfully or unsuccessfully.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/builds", + // "httpMethod": "GET", + // "id": "cloudbuild.projects.locations.builds.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "filter": { + // "description": "The raw filter text to constrain the results.", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "Number of results to return in the list.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Token to provide to skip to a particular spot in the list.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "The parent of the collection of `Builds`. Format: `projects/{project}/locations/location`", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Required. ID of the project.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/builds", + // "response": { + // "$ref": "ListBuildsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsBuildsListCall) Pages(ctx context.Context, f func(*ListBuildsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "cloudbuild.projects.locations.builds.retry": + +type ProjectsLocationsBuildsRetryCall struct { + s *Service + name string + retrybuildrequest *RetryBuildRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Retry: Creates a new build based on the specified build. This method +// creates a new build using the original build request, which may or +// may not result in an identical build. For triggered builds: * +// Triggered builds resolve to a precise revision; therefore a retry of +// a triggered build will result in a build that uses the same revision. +// For non-triggered builds that specify `RepoSource`: * If the original +// build built from the tip of a branch, the retried build will build +// from the tip of that branch, which may not be the same revision as +// the original build. * If the original build specified a commit sha or +// revision ID, the retried build will use the identical source. For +// builds that specify `StorageSource`: * If the original build pulled +// source from Google Cloud Storage without specifying the generation of +// the object, the new build will use the current object, which may be +// different from the original build source. * If the original build +// pulled source from Cloud Storage and specified the generation of the +// object, the new build will attempt to use the same object, which may +// or may not be available depending on the bucket's lifecycle +// management settings. 
+func (r *ProjectsLocationsBuildsService) Retry(name string, retrybuildrequest *RetryBuildRequest) *ProjectsLocationsBuildsRetryCall { + c := &ProjectsLocationsBuildsRetryCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.retrybuildrequest = retrybuildrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsBuildsRetryCall) Fields(s ...googleapi.Field) *ProjectsLocationsBuildsRetryCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsBuildsRetryCall) Context(ctx context.Context) *ProjectsLocationsBuildsRetryCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsBuildsRetryCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBuildsRetryCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.retrybuildrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:retry") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudbuild.projects.locations.builds.retry" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBuildsRetryCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -2793,31 +3981,28 @@ func (c *ProjectsBuildsGetCall) Do(opts ...googleapi.CallOption) (*Build, error) } return ret, nil // { - // "description": "Returns information about a previously requested build.\n\nThe `Build` that is returned includes its status (such as `SUCCESS`,\n`FAILURE`, or `WORKING`), and timing information.", - // "flatPath": "v1/projects/{projectId}/builds/{id}", - // "httpMethod": "GET", - // "id": "cloudbuild.projects.builds.get", + // "description": "Creates a new build based on the specified build. This method creates a new build using the original build request, which may or may not result in an identical build. For triggered builds: * Triggered builds resolve to a precise revision; therefore a retry of a triggered build will result in a build that uses the same revision. For non-triggered builds that specify `RepoSource`: * If the original build built from the tip of a branch, the retried build will build from the tip of that branch, which may not be the same revision as the original build. * If the original build specified a commit sha or revision ID, the retried build will use the identical source. For builds that specify `StorageSource`: * If the original build pulled source from Google Cloud Storage without specifying the generation of the object, the new build will use the current object, which may be different from the original build source. * If the original build pulled source from Cloud Storage and specified the generation of the object, the new build will attempt to use the same object, which may or may not be available depending on the bucket's lifecycle management settings.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/builds/{buildsId}:retry", + // "httpMethod": "POST", + // "id": "cloudbuild.projects.locations.builds.retry", // "parameterOrder": [ - // "projectId", - // "id" + // "name" // ], // "parameters": { - // "id": { - // "description": "Required. ID of the build.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "projectId": { - // "description": "Required. ID of the project.", + // "name": { + // "description": "The name of the `Build` to retry. Format: `projects/{project}/locations/{location}/builds/{build}`", // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/builds/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v1/projects/{projectId}/builds/{id}", + // "path": "v1/{+name}:retry", + // "request": { + // "$ref": "RetryBuildRequest" + // }, // "response": { - // "$ref": "Build" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -2826,118 +4011,95 @@ func (c *ProjectsBuildsGetCall) Do(opts ...googleapi.CallOption) (*Build, error) } -// method id "cloudbuild.projects.builds.list": - -type ProjectsBuildsListCall struct { - s *Service - projectId string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists previously requested builds. 
-// -// Previously requested builds may still be in-progress, or may have -// finished -// successfully or unsuccessfully. -func (r *ProjectsBuildsService) List(projectId string) *ProjectsBuildsListCall { - c := &ProjectsBuildsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.projectId = projectId - return c -} - -// Filter sets the optional parameter "filter": The raw filter text to -// constrain the results. -func (c *ProjectsBuildsListCall) Filter(filter string) *ProjectsBuildsListCall { - c.urlParams_.Set("filter", filter) - return c -} +// method id "cloudbuild.projects.locations.operations.cancel": -// PageSize sets the optional parameter "pageSize": Number of results to -// return in the list. -func (c *ProjectsBuildsListCall) PageSize(pageSize int64) *ProjectsBuildsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c +type ProjectsLocationsOperationsCancelCall struct { + s *Service + name string + canceloperationrequest *CancelOperationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": Token to provide -// to skip to a particular spot in the list. -func (c *ProjectsBuildsListCall) PageToken(pageToken string) *ProjectsBuildsListCall { - c.urlParams_.Set("pageToken", pageToken) +// Cancel: Starts asynchronous cancellation on a long-running operation. +// The server makes a best effort to cancel the operation, but success +// is not guaranteed. If the server doesn't support this method, it +// returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use +// Operations.GetOperation or other methods to check whether the +// cancellation succeeded or whether the operation completed despite +// cancellation. On successful cancellation, the operation is not +// deleted; instead, it becomes an operation with an Operation.error +// value with a google.rpc.Status.code of 1, corresponding to +// `Code.CANCELLED`. +func (r *ProjectsLocationsOperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *ProjectsLocationsOperationsCancelCall { + c := &ProjectsLocationsOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.canceloperationrequest = canceloperationrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsBuildsListCall) Fields(s ...googleapi.Field) *ProjectsBuildsListCall { +func (c *ProjectsLocationsOperationsCancelCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsCancelCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsBuildsListCall) IfNoneMatch(entityTag string) *ProjectsBuildsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *ProjectsBuildsListCall) Context(ctx context.Context) *ProjectsBuildsListCall { +func (c *ProjectsLocationsOperationsCancelCall) Context(ctx context.Context) *ProjectsLocationsOperationsCancelCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsBuildsListCall) Header() http.Header { +func (c *ProjectsLocationsOperationsCancelCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsBuildsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsOperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceloperationrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/builds") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:cancel") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "projectId": c.projectId, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "cloudbuild.projects.builds.list" call. -// Exactly one of *ListBuildsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListBuildsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsBuildsListCall) Do(opts ...googleapi.CallOption) (*ListBuildsResponse, error) { +// Do executes the "cloudbuild.projects.locations.operations.cancel" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsLocationsOperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -2956,7 +4118,7 @@ func (c *ProjectsBuildsListCall) Do(opts ...googleapi.CallOption) (*ListBuildsRe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListBuildsResponse{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -2968,40 +4130,28 @@ func (c *ProjectsBuildsListCall) Do(opts ...googleapi.CallOption) (*ListBuildsRe } return ret, nil // { - // "description": "Lists previously requested builds.\n\nPreviously requested builds may still be in-progress, or may have finished\nsuccessfully or unsuccessfully.", - // "flatPath": "v1/projects/{projectId}/builds", - // "httpMethod": "GET", - // "id": "cloudbuild.projects.builds.list", + // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel", + // "httpMethod": "POST", + // "id": "cloudbuild.projects.locations.operations.cancel", // "parameterOrder": [ - // "projectId" + // "name" // ], // "parameters": { - // "filter": { - // "description": "The raw filter text to constrain the results.", - // "location": "query", - // "type": "string" - // }, - // "pageSize": { - // "description": "Number of results to return in the list.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Token to provide to skip to a particular spot in the list.", - // "location": "query", - // "type": "string" - // }, - // "projectId": { - // "description": "Required. ID of the project.", + // "name": { + // "description": "The name of the operation resource to be cancelled.", // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v1/projects/{projectId}/builds", + // "path": "v1/{+name}:cancel", + // "request": { + // "$ref": "CancelOperationRequest" + // }, // "response": { - // "$ref": "ListBuildsResponse" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -3010,147 +4160,95 @@ func (c *ProjectsBuildsListCall) Do(opts ...googleapi.CallOption) (*ListBuildsRe } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *ProjectsBuildsListCall) Pages(ctx context.Context, f func(*ListBuildsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "cloudbuild.projects.builds.retry": +// method id "cloudbuild.projects.locations.operations.get": -type ProjectsBuildsRetryCall struct { - s *Service - projectId string - id string - retrybuildrequest *RetryBuildRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsOperationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Retry: Creates a new build based on the specified build. -// -// This method creates a new build using the original build request, -// which may -// or may not result in an identical build. -// -// For triggered builds: -// -// * Triggered builds resolve to a precise revision; therefore a retry -// of a -// triggered build will result in a build that uses the same -// revision. -// -// For non-triggered builds that specify `RepoSource`: -// -// * If the original build built from the tip of a branch, the retried -// build -// will build from the tip of that branch, which may not be the same -// revision -// as the original build. -// * If the original build specified a commit sha or revision ID, the -// retried -// build will use the identical source. -// -// For builds that specify `StorageSource`: -// -// * If the original build pulled source from Google Cloud Storage -// without -// specifying the generation of the object, the new build will use the -// current -// object, which may be different from the original build source. -// * If the original build pulled source from Cloud Storage and -// specified the -// generation of the object, the new build will attempt to use the -// same -// object, which may or may not be available depending on the -// bucket's -// lifecycle management settings. -func (r *ProjectsBuildsService) Retry(projectId string, id string, retrybuildrequest *RetryBuildRequest) *ProjectsBuildsRetryCall { - c := &ProjectsBuildsRetryCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.projectId = projectId - c.id = id - c.retrybuildrequest = retrybuildrequest +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. +func (r *ProjectsLocationsOperationsService) Get(name string) *ProjectsLocationsOperationsGetCall { + c := &ProjectsLocationsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsBuildsRetryCall) Fields(s ...googleapi.Field) *ProjectsBuildsRetryCall { +func (c *ProjectsLocationsOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsBuildsRetryCall) Context(ctx context.Context) *ProjectsBuildsRetryCall { +func (c *ProjectsLocationsOperationsGetCall) Context(ctx context.Context) *ProjectsLocationsOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsBuildsRetryCall) Header() http.Header { +func (c *ProjectsLocationsOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsBuildsRetryCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.retrybuildrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/builds/{id}:retry") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "projectId": c.projectId, - "id": c.id, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "cloudbuild.projects.builds.retry" call. +// Do executes the "cloudbuild.projects.locations.operations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsBuildsRetryCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -3181,32 +4279,23 @@ func (c *ProjectsBuildsRetryCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a new build based on the specified build.\n\nThis method creates a new build using the original build request, which may\nor may not result in an identical build.\n\nFor triggered builds:\n\n* Triggered builds resolve to a precise revision; therefore a retry of a\ntriggered build will result in a build that uses the same revision.\n\nFor non-triggered builds that specify `RepoSource`:\n\n* If the original build built from the tip of a branch, the retried build\nwill build from the tip of that branch, which may not be the same revision\nas the original build.\n* If the original build specified a commit sha or revision ID, the retried\nbuild will use the identical source.\n\nFor builds that specify `StorageSource`:\n\n* If the original build pulled source from Google Cloud Storage without\nspecifying the generation of the object, the new build will use the current\nobject, which may be different from the original build source.\n* If the original build pulled source from Cloud Storage and specified the\ngeneration of the object, the new build will attempt to use the same\nobject, which may or may not be available depending on the bucket's\nlifecycle management settings.", - // "flatPath": "v1/projects/{projectId}/builds/{id}:retry", - // "httpMethod": "POST", - // "id": "cloudbuild.projects.builds.retry", + // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", + // "httpMethod": "GET", + // "id": "cloudbuild.projects.locations.operations.get", // "parameterOrder": [ - // "projectId", - // "id" + // "name" // ], // "parameters": { - // "id": { - // "description": "Required. Build ID of the original build.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "projectId": { - // "description": "Required. ID of the project.", + // "name": { + // "description": "The name of the operation resource.", // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v1/projects/{projectId}/builds/{id}:retry", - // "request": { - // "$ref": "RetryBuildRequest" - // }, + // "path": "v1/{+name}", // "response": { // "$ref": "Operation" // }, @@ -3228,9 +4317,7 @@ type ProjectsTriggersCreateCall struct { header_ http.Header } -// Create: Creates a new `BuildTrigger`. -// -// This API is experimental. +// Create: Creates a new `BuildTrigger`. This API is experimental. 
func (r *ProjectsTriggersService) Create(projectId string, buildtrigger *BuildTrigger) *ProjectsTriggersCreateCall { c := &ProjectsTriggersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -3265,7 +4352,7 @@ func (c *ProjectsTriggersCreateCall) Header() http.Header { func (c *ProjectsTriggersCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3329,7 +4416,7 @@ func (c *ProjectsTriggersCreateCall) Do(opts ...googleapi.CallOption) (*BuildTri } return ret, nil // { - // "description": "Creates a new `BuildTrigger`.\n\nThis API is experimental.", + // "description": "Creates a new `BuildTrigger`. This API is experimental.", // "flatPath": "v1/projects/{projectId}/triggers", // "httpMethod": "POST", // "id": "cloudbuild.projects.triggers.create", @@ -3369,9 +4456,7 @@ type ProjectsTriggersDeleteCall struct { header_ http.Header } -// Delete: Deletes a `BuildTrigger` by its project ID and trigger -// ID. -// +// Delete: Deletes a `BuildTrigger` by its project ID and trigger ID. // This API is experimental. func (r *ProjectsTriggersService) Delete(projectId string, triggerId string) *ProjectsTriggersDeleteCall { c := &ProjectsTriggersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -3407,7 +4492,7 @@ func (c *ProjectsTriggersDeleteCall) Header() http.Header { func (c *ProjectsTriggersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3467,7 +4552,7 @@ func (c *ProjectsTriggersDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, e } return ret, nil // { - // "description": "Deletes a `BuildTrigger` by its project ID and trigger ID.\n\nThis API is experimental.", + // "description": "Deletes a `BuildTrigger` by its project ID and trigger ID. This API is experimental.", // "flatPath": "v1/projects/{projectId}/triggers/{triggerId}", // "httpMethod": "DELETE", // "id": "cloudbuild.projects.triggers.delete", @@ -3512,9 +4597,8 @@ type ProjectsTriggersGetCall struct { header_ http.Header } -// Get: Returns information about a `BuildTrigger`. -// -// This API is experimental. +// Get: Returns information about a `BuildTrigger`. This API is +// experimental. 
func (r *ProjectsTriggersService) Get(projectId string, triggerId string) *ProjectsTriggersGetCall { c := &ProjectsTriggersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -3559,7 +4643,7 @@ func (c *ProjectsTriggersGetCall) Header() http.Header { func (c *ProjectsTriggersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3622,7 +4706,7 @@ func (c *ProjectsTriggersGetCall) Do(opts ...googleapi.CallOption) (*BuildTrigge } return ret, nil // { - // "description": "Returns information about a `BuildTrigger`.\n\nThis API is experimental.", + // "description": "Returns information about a `BuildTrigger`. This API is experimental.", // "flatPath": "v1/projects/{projectId}/triggers/{triggerId}", // "httpMethod": "GET", // "id": "cloudbuild.projects.triggers.get", @@ -3666,9 +4750,7 @@ type ProjectsTriggersListCall struct { header_ http.Header } -// List: Lists existing `BuildTrigger`s. -// -// This API is experimental. +// List: Lists existing `BuildTrigger`s. This API is experimental. func (r *ProjectsTriggersService) List(projectId string) *ProjectsTriggersListCall { c := &ProjectsTriggersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -3726,7 +4808,7 @@ func (c *ProjectsTriggersListCall) Header() http.Header { func (c *ProjectsTriggersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3788,7 +4870,7 @@ func (c *ProjectsTriggersListCall) Do(opts ...googleapi.CallOption) (*ListBuildT } return ret, nil // { - // "description": "Lists existing `BuildTrigger`s.\n\nThis API is experimental.", + // "description": "Lists existing `BuildTrigger`s. This API is experimental.", // "flatPath": "v1/projects/{projectId}/triggers", // "httpMethod": "GET", // "id": "cloudbuild.projects.triggers.list", @@ -3858,9 +4940,7 @@ type ProjectsTriggersPatchCall struct { header_ http.Header } -// Patch: Updates a `BuildTrigger` by its project ID and trigger -// ID. -// +// Patch: Updates a `BuildTrigger` by its project ID and trigger ID. // This API is experimental. func (r *ProjectsTriggersService) Patch(projectId string, triggerId string, buildtrigger *BuildTrigger) *ProjectsTriggersPatchCall { c := &ProjectsTriggersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -3897,7 +4977,7 @@ func (c *ProjectsTriggersPatchCall) Header() http.Header { func (c *ProjectsTriggersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3962,7 +5042,7 @@ func (c *ProjectsTriggersPatchCall) Do(opts ...googleapi.CallOption) (*BuildTrig } return ret, nil // { - // "description": "Updates a `BuildTrigger` by its project ID and trigger ID.\n\nThis API is experimental.", + // "description": "Updates a `BuildTrigger` by its project ID and trigger ID. 
This API is experimental.", // "flatPath": "v1/projects/{projectId}/triggers/{triggerId}", // "httpMethod": "PATCH", // "id": "cloudbuild.projects.triggers.patch", @@ -4046,7 +5126,7 @@ func (c *ProjectsTriggersRunCall) Header() http.Header { func (c *ProjectsTriggersRunCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-api.json b/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-api.json index 994cd148104..7a51c0e950d 100644 --- a/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-api.json +++ b/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-api.json @@ -108,7 +108,7 @@ "operations": { "methods": { "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v1/operations/{operationsId}", "httpMethod": "GET", "id": "cloudfunctions.operations.get", @@ -133,14 +133,14 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", "flatPath": "v1/operations", "httpMethod": "GET", "id": "cloudfunctions.operations.list", "parameterOrder": [], "parameters": { "filter": { - "description": "Required. A filter for matching the requested operations.\u003cbr\u003e\u003cbr\u003e The supported formats of \u003cb\u003efilter\u003c/b\u003e are:\u003cbr\u003e To query for a specific function: \u003ccode\u003eproject:*,location:*,function:*\u003c/code\u003e\u003cbr\u003e To query for all of the latest operations for a project: \u003ccode\u003eproject:*,latest:true\u003c/code\u003e", + "description": "Required. A filter for matching the requested operations. 
The supported formats of *filter* are: To query for a specific function: project:*,location:*,function:* To query for all of the latest operations for a project: project:*,latest:true", "location": "query", "type": "string" }, @@ -150,13 +150,13 @@ "type": "string" }, "pageSize": { - "description": "The maximum number of records that should be returned.\u003cbr\u003e Requested page size cannot exceed 100. If not set, the default page size is 100.\u003cbr\u003e\u003cbr\u003e Pagination is only supported when querying for a specific function.", + "description": "The maximum number of records that should be returned. Requested page size cannot exceed 100. If not set, the default page size is 100. Pagination is only supported when querying for a specific function.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Token identifying which result to start with, which is returned by a previous list call.\u003cbr\u003e\u003cbr\u003e Pagination is only supported when querying for a specific function.", + "description": "Token identifying which result to start with, which is returned by a previous list call. Pagination is only supported when querying for a specific function.", "location": "query", "type": "string" } @@ -221,7 +221,7 @@ "functions": { "methods": { "call": { - "description": "Synchronously invokes a deployed Cloud Function. To be used for testing\npurposes as very limited traffic is allowed. For more information on\nthe actual limits, refer to\n[Rate Limits](https://cloud.google.com/functions/quotas#rate_limits).", + "description": "Synchronously invokes a deployed Cloud Function. To be used for testing purposes as very limited traffic is allowed. For more information on the actual limits, refer to [Rate Limits](https://cloud.google.com/functions/quotas#rate_limits).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:call", "httpMethod": "POST", "id": "cloudfunctions.projects.locations.functions.call", @@ -249,7 +249,7 @@ ] }, "create": { - "description": "Creates a new function. If a function with the given name already exists in\nthe specified project, the long running operation will return\n`ALREADY_EXISTS` error.", + "description": "Creates a new function. If a function with the given name already exists in the specified project, the long running operation will return `ALREADY_EXISTS` error.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/functions", "httpMethod": "POST", "id": "cloudfunctions.projects.locations.functions.create", @@ -258,7 +258,7 @@ ], "parameters": { "location": { - "description": "Required. The project and location in which the function should be created, specified\nin the format `projects/*/locations/*`", + "description": "Required. The project and location in which the function should be created, specified in the format `projects/*/locations/*`", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -277,7 +277,7 @@ ] }, "delete": { - "description": "Deletes a function with the given name from the specified project. If the\ngiven function is used by some trigger, the trigger will be updated to\nremove this function.", + "description": "Deletes a function with the given name from the specified project. 
If the given function is used by some trigger, the trigger will be updated to remove this function.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}", "httpMethod": "DELETE", "id": "cloudfunctions.projects.locations.functions.delete", @@ -302,7 +302,7 @@ ] }, "generateDownloadUrl": { - "description": "Returns a signed URL for downloading deployed function source code.\nThe URL is only valid for a limited period and should be used within\nminutes after generation.\nFor more information about the signed URL usage see:\nhttps://cloud.google.com/storage/docs/access-control/signed-urls", + "description": "Returns a signed URL for downloading deployed function source code. The URL is only valid for a limited period and should be used within minutes after generation. For more information about the signed URL usage see: https://cloud.google.com/storage/docs/access-control/signed-urls", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:generateDownloadUrl", "httpMethod": "POST", "id": "cloudfunctions.projects.locations.functions.generateDownloadUrl", @@ -311,7 +311,7 @@ ], "parameters": { "name": { - "description": "The name of function for which source code Google Cloud Storage signed\nURL should be generated.", + "description": "The name of function for which source code Google Cloud Storage signed URL should be generated.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/functions/[^/]+$", "required": true, @@ -330,7 +330,7 @@ ] }, "generateUploadUrl": { - "description": "Returns a signed URL for uploading a function source code.\nFor more information about the signed URL usage see:\nhttps://cloud.google.com/storage/docs/access-control/signed-urls.\nOnce the function source code upload is complete, the used signed\nURL should be provided in CreateFunction or UpdateFunction request\nas a reference to the function source code.\n\nWhen uploading source code to the generated signed URL, please follow\nthese restrictions:\n\n* Source file type should be a zip file.\n* Source file size should not exceed 100MB limit.\n* No credentials should be attached - the signed URLs provide access to the\n target bucket using internal service identity; if credentials were\n attached, the identity from the credentials would be used, but that\n identity does not have permissions to upload files to the URL.\n\nWhen making a HTTP PUT request, these two headers need to be specified:\n\n* `content-type: application/zip`\n* `x-goog-content-length-range: 0,104857600`\n\nAnd this header SHOULD NOT be specified:\n\n* `Authorization: Bearer YOUR_TOKEN`", + "description": "Returns a signed URL for uploading a function source code. For more information about the signed URL usage see: https://cloud.google.com/storage/docs/access-control/signed-urls. Once the function source code upload is complete, the used signed URL should be provided in CreateFunction or UpdateFunction request as a reference to the function source code. When uploading source code to the generated signed URL, please follow these restrictions: * Source file type should be a zip file. * Source file size should not exceed 100MB limit. * No credentials should be attached - the signed URLs provide access to the target bucket using internal service identity; if credentials were attached, the identity from the credentials would be used, but that identity does not have permissions to upload files to the URL. 
When making a HTTP PUT request, these two headers need to be specified: * `content-type: application/zip` * `x-goog-content-length-range: 0,104857600` And this header SHOULD NOT be specified: * `Authorization: Bearer YOUR_TOKEN`", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/functions:generateUploadUrl", "httpMethod": "POST", "id": "cloudfunctions.projects.locations.functions.generateUploadUrl", @@ -339,7 +339,7 @@ ], "parameters": { "parent": { - "description": "The project and location in which the Google Cloud Storage signed URL\nshould be generated, specified in the format `projects/*/locations/*`.", + "description": "The project and location in which the Google Cloud Storage signed URL should be generated, specified in the format `projects/*/locations/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -383,7 +383,7 @@ ] }, "getIamPolicy": { - "description": "Gets the IAM access control policy for a function.\nReturns an empty policy if the function exists and does not have a policy\nset.", + "description": "Gets the IAM access control policy for a function. Returns an empty policy if the function exists and does not have a policy set.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:getIamPolicy", "httpMethod": "GET", "id": "cloudfunctions.projects.locations.functions.getIamPolicy", @@ -392,13 +392,13 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" }, "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. 
See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/functions/[^/]+$", "required": true, @@ -429,12 +429,12 @@ "type": "integer" }, "pageToken": { - "description": "The value returned by the last\n`ListFunctionsResponse`; indicates that\nthis is a continuation of a prior `ListFunctions` call, and that the\nsystem should return the next page of data.", + "description": "The value returned by the last `ListFunctionsResponse`; indicates that this is a continuation of a prior `ListFunctions` call, and that the system should return the next page of data.", "location": "query", "type": "string" }, "parent": { - "description": "The project and location from which the function should be listed,\nspecified in the format `projects/*/locations/*`\nIf you want to list functions in all locations, use \"-\" in place of a\nlocation. When listing functions in all locations, if one or more\nlocation(s) are unreachable, the response will contain functions from all\nreachable locations along with the names of any unreachable locations.", + "description": "The project and location from which the function should be listed, specified in the format `projects/*/locations/*` If you want to list functions in all locations, use \"-\" in place of a location. When listing functions in all locations, if one or more location(s) are unreachable, the response will contain functions from all reachable locations along with the names of any unreachable locations.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -459,7 +459,7 @@ ], "parameters": { "name": { - "description": "A user-defined name of the function. Function names must be unique\nglobally and match pattern `projects/*/locations/*/functions/*`", + "description": "A user-defined name of the function. Function names must be unique globally and match pattern `projects/*/locations/*/functions/*`", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/functions/[^/]+$", "required": true, @@ -484,7 +484,7 @@ ] }, "setIamPolicy": { - "description": "Sets the IAM access control policy on the specified function.\nReplaces any existing policy.", + "description": "Sets the IAM access control policy on the specified function. Replaces any existing policy.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:setIamPolicy", "httpMethod": "POST", "id": "cloudfunctions.projects.locations.functions.setIamPolicy", @@ -493,7 +493,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/functions/[^/]+$", "required": true, @@ -512,7 +512,7 @@ ] }, "testIamPermissions": { - "description": "Tests the specified permissions against the IAM access control policy\nfor a function.\nIf the function does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + "description": "Tests the specified permissions against the IAM access control policy for a function. 
If the function does not exist, this will return an empty set of permissions, not a NOT_FOUND error.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:testIamPermissions", "httpMethod": "POST", "id": "cloudfunctions.projects.locations.functions.testIamPermissions", @@ -521,7 +521,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/functions/[^/]+$", "required": true, @@ -546,11 +546,11 @@ } } }, - "revision": "20200504", + "revision": "20200917", "rootUrl": "https://cloudfunctions.googleapis.com/", "schemas": { "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"sampleservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:aliya@example.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts jose@example.com from DATA_READ logging, and\naliya@example.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. 
It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -561,18 +561,18 @@ "type": "array" }, "service": { - "description": "Specifies a service that will be enabled for audit logging.\nFor example, `storage.googleapis.com`, `cloudsql.googleapis.com`.\n`allServices` is a special value that covers all services.", + "description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.", "type": "string" } }, "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\njose@example.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions. Example: { \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { - "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", + "description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.", "items": { "type": "string" }, @@ -603,17 +603,17 @@ "properties": { "condition": { "$ref": "Expr", - "description": "The condition that is associated with this binding.\n\nIf the condition evaluates to `true`, then this binding applies to the\ncurrent request.\n\nIf the condition evaluates to `false`, then this binding does not apply to\nthe current request. However, a different role binding might grant the same\nrole to one or more of the members in this binding.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies)." + "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. 
For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a user that has been recently deleted. For\n example, `alice@example.com?uid=123456789012345678901`. If the user is\n recovered, this value reverts to `user:{emailid}` and the recovered user\n retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus\n unique identifier) representing a service account that has been recently\n deleted. For example,\n `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.\n If the service account is undeleted, this value reverts to\n `serviceAccount:{emailid}` and the undeleted service account retains the\n role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a Google group that has been recently\n deleted. For example, `admins@example.com?uid=123456789012345678901`. If\n the group is recovered, this value reverts to `group:{emailid}` and the\n recovered group retains the role in the binding.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. 
", "items": { "type": "string" }, "type": "array" }, "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", "type": "string" } }, @@ -635,7 +635,7 @@ "id": "CallFunctionResponse", "properties": { "error": { - "description": "Either system or user-function generated error. Set if execution\nwas not successful.", + "description": "Either system or user-function generated error. Set if execution was not successful.", "type": "string" }, "executionId": { @@ -643,27 +643,39 @@ "type": "string" }, "result": { - "description": "Result populated for successful execution of synchronous function. Will\nnot be populated if function does not return a result through context.", + "description": "Result populated for successful execution of synchronous function. Will not be populated if function does not return a result through context.", "type": "string" } }, "type": "object" }, "CloudFunction": { - "description": "Describes a Cloud Function that contains user computation executed in\nresponse to an event. It encapsulate function and triggers configurations.", + "description": "Describes a Cloud Function that contains user computation executed in response to an event. It encapsulate function and triggers configurations.", "id": "CloudFunction", "properties": { "availableMemoryMb": { - "description": "The amount of memory in MB available for a function.\nDefaults to 256MB.", + "description": "The amount of memory in MB available for a function. Defaults to 256MB.", "format": "int32", "type": "integer" }, + "buildEnvironmentVariables": { + "additionalProperties": { + "type": "string" + }, + "description": "Build environment variables that shall be available during build time.", + "type": "object" + }, + "buildId": { + "description": "Output only. The Cloud Build ID of the latest successful deployment of the function.", + "readOnly": true, + "type": "string" + }, "description": { "description": "User-provided description of a function.", "type": "string" }, "entryPoint": { - "description": "The name of the function (as defined in source code) that will be\nexecuted. Defaults to the resource name suffix, if not specified. For\nbackward compatibility, if function with given name is not found, then the\nsystem will try to use function named \"function\".\nFor Node.js this is name of a function exported by the module specified\nin `source_location`.", + "description": "The name of the function (as defined in source code) that will be executed. Defaults to the resource name suffix, if not specified. For backward compatibility, if function with given name is not found, then the system will try to use function named \"function\". For Node.js this is name of a function exported by the module specified in `source_location`.", "type": "string" }, "environmentVariables": { @@ -682,16 +694,18 @@ "description": "An HTTPS endpoint type of source that can be triggered via URL." 
}, "ingressSettings": { - "description": "The ingress settings for the function, controlling what traffic can reach\nit.", + "description": "The ingress settings for the function, controlling what traffic can reach it.", "enum": [ "INGRESS_SETTINGS_UNSPECIFIED", "ALLOW_ALL", - "ALLOW_INTERNAL_ONLY" + "ALLOW_INTERNAL_ONLY", + "ALLOW_INTERNAL_AND_GCLB" ], "enumDescriptions": [ "Unspecified.", "Allow HTTP traffic from public and private sources.", - "Allow HTTP traffic from only private VPC sources." + "Allow HTTP traffic from only private VPC sources.", + "Allow HTTP traffic from private VPC sources and through GCLB." ], "type": "string" }, @@ -703,36 +717,36 @@ "type": "object" }, "maxInstances": { - "description": "The limit on the maximum number of function instances that may coexist at a\ngiven time.", + "description": "The limit on the maximum number of function instances that may coexist at a given time. In some cases, such as rapid traffic surges, Cloud Functions may, for a short period of time, create more instances than the specified max instances limit. If your function cannot tolerate this temporary behavior, you may want to factor in a safety margin and set a lower max instances value than your function can tolerate. See the [Max Instances](https://cloud.google.com/functions/docs/max-instances) Guide for more details.", "format": "int32", "type": "integer" }, "name": { - "description": "A user-defined name of the function. Function names must be unique\nglobally and match pattern `projects/*/locations/*/functions/*`", + "description": "A user-defined name of the function. Function names must be unique globally and match pattern `projects/*/locations/*/functions/*`", "type": "string" }, "network": { - "description": "The VPC Network that this cloud function can connect to. It can be\neither the fully-qualified URI, or the short name of the network resource.\nIf the short network name is used, the network must belong to the same\nproject. Otherwise, it must belong to a project within the same\norganization. The format of this field is either\n`projects/{project}/global/networks/{network}` or `{network}`, where\n{project} is a project id where the network is defined, and {network} is\nthe short name of the network.\n\nThis field is mutually exclusive with `vpc_connector` and will be replaced\nby it.\n\nSee [the VPC documentation](https://cloud.google.com/compute/docs/vpc) for\nmore information on connecting Cloud projects.", + "description": "The VPC Network that this cloud function can connect to. It can be either the fully-qualified URI, or the short name of the network resource. If the short network name is used, the network must belong to the same project. Otherwise, it must belong to a project within the same organization. The format of this field is either `projects/{project}/global/networks/{network}` or `{network}`, where {project} is a project id where the network is defined, and {network} is the short name of the network. This field is mutually exclusive with `vpc_connector` and will be replaced by it. See [the VPC documentation](https://cloud.google.com/compute/docs/vpc) for more information on connecting Cloud projects.", "type": "string" }, "runtime": { - "description": "The runtime in which to run the function. Required when deploying a new\nfunction, optional when updating an existing function. 
For a complete\nlist of possible choices, see the\n[`gcloud` command\nreference](/sdk/gcloud/reference/functions/deploy#--runtime).", + "description": "The runtime in which to run the function. Required when deploying a new function, optional when updating an existing function. For a complete list of possible choices, see the [`gcloud` command reference](/sdk/gcloud/reference/functions/deploy#--runtime).", "type": "string" }, "serviceAccountEmail": { - "description": "The email of the function's service account. If empty, defaults to\n`{project_id}@appspot.gserviceaccount.com`.", + "description": "The email of the function's service account. If empty, defaults to `{project_id}@appspot.gserviceaccount.com`.", "type": "string" }, "sourceArchiveUrl": { - "description": "The Google Cloud Storage URL, starting with gs://, pointing to the zip\narchive which contains the function.", + "description": "The Google Cloud Storage URL, starting with gs://, pointing to the zip archive which contains the function.", "type": "string" }, "sourceRepository": { "$ref": "SourceRepository", - "description": "**Beta Feature**\n\nThe source repository where a function is hosted." + "description": "**Beta Feature** The source repository where a function is hosted." }, "sourceUploadUrl": { - "description": "The Google Cloud Storage signed URL used for source uploading, generated\nby google.cloud.functions.v1.GenerateUploadUrl", + "description": "The Google Cloud Storage signed URL used for source uploading, generated by google.cloud.functions.v1.GenerateUploadUrl", "type": "string" }, "status": { @@ -751,31 +765,34 @@ "Function deployment failed and the function isn’t serving.", "Function is being created or updated.", "Function is being deleted.", - "Function deployment failed and the function serving state is undefined.\nThe function should be updated or deleted to move it out of this state." + "Function deployment failed and the function serving state is undefined. The function should be updated or deleted to move it out of this state." ], + "readOnly": true, "type": "string" }, "timeout": { - "description": "The function execution timeout. Execution is considered failed and\ncan be terminated if the function is not completed at the end of the\ntimeout period. Defaults to 60 seconds.", + "description": "The function execution timeout. Execution is considered failed and can be terminated if the function is not completed at the end of the timeout period. Defaults to 60 seconds.", "format": "google-duration", "type": "string" }, "updateTime": { "description": "Output only. The last update timestamp of a Cloud Function.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "versionId": { - "description": "Output only. The version identifier of the Cloud Function. Each deployment attempt\nresults in a new version of a function being created.", + "description": "Output only. The version identifier of the Cloud Function. Each deployment attempt results in a new version of a function being created.", "format": "int64", + "readOnly": true, "type": "string" }, "vpcConnector": { - "description": "The VPC Network Connector that this cloud function can connect to. It can\nbe either the fully-qualified URI, or the short name of the network\nconnector resource. 
The format of this field is\n`projects/*/locations/*/connectors/*`\n\nThis field is mutually exclusive with `network` field and will eventually\nreplace it.\n\nSee [the VPC documentation](https://cloud.google.com/compute/docs/vpc) for\nmore information on connecting Cloud projects.", + "description": "The VPC Network Connector that this cloud function can connect to. It can be either the fully-qualified URI, or the short name of the network connector resource. The format of this field is `projects/*/locations/*/connectors/*` This field is mutually exclusive with `network` field and will eventually replace it. See [the VPC documentation](https://cloud.google.com/compute/docs/vpc) for more information on connecting Cloud projects.", "type": "string" }, "vpcConnectorEgressSettings": { - "description": "The egress settings for the connector, controlling what traffic is diverted\nthrough it.", + "description": "The egress settings for the connector, controlling what traffic is diverted through it.", "enum": [ "VPC_CONNECTOR_EGRESS_SETTINGS_UNSPECIFIED", "PRIVATE_RANGES_ONLY", @@ -784,7 +801,7 @@ "enumDescriptions": [ "Unspecified.", "Use the VPC Access Connector only for private IP space from RFC1918.", - "Force the use of VPC Access Connector for all egress traffic from the\nfunction." + "Force the use of VPC Access Connector for all egress traffic from the function." ], "type": "string" } @@ -792,11 +809,11 @@ "type": "object" }, "EventTrigger": { - "description": "Describes EventTrigger, used to request events be sent from another\nservice.", + "description": "Describes EventTrigger, used to request events be sent from another service.", "id": "EventTrigger", "properties": { "eventType": { - "description": "Required. The type of event to observe. For example:\n`providers/cloud.storage/eventTypes/object.change` and\n`providers/cloud.pubsub/eventTypes/topic.publish`.\n\nEvent types match pattern `providers/*/eventTypes/*.*`.\nThe pattern contains:\n\n1. namespace: For example, `cloud.storage` and\n `google.firebase.analytics`.\n2. resource type: The type of resource on which event occurs. For\n example, the Google Cloud Storage API includes the type `object`.\n3. action: The action that generates the event. For example, action for\n a Google Cloud Storage Object is 'change'.\nThese parts are lower case.", + "description": "Required. The type of event to observe. For example: `providers/cloud.storage/eventTypes/object.change` and `providers/cloud.pubsub/eventTypes/topic.publish`. Event types match pattern `providers/*/eventTypes/*.*`. The pattern contains: 1. namespace: For example, `cloud.storage` and `google.firebase.analytics`. 2. resource type: The type of resource on which event occurs. For example, the Google Cloud Storage API includes the type `object`. 3. action: The action that generates the event. For example, action for a Google Cloud Storage Object is 'change'. These parts are lower case.", "type": "string" }, "failurePolicy": { @@ -804,41 +821,41 @@ "description": "Specifies policy for failed executions." }, "resource": { - "description": "Required. The resource(s) from which to observe events, for example,\n`projects/_/buckets/myBucket`.\n\nNot all syntactically correct values are accepted by all services. For\nexample:\n\n1. The authorization model must support it. Google Cloud Functions\n only allows EventTriggers to be deployed that observe resources in the\n same project as the `CloudFunction`.\n2. The resource type must match the pattern expected for an\n `event_type`. 
For example, an `EventTrigger` that has an\n `event_type` of \"google.pubsub.topic.publish\" should have a resource\n that matches Google Cloud Pub/Sub topics.\n\nAdditionally, some services may support short names when creating an\n`EventTrigger`. These will always be returned in the normalized \"long\"\nformat.\n\nSee each *service's* documentation for supported formats.", + "description": "Required. The resource(s) from which to observe events, for example, `projects/_/buckets/myBucket`. Not all syntactically correct values are accepted by all services. For example: 1. The authorization model must support it. Google Cloud Functions only allows EventTriggers to be deployed that observe resources in the same project as the `CloudFunction`. 2. The resource type must match the pattern expected for an `event_type`. For example, an `EventTrigger` that has an `event_type` of \"google.pubsub.topic.publish\" should have a resource that matches Google Cloud Pub/Sub topics. Additionally, some services may support short names when creating an `EventTrigger`. These will always be returned in the normalized \"long\" format. See each *service's* documentation for supported formats.", "type": "string" }, "service": { - "description": "The hostname of the service that should be observed.\n\nIf no string is provided, the default service implementing the API will\nbe used. For example, `storage.googleapis.com` is the default for all\nevent types in the `google.storage` namespace.", + "description": "The hostname of the service that should be observed. If no string is provided, the default service implementing the API will be used. For example, `storage.googleapis.com` is the default for all event types in the `google.storage` namespace.", "type": "string" } }, "type": "object" }, "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL)\nsyntax. CEL is a C-like expression language. The syntax and semantics of CEL\nare documented at https://github.com/google/cel-spec.\n\nExample (Comparison):\n\n title: \"Summary size limit\"\n description: \"Determines if a summary is less than 100 chars\"\n expression: \"document.summary.size() \u003c 100\"\n\nExample (Equality):\n\n title: \"Requestor is owner\"\n description: \"Determines if requestor is the document owner\"\n expression: \"document.owner == request.auth.claims.email\"\n\nExample (Logic):\n\n title: \"Public documents\"\n description: \"Determine whether the document should be publicly visible\"\n expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\n\nExample (Data Manipulation):\n\n title: \"Notification string\"\n description: \"Create a notification string with a timestamp.\"\n expression: \"'New message received at ' + string(document.create_time)\"\n\nThe exact variables and functions that may be referenced within an expression\nare determined by the service that evaluates it. See the service\ndocumentation for additional information.", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. 
Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { - "description": "Optional. Description of the expression. This is a longer text which\ndescribes the expression, e.g. when hovered over it in a UI.", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, "expression": { - "description": "Textual representation of an expression in Common Expression Language\nsyntax.", + "description": "Textual representation of an expression in Common Expression Language syntax.", "type": "string" }, "location": { - "description": "Optional. String indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", "type": "string" }, "title": { - "description": "Optional. Title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", "type": "string" } }, "type": "object" }, "FailurePolicy": { - "description": "Describes the policy in case of function's execution failure.\nIf empty, then defaults to ignoring failures (i.e. not retrying them).", + "description": "Describes the policy in case of function's execution failure. If empty, then defaults to ignoring failures (i.e. not retrying them).", "id": "FailurePolicy", "properties": { "retry": { @@ -853,7 +870,7 @@ "id": "GenerateDownloadUrlRequest", "properties": { "versionId": { - "description": "The optional version of function. If not set, default, current version\nis used.", + "description": "The optional version of function. If not set, default, current version is used.", "format": "uint64", "type": "string" } @@ -865,7 +882,7 @@ "id": "GenerateDownloadUrlResponse", "properties": { "downloadUrl": { - "description": "The generated Google Cloud Storage signed URL that should be used for\nfunction source code download.", + "description": "The generated Google Cloud Storage signed URL that should be used for function source code download.", "type": "string" } }, @@ -882,7 +899,7 @@ "id": "GenerateUploadUrlResponse", "properties": { "uploadUrl": { - "description": "The generated Google Cloud Storage signed URL that should be used for a\nfunction source code upload. 
The uploaded file should be a zip archive\nwhich contains a function.", + "description": "The generated Google Cloud Storage signed URL that should be used for a function source code upload. The uploaded file should be a zip archive which contains a function.", "type": "string" } }, @@ -894,6 +911,7 @@ "properties": { "url": { "description": "Output only. The deployed url for the function.", + "readOnly": true, "type": "string" } }, @@ -911,11 +929,11 @@ "type": "array" }, "nextPageToken": { - "description": "If not empty, indicates that there may be more functions that match\nthe request; this value should be passed in a new\ngoogle.cloud.functions.v1.ListFunctionsRequest\nto get more functions.", + "description": "If not empty, indicates that there may be more functions that match the request; this value should be passed in a new google.cloud.functions.v1.ListFunctionsRequest to get more functions.", "type": "string" }, "unreachable": { - "description": "Locations that could not be reached. The response does not include any\nfunctions from these locations.", + "description": "Locations that could not be reached. The response does not include any functions from these locations.", "items": { "type": "string" }, @@ -965,14 +983,14 @@ "id": "Location", "properties": { "displayName": { - "description": "The friendly name for this location, typically a nearby city name.\nFor example, \"Tokyo\".", + "description": "The friendly name for this location, typically a nearby city name. For example, \"Tokyo\".", "type": "string" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "Cross-service attributes for the location. For example\n\n {\"cloud.googleapis.com/region\": \"us-east1\"}", + "description": "Cross-service attributes for the location. For example {\"cloud.googleapis.com/region\": \"us-east1\"}", "type": "object" }, "locationId": { @@ -984,22 +1002,22 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata. For example the available capacity at the given\nlocation.", + "description": "Service-specific metadata. For example the available capacity at the given location.", "type": "object" }, "name": { - "description": "Resource name for the location, which may vary between implementations.\nFor example: `\"projects/example-project/locations/us-east1\"`", + "description": "Resource name for the location, which may vary between implementations. For example: `\"projects/example-project/locations/us-east1\"`", "type": "string" } }, "type": "object" }, "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", "properties": { "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", "type": "boolean" }, "error": { @@ -1011,11 +1029,11 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata associated with the operation. 
It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", "type": "object" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, "response": { @@ -1023,7 +1041,7 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", "type": "object" } }, @@ -1033,51 +1051,10 @@ "description": "Metadata describing an Operation", "id": "OperationMetadataV1", "properties": { - "request": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "The original request that started the operation.", - "type": "object" - }, - "target": { - "description": "Target of the operation - for example\nprojects/project-1/locations/region-1/functions/function-1", - "type": "string" - }, - "type": { - "description": "Type of operation.", - "enum": [ - "OPERATION_UNSPECIFIED", - "CREATE_FUNCTION", - "UPDATE_FUNCTION", - "DELETE_FUNCTION" - ], - "enumDescriptions": [ - "Unknown operation type.", - "Triggered by CreateFunction call", - "Triggered by UpdateFunction call", - "Triggered by DeleteFunction call." - ], + "buildId": { + "description": "The Cloud Build ID of the function created or updated by an API call. 
This field is only populated for Create and Update operations.", "type": "string" }, - "updateTime": { - "description": "The last update timestamp of the operation.", - "format": "google-datetime", - "type": "string" - }, - "versionId": { - "description": "Version id of the function created or updated by an API call.\nThis field is only populated for Create and Update operations.", - "format": "int64", - "type": "string" - } - }, - "type": "object" - }, - "OperationMetadataV1Beta2": { - "description": "Metadata describing an Operation", - "id": "OperationMetadataV1Beta2", - "properties": { "request": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -1087,7 +1064,7 @@ "type": "object" }, "target": { - "description": "Target of the operation - for example\nprojects/project-1/locations/region-1/functions/function-1", + "description": "Target of the operation - for example projects/project-1/locations/region-1/functions/function-1", "type": "string" }, "type": { @@ -1112,7 +1089,7 @@ "type": "string" }, "versionId": { - "description": "Version id of the function created or updated by an API call.\nThis field is only populated for Create and Update operations.", + "description": "Version id of the function created or updated by an API call. This field is only populated for Create and Update operations.", "format": "int64", "type": "string" } @@ -1120,7 +1097,7 @@ "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access\ncontrols for Google Cloud resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). A `role` is a named list of\npermissions; each `role` can be an IAM predefined role or a user-created\ncustom role.\n\nFor some types of Google Cloud resources, a `binding` can also specify a\n`condition`, which is a logical expression that allows access to a resource\nonly if the expression evaluates to `true`. A condition can add constraints\nbased on attributes of the request, the resource, or both. 
To learn which\nresources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).\n\n**JSON example:**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\n \"user:eve@example.com\"\n ],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n }\n\n**YAML example:**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n - etag: BwWWja0YfJA=\n - version: 3\n\nFor a description of IAM and its features, see the\n[IAM documentation](https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "auditConfigs": { @@ -1131,19 +1108,19 @@ "type": "array" }, "bindings": { - "description": "Associates a list of `members` to a `role`. Optionally, may specify a\n`condition` that determines how and when the `bindings` are applied. Each\nof the `bindings` must contain at least one member.", + "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", "format": "byte", "type": "string" }, "version": { - "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. 
Requests that specify an invalid value\nare rejected.\n\nAny operation that affects conditional role bindings must specify version\n`3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding\n* Adding a conditional role binding to a policy\n* Changing a conditional role binding in a policy\n* Removing any role binding, with or without a condition, from a policy\n that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may\nspecify any valid version or leave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -1151,7 +1128,7 @@ "type": "object" }, "Retry": { - "description": "Describes the retry policy in case of function's execution failure.\nA function execution will be retried on any failure.\nA failed execution will be retried up to 7 days with an exponential backoff\n(capped at 10 seconds).\nRetried execution is charged as any other execution.", + "description": "Describes the retry policy in case of function's execution failure. A function execution will be retried on any failure. A failed execution will be retried up to 7 days with an exponential backoff (capped at 10 seconds). Retried execution is charged as any other execution.", "id": "Retry", "properties": {}, "type": "object" @@ -1162,10 +1139,10 @@ "properties": { "policy": { "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." }, "updateMask": { - "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. 
Only\nthe fields in the mask will be modified. If no mask is provided, the\nfollowing default mask is used:\n\n`paths: \"bindings, etag\"`", + "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: \"bindings, etag\"`", "format": "google-fieldmask", "type": "string" } @@ -1173,22 +1150,23 @@ "type": "object" }, "SourceRepository": { - "description": "Describes SourceRepository, used to represent parameters related to\nsource repository where a function is hosted.", + "description": "Describes SourceRepository, used to represent parameters related to source repository where a function is hosted.", "id": "SourceRepository", "properties": { "deployedUrl": { - "description": "Output only. The URL pointing to the hosted repository where the function\nwere defined at the time of deployment. It always points to a specific\ncommit in the format described above.", + "description": "Output only. The URL pointing to the hosted repository where the function were defined at the time of deployment. It always points to a specific commit in the format described above.", + "readOnly": true, "type": "string" }, "url": { - "description": "The URL pointing to the hosted repository where the function is defined.\nThere are supported Cloud Source Repository URLs in the following\nformats:\n\nTo refer to a specific commit:\n`https://source.developers.google.com/projects/*/repos/*/revisions/*/paths/*`\nTo refer to a moveable alias (branch):\n`https://source.developers.google.com/projects/*/repos/*/moveable-aliases/*/paths/*`\nIn particular, to refer to HEAD use `master` moveable alias.\nTo refer to a specific fixed alias (tag):\n`https://source.developers.google.com/projects/*/repos/*/fixed-aliases/*/paths/*`\n\nYou may omit `paths/*` if you want to use the main directory.", + "description": "The URL pointing to the hosted repository where the function is defined. There are supported Cloud Source Repository URLs in the following formats: To refer to a specific commit: `https://source.developers.google.com/projects/*/repos/*/revisions/*/paths/*` To refer to a moveable alias (branch): `https://source.developers.google.com/projects/*/repos/*/moveable-aliases/*/paths/*` In particular, to refer to HEAD use `master` moveable alias. To refer to a specific fixed alias (tag): `https://source.developers.google.com/projects/*/repos/*/fixed-aliases/*/paths/*` You may omit `paths/*` if you want to use the main directory.", "type": "string" } }, "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. 
You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -1197,7 +1175,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -1208,7 +1186,7 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, @@ -1219,7 +1197,7 @@ "id": "TestIamPermissionsRequest", "properties": { "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "items": { "type": "string" }, @@ -1233,7 +1211,7 @@ "id": "TestIamPermissionsResponse", "properties": { "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { "type": "string" }, diff --git a/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-gen.go b/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-gen.go index f62c16a012f..d0d8c5bc034 100644 --- a/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-gen.go +++ b/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-gen.go @@ -75,6 +75,7 @@ const apiId = "cloudfunctions:v1" const apiName = "cloudfunctions" const apiVersion = "v1" const basePath = "https://cloudfunctions.googleapis.com/" +const mtlsBasePath = "https://cloudfunctions.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -90,6 +91,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -178,72 +180,31 @@ type ProjectsLocationsFunctionsService struct { s *Service } -// AuditConfig: Specifies the audit configuration for a service. -// The configuration determines which permission types are logged, and -// what -// identities, if any, are exempted from logging. -// An AuditConfig must have one or more AuditLogConfigs. 
-// -// If there are AuditConfigs for both `allServices` and a specific -// service, -// the union of the two AuditConfigs is used for that service: the -// log_types -// specified in each AuditConfig are enabled, and the exempted_members -// in each -// AuditLogConfig are exempted. -// -// Example Policy with multiple AuditConfigs: -// -// { -// "audit_configs": [ -// { -// "service": "allServices" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// }, -// { -// "log_type": "ADMIN_READ", -// } -// ] -// }, -// { -// "service": "sampleservice.googleapis.com" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// }, -// { -// "log_type": "DATA_WRITE", -// "exempted_members": [ -// "user:aliya@example.com" -// ] -// } -// ] -// } -// ] -// } -// -// For sampleservice, this policy enables DATA_READ, DATA_WRITE and -// ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, -// and -// aliya@example.com from DATA_WRITE logging. +// AuditConfig: Specifies the audit configuration for a service. The +// configuration determines which permission types are logged, and what +// identities, if any, are exempted from logging. An AuditConfig must +// have one or more AuditLogConfigs. If there are AuditConfigs for both +// `allServices` and a specific service, the union of the two +// AuditConfigs is used for that service: the log_types specified in +// each AuditConfig are enabled, and the exempted_members in each +// AuditLogConfig are exempted. Example Policy with multiple +// AuditConfigs: { "audit_configs": [ { "service": "allServices", +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": +// [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { +// "log_type": "ADMIN_READ" } ] }, { "service": +// "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": +// "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ +// "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy +// enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts +// jose@example.com from DATA_READ logging, and aliya@example.com from +// DATA_WRITE logging. type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of // permission. AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` - // Service: Specifies a service that will be enabled for audit - // logging. - // For example, `storage.googleapis.com`, - // `cloudsql.googleapis.com`. + // Service: Specifies a service that will be enabled for audit logging. + // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. // `allServices` is a special value that covers all services. Service string `json:"service,omitempty"` @@ -272,31 +233,15 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { } // AuditLogConfig: Provides the configuration for logging a type of -// permissions. -// Example: -// -// { -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// } -// ] -// } -// -// This enables 'DATA_READ' and 'DATA_WRITE' logging, while -// exempting -// jose@example.com from DATA_READ logging. +// permissions. 
Example: { "audit_log_configs": [ { "log_type": +// "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { +// "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and +// 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ +// logging. type AuditLogConfig struct { // ExemptedMembers: Specifies the identities that do not cause logging - // for this type of - // permission. - // Follows the same format of Binding.members. + // for this type of permission. Follows the same format of + // Binding.members. ExemptedMembers []string `json:"exemptedMembers,omitempty"` // LogType: The log type that this config enables. @@ -334,95 +279,53 @@ func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: The condition that is associated with this binding. - // - // If the condition evaluates to `true`, then this binding applies to - // the - // current request. - // - // If the condition evaluates to `false`, then this binding does not - // apply to - // the current request. However, a different role binding might grant - // the same - // role to one or more of the members in this binding. - // - // To learn which resources support conditions in their IAM policies, - // see - // the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/r - // esource-policies). + // Condition: The condition that is associated with this binding. If the + // condition evaluates to `true`, then this binding applies to the + // current request. If the condition evaluates to `false`, then this + // binding does not apply to the current request. However, a different + // role binding might grant the same role to one or more of the members + // in this binding. To learn which resources support conditions in their + // IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud - // Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@example.com` . - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. - // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a user that has been recently deleted. - // For - // example, `alice@example.com?uid=123456789012345678901`. If the - // user is - // recovered, this value reverts to `user:{emailid}` and the - // recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address - // (plus - // unique identifier) representing a service account that has been - // recently - // deleted. For example, - // + // Platform resource. 
`members` can have the following values: * + // `allUsers`: A special identifier that represents anyone who is on the + // internet; with or without a Google account. * + // `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. * + // `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . * + // `serviceAccount:{emailid}`: An email address that represents a + // service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An + // email address that represents a Google group. For example, + // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An + // email address (plus unique identifier) representing a user that has + // been recently deleted. For example, + // `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered + // user retains the role in the binding. * + // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account - // retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a Google group that has been recently - // deleted. For example, - // `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` - // and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. - // - // + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains + // the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: + // An email address (plus unique identifier) representing a Google group + // that has been recently deleted. For example, + // `admins@example.com?uid=123456789012345678901`. If the group is + // recovered, this value reverts to `group:{emailid}` and the recovered + // group retains the role in the binding. * `domain:{domain}`: The G + // Suite domain (primary) that represents all the users of that domain. + // For example, `google.com` or `example.com`. Members []string `json:"members,omitempty"` - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Role: Role that is assigned to `members`. For example, + // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` // ForceSendFields is a list of field names (e.g. "Condition") to @@ -479,17 +382,15 @@ func (s *CallFunctionRequest) MarshalJSON() ([]byte, error) { // CallFunctionResponse: Response of `CallFunction` method. type CallFunctionResponse struct { // Error: Either system or user-function generated error. Set if - // execution - // was not successful. + // execution was not successful. Error string `json:"error,omitempty"` // ExecutionId: Execution id of function invocation. 
ExecutionId string `json:"executionId,omitempty"` // Result: Result populated for successful execution of synchronous - // function. Will - // not be populated if function does not return a result through - // context. + // function. Will not be populated if function does not return a result + // through context. Result string `json:"result,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -520,28 +421,30 @@ func (s *CallFunctionResponse) MarshalJSON() ([]byte, error) { } // CloudFunction: Describes a Cloud Function that contains user -// computation executed in -// response to an event. It encapsulate function and triggers -// configurations. +// computation executed in response to an event. It encapsulate function +// and triggers configurations. type CloudFunction struct { // AvailableMemoryMb: The amount of memory in MB available for a - // function. - // Defaults to 256MB. + // function. Defaults to 256MB. AvailableMemoryMb int64 `json:"availableMemoryMb,omitempty"` + // BuildEnvironmentVariables: Build environment variables that shall be + // available during build time. + BuildEnvironmentVariables map[string]string `json:"buildEnvironmentVariables,omitempty"` + + // BuildId: Output only. The Cloud Build ID of the latest successful + // deployment of the function. + BuildId string `json:"buildId,omitempty"` + // Description: User-provided description of a function. Description string `json:"description,omitempty"` // EntryPoint: The name of the function (as defined in source code) that - // will be - // executed. Defaults to the resource name suffix, if not specified. - // For - // backward compatibility, if function with given name is not found, - // then the - // system will try to use function named "function". + // will be executed. Defaults to the resource name suffix, if not + // specified. For backward compatibility, if function with given name is + // not found, then the system will try to use function named "function". // For Node.js this is name of a function exported by the module - // specified - // in `source_location`. + // specified in `source_location`. EntryPoint string `json:"entryPoint,omitempty"` // EnvironmentVariables: Environment variables that shall be available @@ -557,82 +460,69 @@ type CloudFunction struct { HttpsTrigger *HttpsTrigger `json:"httpsTrigger,omitempty"` // IngressSettings: The ingress settings for the function, controlling - // what traffic can reach - // it. + // what traffic can reach it. // // Possible values: // "INGRESS_SETTINGS_UNSPECIFIED" - Unspecified. // "ALLOW_ALL" - Allow HTTP traffic from public and private sources. // "ALLOW_INTERNAL_ONLY" - Allow HTTP traffic from only private VPC // sources. + // "ALLOW_INTERNAL_AND_GCLB" - Allow HTTP traffic from private VPC + // sources and through GCLB. IngressSettings string `json:"ingressSettings,omitempty"` // Labels: Labels associated with this Cloud Function. Labels map[string]string `json:"labels,omitempty"` // MaxInstances: The limit on the maximum number of function instances - // that may coexist at a - // given time. + // that may coexist at a given time. In some cases, such as rapid + // traffic surges, Cloud Functions may, for a short period of time, + // create more instances than the specified max instances limit. If your + // function cannot tolerate this temporary behavior, you may want to + // factor in a safety margin and set a lower max instances value than + // your function can tolerate. 
See the [Max + // Instances](https://cloud.google.com/functions/docs/max-instances) + // Guide for more details. MaxInstances int64 `json:"maxInstances,omitempty"` // Name: A user-defined name of the function. Function names must be - // unique - // globally and match pattern `projects/*/locations/*/functions/*` + // unique globally and match pattern + // `projects/*/locations/*/functions/*` Name string `json:"name,omitempty"` // Network: The VPC Network that this cloud function can connect to. It - // can be - // either the fully-qualified URI, or the short name of the network - // resource. - // If the short network name is used, the network must belong to the - // same - // project. Otherwise, it must belong to a project within the - // same - // organization. The format of this field is - // either - // `projects/{project}/global/networks/{network}` or `{network}`, - // where + // can be either the fully-qualified URI, or the short name of the + // network resource. If the short network name is used, the network must + // belong to the same project. Otherwise, it must belong to a project + // within the same organization. The format of this field is either + // `projects/{project}/global/networks/{network}` or `{network}`, where // {project} is a project id where the network is defined, and {network} - // is - // the short name of the network. - // - // This field is mutually exclusive with `vpc_connector` and will be - // replaced - // by it. - // - // See [the VPC - // documentation](https://cloud.google.com/compute/docs/vpc) for - // more information on connecting Cloud projects. + // is the short name of the network. This field is mutually exclusive + // with `vpc_connector` and will be replaced by it. See [the VPC + // documentation](https://cloud.google.com/compute/docs/vpc) for more + // information on connecting Cloud projects. Network string `json:"network,omitempty"` // Runtime: The runtime in which to run the function. Required when - // deploying a new - // function, optional when updating an existing function. For a - // complete - // list of possible choices, see the - // [`gcloud` - // command - // reference](/sdk/gcloud/reference/functions/deploy#--runtime). + // deploying a new function, optional when updating an existing + // function. For a complete list of possible choices, see the [`gcloud` + // command reference](/sdk/gcloud/reference/functions/deploy#--runtime). Runtime string `json:"runtime,omitempty"` // ServiceAccountEmail: The email of the function's service account. If - // empty, defaults to - // `{project_id}@appspot.gserviceaccount.com`. + // empty, defaults to `{project_id}@appspot.gserviceaccount.com`. ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"` // SourceArchiveUrl: The Google Cloud Storage URL, starting with gs://, - // pointing to the zip - // archive which contains the function. + // pointing to the zip archive which contains the function. SourceArchiveUrl string `json:"sourceArchiveUrl,omitempty"` - // SourceRepository: **Beta Feature** - // - // The source repository where a function is hosted. + // SourceRepository: **Beta Feature** The source repository where a + // function is hosted. 
SourceRepository *SourceRepository `json:"sourceRepository,omitempty"` // SourceUploadUrl: The Google Cloud Storage signed URL used for source - // uploading, generated - // by google.cloud.functions.v1.GenerateUploadUrl + // uploading, generated by google.cloud.functions.v1.GenerateUploadUrl SourceUploadUrl string `json:"sourceUploadUrl,omitempty"` // Status: Output only. Status of the function deployment. @@ -645,16 +535,13 @@ type CloudFunction struct { // "DEPLOY_IN_PROGRESS" - Function is being created or updated. // "DELETE_IN_PROGRESS" - Function is being deleted. // "UNKNOWN" - Function deployment failed and the function serving - // state is undefined. - // The function should be updated or deleted to move it out of this - // state. + // state is undefined. The function should be updated or deleted to move + // it out of this state. Status string `json:"status,omitempty"` // Timeout: The function execution timeout. Execution is considered - // failed and - // can be terminated if the function is not completed at the end of - // the - // timeout period. Defaults to 60 seconds. + // failed and can be terminated if the function is not completed at the + // end of the timeout period. Defaults to 60 seconds. Timeout string `json:"timeout,omitempty"` // UpdateTime: Output only. The last update timestamp of a Cloud @@ -662,38 +549,28 @@ type CloudFunction struct { UpdateTime string `json:"updateTime,omitempty"` // VersionId: Output only. The version identifier of the Cloud Function. - // Each deployment attempt - // results in a new version of a function being created. + // Each deployment attempt results in a new version of a function being + // created. VersionId int64 `json:"versionId,omitempty,string"` // VpcConnector: The VPC Network Connector that this cloud function can - // connect to. It can - // be either the fully-qualified URI, or the short name of the - // network - // connector resource. The format of this field - // is - // `projects/*/locations/*/connectors/*` - // - // This field is mutually exclusive with `network` field and will - // eventually - // replace it. - // - // See [the VPC - // documentation](https://cloud.google.com/compute/docs/vpc) for - // more information on connecting Cloud projects. + // connect to. It can be either the fully-qualified URI, or the short + // name of the network connector resource. The format of this field is + // `projects/*/locations/*/connectors/*` This field is mutually + // exclusive with `network` field and will eventually replace it. See + // [the VPC documentation](https://cloud.google.com/compute/docs/vpc) + // for more information on connecting Cloud projects. VpcConnector string `json:"vpcConnector,omitempty"` // VpcConnectorEgressSettings: The egress settings for the connector, - // controlling what traffic is diverted - // through it. + // controlling what traffic is diverted through it. // // Possible values: // "VPC_CONNECTOR_EGRESS_SETTINGS_UNSPECIFIED" - Unspecified. // "PRIVATE_RANGES_ONLY" - Use the VPC Access Connector only for // private IP space from RFC1918. // "ALL_TRAFFIC" - Force the use of VPC Access Connector for all - // egress traffic from the - // function. + // egress traffic from the function. 
VpcConnectorEgressSettings string `json:"vpcConnectorEgressSettings,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -725,65 +602,41 @@ func (s *CloudFunction) MarshalJSON() ([]byte, error) { } // EventTrigger: Describes EventTrigger, used to request events be sent -// from another -// service. +// from another service. type EventTrigger struct { - // EventType: Required. The type of event to observe. For - // example: - // `providers/cloud.storage/eventTypes/object.change` - // and - // `providers/cloud.pubsub/eventTypes/topic.publish`. - // - // Event types match pattern `providers/*/eventTypes/*.*`. - // The pattern contains: - // - // 1. namespace: For example, `cloud.storage` and - // `google.firebase.analytics`. - // 2. resource type: The type of resource on which event occurs. For - // example, the Google Cloud Storage API includes the type - // `object`. - // 3. action: The action that generates the event. For example, action - // for - // a Google Cloud Storage Object is 'change'. - // These parts are lower case. + // EventType: Required. The type of event to observe. For example: + // `providers/cloud.storage/eventTypes/object.change` and + // `providers/cloud.pubsub/eventTypes/topic.publish`. Event types match + // pattern `providers/*/eventTypes/*.*`. The pattern contains: 1. + // namespace: For example, `cloud.storage` and + // `google.firebase.analytics`. 2. resource type: The type of resource + // on which event occurs. For example, the Google Cloud Storage API + // includes the type `object`. 3. action: The action that generates the + // event. For example, action for a Google Cloud Storage Object is + // 'change'. These parts are lower case. EventType string `json:"eventType,omitempty"` // FailurePolicy: Specifies policy for failed executions. FailurePolicy *FailurePolicy `json:"failurePolicy,omitempty"` // Resource: Required. The resource(s) from which to observe events, for - // example, - // `projects/_/buckets/myBucket`. - // - // Not all syntactically correct values are accepted by all services. - // For - // example: - // - // 1. The authorization model must support it. Google Cloud Functions - // only allows EventTriggers to be deployed that observe resources in - // the - // same project as the `CloudFunction`. - // 2. The resource type must match the pattern expected for an - // `event_type`. For example, an `EventTrigger` that has an - // `event_type` of "google.pubsub.topic.publish" should have a - // resource - // that matches Google Cloud Pub/Sub topics. - // - // Additionally, some services may support short names when creating - // an - // `EventTrigger`. These will always be returned in the normalized - // "long" - // format. - // - // See each *service's* documentation for supported formats. + // example, `projects/_/buckets/myBucket`. Not all syntactically correct + // values are accepted by all services. For example: 1. The + // authorization model must support it. Google Cloud Functions only + // allows EventTriggers to be deployed that observe resources in the + // same project as the `CloudFunction`. 2. The resource type must match + // the pattern expected for an `event_type`. For example, an + // `EventTrigger` that has an `event_type` of + // "google.pubsub.topic.publish" should have a resource that matches + // Google Cloud Pub/Sub topics. Additionally, some services may support + // short names when creating an `EventTrigger`. These will always be + // returned in the normalized "long" format. 
See each *service's* + // documentation for supported formats. Resource string `json:"resource,omitempty"` - // Service: The hostname of the service that should be observed. - // - // If no string is provided, the default service implementing the API - // will - // be used. For example, `storage.googleapis.com` is the default for - // all + // Service: The hostname of the service that should be observed. If no + // string is provided, the default service implementing the API will be + // used. For example, `storage.googleapis.com` is the default for all // event types in the `google.storage` namespace. Service string `json:"service,omitempty"` @@ -811,65 +664,40 @@ func (s *EventTrigger) MarshalJSON() ([]byte, error) { } // Expr: Represents a textual expression in the Common Expression -// Language (CEL) -// syntax. CEL is a C-like expression language. The syntax and semantics -// of CEL -// are documented at https://github.com/google/cel-spec. -// -// Example (Comparison): -// -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" -// -// Example (Equality): -// -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == -// request.auth.claims.email" -// -// Example (Logic): -// -// title: "Public documents" -// description: "Determine whether the document should be publicly -// visible" -// expression: "document.type != 'private' && document.type != -// 'internal'" -// -// Example (Data Manipulation): -// -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + -// string(document.create_time)" -// -// The exact variables and functions that may be referenced within an -// expression -// are determined by the service that evaluates it. See the -// service -// documentation for additional information. +// Language (CEL) syntax. CEL is a C-like expression language. The +// syntax and semantics of CEL are documented at +// https://github.com/google/cel-spec. Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" +// description: "Determine whether the document should be publicly +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. type Expr struct { // Description: Optional. Description of the expression. This is a - // longer text which - // describes the expression, e.g. when hovered over it in a UI. + // longer text which describes the expression, e.g. when hovered over it + // in a UI. Description string `json:"description,omitempty"` // Expression: Textual representation of an expression in Common - // Expression Language - // syntax. + // Expression Language syntax. 
Expression string `json:"expression,omitempty"` // Location: Optional. String indicating the location of the expression - // for error - // reporting, e.g. a file name and a position in the file. + // for error reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` // Title: Optional. Title for the expression, i.e. a short string - // describing - // its purpose. This can be used e.g. in UIs which allow to enter - // the - // expression. + // describing its purpose. This can be used e.g. in UIs which allow to + // enter the expression. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -896,9 +724,8 @@ func (s *Expr) MarshalJSON() ([]byte, error) { } // FailurePolicy: Describes the policy in case of function's execution -// failure. -// If empty, then defaults to ignoring failures (i.e. not retrying -// them). +// failure. If empty, then defaults to ignoring failures (i.e. not +// retrying them). type FailurePolicy struct { // Retry: If specified, then the function will be retried in case of a // failure. @@ -930,8 +757,7 @@ func (s *FailurePolicy) MarshalJSON() ([]byte, error) { // GenerateDownloadUrlRequest: Request of `GenerateDownloadUrl` method. type GenerateDownloadUrlRequest struct { // VersionId: The optional version of function. If not set, default, - // current version - // is used. + // current version is used. VersionId uint64 `json:"versionId,omitempty,string"` // ForceSendFields is a list of field names (e.g. "VersionId") to @@ -961,8 +787,7 @@ func (s *GenerateDownloadUrlRequest) MarshalJSON() ([]byte, error) { // method. type GenerateDownloadUrlResponse struct { // DownloadUrl: The generated Google Cloud Storage signed URL that - // should be used for - // function source code download. + // should be used for function source code download. DownloadUrl string `json:"downloadUrl,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1001,10 +826,8 @@ type GenerateUploadUrlRequest struct { // method. type GenerateUploadUrlResponse struct { // UploadUrl: The generated Google Cloud Storage signed URL that should - // be used for a - // function source code upload. The uploaded file should be a zip - // archive - // which contains a function. + // be used for a function source code upload. The uploaded file should + // be a zip archive which contains a function. UploadUrl string `json:"uploadUrl,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1069,16 +892,13 @@ type ListFunctionsResponse struct { Functions []*CloudFunction `json:"functions,omitempty"` // NextPageToken: If not empty, indicates that there may be more - // functions that match - // the request; this value should be passed in a - // new - // google.cloud.functions.v1.ListFunctionsRequest - // to get more functions. + // functions that match the request; this value should be passed in a + // new google.cloud.functions.v1.ListFunctionsRequest to get more + // functions. NextPageToken string `json:"nextPageToken,omitempty"` // Unreachable: Locations that could not be reached. The response does - // not include any - // functions from these locations. + // not include any functions from these locations. 
Unreachable []string `json:"unreachable,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1185,13 +1005,11 @@ func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { // Location: A resource that represents Google Cloud Platform location. type Location struct { // DisplayName: The friendly name for this location, typically a nearby - // city name. - // For example, "Tokyo". + // city name. For example, "Tokyo". DisplayName string `json:"displayName,omitempty"` // Labels: Cross-service attributes for the location. For example - // - // {"cloud.googleapis.com/region": "us-east1"} + // {"cloud.googleapis.com/region": "us-east1"} Labels map[string]string `json:"labels,omitempty"` // LocationId: The canonical id for this location. For example: @@ -1199,13 +1017,12 @@ type Location struct { LocationId string `json:"locationId,omitempty"` // Metadata: Service-specific metadata. For example the available - // capacity at the given - // location. + // capacity at the given location. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: Resource name for the location, which may vary between - // implementations. - // For example: "projects/example-project/locations/us-east1" + // implementations. For example: + // "projects/example-project/locations/us-east1" Name string `json:"name,omitempty"` // ForceSendFields is a list of field names (e.g. "DisplayName") to @@ -1232,52 +1049,38 @@ func (s *Location) MarshalJSON() ([]byte, error) { } // Operation: This resource represents a long-running operation that is -// the result of a -// network API call. +// the result of a network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in - // progress. - // If `true`, the operation is completed, and either `error` or - // `response` is - // available. + // progress. If `true`, the operation is completed, and either `error` + // or `response` is available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. Error *Status `json:"error,omitempty"` - // Metadata: Service-specific metadata associated with the operation. - // It typically - // contains progress information and common metadata such as create - // time. - // Some services might not provide such metadata. Any method that - // returns a - // long-running operation should document the metadata type, if any. + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same - // service that - // originally returns it. If you use the default HTTP mapping, - // the - // `name` should be a resource name ending with + // service that originally returns it. If you use the default HTTP + // mapping, the `name` should be a resource name ending with // `operations/{unique_id}`. Name string `json:"name,omitempty"` - // Response: The normal response of the operation in case of success. - // If the original - // method returns no data on success, such as `Delete`, the response - // is - // `google.protobuf.Empty`. If the original method is - // standard - // `Get`/`Create`/`Update`, the response should be the resource. 
For - // other - // methods, the response should have the type `XxxResponse`, where - // `Xxx` - // is the original method name. For example, if the original method - // name - // is `TakeSnapshot()`, the inferred response type - // is - // `TakeSnapshotResponse`. + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as `Delete`, the + // response is `google.protobuf.Empty`. If the original method is + // standard `Get`/`Create`/`Update`, the response should be the + // resource. For other methods, the response should have the type + // `XxxResponse`, where `Xxx` is the original method name. For example, + // if the original method name is `TakeSnapshot()`, the inferred + // response type is `TakeSnapshotResponse`. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1309,11 +1112,15 @@ func (s *Operation) MarshalJSON() ([]byte, error) { // OperationMetadataV1: Metadata describing an Operation type OperationMetadataV1 struct { + // BuildId: The Cloud Build ID of the function created or updated by an + // API call. This field is only populated for Create and Update + // operations. + BuildId string `json:"buildId,omitempty"` + // Request: The original request that started the operation. Request googleapi.RawMessage `json:"request,omitempty"` - // Target: Target of the operation - for - // example + // Target: Target of the operation - for example // projects/project-1/locations/region-1/functions/function-1 Target string `json:"target,omitempty"` @@ -1330,11 +1137,10 @@ type OperationMetadataV1 struct { UpdateTime string `json:"updateTime,omitempty"` // VersionId: Version id of the function created or updated by an API - // call. - // This field is only populated for Create and Update operations. + // call. This field is only populated for Create and Update operations. VersionId int64 `json:"versionId,omitempty,string"` - // ForceSendFields is a list of field names (e.g. "Request") to + // ForceSendFields is a list of field names (e.g. "BuildId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -1342,7 +1148,7 @@ type OperationMetadataV1 struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Request") to include in + // NullFields is a list of field names (e.g. "BuildId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -1357,205 +1163,78 @@ func (s *OperationMetadataV1) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OperationMetadataV1Beta2: Metadata describing an Operation -type OperationMetadataV1Beta2 struct { - // Request: The original request that started the operation. - Request googleapi.RawMessage `json:"request,omitempty"` - - // Target: Target of the operation - for - // example - // projects/project-1/locations/region-1/functions/function-1 - Target string `json:"target,omitempty"` - - // Type: Type of operation. - // - // Possible values: - // "OPERATION_UNSPECIFIED" - Unknown operation type. 
- // "CREATE_FUNCTION" - Triggered by CreateFunction call - // "UPDATE_FUNCTION" - Triggered by UpdateFunction call - // "DELETE_FUNCTION" - Triggered by DeleteFunction call. - Type string `json:"type,omitempty"` - - // UpdateTime: The last update timestamp of the operation. - UpdateTime string `json:"updateTime,omitempty"` - - // VersionId: Version id of the function created or updated by an API - // call. - // This field is only populated for Create and Update operations. - VersionId int64 `json:"versionId,omitempty,string"` - - // ForceSendFields is a list of field names (e.g. "Request") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Request") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *OperationMetadataV1Beta2) MarshalJSON() ([]byte, error) { - type NoMethod OperationMetadataV1Beta2 - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - // Policy: An Identity and Access Management (IAM) policy, which -// specifies access -// controls for Google Cloud resources. -// -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or -// more -// `members` to a single `role`. Members can be user accounts, service -// accounts, +// specifies access controls for Google Cloud resources. A `Policy` is a +// collection of `bindings`. A `binding` binds one or more `members` to +// a single `role`. Members can be user accounts, service accounts, // Google groups, and domains (such as G Suite). A `role` is a named -// list of -// permissions; each `role` can be an IAM predefined role or a -// user-created -// custom role. -// -// For some types of Google Cloud resources, a `binding` can also -// specify a -// `condition`, which is a logical expression that allows access to a -// resource -// only if the expression evaluates to `true`. A condition can add -// constraints -// based on attributes of the request, the resource, or both. To learn -// which -// resources support conditions in their IAM policies, see the -// [IAM +// list of permissions; each `role` can be an IAM predefined role or a +// user-created custom role. For some types of Google Cloud resources, a +// `binding` can also specify a `condition`, which is a logical +// expression that allows access to a resource only if the expression +// evaluates to `true`. A condition can add constraints based on +// attributes of the request, the resource, or both. To learn which +// resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p -// olicies). 
-// -// **JSON example:** -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": [ -// "user:eve@example.com" -// ], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// -// **YAML example:** -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// - etag: BwWWja0YfJA= -// - version: 3 -// -// For a description of IAM and its features, see the -// [IAM documentation](https://cloud.google.com/iam/docs/). +// olicies). **JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: +// 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). type Policy struct { // AuditConfigs: Specifies cloud audit logging configuration for this // policy. AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` // Bindings: Associates a list of `members` to a `role`. Optionally, may - // specify a - // `condition` that determines how and when the `bindings` are applied. - // Each - // of the `bindings` must contain at least one member. + // specify a `condition` that determines how and when the `bindings` are + // applied. Each of the `bindings` must contain at least one member. Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. 
- // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. **Important:** If you use IAM + // Conditions, you must include the `etag` field whenever you call + // `setIamPolicy`. If you omit this field, then IAM allows you to + // overwrite a version `3` policy with a version `1` policy, and all of // the conditions in the version `3` policy are lost. Etag string `json:"etag,omitempty"` - // Version: Specifies the format of the policy. - // - // Valid values are `0`, `1`, and `3`. Requests that specify an invalid - // value - // are rejected. - // + // Version: Specifies the format of the policy. Valid values are `0`, + // `1`, and `3`. Requests that specify an invalid value are rejected. // Any operation that affects conditional role bindings must specify - // version - // `3`. This requirement applies to the following operations: - // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a - // policy - // that includes conditions - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of - // the conditions in the version `3` policy are lost. - // - // If a policy does not include any conditions, operations on that - // policy may - // specify any valid version or leave the field unset. - // - // To learn which resources support conditions in their IAM policies, - // see the - // [IAM + // version `3`. This requirement applies to the following operations: * + // Getting a policy that includes a conditional role binding * Adding a + // conditional role binding to a policy * Changing a conditional role + // binding in a policy * Removing any role binding, with or without a + // condition, from a policy that includes conditions **Important:** If + // you use IAM Conditions, you must include the `etag` field whenever + // you call `setIamPolicy`. If you omit this field, then IAM allows you + // to overwrite a version `3` policy with a version `1` policy, and all + // of the conditions in the version `3` policy are lost. 
If a policy + // does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. To learn which + // resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p // olicies). Version int64 `json:"version,omitempty"` @@ -1588,32 +1267,25 @@ func (s *Policy) MarshalJSON() ([]byte, error) { } // Retry: Describes the retry policy in case of function's execution -// failure. -// A function execution will be retried on any failure. -// A failed execution will be retried up to 7 days with an exponential -// backoff -// (capped at 10 seconds). -// Retried execution is charged as any other execution. +// failure. A function execution will be retried on any failure. A +// failed execution will be retried up to 7 days with an exponential +// backoff (capped at 10 seconds). Retried execution is charged as any +// other execution. type Retry struct { } // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. + // `resource`. The size of the policy is limited to a few 10s of KB. An + // empty policy is a valid policy but certain Cloud Platform services + // (such as Projects) might reject them. Policy *Policy `json:"policy,omitempty"` // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the - // policy to modify. Only - // the fields in the mask will be modified. If no mask is provided, - // the - // following default mask is used: - // - // `paths: "bindings, etag" + // policy to modify. Only the fields in the mask will be modified. If no + // mask is provided, the following default mask is used: `paths: + // "bindings, etag" UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. "Policy") to @@ -1640,37 +1312,24 @@ func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { } // SourceRepository: Describes SourceRepository, used to represent -// parameters related to -// source repository where a function is hosted. +// parameters related to source repository where a function is hosted. type SourceRepository struct { // DeployedUrl: Output only. The URL pointing to the hosted repository - // where the function - // were defined at the time of deployment. It always points to a - // specific - // commit in the format described above. + // where the function were defined at the time of deployment. It always + // points to a specific commit in the format described above. DeployedUrl string `json:"deployedUrl,omitempty"` // Url: The URL pointing to the hosted repository where the function is - // defined. - // There are supported Cloud Source Repository URLs in the - // following - // formats: - // - // To refer to a specific - // commit: - // `https://source.developers.google.com/projects/*/repos/*/revis - // ions/*/paths/*` - // To refer to a moveable alias - // (branch): - // `https://source.developers.google.com/projects/*/repos/*/mov - // eable-aliases/*/paths/*` - // In particular, to refer to HEAD use `master` moveable alias. 
- // To refer to a specific fixed alias - // (tag): - // `https://source.developers.google.com/projects/*/repos/*/fixed- - // aliases/*/paths/*` - // - // You may omit `paths/*` if you want to use the main directory. + // defined. There are supported Cloud Source Repository URLs in the + // following formats: To refer to a specific commit: + // `https://source.developers.google.com/projects/*/repos/*/revisions/*/p + // aths/*` To refer to a moveable alias (branch): + // `https://source.developers.google.com/projects/*/repos/*/moveable-alia + // ses/*/paths/*` In particular, to refer to HEAD use `master` moveable + // alias. To refer to a specific fixed alias (tag): + // `https://source.developers.google.com/projects/*/repos/*/fixed-aliases + // /*/paths/*` You may omit `paths/*` if you want to use the main + // directory. Url string `json:"url,omitempty"` // ForceSendFields is a list of field names (e.g. "DeployedUrl") to @@ -1697,32 +1356,24 @@ func (s *SourceRepository) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. -// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. -// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -1752,11 +1403,8 @@ func (s *Status) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. - // Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For - // more - // information see - // [IAM + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. For more information see [IAM // Overview](https://cloud.google.com/iam/docs/overview#permissions). Permissions []string `json:"permissions,omitempty"` @@ -1787,8 +1435,7 @@ func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { // method. 
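The comments above describe the etag-based read-modify-write cycle for IAM policies and the SetIamPolicyRequest that carries the result. A minimal sketch of that cycle against this generated client follows; the role, resource name, and member are placeholder assumptions, and the helper is purely illustrative, not part of the vendored package.

package example // illustrative sketch only

import (
	"context"

	cloudfunctions "google.golang.org/api/cloudfunctions/v1"
)

// addInvoker grants an example role to a member on a Cloud Function while
// preserving the policy etag so concurrent writers do not clobber each other.
func addInvoker(ctx context.Context, svc *cloudfunctions.Service, resource, member string) error {
	fns := svc.Projects.Locations.Functions
	// Request version 3 so any conditional bindings in the policy come back intact.
	policy, err := fns.GetIamPolicy(resource).OptionsRequestedPolicyVersion(3).Context(ctx).Do()
	if err != nil {
		return err
	}
	policy.Bindings = append(policy.Bindings, &cloudfunctions.Binding{
		Role:    "roles/cloudfunctions.invoker", // placeholder role
		Members: []string{member},
	})
	// The returned policy still carries its etag; sending it back lets the
	// server reject the write if the policy changed in the meantime.
	_, err = fns.SetIamPolicy(resource, &cloudfunctions.SetIamPolicyRequest{Policy: policy}).Context(ctx).Do()
	return err
}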
type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. + // the caller is allowed. Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1829,11 +1476,9 @@ type OperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this -// method to poll the operation result at intervals as recommended by -// the API -// service. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. func (r *OperationsService) Get(name string) *OperationsGetCall { c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -1877,7 +1522,7 @@ func (c *OperationsGetCall) Header() http.Header { func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1939,7 +1584,7 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", // "flatPath": "v1/operations/{operationsId}", // "httpMethod": "GET", // "id": "cloudfunctions.operations.get", @@ -1977,33 +1622,25 @@ type OperationsListCall struct { } // List: Lists operations that match the specified filter in the -// request. If the -// server doesn't support this method, it returns -// `UNIMPLEMENTED`. -// -// NOTE: the `name` binding allows API services to override the -// binding -// to use different resource name schemes, such as `users/*/operations`. -// To -// override the binding, API services can add a binding such -// as -// "/v1/{name=users/*}/operations" to their service configuration. -// For backwards compatibility, the default name includes the -// operations -// collection id, however overriding users must ensure the name -// binding -// is the parent resource, without the operations collection id. +// request. If the server doesn't support this method, it returns +// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to +// override the binding to use different resource name schemes, such as +// `users/*/operations`. To override the binding, API services can add a +// binding such as "/v1/{name=users/*}/operations" to their service +// configuration. For backwards compatibility, the default name includes +// the operations collection id, however overriding users must ensure +// the name binding is the parent resource, without the operations +// collection id. func (r *OperationsService) List() *OperationsListCall { c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c } // Filter sets the optional parameter "filter": Required. A filter for -// matching the requested operations.
The supported formats of -// filter are:
To query for a specific function: -// project:*,location:*,function:*
To query for all of -// the latest operations for a project: -// project:*,latest:true +// matching the requested operations. The supported formats of *filter* +// are: To query for a specific function: +// project:*,location:*,function:* To query for all of the latest +// operations for a project: project:*,latest:true func (c *OperationsListCall) Filter(filter string) *OperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -2016,9 +1653,9 @@ func (c *OperationsListCall) Name(name string) *OperationsListCall { } // PageSize sets the optional parameter "pageSize": The maximum number -// of records that should be returned.
Requested page size cannot -// exceed 100. If not set, the default page size is 100.
-// Pagination is only supported when querying for a specific function. +// of records that should be returned. Requested page size cannot exceed +// 100. If not set, the default page size is 100. Pagination is only +// supported when querying for a specific function. func (c *OperationsListCall) PageSize(pageSize int64) *OperationsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c @@ -2026,8 +1663,8 @@ func (c *OperationsListCall) PageSize(pageSize int64) *OperationsListCall { // PageToken sets the optional parameter "pageToken": Token identifying // which result to start with, which is returned by a previous list -// call.
Pagination is only supported when querying for a -// specific function. +// call. Pagination is only supported when querying for a specific +// function. func (c *OperationsListCall) PageToken(pageToken string) *OperationsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -2070,7 +1707,7 @@ func (c *OperationsListCall) Header() http.Header { func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2129,14 +1766,14 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsRe } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", // "flatPath": "v1/operations", // "httpMethod": "GET", // "id": "cloudfunctions.operations.list", // "parameterOrder": [], // "parameters": { // "filter": { - // "description": "Required. A filter for matching the requested operations.\u003cbr\u003e\u003cbr\u003e The supported formats of \u003cb\u003efilter\u003c/b\u003e are:\u003cbr\u003e To query for a specific function: \u003ccode\u003eproject:*,location:*,function:*\u003c/code\u003e\u003cbr\u003e To query for all of the latest operations for a project: \u003ccode\u003eproject:*,latest:true\u003c/code\u003e", + // "description": "Required. A filter for matching the requested operations. The supported formats of *filter* are: To query for a specific function: project:*,location:*,function:* To query for all of the latest operations for a project: project:*,latest:true", // "location": "query", // "type": "string" // }, @@ -2146,13 +1783,13 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsRe // "type": "string" // }, // "pageSize": { - // "description": "The maximum number of records that should be returned.\u003cbr\u003e Requested page size cannot exceed 100. If not set, the default page size is 100.\u003cbr\u003e\u003cbr\u003e Pagination is only supported when querying for a specific function.", + // "description": "The maximum number of records that should be returned. Requested page size cannot exceed 100. If not set, the default page size is 100. 
Pagination is only supported when querying for a specific function.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Token identifying which result to start with, which is returned by a previous list call.\u003cbr\u003e\u003cbr\u003e Pagination is only supported when querying for a specific function.", + // "description": "Token identifying which result to start with, which is returned by a previous list call. Pagination is only supported when querying for a specific function.", // "location": "query", // "type": "string" // } @@ -2266,7 +1903,7 @@ func (c *ProjectsLocationsListCall) Header() http.Header { func (c *ProjectsLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2404,11 +2041,9 @@ type ProjectsLocationsFunctionsCallCall struct { } // Call: Synchronously invokes a deployed Cloud Function. To be used for -// testing -// purposes as very limited traffic is allowed. For more information -// on -// the actual limits, refer to -// [Rate Limits](https://cloud.google.com/functions/quotas#rate_limits). +// testing purposes as very limited traffic is allowed. For more +// information on the actual limits, refer to [Rate +// Limits](https://cloud.google.com/functions/quotas#rate_limits). func (r *ProjectsLocationsFunctionsService) Call(name string, callfunctionrequest *CallFunctionRequest) *ProjectsLocationsFunctionsCallCall { c := &ProjectsLocationsFunctionsCallCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2443,7 +2078,7 @@ func (c *ProjectsLocationsFunctionsCallCall) Header() http.Header { func (c *ProjectsLocationsFunctionsCallCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2507,7 +2142,7 @@ func (c *ProjectsLocationsFunctionsCallCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Synchronously invokes a deployed Cloud Function. To be used for testing\npurposes as very limited traffic is allowed. For more information on\nthe actual limits, refer to\n[Rate Limits](https://cloud.google.com/functions/quotas#rate_limits).", + // "description": "Synchronously invokes a deployed Cloud Function. To be used for testing purposes as very limited traffic is allowed. For more information on the actual limits, refer to [Rate Limits](https://cloud.google.com/functions/quotas#rate_limits).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:call", // "httpMethod": "POST", // "id": "cloudfunctions.projects.locations.functions.call", @@ -2549,10 +2184,8 @@ type ProjectsLocationsFunctionsCreateCall struct { } // Create: Creates a new function. If a function with the given name -// already exists in -// the specified project, the long running operation will -// return -// `ALREADY_EXISTS` error. +// already exists in the specified project, the long running operation +// will return `ALREADY_EXISTS` error. 
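Create, as documented above, returns a long-running operation, and Operations.Get (shown earlier in this file) is the poll target it recommends. The sketch below deploys a function and waits for the operation to settle; the project, location, runtime, and source archive URL are placeholder assumptions rather than values from this change.

package example // illustrative sketch only

import (
	"context"
	"fmt"
	"time"

	cloudfunctions "google.golang.org/api/cloudfunctions/v1"
)

// deployAndWait creates a function and polls the returned operation until it
// completes; per the docs above, ALREADY_EXISTS surfaces through the operation.
func deployAndWait(ctx context.Context, svc *cloudfunctions.Service) error {
	fn := &cloudfunctions.CloudFunction{
		Name:             "projects/my-project/locations/us-central1/functions/my-fn", // placeholder
		Runtime:          "go113",                                                     // placeholder
		EntryPoint:       "Handler",
		SourceArchiveUrl: "gs://my-bucket/source.zip", // placeholder
		HttpsTrigger:     &cloudfunctions.HttpsTrigger{},
	}
	op, err := svc.Projects.Locations.Functions.Create("projects/my-project/locations/us-central1", fn).Context(ctx).Do()
	if err != nil {
		return err
	}
	// Poll the operation at intervals, as the Operations.Get documentation suggests.
	for !op.Done {
		time.Sleep(5 * time.Second)
		if op, err = svc.Operations.Get(op.Name).Context(ctx).Do(); err != nil {
			return err
		}
	}
	if op.Error != nil {
		return fmt.Errorf("deploy failed: %s", op.Error.Message)
	}
	return nil
}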
func (r *ProjectsLocationsFunctionsService) Create(location string, cloudfunction *CloudFunction) *ProjectsLocationsFunctionsCreateCall { c := &ProjectsLocationsFunctionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.location = location @@ -2587,7 +2220,7 @@ func (c *ProjectsLocationsFunctionsCreateCall) Header() http.Header { func (c *ProjectsLocationsFunctionsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2651,7 +2284,7 @@ func (c *ProjectsLocationsFunctionsCreateCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a new function. If a function with the given name already exists in\nthe specified project, the long running operation will return\n`ALREADY_EXISTS` error.", + // "description": "Creates a new function. If a function with the given name already exists in the specified project, the long running operation will return `ALREADY_EXISTS` error.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/functions", // "httpMethod": "POST", // "id": "cloudfunctions.projects.locations.functions.create", @@ -2660,7 +2293,7 @@ func (c *ProjectsLocationsFunctionsCreateCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "location": { - // "description": "Required. The project and location in which the function should be created, specified\nin the format `projects/*/locations/*`", + // "description": "Required. The project and location in which the function should be created, specified in the format `projects/*/locations/*`", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -2692,10 +2325,8 @@ type ProjectsLocationsFunctionsDeleteCall struct { } // Delete: Deletes a function with the given name from the specified -// project. If the -// given function is used by some trigger, the trigger will be updated -// to -// remove this function. +// project. If the given function is used by some trigger, the trigger +// will be updated to remove this function. func (r *ProjectsLocationsFunctionsService) Delete(name string) *ProjectsLocationsFunctionsDeleteCall { c := &ProjectsLocationsFunctionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2729,7 +2360,7 @@ func (c *ProjectsLocationsFunctionsDeleteCall) Header() http.Header { func (c *ProjectsLocationsFunctionsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2788,7 +2419,7 @@ func (c *ProjectsLocationsFunctionsDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes a function with the given name from the specified project. If the\ngiven function is used by some trigger, the trigger will be updated to\nremove this function.", + // "description": "Deletes a function with the given name from the specified project. 
If the given function is used by some trigger, the trigger will be updated to remove this function.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}", // "httpMethod": "DELETE", // "id": "cloudfunctions.projects.locations.functions.delete", @@ -2827,12 +2458,9 @@ type ProjectsLocationsFunctionsGenerateDownloadUrlCall struct { } // GenerateDownloadUrl: Returns a signed URL for downloading deployed -// function source code. -// The URL is only valid for a limited period and should be used -// within -// minutes after generation. -// For more information about the signed URL usage -// see: +// function source code. The URL is only valid for a limited period and +// should be used within minutes after generation. For more information +// about the signed URL usage see: // https://cloud.google.com/storage/docs/access-control/signed-urls func (r *ProjectsLocationsFunctionsService) GenerateDownloadUrl(name string, generatedownloadurlrequest *GenerateDownloadUrlRequest) *ProjectsLocationsFunctionsGenerateDownloadUrlCall { c := &ProjectsLocationsFunctionsGenerateDownloadUrlCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -2868,7 +2496,7 @@ func (c *ProjectsLocationsFunctionsGenerateDownloadUrlCall) Header() http.Header func (c *ProjectsLocationsFunctionsGenerateDownloadUrlCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2932,7 +2560,7 @@ func (c *ProjectsLocationsFunctionsGenerateDownloadUrlCall) Do(opts ...googleapi } return ret, nil // { - // "description": "Returns a signed URL for downloading deployed function source code.\nThe URL is only valid for a limited period and should be used within\nminutes after generation.\nFor more information about the signed URL usage see:\nhttps://cloud.google.com/storage/docs/access-control/signed-urls", + // "description": "Returns a signed URL for downloading deployed function source code. The URL is only valid for a limited period and should be used within minutes after generation. For more information about the signed URL usage see: https://cloud.google.com/storage/docs/access-control/signed-urls", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:generateDownloadUrl", // "httpMethod": "POST", // "id": "cloudfunctions.projects.locations.functions.generateDownloadUrl", @@ -2941,7 +2569,7 @@ func (c *ProjectsLocationsFunctionsGenerateDownloadUrlCall) Do(opts ...googleapi // ], // "parameters": { // "name": { - // "description": "The name of function for which source code Google Cloud Storage signed\nURL should be generated.", + // "description": "The name of function for which source code Google Cloud Storage signed URL should be generated.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/functions/[^/]+$", // "required": true, @@ -2974,38 +2602,21 @@ type ProjectsLocationsFunctionsGenerateUploadUrlCall struct { } // GenerateUploadUrl: Returns a signed URL for uploading a function -// source code. -// For more information about the signed URL usage -// see: +// source code. For more information about the signed URL usage see: // https://cloud.google.com/storage/docs/access-control/signed-urls. 
-// -// Once the function source code upload is complete, the used signed -// URL should be provided in CreateFunction or UpdateFunction request -// as a reference to the function source code. -// -// When uploading source code to the generated signed URL, please -// follow -// these restrictions: -// -// * Source file type should be a zip file. -// * Source file size should not exceed 100MB limit. -// * No credentials should be attached - the signed URLs provide access -// to the -// target bucket using internal service identity; if credentials were -// attached, the identity from the credentials would be used, but -// that -// identity does not have permissions to upload files to the -// URL. -// -// When making a HTTP PUT request, these two headers need to be -// specified: -// -// * `content-type: application/zip` -// * `x-goog-content-length-range: 0,104857600` -// -// And this header SHOULD NOT be specified: -// -// * `Authorization: Bearer YOUR_TOKEN` +// Once the function source code upload is complete, the used signed URL +// should be provided in CreateFunction or UpdateFunction request as a +// reference to the function source code. When uploading source code to +// the generated signed URL, please follow these restrictions: * Source +// file type should be a zip file. * Source file size should not exceed +// 100MB limit. * No credentials should be attached - the signed URLs +// provide access to the target bucket using internal service identity; +// if credentials were attached, the identity from the credentials would +// be used, but that identity does not have permissions to upload files +// to the URL. When making a HTTP PUT request, these two headers need to +// be specified: * `content-type: application/zip` * +// `x-goog-content-length-range: 0,104857600` And this header SHOULD NOT +// be specified: * `Authorization: Bearer YOUR_TOKEN` func (r *ProjectsLocationsFunctionsService) GenerateUploadUrl(parent string, generateuploadurlrequest *GenerateUploadUrlRequest) *ProjectsLocationsFunctionsGenerateUploadUrlCall { c := &ProjectsLocationsFunctionsGenerateUploadUrlCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -3040,7 +2651,7 @@ func (c *ProjectsLocationsFunctionsGenerateUploadUrlCall) Header() http.Header { func (c *ProjectsLocationsFunctionsGenerateUploadUrlCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3104,7 +2715,7 @@ func (c *ProjectsLocationsFunctionsGenerateUploadUrlCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Returns a signed URL for uploading a function source code.\nFor more information about the signed URL usage see:\nhttps://cloud.google.com/storage/docs/access-control/signed-urls.\nOnce the function source code upload is complete, the used signed\nURL should be provided in CreateFunction or UpdateFunction request\nas a reference to the function source code.\n\nWhen uploading source code to the generated signed URL, please follow\nthese restrictions:\n\n* Source file type should be a zip file.\n* Source file size should not exceed 100MB limit.\n* No credentials should be attached - the signed URLs provide access to the\n target bucket using internal service identity; if credentials were\n attached, the identity from the credentials would be 
used, but that\n identity does not have permissions to upload files to the URL.\n\nWhen making a HTTP PUT request, these two headers need to be specified:\n\n* `content-type: application/zip`\n* `x-goog-content-length-range: 0,104857600`\n\nAnd this header SHOULD NOT be specified:\n\n* `Authorization: Bearer YOUR_TOKEN`", + // "description": "Returns a signed URL for uploading a function source code. For more information about the signed URL usage see: https://cloud.google.com/storage/docs/access-control/signed-urls. Once the function source code upload is complete, the used signed URL should be provided in CreateFunction or UpdateFunction request as a reference to the function source code. When uploading source code to the generated signed URL, please follow these restrictions: * Source file type should be a zip file. * Source file size should not exceed 100MB limit. * No credentials should be attached - the signed URLs provide access to the target bucket using internal service identity; if credentials were attached, the identity from the credentials would be used, but that identity does not have permissions to upload files to the URL. When making a HTTP PUT request, these two headers need to be specified: * `content-type: application/zip` * `x-goog-content-length-range: 0,104857600` And this header SHOULD NOT be specified: * `Authorization: Bearer YOUR_TOKEN`", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/functions:generateUploadUrl", // "httpMethod": "POST", // "id": "cloudfunctions.projects.locations.functions.generateUploadUrl", @@ -3113,7 +2724,7 @@ func (c *ProjectsLocationsFunctionsGenerateUploadUrlCall) Do(opts ...googleapi.C // ], // "parameters": { // "parent": { - // "description": "The project and location in which the Google Cloud Storage signed URL\nshould be generated, specified in the format `projects/*/locations/*`.", + // "description": "The project and location in which the Google Cloud Storage signed URL should be generated, specified in the format `projects/*/locations/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -3190,7 +2801,7 @@ func (c *ProjectsLocationsFunctionsGetCall) Header() http.Header { func (c *ProjectsLocationsFunctionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3290,11 +2901,9 @@ type ProjectsLocationsFunctionsGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the IAM access control policy for a -// function. +// GetIamPolicy: Gets the IAM access control policy for a function. // Returns an empty policy if the function exists and does not have a -// policy -// set. +// policy set. func (r *ProjectsLocationsFunctionsService) GetIamPolicy(resource string) *ProjectsLocationsFunctionsGetIamPolicyCall { c := &ProjectsLocationsFunctionsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -3303,24 +2912,14 @@ func (r *ProjectsLocationsFunctionsService) GetIamPolicy(resource string) *Proje // OptionsRequestedPolicyVersion sets the optional parameter // "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. 
-// -// Requests for policies with any conditional bindings must specify -// version 3. -// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. -// -// To learn which resources support conditions in their IAM policies, -// see -// the -// [IAM -// documentation](https://cloud.google.com/iam/help/conditions/r -// esource-policies). +// returned. Valid values are 0, 1, and 3. Requests specifying an +// invalid value will be rejected. Requests for policies with any +// conditional bindings must specify version 3. Policies without any +// conditional bindings may specify any valid value or leave the field +// unset. To learn which resources support conditions in their IAM +// policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). func (c *ProjectsLocationsFunctionsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsLocationsFunctionsGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -3363,7 +2962,7 @@ func (c *ProjectsLocationsFunctionsGetIamPolicyCall) Header() http.Header { func (c *ProjectsLocationsFunctionsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3425,7 +3024,7 @@ func (c *ProjectsLocationsFunctionsGetIamPolicyCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Gets the IAM access control policy for a function.\nReturns an empty policy if the function exists and does not have a policy\nset.", + // "description": "Gets the IAM access control policy for a function. Returns an empty policy if the function exists and does not have a policy set.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:getIamPolicy", // "httpMethod": "GET", // "id": "cloudfunctions.projects.locations.functions.getIamPolicy", @@ -3434,13 +3033,13 @@ func (c *ProjectsLocationsFunctionsGetIamPolicyCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + // "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. 
To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" // }, // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/functions/[^/]+$", // "required": true, @@ -3485,11 +3084,9 @@ func (c *ProjectsLocationsFunctionsListCall) PageSize(pageSize int64) *ProjectsL } // PageToken sets the optional parameter "pageToken": The value returned -// by the last -// `ListFunctionsResponse`; indicates that -// this is a continuation of a prior `ListFunctions` call, and that -// the -// system should return the next page of data. +// by the last `ListFunctionsResponse`; indicates that this is a +// continuation of a prior `ListFunctions` call, and that the system +// should return the next page of data. func (c *ProjectsLocationsFunctionsListCall) PageToken(pageToken string) *ProjectsLocationsFunctionsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -3532,7 +3129,7 @@ func (c *ProjectsLocationsFunctionsListCall) Header() http.Header { func (c *ProjectsLocationsFunctionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3609,12 +3206,12 @@ func (c *ProjectsLocationsFunctionsListCall) Do(opts ...googleapi.CallOption) (* // "type": "integer" // }, // "pageToken": { - // "description": "The value returned by the last\n`ListFunctionsResponse`; indicates that\nthis is a continuation of a prior `ListFunctions` call, and that the\nsystem should return the next page of data.", + // "description": "The value returned by the last `ListFunctionsResponse`; indicates that this is a continuation of a prior `ListFunctions` call, and that the system should return the next page of data.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "The project and location from which the function should be listed,\nspecified in the format `projects/*/locations/*`\nIf you want to list functions in all locations, use \"-\" in place of a\nlocation. When listing functions in all locations, if one or more\nlocation(s) are unreachable, the response will contain functions from all\nreachable locations along with the names of any unreachable locations.", + // "description": "The project and location from which the function should be listed, specified in the format `projects/*/locations/*` If you want to list functions in all locations, use \"-\" in place of a location. 
When listing functions in all locations, if one or more location(s) are unreachable, the response will contain functions from all reachable locations along with the names of any unreachable locations.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -3706,7 +3303,7 @@ func (c *ProjectsLocationsFunctionsPatchCall) Header() http.Header { func (c *ProjectsLocationsFunctionsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3779,7 +3376,7 @@ func (c *ProjectsLocationsFunctionsPatchCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "name": { - // "description": "A user-defined name of the function. Function names must be unique\nglobally and match pattern `projects/*/locations/*/functions/*`", + // "description": "A user-defined name of the function. Function names must be unique globally and match pattern `projects/*/locations/*/functions/*`", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/functions/[^/]+$", // "required": true, @@ -3818,8 +3415,7 @@ type ProjectsLocationsFunctionsSetIamPolicyCall struct { } // SetIamPolicy: Sets the IAM access control policy on the specified -// function. -// Replaces any existing policy. +// function. Replaces any existing policy. func (r *ProjectsLocationsFunctionsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsFunctionsSetIamPolicyCall { c := &ProjectsLocationsFunctionsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -3854,7 +3450,7 @@ func (c *ProjectsLocationsFunctionsSetIamPolicyCall) Header() http.Header { func (c *ProjectsLocationsFunctionsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3918,7 +3514,7 @@ func (c *ProjectsLocationsFunctionsSetIamPolicyCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Sets the IAM access control policy on the specified function.\nReplaces any existing policy.", + // "description": "Sets the IAM access control policy on the specified function. Replaces any existing policy.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:setIamPolicy", // "httpMethod": "POST", // "id": "cloudfunctions.projects.locations.functions.setIamPolicy", @@ -3927,7 +3523,7 @@ func (c *ProjectsLocationsFunctionsSetIamPolicyCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. 
See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/functions/[^/]+$", // "required": true, @@ -3960,11 +3556,8 @@ type ProjectsLocationsFunctionsTestIamPermissionsCall struct { } // TestIamPermissions: Tests the specified permissions against the IAM -// access control policy -// for a function. -// If the function does not exist, this will return an empty set -// of -// permissions, not a NOT_FOUND error. +// access control policy for a function. If the function does not exist, +// this will return an empty set of permissions, not a NOT_FOUND error. func (r *ProjectsLocationsFunctionsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsFunctionsTestIamPermissionsCall { c := &ProjectsLocationsFunctionsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -3999,7 +3592,7 @@ func (c *ProjectsLocationsFunctionsTestIamPermissionsCall) Header() http.Header func (c *ProjectsLocationsFunctionsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4063,7 +3656,7 @@ func (c *ProjectsLocationsFunctionsTestIamPermissionsCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Tests the specified permissions against the IAM access control policy\nfor a function.\nIf the function does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + // "description": "Tests the specified permissions against the IAM access control policy for a function. If the function does not exist, this will return an empty set of permissions, not a NOT_FOUND error.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:testIamPermissions", // "httpMethod": "POST", // "id": "cloudfunctions.projects.locations.functions.testIamPermissions", @@ -4072,7 +3665,7 @@ func (c *ProjectsLocationsFunctionsTestIamPermissionsCall) Do(opts ...googleapi. // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/functions/[^/]+$", // "required": true, diff --git a/vendor/google.golang.org/api/cloudiot/v1/cloudiot-api.json b/vendor/google.golang.org/api/cloudiot/v1/cloudiot-api.json index f78664c7395..72ce2310a5d 100644 --- a/vendor/google.golang.org/api/cloudiot/v1/cloudiot-api.json +++ b/vendor/google.golang.org/api/cloudiot/v1/cloudiot-api.json @@ -15,7 +15,7 @@ "baseUrl": "https://cloudiot.googleapis.com/", "batchPath": "batch", "canonicalName": "Cloud Iot", - "description": "Registers and manages IoT (Internet of Things) devices that connect to the Google Cloud Platform.\n", + "description": "Registers and manages IoT (Internet of Things) devices that connect to the Google Cloud Platform. 
", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/iot", "fullyEncodeReservedExpansion": true, @@ -124,7 +124,7 @@ ], "parameters": { "parent": { - "description": "Required. The name of the registry. For example,\n`projects/example-project/locations/us-central1/registries/my-registry`.", + "description": "Required. The name of the registry. For example, `projects/example-project/locations/us-central1/registries/my-registry`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", "required": true, @@ -153,7 +153,7 @@ ], "parameters": { "parent": { - "description": "Required. The project and cloud region where this device registry must be created.\nFor example, `projects/example-project/locations/us-central1`.", + "description": "Required. The project and cloud region where this device registry must be created. For example, `projects/example-project/locations/us-central1`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -182,7 +182,7 @@ ], "parameters": { "name": { - "description": "Required. The name of the device registry. For example,\n`projects/example-project/locations/us-central1/registries/my-registry`.", + "description": "Required. The name of the device registry. For example, `projects/example-project/locations/us-central1/registries/my-registry`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", "required": true, @@ -208,7 +208,7 @@ ], "parameters": { "name": { - "description": "Required. The name of the device registry. For example,\n`projects/example-project/locations/us-central1/registries/my-registry`.", + "description": "Required. The name of the device registry. For example, `projects/example-project/locations/us-central1/registries/my-registry`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", "required": true, @@ -225,7 +225,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}:getIamPolicy", "httpMethod": "POST", "id": "cloudiot.projects.locations.registries.getIamPolicy", @@ -234,7 +234,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", "required": true, @@ -263,18 +263,18 @@ ], "parameters": { "pageSize": { - "description": "The maximum number of registries to return in the response. If this value\nis zero, the service will select a default size. A call may return fewer\nobjects than requested. A non-empty `next_page_token` in the response\nindicates that more data is available.", + "description": "The maximum number of registries to return in the response. If this value is zero, the service will select a default size. A call may return fewer objects than requested. 
A non-empty `next_page_token` in the response indicates that more data is available.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "The value returned by the last `ListDeviceRegistriesResponse`; indicates\nthat this is a continuation of a prior `ListDeviceRegistries` call and\nthe system should return the next page of data.", + "description": "The value returned by the last `ListDeviceRegistriesResponse`; indicates that this is a continuation of a prior `ListDeviceRegistries` call and the system should return the next page of data.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The project and cloud region path. For example,\n`projects/example-project/locations/us-central1`.", + "description": "Required. The project and cloud region path. For example, `projects/example-project/locations/us-central1`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -300,14 +300,14 @@ ], "parameters": { "name": { - "description": "The resource path name. For example,\n`projects/example-project/locations/us-central1/registries/my-registry`.", + "description": "The resource path name. For example, `projects/example-project/locations/us-central1/registries/my-registry`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", "required": true, "type": "string" }, "updateMask": { - "description": "Required. Only updates the `device_registry` fields indicated by this mask.\nThe field mask must not be empty, and it must not contain fields that\nare immutable or only set by the server.\nMutable top-level fields: `event_notification_config`, `http_config`,\n`mqtt_config`, and `state_notification_config`.", + "description": "Required. Only updates the `device_registry` fields indicated by this mask. The field mask must not be empty, and it must not contain fields that are immutable or only set by the server. Mutable top-level fields: `event_notification_config`, `http_config`, `mqtt_config`, and `state_notification_config`.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -326,7 +326,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}:setIamPolicy", "httpMethod": "POST", "id": "cloudiot.projects.locations.registries.setIamPolicy", @@ -335,7 +335,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", "required": true, @@ -355,7 +355,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + "description": "Returns permissions that a caller has on the specified resource. 
If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}:testIamPermissions", "httpMethod": "POST", "id": "cloudiot.projects.locations.registries.testIamPermissions", @@ -364,7 +364,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", "required": true, @@ -393,7 +393,7 @@ ], "parameters": { "parent": { - "description": "Required. The name of the registry. For example,\n`projects/example-project/locations/us-central1/registries/my-registry`.", + "description": "Required. The name of the registry. For example, `projects/example-project/locations/us-central1/registries/my-registry`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", "required": true, @@ -426,7 +426,7 @@ ], "parameters": { "parent": { - "description": "Required. The name of the device registry where this device should be created.\nFor example,\n`projects/example-project/locations/us-central1/registries/my-registry`.", + "description": "Required. The name of the device registry where this device should be created. For example, `projects/example-project/locations/us-central1/registries/my-registry`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", "required": true, @@ -455,7 +455,7 @@ ], "parameters": { "name": { - "description": "Required. The name of the device. For example,\n`projects/p0/locations/us-central1/registries/registry0/devices/device0` or\n`projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", + "description": "Required. The name of the device. For example, `projects/p0/locations/us-central1/registries/registry0/devices/device0` or `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/devices/[^/]+$", "required": true, @@ -481,13 +481,13 @@ ], "parameters": { "fieldMask": { - "description": "The fields of the `Device` resource to be returned in the response. If the\nfield mask is unset or empty, all fields are returned.", + "description": "The fields of the `Device` resource to be returned in the response. If the field mask is unset or empty, all fields are returned.", "format": "google-fieldmask", "location": "query", "type": "string" }, "name": { - "description": "Required. The name of the device. For example,\n`projects/p0/locations/us-central1/registries/registry0/devices/device0` or\n`projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", + "description": "Required. The name of the device. For example, `projects/p0/locations/us-central1/registries/registry0/devices/device0` or `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/devices/[^/]+$", "required": true, @@ -513,57 +513,62 @@ ], "parameters": { "deviceIds": { - "description": "A list of device string IDs. For example, `['device0', 'device12']`.\nIf empty, this field is ignored. 
Maximum IDs: 10,000", + "description": "A list of device string IDs. For example, `['device0', 'device12']`. If empty, this field is ignored. Maximum IDs: 10,000", "location": "query", "repeated": true, "type": "string" }, "deviceNumIds": { - "description": "A list of device numeric IDs. If empty, this field is ignored. Maximum\nIDs: 10,000.", + "description": "A list of device numeric IDs. If empty, this field is ignored. Maximum IDs: 10,000.", "format": "uint64", "location": "query", "repeated": true, "type": "string" }, "fieldMask": { - "description": "The fields of the `Device` resource to be returned in the response. The\nfields `id` and `num_id` are always returned, along with any\nother fields specified.", + "description": "The fields of the `Device` resource to be returned in the response. The fields `id` and `num_id` are always returned, along with any other fields specified.", "format": "google-fieldmask", "location": "query", "type": "string" }, "gatewayListOptions.associationsDeviceId": { - "description": "If set, returns only the gateways with which the specified device is\nassociated. The device ID can be numeric (`num_id`) or the user-defined\nstring (`id`). For example, if `456` is specified, returns only the\ngateways to which the device with `num_id` 456 is bound.", + "description": "If set, returns only the gateways with which the specified device is associated. The device ID can be numeric (`num_id`) or the user-defined string (`id`). For example, if `456` is specified, returns only the gateways to which the device with `num_id` 456 is bound.", "location": "query", "type": "string" }, "gatewayListOptions.associationsGatewayId": { - "description": "If set, only devices associated with the specified gateway are returned.\nThe gateway ID can be numeric (`num_id`) or the user-defined string\n(`id`). For example, if `123` is specified, only devices bound to the\ngateway with `num_id` 123 are returned.", + "description": "If set, only devices associated with the specified gateway are returned. The gateway ID can be numeric (`num_id`) or the user-defined string (`id`). For example, if `123` is specified, only devices bound to the gateway with `num_id` 123 are returned.", "location": "query", "type": "string" }, "gatewayListOptions.gatewayType": { - "description": "If `GATEWAY` is specified, only gateways are returned. If `NON_GATEWAY`\nis specified, only non-gateway devices are returned. If\n`GATEWAY_TYPE_UNSPECIFIED` is specified, all devices are returned.", + "description": "If `GATEWAY` is specified, only gateways are returned. If `NON_GATEWAY` is specified, only non-gateway devices are returned. If `GATEWAY_TYPE_UNSPECIFIED` is specified, all devices are returned.", "enum": [ "GATEWAY_TYPE_UNSPECIFIED", "GATEWAY", "NON_GATEWAY" ], + "enumDescriptions": [ + "If unspecified, the device is considered a non-gateway device.", + "The device is a gateway.", + "The device is not a gateway." + ], "location": "query", "type": "string" }, "pageSize": { - "description": "The maximum number of devices to return in the response. If this value\nis zero, the service will select a default size. A call may return fewer\nobjects than requested. A non-empty `next_page_token` in the response\nindicates that more data is available.", + "description": "The maximum number of devices to return in the response. If this value is zero, the service will select a default size. A call may return fewer objects than requested. 
A non-empty `next_page_token` in the response indicates that more data is available.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "The value returned by the last `ListDevicesResponse`; indicates\nthat this is a continuation of a prior `ListDevices` call and\nthe system should return the next page of data.", + "description": "The value returned by the last `ListDevicesResponse`; indicates that this is a continuation of a prior `ListDevices` call and the system should return the next page of data.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The device registry path. Required. For example,\n`projects/my-project/locations/us-central1/registries/my-registry`.", + "description": "Required. The device registry path. Required. For example, `projects/my-project/locations/us-central1/registries/my-registry`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", "required": true, @@ -580,7 +585,7 @@ ] }, "modifyCloudToDeviceConfig": { - "description": "Modifies the configuration for the device, which is eventually sent from\nthe Cloud IoT Core servers. Returns the modified configuration version and\nits metadata.", + "description": "Modifies the configuration for the device, which is eventually sent from the Cloud IoT Core servers. Returns the modified configuration version and its metadata.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}:modifyCloudToDeviceConfig", "httpMethod": "POST", "id": "cloudiot.projects.locations.registries.devices.modifyCloudToDeviceConfig", @@ -589,7 +594,7 @@ ], "parameters": { "name": { - "description": "Required. The name of the device. For example,\n`projects/p0/locations/us-central1/registries/registry0/devices/device0` or\n`projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", + "description": "Required. The name of the device. For example, `projects/p0/locations/us-central1/registries/registry0/devices/device0` or `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/devices/[^/]+$", "required": true, @@ -618,14 +623,14 @@ ], "parameters": { "name": { - "description": "The resource path name. For example,\n`projects/p1/locations/us-central1/registries/registry0/devices/dev0` or\n`projects/p1/locations/us-central1/registries/registry0/devices/{num_id}`.\nWhen `name` is populated as a response from the service, it always ends\nin the device numeric ID.", + "description": "The resource path name. For example, `projects/p1/locations/us-central1/registries/registry0/devices/dev0` or `projects/p1/locations/us-central1/registries/registry0/devices/{num_id}`. When `name` is populated as a response from the service, it always ends in the device numeric ID.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/devices/[^/]+$", "required": true, "type": "string" }, "updateMask": { - "description": "Required. Only updates the `device` fields indicated by this mask.\nThe field mask must not be empty, and it must not contain fields that\nare immutable or only set by the server.\nMutable top-level fields: `credentials`, `blocked`, and `metadata`", + "description": "Required. Only updates the `device` fields indicated by this mask. The field mask must not be empty, and it must not contain fields that are immutable or only set by the server. 
Mutable top-level fields: `credentials`, `blocked`, and `metadata`", "format": "google-fieldmask", "location": "query", "type": "string" @@ -644,7 +649,7 @@ ] }, "sendCommandToDevice": { - "description": "Sends a command to the specified device. In order for a device to be able\nto receive commands, it must:\n1) be connected to Cloud IoT Core using the MQTT protocol, and\n2) be subscribed to the group of MQTT topics specified by\n /devices/{device-id}/commands/#. This subscription will receive commands\n at the top-level topic /devices/{device-id}/commands as well as commands\n for subfolders, like /devices/{device-id}/commands/subfolder.\n Note that subscribing to specific subfolders is not supported.\nIf the command could not be delivered to the device, this method will\nreturn an error; in particular, if the device is not subscribed, this\nmethod will return FAILED_PRECONDITION. Otherwise, this method will\nreturn OK. If the subscription is QoS 1, at least once delivery will be\nguaranteed; for QoS 0, no acknowledgment will be expected from the device.", + "description": "Sends a command to the specified device. In order for a device to be able to receive commands, it must: 1) be connected to Cloud IoT Core using the MQTT protocol, and 2) be subscribed to the group of MQTT topics specified by /devices/{device-id}/commands/#. This subscription will receive commands at the top-level topic /devices/{device-id}/commands as well as commands for subfolders, like /devices/{device-id}/commands/subfolder. Note that subscribing to specific subfolders is not supported. If the command could not be delivered to the device, this method will return an error; in particular, if the device is not subscribed, this method will return FAILED_PRECONDITION. Otherwise, this method will return OK. If the subscription is QoS 1, at least once delivery will be guaranteed; for QoS 0, no acknowledgment will be expected from the device.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}:sendCommandToDevice", "httpMethod": "POST", "id": "cloudiot.projects.locations.registries.devices.sendCommandToDevice", @@ -653,7 +658,7 @@ ], "parameters": { "name": { - "description": "Required. The name of the device. For example,\n`projects/p0/locations/us-central1/registries/registry0/devices/device0` or\n`projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", + "description": "Required. The name of the device. For example, `projects/p0/locations/us-central1/registries/registry0/devices/device0` or `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/devices/[^/]+$", "required": true, @@ -677,7 +682,7 @@ "configVersions": { "methods": { "list": { - "description": "Lists the last few versions of the device configuration in descending\norder (i.e.: newest first).", + "description": "Lists the last few versions of the device configuration in descending order (i.e.: newest first).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}/configVersions", "httpMethod": "GET", "id": "cloudiot.projects.locations.registries.devices.configVersions.list", @@ -686,14 +691,14 @@ ], "parameters": { "name": { - "description": "Required. The name of the device. 
For example,\n`projects/p0/locations/us-central1/registries/registry0/devices/device0` or\n`projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", + "description": "Required. The name of the device. For example, `projects/p0/locations/us-central1/registries/registry0/devices/device0` or `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/devices/[^/]+$", "required": true, "type": "string" }, "numVersions": { - "description": "The number of versions to list. Versions are listed in decreasing order of\nthe version number. The maximum number of versions retained is 10. If this\nvalue is zero, it will return all the versions available.", + "description": "The number of versions to list. Versions are listed in decreasing order of the version number. The maximum number of versions retained is 10. If this value is zero, it will return all the versions available.", "format": "int32", "location": "query", "type": "integer" @@ -713,7 +718,7 @@ "states": { "methods": { "list": { - "description": "Lists the last few versions of the device state in descending order (i.e.:\nnewest first).", + "description": "Lists the last few versions of the device state in descending order (i.e.: newest first).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}/states", "httpMethod": "GET", "id": "cloudiot.projects.locations.registries.devices.states.list", @@ -722,14 +727,14 @@ ], "parameters": { "name": { - "description": "Required. The name of the device. For example,\n`projects/p0/locations/us-central1/registries/registry0/devices/device0` or\n`projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", + "description": "Required. The name of the device. For example, `projects/p0/locations/us-central1/registries/registry0/devices/device0` or `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/devices/[^/]+$", "required": true, "type": "string" }, "numStates": { - "description": "The number of states to list. States are listed in descending order of\nupdate time. The maximum number of states retained is 10. If this\nvalue is zero, it will return all the states available.", + "description": "The number of states to list. States are listed in descending order of update time. The maximum number of states retained is 10. If this value is zero, it will return all the states available.", "format": "int32", "location": "query", "type": "integer" @@ -751,7 +756,7 @@ "groups": { "methods": { "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/groups/{groupsId}:getIamPolicy", "httpMethod": "POST", "id": "cloudiot.projects.locations.registries.groups.getIamPolicy", @@ -760,7 +765,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. 
See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/groups/[^/]+$", "required": true, @@ -780,7 +785,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/groups/{groupsId}:setIamPolicy", "httpMethod": "POST", "id": "cloudiot.projects.locations.registries.groups.setIamPolicy", @@ -789,7 +794,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/groups/[^/]+$", "required": true, @@ -809,7 +814,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/groups/{groupsId}:testIamPermissions", "httpMethod": "POST", "id": "cloudiot.projects.locations.registries.groups.testIamPermissions", @@ -818,7 +823,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/groups/[^/]+$", "required": true, @@ -851,57 +856,62 @@ ], "parameters": { "deviceIds": { - "description": "A list of device string IDs. For example, `['device0', 'device12']`.\nIf empty, this field is ignored. Maximum IDs: 10,000", + "description": "A list of device string IDs. For example, `['device0', 'device12']`. If empty, this field is ignored. Maximum IDs: 10,000", "location": "query", "repeated": true, "type": "string" }, "deviceNumIds": { - "description": "A list of device numeric IDs. If empty, this field is ignored. Maximum\nIDs: 10,000.", + "description": "A list of device numeric IDs. If empty, this field is ignored. Maximum IDs: 10,000.", "format": "uint64", "location": "query", "repeated": true, "type": "string" }, "fieldMask": { - "description": "The fields of the `Device` resource to be returned in the response. The\nfields `id` and `num_id` are always returned, along with any\nother fields specified.", + "description": "The fields of the `Device` resource to be returned in the response. 
The fields `id` and `num_id` are always returned, along with any other fields specified.", "format": "google-fieldmask", "location": "query", "type": "string" }, "gatewayListOptions.associationsDeviceId": { - "description": "If set, returns only the gateways with which the specified device is\nassociated. The device ID can be numeric (`num_id`) or the user-defined\nstring (`id`). For example, if `456` is specified, returns only the\ngateways to which the device with `num_id` 456 is bound.", + "description": "If set, returns only the gateways with which the specified device is associated. The device ID can be numeric (`num_id`) or the user-defined string (`id`). For example, if `456` is specified, returns only the gateways to which the device with `num_id` 456 is bound.", "location": "query", "type": "string" }, "gatewayListOptions.associationsGatewayId": { - "description": "If set, only devices associated with the specified gateway are returned.\nThe gateway ID can be numeric (`num_id`) or the user-defined string\n(`id`). For example, if `123` is specified, only devices bound to the\ngateway with `num_id` 123 are returned.", + "description": "If set, only devices associated with the specified gateway are returned. The gateway ID can be numeric (`num_id`) or the user-defined string (`id`). For example, if `123` is specified, only devices bound to the gateway with `num_id` 123 are returned.", "location": "query", "type": "string" }, "gatewayListOptions.gatewayType": { - "description": "If `GATEWAY` is specified, only gateways are returned. If `NON_GATEWAY`\nis specified, only non-gateway devices are returned. If\n`GATEWAY_TYPE_UNSPECIFIED` is specified, all devices are returned.", + "description": "If `GATEWAY` is specified, only gateways are returned. If `NON_GATEWAY` is specified, only non-gateway devices are returned. If `GATEWAY_TYPE_UNSPECIFIED` is specified, all devices are returned.", "enum": [ "GATEWAY_TYPE_UNSPECIFIED", "GATEWAY", "NON_GATEWAY" ], + "enumDescriptions": [ + "If unspecified, the device is considered a non-gateway device.", + "The device is a gateway.", + "The device is not a gateway." + ], "location": "query", "type": "string" }, "pageSize": { - "description": "The maximum number of devices to return in the response. If this value\nis zero, the service will select a default size. A call may return fewer\nobjects than requested. A non-empty `next_page_token` in the response\nindicates that more data is available.", + "description": "The maximum number of devices to return in the response. If this value is zero, the service will select a default size. A call may return fewer objects than requested. A non-empty `next_page_token` in the response indicates that more data is available.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "The value returned by the last `ListDevicesResponse`; indicates\nthat this is a continuation of a prior `ListDevices` call and\nthe system should return the next page of data.", + "description": "The value returned by the last `ListDevicesResponse`; indicates that this is a continuation of a prior `ListDevices` call and the system should return the next page of data.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The device registry path. Required. For example,\n`projects/my-project/locations/us-central1/registries/my-registry`.", + "description": "Required. The device registry path. Required. 
For example, `projects/my-project/locations/us-central1/registries/my-registry`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/groups/[^/]+$", "required": true, @@ -928,7 +938,7 @@ } } }, - "revision": "20200414", + "revision": "20200901", "rootUrl": "https://cloudiot.googleapis.com/", "schemas": { "BindDeviceToGatewayRequest": { @@ -936,11 +946,11 @@ "id": "BindDeviceToGatewayRequest", "properties": { "deviceId": { - "description": "Required. The device to associate with the specified gateway. The value of\n`device_id` can be either the device numeric ID or the user-defined device\nidentifier.", + "description": "Required. The device to associate with the specified gateway. The value of `device_id` can be either the device numeric ID or the user-defined device identifier.", "type": "string" }, "gatewayId": { - "description": "Required. The value of `gateway_id` can be either the device numeric ID or the\nuser-defined device identifier.", + "description": "Required. The value of `gateway_id` can be either the device numeric ID or the user-defined device identifier.", "type": "string" } }, @@ -956,19 +966,23 @@ "description": "Associates `members` with a `role`.", "id": "Binding", "properties": { + "bindingId": { + "description": "A client-specified ID for this binding. Expected to be globally unique to support the internal bindings-by-ID API.", + "type": "string" + }, "condition": { "$ref": "Expr", - "description": "The condition that is associated with this binding.\nNOTE: An unsatisfied condition will not allow user access via current\nbinding. Different bindings, including their conditions, are examined\nindependently." + "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a user that has been recently deleted. For\n example, `alice@example.com?uid=123456789012345678901`. If the user is\n recovered, this value reverts to `user:{emailid}` and the recovered user\n retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus\n unique identifier) representing a service account that has been recently\n deleted. 
For example,\n `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.\n If the service account is undeleted, this value reverts to\n `serviceAccount:{emailid}` and the undeleted service account retains the\n role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a Google group that has been recently\n deleted. For example, `admins@example.com?uid=123456789012345678901`. If\n the group is recovered, this value reverts to `group:{emailid}` and the\n recovered group retains the role in the binding.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", "items": { "type": "string" }, "type": "array" }, "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", "type": "string" } }, @@ -979,15 +993,15 @@ "id": "Device", "properties": { "blocked": { - "description": "If a device is blocked, connections or requests from this device will fail.\nCan be used to temporarily prevent the device from connecting if, for\nexample, the sensor is generating bad data and needs maintenance.", + "description": "If a device is blocked, connections or requests from this device will fail. 
Can be used to temporarily prevent the device from connecting if, for example, the sensor is generating bad data and needs maintenance.", "type": "boolean" }, "config": { "$ref": "DeviceConfig", - "description": "The most recent device configuration, which is eventually sent from\nCloud IoT Core to the device. If not present on creation, the\nconfiguration will be initialized with an empty payload and version value\nof `1`. To update this field after creation, use the\n`DeviceManager.ModifyCloudToDeviceConfig` method." + "description": "The most recent device configuration, which is eventually sent from Cloud IoT Core to the device. If not present on creation, the configuration will be initialized with an empty payload and version value of `1`. To update this field after creation, use the `DeviceManager.ModifyCloudToDeviceConfig` method." }, "credentials": { - "description": "The credentials used to authenticate this device. To allow credential\nrotation without interruption, multiple device credentials can be bound to\nthis device. No more than 3 credentials can be bound to a single device at\na time. When new credentials are added to a device, they are verified\nagainst the registry credentials. For details, see the description of the\n`DeviceRegistry.credentials` field.", + "description": "The credentials used to authenticate this device. To allow credential rotation without interruption, multiple device credentials can be bound to this device. No more than 3 credentials can be bound to a single device at a time. When new credentials are added to a device, they are verified against the registry credentials. For details, see the description of the `DeviceRegistry.credentials` field.", "items": { "$ref": "DeviceCredential" }, @@ -998,45 +1012,45 @@ "description": "Gateway-related configuration and state." }, "id": { - "description": "The user-defined device identifier. The device ID must be unique\nwithin a device registry.", + "description": "The user-defined device identifier. The device ID must be unique within a device registry.", "type": "string" }, "lastConfigAckTime": { - "description": "[Output only] The last time a cloud-to-device config version acknowledgment\nwas received from the device. This field is only for configurations\nsent through MQTT.", + "description": "[Output only] The last time a cloud-to-device config version acknowledgment was received from the device. This field is only for configurations sent through MQTT.", "format": "google-datetime", "type": "string" }, "lastConfigSendTime": { - "description": "[Output only] The last time a cloud-to-device config version was sent to\nthe device.", + "description": "[Output only] The last time a cloud-to-device config version was sent to the device.", "format": "google-datetime", "type": "string" }, "lastErrorStatus": { "$ref": "Status", - "description": "[Output only] The error message of the most recent error, such as a failure\nto publish to Cloud Pub/Sub. 'last_error_time' is the timestamp of this\nfield. If no errors have occurred, this field has an empty message\nand the status code 0 == OK. Otherwise, this field is expected to have a\nstatus code other than OK." + "description": "[Output only] The error message of the most recent error, such as a failure to publish to Cloud Pub/Sub. 'last_error_time' is the timestamp of this field. If no errors have occurred, this field has an empty message and the status code 0 == OK. Otherwise, this field is expected to have a status code other than OK." 
}, "lastErrorTime": { - "description": "[Output only] The time the most recent error occurred, such as a failure to\npublish to Cloud Pub/Sub. This field is the timestamp of\n'last_error_status'.", + "description": "[Output only] The time the most recent error occurred, such as a failure to publish to Cloud Pub/Sub. This field is the timestamp of 'last_error_status'.", "format": "google-datetime", "type": "string" }, "lastEventTime": { - "description": "[Output only] The last time a telemetry event was received. Timestamps are\nperiodically collected and written to storage; they may be stale by a few\nminutes.", + "description": "[Output only] The last time a telemetry event was received. Timestamps are periodically collected and written to storage; they may be stale by a few minutes.", "format": "google-datetime", "type": "string" }, "lastHeartbeatTime": { - "description": "[Output only] The last time an MQTT `PINGREQ` was received. This field\napplies only to devices connecting through MQTT. MQTT clients usually only\nsend `PINGREQ` messages if the connection is idle, and no other messages\nhave been sent. Timestamps are periodically collected and written to\nstorage; they may be stale by a few minutes.", + "description": "[Output only] The last time an MQTT `PINGREQ` was received. This field applies only to devices connecting through MQTT. MQTT clients usually only send `PINGREQ` messages if the connection is idle, and no other messages have been sent. Timestamps are periodically collected and written to storage; they may be stale by a few minutes.", "format": "google-datetime", "type": "string" }, "lastStateTime": { - "description": "[Output only] The last time a state event was received. Timestamps are\nperiodically collected and written to storage; they may be stale by a few\nminutes.", + "description": "[Output only] The last time a state event was received. Timestamps are periodically collected and written to storage; they may be stale by a few minutes.", "format": "google-datetime", "type": "string" }, "logLevel": { - "description": "**Beta Feature**\n\nThe logging verbosity for device activity. If unspecified,\nDeviceRegistry.log_level will be used.", + "description": "**Beta Feature** The logging verbosity for device activity. If unspecified, DeviceRegistry.log_level will be used.", "enum": [ "LOG_LEVEL_UNSPECIFIED", "NONE", @@ -1048,7 +1062,7 @@ "No logging specified. If not specified, logging will be disabled.", "Disables logging.", "Error events will be logged.", - "Informational events will be logged, such as connections and\ndisconnections.", + "Informational events will be logged, such as connections and disconnections.", "All events will be logged." ], "type": "string" @@ -1057,21 +1071,21 @@ "additionalProperties": { "type": "string" }, - "description": "The metadata key-value pairs assigned to the device. This metadata is not\ninterpreted or indexed by Cloud IoT Core. It can be used to add contextual\ninformation for the device.\n\nKeys must conform to the regular expression a-zA-Z+ and\nbe less than 128 bytes in length.\n\nValues are free-form strings. Each value must be less than or equal to 32\nKB in size.\n\nThe total size of all keys and values must be less than 256 KB, and the\nmaximum number of key-value pairs is 500.", + "description": "The metadata key-value pairs assigned to the device. This metadata is not interpreted or indexed by Cloud IoT Core. It can be used to add contextual information for the device. 
Keys must conform to the regular expression a-zA-Z+ and be less than 128 bytes in length. Values are free-form strings. Each value must be less than or equal to 32 KB in size. The total size of all keys and values must be less than 256 KB, and the maximum number of key-value pairs is 500.", "type": "object" }, "name": { - "description": "The resource path name. For example,\n`projects/p1/locations/us-central1/registries/registry0/devices/dev0` or\n`projects/p1/locations/us-central1/registries/registry0/devices/{num_id}`.\nWhen `name` is populated as a response from the service, it always ends\nin the device numeric ID.", + "description": "The resource path name. For example, `projects/p1/locations/us-central1/registries/registry0/devices/dev0` or `projects/p1/locations/us-central1/registries/registry0/devices/{num_id}`. When `name` is populated as a response from the service, it always ends in the device numeric ID.", "type": "string" }, "numId": { - "description": "[Output only] A server-defined unique numeric ID for the device. This is a\nmore compact way to identify devices, and it is globally unique.", + "description": "[Output only] A server-defined unique numeric ID for the device. This is a more compact way to identify devices, and it is globally unique.", "format": "uint64", "type": "string" }, "state": { "$ref": "DeviceState", - "description": "[Output only] The state most recently received from the device. If no state\nhas been reported, this field is not present." + "description": "[Output only] The state most recently received from the device. If no state has been reported, this field is not present." } }, "type": "object" @@ -1086,17 +1100,17 @@ "type": "string" }, "cloudUpdateTime": { - "description": "[Output only] The time at which this configuration version was updated in\nCloud IoT Core. This timestamp is set by the server.", + "description": "[Output only] The time at which this configuration version was updated in Cloud IoT Core. This timestamp is set by the server.", "format": "google-datetime", "type": "string" }, "deviceAckTime": { - "description": "[Output only] The time at which Cloud IoT Core received the\nacknowledgment from the device, indicating that the device has received\nthis configuration version. If this field is not present, the device has\nnot yet acknowledged that it received this version. Note that when\nthe config was sent to the device, many config versions may have been\navailable in Cloud IoT Core while the device was disconnected, and on\nconnection, only the latest version is sent to the device. Some\nversions may never be sent to the device, and therefore are never\nacknowledged. This timestamp is set by Cloud IoT Core.", + "description": "[Output only] The time at which Cloud IoT Core received the acknowledgment from the device, indicating that the device has received this configuration version. If this field is not present, the device has not yet acknowledged that it received this version. Note that when the config was sent to the device, many config versions may have been available in Cloud IoT Core while the device was disconnected, and on connection, only the latest version is sent to the device. Some versions may never be sent to the device, and therefore are never acknowledged. This timestamp is set by Cloud IoT Core.", "format": "google-datetime", "type": "string" }, "version": { - "description": "[Output only] The version of this update. The version number is assigned by\nthe server, and is always greater than 0 after device creation. 
The\nversion must be 0 on the `CreateDevice` request if a `config` is\nspecified; the response of `CreateDevice` will always have a value of 1.", + "description": "[Output only] The version of this update. The version number is assigned by the server, and is always greater than 0 after device creation. The version must be 0 on the `CreateDevice` request if a `config` is specified; the response of `CreateDevice` will always have a value of 1.", "format": "int64", "type": "string" } @@ -1108,13 +1122,13 @@ "id": "DeviceCredential", "properties": { "expirationTime": { - "description": "[Optional] The time at which this credential becomes invalid. This\ncredential will be ignored for new client authentication requests after\nthis timestamp; however, it will not be automatically deleted.", + "description": "[Optional] The time at which this credential becomes invalid. This credential will be ignored for new client authentication requests after this timestamp; however, it will not be automatically deleted.", "format": "google-datetime", "type": "string" }, "publicKey": { "$ref": "PublicKeyCredential", - "description": "A public key used to verify the signature of JSON Web Tokens (JWTs).\nWhen adding a new device credential, either via device creation or via\nmodifications, this public key credential may be required to be signed by\none of the registry level certificates. More specifically, if the\nregistry contains at least one certificate, any new device credential\nmust be signed by one of the registry certificates. As a result,\nwhen the registry contains certificates, only X.509 certificates are\naccepted as device credentials. However, if the registry does\nnot contain a certificate, self-signed certificates and public keys will\nbe accepted. New device credentials must be different from every\nregistry-level certificate." + "description": "A public key used to verify the signature of JSON Web Tokens (JWTs). When adding a new device credential, either via device creation or via modifications, this public key credential may be required to be signed by one of the registry level certificates. More specifically, if the registry contains at least one certificate, any new device credential must be signed by one of the registry certificates. As a result, when the registry contains certificates, only X.509 certificates are accepted as device credentials. However, if the registry does not contain a certificate, self-signed certificates and public keys will be accepted. New device credentials must be different from every registry-level certificate." } }, "type": "object" @@ -1124,14 +1138,14 @@ "id": "DeviceRegistry", "properties": { "credentials": { - "description": "The credentials used to verify the device credentials. No more than 10\ncredentials can be bound to a single registry at a time. The verification\nprocess occurs at the time of device creation or update. If this field is\nempty, no verification is performed. Otherwise, the credentials of a newly\ncreated device or added credentials of an updated device should be signed\nwith one of these registry credentials.\n\nNote, however, that existing devices will never be affected by\nmodifications to this list of credentials: after a device has been\nsuccessfully created in a registry, it should be able to connect even if\nits registry credentials are revoked, deleted, or modified.", + "description": "The credentials used to verify the device credentials. No more than 10 credentials can be bound to a single registry at a time. 
The verification process occurs at the time of device creation or update. If this field is empty, no verification is performed. Otherwise, the credentials of a newly created device or added credentials of an updated device should be signed with one of these registry credentials. Note, however, that existing devices will never be affected by modifications to this list of credentials: after a device has been successfully created in a registry, it should be able to connect even if its registry credentials are revoked, deleted, or modified.", "items": { "$ref": "RegistryCredential" }, "type": "array" }, "eventNotificationConfigs": { - "description": "The configuration for notification of telemetry events received from the\ndevice. All telemetry events that were successfully published by the\ndevice and acknowledged by Cloud IoT Core are guaranteed to be\ndelivered to Cloud Pub/Sub. If multiple configurations match a message,\nonly the first matching configuration is used. If you try to publish a\ndevice telemetry event using MQTT without specifying a Cloud Pub/Sub topic\nfor the device's registry, the connection closes automatically. If you try\nto do so using an HTTP connection, an error is returned. Up to 10\nconfigurations may be provided.", + "description": "The configuration for notification of telemetry events received from the device. All telemetry events that were successfully published by the device and acknowledged by Cloud IoT Core are guaranteed to be delivered to Cloud Pub/Sub. If multiple configurations match a message, only the first matching configuration is used. If you try to publish a device telemetry event using MQTT without specifying a Cloud Pub/Sub topic for the device's registry, the connection closes automatically. If you try to do so using an HTTP connection, an error is returned. Up to 10 configurations may be provided.", "items": { "$ref": "EventNotificationConfig" }, @@ -1146,7 +1160,7 @@ "type": "string" }, "logLevel": { - "description": "**Beta Feature**\n\nThe default logging verbosity for activity from devices in this registry.\nThe verbosity level can be overridden by Device.log_level.", + "description": "**Beta Feature** The default logging verbosity for activity from devices in this registry. The verbosity level can be overridden by Device.log_level.", "enum": [ "LOG_LEVEL_UNSPECIFIED", "NONE", @@ -1158,7 +1172,7 @@ "No logging specified. If not specified, logging will be disabled.", "Disables logging.", "Error events will be logged.", - "Informational events will be logged, such as connections and\ndisconnections.", + "Informational events will be logged, such as connections and disconnections.", "All events will be logged." ], "type": "string" @@ -1168,12 +1182,12 @@ "description": "The MQTT configuration for this device registry." }, "name": { - "description": "The resource path name. For example,\n`projects/example-project/locations/us-central1/registries/my-registry`.", + "description": "The resource path name. For example, `projects/example-project/locations/us-central1/registries/my-registry`.", "type": "string" }, "stateNotificationConfig": { "$ref": "StateNotificationConfig", - "description": "The configuration for notification of new states received from the device.\nState updates are guaranteed to be stored in the state history, but\nnotifications to Cloud Pub/Sub are not guaranteed. 
For example, if\npermissions are misconfigured or the specified topic doesn't exist, no\nnotification will be published but the state will still be stored in Cloud\nIoT Core." + "description": "The configuration for notification of new states received from the device. State updates are guaranteed to be stored in the state history, but notifications to Cloud Pub/Sub are not guaranteed. For example, if permissions are misconfigured or the specified topic doesn't exist, no notification will be published but the state will still be stored in Cloud IoT Core." } }, "type": "object" @@ -1188,7 +1202,7 @@ "type": "string" }, "updateTime": { - "description": "[Output only] The time at which this state version was updated in Cloud\nIoT Core.", + "description": "[Output only] The time at which this state version was updated in Cloud IoT Core.", "format": "google-datetime", "type": "string" } @@ -1196,7 +1210,7 @@ "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" @@ -1206,34 +1220,34 @@ "id": "EventNotificationConfig", "properties": { "pubsubTopicName": { - "description": "A Cloud Pub/Sub topic name. For example,\n`projects/myProject/topics/deviceEvents`.", + "description": "A Cloud Pub/Sub topic name. For example, `projects/myProject/topics/deviceEvents`.", "type": "string" }, "subfolderMatches": { - "description": "If the subfolder name matches this string exactly, this configuration will\nbe used. The string must not include the leading '/' character. If empty,\nall strings are matched. This field is used only for telemetry events;\nsubfolders are not supported for state changes.", + "description": "If the subfolder name matches this string exactly, this configuration will be used. The string must not include the leading '/' character. If empty, all strings are matched. This field is used only for telemetry events; subfolders are not supported for state changes.", "type": "string" } }, "type": "object" }, "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL)\nsyntax. CEL is a C-like expression language. 
The syntax and semantics of CEL\nare documented at https://github.com/google/cel-spec.\n\nExample (Comparison):\n\n title: \"Summary size limit\"\n description: \"Determines if a summary is less than 100 chars\"\n expression: \"document.summary.size() \u003c 100\"\n\nExample (Equality):\n\n title: \"Requestor is owner\"\n description: \"Determines if requestor is the document owner\"\n expression: \"document.owner == request.auth.claims.email\"\n\nExample (Logic):\n\n title: \"Public documents\"\n description: \"Determine whether the document should be publicly visible\"\n expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\n\nExample (Data Manipulation):\n\n title: \"Notification string\"\n description: \"Create a notification string with a timestamp.\"\n expression: \"'New message received at ' + string(document.create_time)\"\n\nThe exact variables and functions that may be referenced within an expression\nare determined by the service that evaluates it. See the service\ndocumentation for additional information.", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { - "description": "Optional. Description of the expression. This is a longer text which\ndescribes the expression, e.g. when hovered over it in a UI.", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, "expression": { - "description": "Textual representation of an expression in Common Expression Language\nsyntax.", + "description": "Textual representation of an expression in Common Expression Language syntax.", "type": "string" }, "location": { - "description": "Optional. String indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", "type": "string" }, "title": { - "description": "Optional. Title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. 
in UIs which allow to enter the expression.", "type": "string" } }, @@ -1244,7 +1258,7 @@ "id": "GatewayConfig", "properties": { "gatewayAuthMethod": { - "description": "Indicates how to authorize and/or authenticate devices to access the\ngateway.", + "description": "Indicates how to authorize and/or authenticate devices to access the gateway.", "enum": [ "GATEWAY_AUTH_METHOD_UNSPECIFIED", "ASSOCIATION_ONLY", @@ -1252,10 +1266,10 @@ "ASSOCIATION_AND_DEVICE_AUTH_TOKEN" ], "enumDescriptions": [ - "No authentication/authorization method specified. No devices are allowed to\naccess the gateway.", - "The device is authenticated through the gateway association only. Device\ncredentials are ignored even if provided.", - "The device is authenticated through its own credentials. Gateway\nassociation is not checked.", - "The device is authenticated through both device credentials and gateway\nassociation. The device must be bound to the gateway and must provide its\nown credentials." + "No authentication/authorization method specified. No devices are allowed to access the gateway.", + "The device is authenticated through the gateway association only. Device credentials are ignored even if provided.", + "The device is authenticated through its own credentials. Gateway association is not checked.", + "The device is authenticated through both device credentials and gateway association. The device must be bound to the gateway and must provide its own credentials." ], "type": "string" }, @@ -1278,7 +1292,7 @@ "type": "string" }, "lastAccessedGatewayTime": { - "description": "[Output only] The most recent time at which the device accessed the gateway\nspecified in `last_accessed_gateway`.", + "description": "[Output only] The most recent time at which the device accessed the gateway specified in `last_accessed_gateway`.", "format": "google-datetime", "type": "string" } @@ -1291,7 +1305,7 @@ "properties": { "options": { "$ref": "GetPolicyOptions", - "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to\n`GetIamPolicy`." + "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to `GetIamPolicy`." } }, "type": "object" @@ -1301,7 +1315,7 @@ "id": "GetPolicyOptions", "properties": { "requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -1313,14 +1327,14 @@ "id": "HttpConfig", "properties": { "httpEnabledState": { - "description": "If enabled, allows devices to use DeviceService via the HTTP protocol.\nOtherwise, any requests to DeviceService will fail for this registry.", + "description": "If enabled, allows devices to use DeviceService via the HTTP protocol. 
Otherwise, any requests to DeviceService will fail for this registry.", "enum": [ "HTTP_STATE_UNSPECIFIED", "HTTP_ENABLED", "HTTP_DISABLED" ], "enumDescriptions": [ - "No HTTP state specified. If not specified, DeviceService will be\nenabled by default.", + "No HTTP state specified. If not specified, DeviceService will be enabled by default.", "Enables DeviceService (HTTP) service for the registry.", "Disables DeviceService (HTTP) service for the registry." ], @@ -1334,7 +1348,7 @@ "id": "ListDeviceConfigVersionsResponse", "properties": { "deviceConfigs": { - "description": "The device configuration for the last few versions. Versions are listed\nin decreasing order, starting from the most recent one.", + "description": "The device configuration for the last few versions. Versions are listed in decreasing order, starting from the most recent one.", "items": { "$ref": "DeviceConfig" }, @@ -1355,7 +1369,7 @@ "type": "array" }, "nextPageToken": { - "description": "If not empty, indicates that there may be more registries that match the\nrequest; this value should be passed in a new\n`ListDeviceRegistriesRequest`.", + "description": "If not empty, indicates that there may be more registries that match the request; this value should be passed in a new `ListDeviceRegistriesRequest`.", "type": "string" } }, @@ -1366,7 +1380,7 @@ "id": "ListDeviceStatesResponse", "properties": { "deviceStates": { - "description": "The last few device states. States are listed in descending order of server\nupdate time, starting from the most recent one.", + "description": "The last few device states. States are listed in descending order of server update time, starting from the most recent one.", "items": { "$ref": "DeviceState" }, @@ -1387,7 +1401,7 @@ "type": "array" }, "nextPageToken": { - "description": "If not empty, indicates that there may be more devices that match the\nrequest; this value should be passed in a new `ListDevicesRequest`.", + "description": "If not empty, indicates that there may be more devices that match the request; this value should be passed in a new `ListDevicesRequest`.", "type": "string" } }, @@ -1403,7 +1417,7 @@ "type": "string" }, "versionToUpdate": { - "description": "The version number to update. If this value is zero, it will not check the\nversion number of the server and will always update the current version;\notherwise, this update will fail if the version number found on the server\ndoes not match this version number. This is used to support multiple\nsimultaneous updates without losing data.", + "description": "The version number to update. If this value is zero, it will not check the version number of the server and will always update the current version; otherwise, this update will fail if the version number found on the server does not match this version number. This is used to support multiple simultaneous updates without losing data.", "format": "int64", "type": "string" } @@ -1415,7 +1429,7 @@ "id": "MqttConfig", "properties": { "mqttEnabledState": { - "description": "If enabled, allows connections using the MQTT protocol. Otherwise, MQTT\nconnections to this registry will fail.", + "description": "If enabled, allows connections using the MQTT protocol. 
Otherwise, MQTT connections to this registry will fail.", "enum": [ "MQTT_STATE_UNSPECIFIED", "MQTT_ENABLED", @@ -1432,23 +1446,23 @@ "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access\ncontrols for Google Cloud resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). A `role` is a named list of\npermissions; each `role` can be an IAM predefined role or a user-created\ncustom role.\n\nOptionally, a `binding` can specify a `condition`, which is a logical\nexpression that allows access to a resource only if the expression evaluates\nto `true`. A condition can add constraints based on attributes of the\nrequest, the resource, or both.\n\n**JSON example:**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\"user:eve@example.com\"],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n }\n\n**YAML example:**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n - etag: BwWWja0YfJA=\n - version: 3\n\nFor a description of IAM and its features, see the\n[IAM documentation](https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "bindings": { - "description": "Associates a list of `members` to a `role`. Optionally, may specify a\n`condition` that determines how and when the `bindings` are applied. Each\nof the `bindings` must contain at least one member.", + "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", "format": "byte", "type": "string" }, "version": { - "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. 
Requests that specify an invalid value\nare rejected.\n\nAny operation that affects conditional role bindings must specify version\n`3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding\n* Adding a conditional role binding to a policy\n* Changing a conditional role binding in a policy\n* Removing any role binding, with or without a condition, from a policy\n that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may\nspecify any valid version or leave the field unset.", + "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -1470,8 +1484,8 @@ "X509_CERTIFICATE_PEM" ], "enumDescriptions": [ - "The format has not been specified. This is an invalid default value and\nmust not be used.", - "An X.509v3 certificate ([RFC5280](https://www.ietf.org/rfc/rfc5280.txt)),\nencoded in base64, and wrapped by `-----BEGIN CERTIFICATE-----` and\n`-----END CERTIFICATE-----`." + "The format has not been specified. This is an invalid default value and must not be used.", + "An X.509v3 certificate ([RFC5280](https://www.ietf.org/rfc/rfc5280.txt)), encoded in base64, and wrapped by `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`." ], "type": "string" }, @@ -1496,11 +1510,11 @@ "ES256_X509_PEM" ], "enumDescriptions": [ - "The format has not been specified. This is an invalid default value and\nmust not be used.", - "An RSA public key encoded in base64, and wrapped by\n`-----BEGIN PUBLIC KEY-----` and `-----END PUBLIC KEY-----`. This can be\nused to verify `RS256` signatures in JWT tokens ([RFC7518](\nhttps://www.ietf.org/rfc/rfc7518.txt)).", - "As RSA_PEM, but wrapped in an X.509v3 certificate ([RFC5280](\nhttps://www.ietf.org/rfc/rfc5280.txt)), encoded in base64, and wrapped by\n`-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`.", - "Public key for the ECDSA algorithm using P-256 and SHA-256, encoded in\nbase64, and wrapped by `-----BEGIN PUBLIC KEY-----` and `-----END\nPUBLIC KEY-----`. This can be used to verify JWT tokens with the `ES256`\nalgorithm ([RFC7518](https://www.ietf.org/rfc/rfc7518.txt)). 
This curve is\ndefined in [OpenSSL](https://www.openssl.org/) as the `prime256v1` curve.", - "As ES256_PEM, but wrapped in an X.509v3 certificate ([RFC5280](\nhttps://www.ietf.org/rfc/rfc5280.txt)), encoded in base64, and wrapped by\n`-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`." + "The format has not been specified. This is an invalid default value and must not be used.", + "An RSA public key encoded in base64, and wrapped by `-----BEGIN PUBLIC KEY-----` and `-----END PUBLIC KEY-----`. This can be used to verify `RS256` signatures in JWT tokens ([RFC7518]( https://www.ietf.org/rfc/rfc7518.txt)).", + "As RSA_PEM, but wrapped in an X.509v3 certificate ([RFC5280]( https://www.ietf.org/rfc/rfc5280.txt)), encoded in base64, and wrapped by `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`.", + "Public key for the ECDSA algorithm using P-256 and SHA-256, encoded in base64, and wrapped by `-----BEGIN PUBLIC KEY-----` and `-----END PUBLIC KEY-----`. This can be used to verify JWT tokens with the `ES256` algorithm ([RFC7518](https://www.ietf.org/rfc/rfc7518.txt)). This curve is defined in [OpenSSL](https://www.openssl.org/) as the `prime256v1` curve.", + "As ES256_PEM, but wrapped in an X.509v3 certificate ([RFC5280]( https://www.ietf.org/rfc/rfc5280.txt)), encoded in base64, and wrapped by `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`." ], "type": "string" }, @@ -1532,7 +1546,7 @@ "type": "string" }, "subfolder": { - "description": "Optional subfolder for the command. If empty, the command will be delivered\nto the /devices/{device-id}/commands topic, otherwise it will be delivered\nto the /devices/{device-id}/commands/{subfolder} topic. Multi-level\nsubfolders are allowed. This field must not have more than 256 characters,\nand must not contain any MQTT wildcards (\"+\" or \"#\") or null characters.", + "description": "Optional subfolder for the command. If empty, the command will be delivered to the /devices/{device-id}/commands topic, otherwise it will be delivered to the /devices/{device-id}/commands/{subfolder} topic. Multi-level subfolders are allowed. This field must not have more than 256 characters, and must not contain any MQTT wildcards (\"+\" or \"#\") or null characters.", "type": "string" } }, @@ -1550,7 +1564,7 @@ "properties": { "policy": { "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." } }, "type": "object" @@ -1560,14 +1574,14 @@ "id": "StateNotificationConfig", "properties": { "pubsubTopicName": { - "description": "A Cloud Pub/Sub topic name. For example,\n`projects/myProject/topics/deviceEvents`.", + "description": "A Cloud Pub/Sub topic name. For example, `projects/myProject/topics/deviceEvents`.", "type": "string" } }, "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). 
Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -1576,7 +1590,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -1587,7 +1601,7 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, @@ -1598,7 +1612,7 @@ "id": "TestIamPermissionsRequest", "properties": { "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "items": { "type": "string" }, @@ -1612,7 +1626,7 @@ "id": "TestIamPermissionsResponse", "properties": { "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { "type": "string" }, @@ -1626,11 +1640,11 @@ "id": "UnbindDeviceFromGatewayRequest", "properties": { "deviceId": { - "description": "Required. The device to disassociate from the specified gateway. The value of\n`device_id` can be either the device numeric ID or the user-defined device\nidentifier.", + "description": "Required. The device to disassociate from the specified gateway. The value of `device_id` can be either the device numeric ID or the user-defined device identifier.", "type": "string" }, "gatewayId": { - "description": "Required. The value of `gateway_id` can be either the device numeric ID or the\nuser-defined device identifier.", + "description": "Required. 
The value of `gateway_id` can be either the device numeric ID or the user-defined device identifier.", "type": "string" } }, diff --git a/vendor/google.golang.org/api/cloudiot/v1/cloudiot-gen.go b/vendor/google.golang.org/api/cloudiot/v1/cloudiot-gen.go index 000f24dfd83..9594ad02663 100644 --- a/vendor/google.golang.org/api/cloudiot/v1/cloudiot-gen.go +++ b/vendor/google.golang.org/api/cloudiot/v1/cloudiot-gen.go @@ -79,6 +79,7 @@ const apiId = "cloudiot:v1" const apiName = "cloudiot" const apiVersion = "v1" const basePath = "https://cloudiot.googleapis.com/" +const mtlsBasePath = "https://cloudiot.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -98,6 +99,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -237,15 +239,12 @@ type ProjectsLocationsRegistriesGroupsDevicesService struct { // BindDeviceToGatewayRequest: Request for `BindDeviceToGateway`. type BindDeviceToGatewayRequest struct { // DeviceId: Required. The device to associate with the specified - // gateway. The value of - // `device_id` can be either the device numeric ID or the user-defined - // device - // identifier. + // gateway. The value of `device_id` can be either the device numeric ID + // or the user-defined device identifier. DeviceId string `json:"deviceId,omitempty"` // GatewayId: Required. The value of `gateway_id` can be either the - // device numeric ID or the - // user-defined device identifier. + // device numeric ID or the user-defined device identifier. GatewayId string `json:"gatewayId,omitempty"` // ForceSendFields is a list of field names (e.g. "DeviceId") to @@ -280,86 +279,60 @@ type BindDeviceToGatewayResponse struct { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: The condition that is associated with this binding. - // NOTE: An unsatisfied condition will not allow user access via - // current - // binding. Different bindings, including their conditions, are - // examined - // independently. + // BindingId: A client-specified ID for this binding. Expected to be + // globally unique to support the internal bindings-by-ID API. + BindingId string `json:"bindingId,omitempty"` + + // Condition: The condition that is associated with this binding. If the + // condition evaluates to `true`, then this binding applies to the + // current request. If the condition evaluates to `false`, then this + // binding does not apply to the current request. However, a different + // role binding might grant the same role to one or more of the members + // in this binding. To learn which resources support conditions in their + // IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud - // Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. 
- // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@example.com` . - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. - // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a user that has been recently deleted. - // For - // example, `alice@example.com?uid=123456789012345678901`. If the - // user is - // recovered, this value reverts to `user:{emailid}` and the - // recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address - // (plus - // unique identifier) representing a service account that has been - // recently - // deleted. For example, - // + // Platform resource. `members` can have the following values: * + // `allUsers`: A special identifier that represents anyone who is on the + // internet; with or without a Google account. * + // `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. * + // `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . * + // `serviceAccount:{emailid}`: An email address that represents a + // service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An + // email address that represents a Google group. For example, + // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An + // email address (plus unique identifier) representing a user that has + // been recently deleted. For example, + // `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered + // user retains the role in the binding. * + // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account - // retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a Google group that has been recently - // deleted. For example, - // `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` - // and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. - // - // + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains + // the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: + // An email address (plus unique identifier) representing a Google group + // that has been recently deleted. 
For example, + // `admins@example.com?uid=123456789012345678901`. If the group is + // recovered, this value reverts to `group:{emailid}` and the recovered + // group retains the role in the binding. * `domain:{domain}`: The G + // Suite domain (primary) that represents all the users of that domain. + // For example, `google.com` or `example.com`. Members []string `json:"members,omitempty"` - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Role: Role that is assigned to `members`. For example, + // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` - // ForceSendFields is a list of field names (e.g. "Condition") to + // ForceSendFields is a list of field names (e.g. "BindingId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -367,7 +340,7 @@ type Binding struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Condition") to include in + // NullFields is a list of field names (e.g. "BindingId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -385,104 +358,76 @@ func (s *Binding) MarshalJSON() ([]byte, error) { // Device: The device resource. type Device struct { // Blocked: If a device is blocked, connections or requests from this - // device will fail. - // Can be used to temporarily prevent the device from connecting if, - // for - // example, the sensor is generating bad data and needs maintenance. + // device will fail. Can be used to temporarily prevent the device from + // connecting if, for example, the sensor is generating bad data and + // needs maintenance. Blocked bool `json:"blocked,omitempty"` // Config: The most recent device configuration, which is eventually - // sent from - // Cloud IoT Core to the device. If not present on creation, - // the - // configuration will be initialized with an empty payload and version - // value - // of `1`. To update this field after creation, use - // the + // sent from Cloud IoT Core to the device. If not present on creation, + // the configuration will be initialized with an empty payload and + // version value of `1`. To update this field after creation, use the // `DeviceManager.ModifyCloudToDeviceConfig` method. Config *DeviceConfig `json:"config,omitempty"` // Credentials: The credentials used to authenticate this device. To - // allow credential - // rotation without interruption, multiple device credentials can be - // bound to - // this device. No more than 3 credentials can be bound to a single - // device at - // a time. When new credentials are added to a device, they are - // verified - // against the registry credentials. For details, see the description of - // the + // allow credential rotation without interruption, multiple device + // credentials can be bound to this device. No more than 3 credentials + // can be bound to a single device at a time. When new credentials are + // added to a device, they are verified against the registry + // credentials. For details, see the description of the // `DeviceRegistry.credentials` field. 
Credentials []*DeviceCredential `json:"credentials,omitempty"` // GatewayConfig: Gateway-related configuration and state. GatewayConfig *GatewayConfig `json:"gatewayConfig,omitempty"` - // Id: The user-defined device identifier. The device ID must be - // unique + // Id: The user-defined device identifier. The device ID must be unique // within a device registry. Id string `json:"id,omitempty"` // LastConfigAckTime: [Output only] The last time a cloud-to-device - // config version acknowledgment - // was received from the device. This field is only for - // configurations - // sent through MQTT. + // config version acknowledgment was received from the device. This + // field is only for configurations sent through MQTT. LastConfigAckTime string `json:"lastConfigAckTime,omitempty"` // LastConfigSendTime: [Output only] The last time a cloud-to-device - // config version was sent to - // the device. + // config version was sent to the device. LastConfigSendTime string `json:"lastConfigSendTime,omitempty"` // LastErrorStatus: [Output only] The error message of the most recent - // error, such as a failure - // to publish to Cloud Pub/Sub. 'last_error_time' is the timestamp of - // this - // field. If no errors have occurred, this field has an empty - // message - // and the status code 0 == OK. Otherwise, this field is expected to - // have a - // status code other than OK. + // error, such as a failure to publish to Cloud Pub/Sub. + // 'last_error_time' is the timestamp of this field. If no errors have + // occurred, this field has an empty message and the status code 0 == + // OK. Otherwise, this field is expected to have a status code other + // than OK. LastErrorStatus *Status `json:"lastErrorStatus,omitempty"` // LastErrorTime: [Output only] The time the most recent error occurred, - // such as a failure to - // publish to Cloud Pub/Sub. This field is the timestamp - // of - // 'last_error_status'. + // such as a failure to publish to Cloud Pub/Sub. This field is the + // timestamp of 'last_error_status'. LastErrorTime string `json:"lastErrorTime,omitempty"` // LastEventTime: [Output only] The last time a telemetry event was - // received. Timestamps are - // periodically collected and written to storage; they may be stale by a - // few - // minutes. + // received. Timestamps are periodically collected and written to + // storage; they may be stale by a few minutes. LastEventTime string `json:"lastEventTime,omitempty"` // LastHeartbeatTime: [Output only] The last time an MQTT `PINGREQ` was - // received. This field - // applies only to devices connecting through MQTT. MQTT clients usually - // only - // send `PINGREQ` messages if the connection is idle, and no other - // messages - // have been sent. Timestamps are periodically collected and written - // to - // storage; they may be stale by a few minutes. + // received. This field applies only to devices connecting through MQTT. + // MQTT clients usually only send `PINGREQ` messages if the connection + // is idle, and no other messages have been sent. Timestamps are + // periodically collected and written to storage; they may be stale by a + // few minutes. LastHeartbeatTime string `json:"lastHeartbeatTime,omitempty"` // LastStateTime: [Output only] The last time a state event was - // received. Timestamps are - // periodically collected and written to storage; they may be stale by a - // few - // minutes. + // received. Timestamps are periodically collected and written to + // storage; they may be stale by a few minutes. 
LastStateTime string `json:"lastStateTime,omitempty"` - // LogLevel: **Beta Feature** - // - // The logging verbosity for device activity. If - // unspecified, - // DeviceRegistry.log_level will be used. + // LogLevel: **Beta Feature** The logging verbosity for device activity. + // If unspecified, DeviceRegistry.log_level will be used. // // Possible values: // "LOG_LEVEL_UNSPECIFIED" - No logging specified. If not specified, @@ -490,49 +435,34 @@ type Device struct { // "NONE" - Disables logging. // "ERROR" - Error events will be logged. // "INFO" - Informational events will be logged, such as connections - // and - // disconnections. + // and disconnections. // "DEBUG" - All events will be logged. LogLevel string `json:"logLevel,omitempty"` // Metadata: The metadata key-value pairs assigned to the device. This - // metadata is not - // interpreted or indexed by Cloud IoT Core. It can be used to add - // contextual - // information for the device. - // - // Keys must conform to the regular expression a-zA-Z+ and - // be less than 128 bytes in length. - // - // Values are free-form strings. Each value must be less than or equal - // to 32 - // KB in size. - // - // The total size of all keys and values must be less than 256 KB, and - // the - // maximum number of key-value pairs is 500. + // metadata is not interpreted or indexed by Cloud IoT Core. It can be + // used to add contextual information for the device. Keys must conform + // to the regular expression a-zA-Z+ and be less than 128 bytes in + // length. Values are free-form strings. Each value must be less than or + // equal to 32 KB in size. The total size of all keys and values must be + // less than 256 KB, and the maximum number of key-value pairs is 500. Metadata map[string]string `json:"metadata,omitempty"` - // Name: The resource path name. For - // example, - // `projects/p1/locations/us-central1/registries/registry0/devic - // es/dev0` + // Name: The resource path name. For example, + // `projects/p1/locations/us-central1/registries/registry0/devices/dev0` // or - // `projects/p1/locations/us-central1/registries/registry0/devices/{nu - // m_id}`. - // When `name` is populated as a response from the service, it always - // ends - // in the device numeric ID. + // `projects/p1/locations/us-central1/registries/registry0/devices/{num_i + // d}`. When `name` is populated as a response from the service, it + // always ends in the device numeric ID. Name string `json:"name,omitempty"` // NumId: [Output only] A server-defined unique numeric ID for the - // device. This is a - // more compact way to identify devices, and it is globally unique. + // device. This is a more compact way to identify devices, and it is + // globally unique. NumId uint64 `json:"numId,omitempty,string"` // State: [Output only] The state most recently received from the - // device. If no state - // has been reported, this field is not present. + // device. If no state has been reported, this field is not present. State *DeviceState `json:"state,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -569,37 +499,27 @@ type DeviceConfig struct { BinaryData string `json:"binaryData,omitempty"` // CloudUpdateTime: [Output only] The time at which this configuration - // version was updated in - // Cloud IoT Core. This timestamp is set by the server. + // version was updated in Cloud IoT Core. This timestamp is set by the + // server. 
CloudUpdateTime string `json:"cloudUpdateTime,omitempty"` // DeviceAckTime: [Output only] The time at which Cloud IoT Core - // received the - // acknowledgment from the device, indicating that the device has - // received - // this configuration version. If this field is not present, the device - // has - // not yet acknowledged that it received this version. Note that - // when - // the config was sent to the device, many config versions may have - // been - // available in Cloud IoT Core while the device was disconnected, and - // on - // connection, only the latest version is sent to the device. - // Some - // versions may never be sent to the device, and therefore are - // never - // acknowledged. This timestamp is set by Cloud IoT Core. + // received the acknowledgment from the device, indicating that the + // device has received this configuration version. If this field is not + // present, the device has not yet acknowledged that it received this + // version. Note that when the config was sent to the device, many + // config versions may have been available in Cloud IoT Core while the + // device was disconnected, and on connection, only the latest version + // is sent to the device. Some versions may never be sent to the device, + // and therefore are never acknowledged. This timestamp is set by Cloud + // IoT Core. DeviceAckTime string `json:"deviceAckTime,omitempty"` // Version: [Output only] The version of this update. The version number - // is assigned by - // the server, and is always greater than 0 after device creation. - // The - // version must be 0 on the `CreateDevice` request if a `config` - // is - // specified; the response of `CreateDevice` will always have a value of - // 1. + // is assigned by the server, and is always greater than 0 after device + // creation. The version must be 0 on the `CreateDevice` request if a + // `config` is specified; the response of `CreateDevice` will always + // have a value of 1. Version int64 `json:"version,omitempty,string"` // ServerResponse contains the HTTP response code and headers from the @@ -633,31 +553,22 @@ func (s *DeviceConfig) MarshalJSON() ([]byte, error) { // authentication. type DeviceCredential struct { // ExpirationTime: [Optional] The time at which this credential becomes - // invalid. This - // credential will be ignored for new client authentication requests - // after - // this timestamp; however, it will not be automatically deleted. + // invalid. This credential will be ignored for new client + // authentication requests after this timestamp; however, it will not be + // automatically deleted. ExpirationTime string `json:"expirationTime,omitempty"` // PublicKey: A public key used to verify the signature of JSON Web - // Tokens (JWTs). - // When adding a new device credential, either via device creation or - // via - // modifications, this public key credential may be required to be - // signed by - // one of the registry level certificates. More specifically, if - // the - // registry contains at least one certificate, any new device - // credential - // must be signed by one of the registry certificates. As a result, - // when the registry contains certificates, only X.509 certificates - // are - // accepted as device credentials. However, if the registry does - // not contain a certificate, self-signed certificates and public keys - // will - // be accepted. New device credentials must be different from - // every - // registry-level certificate. + // Tokens (JWTs). 
When adding a new device credential, either via device + // creation or via modifications, this public key credential may be + // required to be signed by one of the registry level certificates. More + // specifically, if the registry contains at least one certificate, any + // new device credential must be signed by one of the registry + // certificates. As a result, when the registry contains certificates, + // only X.509 certificates are accepted as device credentials. However, + // if the registry does not contain a certificate, self-signed + // certificates and public keys will be accepted. New device credentials + // must be different from every registry-level certificate. PublicKey *PublicKeyCredential `json:"publicKey,omitempty"` // ForceSendFields is a list of field names (e.g. "ExpirationTime") to @@ -687,43 +598,28 @@ func (s *DeviceCredential) MarshalJSON() ([]byte, error) { // DeviceRegistry: A container for a group of devices. type DeviceRegistry struct { // Credentials: The credentials used to verify the device credentials. - // No more than 10 - // credentials can be bound to a single registry at a time. The - // verification - // process occurs at the time of device creation or update. If this - // field is - // empty, no verification is performed. Otherwise, the credentials of a - // newly - // created device or added credentials of an updated device should be - // signed - // with one of these registry credentials. - // - // Note, however, that existing devices will never be affected - // by - // modifications to this list of credentials: after a device has - // been - // successfully created in a registry, it should be able to connect even - // if - // its registry credentials are revoked, deleted, or modified. + // No more than 10 credentials can be bound to a single registry at a + // time. The verification process occurs at the time of device creation + // or update. If this field is empty, no verification is performed. + // Otherwise, the credentials of a newly created device or added + // credentials of an updated device should be signed with one of these + // registry credentials. Note, however, that existing devices will never + // be affected by modifications to this list of credentials: after a + // device has been successfully created in a registry, it should be able + // to connect even if its registry credentials are revoked, deleted, or + // modified. Credentials []*RegistryCredential `json:"credentials,omitempty"` // EventNotificationConfigs: The configuration for notification of - // telemetry events received from the - // device. All telemetry events that were successfully published by - // the - // device and acknowledged by Cloud IoT Core are guaranteed to - // be - // delivered to Cloud Pub/Sub. If multiple configurations match a - // message, - // only the first matching configuration is used. If you try to publish - // a - // device telemetry event using MQTT without specifying a Cloud Pub/Sub - // topic - // for the device's registry, the connection closes automatically. If - // you try - // to do so using an HTTP connection, an error is returned. Up to - // 10 - // configurations may be provided. + // telemetry events received from the device. All telemetry events that + // were successfully published by the device and acknowledged by Cloud + // IoT Core are guaranteed to be delivered to Cloud Pub/Sub. If multiple + // configurations match a message, only the first matching configuration + // is used. 
If you try to publish a device telemetry event using MQTT + // without specifying a Cloud Pub/Sub topic for the device's registry, + // the connection closes automatically. If you try to do so using an + // HTTP connection, an error is returned. Up to 10 configurations may be + // provided. EventNotificationConfigs []*EventNotificationConfig `json:"eventNotificationConfigs,omitempty"` // HttpConfig: The DeviceService (HTTP) configuration for this device @@ -734,11 +630,9 @@ type DeviceRegistry struct { // `myRegistry`. Id string `json:"id,omitempty"` - // LogLevel: **Beta Feature** - // - // The default logging verbosity for activity from devices in this - // registry. - // The verbosity level can be overridden by Device.log_level. + // LogLevel: **Beta Feature** The default logging verbosity for activity + // from devices in this registry. The verbosity level can be overridden + // by Device.log_level. // // Possible values: // "LOG_LEVEL_UNSPECIFIED" - No logging specified. If not specified, @@ -746,31 +640,24 @@ type DeviceRegistry struct { // "NONE" - Disables logging. // "ERROR" - Error events will be logged. // "INFO" - Informational events will be logged, such as connections - // and - // disconnections. + // and disconnections. // "DEBUG" - All events will be logged. LogLevel string `json:"logLevel,omitempty"` // MqttConfig: The MQTT configuration for this device registry. MqttConfig *MqttConfig `json:"mqttConfig,omitempty"` - // Name: The resource path name. For - // example, - // `projects/example-project/locations/us-central1/registries/my - // -registry`. + // Name: The resource path name. For example, + // `projects/example-project/locations/us-central1/registries/my-registry + // `. Name string `json:"name,omitempty"` // StateNotificationConfig: The configuration for notification of new - // states received from the device. - // State updates are guaranteed to be stored in the state history, - // but - // notifications to Cloud Pub/Sub are not guaranteed. For example, - // if - // permissions are misconfigured or the specified topic doesn't exist, - // no - // notification will be published but the state will still be stored in - // Cloud - // IoT Core. + // states received from the device. State updates are guaranteed to be + // stored in the state history, but notifications to Cloud Pub/Sub are + // not guaranteed. For example, if permissions are misconfigured or the + // specified topic doesn't exist, no notification will be published but + // the state will still be stored in Cloud IoT Core. StateNotificationConfig *StateNotificationConfig `json:"stateNotificationConfig,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -806,8 +693,7 @@ type DeviceState struct { BinaryData string `json:"binaryData,omitempty"` // UpdateTime: [Output only] The time at which this state version was - // updated in Cloud - // IoT Core. + // updated in Cloud IoT Core. UpdateTime string `json:"updateTime,omitempty"` // ForceSendFields is a list of field names (e.g. "BinaryData") to @@ -834,17 +720,11 @@ func (s *DeviceState) MarshalJSON() ([]byte, error) { } // Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. 
+// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -854,18 +734,15 @@ type Empty struct { // EventNotificationConfig: The configuration for forwarding telemetry // events. type EventNotificationConfig struct { - // PubsubTopicName: A Cloud Pub/Sub topic name. For - // example, + // PubsubTopicName: A Cloud Pub/Sub topic name. For example, // `projects/myProject/topics/deviceEvents`. PubsubTopicName string `json:"pubsubTopicName,omitempty"` // SubfolderMatches: If the subfolder name matches this string exactly, - // this configuration will - // be used. The string must not include the leading '/' character. If - // empty, - // all strings are matched. This field is used only for telemetry - // events; - // subfolders are not supported for state changes. + // this configuration will be used. The string must not include the + // leading '/' character. If empty, all strings are matched. This field + // is used only for telemetry events; subfolders are not supported for + // state changes. SubfolderMatches string `json:"subfolderMatches,omitempty"` // ForceSendFields is a list of field names (e.g. "PubsubTopicName") to @@ -893,65 +770,40 @@ func (s *EventNotificationConfig) MarshalJSON() ([]byte, error) { } // Expr: Represents a textual expression in the Common Expression -// Language (CEL) -// syntax. CEL is a C-like expression language. The syntax and semantics -// of CEL -// are documented at https://github.com/google/cel-spec. -// -// Example (Comparison): -// -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" -// -// Example (Equality): -// -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == -// request.auth.claims.email" -// -// Example (Logic): -// -// title: "Public documents" -// description: "Determine whether the document should be publicly -// visible" -// expression: "document.type != 'private' && document.type != -// 'internal'" -// -// Example (Data Manipulation): -// -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + -// string(document.create_time)" -// -// The exact variables and functions that may be referenced within an -// expression -// are determined by the service that evaluates it. See the -// service -// documentation for additional information. +// Language (CEL) syntax. CEL is a C-like expression language. The +// syntax and semantics of CEL are documented at +// https://github.com/google/cel-spec. 
Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" +// description: "Determine whether the document should be publicly +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. type Expr struct { // Description: Optional. Description of the expression. This is a - // longer text which - // describes the expression, e.g. when hovered over it in a UI. + // longer text which describes the expression, e.g. when hovered over it + // in a UI. Description string `json:"description,omitempty"` // Expression: Textual representation of an expression in Common - // Expression Language - // syntax. + // Expression Language syntax. Expression string `json:"expression,omitempty"` // Location: Optional. String indicating the location of the expression - // for error - // reporting, e.g. a file name and a position in the file. + // for error reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` // Title: Optional. Title for the expression, i.e. a short string - // describing - // its purpose. This can be used e.g. in UIs which allow to enter - // the - // expression. + // describing its purpose. This can be used e.g. in UIs which allow to + // enter the expression. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -980,24 +832,19 @@ func (s *Expr) MarshalJSON() ([]byte, error) { // GatewayConfig: Gateway-related configuration and state. type GatewayConfig struct { // GatewayAuthMethod: Indicates how to authorize and/or authenticate - // devices to access the - // gateway. + // devices to access the gateway. // // Possible values: // "GATEWAY_AUTH_METHOD_UNSPECIFIED" - No authentication/authorization - // method specified. No devices are allowed to - // access the gateway. + // method specified. No devices are allowed to access the gateway. // "ASSOCIATION_ONLY" - The device is authenticated through the - // gateway association only. Device - // credentials are ignored even if provided. + // gateway association only. Device credentials are ignored even if + // provided. // "DEVICE_AUTH_TOKEN_ONLY" - The device is authenticated through its - // own credentials. Gateway - // association is not checked. + // own credentials. Gateway association is not checked. // "ASSOCIATION_AND_DEVICE_AUTH_TOKEN" - The device is authenticated - // through both device credentials and gateway - // association. The device must be bound to the gateway and must provide - // its - // own credentials. + // through both device credentials and gateway association. The device + // must be bound to the gateway and must provide its own credentials. GatewayAuthMethod string `json:"gatewayAuthMethod,omitempty"` // GatewayType: Indicates whether the device is a gateway. 
@@ -1014,8 +861,7 @@ type GatewayConfig struct { LastAccessedGatewayId string `json:"lastAccessedGatewayId,omitempty"` // LastAccessedGatewayTime: [Output only] The most recent time at which - // the device accessed the gateway - // specified in `last_accessed_gateway`. + // the device accessed the gateway specified in `last_accessed_gateway`. LastAccessedGatewayTime string `json:"lastAccessedGatewayTime,omitempty"` // ForceSendFields is a list of field names (e.g. "GatewayAuthMethod") @@ -1045,8 +891,7 @@ func (s *GatewayConfig) MarshalJSON() ([]byte, error) { // GetIamPolicyRequest: Request message for `GetIamPolicy` method. type GetIamPolicyRequest struct { // Options: OPTIONAL: A `GetPolicyOptions` object for specifying options - // to - // `GetIamPolicy`. + // to `GetIamPolicy`. Options *GetPolicyOptions `json:"options,omitempty"` // ForceSendFields is a list of field names (e.g. "Options") to @@ -1075,17 +920,14 @@ func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) { // GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. type GetPolicyOptions struct { // RequestedPolicyVersion: Optional. The policy format version to be - // returned. - // - // Valid values are 0, 1, and 3. Requests specifying an invalid value - // will be - // rejected. - // - // Requests for policies with any conditional bindings must specify - // version 3. - // Policies without any conditional bindings may specify any valid value - // or - // leave the field unset. + // returned. Valid values are 0, 1, and 3. Requests specifying an + // invalid value will be rejected. Requests for policies with any + // conditional bindings must specify version 3. Policies without any + // conditional bindings may specify any valid value or leave the field + // unset. To learn which resources support conditions in their IAM + // policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1117,13 +959,12 @@ func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) { // registry. type HttpConfig struct { // HttpEnabledState: If enabled, allows devices to use DeviceService via - // the HTTP protocol. - // Otherwise, any requests to DeviceService will fail for this registry. + // the HTTP protocol. Otherwise, any requests to DeviceService will fail + // for this registry. // // Possible values: // "HTTP_STATE_UNSPECIFIED" - No HTTP state specified. If not - // specified, DeviceService will be - // enabled by default. + // specified, DeviceService will be enabled by default. // "HTTP_ENABLED" - Enables DeviceService (HTTP) service for the // registry. // "HTTP_DISABLED" - Disables DeviceService (HTTP) service for the @@ -1158,8 +999,8 @@ func (s *HttpConfig) MarshalJSON() ([]byte, error) { // `ListDeviceConfigVersions`. type ListDeviceConfigVersionsResponse struct { // DeviceConfigs: The device configuration for the last few versions. - // Versions are listed - // in decreasing order, starting from the most recent one. + // Versions are listed in decreasing order, starting from the most + // recent one. 
DeviceConfigs []*DeviceConfig `json:"deviceConfigs,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1195,10 +1036,8 @@ type ListDeviceRegistriesResponse struct { DeviceRegistries []*DeviceRegistry `json:"deviceRegistries,omitempty"` // NextPageToken: If not empty, indicates that there may be more - // registries that match the - // request; this value should be passed in a - // new - // `ListDeviceRegistriesRequest`. + // registries that match the request; this value should be passed in a + // new `ListDeviceRegistriesRequest`. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1232,8 +1071,8 @@ func (s *ListDeviceRegistriesResponse) MarshalJSON() ([]byte, error) { // ListDeviceStatesResponse: Response for `ListDeviceStates`. type ListDeviceStatesResponse struct { // DeviceStates: The last few device states. States are listed in - // descending order of server - // update time, starting from the most recent one. + // descending order of server update time, starting from the most recent + // one. DeviceStates []*DeviceState `json:"deviceStates,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1269,8 +1108,8 @@ type ListDevicesResponse struct { Devices []*Device `json:"devices,omitempty"` // NextPageToken: If not empty, indicates that there may be more devices - // that match the - // request; this value should be passed in a new `ListDevicesRequest`. + // that match the request; this value should be passed in a new + // `ListDevicesRequest`. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1307,14 +1146,11 @@ type ModifyCloudToDeviceConfigRequest struct { BinaryData string `json:"binaryData,omitempty"` // VersionToUpdate: The version number to update. If this value is zero, - // it will not check the - // version number of the server and will always update the current - // version; - // otherwise, this update will fail if the version number found on the - // server - // does not match this version number. This is used to support - // multiple - // simultaneous updates without losing data. + // it will not check the version number of the server and will always + // update the current version; otherwise, this update will fail if the + // version number found on the server does not match this version + // number. This is used to support multiple simultaneous updates without + // losing data. VersionToUpdate int64 `json:"versionToUpdate,omitempty,string"` // ForceSendFields is a list of field names (e.g. "BinaryData") to @@ -1343,8 +1179,7 @@ func (s *ModifyCloudToDeviceConfigRequest) MarshalJSON() ([]byte, error) { // MqttConfig: The configuration of MQTT for a device registry. type MqttConfig struct { // MqttEnabledState: If enabled, allows connections using the MQTT - // protocol. Otherwise, MQTT - // connections to this registry will fail. + // protocol. Otherwise, MQTT connections to this registry will fail. // // Possible values: // "MQTT_STATE_UNSPECIFIED" - No MQTT state specified. If not @@ -1378,139 +1213,75 @@ func (s *MqttConfig) MarshalJSON() ([]byte, error) { } // Policy: An Identity and Access Management (IAM) policy, which -// specifies access -// controls for Google Cloud resources. -// -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or -// more -// `members` to a single `role`. 
Members can be user accounts, service -// accounts, +// specifies access controls for Google Cloud resources. A `Policy` is a +// collection of `bindings`. A `binding` binds one or more `members` to +// a single `role`. Members can be user accounts, service accounts, // Google groups, and domains (such as G Suite). A `role` is a named -// list of -// permissions; each `role` can be an IAM predefined role or a -// user-created -// custom role. -// -// Optionally, a `binding` can specify a `condition`, which is a -// logical +// list of permissions; each `role` can be an IAM predefined role or a +// user-created custom role. For some types of Google Cloud resources, a +// `binding` can also specify a `condition`, which is a logical // expression that allows access to a resource only if the expression -// evaluates -// to `true`. A condition can add constraints based on attributes of -// the -// request, the resource, or both. -// -// **JSON example:** -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": ["user:eve@example.com"], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// -// **YAML example:** -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// - etag: BwWWja0YfJA= -// - version: 3 -// -// For a description of IAM and its features, see the -// [IAM documentation](https://cloud.google.com/iam/docs/). +// evaluates to `true`. A condition can add constraints based on +// attributes of the request, the resource, or both. To learn which +// resources support conditions in their IAM policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). 
**JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: +// 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). type Policy struct { // Bindings: Associates a list of `members` to a `role`. Optionally, may - // specify a - // `condition` that determines how and when the `bindings` are applied. - // Each - // of the `bindings` must contain at least one member. + // specify a `condition` that determines how and when the `bindings` are + // applied. Each of the `bindings` must contain at least one member. Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. - // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. **Important:** If you use IAM + // Conditions, you must include the `etag` field whenever you call + // `setIamPolicy`. If you omit this field, then IAM allows you to + // overwrite a version `3` policy with a version `1` policy, and all of // the conditions in the version `3` policy are lost. Etag string `json:"etag,omitempty"` - // Version: Specifies the format of the policy. - // - // Valid values are `0`, `1`, and `3`. Requests that specify an invalid - // value - // are rejected. - // + // Version: Specifies the format of the policy. Valid values are `0`, + // `1`, and `3`. 
Requests that specify an invalid value are rejected. // Any operation that affects conditional role bindings must specify - // version - // `3`. This requirement applies to the following operations: - // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a - // policy - // that includes conditions - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of - // the conditions in the version `3` policy are lost. - // - // If a policy does not include any conditions, operations on that - // policy may - // specify any valid version or leave the field unset. + // version `3`. This requirement applies to the following operations: * + // Getting a policy that includes a conditional role binding * Adding a + // conditional role binding to a policy * Changing a conditional role + // binding in a policy * Removing any role binding, with or without a + // condition, from a policy that includes conditions **Important:** If + // you use IAM Conditions, you must include the `etag` field whenever + // you call `setIamPolicy`. If you omit this field, then IAM allows you + // to overwrite a version `3` policy with a version `1` policy, and all + // of the conditions in the version `3` policy are lost. If a policy + // does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. To learn which + // resources support conditions in their IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Version int64 `json:"version,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1549,13 +1320,12 @@ type PublicKeyCertificate struct { // // Possible values: // "UNSPECIFIED_PUBLIC_KEY_CERTIFICATE_FORMAT" - The format has not - // been specified. This is an invalid default value and - // must not be used. + // been specified. This is an invalid default value and must not be + // used. // "X509_CERTIFICATE_PEM" - An X.509v3 certificate - // ([RFC5280](https://www.ietf.org/rfc/rfc5280.txt)), - // encoded in base64, and wrapped by `-----BEGIN CERTIFICATE-----` - // and - // `-----END CERTIFICATE-----`. + // ([RFC5280](https://www.ietf.org/rfc/rfc5280.txt)), encoded in base64, + // and wrapped by `-----BEGIN CERTIFICATE-----` and `-----END + // CERTIFICATE-----`. Format string `json:"format,omitempty"` // X509Details: [Output only] The certificate details. Used only for @@ -1591,35 +1361,26 @@ type PublicKeyCredential struct { // // Possible values: // "UNSPECIFIED_PUBLIC_KEY_FORMAT" - The format has not been - // specified. This is an invalid default value and - // must not be used. - // "RSA_PEM" - An RSA public key encoded in base64, and wrapped - // by + // specified. This is an invalid default value and must not be used. + // "RSA_PEM" - An RSA public key encoded in base64, and wrapped by // `-----BEGIN PUBLIC KEY-----` and `-----END PUBLIC KEY-----`. This can - // be - // used to verify `RS256` signatures in JWT tokens - // ([RFC7518]( + // be used to verify `RS256` signatures in JWT tokens ([RFC7518]( // https://www.ietf.org/rfc/rfc7518.txt)). 
// "RSA_X509_PEM" - As RSA_PEM, but wrapped in an X.509v3 certificate - // ([RFC5280]( - // https://www.ietf.org/rfc/rfc5280.txt)), encoded in base64, and - // wrapped by - // `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`. + // ([RFC5280]( https://www.ietf.org/rfc/rfc5280.txt)), encoded in + // base64, and wrapped by `-----BEGIN CERTIFICATE-----` and `-----END + // CERTIFICATE-----`. // "ES256_PEM" - Public key for the ECDSA algorithm using P-256 and - // SHA-256, encoded in - // base64, and wrapped by `-----BEGIN PUBLIC KEY-----` and - // `-----END - // PUBLIC KEY-----`. This can be used to verify JWT tokens with the - // `ES256` - // algorithm ([RFC7518](https://www.ietf.org/rfc/rfc7518.txt)). This - // curve is + // SHA-256, encoded in base64, and wrapped by `-----BEGIN PUBLIC + // KEY-----` and `-----END PUBLIC KEY-----`. This can be used to verify + // JWT tokens with the `ES256` algorithm + // ([RFC7518](https://www.ietf.org/rfc/rfc7518.txt)). This curve is // defined in [OpenSSL](https://www.openssl.org/) as the `prime256v1` // curve. // "ES256_X509_PEM" - As ES256_PEM, but wrapped in an X.509v3 - // certificate ([RFC5280]( - // https://www.ietf.org/rfc/rfc5280.txt)), encoded in base64, and - // wrapped by - // `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`. + // certificate ([RFC5280]( https://www.ietf.org/rfc/rfc5280.txt)), + // encoded in base64, and wrapped by `-----BEGIN CERTIFICATE-----` and + // `-----END CERTIFICATE-----`. Format string `json:"format,omitempty"` // Key: The key data. @@ -1686,15 +1447,12 @@ type SendCommandToDeviceRequest struct { BinaryData string `json:"binaryData,omitempty"` // Subfolder: Optional subfolder for the command. If empty, the command - // will be delivered - // to the /devices/{device-id}/commands topic, otherwise it will be - // delivered - // to the /devices/{device-id}/commands/{subfolder} topic. - // Multi-level + // will be delivered to the /devices/{device-id}/commands topic, + // otherwise it will be delivered to the + // /devices/{device-id}/commands/{subfolder} topic. Multi-level // subfolders are allowed. This field must not have more than 256 - // characters, - // and must not contain any MQTT wildcards ("+" or "#") or null - // characters. + // characters, and must not contain any MQTT wildcards ("+" or "#") or + // null characters. Subfolder string `json:"subfolder,omitempty"` // ForceSendFields is a list of field names (e.g. "BinaryData") to @@ -1730,11 +1488,9 @@ type SendCommandToDeviceResponse struct { // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. + // `resource`. The size of the policy is limited to a few 10s of KB. An + // empty policy is a valid policy but certain Cloud Platform services + // (such as Projects) might reject them. Policy *Policy `json:"policy,omitempty"` // ForceSendFields is a list of field names (e.g. "Policy") to @@ -1763,8 +1519,7 @@ func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { // StateNotificationConfig: The configuration for notification of new // states received from the device. type StateNotificationConfig struct { - // PubsubTopicName: A Cloud Pub/Sub topic name. For - // example, + // PubsubTopicName: A Cloud Pub/Sub topic name. 
For example, // `projects/myProject/topics/deviceEvents`. PubsubTopicName string `json:"pubsubTopicName,omitempty"` @@ -1793,32 +1548,24 @@ func (s *StateNotificationConfig) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. -// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. -// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -1848,11 +1595,8 @@ func (s *Status) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. - // Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For - // more - // information see - // [IAM + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. For more information see [IAM // Overview](https://cloud.google.com/iam/docs/overview#permissions). Permissions []string `json:"permissions,omitempty"` @@ -1883,8 +1627,7 @@ func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. + // the caller is allowed. Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1918,15 +1661,12 @@ func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { // `UnbindDeviceFromGateway`. type UnbindDeviceFromGatewayRequest struct { // DeviceId: Required. The device to disassociate from the specified - // gateway. The value of - // `device_id` can be either the device numeric ID or the user-defined - // device - // identifier. + // gateway. The value of `device_id` can be either the device numeric ID + // or the user-defined device identifier. DeviceId string `json:"deviceId,omitempty"` // GatewayId: Required. 
The value of `gateway_id` can be either the - // device numeric ID or the - // user-defined device identifier. + // device numeric ID or the user-defined device identifier. GatewayId string `json:"gatewayId,omitempty"` // ForceSendFields is a list of field names (e.g. "DeviceId") to @@ -2050,7 +1790,7 @@ func (c *ProjectsLocationsRegistriesBindDeviceToGatewayCall) Header() http.Heade func (c *ProjectsLocationsRegistriesBindDeviceToGatewayCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2123,7 +1863,7 @@ func (c *ProjectsLocationsRegistriesBindDeviceToGatewayCall) Do(opts ...googleap // ], // "parameters": { // "parent": { - // "description": "Required. The name of the registry. For example,\n`projects/example-project/locations/us-central1/registries/my-registry`.", + // "description": "Required. The name of the registry. For example, `projects/example-project/locations/us-central1/registries/my-registry`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", // "required": true, @@ -2191,7 +1931,7 @@ func (c *ProjectsLocationsRegistriesCreateCall) Header() http.Header { func (c *ProjectsLocationsRegistriesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2264,7 +2004,7 @@ func (c *ProjectsLocationsRegistriesCreateCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "parent": { - // "description": "Required. The project and cloud region where this device registry must be created.\nFor example, `projects/example-project/locations/us-central1`.", + // "description": "Required. The project and cloud region where this device registry must be created. For example, `projects/example-project/locations/us-central1`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -2330,7 +2070,7 @@ func (c *ProjectsLocationsRegistriesDeleteCall) Header() http.Header { func (c *ProjectsLocationsRegistriesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2398,7 +2138,7 @@ func (c *ProjectsLocationsRegistriesDeleteCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "Required. The name of the device registry. For example,\n`projects/example-project/locations/us-central1/registries/my-registry`.", + // "description": "Required. The name of the device registry. 
For example, `projects/example-project/locations/us-central1/registries/my-registry`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", // "required": true, @@ -2472,7 +2212,7 @@ func (c *ProjectsLocationsRegistriesGetCall) Header() http.Header { func (c *ProjectsLocationsRegistriesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2543,7 +2283,7 @@ func (c *ProjectsLocationsRegistriesGetCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "name": { - // "description": "Required. The name of the device registry. For example,\n`projects/example-project/locations/us-central1/registries/my-registry`.", + // "description": "Required. The name of the device registry. For example, `projects/example-project/locations/us-central1/registries/my-registry`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", // "required": true, @@ -2573,9 +2313,8 @@ type ProjectsLocationsRegistriesGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy // set. func (r *ProjectsLocationsRegistriesService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsLocationsRegistriesGetIamPolicyCall { c := &ProjectsLocationsRegistriesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -2611,7 +2350,7 @@ func (c *ProjectsLocationsRegistriesGetIamPolicyCall) Header() http.Header { func (c *ProjectsLocationsRegistriesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2675,7 +2414,7 @@ func (c *ProjectsLocationsRegistriesGetIamPolicyCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}:getIamPolicy", // "httpMethod": "POST", // "id": "cloudiot.projects.locations.registries.getIamPolicy", @@ -2684,7 +2423,7 @@ func (c *ProjectsLocationsRegistriesGetIamPolicyCall) Do(opts ...googleapi.CallO // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. 
See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", // "required": true, @@ -2725,11 +2464,9 @@ func (r *ProjectsLocationsRegistriesService) List(parent string) *ProjectsLocati } // PageSize sets the optional parameter "pageSize": The maximum number -// of registries to return in the response. If this value -// is zero, the service will select a default size. A call may return -// fewer -// objects than requested. A non-empty `next_page_token` in the -// response +// of registries to return in the response. If this value is zero, the +// service will select a default size. A call may return fewer objects +// than requested. A non-empty `next_page_token` in the response // indicates that more data is available. func (c *ProjectsLocationsRegistriesListCall) PageSize(pageSize int64) *ProjectsLocationsRegistriesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) @@ -2737,10 +2474,9 @@ func (c *ProjectsLocationsRegistriesListCall) PageSize(pageSize int64) *Projects } // PageToken sets the optional parameter "pageToken": The value returned -// by the last `ListDeviceRegistriesResponse`; indicates -// that this is a continuation of a prior `ListDeviceRegistries` call -// and -// the system should return the next page of data. +// by the last `ListDeviceRegistriesResponse`; indicates that this is a +// continuation of a prior `ListDeviceRegistries` call and the system +// should return the next page of data. func (c *ProjectsLocationsRegistriesListCall) PageToken(pageToken string) *ProjectsLocationsRegistriesListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -2783,7 +2519,7 @@ func (c *ProjectsLocationsRegistriesListCall) Header() http.Header { func (c *ProjectsLocationsRegistriesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2854,18 +2590,18 @@ func (c *ProjectsLocationsRegistriesListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "pageSize": { - // "description": "The maximum number of registries to return in the response. If this value\nis zero, the service will select a default size. A call may return fewer\nobjects than requested. A non-empty `next_page_token` in the response\nindicates that more data is available.", + // "description": "The maximum number of registries to return in the response. If this value is zero, the service will select a default size. A call may return fewer objects than requested. A non-empty `next_page_token` in the response indicates that more data is available.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "The value returned by the last `ListDeviceRegistriesResponse`; indicates\nthat this is a continuation of a prior `ListDeviceRegistries` call and\nthe system should return the next page of data.", + // "description": "The value returned by the last `ListDeviceRegistriesResponse`; indicates that this is a continuation of a prior `ListDeviceRegistries` call and the system should return the next page of data.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The project and cloud region path. 
For example,\n`projects/example-project/locations/us-central1`.", + // "description": "Required. The project and cloud region path. For example, `projects/example-project/locations/us-central1`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -2925,13 +2661,11 @@ func (r *ProjectsLocationsRegistriesService) Patch(name string, deviceregistry * } // UpdateMask sets the optional parameter "updateMask": Required. Only -// updates the `device_registry` fields indicated by this mask. -// The field mask must not be empty, and it must not contain fields -// that -// are immutable or only set by the server. -// Mutable top-level fields: `event_notification_config`, -// `http_config`, -// `mqtt_config`, and `state_notification_config`. +// updates the `device_registry` fields indicated by this mask. The +// field mask must not be empty, and it must not contain fields that are +// immutable or only set by the server. Mutable top-level fields: +// `event_notification_config`, `http_config`, `mqtt_config`, and +// `state_notification_config`. func (c *ProjectsLocationsRegistriesPatchCall) UpdateMask(updateMask string) *ProjectsLocationsRegistriesPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -2964,7 +2698,7 @@ func (c *ProjectsLocationsRegistriesPatchCall) Header() http.Header { func (c *ProjectsLocationsRegistriesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3037,14 +2771,14 @@ func (c *ProjectsLocationsRegistriesPatchCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "The resource path name. For example,\n`projects/example-project/locations/us-central1/registries/my-registry`.", + // "description": "The resource path name. For example, `projects/example-project/locations/us-central1/registries/my-registry`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "Required. Only updates the `device_registry` fields indicated by this mask.\nThe field mask must not be empty, and it must not contain fields that\nare immutable or only set by the server.\nMutable top-level fields: `event_notification_config`, `http_config`,\n`mqtt_config`, and `state_notification_config`.", + // "description": "Required. Only updates the `device_registry` fields indicated by this mask. The field mask must not be empty, and it must not contain fields that are immutable or only set by the server. Mutable top-level fields: `event_notification_config`, `http_config`, `mqtt_config`, and `state_notification_config`.", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -3077,8 +2811,7 @@ type ProjectsLocationsRegistriesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. +// resource. Replaces any existing policy. 
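A hedged sketch of the etag read-modify-write cycle recommended in the `Policy` comments above, assuming a `*cloudiot.Service` built elsewhere (for example via `cloudiot.NewService`), a hypothetical registry resource name and member, and `roles/cloudiot.viewer` purely as an example role:

// grantViewer illustrates the read-modify-write cycle: fetch the current
// policy, append a binding, and write the same policy object back so the
// etag it carries protects against silently overwriting a concurrent update.
func grantViewer(svc *cloudiot.Service, registry, member string) error {
	policy, err := svc.Projects.Locations.Registries.GetIamPolicy(registry,
		&cloudiot.GetIamPolicyRequest{}).Do()
	if err != nil {
		return err
	}
	policy.Bindings = append(policy.Bindings, &cloudiot.Binding{
		Role:    "roles/cloudiot.viewer", // example role, not taken from this diff
		Members: []string{member},        // e.g. "user:jane@example.com"
	})
	_, err = svc.Projects.Locations.Registries.SetIamPolicy(registry,
		&cloudiot.SetIamPolicyRequest{Policy: policy}).Do()
	return err // a stale etag surfaces here as an error
}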
func (r *ProjectsLocationsRegistriesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsRegistriesSetIamPolicyCall { c := &ProjectsLocationsRegistriesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -3113,7 +2846,7 @@ func (c *ProjectsLocationsRegistriesSetIamPolicyCall) Header() http.Header { func (c *ProjectsLocationsRegistriesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3177,7 +2910,7 @@ func (c *ProjectsLocationsRegistriesSetIamPolicyCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}:setIamPolicy", // "httpMethod": "POST", // "id": "cloudiot.projects.locations.registries.setIamPolicy", @@ -3186,7 +2919,7 @@ func (c *ProjectsLocationsRegistriesSetIamPolicyCall) Do(opts ...googleapi.CallO // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", // "required": true, @@ -3220,10 +2953,8 @@ type ProjectsLocationsRegistriesTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a NOT_FOUND error. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a NOT_FOUND error. func (r *ProjectsLocationsRegistriesService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsRegistriesTestIamPermissionsCall { c := &ProjectsLocationsRegistriesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -3258,7 +2989,7 @@ func (c *ProjectsLocationsRegistriesTestIamPermissionsCall) Header() http.Header func (c *ProjectsLocationsRegistriesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3322,7 +3053,7 @@ func (c *ProjectsLocationsRegistriesTestIamPermissionsCall) Do(opts ...googleapi } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + // "description": "Returns permissions that a caller has on the specified resource. 
If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}:testIamPermissions", // "httpMethod": "POST", // "id": "cloudiot.projects.locations.registries.testIamPermissions", @@ -3331,7 +3062,7 @@ func (c *ProjectsLocationsRegistriesTestIamPermissionsCall) Do(opts ...googleapi // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", // "required": true, @@ -3400,7 +3131,7 @@ func (c *ProjectsLocationsRegistriesUnbindDeviceFromGatewayCall) Header() http.H func (c *ProjectsLocationsRegistriesUnbindDeviceFromGatewayCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3473,7 +3204,7 @@ func (c *ProjectsLocationsRegistriesUnbindDeviceFromGatewayCall) Do(opts ...goog // ], // "parameters": { // "parent": { - // "description": "Required. The name of the registry. For example,\n`projects/example-project/locations/us-central1/registries/my-registry`.", + // "description": "Required. The name of the registry. For example, `projects/example-project/locations/us-central1/registries/my-registry`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", // "required": true, @@ -3541,7 +3272,7 @@ func (c *ProjectsLocationsRegistriesDevicesCreateCall) Header() http.Header { func (c *ProjectsLocationsRegistriesDevicesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3614,7 +3345,7 @@ func (c *ProjectsLocationsRegistriesDevicesCreateCall) Do(opts ...googleapi.Call // ], // "parameters": { // "parent": { - // "description": "Required. The name of the device registry where this device should be created.\nFor example,\n`projects/example-project/locations/us-central1/registries/my-registry`.", + // "description": "Required. The name of the device registry where this device should be created. 
For example, `projects/example-project/locations/us-central1/registries/my-registry`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", // "required": true, @@ -3680,7 +3411,7 @@ func (c *ProjectsLocationsRegistriesDevicesDeleteCall) Header() http.Header { func (c *ProjectsLocationsRegistriesDevicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3748,7 +3479,7 @@ func (c *ProjectsLocationsRegistriesDevicesDeleteCall) Do(opts ...googleapi.Call // ], // "parameters": { // "name": { - // "description": "Required. The name of the device. For example,\n`projects/p0/locations/us-central1/registries/registry0/devices/device0` or\n`projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", + // "description": "Required. The name of the device. For example, `projects/p0/locations/us-central1/registries/registry0/devices/device0` or `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/devices/[^/]+$", // "required": true, @@ -3786,8 +3517,8 @@ func (r *ProjectsLocationsRegistriesDevicesService) Get(name string) *ProjectsLo } // FieldMask sets the optional parameter "fieldMask": The fields of the -// `Device` resource to be returned in the response. If the -// field mask is unset or empty, all fields are returned. +// `Device` resource to be returned in the response. If the field mask +// is unset or empty, all fields are returned. func (c *ProjectsLocationsRegistriesDevicesGetCall) FieldMask(fieldMask string) *ProjectsLocationsRegistriesDevicesGetCall { c.urlParams_.Set("fieldMask", fieldMask) return c @@ -3830,7 +3561,7 @@ func (c *ProjectsLocationsRegistriesDevicesGetCall) Header() http.Header { func (c *ProjectsLocationsRegistriesDevicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3901,13 +3632,13 @@ func (c *ProjectsLocationsRegistriesDevicesGetCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "fieldMask": { - // "description": "The fields of the `Device` resource to be returned in the response. If the\nfield mask is unset or empty, all fields are returned.", + // "description": "The fields of the `Device` resource to be returned in the response. If the field mask is unset or empty, all fields are returned.", // "format": "google-fieldmask", // "location": "query", // "type": "string" // }, // "name": { - // "description": "Required. The name of the device. For example,\n`projects/p0/locations/us-central1/registries/registry0/devices/device0` or\n`projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", + // "description": "Required. The name of the device. 
For example, `projects/p0/locations/us-central1/registries/registry0/devices/device0` or `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/devices/[^/]+$", // "required": true, @@ -3945,16 +3676,16 @@ func (r *ProjectsLocationsRegistriesDevicesService) List(parent string) *Project } // DeviceIds sets the optional parameter "deviceIds": A list of device -// string IDs. For example, `['device0', 'device12']`. -// If empty, this field is ignored. Maximum IDs: 10,000 +// string IDs. For example, `['device0', 'device12']`. If empty, this +// field is ignored. Maximum IDs: 10,000 func (c *ProjectsLocationsRegistriesDevicesListCall) DeviceIds(deviceIds ...string) *ProjectsLocationsRegistriesDevicesListCall { c.urlParams_.SetMulti("deviceIds", append([]string{}, deviceIds...)) return c } // DeviceNumIds sets the optional parameter "deviceNumIds": A list of -// device numeric IDs. If empty, this field is ignored. Maximum -// IDs: 10,000. +// device numeric IDs. If empty, this field is ignored. Maximum IDs: +// 10,000. func (c *ProjectsLocationsRegistriesDevicesListCall) DeviceNumIds(deviceNumIds ...uint64) *ProjectsLocationsRegistriesDevicesListCall { var deviceNumIds_ []string for _, v := range deviceNumIds { @@ -3965,9 +3696,8 @@ func (c *ProjectsLocationsRegistriesDevicesListCall) DeviceNumIds(deviceNumIds . } // FieldMask sets the optional parameter "fieldMask": The fields of the -// `Device` resource to be returned in the response. The -// fields `id` and `num_id` are always returned, along with any -// other fields specified. +// `Device` resource to be returned in the response. The fields `id` and +// `num_id` are always returned, along with any other fields specified. func (c *ProjectsLocationsRegistriesDevicesListCall) FieldMask(fieldMask string) *ProjectsLocationsRegistriesDevicesListCall { c.urlParams_.Set("fieldMask", fieldMask) return c @@ -3975,12 +3705,10 @@ func (c *ProjectsLocationsRegistriesDevicesListCall) FieldMask(fieldMask string) // GatewayListOptionsAssociationsDeviceId sets the optional parameter // "gatewayListOptions.associationsDeviceId": If set, returns only the -// gateways with which the specified device is -// associated. The device ID can be numeric (`num_id`) or the -// user-defined -// string (`id`). For example, if `456` is specified, returns only -// the -// gateways to which the device with `num_id` 456 is bound. +// gateways with which the specified device is associated. The device ID +// can be numeric (`num_id`) or the user-defined string (`id`). For +// example, if `456` is specified, returns only the gateways to which +// the device with `num_id` 456 is bound. func (c *ProjectsLocationsRegistriesDevicesListCall) GatewayListOptionsAssociationsDeviceId(gatewayListOptionsAssociationsDeviceId string) *ProjectsLocationsRegistriesDevicesListCall { c.urlParams_.Set("gatewayListOptions.associationsDeviceId", gatewayListOptionsAssociationsDeviceId) return c @@ -3988,12 +3716,10 @@ func (c *ProjectsLocationsRegistriesDevicesListCall) GatewayListOptionsAssociati // GatewayListOptionsAssociationsGatewayId sets the optional parameter // "gatewayListOptions.associationsGatewayId": If set, only devices -// associated with the specified gateway are returned. -// The gateway ID can be numeric (`num_id`) or the user-defined -// string -// (`id`). For example, if `123` is specified, only devices bound to -// the -// gateway with `num_id` 123 are returned. 
+// associated with the specified gateway are returned. The gateway ID +// can be numeric (`num_id`) or the user-defined string (`id`). For +// example, if `123` is specified, only devices bound to the gateway +// with `num_id` 123 are returned. func (c *ProjectsLocationsRegistriesDevicesListCall) GatewayListOptionsAssociationsGatewayId(gatewayListOptionsAssociationsGatewayId string) *ProjectsLocationsRegistriesDevicesListCall { c.urlParams_.Set("gatewayListOptions.associationsGatewayId", gatewayListOptionsAssociationsGatewayId) return c @@ -4001,26 +3727,24 @@ func (c *ProjectsLocationsRegistriesDevicesListCall) GatewayListOptionsAssociati // GatewayListOptionsGatewayType sets the optional parameter // "gatewayListOptions.gatewayType": If `GATEWAY` is specified, only -// gateways are returned. If `NON_GATEWAY` -// is specified, only non-gateway devices are returned. -// If -// `GATEWAY_TYPE_UNSPECIFIED` is specified, all devices are returned. +// gateways are returned. If `NON_GATEWAY` is specified, only +// non-gateway devices are returned. If `GATEWAY_TYPE_UNSPECIFIED` is +// specified, all devices are returned. // // Possible values: -// "GATEWAY_TYPE_UNSPECIFIED" -// "GATEWAY" -// "NON_GATEWAY" +// "GATEWAY_TYPE_UNSPECIFIED" - If unspecified, the device is +// considered a non-gateway device. +// "GATEWAY" - The device is a gateway. +// "NON_GATEWAY" - The device is not a gateway. func (c *ProjectsLocationsRegistriesDevicesListCall) GatewayListOptionsGatewayType(gatewayListOptionsGatewayType string) *ProjectsLocationsRegistriesDevicesListCall { c.urlParams_.Set("gatewayListOptions.gatewayType", gatewayListOptionsGatewayType) return c } // PageSize sets the optional parameter "pageSize": The maximum number -// of devices to return in the response. If this value -// is zero, the service will select a default size. A call may return -// fewer -// objects than requested. A non-empty `next_page_token` in the -// response +// of devices to return in the response. If this value is zero, the +// service will select a default size. A call may return fewer objects +// than requested. A non-empty `next_page_token` in the response // indicates that more data is available. func (c *ProjectsLocationsRegistriesDevicesListCall) PageSize(pageSize int64) *ProjectsLocationsRegistriesDevicesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) @@ -4028,9 +3752,9 @@ func (c *ProjectsLocationsRegistriesDevicesListCall) PageSize(pageSize int64) *P } // PageToken sets the optional parameter "pageToken": The value returned -// by the last `ListDevicesResponse`; indicates -// that this is a continuation of a prior `ListDevices` call and -// the system should return the next page of data. +// by the last `ListDevicesResponse`; indicates that this is a +// continuation of a prior `ListDevices` call and the system should +// return the next page of data. 
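A minimal, self-contained sketch of the pagination contract spelled out in these comments (a non-empty `next_page_token` means more data and is passed back as `pageToken` on the next call); the project, region, and registry names are placeholders, and the `cloudiot.NewService` constructor with Application Default Credentials is an assumption:

package main

import (
	"context"
	"fmt"
	"log"

	cloudiot "google.golang.org/api/cloudiot/v1"
)

func main() {
	ctx := context.Background()
	svc, err := cloudiot.NewService(ctx) // assumes Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder registry path.
	parent := "projects/my-project/locations/us-central1/registries/my-registry"
	pageToken := ""
	for {
		call := svc.Projects.Locations.Registries.Devices.List(parent).PageSize(50)
		if pageToken != "" {
			call.PageToken(pageToken)
		}
		resp, err := call.Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, d := range resp.Devices {
			fmt.Println(d.Id)
		}
		if resp.NextPageToken == "" {
			break // no further pages
		}
		pageToken = resp.NextPageToken
	}
}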
func (c *ProjectsLocationsRegistriesDevicesListCall) PageToken(pageToken string) *ProjectsLocationsRegistriesDevicesListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -4073,7 +3797,7 @@ func (c *ProjectsLocationsRegistriesDevicesListCall) Header() http.Header { func (c *ProjectsLocationsRegistriesDevicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4144,57 +3868,62 @@ func (c *ProjectsLocationsRegistriesDevicesListCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "deviceIds": { - // "description": "A list of device string IDs. For example, `['device0', 'device12']`.\nIf empty, this field is ignored. Maximum IDs: 10,000", + // "description": "A list of device string IDs. For example, `['device0', 'device12']`. If empty, this field is ignored. Maximum IDs: 10,000", // "location": "query", // "repeated": true, // "type": "string" // }, // "deviceNumIds": { - // "description": "A list of device numeric IDs. If empty, this field is ignored. Maximum\nIDs: 10,000.", + // "description": "A list of device numeric IDs. If empty, this field is ignored. Maximum IDs: 10,000.", // "format": "uint64", // "location": "query", // "repeated": true, // "type": "string" // }, // "fieldMask": { - // "description": "The fields of the `Device` resource to be returned in the response. The\nfields `id` and `num_id` are always returned, along with any\nother fields specified.", + // "description": "The fields of the `Device` resource to be returned in the response. The fields `id` and `num_id` are always returned, along with any other fields specified.", // "format": "google-fieldmask", // "location": "query", // "type": "string" // }, // "gatewayListOptions.associationsDeviceId": { - // "description": "If set, returns only the gateways with which the specified device is\nassociated. The device ID can be numeric (`num_id`) or the user-defined\nstring (`id`). For example, if `456` is specified, returns only the\ngateways to which the device with `num_id` 456 is bound.", + // "description": "If set, returns only the gateways with which the specified device is associated. The device ID can be numeric (`num_id`) or the user-defined string (`id`). For example, if `456` is specified, returns only the gateways to which the device with `num_id` 456 is bound.", // "location": "query", // "type": "string" // }, // "gatewayListOptions.associationsGatewayId": { - // "description": "If set, only devices associated with the specified gateway are returned.\nThe gateway ID can be numeric (`num_id`) or the user-defined string\n(`id`). For example, if `123` is specified, only devices bound to the\ngateway with `num_id` 123 are returned.", + // "description": "If set, only devices associated with the specified gateway are returned. The gateway ID can be numeric (`num_id`) or the user-defined string (`id`). For example, if `123` is specified, only devices bound to the gateway with `num_id` 123 are returned.", // "location": "query", // "type": "string" // }, // "gatewayListOptions.gatewayType": { - // "description": "If `GATEWAY` is specified, only gateways are returned. If `NON_GATEWAY`\nis specified, only non-gateway devices are returned. 
If\n`GATEWAY_TYPE_UNSPECIFIED` is specified, all devices are returned.", + // "description": "If `GATEWAY` is specified, only gateways are returned. If `NON_GATEWAY` is specified, only non-gateway devices are returned. If `GATEWAY_TYPE_UNSPECIFIED` is specified, all devices are returned.", // "enum": [ // "GATEWAY_TYPE_UNSPECIFIED", // "GATEWAY", // "NON_GATEWAY" // ], + // "enumDescriptions": [ + // "If unspecified, the device is considered a non-gateway device.", + // "The device is a gateway.", + // "The device is not a gateway." + // ], // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "The maximum number of devices to return in the response. If this value\nis zero, the service will select a default size. A call may return fewer\nobjects than requested. A non-empty `next_page_token` in the response\nindicates that more data is available.", + // "description": "The maximum number of devices to return in the response. If this value is zero, the service will select a default size. A call may return fewer objects than requested. A non-empty `next_page_token` in the response indicates that more data is available.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "The value returned by the last `ListDevicesResponse`; indicates\nthat this is a continuation of a prior `ListDevices` call and\nthe system should return the next page of data.", + // "description": "The value returned by the last `ListDevicesResponse`; indicates that this is a continuation of a prior `ListDevices` call and the system should return the next page of data.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The device registry path. Required. For example,\n`projects/my-project/locations/us-central1/registries/my-registry`.", + // "description": "Required. The device registry path. Required. For example, `projects/my-project/locations/us-central1/registries/my-registry`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+$", // "required": true, @@ -4246,10 +3975,8 @@ type ProjectsLocationsRegistriesDevicesModifyCloudToDeviceConfigCall struct { } // ModifyCloudToDeviceConfig: Modifies the configuration for the device, -// which is eventually sent from -// the Cloud IoT Core servers. Returns the modified configuration -// version and -// its metadata. +// which is eventually sent from the Cloud IoT Core servers. Returns the +// modified configuration version and its metadata. 
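A short sketch of the optimistic-concurrency behaviour of `VersionToUpdate` described above (zero skips the check; a non-zero value rejects the update if the server-side version differs); `svc` is again an assumed `*cloudiot.Service`, and `encoding/base64` is needed because `BinaryData` is base64 encoded:

// updateConfig pushes new configuration data to a device and relies on
// VersionToUpdate to fail rather than overwrite a concurrent change.
func updateConfig(svc *cloudiot.Service, device string, lastKnownVersion int64) error {
	req := &cloudiot.ModifyCloudToDeviceConfigRequest{
		BinaryData:      base64.StdEncoding.EncodeToString([]byte(`{"fan":"on"}`)),
		VersionToUpdate: lastKnownVersion, // 0 would skip the server-side version check
	}
	_, err := svc.Projects.Locations.Registries.Devices.
		ModifyCloudToDeviceConfig(device, req).Do()
	return err // a version mismatch is reported as an error
}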
func (r *ProjectsLocationsRegistriesDevicesService) ModifyCloudToDeviceConfig(name string, modifycloudtodeviceconfigrequest *ModifyCloudToDeviceConfigRequest) *ProjectsLocationsRegistriesDevicesModifyCloudToDeviceConfigCall { c := &ProjectsLocationsRegistriesDevicesModifyCloudToDeviceConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4284,7 +4011,7 @@ func (c *ProjectsLocationsRegistriesDevicesModifyCloudToDeviceConfigCall) Header func (c *ProjectsLocationsRegistriesDevicesModifyCloudToDeviceConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4348,7 +4075,7 @@ func (c *ProjectsLocationsRegistriesDevicesModifyCloudToDeviceConfigCall) Do(opt } return ret, nil // { - // "description": "Modifies the configuration for the device, which is eventually sent from\nthe Cloud IoT Core servers. Returns the modified configuration version and\nits metadata.", + // "description": "Modifies the configuration for the device, which is eventually sent from the Cloud IoT Core servers. Returns the modified configuration version and its metadata.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}:modifyCloudToDeviceConfig", // "httpMethod": "POST", // "id": "cloudiot.projects.locations.registries.devices.modifyCloudToDeviceConfig", @@ -4357,7 +4084,7 @@ func (c *ProjectsLocationsRegistriesDevicesModifyCloudToDeviceConfigCall) Do(opt // ], // "parameters": { // "name": { - // "description": "Required. The name of the device. For example,\n`projects/p0/locations/us-central1/registries/registry0/devices/device0` or\n`projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", + // "description": "Required. The name of the device. For example, `projects/p0/locations/us-central1/registries/registry0/devices/device0` or `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/devices/[^/]+$", // "required": true, @@ -4399,11 +4126,10 @@ func (r *ProjectsLocationsRegistriesDevicesService) Patch(name string, device *D } // UpdateMask sets the optional parameter "updateMask": Required. Only -// updates the `device` fields indicated by this mask. -// The field mask must not be empty, and it must not contain fields -// that -// are immutable or only set by the server. -// Mutable top-level fields: `credentials`, `blocked`, and `metadata` +// updates the `device` fields indicated by this mask. The field mask +// must not be empty, and it must not contain fields that are immutable +// or only set by the server. 
Mutable top-level fields: `credentials`, +// `blocked`, and `metadata` func (c *ProjectsLocationsRegistriesDevicesPatchCall) UpdateMask(updateMask string) *ProjectsLocationsRegistriesDevicesPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -4436,7 +4162,7 @@ func (c *ProjectsLocationsRegistriesDevicesPatchCall) Header() http.Header { func (c *ProjectsLocationsRegistriesDevicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4509,14 +4235,14 @@ func (c *ProjectsLocationsRegistriesDevicesPatchCall) Do(opts ...googleapi.CallO // ], // "parameters": { // "name": { - // "description": "The resource path name. For example,\n`projects/p1/locations/us-central1/registries/registry0/devices/dev0` or\n`projects/p1/locations/us-central1/registries/registry0/devices/{num_id}`.\nWhen `name` is populated as a response from the service, it always ends\nin the device numeric ID.", + // "description": "The resource path name. For example, `projects/p1/locations/us-central1/registries/registry0/devices/dev0` or `projects/p1/locations/us-central1/registries/registry0/devices/{num_id}`. When `name` is populated as a response from the service, it always ends in the device numeric ID.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/devices/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "Required. Only updates the `device` fields indicated by this mask.\nThe field mask must not be empty, and it must not contain fields that\nare immutable or only set by the server.\nMutable top-level fields: `credentials`, `blocked`, and `metadata`", + // "description": "Required. Only updates the `device` fields indicated by this mask. The field mask must not be empty, and it must not contain fields that are immutable or only set by the server. Mutable top-level fields: `credentials`, `blocked`, and `metadata`", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -4549,26 +4275,19 @@ type ProjectsLocationsRegistriesDevicesSendCommandToDeviceCall struct { } // SendCommandToDevice: Sends a command to the specified device. In -// order for a device to be able -// to receive commands, it must: -// 1) be connected to Cloud IoT Core using the MQTT protocol, and -// 2) be subscribed to the group of MQTT topics specified by -// /devices/{device-id}/commands/#. This subscription will receive -// commands -// at the top-level topic /devices/{device-id}/commands as well as -// commands -// for subfolders, like /devices/{device-id}/commands/subfolder. -// Note that subscribing to specific subfolders is not supported. -// If the command could not be delivered to the device, this method -// will -// return an error; in particular, if the device is not subscribed, -// this -// method will return FAILED_PRECONDITION. Otherwise, this method -// will -// return OK. If the subscription is QoS 1, at least once delivery will -// be -// guaranteed; for QoS 0, no acknowledgment will be expected from the -// device. +// order for a device to be able to receive commands, it must: 1) be +// connected to Cloud IoT Core using the MQTT protocol, and 2) be +// subscribed to the group of MQTT topics specified by +// /devices/{device-id}/commands/#. 
This subscription will receive +// commands at the top-level topic /devices/{device-id}/commands as well +// as commands for subfolders, like +// /devices/{device-id}/commands/subfolder. Note that subscribing to +// specific subfolders is not supported. If the command could not be +// delivered to the device, this method will return an error; in +// particular, if the device is not subscribed, this method will return +// FAILED_PRECONDITION. Otherwise, this method will return OK. If the +// subscription is QoS 1, at least once delivery will be guaranteed; for +// QoS 0, no acknowledgment will be expected from the device. func (r *ProjectsLocationsRegistriesDevicesService) SendCommandToDevice(name string, sendcommandtodevicerequest *SendCommandToDeviceRequest) *ProjectsLocationsRegistriesDevicesSendCommandToDeviceCall { c := &ProjectsLocationsRegistriesDevicesSendCommandToDeviceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4603,7 +4322,7 @@ func (c *ProjectsLocationsRegistriesDevicesSendCommandToDeviceCall) Header() htt func (c *ProjectsLocationsRegistriesDevicesSendCommandToDeviceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4667,7 +4386,7 @@ func (c *ProjectsLocationsRegistriesDevicesSendCommandToDeviceCall) Do(opts ...g } return ret, nil // { - // "description": "Sends a command to the specified device. In order for a device to be able\nto receive commands, it must:\n1) be connected to Cloud IoT Core using the MQTT protocol, and\n2) be subscribed to the group of MQTT topics specified by\n /devices/{device-id}/commands/#. This subscription will receive commands\n at the top-level topic /devices/{device-id}/commands as well as commands\n for subfolders, like /devices/{device-id}/commands/subfolder.\n Note that subscribing to specific subfolders is not supported.\nIf the command could not be delivered to the device, this method will\nreturn an error; in particular, if the device is not subscribed, this\nmethod will return FAILED_PRECONDITION. Otherwise, this method will\nreturn OK. If the subscription is QoS 1, at least once delivery will be\nguaranteed; for QoS 0, no acknowledgment will be expected from the device.", + // "description": "Sends a command to the specified device. In order for a device to be able to receive commands, it must: 1) be connected to Cloud IoT Core using the MQTT protocol, and 2) be subscribed to the group of MQTT topics specified by /devices/{device-id}/commands/#. This subscription will receive commands at the top-level topic /devices/{device-id}/commands as well as commands for subfolders, like /devices/{device-id}/commands/subfolder. Note that subscribing to specific subfolders is not supported. If the command could not be delivered to the device, this method will return an error; in particular, if the device is not subscribed, this method will return FAILED_PRECONDITION. Otherwise, this method will return OK. 
If the subscription is QoS 1, at least once delivery will be guaranteed; for QoS 0, no acknowledgment will be expected from the device.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}:sendCommandToDevice", // "httpMethod": "POST", // "id": "cloudiot.projects.locations.registries.devices.sendCommandToDevice", @@ -4676,7 +4395,7 @@ func (c *ProjectsLocationsRegistriesDevicesSendCommandToDeviceCall) Do(opts ...g // ], // "parameters": { // "name": { - // "description": "Required. The name of the device. For example,\n`projects/p0/locations/us-central1/registries/registry0/devices/device0` or\n`projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", + // "description": "Required. The name of the device. For example, `projects/p0/locations/us-central1/registries/registry0/devices/device0` or `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/devices/[^/]+$", // "required": true, @@ -4710,8 +4429,7 @@ type ProjectsLocationsRegistriesDevicesConfigVersionsListCall struct { } // List: Lists the last few versions of the device configuration in -// descending -// order (i.e.: newest first). +// descending order (i.e.: newest first). func (r *ProjectsLocationsRegistriesDevicesConfigVersionsService) List(name string) *ProjectsLocationsRegistriesDevicesConfigVersionsListCall { c := &ProjectsLocationsRegistriesDevicesConfigVersionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4719,10 +4437,9 @@ func (r *ProjectsLocationsRegistriesDevicesConfigVersionsService) List(name stri } // NumVersions sets the optional parameter "numVersions": The number of -// versions to list. Versions are listed in decreasing order of -// the version number. The maximum number of versions retained is 10. If -// this -// value is zero, it will return all the versions available. +// versions to list. Versions are listed in decreasing order of the +// version number. The maximum number of versions retained is 10. If +// this value is zero, it will return all the versions available. 
func (c *ProjectsLocationsRegistriesDevicesConfigVersionsListCall) NumVersions(numVersions int64) *ProjectsLocationsRegistriesDevicesConfigVersionsListCall { c.urlParams_.Set("numVersions", fmt.Sprint(numVersions)) return c @@ -4765,7 +4482,7 @@ func (c *ProjectsLocationsRegistriesDevicesConfigVersionsListCall) Header() http func (c *ProjectsLocationsRegistriesDevicesConfigVersionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4827,7 +4544,7 @@ func (c *ProjectsLocationsRegistriesDevicesConfigVersionsListCall) Do(opts ...go } return ret, nil // { - // "description": "Lists the last few versions of the device configuration in descending\norder (i.e.: newest first).", + // "description": "Lists the last few versions of the device configuration in descending order (i.e.: newest first).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}/configVersions", // "httpMethod": "GET", // "id": "cloudiot.projects.locations.registries.devices.configVersions.list", @@ -4836,14 +4553,14 @@ func (c *ProjectsLocationsRegistriesDevicesConfigVersionsListCall) Do(opts ...go // ], // "parameters": { // "name": { - // "description": "Required. The name of the device. For example,\n`projects/p0/locations/us-central1/registries/registry0/devices/device0` or\n`projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", + // "description": "Required. The name of the device. For example, `projects/p0/locations/us-central1/registries/registry0/devices/device0` or `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/devices/[^/]+$", // "required": true, // "type": "string" // }, // "numVersions": { - // "description": "The number of versions to list. Versions are listed in decreasing order of\nthe version number. The maximum number of versions retained is 10. If this\nvalue is zero, it will return all the versions available.", + // "description": "The number of versions to list. Versions are listed in decreasing order of the version number. The maximum number of versions retained is 10. If this value is zero, it will return all the versions available.", // "format": "int32", // "location": "query", // "type": "integer" @@ -4873,8 +4590,7 @@ type ProjectsLocationsRegistriesDevicesStatesListCall struct { } // List: Lists the last few versions of the device state in descending -// order (i.e.: -// newest first). +// order (i.e.: newest first). func (r *ProjectsLocationsRegistriesDevicesStatesService) List(name string) *ProjectsLocationsRegistriesDevicesStatesListCall { c := &ProjectsLocationsRegistriesDevicesStatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4882,10 +4598,9 @@ func (r *ProjectsLocationsRegistriesDevicesStatesService) List(name string) *Pro } // NumStates sets the optional parameter "numStates": The number of -// states to list. States are listed in descending order of -// update time. The maximum number of states retained is 10. If -// this -// value is zero, it will return all the states available. +// states to list. States are listed in descending order of update time. +// The maximum number of states retained is 10. 
If this value is zero, +// it will return all the states available. func (c *ProjectsLocationsRegistriesDevicesStatesListCall) NumStates(numStates int64) *ProjectsLocationsRegistriesDevicesStatesListCall { c.urlParams_.Set("numStates", fmt.Sprint(numStates)) return c @@ -4928,7 +4643,7 @@ func (c *ProjectsLocationsRegistriesDevicesStatesListCall) Header() http.Header func (c *ProjectsLocationsRegistriesDevicesStatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4990,7 +4705,7 @@ func (c *ProjectsLocationsRegistriesDevicesStatesListCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Lists the last few versions of the device state in descending order (i.e.:\nnewest first).", + // "description": "Lists the last few versions of the device state in descending order (i.e.: newest first).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}/states", // "httpMethod": "GET", // "id": "cloudiot.projects.locations.registries.devices.states.list", @@ -4999,14 +4714,14 @@ func (c *ProjectsLocationsRegistriesDevicesStatesListCall) Do(opts ...googleapi. // ], // "parameters": { // "name": { - // "description": "Required. The name of the device. For example,\n`projects/p0/locations/us-central1/registries/registry0/devices/device0` or\n`projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", + // "description": "Required. The name of the device. For example, `projects/p0/locations/us-central1/registries/registry0/devices/device0` or `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/devices/[^/]+$", // "required": true, // "type": "string" // }, // "numStates": { - // "description": "The number of states to list. States are listed in descending order of\nupdate time. The maximum number of states retained is 10. If this\nvalue is zero, it will return all the states available.", + // "description": "The number of states to list. States are listed in descending order of update time. The maximum number of states retained is 10. If this value is zero, it will return all the states available.", // "format": "int32", // "location": "query", // "type": "integer" @@ -5035,9 +4750,8 @@ type ProjectsLocationsRegistriesGroupsGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy // set. 
func (r *ProjectsLocationsRegistriesGroupsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsLocationsRegistriesGroupsGetIamPolicyCall { c := &ProjectsLocationsRegistriesGroupsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -5073,7 +4787,7 @@ func (c *ProjectsLocationsRegistriesGroupsGetIamPolicyCall) Header() http.Header func (c *ProjectsLocationsRegistriesGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5137,7 +4851,7 @@ func (c *ProjectsLocationsRegistriesGroupsGetIamPolicyCall) Do(opts ...googleapi } return ret, nil // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/groups/{groupsId}:getIamPolicy", // "httpMethod": "POST", // "id": "cloudiot.projects.locations.registries.groups.getIamPolicy", @@ -5146,7 +4860,7 @@ func (c *ProjectsLocationsRegistriesGroupsGetIamPolicyCall) Do(opts ...googleapi // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/groups/[^/]+$", // "required": true, @@ -5180,8 +4894,7 @@ type ProjectsLocationsRegistriesGroupsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. +// resource. Replaces any existing policy. func (r *ProjectsLocationsRegistriesGroupsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsRegistriesGroupsSetIamPolicyCall { c := &ProjectsLocationsRegistriesGroupsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5216,7 +4929,7 @@ func (c *ProjectsLocationsRegistriesGroupsSetIamPolicyCall) Header() http.Header func (c *ProjectsLocationsRegistriesGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5280,7 +4993,7 @@ func (c *ProjectsLocationsRegistriesGroupsSetIamPolicyCall) Do(opts ...googleapi } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/groups/{groupsId}:setIamPolicy", // "httpMethod": "POST", // "id": "cloudiot.projects.locations.registries.groups.setIamPolicy", @@ -5289,7 +5002,7 @@ func (c *ProjectsLocationsRegistriesGroupsSetIamPolicyCall) Do(opts ...googleapi // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/groups/[^/]+$", // "required": true, @@ -5323,10 +5036,8 @@ type ProjectsLocationsRegistriesGroupsTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a NOT_FOUND error. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a NOT_FOUND error. func (r *ProjectsLocationsRegistriesGroupsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsRegistriesGroupsTestIamPermissionsCall { c := &ProjectsLocationsRegistriesGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5361,7 +5072,7 @@ func (c *ProjectsLocationsRegistriesGroupsTestIamPermissionsCall) Header() http. func (c *ProjectsLocationsRegistriesGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5425,7 +5136,7 @@ func (c *ProjectsLocationsRegistriesGroupsTestIamPermissionsCall) Do(opts ...goo } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/groups/{groupsId}:testIamPermissions", // "httpMethod": "POST", // "id": "cloudiot.projects.locations.registries.groups.testIamPermissions", @@ -5434,7 +5145,7 @@ func (c *ProjectsLocationsRegistriesGroupsTestIamPermissionsCall) Do(opts ...goo // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. 
See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/groups/[^/]+$", // "required": true, @@ -5475,16 +5186,16 @@ func (r *ProjectsLocationsRegistriesGroupsDevicesService) List(parent string) *P } // DeviceIds sets the optional parameter "deviceIds": A list of device -// string IDs. For example, `['device0', 'device12']`. -// If empty, this field is ignored. Maximum IDs: 10,000 +// string IDs. For example, `['device0', 'device12']`. If empty, this +// field is ignored. Maximum IDs: 10,000 func (c *ProjectsLocationsRegistriesGroupsDevicesListCall) DeviceIds(deviceIds ...string) *ProjectsLocationsRegistriesGroupsDevicesListCall { c.urlParams_.SetMulti("deviceIds", append([]string{}, deviceIds...)) return c } // DeviceNumIds sets the optional parameter "deviceNumIds": A list of -// device numeric IDs. If empty, this field is ignored. Maximum -// IDs: 10,000. +// device numeric IDs. If empty, this field is ignored. Maximum IDs: +// 10,000. func (c *ProjectsLocationsRegistriesGroupsDevicesListCall) DeviceNumIds(deviceNumIds ...uint64) *ProjectsLocationsRegistriesGroupsDevicesListCall { var deviceNumIds_ []string for _, v := range deviceNumIds { @@ -5495,9 +5206,8 @@ func (c *ProjectsLocationsRegistriesGroupsDevicesListCall) DeviceNumIds(deviceNu } // FieldMask sets the optional parameter "fieldMask": The fields of the -// `Device` resource to be returned in the response. The -// fields `id` and `num_id` are always returned, along with any -// other fields specified. +// `Device` resource to be returned in the response. The fields `id` and +// `num_id` are always returned, along with any other fields specified. func (c *ProjectsLocationsRegistriesGroupsDevicesListCall) FieldMask(fieldMask string) *ProjectsLocationsRegistriesGroupsDevicesListCall { c.urlParams_.Set("fieldMask", fieldMask) return c @@ -5505,12 +5215,10 @@ func (c *ProjectsLocationsRegistriesGroupsDevicesListCall) FieldMask(fieldMask s // GatewayListOptionsAssociationsDeviceId sets the optional parameter // "gatewayListOptions.associationsDeviceId": If set, returns only the -// gateways with which the specified device is -// associated. The device ID can be numeric (`num_id`) or the -// user-defined -// string (`id`). For example, if `456` is specified, returns only -// the -// gateways to which the device with `num_id` 456 is bound. +// gateways with which the specified device is associated. The device ID +// can be numeric (`num_id`) or the user-defined string (`id`). For +// example, if `456` is specified, returns only the gateways to which +// the device with `num_id` 456 is bound. func (c *ProjectsLocationsRegistriesGroupsDevicesListCall) GatewayListOptionsAssociationsDeviceId(gatewayListOptionsAssociationsDeviceId string) *ProjectsLocationsRegistriesGroupsDevicesListCall { c.urlParams_.Set("gatewayListOptions.associationsDeviceId", gatewayListOptionsAssociationsDeviceId) return c @@ -5518,12 +5226,10 @@ func (c *ProjectsLocationsRegistriesGroupsDevicesListCall) GatewayListOptionsAss // GatewayListOptionsAssociationsGatewayId sets the optional parameter // "gatewayListOptions.associationsGatewayId": If set, only devices -// associated with the specified gateway are returned. -// The gateway ID can be numeric (`num_id`) or the user-defined -// string -// (`id`). For example, if `123` is specified, only devices bound to -// the -// gateway with `num_id` 123 are returned. 
+// associated with the specified gateway are returned. The gateway ID +// can be numeric (`num_id`) or the user-defined string (`id`). For +// example, if `123` is specified, only devices bound to the gateway +// with `num_id` 123 are returned. func (c *ProjectsLocationsRegistriesGroupsDevicesListCall) GatewayListOptionsAssociationsGatewayId(gatewayListOptionsAssociationsGatewayId string) *ProjectsLocationsRegistriesGroupsDevicesListCall { c.urlParams_.Set("gatewayListOptions.associationsGatewayId", gatewayListOptionsAssociationsGatewayId) return c @@ -5531,26 +5237,24 @@ func (c *ProjectsLocationsRegistriesGroupsDevicesListCall) GatewayListOptionsAss // GatewayListOptionsGatewayType sets the optional parameter // "gatewayListOptions.gatewayType": If `GATEWAY` is specified, only -// gateways are returned. If `NON_GATEWAY` -// is specified, only non-gateway devices are returned. -// If -// `GATEWAY_TYPE_UNSPECIFIED` is specified, all devices are returned. +// gateways are returned. If `NON_GATEWAY` is specified, only +// non-gateway devices are returned. If `GATEWAY_TYPE_UNSPECIFIED` is +// specified, all devices are returned. // // Possible values: -// "GATEWAY_TYPE_UNSPECIFIED" -// "GATEWAY" -// "NON_GATEWAY" +// "GATEWAY_TYPE_UNSPECIFIED" - If unspecified, the device is +// considered a non-gateway device. +// "GATEWAY" - The device is a gateway. +// "NON_GATEWAY" - The device is not a gateway. func (c *ProjectsLocationsRegistriesGroupsDevicesListCall) GatewayListOptionsGatewayType(gatewayListOptionsGatewayType string) *ProjectsLocationsRegistriesGroupsDevicesListCall { c.urlParams_.Set("gatewayListOptions.gatewayType", gatewayListOptionsGatewayType) return c } // PageSize sets the optional parameter "pageSize": The maximum number -// of devices to return in the response. If this value -// is zero, the service will select a default size. A call may return -// fewer -// objects than requested. A non-empty `next_page_token` in the -// response +// of devices to return in the response. If this value is zero, the +// service will select a default size. A call may return fewer objects +// than requested. A non-empty `next_page_token` in the response // indicates that more data is available. func (c *ProjectsLocationsRegistriesGroupsDevicesListCall) PageSize(pageSize int64) *ProjectsLocationsRegistriesGroupsDevicesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) @@ -5558,9 +5262,9 @@ func (c *ProjectsLocationsRegistriesGroupsDevicesListCall) PageSize(pageSize int } // PageToken sets the optional parameter "pageToken": The value returned -// by the last `ListDevicesResponse`; indicates -// that this is a continuation of a prior `ListDevices` call and -// the system should return the next page of data. +// by the last `ListDevicesResponse`; indicates that this is a +// continuation of a prior `ListDevices` call and the system should +// return the next page of data. 
func (c *ProjectsLocationsRegistriesGroupsDevicesListCall) PageToken(pageToken string) *ProjectsLocationsRegistriesGroupsDevicesListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -5603,7 +5307,7 @@ func (c *ProjectsLocationsRegistriesGroupsDevicesListCall) Header() http.Header func (c *ProjectsLocationsRegistriesGroupsDevicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5674,57 +5378,62 @@ func (c *ProjectsLocationsRegistriesGroupsDevicesListCall) Do(opts ...googleapi. // ], // "parameters": { // "deviceIds": { - // "description": "A list of device string IDs. For example, `['device0', 'device12']`.\nIf empty, this field is ignored. Maximum IDs: 10,000", + // "description": "A list of device string IDs. For example, `['device0', 'device12']`. If empty, this field is ignored. Maximum IDs: 10,000", // "location": "query", // "repeated": true, // "type": "string" // }, // "deviceNumIds": { - // "description": "A list of device numeric IDs. If empty, this field is ignored. Maximum\nIDs: 10,000.", + // "description": "A list of device numeric IDs. If empty, this field is ignored. Maximum IDs: 10,000.", // "format": "uint64", // "location": "query", // "repeated": true, // "type": "string" // }, // "fieldMask": { - // "description": "The fields of the `Device` resource to be returned in the response. The\nfields `id` and `num_id` are always returned, along with any\nother fields specified.", + // "description": "The fields of the `Device` resource to be returned in the response. The fields `id` and `num_id` are always returned, along with any other fields specified.", // "format": "google-fieldmask", // "location": "query", // "type": "string" // }, // "gatewayListOptions.associationsDeviceId": { - // "description": "If set, returns only the gateways with which the specified device is\nassociated. The device ID can be numeric (`num_id`) or the user-defined\nstring (`id`). For example, if `456` is specified, returns only the\ngateways to which the device with `num_id` 456 is bound.", + // "description": "If set, returns only the gateways with which the specified device is associated. The device ID can be numeric (`num_id`) or the user-defined string (`id`). For example, if `456` is specified, returns only the gateways to which the device with `num_id` 456 is bound.", // "location": "query", // "type": "string" // }, // "gatewayListOptions.associationsGatewayId": { - // "description": "If set, only devices associated with the specified gateway are returned.\nThe gateway ID can be numeric (`num_id`) or the user-defined string\n(`id`). For example, if `123` is specified, only devices bound to the\ngateway with `num_id` 123 are returned.", + // "description": "If set, only devices associated with the specified gateway are returned. The gateway ID can be numeric (`num_id`) or the user-defined string (`id`). For example, if `123` is specified, only devices bound to the gateway with `num_id` 123 are returned.", // "location": "query", // "type": "string" // }, // "gatewayListOptions.gatewayType": { - // "description": "If `GATEWAY` is specified, only gateways are returned. If `NON_GATEWAY`\nis specified, only non-gateway devices are returned. 
If\n`GATEWAY_TYPE_UNSPECIFIED` is specified, all devices are returned.", + // "description": "If `GATEWAY` is specified, only gateways are returned. If `NON_GATEWAY` is specified, only non-gateway devices are returned. If `GATEWAY_TYPE_UNSPECIFIED` is specified, all devices are returned.", // "enum": [ // "GATEWAY_TYPE_UNSPECIFIED", // "GATEWAY", // "NON_GATEWAY" // ], + // "enumDescriptions": [ + // "If unspecified, the device is considered a non-gateway device.", + // "The device is a gateway.", + // "The device is not a gateway." + // ], // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "The maximum number of devices to return in the response. If this value\nis zero, the service will select a default size. A call may return fewer\nobjects than requested. A non-empty `next_page_token` in the response\nindicates that more data is available.", + // "description": "The maximum number of devices to return in the response. If this value is zero, the service will select a default size. A call may return fewer objects than requested. A non-empty `next_page_token` in the response indicates that more data is available.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "The value returned by the last `ListDevicesResponse`; indicates\nthat this is a continuation of a prior `ListDevices` call and\nthe system should return the next page of data.", + // "description": "The value returned by the last `ListDevicesResponse`; indicates that this is a continuation of a prior `ListDevices` call and the system should return the next page of data.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The device registry path. Required. For example,\n`projects/my-project/locations/us-central1/registries/my-registry`.", + // "description": "Required. The device registry path. Required. For example, `projects/my-project/locations/us-central1/registries/my-registry`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/registries/[^/]+/groups/[^/]+$", // "required": true, diff --git a/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json b/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json index 68f12201adf..f51957ede9c 100644 --- a/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json +++ b/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json @@ -15,7 +15,7 @@ "baseUrl": "https://cloudkms.googleapis.com/", "batchPath": "batch", "canonicalName": "Cloud KMS", - "description": "Manages keys and performs cryptographic operations in a central cloud service, for direct use by other cloud resources and applications.\n", + "description": "Manages keys and performs cryptographic operations in a central cloud service, for direct use by other cloud resources and applications. ", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/kms/", "fullyEncodeReservedExpansion": true, @@ -194,12 +194,12 @@ ], "parameters": { "keyRingId": { - "description": "Required. It must be unique within a location and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", + "description": "Required. It must be unique within a location and match the regular expression `[a-zA-Z0-9_-]{1,63}`", "location": "query", "type": "string" }, "parent": { - "description": "Required. The resource name of the location associated with the\nKeyRings, in the format `projects/*/locations/*`.", + "description": "Required. 
The resource name of the location associated with the KeyRings, in the format `projects/*/locations/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -245,7 +245,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:getIamPolicy", "httpMethod": "GET", "id": "cloudkms.projects.locations.keyRings.getIamPolicy", @@ -254,13 +254,13 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" }, "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", "required": true, @@ -286,28 +286,28 @@ ], "parameters": { "filter": { - "description": "Optional. Only include resources that match the filter in the response. For\nmore information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", + "description": "Optional. Only include resources that match the filter in the response. For more information, see [Sorting and filtering list results](https://cloud.google.com/kms/docs/sorting-and-filtering).", "location": "query", "type": "string" }, "orderBy": { - "description": "Optional. Specify how the results should be sorted. If not specified, the\nresults will be sorted in the default order. For more information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", + "description": "Optional. Specify how the results should be sorted. If not specified, the results will be sorted in the default order. For more information, see [Sorting and filtering list results](https://cloud.google.com/kms/docs/sorting-and-filtering).", "location": "query", "type": "string" }, "pageSize": { - "description": "Optional. Optional limit on the number of KeyRings to include in the\nresponse. 
Further KeyRings can subsequently be obtained by\nincluding the ListKeyRingsResponse.next_page_token in a subsequent\nrequest. If unspecified, the server will pick an appropriate default.", + "description": "Optional. Optional limit on the number of KeyRings to include in the response. Further KeyRings can subsequently be obtained by including the ListKeyRingsResponse.next_page_token in a subsequent request. If unspecified, the server will pick an appropriate default.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Optional. Optional pagination token, returned earlier via\nListKeyRingsResponse.next_page_token.", + "description": "Optional. Optional pagination token, returned earlier via ListKeyRingsResponse.next_page_token.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The resource name of the location associated with the\nKeyRings, in the format `projects/*/locations/*`.", + "description": "Required. The resource name of the location associated with the KeyRings, in the format `projects/*/locations/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -324,7 +324,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:setIamPolicy", "httpMethod": "POST", "id": "cloudkms.projects.locations.keyRings.setIamPolicy", @@ -333,7 +333,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", "required": true, @@ -353,7 +353,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. 
This operation may \"fail open\" without warning.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:testIamPermissions", "httpMethod": "POST", "id": "cloudkms.projects.locations.keyRings.testIamPermissions", @@ -362,7 +362,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", "required": true, @@ -386,7 +386,7 @@ "cryptoKeys": { "methods": { "create": { - "description": "Create a new CryptoKey within a KeyRing.\n\nCryptoKey.purpose and\nCryptoKey.version_template.algorithm\nare required.", + "description": "Create a new CryptoKey within a KeyRing. CryptoKey.purpose and CryptoKey.version_template.algorithm are required.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys", "httpMethod": "POST", "id": "cloudkms.projects.locations.keyRings.cryptoKeys.create", @@ -395,19 +395,19 @@ ], "parameters": { "cryptoKeyId": { - "description": "Required. It must be unique within a KeyRing and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", + "description": "Required. It must be unique within a KeyRing and match the regular expression `[a-zA-Z0-9_-]{1,63}`", "location": "query", "type": "string" }, "parent": { - "description": "Required. The name of the KeyRing associated with the\nCryptoKeys.", + "description": "Required. The name of the KeyRing associated with the CryptoKeys.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", "required": true, "type": "string" }, "skipInitialVersionCreation": { - "description": "If set to true, the request will create a CryptoKey without any\nCryptoKeyVersions. You must manually call\nCreateCryptoKeyVersion or\nImportCryptoKeyVersion\nbefore you can use this CryptoKey.", + "description": "If set to true, the request will create a CryptoKey without any CryptoKeyVersions. You must manually call CreateCryptoKeyVersion or ImportCryptoKeyVersion before you can use this CryptoKey.", "location": "query", "type": "boolean" } @@ -425,7 +425,7 @@ ] }, "decrypt": { - "description": "Decrypts data that was protected by Encrypt. The CryptoKey.purpose\nmust be ENCRYPT_DECRYPT.", + "description": "Decrypts data that was protected by Encrypt. The CryptoKey.purpose must be ENCRYPT_DECRYPT.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:decrypt", "httpMethod": "POST", "id": "cloudkms.projects.locations.keyRings.cryptoKeys.decrypt", @@ -434,7 +434,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the CryptoKey to use for decryption.\nThe server will choose the appropriate version.", + "description": "Required. The resource name of the CryptoKey to use for decryption. 
The server will choose the appropriate version.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", "required": true, @@ -454,7 +454,7 @@ ] }, "encrypt": { - "description": "Encrypts data, so that it can only be recovered by a call to Decrypt.\nThe CryptoKey.purpose must be\nENCRYPT_DECRYPT.", + "description": "Encrypts data, so that it can only be recovered by a call to Decrypt. The CryptoKey.purpose must be ENCRYPT_DECRYPT.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:encrypt", "httpMethod": "POST", "id": "cloudkms.projects.locations.keyRings.cryptoKeys.encrypt", @@ -463,7 +463,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the CryptoKey or CryptoKeyVersion\nto use for encryption.\n\nIf a CryptoKey is specified, the server will use its\nprimary version.", + "description": "Required. The resource name of the CryptoKey or CryptoKeyVersion to use for encryption. If a CryptoKey is specified, the server will use its primary version.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/.*$", "required": true, @@ -483,7 +483,7 @@ ] }, "get": { - "description": "Returns metadata for a given CryptoKey, as well as its\nprimary CryptoKeyVersion.", + "description": "Returns metadata for a given CryptoKey, as well as its primary CryptoKeyVersion.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}", "httpMethod": "GET", "id": "cloudkms.projects.locations.keyRings.cryptoKeys.get", @@ -509,7 +509,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:getIamPolicy", "httpMethod": "GET", "id": "cloudkms.projects.locations.keyRings.cryptoKeys.getIamPolicy", @@ -518,13 +518,13 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. 
To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" }, "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", "required": true, @@ -550,28 +550,28 @@ ], "parameters": { "filter": { - "description": "Optional. Only include resources that match the filter in the response. For\nmore information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", + "description": "Optional. Only include resources that match the filter in the response. For more information, see [Sorting and filtering list results](https://cloud.google.com/kms/docs/sorting-and-filtering).", "location": "query", "type": "string" }, "orderBy": { - "description": "Optional. Specify how the results should be sorted. If not specified, the\nresults will be sorted in the default order. For more information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", + "description": "Optional. Specify how the results should be sorted. If not specified, the results will be sorted in the default order. For more information, see [Sorting and filtering list results](https://cloud.google.com/kms/docs/sorting-and-filtering).", "location": "query", "type": "string" }, "pageSize": { - "description": "Optional. Optional limit on the number of CryptoKeys to include in the\nresponse. Further CryptoKeys can subsequently be obtained by\nincluding the ListCryptoKeysResponse.next_page_token in a subsequent\nrequest. If unspecified, the server will pick an appropriate default.", + "description": "Optional. Optional limit on the number of CryptoKeys to include in the response. Further CryptoKeys can subsequently be obtained by including the ListCryptoKeysResponse.next_page_token in a subsequent request. If unspecified, the server will pick an appropriate default.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Optional. Optional pagination token, returned earlier via\nListCryptoKeysResponse.next_page_token.", + "description": "Optional. Optional pagination token, returned earlier via ListCryptoKeysResponse.next_page_token.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The resource name of the KeyRing to list, in the format\n`projects/*/locations/*/keyRings/*`.", + "description": "Required. The resource name of the KeyRing to list, in the format `projects/*/locations/*/keyRings/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", "required": true, @@ -583,6 +583,10 @@ "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED", "FULL" ], + "enumDescriptions": [ + "Default view for each CryptoKeyVersion. Does not include the attestation field.", + "Provides all fields in each CryptoKeyVersion, including the attestation." + ], "location": "query", "type": "string" } @@ -606,7 +610,7 @@ ], "parameters": { "name": { - "description": "Output only. 
The resource name for this CryptoKey in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", + "description": "Output only. The resource name for this CryptoKey in the format `projects/*/locations/*/keyRings/*/cryptoKeys/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", "required": true, @@ -632,7 +636,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:setIamPolicy", "httpMethod": "POST", "id": "cloudkms.projects.locations.keyRings.cryptoKeys.setIamPolicy", @@ -641,7 +645,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", "required": true, @@ -661,7 +665,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:testIamPermissions", "httpMethod": "POST", "id": "cloudkms.projects.locations.keyRings.cryptoKeys.testIamPermissions", @@ -670,7 +674,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", "required": true, @@ -690,7 +694,7 @@ ] }, "updatePrimaryVersion": { - "description": "Update the version of a CryptoKey that will be used in Encrypt.\n\nReturns an error if called on an asymmetric key.", + "description": "Update the version of a CryptoKey that will be used in Encrypt. 
Returns an error if called on an asymmetric key.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:updatePrimaryVersion", "httpMethod": "POST", "id": "cloudkms.projects.locations.keyRings.cryptoKeys.updatePrimaryVersion", @@ -723,7 +727,7 @@ "cryptoKeyVersions": { "methods": { "asymmetricDecrypt": { - "description": "Decrypts data that was encrypted with a public key retrieved from\nGetPublicKey corresponding to a CryptoKeyVersion with\nCryptoKey.purpose ASYMMETRIC_DECRYPT.", + "description": "Decrypts data that was encrypted with a public key retrieved from GetPublicKey corresponding to a CryptoKeyVersion with CryptoKey.purpose ASYMMETRIC_DECRYPT.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:asymmetricDecrypt", "httpMethod": "POST", "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.asymmetricDecrypt", @@ -732,7 +736,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the CryptoKeyVersion to use for\ndecryption.", + "description": "Required. The resource name of the CryptoKeyVersion to use for decryption.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", "required": true, @@ -752,7 +756,7 @@ ] }, "asymmetricSign": { - "description": "Signs data using a CryptoKeyVersion with CryptoKey.purpose\nASYMMETRIC_SIGN, producing a signature that can be verified with the public\nkey retrieved from GetPublicKey.", + "description": "Signs data using a CryptoKeyVersion with CryptoKey.purpose ASYMMETRIC_SIGN, producing a signature that can be verified with the public key retrieved from GetPublicKey.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:asymmetricSign", "httpMethod": "POST", "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.asymmetricSign", @@ -781,7 +785,7 @@ ] }, "create": { - "description": "Create a new CryptoKeyVersion in a CryptoKey.\n\nThe server will assign the next sequential id. If unset,\nstate will be set to\nENABLED.", + "description": "Create a new CryptoKeyVersion in a CryptoKey. The server will assign the next sequential id. If unset, state will be set to ENABLED.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions", "httpMethod": "POST", "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.create", @@ -790,7 +794,7 @@ ], "parameters": { "parent": { - "description": "Required. The name of the CryptoKey associated with\nthe CryptoKeyVersions.", + "description": "Required. 
The name of the CryptoKey associated with the CryptoKeyVersions.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", "required": true, @@ -810,7 +814,7 @@ ] }, "destroy": { - "description": "Schedule a CryptoKeyVersion for destruction.\n\nUpon calling this method, CryptoKeyVersion.state will be set to\nDESTROY_SCHEDULED\nand destroy_time will be set to a time 24\nhours in the future, at which point the state\nwill be changed to\nDESTROYED, and the key\nmaterial will be irrevocably destroyed.\n\nBefore the destroy_time is reached,\nRestoreCryptoKeyVersion may be called to reverse the process.", + "description": "Schedule a CryptoKeyVersion for destruction. Upon calling this method, CryptoKeyVersion.state will be set to DESTROY_SCHEDULED and destroy_time will be set to a time 24 hours in the future, at which point the state will be changed to DESTROYED, and the key material will be irrevocably destroyed. Before the destroy_time is reached, RestoreCryptoKeyVersion may be called to reverse the process.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:destroy", "httpMethod": "POST", "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.destroy", @@ -865,7 +869,7 @@ ] }, "getPublicKey": { - "description": "Returns the public key for the given CryptoKeyVersion. The\nCryptoKey.purpose must be\nASYMMETRIC_SIGN or\nASYMMETRIC_DECRYPT.", + "description": "Returns the public key for the given CryptoKeyVersion. The CryptoKey.purpose must be ASYMMETRIC_SIGN or ASYMMETRIC_DECRYPT.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}/publicKey", "httpMethod": "GET", "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.getPublicKey", @@ -874,7 +878,7 @@ ], "parameters": { "name": { - "description": "Required. The name of the CryptoKeyVersion public key to\nget.", + "description": "Required. The name of the CryptoKeyVersion public key to get.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", "required": true, @@ -891,7 +895,7 @@ ] }, "import": { - "description": "Imports a new CryptoKeyVersion into an existing CryptoKey using the\nwrapped key material provided in the request.\n\nThe version ID will be assigned the next sequential id within the\nCryptoKey.", + "description": "Imports a new CryptoKeyVersion into an existing CryptoKey using the wrapped key material provided in the request. The version ID will be assigned the next sequential id within the CryptoKey.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions:import", "httpMethod": "POST", "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.import", @@ -900,7 +904,7 @@ ], "parameters": { "parent": { - "description": "Required. The name of the CryptoKey to\nbe imported into.", + "description": "Required. The name of the CryptoKey to be imported into.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", "required": true, @@ -929,28 +933,28 @@ ], "parameters": { "filter": { - "description": "Optional. Only include resources that match the filter in the response. 
For\nmore information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", + "description": "Optional. Only include resources that match the filter in the response. For more information, see [Sorting and filtering list results](https://cloud.google.com/kms/docs/sorting-and-filtering).", "location": "query", "type": "string" }, "orderBy": { - "description": "Optional. Specify how the results should be sorted. If not specified, the\nresults will be sorted in the default order. For more information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", + "description": "Optional. Specify how the results should be sorted. If not specified, the results will be sorted in the default order. For more information, see [Sorting and filtering list results](https://cloud.google.com/kms/docs/sorting-and-filtering).", "location": "query", "type": "string" }, "pageSize": { - "description": "Optional. Optional limit on the number of CryptoKeyVersions to\ninclude in the response. Further CryptoKeyVersions can\nsubsequently be obtained by including the\nListCryptoKeyVersionsResponse.next_page_token in a subsequent request.\nIf unspecified, the server will pick an appropriate default.", + "description": "Optional. Optional limit on the number of CryptoKeyVersions to include in the response. Further CryptoKeyVersions can subsequently be obtained by including the ListCryptoKeyVersionsResponse.next_page_token in a subsequent request. If unspecified, the server will pick an appropriate default.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Optional. Optional pagination token, returned earlier via\nListCryptoKeyVersionsResponse.next_page_token.", + "description": "Optional. Optional pagination token, returned earlier via ListCryptoKeyVersionsResponse.next_page_token.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The resource name of the CryptoKey to list, in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", + "description": "Required. The resource name of the CryptoKey to list, in the format `projects/*/locations/*/keyRings/*/cryptoKeys/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", "required": true, @@ -962,6 +966,10 @@ "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED", "FULL" ], + "enumDescriptions": [ + "Default view for each CryptoKeyVersion. Does not include the attestation field.", + "Provides all fields in each CryptoKeyVersion, including the attestation." + ], "location": "query", "type": "string" } @@ -976,7 +984,7 @@ ] }, "patch": { - "description": "Update a CryptoKeyVersion's metadata.\n\nstate may be changed between\nENABLED and\nDISABLED using this\nmethod. See DestroyCryptoKeyVersion and RestoreCryptoKeyVersion to\nmove between other states.", + "description": "Update a CryptoKeyVersion's metadata. state may be changed between ENABLED and DISABLED using this method. See DestroyCryptoKeyVersion and RestoreCryptoKeyVersion to move between other states.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}", "httpMethod": "PATCH", "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.patch", @@ -985,7 +993,7 @@ ], "parameters": { "name": { - "description": "Output only. 
The resource name for this CryptoKeyVersion in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.", + "description": "Output only. The resource name for this CryptoKeyVersion in the format `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", "required": true, @@ -1011,7 +1019,7 @@ ] }, "restore": { - "description": "Restore a CryptoKeyVersion in the\nDESTROY_SCHEDULED\nstate.\n\nUpon restoration of the CryptoKeyVersion, state\nwill be set to DISABLED,\nand destroy_time will be cleared.", + "description": "Restore a CryptoKeyVersion in the DESTROY_SCHEDULED state. Upon restoration of the CryptoKeyVersion, state will be set to DISABLED, and destroy_time will be cleared.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:restore", "httpMethod": "POST", "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.restore", @@ -1046,7 +1054,7 @@ "importJobs": { "methods": { "create": { - "description": "Create a new ImportJob within a KeyRing.\n\nImportJob.import_method is required.", + "description": "Create a new ImportJob within a KeyRing. ImportJob.import_method is required.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs", "httpMethod": "POST", "id": "cloudkms.projects.locations.keyRings.importJobs.create", @@ -1055,12 +1063,12 @@ ], "parameters": { "importJobId": { - "description": "Required. It must be unique within a KeyRing and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", + "description": "Required. It must be unique within a KeyRing and match the regular expression `[a-zA-Z0-9_-]{1,63}`", "location": "query", "type": "string" }, "parent": { - "description": "Required. The name of the KeyRing associated with the\nImportJobs.", + "description": "Required. The name of the KeyRing associated with the ImportJobs.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", "required": true, @@ -1106,7 +1114,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs/{importJobsId}:getIamPolicy", "httpMethod": "GET", "id": "cloudkms.projects.locations.keyRings.importJobs.getIamPolicy", @@ -1115,13 +1123,13 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. 
Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" }, "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/importJobs/[^/]+$", "required": true, @@ -1147,28 +1155,28 @@ ], "parameters": { "filter": { - "description": "Optional. Only include resources that match the filter in the response. For\nmore information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", + "description": "Optional. Only include resources that match the filter in the response. For more information, see [Sorting and filtering list results](https://cloud.google.com/kms/docs/sorting-and-filtering).", "location": "query", "type": "string" }, "orderBy": { - "description": "Optional. Specify how the results should be sorted. If not specified, the\nresults will be sorted in the default order. For more information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", + "description": "Optional. Specify how the results should be sorted. If not specified, the results will be sorted in the default order. For more information, see [Sorting and filtering list results](https://cloud.google.com/kms/docs/sorting-and-filtering).", "location": "query", "type": "string" }, "pageSize": { - "description": "Optional. Optional limit on the number of ImportJobs to include in the\nresponse. Further ImportJobs can subsequently be obtained by\nincluding the ListImportJobsResponse.next_page_token in a subsequent\nrequest. If unspecified, the server will pick an appropriate default.", + "description": "Optional. Optional limit on the number of ImportJobs to include in the response. Further ImportJobs can subsequently be obtained by including the ListImportJobsResponse.next_page_token in a subsequent request. If unspecified, the server will pick an appropriate default.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Optional. Optional pagination token, returned earlier via\nListImportJobsResponse.next_page_token.", + "description": "Optional. Optional pagination token, returned earlier via ListImportJobsResponse.next_page_token.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The resource name of the KeyRing to list, in the format\n`projects/*/locations/*/keyRings/*`.", + "description": "Required. The resource name of the KeyRing to list, in the format `projects/*/locations/*/keyRings/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", "required": true, @@ -1185,7 +1193,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. 
Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs/{importJobsId}:setIamPolicy", "httpMethod": "POST", "id": "cloudkms.projects.locations.keyRings.importJobs.setIamPolicy", @@ -1194,7 +1202,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/importJobs/[^/]+$", "required": true, @@ -1214,7 +1222,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs/{importJobsId}:testIamPermissions", "httpMethod": "POST", "id": "cloudkms.projects.locations.keyRings.importJobs.testIamPermissions", @@ -1223,7 +1231,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/importJobs/[^/]+$", "required": true, @@ -1251,7 +1259,7 @@ } } }, - "revision": "20200509", + "revision": "20200903", "rootUrl": "https://cloudkms.googleapis.com/", "schemas": { "AsymmetricDecryptRequest": { @@ -1259,9 +1267,14 @@ "id": "AsymmetricDecryptRequest", "properties": { "ciphertext": { - "description": "Required. The data encrypted with the named CryptoKeyVersion's public\nkey using OAEP.", + "description": "Required. The data encrypted with the named CryptoKeyVersion's public key using OAEP.", "format": "byte", "type": "string" + }, + "ciphertextCrc32c": { + "description": "Optional. An optional CRC32C checksum of the AsymmetricDecryptRequest.ciphertext. If specified, KeyManagementService will verify the integrity of the received AsymmetricDecryptRequest.ciphertext using this checksum. KeyManagementService will report an error if the checksum verification fails. 
If you receive a checksum error, your client should verify that CRC32C(AsymmetricDecryptRequest.ciphertext) is equal to AsymmetricDecryptRequest.ciphertext_crc32c, and if so, perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type. NOTE: This field is in Beta.", + "format": "int64", + "type": "string" } }, "type": "object" @@ -1274,6 +1287,15 @@ "description": "The decrypted data originally encrypted with the matching public key.", "format": "byte", "type": "string" + }, + "plaintextCrc32c": { + "description": "Integrity verification field. A CRC32C checksum of the returned AsymmetricDecryptResponse.plaintext. An integrity check of AsymmetricDecryptResponse.plaintext can be performed by computing the CRC32C checksum of AsymmetricDecryptResponse.plaintext and comparing your results to this field. Discard the response in case of non-matching checksum values, and perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type. NOTE: This field is in Beta.", + "format": "int64", + "type": "string" + }, + "verifiedCiphertextCrc32c": { + "description": "Integrity verification field. A flag indicating whether AsymmetricDecryptRequest.ciphertext_crc32c was received by KeyManagementService and used for the integrity verification of the ciphertext. A false value of this field indicates either that AsymmetricDecryptRequest.ciphertext_crc32c was left unset or that it was not delivered to KeyManagementService. If you've set AsymmetricDecryptRequest.ciphertext_crc32c but this field is still false, discard the response and perform a limited number of retries. NOTE: This field is in Beta.", + "type": "boolean" } }, "type": "object" @@ -1284,7 +1306,12 @@ "properties": { "digest": { "$ref": "Digest", - "description": "Required. The digest of the data to sign. The digest must be produced with\nthe same digest algorithm as specified by the key version's\nalgorithm." + "description": "Required. The digest of the data to sign. The digest must be produced with the same digest algorithm as specified by the key version's algorithm." + }, + "digestCrc32c": { + "description": "Optional. An optional CRC32C checksum of the AsymmetricSignRequest.digest. If specified, KeyManagementService will verify the integrity of the received AsymmetricSignRequest.digest using this checksum. KeyManagementService will report an error if the checksum verification fails. If you receive a checksum error, your client should verify that CRC32C(AsymmetricSignRequest.digest) is equal to AsymmetricSignRequest.digest_crc32c, and if so, perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type. 
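The *Crc32c fields introduced in this revision all describe the same client-side integrity check: compute CRC32C over the bytes you sent or received, compare it with the checksum the service reports, and discard the response (retrying a limited number of times) on a mismatch. A minimal Go sketch of that check, using only the standard library's Castagnoli table; the payload and the checksum string here are illustrative stand-ins, not fields of the generated client's response types:

```go
package main

import (
	"fmt"
	"hash/crc32"
	"strconv"
)

// crc32c computes the CRC32C (Castagnoli) checksum the KMS descriptions refer to.
func crc32c(data []byte) uint32 {
	return crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
}

// verifyChecksum compares a locally computed CRC32C of the returned bytes against
// the checksum field from the response. The field is documented as an int64 that
// never exceeds 2^32-1 and is carried as a decimal string, so it is parsed first.
func verifyChecksum(payload []byte, reportedCrc32c string) (bool, error) {
	want, err := strconv.ParseUint(reportedCrc32c, 10, 64)
	if err != nil {
		return false, fmt.Errorf("parsing checksum field: %w", err)
	}
	return uint64(crc32c(payload)) == want, nil
}

func main() {
	// e.g. a DecryptResponse/AsymmetricDecryptResponse plaintext after base64 decoding
	plaintext := []byte("example decrypted payload")
	reported := strconv.FormatUint(uint64(crc32c(plaintext)), 10)

	ok, err := verifyChecksum(plaintext, reported)
	// If ok is false, discard the response and perform a limited number of retries,
	// as the field descriptions above advise.
	fmt.Println(ok, err)
}
```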
NOTE: This field is in Beta.", + "format": "int64", + "type": "string" } }, "type": "object" @@ -1293,16 +1320,29 @@ "description": "Response message for KeyManagementService.AsymmetricSign.", "id": "AsymmetricSignResponse", "properties": { + "name": { + "description": "The resource name of the CryptoKeyVersion used for signing. Check this field to verify that the intended resource was used for signing. NOTE: This field is in Beta.", + "type": "string" + }, "signature": { "description": "The created signature.", "format": "byte", "type": "string" + }, + "signatureCrc32c": { + "description": "Integrity verification field. A CRC32C checksum of the returned AsymmetricSignResponse.signature. An integrity check of AsymmetricSignResponse.signature can be performed by computing the CRC32C checksum of AsymmetricSignResponse.signature and comparing your results to this field. Discard the response in case of non-matching checksum values, and perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type. NOTE: This field is in Beta.", + "format": "int64", + "type": "string" + }, + "verifiedDigestCrc32c": { + "description": "Integrity verification field. A flag indicating whether AsymmetricSignRequest.digest_crc32c was received by KeyManagementService and used for the integrity verification of the digest. A false value of this field indicates either that AsymmetricSignRequest.digest_crc32c was left unset or that it was not delivered to KeyManagementService. If you've set AsymmetricSignRequest.digest_crc32c but this field is still false, discard the response and perform a limited number of retries. NOTE: This field is in Beta.", + "type": "boolean" } }, "type": "object" }, "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"sampleservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:aliya@example.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts jose@example.com from DATA_READ logging, and\naliya@example.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. 
If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -1313,18 +1353,18 @@ "type": "array" }, "service": { - "description": "Specifies a service that will be enabled for audit logging.\nFor example, `storage.googleapis.com`, `cloudsql.googleapis.com`.\n`allServices` is a special value that covers all services.", + "description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.", "type": "string" } }, "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\njose@example.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions. Example: { \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { - "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", + "description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.", "items": { "type": "string" }, @@ -1353,52 +1393,87 @@ "description": "Associates `members` with a `role`.", "id": "Binding", "properties": { + "bindingId": { + "description": "A client-specified ID for this binding. Expected to be globally unique to support the internal bindings-by-ID API.", + "type": "string" + }, "condition": { "$ref": "Expr", - "description": "The condition that is associated with this binding.\n\nIf the condition evaluates to `true`, then this binding applies to the\ncurrent request.\n\nIf the condition evaluates to `false`, then this binding does not apply to\nthe current request. However, a different role binding might grant the same\nrole to one or more of the members in this binding.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies)." + "description": "The condition that is associated with this binding. 
If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a user that has been recently deleted. For\n example, `alice@example.com?uid=123456789012345678901`. If the user is\n recovered, this value reverts to `user:{emailid}` and the recovered user\n retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus\n unique identifier) representing a service account that has been recently\n deleted. For example,\n `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.\n If the service account is undeleted, this value reverts to\n `serviceAccount:{emailid}` and the undeleted service account retains the\n role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a Google group that has been recently\n deleted. For example, `admins@example.com?uid=123456789012345678901`. If\n the group is recovered, this value reverts to `group:{emailid}` and the\n recovered group retains the role in the binding.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. 
* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", "items": { "type": "string" }, "type": "array" }, "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", "type": "string" } }, "type": "object" }, + "CertificateChains": { + "description": "Certificate chains needed to verify the attestation. Certificates in chains are PEM-encoded and are ordered based on https://tools.ietf.org/html/rfc5246#section-7.4.2.", + "id": "CertificateChains", + "properties": { + "caviumCerts": { + "description": "Cavium certificate chain corresponding to the attestation.", + "items": { + "type": "string" + }, + "type": "array" + }, + "googleCardCerts": { + "description": "Google card certificate chain corresponding to the attestation.", + "items": { + "type": "string" + }, + "type": "array" + }, + "googlePartitionCerts": { + "description": "Google partition certificate chain corresponding to the attestation.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "CryptoKey": { - "description": "A CryptoKey represents a logical key that can be used for cryptographic\noperations.\n\nA CryptoKey is made up of zero or more versions,\nwhich represent the actual key material used in cryptographic operations.", + "description": "A CryptoKey represents a logical key that can be used for cryptographic operations. A CryptoKey is made up of zero or more versions, which represent the actual key material used in cryptographic operations.", "id": "CryptoKey", "properties": { "createTime": { "description": "Output only. The time at which this CryptoKey was created.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "Labels with user-defined metadata. For more information, see\n[Labeling Keys](/kms/docs/labeling-keys).", + "description": "Labels with user-defined metadata. For more information, see [Labeling Keys](https://cloud.google.com/kms/docs/labeling-keys).", "type": "object" }, "name": { - "description": "Output only. The resource name for this CryptoKey in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", + "description": "Output only. The resource name for this CryptoKey in the format `projects/*/locations/*/keyRings/*/cryptoKeys/*`.", + "readOnly": true, "type": "string" }, "nextRotationTime": { - "description": "At next_rotation_time, the Key Management Service will automatically:\n\n1. Create a new version of this CryptoKey.\n2. 
Mark the new version as primary.\n\nKey rotations performed manually via\nCreateCryptoKeyVersion and\nUpdateCryptoKeyPrimaryVersion\ndo not affect next_rotation_time.\n\nKeys with purpose\nENCRYPT_DECRYPT support\nautomatic rotation. For other keys, this field must be omitted.", + "description": "At next_rotation_time, the Key Management Service will automatically: 1. Create a new version of this CryptoKey. 2. Mark the new version as primary. Key rotations performed manually via CreateCryptoKeyVersion and UpdateCryptoKeyPrimaryVersion do not affect next_rotation_time. Keys with purpose ENCRYPT_DECRYPT support automatic rotation. For other keys, this field must be omitted.", "format": "google-datetime", "type": "string" }, "primary": { "$ref": "CryptoKeyVersion", - "description": "Output only. A copy of the \"primary\" CryptoKeyVersion that will be used\nby Encrypt when this CryptoKey is given\nin EncryptRequest.name.\n\nThe CryptoKey's primary version can be updated via\nUpdateCryptoKeyPrimaryVersion.\n\nKeys with purpose\nENCRYPT_DECRYPT may have a\nprimary. For other keys, this field will be omitted." + "description": "Output only. A copy of the \"primary\" CryptoKeyVersion that will be used by Encrypt when this CryptoKey is given in EncryptRequest.name. The CryptoKey's primary version can be updated via UpdateCryptoKeyPrimaryVersion. Keys with purpose ENCRYPT_DECRYPT may have a primary. For other keys, this field will be omitted.", + "readOnly": true }, "purpose": { "description": "Immutable. The immutable purpose of this CryptoKey.", @@ -1410,30 +1485,30 @@ ], "enumDescriptions": [ "Not specified.", - "CryptoKeys with this purpose may be used with\nEncrypt and\nDecrypt.", - "CryptoKeys with this purpose may be used with\nAsymmetricSign and\nGetPublicKey.", - "CryptoKeys with this purpose may be used with\nAsymmetricDecrypt and\nGetPublicKey." + "CryptoKeys with this purpose may be used with Encrypt and Decrypt.", + "CryptoKeys with this purpose may be used with AsymmetricSign and GetPublicKey.", + "CryptoKeys with this purpose may be used with AsymmetricDecrypt and GetPublicKey." ], "type": "string" }, "rotationPeriod": { - "description": "next_rotation_time will be advanced by this period when the service\nautomatically rotates a key. Must be at least 24 hours and at most\n876,000 hours.\n\nIf rotation_period is set, next_rotation_time must also be set.\n\nKeys with purpose\nENCRYPT_DECRYPT support\nautomatic rotation. For other keys, this field must be omitted.", + "description": "next_rotation_time will be advanced by this period when the service automatically rotates a key. Must be at least 24 hours and at most 876,000 hours. If rotation_period is set, next_rotation_time must also be set. Keys with purpose ENCRYPT_DECRYPT support automatic rotation. For other keys, this field must be omitted.", "format": "google-duration", "type": "string" }, "versionTemplate": { "$ref": "CryptoKeyVersionTemplate", - "description": "A template describing settings for new CryptoKeyVersion instances.\nThe properties of new CryptoKeyVersion instances created by either\nCreateCryptoKeyVersion or\nauto-rotation are controlled by this template." + "description": "A template describing settings for new CryptoKeyVersion instances. The properties of new CryptoKeyVersion instances created by either CreateCryptoKeyVersion or auto-rotation are controlled by this template." 
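The CryptoKey schema above ties automatic rotation to two fields, rotation_period (at least 24 hours, at most 876,000 hours) and next_rotation_time, and restricts both to keys with purpose ENCRYPT_DECRYPT. A hedged sketch of the request body a client might build when creating such a key; the field names and enum values follow the schema in this file, while the surrounding HTTP call and endpoint wiring are assumed rather than shown here:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// Body for a cryptoKeys create call on a key with automatic rotation.
	// Per the schema above, only ENCRYPT_DECRYPT keys may set rotationPeriod
	// and nextRotationTime.
	body := map[string]interface{}{
		"purpose":          "ENCRYPT_DECRYPT",
		"rotationPeriod":   "7776000s", // 90 days, within the 24h..876,000h bounds (google-duration)
		"nextRotationTime": time.Now().Add(90 * 24 * time.Hour).Format(time.RFC3339), // google-datetime
		"versionTemplate": map[string]string{
			"algorithm":       "GOOGLE_SYMMETRIC_ENCRYPTION",
			"protectionLevel": "SOFTWARE",
		},
	}

	out, _ := json.MarshalIndent(body, "", "  ")
	fmt.Println(string(out))
}
```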
} }, "type": "object" }, "CryptoKeyVersion": { - "description": "A CryptoKeyVersion represents an individual cryptographic key, and the\nassociated key material.\n\nAn ENABLED version can be\nused for cryptographic operations.\n\nFor security reasons, the raw cryptographic key material represented by a\nCryptoKeyVersion can never be viewed or exported. It can only be used to\nencrypt, decrypt, or sign data when an authorized user or application invokes\nCloud KMS.", + "description": "A CryptoKeyVersion represents an individual cryptographic key, and the associated key material. An ENABLED version can be used for cryptographic operations. For security reasons, the raw cryptographic key material represented by a CryptoKeyVersion can never be viewed or exported. It can only be used to encrypt, decrypt, or sign data when an authorized user or application invokes Cloud KMS.", "id": "CryptoKeyVersion", "properties": { "algorithm": { - "description": "Output only. The CryptoKeyVersionAlgorithm that this\nCryptoKeyVersion supports.", + "description": "Output only. The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports.", "enum": [ "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED", "GOOGLE_SYMMETRIC_ENCRYPTION", @@ -1472,55 +1547,65 @@ "ECDSA on the NIST P-384 curve with a SHA384 digest.", "Algorithm representing symmetric encryption by an external key manager." ], + "readOnly": true, "type": "string" }, "attestation": { "$ref": "KeyOperationAttestation", - "description": "Output only. Statement that was generated and signed by the HSM at key\ncreation time. Use this statement to verify attributes of the key as stored\non the HSM, independently of Google. Only provided for key versions with\nprotection_level HSM." + "description": "Output only. Statement that was generated and signed by the HSM at key creation time. Use this statement to verify attributes of the key as stored on the HSM, independently of Google. Only provided for key versions with protection_level HSM.", + "readOnly": true }, "createTime": { "description": "Output only. The time at which this CryptoKeyVersion was created.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "destroyEventTime": { - "description": "Output only. The time this CryptoKeyVersion's key material was\ndestroyed. Only present if state is\nDESTROYED.", + "description": "Output only. The time this CryptoKeyVersion's key material was destroyed. Only present if state is DESTROYED.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "destroyTime": { - "description": "Output only. The time this CryptoKeyVersion's key material is scheduled\nfor destruction. Only present if state is\nDESTROY_SCHEDULED.", + "description": "Output only. The time this CryptoKeyVersion's key material is scheduled for destruction. Only present if state is DESTROY_SCHEDULED.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "externalProtectionLevelOptions": { "$ref": "ExternalProtectionLevelOptions", - "description": "ExternalProtectionLevelOptions stores a group of additional fields for\nconfiguring a CryptoKeyVersion that are specific to the\nEXTERNAL protection level." + "description": "ExternalProtectionLevelOptions stores a group of additional fields for configuring a CryptoKeyVersion that are specific to the EXTERNAL protection level." }, "generateTime": { - "description": "Output only. The time this CryptoKeyVersion's key material was\ngenerated.", + "description": "Output only. 
The time this CryptoKeyVersion's key material was generated.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "importFailureReason": { - "description": "Output only. The root cause of an import failure. Only present if\nstate is\nIMPORT_FAILED.", + "description": "Output only. The root cause of an import failure. Only present if state is IMPORT_FAILED.", + "readOnly": true, "type": "string" }, "importJob": { - "description": "Output only. The name of the ImportJob used to import this\nCryptoKeyVersion. Only present if the underlying key material was\nimported.", + "description": "Output only. The name of the ImportJob used to import this CryptoKeyVersion. Only present if the underlying key material was imported.", + "readOnly": true, "type": "string" }, "importTime": { - "description": "Output only. The time at which this CryptoKeyVersion's key material\nwas imported.", + "description": "Output only. The time at which this CryptoKeyVersion's key material was imported.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "name": { - "description": "Output only. The resource name for this CryptoKeyVersion in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.", + "description": "Output only. The resource name for this CryptoKeyVersion in the format `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.", + "readOnly": true, "type": "string" }, "protectionLevel": { - "description": "Output only. The ProtectionLevel describing how crypto operations are\nperformed with this CryptoKeyVersion.", + "description": "Output only. The ProtectionLevel describing how crypto operations are performed with this CryptoKeyVersion.", "enum": [ "PROTECTION_LEVEL_UNSPECIFIED", "SOFTWARE", @@ -1533,6 +1618,7 @@ "Crypto operations are performed in a Hardware Security Module.", "Crypto operations are performed by an external key manager." ], + "readOnly": true, "type": "string" }, "state": { @@ -1549,13 +1635,13 @@ ], "enumDescriptions": [ "Not specified.", - "This version is still being generated. It may not be used, enabled,\ndisabled, or destroyed yet. Cloud KMS will automatically mark this\nversion ENABLED as soon as the version is ready.", + "This version is still being generated. It may not be used, enabled, disabled, or destroyed yet. Cloud KMS will automatically mark this version ENABLED as soon as the version is ready.", "This version may be used for cryptographic operations.", - "This version may not be used, but the key material is still available,\nand the version can be placed back into the ENABLED state.", - "This version is destroyed, and the key material is no longer stored.\nA version may not leave this state once entered.", - "This version is scheduled for destruction, and will be destroyed soon.\nCall\nRestoreCryptoKeyVersion\nto put it back into the DISABLED state.", - "This version is still being imported. It may not be used, enabled,\ndisabled, or destroyed yet. Cloud KMS will automatically mark this\nversion ENABLED as soon as the version is ready.", - "This version was not imported successfully. It may not be used, enabled,\ndisabled, or destroyed. The submitted key material has been discarded.\nAdditional details can be found in\nCryptoKeyVersion.import_failure_reason." + "This version may not be used, but the key material is still available, and the version can be placed back into the ENABLED state.", + "This version is destroyed, and the key material is no longer stored. 
A version may not leave this state once entered.", + "This version is scheduled for destruction, and will be destroyed soon. Call RestoreCryptoKeyVersion to put it back into the DISABLED state.", + "This version is still being imported. It may not be used, enabled, disabled, or destroyed yet. Cloud KMS will automatically mark this version ENABLED as soon as the version is ready.", + "This version was not imported successfully. It may not be used, enabled, disabled, or destroyed. The submitted key material has been discarded. Additional details can be found in CryptoKeyVersion.import_failure_reason." ], "type": "string" } @@ -1563,11 +1649,11 @@ "type": "object" }, "CryptoKeyVersionTemplate": { - "description": "A CryptoKeyVersionTemplate specifies the properties to use when creating\na new CryptoKeyVersion, either manually with\nCreateCryptoKeyVersion or\nautomatically as a result of auto-rotation.", + "description": "A CryptoKeyVersionTemplate specifies the properties to use when creating a new CryptoKeyVersion, either manually with CreateCryptoKeyVersion or automatically as a result of auto-rotation.", "id": "CryptoKeyVersionTemplate", "properties": { "algorithm": { - "description": "Required. Algorithm to use\nwhen creating a CryptoKeyVersion based on this template.\n\nFor backwards compatibility, GOOGLE_SYMMETRIC_ENCRYPTION is implied if both\nthis field is omitted and CryptoKey.purpose is\nENCRYPT_DECRYPT.", + "description": "Required. Algorithm to use when creating a CryptoKeyVersion based on this template. For backwards compatibility, GOOGLE_SYMMETRIC_ENCRYPTION is implied if both this field is omitted and CryptoKey.purpose is ENCRYPT_DECRYPT.", "enum": [ "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED", "GOOGLE_SYMMETRIC_ENCRYPTION", @@ -1609,7 +1695,7 @@ "type": "string" }, "protectionLevel": { - "description": "ProtectionLevel to use when creating a CryptoKeyVersion based on\nthis template. Immutable. Defaults to SOFTWARE.", + "description": "ProtectionLevel to use when creating a CryptoKeyVersion based on this template. Immutable. Defaults to SOFTWARE.", "enum": [ "PROTECTION_LEVEL_UNSPECIFIED", "SOFTWARE", @@ -1632,14 +1718,24 @@ "id": "DecryptRequest", "properties": { "additionalAuthenticatedData": { - "description": "Optional. Optional data that must match the data originally supplied in\nEncryptRequest.additional_authenticated_data.", + "description": "Optional. Optional data that must match the data originally supplied in EncryptRequest.additional_authenticated_data.", "format": "byte", "type": "string" }, + "additionalAuthenticatedDataCrc32c": { + "description": "Optional. An optional CRC32C checksum of the DecryptRequest.additional_authenticated_data. If specified, KeyManagementService will verify the integrity of the received DecryptRequest.additional_authenticated_data using this checksum. KeyManagementService will report an error if the checksum verification fails. If you receive a checksum error, your client should verify that CRC32C(DecryptRequest.additional_authenticated_data) is equal to DecryptRequest.additional_authenticated_data_crc32c, and if so, perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type. 
NOTE: This field is in Beta.", + "format": "int64", + "type": "string" + }, "ciphertext": { - "description": "Required. The encrypted data originally returned in\nEncryptResponse.ciphertext.", + "description": "Required. The encrypted data originally returned in EncryptResponse.ciphertext.", "format": "byte", "type": "string" + }, + "ciphertextCrc32c": { + "description": "Optional. An optional CRC32C checksum of the DecryptRequest.ciphertext. If specified, KeyManagementService will verify the integrity of the received DecryptRequest.ciphertext using this checksum. KeyManagementService will report an error if the checksum verification fails. If you receive a checksum error, your client should verify that CRC32C(DecryptRequest.ciphertext) is equal to DecryptRequest.ciphertext_crc32c, and if so, perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type. NOTE: This field is in Beta.", + "format": "int64", + "type": "string" } }, "type": "object" @@ -1652,6 +1748,11 @@ "description": "The decrypted data originally supplied in EncryptRequest.plaintext.", "format": "byte", "type": "string" + }, + "plaintextCrc32c": { + "description": "Integrity verification field. A CRC32C checksum of the returned DecryptResponse.plaintext. An integrity check of DecryptResponse.plaintext can be performed by computing the CRC32C checksum of DecryptResponse.plaintext and comparing your results to this field. Discard the response in case of non-matching checksum values, and perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: receiving this response message indicates that KeyManagementService is able to successfully decrypt the ciphertext. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type. NOTE: This field is in Beta.", + "format": "int64", + "type": "string" } }, "type": "object" @@ -1689,14 +1790,24 @@ "id": "EncryptRequest", "properties": { "additionalAuthenticatedData": { - "description": "Optional. Optional data that, if specified, must also be provided during decryption\nthrough DecryptRequest.additional_authenticated_data.\n\nThe maximum size depends on the key version's\nprotection_level. For\nSOFTWARE keys, the AAD must be no larger than\n64KiB. For HSM keys, the combined length of the\nplaintext and additional_authenticated_data fields must be no larger than\n8KiB.", + "description": "Optional. Optional data that, if specified, must also be provided during decryption through DecryptRequest.additional_authenticated_data. The maximum size depends on the key version's protection_level. For SOFTWARE keys, the AAD must be no larger than 64KiB. For HSM keys, the combined length of the plaintext and additional_authenticated_data fields must be no larger than 8KiB.", "format": "byte", "type": "string" }, + "additionalAuthenticatedDataCrc32c": { + "description": "Optional. An optional CRC32C checksum of the EncryptRequest.additional_authenticated_data. 
If specified, KeyManagementService will verify the integrity of the received EncryptRequest.additional_authenticated_data using this checksum. KeyManagementService will report an error if the checksum verification fails. If you receive a checksum error, your client should verify that CRC32C(EncryptRequest.additional_authenticated_data) is equal to EncryptRequest.additional_authenticated_data_crc32c, and if so, perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type. NOTE: This field is in Beta.", + "format": "int64", + "type": "string" + }, "plaintext": { - "description": "Required. The data to encrypt. Must be no larger than 64KiB.\n\nThe maximum size depends on the key version's\nprotection_level. For\nSOFTWARE keys, the plaintext must be no larger\nthan 64KiB. For HSM keys, the combined length of the\nplaintext and additional_authenticated_data fields must be no larger than\n8KiB.", + "description": "Required. The data to encrypt. Must be no larger than 64KiB. The maximum size depends on the key version's protection_level. For SOFTWARE keys, the plaintext must be no larger than 64KiB. For HSM keys, the combined length of the plaintext and additional_authenticated_data fields must be no larger than 8KiB.", "format": "byte", "type": "string" + }, + "plaintextCrc32c": { + "description": "Optional. An optional CRC32C checksum of the EncryptRequest.plaintext. If specified, KeyManagementService will verify the integrity of the received EncryptRequest.plaintext using this checksum. KeyManagementService will report an error if the checksum verification fails. If you receive a checksum error, your client should verify that CRC32C(EncryptRequest.plaintext) is equal to EncryptRequest.plaintext_crc32c, and if so, perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type. NOTE: This field is in Beta.", + "format": "int64", + "type": "string" } }, "type": "object" @@ -1710,38 +1821,51 @@ "format": "byte", "type": "string" }, + "ciphertextCrc32c": { + "description": "Integrity verification field. A CRC32C checksum of the returned EncryptResponse.ciphertext. An integrity check of EncryptResponse.ciphertext can be performed by computing the CRC32C checksum of EncryptResponse.ciphertext and comparing your results to this field. Discard the response in case of non-matching checksum values, and perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type. NOTE: This field is in Beta.", + "format": "int64", + "type": "string" + }, "name": { - "description": "The resource name of the CryptoKeyVersion used in encryption. 
Check\nthis field to verify that the intended resource was used for encryption.", + "description": "The resource name of the CryptoKeyVersion used in encryption. Check this field to verify that the intended resource was used for encryption.", "type": "string" + }, + "verifiedAdditionalAuthenticatedDataCrc32c": { + "description": "Integrity verification field. A flag indicating whether EncryptRequest.additional_authenticated_data_crc32c was received by KeyManagementService and used for the integrity verification of the AAD. A false value of this field indicates either that EncryptRequest.additional_authenticated_data_crc32c was left unset or that it was not delivered to KeyManagementService. If you've set EncryptRequest.additional_authenticated_data_crc32c but this field is still false, discard the response and perform a limited number of retries. NOTE: This field is in Beta.", + "type": "boolean" + }, + "verifiedPlaintextCrc32c": { + "description": "Integrity verification field. A flag indicating whether EncryptRequest.plaintext_crc32c was received by KeyManagementService and used for the integrity verification of the plaintext. A false value of this field indicates either that EncryptRequest.plaintext_crc32c was left unset or that it was not delivered to KeyManagementService. If you've set EncryptRequest.plaintext_crc32c but this field is still false, discard the response and perform a limited number of retries. NOTE: This field is in Beta.", + "type": "boolean" } }, "type": "object" }, "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL)\nsyntax. CEL is a C-like expression language. The syntax and semantics of CEL\nare documented at https://github.com/google/cel-spec.\n\nExample (Comparison):\n\n title: \"Summary size limit\"\n description: \"Determines if a summary is less than 100 chars\"\n expression: \"document.summary.size() \u003c 100\"\n\nExample (Equality):\n\n title: \"Requestor is owner\"\n description: \"Determines if requestor is the document owner\"\n expression: \"document.owner == request.auth.claims.email\"\n\nExample (Logic):\n\n title: \"Public documents\"\n description: \"Determine whether the document should be publicly visible\"\n expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\n\nExample (Data Manipulation):\n\n title: \"Notification string\"\n description: \"Create a notification string with a timestamp.\"\n expression: \"'New message received at ' + string(document.create_time)\"\n\nThe exact variables and functions that may be referenced within an expression\nare determined by the service that evaluates it. See the service\ndocumentation for additional information.", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. 
Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { - "description": "Optional. Description of the expression. This is a longer text which\ndescribes the expression, e.g. when hovered over it in a UI.", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, "expression": { - "description": "Textual representation of an expression in Common Expression Language\nsyntax.", + "description": "Textual representation of an expression in Common Expression Language syntax.", "type": "string" }, "location": { - "description": "Optional. String indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", "type": "string" }, "title": { - "description": "Optional. Title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", "type": "string" } }, "type": "object" }, "ExternalProtectionLevelOptions": { - "description": "ExternalProtectionLevelOptions stores a group of additional fields for\nconfiguring a CryptoKeyVersion that are specific to the\nEXTERNAL protection level.", + "description": "ExternalProtectionLevelOptions stores a group of additional fields for configuring a CryptoKeyVersion that are specific to the EXTERNAL protection level.", "id": "ExternalProtectionLevelOptions", "properties": { "externalKeyUri": { @@ -1756,7 +1880,7 @@ "id": "ImportCryptoKeyVersionRequest", "properties": { "algorithm": { - "description": "Required. The algorithm of\nthe key being imported. This does not need to match the\nversion_template of the CryptoKey this\nversion imports into.", + "description": "Required. The algorithm of the key being imported. This does not need to match the version_template of the CryptoKey this version imports into.", "enum": [ "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED", "GOOGLE_SYMMETRIC_ENCRYPTION", @@ -1798,11 +1922,11 @@ "type": "string" }, "importJob": { - "description": "Required. The name of the ImportJob that was used to\nwrap this key material.", + "description": "Required. 
The name of the ImportJob that was used to wrap this key material.", "type": "string" }, "rsaAesWrappedKey": { - "description": "Wrapped key material produced with\nRSA_OAEP_3072_SHA1_AES_256\nor\nRSA_OAEP_4096_SHA1_AES_256.\n\nThis field contains the concatenation of two wrapped keys:\n\u003col\u003e\n \u003cli\u003eAn ephemeral AES-256 wrapping key wrapped with the\n public_key using RSAES-OAEP with SHA-1,\n MGF1 with SHA-1, and an empty label.\n \u003c/li\u003e\n \u003cli\u003eThe key to be imported, wrapped with the ephemeral AES-256 key\n using AES-KWP (RFC 5649).\n \u003c/li\u003e\n\u003c/ol\u003e\n\nIf importing symmetric key material, it is expected that the unwrapped\nkey contains plain bytes. If importing asymmetric key material, it is\nexpected that the unwrapped key is in PKCS#8-encoded DER format (the\nPrivateKeyInfo structure from RFC 5208).\n\nThis format is the same as the format produced by PKCS#11 mechanism\nCKM_RSA_AES_KEY_WRAP.", + "description": "Wrapped key material produced with RSA_OAEP_3072_SHA1_AES_256 or RSA_OAEP_4096_SHA1_AES_256. This field contains the concatenation of two wrapped keys: 1. An ephemeral AES-256 wrapping key wrapped with the public_key using RSAES-OAEP with SHA-1, MGF1 with SHA-1, and an empty label. 2. The key to be imported, wrapped with the ephemeral AES-256 key using AES-KWP (RFC 5649). If importing symmetric key material, it is expected that the unwrapped key contains plain bytes. If importing asymmetric key material, it is expected that the unwrapped key is in PKCS#8-encoded DER format (the PrivateKeyInfo structure from RFC 5208). This format is the same as the format produced by PKCS#11 mechanism CKM_RSA_AES_KEY_WRAP.", "format": "byte", "type": "string" } @@ -1810,31 +1934,36 @@ "type": "object" }, "ImportJob": { - "description": "An ImportJob can be used to create CryptoKeys and\nCryptoKeyVersions using pre-existing key material,\ngenerated outside of Cloud KMS.\n\nWhen an ImportJob is created, Cloud KMS will generate a \"wrapping key\",\nwhich is a public/private key pair. You use the wrapping key to encrypt (also\nknown as wrap) the pre-existing key material to protect it during the import\nprocess. The nature of the wrapping key depends on the choice of\nimport_method. When the wrapping key generation\nis complete, the state will be set to\nACTIVE and the public_key\ncan be fetched. The fetched public key can then be used to wrap your\npre-existing key material.\n\nOnce the key material is wrapped, it can be imported into a new\nCryptoKeyVersion in an existing CryptoKey by calling\nImportCryptoKeyVersion.\nMultiple CryptoKeyVersions can be imported with a single\nImportJob. Cloud KMS uses the private key portion of the wrapping key to\nunwrap the key material. Only Cloud KMS has access to the private key.\n\nAn ImportJob expires 3 days after it is created. Once expired, Cloud KMS\nwill no longer be able to import or unwrap any key material that was wrapped\nwith the ImportJob's public key.\n\nFor more information, see\n[Importing a key](https://cloud.google.com/kms/docs/importing-a-key).", + "description": "An ImportJob can be used to create CryptoKeys and CryptoKeyVersions using pre-existing key material, generated outside of Cloud KMS. When an ImportJob is created, Cloud KMS will generate a \"wrapping key\", which is a public/private key pair. You use the wrapping key to encrypt (also known as wrap) the pre-existing key material to protect it during the import process. 
The nature of the wrapping key depends on the choice of import_method. When the wrapping key generation is complete, the state will be set to ACTIVE and the public_key can be fetched. The fetched public key can then be used to wrap your pre-existing key material. Once the key material is wrapped, it can be imported into a new CryptoKeyVersion in an existing CryptoKey by calling ImportCryptoKeyVersion. Multiple CryptoKeyVersions can be imported with a single ImportJob. Cloud KMS uses the private key portion of the wrapping key to unwrap the key material. Only Cloud KMS has access to the private key. An ImportJob expires 3 days after it is created. Once expired, Cloud KMS will no longer be able to import or unwrap any key material that was wrapped with the ImportJob's public key. For more information, see [Importing a key](https://cloud.google.com/kms/docs/importing-a-key).", "id": "ImportJob", "properties": { "attestation": { "$ref": "KeyOperationAttestation", - "description": "Output only. Statement that was generated and signed by the key creator\n(for example, an HSM) at key creation time. Use this statement to verify\nattributes of the key as stored on the HSM, independently of Google.\nOnly present if the chosen ImportMethod is one with a protection\nlevel of HSM." + "description": "Output only. Statement that was generated and signed by the key creator (for example, an HSM) at key creation time. Use this statement to verify attributes of the key as stored on the HSM, independently of Google. Only present if the chosen ImportMethod is one with a protection level of HSM.", + "readOnly": true }, "createTime": { "description": "Output only. The time at which this ImportJob was created.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "expireEventTime": { - "description": "Output only. The time this ImportJob expired. Only present if\nstate is EXPIRED.", + "description": "Output only. The time this ImportJob expired. Only present if state is EXPIRED.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "expireTime": { - "description": "Output only. The time at which this ImportJob is scheduled for\nexpiration and can no longer be used to import key material.", + "description": "Output only. The time at which this ImportJob is scheduled for expiration and can no longer be used to import key material.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "generateTime": { "description": "Output only. The time this ImportJob's key material was generated.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "importMethod": { @@ -1846,17 +1975,18 @@ ], "enumDescriptions": [ "Not specified.", - "This ImportMethod represents the CKM_RSA_AES_KEY_WRAP key wrapping\nscheme defined in the PKCS #11 standard. In summary, this involves\nwrapping the raw key with an ephemeral AES key, and wrapping the\nephemeral AES key with a 3072 bit RSA key. For more details, see\n[RSA AES key wrap\nmechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cos01/pkcs11-curr-v2.40-cos01.html#_Toc408226908).", - "This ImportMethod represents the CKM_RSA_AES_KEY_WRAP key wrapping\nscheme defined in the PKCS #11 standard. In summary, this involves\nwrapping the raw key with an ephemeral AES key, and wrapping the\nephemeral AES key with a 4096 bit RSA key. For more details, see\n[RSA AES key wrap\nmechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cos01/pkcs11-curr-v2.40-cos01.html#_Toc408226908)." 
+ "This ImportMethod represents the CKM_RSA_AES_KEY_WRAP key wrapping scheme defined in the PKCS #11 standard. In summary, this involves wrapping the raw key with an ephemeral AES key, and wrapping the ephemeral AES key with a 3072 bit RSA key. For more details, see [RSA AES key wrap mechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cos01/pkcs11-curr-v2.40-cos01.html#_Toc408226908).", + "This ImportMethod represents the CKM_RSA_AES_KEY_WRAP key wrapping scheme defined in the PKCS #11 standard. In summary, this involves wrapping the raw key with an ephemeral AES key, and wrapping the ephemeral AES key with a 4096 bit RSA key. For more details, see [RSA AES key wrap mechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cos01/pkcs11-curr-v2.40-cos01.html#_Toc408226908)." ], "type": "string" }, "name": { - "description": "Output only. The resource name for this ImportJob in the format\n`projects/*/locations/*/keyRings/*/importJobs/*`.", + "description": "Output only. The resource name for this ImportJob in the format `projects/*/locations/*/keyRings/*/importJobs/*`.", + "readOnly": true, "type": "string" }, "protectionLevel": { - "description": "Required. Immutable. The protection level of the ImportJob. This must match the\nprotection_level of the\nversion_template on the CryptoKey you\nattempt to import into.", + "description": "Required. Immutable. The protection level of the ImportJob. This must match the protection_level of the version_template on the CryptoKey you attempt to import into.", "enum": [ "PROTECTION_LEVEL_UNSPECIFIED", "SOFTWARE", @@ -1873,10 +2003,11 @@ }, "publicKey": { "$ref": "WrappingPublicKey", - "description": "Output only. The public key with which to wrap key material prior to\nimport. Only returned if state is\nACTIVE." + "description": "Output only. The public key with which to wrap key material prior to import. Only returned if state is ACTIVE.", + "readOnly": true }, "state": { - "description": "Output only. The current state of the ImportJob, indicating if it can\nbe used.", + "description": "Output only. The current state of the ImportJob, indicating if it can be used.", "enum": [ "IMPORT_JOB_STATE_UNSPECIFIED", "PENDING_GENERATION", @@ -1885,22 +2016,29 @@ ], "enumDescriptions": [ "Not specified.", - "The wrapping key for this job is still being generated. It may not be\nused. Cloud KMS will automatically mark this job as\nACTIVE as soon as the wrapping key is generated.", - "This job may be used in\nCreateCryptoKey and\nCreateCryptoKeyVersion\nrequests.", + "The wrapping key for this job is still being generated. It may not be used. Cloud KMS will automatically mark this job as ACTIVE as soon as the wrapping key is generated.", + "This job may be used in CreateCryptoKey and CreateCryptoKeyVersion requests.", "This job can no longer be used and may not leave this state once entered." ], + "readOnly": true, "type": "string" } }, "type": "object" }, "KeyOperationAttestation": { - "description": "Contains an HSM-generated attestation about a key operation. For more\ninformation, see [Verifying attestations]\n(https://cloud.google.com/kms/docs/attest-key).", + "description": "Contains an HSM-generated attestation about a key operation. For more information, see [Verifying attestations] (https://cloud.google.com/kms/docs/attest-key).", "id": "KeyOperationAttestation", "properties": { + "certChains": { + "$ref": "CertificateChains", + "description": "Output only. 
The certificate chains needed to validate the attestation", + "readOnly": true + }, "content": { - "description": "Output only. The attestation data provided by the HSM when the key\noperation was performed.", + "description": "Output only. The attestation data provided by the HSM when the key operation was performed.", "format": "byte", + "readOnly": true, "type": "string" }, "format": { @@ -1912,9 +2050,10 @@ ], "enumDescriptions": [ "Not specified.", - "Cavium HSM attestation compressed with gzip. Note that this format is\ndefined by Cavium and subject to change at any time.", - "Cavium HSM attestation V2 compressed with gzip. This is a new format\nintroduced in Cavium's version 3.2-08." + "Cavium HSM attestation compressed with gzip. Note that this format is defined by Cavium and subject to change at any time.", + "Cavium HSM attestation V2 compressed with gzip. This is a new format introduced in Cavium's version 3.2-08." ], + "readOnly": true, "type": "string" } }, @@ -1927,10 +2066,12 @@ "createTime": { "description": "Output only. The time at which this KeyRing was created.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "name": { - "description": "Output only. The resource name for the KeyRing in the format\n`projects/*/locations/*/keyRings/*`.", + "description": "Output only. The resource name for the KeyRing in the format `projects/*/locations/*/keyRings/*`.", + "readOnly": true, "type": "string" } }, @@ -1948,11 +2089,11 @@ "type": "array" }, "nextPageToken": { - "description": "A token to retrieve next page of results. Pass this value in\nListCryptoKeyVersionsRequest.page_token to retrieve the next page of\nresults.", + "description": "A token to retrieve next page of results. Pass this value in ListCryptoKeyVersionsRequest.page_token to retrieve the next page of results.", "type": "string" }, "totalSize": { - "description": "The total number of CryptoKeyVersions that matched the\nquery.", + "description": "The total number of CryptoKeyVersions that matched the query.", "format": "int32", "type": "integer" } @@ -1971,7 +2112,7 @@ "type": "array" }, "nextPageToken": { - "description": "A token to retrieve next page of results. Pass this value in\nListCryptoKeysRequest.page_token to retrieve the next page of results.", + "description": "A token to retrieve next page of results. Pass this value in ListCryptoKeysRequest.page_token to retrieve the next page of results.", "type": "string" }, "totalSize": { @@ -1994,7 +2135,7 @@ "type": "array" }, "nextPageToken": { - "description": "A token to retrieve next page of results. Pass this value in\nListImportJobsRequest.page_token to retrieve the next page of results.", + "description": "A token to retrieve next page of results. Pass this value in ListImportJobsRequest.page_token to retrieve the next page of results.", "type": "string" }, "totalSize": { @@ -2017,7 +2158,7 @@ "type": "array" }, "nextPageToken": { - "description": "A token to retrieve next page of results. Pass this value in\nListKeyRingsRequest.page_token to retrieve the next page of results.", + "description": "A token to retrieve next page of results. Pass this value in ListKeyRingsRequest.page_token to retrieve the next page of results.", "type": "string" }, "totalSize": { @@ -2051,14 +2192,14 @@ "id": "Location", "properties": { "displayName": { - "description": "The friendly name for this location, typically a nearby city name.\nFor example, \"Tokyo\".", + "description": "The friendly name for this location, typically a nearby city name. 
For example, \"Tokyo\".", "type": "string" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "Cross-service attributes for the location. For example\n\n {\"cloud.googleapis.com/region\": \"us-east1\"}", + "description": "Cross-service attributes for the location. For example {\"cloud.googleapis.com/region\": \"us-east1\"}", "type": "object" }, "locationId": { @@ -2070,11 +2211,11 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata. For example the available capacity at the given\nlocation.", + "description": "Service-specific metadata. For example the available capacity at the given location.", "type": "object" }, "name": { - "description": "Resource name for the location, which may vary between implementations.\nFor example: `\"projects/example-project/locations/us-east1\"`", + "description": "Resource name for the location, which may vary between implementations. For example: `\"projects/example-project/locations/us-east1\"`", "type": "string" } }, @@ -2085,18 +2226,18 @@ "id": "LocationMetadata", "properties": { "ekmAvailable": { - "description": "Indicates whether CryptoKeys with\nprotection_level\nEXTERNAL can be created in this location.", + "description": "Indicates whether CryptoKeys with protection_level EXTERNAL can be created in this location.", "type": "boolean" }, "hsmAvailable": { - "description": "Indicates whether CryptoKeys with\nprotection_level\nHSM can be created in this location.", + "description": "Indicates whether CryptoKeys with protection_level HSM can be created in this location.", "type": "boolean" } }, "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access\ncontrols for Google Cloud resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). A `role` is a named list of\npermissions; each `role` can be an IAM predefined role or a user-created\ncustom role.\n\nFor some types of Google Cloud resources, a `binding` can also specify a\n`condition`, which is a logical expression that allows access to a resource\nonly if the expression evaluates to `true`. A condition can add constraints\nbased on attributes of the request, the resource, or both. 
To learn which\nresources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).\n\n**JSON example:**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\n \"user:eve@example.com\"\n ],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n }\n\n**YAML example:**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n - etag: BwWWja0YfJA=\n - version: 3\n\nFor a description of IAM and its features, see the\n[IAM documentation](https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "auditConfigs": { @@ -2107,19 +2248,19 @@ "type": "array" }, "bindings": { - "description": "Associates a list of `members` to a `role`. Optionally, may specify a\n`condition` that determines how and when the `bindings` are applied. Each\nof the `bindings` must contain at least one member.", + "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", "format": "byte", "type": "string" }, "version": { - "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. 
Requests that specify an invalid value\nare rejected.\n\nAny operation that affects conditional role bindings must specify version\n`3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding\n* Adding a conditional role binding to a policy\n* Changing a conditional role binding in a policy\n* Removing any role binding, with or without a condition, from a policy\n that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may\nspecify any valid version or leave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -2127,11 +2268,11 @@ "type": "object" }, "PublicKey": { - "description": "The public key for a given CryptoKeyVersion. Obtained via\nGetPublicKey.", + "description": "The public key for a given CryptoKeyVersion. Obtained via GetPublicKey.", "id": "PublicKey", "properties": { "algorithm": { - "description": "The Algorithm associated\nwith this key.", + "description": "The Algorithm associated with this key.", "enum": [ "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED", "GOOGLE_SYMMETRIC_ENCRYPTION", @@ -2172,8 +2313,17 @@ ], "type": "string" }, + "name": { + "description": "The name of the CryptoKeyVersion public key. Provided here for verification. NOTE: This field is in Beta.", + "type": "string" + }, "pem": { - "description": "The public key, encoded in PEM format. For more information, see the\n[RFC 7468](https://tools.ietf.org/html/rfc7468) sections for\n[General Considerations](https://tools.ietf.org/html/rfc7468#section-2) and\n[Textual Encoding of Subject Public Key Info]\n(https://tools.ietf.org/html/rfc7468#section-13).", + "description": "The public key, encoded in PEM format. 
For more information, see the [RFC 7468](https://tools.ietf.org/html/rfc7468) sections for [General Considerations](https://tools.ietf.org/html/rfc7468#section-2) and [Textual Encoding of Subject Public Key Info] (https://tools.ietf.org/html/rfc7468#section-13).", + "type": "string" + }, + "pemCrc32c": { + "description": "Integrity verification field. A CRC32C checksum of the returned PublicKey.pem. An integrity check of PublicKey.pem can be performed by computing the CRC32C checksum of PublicKey.pem and comparing your results to this field. Discard the response in case of non-matching checksum values, and perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type. NOTE: This field is in Beta.", + "format": "int64", "type": "string" } }, @@ -2191,10 +2341,10 @@ "properties": { "policy": { "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." }, "updateMask": { - "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. If no mask is provided, the\nfollowing default mask is used:\n\n`paths: \"bindings, etag\"`", + "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: \"bindings, etag\"`", "format": "google-fieldmask", "type": "string" } @@ -2206,7 +2356,7 @@ "id": "TestIamPermissionsRequest", "properties": { "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "items": { "type": "string" }, @@ -2220,7 +2370,7 @@ "id": "TestIamPermissionsResponse", "properties": { "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { "type": "string" }, @@ -2241,11 +2391,11 @@ "type": "object" }, "WrappingPublicKey": { - "description": "The public key component of the wrapping key. For details of the type of\nkey this public key corresponds to, see the ImportMethod.", + "description": "The public key component of the wrapping key. For details of the type of key this public key corresponds to, see the ImportMethod.", "id": "WrappingPublicKey", "properties": { "pem": { - "description": "The public key, encoded in PEM format. 
For more information, see the [RFC\n7468](https://tools.ietf.org/html/rfc7468) sections for [General\nConsiderations](https://tools.ietf.org/html/rfc7468#section-2) and\n[Textual Encoding of Subject Public Key Info]\n(https://tools.ietf.org/html/rfc7468#section-13).", + "description": "The public key, encoded in PEM format. For more information, see the [RFC 7468](https://tools.ietf.org/html/rfc7468) sections for [General Considerations](https://tools.ietf.org/html/rfc7468#section-2) and [Textual Encoding of Subject Public Key Info] (https://tools.ietf.org/html/rfc7468#section-13).", "type": "string" } }, diff --git a/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go b/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go index 34b6def1062..0989412215a 100644 --- a/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go +++ b/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go @@ -81,6 +81,7 @@ const apiId = "cloudkms:v1" const apiName = "cloudkms" const apiVersion = "v1" const basePath = "https://cloudkms.googleapis.com/" +const mtlsBasePath = "https://cloudkms.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -101,6 +102,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -217,10 +219,25 @@ type ProjectsLocationsKeyRingsImportJobsService struct { // KeyManagementService.AsymmetricDecrypt. type AsymmetricDecryptRequest struct { // Ciphertext: Required. The data encrypted with the named - // CryptoKeyVersion's public - // key using OAEP. + // CryptoKeyVersion's public key using OAEP. Ciphertext string `json:"ciphertext,omitempty"` + // CiphertextCrc32c: Optional. An optional CRC32C checksum of the + // AsymmetricDecryptRequest.ciphertext. If specified, + // KeyManagementService will verify the integrity of the received + // AsymmetricDecryptRequest.ciphertext using this checksum. + // KeyManagementService will report an error if the checksum + // verification fails. If you receive a checksum error, your client + // should verify that CRC32C(AsymmetricDecryptRequest.ciphertext) is + // equal to AsymmetricDecryptRequest.ciphertext_crc32c, and if so, + // perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: + // This field is defined as int64 for reasons of compatibility across + // different languages. However, it is a non-negative integer, which + // will never exceed 2^32-1, and can be safely downconverted to uint32 + // in languages that support this type. NOTE: This field is in Beta. + CiphertextCrc32c int64 `json:"ciphertextCrc32c,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "Ciphertext") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -251,6 +268,31 @@ type AsymmetricDecryptResponse struct { // public key. Plaintext string `json:"plaintext,omitempty"` + // PlaintextCrc32c: Integrity verification field. A CRC32C checksum of + // the returned AsymmetricDecryptResponse.plaintext. 
An integrity check + // of AsymmetricDecryptResponse.plaintext can be performed by computing + // the CRC32C checksum of AsymmetricDecryptResponse.plaintext and + // comparing your results to this field. Discard the response in case of + // non-matching checksum values, and perform a limited number of + // retries. A persistent mismatch may indicate an issue in your + // computation of the CRC32C checksum. Note: This field is defined as + // int64 for reasons of compatibility across different languages. + // However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that + // support this type. NOTE: This field is in Beta. + PlaintextCrc32c int64 `json:"plaintextCrc32c,omitempty,string"` + + // VerifiedCiphertextCrc32c: Integrity verification field. A flag + // indicating whether AsymmetricDecryptRequest.ciphertext_crc32c was + // received by KeyManagementService and used for the integrity + // verification of the ciphertext. A false value of this field indicates + // either that AsymmetricDecryptRequest.ciphertext_crc32c was left unset + // or that it was not delivered to KeyManagementService. If you've set + // AsymmetricDecryptRequest.ciphertext_crc32c but this field is still + // false, discard the response and perform a limited number of retries. + // NOTE: This field is in Beta. + VerifiedCiphertextCrc32c bool `json:"verifiedCiphertextCrc32c,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -282,12 +324,25 @@ func (s *AsymmetricDecryptResponse) MarshalJSON() ([]byte, error) { // KeyManagementService.AsymmetricSign. type AsymmetricSignRequest struct { // Digest: Required. The digest of the data to sign. The digest must be - // produced with - // the same digest algorithm as specified by the key - // version's - // algorithm. + // produced with the same digest algorithm as specified by the key + // version's algorithm. Digest *Digest `json:"digest,omitempty"` + // DigestCrc32c: Optional. An optional CRC32C checksum of the + // AsymmetricSignRequest.digest. If specified, KeyManagementService will + // verify the integrity of the received AsymmetricSignRequest.digest + // using this checksum. KeyManagementService will report an error if the + // checksum verification fails. If you receive a checksum error, your + // client should verify that CRC32C(AsymmetricSignRequest.digest) is + // equal to AsymmetricSignRequest.digest_crc32c, and if so, perform a + // limited number of retries. A persistent mismatch may indicate an + // issue in your computation of the CRC32C checksum. Note: This field is + // defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never + // exceed 2^32-1, and can be safely downconverted to uint32 in languages + // that support this type. NOTE: This field is in Beta. + DigestCrc32c int64 `json:"digestCrc32c,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "Digest") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -314,14 +369,44 @@ func (s *AsymmetricSignRequest) MarshalJSON() ([]byte, error) { // AsymmetricSignResponse: Response message for // KeyManagementService.AsymmetricSign. type AsymmetricSignResponse struct { + // Name: The resource name of the CryptoKeyVersion used for signing. 
+ // Check this field to verify that the intended resource was used for + // signing. NOTE: This field is in Beta. + Name string `json:"name,omitempty"` + // Signature: The created signature. Signature string `json:"signature,omitempty"` + // SignatureCrc32c: Integrity verification field. A CRC32C checksum of + // the returned AsymmetricSignResponse.signature. An integrity check of + // AsymmetricSignResponse.signature can be performed by computing the + // CRC32C checksum of AsymmetricSignResponse.signature and comparing + // your results to this field. Discard the response in case of + // non-matching checksum values, and perform a limited number of + // retries. A persistent mismatch may indicate an issue in your + // computation of the CRC32C checksum. Note: This field is defined as + // int64 for reasons of compatibility across different languages. + // However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that + // support this type. NOTE: This field is in Beta. + SignatureCrc32c int64 `json:"signatureCrc32c,omitempty,string"` + + // VerifiedDigestCrc32c: Integrity verification field. A flag indicating + // whether AsymmetricSignRequest.digest_crc32c was received by + // KeyManagementService and used for the integrity verification of the + // digest. A false value of this field indicates either that + // AsymmetricSignRequest.digest_crc32c was left unset or that it was not + // delivered to KeyManagementService. If you've set + // AsymmetricSignRequest.digest_crc32c but this field is still false, + // discard the response and perform a limited number of retries. NOTE: + // This field is in Beta. + VerifiedDigestCrc32c bool `json:"verifiedDigestCrc32c,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Signature") to + // ForceSendFields is a list of field names (e.g. "Name") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -329,8 +414,8 @@ type AsymmetricSignResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Signature") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -344,72 +429,31 @@ func (s *AsymmetricSignResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AuditConfig: Specifies the audit configuration for a service. -// The configuration determines which permission types are logged, and -// what -// identities, if any, are exempted from logging. -// An AuditConfig must have one or more AuditLogConfigs. 
-// -// If there are AuditConfigs for both `allServices` and a specific -// service, -// the union of the two AuditConfigs is used for that service: the -// log_types -// specified in each AuditConfig are enabled, and the exempted_members -// in each -// AuditLogConfig are exempted. -// -// Example Policy with multiple AuditConfigs: -// -// { -// "audit_configs": [ -// { -// "service": "allServices" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// }, -// { -// "log_type": "ADMIN_READ", -// } -// ] -// }, -// { -// "service": "sampleservice.googleapis.com" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// }, -// { -// "log_type": "DATA_WRITE", -// "exempted_members": [ -// "user:aliya@example.com" -// ] -// } -// ] -// } -// ] -// } -// -// For sampleservice, this policy enables DATA_READ, DATA_WRITE and -// ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, -// and -// aliya@example.com from DATA_WRITE logging. +// AuditConfig: Specifies the audit configuration for a service. The +// configuration determines which permission types are logged, and what +// identities, if any, are exempted from logging. An AuditConfig must +// have one or more AuditLogConfigs. If there are AuditConfigs for both +// `allServices` and a specific service, the union of the two +// AuditConfigs is used for that service: the log_types specified in +// each AuditConfig are enabled, and the exempted_members in each +// AuditLogConfig are exempted. Example Policy with multiple +// AuditConfigs: { "audit_configs": [ { "service": "allServices", +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": +// [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { +// "log_type": "ADMIN_READ" } ] }, { "service": +// "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": +// "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ +// "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy +// enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts +// jose@example.com from DATA_READ logging, and aliya@example.com from +// DATA_WRITE logging. type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of // permission. AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` - // Service: Specifies a service that will be enabled for audit - // logging. - // For example, `storage.googleapis.com`, - // `cloudsql.googleapis.com`. + // Service: Specifies a service that will be enabled for audit logging. + // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. // `allServices` is a special value that covers all services. Service string `json:"service,omitempty"` @@ -438,31 +482,15 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { } // AuditLogConfig: Provides the configuration for logging a type of -// permissions. -// Example: -// -// { -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// } -// ] -// } -// -// This enables 'DATA_READ' and 'DATA_WRITE' logging, while -// exempting -// jose@example.com from DATA_READ logging. +// permissions. 
Example: { "audit_log_configs": [ { "log_type": +// "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { +// "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and +// 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ +// logging. type AuditLogConfig struct { // ExemptedMembers: Specifies the identities that do not cause logging - // for this type of - // permission. - // Follows the same format of Binding.members. + // for this type of permission. Follows the same format of + // Binding.members. ExemptedMembers []string `json:"exemptedMembers,omitempty"` // LogType: The log type that this config enables. @@ -500,98 +528,60 @@ func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: The condition that is associated with this binding. - // - // If the condition evaluates to `true`, then this binding applies to - // the - // current request. - // - // If the condition evaluates to `false`, then this binding does not - // apply to - // the current request. However, a different role binding might grant - // the same - // role to one or more of the members in this binding. - // - // To learn which resources support conditions in their IAM policies, - // see - // the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/r - // esource-policies). + // BindingId: A client-specified ID for this binding. Expected to be + // globally unique to support the internal bindings-by-ID API. + BindingId string `json:"bindingId,omitempty"` + + // Condition: The condition that is associated with this binding. If the + // condition evaluates to `true`, then this binding applies to the + // current request. If the condition evaluates to `false`, then this + // binding does not apply to the current request. However, a different + // role binding might grant the same role to one or more of the members + // in this binding. To learn which resources support conditions in their + // IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud - // Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@example.com` . - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. - // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a user that has been recently deleted. - // For - // example, `alice@example.com?uid=123456789012345678901`. If the - // user is - // recovered, this value reverts to `user:{emailid}` and the - // recovered user - // retains the role in the binding. 
- // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address - // (plus - // unique identifier) representing a service account that has been - // recently - // deleted. For example, - // + // Platform resource. `members` can have the following values: * + // `allUsers`: A special identifier that represents anyone who is on the + // internet; with or without a Google account. * + // `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. * + // `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . * + // `serviceAccount:{emailid}`: An email address that represents a + // service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An + // email address that represents a Google group. For example, + // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An + // email address (plus unique identifier) representing a user that has + // been recently deleted. For example, + // `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered + // user retains the role in the binding. * + // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account - // retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a Google group that has been recently - // deleted. For example, - // `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` - // and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. - // - // + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains + // the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: + // An email address (plus unique identifier) representing a Google group + // that has been recently deleted. For example, + // `admins@example.com?uid=123456789012345678901`. If the group is + // recovered, this value reverts to `group:{emailid}` and the recovered + // group retains the role in the binding. * `domain:{domain}`: The G + // Suite domain (primary) that represents all the users of that domain. + // For example, `google.com` or `example.com`. Members []string `json:"members,omitempty"` - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Role: Role that is assigned to `members`. For example, + // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` - // ForceSendFields is a list of field names (e.g. "Condition") to + // ForceSendFields is a list of field names (e.g. "BindingId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -599,7 +589,7 @@ type Binding struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Condition") to include in + // NullFields is a list of field names (e.g. "BindingId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -614,95 +604,101 @@ func (s *Binding) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// CertificateChains: Certificate chains needed to verify the +// attestation. Certificates in chains are PEM-encoded and are ordered +// based on https://tools.ietf.org/html/rfc5246#section-7.4.2. +type CertificateChains struct { + // CaviumCerts: Cavium certificate chain corresponding to the + // attestation. + CaviumCerts []string `json:"caviumCerts,omitempty"` + + // GoogleCardCerts: Google card certificate chain corresponding to the + // attestation. + GoogleCardCerts []string `json:"googleCardCerts,omitempty"` + + // GooglePartitionCerts: Google partition certificate chain + // corresponding to the attestation. + GooglePartitionCerts []string `json:"googlePartitionCerts,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CaviumCerts") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CaviumCerts") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CertificateChains) MarshalJSON() ([]byte, error) { + type NoMethod CertificateChains + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // CryptoKey: A CryptoKey represents a logical key that can be used for -// cryptographic -// operations. -// -// A CryptoKey is made up of zero or more versions, -// which represent the actual key material used in cryptographic -// operations. +// cryptographic operations. A CryptoKey is made up of zero or more +// versions, which represent the actual key material used in +// cryptographic operations. type CryptoKey struct { // CreateTime: Output only. The time at which this CryptoKey was // created. CreateTime string `json:"createTime,omitempty"` - // Labels: Labels with user-defined metadata. For more information, - // see - // [Labeling Keys](/kms/docs/labeling-keys). + // Labels: Labels with user-defined metadata. For more information, see + // [Labeling Keys](https://cloud.google.com/kms/docs/labeling-keys). Labels map[string]string `json:"labels,omitempty"` - // Name: Output only. The resource name for this CryptoKey in the - // format + // Name: Output only. 
The resource name for this CryptoKey in the format // `projects/*/locations/*/keyRings/*/cryptoKeys/*`. Name string `json:"name,omitempty"` // NextRotationTime: At next_rotation_time, the Key Management Service - // will automatically: - // - // 1. Create a new version of this CryptoKey. - // 2. Mark the new version as primary. - // - // Key rotations performed manually via - // CreateCryptoKeyVersion and - // UpdateCryptoKeyPrimaryVersion - // do not affect next_rotation_time. - // - // Keys with purpose - // ENCRYPT_DECRYPT support + // will automatically: 1. Create a new version of this CryptoKey. 2. + // Mark the new version as primary. Key rotations performed manually via + // CreateCryptoKeyVersion and UpdateCryptoKeyPrimaryVersion do not + // affect next_rotation_time. Keys with purpose ENCRYPT_DECRYPT support // automatic rotation. For other keys, this field must be omitted. NextRotationTime string `json:"nextRotationTime,omitempty"` // Primary: Output only. A copy of the "primary" CryptoKeyVersion that - // will be used - // by Encrypt when this CryptoKey is given - // in EncryptRequest.name. - // - // The CryptoKey's primary version can be updated - // via - // UpdateCryptoKeyPrimaryVersion. - // - // Keys with purpose - // ENCRYPT_DECRYPT may have a - // primary. For other keys, this field will be omitted. + // will be used by Encrypt when this CryptoKey is given in + // EncryptRequest.name. The CryptoKey's primary version can be updated + // via UpdateCryptoKeyPrimaryVersion. Keys with purpose ENCRYPT_DECRYPT + // may have a primary. For other keys, this field will be omitted. Primary *CryptoKeyVersion `json:"primary,omitempty"` // Purpose: Immutable. The immutable purpose of this CryptoKey. // // Possible values: // "CRYPTO_KEY_PURPOSE_UNSPECIFIED" - Not specified. - // "ENCRYPT_DECRYPT" - CryptoKeys with this purpose may be used - // with - // Encrypt and - // Decrypt. - // "ASYMMETRIC_SIGN" - CryptoKeys with this purpose may be used - // with - // AsymmetricSign and - // GetPublicKey. + // "ENCRYPT_DECRYPT" - CryptoKeys with this purpose may be used with + // Encrypt and Decrypt. + // "ASYMMETRIC_SIGN" - CryptoKeys with this purpose may be used with + // AsymmetricSign and GetPublicKey. // "ASYMMETRIC_DECRYPT" - CryptoKeys with this purpose may be used - // with - // AsymmetricDecrypt and - // GetPublicKey. + // with AsymmetricDecrypt and GetPublicKey. Purpose string `json:"purpose,omitempty"` // RotationPeriod: next_rotation_time will be advanced by this period - // when the service - // automatically rotates a key. Must be at least 24 hours and at - // most - // 876,000 hours. - // - // If rotation_period is set, next_rotation_time must also be set. - // - // Keys with purpose - // ENCRYPT_DECRYPT support - // automatic rotation. For other keys, this field must be omitted. + // when the service automatically rotates a key. Must be at least 24 + // hours and at most 876,000 hours. If rotation_period is set, + // next_rotation_time must also be set. Keys with purpose + // ENCRYPT_DECRYPT support automatic rotation. For other keys, this + // field must be omitted. RotationPeriod string `json:"rotationPeriod,omitempty"` // VersionTemplate: A template describing settings for new - // CryptoKeyVersion instances. - // The properties of new CryptoKeyVersion instances created by - // either - // CreateCryptoKeyVersion or - // auto-rotation are controlled by this template. + // CryptoKeyVersion instances. 
The properties of new CryptoKeyVersion + // instances created by either CreateCryptoKeyVersion or auto-rotation + // are controlled by this template. VersionTemplate *CryptoKeyVersionTemplate `json:"versionTemplate,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -733,22 +729,14 @@ func (s *CryptoKey) MarshalJSON() ([]byte, error) { } // CryptoKeyVersion: A CryptoKeyVersion represents an individual -// cryptographic key, and the -// associated key material. -// -// An ENABLED version can be -// used for cryptographic operations. -// -// For security reasons, the raw cryptographic key material represented -// by a +// cryptographic key, and the associated key material. An ENABLED +// version can be used for cryptographic operations. For security +// reasons, the raw cryptographic key material represented by a // CryptoKeyVersion can never be viewed or exported. It can only be used -// to -// encrypt, decrypt, or sign data when an authorized user or application -// invokes -// Cloud KMS. +// to encrypt, decrypt, or sign data when an authorized user or +// application invokes Cloud KMS. type CryptoKeyVersion struct { - // Algorithm: Output only. The CryptoKeyVersionAlgorithm that - // this + // Algorithm: Output only. The CryptoKeyVersionAlgorithm that this // CryptoKeyVersion supports. // // Possible values: @@ -787,12 +775,9 @@ type CryptoKeyVersion struct { Algorithm string `json:"algorithm,omitempty"` // Attestation: Output only. Statement that was generated and signed by - // the HSM at key - // creation time. Use this statement to verify attributes of the key as - // stored - // on the HSM, independently of Google. Only provided for key versions - // with - // protection_level HSM. + // the HSM at key creation time. Use this statement to verify attributes + // of the key as stored on the HSM, independently of Google. Only + // provided for key versions with protection_level HSM. Attestation *KeyOperationAttestation `json:"attestation,omitempty"` // CreateTime: Output only. The time at which this CryptoKeyVersion was @@ -800,55 +785,43 @@ type CryptoKeyVersion struct { CreateTime string `json:"createTime,omitempty"` // DestroyEventTime: Output only. The time this CryptoKeyVersion's key - // material was - // destroyed. Only present if state is - // DESTROYED. + // material was destroyed. Only present if state is DESTROYED. DestroyEventTime string `json:"destroyEventTime,omitempty"` // DestroyTime: Output only. The time this CryptoKeyVersion's key - // material is scheduled - // for destruction. Only present if state is + // material is scheduled for destruction. Only present if state is // DESTROY_SCHEDULED. DestroyTime string `json:"destroyTime,omitempty"` // ExternalProtectionLevelOptions: ExternalProtectionLevelOptions stores - // a group of additional fields for - // configuring a CryptoKeyVersion that are specific to the - // EXTERNAL protection level. + // a group of additional fields for configuring a CryptoKeyVersion that + // are specific to the EXTERNAL protection level. ExternalProtectionLevelOptions *ExternalProtectionLevelOptions `json:"externalProtectionLevelOptions,omitempty"` // GenerateTime: Output only. The time this CryptoKeyVersion's key - // material was - // generated. + // material was generated. GenerateTime string `json:"generateTime,omitempty"` // ImportFailureReason: Output only. The root cause of an import - // failure. Only present if - // state is - // IMPORT_FAILED. + // failure. Only present if state is IMPORT_FAILED. 
ImportFailureReason string `json:"importFailureReason,omitempty"` - // ImportJob: Output only. The name of the ImportJob used to import - // this - // CryptoKeyVersion. Only present if the underlying key material - // was + // ImportJob: Output only. The name of the ImportJob used to import this + // CryptoKeyVersion. Only present if the underlying key material was // imported. ImportJob string `json:"importJob,omitempty"` // ImportTime: Output only. The time at which this CryptoKeyVersion's - // key material - // was imported. + // key material was imported. ImportTime string `json:"importTime,omitempty"` // Name: Output only. The resource name for this CryptoKeyVersion in the // format - // `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersio - // ns/*`. + // `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`. Name string `json:"name,omitempty"` // ProtectionLevel: Output only. The ProtectionLevel describing how - // crypto operations are - // performed with this CryptoKeyVersion. + // crypto operations are performed with this CryptoKeyVersion. // // Possible values: // "PROTECTION_LEVEL_UNSPECIFIED" - Not specified. @@ -864,33 +837,25 @@ type CryptoKeyVersion struct { // Possible values: // "CRYPTO_KEY_VERSION_STATE_UNSPECIFIED" - Not specified. // "PENDING_GENERATION" - This version is still being generated. It - // may not be used, enabled, - // disabled, or destroyed yet. Cloud KMS will automatically mark - // this - // version ENABLED as soon as the version is ready. + // may not be used, enabled, disabled, or destroyed yet. Cloud KMS will + // automatically mark this version ENABLED as soon as the version is + // ready. // "ENABLED" - This version may be used for cryptographic operations. // "DISABLED" - This version may not be used, but the key material is - // still available, - // and the version can be placed back into the ENABLED state. + // still available, and the version can be placed back into the ENABLED + // state. // "DESTROYED" - This version is destroyed, and the key material is no - // longer stored. - // A version may not leave this state once entered. + // longer stored. A version may not leave this state once entered. // "DESTROY_SCHEDULED" - This version is scheduled for destruction, - // and will be destroyed soon. - // Call - // RestoreCryptoKeyVersion - // to put it back into the DISABLED state. + // and will be destroyed soon. Call RestoreCryptoKeyVersion to put it + // back into the DISABLED state. // "PENDING_IMPORT" - This version is still being imported. It may not - // be used, enabled, - // disabled, or destroyed yet. Cloud KMS will automatically mark - // this - // version ENABLED as soon as the version is ready. + // be used, enabled, disabled, or destroyed yet. Cloud KMS will + // automatically mark this version ENABLED as soon as the version is + // ready. // "IMPORT_FAILED" - This version was not imported successfully. It - // may not be used, enabled, - // disabled, or destroyed. The submitted key material has been - // discarded. - // Additional details can be found - // in + // may not be used, enabled, disabled, or destroyed. The submitted key + // material has been discarded. Additional details can be found in // CryptoKeyVersion.import_failure_reason. 
State string `json:"state,omitempty"` @@ -922,18 +887,14 @@ func (s *CryptoKeyVersion) MarshalJSON() ([]byte, error) { } // CryptoKeyVersionTemplate: A CryptoKeyVersionTemplate specifies the -// properties to use when creating -// a new CryptoKeyVersion, either manually with -// CreateCryptoKeyVersion or -// automatically as a result of auto-rotation. +// properties to use when creating a new CryptoKeyVersion, either +// manually with CreateCryptoKeyVersion or automatically as a result of +// auto-rotation. type CryptoKeyVersionTemplate struct { - // Algorithm: Required. Algorithm to use - // when creating a CryptoKeyVersion based on this template. - // - // For backwards compatibility, GOOGLE_SYMMETRIC_ENCRYPTION is implied - // if both - // this field is omitted and CryptoKey.purpose is - // ENCRYPT_DECRYPT. + // Algorithm: Required. Algorithm to use when creating a + // CryptoKeyVersion based on this template. For backwards compatibility, + // GOOGLE_SYMMETRIC_ENCRYPTION is implied if both this field is omitted + // and CryptoKey.purpose is ENCRYPT_DECRYPT. // // Possible values: // "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED" - Not specified. @@ -971,8 +932,8 @@ type CryptoKeyVersionTemplate struct { Algorithm string `json:"algorithm,omitempty"` // ProtectionLevel: ProtectionLevel to use when creating a - // CryptoKeyVersion based on - // this template. Immutable. Defaults to SOFTWARE. + // CryptoKeyVersion based on this template. Immutable. Defaults to + // SOFTWARE. // // Possible values: // "PROTECTION_LEVEL_UNSPECIFIED" - Not specified. @@ -1009,16 +970,46 @@ func (s *CryptoKeyVersionTemplate) MarshalJSON() ([]byte, error) { // DecryptRequest: Request message for KeyManagementService.Decrypt. type DecryptRequest struct { // AdditionalAuthenticatedData: Optional. Optional data that must match - // the data originally supplied - // in + // the data originally supplied in // EncryptRequest.additional_authenticated_data. AdditionalAuthenticatedData string `json:"additionalAuthenticatedData,omitempty"` - // Ciphertext: Required. The encrypted data originally returned - // in + // AdditionalAuthenticatedDataCrc32c: Optional. An optional CRC32C + // checksum of the DecryptRequest.additional_authenticated_data. If + // specified, KeyManagementService will verify the integrity of the + // received DecryptRequest.additional_authenticated_data using this + // checksum. KeyManagementService will report an error if the checksum + // verification fails. If you receive a checksum error, your client + // should verify that + // CRC32C(DecryptRequest.additional_authenticated_data) is equal to + // DecryptRequest.additional_authenticated_data_crc32c, and if so, + // perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: + // This field is defined as int64 for reasons of compatibility across + // different languages. However, it is a non-negative integer, which + // will never exceed 2^32-1, and can be safely downconverted to uint32 + // in languages that support this type. NOTE: This field is in Beta. + AdditionalAuthenticatedDataCrc32c int64 `json:"additionalAuthenticatedDataCrc32c,omitempty,string"` + + // Ciphertext: Required. The encrypted data originally returned in // EncryptResponse.ciphertext. Ciphertext string `json:"ciphertext,omitempty"` + // CiphertextCrc32c: Optional. An optional CRC32C checksum of the + // DecryptRequest.ciphertext. 
If specified, KeyManagementService will + // verify the integrity of the received DecryptRequest.ciphertext using + // this checksum. KeyManagementService will report an error if the + // checksum verification fails. If you receive a checksum error, your + // client should verify that CRC32C(DecryptRequest.ciphertext) is equal + // to DecryptRequest.ciphertext_crc32c, and if so, perform a limited + // number of retries. A persistent mismatch may indicate an issue in + // your computation of the CRC32C checksum. Note: This field is defined + // as int64 for reasons of compatibility across different languages. + // However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that + // support this type. NOTE: This field is in Beta. + CiphertextCrc32c int64 `json:"ciphertextCrc32c,omitempty,string"` + // ForceSendFields is a list of field names (e.g. // "AdditionalAuthenticatedData") to unconditionally include in API // requests. By default, fields with empty values are omitted from API @@ -1050,6 +1041,22 @@ type DecryptResponse struct { // EncryptRequest.plaintext. Plaintext string `json:"plaintext,omitempty"` + // PlaintextCrc32c: Integrity verification field. A CRC32C checksum of + // the returned DecryptResponse.plaintext. An integrity check of + // DecryptResponse.plaintext can be performed by computing the CRC32C + // checksum of DecryptResponse.plaintext and comparing your results to + // this field. Discard the response in case of non-matching checksum + // values, and perform a limited number of retries. A persistent + // mismatch may indicate an issue in your computation of the CRC32C + // checksum. Note: receiving this response message indicates that + // KeyManagementService is able to successfully decrypt the ciphertext. + // Note: This field is defined as int64 for reasons of compatibility + // across different languages. However, it is a non-negative integer, + // which will never exceed 2^32-1, and can be safely downconverted to + // uint32 in languages that support this type. NOTE: This field is in + // Beta. + PlaintextCrc32c int64 `json:"plaintextCrc32c,omitempty,string"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -1119,30 +1126,53 @@ func (s *Digest) MarshalJSON() ([]byte, error) { // EncryptRequest: Request message for KeyManagementService.Encrypt. type EncryptRequest struct { // AdditionalAuthenticatedData: Optional. Optional data that, if - // specified, must also be provided during decryption - // through DecryptRequest.additional_authenticated_data. - // - // The maximum size depends on the key version's - // protection_level. For - // SOFTWARE keys, the AAD must be no larger than - // 64KiB. For HSM keys, the combined length of the - // plaintext and additional_authenticated_data fields must be no larger - // than - // 8KiB. + // specified, must also be provided during decryption through + // DecryptRequest.additional_authenticated_data. The maximum size + // depends on the key version's protection_level. For SOFTWARE keys, the + // AAD must be no larger than 64KiB. For HSM keys, the combined length + // of the plaintext and additional_authenticated_data fields must be no + // larger than 8KiB. AdditionalAuthenticatedData string `json:"additionalAuthenticatedData,omitempty"` + // AdditionalAuthenticatedDataCrc32c: Optional. An optional CRC32C + // checksum of the EncryptRequest.additional_authenticated_data. 
If + // specified, KeyManagementService will verify the integrity of the + // received EncryptRequest.additional_authenticated_data using this + // checksum. KeyManagementService will report an error if the checksum + // verification fails. If you receive a checksum error, your client + // should verify that + // CRC32C(EncryptRequest.additional_authenticated_data) is equal to + // EncryptRequest.additional_authenticated_data_crc32c, and if so, + // perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: + // This field is defined as int64 for reasons of compatibility across + // different languages. However, it is a non-negative integer, which + // will never exceed 2^32-1, and can be safely downconverted to uint32 + // in languages that support this type. NOTE: This field is in Beta. + AdditionalAuthenticatedDataCrc32c int64 `json:"additionalAuthenticatedDataCrc32c,omitempty,string"` + // Plaintext: Required. The data to encrypt. Must be no larger than - // 64KiB. - // - // The maximum size depends on the key version's - // protection_level. For - // SOFTWARE keys, the plaintext must be no larger - // than 64KiB. For HSM keys, the combined length of the - // plaintext and additional_authenticated_data fields must be no larger - // than - // 8KiB. + // 64KiB. The maximum size depends on the key version's + // protection_level. For SOFTWARE keys, the plaintext must be no larger + // than 64KiB. For HSM keys, the combined length of the plaintext and + // additional_authenticated_data fields must be no larger than 8KiB. Plaintext string `json:"plaintext,omitempty"` + // PlaintextCrc32c: Optional. An optional CRC32C checksum of the + // EncryptRequest.plaintext. If specified, KeyManagementService will + // verify the integrity of the received EncryptRequest.plaintext using + // this checksum. KeyManagementService will report an error if the + // checksum verification fails. If you receive a checksum error, your + // client should verify that CRC32C(EncryptRequest.plaintext) is equal + // to EncryptRequest.plaintext_crc32c, and if so, perform a limited + // number of retries. A persistent mismatch may indicate an issue in + // your computation of the CRC32C checksum. Note: This field is defined + // as int64 for reasons of compatibility across different languages. + // However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that + // support this type. NOTE: This field is in Beta. + PlaintextCrc32c int64 `json:"plaintextCrc32c,omitempty,string"` + // ForceSendFields is a list of field names (e.g. // "AdditionalAuthenticatedData") to unconditionally include in API // requests. By default, fields with empty values are omitted from API @@ -1173,12 +1203,48 @@ type EncryptResponse struct { // Ciphertext: The encrypted data. Ciphertext string `json:"ciphertext,omitempty"` + // CiphertextCrc32c: Integrity verification field. A CRC32C checksum of + // the returned EncryptResponse.ciphertext. An integrity check of + // EncryptResponse.ciphertext can be performed by computing the CRC32C + // checksum of EncryptResponse.ciphertext and comparing your results to + // this field. Discard the response in case of non-matching checksum + // values, and perform a limited number of retries. A persistent + // mismatch may indicate an issue in your computation of the CRC32C + // checksum. 
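Taken together, these request and response checksum fields let a caller perform end-to-end integrity verification around an Encrypt call. A minimal sketch follows, assuming the generated client in this package; the key name is a placeholder, and CRC32C is computed with the Castagnoli table from hash/crc32 over the raw (base64-decoded) bytes:

package kmsexample

import (
	"context"
	"encoding/base64"
	"fmt"
	"hash/crc32"

	cloudkms "google.golang.org/api/cloudkms/v1"
)

func crc32c(b []byte) int64 {
	return int64(crc32.Checksum(b, crc32.MakeTable(crc32.Castagnoli)))
}

// encryptVerified sends plaintext with its CRC32C and checks the response
// checksum and verification flag before trusting the ciphertext.
func encryptVerified(ctx context.Context, svc *cloudkms.Service, keyName string, plaintext []byte) ([]byte, error) {
	req := &cloudkms.EncryptRequest{
		Plaintext:       base64.StdEncoding.EncodeToString(plaintext),
		PlaintextCrc32c: crc32c(plaintext),
	}
	resp, err := svc.Projects.Locations.KeyRings.CryptoKeys.Encrypt(keyName, req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	if !resp.VerifiedPlaintextCrc32c {
		return nil, fmt.Errorf("plaintext checksum was not verified by the service")
	}
	ciphertext, err := base64.StdEncoding.DecodeString(resp.Ciphertext)
	if err != nil {
		return nil, err
	}
	if crc32c(ciphertext) != resp.CiphertextCrc32c {
		return nil, fmt.Errorf("ciphertext failed CRC32C verification")
	}
	return ciphertext, nil
}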
Note: This field is defined as int64 for reasons of + // compatibility across different languages. However, it is a + // non-negative integer, which will never exceed 2^32-1, and can be + // safely downconverted to uint32 in languages that support this type. + // NOTE: This field is in Beta. + CiphertextCrc32c int64 `json:"ciphertextCrc32c,omitempty,string"` + // Name: The resource name of the CryptoKeyVersion used in encryption. - // Check - // this field to verify that the intended resource was used for + // Check this field to verify that the intended resource was used for // encryption. Name string `json:"name,omitempty"` + // VerifiedAdditionalAuthenticatedDataCrc32c: Integrity verification + // field. A flag indicating whether + // EncryptRequest.additional_authenticated_data_crc32c was received by + // KeyManagementService and used for the integrity verification of the + // AAD. A false value of this field indicates either that + // EncryptRequest.additional_authenticated_data_crc32c was left unset or + // that it was not delivered to KeyManagementService. If you've set + // EncryptRequest.additional_authenticated_data_crc32c but this field is + // still false, discard the response and perform a limited number of + // retries. NOTE: This field is in Beta. + VerifiedAdditionalAuthenticatedDataCrc32c bool `json:"verifiedAdditionalAuthenticatedDataCrc32c,omitempty"` + + // VerifiedPlaintextCrc32c: Integrity verification field. A flag + // indicating whether EncryptRequest.plaintext_crc32c was received by + // KeyManagementService and used for the integrity verification of the + // plaintext. A false value of this field indicates either that + // EncryptRequest.plaintext_crc32c was left unset or that it was not + // delivered to KeyManagementService. If you've set + // EncryptRequest.plaintext_crc32c but this field is still false, + // discard the response and perform a limited number of retries. NOTE: + // This field is in Beta. + VerifiedPlaintextCrc32c bool `json:"verifiedPlaintextCrc32c,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -1207,65 +1273,40 @@ func (s *EncryptResponse) MarshalJSON() ([]byte, error) { } // Expr: Represents a textual expression in the Common Expression -// Language (CEL) -// syntax. CEL is a C-like expression language. The syntax and semantics -// of CEL -// are documented at https://github.com/google/cel-spec. -// -// Example (Comparison): -// -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" -// -// Example (Equality): -// -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == -// request.auth.claims.email" -// -// Example (Logic): -// -// title: "Public documents" -// description: "Determine whether the document should be publicly -// visible" -// expression: "document.type != 'private' && document.type != -// 'internal'" -// -// Example (Data Manipulation): -// -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + -// string(document.create_time)" -// -// The exact variables and functions that may be referenced within an -// expression -// are determined by the service that evaluates it. See the -// service -// documentation for additional information. +// Language (CEL) syntax. 
CEL is a C-like expression language. The +// syntax and semantics of CEL are documented at +// https://github.com/google/cel-spec. Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" +// description: "Determine whether the document should be publicly +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. type Expr struct { // Description: Optional. Description of the expression. This is a - // longer text which - // describes the expression, e.g. when hovered over it in a UI. + // longer text which describes the expression, e.g. when hovered over it + // in a UI. Description string `json:"description,omitempty"` // Expression: Textual representation of an expression in Common - // Expression Language - // syntax. + // Expression Language syntax. Expression string `json:"expression,omitempty"` // Location: Optional. String indicating the location of the expression - // for error - // reporting, e.g. a file name and a position in the file. + // for error reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` // Title: Optional. Title for the expression, i.e. a short string - // describing - // its purpose. This can be used e.g. in UIs which allow to enter - // the - // expression. + // describing its purpose. This can be used e.g. in UIs which allow to + // enter the expression. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -1292,9 +1333,8 @@ func (s *Expr) MarshalJSON() ([]byte, error) { } // ExternalProtectionLevelOptions: ExternalProtectionLevelOptions stores -// a group of additional fields for -// configuring a CryptoKeyVersion that are specific to the -// EXTERNAL protection level. +// a group of additional fields for configuring a CryptoKeyVersion that +// are specific to the EXTERNAL protection level. type ExternalProtectionLevelOptions struct { // ExternalKeyUri: The URI for an external resource that this // CryptoKeyVersion represents. @@ -1327,10 +1367,8 @@ func (s *ExternalProtectionLevelOptions) MarshalJSON() ([]byte, error) { // ImportCryptoKeyVersionRequest: Request message for // KeyManagementService.ImportCryptoKeyVersion. type ImportCryptoKeyVersionRequest struct { - // Algorithm: Required. The algorithm of - // the key being imported. This does not need to match - // the - // version_template of the CryptoKey this + // Algorithm: Required. The algorithm of the key being imported. This + // does not need to match the version_template of the CryptoKey this // version imports into. // // Possible values: @@ -1368,37 +1406,21 @@ type ImportCryptoKeyVersionRequest struct { // encryption by an external key manager. Algorithm string `json:"algorithm,omitempty"` - // ImportJob: Required. 
The name of the ImportJob that was used to - // wrap this key material. + // ImportJob: Required. The name of the ImportJob that was used to wrap + // this key material. ImportJob string `json:"importJob,omitempty"` - // RsaAesWrappedKey: Wrapped key material produced - // with - // RSA_OAEP_3072_SHA1_AES_256 - // or - // RSA_OAEP_4096_SHA1_AES_256. - // - // This field contains the concatenation of two wrapped keys: - //
- //     1. An ephemeral AES-256 wrapping key wrapped with the
- // public_key using RSAES-OAEP with SHA-1,
- // MGF1 with SHA-1, and an empty label.
- //     2. The key to be imported, wrapped with the ephemeral AES-256 key
- // using AES-KWP (RFC 5649).
- // - // If importing symmetric key material, it is expected that the - // unwrapped - // key contains plain bytes. If importing asymmetric key material, it - // is - // expected that the unwrapped key is in PKCS#8-encoded DER format - // (the - // PrivateKeyInfo structure from RFC 5208). - // - // This format is the same as the format produced by PKCS#11 - // mechanism + // RsaAesWrappedKey: Wrapped key material produced with + // RSA_OAEP_3072_SHA1_AES_256 or RSA_OAEP_4096_SHA1_AES_256. This field + // contains the concatenation of two wrapped keys: 1. An ephemeral + // AES-256 wrapping key wrapped with the public_key using RSAES-OAEP + // with SHA-1, MGF1 with SHA-1, and an empty label. 2. The key to be + // imported, wrapped with the ephemeral AES-256 key using AES-KWP (RFC + // 5649). If importing symmetric key material, it is expected that the + // unwrapped key contains plain bytes. If importing asymmetric key + // material, it is expected that the unwrapped key is in PKCS#8-encoded + // DER format (the PrivateKeyInfo structure from RFC 5208). This format + // is the same as the format produced by PKCS#11 mechanism // CKM_RSA_AES_KEY_WRAP. RsaAesWrappedKey string `json:"rsaAesWrappedKey,omitempty"` @@ -1425,55 +1447,31 @@ func (s *ImportCryptoKeyVersionRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ImportJob: An ImportJob can be used to create CryptoKeys -// and -// CryptoKeyVersions using pre-existing key material, -// generated outside of Cloud KMS. -// -// When an ImportJob is created, Cloud KMS will generate a "wrapping -// key", -// which is a public/private key pair. You use the wrapping key to -// encrypt (also -// known as wrap) the pre-existing key material to protect it during the -// import -// process. The nature of the wrapping key depends on the choice -// of -// import_method. When the wrapping key generation -// is complete, the state will be set to -// ACTIVE and the public_key -// can be fetched. The fetched public key can then be used to wrap -// your -// pre-existing key material. -// -// Once the key material is wrapped, it can be imported into a -// new -// CryptoKeyVersion in an existing CryptoKey by -// calling -// ImportCryptoKeyVersion. -// Multiple CryptoKeyVersions can be imported with a single -// ImportJob. Cloud KMS uses the private key portion of the wrapping key -// to -// unwrap the key material. Only Cloud KMS has access to the private -// key. -// -// An ImportJob expires 3 days after it is created. Once expired, Cloud -// KMS -// will no longer be able to import or unwrap any key material that was -// wrapped -// with the ImportJob's public key. -// -// For more information, see -// [Importing a key](https://cloud.google.com/kms/docs/importing-a-key). +// ImportJob: An ImportJob can be used to create CryptoKeys and +// CryptoKeyVersions using pre-existing key material, generated outside +// of Cloud KMS. When an ImportJob is created, Cloud KMS will generate a +// "wrapping key", which is a public/private key pair. You use the +// wrapping key to encrypt (also known as wrap) the pre-existing key +// material to protect it during the import process. The nature of the +// wrapping key depends on the choice of import_method. When the +// wrapping key generation is complete, the state will be set to ACTIVE +// and the public_key can be fetched. The fetched public key can then be +// used to wrap your pre-existing key material. 
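As a rough illustration of the wrapping flow just described, the sketch below assumes an ImportJob whose wrapping public key PEM has already been fetched. The RSAES-OAEP step uses SHA-1 with an empty label as specified; AES-KWP (RFC 5649) is not in the Go standard library, so wrapAESKWP is a hypothetical placeholder for a third-party implementation (for example, the one shipped with google/tink):

package kmsexample

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha1"
	"crypto/x509"
	"encoding/base64"
	"encoding/pem"
	"fmt"
)

// wrapAESKWP is a placeholder for an RFC 5649 AES key wrap with padding.
func wrapAESKWP(wrappingKey, keyMaterial []byte) ([]byte, error) {
	return nil, fmt.Errorf("AES-KWP not implemented here; use a dedicated library")
}

// wrapForImport produces the value expected in RsaAesWrappedKey (base64 of the
// two concatenated wrapped keys) for the RSA_OAEP_*_SHA1_AES_256 methods.
func wrapForImport(wrappingPublicKeyPEM string, keyMaterial []byte) (string, error) {
	block, _ := pem.Decode([]byte(wrappingPublicKeyPEM))
	if block == nil {
		return "", fmt.Errorf("no PEM block in wrapping public key")
	}
	pub, err := x509.ParsePKIXPublicKey(block.Bytes)
	if err != nil {
		return "", err
	}
	rsaPub, ok := pub.(*rsa.PublicKey)
	if !ok {
		return "", fmt.Errorf("wrapping key is not an RSA key")
	}
	// Step 1: ephemeral AES-256 key, wrapped with RSAES-OAEP (SHA-1, MGF1 with
	// SHA-1, empty label).
	ephemeral := make([]byte, 32)
	if _, err := rand.Read(ephemeral); err != nil {
		return "", err
	}
	wrappedEphemeral, err := rsa.EncryptOAEP(sha1.New(), rand.Reader, rsaPub, ephemeral, nil)
	if err != nil {
		return "", err
	}
	// Step 2: the key material itself, wrapped with the ephemeral key via AES-KWP.
	wrappedMaterial, err := wrapAESKWP(ephemeral, keyMaterial)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(append(wrappedEphemeral, wrappedMaterial...)), nil
}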
Once the key material is +// wrapped, it can be imported into a new CryptoKeyVersion in an +// existing CryptoKey by calling ImportCryptoKeyVersion. Multiple +// CryptoKeyVersions can be imported with a single ImportJob. Cloud KMS +// uses the private key portion of the wrapping key to unwrap the key +// material. Only Cloud KMS has access to the private key. An ImportJob +// expires 3 days after it is created. Once expired, Cloud KMS will no +// longer be able to import or unwrap any key material that was wrapped +// with the ImportJob's public key. For more information, see [Importing +// a key](https://cloud.google.com/kms/docs/importing-a-key). type ImportJob struct { // Attestation: Output only. Statement that was generated and signed by - // the key creator - // (for example, an HSM) at key creation time. Use this statement to - // verify - // attributes of the key as stored on the HSM, independently of - // Google. - // Only present if the chosen ImportMethod is one with a - // protection - // level of HSM. + // the key creator (for example, an HSM) at key creation time. Use this + // statement to verify attributes of the key as stored on the HSM, + // independently of Google. Only present if the chosen ImportMethod is + // one with a protection level of HSM. Attestation *KeyOperationAttestation `json:"attestation,omitempty"` // CreateTime: Output only. The time at which this ImportJob was @@ -1481,13 +1479,12 @@ type ImportJob struct { CreateTime string `json:"createTime,omitempty"` // ExpireEventTime: Output only. The time this ImportJob expired. Only - // present if - // state is EXPIRED. + // present if state is EXPIRED. ExpireEventTime string `json:"expireEventTime,omitempty"` // ExpireTime: Output only. The time at which this ImportJob is - // scheduled for - // expiration and can no longer be used to import key material. + // scheduled for expiration and can no longer be used to import key + // material. ExpireTime string `json:"expireTime,omitempty"` // GenerateTime: Output only. The time this ImportJob's key material was @@ -1500,39 +1497,28 @@ type ImportJob struct { // Possible values: // "IMPORT_METHOD_UNSPECIFIED" - Not specified. // "RSA_OAEP_3072_SHA1_AES_256" - This ImportMethod represents the - // CKM_RSA_AES_KEY_WRAP key wrapping - // scheme defined in the PKCS #11 standard. In summary, this - // involves - // wrapping the raw key with an ephemeral AES key, and wrapping - // the - // ephemeral AES key with a 3072 bit RSA key. For more details, see - // [RSA AES key - // wrap - // mechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/co - // s01/pkcs11-curr-v2.40-cos01.html#_Toc408226908). + // CKM_RSA_AES_KEY_WRAP key wrapping scheme defined in the PKCS #11 + // standard. In summary, this involves wrapping the raw key with an + // ephemeral AES key, and wrapping the ephemeral AES key with a 3072 bit + // RSA key. For more details, see [RSA AES key wrap + // mechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cos01/p + // kcs11-curr-v2.40-cos01.html#_Toc408226908). // "RSA_OAEP_4096_SHA1_AES_256" - This ImportMethod represents the - // CKM_RSA_AES_KEY_WRAP key wrapping - // scheme defined in the PKCS #11 standard. In summary, this - // involves - // wrapping the raw key with an ephemeral AES key, and wrapping - // the - // ephemeral AES key with a 4096 bit RSA key. For more details, see - // [RSA AES key - // wrap - // mechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/co - // s01/pkcs11-curr-v2.40-cos01.html#_Toc408226908). 
+ // CKM_RSA_AES_KEY_WRAP key wrapping scheme defined in the PKCS #11 + // standard. In summary, this involves wrapping the raw key with an + // ephemeral AES key, and wrapping the ephemeral AES key with a 4096 bit + // RSA key. For more details, see [RSA AES key wrap + // mechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cos01/p + // kcs11-curr-v2.40-cos01.html#_Toc408226908). ImportMethod string `json:"importMethod,omitempty"` - // Name: Output only. The resource name for this ImportJob in the - // format + // Name: Output only. The resource name for this ImportJob in the format // `projects/*/locations/*/keyRings/*/importJobs/*`. Name string `json:"name,omitempty"` // ProtectionLevel: Required. Immutable. The protection level of the - // ImportJob. This must match the - // protection_level of the - // version_template on the CryptoKey you - // attempt to import into. + // ImportJob. This must match the protection_level of the + // version_template on the CryptoKey you attempt to import into. // // Possible values: // "PROTECTION_LEVEL_UNSPECIFIED" - Not specified. @@ -1544,25 +1530,19 @@ type ImportJob struct { ProtectionLevel string `json:"protectionLevel,omitempty"` // PublicKey: Output only. The public key with which to wrap key - // material prior to - // import. Only returned if state is - // ACTIVE. + // material prior to import. Only returned if state is ACTIVE. PublicKey *WrappingPublicKey `json:"publicKey,omitempty"` // State: Output only. The current state of the ImportJob, indicating if - // it can - // be used. + // it can be used. // // Possible values: // "IMPORT_JOB_STATE_UNSPECIFIED" - Not specified. // "PENDING_GENERATION" - The wrapping key for this job is still being - // generated. It may not be - // used. Cloud KMS will automatically mark this job as - // ACTIVE as soon as the wrapping key is generated. - // "ACTIVE" - This job may be used in - // CreateCryptoKey and - // CreateCryptoKeyVersion - // requests. + // generated. It may not be used. Cloud KMS will automatically mark this + // job as ACTIVE as soon as the wrapping key is generated. + // "ACTIVE" - This job may be used in CreateCryptoKey and + // CreateCryptoKeyVersion requests. // "EXPIRED" - This job can no longer be used and may not leave this // state once entered. State string `json:"state,omitempty"` @@ -1595,14 +1575,15 @@ func (s *ImportJob) MarshalJSON() ([]byte, error) { } // KeyOperationAttestation: Contains an HSM-generated attestation about -// a key operation. For more -// information, see [Verifying -// attestations] +// a key operation. For more information, see [Verifying attestations] // (https://cloud.google.com/kms/docs/attest-key). type KeyOperationAttestation struct { + // CertChains: Output only. The certificate chains needed to validate + // the attestation + CertChains *CertificateChains `json:"certChains,omitempty"` + // Content: Output only. The attestation data provided by the HSM when - // the key - // operation was performed. + // the key operation was performed. Content string `json:"content,omitempty"` // Format: Output only. The format of the attestation data. @@ -1610,14 +1591,13 @@ type KeyOperationAttestation struct { // Possible values: // "ATTESTATION_FORMAT_UNSPECIFIED" - Not specified. // "CAVIUM_V1_COMPRESSED" - Cavium HSM attestation compressed with - // gzip. Note that this format is - // defined by Cavium and subject to change at any time. + // gzip. Note that this format is defined by Cavium and subject to + // change at any time. 
// "CAVIUM_V2_COMPRESSED" - Cavium HSM attestation V2 compressed with - // gzip. This is a new format - // introduced in Cavium's version 3.2-08. + // gzip. This is a new format introduced in Cavium's version 3.2-08. Format string `json:"format,omitempty"` - // ForceSendFields is a list of field names (e.g. "Content") to + // ForceSendFields is a list of field names (e.g. "CertChains") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -1625,7 +1605,7 @@ type KeyOperationAttestation struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Content") to include in + // NullFields is a list of field names (e.g. "CertChains") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -1645,8 +1625,7 @@ type KeyRing struct { // CreateTime: Output only. The time at which this KeyRing was created. CreateTime string `json:"createTime,omitempty"` - // Name: Output only. The resource name for the KeyRing in the - // format + // Name: Output only. The resource name for the KeyRing in the format // `projects/*/locations/*/keyRings/*`. Name string `json:"name,omitempty"` @@ -1684,14 +1663,11 @@ type ListCryptoKeyVersionsResponse struct { CryptoKeyVersions []*CryptoKeyVersion `json:"cryptoKeyVersions,omitempty"` // NextPageToken: A token to retrieve next page of results. Pass this - // value in - // ListCryptoKeyVersionsRequest.page_token to retrieve the next page - // of - // results. + // value in ListCryptoKeyVersionsRequest.page_token to retrieve the next + // page of results. NextPageToken string `json:"nextPageToken,omitempty"` - // TotalSize: The total number of CryptoKeyVersions that matched - // the + // TotalSize: The total number of CryptoKeyVersions that matched the // query. TotalSize int64 `json:"totalSize,omitempty"` @@ -1730,9 +1706,8 @@ type ListCryptoKeysResponse struct { CryptoKeys []*CryptoKey `json:"cryptoKeys,omitempty"` // NextPageToken: A token to retrieve next page of results. Pass this - // value in - // ListCryptoKeysRequest.page_token to retrieve the next page of - // results. + // value in ListCryptoKeysRequest.page_token to retrieve the next page + // of results. NextPageToken string `json:"nextPageToken,omitempty"` // TotalSize: The total number of CryptoKeys that matched the query. @@ -1772,9 +1747,8 @@ type ListImportJobsResponse struct { ImportJobs []*ImportJob `json:"importJobs,omitempty"` // NextPageToken: A token to retrieve next page of results. Pass this - // value in - // ListImportJobsRequest.page_token to retrieve the next page of - // results. + // value in ListImportJobsRequest.page_token to retrieve the next page + // of results. NextPageToken string `json:"nextPageToken,omitempty"` // TotalSize: The total number of ImportJobs that matched the query. @@ -1814,8 +1788,8 @@ type ListKeyRingsResponse struct { KeyRings []*KeyRing `json:"keyRings,omitempty"` // NextPageToken: A token to retrieve next page of results. Pass this - // value in - // ListKeyRingsRequest.page_token to retrieve the next page of results. + // value in ListKeyRingsRequest.page_token to retrieve the next page of + // results. 
NextPageToken string `json:"nextPageToken,omitempty"` // TotalSize: The total number of KeyRings that matched the query. @@ -1888,13 +1862,11 @@ func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) { // Location: A resource that represents Google Cloud Platform location. type Location struct { // DisplayName: The friendly name for this location, typically a nearby - // city name. - // For example, "Tokyo". + // city name. For example, "Tokyo". DisplayName string `json:"displayName,omitempty"` // Labels: Cross-service attributes for the location. For example - // - // {"cloud.googleapis.com/region": "us-east1"} + // {"cloud.googleapis.com/region": "us-east1"} Labels map[string]string `json:"labels,omitempty"` // LocationId: The canonical id for this location. For example: @@ -1902,13 +1874,12 @@ type Location struct { LocationId string `json:"locationId,omitempty"` // Metadata: Service-specific metadata. For example the available - // capacity at the given - // location. + // capacity at the given location. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: Resource name for the location, which may vary between - // implementations. - // For example: "projects/example-project/locations/us-east1" + // implementations. For example: + // "projects/example-project/locations/us-east1" Name string `json:"name,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1941,15 +1912,12 @@ func (s *Location) MarshalJSON() ([]byte, error) { // LocationMetadata: Cloud KMS metadata for the given // google.cloud.location.Location. type LocationMetadata struct { - // EkmAvailable: Indicates whether CryptoKeys - // with - // protection_level + // EkmAvailable: Indicates whether CryptoKeys with protection_level // EXTERNAL can be created in this location. EkmAvailable bool `json:"ekmAvailable,omitempty"` - // HsmAvailable: Indicates whether CryptoKeys with - // protection_level - // HSM can be created in this location. + // HsmAvailable: Indicates whether CryptoKeys with protection_level HSM + // can be created in this location. HsmAvailable bool `json:"hsmAvailable,omitempty"` // ForceSendFields is a list of field names (e.g. "EkmAvailable") to @@ -1976,154 +1944,77 @@ func (s *LocationMetadata) MarshalJSON() ([]byte, error) { } // Policy: An Identity and Access Management (IAM) policy, which -// specifies access -// controls for Google Cloud resources. -// -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or -// more -// `members` to a single `role`. Members can be user accounts, service -// accounts, +// specifies access controls for Google Cloud resources. A `Policy` is a +// collection of `bindings`. A `binding` binds one or more `members` to +// a single `role`. Members can be user accounts, service accounts, // Google groups, and domains (such as G Suite). A `role` is a named -// list of -// permissions; each `role` can be an IAM predefined role or a -// user-created -// custom role. -// -// For some types of Google Cloud resources, a `binding` can also -// specify a -// `condition`, which is a logical expression that allows access to a -// resource -// only if the expression evaluates to `true`. A condition can add -// constraints -// based on attributes of the request, the resource, or both. To learn -// which -// resources support conditions in their IAM policies, see the -// [IAM +// list of permissions; each `role` can be an IAM predefined role or a +// user-created custom role. 
For some types of Google Cloud resources, a +// `binding` can also specify a `condition`, which is a logical +// expression that allows access to a resource only if the expression +// evaluates to `true`. A condition can add constraints based on +// attributes of the request, the resource, or both. To learn which +// resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p -// olicies). -// -// **JSON example:** -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": [ -// "user:eve@example.com" -// ], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// -// **YAML example:** -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// - etag: BwWWja0YfJA= -// - version: 3 -// -// For a description of IAM and its features, see the -// [IAM documentation](https://cloud.google.com/iam/docs/). +// olicies). **JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: +// 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). type Policy struct { // AuditConfigs: Specifies cloud audit logging configuration for this // policy. AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` // Bindings: Associates a list of `members` to a `role`. Optionally, may - // specify a - // `condition` that determines how and when the `bindings` are applied. - // Each - // of the `bindings` must contain at least one member. + // specify a `condition` that determines how and when the `bindings` are + // applied. 
Each of the `bindings` must contain at least one member. Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. - // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. **Important:** If you use IAM + // Conditions, you must include the `etag` field whenever you call + // `setIamPolicy`. If you omit this field, then IAM allows you to + // overwrite a version `3` policy with a version `1` policy, and all of // the conditions in the version `3` policy are lost. Etag string `json:"etag,omitempty"` - // Version: Specifies the format of the policy. - // - // Valid values are `0`, `1`, and `3`. Requests that specify an invalid - // value - // are rejected. - // + // Version: Specifies the format of the policy. Valid values are `0`, + // `1`, and `3`. Requests that specify an invalid value are rejected. // Any operation that affects conditional role bindings must specify - // version - // `3`. This requirement applies to the following operations: - // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a - // policy - // that includes conditions - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of - // the conditions in the version `3` policy are lost. - // - // If a policy does not include any conditions, operations on that - // policy may - // specify any valid version or leave the field unset. - // - // To learn which resources support conditions in their IAM policies, - // see the - // [IAM + // version `3`. This requirement applies to the following operations: * + // Getting a policy that includes a conditional role binding * Adding a + // conditional role binding to a policy * Changing a conditional role + // binding in a policy * Removing any role binding, with or without a + // condition, from a policy that includes conditions **Important:** If + // you use IAM Conditions, you must include the `etag` field whenever + // you call `setIamPolicy`. 
If you omit this field, then IAM allows you + // to overwrite a version `3` policy with a version `1` policy, and all + // of the conditions in the version `3` policy are lost. If a policy + // does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. To learn which + // resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p // olicies). Version int64 `json:"version,omitempty"` @@ -2155,12 +2046,10 @@ func (s *Policy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PublicKey: The public key for a given CryptoKeyVersion. Obtained -// via +// PublicKey: The public key for a given CryptoKeyVersion. Obtained via // GetPublicKey. type PublicKey struct { - // Algorithm: The Algorithm associated - // with this key. + // Algorithm: The Algorithm associated with this key. // // Possible values: // "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED" - Not specified. @@ -2197,17 +2086,31 @@ type PublicKey struct { // encryption by an external key manager. Algorithm string `json:"algorithm,omitempty"` + // Name: The name of the CryptoKeyVersion public key. Provided here for + // verification. NOTE: This field is in Beta. + Name string `json:"name,omitempty"` + // Pem: The public key, encoded in PEM format. For more information, see - // the - // [RFC 7468](https://tools.ietf.org/html/rfc7468) sections for + // the [RFC 7468](https://tools.ietf.org/html/rfc7468) sections for // [General - // Considerations](https://tools.ietf.org/html/rfc7468#section-2) - // and - // [Textual Encoding of Subject Public Key - // Info] + // Considerations](https://tools.ietf.org/html/rfc7468#section-2) and + // [Textual Encoding of Subject Public Key Info] // (https://tools.ietf.org/html/rfc7468#section-13). Pem string `json:"pem,omitempty"` + // PemCrc32c: Integrity verification field. A CRC32C checksum of the + // returned PublicKey.pem. An integrity check of PublicKey.pem can be + // performed by computing the CRC32C checksum of PublicKey.pem and + // comparing your results to this field. Discard the response in case of + // non-matching checksum values, and perform a limited number of + // retries. A persistent mismatch may indicate an issue in your + // computation of the CRC32C checksum. Note: This field is defined as + // int64 for reasons of compatibility across different languages. + // However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that + // support this type. NOTE: This field is in Beta. + PemCrc32c int64 `json:"pemCrc32c,omitempty,string"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -2243,20 +2146,15 @@ type RestoreCryptoKeyVersionRequest struct { // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. + // `resource`. The size of the policy is limited to a few 10s of KB. An + // empty policy is a valid policy but certain Cloud Platform services + // (such as Projects) might reject them. 
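As a sketch of the read-modify-write cycle described above (assuming the generated client; resource, role, and member are placeholders), the policy is read at version 3, modified, and written back with its etag intact so a concurrent update fails instead of being silently overwritten:

package kmsexample

import (
	"context"

	cloudkms "google.golang.org/api/cloudkms/v1"
)

// addKeyRingBinding appends a binding to a KeyRing policy, relying on the etag
// returned by GetIamPolicy to guard against concurrent modification.
func addKeyRingBinding(ctx context.Context, svc *cloudkms.Service, resource, role, member string) error {
	policy, err := svc.Projects.Locations.KeyRings.GetIamPolicy(resource).
		OptionsRequestedPolicyVersion(3).Context(ctx).Do()
	if err != nil {
		return err
	}
	policy.Bindings = append(policy.Bindings, &cloudkms.Binding{
		Role:    role,
		Members: []string{member},
	})
	policy.Version = 3
	_, err = svc.Projects.Locations.KeyRings.SetIamPolicy(resource, &cloudkms.SetIamPolicyRequest{
		Policy: policy, // carries the etag obtained from GetIamPolicy
	}).Context(ctx).Do()
	return err
}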
Policy *Policy `json:"policy,omitempty"` // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the - // policy to modify. Only - // the fields in the mask will be modified. If no mask is provided, - // the - // following default mask is used: - // - // `paths: "bindings, etag" + // policy to modify. Only the fields in the mask will be modified. If no + // mask is provided, the following default mask is used: `paths: + // "bindings, etag" UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. "Policy") to @@ -2286,11 +2184,8 @@ func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. - // Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For - // more - // information see - // [IAM + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. For more information see [IAM // Overview](https://cloud.google.com/iam/docs/overview#permissions). Permissions []string `json:"permissions,omitempty"` @@ -2321,8 +2216,7 @@ func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. + // the caller is allowed. Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2384,17 +2278,14 @@ func (s *UpdateCryptoKeyPrimaryVersionRequest) MarshalJSON() ([]byte, error) { } // WrappingPublicKey: The public key component of the wrapping key. For -// details of the type of -// key this public key corresponds to, see the ImportMethod. +// details of the type of key this public key corresponds to, see the +// ImportMethod. type WrappingPublicKey struct { // Pem: The public key, encoded in PEM format. For more information, see - // the [RFC - // 7468](https://tools.ietf.org/html/rfc7468) sections for + // the [RFC 7468](https://tools.ietf.org/html/rfc7468) sections for // [General - // Considerations](https://tools.ietf.org/html/rfc7468#section-2 - // ) and - // [Textual Encoding of Subject Public Key - // Info] + // Considerations](https://tools.ietf.org/html/rfc7468#section-2) and + // [Textual Encoding of Subject Public Key Info] // (https://tools.ietf.org/html/rfc7468#section-13). Pem string `json:"pem,omitempty"` @@ -2476,7 +2367,7 @@ func (c *ProjectsLocationsGetCall) Header() http.Header { func (c *ProjectsLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2643,7 +2534,7 @@ func (c *ProjectsLocationsListCall) Header() http.Header { func (c *ProjectsLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2790,8 +2681,8 @@ func (r *ProjectsLocationsKeyRingsService) Create(parent string, keyring *KeyRin } // KeyRingId sets the optional parameter "keyRingId": Required. 
It must -// be unique within a location and match the regular -// expression `[a-zA-Z0-9_-]{1,63}` +// be unique within a location and match the regular expression +// `[a-zA-Z0-9_-]{1,63}` func (c *ProjectsLocationsKeyRingsCreateCall) KeyRingId(keyRingId string) *ProjectsLocationsKeyRingsCreateCall { c.urlParams_.Set("keyRingId", keyRingId) return c @@ -2824,7 +2715,7 @@ func (c *ProjectsLocationsKeyRingsCreateCall) Header() http.Header { func (c *ProjectsLocationsKeyRingsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2897,12 +2788,12 @@ func (c *ProjectsLocationsKeyRingsCreateCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "keyRingId": { - // "description": "Required. It must be unique within a location and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", + // "description": "Required. It must be unique within a location and match the regular expression `[a-zA-Z0-9_-]{1,63}`", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The resource name of the location associated with the\nKeyRings, in the format `projects/*/locations/*`.", + // "description": "Required. The resource name of the location associated with the KeyRings, in the format `projects/*/locations/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -2979,7 +2870,7 @@ func (c *ProjectsLocationsKeyRingsGetCall) Header() http.Header { func (c *ProjectsLocationsKeyRingsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3080,9 +2971,8 @@ type ProjectsLocationsKeyRingsGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy // set. func (r *ProjectsLocationsKeyRingsService) GetIamPolicy(resource string) *ProjectsLocationsKeyRingsGetIamPolicyCall { c := &ProjectsLocationsKeyRingsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -3092,24 +2982,14 @@ func (r *ProjectsLocationsKeyRingsService) GetIamPolicy(resource string) *Projec // OptionsRequestedPolicyVersion sets the optional parameter // "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. -// -// Requests for policies with any conditional bindings must specify -// version 3. -// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. -// -// To learn which resources support conditions in their IAM policies, -// see -// the -// [IAM -// documentation](https://cloud.google.com/iam/help/conditions/r -// esource-policies). +// returned. Valid values are 0, 1, and 3. Requests specifying an +// invalid value will be rejected. 
Requests for policies with any +// conditional bindings must specify version 3. Policies without any +// conditional bindings may specify any valid value or leave the field +// unset. To learn which resources support conditions in their IAM +// policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsLocationsKeyRingsGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -3152,7 +3032,7 @@ func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) Header() http.Header { func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3214,7 +3094,7 @@ func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:getIamPolicy", // "httpMethod": "GET", // "id": "cloudkms.projects.locations.keyRings.getIamPolicy", @@ -3223,13 +3103,13 @@ func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + // "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" // }, // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. 
See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", // "required": true, @@ -3267,45 +3147,36 @@ func (r *ProjectsLocationsKeyRingsService) List(parent string) *ProjectsLocation } // Filter sets the optional parameter "filter": Only include resources -// that match the filter in the response. For -// more information, see -// [Sorting and filtering -// list -// results](https://cloud.google.com/kms/docs/sorting-and-filtering) -// . +// that match the filter in the response. For more information, see +// [Sorting and filtering list +// results](https://cloud.google.com/kms/docs/sorting-and-filtering). func (c *ProjectsLocationsKeyRingsListCall) Filter(filter string) *ProjectsLocationsKeyRingsListCall { c.urlParams_.Set("filter", filter) return c } // OrderBy sets the optional parameter "orderBy": Specify how the -// results should be sorted. If not specified, the -// results will be sorted in the default order. For more information, -// see -// [Sorting and filtering -// list -// results](https://cloud.google.com/kms/docs/sorting-and-filtering) -// . +// results should be sorted. If not specified, the results will be +// sorted in the default order. For more information, see [Sorting and +// filtering list +// results](https://cloud.google.com/kms/docs/sorting-and-filtering). func (c *ProjectsLocationsKeyRingsListCall) OrderBy(orderBy string) *ProjectsLocationsKeyRingsListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageSize sets the optional parameter "pageSize": Optional limit on -// the number of KeyRings to include in the -// response. Further KeyRings can subsequently be obtained by -// including the ListKeyRingsResponse.next_page_token in a -// subsequent -// request. If unspecified, the server will pick an appropriate -// default. +// the number of KeyRings to include in the response. Further KeyRings +// can subsequently be obtained by including the +// ListKeyRingsResponse.next_page_token in a subsequent request. If +// unspecified, the server will pick an appropriate default. func (c *ProjectsLocationsKeyRingsListCall) PageSize(pageSize int64) *ProjectsLocationsKeyRingsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Optional -// pagination token, returned earlier -// via +// pagination token, returned earlier via // ListKeyRingsResponse.next_page_token. func (c *ProjectsLocationsKeyRingsListCall) PageToken(pageToken string) *ProjectsLocationsKeyRingsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -3349,7 +3220,7 @@ func (c *ProjectsLocationsKeyRingsListCall) Header() http.Header { func (c *ProjectsLocationsKeyRingsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3420,28 +3291,28 @@ func (c *ProjectsLocationsKeyRingsListCall) Do(opts ...googleapi.CallOption) (*L // ], // "parameters": { // "filter": { - // "description": "Optional. Only include resources that match the filter in the response. For\nmore information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", + // "description": "Optional. Only include resources that match the filter in the response. 
For more information, see [Sorting and filtering list results](https://cloud.google.com/kms/docs/sorting-and-filtering).", // "location": "query", // "type": "string" // }, // "orderBy": { - // "description": "Optional. Specify how the results should be sorted. If not specified, the\nresults will be sorted in the default order. For more information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", + // "description": "Optional. Specify how the results should be sorted. If not specified, the results will be sorted in the default order. For more information, see [Sorting and filtering list results](https://cloud.google.com/kms/docs/sorting-and-filtering).", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Optional. Optional limit on the number of KeyRings to include in the\nresponse. Further KeyRings can subsequently be obtained by\nincluding the ListKeyRingsResponse.next_page_token in a subsequent\nrequest. If unspecified, the server will pick an appropriate default.", + // "description": "Optional. Optional limit on the number of KeyRings to include in the response. Further KeyRings can subsequently be obtained by including the ListKeyRingsResponse.next_page_token in a subsequent request. If unspecified, the server will pick an appropriate default.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Optional. Optional pagination token, returned earlier via\nListKeyRingsResponse.next_page_token.", + // "description": "Optional. Optional pagination token, returned earlier via ListKeyRingsResponse.next_page_token.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The resource name of the location associated with the\nKeyRings, in the format `projects/*/locations/*`.", + // "description": "Required. The resource name of the location associated with the KeyRings, in the format `projects/*/locations/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -3493,11 +3364,8 @@ type ProjectsLocationsKeyRingsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -// -// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` -// errors. +// resource. Replaces any existing policy. Can return `NOT_FOUND`, +// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. func (r *ProjectsLocationsKeyRingsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsKeyRingsSetIamPolicyCall { c := &ProjectsLocationsKeyRingsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -3532,7 +3400,7 @@ func (c *ProjectsLocationsKeyRingsSetIamPolicyCall) Header() http.Header { func (c *ProjectsLocationsKeyRingsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3596,7 +3464,7 @@ func (c *ProjectsLocationsKeyRingsSetIamPolicyCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. 
Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:setIamPolicy", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.setIamPolicy", @@ -3605,7 +3473,7 @@ func (c *ProjectsLocationsKeyRingsSetIamPolicyCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", // "required": true, @@ -3639,16 +3507,11 @@ type ProjectsLocationsKeyRingsTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a `NOT_FOUND` error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. This +// operation may "fail open" without warning. func (r *ProjectsLocationsKeyRingsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsKeyRingsTestIamPermissionsCall { c := &ProjectsLocationsKeyRingsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -3683,7 +3546,7 @@ func (c *ProjectsLocationsKeyRingsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsLocationsKeyRingsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3747,7 +3610,7 @@ func (c *ProjectsLocationsKeyRingsTestIamPermissionsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. 
This operation may \"fail open\" without warning.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:testIamPermissions", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.testIamPermissions", @@ -3756,7 +3619,7 @@ func (c *ProjectsLocationsKeyRingsTestIamPermissionsCall) Do(opts ...googleapi.C // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", // "required": true, @@ -3789,11 +3652,8 @@ type ProjectsLocationsKeyRingsCryptoKeysCreateCall struct { header_ http.Header } -// Create: Create a new CryptoKey within a KeyRing. -// -// CryptoKey.purpose and -// CryptoKey.version_template.algorithm -// are required. +// Create: Create a new CryptoKey within a KeyRing. CryptoKey.purpose +// and CryptoKey.version_template.algorithm are required. func (r *ProjectsLocationsKeyRingsCryptoKeysService) Create(parent string, cryptokey *CryptoKey) *ProjectsLocationsKeyRingsCryptoKeysCreateCall { c := &ProjectsLocationsKeyRingsCryptoKeysCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -3802,8 +3662,8 @@ func (r *ProjectsLocationsKeyRingsCryptoKeysService) Create(parent string, crypt } // CryptoKeyId sets the optional parameter "cryptoKeyId": Required. It -// must be unique within a KeyRing and match the regular -// expression `[a-zA-Z0-9_-]{1,63}` +// must be unique within a KeyRing and match the regular expression +// `[a-zA-Z0-9_-]{1,63}` func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) CryptoKeyId(cryptoKeyId string) *ProjectsLocationsKeyRingsCryptoKeysCreateCall { c.urlParams_.Set("cryptoKeyId", cryptoKeyId) return c @@ -3811,11 +3671,9 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) CryptoKeyId(cryptoKeyId // SkipInitialVersionCreation sets the optional parameter // "skipInitialVersionCreation": If set to true, the request will create -// a CryptoKey without any -// CryptoKeyVersions. You must manually call -// CreateCryptoKeyVersion or -// ImportCryptoKeyVersion -// before you can use this CryptoKey. +// a CryptoKey without any CryptoKeyVersions. You must manually call +// CreateCryptoKeyVersion or ImportCryptoKeyVersion before you can use +// this CryptoKey. 
func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) SkipInitialVersionCreation(skipInitialVersionCreation bool) *ProjectsLocationsKeyRingsCryptoKeysCreateCall { c.urlParams_.Set("skipInitialVersionCreation", fmt.Sprint(skipInitialVersionCreation)) return c @@ -3848,7 +3706,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) Header() http.Header { func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3912,7 +3770,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Create a new CryptoKey within a KeyRing.\n\nCryptoKey.purpose and\nCryptoKey.version_template.algorithm\nare required.", + // "description": "Create a new CryptoKey within a KeyRing. CryptoKey.purpose and CryptoKey.version_template.algorithm are required.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.create", @@ -3921,19 +3779,19 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "cryptoKeyId": { - // "description": "Required. It must be unique within a KeyRing and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", + // "description": "Required. It must be unique within a KeyRing and match the regular expression `[a-zA-Z0-9_-]{1,63}`", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The name of the KeyRing associated with the\nCryptoKeys.", + // "description": "Required. The name of the KeyRing associated with the CryptoKeys.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", // "required": true, // "type": "string" // }, // "skipInitialVersionCreation": { - // "description": "If set to true, the request will create a CryptoKey without any\nCryptoKeyVersions. You must manually call\nCreateCryptoKeyVersion or\nImportCryptoKeyVersion\nbefore you can use this CryptoKey.", + // "description": "If set to true, the request will create a CryptoKey without any CryptoKeyVersions. You must manually call CreateCryptoKeyVersion or ImportCryptoKeyVersion before you can use this CryptoKey.", // "location": "query", // "type": "boolean" // } @@ -3965,8 +3823,7 @@ type ProjectsLocationsKeyRingsCryptoKeysDecryptCall struct { } // Decrypt: Decrypts data that was protected by Encrypt. The -// CryptoKey.purpose -// must be ENCRYPT_DECRYPT. +// CryptoKey.purpose must be ENCRYPT_DECRYPT. 
func (r *ProjectsLocationsKeyRingsCryptoKeysService) Decrypt(name string, decryptrequest *DecryptRequest) *ProjectsLocationsKeyRingsCryptoKeysDecryptCall { c := &ProjectsLocationsKeyRingsCryptoKeysDecryptCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4001,7 +3858,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysDecryptCall) Header() http.Header { func (c *ProjectsLocationsKeyRingsCryptoKeysDecryptCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4065,7 +3922,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysDecryptCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Decrypts data that was protected by Encrypt. The CryptoKey.purpose\nmust be ENCRYPT_DECRYPT.", + // "description": "Decrypts data that was protected by Encrypt. The CryptoKey.purpose must be ENCRYPT_DECRYPT.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:decrypt", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.decrypt", @@ -4074,7 +3931,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysDecryptCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the CryptoKey to use for decryption.\nThe server will choose the appropriate version.", + // "description": "Required. The resource name of the CryptoKey to use for decryption. The server will choose the appropriate version.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", // "required": true, @@ -4108,9 +3965,7 @@ type ProjectsLocationsKeyRingsCryptoKeysEncryptCall struct { } // Encrypt: Encrypts data, so that it can only be recovered by a call to -// Decrypt. -// The CryptoKey.purpose must be -// ENCRYPT_DECRYPT. +// Decrypt. The CryptoKey.purpose must be ENCRYPT_DECRYPT. func (r *ProjectsLocationsKeyRingsCryptoKeysService) Encrypt(name string, encryptrequest *EncryptRequest) *ProjectsLocationsKeyRingsCryptoKeysEncryptCall { c := &ProjectsLocationsKeyRingsCryptoKeysEncryptCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4145,7 +4000,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysEncryptCall) Header() http.Header { func (c *ProjectsLocationsKeyRingsCryptoKeysEncryptCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4209,7 +4064,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysEncryptCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Encrypts data, so that it can only be recovered by a call to Decrypt.\nThe CryptoKey.purpose must be\nENCRYPT_DECRYPT.", + // "description": "Encrypts data, so that it can only be recovered by a call to Decrypt. 
The CryptoKey.purpose must be ENCRYPT_DECRYPT.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:encrypt", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.encrypt", @@ -4218,7 +4073,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysEncryptCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the CryptoKey or CryptoKeyVersion\nto use for encryption.\n\nIf a CryptoKey is specified, the server will use its\nprimary version.", + // "description": "Required. The resource name of the CryptoKey or CryptoKeyVersion to use for encryption. If a CryptoKey is specified, the server will use its primary version.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/.*$", // "required": true, @@ -4251,8 +4106,8 @@ type ProjectsLocationsKeyRingsCryptoKeysGetCall struct { header_ http.Header } -// Get: Returns metadata for a given CryptoKey, as well as its -// primary CryptoKeyVersion. +// Get: Returns metadata for a given CryptoKey, as well as its primary +// CryptoKeyVersion. func (r *ProjectsLocationsKeyRingsCryptoKeysService) Get(name string) *ProjectsLocationsKeyRingsCryptoKeysGetCall { c := &ProjectsLocationsKeyRingsCryptoKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4296,7 +4151,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysGetCall) Header() http.Header { func (c *ProjectsLocationsKeyRingsCryptoKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4358,7 +4213,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysGetCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Returns metadata for a given CryptoKey, as well as its\nprimary CryptoKeyVersion.", + // "description": "Returns metadata for a given CryptoKey, as well as its primary CryptoKeyVersion.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}", // "httpMethod": "GET", // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.get", @@ -4397,9 +4252,8 @@ type ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy // set. func (r *ProjectsLocationsKeyRingsCryptoKeysService) GetIamPolicy(resource string) *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall { c := &ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -4409,24 +4263,14 @@ func (r *ProjectsLocationsKeyRingsCryptoKeysService) GetIamPolicy(resource strin // OptionsRequestedPolicyVersion sets the optional parameter // "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. -// -// Requests for policies with any conditional bindings must specify -// version 3. 
-// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. -// -// To learn which resources support conditions in their IAM policies, -// see -// the -// [IAM -// documentation](https://cloud.google.com/iam/help/conditions/r -// esource-policies). +// returned. Valid values are 0, 1, and 3. Requests specifying an +// invalid value will be rejected. Requests for policies with any +// conditional bindings must specify version 3. Policies without any +// conditional bindings may specify any valid value or leave the field +// unset. To learn which resources support conditions in their IAM +// policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -4469,7 +4313,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) Header() http.Head func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4531,7 +4375,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) Do(opts ...googlea } return ret, nil // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:getIamPolicy", // "httpMethod": "GET", // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.getIamPolicy", @@ -4540,13 +4384,13 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) Do(opts ...googlea // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + // "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. 
To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" // }, // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", // "required": true, @@ -4584,46 +4428,36 @@ func (r *ProjectsLocationsKeyRingsCryptoKeysService) List(parent string) *Projec } // Filter sets the optional parameter "filter": Only include resources -// that match the filter in the response. For -// more information, see -// [Sorting and filtering -// list -// results](https://cloud.google.com/kms/docs/sorting-and-filtering) -// . +// that match the filter in the response. For more information, see +// [Sorting and filtering list +// results](https://cloud.google.com/kms/docs/sorting-and-filtering). func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) Filter(filter string) *ProjectsLocationsKeyRingsCryptoKeysListCall { c.urlParams_.Set("filter", filter) return c } // OrderBy sets the optional parameter "orderBy": Specify how the -// results should be sorted. If not specified, the -// results will be sorted in the default order. For more information, -// see -// [Sorting and filtering -// list -// results](https://cloud.google.com/kms/docs/sorting-and-filtering) -// . +// results should be sorted. If not specified, the results will be +// sorted in the default order. For more information, see [Sorting and +// filtering list +// results](https://cloud.google.com/kms/docs/sorting-and-filtering). func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) OrderBy(orderBy string) *ProjectsLocationsKeyRingsCryptoKeysListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageSize sets the optional parameter "pageSize": Optional limit on -// the number of CryptoKeys to include in the -// response. Further CryptoKeys can subsequently be obtained -// by -// including the ListCryptoKeysResponse.next_page_token in a -// subsequent -// request. If unspecified, the server will pick an appropriate -// default. +// the number of CryptoKeys to include in the response. Further +// CryptoKeys can subsequently be obtained by including the +// ListCryptoKeysResponse.next_page_token in a subsequent request. If +// unspecified, the server will pick an appropriate default. func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) PageSize(pageSize int64) *ProjectsLocationsKeyRingsCryptoKeysListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Optional -// pagination token, returned earlier -// via +// pagination token, returned earlier via // ListCryptoKeysResponse.next_page_token. func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) PageToken(pageToken string) *ProjectsLocationsKeyRingsCryptoKeysListCall { c.urlParams_.Set("pageToken", pageToken) @@ -4634,8 +4468,10 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) PageToken(pageToken string // the primary version to include in the response. 
// // Possible values: -// "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED" -// "FULL" +// "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED" - Default view for each +// CryptoKeyVersion. Does not include the attestation field. +// "FULL" - Provides all fields in each CryptoKeyVersion, including +// the attestation. func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) VersionView(versionView string) *ProjectsLocationsKeyRingsCryptoKeysListCall { c.urlParams_.Set("versionView", versionView) return c @@ -4678,7 +4514,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) Header() http.Header { func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4749,28 +4585,28 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) Do(opts ...googleapi.CallO // ], // "parameters": { // "filter": { - // "description": "Optional. Only include resources that match the filter in the response. For\nmore information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", + // "description": "Optional. Only include resources that match the filter in the response. For more information, see [Sorting and filtering list results](https://cloud.google.com/kms/docs/sorting-and-filtering).", // "location": "query", // "type": "string" // }, // "orderBy": { - // "description": "Optional. Specify how the results should be sorted. If not specified, the\nresults will be sorted in the default order. For more information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", + // "description": "Optional. Specify how the results should be sorted. If not specified, the results will be sorted in the default order. For more information, see [Sorting and filtering list results](https://cloud.google.com/kms/docs/sorting-and-filtering).", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Optional. Optional limit on the number of CryptoKeys to include in the\nresponse. Further CryptoKeys can subsequently be obtained by\nincluding the ListCryptoKeysResponse.next_page_token in a subsequent\nrequest. If unspecified, the server will pick an appropriate default.", + // "description": "Optional. Optional limit on the number of CryptoKeys to include in the response. Further CryptoKeys can subsequently be obtained by including the ListCryptoKeysResponse.next_page_token in a subsequent request. If unspecified, the server will pick an appropriate default.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Optional. Optional pagination token, returned earlier via\nListCryptoKeysResponse.next_page_token.", + // "description": "Optional. Optional pagination token, returned earlier via ListCryptoKeysResponse.next_page_token.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The resource name of the KeyRing to list, in the format\n`projects/*/locations/*/keyRings/*`.", + // "description": "Required. 
The resource name of the KeyRing to list, in the format `projects/*/locations/*/keyRings/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", // "required": true, @@ -4782,6 +4618,10 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) Do(opts ...googleapi.CallO // "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED", // "FULL" // ], + // "enumDescriptions": [ + // "Default view for each CryptoKeyVersion. Does not include the attestation field.", + // "Provides all fields in each CryptoKeyVersion, including the attestation." + // ], // "location": "query", // "type": "string" // } @@ -4872,7 +4712,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysPatchCall) Header() http.Header { func (c *ProjectsLocationsKeyRingsCryptoKeysPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4945,7 +4785,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysPatchCall) Do(opts ...googleapi.Call // ], // "parameters": { // "name": { - // "description": "Output only. The resource name for this CryptoKey in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", + // "description": "Output only. The resource name for this CryptoKey in the format `projects/*/locations/*/keyRings/*/cryptoKeys/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", // "required": true, @@ -4985,11 +4825,8 @@ type ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -// -// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` -// errors. +// resource. Replaces any existing policy. Can return `NOT_FOUND`, +// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. func (r *ProjectsLocationsKeyRingsCryptoKeysService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall { c := &ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5024,7 +4861,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall) Header() http.Head func (c *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5088,7 +4925,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall) Do(opts ...googlea } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. 
Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:setIamPolicy", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.setIamPolicy", @@ -5097,7 +4934,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall) Do(opts ...googlea // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", // "required": true, @@ -5131,16 +4968,11 @@ type ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a `NOT_FOUND` error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. This +// operation may "fail open" without warning. func (r *ProjectsLocationsKeyRingsCryptoKeysService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall { c := &ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5175,7 +5007,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall) Header() htt func (c *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5239,7 +5071,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall) Do(opts ...g } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. 
This operation may \"fail open\" without warning.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:testIamPermissions", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.testIamPermissions", @@ -5248,7 +5080,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall) Do(opts ...g // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", // "required": true, @@ -5282,9 +5114,7 @@ type ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall struct { } // UpdatePrimaryVersion: Update the version of a CryptoKey that will be -// used in Encrypt. -// -// Returns an error if called on an asymmetric key. +// used in Encrypt. Returns an error if called on an asymmetric key. func (r *ProjectsLocationsKeyRingsCryptoKeysService) UpdatePrimaryVersion(name string, updatecryptokeyprimaryversionrequest *UpdateCryptoKeyPrimaryVersionRequest) *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall { c := &ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5319,7 +5149,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall) Header() h func (c *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5383,7 +5213,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall) Do(opts .. } return ret, nil // { - // "description": "Update the version of a CryptoKey that will be used in Encrypt.\n\nReturns an error if called on an asymmetric key.", + // "description": "Update the version of a CryptoKey that will be used in Encrypt. Returns an error if called on an asymmetric key.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:updatePrimaryVersion", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.updatePrimaryVersion", @@ -5426,9 +5256,7 @@ type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricDecryptCall s } // AsymmetricDecrypt: Decrypts data that was encrypted with a public key -// retrieved from -// GetPublicKey corresponding to a CryptoKeyVersion -// with +// retrieved from GetPublicKey corresponding to a CryptoKeyVersion with // CryptoKey.purpose ASYMMETRIC_DECRYPT. 
func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) AsymmetricDecrypt(name string, asymmetricdecryptrequest *AsymmetricDecryptRequest) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricDecryptCall { c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricDecryptCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -5464,7 +5292,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricDecryptCa func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricDecryptCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5528,7 +5356,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricDecryptCa } return ret, nil // { - // "description": "Decrypts data that was encrypted with a public key retrieved from\nGetPublicKey corresponding to a CryptoKeyVersion with\nCryptoKey.purpose ASYMMETRIC_DECRYPT.", + // "description": "Decrypts data that was encrypted with a public key retrieved from GetPublicKey corresponding to a CryptoKeyVersion with CryptoKey.purpose ASYMMETRIC_DECRYPT.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:asymmetricDecrypt", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.asymmetricDecrypt", @@ -5537,7 +5365,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricDecryptCa // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the CryptoKeyVersion to use for\ndecryption.", + // "description": "Required. The resource name of the CryptoKeyVersion to use for decryption.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", // "required": true, @@ -5571,10 +5399,8 @@ type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignCall stru } // AsymmetricSign: Signs data using a CryptoKeyVersion with -// CryptoKey.purpose -// ASYMMETRIC_SIGN, producing a signature that can be verified with the -// public -// key retrieved from GetPublicKey. +// CryptoKey.purpose ASYMMETRIC_SIGN, producing a signature that can be +// verified with the public key retrieved from GetPublicKey. 
func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) AsymmetricSign(name string, asymmetricsignrequest *AsymmetricSignRequest) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignCall { c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5609,7 +5435,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignCall) func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5673,7 +5499,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignCall) } return ret, nil // { - // "description": "Signs data using a CryptoKeyVersion with CryptoKey.purpose\nASYMMETRIC_SIGN, producing a signature that can be verified with the public\nkey retrieved from GetPublicKey.", + // "description": "Signs data using a CryptoKeyVersion with CryptoKey.purpose ASYMMETRIC_SIGN, producing a signature that can be verified with the public key retrieved from GetPublicKey.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:asymmetricSign", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.asymmetricSign", @@ -5715,10 +5541,8 @@ type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall struct { header_ http.Header } -// Create: Create a new CryptoKeyVersion in a CryptoKey. -// -// The server will assign the next sequential id. If unset, -// state will be set to +// Create: Create a new CryptoKeyVersion in a CryptoKey. The server will +// assign the next sequential id. If unset, state will be set to // ENABLED. func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) Create(parent string, cryptokeyversion *CryptoKeyVersion) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall { c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -5754,7 +5578,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall) Header( func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5818,7 +5642,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall) Do(opts } return ret, nil // { - // "description": "Create a new CryptoKeyVersion in a CryptoKey.\n\nThe server will assign the next sequential id. If unset,\nstate will be set to\nENABLED.", + // "description": "Create a new CryptoKeyVersion in a CryptoKey. The server will assign the next sequential id. 
If unset, state will be set to ENABLED.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.create", @@ -5827,7 +5651,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall) Do(opts // ], // "parameters": { // "parent": { - // "description": "Required. The name of the CryptoKey associated with\nthe CryptoKeyVersions.", + // "description": "Required. The name of the CryptoKey associated with the CryptoKeyVersions.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", // "required": true, @@ -5860,19 +5684,13 @@ type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall struct { header_ http.Header } -// Destroy: Schedule a CryptoKeyVersion for destruction. -// -// Upon calling this method, CryptoKeyVersion.state will be set -// to -// DESTROY_SCHEDULED -// and destroy_time will be set to a time 24 -// hours in the future, at which point the state -// will be changed to -// DESTROYED, and the key -// material will be irrevocably destroyed. -// -// Before the destroy_time is reached, -// RestoreCryptoKeyVersion may be called to reverse the process. +// Destroy: Schedule a CryptoKeyVersion for destruction. Upon calling +// this method, CryptoKeyVersion.state will be set to DESTROY_SCHEDULED +// and destroy_time will be set to a time 24 hours in the future, at +// which point the state will be changed to DESTROYED, and the key +// material will be irrevocably destroyed. Before the destroy_time is +// reached, RestoreCryptoKeyVersion may be called to reverse the +// process. func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) Destroy(name string, destroycryptokeyversionrequest *DestroyCryptoKeyVersionRequest) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall { c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5907,7 +5725,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall) Header func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5971,7 +5789,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall) Do(opt } return ret, nil // { - // "description": "Schedule a CryptoKeyVersion for destruction.\n\nUpon calling this method, CryptoKeyVersion.state will be set to\nDESTROY_SCHEDULED\nand destroy_time will be set to a time 24\nhours in the future, at which point the state\nwill be changed to\nDESTROYED, and the key\nmaterial will be irrevocably destroyed.\n\nBefore the destroy_time is reached,\nRestoreCryptoKeyVersion may be called to reverse the process.", + // "description": "Schedule a CryptoKeyVersion for destruction. Upon calling this method, CryptoKeyVersion.state will be set to DESTROY_SCHEDULED and destroy_time will be set to a time 24 hours in the future, at which point the state will be changed to DESTROYED, and the key material will be irrevocably destroyed. 
Before the destroy_time is reached, RestoreCryptoKeyVersion may be called to reverse the process.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:destroy", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.destroy", @@ -6057,7 +5875,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall) Header() h func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6159,10 +5977,7 @@ type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall struct } // GetPublicKey: Returns the public key for the given CryptoKeyVersion. -// The -// CryptoKey.purpose must be -// ASYMMETRIC_SIGN or -// ASYMMETRIC_DECRYPT. +// The CryptoKey.purpose must be ASYMMETRIC_SIGN or ASYMMETRIC_DECRYPT. func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) GetPublicKey(name string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall { c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -6206,7 +6021,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall) H func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6268,7 +6083,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall) D } return ret, nil // { - // "description": "Returns the public key for the given CryptoKeyVersion. The\nCryptoKey.purpose must be\nASYMMETRIC_SIGN or\nASYMMETRIC_DECRYPT.", + // "description": "Returns the public key for the given CryptoKeyVersion. The CryptoKey.purpose must be ASYMMETRIC_SIGN or ASYMMETRIC_DECRYPT.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}/publicKey", // "httpMethod": "GET", // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.getPublicKey", @@ -6277,7 +6092,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall) D // ], // "parameters": { // "name": { - // "description": "Required. The name of the CryptoKeyVersion public key to\nget.", + // "description": "Required. The name of the CryptoKeyVersion public key to get.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", // "required": true, @@ -6308,12 +6123,8 @@ type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsImportCall struct { } // Import: Imports a new CryptoKeyVersion into an existing CryptoKey -// using the -// wrapped key material provided in the request. -// -// The version ID will be assigned the next sequential id within -// the -// CryptoKey. +// using the wrapped key material provided in the request. 
The version +// ID will be assigned the next sequential id within the CryptoKey. func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) Import(parent string, importcryptokeyversionrequest *ImportCryptoKeyVersionRequest) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsImportCall { c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsImportCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6348,7 +6159,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsImportCall) Header( func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsImportCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6412,7 +6223,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsImportCall) Do(opts } return ret, nil // { - // "description": "Imports a new CryptoKeyVersion into an existing CryptoKey using the\nwrapped key material provided in the request.\n\nThe version ID will be assigned the next sequential id within the\nCryptoKey.", + // "description": "Imports a new CryptoKeyVersion into an existing CryptoKey using the wrapped key material provided in the request. The version ID will be assigned the next sequential id within the CryptoKey.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions:import", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.import", @@ -6421,7 +6232,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsImportCall) Do(opts // ], // "parameters": { // "parent": { - // "description": "Required. The name of the CryptoKey to\nbe imported into.", + // "description": "Required. The name of the CryptoKey to be imported into.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", // "required": true, @@ -6462,46 +6273,36 @@ func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) List(paren } // Filter sets the optional parameter "filter": Only include resources -// that match the filter in the response. For -// more information, see -// [Sorting and filtering -// list -// results](https://cloud.google.com/kms/docs/sorting-and-filtering) -// . +// that match the filter in the response. For more information, see +// [Sorting and filtering list +// results](https://cloud.google.com/kms/docs/sorting-and-filtering). func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) Filter(filter string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { c.urlParams_.Set("filter", filter) return c } // OrderBy sets the optional parameter "orderBy": Specify how the -// results should be sorted. If not specified, the -// results will be sorted in the default order. For more information, -// see -// [Sorting and filtering -// list -// results](https://cloud.google.com/kms/docs/sorting-and-filtering) -// . +// results should be sorted. If not specified, the results will be +// sorted in the default order. For more information, see [Sorting and +// filtering list +// results](https://cloud.google.com/kms/docs/sorting-and-filtering). 
func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) OrderBy(orderBy string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageSize sets the optional parameter "pageSize": Optional limit on -// the number of CryptoKeyVersions to -// include in the response. Further CryptoKeyVersions can -// subsequently be obtained by including -// the +// the number of CryptoKeyVersions to include in the response. Further +// CryptoKeyVersions can subsequently be obtained by including the // ListCryptoKeyVersionsResponse.next_page_token in a subsequent -// request. -// If unspecified, the server will pick an appropriate default. +// request. If unspecified, the server will pick an appropriate default. func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) PageSize(pageSize int64) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Optional -// pagination token, returned earlier -// via +// pagination token, returned earlier via // ListCryptoKeyVersionsResponse.next_page_token. func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) PageToken(pageToken string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -6512,8 +6313,10 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) PageToken // response. // // Possible values: -// "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED" -// "FULL" +// "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED" - Default view for each +// CryptoKeyVersion. Does not include the attestation field. +// "FULL" - Provides all fields in each CryptoKeyVersion, including +// the attestation. func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) View(view string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { c.urlParams_.Set("view", view) return c @@ -6556,7 +6359,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) Header() func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6627,28 +6430,28 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) Do(opts . // ], // "parameters": { // "filter": { - // "description": "Optional. Only include resources that match the filter in the response. For\nmore information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", + // "description": "Optional. Only include resources that match the filter in the response. For more information, see [Sorting and filtering list results](https://cloud.google.com/kms/docs/sorting-and-filtering).", // "location": "query", // "type": "string" // }, // "orderBy": { - // "description": "Optional. Specify how the results should be sorted. If not specified, the\nresults will be sorted in the default order. For more information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", + // "description": "Optional. Specify how the results should be sorted. If not specified, the results will be sorted in the default order. 
For more information, see [Sorting and filtering list results](https://cloud.google.com/kms/docs/sorting-and-filtering).", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Optional. Optional limit on the number of CryptoKeyVersions to\ninclude in the response. Further CryptoKeyVersions can\nsubsequently be obtained by including the\nListCryptoKeyVersionsResponse.next_page_token in a subsequent request.\nIf unspecified, the server will pick an appropriate default.", + // "description": "Optional. Optional limit on the number of CryptoKeyVersions to include in the response. Further CryptoKeyVersions can subsequently be obtained by including the ListCryptoKeyVersionsResponse.next_page_token in a subsequent request. If unspecified, the server will pick an appropriate default.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Optional. Optional pagination token, returned earlier via\nListCryptoKeyVersionsResponse.next_page_token.", + // "description": "Optional. Optional pagination token, returned earlier via ListCryptoKeyVersionsResponse.next_page_token.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The resource name of the CryptoKey to list, in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", + // "description": "Required. The resource name of the CryptoKey to list, in the format `projects/*/locations/*/keyRings/*/cryptoKeys/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", // "required": true, @@ -6660,6 +6463,10 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) Do(opts . // "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED", // "FULL" // ], + // "enumDescriptions": [ + // "Default view for each CryptoKeyVersion. Does not include the attestation field.", + // "Provides all fields in each CryptoKeyVersion, including the attestation." + // ], // "location": "query", // "type": "string" // } @@ -6708,14 +6515,10 @@ type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall struct { header_ http.Header } -// Patch: Update a CryptoKeyVersion's metadata. -// -// state may be changed between -// ENABLED and -// DISABLED using this -// method. See DestroyCryptoKeyVersion and RestoreCryptoKeyVersion -// to -// move between other states. +// Patch: Update a CryptoKeyVersion's metadata. state may be changed +// between ENABLED and DISABLED using this method. See +// DestroyCryptoKeyVersion and RestoreCryptoKeyVersion to move between +// other states. 
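A hedged sketch of the Patch flow the rewritten comment above describes (moving state between ENABLED and DISABLED). It reuses the *cloudkms.Service from the earlier sketch; the version name is caller-supplied, and UpdateMask is assumed to be the generated setter for the method's updateMask query parameter.

// disableVersion flips a CryptoKeyVersion to DISABLED via Patch.
// Imports as in the earlier sketch (google.golang.org/api/cloudkms/v1).
func disableVersion(svc *cloudkms.Service, name string) (*cloudkms.CryptoKeyVersion, error) {
	return svc.Projects.Locations.KeyRings.CryptoKeys.CryptoKeyVersions.
		Patch(name, &cloudkms.CryptoKeyVersion{State: "DISABLED"}).
		UpdateMask("state"). // only update the state field (assumed setter name)
		Do()
}
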
func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) Patch(name string, cryptokeyversion *CryptoKeyVersion) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall { c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -6757,7 +6560,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) Header() func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6821,7 +6624,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) Do(opts } return ret, nil // { - // "description": "Update a CryptoKeyVersion's metadata.\n\nstate may be changed between\nENABLED and\nDISABLED using this\nmethod. See DestroyCryptoKeyVersion and RestoreCryptoKeyVersion to\nmove between other states.", + // "description": "Update a CryptoKeyVersion's metadata. state may be changed between ENABLED and DISABLED using this method. See DestroyCryptoKeyVersion and RestoreCryptoKeyVersion to move between other states.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}", // "httpMethod": "PATCH", // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.patch", @@ -6830,7 +6633,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) Do(opts // ], // "parameters": { // "name": { - // "description": "Output only. The resource name for this CryptoKeyVersion in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.", + // "description": "Output only. The resource name for this CryptoKeyVersion in the format `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", // "required": true, @@ -6869,14 +6672,9 @@ type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall struct { header_ http.Header } -// Restore: Restore a CryptoKeyVersion in -// the -// DESTROY_SCHEDULED -// state. -// -// Upon restoration of the CryptoKeyVersion, state -// will be set to DISABLED, -// and destroy_time will be cleared. +// Restore: Restore a CryptoKeyVersion in the DESTROY_SCHEDULED state. +// Upon restoration of the CryptoKeyVersion, state will be set to +// DISABLED, and destroy_time will be cleared. 
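The Restore signature in this hunk takes the resource name plus an empty request body, so a call is a one-liner; a small sketch, again reusing the *cloudkms.Service from the first example.

// restoreVersion moves a DESTROY_SCHEDULED CryptoKeyVersion back to DISABLED,
// per the rewritten Restore doc comment above.
func restoreVersion(svc *cloudkms.Service, name string) (*cloudkms.CryptoKeyVersion, error) {
	return svc.Projects.Locations.KeyRings.CryptoKeys.CryptoKeyVersions.
		Restore(name, &cloudkms.RestoreCryptoKeyVersionRequest{}).
		Do()
}
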
func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) Restore(name string, restorecryptokeyversionrequest *RestoreCryptoKeyVersionRequest) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall { c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -6911,7 +6709,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall) Header func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6975,7 +6773,7 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall) Do(opt } return ret, nil // { - // "description": "Restore a CryptoKeyVersion in the\nDESTROY_SCHEDULED\nstate.\n\nUpon restoration of the CryptoKeyVersion, state\nwill be set to DISABLED,\nand destroy_time will be cleared.", + // "description": "Restore a CryptoKeyVersion in the DESTROY_SCHEDULED state. Upon restoration of the CryptoKeyVersion, state will be set to DISABLED, and destroy_time will be cleared.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:restore", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.restore", @@ -7017,9 +6815,7 @@ type ProjectsLocationsKeyRingsImportJobsCreateCall struct { header_ http.Header } -// Create: Create a new ImportJob within a -// KeyRing. -// +// Create: Create a new ImportJob within a KeyRing. // ImportJob.import_method is required. func (r *ProjectsLocationsKeyRingsImportJobsService) Create(parent string, importjob *ImportJob) *ProjectsLocationsKeyRingsImportJobsCreateCall { c := &ProjectsLocationsKeyRingsImportJobsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -7029,8 +6825,8 @@ func (r *ProjectsLocationsKeyRingsImportJobsService) Create(parent string, impor } // ImportJobId sets the optional parameter "importJobId": Required. It -// must be unique within a KeyRing and match the regular -// expression `[a-zA-Z0-9_-]{1,63}` +// must be unique within a KeyRing and match the regular expression +// `[a-zA-Z0-9_-]{1,63}` func (c *ProjectsLocationsKeyRingsImportJobsCreateCall) ImportJobId(importJobId string) *ProjectsLocationsKeyRingsImportJobsCreateCall { c.urlParams_.Set("importJobId", importJobId) return c @@ -7063,7 +6859,7 @@ func (c *ProjectsLocationsKeyRingsImportJobsCreateCall) Header() http.Header { func (c *ProjectsLocationsKeyRingsImportJobsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7127,7 +6923,7 @@ func (c *ProjectsLocationsKeyRingsImportJobsCreateCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Create a new ImportJob within a KeyRing.\n\nImportJob.import_method is required.", + // "description": "Create a new ImportJob within a KeyRing. 
ImportJob.import_method is required.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.importJobs.create", @@ -7136,12 +6932,12 @@ func (c *ProjectsLocationsKeyRingsImportJobsCreateCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "importJobId": { - // "description": "Required. It must be unique within a KeyRing and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", + // "description": "Required. It must be unique within a KeyRing and match the regular expression `[a-zA-Z0-9_-]{1,63}`", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The name of the KeyRing associated with the\nImportJobs.", + // "description": "Required. The name of the KeyRing associated with the ImportJobs.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", // "required": true, @@ -7218,7 +7014,7 @@ func (c *ProjectsLocationsKeyRingsImportJobsGetCall) Header() http.Header { func (c *ProjectsLocationsKeyRingsImportJobsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7319,9 +7115,8 @@ type ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy // set. func (r *ProjectsLocationsKeyRingsImportJobsService) GetIamPolicy(resource string) *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall { c := &ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -7331,24 +7126,14 @@ func (r *ProjectsLocationsKeyRingsImportJobsService) GetIamPolicy(resource strin // OptionsRequestedPolicyVersion sets the optional parameter // "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. -// -// Requests for policies with any conditional bindings must specify -// version 3. -// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. -// -// To learn which resources support conditions in their IAM policies, -// see -// the -// [IAM -// documentation](https://cloud.google.com/iam/help/conditions/r -// esource-policies). +// returned. Valid values are 0, 1, and 3. Requests specifying an +// invalid value will be rejected. Requests for policies with any +// conditional bindings must specify version 3. Policies without any +// conditional bindings may specify any valid value or leave the field +// unset. To learn which resources support conditions in their IAM +// policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). 
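A sketch of the GetIamPolicy call above, requesting policy format version 3 so that conditional bindings are returned, as the rewritten OptionsRequestedPolicyVersion comment explains. svc is the *cloudkms.Service from the first sketch and resource is a full importJobs resource name supplied by the caller.

// importJobPolicy fetches the IAM policy set on an ImportJob.
func importJobPolicy(svc *cloudkms.Service, resource string) (*cloudkms.Policy, error) {
	return svc.Projects.Locations.KeyRings.ImportJobs.
		GetIamPolicy(resource).
		OptionsRequestedPolicyVersion(3). // needed for conditional bindings
		Do()
}
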
func (c *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -7391,7 +7176,7 @@ func (c *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall) Header() http.Head func (c *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7453,7 +7238,7 @@ func (c *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall) Do(opts ...googlea } return ret, nil // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs/{importJobsId}:getIamPolicy", // "httpMethod": "GET", // "id": "cloudkms.projects.locations.keyRings.importJobs.getIamPolicy", @@ -7462,13 +7247,13 @@ func (c *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall) Do(opts ...googlea // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + // "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" // }, // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/importJobs/[^/]+$", // "required": true, @@ -7506,45 +7291,36 @@ func (r *ProjectsLocationsKeyRingsImportJobsService) List(parent string) *Projec } // Filter sets the optional parameter "filter": Only include resources -// that match the filter in the response. For -// more information, see -// [Sorting and filtering -// list -// results](https://cloud.google.com/kms/docs/sorting-and-filtering) -// . +// that match the filter in the response. 
For more information, see +// [Sorting and filtering list +// results](https://cloud.google.com/kms/docs/sorting-and-filtering). func (c *ProjectsLocationsKeyRingsImportJobsListCall) Filter(filter string) *ProjectsLocationsKeyRingsImportJobsListCall { c.urlParams_.Set("filter", filter) return c } // OrderBy sets the optional parameter "orderBy": Specify how the -// results should be sorted. If not specified, the -// results will be sorted in the default order. For more information, -// see -// [Sorting and filtering -// list -// results](https://cloud.google.com/kms/docs/sorting-and-filtering) -// . +// results should be sorted. If not specified, the results will be +// sorted in the default order. For more information, see [Sorting and +// filtering list +// results](https://cloud.google.com/kms/docs/sorting-and-filtering). func (c *ProjectsLocationsKeyRingsImportJobsListCall) OrderBy(orderBy string) *ProjectsLocationsKeyRingsImportJobsListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageSize sets the optional parameter "pageSize": Optional limit on -// the number of ImportJobs to include in the -// response. Further ImportJobs can subsequently be obtained -// by -// including the ListImportJobsResponse.next_page_token in a -// subsequent -// request. If unspecified, the server will pick an appropriate default. +// the number of ImportJobs to include in the response. Further +// ImportJobs can subsequently be obtained by including the +// ListImportJobsResponse.next_page_token in a subsequent request. If +// unspecified, the server will pick an appropriate default. func (c *ProjectsLocationsKeyRingsImportJobsListCall) PageSize(pageSize int64) *ProjectsLocationsKeyRingsImportJobsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Optional -// pagination token, returned earlier -// via +// pagination token, returned earlier via // ListImportJobsResponse.next_page_token. func (c *ProjectsLocationsKeyRingsImportJobsListCall) PageToken(pageToken string) *ProjectsLocationsKeyRingsImportJobsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -7588,7 +7364,7 @@ func (c *ProjectsLocationsKeyRingsImportJobsListCall) Header() http.Header { func (c *ProjectsLocationsKeyRingsImportJobsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7659,28 +7435,28 @@ func (c *ProjectsLocationsKeyRingsImportJobsListCall) Do(opts ...googleapi.CallO // ], // "parameters": { // "filter": { - // "description": "Optional. Only include resources that match the filter in the response. For\nmore information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", + // "description": "Optional. Only include resources that match the filter in the response. For more information, see [Sorting and filtering list results](https://cloud.google.com/kms/docs/sorting-and-filtering).", // "location": "query", // "type": "string" // }, // "orderBy": { - // "description": "Optional. Specify how the results should be sorted. If not specified, the\nresults will be sorted in the default order. 
For more information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", + // "description": "Optional. Specify how the results should be sorted. If not specified, the results will be sorted in the default order. For more information, see [Sorting and filtering list results](https://cloud.google.com/kms/docs/sorting-and-filtering).", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Optional. Optional limit on the number of ImportJobs to include in the\nresponse. Further ImportJobs can subsequently be obtained by\nincluding the ListImportJobsResponse.next_page_token in a subsequent\nrequest. If unspecified, the server will pick an appropriate default.", + // "description": "Optional. Optional limit on the number of ImportJobs to include in the response. Further ImportJobs can subsequently be obtained by including the ListImportJobsResponse.next_page_token in a subsequent request. If unspecified, the server will pick an appropriate default.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Optional. Optional pagination token, returned earlier via\nListImportJobsResponse.next_page_token.", + // "description": "Optional. Optional pagination token, returned earlier via ListImportJobsResponse.next_page_token.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The resource name of the KeyRing to list, in the format\n`projects/*/locations/*/keyRings/*`.", + // "description": "Required. The resource name of the KeyRing to list, in the format `projects/*/locations/*/keyRings/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", // "required": true, @@ -7732,11 +7508,8 @@ type ProjectsLocationsKeyRingsImportJobsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -// -// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` -// errors. +// resource. Replaces any existing policy. Can return `NOT_FOUND`, +// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. func (r *ProjectsLocationsKeyRingsImportJobsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsKeyRingsImportJobsSetIamPolicyCall { c := &ProjectsLocationsKeyRingsImportJobsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7771,7 +7544,7 @@ func (c *ProjectsLocationsKeyRingsImportJobsSetIamPolicyCall) Header() http.Head func (c *ProjectsLocationsKeyRingsImportJobsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7835,7 +7608,7 @@ func (c *ProjectsLocationsKeyRingsImportJobsSetIamPolicyCall) Do(opts ...googlea } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. 
Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs/{importJobsId}:setIamPolicy", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.importJobs.setIamPolicy", @@ -7844,7 +7617,7 @@ func (c *ProjectsLocationsKeyRingsImportJobsSetIamPolicyCall) Do(opts ...googlea // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/importJobs/[^/]+$", // "required": true, @@ -7878,16 +7651,11 @@ type ProjectsLocationsKeyRingsImportJobsTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a `NOT_FOUND` error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. This +// operation may "fail open" without warning. func (r *ProjectsLocationsKeyRingsImportJobsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsKeyRingsImportJobsTestIamPermissionsCall { c := &ProjectsLocationsKeyRingsImportJobsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7922,7 +7690,7 @@ func (c *ProjectsLocationsKeyRingsImportJobsTestIamPermissionsCall) Header() htt func (c *ProjectsLocationsKeyRingsImportJobsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7986,7 +7754,7 @@ func (c *ProjectsLocationsKeyRingsImportJobsTestIamPermissionsCall) Do(opts ...g } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. 
This operation may \"fail open\" without warning.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs/{importJobsId}:testIamPermissions", // "httpMethod": "POST", // "id": "cloudkms.projects.locations.keyRings.importJobs.testIamPermissions", @@ -7995,7 +7763,7 @@ func (c *ProjectsLocationsKeyRingsImportJobsTestIamPermissionsCall) Do(opts ...g // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/importJobs/[^/]+$", // "required": true, diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json index f0f8e54086f..9623b1b50e2 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json @@ -139,7 +139,7 @@ ] }, "getEffectiveOrgPolicy": { - "description": "Gets the effective `Policy` on a resource. This is the result of merging\n`Policies` in the resource hierarchy. The returned `Policy` will not have\nan `etag`set because it is a computed `Policy` across multiple resources.\nSubtrees of Resource Manager resource hierarchy with 'under:' prefix will\nnot be expanded.", + "description": "Gets the effective `Policy` on a resource. This is the result of merging `Policies` in the resource hierarchy. The returned `Policy` will not have an `etag`set because it is a computed `Policy` across multiple resources. Subtrees of Resource Manager resource hierarchy with 'under:' prefix will not be expanded.", "flatPath": "v1/folders/{foldersId}:getEffectiveOrgPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.folders.getEffectiveOrgPolicy", @@ -168,7 +168,7 @@ ] }, "getOrgPolicy": { - "description": "Gets a `Policy` on a resource.\n\nIf no `Policy` is set on the resource, a `Policy` is returned with default\nvalues including `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. The\n`etag` value can be used with `SetOrgPolicy()` to create or update a\n`Policy` during read-modify-write.", + "description": "Gets a `Policy` on a resource. If no `Policy` is set on the resource, a `Policy` is returned with default values including `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. The `etag` value can be used with `SetOrgPolicy()` to create or update a `Policy` during read-modify-write.", "flatPath": "v1/folders/{foldersId}:getOrgPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.folders.getOrgPolicy", @@ -255,7 +255,7 @@ ] }, "setOrgPolicy": { - "description": "Updates the specified `Policy` on the resource. Creates a new `Policy` for\nthat `Constraint` on the resource if one does not exist.\n\nNot supplying an `etag` on the request `Policy` results in an unconditional\nwrite of the `Policy`.", + "description": "Updates the specified `Policy` on the resource. Creates a new `Policy` for that `Constraint` on the resource if one does not exist. 
Not supplying an `etag` on the request `Policy` results in an unconditional write of the `Policy`.", "flatPath": "v1/folders/{foldersId}:setOrgPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.folders.setOrgPolicy", @@ -287,7 +287,7 @@ "liens": { "methods": { "create": { - "description": "Create a Lien which applies to the resource denoted by the `parent` field.\n\nCallers of this method will require permission on the `parent` resource.\nFor example, applying to `projects/1234` requires permission\n`resourcemanager.projects.updateLiens`.\n\nNOTE: Some resources may limit the number of Liens which may be applied.", + "description": "Create a Lien which applies to the resource denoted by the `parent` field. Callers of this method will require permission on the `parent` resource. For example, applying to `projects/1234` requires permission `resourcemanager.projects.updateLiens`. NOTE: Some resources may limit the number of Liens which may be applied.", "flatPath": "v1/liens", "httpMethod": "POST", "id": "cloudresourcemanager.liens.create", @@ -306,7 +306,7 @@ ] }, "delete": { - "description": "Delete a Lien by `name`.\n\nCallers of this method will require permission on the `parent` resource.\nFor example, a Lien with a `parent` of `projects/1234` requires permission\n`resourcemanager.projects.updateLiens`.", + "description": "Delete a Lien by `name`. Callers of this method will require permission on the `parent` resource. For example, a Lien with a `parent` of `projects/1234` requires permission `resourcemanager.projects.updateLiens`.", "flatPath": "v1/liens/{liensId}", "httpMethod": "DELETE", "id": "cloudresourcemanager.liens.delete", @@ -332,7 +332,7 @@ ] }, "get": { - "description": "Retrieve a Lien by `name`.\n\nCallers of this method will require permission on the `parent` resource.\nFor example, a Lien with a `parent` of `projects/1234` requires permission\nrequires permission `resourcemanager.projects.get` or\n`resourcemanager.projects.updateLiens`.", + "description": "Retrieve a Lien by `name`. Callers of this method will require permission on the `parent` resource. For example, a Lien with a `parent` of `projects/1234` requires permission requires permission `resourcemanager.projects.get` or `resourcemanager.projects.updateLiens`.", "flatPath": "v1/liens/{liensId}", "httpMethod": "GET", "id": "cloudresourcemanager.liens.get", @@ -358,7 +358,7 @@ ] }, "list": { - "description": "List all Liens applied to the `parent` resource.\n\nCallers of this method will require permission on the `parent` resource.\nFor example, a Lien with a `parent` of `projects/1234` requires permission\n`resourcemanager.projects.get`.", + "description": "List all Liens applied to the `parent` resource. Callers of this method will require permission on the `parent` resource. For example, a Lien with a `parent` of `projects/1234` requires permission `resourcemanager.projects.get`.", "flatPath": "v1/liens", "httpMethod": "GET", "id": "cloudresourcemanager.liens.list", @@ -376,7 +376,7 @@ "type": "string" }, "parent": { - "description": "Required. The name of the resource to list all attached Liens.\nFor example, `projects/1234`.\n\n(google.api.field_policy).resource_type annotation is not set since the\nparent depends on the meta api implementation. This field could be a\nproject or other sub project resources.", + "description": "Required. The name of the resource to list all attached Liens. For example, `projects/1234`. 
(google.api.field_policy).resource_type annotation is not set since the parent depends on the meta api implementation. This field could be a project or other sub project resources.", "location": "query", "type": "string" } @@ -395,7 +395,7 @@ "operations": { "methods": { "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v1/operations/{operationsId}", "httpMethod": "GET", "id": "cloudresourcemanager.operations.get", @@ -462,7 +462,7 @@ ], "parameters": { "name": { - "description": "The resource name of the Organization to fetch. This is the organization's\nrelative path in the API, formatted as \"organizations/[organizationId]\".\nFor example, \"organizations/1234\".", + "description": "The resource name of the Organization to fetch. This is the organization's relative path in the API, formatted as \"organizations/[organizationId]\". For example, \"organizations/1234\".", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -479,7 +479,7 @@ ] }, "getEffectiveOrgPolicy": { - "description": "Gets the effective `Policy` on a resource. This is the result of merging\n`Policies` in the resource hierarchy. The returned `Policy` will not have\nan `etag`set because it is a computed `Policy` across multiple resources.\nSubtrees of Resource Manager resource hierarchy with 'under:' prefix will\nnot be expanded.", + "description": "Gets the effective `Policy` on a resource. This is the result of merging `Policies` in the resource hierarchy. The returned `Policy` will not have an `etag`set because it is a computed `Policy` across multiple resources. Subtrees of Resource Manager resource hierarchy with 'under:' prefix will not be expanded.", "flatPath": "v1/organizations/{organizationsId}:getEffectiveOrgPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.organizations.getEffectiveOrgPolicy", @@ -508,7 +508,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for an Organization resource. May be empty\nif no such policy or resource exists. The `resource` field should be the\norganization's resource name, e.g. \"organizations/123\".\n\nAuthorization requires the Google IAM permission\n`resourcemanager.organizations.getIamPolicy` on the specified organization", + "description": "Gets the access control policy for an Organization resource. May be empty if no such policy or resource exists. The `resource` field should be the organization's resource name, e.g. \"organizations/123\". Authorization requires the Google IAM permission `resourcemanager.organizations.getIamPolicy` on the specified organization", "flatPath": "v1/organizations/{organizationsId}:getIamPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.organizations.getIamPolicy", @@ -517,7 +517,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. 
See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -537,7 +537,7 @@ ] }, "getOrgPolicy": { - "description": "Gets a `Policy` on a resource.\n\nIf no `Policy` is set on the resource, a `Policy` is returned with default\nvalues including `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. The\n`etag` value can be used with `SetOrgPolicy()` to create or update a\n`Policy` during read-modify-write.", + "description": "Gets a `Policy` on a resource. If no `Policy` is set on the resource, a `Policy` is returned with default values including `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. The `etag` value can be used with `SetOrgPolicy()` to create or update a `Policy` during read-modify-write.", "flatPath": "v1/organizations/{organizationsId}:getOrgPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.organizations.getOrgPolicy", @@ -624,7 +624,7 @@ ] }, "search": { - "description": "Searches Organization resources that are visible to the user and satisfy\nthe specified filter. This method returns Organizations in an unspecified\norder. New Organizations do not necessarily appear at the end of the\nresults.\n\nSearch will only return organizations on which the user has the permission\n`resourcemanager.organizations.get`", + "description": "Searches Organization resources that are visible to the user and satisfy the specified filter. This method returns Organizations in an unspecified order. New Organizations do not necessarily appear at the end of the results. Search will only return organizations on which the user has the permission `resourcemanager.organizations.get`", "flatPath": "v1/organizations:search", "httpMethod": "POST", "id": "cloudresourcemanager.organizations.search", @@ -643,7 +643,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on an Organization resource. Replaces any\nexisting policy. The `resource` field should be the organization's resource\nname, e.g. \"organizations/123\".\n\nAuthorization requires the Google IAM permission\n`resourcemanager.organizations.setIamPolicy` on the specified organization", + "description": "Sets the access control policy on an Organization resource. Replaces any existing policy. The `resource` field should be the organization's resource name, e.g. \"organizations/123\". Authorization requires the Google IAM permission `resourcemanager.organizations.setIamPolicy` on the specified organization", "flatPath": "v1/organizations/{organizationsId}:setIamPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.organizations.setIamPolicy", @@ -652,7 +652,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -671,7 +671,7 @@ ] }, "setOrgPolicy": { - "description": "Updates the specified `Policy` on the resource. Creates a new `Policy` for\nthat `Constraint` on the resource if one does not exist.\n\nNot supplying an `etag` on the request `Policy` results in an unconditional\nwrite of the `Policy`.", + "description": "Updates the specified `Policy` on the resource. Creates a new `Policy` for that `Constraint` on the resource if one does not exist. 
Not supplying an `etag` on the request `Policy` results in an unconditional write of the `Policy`.", "flatPath": "v1/organizations/{organizationsId}:setOrgPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.organizations.setOrgPolicy", @@ -699,7 +699,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified Organization.\nThe `resource` field should be the organization's resource name,\ne.g. \"organizations/123\".\n\nThere are no permissions required for making this API call.", + "description": "Returns permissions that a caller has on the specified Organization. The `resource` field should be the organization's resource name, e.g. \"organizations/123\". There are no permissions required for making this API call.", "flatPath": "v1/organizations/{organizationsId}:testIamPermissions", "httpMethod": "POST", "id": "cloudresourcemanager.organizations.testIamPermissions", @@ -708,7 +708,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -760,7 +760,7 @@ ] }, "create": { - "description": "Request that a new Project be created. The result is an Operation which\ncan be used to track the creation process. This process usually takes a few\nseconds, but can sometimes take much longer. The tracking Operation is\nautomatically deleted after a few hours, so there is no need to call\nDeleteOperation.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.create` on the specified parent for the new\nproject. The parent is identified by a specified ResourceId,\nwhich must include both an ID and a type, such as organization.\n\nThis method does not associate the new project with a billing account.\nYou can set or update the billing account associated with a project using\nthe [`projects.updateBillingInfo`]\n(/billing/reference/rest/v1/projects/updateBillingInfo) method.", + "description": "Request that a new Project be created. The result is an Operation which can be used to track the creation process. This process usually takes a few seconds, but can sometimes take much longer. The tracking Operation is automatically deleted after a few hours, so there is no need to call DeleteOperation. Authorization requires the Google IAM permission `resourcemanager.projects.create` on the specified parent for the new project. The parent is identified by a specified ResourceId, which must include both an ID and a type, such as organization. This method does not associate the new project with a billing account. 
You can set or update the billing account associated with a project using the [`projects.updateBillingInfo`] (/billing/reference/rest/v1/projects/updateBillingInfo) method.", "flatPath": "v1/projects", "httpMethod": "POST", "id": "cloudresourcemanager.projects.create", @@ -778,7 +778,7 @@ ] }, "delete": { - "description": "Marks the Project identified by the specified\n`project_id` (for example, `my-project-123`) for deletion.\nThis method will only affect the Project if it has a lifecycle state of\nACTIVE.\n\nThis method changes the Project's lifecycle state from\nACTIVE\nto DELETE_REQUESTED.\nThe deletion starts at an unspecified time,\nat which point the Project is no longer accessible.\n\nUntil the deletion completes, you can check the lifecycle state\nchecked by retrieving the Project with GetProject,\nand the Project remains visible to ListProjects.\nHowever, you cannot update the project.\n\nAfter the deletion completes, the Project is not retrievable by\nthe GetProject and\nListProjects methods.\n\nThe caller must have modify permissions for this Project.", + "description": "Marks the Project identified by the specified `project_id` (for example, `my-project-123`) for deletion. This method will only affect the Project if it has a lifecycle state of ACTIVE. This method changes the Project's lifecycle state from ACTIVE to DELETE_REQUESTED. The deletion starts at an unspecified time, at which point the Project is no longer accessible. Until the deletion completes, you can check the lifecycle state checked by retrieving the Project with GetProject, and the Project remains visible to ListProjects. However, you cannot update the project. After the deletion completes, the Project is not retrievable by the GetProject and ListProjects methods. The caller must have delete permissions for this Project.", "flatPath": "v1/projects/{projectId}", "httpMethod": "DELETE", "id": "cloudresourcemanager.projects.delete", @@ -787,7 +787,7 @@ ], "parameters": { "projectId": { - "description": "The Project ID (for example, `foo-bar-123`).\n\nRequired.", + "description": "The Project ID (for example, `foo-bar-123`). Required.", "location": "path", "required": true, "type": "string" @@ -802,7 +802,7 @@ ] }, "get": { - "description": "Retrieves the Project identified by the specified\n`project_id` (for example, `my-project-123`).\n\nThe caller must have read permissions for this Project.", + "description": "Retrieves the Project identified by the specified `project_id` (for example, `my-project-123`). The caller must have read permissions for this Project.", "flatPath": "v1/projects/{projectId}", "httpMethod": "GET", "id": "cloudresourcemanager.projects.get", @@ -811,7 +811,7 @@ ], "parameters": { "projectId": { - "description": "The Project ID (for example, `my-project-123`).\n\nRequired.", + "description": "Required. The Project ID (for example, `my-project-123`).", "location": "path", "required": true, "type": "string" @@ -827,7 +827,7 @@ ] }, "getAncestry": { - "description": "Gets a list of ancestors in the resource hierarchy for the Project\nidentified by the specified `project_id` (for example, `my-project-123`).\n\nThe caller must have read permissions for this Project.", + "description": "Gets a list of ancestors in the resource hierarchy for the Project identified by the specified `project_id` (for example, `my-project-123`). 
The caller must have read permissions for this Project.", "flatPath": "v1/projects/{projectId}:getAncestry", "httpMethod": "POST", "id": "cloudresourcemanager.projects.getAncestry", @@ -836,7 +836,7 @@ ], "parameters": { "projectId": { - "description": "The Project ID (for example, `my-project-123`).\n\nRequired.", + "description": "Required. The Project ID (for example, `my-project-123`).", "location": "path", "required": true, "type": "string" @@ -855,7 +855,7 @@ ] }, "getEffectiveOrgPolicy": { - "description": "Gets the effective `Policy` on a resource. This is the result of merging\n`Policies` in the resource hierarchy. The returned `Policy` will not have\nan `etag`set because it is a computed `Policy` across multiple resources.\nSubtrees of Resource Manager resource hierarchy with 'under:' prefix will\nnot be expanded.", + "description": "Gets the effective `Policy` on a resource. This is the result of merging `Policies` in the resource hierarchy. The returned `Policy` will not have an `etag`set because it is a computed `Policy` across multiple resources. Subtrees of Resource Manager resource hierarchy with 'under:' prefix will not be expanded.", "flatPath": "v1/projects/{projectsId}:getEffectiveOrgPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.projects.getEffectiveOrgPolicy", @@ -884,7 +884,7 @@ ] }, "getIamPolicy": { - "description": "Returns the IAM access control policy for the specified Project.\nPermission is denied if the policy or the resource does not exist.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.getIamPolicy` on the project.\n\nFor additional information about resource structure and identification,\nsee [Resource Names](/apis/design/resource_names).", + "description": "Returns the IAM access control policy for the specified Project. Permission is denied if the policy or the resource does not exist. Authorization requires the Google IAM permission `resourcemanager.projects.getIamPolicy` on the project. For additional information about `resource` (e.g. my-project-id) structure and identification, see [Resource Names](https://cloud.google.com/apis/design/resource_names).", "flatPath": "v1/projects/{resource}:getIamPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.projects.getIamPolicy", @@ -893,7 +893,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "required": true, "type": "string" @@ -912,7 +912,7 @@ ] }, "getOrgPolicy": { - "description": "Gets a `Policy` on a resource.\n\nIf no `Policy` is set on the resource, a `Policy` is returned with default\nvalues including `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. The\n`etag` value can be used with `SetOrgPolicy()` to create or update a\n`Policy` during read-modify-write.", + "description": "Gets a `Policy` on a resource. If no `Policy` is set on the resource, a `Policy` is returned with default values including `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. 
The `etag` value can be used with `SetOrgPolicy()` to create or update a `Policy` during read-modify-write.", "flatPath": "v1/projects/{projectsId}:getOrgPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.projects.getOrgPolicy", @@ -941,25 +941,25 @@ ] }, "list": { - "description": "Lists Projects that the caller has the `resourcemanager.projects.get`\npermission on and satisfy the specified filter.\n\nThis method returns Projects in an unspecified order.\nThis method is eventually consistent with project mutations; this means\nthat a newly created project may not appear in the results or recent\nupdates to an existing project may not be reflected in the results. To\nretrieve the latest state of a project, use the\nGetProject method.\n\nNOTE: If the request filter contains a `parent.type` and `parent.id` and\nthe caller has the `resourcemanager.projects.list` permission on the\nparent, the results will be drawn from an alternate index which provides\nmore consistent results. In future versions of this API, this List method\nwill be split into List and Search to properly capture the behavorial\ndifference.", + "description": "Lists Projects that the caller has the `resourcemanager.projects.get` permission on and satisfy the specified filter. This method returns Projects in an unspecified order. This method is eventually consistent with project mutations; this means that a newly created project may not appear in the results or recent updates to an existing project may not be reflected in the results. To retrieve the latest state of a project, use the GetProject method. NOTE: If the request filter contains a `parent.type` and `parent.id` and the caller has the `resourcemanager.projects.list` permission on the parent, the results will be drawn from an alternate index which provides more consistent results. In future versions of this API, this List method will be split into List and Search to properly capture the behavioral difference.", "flatPath": "v1/projects", "httpMethod": "GET", "id": "cloudresourcemanager.projects.list", "parameterOrder": [], "parameters": { "filter": { - "description": "An expression for filtering the results of the request. Filter rules are\ncase insensitive. The fields eligible for filtering are:\n\n+ `name`\n+ `id`\n+ `labels.\u003ckey\u003e` (where *key* is the name of a label)\n+ `parent.type`\n+ `parent.id`\n\nSome examples of using labels as filters:\n\n| Filter | Description |\n|------------------|-----------------------------------------------------|\n| name:how* | The project's name starts with \"how\". |\n| name:Howl | The project's name is `Howl` or `howl`. |\n| name:HOWL | Equivalent to above. |\n| NAME:howl | Equivalent to above. |\n| labels.color:* | The project has the label `color`. |\n| labels.color:red | The project's label `color` has the value `red`. |\n| labels.color:red\u0026nbsp;labels.size:big |The project's label `color` has\n the value `red` and its label `size` has the value `big`. |\n\nIf no filter is specified, the call will return projects for which the user\nhas the `resourcemanager.projects.get` permission.\n\nNOTE: To perform a by-parent query (eg., what projects are directly in a\nFolder), the caller must have the `resourcemanager.projects.list`\npermission on the parent and the filter must contain both a `parent.type`\nand a `parent.id` restriction\n(example: \"parent.type:folder parent.id:123\"). In this case an alternate\nsearch index is used which provides more consistent results.\n\nOptional.", + "description": "Optional. 
An expression for filtering the results of the request. Filter rules are case insensitive. If multiple fields are included in a filter query, the query will return results that match any of the fields. Some eligible fields for filtering are: + `name` + `id` + `labels.` (where *key* is the name of a label) + `parent.type` + `parent.id` + `lifecycleState` Some examples of filter strings: | Filter | Description | |------------------|-----------------------------------------------------| | name:how* | The project's name starts with \"how\". | | name:Howl | The project's name is `Howl` or `howl`. | | name:HOWL | Equivalent to above. | | NAME:howl | Equivalent to above. | | labels.color:* | The project has the label `color`. | | labels.color:red | The project's label `color` has the value `red`. | | labels.color:red labels.size:big | The project's label `color` | : : has the value `red` and its : : : label`size` has the value : : : `big`. : | lifecycleState:DELETE_REQUESTED | Only show projects that are | : : pending deletion. : If no filter is specified, the call will return projects for which the user has the `resourcemanager.projects.get` permission. NOTE: To perform a by-parent query (eg., what projects are directly in a Folder), the caller must have the `resourcemanager.projects.list` permission on the parent and the filter must contain both a `parent.type` and a `parent.id` restriction (example: \"parent.type:folder parent.id:123\"). In this case an alternate search index is used which provides more consistent results.", "location": "query", "type": "string" }, "pageSize": { - "description": "The maximum number of Projects to return in the response.\nThe server can return fewer Projects than requested.\nIf unspecified, server picks an appropriate default.\n\nOptional.", + "description": "Optional. The maximum number of Projects to return in the response. The server can return fewer Projects than requested. If unspecified, server picks an appropriate default.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "A pagination token returned from a previous call to ListProjects\nthat indicates from where listing should continue.\n\nOptional.", + "description": "Optional. A pagination token returned from a previous call to ListProjects that indicates from where listing should continue.", "location": "query", "type": "string" } @@ -1032,7 +1032,7 @@ ] }, "setIamPolicy": { - "description": "Sets the IAM access control policy for the specified Project. Overwrites\nany existing policy.\n\nThe following constraints apply when using `setIamPolicy()`:\n\n+ Project does not support `allUsers` and `allAuthenticatedUsers` as\n`members` in a `Binding` of a `Policy`.\n\n+ The owner role can be granted to a `user`, `serviceAccount`, or a group\nthat is part of an organization. For example,\ngroup@myownpersonaldomain.com could be added as an owner to a project in\nthe myownpersonaldomain.com organization, but not the examplepetstore.com\norganization.\n\n+ Service accounts can be made owners of a project directly\nwithout any restrictions. However, to be added as an owner, a user must be\ninvited via Cloud Platform console and must accept the invitation.\n\n+ A user cannot be granted the owner role using `setIamPolicy()`. The user\nmust be granted the owner role using the Cloud Platform Console and must\nexplicitly accept the invitation.\n\n+ You can only grant ownership of a project to a member by using the\nGCP Console. 
Inviting a member will deliver an invitation email that\nthey must accept. An invitation email is not generated if you are\ngranting a role other than owner, or if both the member you are inviting\nand the project are part of your organization.\n\n+ Membership changes that leave the project without any owners that have\naccepted the Terms of Service (ToS) will be rejected.\n\n+ If the project is not part of an organization, there must be at least\none owner who has accepted the Terms of Service (ToS) agreement in the\npolicy. Calling `setIamPolicy()` to remove the last ToS-accepted owner\nfrom the policy will fail. This restriction also applies to legacy\nprojects that no longer have owners who have accepted the ToS. Edits to\nIAM policies will be rejected until the lack of a ToS-accepting owner is\nrectified.\n\n+ This method will replace the existing policy, and cannot be used to\nappend additional IAM settings.\n\nNote: Removing service accounts from policies or changing their roles\ncan render services completely inoperable. It is important to understand\nhow the service account is being used before removing or updating its\nroles.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.setIamPolicy` on the project", + "description": "Sets the IAM access control policy for the specified Project. CAUTION: This method will replace the existing policy, and cannot be used to append additional IAM settings. NOTE: Removing service accounts from policies or changing their roles can render services completely inoperable. It is important to understand how the service account is being used before removing or updating its roles. For additional information about `resource` (e.g. my-project-id) structure and identification, see [Resource Names](https://cloud.google.com/apis/design/resource_names). The following constraints apply when using `setIamPolicy()`: + Project does not support `allUsers` and `allAuthenticatedUsers` as `members` in a `Binding` of a `Policy`. + The owner role can be granted to a `user`, `serviceAccount`, or a group that is part of an organization. For example, group@myownpersonaldomain.com could be added as an owner to a project in the myownpersonaldomain.com organization, but not the examplepetstore.com organization. + Service accounts can be made owners of a project directly without any restrictions. However, to be added as an owner, a user must be invited via Cloud Platform console and must accept the invitation. + A user cannot be granted the owner role using `setIamPolicy()`. The user must be granted the owner role using the Cloud Platform Console and must explicitly accept the invitation. + You can only grant ownership of a project to a member by using the GCP Console. Inviting a member will deliver an invitation email that they must accept. An invitation email is not generated if you are granting a role other than owner, or if both the member you are inviting and the project are part of your organization. + Membership changes that leave the project without any owners that have accepted the Terms of Service (ToS) will be rejected. + If the project is not part of an organization, there must be at least one owner who has accepted the Terms of Service (ToS) agreement in the policy. Calling `setIamPolicy()` to remove the last ToS-accepted owner from the policy will fail. This restriction also applies to legacy projects that no longer have owners who have accepted the ToS. 
Edits to IAM policies will be rejected until the lack of a ToS-accepting owner is rectified. Authorization requires the Google IAM permission `resourcemanager.projects.setIamPolicy` on the project", "flatPath": "v1/projects/{resource}:setIamPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.projects.setIamPolicy", @@ -1041,7 +1041,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "required": true, "type": "string" @@ -1059,7 +1059,7 @@ ] }, "setOrgPolicy": { - "description": "Updates the specified `Policy` on the resource. Creates a new `Policy` for\nthat `Constraint` on the resource if one does not exist.\n\nNot supplying an `etag` on the request `Policy` results in an unconditional\nwrite of the `Policy`.", + "description": "Updates the specified `Policy` on the resource. Creates a new `Policy` for that `Constraint` on the resource if one does not exist. Not supplying an `etag` on the request `Policy` results in an unconditional write of the `Policy`.", "flatPath": "v1/projects/{projectsId}:setOrgPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.projects.setOrgPolicy", @@ -1087,7 +1087,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified Project.\n\nThere are no permissions required for making this API call.", + "description": "Returns permissions that a caller has on the specified Project. For additional information about `resource` (e.g. my-project-id) structure and identification, see [Resource Names](https://cloud.google.com/apis/design/resource_names). There are no permissions required for making this API call.", "flatPath": "v1/projects/{resource}:testIamPermissions", "httpMethod": "POST", "id": "cloudresourcemanager.projects.testIamPermissions", @@ -1096,7 +1096,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "required": true, "type": "string" @@ -1115,7 +1115,7 @@ ] }, "undelete": { - "description": "Restores the Project identified by the specified\n`project_id` (for example, `my-project-123`).\nYou can only use this method for a Project that has a lifecycle state of\nDELETE_REQUESTED.\nAfter deletion starts, the Project cannot be restored.\n\nThe caller must have modify permissions for this Project.", + "description": "Restores the Project identified by the specified `project_id` (for example, `my-project-123`). You can only use this method for a Project that has a lifecycle state of DELETE_REQUESTED. After deletion starts, the Project cannot be restored. The caller must have undelete permissions for this Project.", "flatPath": "v1/projects/{projectId}:undelete", "httpMethod": "POST", "id": "cloudresourcemanager.projects.undelete", @@ -1124,7 +1124,7 @@ ], "parameters": { "projectId": { - "description": "The project ID (for example, `foo-bar-123`).\n\nRequired.", + "description": "Required. 
The project ID (for example, `foo-bar-123`).", "location": "path", "required": true, "type": "string" @@ -1142,7 +1142,7 @@ ] }, "update": { - "description": "Updates the attributes of the Project identified by the specified\n`project_id` (for example, `my-project-123`).\n\nThe caller must have modify permissions for this Project.", + "description": "Updates the attributes of the Project identified by the specified `project_id` (for example, `my-project-123`). The caller must have modify permissions for this Project.", "flatPath": "v1/projects/{projectId}", "httpMethod": "PUT", "id": "cloudresourcemanager.projects.update", @@ -1151,7 +1151,7 @@ ], "parameters": { "projectId": { - "description": "The project ID (for example, `my-project-123`).\n\nRequired.", + "description": "The project ID (for example, `my-project-123`). Required.", "location": "path", "required": true, "type": "string" @@ -1171,7 +1171,7 @@ } } }, - "revision": "20200504", + "revision": "20200921", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { @@ -1186,7 +1186,7 @@ "type": "object" }, "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"sampleservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:aliya@example.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts jose@example.com from DATA_READ logging, and\naliya@example.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. 
It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -1197,18 +1197,18 @@ "type": "array" }, "service": { - "description": "Specifies a service that will be enabled for audit logging.\nFor example, `storage.googleapis.com`, `cloudsql.googleapis.com`.\n`allServices` is a special value that covers all services.", + "description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.", "type": "string" } }, "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\njose@example.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions. Example: { \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { - "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", + "description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.", "items": { "type": "string" }, @@ -1237,36 +1237,40 @@ "description": "Associates `members` with a `role`.", "id": "Binding", "properties": { + "bindingId": { + "description": "A client-specified ID for this binding. Expected to be globally unique to support the internal bindings-by-ID API.", + "type": "string" + }, "condition": { "$ref": "Expr", - "description": "The condition that is associated with this binding.\n\nIf the condition evaluates to `true`, then this binding applies to the\ncurrent request.\n\nIf the condition evaluates to `false`, then this binding does not apply to\nthe current request. However, a different role binding might grant the same\nrole to one or more of the members in this binding.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies)." + "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." 
}, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a user that has been recently deleted. For\n example, `alice@example.com?uid=123456789012345678901`. If the user is\n recovered, this value reverts to `user:{emailid}` and the recovered user\n retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus\n unique identifier) representing a service account that has been recently\n deleted. For example,\n `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.\n If the service account is undeleted, this value reverts to\n `serviceAccount:{emailid}` and the undeleted service account retains the\n role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a Google group that has been recently\n deleted. For example, `admins@example.com?uid=123456789012345678901`. If\n the group is recovered, this value reverts to `group:{emailid}` and the\n recovered group retains the role in the binding.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. 
* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", "items": { "type": "string" }, "type": "array" }, "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", "type": "string" } }, "type": "object" }, "BooleanConstraint": { - "description": "A `Constraint` that is either enforced or not.\n\nFor example a constraint `constraints/compute.disableSerialPortAccess`.\nIf it is enforced on a VM instance, serial port connections will not be\nopened to that instance.", + "description": "A `Constraint` that is either enforced or not. For example a constraint `constraints/compute.disableSerialPortAccess`. If it is enforced on a VM instance, serial port connections will not be opened to that instance.", "id": "BooleanConstraint", "properties": {}, "type": "object" }, "BooleanPolicy": { - "description": "Used in `policy_type` to specify how `boolean_policy` will behave at this\nresource.", + "description": "Used in `policy_type` to specify how `boolean_policy` will behave at this resource.", "id": "BooleanPolicy", "properties": { "enforced": { - "description": "If `true`, then the `Policy` is enforced. If `false`, then any\nconfiguration is acceptable.\n\nSuppose you have a `Constraint`\n`constraints/compute.disableSerialPortAccess` with `constraint_default`\nset to `ALLOW`. A `Policy` for that `Constraint` exhibits the following\nbehavior:\n - If the `Policy` at this resource has enforced set to `false`, serial\n port connection attempts will be allowed.\n - If the `Policy` at this resource has enforced set to `true`, serial\n port connection attempts will be refused.\n - If the `Policy` at this resource is `RestoreDefault`, serial port\n connection attempts will be allowed.\n - If no `Policy` is set at this resource or anywhere higher in the\n resource hierarchy, serial port connection attempts will be allowed.\n - If no `Policy` is set at this resource, but one exists higher in the\n resource hierarchy, the behavior is as if the`Policy` were set at\n this resource.\n\nThe following examples demonstrate the different possible layerings:\n\nExample 1 (nearest `Constraint` wins):\n `organizations/foo` has a `Policy` with:\n {enforced: false}\n `projects/bar` has no `Policy` set.\nThe constraint at `projects/bar` and `organizations/foo` will not be\nenforced.\n\nExample 2 (enforcement gets replaced):\n `organizations/foo` has a `Policy` with:\n {enforced: false}\n `projects/bar` has a `Policy` with:\n {enforced: true}\nThe constraint at `organizations/foo` is not enforced.\nThe constraint at `projects/bar` is enforced.\n\nExample 3 (RestoreDefault):\n `organizations/foo` has a `Policy` with:\n {enforced: true}\n `projects/bar` has a `Policy` with:\n {RestoreDefault: {}}\nThe constraint at `organizations/foo` is enforced.\nThe constraint at `projects/bar` is not enforced, because\n`constraint_default` for the `Constraint` is `ALLOW`.", + "description": "If `true`, then the `Policy` is enforced. 
If `false`, then any configuration is acceptable. Suppose you have a `Constraint` `constraints/compute.disableSerialPortAccess` with `constraint_default` set to `ALLOW`. A `Policy` for that `Constraint` exhibits the following behavior: - If the `Policy` at this resource has enforced set to `false`, serial port connection attempts will be allowed. - If the `Policy` at this resource has enforced set to `true`, serial port connection attempts will be refused. - If the `Policy` at this resource is `RestoreDefault`, serial port connection attempts will be allowed. - If no `Policy` is set at this resource or anywhere higher in the resource hierarchy, serial port connection attempts will be allowed. - If no `Policy` is set at this resource, but one exists higher in the resource hierarchy, the behavior is as if the`Policy` were set at this resource. The following examples demonstrate the different possible layerings: Example 1 (nearest `Constraint` wins): `organizations/foo` has a `Policy` with: {enforced: false} `projects/bar` has no `Policy` set. The constraint at `projects/bar` and `organizations/foo` will not be enforced. Example 2 (enforcement gets replaced): `organizations/foo` has a `Policy` with: {enforced: false} `projects/bar` has a `Policy` with: {enforced: true} The constraint at `organizations/foo` is not enforced. The constraint at `projects/bar` is enforced. Example 3 (RestoreDefault): `organizations/foo` has a `Policy` with: {enforced: true} `projects/bar` has a `Policy` with: {RestoreDefault: {}} The constraint at `organizations/foo` is enforced. The constraint at `projects/bar` is not enforced, because `constraint_default` for the `Constraint` is `ALLOW`.", "type": "boolean" } }, @@ -1281,7 +1285,7 @@ "type": "string" }, "etag": { - "description": "The current version, for concurrency control. Not sending an `etag`\nwill cause the `Policy` to be cleared blindly.", + "description": "The current version, for concurrency control. Not sending an `etag` will cause the `Policy` to be cleared blindly.", "format": "byte", "type": "string" } @@ -1289,7 +1293,7 @@ "type": "object" }, "Constraint": { - "description": "A `Constraint` describes a way in which a resource's configuration can be\nrestricted. For example, it controls which cloud services can be activated\nacross an organization, or whether a Compute Engine instance can have\nserial port connections established. `Constraints` can be configured by the\norganization's policy adminstrator to fit the needs of the organzation by\nsetting Policies for `Constraints` at different locations in the\norganization's resource hierarchy. Policies are inherited down the resource\nhierarchy from higher levels, but can also be overridden. For details about\nthe inheritance rules please read about\nPolicies.\n\n`Constraints` have a default behavior determined by the `constraint_default`\nfield, which is the enforcement behavior that is used in the absence of a\n`Policy` being defined or inherited for the resource in question.", + "description": "A `Constraint` describes a way in which a resource's configuration can be restricted. For example, it controls which cloud services can be activated across an organization, or whether a Compute Engine instance can have serial port connections established. `Constraints` can be configured by the organization's policy administrator to fit the needs of the organzation by setting Policies for `Constraints` at different locations in the organization's resource hierarchy. 
Policies are inherited down the resource hierarchy from higher levels, but can also be overridden. For details about the inheritance rules please read about [Policies](/resource-manager/reference/rest/v1/Policy). `Constraints` have a default behavior determined by the `constraint_default` field, which is the enforcement behavior that is used in the absence of a `Policy` being defined or inherited for the resource in question.", "id": "Constraint", "properties": { "booleanConstraint": { @@ -1297,25 +1301,25 @@ "description": "Defines this constraint as being a BooleanConstraint." }, "constraintDefault": { - "description": "The evaluation behavior of this constraint in the absense of 'Policy'.", + "description": "The evaluation behavior of this constraint in the absence of 'Policy'.", "enum": [ "CONSTRAINT_DEFAULT_UNSPECIFIED", "ALLOW", "DENY" ], "enumDescriptions": [ - "This is only used for distinguishing unset values and should never be\nused.", - "Indicate that all values are allowed for list constraints.\nIndicate that enforcement is off for boolean constraints.", - "Indicate that all values are denied for list constraints.\nIndicate that enforcement is on for boolean constraints." + "This is only used for distinguishing unset values and should never be used.", + "Indicate that all values are allowed for list constraints. Indicate that enforcement is off for boolean constraints.", + "Indicate that all values are denied for list constraints. Indicate that enforcement is on for boolean constraints." ], "type": "string" }, "description": { - "description": "Detailed description of what this `Constraint` controls as well as how and\nwhere it is enforced.\n\nMutable.", + "description": "Detailed description of what this `Constraint` controls as well as how and where it is enforced. Mutable.", "type": "string" }, "displayName": { - "description": "The human readable name.\n\nMutable.", + "description": "The human readable name. Mutable.", "type": "string" }, "listConstraint": { @@ -1323,7 +1327,7 @@ "description": "Defines this constraint as being a ListConstraint." }, "name": { - "description": "Immutable value, required to globally be unique. For example,\n`constraints/serviceuser.services`", + "description": "Immutable value, required to globally be unique. For example, `constraints/serviceuser.services`", "type": "string" }, "version": { @@ -1335,29 +1339,29 @@ "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" }, "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL)\nsyntax. CEL is a C-like expression language. 
The syntax and semantics of CEL\nare documented at https://github.com/google/cel-spec.\n\nExample (Comparison):\n\n title: \"Summary size limit\"\n description: \"Determines if a summary is less than 100 chars\"\n expression: \"document.summary.size() \u003c 100\"\n\nExample (Equality):\n\n title: \"Requestor is owner\"\n description: \"Determines if requestor is the document owner\"\n expression: \"document.owner == request.auth.claims.email\"\n\nExample (Logic):\n\n title: \"Public documents\"\n description: \"Determine whether the document should be publicly visible\"\n expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\n\nExample (Data Manipulation):\n\n title: \"Notification string\"\n description: \"Create a notification string with a timestamp.\"\n expression: \"'New message received at ' + string(document.create_time)\"\n\nThe exact variables and functions that may be referenced within an expression\nare determined by the service that evaluates it. See the service\ndocumentation for additional information.", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { - "description": "Optional. Description of the expression. This is a longer text which\ndescribes the expression, e.g. when hovered over it in a UI.", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, "expression": { - "description": "Textual representation of an expression in Common Expression Language\nsyntax.", + "description": "Textual representation of an expression in Common Expression Language syntax.", "type": "string" }, "location": { - "description": "Optional. String indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", "type": "string" }, "title": { - "description": "Optional. Title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. 
in UIs which allow to enter the expression.", "type": "string" } }, @@ -1368,7 +1372,7 @@ "id": "FolderOperation", "properties": { "destinationParent": { - "description": "The resource name of the folder or organization we are either creating\nthe folder under or moving the folder to.", + "description": "The resource name of the folder or organization we are either creating the folder under or moving the folder to.", "type": "string" }, "displayName": { @@ -1390,7 +1394,7 @@ "type": "string" }, "sourceParent": { - "description": "The resource name of the folder's parent.\nOnly applicable when the operation_type is MOVE.", + "description": "The resource name of the folder's parent. Only applicable when the operation_type is MOVE.", "type": "string" } }, @@ -1418,13 +1422,13 @@ "The error type was unrecognized or unspecified.", "The attempted action would violate the max folder depth constraint.", "The attempted action would violate the max child folders constraint.", - "The attempted action would violate the locally-unique folder\ndisplay_name constraint.", + "The attempted action would violate the locally-unique folder display_name constraint.", "The resource being moved has been deleted.", "The resource a folder was being added to has been deleted.", "The attempted action would introduce cycle in resource path.", "The attempted action would move a folder that is already being moved.", "The folder the caller is trying to delete contains active resources.", - "The attempted action would violate the max deleted folder depth\nconstraint." + "The attempted action would violate the max deleted folder depth constraint." ], "type": "string" } @@ -1432,17 +1436,17 @@ "type": "object" }, "GetAncestryRequest": { - "description": "The request sent to the\nGetAncestry\nmethod.", + "description": "The request sent to the GetAncestry method.", "id": "GetAncestryRequest", "properties": {}, "type": "object" }, "GetAncestryResponse": { - "description": "Response from the GetAncestry method.", + "description": "Response from the projects.getAncestry method.", "id": "GetAncestryResponse", "properties": { "ancestor": { - "description": "Ancestors are ordered from bottom to top of the resource hierarchy. The\nfirst ancestor is the project itself, followed by the project's parent,\netc..", + "description": "Ancestors are ordered from bottom to top of the resource hierarchy. The first ancestor is the project itself, followed by the project's parent, etc..", "items": { "$ref": "Ancestor" }, @@ -1468,7 +1472,7 @@ "properties": { "options": { "$ref": "GetPolicyOptions", - "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to\n`GetIamPolicy`." + "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to `GetIamPolicy`." } }, "type": "object" @@ -1489,7 +1493,7 @@ "id": "GetPolicyOptions", "properties": { "requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. 
Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -1497,7 +1501,7 @@ "type": "object" }, "Lien": { - "description": "A Lien represents an encumbrance on the actions that can be performed on a\nresource.", + "description": "A Lien represents an encumbrance on the actions that can be performed on a resource.", "id": "Lien", "properties": { "createTime": { @@ -1506,23 +1510,23 @@ "type": "string" }, "name": { - "description": "A system-generated unique identifier for this Lien.\n\nExample: `liens/1234abcd`", + "description": "A system-generated unique identifier for this Lien. Example: `liens/1234abcd`", "type": "string" }, "origin": { - "description": "A stable, user-visible/meaningful string identifying the origin of the\nLien, intended to be inspected programmatically. Maximum length of 200\ncharacters.\n\nExample: 'compute.googleapis.com'", + "description": "A stable, user-visible/meaningful string identifying the origin of the Lien, intended to be inspected programmatically. Maximum length of 200 characters. Example: 'compute.googleapis.com'", "type": "string" }, "parent": { - "description": "A reference to the resource this Lien is attached to. The server will\nvalidate the parent against those for which Liens are supported.\n\nExample: `projects/1234`", + "description": "A reference to the resource this Lien is attached to. The server will validate the parent against those for which Liens are supported. Example: `projects/1234`", "type": "string" }, "reason": { - "description": "Concise user-visible strings indicating why an action cannot be performed\non a resource. Maximum length of 200 characters.\n\nExample: 'Holds production API key'", + "description": "Concise user-visible strings indicating why an action cannot be performed on a resource. Maximum length of 200 characters. Example: 'Holds production API key'", "type": "string" }, "restrictions": { - "description": "The types of operations which should be blocked as a result of this Lien.\nEach value should correspond to an IAM permission. The server will\nvalidate the permissions against those for which Liens are supported.\n\nAn empty list is meaningless and will be rejected.\n\nExample: ['resourcemanager.projects.delete']", + "description": "The types of operations which should be blocked as a result of this Lien. Each value should correspond to an IAM permission. The server will validate the permissions against those for which Liens are supported. An empty list is meaningless and will be rejected. Example: ['resourcemanager.projects.delete']", "items": { "type": "string" }, @@ -1532,23 +1536,23 @@ "type": "object" }, "ListAvailableOrgPolicyConstraintsRequest": { - "description": "The request sent to the [ListAvailableOrgPolicyConstraints]\ngoogle.cloud.OrgPolicy.v1.ListAvailableOrgPolicyConstraints] method.", + "description": "The request sent to the `ListAvailableOrgPolicyConstraints` method on the project, folder, or organization.", "id": "ListAvailableOrgPolicyConstraintsRequest", "properties": { "pageSize": { - "description": "Size of the pages to be returned. This is currently unsupported and will\nbe ignored. 
The server may at any point start using this field to limit\npage size.", + "description": "Size of the pages to be returned. This is currently unsupported and will be ignored. The server may at any point start using this field to limit page size.", "format": "int32", "type": "integer" }, "pageToken": { - "description": "Page token used to retrieve the next page. This is currently unsupported\nand will be ignored. The server may at any point start using this field.", + "description": "Page token used to retrieve the next page. This is currently unsupported and will be ignored. The server may at any point start using this field.", "type": "string" } }, "type": "object" }, "ListAvailableOrgPolicyConstraintsResponse": { - "description": "The response returned from the ListAvailableOrgPolicyConstraints method.\nReturns all `Constraints` that could be set at this level of the hierarchy\n(contrast with the response from `ListPolicies`, which returns all policies\nwhich are set).", + "description": "The response returned from the `ListAvailableOrgPolicyConstraints` method. Returns all `Constraints` that could be set at this level of the hierarchy (contrast with the response from `ListPolicies`, which returns all policies which are set).", "id": "ListAvailableOrgPolicyConstraintsResponse", "properties": { "constraints": { @@ -1566,15 +1570,15 @@ "type": "object" }, "ListConstraint": { - "description": "A `Constraint` that allows or disallows a list of string values, which are\nconfigured by an Organization's policy administrator with a `Policy`.", + "description": "A `Constraint` that allows or disallows a list of string values, which are configured by an Organization's policy administrator with a `Policy`.", "id": "ListConstraint", "properties": { "suggestedValue": { - "description": "Optional. The Google Cloud Console will try to default to a configuration\nthat matches the value specified in this `Constraint`.", + "description": "Optional. The Google Cloud Console will try to default to a configuration that matches the value specified in this `Constraint`.", "type": "string" }, "supportsUnder": { - "description": "Indicates whether subtrees of Cloud Resource Manager resource hierarchy\ncan be used in `Policy.allowed_values` and `Policy.denied_values`. For\nexample, `\"under:folders/123\"` would match any resource under the\n'folders/123' folder.", + "description": "Indicates whether subtrees of Cloud Resource Manager resource hierarchy can be used in `Policy.allowed_values` and `Policy.denied_values`. For example, `\"under:folders/123\"` would match any resource under the 'folders/123' folder.", "type": "boolean" } }, @@ -1592,7 +1596,7 @@ "type": "array" }, "nextPageToken": { - "description": "Token to retrieve the next page of results, or empty if there are no more\nresults in the list.", + "description": "Token to retrieve the next page of results, or empty if there are no more results in the list.", "type": "string" } }, @@ -1603,27 +1607,27 @@ "id": "ListOrgPoliciesRequest", "properties": { "pageSize": { - "description": "Size of the pages to be returned. This is currently unsupported and will\nbe ignored. The server may at any point start using this field to limit\npage size.", + "description": "Size of the pages to be returned. This is currently unsupported and will be ignored. The server may at any point start using this field to limit page size.", "format": "int32", "type": "integer" }, "pageToken": { - "description": "Page token used to retrieve the next page. 
This is currently unsupported\nand will be ignored. The server may at any point start using this field.", + "description": "Page token used to retrieve the next page. This is currently unsupported and will be ignored. The server may at any point start using this field.", "type": "string" } }, "type": "object" }, "ListOrgPoliciesResponse": { - "description": "The response returned from the ListOrgPolicies method. It will be empty\nif no `Policies` are set on the resource.", + "description": "The response returned from the `ListOrgPolicies` method. It will be empty if no `Policies` are set on the resource.", "id": "ListOrgPoliciesResponse", "properties": { "nextPageToken": { - "description": "Page token used to retrieve the next page. This is currently not used, but\nthe server may at any point start supplying a valid token.", + "description": "Page token used to retrieve the next page. This is currently not used, but the server may at any point start supplying a valid token.", "type": "string" }, "policies": { - "description": "The `Policies` that are set on the resource. It will be empty if no\n`Policies` are set.", + "description": "The `Policies` that are set on the resource. It will be empty if no `Policies` are set.", "items": { "$ref": "OrgPolicy" }, @@ -1633,7 +1637,7 @@ "type": "object" }, "ListPolicy": { - "description": "Used in `policy_type` to specify how `list_policy` behaves at this\nresource.\n\n`ListPolicy` can define specific values and subtrees of Cloud Resource\nManager resource hierarchy (`Organizations`, `Folders`, `Projects`) that\nare allowed or denied by setting the `allowed_values` and `denied_values`\nfields. This is achieved by using the `under:` and optional `is:` prefixes.\nThe `under:` prefix is used to denote resource subtree values.\nThe `is:` prefix is used to denote specific values, and is required only\nif the value contains a \":\". Values prefixed with \"is:\" are treated the\nsame as values with no prefix.\nAncestry subtrees must be in one of the following formats:\n - \"projects/\u003cproject-id\u003e\", e.g. \"projects/tokyo-rain-123\"\n - \"folders/\u003cfolder-id\u003e\", e.g. \"folders/1234\"\n - \"organizations/\u003corganization-id\u003e\", e.g. \"organizations/1234\"\nThe `supports_under` field of the associated `Constraint` defines whether\nancestry prefixes can be used. You can set `allowed_values` and\n`denied_values` in the same `Policy` if `all_values` is\n`ALL_VALUES_UNSPECIFIED`. `ALLOW` or `DENY` are used to allow or deny all\nvalues. If `all_values` is set to either `ALLOW` or `DENY`,\n`allowed_values` and `denied_values` must be unset.", + "description": "Used in `policy_type` to specify how `list_policy` behaves at this resource. `ListPolicy` can define specific values and subtrees of Cloud Resource Manager resource hierarchy (`Organizations`, `Folders`, `Projects`) that are allowed or denied by setting the `allowed_values` and `denied_values` fields. This is achieved by using the `under:` and optional `is:` prefixes. The `under:` prefix is used to denote resource subtree values. The `is:` prefix is used to denote specific values, and is required only if the value contains a \":\". Values prefixed with \"is:\" are treated the same as values with no prefix. Ancestry subtrees must be in one of the following formats: - \"projects/\", e.g. \"projects/tokyo-rain-123\" - \"folders/\", e.g. \"folders/1234\" - \"organizations/\", e.g. 
\"organizations/1234\" The `supports_under` field of the associated `Constraint` defines whether ancestry prefixes can be used. You can set `allowed_values` and `denied_values` in the same `Policy` if `all_values` is `ALL_VALUES_UNSPECIFIED`. `ALLOW` or `DENY` are used to allow or deny all values. If `all_values` is set to either `ALLOW` or `DENY`, `allowed_values` and `denied_values` must be unset.", "id": "ListPolicy", "properties": { "allValues": { @@ -1651,40 +1655,40 @@ "type": "string" }, "allowedValues": { - "description": "List of values allowed at this resource. Can only be set if `all_values`\nis set to `ALL_VALUES_UNSPECIFIED`.", + "description": "List of values allowed at this resource. Can only be set if `all_values` is set to `ALL_VALUES_UNSPECIFIED`.", "items": { "type": "string" }, "type": "array" }, "deniedValues": { - "description": "List of values denied at this resource. Can only be set if `all_values`\nis set to `ALL_VALUES_UNSPECIFIED`.", + "description": "List of values denied at this resource. Can only be set if `all_values` is set to `ALL_VALUES_UNSPECIFIED`.", "items": { "type": "string" }, "type": "array" }, "inheritFromParent": { - "description": "Determines the inheritance behavior for this `Policy`.\n\nBy default, a `ListPolicy` set at a resource supercedes any `Policy` set\nanywhere up the resource hierarchy. However, if `inherit_from_parent` is\nset to `true`, then the values from the effective `Policy` of the parent\nresource are inherited, meaning the values set in this `Policy` are\nadded to the values inherited up the hierarchy.\n\nSetting `Policy` hierarchies that inherit both allowed values and denied\nvalues isn't recommended in most circumstances to keep the configuration\nsimple and understandable. However, it is possible to set a `Policy` with\n`allowed_values` set that inherits a `Policy` with `denied_values` set.\nIn this case, the values that are allowed must be in `allowed_values` and\nnot present in `denied_values`.\n\nFor example, suppose you have a `Constraint`\n`constraints/serviceuser.services`, which has a `constraint_type` of\n`list_constraint`, and with `constraint_default` set to `ALLOW`.\nSuppose that at the Organization level, a `Policy` is applied that\nrestricts the allowed API activations to {`E1`, `E2`}. 
Then, if a\n`Policy` is applied to a project below the Organization that has\n`inherit_from_parent` set to `false` and field all_values set to DENY,\nthen an attempt to activate any API will be denied.\n\nThe following examples demonstrate different possible layerings for\n`projects/bar` parented by `organizations/foo`:\n\nExample 1 (no inherited values):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values:\"E2\"}\n `projects/bar` has `inherit_from_parent` `false` and values:\n {allowed_values: \"E3\" allowed_values: \"E4\"}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe accepted values at `projects/bar` are `E3`, and `E4`.\n\nExample 2 (inherited values):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values:\"E2\"}\n `projects/bar` has a `Policy` with values:\n {value: \"E3\" value: \"E4\" inherit_from_parent: true}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe accepted values at `projects/bar` are `E1`, `E2`, `E3`, and `E4`.\n\nExample 3 (inheriting both allowed and denied values):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values: \"E2\"}\n `projects/bar` has a `Policy` with:\n {denied_values: \"E1\"}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe value accepted at `projects/bar` is `E2`.\n\nExample 4 (RestoreDefault):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values:\"E2\"}\n `projects/bar` has a `Policy` with values:\n {RestoreDefault: {}}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe accepted values at `projects/bar` are either all or none depending on\nthe value of `constraint_default` (if `ALLOW`, all; if\n`DENY`, none).\n\nExample 5 (no policy inherits parent policy):\n `organizations/foo` has no `Policy` set.\n `projects/bar` has no `Policy` set.\nThe accepted values at both levels are either all or none depending on\nthe value of `constraint_default` (if `ALLOW`, all; if\n`DENY`, none).\n\nExample 6 (ListConstraint allowing all):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values: \"E2\"}\n `projects/bar` has a `Policy` with:\n {all: ALLOW}\nThe accepted values at `organizations/foo` are `E1`, E2`.\nAny value is accepted at `projects/bar`.\n\nExample 7 (ListConstraint allowing none):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values: \"E2\"}\n `projects/bar` has a `Policy` with:\n {all: DENY}\nThe accepted values at `organizations/foo` are `E1`, E2`.\nNo value is accepted at `projects/bar`.\n\nExample 10 (allowed and denied subtrees of Resource Manager hierarchy):\nGiven the following resource hierarchy\n O1-\u003e{F1, F2}; F1-\u003e{P1}; F2-\u003e{P2, P3},\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"under:organizations/O1\"}\n `projects/bar` has a `Policy` with:\n {allowed_values: \"under:projects/P3\"}\n {denied_values: \"under:folders/F2\"}\nThe accepted values at `organizations/foo` are `organizations/O1`,\n `folders/F1`, `folders/F2`, `projects/P1`, `projects/P2`,\n `projects/P3`.\nThe accepted values at `projects/bar` are `organizations/O1`,\n `folders/F1`, `projects/P1`.", + "description": "Determines the inheritance behavior for this `Policy`. By default, a `ListPolicy` set at a resource supersedes any `Policy` set anywhere up the resource hierarchy. 
However, if `inherit_from_parent` is set to `true`, then the values from the effective `Policy` of the parent resource are inherited, meaning the values set in this `Policy` are added to the values inherited up the hierarchy. Setting `Policy` hierarchies that inherit both allowed values and denied values isn't recommended in most circumstances to keep the configuration simple and understandable. However, it is possible to set a `Policy` with `allowed_values` set that inherits a `Policy` with `denied_values` set. In this case, the values that are allowed must be in `allowed_values` and not present in `denied_values`. For example, suppose you have a `Constraint` `constraints/serviceuser.services`, which has a `constraint_type` of `list_constraint`, and with `constraint_default` set to `ALLOW`. Suppose that at the Organization level, a `Policy` is applied that restricts the allowed API activations to {`E1`, `E2`}. Then, if a `Policy` is applied to a project below the Organization that has `inherit_from_parent` set to `false` and field all_values set to DENY, then an attempt to activate any API will be denied. The following examples demonstrate different possible layerings for `projects/bar` parented by `organizations/foo`: Example 1 (no inherited values): `organizations/foo` has a `Policy` with values: {allowed_values: \"E1\" allowed_values:\"E2\"} `projects/bar` has `inherit_from_parent` `false` and values: {allowed_values: \"E3\" allowed_values: \"E4\"} The accepted values at `organizations/foo` are `E1`, `E2`. The accepted values at `projects/bar` are `E3`, and `E4`. Example 2 (inherited values): `organizations/foo` has a `Policy` with values: {allowed_values: \"E1\" allowed_values:\"E2\"} `projects/bar` has a `Policy` with values: {value: \"E3\" value: \"E4\" inherit_from_parent: true} The accepted values at `organizations/foo` are `E1`, `E2`. The accepted values at `projects/bar` are `E1`, `E2`, `E3`, and `E4`. Example 3 (inheriting both allowed and denied values): `organizations/foo` has a `Policy` with values: {allowed_values: \"E1\" allowed_values: \"E2\"} `projects/bar` has a `Policy` with: {denied_values: \"E1\"} The accepted values at `organizations/foo` are `E1`, `E2`. The value accepted at `projects/bar` is `E2`. Example 4 (RestoreDefault): `organizations/foo` has a `Policy` with values: {allowed_values: \"E1\" allowed_values:\"E2\"} `projects/bar` has a `Policy` with values: {RestoreDefault: {}} The accepted values at `organizations/foo` are `E1`, `E2`. The accepted values at `projects/bar` are either all or none depending on the value of `constraint_default` (if `ALLOW`, all; if `DENY`, none). Example 5 (no policy inherits parent policy): `organizations/foo` has no `Policy` set. `projects/bar` has no `Policy` set. The accepted values at both levels are either all or none depending on the value of `constraint_default` (if `ALLOW`, all; if `DENY`, none). Example 6 (ListConstraint allowing all): `organizations/foo` has a `Policy` with values: {allowed_values: \"E1\" allowed_values: \"E2\"} `projects/bar` has a `Policy` with: {all: ALLOW} The accepted values at `organizations/foo` are `E1`, E2`. Any value is accepted at `projects/bar`. Example 7 (ListConstraint allowing none): `organizations/foo` has a `Policy` with values: {allowed_values: \"E1\" allowed_values: \"E2\"} `projects/bar` has a `Policy` with: {all: DENY} The accepted values at `organizations/foo` are `E1`, E2`. No value is accepted at `projects/bar`. 
Example 10 (allowed and denied subtrees of Resource Manager hierarchy): Given the following resource hierarchy O1-\u003e{F1, F2}; F1-\u003e{P1}; F2-\u003e{P2, P3}, `organizations/foo` has a `Policy` with values: {allowed_values: \"under:organizations/O1\"} `projects/bar` has a `Policy` with: {allowed_values: \"under:projects/P3\"} {denied_values: \"under:folders/F2\"} The accepted values at `organizations/foo` are `organizations/O1`, `folders/F1`, `folders/F2`, `projects/P1`, `projects/P2`, `projects/P3`. The accepted values at `projects/bar` are `organizations/O1`, `folders/F1`, `projects/P1`.", "type": "boolean" }, "suggestedValue": { - "description": "Optional. The Google Cloud Console will try to default to a configuration\nthat matches the value specified in this `Policy`. If `suggested_value`\nis not set, it will inherit the value specified higher in the hierarchy,\nunless `inherit_from_parent` is `false`.", + "description": "Optional. The Google Cloud Console will try to default to a configuration that matches the value specified in this `Policy`. If `suggested_value` is not set, it will inherit the value specified higher in the hierarchy, unless `inherit_from_parent` is `false`.", "type": "string" } }, "type": "object" }, "ListProjectsResponse": { - "description": "A page of the response received from the\nListProjects\nmethod.\n\nA paginated response where more pages are available has\n`next_page_token` set. This token can be used in a subsequent request to\nretrieve the next request page.", + "description": "A page of the response received from the ListProjects method. A paginated response where more pages are available has `next_page_token` set. This token can be used in a subsequent request to retrieve the next request page.", "id": "ListProjectsResponse", "properties": { "nextPageToken": { - "description": "Pagination token.\n\nIf the result set is too large to fit in a single response, this token\nis returned. It encodes the position of the current result cursor.\nFeeding this value into a new list request with the `page_token` parameter\ngives the next page of the results.\n\nWhen `next_page_token` is not filled in, there is no next page and\nthe list returned is the last page in the result set.\n\nPagination tokens have a limited lifetime.", + "description": "Pagination token. If the result set is too large to fit in a single response, this token is returned. It encodes the position of the current result cursor. Feeding this value into a new list request with the `page_token` parameter gives the next page of the results. When `next_page_token` is not filled in, there is no next page and the list returned is the last page in the result set. Pagination tokens have a limited lifetime.", "type": "string" }, "projects": { - "description": "The list of Projects that matched the list filter. This list can\nbe paginated.", + "description": "The list of Projects that matched the list filter. 
This list can be paginated.", "items": { "$ref": "Project" }, @@ -1694,11 +1698,11 @@ "type": "object" }, "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", "properties": { "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", "type": "boolean" }, "error": { @@ -1710,11 +1714,11 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", "type": "object" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, "response": { @@ -1722,14 +1726,14 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. 
For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", "type": "object" } }, "type": "object" }, "OrgPolicy": { - "description": "Defines a Cloud Organization `Policy` which is used to specify `Constraints`\nfor configurations of Cloud Platform resources.", + "description": "Defines a Cloud Organization `Policy` which is used to specify `Constraints` for configurations of Cloud Platform resources.", "id": "OrgPolicy", "properties": { "booleanPolicy": { @@ -1737,11 +1741,11 @@ "description": "For boolean `Constraints`, whether to enforce the `Constraint` or not." }, "constraint": { - "description": "The name of the `Constraint` the `Policy` is configuring, for example,\n`constraints/serviceuser.services`.\n\nImmutable after creation.", + "description": "The name of the `Constraint` the `Policy` is configuring, for example, `constraints/serviceuser.services`. A [list of available constraints](/resource-manager/docs/organization-policy/org-policy-constraints) is available. Immutable after creation.", "type": "string" }, "etag": { - "description": "An opaque tag indicating the current version of the `Policy`, used for\nconcurrency control.\n\nWhen the `Policy` is returned from either a `GetPolicy` or a\n`ListOrgPolicy` request, this `etag` indicates the version of the current\n`Policy` to use when executing a read-modify-write loop.\n\nWhen the `Policy` is returned from a `GetEffectivePolicy` request, the\n`etag` will be unset.\n\nWhen the `Policy` is used in a `SetOrgPolicy` method, use the `etag` value\nthat was returned from a `GetOrgPolicy` request as part of a\nread-modify-write loop for concurrency control. Not setting the `etag`in a\n`SetOrgPolicy` request will result in an unconditional write of the\n`Policy`.", + "description": "An opaque tag indicating the current version of the `Policy`, used for concurrency control. When the `Policy` is returned from either a `GetPolicy` or a `ListOrgPolicy` request, this `etag` indicates the version of the current `Policy` to use when executing a read-modify-write loop. When the `Policy` is returned from a `GetEffectivePolicy` request, the `etag` will be unset. When the `Policy` is used in a `SetOrgPolicy` method, use the `etag` value that was returned from a `GetOrgPolicy` request as part of a read-modify-write loop for concurrency control. Not setting the `etag`in a `SetOrgPolicy` request will result in an unconditional write of the `Policy`.", "format": "byte", "type": "string" }, @@ -1751,10 +1755,10 @@ }, "restoreDefault": { "$ref": "RestoreDefault", - "description": "Restores the default behavior of the constraint; independent of\n`Constraint` type." + "description": "Restores the default behavior of the constraint; independent of `Constraint` type." }, "updateTime": { - "description": "The time stamp the `Policy` was previously updated. This is set by the\nserver, not specified by the caller, and represents the last time a call to\n`SetOrgPolicy` was made for that `Policy`. Any value set by the client will\nbe ignored.", + "description": "The time stamp the `Policy` was previously updated. This is set by the server, not specified by the caller, and represents the last time a call to `SetOrgPolicy` was made for that `Policy`. 
Any value set by the client will be ignored.", "format": "google-datetime", "type": "string" }, @@ -1767,7 +1771,7 @@ "type": "object" }, "Organization": { - "description": "The root node in the resource hierarchy to which a particular entity's\n(e.g., company) resources belong.", + "description": "The root node in the resource hierarchy to which a particular entity's (e.g., company) resources belong.", "id": "Organization", "properties": { "creationTime": { @@ -1776,7 +1780,7 @@ "type": "string" }, "displayName": { - "description": "A human-readable string that refers to the Organization in the\nGCP Console UI. This string is set by the server and cannot be\nchanged. The string will be set to the primary domain (for example,\n\"google.com\") of the G Suite customer that owns the organization.", + "description": "A human-readable string that refers to the Organization in the GCP Console UI. This string is set by the server and cannot be changed. The string will be set to the primary domain (for example, \"google.com\") of the G Suite customer that owns the organization.", "type": "string" }, "lifecycleState": { @@ -1787,25 +1791,25 @@ "DELETE_REQUESTED" ], "enumDescriptions": [ - "Unspecified state. This is only useful for distinguishing unset values.", + "Unspecified state. This is only useful for distinguishing unset values.", "The normal and active state.", "The organization has been marked for deletion by the user." ], "type": "string" }, "name": { - "description": "Output only. The resource name of the organization. This is the\norganization's relative path in the API. Its format is\n\"organizations/[organization_id]\". For example, \"organizations/1234\".", + "description": "Output only. The resource name of the organization. This is the organization's relative path in the API. Its format is \"organizations/[organization_id]\". For example, \"organizations/1234\".", "type": "string" }, "owner": { "$ref": "OrganizationOwner", - "description": "The owner of this Organization. The owner should be specified on\ncreation. Once set, it cannot be changed.\nThis field is required." + "description": "The owner of this Organization. The owner should be specified on creation. Once set, it cannot be changed. This field is required." } }, "type": "object" }, "OrganizationOwner": { - "description": "The entity that owns an Organization. The lifetime of the Organization and\nall of its descendants are bound to the `OrganizationOwner`. If the\n`OrganizationOwner` is deleted, the Organization and all its descendants will\nbe deleted.", + "description": "The entity that owns an Organization. The lifetime of the Organization and all of its descendants are bound to the `OrganizationOwner`. If the `OrganizationOwner` is deleted, the Organization and all its descendants will be deleted.", "id": "OrganizationOwner", "properties": { "directoryCustomerId": { @@ -1816,7 +1820,7 @@ "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access\ncontrols for Google Cloud resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). 
A `role` is a named list of\npermissions; each `role` can be an IAM predefined role or a user-created\ncustom role.\n\nFor some types of Google Cloud resources, a `binding` can also specify a\n`condition`, which is a logical expression that allows access to a resource\nonly if the expression evaluates to `true`. A condition can add constraints\nbased on attributes of the request, the resource, or both. To learn which\nresources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).\n\n**JSON example:**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\n \"user:eve@example.com\"\n ],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n }\n\n**YAML example:**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n - etag: BwWWja0YfJA=\n - version: 3\n\nFor a description of IAM and its features, see the\n[IAM documentation](https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "auditConfigs": { @@ -1827,19 +1831,19 @@ "type": "array" }, "bindings": { - "description": "Associates a list of `members` to a `role`. Optionally, may specify a\n`condition` that determines how and when the `bindings` are applied. Each\nof the `bindings` must contain at least one member.", + "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", "format": "byte", "type": "string" }, "version": { - "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. 
Requests that specify an invalid value\nare rejected.\n\nAny operation that affects conditional role bindings must specify version\n`3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding\n* Adding a conditional role binding to a policy\n* Changing a conditional role binding in a policy\n* Removing any role binding, with or without a condition, from a policy\n that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may\nspecify any valid version or leave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -1847,11 +1851,11 @@ "type": "object" }, "Project": { - "description": "A Project is a high-level Google Cloud Platform entity. It is a\ncontainer for ACLs, APIs, App Engine Apps, VMs, and other\nGoogle Cloud Platform resources.", + "description": "A Project is a high-level Google Cloud Platform entity. It is a container for ACLs, APIs, App Engine Apps, VMs, and other Google Cloud Platform resources.", "id": "Project", "properties": { "createTime": { - "description": "Creation time.\n\nRead-only.", + "description": "Creation time. Read-only.", "format": "google-datetime", "type": "string" }, @@ -1859,11 +1863,11 @@ "additionalProperties": { "type": "string" }, - "description": "The labels associated with this Project.\n\nLabel keys must be between 1 and 63 characters long and must conform\nto the following regular expression: \\[a-z\\](\\[-a-z0-9\\]*\\[a-z0-9\\])?.\n\nLabel values must be between 0 and 63 characters long and must conform\nto the regular expression (\\[a-z\\](\\[-a-z0-9\\]*\\[a-z0-9\\])?)?. 
A label\nvalue can be empty.\n\nNo more than 256 labels can be associated with a given resource.\n\nClients should store labels in a representation such as JSON that does not\ndepend on specific characters being disallowed.\n\nExample: \u003ccode\u003e\"environment\" : \"dev\"\u003c/code\u003e\nRead-write.", + "description": "The labels associated with this Project. Label keys must be between 1 and 63 characters long and must conform to the following regular expression: a-z{0,62}. Label values must be between 0 and 63 characters long and must conform to the regular expression [a-z0-9_-]{0,63}. A label value can be empty. No more than 256 labels can be associated with a given resource. Clients should store labels in a representation such as JSON that does not depend on specific characters being disallowed. Example: \"environment\" : \"dev\" Read-write.", "type": "object" }, "lifecycleState": { - "description": "The Project lifecycle state.\n\nRead-only.", + "description": "The Project lifecycle state. Read-only.", "enum": [ "LIFECYCLE_STATE_UNSPECIFIED", "ACTIVE", @@ -1871,27 +1875,27 @@ "DELETE_IN_PROGRESS" ], "enumDescriptions": [ - "Unspecified state. This is only used/useful for distinguishing\nunset values.", + "Unspecified state. This is only used/useful for distinguishing unset values.", "The normal and active state.", - "The project has been marked for deletion by the user\n(by invoking\nDeleteProject)\nor by the system (Google Cloud Platform).\nThis can generally be reversed by invoking UndeleteProject.", + "The project has been marked for deletion by the user (by invoking DeleteProject) or by the system (Google Cloud Platform). This can generally be reversed by invoking UndeleteProject.", "This lifecycle state is no longer used and not returned by the API." ], "type": "string" }, "name": { - "description": "The optional user-assigned display name of the Project.\nWhen present it must be between 4 to 30 characters.\nAllowed characters are: lowercase and uppercase letters, numbers,\nhyphen, single-quote, double-quote, space, and exclamation point.\n\nExample: \u003ccode\u003eMy Project\u003c/code\u003e\nRead-write.", + "description": "The optional user-assigned display name of the Project. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, single-quote, double-quote, space, and exclamation point. Example: `My Project` Read-write.", "type": "string" }, "parent": { "$ref": "ResourceId", - "description": "An optional reference to a parent Resource.\n\nSupported parent types include \"organization\" and \"folder\". Once set, the\nparent cannot be cleared. The `parent` can be set on creation or using the\n`UpdateProject` method; the end user must have the\n`resourcemanager.projects.create` permission on the parent.\n\nRead-write." + "description": "An optional reference to a parent Resource. Supported parent types include \"organization\" and \"folder\". Once set, the parent cannot be cleared. The `parent` can be set on creation or using the `UpdateProject` method; the end user must have the `resourcemanager.projects.create` permission on the parent." }, "projectId": { - "description": "The unique, user-assigned ID of the Project.\nIt must be 6 to 30 lowercase letters, digits, or hyphens.\nIt must start with a letter.\nTrailing hyphens are prohibited.\n\nExample: \u003ccode\u003etokyo-rain-123\u003c/code\u003e\nRead-only after creation.", + "description": "The unique, user-assigned ID of the Project. 
It must be 6 to 30 lowercase letters, digits, or hyphens. It must start with a letter. Trailing hyphens are prohibited. Example: `tokyo-rain-123` Read-only after creation.", "type": "string" }, "projectNumber": { - "description": "The number uniquely identifying the project.\n\nExample: \u003ccode\u003e415104041262\u003c/code\u003e\nRead-only.", + "description": "The number uniquely identifying the project. Example: `415104041262` Read-only.", "format": "int64", "type": "string" } @@ -1899,7 +1903,7 @@ "type": "object" }, "ProjectCreationStatus": { - "description": "A status object which is used as the `metadata` field for the Operation\nreturned by CreateProject. It provides insight for when significant phases of\nProject creation have completed.", + "description": "A status object which is used as the `metadata` field for the Operation returned by CreateProject. It provides insight for when significant phases of Project creation have completed.", "id": "ProjectCreationStatus", "properties": { "createTime": { @@ -1908,7 +1912,7 @@ "type": "string" }, "gettable": { - "description": "True if the project can be retrieved using GetProject. No other operations\non the project are guaranteed to work until the project creation is\ncomplete.", + "description": "True if the project can be retrieved using GetProject. No other operations on the project are guaranteed to work until the project creation is complete.", "type": "boolean" }, "ready": { @@ -1919,22 +1923,22 @@ "type": "object" }, "ResourceId": { - "description": "A container to reference an id for any resource type. A `resource` in Google\nCloud Platform is a generic term for something you (a developer) may want to\ninteract with through one of our API's. Some examples are an App Engine app,\na Compute Engine instance, a Cloud SQL database, and so on.", + "description": "A container to reference an id for any resource type. A `resource` in Google Cloud Platform is a generic term for something you (a developer) may want to interact with through one of our API's. Some examples are an App Engine app, a Compute Engine instance, a Cloud SQL database, and so on.", "id": "ResourceId", "properties": { "id": { - "description": "Required field for the type-specific id. This should correspond to the id\nused in the type-specific API's.", + "description": "The type-specific id. This should correspond to the id used in the type-specific API's.", "type": "string" }, "type": { - "description": "Required field representing the resource type this id is for.\nAt present, the valid types are: \"organization\", \"folder\", and \"project\".", + "description": "The resource type this id is for. At present, the valid types are: \"organization\", \"folder\", and \"project\".", "type": "string" } }, "type": "object" }, "RestoreDefault": { - "description": "Ignores policies set above this resource and restores the\n`constraint_default` enforcement behavior of the specific `Constraint` at\nthis resource.\n\nSuppose that `constraint_default` is set to `ALLOW` for the\n`Constraint` `constraints/serviceuser.services`. Suppose that organization\nfoo.com sets a `Policy` at their Organization resource node that restricts\nthe allowed service activations to deny all service activations. 
They\ncould then set a `Policy` with the `policy_type` `restore_default` on\nseveral experimental projects, restoring the `constraint_default`\nenforcement of the `Constraint` for only those projects, allowing those\nprojects to have all services activated.", + "description": "Ignores policies set above this resource and restores the `constraint_default` enforcement behavior of the specific `Constraint` at this resource. Suppose that `constraint_default` is set to `ALLOW` for the `Constraint` `constraints/serviceuser.services`. Suppose that organization foo.com sets a `Policy` at their Organization resource node that restricts the allowed service activations to deny all service activations. They could then set a `Policy` with the `policy_type` `restore_default` on several experimental projects, restoring the `constraint_default` enforcement of the `Constraint` for only those projects, allowing those projects to have all services activated.", "id": "RestoreDefault", "properties": {}, "type": "object" @@ -1944,16 +1948,16 @@ "id": "SearchOrganizationsRequest", "properties": { "filter": { - "description": "An optional query string used to filter the Organizations to return in\nthe response. Filter rules are case-insensitive.\n\n\nOrganizations may be filtered by `owner.directoryCustomerId` or by\n`domain`, where the domain is a G Suite domain, for example:\n\n* Filter `owner.directorycustomerid:123456789` returns Organization\nresources with `owner.directory_customer_id` equal to `123456789`.\n* Filter `domain:google.com` returns Organization resources corresponding\nto the domain `google.com`.\n\nThis field is optional.", + "description": "An optional query string used to filter the Organizations to return in the response. Filter rules are case-insensitive. Organizations may be filtered by `owner.directoryCustomerId` or by `domain`, where the domain is a G Suite domain, for example: * Filter `owner.directorycustomerid:123456789` returns Organization resources with `owner.directory_customer_id` equal to `123456789`. * Filter `domain:google.com` returns Organization resources corresponding to the domain `google.com`. This field is optional.", "type": "string" }, "pageSize": { - "description": "The maximum number of Organizations to return in the response.\nThis field is optional.", + "description": "The maximum number of Organizations to return in the response. This field is optional.", "format": "int32", "type": "integer" }, "pageToken": { - "description": "A pagination token returned from a previous call to `SearchOrganizations`\nthat indicates from where listing should continue.\nThis field is optional.", + "description": "A pagination token returned from a previous call to `SearchOrganizations` that indicates from where listing should continue. This field is optional.", "type": "string" } }, @@ -1964,11 +1968,11 @@ "id": "SearchOrganizationsResponse", "properties": { "nextPageToken": { - "description": "A pagination token to be used to retrieve the next page of results. If the\nresult is too large to fit within the page size specified in the request,\nthis field will be set with a token that can be used to fetch the next page\nof results. If this field is empty, it indicates that this response\ncontains the last page of results.", + "description": "A pagination token to be used to retrieve the next page of results. If the result is too large to fit within the page size specified in the request, this field will be set with a token that can be used to fetch the next page of results. 
If this field is empty, it indicates that this response contains the last page of results.", "type": "string" }, "organizations": { - "description": "The list of Organizations that matched the search query, possibly\npaginated.", + "description": "The list of Organizations that matched the search query, possibly paginated.", "items": { "$ref": "Organization" }, @@ -1983,10 +1987,10 @@ "properties": { "policy": { "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." }, "updateMask": { - "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. If no mask is provided, the\nfollowing default mask is used:\n\n`paths: \"bindings, etag\"`", + "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: \"bindings, etag\"`", "format": "google-fieldmask", "type": "string" } @@ -2005,7 +2009,7 @@ "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -2014,7 +2018,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -2025,7 +2029,7 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, @@ -2036,7 +2040,7 @@ "id": "TestIamPermissionsRequest", "properties": { "permissions": { - "description": "The set of permissions to check for the `resource`. 
Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "items": { "type": "string" }, @@ -2050,7 +2054,7 @@ "id": "TestIamPermissionsResponse", "properties": { "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { "type": "string" }, @@ -2060,7 +2064,7 @@ "type": "object" }, "UndeleteProjectRequest": { - "description": "The request sent to the UndeleteProject\nmethod.", + "description": "The request sent to the UndeleteProject method.", "id": "UndeleteProjectRequest", "properties": {}, "type": "object" diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go index 2bd558333f1..856908c95c2 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go @@ -79,6 +79,7 @@ const apiId = "cloudresourcemanager:v1" const apiName = "cloudresourcemanager" const apiVersion = "v1" const basePath = "https://cloudresourcemanager.googleapis.com/" +const mtlsBasePath = "https://cloudresourcemanager.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -98,6 +99,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -226,72 +228,31 @@ func (s *Ancestor) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AuditConfig: Specifies the audit configuration for a service. -// The configuration determines which permission types are logged, and -// what -// identities, if any, are exempted from logging. -// An AuditConfig must have one or more AuditLogConfigs. -// -// If there are AuditConfigs for both `allServices` and a specific -// service, -// the union of the two AuditConfigs is used for that service: the -// log_types -// specified in each AuditConfig are enabled, and the exempted_members -// in each -// AuditLogConfig are exempted. 
-// -// Example Policy with multiple AuditConfigs: -// -// { -// "audit_configs": [ -// { -// "service": "allServices" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// }, -// { -// "log_type": "ADMIN_READ", -// } -// ] -// }, -// { -// "service": "sampleservice.googleapis.com" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// }, -// { -// "log_type": "DATA_WRITE", -// "exempted_members": [ -// "user:aliya@example.com" -// ] -// } -// ] -// } -// ] -// } -// -// For sampleservice, this policy enables DATA_READ, DATA_WRITE and -// ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, -// and -// aliya@example.com from DATA_WRITE logging. +// AuditConfig: Specifies the audit configuration for a service. The +// configuration determines which permission types are logged, and what +// identities, if any, are exempted from logging. An AuditConfig must +// have one or more AuditLogConfigs. If there are AuditConfigs for both +// `allServices` and a specific service, the union of the two +// AuditConfigs is used for that service: the log_types specified in +// each AuditConfig are enabled, and the exempted_members in each +// AuditLogConfig are exempted. Example Policy with multiple +// AuditConfigs: { "audit_configs": [ { "service": "allServices", +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": +// [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { +// "log_type": "ADMIN_READ" } ] }, { "service": +// "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": +// "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ +// "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy +// enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts +// jose@example.com from DATA_READ logging, and aliya@example.com from +// DATA_WRITE logging. type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of // permission. AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` - // Service: Specifies a service that will be enabled for audit - // logging. - // For example, `storage.googleapis.com`, - // `cloudsql.googleapis.com`. + // Service: Specifies a service that will be enabled for audit logging. + // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. // `allServices` is a special value that covers all services. Service string `json:"service,omitempty"` @@ -320,31 +281,15 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { } // AuditLogConfig: Provides the configuration for logging a type of -// permissions. -// Example: -// -// { -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// } -// ] -// } -// -// This enables 'DATA_READ' and 'DATA_WRITE' logging, while -// exempting -// jose@example.com from DATA_READ logging. +// permissions. Example: { "audit_log_configs": [ { "log_type": +// "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { +// "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and +// 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ +// logging. type AuditLogConfig struct { // ExemptedMembers: Specifies the identities that do not cause logging - // for this type of - // permission. - // Follows the same format of Binding.members. 
+ // for this type of permission. Follows the same format of + // Binding.members. ExemptedMembers []string `json:"exemptedMembers,omitempty"` // LogType: The log type that this config enables. @@ -382,98 +327,60 @@ func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: The condition that is associated with this binding. - // - // If the condition evaluates to `true`, then this binding applies to - // the - // current request. - // - // If the condition evaluates to `false`, then this binding does not - // apply to - // the current request. However, a different role binding might grant - // the same - // role to one or more of the members in this binding. - // - // To learn which resources support conditions in their IAM policies, - // see - // the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/r - // esource-policies). + // BindingId: A client-specified ID for this binding. Expected to be + // globally unique to support the internal bindings-by-ID API. + BindingId string `json:"bindingId,omitempty"` + + // Condition: The condition that is associated with this binding. If the + // condition evaluates to `true`, then this binding applies to the + // current request. If the condition evaluates to `false`, then this + // binding does not apply to the current request. However, a different + // role binding might grant the same role to one or more of the members + // in this binding. To learn which resources support conditions in their + // IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud - // Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@example.com` . - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. - // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a user that has been recently deleted. - // For - // example, `alice@example.com?uid=123456789012345678901`. If the - // user is - // recovered, this value reverts to `user:{emailid}` and the - // recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address - // (plus - // unique identifier) representing a service account that has been - // recently - // deleted. For example, - // + // Platform resource. `members` can have the following values: * + // `allUsers`: A special identifier that represents anyone who is on the + // internet; with or without a Google account. * + // `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. 
* + // `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . * + // `serviceAccount:{emailid}`: An email address that represents a + // service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An + // email address that represents a Google group. For example, + // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An + // email address (plus unique identifier) representing a user that has + // been recently deleted. For example, + // `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered + // user retains the role in the binding. * + // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account - // retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a Google group that has been recently - // deleted. For example, - // `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` - // and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. - // - // + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains + // the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: + // An email address (plus unique identifier) representing a Google group + // that has been recently deleted. For example, + // `admins@example.com?uid=123456789012345678901`. If the group is + // recovered, this value reverts to `group:{emailid}` and the recovered + // group retains the role in the binding. * `domain:{domain}`: The G + // Suite domain (primary) that represents all the users of that domain. + // For example, `google.com` or `example.com`. Members []string `json:"members,omitempty"` - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Role: Role that is assigned to `members`. For example, + // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` - // ForceSendFields is a list of field names (e.g. "Condition") to + // ForceSendFields is a list of field names (e.g. "BindingId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -481,7 +388,7 @@ type Binding struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Condition") to include in + // NullFields is a list of field names (e.g. "BindingId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -496,78 +403,43 @@ func (s *Binding) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// BooleanConstraint: A `Constraint` that is either enforced or -// not. -// -// For example a constraint -// `constraints/compute.disableSerialPortAccess`. +// BooleanConstraint: A `Constraint` that is either enforced or not. For +// example a constraint `constraints/compute.disableSerialPortAccess`. // If it is enforced on a VM instance, serial port connections will not -// be -// opened to that instance. +// be opened to that instance. type BooleanConstraint struct { } // BooleanPolicy: Used in `policy_type` to specify how `boolean_policy` -// will behave at this -// resource. +// will behave at this resource. type BooleanPolicy struct { // Enforced: If `true`, then the `Policy` is enforced. If `false`, then - // any - // configuration is acceptable. - // - // Suppose you have a - // `Constraint` + // any configuration is acceptable. Suppose you have a `Constraint` // `constraints/compute.disableSerialPortAccess` with - // `constraint_default` - // set to `ALLOW`. A `Policy` for that `Constraint` exhibits the - // following - // behavior: - // - If the `Policy` at this resource has enforced set to `false`, - // serial - // port connection attempts will be allowed. - // - If the `Policy` at this resource has enforced set to `true`, - // serial - // port connection attempts will be refused. - // - If the `Policy` at this resource is `RestoreDefault`, serial - // port - // connection attempts will be allowed. - // - If no `Policy` is set at this resource or anywhere higher in the - // resource hierarchy, serial port connection attempts will be - // allowed. - // - If no `Policy` is set at this resource, but one exists higher in - // the - // resource hierarchy, the behavior is as if the`Policy` were set - // at - // this resource. - // - // The following examples demonstrate the different possible - // layerings: - // - // Example 1 (nearest `Constraint` wins): - // `organizations/foo` has a `Policy` with: - // {enforced: false} - // `projects/bar` has no `Policy` set. - // The constraint at `projects/bar` and `organizations/foo` will not - // be - // enforced. - // - // Example 2 (enforcement gets replaced): - // `organizations/foo` has a `Policy` with: - // {enforced: false} - // `projects/bar` has a `Policy` with: - // {enforced: true} - // The constraint at `organizations/foo` is not enforced. - // The constraint at `projects/bar` is enforced. - // - // Example 3 (RestoreDefault): - // `organizations/foo` has a `Policy` with: - // {enforced: true} - // `projects/bar` has a `Policy` with: - // {RestoreDefault: {}} - // The constraint at `organizations/foo` is enforced. - // The constraint at `projects/bar` is not enforced, - // because - // `constraint_default` for the `Constraint` is `ALLOW`. + // `constraint_default` set to `ALLOW`. A `Policy` for that `Constraint` + // exhibits the following behavior: - If the `Policy` at this resource + // has enforced set to `false`, serial port connection attempts will be + // allowed. - If the `Policy` at this resource has enforced set to + // `true`, serial port connection attempts will be refused. - If the + // `Policy` at this resource is `RestoreDefault`, serial port connection + // attempts will be allowed. 
- If no `Policy` is set at this resource or + // anywhere higher in the resource hierarchy, serial port connection + // attempts will be allowed. - If no `Policy` is set at this resource, + // but one exists higher in the resource hierarchy, the behavior is as + // if the`Policy` were set at this resource. The following examples + // demonstrate the different possible layerings: Example 1 (nearest + // `Constraint` wins): `organizations/foo` has a `Policy` with: + // {enforced: false} `projects/bar` has no `Policy` set. The constraint + // at `projects/bar` and `organizations/foo` will not be enforced. + // Example 2 (enforcement gets replaced): `organizations/foo` has a + // `Policy` with: {enforced: false} `projects/bar` has a `Policy` with: + // {enforced: true} The constraint at `organizations/foo` is not + // enforced. The constraint at `projects/bar` is enforced. Example 3 + // (RestoreDefault): `organizations/foo` has a `Policy` with: {enforced: + // true} `projects/bar` has a `Policy` with: {RestoreDefault: {}} The + // constraint at `organizations/foo` is enforced. The constraint at + // `projects/bar` is not enforced, because `constraint_default` for the + // `Constraint` is `ALLOW`. Enforced bool `json:"enforced,omitempty"` // ForceSendFields is a list of field names (e.g. "Enforced") to @@ -599,8 +471,7 @@ type ClearOrgPolicyRequest struct { Constraint string `json:"constraint,omitempty"` // Etag: The current version, for concurrency control. Not sending an - // `etag` - // will cause the `Policy` to be cleared blindly. + // `etag` will cause the `Policy` to be cleared blindly. Etag string `json:"etag,omitempty"` // ForceSendFields is a list of field names (e.g. "Constraint") to @@ -627,28 +498,18 @@ func (s *ClearOrgPolicyRequest) MarshalJSON() ([]byte, error) { } // Constraint: A `Constraint` describes a way in which a resource's -// configuration can be -// restricted. For example, it controls which cloud services can be -// activated -// across an organization, or whether a Compute Engine instance can -// have -// serial port connections established. `Constraints` can be configured -// by the -// organization's policy adminstrator to fit the needs of the -// organzation by -// setting Policies for `Constraints` at different locations in -// the -// organization's resource hierarchy. Policies are inherited down the -// resource +// configuration can be restricted. For example, it controls which cloud +// services can be activated across an organization, or whether a +// Compute Engine instance can have serial port connections established. +// `Constraints` can be configured by the organization's policy +// administrator to fit the needs of the organzation by setting Policies +// for `Constraints` at different locations in the organization's +// resource hierarchy. Policies are inherited down the resource // hierarchy from higher levels, but can also be overridden. For details -// about -// the inheritance rules please read about -// Policies. -// -// `Constraints` have a default behavior determined by the -// `constraint_default` -// field, which is the enforcement behavior that is used in the absence -// of a +// about the inheritance rules please read about +// [Policies](/resource-manager/reference/rest/v1/Policy). `Constraints` +// have a default behavior determined by the `constraint_default` field, +// which is the enforcement behavior that is used in the absence of a // `Policy` being defined or inherited for the resource in question. 
type Constraint struct { // BooleanConstraint: Defines this constraint as being a @@ -656,37 +517,29 @@ type Constraint struct { BooleanConstraint *BooleanConstraint `json:"booleanConstraint,omitempty"` // ConstraintDefault: The evaluation behavior of this constraint in the - // absense of 'Policy'. + // absence of 'Policy'. // // Possible values: // "CONSTRAINT_DEFAULT_UNSPECIFIED" - This is only used for - // distinguishing unset values and should never be - // used. + // distinguishing unset values and should never be used. // "ALLOW" - Indicate that all values are allowed for list + // constraints. Indicate that enforcement is off for boolean // constraints. - // Indicate that enforcement is off for boolean constraints. - // "DENY" - Indicate that all values are denied for list - // constraints. + // "DENY" - Indicate that all values are denied for list constraints. // Indicate that enforcement is on for boolean constraints. ConstraintDefault string `json:"constraintDefault,omitempty"` // Description: Detailed description of what this `Constraint` controls - // as well as how and - // where it is enforced. - // - // Mutable. + // as well as how and where it is enforced. Mutable. Description string `json:"description,omitempty"` - // DisplayName: The human readable name. - // - // Mutable. + // DisplayName: The human readable name. Mutable. DisplayName string `json:"displayName,omitempty"` // ListConstraint: Defines this constraint as being a ListConstraint. ListConstraint *ListConstraint `json:"listConstraint,omitempty"` - // Name: Immutable value, required to globally be unique. For - // example, + // Name: Immutable value, required to globally be unique. For example, // `constraints/serviceuser.services` Name string `json:"name,omitempty"` @@ -718,17 +571,11 @@ func (s *Constraint) MarshalJSON() ([]byte, error) { } // Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -736,65 +583,40 @@ type Empty struct { } // Expr: Represents a textual expression in the Common Expression -// Language (CEL) -// syntax. CEL is a C-like expression language. The syntax and semantics -// of CEL -// are documented at https://github.com/google/cel-spec. 
-// -// Example (Comparison): -// -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" -// -// Example (Equality): -// -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == -// request.auth.claims.email" -// -// Example (Logic): -// -// title: "Public documents" -// description: "Determine whether the document should be publicly -// visible" -// expression: "document.type != 'private' && document.type != -// 'internal'" -// -// Example (Data Manipulation): -// -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + -// string(document.create_time)" -// -// The exact variables and functions that may be referenced within an -// expression -// are determined by the service that evaluates it. See the -// service -// documentation for additional information. +// Language (CEL) syntax. CEL is a C-like expression language. The +// syntax and semantics of CEL are documented at +// https://github.com/google/cel-spec. Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" +// description: "Determine whether the document should be publicly +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. type Expr struct { // Description: Optional. Description of the expression. This is a - // longer text which - // describes the expression, e.g. when hovered over it in a UI. + // longer text which describes the expression, e.g. when hovered over it + // in a UI. Description string `json:"description,omitempty"` // Expression: Textual representation of an expression in Common - // Expression Language - // syntax. + // Expression Language syntax. Expression string `json:"expression,omitempty"` // Location: Optional. String indicating the location of the expression - // for error - // reporting, e.g. a file name and a position in the file. + // for error reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` // Title: Optional. Title for the expression, i.e. a short string - // describing - // its purpose. This can be used e.g. in UIs which allow to enter - // the - // expression. + // describing its purpose. This can be used e.g. in UIs which allow to + // enter the expression. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Description") to @@ -823,8 +645,7 @@ func (s *Expr) MarshalJSON() ([]byte, error) { // FolderOperation: Metadata describing a long running folder operation type FolderOperation struct { // DestinationParent: The resource name of the folder or organization we - // are either creating - // the folder under or moving the folder to. + // are either creating the folder under or moving the folder to. DestinationParent string `json:"destinationParent,omitempty"` // DisplayName: The display name of the folder. @@ -838,8 +659,8 @@ type FolderOperation struct { // "MOVE" - A move folder operation. OperationType string `json:"operationType,omitempty"` - // SourceParent: The resource name of the folder's parent. - // Only applicable when the operation_type is MOVE. + // SourceParent: The resource name of the folder's parent. Only + // applicable when the operation_type is MOVE. SourceParent string `json:"sourceParent,omitempty"` // ForceSendFields is a list of field names (e.g. "DestinationParent") @@ -878,8 +699,7 @@ type FolderOperationError struct { // "MAX_CHILD_FOLDERS_VIOLATION" - The attempted action would violate // the max child folders constraint. // "FOLDER_NAME_UNIQUENESS_VIOLATION" - The attempted action would - // violate the locally-unique folder - // display_name constraint. + // violate the locally-unique folder display_name constraint. // "RESOURCE_DELETED_VIOLATION" - The resource being moved has been // deleted. // "PARENT_DELETED_VIOLATION" - The resource a folder was being added @@ -891,8 +711,7 @@ type FolderOperationError struct { // "FOLDER_TO_DELETE_NON_EMPTY_VIOLATION" - The folder the caller is // trying to delete contains active resources. // "DELETED_FOLDER_HEIGHT_VIOLATION" - The attempted action would - // violate the max deleted folder depth - // constraint. + // violate the max deleted folder depth constraint. ErrorMessageId string `json:"errorMessageId,omitempty"` // ForceSendFields is a list of field names (e.g. "ErrorMessageId") to @@ -919,19 +738,15 @@ func (s *FolderOperationError) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// GetAncestryRequest: The request sent to the -// GetAncestry -// method. +// GetAncestryRequest: The request sent to the GetAncestry method. type GetAncestryRequest struct { } -// GetAncestryResponse: Response from the GetAncestry method. +// GetAncestryResponse: Response from the projects.getAncestry method. type GetAncestryResponse struct { // Ancestor: Ancestors are ordered from bottom to top of the resource - // hierarchy. The - // first ancestor is the project itself, followed by the project's - // parent, - // etc.. + // hierarchy. The first ancestor is the project itself, followed by the + // project's parent, etc.. Ancestor []*Ancestor `json:"ancestor,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -994,8 +809,7 @@ func (s *GetEffectiveOrgPolicyRequest) MarshalJSON() ([]byte, error) { // GetIamPolicyRequest: Request message for `GetIamPolicy` method. type GetIamPolicyRequest struct { // Options: OPTIONAL: A `GetPolicyOptions` object for specifying options - // to - // `GetIamPolicy`. + // to `GetIamPolicy`. Options *GetPolicyOptions `json:"options,omitempty"` // ForceSendFields is a list of field names (e.g. "Options") to @@ -1052,24 +866,14 @@ func (s *GetOrgPolicyRequest) MarshalJSON() ([]byte, error) { // GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. 
type GetPolicyOptions struct { // RequestedPolicyVersion: Optional. The policy format version to be - // returned. - // - // Valid values are 0, 1, and 3. Requests specifying an invalid value - // will be - // rejected. - // - // Requests for policies with any conditional bindings must specify - // version 3. - // Policies without any conditional bindings may specify any valid value - // or - // leave the field unset. - // - // To learn which resources support conditions in their IAM policies, - // see - // the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/r - // esource-policies). + // returned. Valid values are 0, 1, and 3. Requests specifying an + // invalid value will be rejected. Requests for policies with any + // conditional bindings must specify version 3. Policies without any + // conditional bindings may specify any valid value or leave the field + // unset. To learn which resources support conditions in their IAM + // policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1098,51 +902,35 @@ func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) { } // Lien: A Lien represents an encumbrance on the actions that can be -// performed on a -// resource. +// performed on a resource. type Lien struct { // CreateTime: The creation time of this Lien. CreateTime string `json:"createTime,omitempty"` - // Name: A system-generated unique identifier for this Lien. - // - // Example: `liens/1234abcd` + // Name: A system-generated unique identifier for this Lien. Example: + // `liens/1234abcd` Name string `json:"name,omitempty"` // Origin: A stable, user-visible/meaningful string identifying the - // origin of the - // Lien, intended to be inspected programmatically. Maximum length of - // 200 - // characters. - // - // Example: 'compute.googleapis.com' + // origin of the Lien, intended to be inspected programmatically. + // Maximum length of 200 characters. Example: 'compute.googleapis.com' Origin string `json:"origin,omitempty"` // Parent: A reference to the resource this Lien is attached to. The - // server will - // validate the parent against those for which Liens are - // supported. - // - // Example: `projects/1234` + // server will validate the parent against those for which Liens are + // supported. Example: `projects/1234` Parent string `json:"parent,omitempty"` // Reason: Concise user-visible strings indicating why an action cannot - // be performed - // on a resource. Maximum length of 200 characters. - // + // be performed on a resource. Maximum length of 200 characters. // Example: 'Holds production API key' Reason string `json:"reason,omitempty"` // Restrictions: The types of operations which should be blocked as a - // result of this Lien. - // Each value should correspond to an IAM permission. The server - // will - // validate the permissions against those for which Liens are - // supported. - // - // An empty list is meaningless and will be rejected. - // - // Example: ['resourcemanager.projects.delete'] + // result of this Lien. Each value should correspond to an IAM + // permission. The server will validate the permissions against those + // for which Liens are supported. An empty list is meaningless and will + // be rejected. 
Example: ['resourcemanager.projects.delete'] Restrictions []string `json:"restrictions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1173,21 +961,17 @@ func (s *Lien) MarshalJSON() ([]byte, error) { } // ListAvailableOrgPolicyConstraintsRequest: The request sent to the -// [ListAvailableOrgPolicyConstraints] -// google.cloud.OrgPolicy.v1.ListAvai -// lableOrgPolicyConstraints] method. +// `ListAvailableOrgPolicyConstraints` method on the project, folder, or +// organization. type ListAvailableOrgPolicyConstraintsRequest struct { // PageSize: Size of the pages to be returned. This is currently - // unsupported and will - // be ignored. The server may at any point start using this field to - // limit - // page size. + // unsupported and will be ignored. The server may at any point start + // using this field to limit page size. PageSize int64 `json:"pageSize,omitempty"` // PageToken: Page token used to retrieve the next page. This is - // currently unsupported - // and will be ignored. The server may at any point start using this - // field. + // currently unsupported and will be ignored. The server may at any + // point start using this field. PageToken string `json:"pageToken,omitempty"` // ForceSendFields is a list of field names (e.g. "PageSize") to @@ -1214,12 +998,10 @@ func (s *ListAvailableOrgPolicyConstraintsRequest) MarshalJSON() ([]byte, error) } // ListAvailableOrgPolicyConstraintsResponse: The response returned from -// the ListAvailableOrgPolicyConstraints method. -// Returns all `Constraints` that could be set at this level of the -// hierarchy +// the `ListAvailableOrgPolicyConstraints` method. Returns all +// `Constraints` that could be set at this level of the hierarchy // (contrast with the response from `ListPolicies`, which returns all -// policies -// which are set). +// policies which are set). type ListAvailableOrgPolicyConstraintsResponse struct { // Constraints: The collection of constraints that are settable on the // request resource. @@ -1257,21 +1039,18 @@ func (s *ListAvailableOrgPolicyConstraintsResponse) MarshalJSON() ([]byte, error } // ListConstraint: A `Constraint` that allows or disallows a list of -// string values, which are -// configured by an Organization's policy administrator with a `Policy`. +// string values, which are configured by an Organization's policy +// administrator with a `Policy`. type ListConstraint struct { // SuggestedValue: Optional. The Google Cloud Console will try to - // default to a configuration - // that matches the value specified in this `Constraint`. + // default to a configuration that matches the value specified in this + // `Constraint`. SuggestedValue string `json:"suggestedValue,omitempty"` // SupportsUnder: Indicates whether subtrees of Cloud Resource Manager - // resource hierarchy - // can be used in `Policy.allowed_values` and `Policy.denied_values`. - // For - // example, "under:folders/123" would match any resource under - // the - // 'folders/123' folder. + // resource hierarchy can be used in `Policy.allowed_values` and + // `Policy.denied_values`. For example, "under:folders/123" would + // match any resource under the 'folders/123' folder. SupportsUnder bool `json:"supportsUnder,omitempty"` // ForceSendFields is a list of field names (e.g. 
"SuggestedValue") to @@ -1304,8 +1083,7 @@ type ListLiensResponse struct { Liens []*Lien `json:"liens,omitempty"` // NextPageToken: Token to retrieve the next page of results, or empty - // if there are no more - // results in the list. + // if there are no more results in the list. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1339,16 +1117,13 @@ func (s *ListLiensResponse) MarshalJSON() ([]byte, error) { // method. type ListOrgPoliciesRequest struct { // PageSize: Size of the pages to be returned. This is currently - // unsupported and will - // be ignored. The server may at any point start using this field to - // limit - // page size. + // unsupported and will be ignored. The server may at any point start + // using this field to limit page size. PageSize int64 `json:"pageSize,omitempty"` // PageToken: Page token used to retrieve the next page. This is - // currently unsupported - // and will be ignored. The server may at any point start using this - // field. + // currently unsupported and will be ignored. The server may at any + // point start using this field. PageToken string `json:"pageToken,omitempty"` // ForceSendFields is a list of field names (e.g. "PageSize") to @@ -1375,17 +1150,16 @@ func (s *ListOrgPoliciesRequest) MarshalJSON() ([]byte, error) { } // ListOrgPoliciesResponse: The response returned from the -// ListOrgPolicies method. It will be empty -// if no `Policies` are set on the resource. +// `ListOrgPolicies` method. It will be empty if no `Policies` are set +// on the resource. type ListOrgPoliciesResponse struct { // NextPageToken: Page token used to retrieve the next page. This is - // currently not used, but - // the server may at any point start supplying a valid token. + // currently not used, but the server may at any point start supplying a + // valid token. NextPageToken string `json:"nextPageToken,omitempty"` // Policies: The `Policies` that are set on the resource. It will be - // empty if no - // `Policies` are set. + // empty if no `Policies` are set. Policies []*OrgPolicy `json:"policies,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1416,39 +1190,23 @@ func (s *ListOrgPoliciesResponse) MarshalJSON() ([]byte, error) { } // ListPolicy: Used in `policy_type` to specify how `list_policy` -// behaves at this -// resource. -// -// `ListPolicy` can define specific values and subtrees of Cloud -// Resource -// Manager resource hierarchy (`Organizations`, `Folders`, `Projects`) -// that -// are allowed or denied by setting the `allowed_values` and -// `denied_values` -// fields. This is achieved by using the `under:` and optional `is:` -// prefixes. -// The `under:` prefix is used to denote resource subtree values. -// The `is:` prefix is used to denote specific values, and is required -// only -// if the value contains a ":". Values prefixed with "is:" are treated -// the -// same as values with no prefix. -// Ancestry subtrees must be in one of the following formats: -// - "projects/", e.g. "projects/tokyo-rain-123" -// - "folders/", e.g. "folders/1234" -// - "organizations/", e.g. -// "organizations/1234" -// The `supports_under` field of the associated `Constraint` defines -// whether -// ancestry prefixes can be used. You can set `allowed_values` -// and -// `denied_values` in the same `Policy` if `all_values` -// is -// `ALL_VALUES_UNSPECIFIED`. `ALLOW` or `DENY` are used to allow or deny -// all -// values. 
If `all_values` is set to either `ALLOW` or -// `DENY`, -// `allowed_values` and `denied_values` must be unset. +// behaves at this resource. `ListPolicy` can define specific values and +// subtrees of Cloud Resource Manager resource hierarchy +// (`Organizations`, `Folders`, `Projects`) that are allowed or denied +// by setting the `allowed_values` and `denied_values` fields. This is +// achieved by using the `under:` and optional `is:` prefixes. The +// `under:` prefix is used to denote resource subtree values. The `is:` +// prefix is used to denote specific values, and is required only if the +// value contains a ":". Values prefixed with "is:" are treated the same +// as values with no prefix. Ancestry subtrees must be in one of the +// following formats: - "projects/", e.g. "projects/tokyo-rain-123" - +// "folders/", e.g. "folders/1234" - "organizations/", e.g. +// "organizations/1234" The `supports_under` field of the associated +// `Constraint` defines whether ancestry prefixes can be used. You can +// set `allowed_values` and `denied_values` in the same `Policy` if +// `all_values` is `ALL_VALUES_UNSPECIFIED`. `ALLOW` or `DENY` are used +// to allow or deny all values. If `all_values` is set to either `ALLOW` +// or `DENY`, `allowed_values` and `denied_values` must be unset. type ListPolicy struct { // AllValues: The policy all_values state. // @@ -1459,144 +1217,86 @@ type ListPolicy struct { // "DENY" - A policy with this set denies all values. AllValues string `json:"allValues,omitempty"` - // AllowedValues: List of values allowed at this resource. Can only be - // set if `all_values` - // is set to `ALL_VALUES_UNSPECIFIED`. + // AllowedValues: List of values allowed at this resource. Can only be + // set if `all_values` is set to `ALL_VALUES_UNSPECIFIED`. AllowedValues []string `json:"allowedValues,omitempty"` // DeniedValues: List of values denied at this resource. Can only be set - // if `all_values` - // is set to `ALL_VALUES_UNSPECIFIED`. + // if `all_values` is set to `ALL_VALUES_UNSPECIFIED`. DeniedValues []string `json:"deniedValues,omitempty"` // InheritFromParent: Determines the inheritance behavior for this - // `Policy`. - // - // By default, a `ListPolicy` set at a resource supercedes any `Policy` - // set - // anywhere up the resource hierarchy. However, if `inherit_from_parent` - // is - // set to `true`, then the values from the effective `Policy` of the - // parent - // resource are inherited, meaning the values set in this `Policy` - // are - // added to the values inherited up the hierarchy. - // - // Setting `Policy` hierarchies that inherit both allowed values and - // denied - // values isn't recommended in most circumstances to keep the - // configuration - // simple and understandable. However, it is possible to set a `Policy` - // with - // `allowed_values` set that inherits a `Policy` with `denied_values` - // set. - // In this case, the values that are allowed must be in `allowed_values` - // and - // not present in `denied_values`. - // - // For example, suppose you have a - // `Constraint` - // `constraints/serviceuser.services`, which has a `constraint_type` - // of - // `list_constraint`, and with `constraint_default` set to - // `ALLOW`. - // Suppose that at the Organization level, a `Policy` is applied - // that - // restricts the allowed API activations to {`E1`, `E2`}. Then, if - // a - // `Policy` is applied to a project below the Organization that - // has + // `Policy`. 
By default, a `ListPolicy` set at a resource supersedes any + // `Policy` set anywhere up the resource hierarchy. However, if + // `inherit_from_parent` is set to `true`, then the values from the + // effective `Policy` of the parent resource are inherited, meaning the + // values set in this `Policy` are added to the values inherited up the + // hierarchy. Setting `Policy` hierarchies that inherit both allowed + // values and denied values isn't recommended in most circumstances to + // keep the configuration simple and understandable. However, it is + // possible to set a `Policy` with `allowed_values` set that inherits a + // `Policy` with `denied_values` set. In this case, the values that are + // allowed must be in `allowed_values` and not present in + // `denied_values`. For example, suppose you have a `Constraint` + // `constraints/serviceuser.services`, which has a `constraint_type` of + // `list_constraint`, and with `constraint_default` set to `ALLOW`. + // Suppose that at the Organization level, a `Policy` is applied that + // restricts the allowed API activations to {`E1`, `E2`}. Then, if a + // `Policy` is applied to a project below the Organization that has // `inherit_from_parent` set to `false` and field all_values set to - // DENY, - // then an attempt to activate any API will be denied. - // - // The following examples demonstrate different possible layerings - // for - // `projects/bar` parented by `organizations/foo`: - // - // Example 1 (no inherited values): - // `organizations/foo` has a `Policy` with values: - // {allowed_values: "E1" allowed_values:"E2"} - // `projects/bar` has `inherit_from_parent` `false` and values: - // {allowed_values: "E3" allowed_values: "E4"} - // The accepted values at `organizations/foo` are `E1`, `E2`. - // The accepted values at `projects/bar` are `E3`, and `E4`. - // - // Example 2 (inherited values): - // `organizations/foo` has a `Policy` with values: - // {allowed_values: "E1" allowed_values:"E2"} - // `projects/bar` has a `Policy` with values: - // {value: "E3" value: "E4" inherit_from_parent: true} - // The accepted values at `organizations/foo` are `E1`, `E2`. - // The accepted values at `projects/bar` are `E1`, `E2`, `E3`, and - // `E4`. - // + // DENY, then an attempt to activate any API will be denied. The + // following examples demonstrate different possible layerings for + // `projects/bar` parented by `organizations/foo`: Example 1 (no + // inherited values): `organizations/foo` has a `Policy` with values: + // {allowed_values: "E1" allowed_values:"E2"} `projects/bar` has + // `inherit_from_parent` `false` and values: {allowed_values: "E3" + // allowed_values: "E4"} The accepted values at `organizations/foo` are + // `E1`, `E2`. The accepted values at `projects/bar` are `E3`, and `E4`. + // Example 2 (inherited values): `organizations/foo` has a `Policy` with + // values: {allowed_values: "E1" allowed_values:"E2"} `projects/bar` has + // a `Policy` with values: {value: "E3" value: "E4" inherit_from_parent: + // true} The accepted values at `organizations/foo` are `E1`, `E2`. The + // accepted values at `projects/bar` are `E1`, `E2`, `E3`, and `E4`. // Example 3 (inheriting both allowed and denied values): - // `organizations/foo` has a `Policy` with values: - // {allowed_values: "E1" allowed_values: "E2"} - // `projects/bar` has a `Policy` with: - // {denied_values: "E1"} - // The accepted values at `organizations/foo` are `E1`, `E2`. - // The value accepted at `projects/bar` is `E2`. 
- // - // Example 4 (RestoreDefault): - // `organizations/foo` has a `Policy` with values: - // {allowed_values: "E1" allowed_values:"E2"} - // `projects/bar` has a `Policy` with values: - // {RestoreDefault: {}} - // The accepted values at `organizations/foo` are `E1`, `E2`. - // The accepted values at `projects/bar` are either all or none - // depending on - // the value of `constraint_default` (if `ALLOW`, all; if - // `DENY`, none). - // - // Example 5 (no policy inherits parent policy): - // `organizations/foo` has no `Policy` set. - // `projects/bar` has no `Policy` set. - // The accepted values at both levels are either all or none depending - // on - // the value of `constraint_default` (if `ALLOW`, all; if - // `DENY`, none). - // - // Example 6 (ListConstraint allowing all): - // `organizations/foo` has a `Policy` with values: - // {allowed_values: "E1" allowed_values: "E2"} - // `projects/bar` has a `Policy` with: - // {all: ALLOW} - // The accepted values at `organizations/foo` are `E1`, E2`. - // Any value is accepted at `projects/bar`. - // - // Example 7 (ListConstraint allowing none): - // `organizations/foo` has a `Policy` with values: - // {allowed_values: "E1" allowed_values: "E2"} - // `projects/bar` has a `Policy` with: - // {all: DENY} - // The accepted values at `organizations/foo` are `E1`, E2`. - // No value is accepted at `projects/bar`. - // - // Example 10 (allowed and denied subtrees of Resource Manager - // hierarchy): - // Given the following resource hierarchy - // O1->{F1, F2}; F1->{P1}; F2->{P2, P3}, - // `organizations/foo` has a `Policy` with values: - // {allowed_values: "under:organizations/O1"} - // `projects/bar` has a `Policy` with: - // {allowed_values: "under:projects/P3"} - // {denied_values: "under:folders/F2"} - // The accepted values at `organizations/foo` are `organizations/O1`, - // `folders/F1`, `folders/F2`, `projects/P1`, `projects/P2`, - // `projects/P3`. - // The accepted values at `projects/bar` are `organizations/O1`, - // `folders/F1`, `projects/P1`. + // `organizations/foo` has a `Policy` with values: {allowed_values: "E1" + // allowed_values: "E2"} `projects/bar` has a `Policy` with: + // {denied_values: "E1"} The accepted values at `organizations/foo` are + // `E1`, `E2`. The value accepted at `projects/bar` is `E2`. Example 4 + // (RestoreDefault): `organizations/foo` has a `Policy` with values: + // {allowed_values: "E1" allowed_values:"E2"} `projects/bar` has a + // `Policy` with values: {RestoreDefault: {}} The accepted values at + // `organizations/foo` are `E1`, `E2`. The accepted values at + // `projects/bar` are either all or none depending on the value of + // `constraint_default` (if `ALLOW`, all; if `DENY`, none). Example 5 + // (no policy inherits parent policy): `organizations/foo` has no + // `Policy` set. `projects/bar` has no `Policy` set. The accepted values + // at both levels are either all or none depending on the value of + // `constraint_default` (if `ALLOW`, all; if `DENY`, none). Example 6 + // (ListConstraint allowing all): `organizations/foo` has a `Policy` + // with values: {allowed_values: "E1" allowed_values: "E2"} + // `projects/bar` has a `Policy` with: {all: ALLOW} The accepted values + // at `organizations/foo` are `E1`, E2`. Any value is accepted at + // `projects/bar`. 
Example 7 (ListConstraint allowing none): + // `organizations/foo` has a `Policy` with values: {allowed_values: "E1" + // allowed_values: "E2"} `projects/bar` has a `Policy` with: {all: DENY} + // The accepted values at `organizations/foo` are `E1`, E2`. No value is + // accepted at `projects/bar`. Example 10 (allowed and denied subtrees + // of Resource Manager hierarchy): Given the following resource + // hierarchy O1->{F1, F2}; F1->{P1}; F2->{P2, P3}, `organizations/foo` + // has a `Policy` with values: {allowed_values: + // "under:organizations/O1"} `projects/bar` has a `Policy` with: + // {allowed_values: "under:projects/P3"} {denied_values: + // "under:folders/F2"} The accepted values at `organizations/foo` are + // `organizations/O1`, `folders/F1`, `folders/F2`, `projects/P1`, + // `projects/P2`, `projects/P3`. The accepted values at `projects/bar` + // are `organizations/O1`, `folders/F1`, `projects/P1`. InheritFromParent bool `json:"inheritFromParent,omitempty"` // SuggestedValue: Optional. The Google Cloud Console will try to - // default to a configuration - // that matches the value specified in this `Policy`. If - // `suggested_value` - // is not set, it will inherit the value specified higher in the - // hierarchy, - // unless `inherit_from_parent` is `false`. + // default to a configuration that matches the value specified in this + // `Policy`. If `suggested_value` is not set, it will inherit the value + // specified higher in the hierarchy, unless `inherit_from_parent` is + // `false`. SuggestedValue string `json:"suggestedValue,omitempty"` // ForceSendFields is a list of field names (e.g. "AllValues") to @@ -1622,37 +1322,22 @@ func (s *ListPolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ListProjectsResponse: A page of the response received from -// the -// ListProjects -// method. -// -// A paginated response where more pages are available -// has -// `next_page_token` set. This token can be used in a subsequent request -// to -// retrieve the next request page. +// ListProjectsResponse: A page of the response received from the +// ListProjects method. A paginated response where more pages are +// available has `next_page_token` set. This token can be used in a +// subsequent request to retrieve the next request page. type ListProjectsResponse struct { - // NextPageToken: Pagination token. - // - // If the result set is too large to fit in a single response, this - // token - // is returned. It encodes the position of the current result - // cursor. - // Feeding this value into a new list request with the `page_token` - // parameter - // gives the next page of the results. - // - // When `next_page_token` is not filled in, there is no next page - // and - // the list returned is the last page in the result set. - // + // NextPageToken: Pagination token. If the result set is too large to + // fit in a single response, this token is returned. It encodes the + // position of the current result cursor. Feeding this value into a new + // list request with the `page_token` parameter gives the next page of + // the results. When `next_page_token` is not filled in, there is no + // next page and the list returned is the last page in the result set. // Pagination tokens have a limited lifetime. NextPageToken string `json:"nextPageToken,omitempty"` // Projects: The list of Projects that matched the list filter. This - // list can - // be paginated. + // list can be paginated. 
Projects []*Project `json:"projects,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1683,52 +1368,38 @@ func (s *ListProjectsResponse) MarshalJSON() ([]byte, error) { } // Operation: This resource represents a long-running operation that is -// the result of a -// network API call. +// the result of a network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in - // progress. - // If `true`, the operation is completed, and either `error` or - // `response` is - // available. + // progress. If `true`, the operation is completed, and either `error` + // or `response` is available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. Error *Status `json:"error,omitempty"` - // Metadata: Service-specific metadata associated with the operation. - // It typically - // contains progress information and common metadata such as create - // time. - // Some services might not provide such metadata. Any method that - // returns a - // long-running operation should document the metadata type, if any. + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same - // service that - // originally returns it. If you use the default HTTP mapping, - // the - // `name` should be a resource name ending with + // service that originally returns it. If you use the default HTTP + // mapping, the `name` should be a resource name ending with // `operations/{unique_id}`. Name string `json:"name,omitempty"` - // Response: The normal response of the operation in case of success. - // If the original - // method returns no data on success, such as `Delete`, the response - // is - // `google.protobuf.Empty`. If the original method is - // standard - // `Get`/`Create`/`Update`, the response should be the resource. For - // other - // methods, the response should have the type `XxxResponse`, where - // `Xxx` - // is the original method name. For example, if the original method - // name - // is `TakeSnapshot()`, the inferred response type - // is - // `TakeSnapshotResponse`. + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as `Delete`, the + // response is `google.protobuf.Empty`. If the original method is + // standard `Get`/`Create`/`Update`, the response should be the + // resource. For other methods, the response should have the type + // `XxxResponse`, where `Xxx` is the original method name. For example, + // if the original method name is `TakeSnapshot()`, the inferred + // response type is `TakeSnapshotResponse`. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1759,60 +1430,42 @@ func (s *Operation) MarshalJSON() ([]byte, error) { } // OrgPolicy: Defines a Cloud Organization `Policy` which is used to -// specify `Constraints` -// for configurations of Cloud Platform resources. +// specify `Constraints` for configurations of Cloud Platform resources. 
type OrgPolicy struct { // BooleanPolicy: For boolean `Constraints`, whether to enforce the // `Constraint` or not. BooleanPolicy *BooleanPolicy `json:"booleanPolicy,omitempty"` // Constraint: The name of the `Constraint` the `Policy` is configuring, - // for example, - // `constraints/serviceuser.services`. - // - // Immutable after creation. + // for example, `constraints/serviceuser.services`. A [list of available + // constraints](/resource-manager/docs/organization-policy/org-policy-con + // straints) is available. Immutable after creation. Constraint string `json:"constraint,omitempty"` // Etag: An opaque tag indicating the current version of the `Policy`, - // used for - // concurrency control. - // - // When the `Policy` is returned from either a `GetPolicy` or - // a - // `ListOrgPolicy` request, this `etag` indicates the version of the - // current - // `Policy` to use when executing a read-modify-write loop. - // - // When the `Policy` is returned from a `GetEffectivePolicy` request, - // the - // `etag` will be unset. - // - // When the `Policy` is used in a `SetOrgPolicy` method, use the `etag` - // value - // that was returned from a `GetOrgPolicy` request as part of - // a + // used for concurrency control. When the `Policy` is returned from + // either a `GetPolicy` or a `ListOrgPolicy` request, this `etag` + // indicates the version of the current `Policy` to use when executing a + // read-modify-write loop. When the `Policy` is returned from a + // `GetEffectivePolicy` request, the `etag` will be unset. When the + // `Policy` is used in a `SetOrgPolicy` method, use the `etag` value + // that was returned from a `GetOrgPolicy` request as part of a // read-modify-write loop for concurrency control. Not setting the - // `etag`in a - // `SetOrgPolicy` request will result in an unconditional write of - // the - // `Policy`. + // `etag`in a `SetOrgPolicy` request will result in an unconditional + // write of the `Policy`. Etag string `json:"etag,omitempty"` // ListPolicy: List of values either allowed or disallowed. ListPolicy *ListPolicy `json:"listPolicy,omitempty"` // RestoreDefault: Restores the default behavior of the constraint; - // independent of - // `Constraint` type. + // independent of `Constraint` type. RestoreDefault *RestoreDefault `json:"restoreDefault,omitempty"` // UpdateTime: The time stamp the `Policy` was previously updated. This - // is set by the - // server, not specified by the caller, and represents the last time a - // call to - // `SetOrgPolicy` was made for that `Policy`. Any value set by the - // client will - // be ignored. + // is set by the server, not specified by the caller, and represents the + // last time a call to `SetOrgPolicy` was made for that `Policy`. Any + // value set by the client will be ignored. UpdateTime string `json:"updateTime,omitempty"` // Version: Version of the `Policy`. Default version is 0; @@ -1846,19 +1499,15 @@ func (s *OrgPolicy) MarshalJSON() ([]byte, error) { } // Organization: The root node in the resource hierarchy to which a -// particular entity's -// (e.g., company) resources belong. +// particular entity's (e.g., company) resources belong. type Organization struct { // CreationTime: Timestamp when the Organization was created. Assigned // by the server. CreationTime string `json:"creationTime,omitempty"` // DisplayName: A human-readable string that refers to the Organization - // in the - // GCP Console UI. This string is set by the server and cannot - // be - // changed. 
The string will be set to the primary domain (for - // example, + // in the GCP Console UI. This string is set by the server and cannot be + // changed. The string will be set to the primary domain (for example, // "google.com") of the G Suite customer that owns the organization. DisplayName string `json:"displayName,omitempty"` @@ -1866,24 +1515,20 @@ type Organization struct { // by the server. // // Possible values: - // "LIFECYCLE_STATE_UNSPECIFIED" - Unspecified state. This is only + // "LIFECYCLE_STATE_UNSPECIFIED" - Unspecified state. This is only // useful for distinguishing unset values. // "ACTIVE" - The normal and active state. // "DELETE_REQUESTED" - The organization has been marked for deletion // by the user. LifecycleState string `json:"lifecycleState,omitempty"` - // Name: Output only. The resource name of the organization. This is - // the - // organization's relative path in the API. Its format - // is + // Name: Output only. The resource name of the organization. This is the + // organization's relative path in the API. Its format is // "organizations/[organization_id]". For example, "organizations/1234". Name string `json:"name,omitempty"` // Owner: The owner of this Organization. The owner should be specified - // on - // creation. Once set, it cannot be changed. - // This field is required. + // on creation. Once set, it cannot be changed. This field is required. Owner *OrganizationOwner `json:"owner,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1914,12 +1559,9 @@ func (s *Organization) MarshalJSON() ([]byte, error) { } // OrganizationOwner: The entity that owns an Organization. The lifetime -// of the Organization and -// all of its descendants are bound to the `OrganizationOwner`. If -// the -// `OrganizationOwner` is deleted, the Organization and all its -// descendants will -// be deleted. +// of the Organization and all of its descendants are bound to the +// `OrganizationOwner`. If the `OrganizationOwner` is deleted, the +// Organization and all its descendants will be deleted. type OrganizationOwner struct { // DirectoryCustomerId: The G Suite customer id used in the Directory // API. @@ -1950,154 +1592,77 @@ func (s *OrganizationOwner) MarshalJSON() ([]byte, error) { } // Policy: An Identity and Access Management (IAM) policy, which -// specifies access -// controls for Google Cloud resources. -// -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or -// more -// `members` to a single `role`. Members can be user accounts, service -// accounts, +// specifies access controls for Google Cloud resources. A `Policy` is a +// collection of `bindings`. A `binding` binds one or more `members` to +// a single `role`. Members can be user accounts, service accounts, // Google groups, and domains (such as G Suite). A `role` is a named -// list of -// permissions; each `role` can be an IAM predefined role or a -// user-created -// custom role. -// -// For some types of Google Cloud resources, a `binding` can also -// specify a -// `condition`, which is a logical expression that allows access to a -// resource -// only if the expression evaluates to `true`. A condition can add -// constraints -// based on attributes of the request, the resource, or both. To learn -// which -// resources support conditions in their IAM policies, see the -// [IAM +// list of permissions; each `role` can be an IAM predefined role or a +// user-created custom role. 
For some types of Google Cloud resources, a +// `binding` can also specify a `condition`, which is a logical +// expression that allows access to a resource only if the expression +// evaluates to `true`. A condition can add constraints based on +// attributes of the request, the resource, or both. To learn which +// resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p -// olicies). -// -// **JSON example:** -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": [ -// "user:eve@example.com" -// ], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// -// **YAML example:** -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// - etag: BwWWja0YfJA= -// - version: 3 -// -// For a description of IAM and its features, see the -// [IAM documentation](https://cloud.google.com/iam/docs/). +// olicies). **JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: +// 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). type Policy struct { // AuditConfigs: Specifies cloud audit logging configuration for this // policy. AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` // Bindings: Associates a list of `members` to a `role`. Optionally, may - // specify a - // `condition` that determines how and when the `bindings` are applied. - // Each - // of the `bindings` must contain at least one member. + // specify a `condition` that determines how and when the `bindings` are + // applied. 
Each of the `bindings` must contain at least one member. Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. - // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. **Important:** If you use IAM + // Conditions, you must include the `etag` field whenever you call + // `setIamPolicy`. If you omit this field, then IAM allows you to + // overwrite a version `3` policy with a version `1` policy, and all of // the conditions in the version `3` policy are lost. Etag string `json:"etag,omitempty"` - // Version: Specifies the format of the policy. - // - // Valid values are `0`, `1`, and `3`. Requests that specify an invalid - // value - // are rejected. - // + // Version: Specifies the format of the policy. Valid values are `0`, + // `1`, and `3`. Requests that specify an invalid value are rejected. // Any operation that affects conditional role bindings must specify - // version - // `3`. This requirement applies to the following operations: - // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a - // policy - // that includes conditions - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of - // the conditions in the version `3` policy are lost. - // - // If a policy does not include any conditions, operations on that - // policy may - // specify any valid version or leave the field unset. - // - // To learn which resources support conditions in their IAM policies, - // see the - // [IAM + // version `3`. This requirement applies to the following operations: * + // Getting a policy that includes a conditional role binding * Adding a + // conditional role binding to a policy * Changing a conditional role + // binding in a policy * Removing any role binding, with or without a + // condition, from a policy that includes conditions **Important:** If + // you use IAM Conditions, you must include the `etag` field whenever + // you call `setIamPolicy`. 
If you omit this field, then IAM allows you + // to overwrite a version `3` policy with a version `1` policy, and all + // of the conditions in the version `3` policy are lost. If a policy + // does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. To learn which + // resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p // olicies). Version int64 `json:"version,omitempty"` @@ -2129,97 +1694,60 @@ func (s *Policy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Project: A Project is a high-level Google Cloud Platform entity. It -// is a -// container for ACLs, APIs, App Engine Apps, VMs, and other -// Google Cloud Platform resources. +// Project: A Project is a high-level Google Cloud Platform entity. It +// is a container for ACLs, APIs, App Engine Apps, VMs, and other Google +// Cloud Platform resources. type Project struct { - // CreateTime: Creation time. - // - // Read-only. + // CreateTime: Creation time. Read-only. CreateTime string `json:"createTime,omitempty"` - // Labels: The labels associated with this Project. - // - // Label keys must be between 1 and 63 characters long and must - // conform - // to the following regular expression: - // \[a-z\](\[-a-z0-9\]*\[a-z0-9\])?. - // - // Label values must be between 0 and 63 characters long and must - // conform - // to the regular expression (\[a-z\](\[-a-z0-9\]*\[a-z0-9\])?)?. A - // label - // value can be empty. - // - // No more than 256 labels can be associated with a given - // resource. - // - // Clients should store labels in a representation such as JSON that - // does not - // depend on specific characters being disallowed. - // - // Example: "environment" : "dev" + // Labels: The labels associated with this Project. Label keys must be + // between 1 and 63 characters long and must conform to the following + // regular expression: a-z{0,62}. Label values must be between 0 and 63 + // characters long and must conform to the regular expression + // [a-z0-9_-]{0,63}. A label value can be empty. No more than 256 labels + // can be associated with a given resource. Clients should store labels + // in a representation such as JSON that does not depend on specific + // characters being disallowed. Example: "environment" : "dev" // Read-write. Labels map[string]string `json:"labels,omitempty"` - // LifecycleState: The Project lifecycle state. - // - // Read-only. + // LifecycleState: The Project lifecycle state. Read-only. // // Possible values: - // "LIFECYCLE_STATE_UNSPECIFIED" - Unspecified state. This is only - // used/useful for distinguishing - // unset values. + // "LIFECYCLE_STATE_UNSPECIFIED" - Unspecified state. This is only + // used/useful for distinguishing unset values. // "ACTIVE" - The normal and active state. // "DELETE_REQUESTED" - The project has been marked for deletion by - // the user - // (by invoking - // DeleteProject) - // or by the system (Google Cloud Platform). - // This can generally be reversed by invoking UndeleteProject. + // the user (by invoking DeleteProject) or by the system (Google Cloud + // Platform). This can generally be reversed by invoking + // UndeleteProject. // "DELETE_IN_PROGRESS" - This lifecycle state is no longer used and // not returned by the API. LifecycleState string `json:"lifecycleState,omitempty"` - // Name: The optional user-assigned display name of the Project. 
- // When present it must be between 4 to 30 characters. - // Allowed characters are: lowercase and uppercase letters, - // numbers, - // hyphen, single-quote, double-quote, space, and exclamation - // point. - // - // Example: My Project + // Name: The optional user-assigned display name of the Project. When + // present it must be between 4 to 30 characters. Allowed characters + // are: lowercase and uppercase letters, numbers, hyphen, single-quote, + // double-quote, space, and exclamation point. Example: `My Project` // Read-write. Name string `json:"name,omitempty"` - // Parent: An optional reference to a parent Resource. - // - // Supported parent types include "organization" and "folder". Once set, - // the - // parent cannot be cleared. The `parent` can be set on creation or - // using the - // `UpdateProject` method; the end user must have - // the - // `resourcemanager.projects.create` permission on the - // parent. - // - // Read-write. + // Parent: An optional reference to a parent Resource. Supported parent + // types include "organization" and "folder". Once set, the parent + // cannot be cleared. The `parent` can be set on creation or using the + // `UpdateProject` method; the end user must have the + // `resourcemanager.projects.create` permission on the parent. Parent *ResourceId `json:"parent,omitempty"` - // ProjectId: The unique, user-assigned ID of the Project. - // It must be 6 to 30 lowercase letters, digits, or hyphens. - // It must start with a letter. - // Trailing hyphens are prohibited. - // - // Example: tokyo-rain-123 + // ProjectId: The unique, user-assigned ID of the Project. It must be 6 + // to 30 lowercase letters, digits, or hyphens. It must start with a + // letter. Trailing hyphens are prohibited. Example: `tokyo-rain-123` // Read-only after creation. ProjectId string `json:"projectId,omitempty"` - // ProjectNumber: The number uniquely identifying the project. - // - // Example: 415104041262 - // Read-only. + // ProjectNumber: The number uniquely identifying the project. Example: + // `415104041262` Read-only. ProjectNumber int64 `json:"projectNumber,omitempty,string"` // ServerResponse contains the HTTP response code and headers from the @@ -2250,19 +1778,16 @@ func (s *Project) MarshalJSON() ([]byte, error) { } // ProjectCreationStatus: A status object which is used as the -// `metadata` field for the Operation -// returned by CreateProject. It provides insight for when significant -// phases of -// Project creation have completed. +// `metadata` field for the Operation returned by CreateProject. It +// provides insight for when significant phases of Project creation have +// completed. type ProjectCreationStatus struct { // CreateTime: Creation time of the project creation workflow. CreateTime string `json:"createTime,omitempty"` // Gettable: True if the project can be retrieved using GetProject. No - // other operations - // on the project are guaranteed to work until the project creation - // is - // complete. + // other operations on the project are guaranteed to work until the + // project creation is complete. Gettable bool `json:"gettable,omitempty"` // Ready: True if the project creation process is complete. @@ -2292,22 +1817,17 @@ func (s *ProjectCreationStatus) MarshalJSON() ([]byte, error) { } // ResourceId: A container to reference an id for any resource type. A -// `resource` in Google -// Cloud Platform is a generic term for something you (a developer) may -// want to -// interact with through one of our API's. 
Some examples are an App -// Engine app, -// a Compute Engine instance, a Cloud SQL database, and so on. +// `resource` in Google Cloud Platform is a generic term for something +// you (a developer) may want to interact with through one of our API's. +// Some examples are an App Engine app, a Compute Engine instance, a +// Cloud SQL database, and so on. type ResourceId struct { - // Id: Required field for the type-specific id. This should correspond - // to the id - // used in the type-specific API's. + // Id: The type-specific id. This should correspond to the id used in + // the type-specific API's. Id string `json:"id,omitempty"` - // Type: Required field representing the resource type this id is - // for. - // At present, the valid types are: "organization", "folder", and - // "project". + // Type: The resource type this id is for. At present, the valid types + // are: "organization", "folder", and "project". Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Id") to @@ -2334,26 +1854,16 @@ func (s *ResourceId) MarshalJSON() ([]byte, error) { } // RestoreDefault: Ignores policies set above this resource and restores -// the -// `constraint_default` enforcement behavior of the specific -// `Constraint` at -// this resource. -// -// Suppose that `constraint_default` is set to `ALLOW` for -// the -// `Constraint` `constraints/serviceuser.services`. Suppose that -// organization -// foo.com sets a `Policy` at their Organization resource node that -// restricts -// the allowed service activations to deny all service activations. -// They -// could then set a `Policy` with the `policy_type` `restore_default` -// on -// several experimental projects, restoring the -// `constraint_default` +// the `constraint_default` enforcement behavior of the specific +// `Constraint` at this resource. Suppose that `constraint_default` is +// set to `ALLOW` for the `Constraint` +// `constraints/serviceuser.services`. Suppose that organization foo.com +// sets a `Policy` at their Organization resource node that restricts +// the allowed service activations to deny all service activations. They +// could then set a `Policy` with the `policy_type` `restore_default` on +// several experimental projects, restoring the `constraint_default` // enforcement of the `Constraint` for only those projects, allowing -// those -// projects to have all services activated. +// those projects to have all services activated. type RestoreDefault struct { } @@ -2361,33 +1871,22 @@ type RestoreDefault struct { // `SearchOrganizations` method. type SearchOrganizationsRequest struct { // Filter: An optional query string used to filter the Organizations to - // return in - // the response. Filter rules are case-insensitive. - // - // - // Organizations may be filtered by `owner.directoryCustomerId` or - // by - // `domain`, where the domain is a G Suite domain, for example: - // - // * Filter `owner.directorycustomerid:123456789` returns - // Organization - // resources with `owner.directory_customer_id` equal to `123456789`. - // * Filter `domain:google.com` returns Organization resources - // corresponding - // to the domain `google.com`. - // - // This field is optional. + // return in the response. Filter rules are case-insensitive. 
+ // Organizations may be filtered by `owner.directoryCustomerId` or by + // `domain`, where the domain is a G Suite domain, for example: * Filter + // `owner.directorycustomerid:123456789` returns Organization resources + // with `owner.directory_customer_id` equal to `123456789`. * Filter + // `domain:google.com` returns Organization resources corresponding to + // the domain `google.com`. This field is optional. Filter string `json:"filter,omitempty"` // PageSize: The maximum number of Organizations to return in the - // response. - // This field is optional. + // response. This field is optional. PageSize int64 `json:"pageSize,omitempty"` // PageToken: A pagination token returned from a previous call to - // `SearchOrganizations` - // that indicates from where listing should continue. - // This field is optional. + // `SearchOrganizations` that indicates from where listing should + // continue. This field is optional. PageToken string `json:"pageToken,omitempty"` // ForceSendFields is a list of field names (e.g. "Filter") to @@ -2417,19 +1916,15 @@ func (s *SearchOrganizationsRequest) MarshalJSON() ([]byte, error) { // `SearchOrganizations` method. type SearchOrganizationsResponse struct { // NextPageToken: A pagination token to be used to retrieve the next - // page of results. If the - // result is too large to fit within the page size specified in the - // request, - // this field will be set with a token that can be used to fetch the - // next page - // of results. If this field is empty, it indicates that this - // response - // contains the last page of results. + // page of results. If the result is too large to fit within the page + // size specified in the request, this field will be set with a token + // that can be used to fetch the next page of results. If this field is + // empty, it indicates that this response contains the last page of + // results. NextPageToken string `json:"nextPageToken,omitempty"` // Organizations: The list of Organizations that matched the search - // query, possibly - // paginated. + // query, possibly paginated. Organizations []*Organization `json:"organizations,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2462,20 +1957,15 @@ func (s *SearchOrganizationsResponse) MarshalJSON() ([]byte, error) { // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. + // `resource`. The size of the policy is limited to a few 10s of KB. An + // empty policy is a valid policy but certain Cloud Platform services + // (such as Projects) might reject them. Policy *Policy `json:"policy,omitempty"` // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the - // policy to modify. Only - // the fields in the mask will be modified. If no mask is provided, - // the - // following default mask is used: - // - // `paths: "bindings, etag" + // policy to modify. Only the fields in the mask will be modified. If no + // mask is provided, the following default mask is used: `paths: + // "bindings, etag" UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Policy") to @@ -2531,32 +2021,24 @@ func (s *SetOrgPolicyRequest) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. -// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. -// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -2586,11 +2068,8 @@ func (s *Status) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. - // Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For - // more - // information see - // [IAM + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. For more information see [IAM // Overview](https://cloud.google.com/iam/docs/overview#permissions). Permissions []string `json:"permissions,omitempty"` @@ -2621,8 +2100,7 @@ func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. + // the caller is allowed. Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2652,8 +2130,7 @@ func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UndeleteProjectRequest: The request sent to the -// UndeleteProject +// UndeleteProjectRequest: The request sent to the UndeleteProject // method. 
type UndeleteProjectRequest struct { } @@ -2704,7 +2181,7 @@ func (c *FoldersClearOrgPolicyCall) Header() http.Header { func (c *FoldersClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2810,14 +2287,10 @@ type FoldersGetEffectiveOrgPolicyCall struct { } // GetEffectiveOrgPolicy: Gets the effective `Policy` on a resource. -// This is the result of merging -// `Policies` in the resource hierarchy. The returned `Policy` will not -// have -// an `etag`set because it is a computed `Policy` across multiple -// resources. -// Subtrees of Resource Manager resource hierarchy with 'under:' prefix -// will -// not be expanded. +// This is the result of merging `Policies` in the resource hierarchy. +// The returned `Policy` will not have an `etag`set because it is a +// computed `Policy` across multiple resources. Subtrees of Resource +// Manager resource hierarchy with 'under:' prefix will not be expanded. func (r *FoldersService) GetEffectiveOrgPolicy(resource string, geteffectiveorgpolicyrequest *GetEffectiveOrgPolicyRequest) *FoldersGetEffectiveOrgPolicyCall { c := &FoldersGetEffectiveOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2852,7 +2325,7 @@ func (c *FoldersGetEffectiveOrgPolicyCall) Header() http.Header { func (c *FoldersGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2916,7 +2389,7 @@ func (c *FoldersGetEffectiveOrgPolicyCall) Do(opts ...googleapi.CallOption) (*Or } return ret, nil // { - // "description": "Gets the effective `Policy` on a resource. This is the result of merging\n`Policies` in the resource hierarchy. The returned `Policy` will not have\nan `etag`set because it is a computed `Policy` across multiple resources.\nSubtrees of Resource Manager resource hierarchy with 'under:' prefix will\nnot be expanded.", + // "description": "Gets the effective `Policy` on a resource. This is the result of merging `Policies` in the resource hierarchy. The returned `Policy` will not have an `etag`set because it is a computed `Policy` across multiple resources. Subtrees of Resource Manager resource hierarchy with 'under:' prefix will not be expanded.", // "flatPath": "v1/folders/{foldersId}:getEffectiveOrgPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.folders.getEffectiveOrgPolicy", @@ -2958,15 +2431,11 @@ type FoldersGetOrgPolicyCall struct { header_ http.Header } -// GetOrgPolicy: Gets a `Policy` on a resource. -// -// If no `Policy` is set on the resource, a `Policy` is returned with -// default -// values including `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. -// The -// `etag` value can be used with `SetOrgPolicy()` to create or update -// a -// `Policy` during read-modify-write. +// GetOrgPolicy: Gets a `Policy` on a resource. If no `Policy` is set on +// the resource, a `Policy` is returned with default values including +// `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. 
The `etag` value +// can be used with `SetOrgPolicy()` to create or update a `Policy` +// during read-modify-write. func (r *FoldersService) GetOrgPolicy(resource string, getorgpolicyrequest *GetOrgPolicyRequest) *FoldersGetOrgPolicyCall { c := &FoldersGetOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -3001,7 +2470,7 @@ func (c *FoldersGetOrgPolicyCall) Header() http.Header { func (c *FoldersGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3065,7 +2534,7 @@ func (c *FoldersGetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPolicy, } return ret, nil // { - // "description": "Gets a `Policy` on a resource.\n\nIf no `Policy` is set on the resource, a `Policy` is returned with default\nvalues including `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. The\n`etag` value can be used with `SetOrgPolicy()` to create or update a\n`Policy` during read-modify-write.", + // "description": "Gets a `Policy` on a resource. If no `Policy` is set on the resource, a `Policy` is returned with default values including `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. The `etag` value can be used with `SetOrgPolicy()` to create or update a `Policy` during read-modify-write.", // "flatPath": "v1/folders/{foldersId}:getOrgPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.folders.getOrgPolicy", @@ -3143,7 +2612,7 @@ func (c *FoldersListAvailableOrgPolicyConstraintsCall) Header() http.Header { func (c *FoldersListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3308,7 +2777,7 @@ func (c *FoldersListOrgPoliciesCall) Header() http.Header { func (c *FoldersListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3436,12 +2905,9 @@ type FoldersSetOrgPolicyCall struct { } // SetOrgPolicy: Updates the specified `Policy` on the resource. Creates -// a new `Policy` for -// that `Constraint` on the resource if one does not exist. -// -// Not supplying an `etag` on the request `Policy` results in an -// unconditional -// write of the `Policy`. +// a new `Policy` for that `Constraint` on the resource if one does not +// exist. Not supplying an `etag` on the request `Policy` results in an +// unconditional write of the `Policy`. 
func (r *FoldersService) SetOrgPolicy(resource string, setorgpolicyrequest *SetOrgPolicyRequest) *FoldersSetOrgPolicyCall { c := &FoldersSetOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -3476,7 +2942,7 @@ func (c *FoldersSetOrgPolicyCall) Header() http.Header { func (c *FoldersSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3540,7 +3006,7 @@ func (c *FoldersSetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPolicy, } return ret, nil // { - // "description": "Updates the specified `Policy` on the resource. Creates a new `Policy` for\nthat `Constraint` on the resource if one does not exist.\n\nNot supplying an `etag` on the request `Policy` results in an unconditional\nwrite of the `Policy`.", + // "description": "Updates the specified `Policy` on the resource. Creates a new `Policy` for that `Constraint` on the resource if one does not exist. Not supplying an `etag` on the request `Policy` results in an unconditional write of the `Policy`.", // "flatPath": "v1/folders/{foldersId}:setOrgPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.folders.setOrgPolicy", @@ -3581,16 +3047,10 @@ type LiensCreateCall struct { } // Create: Create a Lien which applies to the resource denoted by the -// `parent` field. -// -// Callers of this method will require permission on the `parent` -// resource. -// For example, applying to `projects/1234` requires -// permission -// `resourcemanager.projects.updateLiens`. -// -// NOTE: Some resources may limit the number of Liens which may be -// applied. +// `parent` field. Callers of this method will require permission on the +// `parent` resource. For example, applying to `projects/1234` requires +// permission `resourcemanager.projects.updateLiens`. NOTE: Some +// resources may limit the number of Liens which may be applied. func (r *LiensService) Create(lien *Lien) *LiensCreateCall { c := &LiensCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.lien = lien @@ -3624,7 +3084,7 @@ func (c *LiensCreateCall) Header() http.Header { func (c *LiensCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3685,7 +3145,7 @@ func (c *LiensCreateCall) Do(opts ...googleapi.CallOption) (*Lien, error) { } return ret, nil // { - // "description": "Create a Lien which applies to the resource denoted by the `parent` field.\n\nCallers of this method will require permission on the `parent` resource.\nFor example, applying to `projects/1234` requires permission\n`resourcemanager.projects.updateLiens`.\n\nNOTE: Some resources may limit the number of Liens which may be applied.", + // "description": "Create a Lien which applies to the resource denoted by the `parent` field. Callers of this method will require permission on the `parent` resource. For example, applying to `projects/1234` requires permission `resourcemanager.projects.updateLiens`. 
NOTE: Some resources may limit the number of Liens which may be applied.", // "flatPath": "v1/liens", // "httpMethod": "POST", // "id": "cloudresourcemanager.liens.create", @@ -3716,12 +3176,9 @@ type LiensDeleteCall struct { header_ http.Header } -// Delete: Delete a Lien by `name`. -// -// Callers of this method will require permission on the `parent` -// resource. -// For example, a Lien with a `parent` of `projects/1234` requires -// permission +// Delete: Delete a Lien by `name`. Callers of this method will require +// permission on the `parent` resource. For example, a Lien with a +// `parent` of `projects/1234` requires permission // `resourcemanager.projects.updateLiens`. func (r *LiensService) Delete(nameid string) *LiensDeleteCall { c := &LiensDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -3756,7 +3213,7 @@ func (c *LiensDeleteCall) Header() http.Header { func (c *LiensDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3815,7 +3272,7 @@ func (c *LiensDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { } return ret, nil // { - // "description": "Delete a Lien by `name`.\n\nCallers of this method will require permission on the `parent` resource.\nFor example, a Lien with a `parent` of `projects/1234` requires permission\n`resourcemanager.projects.updateLiens`.", + // "description": "Delete a Lien by `name`. Callers of this method will require permission on the `parent` resource. For example, a Lien with a `parent` of `projects/1234` requires permission `resourcemanager.projects.updateLiens`.", // "flatPath": "v1/liens/{liensId}", // "httpMethod": "DELETE", // "id": "cloudresourcemanager.liens.delete", @@ -3854,14 +3311,10 @@ type LiensGetCall struct { header_ http.Header } -// Get: Retrieve a Lien by `name`. -// -// Callers of this method will require permission on the `parent` -// resource. -// For example, a Lien with a `parent` of `projects/1234` requires -// permission -// requires permission `resourcemanager.projects.get` -// or +// Get: Retrieve a Lien by `name`. Callers of this method will require +// permission on the `parent` resource. For example, a Lien with a +// `parent` of `projects/1234` requires permission requires permission +// `resourcemanager.projects.get` or // `resourcemanager.projects.updateLiens`. func (r *LiensService) Get(nameid string) *LiensGetCall { c := &LiensGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -3906,7 +3359,7 @@ func (c *LiensGetCall) Header() http.Header { func (c *LiensGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3968,7 +3421,7 @@ func (c *LiensGetCall) Do(opts ...googleapi.CallOption) (*Lien, error) { } return ret, nil // { - // "description": "Retrieve a Lien by `name`.\n\nCallers of this method will require permission on the `parent` resource.\nFor example, a Lien with a `parent` of `projects/1234` requires permission\nrequires permission `resourcemanager.projects.get` or\n`resourcemanager.projects.updateLiens`.", + // "description": "Retrieve a Lien by `name`. 
Callers of this method will require permission on the `parent` resource. For example, a Lien with a `parent` of `projects/1234` requires permission requires permission `resourcemanager.projects.get` or `resourcemanager.projects.updateLiens`.", // "flatPath": "v1/liens/{liensId}", // "httpMethod": "GET", // "id": "cloudresourcemanager.liens.get", @@ -4006,13 +3459,10 @@ type LiensListCall struct { header_ http.Header } -// List: List all Liens applied to the `parent` resource. -// -// Callers of this method will require permission on the `parent` -// resource. -// For example, a Lien with a `parent` of `projects/1234` requires -// permission -// `resourcemanager.projects.get`. +// List: List all Liens applied to the `parent` resource. Callers of +// this method will require permission on the `parent` resource. For +// example, a Lien with a `parent` of `projects/1234` requires +// permission `resourcemanager.projects.get`. func (r *LiensService) List() *LiensListCall { c := &LiensListCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c @@ -4034,15 +3484,10 @@ func (c *LiensListCall) PageToken(pageToken string) *LiensListCall { } // Parent sets the optional parameter "parent": Required. The name of -// the resource to list all attached Liens. -// For example, -// `projects/1234`. -// -// (google.api.field_policy).resource_type annotation is not set since -// the -// parent depends on the meta api implementation. This field could be -// a -// project or other sub project resources. +// the resource to list all attached Liens. For example, +// `projects/1234`. (google.api.field_policy).resource_type annotation +// is not set since the parent depends on the meta api implementation. +// This field could be a project or other sub project resources. func (c *LiensListCall) Parent(parent string) *LiensListCall { c.urlParams_.Set("parent", parent) return c @@ -4085,7 +3530,7 @@ func (c *LiensListCall) Header() http.Header { func (c *LiensListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4144,7 +3589,7 @@ func (c *LiensListCall) Do(opts ...googleapi.CallOption) (*ListLiensResponse, er } return ret, nil // { - // "description": "List all Liens applied to the `parent` resource.\n\nCallers of this method will require permission on the `parent` resource.\nFor example, a Lien with a `parent` of `projects/1234` requires permission\n`resourcemanager.projects.get`.", + // "description": "List all Liens applied to the `parent` resource. Callers of this method will require permission on the `parent` resource. For example, a Lien with a `parent` of `projects/1234` requires permission `resourcemanager.projects.get`.", // "flatPath": "v1/liens", // "httpMethod": "GET", // "id": "cloudresourcemanager.liens.list", @@ -4162,7 +3607,7 @@ func (c *LiensListCall) Do(opts ...googleapi.CallOption) (*ListLiensResponse, er // "type": "string" // }, // "parent": { - // "description": "Required. The name of the resource to list all attached Liens.\nFor example, `projects/1234`.\n\n(google.api.field_policy).resource_type annotation is not set since the\nparent depends on the meta api implementation. This field could be a\nproject or other sub project resources.", + // "description": "Required. The name of the resource to list all attached Liens. 
For example, `projects/1234`. (google.api.field_policy).resource_type annotation is not set since the parent depends on the meta api implementation. This field could be a project or other sub project resources.", // "location": "query", // "type": "string" // } @@ -4211,11 +3656,9 @@ type OperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this -// method to poll the operation result at intervals as recommended by -// the API -// service. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. func (r *OperationsService) Get(name string) *OperationsGetCall { c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4259,7 +3702,7 @@ func (c *OperationsGetCall) Header() http.Header { func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4321,7 +3764,7 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", // "flatPath": "v1/operations/{operationsId}", // "httpMethod": "GET", // "id": "cloudresourcemanager.operations.get", @@ -4395,7 +3838,7 @@ func (c *OrganizationsClearOrgPolicyCall) Header() http.Header { func (c *OrganizationsClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4545,7 +3988,7 @@ func (c *OrganizationsGetCall) Header() http.Header { func (c *OrganizationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4616,7 +4059,7 @@ func (c *OrganizationsGetCall) Do(opts ...googleapi.CallOption) (*Organization, // ], // "parameters": { // "name": { - // "description": "The resource name of the Organization to fetch. This is the organization's\nrelative path in the API, formatted as \"organizations/[organizationId]\".\nFor example, \"organizations/1234\".", + // "description": "The resource name of the Organization to fetch. This is the organization's relative path in the API, formatted as \"organizations/[organizationId]\". For example, \"organizations/1234\".", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, @@ -4647,14 +4090,10 @@ type OrganizationsGetEffectiveOrgPolicyCall struct { } // GetEffectiveOrgPolicy: Gets the effective `Policy` on a resource. 
-// This is the result of merging -// `Policies` in the resource hierarchy. The returned `Policy` will not -// have -// an `etag`set because it is a computed `Policy` across multiple -// resources. -// Subtrees of Resource Manager resource hierarchy with 'under:' prefix -// will -// not be expanded. +// This is the result of merging `Policies` in the resource hierarchy. +// The returned `Policy` will not have an `etag`set because it is a +// computed `Policy` across multiple resources. Subtrees of Resource +// Manager resource hierarchy with 'under:' prefix will not be expanded. func (r *OrganizationsService) GetEffectiveOrgPolicy(resource string, geteffectiveorgpolicyrequest *GetEffectiveOrgPolicyRequest) *OrganizationsGetEffectiveOrgPolicyCall { c := &OrganizationsGetEffectiveOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4689,7 +4128,7 @@ func (c *OrganizationsGetEffectiveOrgPolicyCall) Header() http.Header { func (c *OrganizationsGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4753,7 +4192,7 @@ func (c *OrganizationsGetEffectiveOrgPolicyCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Gets the effective `Policy` on a resource. This is the result of merging\n`Policies` in the resource hierarchy. The returned `Policy` will not have\nan `etag`set because it is a computed `Policy` across multiple resources.\nSubtrees of Resource Manager resource hierarchy with 'under:' prefix will\nnot be expanded.", + // "description": "Gets the effective `Policy` on a resource. This is the result of merging `Policies` in the resource hierarchy. The returned `Policy` will not have an `etag`set because it is a computed `Policy` across multiple resources. Subtrees of Resource Manager resource hierarchy with 'under:' prefix will not be expanded.", // "flatPath": "v1/organizations/{organizationsId}:getEffectiveOrgPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.organizations.getEffectiveOrgPolicy", @@ -4796,14 +4235,9 @@ type OrganizationsGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for an Organization -// resource. May be empty -// if no such policy or resource exists. The `resource` field should be -// the -// organization's resource name, e.g. -// "organizations/123". -// -// Authorization requires the Google IAM -// permission +// resource. May be empty if no such policy or resource exists. The +// `resource` field should be the organization's resource name, e.g. +// "organizations/123". 
Authorization requires the Google IAM permission // `resourcemanager.organizations.getIamPolicy` on the specified // organization func (r *OrganizationsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *OrganizationsGetIamPolicyCall { @@ -4840,7 +4274,7 @@ func (c *OrganizationsGetIamPolicyCall) Header() http.Header { func (c *OrganizationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4904,7 +4338,7 @@ func (c *OrganizationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Gets the access control policy for an Organization resource. May be empty\nif no such policy or resource exists. The `resource` field should be the\norganization's resource name, e.g. \"organizations/123\".\n\nAuthorization requires the Google IAM permission\n`resourcemanager.organizations.getIamPolicy` on the specified organization", + // "description": "Gets the access control policy for an Organization resource. May be empty if no such policy or resource exists. The `resource` field should be the organization's resource name, e.g. \"organizations/123\". Authorization requires the Google IAM permission `resourcemanager.organizations.getIamPolicy` on the specified organization", // "flatPath": "v1/organizations/{organizationsId}:getIamPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.organizations.getIamPolicy", @@ -4913,7 +4347,7 @@ func (c *OrganizationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, @@ -4946,15 +4380,11 @@ type OrganizationsGetOrgPolicyCall struct { header_ http.Header } -// GetOrgPolicy: Gets a `Policy` on a resource. -// -// If no `Policy` is set on the resource, a `Policy` is returned with -// default -// values including `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. -// The -// `etag` value can be used with `SetOrgPolicy()` to create or update -// a -// `Policy` during read-modify-write. +// GetOrgPolicy: Gets a `Policy` on a resource. If no `Policy` is set on +// the resource, a `Policy` is returned with default values including +// `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. The `etag` value +// can be used with `SetOrgPolicy()` to create or update a `Policy` +// during read-modify-write. 
func (r *OrganizationsService) GetOrgPolicy(resource string, getorgpolicyrequest *GetOrgPolicyRequest) *OrganizationsGetOrgPolicyCall { c := &OrganizationsGetOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4989,7 +4419,7 @@ func (c *OrganizationsGetOrgPolicyCall) Header() http.Header { func (c *OrganizationsGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5053,7 +4483,7 @@ func (c *OrganizationsGetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPo } return ret, nil // { - // "description": "Gets a `Policy` on a resource.\n\nIf no `Policy` is set on the resource, a `Policy` is returned with default\nvalues including `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. The\n`etag` value can be used with `SetOrgPolicy()` to create or update a\n`Policy` during read-modify-write.", + // "description": "Gets a `Policy` on a resource. If no `Policy` is set on the resource, a `Policy` is returned with default values including `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. The `etag` value can be used with `SetOrgPolicy()` to create or update a `Policy` during read-modify-write.", // "flatPath": "v1/organizations/{organizationsId}:getOrgPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.organizations.getOrgPolicy", @@ -5131,7 +4561,7 @@ func (c *OrganizationsListAvailableOrgPolicyConstraintsCall) Header() http.Heade func (c *OrganizationsListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5296,7 +4726,7 @@ func (c *OrganizationsListOrgPoliciesCall) Header() http.Header { func (c *OrganizationsListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5423,16 +4853,10 @@ type OrganizationsSearchCall struct { } // Search: Searches Organization resources that are visible to the user -// and satisfy -// the specified filter. This method returns Organizations in an -// unspecified -// order. New Organizations do not necessarily appear at the end of -// the -// results. -// -// Search will only return organizations on which the user has the -// permission -// `resourcemanager.organizations.get` +// and satisfy the specified filter. This method returns Organizations +// in an unspecified order. New Organizations do not necessarily appear +// at the end of the results. 
Search will only return organizations on +// which the user has the permission `resourcemanager.organizations.get` func (r *OrganizationsService) Search(searchorganizationsrequest *SearchOrganizationsRequest) *OrganizationsSearchCall { c := &OrganizationsSearchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.searchorganizationsrequest = searchorganizationsrequest @@ -5466,7 +4890,7 @@ func (c *OrganizationsSearchCall) Header() http.Header { func (c *OrganizationsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5527,7 +4951,7 @@ func (c *OrganizationsSearchCall) Do(opts ...googleapi.CallOption) (*SearchOrgan } return ret, nil // { - // "description": "Searches Organization resources that are visible to the user and satisfy\nthe specified filter. This method returns Organizations in an unspecified\norder. New Organizations do not necessarily appear at the end of the\nresults.\n\nSearch will only return organizations on which the user has the permission\n`resourcemanager.organizations.get`", + // "description": "Searches Organization resources that are visible to the user and satisfy the specified filter. This method returns Organizations in an unspecified order. New Organizations do not necessarily appear at the end of the results. Search will only return organizations on which the user has the permission `resourcemanager.organizations.get`", // "flatPath": "v1/organizations:search", // "httpMethod": "POST", // "id": "cloudresourcemanager.organizations.search", @@ -5581,13 +5005,9 @@ type OrganizationsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on an Organization -// resource. Replaces any -// existing policy. The `resource` field should be the organization's -// resource -// name, e.g. "organizations/123". -// -// Authorization requires the Google IAM -// permission +// resource. Replaces any existing policy. The `resource` field should +// be the organization's resource name, e.g. "organizations/123". +// Authorization requires the Google IAM permission // `resourcemanager.organizations.setIamPolicy` on the specified // organization func (r *OrganizationsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *OrganizationsSetIamPolicyCall { @@ -5624,7 +5044,7 @@ func (c *OrganizationsSetIamPolicyCall) Header() http.Header { func (c *OrganizationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5688,7 +5108,7 @@ func (c *OrganizationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Sets the access control policy on an Organization resource. Replaces any\nexisting policy. The `resource` field should be the organization's resource\nname, e.g. \"organizations/123\".\n\nAuthorization requires the Google IAM permission\n`resourcemanager.organizations.setIamPolicy` on the specified organization", + // "description": "Sets the access control policy on an Organization resource. Replaces any existing policy. 
The `resource` field should be the organization's resource name, e.g. \"organizations/123\". Authorization requires the Google IAM permission `resourcemanager.organizations.setIamPolicy` on the specified organization", // "flatPath": "v1/organizations/{organizationsId}:setIamPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.organizations.setIamPolicy", @@ -5697,7 +5117,7 @@ func (c *OrganizationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, @@ -5730,12 +5150,9 @@ type OrganizationsSetOrgPolicyCall struct { } // SetOrgPolicy: Updates the specified `Policy` on the resource. Creates -// a new `Policy` for -// that `Constraint` on the resource if one does not exist. -// -// Not supplying an `etag` on the request `Policy` results in an -// unconditional -// write of the `Policy`. +// a new `Policy` for that `Constraint` on the resource if one does not +// exist. Not supplying an `etag` on the request `Policy` results in an +// unconditional write of the `Policy`. func (r *OrganizationsService) SetOrgPolicy(resource string, setorgpolicyrequest *SetOrgPolicyRequest) *OrganizationsSetOrgPolicyCall { c := &OrganizationsSetOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5770,7 +5187,7 @@ func (c *OrganizationsSetOrgPolicyCall) Header() http.Header { func (c *OrganizationsSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5834,7 +5251,7 @@ func (c *OrganizationsSetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPo } return ret, nil // { - // "description": "Updates the specified `Policy` on the resource. Creates a new `Policy` for\nthat `Constraint` on the resource if one does not exist.\n\nNot supplying an `etag` on the request `Policy` results in an unconditional\nwrite of the `Policy`.", + // "description": "Updates the specified `Policy` on the resource. Creates a new `Policy` for that `Constraint` on the resource if one does not exist. Not supplying an `etag` on the request `Policy` results in an unconditional write of the `Policy`.", // "flatPath": "v1/organizations/{organizationsId}:setOrgPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.organizations.setOrgPolicy", @@ -5876,11 +5293,9 @@ type OrganizationsTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified Organization. -// The `resource` field should be the organization's resource name, -// e.g. "organizations/123". -// -// There are no permissions required for making this API call. +// specified Organization. The `resource` field should be the +// organization's resource name, e.g. "organizations/123". There are no +// permissions required for making this API call. 
func (r *OrganizationsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *OrganizationsTestIamPermissionsCall { c := &OrganizationsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5915,7 +5330,7 @@ func (c *OrganizationsTestIamPermissionsCall) Header() http.Header { func (c *OrganizationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5979,7 +5394,7 @@ func (c *OrganizationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified Organization.\nThe `resource` field should be the organization's resource name,\ne.g. \"organizations/123\".\n\nThere are no permissions required for making this API call.", + // "description": "Returns permissions that a caller has on the specified Organization. The `resource` field should be the organization's resource name, e.g. \"organizations/123\". There are no permissions required for making this API call.", // "flatPath": "v1/organizations/{organizationsId}:testIamPermissions", // "httpMethod": "POST", // "id": "cloudresourcemanager.organizations.testIamPermissions", @@ -5988,7 +5403,7 @@ func (c *OrganizationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, @@ -6056,7 +5471,7 @@ func (c *ProjectsClearOrgPolicyCall) Header() http.Header { func (c *ProjectsClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6161,30 +5576,17 @@ type ProjectsCreateCall struct { } // Create: Request that a new Project be created. The result is an -// Operation which -// can be used to track the creation process. This process usually takes -// a few -// seconds, but can sometimes take much longer. The tracking Operation -// is -// automatically deleted after a few hours, so there is no need to -// call -// DeleteOperation. -// -// Authorization requires the Google IAM -// permission -// `resourcemanager.projects.create` on the specified parent for the -// new -// project. The parent is identified by a specified ResourceId, -// which must include both an ID and a type, such as organization. -// -// This method does not associate the new project with a billing -// account. -// You can set or update the billing account associated with a project -// using -// the -// [`projects.updateBillingInfo`] -// (/billing/reference/rest/v1/projects/up -// dateBillingInfo) method. +// Operation which can be used to track the creation process. 
This +// process usually takes a few seconds, but can sometimes take much +// longer. The tracking Operation is automatically deleted after a few +// hours, so there is no need to call DeleteOperation. Authorization +// requires the Google IAM permission `resourcemanager.projects.create` +// on the specified parent for the new project. The parent is identified +// by a specified ResourceId, which must include both an ID and a type, +// such as organization. This method does not associate the new project +// with a billing account. You can set or update the billing account +// associated with a project using the [`projects.updateBillingInfo`] +// (/billing/reference/rest/v1/projects/updateBillingInfo) method. func (r *ProjectsService) Create(project *Project) *ProjectsCreateCall { c := &ProjectsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -6218,7 +5620,7 @@ func (c *ProjectsCreateCall) Header() http.Header { func (c *ProjectsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6279,7 +5681,7 @@ func (c *ProjectsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Request that a new Project be created. The result is an Operation which\ncan be used to track the creation process. This process usually takes a few\nseconds, but can sometimes take much longer. The tracking Operation is\nautomatically deleted after a few hours, so there is no need to call\nDeleteOperation.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.create` on the specified parent for the new\nproject. The parent is identified by a specified ResourceId,\nwhich must include both an ID and a type, such as organization.\n\nThis method does not associate the new project with a billing account.\nYou can set or update the billing account associated with a project using\nthe [`projects.updateBillingInfo`]\n(/billing/reference/rest/v1/projects/updateBillingInfo) method.", + // "description": "Request that a new Project be created. The result is an Operation which can be used to track the creation process. This process usually takes a few seconds, but can sometimes take much longer. The tracking Operation is automatically deleted after a few hours, so there is no need to call DeleteOperation. Authorization requires the Google IAM permission `resourcemanager.projects.create` on the specified parent for the new project. The parent is identified by a specified ResourceId, which must include both an ID and a type, such as organization. This method does not associate the new project with a billing account. You can set or update the billing account associated with a project using the [`projects.updateBillingInfo`] (/billing/reference/rest/v1/projects/updateBillingInfo) method.", // "flatPath": "v1/projects", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.create", @@ -6309,29 +5711,18 @@ type ProjectsDeleteCall struct { header_ http.Header } -// Delete: Marks the Project identified by the specified -// `project_id` (for example, `my-project-123`) for deletion. -// This method will only affect the Project if it has a lifecycle state -// of -// ACTIVE. 
-// -// This method changes the Project's lifecycle state from -// ACTIVE -// to DELETE_REQUESTED. -// The deletion starts at an unspecified time, -// at which point the Project is no longer accessible. -// -// Until the deletion completes, you can check the lifecycle -// state -// checked by retrieving the Project with GetProject, -// and the Project remains visible to ListProjects. -// However, you cannot update the project. -// -// After the deletion completes, the Project is not retrievable by -// the GetProject and -// ListProjects methods. -// -// The caller must have modify permissions for this Project. +// Delete: Marks the Project identified by the specified `project_id` +// (for example, `my-project-123`) for deletion. This method will only +// affect the Project if it has a lifecycle state of ACTIVE. This method +// changes the Project's lifecycle state from ACTIVE to +// DELETE_REQUESTED. The deletion starts at an unspecified time, at +// which point the Project is no longer accessible. Until the deletion +// completes, you can check the lifecycle state checked by retrieving +// the Project with GetProject, and the Project remains visible to +// ListProjects. However, you cannot update the project. After the +// deletion completes, the Project is not retrievable by the GetProject +// and ListProjects methods. The caller must have delete permissions for +// this Project. func (r *ProjectsService) Delete(projectId string) *ProjectsDeleteCall { c := &ProjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -6365,7 +5756,7 @@ func (c *ProjectsDeleteCall) Header() http.Header { func (c *ProjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6424,7 +5815,7 @@ func (c *ProjectsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { } return ret, nil // { - // "description": "Marks the Project identified by the specified\n`project_id` (for example, `my-project-123`) for deletion.\nThis method will only affect the Project if it has a lifecycle state of\nACTIVE.\n\nThis method changes the Project's lifecycle state from\nACTIVE\nto DELETE_REQUESTED.\nThe deletion starts at an unspecified time,\nat which point the Project is no longer accessible.\n\nUntil the deletion completes, you can check the lifecycle state\nchecked by retrieving the Project with GetProject,\nand the Project remains visible to ListProjects.\nHowever, you cannot update the project.\n\nAfter the deletion completes, the Project is not retrievable by\nthe GetProject and\nListProjects methods.\n\nThe caller must have modify permissions for this Project.", + // "description": "Marks the Project identified by the specified `project_id` (for example, `my-project-123`) for deletion. This method will only affect the Project if it has a lifecycle state of ACTIVE. This method changes the Project's lifecycle state from ACTIVE to DELETE_REQUESTED. The deletion starts at an unspecified time, at which point the Project is no longer accessible. Until the deletion completes, you can check the lifecycle state checked by retrieving the Project with GetProject, and the Project remains visible to ListProjects. However, you cannot update the project. 
After the deletion completes, the Project is not retrievable by the GetProject and ListProjects methods. The caller must have delete permissions for this Project.", // "flatPath": "v1/projects/{projectId}", // "httpMethod": "DELETE", // "id": "cloudresourcemanager.projects.delete", @@ -6433,7 +5824,7 @@ func (c *ProjectsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { // ], // "parameters": { // "projectId": { - // "description": "The Project ID (for example, `foo-bar-123`).\n\nRequired.", + // "description": "The Project ID (for example, `foo-bar-123`). Required.", // "location": "path", // "required": true, // "type": "string" @@ -6461,10 +5852,9 @@ type ProjectsGetCall struct { header_ http.Header } -// Get: Retrieves the Project identified by the specified -// `project_id` (for example, `my-project-123`). -// -// The caller must have read permissions for this Project. +// Get: Retrieves the Project identified by the specified `project_id` +// (for example, `my-project-123`). The caller must have read +// permissions for this Project. func (r *ProjectsService) Get(projectId string) *ProjectsGetCall { c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -6508,7 +5898,7 @@ func (c *ProjectsGetCall) Header() http.Header { func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6570,7 +5960,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { } return ret, nil // { - // "description": "Retrieves the Project identified by the specified\n`project_id` (for example, `my-project-123`).\n\nThe caller must have read permissions for this Project.", + // "description": "Retrieves the Project identified by the specified `project_id` (for example, `my-project-123`). The caller must have read permissions for this Project.", // "flatPath": "v1/projects/{projectId}", // "httpMethod": "GET", // "id": "cloudresourcemanager.projects.get", @@ -6579,7 +5969,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { // ], // "parameters": { // "projectId": { - // "description": "The Project ID (for example, `my-project-123`).\n\nRequired.", + // "description": "Required. The Project ID (for example, `my-project-123`).", // "location": "path", // "required": true, // "type": "string" @@ -6609,11 +5999,9 @@ type ProjectsGetAncestryCall struct { } // GetAncestry: Gets a list of ancestors in the resource hierarchy for -// the Project -// identified by the specified `project_id` (for example, -// `my-project-123`). -// -// The caller must have read permissions for this Project. +// the Project identified by the specified `project_id` (for example, +// `my-project-123`). The caller must have read permissions for this +// Project. 
func (r *ProjectsService) GetAncestry(projectId string, getancestryrequest *GetAncestryRequest) *ProjectsGetAncestryCall { c := &ProjectsGetAncestryCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -6648,7 +6036,7 @@ func (c *ProjectsGetAncestryCall) Header() http.Header { func (c *ProjectsGetAncestryCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6712,7 +6100,7 @@ func (c *ProjectsGetAncestryCall) Do(opts ...googleapi.CallOption) (*GetAncestry } return ret, nil // { - // "description": "Gets a list of ancestors in the resource hierarchy for the Project\nidentified by the specified `project_id` (for example, `my-project-123`).\n\nThe caller must have read permissions for this Project.", + // "description": "Gets a list of ancestors in the resource hierarchy for the Project identified by the specified `project_id` (for example, `my-project-123`). The caller must have read permissions for this Project.", // "flatPath": "v1/projects/{projectId}:getAncestry", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.getAncestry", @@ -6721,7 +6109,7 @@ func (c *ProjectsGetAncestryCall) Do(opts ...googleapi.CallOption) (*GetAncestry // ], // "parameters": { // "projectId": { - // "description": "The Project ID (for example, `my-project-123`).\n\nRequired.", + // "description": "Required. The Project ID (for example, `my-project-123`).", // "location": "path", // "required": true, // "type": "string" @@ -6754,14 +6142,10 @@ type ProjectsGetEffectiveOrgPolicyCall struct { } // GetEffectiveOrgPolicy: Gets the effective `Policy` on a resource. -// This is the result of merging -// `Policies` in the resource hierarchy. The returned `Policy` will not -// have -// an `etag`set because it is a computed `Policy` across multiple -// resources. -// Subtrees of Resource Manager resource hierarchy with 'under:' prefix -// will -// not be expanded. +// This is the result of merging `Policies` in the resource hierarchy. +// The returned `Policy` will not have an `etag`set because it is a +// computed `Policy` across multiple resources. Subtrees of Resource +// Manager resource hierarchy with 'under:' prefix will not be expanded. func (r *ProjectsService) GetEffectiveOrgPolicy(resource string, geteffectiveorgpolicyrequest *GetEffectiveOrgPolicyRequest) *ProjectsGetEffectiveOrgPolicyCall { c := &ProjectsGetEffectiveOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6796,7 +6180,7 @@ func (c *ProjectsGetEffectiveOrgPolicyCall) Header() http.Header { func (c *ProjectsGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6860,7 +6244,7 @@ func (c *ProjectsGetEffectiveOrgPolicyCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Gets the effective `Policy` on a resource. This is the result of merging\n`Policies` in the resource hierarchy. 
The returned `Policy` will not have\nan `etag`set because it is a computed `Policy` across multiple resources.\nSubtrees of Resource Manager resource hierarchy with 'under:' prefix will\nnot be expanded.", + // "description": "Gets the effective `Policy` on a resource. This is the result of merging `Policies` in the resource hierarchy. The returned `Policy` will not have an `etag`set because it is a computed `Policy` across multiple resources. Subtrees of Resource Manager resource hierarchy with 'under:' prefix will not be expanded.", // "flatPath": "v1/projects/{projectsId}:getEffectiveOrgPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.getEffectiveOrgPolicy", @@ -6903,17 +6287,12 @@ type ProjectsGetIamPolicyCall struct { } // GetIamPolicy: Returns the IAM access control policy for the specified -// Project. -// Permission is denied if the policy or the resource does not -// exist. -// -// Authorization requires the Google IAM -// permission -// `resourcemanager.projects.getIamPolicy` on the project. -// -// For additional information about resource structure and -// identification, -// see [Resource Names](/apis/design/resource_names). +// Project. Permission is denied if the policy or the resource does not +// exist. Authorization requires the Google IAM permission +// `resourcemanager.projects.getIamPolicy` on the project. For +// additional information about `resource` (e.g. my-project-id) +// structure and identification, see [Resource +// Names](https://cloud.google.com/apis/design/resource_names). func (r *ProjectsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsGetIamPolicyCall { c := &ProjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6948,7 +6327,7 @@ func (c *ProjectsGetIamPolicyCall) Header() http.Header { func (c *ProjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7012,7 +6391,7 @@ func (c *ProjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Returns the IAM access control policy for the specified Project.\nPermission is denied if the policy or the resource does not exist.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.getIamPolicy` on the project.\n\nFor additional information about resource structure and identification,\nsee [Resource Names](/apis/design/resource_names).", + // "description": "Returns the IAM access control policy for the specified Project. Permission is denied if the policy or the resource does not exist. Authorization requires the Google IAM permission `resourcemanager.projects.getIamPolicy` on the project. For additional information about `resource` (e.g. 
my-project-id) structure and identification, see [Resource Names](https://cloud.google.com/apis/design/resource_names).", // "flatPath": "v1/projects/{resource}:getIamPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.getIamPolicy", @@ -7021,7 +6400,7 @@ func (c *ProjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "required": true, // "type": "string" @@ -7053,15 +6432,11 @@ type ProjectsGetOrgPolicyCall struct { header_ http.Header } -// GetOrgPolicy: Gets a `Policy` on a resource. -// -// If no `Policy` is set on the resource, a `Policy` is returned with -// default -// values including `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. -// The -// `etag` value can be used with `SetOrgPolicy()` to create or update -// a -// `Policy` during read-modify-write. +// GetOrgPolicy: Gets a `Policy` on a resource. If no `Policy` is set on +// the resource, a `Policy` is returned with default values including +// `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. The `etag` value +// can be used with `SetOrgPolicy()` to create or update a `Policy` +// during read-modify-write. func (r *ProjectsService) GetOrgPolicy(resource string, getorgpolicyrequest *GetOrgPolicyRequest) *ProjectsGetOrgPolicyCall { c := &ProjectsGetOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7096,7 +6471,7 @@ func (c *ProjectsGetOrgPolicyCall) Header() http.Header { func (c *ProjectsGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7160,7 +6535,7 @@ func (c *ProjectsGetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPolicy, } return ret, nil // { - // "description": "Gets a `Policy` on a resource.\n\nIf no `Policy` is set on the resource, a `Policy` is returned with default\nvalues including `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. The\n`etag` value can be used with `SetOrgPolicy()` to create or update a\n`Policy` during read-modify-write.", + // "description": "Gets a `Policy` on a resource. If no `Policy` is set on the resource, a `Policy` is returned with default values including `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. The `etag` value can be used with `SetOrgPolicy()` to create or update a `Policy` during read-modify-write.", // "flatPath": "v1/projects/{projectsId}:getOrgPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.getOrgPolicy", @@ -7202,29 +6577,18 @@ type ProjectsListCall struct { } // List: Lists Projects that the caller has the -// `resourcemanager.projects.get` -// permission on and satisfy the specified filter. -// -// This method returns Projects in an unspecified order. -// This method is eventually consistent with project mutations; this -// means -// that a newly created project may not appear in the results or -// recent -// updates to an existing project may not be reflected in the results. 
-// To -// retrieve the latest state of a project, use the -// GetProject method. -// -// NOTE: If the request filter contains a `parent.type` and `parent.id` -// and -// the caller has the `resourcemanager.projects.list` permission on -// the -// parent, the results will be drawn from an alternate index which -// provides -// more consistent results. In future versions of this API, this List -// method -// will be split into List and Search to properly capture the -// behavorial +// `resourcemanager.projects.get` permission on and satisfy the +// specified filter. This method returns Projects in an unspecified +// order. This method is eventually consistent with project mutations; +// this means that a newly created project may not appear in the results +// or recent updates to an existing project may not be reflected in the +// results. To retrieve the latest state of a project, use the +// GetProject method. NOTE: If the request filter contains a +// `parent.type` and `parent.id` and the caller has the +// `resourcemanager.projects.list` permission on the parent, the results +// will be drawn from an alternate index which provides more consistent +// results. In future versions of this API, this List method will be +// split into List and Search to properly capture the behavioral // difference. func (r *ProjectsService) List() *ProjectsListCall { c := &ProjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -7232,70 +6596,47 @@ func (r *ProjectsService) List() *ProjectsListCall { } // Filter sets the optional parameter "filter": An expression for -// filtering the results of the request. Filter rules are -// case insensitive. The fields eligible for filtering are: -// -// + `name` -// + `id` -// + `labels.` (where *key* is the name of a label) -// + `parent.type` -// + `parent.id` -// -// Some examples of using labels as filters: -// -// | Filter | Description -// -// | -// |------------------|------------------------------------------------ -// -----| -// | name:how* | The project's name starts with "how". -// | -// | name:Howl | The project's name is `Howl` or `howl`. -// | -// | name:HOWL | Equivalent to above. -// | -// | NAME:howl | Equivalent to above. -// | -// | labels.color:* | The project has the label `color`. -// | -// | labels.color:red | The project's label `color` has the value `red`. -// | -// | labels.color:red labels.size:big |The project's label `color` -// has -// the value `red` and its label `size` has the value `big`. -// | -// -// If no filter is specified, the call will return projects for which -// the user -// has the `resourcemanager.projects.get` permission. -// -// NOTE: To perform a by-parent query (eg., what projects are directly -// in a -// Folder), the caller must have the -// `resourcemanager.projects.list` -// permission on the parent and the filter must contain both a -// `parent.type` -// and a `parent.id` restriction -// (example: "parent.type:folder parent.id:123"). In this case an -// alternate -// search index is used which provides more consistent results. +// filtering the results of the request. Filter rules are case +// insensitive. If multiple fields are included in a filter query, the +// query will return results that match any of the fields. 
Some eligible +// fields for filtering are: + `name` + `id` + `labels.` (where *key* is +// the name of a label) + `parent.type` + `parent.id` + `lifecycleState` +// Some examples of filter strings: | Filter | Description | +// |------------------|-------------------------------------------------- +// ---| | name:how* | The project's name starts with "how". | | +// name:Howl | The project's name is `Howl` or `howl`. | | name:HOWL | +// Equivalent to above. | | NAME:howl | Equivalent to above. | | +// labels.color:* | The project has the label `color`. | | +// labels.color:red | The project's label `color` has the value `red`. | +// | labels.color:red labels.size:big | The project's label `color` | : +// : has the value `red` and its : : : label`size` has the value : : : +// `big`. : | lifecycleState:DELETE_REQUESTED | Only show projects that +// are | : : pending deletion. : If no filter is specified, the call +// will return projects for which the user has the +// `resourcemanager.projects.get` permission. NOTE: To perform a +// by-parent query (eg., what projects are directly in a Folder), the +// caller must have the `resourcemanager.projects.list` permission on +// the parent and the filter must contain both a `parent.type` and a +// `parent.id` restriction (example: "parent.type:folder +// parent.id:123"). In this case an alternate search index is used which +// provides more consistent results. func (c *ProjectsListCall) Filter(filter string) *ProjectsListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": The maximum number -// of Projects to return in the response. -// The server can return fewer Projects than requested. -// If unspecified, server picks an appropriate default. +// of Projects to return in the response. The server can return fewer +// Projects than requested. If unspecified, server picks an appropriate +// default. func (c *ProjectsListCall) PageSize(pageSize int64) *ProjectsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": A pagination token -// returned from a previous call to ListProjects -// that indicates from where listing should continue. +// returned from a previous call to ListProjects that indicates from +// where listing should continue. func (c *ProjectsListCall) PageToken(pageToken string) *ProjectsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -7338,7 +6679,7 @@ func (c *ProjectsListCall) Header() http.Header { func (c *ProjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7397,25 +6738,25 @@ func (c *ProjectsListCall) Do(opts ...googleapi.CallOption) (*ListProjectsRespon } return ret, nil // { - // "description": "Lists Projects that the caller has the `resourcemanager.projects.get`\npermission on and satisfy the specified filter.\n\nThis method returns Projects in an unspecified order.\nThis method is eventually consistent with project mutations; this means\nthat a newly created project may not appear in the results or recent\nupdates to an existing project may not be reflected in the results. 
To\nretrieve the latest state of a project, use the\nGetProject method.\n\nNOTE: If the request filter contains a `parent.type` and `parent.id` and\nthe caller has the `resourcemanager.projects.list` permission on the\nparent, the results will be drawn from an alternate index which provides\nmore consistent results. In future versions of this API, this List method\nwill be split into List and Search to properly capture the behavorial\ndifference.", + // "description": "Lists Projects that the caller has the `resourcemanager.projects.get` permission on and satisfy the specified filter. This method returns Projects in an unspecified order. This method is eventually consistent with project mutations; this means that a newly created project may not appear in the results or recent updates to an existing project may not be reflected in the results. To retrieve the latest state of a project, use the GetProject method. NOTE: If the request filter contains a `parent.type` and `parent.id` and the caller has the `resourcemanager.projects.list` permission on the parent, the results will be drawn from an alternate index which provides more consistent results. In future versions of this API, this List method will be split into List and Search to properly capture the behavioral difference.", // "flatPath": "v1/projects", // "httpMethod": "GET", // "id": "cloudresourcemanager.projects.list", // "parameterOrder": [], // "parameters": { // "filter": { - // "description": "An expression for filtering the results of the request. Filter rules are\ncase insensitive. The fields eligible for filtering are:\n\n+ `name`\n+ `id`\n+ `labels.\u003ckey\u003e` (where *key* is the name of a label)\n+ `parent.type`\n+ `parent.id`\n\nSome examples of using labels as filters:\n\n| Filter | Description |\n|------------------|-----------------------------------------------------|\n| name:how* | The project's name starts with \"how\". |\n| name:Howl | The project's name is `Howl` or `howl`. |\n| name:HOWL | Equivalent to above. |\n| NAME:howl | Equivalent to above. |\n| labels.color:* | The project has the label `color`. |\n| labels.color:red | The project's label `color` has the value `red`. |\n| labels.color:red\u0026nbsp;labels.size:big |The project's label `color` has\n the value `red` and its label `size` has the value `big`. |\n\nIf no filter is specified, the call will return projects for which the user\nhas the `resourcemanager.projects.get` permission.\n\nNOTE: To perform a by-parent query (eg., what projects are directly in a\nFolder), the caller must have the `resourcemanager.projects.list`\npermission on the parent and the filter must contain both a `parent.type`\nand a `parent.id` restriction\n(example: \"parent.type:folder parent.id:123\"). In this case an alternate\nsearch index is used which provides more consistent results.\n\nOptional.", + // "description": "Optional. An expression for filtering the results of the request. Filter rules are case insensitive. If multiple fields are included in a filter query, the query will return results that match any of the fields. Some eligible fields for filtering are: + `name` + `id` + `labels.` (where *key* is the name of a label) + `parent.type` + `parent.id` + `lifecycleState` Some examples of filter strings: | Filter | Description | |------------------|-----------------------------------------------------| | name:how* | The project's name starts with \"how\". | | name:Howl | The project's name is `Howl` or `howl`. | | name:HOWL | Equivalent to above. 
| | NAME:howl | Equivalent to above. | | labels.color:* | The project has the label `color`. | | labels.color:red | The project's label `color` has the value `red`. | | labels.color:red labels.size:big | The project's label `color` | : : has the value `red` and its : : : label`size` has the value : : : `big`. : | lifecycleState:DELETE_REQUESTED | Only show projects that are | : : pending deletion. : If no filter is specified, the call will return projects for which the user has the `resourcemanager.projects.get` permission. NOTE: To perform a by-parent query (eg., what projects are directly in a Folder), the caller must have the `resourcemanager.projects.list` permission on the parent and the filter must contain both a `parent.type` and a `parent.id` restriction (example: \"parent.type:folder parent.id:123\"). In this case an alternate search index is used which provides more consistent results.", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "The maximum number of Projects to return in the response.\nThe server can return fewer Projects than requested.\nIf unspecified, server picks an appropriate default.\n\nOptional.", + // "description": "Optional. The maximum number of Projects to return in the response. The server can return fewer Projects than requested. If unspecified, server picks an appropriate default.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "A pagination token returned from a previous call to ListProjects\nthat indicates from where listing should continue.\n\nOptional.", + // "description": "Optional. A pagination token returned from a previous call to ListProjects that indicates from where listing should continue.", // "location": "query", // "type": "string" // } @@ -7500,7 +6841,7 @@ func (c *ProjectsListAvailableOrgPolicyConstraintsCall) Header() http.Header { func (c *ProjectsListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7665,7 +7006,7 @@ func (c *ProjectsListOrgPoliciesCall) Header() http.Header { func (c *ProjectsListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7793,79 +7134,42 @@ type ProjectsSetIamPolicyCall struct { } // SetIamPolicy: Sets the IAM access control policy for the specified -// Project. Overwrites -// any existing policy. -// -// The following constraints apply when using `setIamPolicy()`: -// -// + Project does not support `allUsers` and `allAuthenticatedUsers` -// as -// `members` in a `Binding` of a `Policy`. -// -// + The owner role can be granted to a `user`, `serviceAccount`, or a -// group -// that is part of an organization. For -// example, -// group@myownpersonaldomain.com could be added as an owner to a project -// in -// the myownpersonaldomain.com organization, but not the -// examplepetstore.com -// organization. -// -// + Service accounts can be made owners of a project directly -// without any restrictions. 
However, to be added as an owner, a user -// must be -// invited via Cloud Platform console and must accept the invitation. -// -// + A user cannot be granted the owner role using `setIamPolicy()`. The -// user -// must be granted the owner role using the Cloud Platform Console and -// must -// explicitly accept the invitation. -// -// + You can only grant ownership of a project to a member by using -// the -// GCP Console. Inviting a member will deliver an invitation email -// that -// they must accept. An invitation email is not generated if you -// are -// granting a role other than owner, or if both the member you are -// inviting -// and the project are part of your organization. -// -// + Membership changes that leave the project without any owners that -// have -// accepted the Terms of Service (ToS) will be rejected. -// -// + If the project is not part of an organization, there must be at -// least -// one owner who has accepted the Terms of Service (ToS) agreement in -// the -// policy. Calling `setIamPolicy()` to remove the last ToS-accepted -// owner -// from the policy will fail. This restriction also applies to -// legacy -// projects that no longer have owners who have accepted the ToS. Edits -// to -// IAM policies will be rejected until the lack of a ToS-accepting owner -// is -// rectified. -// -// + This method will replace the existing policy, and cannot be used -// to -// append additional IAM settings. -// -// Note: Removing service accounts from policies or changing their -// roles -// can render services completely inoperable. It is important to -// understand -// how the service account is being used before removing or updating -// its -// roles. -// -// Authorization requires the Google IAM -// permission -// `resourcemanager.projects.setIamPolicy` on the project +// Project. CAUTION: This method will replace the existing policy, and +// cannot be used to append additional IAM settings. NOTE: Removing +// service accounts from policies or changing their roles can render +// services completely inoperable. It is important to understand how the +// service account is being used before removing or updating its roles. +// For additional information about `resource` (e.g. my-project-id) +// structure and identification, see [Resource +// Names](https://cloud.google.com/apis/design/resource_names). The +// following constraints apply when using `setIamPolicy()`: + Project +// does not support `allUsers` and `allAuthenticatedUsers` as `members` +// in a `Binding` of a `Policy`. + The owner role can be granted to a +// `user`, `serviceAccount`, or a group that is part of an organization. +// For example, group@myownpersonaldomain.com could be added as an owner +// to a project in the myownpersonaldomain.com organization, but not the +// examplepetstore.com organization. + Service accounts can be made +// owners of a project directly without any restrictions. However, to be +// added as an owner, a user must be invited via Cloud Platform console +// and must accept the invitation. + A user cannot be granted the owner +// role using `setIamPolicy()`. The user must be granted the owner role +// using the Cloud Platform Console and must explicitly accept the +// invitation. + You can only grant ownership of a project to a member +// by using the GCP Console. Inviting a member will deliver an +// invitation email that they must accept. 
An invitation email is not +// generated if you are granting a role other than owner, or if both the +// member you are inviting and the project are part of your +// organization. + Membership changes that leave the project without any +// owners that have accepted the Terms of Service (ToS) will be +// rejected. + If the project is not part of an organization, there must +// be at least one owner who has accepted the Terms of Service (ToS) +// agreement in the policy. Calling `setIamPolicy()` to remove the last +// ToS-accepted owner from the policy will fail. This restriction also +// applies to legacy projects that no longer have owners who have +// accepted the ToS. Edits to IAM policies will be rejected until the +// lack of a ToS-accepting owner is rectified. Authorization requires +// the Google IAM permission `resourcemanager.projects.setIamPolicy` on +// the project func (r *ProjectsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsSetIamPolicyCall { c := &ProjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7900,7 +7204,7 @@ func (c *ProjectsSetIamPolicyCall) Header() http.Header { func (c *ProjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7964,7 +7268,7 @@ func (c *ProjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Sets the IAM access control policy for the specified Project. Overwrites\nany existing policy.\n\nThe following constraints apply when using `setIamPolicy()`:\n\n+ Project does not support `allUsers` and `allAuthenticatedUsers` as\n`members` in a `Binding` of a `Policy`.\n\n+ The owner role can be granted to a `user`, `serviceAccount`, or a group\nthat is part of an organization. For example,\ngroup@myownpersonaldomain.com could be added as an owner to a project in\nthe myownpersonaldomain.com organization, but not the examplepetstore.com\norganization.\n\n+ Service accounts can be made owners of a project directly\nwithout any restrictions. However, to be added as an owner, a user must be\ninvited via Cloud Platform console and must accept the invitation.\n\n+ A user cannot be granted the owner role using `setIamPolicy()`. The user\nmust be granted the owner role using the Cloud Platform Console and must\nexplicitly accept the invitation.\n\n+ You can only grant ownership of a project to a member by using the\nGCP Console. Inviting a member will deliver an invitation email that\nthey must accept. An invitation email is not generated if you are\ngranting a role other than owner, or if both the member you are inviting\nand the project are part of your organization.\n\n+ Membership changes that leave the project without any owners that have\naccepted the Terms of Service (ToS) will be rejected.\n\n+ If the project is not part of an organization, there must be at least\none owner who has accepted the Terms of Service (ToS) agreement in the\npolicy. Calling `setIamPolicy()` to remove the last ToS-accepted owner\nfrom the policy will fail. This restriction also applies to legacy\nprojects that no longer have owners who have accepted the ToS. 
Edits to\nIAM policies will be rejected until the lack of a ToS-accepting owner is\nrectified.\n\n+ This method will replace the existing policy, and cannot be used to\nappend additional IAM settings.\n\nNote: Removing service accounts from policies or changing their roles\ncan render services completely inoperable. It is important to understand\nhow the service account is being used before removing or updating its\nroles.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.setIamPolicy` on the project", + // "description": "Sets the IAM access control policy for the specified Project. CAUTION: This method will replace the existing policy, and cannot be used to append additional IAM settings. NOTE: Removing service accounts from policies or changing their roles can render services completely inoperable. It is important to understand how the service account is being used before removing or updating its roles. For additional information about `resource` (e.g. my-project-id) structure and identification, see [Resource Names](https://cloud.google.com/apis/design/resource_names). The following constraints apply when using `setIamPolicy()`: + Project does not support `allUsers` and `allAuthenticatedUsers` as `members` in a `Binding` of a `Policy`. + The owner role can be granted to a `user`, `serviceAccount`, or a group that is part of an organization. For example, group@myownpersonaldomain.com could be added as an owner to a project in the myownpersonaldomain.com organization, but not the examplepetstore.com organization. + Service accounts can be made owners of a project directly without any restrictions. However, to be added as an owner, a user must be invited via Cloud Platform console and must accept the invitation. + A user cannot be granted the owner role using `setIamPolicy()`. The user must be granted the owner role using the Cloud Platform Console and must explicitly accept the invitation. + You can only grant ownership of a project to a member by using the GCP Console. Inviting a member will deliver an invitation email that they must accept. An invitation email is not generated if you are granting a role other than owner, or if both the member you are inviting and the project are part of your organization. + Membership changes that leave the project without any owners that have accepted the Terms of Service (ToS) will be rejected. + If the project is not part of an organization, there must be at least one owner who has accepted the Terms of Service (ToS) agreement in the policy. Calling `setIamPolicy()` to remove the last ToS-accepted owner from the policy will fail. This restriction also applies to legacy projects that no longer have owners who have accepted the ToS. Edits to IAM policies will be rejected until the lack of a ToS-accepting owner is rectified. Authorization requires the Google IAM permission `resourcemanager.projects.setIamPolicy` on the project", // "flatPath": "v1/projects/{resource}:setIamPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.setIamPolicy", @@ -7973,7 +7277,7 @@ func (c *ProjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. 
See the operation documentation for the appropriate value for this field.", // "location": "path", // "required": true, // "type": "string" @@ -8005,12 +7309,9 @@ type ProjectsSetOrgPolicyCall struct { } // SetOrgPolicy: Updates the specified `Policy` on the resource. Creates -// a new `Policy` for -// that `Constraint` on the resource if one does not exist. -// -// Not supplying an `etag` on the request `Policy` results in an -// unconditional -// write of the `Policy`. +// a new `Policy` for that `Constraint` on the resource if one does not +// exist. Not supplying an `etag` on the request `Policy` results in an +// unconditional write of the `Policy`. func (r *ProjectsService) SetOrgPolicy(resource string, setorgpolicyrequest *SetOrgPolicyRequest) *ProjectsSetOrgPolicyCall { c := &ProjectsSetOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -8045,7 +7346,7 @@ func (c *ProjectsSetOrgPolicyCall) Header() http.Header { func (c *ProjectsSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8109,7 +7410,7 @@ func (c *ProjectsSetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPolicy, } return ret, nil // { - // "description": "Updates the specified `Policy` on the resource. Creates a new `Policy` for\nthat `Constraint` on the resource if one does not exist.\n\nNot supplying an `etag` on the request `Policy` results in an unconditional\nwrite of the `Policy`.", + // "description": "Updates the specified `Policy` on the resource. Creates a new `Policy` for that `Constraint` on the resource if one does not exist. Not supplying an `etag` on the request `Policy` results in an unconditional write of the `Policy`.", // "flatPath": "v1/projects/{projectsId}:setOrgPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.setOrgPolicy", @@ -8151,9 +7452,10 @@ type ProjectsTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified Project. -// -// There are no permissions required for making this API call. +// specified Project. For additional information about `resource` (e.g. +// my-project-id) structure and identification, see [Resource +// Names](https://cloud.google.com/apis/design/resource_names). There +// are no permissions required for making this API call. 
func (r *ProjectsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsTestIamPermissionsCall { c := &ProjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -8188,7 +7490,7 @@ func (c *ProjectsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8252,7 +7554,7 @@ func (c *ProjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified Project.\n\nThere are no permissions required for making this API call.", + // "description": "Returns permissions that a caller has on the specified Project. For additional information about `resource` (e.g. my-project-id) structure and identification, see [Resource Names](https://cloud.google.com/apis/design/resource_names). There are no permissions required for making this API call.", // "flatPath": "v1/projects/{resource}:testIamPermissions", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.testIamPermissions", @@ -8261,7 +7563,7 @@ func (c *ProjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "required": true, // "type": "string" @@ -8293,15 +7595,11 @@ type ProjectsUndeleteCall struct { header_ http.Header } -// Undelete: Restores the Project identified by the -// specified -// `project_id` (for example, `my-project-123`). -// You can only use this method for a Project that has a lifecycle state -// of -// DELETE_REQUESTED. -// After deletion starts, the Project cannot be restored. -// -// The caller must have modify permissions for this Project. +// Undelete: Restores the Project identified by the specified +// `project_id` (for example, `my-project-123`). You can only use this +// method for a Project that has a lifecycle state of DELETE_REQUESTED. +// After deletion starts, the Project cannot be restored. The caller +// must have undelete permissions for this Project. 
func (r *ProjectsService) Undelete(projectId string, undeleteprojectrequest *UndeleteProjectRequest) *ProjectsUndeleteCall { c := &ProjectsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -8336,7 +7634,7 @@ func (c *ProjectsUndeleteCall) Header() http.Header { func (c *ProjectsUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8400,7 +7698,7 @@ func (c *ProjectsUndeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) } return ret, nil // { - // "description": "Restores the Project identified by the specified\n`project_id` (for example, `my-project-123`).\nYou can only use this method for a Project that has a lifecycle state of\nDELETE_REQUESTED.\nAfter deletion starts, the Project cannot be restored.\n\nThe caller must have modify permissions for this Project.", + // "description": "Restores the Project identified by the specified `project_id` (for example, `my-project-123`). You can only use this method for a Project that has a lifecycle state of DELETE_REQUESTED. After deletion starts, the Project cannot be restored. The caller must have undelete permissions for this Project.", // "flatPath": "v1/projects/{projectId}:undelete", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.undelete", @@ -8409,7 +7707,7 @@ func (c *ProjectsUndeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) // ], // "parameters": { // "projectId": { - // "description": "The project ID (for example, `foo-bar-123`).\n\nRequired.", + // "description": "Required. The project ID (for example, `foo-bar-123`).", // "location": "path", // "required": true, // "type": "string" @@ -8441,10 +7739,8 @@ type ProjectsUpdateCall struct { } // Update: Updates the attributes of the Project identified by the -// specified -// `project_id` (for example, `my-project-123`). -// -// The caller must have modify permissions for this Project. +// specified `project_id` (for example, `my-project-123`). The caller +// must have modify permissions for this Project. func (r *ProjectsService) Update(projectId string, project *Project) *ProjectsUpdateCall { c := &ProjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -8479,7 +7775,7 @@ func (c *ProjectsUpdateCall) Header() http.Header { func (c *ProjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8543,7 +7839,7 @@ func (c *ProjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Project, error) } return ret, nil // { - // "description": "Updates the attributes of the Project identified by the specified\n`project_id` (for example, `my-project-123`).\n\nThe caller must have modify permissions for this Project.", + // "description": "Updates the attributes of the Project identified by the specified `project_id` (for example, `my-project-123`). 
The caller must have modify permissions for this Project.", // "flatPath": "v1/projects/{projectId}", // "httpMethod": "PUT", // "id": "cloudresourcemanager.projects.update", @@ -8552,7 +7848,7 @@ func (c *ProjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Project, error) // ], // "parameters": { // "projectId": { - // "description": "The project ID (for example, `my-project-123`).\n\nRequired.", + // "description": "The project ID (for example, `my-project-123`). Required.", // "location": "path", // "required": true, // "type": "string" diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v2beta1/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v2beta1/cloudresourcemanager-api.json index a2aeec242b8..3808463a224 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v2beta1/cloudresourcemanager-api.json +++ b/vendor/google.golang.org/api/cloudresourcemanager/v2beta1/cloudresourcemanager-api.json @@ -111,14 +111,14 @@ "folders": { "methods": { "create": { - "description": "Creates a Folder in the resource hierarchy.\nReturns an Operation which can be used to track the progress of the\nfolder creation workflow.\nUpon success the Operation.response field will be populated with the\ncreated Folder.\n\nIn order to succeed, the addition of this new Folder must not violate\nthe Folder naming, height or fanout constraints.\n\n+ The Folder's display_name must be distinct from all other Folder's that\nshare its parent.\n+ The addition of the Folder must not cause the active Folder hierarchy\nto exceed a height of 4. Note, the full active + deleted Folder hierarchy\nis allowed to reach a height of 8; this provides additional headroom when\nmoving folders that contain deleted folders.\n+ The addition of the Folder must not cause the total number of Folders\nunder its parent to exceed 100.\n\nIf the operation fails due to a folder constraint violation, some errors\nmay be returned by the CreateFolder request, with status code\nFAILED_PRECONDITION and an error description. Other folder constraint\nviolations will be communicated in the Operation, with the specific\nPreconditionFailure returned via the details list in the Operation.error\nfield.\n\nThe caller must have `resourcemanager.folders.create` permission on the\nidentified parent.", + "description": "Creates a Folder in the resource hierarchy. Returns an Operation which can be used to track the progress of the folder creation workflow. Upon success the Operation.response field will be populated with the created Folder. In order to succeed, the addition of this new Folder must not violate the Folder naming, height or fanout constraints. + The Folder's display_name must be distinct from all other Folder's that share its parent. + The addition of the Folder must not cause the active Folder hierarchy to exceed a height of 4. Note, the full active + deleted Folder hierarchy is allowed to reach a height of 8; this provides additional headroom when moving folders that contain deleted folders. + The addition of the Folder must not cause the total number of Folders under its parent to exceed 100. If the operation fails due to a folder constraint violation, some errors may be returned by the CreateFolder request, with status code FAILED_PRECONDITION and an error description. Other folder constraint violations will be communicated in the Operation, with the specific PreconditionFailure returned via the details list in the Operation.error field. 
The caller must have `resourcemanager.folders.create` permission on the identified parent.", "flatPath": "v2/folders", "httpMethod": "POST", "id": "cloudresourcemanager.folders.create", "parameterOrder": [], "parameters": { "parent": { - "description": "Required. The resource name of the new Folder's parent.\nMust be of the form `folders/{folder_id}` or `organizations/{org_id}`.", + "description": "Required. The resource name of the new Folder's parent. Must be of the form `folders/{folder_id}` or `organizations/{org_id}`.", "location": "query", "type": "string" } @@ -135,7 +135,7 @@ ] }, "delete": { - "description": "Requests deletion of a Folder. The Folder is moved into the\nDELETE_REQUESTED state\nimmediately, and is deleted approximately 30 days later. This method may\nonly be called on an empty Folder in the\nACTIVE state, where a Folder is empty if\nit doesn't contain any Folders or Projects in the\nACTIVE state.\nThe caller must have `resourcemanager.folders.delete` permission on the\nidentified folder.", + "description": "Requests deletion of a Folder. The Folder is moved into the DELETE_REQUESTED state immediately, and is deleted approximately 30 days later. This method may only be called on an empty Folder in the ACTIVE state, where a Folder is empty if it doesn't contain any Folders or Projects in the ACTIVE state. The caller must have `resourcemanager.folders.delete` permission on the identified folder.", "flatPath": "v2/folders/{foldersId}", "httpMethod": "DELETE", "id": "cloudresourcemanager.folders.delete", @@ -144,7 +144,7 @@ ], "parameters": { "name": { - "description": "Required. the resource name of the Folder to be deleted.\nMust be of the form `folders/{folder_id}`.", + "description": "Required. the resource name of the Folder to be deleted. Must be of the form `folders/{folder_id}`.", "location": "path", "pattern": "^folders/[^/]+$", "required": true, @@ -160,7 +160,7 @@ ] }, "get": { - "description": "Retrieves a Folder identified by the supplied resource name.\nValid Folder resource names have the format `folders/{folder_id}`\n(for example, `folders/1234`).\nThe caller must have `resourcemanager.folders.get` permission on the\nidentified folder.", + "description": "Retrieves a Folder identified by the supplied resource name. Valid Folder resource names have the format `folders/{folder_id}` (for example, `folders/1234`). The caller must have `resourcemanager.folders.get` permission on the identified folder.", "flatPath": "v2/folders/{foldersId}", "httpMethod": "GET", "id": "cloudresourcemanager.folders.get", @@ -169,7 +169,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the Folder to retrieve.\nMust be of the form `folders/{folder_id}`.", + "description": "Required. The resource name of the Folder to retrieve. Must be of the form `folders/{folder_id}`.", "location": "path", "pattern": "^folders/[^/]+$", "required": true, @@ -186,7 +186,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a Folder. The returned policy may be\nempty if no such policy or resource exists. The `resource` field should\nbe the Folder's resource name, e.g. \"folders/1234\".\nThe caller must have `resourcemanager.folders.getIamPolicy` permission\non the identified folder.", + "description": "Gets the access control policy for a Folder. The returned policy may be empty if no such policy or resource exists. The `resource` field should be the Folder's resource name, e.g. \"folders/1234\". 
The caller must have `resourcemanager.folders.getIamPolicy` permission on the identified folder.", "flatPath": "v2/folders/{foldersId}:getIamPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.folders.getIamPolicy", @@ -195,7 +195,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^folders/[^/]+$", "required": true, @@ -215,7 +215,7 @@ ] }, "list": { - "description": "Lists the Folders that are direct descendants of supplied parent resource.\nList provides a strongly consistent view of the Folders underneath\nthe specified parent resource.\nList returns Folders sorted based upon the (ascending) lexical ordering\nof their display_name.\nThe caller must have `resourcemanager.folders.list` permission on the\nidentified parent.", + "description": "Lists the Folders that are direct descendants of supplied parent resource. List provides a strongly consistent view of the Folders underneath the specified parent resource. List returns Folders sorted based upon the (ascending) lexical ordering of their display_name. The caller must have `resourcemanager.folders.list` permission on the identified parent.", "flatPath": "v2/folders", "httpMethod": "GET", "id": "cloudresourcemanager.folders.list", @@ -228,17 +228,17 @@ "type": "integer" }, "pageToken": { - "description": "Optional. A pagination token returned from a previous call to `ListFolders`\nthat indicates where this listing should continue from.", + "description": "Optional. A pagination token returned from a previous call to `ListFolders` that indicates where this listing should continue from.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The resource name of the Organization or Folder whose Folders are\nbeing listed.\nMust be of the form `folders/{folder_id}` or `organizations/{org_id}`.\nAccess to this method is controlled by checking the\n`resourcemanager.folders.list` permission on the `parent`.", + "description": "Required. The resource name of the Organization or Folder whose Folders are being listed. Must be of the form `folders/{folder_id}` or `organizations/{org_id}`. Access to this method is controlled by checking the `resourcemanager.folders.list` permission on the `parent`.", "location": "query", "type": "string" }, "showDeleted": { - "description": "Optional. Controls whether Folders in the\nDELETE_REQUESTED\nstate should be returned. Defaults to false.", + "description": "Optional. Controls whether Folders in the DELETE_REQUESTED state should be returned. 
Defaults to false.", "location": "query", "type": "boolean" } @@ -253,7 +253,7 @@ ] }, "move": { - "description": "Moves a Folder under a new resource parent.\nReturns an Operation which can be used to track the progress of the\nfolder move workflow.\nUpon success the Operation.response field will be populated with the\nmoved Folder.\nUpon failure, a FolderOperationError categorizing the failure cause will\nbe returned - if the failure occurs synchronously then the\nFolderOperationError will be returned via the Status.details field\nand if it occurs asynchronously then the FolderOperation will be returned\nvia the Operation.error field.\nIn addition, the Operation.metadata field will be populated with a\nFolderOperation message as an aid to stateless clients.\nFolder moves will be rejected if they violate either the naming, height\nor fanout constraints described in the\nCreateFolder documentation.\nThe caller must have `resourcemanager.folders.move` permission on the\nfolder's current and proposed new parent.", + "description": "Moves a Folder under a new resource parent. Returns an Operation which can be used to track the progress of the folder move workflow. Upon success the Operation.response field will be populated with the moved Folder. Upon failure, a FolderOperationError categorizing the failure cause will be returned - if the failure occurs synchronously then the FolderOperationError will be returned via the Status.details field and if it occurs asynchronously then the FolderOperation will be returned via the Operation.error field. In addition, the Operation.metadata field will be populated with a FolderOperation message as an aid to stateless clients. Folder moves will be rejected if they violate either the naming, height or fanout constraints described in the CreateFolder documentation. The caller must have `resourcemanager.folders.move` permission on the folder's current and proposed new parent.", "flatPath": "v2/folders/{foldersId}:move", "httpMethod": "POST", "id": "cloudresourcemanager.folders.move", @@ -262,7 +262,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the Folder to move.\nMust be of the form folders/{folder_id}", + "description": "Required. The resource name of the Folder to move. Must be of the form folders/{folder_id}", "location": "path", "pattern": "^folders/[^/]+$", "required": true, @@ -281,7 +281,7 @@ ] }, "patch": { - "description": "Updates a Folder, changing its display_name.\nChanges to the folder display_name will be rejected if they violate either\nthe display_name formatting rules or naming constraints described in\nthe CreateFolder documentation.\n\nThe Folder's display name must start and end with a letter or digit,\nmay contain letters, digits, spaces, hyphens and underscores and can be\nno longer than 30 characters. This is captured by the regular expression:\n[\\p{L}\\p{N}]([\\p{L}\\p{N}_- ]{0,28}[\\p{L}\\p{N}])?.\nThe caller must have `resourcemanager.folders.update` permission on the\nidentified folder.\n\nIf the update fails due to the unique name constraint then a\nPreconditionFailure explaining this violation will be returned\nin the Status.details field.", + "description": "Updates a Folder, changing its display_name. Changes to the folder display_name will be rejected if they violate either the display_name formatting rules or naming constraints described in the CreateFolder documentation. 
The Folder's display name must start and end with a letter or digit, may contain letters, digits, spaces, hyphens and underscores and can be no longer than 30 characters. This is captured by the regular expression: [\\p{L}\\p{N}]([\\p{L}\\p{N}_- ]{0,28}[\\p{L}\\p{N}])?. The caller must have `resourcemanager.folders.update` permission on the identified folder. If the update fails due to the unique name constraint then a PreconditionFailure explaining this violation will be returned in the Status.details field.", "flatPath": "v2/folders/{foldersId}", "httpMethod": "PATCH", "id": "cloudresourcemanager.folders.patch", @@ -290,14 +290,14 @@ ], "parameters": { "name": { - "description": "Output only. The resource name of the Folder.\nIts format is `folders/{folder_id}`, for example: \"folders/1234\".", + "description": "Output only. The resource name of the Folder. Its format is `folders/{folder_id}`, for example: \"folders/1234\".", "location": "path", "pattern": "^folders/[^/]+$", "required": true, "type": "string" }, "updateMask": { - "description": "Required. Fields to be updated.\nOnly the `display_name` can be updated.", + "description": "Required. Fields to be updated. Only the `display_name` can be updated.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -315,7 +315,7 @@ ] }, "search": { - "description": "Search for folders that match specific filter criteria.\nSearch provides an eventually consistent view of the folders a user has\naccess to which meet the specified filter criteria.\n\nThis will only return folders on which the caller has the\npermission `resourcemanager.folders.get`.", + "description": "Search for folders that match specific filter criteria. Search provides an eventually consistent view of the folders a user has access to which meet the specified filter criteria. This will only return folders on which the caller has the permission `resourcemanager.folders.get`.", "flatPath": "v2/folders:search", "httpMethod": "POST", "id": "cloudresourcemanager.folders.search", @@ -334,7 +334,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on a Folder, replacing any existing policy.\nThe `resource` field should be the Folder's resource name, e.g.\n\"folders/1234\".\nThe caller must have `resourcemanager.folders.setIamPolicy` permission\non the identified folder.", + "description": "Sets the access control policy on a Folder, replacing any existing policy. The `resource` field should be the Folder's resource name, e.g. \"folders/1234\". The caller must have `resourcemanager.folders.setIamPolicy` permission on the identified folder.", "flatPath": "v2/folders/{foldersId}:setIamPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.folders.setIamPolicy", @@ -343,7 +343,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^folders/[^/]+$", "required": true, @@ -362,7 +362,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified Folder.\nThe `resource` field should be the Folder's resource name,\ne.g. 
\"folders/1234\".\n\nThere are no permissions required for making this API call.", + "description": "Returns permissions that a caller has on the specified Folder. The `resource` field should be the Folder's resource name, e.g. \"folders/1234\". There are no permissions required for making this API call.", "flatPath": "v2/folders/{foldersId}:testIamPermissions", "httpMethod": "POST", "id": "cloudresourcemanager.folders.testIamPermissions", @@ -371,7 +371,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^folders/[^/]+$", "required": true, @@ -390,7 +390,7 @@ ] }, "undelete": { - "description": "Cancels the deletion request for a Folder. This method may only be\ncalled on a Folder in the\nDELETE_REQUESTED state.\nIn order to succeed, the Folder's parent must be in the\nACTIVE state.\nIn addition, reintroducing the folder into the tree must not violate\nfolder naming, height and fanout constraints described in the\nCreateFolder documentation.\nThe caller must have `resourcemanager.folders.undelete` permission on the\nidentified folder.", + "description": "Cancels the deletion request for a Folder. This method may only be called on a Folder in the DELETE_REQUESTED state. In order to succeed, the Folder's parent must be in the ACTIVE state. In addition, reintroducing the folder into the tree must not violate folder naming, height and fanout constraints described in the CreateFolder documentation. The caller must have `resourcemanager.folders.undelete` permission on the identified folder.", "flatPath": "v2/folders/{foldersId}:undelete", "httpMethod": "POST", "id": "cloudresourcemanager.folders.undelete", @@ -399,7 +399,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the Folder to undelete.\nMust be of the form `folders/{folder_id}`.", + "description": "Required. The resource name of the Folder to undelete. Must be of the form `folders/{folder_id}`.", "location": "path", "pattern": "^folders/[^/]+$", "required": true, @@ -422,7 +422,7 @@ "operations": { "methods": { "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v1/operations/{operationsId}", "httpMethod": "GET", "id": "cloudresourcemanager.operations.get", @@ -450,11 +450,11 @@ } } }, - "revision": "20200504", + "revision": "20200907", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"sampleservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:aliya@example.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts jose@example.com from DATA_READ logging, and\naliya@example.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -465,18 +465,18 @@ "type": "array" }, "service": { - "description": "Specifies a service that will be enabled for audit logging.\nFor example, `storage.googleapis.com`, `cloudsql.googleapis.com`.\n`allServices` is a special value that covers all services.", + "description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. 
`allServices` is a special value that covers all services.", "type": "string" } }, "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\njose@example.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions. Example: { \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { - "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", + "description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.", "items": { "type": "string" }, @@ -505,62 +505,67 @@ "description": "Associates `members` with a `role`.", "id": "Binding", "properties": { + "bindingId": { + "description": "A client-specified ID for this binding. Expected to be globally unique to support the internal bindings-by-ID API.", + "type": "string" + }, "condition": { "$ref": "Expr", - "description": "The condition that is associated with this binding.\n\nIf the condition evaluates to `true`, then this binding applies to the\ncurrent request.\n\nIf the condition evaluates to `false`, then this binding does not apply to\nthe current request. However, a different role binding might grant the same\nrole to one or more of the members in this binding.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies)." + "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a user that has been recently deleted. 
For\n example, `alice@example.com?uid=123456789012345678901`. If the user is\n recovered, this value reverts to `user:{emailid}` and the recovered user\n retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus\n unique identifier) representing a service account that has been recently\n deleted. For example,\n `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.\n If the service account is undeleted, this value reverts to\n `serviceAccount:{emailid}` and the undeleted service account retains the\n role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a Google group that has been recently\n deleted. For example, `admins@example.com?uid=123456789012345678901`. If\n the group is recovered, this value reverts to `group:{emailid}` and the\n recovered group retains the role in the binding.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", "items": { "type": "string" }, "type": "array" }, "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", "type": "string" } }, "type": "object" }, "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL)\nsyntax. CEL is a C-like expression language. 
The syntax and semantics of CEL\nare documented at https://github.com/google/cel-spec.\n\nExample (Comparison):\n\n title: \"Summary size limit\"\n description: \"Determines if a summary is less than 100 chars\"\n expression: \"document.summary.size() \u003c 100\"\n\nExample (Equality):\n\n title: \"Requestor is owner\"\n description: \"Determines if requestor is the document owner\"\n expression: \"document.owner == request.auth.claims.email\"\n\nExample (Logic):\n\n title: \"Public documents\"\n description: \"Determine whether the document should be publicly visible\"\n expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\n\nExample (Data Manipulation):\n\n title: \"Notification string\"\n description: \"Create a notification string with a timestamp.\"\n expression: \"'New message received at ' + string(document.create_time)\"\n\nThe exact variables and functions that may be referenced within an expression\nare determined by the service that evaluates it. See the service\ndocumentation for additional information.", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { - "description": "Optional. Description of the expression. This is a longer text which\ndescribes the expression, e.g. when hovered over it in a UI.", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, "expression": { - "description": "Textual representation of an expression in Common Expression Language\nsyntax.", + "description": "Textual representation of an expression in Common Expression Language syntax.", "type": "string" }, "location": { - "description": "Optional. String indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", "type": "string" }, "title": { - "description": "Optional. Title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. 
in UIs which allow to enter the expression.", "type": "string" } }, "type": "object" }, "Folder": { - "description": "A Folder in an Organization's resource hierarchy, used to\norganize that Organization's resources.", + "description": "A Folder in an Organization's resource hierarchy, used to organize that Organization's resources.", "id": "Folder", "properties": { "createTime": { "description": "Output only. Timestamp when the Folder was created. Assigned by the server.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "displayName": { - "description": "The folder’s display name.\nA folder’s display name must be unique amongst its siblings, e.g.\nno two folders with the same parent can share the same display name.\nThe display name must start and end with a letter or digit, may contain\nletters, digits, spaces, hyphens and underscores and can be no longer\nthan 30 characters. This is captured by the regular expression:\n[\\p{L}\\p{N}]([\\p{L}\\p{N}_- ]{0,28}[\\p{L}\\p{N}])?.", + "description": "The folder’s display name. A folder’s display name must be unique amongst its siblings, e.g. no two folders with the same parent can share the same display name. The display name must start and end with a letter or digit, may contain letters, digits, spaces, hyphens and underscores and can be no longer than 30 characters. This is captured by the regular expression: [\\p{L}\\p{N}]([\\p{L}\\p{N}_- ]{0,28}[\\p{L}\\p{N}])?.", "type": "string" }, "lifecycleState": { - "description": "Output only. The lifecycle state of the folder.\nUpdates to the lifecycle_state must be performed via\nDeleteFolder and\nUndeleteFolder.", + "description": "Output only. The lifecycle state of the folder. Updates to the lifecycle_state must be performed via DeleteFolder and UndeleteFolder.", "enum": [ "LIFECYCLE_STATE_UNSPECIFIED", "ACTIVE", @@ -571,14 +576,16 @@ "The normal and active state.", "The folder has been marked for deletion by the user." ], + "readOnly": true, "type": "string" }, "name": { - "description": "Output only. The resource name of the Folder.\nIts format is `folders/{folder_id}`, for example: \"folders/1234\".", + "description": "Output only. The resource name of the Folder. Its format is `folders/{folder_id}`, for example: \"folders/1234\".", + "readOnly": true, "type": "string" }, "parent": { - "description": "Required. The Folder’s parent's resource name.\nUpdates to the folder's parent must be performed via\nMoveFolder.", + "description": "Required. The Folder’s parent's resource name. Updates to the folder's parent must be performed via MoveFolder.", "type": "string" } }, @@ -589,7 +596,7 @@ "id": "FolderOperation", "properties": { "destinationParent": { - "description": "The resource name of the folder or organization we are either creating\nthe folder under or moving the folder to.", + "description": "The resource name of the folder or organization we are either creating the folder under or moving the folder to.", "type": "string" }, "displayName": { @@ -611,7 +618,7 @@ "type": "string" }, "sourceParent": { - "description": "The resource name of the folder's parent.\nOnly applicable when the operation_type is MOVE.", + "description": "The resource name of the folder's parent. 
Only applicable when the operation_type is MOVE.", "type": "string" } }, @@ -639,13 +646,13 @@ "The error type was unrecognized or unspecified.", "The attempted action would violate the max folder depth constraint.", "The attempted action would violate the max child folders constraint.", - "The attempted action would violate the locally-unique folder\ndisplay_name constraint.", + "The attempted action would violate the locally-unique folder display_name constraint.", "The resource being moved has been deleted.", "The resource a folder was being added to has been deleted.", "The attempted action would introduce cycle in resource path.", "The attempted action would move a folder that is already being moved.", "The folder the caller is trying to delete contains active resources.", - "The attempted action would violate the max deleted folder depth\nconstraint." + "The attempted action would violate the max deleted folder depth constraint." ], "type": "string" } @@ -658,7 +665,7 @@ "properties": { "options": { "$ref": "GetPolicyOptions", - "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to\n`GetIamPolicy`." + "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to `GetIamPolicy`." } }, "type": "object" @@ -668,7 +675,7 @@ "id": "GetPolicyOptions", "properties": { "requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -680,14 +687,14 @@ "id": "ListFoldersResponse", "properties": { "folders": { - "description": "A possibly paginated list of Folders that are direct descendants of\nthe specified parent resource.", + "description": "A possibly paginated list of Folders that are direct descendants of the specified parent resource.", "items": { "$ref": "Folder" }, "type": "array" }, "nextPageToken": { - "description": "A pagination token returned from a previous call to `ListFolders`\nthat indicates from where listing should continue.", + "description": "A pagination token returned from a previous call to `ListFolders` that indicates from where listing should continue.", "type": "string" } }, @@ -698,18 +705,18 @@ "id": "MoveFolderRequest", "properties": { "destinationParent": { - "description": "Required. The resource name of the Folder or Organization to reparent\nthe folder under.\nMust be of the form `folders/{folder_id}` or `organizations/{org_id}`.", + "description": "Required. The resource name of the Folder or Organization to reparent the folder under. 
Must be of the form `folders/{folder_id}` or `organizations/{org_id}`.", "type": "string" } }, "type": "object" }, "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", "properties": { "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", "type": "boolean" }, "error": { @@ -721,11 +728,11 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", "type": "object" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, "response": { @@ -733,14 +740,14 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", "type": "object" } }, "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access\ncontrols for Google Cloud resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. 
Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). A `role` is a named list of\npermissions; each `role` can be an IAM predefined role or a user-created\ncustom role.\n\nFor some types of Google Cloud resources, a `binding` can also specify a\n`condition`, which is a logical expression that allows access to a resource\nonly if the expression evaluates to `true`. A condition can add constraints\nbased on attributes of the request, the resource, or both. To learn which\nresources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).\n\n**JSON example:**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\n \"user:eve@example.com\"\n ],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n }\n\n**YAML example:**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n - etag: BwWWja0YfJA=\n - version: 3\n\nFor a description of IAM and its features, see the\n[IAM documentation](https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "auditConfigs": { @@ -751,19 +758,19 @@ "type": "array" }, "bindings": { - "description": "Associates a list of `members` to a `role`. Optionally, may specify a\n`condition` that determines how and when the `bindings` are applied. Each\nof the `bindings` must contain at least one member.", + "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", "format": "byte", "type": "string" }, "version": { - "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. 
Requests that specify an invalid value\nare rejected.\n\nAny operation that affects conditional role bindings must specify version\n`3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding\n* Adding a conditional role binding to a policy\n* Changing a conditional role binding in a policy\n* Removing any role binding, with or without a condition, from a policy\n that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may\nspecify any valid version or leave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -771,7 +778,7 @@ "type": "object" }, "ProjectCreationStatus": { - "description": "A status object which is used as the `metadata` field for the Operation\nreturned by CreateProject. It provides insight for when significant phases of\nProject creation have completed.", + "description": "A status object which is used as the `metadata` field for the Operation returned by CreateProject. It provides insight for when significant phases of Project creation have completed.", "id": "ProjectCreationStatus", "properties": { "createTime": { @@ -780,7 +787,7 @@ "type": "string" }, "gettable": { - "description": "True if the project can be retrieved using GetProject. No other operations\non the project are guaranteed to work until the project creation is\ncomplete.", + "description": "True if the project can be retrieved using GetProject. No other operations on the project are guaranteed to work until the project creation is complete.", "type": "boolean" }, "ready": { @@ -800,11 +807,11 @@ "type": "integer" }, "pageToken": { - "description": "Optional. A pagination token returned from a previous call to `SearchFolders`\nthat indicates from where search should continue.", + "description": "Optional. 
A pagination token returned from a previous call to `SearchFolders` that indicates from where search should continue.", "type": "string" }, "query": { - "description": "Search criteria used to select the Folders to return.\nIf no search criteria is specified then all accessible folders will be\nreturned.\n\nQuery expressions can be used to restrict results based upon displayName,\nlifecycleState and parent, where the operators `=`, `NOT`, `AND` and `OR`\ncan be used along with the suffix wildcard symbol `*`.\n\nThe displayName field in a query expression should use escaped quotes\nfor values that include whitespace to prevent unexpected behavior.\n\nSome example queries are:\n\n* Query `displayName=Test*` returns Folder resources whose display name\nstarts with \"Test\".\n* Query `lifecycleState=ACTIVE` returns Folder resources with\n`lifecycleState` set to `ACTIVE`.\n* Query `parent=folders/123` returns Folder resources that have\n`folders/123` as a parent resource.\n* Query `parent=folders/123 AND lifecycleState=ACTIVE` returns active\nFolder resources that have `folders/123` as a parent resource.\n* Query `displayName=\\\\\"Test String\\\\\"` returns Folder resources with\ndisplay names that include both \"Test\" and \"String\".", + "description": "Search criteria used to select the Folders to return. If no search criteria is specified then all accessible folders will be returned. Query expressions can be used to restrict results based upon displayName, lifecycleState and parent, where the operators `=`, `NOT`, `AND` and `OR` can be used along with the suffix wildcard symbol `*`. The displayName field in a query expression should use escaped quotes for values that include whitespace to prevent unexpected behavior. Some example queries are: * Query `displayName=Test*` returns Folder resources whose display name starts with \"Test\". * Query `lifecycleState=ACTIVE` returns Folder resources with `lifecycleState` set to `ACTIVE`. * Query `parent=folders/123` returns Folder resources that have `folders/123` as a parent resource. * Query `parent=folders/123 AND lifecycleState=ACTIVE` returns active Folder resources that have `folders/123` as a parent resource. * Query `displayName=\\\\\"Test String\\\\\"` returns Folder resources with display names that include both \"Test\" and \"String\".", "type": "string" } }, @@ -815,14 +822,14 @@ "id": "SearchFoldersResponse", "properties": { "folders": { - "description": "A possibly paginated folder search results.\nthe specified parent resource.", + "description": "A possibly paginated folder search results. the specified parent resource.", "items": { "$ref": "Folder" }, "type": "array" }, "nextPageToken": { - "description": "A pagination token returned from a previous call to `SearchFolders`\nthat indicates from where searching should continue.", + "description": "A pagination token returned from a previous call to `SearchFolders` that indicates from where searching should continue.", "type": "string" } }, @@ -834,10 +841,10 @@ "properties": { "policy": { "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. 
An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." }, "updateMask": { - "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. If no mask is provided, the\nfollowing default mask is used:\n\n`paths: \"bindings, etag\"`", + "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: \"bindings, etag\"`", "format": "google-fieldmask", "type": "string" } @@ -845,7 +852,7 @@ "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -854,7 +861,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -865,7 +872,7 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, @@ -876,7 +883,7 @@ "id": "TestIamPermissionsRequest", "properties": { "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. 
For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "items": { "type": "string" }, @@ -890,7 +897,7 @@ "id": "TestIamPermissionsResponse", "properties": { "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { "type": "string" }, diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v2beta1/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v2beta1/cloudresourcemanager-gen.go index 79469ec286b..744c8eefd5b 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v2beta1/cloudresourcemanager-gen.go +++ b/vendor/google.golang.org/api/cloudresourcemanager/v2beta1/cloudresourcemanager-gen.go @@ -79,6 +79,7 @@ const apiId = "cloudresourcemanager:v2beta1" const apiName = "cloudresourcemanager" const apiVersion = "v2beta1" const basePath = "https://cloudresourcemanager.googleapis.com/" +const mtlsBasePath = "https://cloudresourcemanager.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -98,6 +99,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -162,72 +164,31 @@ type OperationsService struct { s *Service } -// AuditConfig: Specifies the audit configuration for a service. -// The configuration determines which permission types are logged, and -// what -// identities, if any, are exempted from logging. -// An AuditConfig must have one or more AuditLogConfigs. -// -// If there are AuditConfigs for both `allServices` and a specific -// service, -// the union of the two AuditConfigs is used for that service: the -// log_types -// specified in each AuditConfig are enabled, and the exempted_members -// in each -// AuditLogConfig are exempted. -// -// Example Policy with multiple AuditConfigs: -// -// { -// "audit_configs": [ -// { -// "service": "allServices" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// }, -// { -// "log_type": "ADMIN_READ", -// } -// ] -// }, -// { -// "service": "sampleservice.googleapis.com" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// }, -// { -// "log_type": "DATA_WRITE", -// "exempted_members": [ -// "user:aliya@example.com" -// ] -// } -// ] -// } -// ] -// } -// -// For sampleservice, this policy enables DATA_READ, DATA_WRITE and -// ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, -// and -// aliya@example.com from DATA_WRITE logging. +// AuditConfig: Specifies the audit configuration for a service. The +// configuration determines which permission types are logged, and what +// identities, if any, are exempted from logging. An AuditConfig must +// have one or more AuditLogConfigs. If there are AuditConfigs for both +// `allServices` and a specific service, the union of the two +// AuditConfigs is used for that service: the log_types specified in +// each AuditConfig are enabled, and the exempted_members in each +// AuditLogConfig are exempted. 
Example Policy with multiple +// AuditConfigs: { "audit_configs": [ { "service": "allServices", +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": +// [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { +// "log_type": "ADMIN_READ" } ] }, { "service": +// "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": +// "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ +// "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy +// enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts +// jose@example.com from DATA_READ logging, and aliya@example.com from +// DATA_WRITE logging. type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of // permission. AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` - // Service: Specifies a service that will be enabled for audit - // logging. - // For example, `storage.googleapis.com`, - // `cloudsql.googleapis.com`. + // Service: Specifies a service that will be enabled for audit logging. + // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. // `allServices` is a special value that covers all services. Service string `json:"service,omitempty"` @@ -256,31 +217,15 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { } // AuditLogConfig: Provides the configuration for logging a type of -// permissions. -// Example: -// -// { -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// } -// ] -// } -// -// This enables 'DATA_READ' and 'DATA_WRITE' logging, while -// exempting -// jose@example.com from DATA_READ logging. +// permissions. Example: { "audit_log_configs": [ { "log_type": +// "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { +// "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and +// 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ +// logging. type AuditLogConfig struct { // ExemptedMembers: Specifies the identities that do not cause logging - // for this type of - // permission. - // Follows the same format of Binding.members. + // for this type of permission. Follows the same format of + // Binding.members. ExemptedMembers []string `json:"exemptedMembers,omitempty"` // LogType: The log type that this config enables. @@ -318,98 +263,60 @@ func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: The condition that is associated with this binding. - // - // If the condition evaluates to `true`, then this binding applies to - // the - // current request. - // - // If the condition evaluates to `false`, then this binding does not - // apply to - // the current request. However, a different role binding might grant - // the same - // role to one or more of the members in this binding. - // - // To learn which resources support conditions in their IAM policies, - // see - // the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/r - // esource-policies). + // BindingId: A client-specified ID for this binding. Expected to be + // globally unique to support the internal bindings-by-ID API. + BindingId string `json:"bindingId,omitempty"` + + // Condition: The condition that is associated with this binding. If the + // condition evaluates to `true`, then this binding applies to the + // current request. 
If the condition evaluates to `false`, then this + // binding does not apply to the current request. However, a different + // role binding might grant the same role to one or more of the members + // in this binding. To learn which resources support conditions in their + // IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud - // Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@example.com` . - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. - // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a user that has been recently deleted. - // For - // example, `alice@example.com?uid=123456789012345678901`. If the - // user is - // recovered, this value reverts to `user:{emailid}` and the - // recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address - // (plus - // unique identifier) representing a service account that has been - // recently - // deleted. For example, - // + // Platform resource. `members` can have the following values: * + // `allUsers`: A special identifier that represents anyone who is on the + // internet; with or without a Google account. * + // `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. * + // `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . * + // `serviceAccount:{emailid}`: An email address that represents a + // service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An + // email address that represents a Google group. For example, + // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An + // email address (plus unique identifier) representing a user that has + // been recently deleted. For example, + // `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered + // user retains the role in the binding. * + // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account - // retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a Google group that has been recently - // deleted. 
For example, - // `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` - // and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. - // - // + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains + // the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: + // An email address (plus unique identifier) representing a Google group + // that has been recently deleted. For example, + // `admins@example.com?uid=123456789012345678901`. If the group is + // recovered, this value reverts to `group:{emailid}` and the recovered + // group retains the role in the binding. * `domain:{domain}`: The G + // Suite domain (primary) that represents all the users of that domain. + // For example, `google.com` or `example.com`. Members []string `json:"members,omitempty"` - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Role: Role that is assigned to `members`. For example, + // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` - // ForceSendFields is a list of field names (e.g. "Condition") to + // ForceSendFields is a list of field names (e.g. "BindingId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -417,7 +324,7 @@ type Binding struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Condition") to include in + // NullFields is a list of field names (e.g. "BindingId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -433,65 +340,40 @@ func (s *Binding) MarshalJSON() ([]byte, error) { } // Expr: Represents a textual expression in the Common Expression -// Language (CEL) -// syntax. CEL is a C-like expression language. The syntax and semantics -// of CEL -// are documented at https://github.com/google/cel-spec. -// -// Example (Comparison): -// -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" -// -// Example (Equality): -// -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == -// request.auth.claims.email" -// -// Example (Logic): -// -// title: "Public documents" -// description: "Determine whether the document should be publicly -// visible" -// expression: "document.type != 'private' && document.type != -// 'internal'" -// -// Example (Data Manipulation): -// -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + -// string(document.create_time)" -// -// The exact variables and functions that may be referenced within an -// expression -// are determined by the service that evaluates it. See the -// service -// documentation for additional information. 
+// Language (CEL) syntax. CEL is a C-like expression language. The +// syntax and semantics of CEL are documented at +// https://github.com/google/cel-spec. Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" +// description: "Determine whether the document should be publicly +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. type Expr struct { // Description: Optional. Description of the expression. This is a - // longer text which - // describes the expression, e.g. when hovered over it in a UI. + // longer text which describes the expression, e.g. when hovered over it + // in a UI. Description string `json:"description,omitempty"` // Expression: Textual representation of an expression in Common - // Expression Language - // syntax. + // Expression Language syntax. Expression string `json:"expression,omitempty"` // Location: Optional. String indicating the location of the expression - // for error - // reporting, e.g. a file name and a position in the file. + // for error reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` // Title: Optional. Title for the expression, i.e. a short string - // describing - // its purpose. This can be used e.g. in UIs which allow to enter - // the - // expression. + // describing its purpose. This can be used e.g. in UIs which allow to + // enter the expression. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -517,32 +399,24 @@ func (s *Expr) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Folder: A Folder in an Organization's resource hierarchy, used -// to +// Folder: A Folder in an Organization's resource hierarchy, used to // organize that Organization's resources. type Folder struct { // CreateTime: Output only. Timestamp when the Folder was created. // Assigned by the server. CreateTime string `json:"createTime,omitempty"` - // DisplayName: The folder’s display name. - // A folder’s display name must be unique amongst its siblings, - // e.g. - // no two folders with the same parent can share the same display - // name. - // The display name must start and end with a letter or digit, may - // contain - // letters, digits, spaces, hyphens and underscores and can be no - // longer - // than 30 characters. This is captured by the regular - // expression: + // DisplayName: The folder’s display name. A folder’s display name + // must be unique amongst its siblings, e.g. no two folders with the + // same parent can share the same display name. The display name must + // start and end with a letter or digit, may contain letters, digits, + // spaces, hyphens and underscores and can be no longer than 30 + // characters. 
This is captured by the regular expression: // [\p{L}\p{N}]([\p{L}\p{N}_- ]{0,28}[\p{L}\p{N}])?. DisplayName string `json:"displayName,omitempty"` - // LifecycleState: Output only. The lifecycle state of the - // folder. - // Updates to the lifecycle_state must be performed via - // DeleteFolder and + // LifecycleState: Output only. The lifecycle state of the folder. + // Updates to the lifecycle_state must be performed via DeleteFolder and // UndeleteFolder. // // Possible values: @@ -552,13 +426,12 @@ type Folder struct { // user. LifecycleState string `json:"lifecycleState,omitempty"` - // Name: Output only. The resource name of the Folder. - // Its format is `folders/{folder_id}`, for example: "folders/1234". + // Name: Output only. The resource name of the Folder. Its format is + // `folders/{folder_id}`, for example: "folders/1234". Name string `json:"name,omitempty"` - // Parent: Required. The Folder’s parent's resource name. - // Updates to the folder's parent must be performed via - // MoveFolder. + // Parent: Required. The Folder’s parent's resource name. Updates to + // the folder's parent must be performed via MoveFolder. Parent string `json:"parent,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -591,8 +464,7 @@ func (s *Folder) MarshalJSON() ([]byte, error) { // FolderOperation: Metadata describing a long running folder operation type FolderOperation struct { // DestinationParent: The resource name of the folder or organization we - // are either creating - // the folder under or moving the folder to. + // are either creating the folder under or moving the folder to. DestinationParent string `json:"destinationParent,omitempty"` // DisplayName: The display name of the folder. @@ -606,8 +478,8 @@ type FolderOperation struct { // "MOVE" - A move folder operation. OperationType string `json:"operationType,omitempty"` - // SourceParent: The resource name of the folder's parent. - // Only applicable when the operation_type is MOVE. + // SourceParent: The resource name of the folder's parent. Only + // applicable when the operation_type is MOVE. SourceParent string `json:"sourceParent,omitempty"` // ForceSendFields is a list of field names (e.g. "DestinationParent") @@ -646,8 +518,7 @@ type FolderOperationError struct { // "MAX_CHILD_FOLDERS_VIOLATION" - The attempted action would violate // the max child folders constraint. // "FOLDER_NAME_UNIQUENESS_VIOLATION" - The attempted action would - // violate the locally-unique folder - // display_name constraint. + // violate the locally-unique folder display_name constraint. // "RESOURCE_DELETED_VIOLATION" - The resource being moved has been // deleted. // "PARENT_DELETED_VIOLATION" - The resource a folder was being added @@ -659,8 +530,7 @@ type FolderOperationError struct { // "FOLDER_TO_DELETE_NON_EMPTY_VIOLATION" - The folder the caller is // trying to delete contains active resources. // "DELETED_FOLDER_HEIGHT_VIOLATION" - The attempted action would - // violate the max deleted folder depth - // constraint. + // violate the max deleted folder depth constraint. ErrorMessageId string `json:"errorMessageId,omitempty"` // ForceSendFields is a list of field names (e.g. "ErrorMessageId") to @@ -690,8 +560,7 @@ func (s *FolderOperationError) MarshalJSON() ([]byte, error) { // GetIamPolicyRequest: Request message for `GetIamPolicy` method. type GetIamPolicyRequest struct { // Options: OPTIONAL: A `GetPolicyOptions` object for specifying options - // to - // `GetIamPolicy`. + // to `GetIamPolicy`. 
Options *GetPolicyOptions `json:"options,omitempty"` // ForceSendFields is a list of field names (e.g. "Options") to @@ -720,24 +589,14 @@ func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) { // GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. type GetPolicyOptions struct { // RequestedPolicyVersion: Optional. The policy format version to be - // returned. - // - // Valid values are 0, 1, and 3. Requests specifying an invalid value - // will be - // rejected. - // - // Requests for policies with any conditional bindings must specify - // version 3. - // Policies without any conditional bindings may specify any valid value - // or - // leave the field unset. - // - // To learn which resources support conditions in their IAM policies, - // see - // the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/r - // esource-policies). + // returned. Valid values are 0, 1, and 3. Requests specifying an + // invalid value will be rejected. Requests for policies with any + // conditional bindings must specify version 3. Policies without any + // conditional bindings may specify any valid value or leave the field + // unset. To learn which resources support conditions in their IAM + // policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -768,13 +627,11 @@ func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) { // ListFoldersResponse: The ListFolders response message. type ListFoldersResponse struct { // Folders: A possibly paginated list of Folders that are direct - // descendants of - // the specified parent resource. + // descendants of the specified parent resource. Folders []*Folder `json:"folders,omitempty"` // NextPageToken: A pagination token returned from a previous call to - // `ListFolders` - // that indicates from where listing should continue. + // `ListFolders` that indicates from where listing should continue. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -807,10 +664,8 @@ func (s *ListFoldersResponse) MarshalJSON() ([]byte, error) { // MoveFolderRequest: The MoveFolder request message. type MoveFolderRequest struct { // DestinationParent: Required. The resource name of the Folder or - // Organization to reparent - // the folder under. - // Must be of the form `folders/{folder_id}` or - // `organizations/{org_id}`. + // Organization to reparent the folder under. Must be of the form + // `folders/{folder_id}` or `organizations/{org_id}`. DestinationParent string `json:"destinationParent,omitempty"` // ForceSendFields is a list of field names (e.g. "DestinationParent") @@ -838,52 +693,38 @@ func (s *MoveFolderRequest) MarshalJSON() ([]byte, error) { } // Operation: This resource represents a long-running operation that is -// the result of a -// network API call. +// the result of a network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in - // progress. - // If `true`, the operation is completed, and either `error` or - // `response` is - // available. + // progress. If `true`, the operation is completed, and either `error` + // or `response` is available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. 
Error *Status `json:"error,omitempty"` - // Metadata: Service-specific metadata associated with the operation. - // It typically - // contains progress information and common metadata such as create - // time. - // Some services might not provide such metadata. Any method that - // returns a - // long-running operation should document the metadata type, if any. + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same - // service that - // originally returns it. If you use the default HTTP mapping, - // the - // `name` should be a resource name ending with + // service that originally returns it. If you use the default HTTP + // mapping, the `name` should be a resource name ending with // `operations/{unique_id}`. Name string `json:"name,omitempty"` - // Response: The normal response of the operation in case of success. - // If the original - // method returns no data on success, such as `Delete`, the response - // is - // `google.protobuf.Empty`. If the original method is - // standard - // `Get`/`Create`/`Update`, the response should be the resource. For - // other - // methods, the response should have the type `XxxResponse`, where - // `Xxx` - // is the original method name. For example, if the original method - // name - // is `TakeSnapshot()`, the inferred response type - // is - // `TakeSnapshotResponse`. + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as `Delete`, the + // response is `google.protobuf.Empty`. If the original method is + // standard `Get`/`Create`/`Update`, the response should be the + // resource. For other methods, the response should have the type + // `XxxResponse`, where `Xxx` is the original method name. For example, + // if the original method name is `TakeSnapshot()`, the inferred + // response type is `TakeSnapshotResponse`. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -914,154 +755,77 @@ func (s *Operation) MarshalJSON() ([]byte, error) { } // Policy: An Identity and Access Management (IAM) policy, which -// specifies access -// controls for Google Cloud resources. -// -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or -// more -// `members` to a single `role`. Members can be user accounts, service -// accounts, +// specifies access controls for Google Cloud resources. A `Policy` is a +// collection of `bindings`. A `binding` binds one or more `members` to +// a single `role`. Members can be user accounts, service accounts, // Google groups, and domains (such as G Suite). A `role` is a named -// list of -// permissions; each `role` can be an IAM predefined role or a -// user-created -// custom role. -// -// For some types of Google Cloud resources, a `binding` can also -// specify a -// `condition`, which is a logical expression that allows access to a -// resource -// only if the expression evaluates to `true`. A condition can add -// constraints -// based on attributes of the request, the resource, or both. 
To learn -// which -// resources support conditions in their IAM policies, see the -// [IAM +// list of permissions; each `role` can be an IAM predefined role or a +// user-created custom role. For some types of Google Cloud resources, a +// `binding` can also specify a `condition`, which is a logical +// expression that allows access to a resource only if the expression +// evaluates to `true`. A condition can add constraints based on +// attributes of the request, the resource, or both. To learn which +// resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p -// olicies). -// -// **JSON example:** -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": [ -// "user:eve@example.com" -// ], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// -// **YAML example:** -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// - etag: BwWWja0YfJA= -// - version: 3 -// -// For a description of IAM and its features, see the -// [IAM documentation](https://cloud.google.com/iam/docs/). +// olicies). **JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: +// 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). type Policy struct { // AuditConfigs: Specifies cloud audit logging configuration for this // policy. AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` // Bindings: Associates a list of `members` to a `role`. 
Optionally, may - // specify a - // `condition` that determines how and when the `bindings` are applied. - // Each - // of the `bindings` must contain at least one member. + // specify a `condition` that determines how and when the `bindings` are + // applied. Each of the `bindings` must contain at least one member. Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. - // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. **Important:** If you use IAM + // Conditions, you must include the `etag` field whenever you call + // `setIamPolicy`. If you omit this field, then IAM allows you to + // overwrite a version `3` policy with a version `1` policy, and all of // the conditions in the version `3` policy are lost. Etag string `json:"etag,omitempty"` - // Version: Specifies the format of the policy. - // - // Valid values are `0`, `1`, and `3`. Requests that specify an invalid - // value - // are rejected. - // + // Version: Specifies the format of the policy. Valid values are `0`, + // `1`, and `3`. Requests that specify an invalid value are rejected. // Any operation that affects conditional role bindings must specify - // version - // `3`. This requirement applies to the following operations: - // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a - // policy - // that includes conditions - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of - // the conditions in the version `3` policy are lost. - // - // If a policy does not include any conditions, operations on that - // policy may - // specify any valid version or leave the field unset. - // - // To learn which resources support conditions in their IAM policies, - // see the - // [IAM + // version `3`. 
This requirement applies to the following operations: * + // Getting a policy that includes a conditional role binding * Adding a + // conditional role binding to a policy * Changing a conditional role + // binding in a policy * Removing any role binding, with or without a + // condition, from a policy that includes conditions **Important:** If + // you use IAM Conditions, you must include the `etag` field whenever + // you call `setIamPolicy`. If you omit this field, then IAM allows you + // to overwrite a version `3` policy with a version `1` policy, and all + // of the conditions in the version `3` policy are lost. If a policy + // does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. To learn which + // resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p // olicies). Version int64 `json:"version,omitempty"` @@ -1094,19 +858,16 @@ func (s *Policy) MarshalJSON() ([]byte, error) { } // ProjectCreationStatus: A status object which is used as the -// `metadata` field for the Operation -// returned by CreateProject. It provides insight for when significant -// phases of -// Project creation have completed. +// `metadata` field for the Operation returned by CreateProject. It +// provides insight for when significant phases of Project creation have +// completed. type ProjectCreationStatus struct { // CreateTime: Creation time of the project creation workflow. CreateTime string `json:"createTime,omitempty"` // Gettable: True if the project can be retrieved using GetProject. No - // other operations - // on the project are guaranteed to work until the project creation - // is - // complete. + // other operations on the project are guaranteed to work until the + // project creation is complete. Gettable bool `json:"gettable,omitempty"` // Ready: True if the project creation process is complete. @@ -1142,43 +903,26 @@ type SearchFoldersRequest struct { PageSize int64 `json:"pageSize,omitempty"` // PageToken: Optional. A pagination token returned from a previous call - // to `SearchFolders` - // that indicates from where search should continue. + // to `SearchFolders` that indicates from where search should continue. PageToken string `json:"pageToken,omitempty"` - // Query: Search criteria used to select the Folders to return. - // If no search criteria is specified then all accessible folders will - // be - // returned. - // - // Query expressions can be used to restrict results based upon - // displayName, - // lifecycleState and parent, where the operators `=`, `NOT`, `AND` and - // `OR` - // can be used along with the suffix wildcard symbol `*`. - // - // The displayName field in a query expression should use escaped - // quotes - // for values that include whitespace to prevent unexpected - // behavior. - // - // Some example queries are: - // - // * Query `displayName=Test*` returns Folder resources whose display - // name - // starts with "Test". - // * Query `lifecycleState=ACTIVE` returns Folder resources - // with - // `lifecycleState` set to `ACTIVE`. - // * Query `parent=folders/123` returns Folder resources that - // have - // `folders/123` as a parent resource. - // * Query `parent=folders/123 AND lifecycleState=ACTIVE` returns - // active - // Folder resources that have `folders/123` as a parent resource. 
- // * Query `displayName=\\"Test String\\" returns Folder resources - // with - // display names that include both "Test" and "String". + // Query: Search criteria used to select the Folders to return. If no + // search criteria is specified then all accessible folders will be + // returned. Query expressions can be used to restrict results based + // upon displayName, lifecycleState and parent, where the operators `=`, + // `NOT`, `AND` and `OR` can be used along with the suffix wildcard + // symbol `*`. The displayName field in a query expression should use + // escaped quotes for values that include whitespace to prevent + // unexpected behavior. Some example queries are: * Query + // `displayName=Test*` returns Folder resources whose display name + // starts with "Test". * Query `lifecycleState=ACTIVE` returns Folder + // resources with `lifecycleState` set to `ACTIVE`. * Query + // `parent=folders/123` returns Folder resources that have `folders/123` + // as a parent resource. * Query `parent=folders/123 AND + // lifecycleState=ACTIVE` returns active Folder resources that have + // `folders/123` as a parent resource. * Query `displayName=\\"Test + // String\\" returns Folder resources with display names that include + // both "Test" and "String". Query string `json:"query,omitempty"` // ForceSendFields is a list of field names (e.g. "PageSize") to @@ -1206,13 +950,12 @@ func (s *SearchFoldersRequest) MarshalJSON() ([]byte, error) { // SearchFoldersResponse: The response message for searching folders. type SearchFoldersResponse struct { - // Folders: A possibly paginated folder search results. - // the specified parent resource. + // Folders: A possibly paginated folder search results. the specified + // parent resource. Folders []*Folder `json:"folders,omitempty"` // NextPageToken: A pagination token returned from a previous call to - // `SearchFolders` - // that indicates from where searching should continue. + // `SearchFolders` that indicates from where searching should continue. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1245,20 +988,15 @@ func (s *SearchFoldersResponse) MarshalJSON() ([]byte, error) { // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. + // `resource`. The size of the policy is limited to a few 10s of KB. An + // empty policy is a valid policy but certain Cloud Platform services + // (such as Projects) might reject them. Policy *Policy `json:"policy,omitempty"` // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the - // policy to modify. Only - // the fields in the mask will be modified. If no mask is provided, - // the - // following default mask is used: - // - // `paths: "bindings, etag" + // policy to modify. Only the fields in the mask will be modified. If no + // mask is provided, the following default mask is used: `paths: + // "bindings, etag" UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Policy") to @@ -1285,32 +1023,24 @@ func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. -// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. -// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -1340,11 +1070,8 @@ func (s *Status) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. - // Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For - // more - // information see - // [IAM + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. For more information see [IAM // Overview](https://cloud.google.com/iam/docs/overview#permissions). Permissions []string `json:"permissions,omitempty"` @@ -1375,8 +1102,7 @@ func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. + // the caller is allowed. Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1420,47 +1146,25 @@ type FoldersCreateCall struct { header_ http.Header } -// Create: Creates a Folder in the resource hierarchy. -// Returns an Operation which can be used to track the progress of -// the -// folder creation workflow. -// Upon success the Operation.response field will be populated with -// the -// created Folder. -// -// In order to succeed, the addition of this new Folder must not -// violate -// the Folder naming, height or fanout constraints. -// -// + The Folder's display_name must be distinct from all other Folder's -// that -// share its parent. -// + The addition of the Folder must not cause the active Folder -// hierarchy -// to exceed a height of 4. 
Note, the full active + deleted Folder -// hierarchy -// is allowed to reach a height of 8; this provides additional headroom -// when -// moving folders that contain deleted folders. -// + The addition of the Folder must not cause the total number of -// Folders -// under its parent to exceed 100. -// -// If the operation fails due to a folder constraint violation, some -// errors -// may be returned by the CreateFolder request, with status -// code -// FAILED_PRECONDITION and an error description. Other folder -// constraint -// violations will be communicated in the Operation, with the -// specific -// PreconditionFailure returned via the details list in the -// Operation.error -// field. -// -// The caller must have `resourcemanager.folders.create` permission on -// the -// identified parent. +// Create: Creates a Folder in the resource hierarchy. Returns an +// Operation which can be used to track the progress of the folder +// creation workflow. Upon success the Operation.response field will be +// populated with the created Folder. In order to succeed, the addition +// of this new Folder must not violate the Folder naming, height or +// fanout constraints. + The Folder's display_name must be distinct from +// all other Folder's that share its parent. + The addition of the +// Folder must not cause the active Folder hierarchy to exceed a height +// of 4. Note, the full active + deleted Folder hierarchy is allowed to +// reach a height of 8; this provides additional headroom when moving +// folders that contain deleted folders. + The addition of the Folder +// must not cause the total number of Folders under its parent to exceed +// 100. If the operation fails due to a folder constraint violation, +// some errors may be returned by the CreateFolder request, with status +// code FAILED_PRECONDITION and an error description. Other folder +// constraint violations will be communicated in the Operation, with the +// specific PreconditionFailure returned via the details list in the +// Operation.error field. The caller must have +// `resourcemanager.folders.create` permission on the identified parent. func (r *FoldersService) Create(folder *Folder) *FoldersCreateCall { c := &FoldersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.folder = folder @@ -1468,9 +1172,8 @@ func (r *FoldersService) Create(folder *Folder) *FoldersCreateCall { } // Parent sets the optional parameter "parent": Required. The resource -// name of the new Folder's parent. -// Must be of the form `folders/{folder_id}` or -// `organizations/{org_id}`. +// name of the new Folder's parent. Must be of the form +// `folders/{folder_id}` or `organizations/{org_id}`. 
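For orientation, a minimal sketch of driving the Folders.Create call documented above via the regenerated cloudresourcemanager/v2 client. The NewService constructor and the Folder.DisplayName field are assumed from the package's generated surface rather than shown in this hunk, and the parent and display-name values are placeholders.

package main

import (
	"context"
	"log"

	cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v2"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials by default (assumed constructor).
	srv, err := cloudresourcemanager.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Create returns a long-running Operation; on success Operation.Response
	// carries the created Folder, per the documentation above.
	op, err := srv.Folders.Create(&cloudresourcemanager.Folder{DisplayName: "team-a"}).
		Parent("organizations/1234567890"). // or "folders/{folder_id}"
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("folder create operation %q started (done=%v)", op.Name, op.Done)
}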
func (c *FoldersCreateCall) Parent(parent string) *FoldersCreateCall { c.urlParams_.Set("parent", parent) return c @@ -1503,7 +1206,7 @@ func (c *FoldersCreateCall) Header() http.Header { func (c *FoldersCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1564,14 +1267,14 @@ func (c *FoldersCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates a Folder in the resource hierarchy.\nReturns an Operation which can be used to track the progress of the\nfolder creation workflow.\nUpon success the Operation.response field will be populated with the\ncreated Folder.\n\nIn order to succeed, the addition of this new Folder must not violate\nthe Folder naming, height or fanout constraints.\n\n+ The Folder's display_name must be distinct from all other Folder's that\nshare its parent.\n+ The addition of the Folder must not cause the active Folder hierarchy\nto exceed a height of 4. Note, the full active + deleted Folder hierarchy\nis allowed to reach a height of 8; this provides additional headroom when\nmoving folders that contain deleted folders.\n+ The addition of the Folder must not cause the total number of Folders\nunder its parent to exceed 100.\n\nIf the operation fails due to a folder constraint violation, some errors\nmay be returned by the CreateFolder request, with status code\nFAILED_PRECONDITION and an error description. Other folder constraint\nviolations will be communicated in the Operation, with the specific\nPreconditionFailure returned via the details list in the Operation.error\nfield.\n\nThe caller must have `resourcemanager.folders.create` permission on the\nidentified parent.", + // "description": "Creates a Folder in the resource hierarchy. Returns an Operation which can be used to track the progress of the folder creation workflow. Upon success the Operation.response field will be populated with the created Folder. In order to succeed, the addition of this new Folder must not violate the Folder naming, height or fanout constraints. + The Folder's display_name must be distinct from all other Folder's that share its parent. + The addition of the Folder must not cause the active Folder hierarchy to exceed a height of 4. Note, the full active + deleted Folder hierarchy is allowed to reach a height of 8; this provides additional headroom when moving folders that contain deleted folders. + The addition of the Folder must not cause the total number of Folders under its parent to exceed 100. If the operation fails due to a folder constraint violation, some errors may be returned by the CreateFolder request, with status code FAILED_PRECONDITION and an error description. Other folder constraint violations will be communicated in the Operation, with the specific PreconditionFailure returned via the details list in the Operation.error field. The caller must have `resourcemanager.folders.create` permission on the identified parent.", // "flatPath": "v2/folders", // "httpMethod": "POST", // "id": "cloudresourcemanager.folders.create", // "parameterOrder": [], // "parameters": { // "parent": { - // "description": "Required. 
The resource name of the new Folder's parent.\nMust be of the form `folders/{folder_id}` or `organizations/{org_id}`.", + // "description": "Required. The resource name of the new Folder's parent. Must be of the form `folders/{folder_id}` or `organizations/{org_id}`.", // "location": "query", // "type": "string" // } @@ -1600,18 +1303,12 @@ type FoldersDeleteCall struct { header_ http.Header } -// Delete: Requests deletion of a Folder. The Folder is moved into -// the -// DELETE_REQUESTED state -// immediately, and is deleted approximately 30 days later. This method -// may -// only be called on an empty Folder in the -// ACTIVE state, where a Folder is empty if -// it doesn't contain any Folders or Projects in the -// ACTIVE state. -// The caller must have `resourcemanager.folders.delete` permission on -// the -// identified folder. +// Delete: Requests deletion of a Folder. The Folder is moved into the +// DELETE_REQUESTED state immediately, and is deleted approximately 30 +// days later. This method may only be called on an empty Folder in the +// ACTIVE state, where a Folder is empty if it doesn't contain any +// Folders or Projects in the ACTIVE state. The caller must have +// `resourcemanager.folders.delete` permission on the identified folder. func (r *FoldersService) Delete(name string) *FoldersDeleteCall { c := &FoldersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -1645,7 +1342,7 @@ func (c *FoldersDeleteCall) Header() http.Header { func (c *FoldersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1704,7 +1401,7 @@ func (c *FoldersDeleteCall) Do(opts ...googleapi.CallOption) (*Folder, error) { } return ret, nil // { - // "description": "Requests deletion of a Folder. The Folder is moved into the\nDELETE_REQUESTED state\nimmediately, and is deleted approximately 30 days later. This method may\nonly be called on an empty Folder in the\nACTIVE state, where a Folder is empty if\nit doesn't contain any Folders or Projects in the\nACTIVE state.\nThe caller must have `resourcemanager.folders.delete` permission on the\nidentified folder.", + // "description": "Requests deletion of a Folder. The Folder is moved into the DELETE_REQUESTED state immediately, and is deleted approximately 30 days later. This method may only be called on an empty Folder in the ACTIVE state, where a Folder is empty if it doesn't contain any Folders or Projects in the ACTIVE state. The caller must have `resourcemanager.folders.delete` permission on the identified folder.", // "flatPath": "v2/folders/{foldersId}", // "httpMethod": "DELETE", // "id": "cloudresourcemanager.folders.delete", @@ -1713,7 +1410,7 @@ func (c *FoldersDeleteCall) Do(opts ...googleapi.CallOption) (*Folder, error) { // ], // "parameters": { // "name": { - // "description": "Required. the resource name of the Folder to be deleted.\nMust be of the form `folders/{folder_id}`.", + // "description": "Required. the resource name of the Folder to be deleted. Must be of the form `folders/{folder_id}`.", // "location": "path", // "pattern": "^folders/[^/]+$", // "required": true, @@ -1742,14 +1439,10 @@ type FoldersGetCall struct { header_ http.Header } -// Get: Retrieves a Folder identified by the supplied resource -// name. 
-// Valid Folder resource names have the format -// `folders/{folder_id}` -// (for example, `folders/1234`). -// The caller must have `resourcemanager.folders.get` permission on -// the -// identified folder. +// Get: Retrieves a Folder identified by the supplied resource name. +// Valid Folder resource names have the format `folders/{folder_id}` +// (for example, `folders/1234`). The caller must have +// `resourcemanager.folders.get` permission on the identified folder. func (r *FoldersService) Get(name string) *FoldersGetCall { c := &FoldersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -1793,7 +1486,7 @@ func (c *FoldersGetCall) Header() http.Header { func (c *FoldersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1855,7 +1548,7 @@ func (c *FoldersGetCall) Do(opts ...googleapi.CallOption) (*Folder, error) { } return ret, nil // { - // "description": "Retrieves a Folder identified by the supplied resource name.\nValid Folder resource names have the format `folders/{folder_id}`\n(for example, `folders/1234`).\nThe caller must have `resourcemanager.folders.get` permission on the\nidentified folder.", + // "description": "Retrieves a Folder identified by the supplied resource name. Valid Folder resource names have the format `folders/{folder_id}` (for example, `folders/1234`). The caller must have `resourcemanager.folders.get` permission on the identified folder.", // "flatPath": "v2/folders/{foldersId}", // "httpMethod": "GET", // "id": "cloudresourcemanager.folders.get", @@ -1864,7 +1557,7 @@ func (c *FoldersGetCall) Do(opts ...googleapi.CallOption) (*Folder, error) { // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the Folder to retrieve.\nMust be of the form `folders/{folder_id}`.", + // "description": "Required. The resource name of the Folder to retrieve. Must be of the form `folders/{folder_id}`.", // "location": "path", // "pattern": "^folders/[^/]+$", // "required": true, @@ -1895,13 +1588,11 @@ type FoldersGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for a Folder. The -// returned policy may be -// empty if no such policy or resource exists. The `resource` field -// should -// be the Folder's resource name, e.g. "folders/1234". -// The caller must have `resourcemanager.folders.getIamPolicy` -// permission -// on the identified folder. +// returned policy may be empty if no such policy or resource exists. +// The `resource` field should be the Folder's resource name, e.g. +// "folders/1234". The caller must have +// `resourcemanager.folders.getIamPolicy` permission on the identified +// folder. 
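A sketch of the read-modify-write cycle described in the Policy.Etag documentation, combining the GetIamPolicy and SetIamPolicy calls on folders: fetch the policy at version 3 (so conditional bindings are returned intact), append a binding, and write the same object back so its etag guards against concurrent updates. The Binding field names and the roles/resourcemanager.folderViewer role are assumptions, not part of this hunk.

// import cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v2"

func addFolderViewer(srv *cloudresourcemanager.Service, folder, member string) (*cloudresourcemanager.Policy, error) {
	// Request policy version 3 so any conditional bindings survive the round trip.
	getReq := &cloudresourcemanager.GetIamPolicyRequest{
		Options: &cloudresourcemanager.GetPolicyOptions{RequestedPolicyVersion: 3},
	}
	policy, err := srv.Folders.GetIamPolicy(folder, getReq).Do() // folder, e.g. "folders/1234"
	if err != nil {
		return nil, err
	}
	// Append an unconditional binding (assumed Binding field names and role).
	policy.Bindings = append(policy.Bindings, &cloudresourcemanager.Binding{
		Role:    "roles/resourcemanager.folderViewer",
		Members: []string{member}, // e.g. "user:eve@example.com"
	})
	// Sending the fetched policy back carries its Etag, so a concurrent update
	// causes this SetIamPolicy call to fail instead of silently overwriting.
	return srv.Folders.SetIamPolicy(folder, &cloudresourcemanager.SetIamPolicyRequest{Policy: policy}).Do()
}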
func (r *FoldersService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *FoldersGetIamPolicyCall { c := &FoldersGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -1936,7 +1627,7 @@ func (c *FoldersGetIamPolicyCall) Header() http.Header { func (c *FoldersGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2000,7 +1691,7 @@ func (c *FoldersGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err } return ret, nil // { - // "description": "Gets the access control policy for a Folder. The returned policy may be\nempty if no such policy or resource exists. The `resource` field should\nbe the Folder's resource name, e.g. \"folders/1234\".\nThe caller must have `resourcemanager.folders.getIamPolicy` permission\non the identified folder.", + // "description": "Gets the access control policy for a Folder. The returned policy may be empty if no such policy or resource exists. The `resource` field should be the Folder's resource name, e.g. \"folders/1234\". The caller must have `resourcemanager.folders.getIamPolicy` permission on the identified folder.", // "flatPath": "v2/folders/{foldersId}:getIamPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.folders.getIamPolicy", @@ -2009,7 +1700,7 @@ func (c *FoldersGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^folders/[^/]+$", // "required": true, @@ -2042,16 +1733,11 @@ type FoldersListCall struct { } // List: Lists the Folders that are direct descendants of supplied -// parent resource. -// List provides a strongly consistent view of the Folders -// underneath -// the specified parent resource. -// List returns Folders sorted based upon the (ascending) lexical -// ordering -// of their display_name. -// The caller must have `resourcemanager.folders.list` permission on -// the -// identified parent. +// parent resource. List provides a strongly consistent view of the +// Folders underneath the specified parent resource. List returns +// Folders sorted based upon the (ascending) lexical ordering of their +// display_name. The caller must have `resourcemanager.folders.list` +// permission on the identified parent. func (r *FoldersService) List() *FoldersListCall { c := &FoldersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c @@ -2065,30 +1751,27 @@ func (c *FoldersListCall) PageSize(pageSize int64) *FoldersListCall { } // PageToken sets the optional parameter "pageToken": A pagination token -// returned from a previous call to `ListFolders` -// that indicates where this listing should continue from. +// returned from a previous call to `ListFolders` that indicates where +// this listing should continue from. 
func (c *FoldersListCall) PageToken(pageToken string) *FoldersListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Parent sets the optional parameter "parent": Required. The resource -// name of the Organization or Folder whose Folders are -// being listed. +// name of the Organization or Folder whose Folders are being listed. // Must be of the form `folders/{folder_id}` or -// `organizations/{org_id}`. -// Access to this method is controlled by checking -// the -// `resourcemanager.folders.list` permission on the `parent`. +// `organizations/{org_id}`. Access to this method is controlled by +// checking the `resourcemanager.folders.list` permission on the +// `parent`. func (c *FoldersListCall) Parent(parent string) *FoldersListCall { c.urlParams_.Set("parent", parent) return c } // ShowDeleted sets the optional parameter "showDeleted": Controls -// whether Folders in the -// DELETE_REQUESTED -// state should be returned. Defaults to false. +// whether Folders in the DELETE_REQUESTED state should be returned. +// Defaults to false. func (c *FoldersListCall) ShowDeleted(showDeleted bool) *FoldersListCall { c.urlParams_.Set("showDeleted", fmt.Sprint(showDeleted)) return c @@ -2131,7 +1814,7 @@ func (c *FoldersListCall) Header() http.Header { func (c *FoldersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2190,7 +1873,7 @@ func (c *FoldersListCall) Do(opts ...googleapi.CallOption) (*ListFoldersResponse } return ret, nil // { - // "description": "Lists the Folders that are direct descendants of supplied parent resource.\nList provides a strongly consistent view of the Folders underneath\nthe specified parent resource.\nList returns Folders sorted based upon the (ascending) lexical ordering\nof their display_name.\nThe caller must have `resourcemanager.folders.list` permission on the\nidentified parent.", + // "description": "Lists the Folders that are direct descendants of supplied parent resource. List provides a strongly consistent view of the Folders underneath the specified parent resource. List returns Folders sorted based upon the (ascending) lexical ordering of their display_name. The caller must have `resourcemanager.folders.list` permission on the identified parent.", // "flatPath": "v2/folders", // "httpMethod": "GET", // "id": "cloudresourcemanager.folders.list", @@ -2203,17 +1886,17 @@ func (c *FoldersListCall) Do(opts ...googleapi.CallOption) (*ListFoldersResponse // "type": "integer" // }, // "pageToken": { - // "description": "Optional. A pagination token returned from a previous call to `ListFolders`\nthat indicates where this listing should continue from.", + // "description": "Optional. A pagination token returned from a previous call to `ListFolders` that indicates where this listing should continue from.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The resource name of the Organization or Folder whose Folders are\nbeing listed.\nMust be of the form `folders/{folder_id}` or `organizations/{org_id}`.\nAccess to this method is controlled by checking the\n`resourcemanager.folders.list` permission on the `parent`.", + // "description": "Required. The resource name of the Organization or Folder whose Folders are being listed. 
Must be of the form `folders/{folder_id}` or `organizations/{org_id}`. Access to this method is controlled by checking the `resourcemanager.folders.list` permission on the `parent`.", // "location": "query", // "type": "string" // }, // "showDeleted": { - // "description": "Optional. Controls whether Folders in the\nDELETE_REQUESTED\nstate should be returned. Defaults to false.", + // "description": "Optional. Controls whether Folders in the DELETE_REQUESTED state should be returned. Defaults to false.", // "location": "query", // "type": "boolean" // } @@ -2262,32 +1945,20 @@ type FoldersMoveCall struct { header_ http.Header } -// Move: Moves a Folder under a new resource parent. -// Returns an Operation which can be used to track the progress of -// the -// folder move workflow. -// Upon success the Operation.response field will be populated with -// the -// moved Folder. -// Upon failure, a FolderOperationError categorizing the failure cause -// will -// be returned - if the failure occurs synchronously then -// the -// FolderOperationError will be returned via the Status.details -// field -// and if it occurs asynchronously then the FolderOperation will be -// returned -// via the Operation.error field. -// In addition, the Operation.metadata field will be populated with -// a -// FolderOperation message as an aid to stateless clients. -// Folder moves will be rejected if they violate either the naming, -// height -// or fanout constraints described in the -// CreateFolder documentation. -// The caller must have `resourcemanager.folders.move` permission on -// the -// folder's current and proposed new parent. +// Move: Moves a Folder under a new resource parent. Returns an +// Operation which can be used to track the progress of the folder move +// workflow. Upon success the Operation.response field will be populated +// with the moved Folder. Upon failure, a FolderOperationError +// categorizing the failure cause will be returned - if the failure +// occurs synchronously then the FolderOperationError will be returned +// via the Status.details field and if it occurs asynchronously then the +// FolderOperation will be returned via the Operation.error field. In +// addition, the Operation.metadata field will be populated with a +// FolderOperation message as an aid to stateless clients. Folder moves +// will be rejected if they violate either the naming, height or fanout +// constraints described in the CreateFolder documentation. The caller +// must have `resourcemanager.folders.move` permission on the folder's +// current and proposed new parent. 
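A short sketch of the Move call described above; the resource names are placeholders, and the returned Operation can be polled for the outcome of the asynchronous move.

// import cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v2"

func moveFolder(srv *cloudresourcemanager.Service, name, destinationParent string) (*cloudresourcemanager.Operation, error) {
	// name is of the form "folders/{folder_id}"; destinationParent is
	// "folders/{folder_id}" or "organizations/{org_id}".
	req := &cloudresourcemanager.MoveFolderRequest{DestinationParent: destinationParent}
	return srv.Folders.Move(name, req).Do()
}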
func (r *FoldersService) Move(name string, movefolderrequest *MoveFolderRequest) *FoldersMoveCall { c := &FoldersMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2322,7 +1993,7 @@ func (c *FoldersMoveCall) Header() http.Header { func (c *FoldersMoveCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2386,7 +2057,7 @@ func (c *FoldersMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Moves a Folder under a new resource parent.\nReturns an Operation which can be used to track the progress of the\nfolder move workflow.\nUpon success the Operation.response field will be populated with the\nmoved Folder.\nUpon failure, a FolderOperationError categorizing the failure cause will\nbe returned - if the failure occurs synchronously then the\nFolderOperationError will be returned via the Status.details field\nand if it occurs asynchronously then the FolderOperation will be returned\nvia the Operation.error field.\nIn addition, the Operation.metadata field will be populated with a\nFolderOperation message as an aid to stateless clients.\nFolder moves will be rejected if they violate either the naming, height\nor fanout constraints described in the\nCreateFolder documentation.\nThe caller must have `resourcemanager.folders.move` permission on the\nfolder's current and proposed new parent.", + // "description": "Moves a Folder under a new resource parent. Returns an Operation which can be used to track the progress of the folder move workflow. Upon success the Operation.response field will be populated with the moved Folder. Upon failure, a FolderOperationError categorizing the failure cause will be returned - if the failure occurs synchronously then the FolderOperationError will be returned via the Status.details field and if it occurs asynchronously then the FolderOperation will be returned via the Operation.error field. In addition, the Operation.metadata field will be populated with a FolderOperation message as an aid to stateless clients. Folder moves will be rejected if they violate either the naming, height or fanout constraints described in the CreateFolder documentation. The caller must have `resourcemanager.folders.move` permission on the folder's current and proposed new parent.", // "flatPath": "v2/folders/{foldersId}:move", // "httpMethod": "POST", // "id": "cloudresourcemanager.folders.move", @@ -2395,7 +2066,7 @@ func (c *FoldersMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the Folder to move.\nMust be of the form folders/{folder_id}", + // "description": "Required. The resource name of the Folder to move. Must be of the form folders/{folder_id}", // "location": "path", // "pattern": "^folders/[^/]+$", // "required": true, @@ -2427,28 +2098,18 @@ type FoldersPatchCall struct { header_ http.Header } -// Patch: Updates a Folder, changing its display_name. -// Changes to the folder display_name will be rejected if they violate -// either -// the display_name formatting rules or naming constraints described -// in -// the CreateFolder documentation. 
-// -// The Folder's display name must start and end with a letter or -// digit, -// may contain letters, digits, spaces, hyphens and underscores and can -// be -// no longer than 30 characters. This is captured by the regular -// expression: -// [\p{L}\p{N}]([\p{L}\p{N}_- ]{0,28}[\p{L}\p{N}])?. -// The caller must have `resourcemanager.folders.update` permission on -// the -// identified folder. -// -// If the update fails due to the unique name constraint then -// a -// PreconditionFailure explaining this violation will be returned -// in the Status.details field. +// Patch: Updates a Folder, changing its display_name. Changes to the +// folder display_name will be rejected if they violate either the +// display_name formatting rules or naming constraints described in the +// CreateFolder documentation. The Folder's display name must start and +// end with a letter or digit, may contain letters, digits, spaces, +// hyphens and underscores and can be no longer than 30 characters. This +// is captured by the regular expression: [\p{L}\p{N}]([\p{L}\p{N}_- +// ]{0,28}[\p{L}\p{N}])?. The caller must have +// `resourcemanager.folders.update` permission on the identified folder. +// If the update fails due to the unique name constraint then a +// PreconditionFailure explaining this violation will be returned in the +// Status.details field. func (r *FoldersService) Patch(name string, folder *Folder) *FoldersPatchCall { c := &FoldersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2457,8 +2118,7 @@ func (r *FoldersService) Patch(name string, folder *Folder) *FoldersPatchCall { } // UpdateMask sets the optional parameter "updateMask": Required. Fields -// to be updated. -// Only the `display_name` can be updated. +// to be updated. Only the `display_name` can be updated. func (c *FoldersPatchCall) UpdateMask(updateMask string) *FoldersPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -2491,7 +2151,7 @@ func (c *FoldersPatchCall) Header() http.Header { func (c *FoldersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2555,7 +2215,7 @@ func (c *FoldersPatchCall) Do(opts ...googleapi.CallOption) (*Folder, error) { } return ret, nil // { - // "description": "Updates a Folder, changing its display_name.\nChanges to the folder display_name will be rejected if they violate either\nthe display_name formatting rules or naming constraints described in\nthe CreateFolder documentation.\n\nThe Folder's display name must start and end with a letter or digit,\nmay contain letters, digits, spaces, hyphens and underscores and can be\nno longer than 30 characters. This is captured by the regular expression:\n[\\p{L}\\p{N}]([\\p{L}\\p{N}_- ]{0,28}[\\p{L}\\p{N}])?.\nThe caller must have `resourcemanager.folders.update` permission on the\nidentified folder.\n\nIf the update fails due to the unique name constraint then a\nPreconditionFailure explaining this violation will be returned\nin the Status.details field.", + // "description": "Updates a Folder, changing its display_name. Changes to the folder display_name will be rejected if they violate either the display_name formatting rules or naming constraints described in the CreateFolder documentation. 
The Folder's display name must start and end with a letter or digit, may contain letters, digits, spaces, hyphens and underscores and can be no longer than 30 characters. This is captured by the regular expression: [\\p{L}\\p{N}]([\\p{L}\\p{N}_- ]{0,28}[\\p{L}\\p{N}])?. The caller must have `resourcemanager.folders.update` permission on the identified folder. If the update fails due to the unique name constraint then a PreconditionFailure explaining this violation will be returned in the Status.details field.", // "flatPath": "v2/folders/{foldersId}", // "httpMethod": "PATCH", // "id": "cloudresourcemanager.folders.patch", @@ -2564,14 +2224,14 @@ func (c *FoldersPatchCall) Do(opts ...googleapi.CallOption) (*Folder, error) { // ], // "parameters": { // "name": { - // "description": "Output only. The resource name of the Folder.\nIts format is `folders/{folder_id}`, for example: \"folders/1234\".", + // "description": "Output only. The resource name of the Folder. Its format is `folders/{folder_id}`, for example: \"folders/1234\".", // "location": "path", // "pattern": "^folders/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "Required. Fields to be updated.\nOnly the `display_name` can be updated.", + // "description": "Required. Fields to be updated. Only the `display_name` can be updated.", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -2601,14 +2261,11 @@ type FoldersSearchCall struct { header_ http.Header } -// Search: Search for folders that match specific filter -// criteria. +// Search: Search for folders that match specific filter criteria. // Search provides an eventually consistent view of the folders a user -// has -// access to which meet the specified filter criteria. -// -// This will only return folders on which the caller has the -// permission `resourcemanager.folders.get`. +// has access to which meet the specified filter criteria. This will +// only return folders on which the caller has the permission +// `resourcemanager.folders.get`. func (r *FoldersService) Search(searchfoldersrequest *SearchFoldersRequest) *FoldersSearchCall { c := &FoldersSearchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.searchfoldersrequest = searchfoldersrequest @@ -2642,7 +2299,7 @@ func (c *FoldersSearchCall) Header() http.Header { func (c *FoldersSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2703,7 +2360,7 @@ func (c *FoldersSearchCall) Do(opts ...googleapi.CallOption) (*SearchFoldersResp } return ret, nil // { - // "description": "Search for folders that match specific filter criteria.\nSearch provides an eventually consistent view of the folders a user has\naccess to which meet the specified filter criteria.\n\nThis will only return folders on which the caller has the\npermission `resourcemanager.folders.get`.", + // "description": "Search for folders that match specific filter criteria. Search provides an eventually consistent view of the folders a user has access to which meet the specified filter criteria. 
This will only return folders on which the caller has the permission `resourcemanager.folders.get`.", // "flatPath": "v2/folders:search", // "httpMethod": "POST", // "id": "cloudresourcemanager.folders.search", @@ -2757,13 +2414,10 @@ type FoldersSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on a Folder, replacing -// any existing policy. -// The `resource` field should be the Folder's resource name, -// e.g. -// "folders/1234". -// The caller must have `resourcemanager.folders.setIamPolicy` -// permission -// on the identified folder. +// any existing policy. The `resource` field should be the Folder's +// resource name, e.g. "folders/1234". The caller must have +// `resourcemanager.folders.setIamPolicy` permission on the identified +// folder. func (r *FoldersService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *FoldersSetIamPolicyCall { c := &FoldersSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2798,7 +2452,7 @@ func (c *FoldersSetIamPolicyCall) Header() http.Header { func (c *FoldersSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2862,7 +2516,7 @@ func (c *FoldersSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err } return ret, nil // { - // "description": "Sets the access control policy on a Folder, replacing any existing policy.\nThe `resource` field should be the Folder's resource name, e.g.\n\"folders/1234\".\nThe caller must have `resourcemanager.folders.setIamPolicy` permission\non the identified folder.", + // "description": "Sets the access control policy on a Folder, replacing any existing policy. The `resource` field should be the Folder's resource name, e.g. \"folders/1234\". The caller must have `resourcemanager.folders.setIamPolicy` permission on the identified folder.", // "flatPath": "v2/folders/{foldersId}:setIamPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.folders.setIamPolicy", @@ -2871,7 +2525,7 @@ func (c *FoldersSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^folders/[^/]+$", // "required": true, @@ -2904,11 +2558,9 @@ type FoldersTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified Folder. -// The `resource` field should be the Folder's resource name, -// e.g. "folders/1234". -// -// There are no permissions required for making this API call. +// specified Folder. The `resource` field should be the Folder's +// resource name, e.g. "folders/1234". There are no permissions required +// for making this API call. 
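A sketch of the TestIamPermissions call documented here, checking a single folder permission; the permission string is illustrative.

// import cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v2"

func canCreateSubfolders(srv *cloudresourcemanager.Service, folder string) (bool, error) {
	req := &cloudresourcemanager.TestIamPermissionsRequest{
		Permissions: []string{"resourcemanager.folders.create"},
	}
	resp, err := srv.Folders.TestIamPermissions(folder, req).Do() // folder, e.g. "folders/1234"
	if err != nil {
		return false, err
	}
	// The response echoes back only the permissions the caller actually holds.
	return len(resp.Permissions) == 1, nil
}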
func (r *FoldersService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *FoldersTestIamPermissionsCall { c := &FoldersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2943,7 +2595,7 @@ func (c *FoldersTestIamPermissionsCall) Header() http.Header { func (c *FoldersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3007,7 +2659,7 @@ func (c *FoldersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified Folder.\nThe `resource` field should be the Folder's resource name,\ne.g. \"folders/1234\".\n\nThere are no permissions required for making this API call.", + // "description": "Returns permissions that a caller has on the specified Folder. The `resource` field should be the Folder's resource name, e.g. \"folders/1234\". There are no permissions required for making this API call.", // "flatPath": "v2/folders/{foldersId}:testIamPermissions", // "httpMethod": "POST", // "id": "cloudresourcemanager.folders.testIamPermissions", @@ -3016,7 +2668,7 @@ func (c *FoldersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^folders/[^/]+$", // "required": true, @@ -3049,19 +2701,13 @@ type FoldersUndeleteCall struct { } // Undelete: Cancels the deletion request for a Folder. This method may -// only be -// called on a Folder in the -// DELETE_REQUESTED state. -// In order to succeed, the Folder's parent must be in the -// ACTIVE state. -// In addition, reintroducing the folder into the tree must not -// violate -// folder naming, height and fanout constraints described in -// the -// CreateFolder documentation. -// The caller must have `resourcemanager.folders.undelete` permission on -// the -// identified folder. +// only be called on a Folder in the DELETE_REQUESTED state. In order to +// succeed, the Folder's parent must be in the ACTIVE state. In +// addition, reintroducing the folder into the tree must not violate +// folder naming, height and fanout constraints described in the +// CreateFolder documentation. The caller must have +// `resourcemanager.folders.undelete` permission on the identified +// folder. 
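And the matching Undelete call for a folder still in the DELETE_REQUESTED state; the request body carries no fields.

// import cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v2"

func undeleteFolder(srv *cloudresourcemanager.Service, name string) (*cloudresourcemanager.Folder, error) {
	// name is of the form "folders/{folder_id}"; per the documentation above, the
	// parent must be ACTIVE and naming/height/fanout constraints must still hold.
	return srv.Folders.Undelete(name, &cloudresourcemanager.UndeleteFolderRequest{}).Do()
}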
func (r *FoldersService) Undelete(name string, undeletefolderrequest *UndeleteFolderRequest) *FoldersUndeleteCall { c := &FoldersUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3096,7 +2742,7 @@ func (c *FoldersUndeleteCall) Header() http.Header { func (c *FoldersUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3160,7 +2806,7 @@ func (c *FoldersUndeleteCall) Do(opts ...googleapi.CallOption) (*Folder, error) } return ret, nil // { - // "description": "Cancels the deletion request for a Folder. This method may only be\ncalled on a Folder in the\nDELETE_REQUESTED state.\nIn order to succeed, the Folder's parent must be in the\nACTIVE state.\nIn addition, reintroducing the folder into the tree must not violate\nfolder naming, height and fanout constraints described in the\nCreateFolder documentation.\nThe caller must have `resourcemanager.folders.undelete` permission on the\nidentified folder.", + // "description": "Cancels the deletion request for a Folder. This method may only be called on a Folder in the DELETE_REQUESTED state. In order to succeed, the Folder's parent must be in the ACTIVE state. In addition, reintroducing the folder into the tree must not violate folder naming, height and fanout constraints described in the CreateFolder documentation. The caller must have `resourcemanager.folders.undelete` permission on the identified folder.", // "flatPath": "v2/folders/{foldersId}:undelete", // "httpMethod": "POST", // "id": "cloudresourcemanager.folders.undelete", @@ -3169,7 +2815,7 @@ func (c *FoldersUndeleteCall) Do(opts ...googleapi.CallOption) (*Folder, error) // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the Folder to undelete.\nMust be of the form `folders/{folder_id}`.", + // "description": "Required. The resource name of the Folder to undelete. Must be of the form `folders/{folder_id}`.", // "location": "path", // "pattern": "^folders/[^/]+$", // "required": true, @@ -3201,11 +2847,9 @@ type OperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this -// method to poll the operation result at intervals as recommended by -// the API -// service. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. func (r *OperationsService) Get(name string) *OperationsGetCall { c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3249,7 +2893,7 @@ func (c *OperationsGetCall) Header() http.Header { func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3311,7 +2955,7 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. 
Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", // "flatPath": "v1/operations/{operationsId}", // "httpMethod": "GET", // "id": "cloudresourcemanager.operations.get", diff --git a/vendor/google.golang.org/api/composer/v1beta1/composer-api.json b/vendor/google.golang.org/api/composer/v1beta1/composer-api.json index 2e9afbabd0d..618d95ad16a 100644 --- a/vendor/google.golang.org/api/composer/v1beta1/composer-api.json +++ b/vendor/google.golang.org/api/composer/v1beta1/composer-api.json @@ -121,7 +121,7 @@ ], "parameters": { "parent": { - "description": "The parent must be of the form\n\"projects/{projectId}/locations/{locationId}\".", + "description": "The parent must be of the form \"projects/{projectId}/locations/{locationId}\".", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -149,7 +149,7 @@ ], "parameters": { "name": { - "description": "The environment to delete, in the form:\n\"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", + "description": "The environment to delete, in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", "required": true, @@ -174,7 +174,7 @@ ], "parameters": { "name": { - "description": "The resource name of the environment to get, in the form:\n\"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", + "description": "The resource name of the environment to get, in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", "required": true, @@ -210,7 +210,7 @@ "type": "string" }, "parent": { - "description": "List environments in the given project and location, in the form:\n\"projects/{projectId}/locations/{locationId}\"", + "description": "List environments in the given project and location, in the form: \"projects/{projectId}/locations/{locationId}\"", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -235,14 +235,14 @@ ], "parameters": { "name": { - "description": "The relative resource name of the environment to update, in the form:\n\"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", + "description": "The relative resource name of the environment to update, in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", "required": true, "type": "string" }, "updateMask": { - "description": "Required. A comma-separated list of paths, relative to `Environment`, of\nfields to update.\nFor example, to set the version of scikit-learn to install in the\nenvironment to 0.19.0 and to remove an existing installation of\nargparse, the `updateMask` parameter would include the following two\n`paths` values: \"config.softwareConfig.pypiPackages.scikit-learn\" and\n\"config.softwareConfig.pypiPackages.argparse\". 
The included patch\nenvironment would specify the scikit-learn version as follows:\n\n {\n \"config\":{\n \"softwareConfig\":{\n \"pypiPackages\":{\n \"scikit-learn\":\"==0.19.0\"\n }\n }\n }\n }\n\nNote that in the above example, any existing PyPI packages\nother than scikit-learn and argparse will be unaffected.\n\nOnly one update type may be included in a single request's `updateMask`.\nFor example, one cannot update both the PyPI packages and\nlabels in the same request. However, it is possible to update multiple\nmembers of a map field simultaneously in the same request. For example,\nto set the labels \"label1\" and \"label2\" while clearing \"label3\" (assuming\nit already exists), one can\nprovide the paths \"labels.label1\", \"labels.label2\", and \"labels.label3\"\nand populate the patch environment as follows:\n\n {\n \"labels\":{\n \"label1\":\"new-label1-value\"\n \"label2\":\"new-label2-value\"\n }\n }\n\nNote that in the above example, any existing labels that are not\nincluded in the `updateMask` will be unaffected.\n\nIt is also possible to replace an entire map field by providing the\nmap field's path in the `updateMask`. The new value of the field will\nbe that which is provided in the patch environment. For example, to\ndelete all pre-existing user-specified PyPI packages and\ninstall botocore at version 1.7.14, the `updateMask` would contain\nthe path \"config.softwareConfig.pypiPackages\", and\nthe patch environment would be the following:\n\n {\n \"config\":{\n \"softwareConfig\":{\n \"pypiPackages\":{\n \"botocore\":\"==1.7.14\"\n }\n }\n }\n }\n\n\u003cstrong\u003eNote:\u003c/strong\u003e Only the following fields can be updated:\n\n \u003ctable\u003e\n \u003ctbody\u003e\n \u003ctr\u003e\n \u003ctd\u003e\u003cstrong\u003eMask\u003c/strong\u003e\u003c/td\u003e\n \u003ctd\u003e\u003cstrong\u003ePurpose\u003c/strong\u003e\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003econfig.softwareConfig.pypiPackages\n \u003c/td\u003e\n \u003ctd\u003eReplace all custom custom PyPI packages. If a replacement\n package map is not included in `environment`, all custom\n PyPI packages are cleared. It is an error to provide both this mask and a\n mask specifying an individual package.\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003econfig.softwareConfig.pypiPackages.\u003cvar\u003epackagename\u003c/var\u003e\u003c/td\u003e\n \u003ctd\u003eUpdate the custom PyPI package \u003cvar\u003epackagename\u003c/var\u003e,\n preserving other packages. To delete the package, include it in\n `updateMask`, and omit the mapping for it in\n `environment.config.softwareConfig.pypiPackages`. It is an error\n to provide both a mask of this form and the\n \"config.softwareConfig.pypiPackages\" mask.\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003elabels\u003c/td\u003e\n \u003ctd\u003eReplace all environment labels. If a replacement labels map is not\n included in `environment`, all labels are cleared. It is an error to\n provide both this mask and a mask specifying one or more individual\n labels.\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003elabels.\u003cvar\u003elabelName\u003c/var\u003e\u003c/td\u003e\n \u003ctd\u003eSet the label named \u003cvar\u003elabelName\u003c/var\u003e, while preserving other\n labels. To delete the label, include it in `updateMask` and omit its\n mapping in `environment.labels`. 
It is an error to provide both a\n mask of this form and the \"labels\" mask.\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003econfig.nodeCount\u003c/td\u003e\n \u003ctd\u003eHorizontally scale the number of nodes in the environment. An integer\n greater than or equal to 3 must be provided in the `config.nodeCount`\n field.\n \u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003econfig.webServerNetworkAccessControl\u003c/td\u003e\n \u003ctd\u003eReplace the environment's current WebServerNetworkAccessControl.\n \u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003econfig.softwareConfig.airflowConfigOverrides\u003c/td\u003e\n \u003ctd\u003eReplace all Apache Airflow config overrides. If a replacement config\n overrides map is not included in `environment`, all config overrides\n are cleared.\n It is an error to provide both this mask and a mask specifying one or\n more individual config overrides.\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003econfig.softwareConfig.airflowConfigOverrides.\u003cvar\u003esection\u003c/var\u003e-\u003cvar\u003ename\n \u003c/var\u003e\u003c/td\u003e\n \u003ctd\u003eOverride the Apache Airflow config property \u003cvar\u003ename\u003c/var\u003e in the\n section named \u003cvar\u003esection\u003c/var\u003e, preserving other properties. To delete\n the property override, include it in `updateMask` and omit its mapping\n in `environment.config.softwareConfig.airflowConfigOverrides`.\n It is an error to provide both a mask of this form and the\n \"config.softwareConfig.airflowConfigOverrides\" mask.\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003econfig.softwareConfig.envVariables\u003c/td\u003e\n \u003ctd\u003eReplace all environment variables. If a replacement environment\n variable map is not included in `environment`, all custom environment\n variables are cleared.\n It is an error to provide both this mask and a mask specifying one or\n more individual environment variables.\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003econfig.softwareConfig.imageVersion\u003c/td\u003e\n \u003ctd\u003eUpgrade the version of the environment in-place. Refer to\n `SoftwareConfig.image_version` for information on how to format the new\n image version. Additionally, the new image version cannot effect a version\n downgrade and must match the current image version's Composer major\n version and Airflow major and minor versions. Consult the\n \u003ca href=\"/composer/docs/concepts/versioning/composer-versions\"\u003eCloud\n Composer Version List\u003c/a\u003e for valid values.\u003c/td\u003e\n \u003c/tr\u003e\n \u003c/tbody\u003e\n \u003c/table\u003e", + "description": "Required. A comma-separated list of paths, relative to `Environment`, of fields to update. For example, to set the version of scikit-learn to install in the environment to 0.19.0 and to remove an existing installation of argparse, the `updateMask` parameter would include the following two `paths` values: \"config.softwareConfig.pypiPackages.scikit-learn\" and \"config.softwareConfig.pypiPackages.argparse\". The included patch environment would specify the scikit-learn version as follows: { \"config\":{ \"softwareConfig\":{ \"pypiPackages\":{ \"scikit-learn\":\"==0.19.0\" } } } } Note that in the above example, any existing PyPI packages other than scikit-learn and argparse will be unaffected. Only one update type may be included in a single request's `updateMask`. 
For example, one cannot update both the PyPI packages and labels in the same request. However, it is possible to update multiple members of a map field simultaneously in the same request. For example, to set the labels \"label1\" and \"label2\" while clearing \"label3\" (assuming it already exists), one can provide the paths \"labels.label1\", \"labels.label2\", and \"labels.label3\" and populate the patch environment as follows: { \"labels\":{ \"label1\":\"new-label1-value\" \"label2\":\"new-label2-value\" } } Note that in the above example, any existing labels that are not included in the `updateMask` will be unaffected. It is also possible to replace an entire map field by providing the map field's path in the `updateMask`. The new value of the field will be that which is provided in the patch environment. For example, to delete all pre-existing user-specified PyPI packages and install botocore at version 1.7.14, the `updateMask` would contain the path \"config.softwareConfig.pypiPackages\", and the patch environment would be the following: { \"config\":{ \"softwareConfig\":{ \"pypiPackages\":{ \"botocore\":\"==1.7.14\" } } } } *Note:* Only the following fields can be updated: * config.softwareConfig.pypiPackages * Replace all custom custom PyPI packages. If a replacement package map is not included in `environment`, all custom PyPI packages are cleared. It is an error to provide both this mask and a mask specifying an individual package. * config.softwareConfig.pypiPackages.packagename * Update the custom PyPI package packagename, preserving other packages. To delete the package, include it in `updateMask`, and omit the mapping for it in `environment.config.softwareConfig.pypiPackages`. It is an error to provide both a mask of this form and the \"config.softwareConfig.pypiPackages\" mask. * labels * Replace all environment labels. If a replacement labels map is not included in `environment`, all labels are cleared. It is an error to provide both this mask and a mask specifying one or more individual labels. * labels.labelName * Set the label named labelName, while preserving other labels. To delete the label, include it in `updateMask` and omit its mapping in `environment.labels`. It is an error to provide both a mask of this form and the \"labels\" mask. * config.nodeCount * Horizontally scale the number of nodes in the environment. An integer greater than or equal to 3 must be provided in the `config.nodeCount` field. * config.webServerNetworkAccessControl * Replace the environment's current WebServerNetworkAccessControl. * config.softwareConfig.airflowConfigOverrides * Replace all Apache Airflow config overrides. If a replacement config overrides map is not included in `environment`, all config overrides are cleared. It is an error to provide both this mask and a mask specifying one or more individual config overrides. * config.softwareConfig.airflowConfigOverrides.section- name * Override the Apache Airflow config property name in the section named section, preserving other properties. To delete the property override, include it in `updateMask` and omit its mapping in `environment.config.softwareConfig.airflowConfigOverrides`. It is an error to provide both a mask of this form and the \"config.softwareConfig.airflowConfigOverrides\" mask. * config.softwareConfig.envVariables * Replace all environment variables. If a replacement environment variable map is not included in `environment`, all custom environment variables are cleared. 
It is an error to provide both this mask and a mask specifying one or more individual environment variables. * config.softwareConfig.imageVersion * Upgrade the version of the environment in-place. Refer to `SoftwareConfig.image_version` for information on how to format the new image version. Additionally, the new image version cannot effect a version downgrade and must match the current image version's Composer major version and Airflow major and minor versions. Consult the Cloud Composer Version List for valid values. * config.databaseConfig.machineType * Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. * config.webServerConfig.machineType * Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. * config.maintenanceWindow * Maintenance window during which Cloud Composer components may be under maintenance.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -284,7 +284,7 @@ "type": "string" }, "parent": { - "description": "List ImageVersions in the given project and location, in the form:\n\"projects/{projectId}/locations/{locationId}\"", + "description": "List ImageVersions in the given project and location, in the form: \"projects/{projectId}/locations/{locationId}\"", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -304,7 +304,7 @@ "operations": { "methods": { "delete": { - "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", "httpMethod": "DELETE", "id": "composer.projects.locations.operations.delete", @@ -329,7 +329,7 @@ ] }, "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", "httpMethod": "GET", "id": "composer.projects.locations.operations.get", @@ -354,7 +354,7 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. 
To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/operations", "httpMethod": "GET", "id": "composer.projects.locations.operations.list", @@ -401,7 +401,7 @@ } } }, - "revision": "20200505", + "revision": "20201002", "rootUrl": "https://composer.googleapis.com/", "schemas": { "AllowedIpRange": { @@ -413,25 +413,25 @@ "type": "string" }, "value": { - "description": "IP address or range, defined using CIDR notation, of requests that this\nrule applies to.\nExamples: `192.168.1.1` or `192.168.0.0/16` or `2001:db8::/32`\n or `2001:0db8:0000:0042:0000:8a2e:0370:7334`.\n\n\n\u003cp\u003eIP range prefixes should be properly truncated. For example,\n`1.2.3.4/24` should be truncated to `1.2.3.0/24`. Similarly, for IPv6,\n`2001:db8::1/32` should be truncated to `2001:db8::/32`.", + "description": "IP address or range, defined using CIDR notation, of requests that this rule applies to. Examples: `192.168.1.1` or `192.168.0.0/16` or `2001:db8::/32` or `2001:0db8:0000:0042:0000:8a2e:0370:7334`. IP range prefixes should be properly truncated. For example, `1.2.3.4/24` should be truncated to `1.2.3.0/24`. Similarly, for IPv6, `2001:db8::1/32` should be truncated to `2001:db8::/32`.", "type": "string" } }, "type": "object" }, "DatabaseConfig": { - "description": "The configuration of Cloud SQL instance that is used by the Apache Airflow\nsoftware.", + "description": "The configuration of Cloud SQL instance that is used by the Apache Airflow software.", "id": "DatabaseConfig", "properties": { "machineType": { - "description": "Optional. Cloud SQL tier used by Airflow database.\nIf not specified, db-n1-standard-2 will be used.", + "description": "Optional. Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.", "type": "string" } }, "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. 
For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" @@ -447,17 +447,18 @@ "createTime": { "description": "Output only. The time at which this environment was created.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "Optional. User-defined labels for this environment.\nThe labels map can contain no more than 64 entries. Entries of the labels\nmap are UTF8 strings that comply with the following restrictions:\n\n* Keys must conform to regexp: \\p{Ll}\\p{Lo}{0,62}\n* Values must conform to regexp: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63}\n* Both keys and values are additionally constrained to be \u003c= 128 bytes in\nsize.", + "description": "Optional. User-defined labels for this environment. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \\p{Ll}\\p{Lo}{0,62} * Values must conform to regexp: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63} * Both keys and values are additionally constrained to be \u003c= 128 bytes in size.", "type": "object" }, "name": { - "description": "The resource name of the environment, in the form:\n\"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"\n\nEnvironmentId must start with a lowercase letter followed by up to 63\nlowercase letters, numbers, or hyphens, and cannot end with a hyphen.", + "description": "The resource name of the environment, in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\" EnvironmentId must start with a lowercase letter followed by up to 63 lowercase letters, numbers, or hyphens, and cannot end with a hyphen.", "type": "string" }, "state": { @@ -474,7 +475,7 @@ "The state of the environment is unknown.", "The environment is in the process of being created.", "The environment is currently running and healthy. It is ready for use.", - "The environment is being updated. It remains usable but cannot receive\nadditional update requests or be deleted at this time.", + "The environment is being updated. It remains usable but cannot receive additional update requests or be deleted at this time.", "The environment is undergoing deletion. It cannot be used.", "The environment has encountered an error and cannot be used." ], @@ -483,10 +484,12 @@ "updateTime": { "description": "Output only. The time at which this environment was last modified.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "uuid": { - "description": "Output only. The UUID (Universally Unique IDentifier) associated with this environment.\nThis value is generated when the environment is created.", + "description": "Output only. The UUID (Universally Unique IDentifier) associated with this environment. This value is generated when the environment is created.", + "readOnly": true, "type": "string" } }, @@ -497,19 +500,22 @@ "id": "EnvironmentConfig", "properties": { "airflowUri": { - "description": "Output only. The URI of the Apache Airflow Web UI hosted within this environment (see\n[Airflow web\ninterface](/composer/docs/how-to/accessing/airflow-web-interface)).", + "description": "Output only. 
The URI of the Apache Airflow Web UI hosted within this environment (see [Airflow web interface](/composer/docs/how-to/accessing/airflow-web-interface)).", + "readOnly": true, "type": "string" }, "dagGcsPrefix": { - "description": "Output only. The Cloud Storage prefix of the DAGs for this environment. Although Cloud\nStorage objects reside in a flat namespace, a hierarchical file tree\ncan be simulated using \"/\"-delimited object name prefixes. DAG objects for\nthis environment reside in a simulated directory with the given prefix.", + "description": "Output only. The Cloud Storage prefix of the DAGs for this environment. Although Cloud Storage objects reside in a flat namespace, a hierarchical file tree can be simulated using \"/\"-delimited object name prefixes. DAG objects for this environment reside in a simulated directory with the given prefix.", + "readOnly": true, "type": "string" }, "databaseConfig": { "$ref": "DatabaseConfig", - "description": "Optional. The configuration settings for Cloud SQL instance used internally by Apache\nAirflow software." + "description": "Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software." }, "gkeCluster": { "description": "Output only. The Kubernetes Engine cluster used to run this environment.", + "readOnly": true, "type": "string" }, "nodeConfig": { @@ -517,7 +523,7 @@ "description": "The configuration used for the Kubernetes Engine cluster." }, "nodeCount": { - "description": "The number of nodes in the Kubernetes Engine cluster that will be\nused to run this environment.", + "description": "The number of nodes in the Kubernetes Engine cluster that will be used to run this environment.", "format": "int32", "type": "integer" }, @@ -535,33 +541,33 @@ }, "webServerNetworkAccessControl": { "$ref": "WebServerNetworkAccessControl", - "description": "Optional. The network-level access control policy for the Airflow web server. If\nunspecified, no network-level access restrictions will be applied." + "description": "Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied." } }, "type": "object" }, "IPAllocationPolicy": { - "description": "Configuration for controlling how IPs are allocated in the\nGKE cluster.", + "description": "Configuration for controlling how IPs are allocated in the GKE cluster.", "id": "IPAllocationPolicy", "properties": { "clusterIpv4CidrBlock": { - "description": "Optional. The IP address range used to allocate IP addresses to pods in\nthe cluster.\n\nThis field is applicable only when `use_ip_aliases` is true.\n\nSet to blank to have GKE choose a range with the default size.\n\nSet to /netmask (e.g. `/14`) to have GKE choose a range with a specific\nnetmask.\n\nSet to a\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.\nSpecify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block`\nbut not both.", + "description": "Optional. The IP address range used to allocate IP addresses to pods in the cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. 
`10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both.", "type": "string" }, "clusterSecondaryRangeName": { - "description": "Optional. The name of the cluster's secondary range used to allocate\nIP addresses to pods. Specify either `cluster_secondary_range_name`\nor `cluster_ipv4_cidr_block` but not both.\n\nThis field is applicable only when `use_ip_aliases` is true.", + "description": "Optional. The name of the cluster's secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true.", "type": "string" }, "servicesIpv4CidrBlock": { - "description": "Optional. The IP address range of the services IP addresses in this\ncluster.\n\nThis field is applicable only when `use_ip_aliases` is true.\n\nSet to blank to have GKE choose a range with the default size.\n\nSet to /netmask (e.g. `/14`) to have GKE choose a range with a specific\nnetmask.\n\nSet to a\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.\nSpecify `services_secondary_range_name` or `services_ipv4_cidr_block`\nbut not both.", + "description": "Optional. The IP address range of the services IP addresses in this cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both.", "type": "string" }, "servicesSecondaryRangeName": { - "description": "Optional. The name of the services' secondary range used to allocate\nIP addresses to the cluster. Specify either `services_secondary_range_name`\nor `services_ipv4_cidr_block` but not both.\n\nThis field is applicable only when `use_ip_aliases` is true.", + "description": "Optional. The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true.", "type": "string" }, "useIpAliases": { - "description": "Optional. Whether or not to enable Alias IPs in the GKE cluster.\nIf `true`, a VPC-native cluster is created.", + "description": "Optional. Whether or not to enable Alias IPs in the GKE cluster. 
If `true`, a VPC-native cluster is created.", "type": "boolean" } }, @@ -572,11 +578,11 @@ "id": "ImageVersion", "properties": { "imageVersionId": { - "description": "The string identifier of the ImageVersion, in the form:\n\"composer-x.y.z-airflow-a.b(.c)\"", + "description": "The string identifier of the ImageVersion, in the form: \"composer-x.y.z-airflow-a.b(.c)\"", "type": "string" }, "isDefault": { - "description": "Whether this is the default ImageVersion used by Composer during\nenvironment creation if no input ImageVersion is specified.", + "description": "Whether this is the default ImageVersion used by Composer during environment creation if no input ImageVersion is specified.", "type": "boolean" }, "supportedPythonVersions": { @@ -644,11 +650,11 @@ "type": "object" }, "NodeConfig": { - "description": "The configuration information for the Kubernetes Engine nodes running\nthe Apache Airflow software.", + "description": "The configuration information for the Kubernetes Engine nodes running the Apache Airflow software.", "id": "NodeConfig", "properties": { "diskSizeGb": { - "description": "Optional. The disk size in GB used for node VMs. Minimum size is 20GB.\nIf unspecified, defaults to 100GB. Cannot be updated.", + "description": "Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated.", "format": "int32", "type": "integer" }, @@ -657,34 +663,34 @@ "description": "Optional. The IPAllocationPolicy fields for the GKE cluster." }, "location": { - "description": "Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which\nto deploy the VMs used to run the Apache Airflow software, specified as a\n[relative resource\nname](/apis/design/resource_names#relative_resource_name). For example:\n\"projects/{projectId}/zones/{zoneId}\".\n\nThis `location` must belong to the enclosing environment's project and\nlocation. If both this field and `nodeConfig.machineType` are specified,\n`nodeConfig.machineType` must belong to this `location`; if both are\nunspecified, the service will pick a zone in the Compute Engine region\ncorresponding to the Cloud Composer location, and propagate that choice to\nboth fields. If only one field (`location` or `nodeConfig.machineType`) is\nspecified, the location information from the specified field will be\npropagated to the unspecified field.", + "description": "Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: \"projects/{projectId}/zones/{zoneId}\". This `location` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field.", "type": "string" }, "machineType": { - "description": "Optional. The Compute Engine\n[machine type](/compute/docs/machine-types) used for cluster instances,\nspecified as a\n[relative resource\nname](/apis/design/resource_names#relative_resource_name). 
For example:\n\"projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}\".\n\nThe `machineType` must belong to the enclosing environment's project and\nlocation. If both this field and `nodeConfig.location` are specified,\nthis `machineType` must belong to the `nodeConfig.location`; if both are\nunspecified, the service will pick a zone in the Compute Engine region\ncorresponding to the Cloud Composer location, and propagate that choice to\nboth fields. If exactly one of this field and `nodeConfig.location` is\nspecified, the location information from the specified field will be\npropagated to the unspecified field.\n\nThe `machineTypeId` must not be a [shared-core machine\ntype](/compute/docs/machine-types#sharedcore).\n\nIf this field is unspecified, the `machineTypeId` defaults\nto \"n1-standard-1\".", + "description": "Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: \"projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}\". The `machineType` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to \"n1-standard-1\".", "type": "string" }, "network": { - "description": "Optional. The Compute Engine network to be used for machine\ncommunications, specified as a\n[relative resource\nname](/apis/design/resource_names#relative_resource_name). For example:\n\"projects/{projectId}/global/networks/{networkId}\".\n\nIf unspecified, the default network in the environment's project is used.\nIf a [Custom Subnet Network](/vpc/docs/vpc#vpc_networks_and_subnets)\nis provided, `nodeConfig.subnetwork` must also be provided. For\n[Shared VPC](/vpc/docs/shared-vpc) subnetwork requirements, see\n`nodeConfig.subnetwork`.", + "description": "Optional. The Compute Engine network to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: \"projects/{projectId}/global/networks/{networkId}\". If unspecified, the default network in the environment's project is used. If a [Custom Subnet Network](/vpc/docs/vpc#vpc_networks_and_subnets) is provided, `nodeConfig.subnetwork` must also be provided. For [Shared VPC](/vpc/docs/shared-vpc) subnetwork requirements, see `nodeConfig.subnetwork`.", "type": "string" }, "oauthScopes": { - "description": "Optional. The set of Google API scopes to be made available on all\nnode VMs. If `oauth_scopes` is empty, defaults to\n[\"https://www.googleapis.com/auth/cloud-platform\"]. Cannot be updated.", + "description": "Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to [\"https://www.googleapis.com/auth/cloud-platform\"]. Cannot be updated.", "items": { "type": "string" }, "type": "array" }, "serviceAccount": { - "description": "Optional. 
The Google Cloud Platform Service Account to be used by the node\nVMs. If a service account is not specified, the \"default\" Compute Engine\nservice account is used. Cannot be updated.", + "description": "Optional. The Google Cloud Platform Service Account to be used by the node VMs. If a service account is not specified, the \"default\" Compute Engine service account is used. Cannot be updated.", "type": "string" }, "subnetwork": { - "description": "Optional. The Compute Engine subnetwork to be used for machine\ncommunications, specified as a\n[relative resource\nname](/apis/design/resource_names#relative_resource_name). For example:\n\"projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}\"\n\nIf a subnetwork is provided, `nodeConfig.network` must also be provided,\nand the subnetwork must belong to the enclosing environment's project and\nlocation.", + "description": "Optional. The Compute Engine subnetwork to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: \"projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}\" If a subnetwork is provided, `nodeConfig.network` must also be provided, and the subnetwork must belong to the enclosing environment's project and location.", "type": "string" }, "tags": { - "description": "Optional. The list of instance tags applied to all node VMs. Tags are used\nto identify valid sources or targets for network firewalls. Each tag within\nthe list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt).\nCannot be updated.", + "description": "Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated.", "items": { "type": "string" }, @@ -694,11 +700,11 @@ "type": "object" }, "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", "properties": { "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", "type": "boolean" }, "error": { @@ -710,11 +716,11 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", "type": "object" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. 
If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, "response": { @@ -722,7 +728,7 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", "type": "object" } }, @@ -738,7 +744,7 @@ "type": "string" }, "endTime": { - "description": "Output only. The time when the operation terminated, regardless of its success.\nThis field is unset if the operation is still ongoing.", + "description": "Output only. The time when the operation terminated, regardless of its success. This field is unset if the operation is still ongoing.", "format": "google-datetime", "type": "string" }, @@ -759,7 +765,7 @@ "type": "string" }, "resource": { - "description": "Output only. The resource being operated on, as a [relative resource name](\n/apis/design/resource_names#relative_resource_name).", + "description": "Output only. The resource being operated on, as a [relative resource name]( /apis/design/resource_names#relative_resource_name).", "type": "string" }, "resourceUuid": { @@ -788,46 +794,48 @@ "type": "object" }, "PrivateClusterConfig": { - "description": "Configuration options for the private GKE cluster in a Cloud Composer\nenvironment.", + "description": "Configuration options for the private GKE cluster in a Cloud Composer environment.", "id": "PrivateClusterConfig", "properties": { "enablePrivateEndpoint": { - "description": "Optional. If `true`, access to the public endpoint of the GKE cluster is\ndenied.", + "description": "Optional. If `true`, access to the public endpoint of the GKE cluster is denied.", "type": "boolean" }, "masterIpv4CidrBlock": { - "description": "Optional. The CIDR block from which IPv4 range for GKE master will be reserved. If\nleft blank, the default value of '172.16.0.0/23' is used.", + "description": "Optional. The CIDR block from which IPv4 range for GKE master will be reserved. If left blank, the default value of '172.16.0.0/23' is used.", "type": "string" }, "masterIpv4ReservedRange": { - "description": "Output only. The IP range in CIDR notation to use for the hosted master network. 
This\nrange is used for assigning internal IP addresses to the cluster\nmaster or set of masters and to the internal load balancer virtual IP.\nThis range must not overlap with any other ranges in use\nwithin the cluster's network.", + "description": "Output only. The IP range in CIDR notation to use for the hosted master network. This range is used for assigning internal IP addresses to the cluster master or set of masters and to the internal load balancer virtual IP. This range must not overlap with any other ranges in use within the cluster's network.", + "readOnly": true, "type": "string" } }, "type": "object" }, "PrivateEnvironmentConfig": { - "description": "The configuration information for configuring a Private IP Cloud Composer\nenvironment.", + "description": "The configuration information for configuring a Private IP Cloud Composer environment.", "id": "PrivateEnvironmentConfig", "properties": { "cloudSqlIpv4CidrBlock": { - "description": "Optional. The CIDR block from which IP range in tenant project will be reserved for\nCloud SQL. Needs to be disjoint from web_server_ipv4_cidr_block", + "description": "Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from web_server_ipv4_cidr_block", "type": "string" }, "enablePrivateEnvironment": { - "description": "Optional. If `true`, a Private IP Cloud Composer environment is created.\nIf this field is true, `use_ip_aliases` must be true.", + "description": "Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true.", "type": "boolean" }, "privateClusterConfig": { "$ref": "PrivateClusterConfig", - "description": "Optional. Configuration for the private GKE cluster for a Private IP\nCloud Composer environment." + "description": "Optional. Configuration for the private GKE cluster for a Private IP Cloud Composer environment." }, "webServerIpv4CidrBlock": { - "description": "Optional. The CIDR block from which IP range for web server will be reserved. Needs\nto be disjoint from private_cluster_config.master_ipv4_cidr_block and\ncloud_sql_ipv4_cidr_block.", + "description": "Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from private_cluster_config.master_ipv4_cidr_block and cloud_sql_ipv4_cidr_block.", "type": "string" }, "webServerIpv4ReservedRange": { "description": "Output only. The IP range reserved for the tenant project's App Engine VMs.", + "readOnly": true, "type": "string" } }, @@ -841,36 +849,36 @@ "additionalProperties": { "type": "string" }, - "description": "Optional. Apache Airflow configuration properties to override.\n\nProperty keys contain the section and property names, separated by a\nhyphen, for example \"core-dags_are_paused_at_creation\". Section names must\nnot contain hyphens (\"-\"), opening square brackets (\"[\"), or closing\nsquare brackets (\"]\"). The property name must not be empty and must not\ncontain an equals sign (\"=\") or semicolon (\";\"). Section and property names\nmust not contain a period (\".\"). Apache Airflow configuration property\nnames must be written in\n[snake_case](https://en.wikipedia.org/wiki/Snake_case). 
Property values can\ncontain any character, and can be written in any lower/upper case format.\n\nCertain Apache Airflow configuration property values are\n[blacklisted](/composer/docs/how-to/managing/setting-airflow-configurations#airflow_configuration_blacklists),\nand cannot be overridden.", + "description": "Optional. Apache Airflow configuration properties to override. Property keys contain the section and property names, separated by a hyphen, for example \"core-dags_are_paused_at_creation\". Section names must not contain hyphens (\"-\"), opening square brackets (\"[\"), or closing square brackets (\"]\"). The property name must not be empty and must not contain an equals sign (\"=\") or semicolon (\";\"). Section and property names must not contain a period (\".\"). Apache Airflow configuration property names must be written in [snake_case](https://en.wikipedia.org/wiki/Snake_case). Property values can contain any character, and can be written in any lower/upper case format. Certain Apache Airflow configuration property values are [blocked](/composer/docs/concepts/airflow-configurations), and cannot be overridden.", "type": "object" }, "envVariables": { "additionalProperties": { "type": "string" }, - "description": "Optional. Additional environment variables to provide to the Apache Airflow\nscheduler, worker, and webserver processes.\n\nEnvironment variable names must match the regular expression\n`a-zA-Z_*`. They cannot specify Apache Airflow\nsoftware configuration overrides (they cannot match the regular expression\n`AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+`), and they cannot match any of the\nfollowing reserved names:\n\n* `AIRFLOW_HOME`\n* `C_FORCE_ROOT`\n* `CONTAINER_NAME`\n* `DAGS_FOLDER`\n* `GCP_PROJECT`\n* `GCS_BUCKET`\n* `GKE_CLUSTER_NAME`\n* `SQL_DATABASE`\n* `SQL_INSTANCE`\n* `SQL_PASSWORD`\n* `SQL_PROJECT`\n* `SQL_REGION`\n* `SQL_USER`", + "description": "Optional. Additional environment variables to provide to the Apache Airflow scheduler, worker, and webserver processes. Environment variable names must match the regular expression `a-zA-Z_*`. They cannot specify Apache Airflow software configuration overrides (they cannot match the regular expression `AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+`), and they cannot match any of the following reserved names: * `AIRFLOW_HOME` * `C_FORCE_ROOT` * `CONTAINER_NAME` * `DAGS_FOLDER` * `GCP_PROJECT` * `GCS_BUCKET` * `GKE_CLUSTER_NAME` * `SQL_DATABASE` * `SQL_INSTANCE` * `SQL_PASSWORD` * `SQL_PROJECT` * `SQL_REGION` * `SQL_USER`", "type": "object" }, "imageVersion": { - "description": "The version of the software running in the environment.\nThis encapsulates both the version of Cloud Composer functionality and the\nversion of Apache Airflow. It must match the regular expression\n`composer-([0-9]+\\.[0-9]+\\.[0-9]+|latest)-airflow-[0-9]+\\.[0-9]+(\\.[0-9]+.*)?`.\nWhen used as input, the server also checks if the provided version is\nsupported and denies the request for an unsupported version.\n\nThe Cloud Composer portion of the version is a\n[semantic version](https://semver.org) or `latest`. 
When the patch version\nis omitted, the current Cloud Composer patch version is selected.\nWhen `latest` is provided instead of an explicit version number,\nthe server replaces `latest` with the current Cloud Composer version\nand stores that version number in the same field.\n\nThe portion of the image version that follows \u003cem\u003eairflow-\u003c/em\u003e is an\nofficial Apache Airflow repository\n[release name](https://github.com/apache/incubator-airflow/releases).\n\nSee also [Version\nList](/composer/docs/concepts/versioning/composer-versions).", + "description": "The version of the software running in the environment. This encapsulates both the version of Cloud Composer functionality and the version of Apache Airflow. It must match the regular expression `composer-([0-9]+\\.[0-9]+\\.[0-9]+|latest)-airflow-[0-9]+\\.[0-9]+(\\.[0-9]+.*)?`. When used as input, the server also checks if the provided version is supported and denies the request for an unsupported version. The Cloud Composer portion of the version is a [semantic version](https://semver.org) or `latest`. When the patch version is omitted, the current Cloud Composer patch version is selected. When `latest` is provided instead of an explicit version number, the server replaces `latest` with the current Cloud Composer version and stores that version number in the same field. The portion of the image version that follows *airflow-* is an official Apache Airflow repository [release name](https://github.com/apache/incubator-airflow/releases). See also [Version List](/composer/docs/concepts/versioning/composer-versions).", "type": "string" }, "pypiPackages": { "additionalProperties": { "type": "string" }, - "description": "Optional. Custom Python Package Index (PyPI) packages to be installed in\nthe environment.\n\nKeys refer to the lowercase package name such as \"numpy\"\nand values are the lowercase extras and version specifier such as\n\"==1.12.0\", \"[devel,gcp_api]\", or \"[devel]\u003e=1.8.2, \u003c1.9.2\". To specify a\npackage without pinning it to a version specifier, use the empty string as\nthe value.", + "description": "Optional. Custom Python Package Index (PyPI) packages to be installed in the environment. Keys refer to the lowercase package name such as \"numpy\" and values are the lowercase extras and version specifier such as \"==1.12.0\", \"[devel,gcp_api]\", or \"[devel]\u003e=1.8.2, \u003c1.9.2\". To specify a package without pinning it to a version specifier, use the empty string as the value.", "type": "object" }, "pythonVersion": { - "description": "Optional. The major version of Python used to run the Apache Airflow\nscheduler, worker, and webserver processes.\n\nCan be set to '2' or '3'. If not specified, the default is '2'. Cannot be\nupdated.", + "description": "Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '2'. Cannot be updated.", "type": "string" } }, "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). 
Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -879,7 +887,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -890,7 +898,7 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, @@ -901,7 +909,7 @@ "id": "WebServerConfig", "properties": { "machineType": { - "description": "Optional. Machine type on which Airflow web server is running.\nFor example: composer-n1-webserver-2, composer-n1-webserver-4,\ncomposer-n1-webserver-8.\nIf not specified, composer-n1-webserver-2 will be used.\nValue custom is returned only in response, if Airflow web server parameters\nwere manually changed to a non-standard values.", + "description": "Optional. Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. If not specified, composer-n1-webserver-2 will be used. Value custom is returned only in response, if Airflow web server parameters were manually changed to a non-standard values.", "type": "string" } }, diff --git a/vendor/google.golang.org/api/composer/v1beta1/composer-gen.go b/vendor/google.golang.org/api/composer/v1beta1/composer-gen.go index a285a022500..f316dbe6975 100644 --- a/vendor/google.golang.org/api/composer/v1beta1/composer-gen.go +++ b/vendor/google.golang.org/api/composer/v1beta1/composer-gen.go @@ -75,6 +75,7 @@ const apiId = "composer:v1beta1" const apiName = "composer" const apiVersion = "v1beta1" const basePath = "https://composer.googleapis.com/" +const mtlsBasePath = "https://composer.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -90,6 +91,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) 
if err != nil { return nil, err @@ -197,17 +199,12 @@ type AllowedIpRange struct { Description string `json:"description,omitempty"` // Value: IP address or range, defined using CIDR notation, of requests - // that this - // rule applies to. - // Examples: `192.168.1.1` or `192.168.0.0/16` or `2001:db8::/32` - // or `2001:0db8:0000:0042:0000:8a2e:0370:7334`. - // - // - //

IP range prefixes should be properly truncated. For - // example, - // `1.2.3.4/24` should be truncated to `1.2.3.0/24`. Similarly, for - // IPv6, - // `2001:db8::1/32` should be truncated to `2001:db8::/32`. + // that this rule applies to. Examples: `192.168.1.1` or + // `192.168.0.0/16` or `2001:db8::/32` or + // `2001:0db8:0000:0042:0000:8a2e:0370:7334`. IP range prefixes should + // be properly truncated. For example, `1.2.3.4/24` should be truncated + // to `1.2.3.0/24`. Similarly, for IPv6, `2001:db8::1/32` should be + // truncated to `2001:db8::/32`. Value string `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -234,11 +231,12 @@ func (s *AllowedIpRange) MarshalJSON() ([]byte, error) { } // DatabaseConfig: The configuration of Cloud SQL instance that is used -// by the Apache Airflow -// software. +// by the Apache Airflow software. type DatabaseConfig struct { - // MachineType: Optional. Cloud SQL tier used by Airflow database. - // If not specified, db-n1-standard-2 will be used. + // MachineType: Optional. Cloud SQL machine type used by Airflow + // database. It has to be one of: db-n1-standard-2, db-n1-standard-4, + // db-n1-standard-8 or db-n1-standard-16. If not specified, + // db-n1-standard-2 will be used. MachineType string `json:"machineType,omitempty"` // ForceSendFields is a list of field names (e.g. "MachineType") to @@ -265,17 +263,11 @@ func (s *DatabaseConfig) MarshalJSON() ([]byte, error) { } // Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -291,26 +283,19 @@ type Environment struct { // created. CreateTime string `json:"createTime,omitempty"` - // Labels: Optional. User-defined labels for this environment. - // The labels map can contain no more than 64 entries. Entries of the - // labels - // map are UTF8 strings that comply with the following restrictions: - // - // * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} - // * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} - // * Both keys and values are additionally constrained to be <= 128 - // bytes in - // size. + // Labels: Optional. User-defined labels for this environment. The + // labels map can contain no more than 64 entries. Entries of the labels + // map are UTF8 strings that comply with the following restrictions: * + // Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform + // to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are + // additionally constrained to be <= 128 bytes in size. 
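The label restrictions above are plain regular expressions plus a byte-size limit, so they can be checked client-side before an Environments.Create or Patch call. A minimal Go sketch, assuming nothing beyond the documented patterns (the helper name and sample key/value pairs are illustrative, not part of the generated client):

package main

import (
	"fmt"
	"regexp"
)

// Patterns copied from the Environment.Labels documentation above.
var (
	labelKeyRE   = regexp.MustCompile(`^\p{Ll}\p{Lo}{0,62}$`)
	labelValueRE = regexp.MustCompile(`^[\p{Ll}\p{Lo}\p{N}_-]{0,63}$`)
)

// validLabel reports whether a key/value pair satisfies the documented
// restrictions: both regexps plus the <= 128 byte limit on each part.
func validLabel(key, value string) bool {
	return len(key) <= 128 && len(value) <= 128 &&
		labelKeyRE.MatchString(key) && labelValueRE.MatchString(value)
}

func main() {
	fmt.Println(validLabel("e", "prod-v1")) // true
	fmt.Println(validLabel("Env", "prod"))  // false: key must start with a lowercase letter
}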
Labels map[string]string `json:"labels,omitempty"` - // Name: The resource name of the environment, in the - // form: - // "projects/{projectId}/locations/{locationId}/environments/{envir - // onmentId}" - // - // EnvironmentId must start with a lowercase letter followed by up to - // 63 - // lowercase letters, numbers, or hyphens, and cannot end with a hyphen. + // Name: The resource name of the environment, in the form: + // "projects/{projectId}/locations/{locationId}/environments/{environment + // Id}" EnvironmentId must start with a lowercase letter followed by up + // to 63 lowercase letters, numbers, or hyphens, and cannot end with a + // hyphen. Name string `json:"name,omitempty"` // State: The current state of the environment. @@ -321,8 +306,8 @@ type Environment struct { // "RUNNING" - The environment is currently running and healthy. It is // ready for use. // "UPDATING" - The environment is being updated. It remains usable - // but cannot receive - // additional update requests or be deleted at this time. + // but cannot receive additional update requests or be deleted at this + // time. // "DELETING" - The environment is undergoing deletion. It cannot be // used. // "ERROR" - The environment has encountered an error and cannot be @@ -334,8 +319,8 @@ type Environment struct { UpdateTime string `json:"updateTime,omitempty"` // Uuid: Output only. The UUID (Universally Unique IDentifier) - // associated with this environment. - // This value is generated when the environment is created. + // associated with this environment. This value is generated when the + // environment is created. Uuid string `json:"uuid,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -368,26 +353,19 @@ func (s *Environment) MarshalJSON() ([]byte, error) { // EnvironmentConfig: Configuration information for an environment. type EnvironmentConfig struct { // AirflowUri: Output only. The URI of the Apache Airflow Web UI hosted - // within this environment (see - // [Airflow - // web - // interface](/composer/docs/how-to/accessing/airflow-web-interface)) - // . + // within this environment (see [Airflow web + // interface](/composer/docs/how-to/accessing/airflow-web-interface)). AirflowUri string `json:"airflowUri,omitempty"` // DagGcsPrefix: Output only. The Cloud Storage prefix of the DAGs for - // this environment. Although Cloud - // Storage objects reside in a flat namespace, a hierarchical file - // tree - // can be simulated using "/"-delimited object name prefixes. DAG - // objects for - // this environment reside in a simulated directory with the given - // prefix. + // this environment. Although Cloud Storage objects reside in a flat + // namespace, a hierarchical file tree can be simulated using + // "/"-delimited object name prefixes. DAG objects for this environment + // reside in a simulated directory with the given prefix. DagGcsPrefix string `json:"dagGcsPrefix,omitempty"` // DatabaseConfig: Optional. The configuration settings for Cloud SQL - // instance used internally by Apache - // Airflow software. + // instance used internally by Apache Airflow software. DatabaseConfig *DatabaseConfig `json:"databaseConfig,omitempty"` // GkeCluster: Output only. The Kubernetes Engine cluster used to run @@ -398,8 +376,7 @@ type EnvironmentConfig struct { NodeConfig *NodeConfig `json:"nodeConfig,omitempty"` // NodeCount: The number of nodes in the Kubernetes Engine cluster that - // will be - // used to run this environment. + // will be used to run this environment. 
NodeCount int64 `json:"nodeCount,omitempty"` // PrivateEnvironmentConfig: The configuration used for the Private IP @@ -415,8 +392,8 @@ type EnvironmentConfig struct { WebServerConfig *WebServerConfig `json:"webServerConfig,omitempty"` // WebServerNetworkAccessControl: Optional. The network-level access - // control policy for the Airflow web server. If - // unspecified, no network-level access restrictions will be applied. + // control policy for the Airflow web server. If unspecified, no + // network-level access restrictions will be applied. WebServerNetworkAccessControl *WebServerNetworkAccessControl `json:"webServerNetworkAccessControl,omitempty"` // ForceSendFields is a list of field names (e.g. "AirflowUri") to @@ -443,82 +420,47 @@ func (s *EnvironmentConfig) MarshalJSON() ([]byte, error) { } // IPAllocationPolicy: Configuration for controlling how IPs are -// allocated in the -// GKE cluster. +// allocated in the GKE cluster. type IPAllocationPolicy struct { // ClusterIpv4CidrBlock: Optional. The IP address range used to allocate - // IP addresses to pods in - // the cluster. - // - // This field is applicable only when `use_ip_aliases` is true. - // - // Set to blank to have GKE choose a range with the default size. - // - // Set to /netmask (e.g. `/14`) to have GKE choose a range with a - // specific - // netmask. - // - // Set to - // a + // IP addresses to pods in the cluster. This field is applicable only + // when `use_ip_aliases` is true. Set to blank to have GKE choose a + // range with the default size. Set to /netmask (e.g. `/14`) to have GKE + // choose a range with a specific netmask. Set to a // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks - // (e.g. - // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific - // range - // to use. - // Specify `cluster_secondary_range_name` or - // `cluster_ipv4_cidr_block` - // but not both. + // (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a + // specific range to use. Specify `cluster_secondary_range_name` or + // `cluster_ipv4_cidr_block` but not both. ClusterIpv4CidrBlock string `json:"clusterIpv4CidrBlock,omitempty"` // ClusterSecondaryRangeName: Optional. The name of the cluster's - // secondary range used to allocate - // IP addresses to pods. Specify either - // `cluster_secondary_range_name` - // or `cluster_ipv4_cidr_block` but not both. - // - // This field is applicable only when `use_ip_aliases` is true. + // secondary range used to allocate IP addresses to pods. Specify either + // `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not + // both. This field is applicable only when `use_ip_aliases` is true. ClusterSecondaryRangeName string `json:"clusterSecondaryRangeName,omitempty"` // ServicesIpv4CidrBlock: Optional. The IP address range of the services - // IP addresses in this - // cluster. - // - // This field is applicable only when `use_ip_aliases` is true. - // - // Set to blank to have GKE choose a range with the default size. - // - // Set to /netmask (e.g. `/14`) to have GKE choose a range with a - // specific - // netmask. - // - // Set to - // a + // IP addresses in this cluster. This field is applicable only when + // `use_ip_aliases` is true. Set to blank to have GKE choose a range + // with the default size. Set to /netmask (e.g. `/14`) to have GKE + // choose a range with a specific netmask. 
Set to a // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks - // (e.g. - // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific - // range - // to use. - // Specify `services_secondary_range_name` or - // `services_ipv4_cidr_block` - // but not both. + // (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a + // specific range to use. Specify `services_secondary_range_name` or + // `services_ipv4_cidr_block` but not both. ServicesIpv4CidrBlock string `json:"servicesIpv4CidrBlock,omitempty"` // ServicesSecondaryRangeName: Optional. The name of the services' - // secondary range used to allocate - // IP addresses to the cluster. Specify either - // `services_secondary_range_name` - // or `services_ipv4_cidr_block` but not both. - // - // This field is applicable only when `use_ip_aliases` is true. + // secondary range used to allocate IP addresses to the cluster. Specify + // either `services_secondary_range_name` or `services_ipv4_cidr_block` + // but not both. This field is applicable only when `use_ip_aliases` is + // true. ServicesSecondaryRangeName string `json:"servicesSecondaryRangeName,omitempty"` // UseIpAliases: Optional. Whether or not to enable Alias IPs in the GKE - // cluster. - // If `true`, a VPC-native cluster is created. + // cluster. If `true`, a VPC-native cluster is created. UseIpAliases bool `json:"useIpAliases,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -549,13 +491,11 @@ func (s *IPAllocationPolicy) MarshalJSON() ([]byte, error) { // ImageVersion: Image Version information type ImageVersion struct { // ImageVersionId: The string identifier of the ImageVersion, in the - // form: - // "composer-x.y.z-airflow-a.b(.c)" + // form: "composer-x.y.z-airflow-a.b(.c)" ImageVersionId string `json:"imageVersionId,omitempty"` // IsDefault: Whether this is the default ImageVersion used by Composer - // during - // environment creation if no input ImageVersion is specified. + // during environment creation if no input ImageVersion is specified. IsDefault bool `json:"isDefault,omitempty"` // SupportedPythonVersions: supported python versions @@ -697,12 +637,10 @@ func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { } // NodeConfig: The configuration information for the Kubernetes Engine -// nodes running -// the Apache Airflow software. +// nodes running the Apache Airflow software. type NodeConfig struct { // DiskSizeGb: Optional. The disk size in GB used for node VMs. Minimum - // size is 20GB. - // If unspecified, defaults to 100GB. Cannot be updated. + // size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. DiskSizeGb int64 `json:"diskSizeGb,omitempty"` // IpAllocationPolicy: Optional. The IPAllocationPolicy fields for the @@ -710,126 +648,77 @@ type NodeConfig struct { IpAllocationPolicy *IPAllocationPolicy `json:"ipAllocationPolicy,omitempty"` // Location: Optional. The Compute Engine - // [zone](/compute/docs/regions-zones) in which - // to deploy the VMs used to run the Apache Airflow software, specified - // as a - // [relative - // resource + // [zone](/compute/docs/regions-zones) in which to deploy the VMs used + // to run the Apache Airflow software, specified as a [relative resource // name](/apis/design/resource_names#relative_resource_name). For - // example: - // "projects/{projectId}/zones/{zoneId}". - // - // This `location` must belong to the enclosing environment's project - // and - // location. 
If both this field and `nodeConfig.machineType` are - // specified, - // `nodeConfig.machineType` must belong to this `location`; if both - // are + // example: "projects/{projectId}/zones/{zoneId}". This `location` must + // belong to the enclosing environment's project and location. If both + // this field and `nodeConfig.machineType` are specified, + // `nodeConfig.machineType` must belong to this `location`; if both are // unspecified, the service will pick a zone in the Compute Engine - // region - // corresponding to the Cloud Composer location, and propagate that - // choice to - // both fields. If only one field (`location` or - // `nodeConfig.machineType`) is - // specified, the location information from the specified field will - // be - // propagated to the unspecified field. + // region corresponding to the Cloud Composer location, and propagate + // that choice to both fields. If only one field (`location` or + // `nodeConfig.machineType`) is specified, the location information from + // the specified field will be propagated to the unspecified field. Location string `json:"location,omitempty"` - // MachineType: Optional. The Compute Engine - // [machine type](/compute/docs/machine-types) used for cluster - // instances, - // specified as a - // [relative - // resource + // MachineType: Optional. The Compute Engine [machine + // type](/compute/docs/machine-types) used for cluster instances, + // specified as a [relative resource // name](/apis/design/resource_names#relative_resource_name). For // example: - // "projects/{projectId}/zones/{zoneId}/machineTypes/{machineTyp - // eId}". - // + // "projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}". // The `machineType` must belong to the enclosing environment's project - // and - // location. If both this field and `nodeConfig.location` are - // specified, - // this `machineType` must belong to the `nodeConfig.location`; if both - // are - // unspecified, the service will pick a zone in the Compute Engine - // region - // corresponding to the Cloud Composer location, and propagate that - // choice to - // both fields. If exactly one of this field and `nodeConfig.location` - // is - // specified, the location information from the specified field will - // be - // propagated to the unspecified field. - // - // The `machineTypeId` must not be a [shared-core - // machine - // type](/compute/docs/machine-types#sharedcore). - // - // If this field is unspecified, the `machineTypeId` defaults - // to "n1-standard-1". + // and location. If both this field and `nodeConfig.location` are + // specified, this `machineType` must belong to the + // `nodeConfig.location`; if both are unspecified, the service will pick + // a zone in the Compute Engine region corresponding to the Cloud + // Composer location, and propagate that choice to both fields. If + // exactly one of this field and `nodeConfig.location` is specified, the + // location information from the specified field will be propagated to + // the unspecified field. The `machineTypeId` must not be a [shared-core + // machine type](/compute/docs/machine-types#sharedcore). If this field + // is unspecified, the `machineTypeId` defaults to "n1-standard-1". MachineType string `json:"machineType,omitempty"` - // Network: Optional. The Compute Engine network to be used for - // machine - // communications, specified as a - // [relative - // resource + // Network: Optional. 
The Compute Engine network to be used for machine + // communications, specified as a [relative resource // name](/apis/design/resource_names#relative_resource_name). For - // example: - // "projects/{projectId}/global/networks/{networkId}". - // - // If unspecified, the default network in the environment's project is - // used. - // If a [Custom Subnet - // Network](/vpc/docs/vpc#vpc_networks_and_subnets) - // is provided, `nodeConfig.subnetwork` must also be provided. - // For - // [Shared VPC](/vpc/docs/shared-vpc) subnetwork requirements, - // see + // example: "projects/{projectId}/global/networks/{networkId}". If + // unspecified, the default network in the environment's project is + // used. If a [Custom Subnet + // Network](/vpc/docs/vpc#vpc_networks_and_subnets) is provided, + // `nodeConfig.subnetwork` must also be provided. For [Shared + // VPC](/vpc/docs/shared-vpc) subnetwork requirements, see // `nodeConfig.subnetwork`. Network string `json:"network,omitempty"` // OauthScopes: Optional. The set of Google API scopes to be made - // available on all - // node VMs. If `oauth_scopes` is empty, defaults - // to + // available on all node VMs. If `oauth_scopes` is empty, defaults to // ["https://www.googleapis.com/auth/cloud-platform"]. Cannot be // updated. OauthScopes []string `json:"oauthScopes,omitempty"` // ServiceAccount: Optional. The Google Cloud Platform Service Account - // to be used by the node - // VMs. If a service account is not specified, the "default" Compute - // Engine - // service account is used. Cannot be updated. + // to be used by the node VMs. If a service account is not specified, + // the "default" Compute Engine service account is used. Cannot be + // updated. ServiceAccount string `json:"serviceAccount,omitempty"` // Subnetwork: Optional. The Compute Engine subnetwork to be used for - // machine - // communications, specified as a - // [relative - // resource + // machine communications, specified as a [relative resource // name](/apis/design/resource_names#relative_resource_name). For // example: - // "projects/{projectId}/regions/{regionId}/subnetworks/{subnetw - // orkId}" - // + // "projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}" // If a subnetwork is provided, `nodeConfig.network` must also be - // provided, - // and the subnetwork must belong to the enclosing environment's project - // and - // location. + // provided, and the subnetwork must belong to the enclosing + // environment's project and location. Subnetwork string `json:"subnetwork,omitempty"` // Tags: Optional. The list of instance tags applied to all node VMs. - // Tags are used - // to identify valid sources or targets for network firewalls. Each tag - // within - // the list must comply with - // [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). - // Cannot be updated. + // Tags are used to identify valid sources or targets for network + // firewalls. Each tag within the list must comply with + // [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. Tags []string `json:"tags,omitempty"` // ForceSendFields is a list of field names (e.g. "DiskSizeGb") to @@ -856,52 +745,38 @@ func (s *NodeConfig) MarshalJSON() ([]byte, error) { } // Operation: This resource represents a long-running operation that is -// the result of a -// network API call. +// the result of a network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in - // progress. 
- // If `true`, the operation is completed, and either `error` or - // `response` is - // available. + // progress. If `true`, the operation is completed, and either `error` + // or `response` is available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. Error *Status `json:"error,omitempty"` - // Metadata: Service-specific metadata associated with the operation. - // It typically - // contains progress information and common metadata such as create - // time. - // Some services might not provide such metadata. Any method that - // returns a - // long-running operation should document the metadata type, if any. + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same - // service that - // originally returns it. If you use the default HTTP mapping, - // the - // `name` should be a resource name ending with + // service that originally returns it. If you use the default HTTP + // mapping, the `name` should be a resource name ending with // `operations/{unique_id}`. Name string `json:"name,omitempty"` - // Response: The normal response of the operation in case of success. - // If the original - // method returns no data on success, such as `Delete`, the response - // is - // `google.protobuf.Empty`. If the original method is - // standard - // `Get`/`Create`/`Update`, the response should be the resource. For - // other - // methods, the response should have the type `XxxResponse`, where - // `Xxx` - // is the original method name. For example, if the original method - // name - // is `TakeSnapshot()`, the inferred response type - // is - // `TakeSnapshotResponse`. + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as `Delete`, the + // response is `google.protobuf.Empty`. If the original method is + // standard `Get`/`Create`/`Update`, the response should be the + // resource. For other methods, the response should have the type + // `XxxResponse`, where `Xxx` is the original method name. For example, + // if the original method name is `TakeSnapshot()`, the inferred + // response type is `TakeSnapshotResponse`. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -938,8 +813,8 @@ type OperationMetadata struct { CreateTime string `json:"createTime,omitempty"` // EndTime: Output only. The time when the operation terminated, - // regardless of its success. - // This field is unset if the operation is still ongoing. + // regardless of its success. This field is unset if the operation is + // still ongoing. EndTime string `json:"endTime,omitempty"` // OperationType: Output only. The type of operation being performed. @@ -952,8 +827,7 @@ type OperationMetadata struct { OperationType string `json:"operationType,omitempty"` // Resource: Output only. The resource being operated on, as a [relative - // resource name]( - // /apis/design/resource_names#relative_resource_name). + // resource name]( /apis/design/resource_names#relative_resource_name). 
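Callers of the environments API receive an Operation back and, as described above, wait for Done to become true before branching on Error versus Response. A small sketch of that decision logic using only the Operation and Status types from this package; the helper name and the sample operation are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	composer "google.golang.org/api/composer/v1beta1"
)

// operationResult interprets an Operation as documented above: while Done is
// false the operation is still in progress; once Done is true, either Error
// or Response is available.
func operationResult(op *composer.Operation) (json.RawMessage, error) {
	if !op.Done {
		return nil, fmt.Errorf("operation %s still in progress", op.Name)
	}
	if op.Error != nil {
		return nil, fmt.Errorf("operation %s failed: code=%d message=%q",
			op.Name, op.Error.Code, op.Error.Message)
	}
	// For Environments.Create/Patch the response is the resulting resource;
	// for Delete it is google.protobuf.Empty.
	return json.RawMessage(op.Response), nil
}

func main() {
	done := &composer.Operation{Name: "operations/123", Done: true, Response: []byte(`{}`)}
	if body, err := operationResult(done); err == nil {
		fmt.Printf("finished: %s\n", body)
	}
}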
Resource string `json:"resource,omitempty"` // ResourceUuid: Output only. The UUID of the resource being operated @@ -994,27 +868,23 @@ func (s *OperationMetadata) MarshalJSON() ([]byte, error) { } // PrivateClusterConfig: Configuration options for the private GKE -// cluster in a Cloud Composer -// environment. +// cluster in a Cloud Composer environment. type PrivateClusterConfig struct { // EnablePrivateEndpoint: Optional. If `true`, access to the public - // endpoint of the GKE cluster is - // denied. + // endpoint of the GKE cluster is denied. EnablePrivateEndpoint bool `json:"enablePrivateEndpoint,omitempty"` // MasterIpv4CidrBlock: Optional. The CIDR block from which IPv4 range - // for GKE master will be reserved. If - // left blank, the default value of '172.16.0.0/23' is used. + // for GKE master will be reserved. If left blank, the default value of + // '172.16.0.0/23' is used. MasterIpv4CidrBlock string `json:"masterIpv4CidrBlock,omitempty"` // MasterIpv4ReservedRange: Output only. The IP range in CIDR notation - // to use for the hosted master network. This - // range is used for assigning internal IP addresses to the - // cluster - // master or set of masters and to the internal load balancer virtual - // IP. - // This range must not overlap with any other ranges in use - // within the cluster's network. + // to use for the hosted master network. This range is used for + // assigning internal IP addresses to the cluster master or set of + // masters and to the internal load balancer virtual IP. This range must + // not overlap with any other ranges in use within the cluster's + // network. MasterIpv4ReservedRange string `json:"masterIpv4ReservedRange,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1043,28 +913,25 @@ func (s *PrivateClusterConfig) MarshalJSON() ([]byte, error) { } // PrivateEnvironmentConfig: The configuration information for -// configuring a Private IP Cloud Composer -// environment. +// configuring a Private IP Cloud Composer environment. type PrivateEnvironmentConfig struct { // CloudSqlIpv4CidrBlock: Optional. The CIDR block from which IP range - // in tenant project will be reserved for - // Cloud SQL. Needs to be disjoint from web_server_ipv4_cidr_block + // in tenant project will be reserved for Cloud SQL. Needs to be + // disjoint from web_server_ipv4_cidr_block CloudSqlIpv4CidrBlock string `json:"cloudSqlIpv4CidrBlock,omitempty"` // EnablePrivateEnvironment: Optional. If `true`, a Private IP Cloud - // Composer environment is created. - // If this field is true, `use_ip_aliases` must be true. + // Composer environment is created. If this field is set to true, + // `IPAllocationPolicy.use_ip_aliases` must be set to true. EnablePrivateEnvironment bool `json:"enablePrivateEnvironment,omitempty"` // PrivateClusterConfig: Optional. Configuration for the private GKE - // cluster for a Private IP - // Cloud Composer environment. + // cluster for a Private IP Cloud Composer environment. PrivateClusterConfig *PrivateClusterConfig `json:"privateClusterConfig,omitempty"` // WebServerIpv4CidrBlock: Optional. The CIDR block from which IP range - // for web server will be reserved. Needs - // to be disjoint from private_cluster_config.master_ipv4_cidr_block - // and + // for web server will be reserved. Needs to be disjoint from + // private_cluster_config.master_ipv4_cidr_block and // cloud_sql_ipv4_cidr_block. 
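The Private IP fields above interact: enabling a private environment requires alias IPs, and the web server, Cloud SQL, and GKE master ranges must be disjoint. A hedged sketch of an EnvironmentConfig wiring these pieces together; the specific CIDR values are placeholders only and are not validated here for the sizes the service actually requires:

package main

import (
	"fmt"

	composer "google.golang.org/api/composer/v1beta1"
)

func main() {
	// Alias IPs are required whenever EnablePrivateEnvironment is true.
	cfg := &composer.EnvironmentConfig{
		NodeConfig: &composer.NodeConfig{
			IpAllocationPolicy: &composer.IPAllocationPolicy{
				UseIpAliases: true,
			},
		},
		PrivateEnvironmentConfig: &composer.PrivateEnvironmentConfig{
			EnablePrivateEnvironment: true,
			PrivateClusterConfig: &composer.PrivateClusterConfig{
				EnablePrivateEndpoint: true,
				// Defaults to 172.16.0.0/23 if left blank.
				MasterIpv4CidrBlock: "172.16.0.0/23",
			},
			// These two ranges must not overlap each other or the
			// master range above (example values only).
			WebServerIpv4CidrBlock: "172.16.2.0/24",
			CloudSqlIpv4CidrBlock:  "172.16.4.0/24",
		},
	}
	fmt.Println(cfg.PrivateEnvironmentConfig.EnablePrivateEnvironment)
}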
WebServerIpv4CidrBlock string `json:"webServerIpv4CidrBlock,omitempty"` @@ -1101,115 +968,64 @@ func (s *PrivateEnvironmentConfig) MarshalJSON() ([]byte, error) { // inside the environment. type SoftwareConfig struct { // AirflowConfigOverrides: Optional. Apache Airflow configuration - // properties to override. - // - // Property keys contain the section and property names, separated by - // a - // hyphen, for example "core-dags_are_paused_at_creation". Section names - // must - // not contain hyphens ("-"), opening square brackets ("["), or - // closing - // square brackets ("]"). The property name must not be empty and must - // not + // properties to override. Property keys contain the section and + // property names, separated by a hyphen, for example + // "core-dags_are_paused_at_creation". Section names must not contain + // hyphens ("-"), opening square brackets ("["), or closing square + // brackets ("]"). The property name must not be empty and must not // contain an equals sign ("=") or semicolon (";"). Section and property - // names - // must not contain a period ("."). Apache Airflow configuration - // property - // names must be written - // in + // names must not contain a period ("."). Apache Airflow configuration + // property names must be written in // [snake_case](https://en.wikipedia.org/wiki/Snake_case). Property - // values can - // contain any character, and can be written in any lower/upper case - // format. - // - // Certain Apache Airflow configuration property values - // are - // [blacklisted](/composer/docs/how-to/managing/setting-airflow-confi - // gurations#airflow_configuration_blacklists), - // and cannot be overridden. + // values can contain any character, and can be written in any + // lower/upper case format. Certain Apache Airflow configuration + // property values are + // [blocked](/composer/docs/concepts/airflow-configurations), and cannot + // be overridden. AirflowConfigOverrides map[string]string `json:"airflowConfigOverrides,omitempty"` // EnvVariables: Optional. Additional environment variables to provide - // to the Apache Airflow - // scheduler, worker, and webserver processes. - // - // Environment variable names must match the regular - // expression - // `a-zA-Z_*`. They cannot specify Apache Airflow - // software configuration overrides (they cannot match the regular - // expression - // `AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+`), and they cannot match any of - // the - // following reserved names: - // - // * `AIRFLOW_HOME` - // * `C_FORCE_ROOT` - // * `CONTAINER_NAME` - // * `DAGS_FOLDER` - // * `GCP_PROJECT` - // * `GCS_BUCKET` - // * `GKE_CLUSTER_NAME` - // * `SQL_DATABASE` - // * `SQL_INSTANCE` - // * `SQL_PASSWORD` - // * `SQL_PROJECT` - // * `SQL_REGION` - // * `SQL_USER` + // to the Apache Airflow scheduler, worker, and webserver processes. + // Environment variable names must match the regular expression + // `a-zA-Z_*`. They cannot specify Apache Airflow software configuration + // overrides (they cannot match the regular expression + // `AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+`), and they cannot match any of the + // following reserved names: * `AIRFLOW_HOME` * `C_FORCE_ROOT` * + // `CONTAINER_NAME` * `DAGS_FOLDER` * `GCP_PROJECT` * `GCS_BUCKET` * + // `GKE_CLUSTER_NAME` * `SQL_DATABASE` * `SQL_INSTANCE` * `SQL_PASSWORD` + // * `SQL_PROJECT` * `SQL_REGION` * `SQL_USER` EnvVariables map[string]string `json:"envVariables,omitempty"` - // ImageVersion: The version of the software running in the - // environment. 
+ // ImageVersion: The version of the software running in the environment. // This encapsulates both the version of Cloud Composer functionality - // and the - // version of Apache Airflow. It must match the regular + // and the version of Apache Airflow. It must match the regular // expression - // `composer-([0-9]+\.[0-9]+\.[0-9]+|latest)-airflow-[0-9]+\.[ - // 0-9]+(\.[0-9]+.*)?`. - // When used as input, the server also checks if the provided version - // is - // supported and denies the request for an unsupported version. - // - // The Cloud Composer portion of the version is a - // [semantic version](https://semver.org) or `latest`. When the patch - // version - // is omitted, the current Cloud Composer patch version is - // selected. - // When `latest` is provided instead of an explicit version number, - // the server replaces `latest` with the current Cloud Composer - // version - // and stores that version number in the same field. - // - // The portion of the image version that follows airflow- is - // an - // official Apache Airflow repository - // [release - // name](https://github.com/apache/incubator-airflow/releases). - // - // See also - // [Version - // List](/composer/docs/concepts/versioning/composer-versions). + // `composer-([0-9]+\.[0-9]+\.[0-9]+|latest)-airflow-[0-9]+\.[0-9]+(\.[0- + // 9]+.*)?`. When used as input, the server also checks if the provided + // version is supported and denies the request for an unsupported + // version. The Cloud Composer portion of the version is a [semantic + // version](https://semver.org) or `latest`. When the patch version is + // omitted, the current Cloud Composer patch version is selected. When + // `latest` is provided instead of an explicit version number, the + // server replaces `latest` with the current Cloud Composer version and + // stores that version number in the same field. The portion of the + // image version that follows *airflow-* is an official Apache Airflow + // repository [release + // name](https://github.com/apache/incubator-airflow/releases). See also + // [Version List](/composer/docs/concepts/versioning/composer-versions). ImageVersion string `json:"imageVersion,omitempty"` // PypiPackages: Optional. Custom Python Package Index (PyPI) packages - // to be installed in - // the environment. - // - // Keys refer to the lowercase package name such as "numpy" - // and values are the lowercase extras and version specifier such - // as - // "==1.12.0", "[devel,gcp_api]", or "[devel]>=1.8.2, <1.9.2". To - // specify a - // package without pinning it to a version specifier, use the empty - // string as - // the value. + // to be installed in the environment. Keys refer to the lowercase + // package name such as "numpy" and values are the lowercase extras and + // version specifier such as "==1.12.0", "[devel,gcp_api]", or + // "[devel]>=1.8.2, <1.9.2". To specify a package without pinning it to + // a version specifier, use the empty string as the value. PypiPackages map[string]string `json:"pypiPackages,omitempty"` // PythonVersion: Optional. The major version of Python used to run the - // Apache Airflow - // scheduler, worker, and webserver processes. - // - // Can be set to '2' or '3'. If not specified, the default is '2'. - // Cannot be + // Apache Airflow scheduler, worker, and webserver processes. Can be set + // to '2' or '3'. If not specified, the default is '2'. Cannot be // updated. 
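The image version format, PyPI package map, and config-override key rules above translate directly into a SoftwareConfig value. A minimal sketch that also checks a candidate image version against the documented regular expression; the sample version, package pins, and override are illustrative only:

package main

import (
	"fmt"
	"regexp"

	composer "google.golang.org/api/composer/v1beta1"
)

// Pattern copied from the ImageVersion documentation above.
var imageVersionRE = regexp.MustCompile(
	`^composer-([0-9]+\.[0-9]+\.[0-9]+|latest)-airflow-[0-9]+\.[0-9]+(\.[0-9]+.*)?$`)

func main() {
	version := "composer-1.12.0-airflow-1.10.10"
	fmt.Println(imageVersionRE.MatchString(version)) // true

	sc := &composer.SoftwareConfig{
		ImageVersion:  version,
		PythonVersion: "3",
		// Keys are lowercase package names; values are extras/version
		// specifiers, or "" to leave the version unpinned.
		PypiPackages: map[string]string{
			"numpy":        "==1.12.0",
			"scikit-learn": "",
		},
		// Section and property name joined by a hyphen, in snake_case.
		AirflowConfigOverrides: map[string]string{
			"core-dags_are_paused_at_creation": "True",
		},
	}
	fmt.Println(sc.ImageVersion)
}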
PythonVersion string `json:"pythonVersion,omitempty"` @@ -1239,32 +1055,24 @@ func (s *SoftwareConfig) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. -// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. -// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -1294,14 +1102,11 @@ func (s *Status) MarshalJSON() ([]byte, error) { // server App Engine instance. type WebServerConfig struct { // MachineType: Optional. Machine type on which Airflow web server is - // running. - // For example: composer-n1-webserver-2, - // composer-n1-webserver-4, - // composer-n1-webserver-8. - // If not specified, composer-n1-webserver-2 will be used. - // Value custom is returned only in response, if Airflow web server - // parameters - // were manually changed to a non-standard values. + // running. It has to be one of: composer-n1-webserver-2, + // composer-n1-webserver-4 or composer-n1-webserver-8. If not specified, + // composer-n1-webserver-2 will be used. Value custom is returned only + // in response, if Airflow web server parameters were manually changed + // to a non-standard values. MachineType string `json:"machineType,omitempty"` // ForceSendFields is a list of field names (e.g. 
"MachineType") to @@ -1403,7 +1208,7 @@ func (c *ProjectsLocationsEnvironmentsCreateCall) Header() http.Header { func (c *ProjectsLocationsEnvironmentsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1476,7 +1281,7 @@ func (c *ProjectsLocationsEnvironmentsCreateCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "parent": { - // "description": "The parent must be of the form\n\"projects/{projectId}/locations/{locationId}\".", + // "description": "The parent must be of the form \"projects/{projectId}/locations/{locationId}\".", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -1541,7 +1346,7 @@ func (c *ProjectsLocationsEnvironmentsDeleteCall) Header() http.Header { func (c *ProjectsLocationsEnvironmentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1609,7 +1414,7 @@ func (c *ProjectsLocationsEnvironmentsDeleteCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "name": { - // "description": "The environment to delete, in the form:\n\"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", + // "description": "The environment to delete, in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", // "required": true, @@ -1682,7 +1487,7 @@ func (c *ProjectsLocationsEnvironmentsGetCall) Header() http.Header { func (c *ProjectsLocationsEnvironmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1753,7 +1558,7 @@ func (c *ProjectsLocationsEnvironmentsGetCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "The resource name of the environment to get, in the form:\n\"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", + // "description": "The resource name of the environment to get, in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", // "required": true, @@ -1840,7 +1645,7 @@ func (c *ProjectsLocationsEnvironmentsListCall) Header() http.Header { func (c *ProjectsLocationsEnvironmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1922,7 +1727,7 @@ func (c *ProjectsLocationsEnvironmentsListCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "parent": { - // "description": "List environments in the 
given project and location, in the form:\n\"projects/{projectId}/locations/{locationId}\"", + // "description": "List environments in the given project and location, in the form: \"projects/{projectId}/locations/{locationId}\"", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -1981,190 +1786,87 @@ func (r *ProjectsLocationsEnvironmentsService) Patch(name string, environment *E } // UpdateMask sets the optional parameter "updateMask": Required. A -// comma-separated list of paths, relative to `Environment`, of -// fields to update. -// For example, to set the version of scikit-learn to install in -// the -// environment to 0.19.0 and to remove an existing installation -// of -// argparse, the `updateMask` parameter would include the following -// two -// `paths` values: "config.softwareConfig.pypiPackages.scikit-learn" -// and -// "config.softwareConfig.pypiPackages.argparse". The included -// patch -// environment would specify the scikit-learn version as follows: -// -// { -// "config":{ -// "softwareConfig":{ -// "pypiPackages":{ -// "scikit-learn":"==0.19.0" -// } -// } -// } -// } -// -// Note that in the above example, any existing PyPI packages -// other than scikit-learn and argparse will be unaffected. -// -// Only one update type may be included in a single request's -// `updateMask`. -// For example, one cannot update both the PyPI packages and -// labels in the same request. However, it is possible to update -// multiple -// members of a map field simultaneously in the same request. For -// example, -// to set the labels "label1" and "label2" while clearing "label3" -// (assuming -// it already exists), one can -// provide the paths "labels.label1", "labels.label2", and -// "labels.label3" -// and populate the patch environment as follows: -// -// { -// "labels":{ -// "label1":"new-label1-value" -// "label2":"new-label2-value" -// } -// } -// -// Note that in the above example, any existing labels that are -// not -// included in the `updateMask` will be unaffected. -// -// It is also possible to replace an entire map field by providing -// the -// map field's path in the `updateMask`. The new value of the field -// will -// be that which is provided in the patch environment. For example, -// to -// delete all pre-existing user-specified PyPI packages and -// install botocore at version 1.7.14, the `updateMask` would -// contain -// the path "config.softwareConfig.pypiPackages", and -// the patch environment would be the following: -// -// { -// "config":{ -// "softwareConfig":{ -// "pypiPackages":{ -// "botocore":"==1.7.14" -// } -// } -// } -// } -// -// Note: Only the following fields can be updated: -// -//

- // Mask: config.softwareConfig.pypiPackages
- // Purpose: Replace all custom custom PyPI packages. If a replacement
- // package map is not included in `environment`, all custom PyPI
- // packages are cleared. It is an error to provide both this mask and a
- // mask specifying an individual package.
- //
- // Mask: config.softwareConfig.pypiPackages.packagename
- // Purpose: Update the custom PyPI package packagename, preserving
- // other packages. To delete the package, include it in `updateMask`,
- // and omit the mapping for it in
- // `environment.config.softwareConfig.pypiPackages`. It is an error
- // to provide both a mask of this form and the
- // "config.softwareConfig.pypiPackages" mask.
- //
- // Mask: labels
- // Purpose: Replace all environment labels. If a replacement labels map
- // is not included in `environment`, all labels are cleared. It is an
- // error to provide both this mask and a mask specifying one or more
- // individual labels.
- //
- // Mask: labels.labelName
- // Purpose: Set the label named labelName, while preserving other
- // labels. To delete the label, include it in `updateMask` and omit its
- // mapping in `environment.labels`. It is an error to provide both a
- // mask of this form and the "labels" mask.
- //
- // Mask: config.nodeCount
- // Purpose: Horizontally scale the number of nodes in the environment.
- // An integer greater than or equal to 3 must be provided in the
- // `config.nodeCount` field.
- //
- // Mask: config.webServerNetworkAccessControl
- // Purpose: Replace the environment's current
- // WebServerNetworkAccessControl.
- //
- // Mask: config.softwareConfig.airflowConfigOverrides
- // Purpose: Replace all Apache Airflow config overrides. If a
- // replacement config overrides map is not included in `environment`,
- // all config overrides are cleared. It is an error to provide both
- // this mask and a mask specifying one or more individual config
- // overrides.
- //
- // Mask: config.softwareConfig.airflowConfigOverrides.section-name
- // Purpose: Override the Apache Airflow config property name in the
- // section named section, preserving other properties. To delete the
- // property override, include it in `updateMask` and omit its mapping
- // in `environment.config.softwareConfig.airflowConfigOverrides`. It is
- // an error to provide both a mask of this form and the
- // "config.softwareConfig.airflowConfigOverrides" mask.
- //
- // Mask: config.softwareConfig.envVariables
- // Purpose: Replace all environment variables. If a replacement
- // environment variable map is not included in `environment`, all
- // custom environment variables are cleared. It is an error to provide
- // both this mask and a mask specifying one or more individual
- // environment variables.
- //
- // Mask: config.softwareConfig.imageVersion
- // Purpose: Upgrade the version of the environment in-place. Refer to
- // `SoftwareConfig.image_version` for information on how to format the
- // new image version. Additionally, the new image version cannot effect
- // a version downgrade and must match the current image version's
- // Composer major version and Airflow major and minor versions. Consult
- // the Cloud Composer Version List for valid values.
+// comma-separated list of paths, relative to `Environment`, of fields +// to update. For example, to set the version of scikit-learn to install +// in the environment to 0.19.0 and to remove an existing installation +// of argparse, the `updateMask` parameter would include the following +// two `paths` values: "config.softwareConfig.pypiPackages.scikit-learn" +// and "config.softwareConfig.pypiPackages.argparse". The included patch +// environment would specify the scikit-learn version as follows: { +// "config":{ "softwareConfig":{ "pypiPackages":{ +// "scikit-learn":"==0.19.0" } } } } Note that in the above example, any +// existing PyPI packages other than scikit-learn and argparse will be +// unaffected. Only one update type may be included in a single +// request's `updateMask`. For example, one cannot update both the PyPI +// packages and labels in the same request. However, it is possible to +// update multiple members of a map field simultaneously in the same +// request. For example, to set the labels "label1" and "label2" while +// clearing "label3" (assuming it already exists), one can provide the +// paths "labels.label1", "labels.label2", and "labels.label3" and +// populate the patch environment as follows: { "labels":{ +// "label1":"new-label1-value" "label2":"new-label2-value" } } Note that +// in the above example, any existing labels that are not included in +// the `updateMask` will be unaffected. It is also possible to replace +// an entire map field by providing the map field's path in the +// `updateMask`. The new value of the field will be that which is +// provided in the patch environment. For example, to delete all +// pre-existing user-specified PyPI packages and install botocore at +// version 1.7.14, the `updateMask` would contain the path +// "config.softwareConfig.pypiPackages", and the patch environment would +// be the following: { "config":{ "softwareConfig":{ "pypiPackages":{ +// "botocore":"==1.7.14" } } } } *Note:* Only the following fields can +// be updated: * config.softwareConfig.pypiPackages * Replace all custom +// custom PyPI packages. If a replacement package map is not included in +// `environment`, all custom PyPI packages are cleared. It is an error +// to provide both this mask and a mask specifying an individual +// package. * config.softwareConfig.pypiPackages.packagename * Update +// the custom PyPI package packagename, preserving other packages. To +// delete the package, include it in `updateMask`, and omit the mapping +// for it in `environment.config.softwareConfig.pypiPackages`. It is an +// error to provide both a mask of this form and the +// "config.softwareConfig.pypiPackages" mask. * labels * Replace all +// environment labels. If a replacement labels map is not included in +// `environment`, all labels are cleared. It is an error to provide both +// this mask and a mask specifying one or more individual labels. * +// labels.labelName * Set the label named labelName, while preserving +// other labels. To delete the label, include it in `updateMask` and +// omit its mapping in `environment.labels`. It is an error to provide +// both a mask of this form and the "labels" mask. * config.nodeCount * +// Horizontally scale the number of nodes in the environment. An integer +// greater than or equal to 3 must be provided in the `config.nodeCount` +// field. * config.webServerNetworkAccessControl * Replace the +// environment's current WebServerNetworkAccessControl. 
* +// config.softwareConfig.airflowConfigOverrides * Replace all Apache +// Airflow config overrides. If a replacement config overrides map is +// not included in `environment`, all config overrides are cleared. It +// is an error to provide both this mask and a mask specifying one or +// more individual config overrides. * +// config.softwareConfig.airflowConfigOverrides.section- name * Override +// the Apache Airflow config property name in the section named section, +// preserving other properties. To delete the property override, include +// it in `updateMask` and omit its mapping in +// `environment.config.softwareConfig.airflowConfigOverrides`. It is an +// error to provide both a mask of this form and the +// "config.softwareConfig.airflowConfigOverrides" mask. * +// config.softwareConfig.envVariables * Replace all environment +// variables. If a replacement environment variable map is not included +// in `environment`, all custom environment variables are cleared. It is +// an error to provide both this mask and a mask specifying one or more +// individual environment variables. * +// config.softwareConfig.imageVersion * Upgrade the version of the +// environment in-place. Refer to `SoftwareConfig.image_version` for +// information on how to format the new image version. Additionally, the +// new image version cannot effect a version downgrade and must match +// the current image version's Composer major version and Airflow major +// and minor versions. Consult the Cloud Composer Version List for valid +// values. * config.databaseConfig.machineType * Cloud SQL machine type +// used by Airflow database. It has to be one of: db-n1-standard-2, +// db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. * +// config.webServerConfig.machineType * Machine type on which Airflow +// web server is running. It has to be one of: composer-n1-webserver-2, +// composer-n1-webserver-4 or composer-n1-webserver-8. * +// config.maintenanceWindow * Maintenance window during which Cloud +// Composer components may be under maintenance. func (c *ProjectsLocationsEnvironmentsPatchCall) UpdateMask(updateMask string) *ProjectsLocationsEnvironmentsPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -2197,7 +1899,7 @@ func (c *ProjectsLocationsEnvironmentsPatchCall) Header() http.Header { func (c *ProjectsLocationsEnvironmentsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2270,14 +1972,14 @@ func (c *ProjectsLocationsEnvironmentsPatchCall) Do(opts ...googleapi.CallOption // ], // "parameters": { // "name": { - // "description": "The relative resource name of the environment to update, in the form:\n\"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", + // "description": "The relative resource name of the environment to update, in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "Required. 
A comma-separated list of paths, relative to `Environment`, of\nfields to update.\nFor example, to set the version of scikit-learn to install in the\nenvironment to 0.19.0 and to remove an existing installation of\nargparse, the `updateMask` parameter would include the following two\n`paths` values: \"config.softwareConfig.pypiPackages.scikit-learn\" and\n\"config.softwareConfig.pypiPackages.argparse\". The included patch\nenvironment would specify the scikit-learn version as follows:\n\n {\n \"config\":{\n \"softwareConfig\":{\n \"pypiPackages\":{\n \"scikit-learn\":\"==0.19.0\"\n }\n }\n }\n }\n\nNote that in the above example, any existing PyPI packages\nother than scikit-learn and argparse will be unaffected.\n\nOnly one update type may be included in a single request's `updateMask`.\nFor example, one cannot update both the PyPI packages and\nlabels in the same request. However, it is possible to update multiple\nmembers of a map field simultaneously in the same request. For example,\nto set the labels \"label1\" and \"label2\" while clearing \"label3\" (assuming\nit already exists), one can\nprovide the paths \"labels.label1\", \"labels.label2\", and \"labels.label3\"\nand populate the patch environment as follows:\n\n {\n \"labels\":{\n \"label1\":\"new-label1-value\"\n \"label2\":\"new-label2-value\"\n }\n }\n\nNote that in the above example, any existing labels that are not\nincluded in the `updateMask` will be unaffected.\n\nIt is also possible to replace an entire map field by providing the\nmap field's path in the `updateMask`. The new value of the field will\nbe that which is provided in the patch environment. For example, to\ndelete all pre-existing user-specified PyPI packages and\ninstall botocore at version 1.7.14, the `updateMask` would contain\nthe path \"config.softwareConfig.pypiPackages\", and\nthe patch environment would be the following:\n\n {\n \"config\":{\n \"softwareConfig\":{\n \"pypiPackages\":{\n \"botocore\":\"==1.7.14\"\n }\n }\n }\n }\n\n\u003cstrong\u003eNote:\u003c/strong\u003e Only the following fields can be updated:\n\n \u003ctable\u003e\n \u003ctbody\u003e\n \u003ctr\u003e\n \u003ctd\u003e\u003cstrong\u003eMask\u003c/strong\u003e\u003c/td\u003e\n \u003ctd\u003e\u003cstrong\u003ePurpose\u003c/strong\u003e\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003econfig.softwareConfig.pypiPackages\n \u003c/td\u003e\n \u003ctd\u003eReplace all custom custom PyPI packages. If a replacement\n package map is not included in `environment`, all custom\n PyPI packages are cleared. It is an error to provide both this mask and a\n mask specifying an individual package.\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003econfig.softwareConfig.pypiPackages.\u003cvar\u003epackagename\u003c/var\u003e\u003c/td\u003e\n \u003ctd\u003eUpdate the custom PyPI package \u003cvar\u003epackagename\u003c/var\u003e,\n preserving other packages. To delete the package, include it in\n `updateMask`, and omit the mapping for it in\n `environment.config.softwareConfig.pypiPackages`. It is an error\n to provide both a mask of this form and the\n \"config.softwareConfig.pypiPackages\" mask.\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003elabels\u003c/td\u003e\n \u003ctd\u003eReplace all environment labels. If a replacement labels map is not\n included in `environment`, all labels are cleared. 
It is an error to\n provide both this mask and a mask specifying one or more individual\n labels.\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003elabels.\u003cvar\u003elabelName\u003c/var\u003e\u003c/td\u003e\n \u003ctd\u003eSet the label named \u003cvar\u003elabelName\u003c/var\u003e, while preserving other\n labels. To delete the label, include it in `updateMask` and omit its\n mapping in `environment.labels`. It is an error to provide both a\n mask of this form and the \"labels\" mask.\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003econfig.nodeCount\u003c/td\u003e\n \u003ctd\u003eHorizontally scale the number of nodes in the environment. An integer\n greater than or equal to 3 must be provided in the `config.nodeCount`\n field.\n \u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003econfig.webServerNetworkAccessControl\u003c/td\u003e\n \u003ctd\u003eReplace the environment's current WebServerNetworkAccessControl.\n \u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003econfig.softwareConfig.airflowConfigOverrides\u003c/td\u003e\n \u003ctd\u003eReplace all Apache Airflow config overrides. If a replacement config\n overrides map is not included in `environment`, all config overrides\n are cleared.\n It is an error to provide both this mask and a mask specifying one or\n more individual config overrides.\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003econfig.softwareConfig.airflowConfigOverrides.\u003cvar\u003esection\u003c/var\u003e-\u003cvar\u003ename\n \u003c/var\u003e\u003c/td\u003e\n \u003ctd\u003eOverride the Apache Airflow config property \u003cvar\u003ename\u003c/var\u003e in the\n section named \u003cvar\u003esection\u003c/var\u003e, preserving other properties. To delete\n the property override, include it in `updateMask` and omit its mapping\n in `environment.config.softwareConfig.airflowConfigOverrides`.\n It is an error to provide both a mask of this form and the\n \"config.softwareConfig.airflowConfigOverrides\" mask.\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003econfig.softwareConfig.envVariables\u003c/td\u003e\n \u003ctd\u003eReplace all environment variables. If a replacement environment\n variable map is not included in `environment`, all custom environment\n variables are cleared.\n It is an error to provide both this mask and a mask specifying one or\n more individual environment variables.\u003c/td\u003e\n \u003c/tr\u003e\n \u003ctr\u003e\n \u003ctd\u003econfig.softwareConfig.imageVersion\u003c/td\u003e\n \u003ctd\u003eUpgrade the version of the environment in-place. Refer to\n `SoftwareConfig.image_version` for information on how to format the new\n image version. Additionally, the new image version cannot effect a version\n downgrade and must match the current image version's Composer major\n version and Airflow major and minor versions. Consult the\n \u003ca href=\"/composer/docs/concepts/versioning/composer-versions\"\u003eCloud\n Composer Version List\u003c/a\u003e for valid values.\u003c/td\u003e\n \u003c/tr\u003e\n \u003c/tbody\u003e\n \u003c/table\u003e", + // "description": "Required. A comma-separated list of paths, relative to `Environment`, of fields to update. 
For example, to set the version of scikit-learn to install in the environment to 0.19.0 and to remove an existing installation of argparse, the `updateMask` parameter would include the following two `paths` values: \"config.softwareConfig.pypiPackages.scikit-learn\" and \"config.softwareConfig.pypiPackages.argparse\". The included patch environment would specify the scikit-learn version as follows: { \"config\":{ \"softwareConfig\":{ \"pypiPackages\":{ \"scikit-learn\":\"==0.19.0\" } } } } Note that in the above example, any existing PyPI packages other than scikit-learn and argparse will be unaffected. Only one update type may be included in a single request's `updateMask`. For example, one cannot update both the PyPI packages and labels in the same request. However, it is possible to update multiple members of a map field simultaneously in the same request. For example, to set the labels \"label1\" and \"label2\" while clearing \"label3\" (assuming it already exists), one can provide the paths \"labels.label1\", \"labels.label2\", and \"labels.label3\" and populate the patch environment as follows: { \"labels\":{ \"label1\":\"new-label1-value\" \"label2\":\"new-label2-value\" } } Note that in the above example, any existing labels that are not included in the `updateMask` will be unaffected. It is also possible to replace an entire map field by providing the map field's path in the `updateMask`. The new value of the field will be that which is provided in the patch environment. For example, to delete all pre-existing user-specified PyPI packages and install botocore at version 1.7.14, the `updateMask` would contain the path \"config.softwareConfig.pypiPackages\", and the patch environment would be the following: { \"config\":{ \"softwareConfig\":{ \"pypiPackages\":{ \"botocore\":\"==1.7.14\" } } } } *Note:* Only the following fields can be updated: * config.softwareConfig.pypiPackages * Replace all custom custom PyPI packages. If a replacement package map is not included in `environment`, all custom PyPI packages are cleared. It is an error to provide both this mask and a mask specifying an individual package. * config.softwareConfig.pypiPackages.packagename * Update the custom PyPI package packagename, preserving other packages. To delete the package, include it in `updateMask`, and omit the mapping for it in `environment.config.softwareConfig.pypiPackages`. It is an error to provide both a mask of this form and the \"config.softwareConfig.pypiPackages\" mask. * labels * Replace all environment labels. If a replacement labels map is not included in `environment`, all labels are cleared. It is an error to provide both this mask and a mask specifying one or more individual labels. * labels.labelName * Set the label named labelName, while preserving other labels. To delete the label, include it in `updateMask` and omit its mapping in `environment.labels`. It is an error to provide both a mask of this form and the \"labels\" mask. * config.nodeCount * Horizontally scale the number of nodes in the environment. An integer greater than or equal to 3 must be provided in the `config.nodeCount` field. * config.webServerNetworkAccessControl * Replace the environment's current WebServerNetworkAccessControl. * config.softwareConfig.airflowConfigOverrides * Replace all Apache Airflow config overrides. If a replacement config overrides map is not included in `environment`, all config overrides are cleared. 
It is an error to provide both this mask and a mask specifying one or more individual config overrides. * config.softwareConfig.airflowConfigOverrides.section- name * Override the Apache Airflow config property name in the section named section, preserving other properties. To delete the property override, include it in `updateMask` and omit its mapping in `environment.config.softwareConfig.airflowConfigOverrides`. It is an error to provide both a mask of this form and the \"config.softwareConfig.airflowConfigOverrides\" mask. * config.softwareConfig.envVariables * Replace all environment variables. If a replacement environment variable map is not included in `environment`, all custom environment variables are cleared. It is an error to provide both this mask and a mask specifying one or more individual environment variables. * config.softwareConfig.imageVersion * Upgrade the version of the environment in-place. Refer to `SoftwareConfig.image_version` for information on how to format the new image version. Additionally, the new image version cannot effect a version downgrade and must match the current image version's Composer major version and Airflow major and minor versions. Consult the Cloud Composer Version List for valid values. * config.databaseConfig.machineType * Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. * config.webServerConfig.machineType * Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. * config.maintenanceWindow * Maintenance window during which Cloud Composer components may be under maintenance.", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -2366,7 +2068,7 @@ func (c *ProjectsLocationsImageVersionsListCall) Header() http.Header { func (c *ProjectsLocationsImageVersionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2448,7 +2150,7 @@ func (c *ProjectsLocationsImageVersionsListCall) Do(opts ...googleapi.CallOption // "type": "string" // }, // "parent": { - // "description": "List ImageVersions in the given project and location, in the form:\n\"projects/{projectId}/locations/{locationId}\"", + // "description": "List ImageVersions in the given project and location, in the form: \"projects/{projectId}/locations/{locationId}\"", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -2498,12 +2200,9 @@ type ProjectsLocationsOperationsDeleteCall struct { } // Delete: Deletes a long-running operation. This method indicates that -// the client is -// no longer interested in the operation result. It does not cancel -// the -// operation. If the server doesn't support this method, it -// returns -// `google.rpc.Code.UNIMPLEMENTED`. +// the client is no longer interested in the operation result. It does +// not cancel the operation. If the server doesn't support this method, +// it returns `google.rpc.Code.UNIMPLEMENTED`. 
func (r *ProjectsLocationsOperationsService) Delete(name string) *ProjectsLocationsOperationsDeleteCall { c := &ProjectsLocationsOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2537,7 +2236,7 @@ func (c *ProjectsLocationsOperationsDeleteCall) Header() http.Header { func (c *ProjectsLocationsOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2596,7 +2295,7 @@ func (c *ProjectsLocationsOperationsDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + // "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", // "httpMethod": "DELETE", // "id": "composer.projects.locations.operations.delete", @@ -2634,11 +2333,9 @@ type ProjectsLocationsOperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this -// method to poll the operation result at intervals as recommended by -// the API -// service. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. func (r *ProjectsLocationsOperationsService) Get(name string) *ProjectsLocationsOperationsGetCall { c := &ProjectsLocationsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2682,7 +2379,7 @@ func (c *ProjectsLocationsOperationsGetCall) Header() http.Header { func (c *ProjectsLocationsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2744,7 +2441,7 @@ func (c *ProjectsLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", // "httpMethod": "GET", // "id": "composer.projects.locations.operations.get", @@ -2783,22 +2480,15 @@ type ProjectsLocationsOperationsListCall struct { } // List: Lists operations that match the specified filter in the -// request. If the -// server doesn't support this method, it returns -// `UNIMPLEMENTED`. 
-// -// NOTE: the `name` binding allows API services to override the -// binding -// to use different resource name schemes, such as `users/*/operations`. -// To -// override the binding, API services can add a binding such -// as -// "/v1/{name=users/*}/operations" to their service configuration. -// For backwards compatibility, the default name includes the -// operations -// collection id, however overriding users must ensure the name -// binding -// is the parent resource, without the operations collection id. +// request. If the server doesn't support this method, it returns +// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to +// override the binding to use different resource name schemes, such as +// `users/*/operations`. To override the binding, API services can add a +// binding such as "/v1/{name=users/*}/operations" to their service +// configuration. For backwards compatibility, the default name includes +// the operations collection id, however overriding users must ensure +// the name binding is the parent resource, without the operations +// collection id. func (r *ProjectsLocationsOperationsService) List(name string) *ProjectsLocationsOperationsListCall { c := &ProjectsLocationsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2863,7 +2553,7 @@ func (c *ProjectsLocationsOperationsListCall) Header() http.Header { func (c *ProjectsLocationsOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2925,7 +2615,7 @@ func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. 
For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/operations", // "httpMethod": "GET", // "id": "composer.projects.locations.operations.list", diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json index 8281178449f..4fc1b7bc4ff 100644 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json +++ b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json @@ -29,7 +29,7 @@ "description": "Creates and runs virtual machines on Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", - "etag": "\"u9GIe6H63LSGq-9_t39K2Zx_EAc/Exsykdie-mJHojb9c1Q6suH3lMs\"", + "etag": "\"-2NioU2H8y8siEzrBOV_qzRI6kQ/pNe3pcORsOJbWIaKfhC0AYT-Cww\"", "icons": { "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" @@ -130,6 +130,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/acceleratorTypes", @@ -185,7 +190,7 @@ ] }, "list": { - "description": "Retrieves a list of accelerator types available to the specified project.", + "description": "Retrieves a list of accelerator types that are available to the specified project.", "httpMethod": "GET", "id": "compute.acceleratorTypes.list", "parameterOrder": [ @@ -223,6 +228,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -287,6 +297,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/addresses", @@ -473,6 +488,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/addresses", @@ -625,6 +645,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/autoscalers", @@ -805,6 +830,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "Name of the zone for this request.", "location": "path", @@ -1192,6 +1222,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/backendBuckets", @@ -1372,6 +1407,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/backendServices", @@ -1537,7 +1577,7 @@ ] }, "insert": { - "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Understanding backend services for more information.", + "description": "Creates a BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.", "httpMethod": "POST", "id": "compute.backendServices.insert", "parameterOrder": [ @@ -1606,6 +1646,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/backendServices", @@ -1619,7 +1664,7 @@ ] }, "patch": { - "description": "Patches the specified BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Patches the specified BackendService resource with the data included in the request. For more information, see Backend services overview. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.backendServices.patch", "parameterOrder": [ @@ -1737,7 +1782,7 @@ ] }, "update": { - "description": "Updates the specified BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information.", + "description": "Updates the specified BackendService resource with the data included in the request. For more information, see Backend services overview.", "httpMethod": "PUT", "id": "compute.backendServices.update", "parameterOrder": [ @@ -1823,6 +1868,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/diskTypes", @@ -1916,6 +1966,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -2029,6 +2084,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/disks", @@ -2059,7 +2119,7 @@ "type": "string" }, "guestFlush": { - "description": "[Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", + "description": "[Input Only] Whether to attempt an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", "location": "query", "type": "boolean" }, @@ -2231,7 +2291,7 @@ ] }, "insert": { - "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", + "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk from a source (sourceImage, sourceSnapshot, or sourceDisk) or create an empty 500 GB data disk by omitting all properties. 
You can also create a disk that is larger than the default size by specifying the sizeGb property.", "httpMethod": "POST", "id": "compute.disks.insert", "parameterOrder": [ @@ -2315,6 +2375,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -2715,6 +2780,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/externalVpnGateways", @@ -2946,6 +3016,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/firewalls", @@ -3123,6 +3198,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/forwardingRules", @@ -3309,6 +3389,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/forwardingRules", @@ -3659,6 +3744,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/addresses", @@ -3890,6 +3980,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/forwardingRules", @@ -4281,6 +4376,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/networkEndpointGroups", @@ -4337,6 +4437,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", @@ -4395,6 +4500,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/operations", @@ -4508,6 +4618,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/operations", @@ -4648,6 +4763,11 @@ "description": "Parent ID for this request.", "location": "query", "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "locations/global/operations", @@ -4706,6 +4826,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/healthChecks", @@ -4860,6 +4985,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/healthChecks", @@ -5137,6 +5267,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/httpHealthChecks", @@ -5414,6 +5549,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/httpsHealthChecks", @@ -5814,6 +5954,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/images", @@ -5826,6 +5971,47 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "patch": { + "description": "Patches the specified image with the data included in the request. Only the following fields can be modified: family, description, deprecation status.", + "httpMethod": "PATCH", + "id": "compute.images.patch", + "parameterOrder": [ + "project", + "image" + ], + "parameters": { + "image": { + "description": "Name of the image resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/images/{image}", + "request": { + "$ref": "Image" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setIamPolicy": { "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "httpMethod": "POST", @@ -6028,6 +6214,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/instanceGroupManagers", @@ -6381,6 +6572,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the managed instance group is located.", "location": "path", @@ -6399,7 +6595,7 @@ ] }, "listErrors": { - "description": "Lists all errors thrown by actions on instances for a given managed instance group.", + "description": "Lists all errors thrown by actions on instances for a given managed instance group. The filter and orderBy query parameters are not supported.", "httpMethod": "GET", "id": "compute.instanceGroupManagers.listErrors", "parameterOrder": [ @@ -6444,6 +6640,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", "location": "path", @@ -6462,7 +6663,7 @@ ] }, "listManagedInstances": { - "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", + "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. 
The orderBy query parameter is not supported.", "httpMethod": "POST", "id": "compute.instanceGroupManagers.listManagedInstances", "parameterOrder": [ @@ -6507,6 +6708,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the managed instance group is located.", "location": "path", @@ -6525,7 +6731,7 @@ ] }, "listPerInstanceConfigs": { - "description": "Lists all of the per-instance configs defined for the managed instance group.", + "description": "Lists all of the per-instance configs defined for the managed instance group. The orderBy query parameter is not supported.", "httpMethod": "POST", "id": "compute.instanceGroupManagers.listPerInstanceConfigs", "parameterOrder": [ @@ -6570,6 +6776,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", "location": "path", @@ -7200,6 +7411,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/instanceGroups", @@ -7257,7 +7473,7 @@ ] }, "get": { - "description": "Returns the specified instance group. Gets a list of available instance groups by making a list() request.", + "description": "Returns the specified zonal instance group. Get a list of available zonal instance groups by making a list() request.\n\nFor managed instance groups, use the instanceGroupManagers or regionInstanceGroupManagers methods instead.", "httpMethod": "GET", "id": "compute.instanceGroups.get", "parameterOrder": [ @@ -7337,7 +7553,7 @@ ] }, "list": { - "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + "description": "Retrieves the list of zonal instance group resources contained within the specified zone.\n\nFor managed instance groups, use the instanceGroupManagers or regionInstanceGroupManagers methods instead.", "httpMethod": "GET", "id": "compute.instanceGroups.list", "parameterOrder": [ @@ -7375,6 +7591,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the instance group is located.", "location": "path", @@ -7393,7 +7614,7 @@ ] }, "listInstances": { - "description": "Lists the instances in the specified instance group.", + "description": "Lists the instances in the specified instance group. 
The orderBy query parameter is not supported.", "httpMethod": "POST", "id": "compute.instanceGroups.listInstances", "parameterOrder": [ @@ -7438,6 +7659,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the instance group is located.", "location": "path", @@ -7783,6 +8009,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/instanceTemplates", @@ -8019,6 +8250,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/instances", @@ -8042,7 +8278,7 @@ ], "parameters": { "forceAttach": { - "description": "Whether to force attach the disk even if it's currently attached to another instance.", + "description": "Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error.", "location": "query", "type": "boolean" }, @@ -8488,7 +8724,7 @@ ], "parameters": { "instance": { - "description": "Name of the instance scoping this request.", + "description": "Name of the instance for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -8511,7 +8747,7 @@ "type": "string" }, "start": { - "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. For subsequent calls, this field should be set to the next value returned in the previous call.", + "description": "Specifies the starting byte position of the output to return. To start with the first byte of output to the specified port, omit this field or set it to `0`.\n\nIf the output for that byte position is available, this field matches the `start` parameter sent with the request. If the amount of serial console output exceeds the size of the buffer (1 MB), the oldest output is discarded and is no longer available. If the requested start position refers to discarded output, the start position is adjusted to the oldest output still available, and the adjusted start position is returned as the `start` property value.\n\nYou can also provide a negative start position, which translates to the most recent number of bytes written to the serial port. 
For example, -3 is interpreted as the most recent 3 bytes written to the serial console.", "format": "int64", "location": "query", "type": "string" @@ -8708,6 +8944,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -8727,7 +8968,7 @@ ] }, "listReferrers": { - "description": "Retrieves the list of referrers to instances contained within the specified zone. For more information, read Viewing Referrers to VM Instances.", + "description": "Retrieves a list of resources that refer to the VM instance specified in the request. For example, if the VM instance is part of a managed or unmanaged instance group, the referrers list includes the instance group. For more information, read Viewing referrers to VM instances.", "httpMethod": "GET", "id": "compute.instances.listReferrers", "parameterOrder": [ @@ -8773,6 +9014,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -9337,8 +9583,57 @@ "https://www.googleapis.com/auth/compute" ] }, + "setName": { + "description": "Sets name of an instance.", + "httpMethod": "POST", + "id": "compute.instances.setName", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "The instance name for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/instances/{instance}/setName", + "request": { + "$ref": "InstancesSetNameRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setScheduling": { - "description": "Sets an instance's scheduling options.", + "description": "Sets an instance's scheduling options. You can only call this method on a stopped instance, that is, a VM instance that is in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states.", "httpMethod": "POST", "id": "compute.instances.setScheduling", "parameterOrder": [ @@ -10248,6 +10543,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/interconnectAttachments", @@ -10439,6 +10739,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/interconnectAttachments", @@ -10669,6 +10974,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/interconnectLocations", @@ -10861,6 +11171,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/interconnects", @@ -11214,6 +11529,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/licenses", @@ -11453,6 +11773,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/machineImages", @@ -11584,6 +11909,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/machineTypes", @@ -11677,6 +12007,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -11741,6 +12076,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/networkEndpointGroups", @@ -12010,6 +12350,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", "location": "path", @@ -12073,6 +12418,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", "location": "path", @@ -12359,6 +12709,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/networks", @@ -12439,6 +12794,11 @@ "description": "The region of the request. 
The response will include all subnet routes, static routes and dynamic routes in the region.", "location": "query", "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/networks/{network}/listPeeringRoutes", @@ -12744,6 +13104,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/nodeGroups", @@ -13029,6 +13394,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -13094,6 +13464,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -13345,6 +13720,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/nodeTemplates", @@ -13579,6 +13959,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/nodeTemplates", @@ -13726,6 +14111,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/nodeTypes", @@ -13819,6 +14209,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -14072,7 +14467,7 @@ "id": "compute.organizationSecurityPolicies.insert", "parameters": { "parentId": { - "description": "Parent ID for this request.", + "description": "Parent ID for this request. The ID can be either be \"folders/[FOLDER_ID]\" if the parent is a folder or \"organizations/[ORGANIZATION_ID]\" if the parent is an organization.", "location": "query", "type": "string" }, @@ -14126,6 +14521,11 @@ "description": "Parent ID for this request.", "location": "query", "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "locations/global/securityPolicies", @@ -14382,6 +14782,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/packetMirrorings", @@ -14568,6 +14973,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/packetMirrorings", @@ -14892,6 +15302,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/getXpnResources", @@ -14940,6 +15355,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/listXpnHosts", @@ -15300,6 +15720,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/autoscalers", @@ -15588,7 +16013,7 @@ ] }, "insert": { - "description": "Creates a regional BackendService resource in the specified project using the data included in the request. 
There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Understanding backend services for more information.", + "description": "Creates a regional BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.", "httpMethod": "POST", "id": "compute.regionBackendServices.insert", "parameterOrder": [ @@ -15673,6 +16098,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/backendServices", @@ -15686,7 +16116,7 @@ ] }, "patch": { - "description": "Updates the specified regional BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Understanding backend services This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.regionBackendServices.patch", "parameterOrder": [ @@ -15780,7 +16210,7 @@ ] }, "update": { - "description": "Updates the specified regional BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information.", + "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Backend services overview.", "httpMethod": "PUT", "id": "compute.regionBackendServices.update", "parameterOrder": [ @@ -15874,6 +16304,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/commitments", @@ -16014,6 +16449,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/commitments", @@ -16166,6 +16606,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/diskTypes", @@ -16506,6 +16951,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/disks", @@ -16930,6 +17380,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/healthCheckServices", @@ -17168,6 +17623,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/healthChecks", @@ -17675,6 +18135,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/instanceGroupManagers", @@ -17688,7 +18153,7 @@ ] }, "listErrors": { - "description": "Lists all errors thrown by actions on instances for a given regional managed instance group.", + "description": "Lists all errors thrown by actions on instances for a given regional managed instance group. The filter and orderBy query parameters are not supported.", "httpMethod": "GET", "id": "compute.regionInstanceGroupManagers.listErrors", "parameterOrder": [ @@ -17738,6 +18203,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", @@ -17751,7 +18221,7 @@ ] }, "listManagedInstances": { - "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", + "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported.", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.listManagedInstances", "parameterOrder": [ @@ -17801,6 +18271,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", @@ -17814,7 +18289,7 @@ ] }, "listPerInstanceConfigs": { - "description": "Lists all of the per-instance configs defined for the managed instance group.", + "description": "Lists all of the per-instance configs defined for the managed instance group. The orderBy query parameter is not supported.", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", "parameterOrder": [ @@ -17864,6 +18339,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", @@ -18438,6 +18918,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/instanceGroups", @@ -18451,7 +18936,7 @@ ] }, "listInstances": { - "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", + "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. The orderBy query parameter is not supported.", "httpMethod": "POST", "id": "compute.regionInstanceGroups.listInstances", "parameterOrder": [ @@ -18501,6 +18986,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", @@ -18610,6 +19100,195 @@ } } }, + "regionNetworkEndpointGroups": { + "methods": { + "delete": { + "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", + "httpMethod": "DELETE", + "id": "compute.regionNetworkEndpointGroups.delete", + "parameterOrder": [ + "project", + "region", + "networkEndpointGroup" + ], + "parameters": { + "networkEndpointGroup": { + "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where the network endpoint group is located. 
It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", + "httpMethod": "GET", + "id": "compute.regionNetworkEndpointGroups.get", + "parameterOrder": [ + "project", + "region", + "networkEndpointGroup" + ], + "parameters": { + "networkEndpointGroup": { + "description": "The name of the network endpoint group. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "response": { + "$ref": "NetworkEndpointGroup" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + "httpMethod": "POST", + "id": "compute.regionNetworkEndpointGroups.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where you want to create the network endpoint group. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkEndpointGroups", + "request": { + "$ref": "NetworkEndpointGroup" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", + "httpMethod": "GET", + "id": "compute.regionNetworkEndpointGroups.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/networkEndpointGroups", + "response": { + "$ref": "NetworkEndpointGroupList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, "regionNotificationEndpoints": { "methods": { "delete": { @@ -18786,6 +19465,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/notificationEndpoints", @@ -18927,6 +19611,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/operations", @@ -19159,6 +19848,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/sslCertificates", @@ -19349,6 +20043,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/targetHttpProxies", @@ -19588,6 +20287,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/targetHttpsProxies", @@ -19925,6 +20629,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/urlMaps", @@ -20154,6 +20863,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions", @@ -20212,6 +20926,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/reservations", @@ -20440,6 +21159,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "Name of the zone for this request.", "location": "path", @@ -20642,6 +21366,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/resourcePolicies", @@ -20876,6 +21605,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/resourcePolicies", @@ -21023,6 +21757,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/routers", @@ -21175,6 +21914,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "router": { "description": "Name of the Router resource to query for Nat Mapping information of VM endpoints.", "location": "path", @@ -21321,6 +22065,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/routers", @@ -21667,6 +22416,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/routes", @@ -21948,6 +22702,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/securityPolicies", @@ -21997,6 +22756,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/securityPolicies/listPreconfiguredExpressionSets", @@ -22324,6 +23088,39 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "insert": { + "description": "Creates a snapshot in the specified project using the data included in the request.", + "httpMethod": "POST", + "id": "compute.snapshots.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/snapshots", + "request": { + "$ref": "Snapshot" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "list": { "description": "Retrieves the list of Snapshot resources contained within the specified project.", "httpMethod": "GET", @@ -22361,6 +23158,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/snapshots", @@ -22528,6 +23330,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/sslCertificates", @@ -22682,6 +23489,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/sslCertificates", @@ -22875,6 +23687,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/sslPolicies", @@ -22924,6 +23741,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/sslPolicies/listAvailableFeatures", @@ -23059,6 +23881,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/subnetworks", @@ -23342,6 +24169,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/subnetworks", @@ -23355,7 +24187,7 @@ ] }, "listUsable": { - "description": "Retrieves an aggregated list of all usable subnetworks in the project. The list contains all of the subnetworks in the project and the subnetworks that were shared by a Shared VPC host project.", + "description": "Retrieves an aggregated list of all usable subnetworks in the project.", "httpMethod": "GET", "id": "compute.subnetworks.listUsable", "parameterOrder": [ @@ -23391,6 +24223,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/subnetworks/listUsable", @@ -23598,6 +24435,210 @@ } } }, + "targetGrpcProxies": { + "methods": { + "delete": { + "description": "Deletes the specified TargetGrpcProxy in the given scope", + "httpMethod": "DELETE", + "id": "compute.targetGrpcProxies.delete", + "parameterOrder": [ + "project", + "targetGrpcProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetGrpcProxy": { + "description": "Name of the TargetGrpcProxy resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified TargetGrpcProxy resource in the given scope.", + "httpMethod": "GET", + "id": "compute.targetGrpcProxies.get", + "parameterOrder": [ + "project", + "targetGrpcProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "targetGrpcProxy": { + "description": "Name of the TargetGrpcProxy resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", + "response": { + "$ref": "TargetGrpcProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a TargetGrpcProxy in the specified project in the given scope using the parameters that are included in the request.", + "httpMethod": "POST", + "id": "compute.targetGrpcProxies.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/targetGrpcProxies", + "request": { + "$ref": "TargetGrpcProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Lists the TargetGrpcProxies for a project in the given scope.", + "httpMethod": "GET", + "id": "compute.targetGrpcProxies.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/global/targetGrpcProxies", + "response": { + "$ref": "TargetGrpcProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Patches the specified TargetGrpcProxy resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.targetGrpcProxies.patch", + "parameterOrder": [ + "project", + "targetGrpcProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetGrpcProxy": { + "description": "Name of the TargetGrpcProxy resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", + "request": { + "$ref": "TargetGrpcProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, "targetHttpProxies": { "methods": { "aggregatedList": { @@ -23642,6 +24683,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/targetHttpProxies", @@ -23796,6 +24842,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/targetHttpProxies", @@ -23808,6 +24859,47 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "patch": { + "description": "Patches the specified TargetHttpProxy resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "PATCH", + "id": "compute.targetHttpProxies.patch", + "parameterOrder": [ + "project", + "targetHttpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", + "request": { + "$ref": "TargetHttpProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setUrlMap": { "description": "Changes the URL map for TargetHttpProxy.", "httpMethod": "POST", @@ -23932,6 +25024,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/targetHttpsProxies", @@ -24086,6 +25183,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/targetHttpsProxies", @@ -24343,6 +25445,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/targetInstances", @@ -24523,6 +25630,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "Name of the zone scoping this request.", "location": "path", @@ -24730,6 +25842,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/targetPools", @@ -24961,6 +26078,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/targetPools", @@ -25317,6 +26439,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/targetSslProxies", @@ -25675,6 +26802,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/targetTcpProxies", @@ -25815,6 +26947,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/targetVpnGateways", @@ -26001,6 +27138,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/targetVpnGateways", @@ -26153,6 +27295,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/urlMaps", @@ -26348,6 +27495,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/global/urlMaps", @@ -26561,6 +27713,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/vpnGateways", @@ -26789,6 +27946,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/vpnGateways", @@ -26896,6 +28058,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/aggregated/vpnTunnels", @@ -27082,6 +28249,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/regions/{region}/vpnTunnels", @@ -27311,6 +28483,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "Name of the zone for request.", "location": "path", @@ -27446,6 +28623,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "projects/{project}/zones", @@ -27461,7 +28643,7 @@ } } }, - "revision": "20200427", + "revision": "20200910", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -27507,7 +28689,7 @@ "type": "string" }, "maximumCardsPerInstance": { - "description": "[Output Only] Maximum accelerator cards allowed per instance.", + "description": "[Output Only] Maximum number of accelerator cards allowed per instance.", "format": "int32", "type": "integer" }, @@ -27517,7 +28699,7 @@ "type": "string" }, "selfLink": { - "description": "[Output Only] Server-defined fully-qualified URL for this resource.", + "description": "[Output Only] Server-defined, fully qualified URL for this resource.", "type": "string" }, "zone": { @@ -27555,6 +28737,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -27577,6 +28766,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -27608,6 +28798,7 @@ "", "", "", + "", "" ], "type": "string" @@ -27689,6 +28880,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -27720,6 +28912,7 @@ "", "", "", + "", "" ], "type": "string" @@ -27783,6 +28976,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -27814,6 +29008,7 @@ "", "", "", + "", "" ], "type": "string" @@ -28075,6 +29270,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": 
"string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -28097,6 +29299,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -28128,6 +29331,7 @@ "", "", "", + "", "" ], "type": "string" @@ -28209,6 +29413,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -28240,6 +29445,7 @@ "", "", "", + "", "" ], "type": "string" @@ -28303,6 +29509,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -28334,6 +29541,7 @@ "", "", "", + "", "" ], "type": "string" @@ -28404,7 +29612,7 @@ "type": "object" }, "AllocationSpecificSKUAllocationReservedInstanceProperties": { - "description": "Properties of the SKU instances being reserved.", + "description": "Properties of the SKU instances being reserved. Next ID: 9", "id": "AllocationSpecificSKUAllocationReservedInstanceProperties", "properties": { "guestAccelerators": { @@ -28588,6 +29796,10 @@ "description": "Labels to apply to this disk. These can be later modified by the disks.setLabels method. This field is only applicable for persistent disks.", "type": "object" }, + "multiWriter": { + "description": "Indicates whether or not the disk can be read/write attached to more than one instance.", + "type": "boolean" + }, "onUpdateAction": { "description": "Specifies which action to take on instance update with this disk. Default is to use the existing disk.", "enum": [ @@ -28629,7 +29841,7 @@ "type": "object" }, "AuditConfig": { - "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n{ \"audit_configs\": [ { \"service\": \"allServices\" \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\", }, { \"log_type\": \"ADMIN_READ\", } ] }, { \"service\": \"sampleservice.googleapis.com\" \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. 
An AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n{ \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -28654,7 +29866,7 @@ "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions. Example:\n\n{ \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\", } ] }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions. Example:\n\n{ \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" } ] }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { @@ -28823,6 +30035,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -28845,6 +30064,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -28876,6 +30096,7 @@ "", "", "", + "", "" ], "type": "string" @@ -28957,6 +30178,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -28988,6 +30210,7 @@ "", "", "", + "", "" ], "type": "string" @@ -29038,6 +30261,7 @@ "MISSING_CUSTOM_METRIC_DATA_POINTS", "MISSING_LOAD_BALANCING_DATA_POINTS", "MODE_OFF", + "MODE_ONLY_SCALE_OUT", "MODE_ONLY_UP", "MORE_THAN_ONE_BACKEND_SERVICE", "NOT_ENOUGH_QUOTA_AVAILABLE", @@ -29064,6 +30288,7 @@ "", "", "", + "", "" ], "type": "string" @@ -29103,6 +30328,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -29134,6 +30360,7 @@ "", "", "", + "", "" ], "type": "string" @@ -29204,9 +30431,11 @@ "enum": [ "OFF", "ON", + "ONLY_SCALE_OUT", "ONLY_UP" ], "enumDescriptions": [ + "", "", "", "" @@ -29215,6 +30444,9 @@ }, "scaleDownControl": { "$ref": "AutoscalingPolicyScaleDownControl" + }, + 
"scaleInControl": { + "$ref": "AutoscalingPolicyScaleInControl" } }, "type": "object" @@ -29223,6 +30455,20 @@ "description": "CPU utilization policy.", "id": "AutoscalingPolicyCpuUtilization", "properties": { + "predictiveMethod": { + "description": "Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:\n\n* NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics. * OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.", + "enum": [ + "NONE", + "OPTIMIZE_AVAILABILITY", + "PREDICTIVE_METHOD_UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "utilizationTarget": { "description": "The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.6.\n\nIf the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization.\n\nIf the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.", "format": "double", @@ -29249,7 +30495,7 @@ "type": "number" }, "utilizationTarget": { - "description": "The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric.\n\nFor example, a good metric to use as a utilization_target is compute.googleapis.com/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.", + "description": "The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric.\n\nFor example, a good metric to use as a utilization_target is https://www.googleapis.com/compute/v1/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.", "format": "double", "type": "number" }, @@ -29298,6 +30544,22 @@ }, "type": "object" }, + "AutoscalingPolicyScaleInControl": { + "description": "Configuration that allows for slower scale in so that even if Autoscaler recommends an abrupt scale in of a MIG, it will be throttled as specified by the parameters below.", + "id": "AutoscalingPolicyScaleInControl", + "properties": { + "maxScaledInReplicas": { + "$ref": "FixedOrPercent", + "description": "Maximum allowed number (or %) of VMs that can be deducted from the peak recommendation during the window autoscaler looks at when computing recommendations. Possibly all these VMs can be deleted at once so user service needs to be prepared to lose that many VMs in one step." 
+ }, + "timeWindowSec": { + "description": "How long back autoscaling should look when computing recommendations to include directives regarding slower scale in, as described above.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "Backend": { "description": "Message containing information of one individual backend.", "id": "Backend", @@ -29317,7 +30579,7 @@ "type": "string" }, "capacityScaler": { - "description": "A multiplier applied to the group's maximum servicing capacity (based on UTILIZATION, RATE or CONNECTION). Default value is 1, which means the group will serve up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available Capacity. Valid range is [0.0,1.0].\n\nThis cannot be used for internal load balancing.", + "description": "A multiplier applied to the group's maximum servicing capacity (based on UTILIZATION, RATE or CONNECTION). Default value is 1, which means the group will serve up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available capacity. Valid range is 0.0 and [0.1,1.0]. You cannot configure a setting larger than 0 and smaller than 0.1. You cannot configure a setting of 0 when there is only one backend attached to the backend service.\n\nThis cannot be used for internal load balancing.", "format": "float", "type": "number" }, @@ -29387,6 +30649,13 @@ "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, + "customResponseHeaders": { + "description": "Headers that the HTTP/S load balancer should add to proxied responses.", + "items": { + "type": "string" + }, + "type": "array" + }, "description": { "description": "An optional textual description of the resource; provided by the client when the resource is created.", "type": "string" @@ -29421,6 +30690,60 @@ "description": "Message containing Cloud CDN configuration for a backend bucket.", "id": "BackendBucketCdnPolicy", "properties": { + "bypassCacheOnRequestHeaders": { + "description": "Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings.", + "items": { + "$ref": "BackendBucketCdnPolicyBypassCacheOnRequestHeader" + }, + "type": "array" + }, + "cacheMode": { + "description": "Specifies the cache setting for all responses from this backend. The possible values are:\n\nUSE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server.\n\nFORCE_CACHE_ALL Cache all content, ignoring any \"private\", \"no-store\" or \"no-cache\" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content.\n\nCACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). 
Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached.", + "enum": [ + "CACHE_ALL_STATIC", + "FORCE_CACHE_ALL", + "INVALID_CACHE_MODE", + "USE_ORIGIN_HEADERS" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, + "clientTtl": { + "description": "Specifies a separate client (e.g. browser client) TTL, separate from the TTL for Cloud CDN's edge caches. Leaving this empty will use the same cache TTL for both Cloud CDN and the client-facing response. The maximum allowed value is 86400s (1 day).", + "format": "int32", + "type": "integer" + }, + "defaultTtl": { + "description": "Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of \"0\" means \"always revalidate\". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.", + "format": "int32", + "type": "integer" + }, + "maxTtl": { + "description": "Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of \"0\" means \"always revalidate\". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.", + "format": "int32", + "type": "integer" + }, + "negativeCaching": { + "description": "Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Found), 421 (Misdirected Request), 501 (Not Implemented): 60s These defaults can be overridden in negative_caching_policy", + "type": "boolean" + }, + "negativeCachingPolicy": { + "description": "Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists.", + "items": { + "$ref": "BackendBucketCdnPolicyNegativeCachingPolicy" + }, + "type": "array" + }, + "serveWhileStale": { + "description": "Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default \"max-stale\" duration for any cached responses that do not specify a max-stale directive. 
Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale.", + "format": "int32", + "type": "integer" + }, "signedUrlCacheMaxAgeSec": { "description": "Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a \"Cache-Control: public, max-age=[TTL]\" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered.", "format": "int64", @@ -29436,6 +30759,34 @@ }, "type": "object" }, + "BackendBucketCdnPolicyBypassCacheOnRequestHeader": { + "description": "Bypass the cache when the specified request headers are present, e.g. Pragma or Authorization headers. Values are case insensitive. The presence of such a header overrides the cache_mode setting.", + "id": "BackendBucketCdnPolicyBypassCacheOnRequestHeader", + "properties": { + "headerName": { + "description": "The header field name to match on when bypassing cache. Values are case-insensitive.", + "type": "string" + } + }, + "type": "object" + }, + "BackendBucketCdnPolicyNegativeCachingPolicy": { + "description": "Specify CDN TTLs for response error codes.", + "id": "BackendBucketCdnPolicyNegativeCachingPolicy", + "properties": { + "code": { + "description": "The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 308, 404, 405, 410, 421, 451 and 501 are can be specified as values, and you cannot specify a status code more than once.", + "format": "int32", + "type": "integer" + }, + "ttl": { + "description": "The TTL (in seconds) to cache responses with the corresponding status code for. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "BackendBucketList": { "description": "Contains a list of BackendBucket resources.", "id": "BackendBucketList", @@ -29486,6 +30837,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -29517,6 +30869,7 @@ "", "", "", + "", "" ], "type": "string" @@ -29549,11 +30902,11 @@ "type": "object" }, "BackendService": { - "description": "Represents a Backend Service resource.\n\nA backend service contains configuration values for Google Cloud Platform load balancing services.\n\nBackend services in Google Compute Engine can be either regionally or globally scoped.\n\n* [Global](/compute/docs/reference/rest/{$api_version}/backendServices) * [Regional](/compute/docs/reference/rest/{$api_version}/regionBackendServices)\n\nFor more information, read Backend Services.\n\n(== resource_for {$api_version}.backendService ==)", + "description": "Represents a Backend Service resource.\n\nA backend service defines how Google Cloud load balancers distribute traffic. 
The backend service configuration contains a set of values, such as the protocol used to connect to backends, various distribution and session settings, health checks, and timeouts. These settings provide fine-grained control over how your load balancer behaves. Most of the settings have default values that allow for easy configuration if you need to get started quickly.\n\nBackend services in Google Compute Engine can be either regionally or globally scoped.\n\n* [Global](/compute/docs/reference/rest/{$api_version}/backendServices) * [Regional](/compute/docs/reference/rest/{$api_version}/regionBackendServices)\n\nFor more information, see Backend Services.\n\n(== resource_for {$api_version}.backendService ==)", "id": "BackendService", "properties": { "affinityCookieTtlSec": { - "description": "If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is one day (86,400).", + "description": "Lifetime of cookies in seconds. Only applicable if the loadBalancingScheme is EXTERNAL, INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED, the protocol is HTTP or HTTPS, and the sessionAffinity is GENERATED_COOKIE, or HTTP_COOKIE.\n\nIf set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is one day (86,400).\n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, @@ -29570,14 +30923,14 @@ }, "circuitBreakers": { "$ref": "CircuitBreakers", - "description": "Settings controlling the volume of connections to a backend service. If not set, this feature is considered disabled.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." + "description": "Settings controlling the volume of connections to a backend service. If not set, this feature is considered disabled.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. \n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true." }, "connectionDraining": { "$ref": "ConnectionDraining" }, "consistentHash": { "$ref": "ConsistentHashLoadBalancerSettings", - "description": "Consistent Hash-based load balancing can be used to provide soft session affinity based on HTTP headers, cookies or other properties. This load balancing policy is applicable only for HTTP connections. The affinity to a particular destination host will be lost when one or more hosts are added/removed from the destination service. This field specifies parameters that control consistent hashing. This field is only applicable when localityLbPolicy is set to MAGLEV or RING_HASH.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." 
+ "description": "Consistent Hash-based load balancing can be used to provide soft session affinity based on HTTP headers, cookies or other properties. This load balancing policy is applicable only for HTTP connections. The affinity to a particular destination host will be lost when one or more hosts are added/removed from the destination service. This field specifies parameters that control consistent hashing. This field is only applicable when localityLbPolicy is set to MAGLEV or RING_HASH.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. \n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true." }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", @@ -29590,6 +30943,13 @@ }, "type": "array" }, + "customResponseHeaders": { + "description": "Headers that the HTTP/S load balancer should add to proxied responses.", + "items": { + "type": "string" + }, + "type": "array" + }, "description": { "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" @@ -29608,14 +30968,15 @@ "type": "string" }, "healthChecks": { - "description": "The list of URLs to the healthChecks, httpHealthChecks (legacy), or httpsHealthChecks (legacy) resource for health checking this backend service. Not all backend services support legacy health checks. See Load balancer guide. Currently at most one health check can be specified. Backend services with instance group or zonal NEG backends must have a health check. Backend services with internet NEG backends must not have a health check. A health check must", + "description": "The list of URLs to the healthChecks, httpHealthChecks (legacy), or httpsHealthChecks (legacy) resource for health checking this backend service. Not all backend services support legacy health checks. See Load balancer guide. Currently, at most one health check can be specified for each backend service. Backend services with instance group or zonal NEG backends must have a health check. Backend services with internet or serverless NEG backends must not have a health check.", "items": { "type": "string" }, "type": "array" }, "iap": { - "$ref": "BackendServiceIAP" + "$ref": "BackendServiceIAP", + "description": "The configurations for Identity-Aware Proxy on this resource." }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", @@ -29646,7 +31007,7 @@ "type": "string" }, "localityLbPolicy": { - "description": "The load balancing algorithm used within the scope of the locality. The possible values are: \n- ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. \n- LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. \n- RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. \n- RANDOM: The load balancer selects a random healthy host. 
\n- ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. \n- MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, refer to https://ai.google/research/pubs/pub44824 \n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. \n\nIf sessionAffinity is not NONE, and this field is not set to \u003eMAGLEV or RING_HASH, session affinity settings will not take effect.", + "description": "The load balancing algorithm used within the scope of the locality. The possible values are: \n- ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. \n- LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. \n- RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. \n- RANDOM: The load balancer selects a random healthy host. \n- ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. \n- MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, see https://ai.google/research/pubs/pub44824 \n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. \n\nIf sessionAffinity is not NONE, and this field is not set to MAGLEV or RING_HASH, session affinity settings will not take effect.\n\nOnly the default ROUND_ROBIN policy is supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "enum": [ "INVALID_LB_POLICY", "LEAST_REQUEST", @@ -29682,7 +31043,7 @@ }, "outlierDetection": { "$ref": "OutlierDetection", - "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. If not set, this feature is considered disabled.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." + "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. 
If not set, this feature is considered disabled.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. \n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true." }, "port": { "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80.\n\nThis cannot be used if the loadBalancingScheme is INTERNAL (Internal TCP/UDP Load Balancing).", @@ -29694,8 +31055,9 @@ "type": "string" }, "protocol": { - "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, HTTP2, TCP, SSL, or UDP. depending on the chosen load balancer or Traffic Director configuration. Refer to the documentation for the load balancer or for Traffic Director for more information.", + "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, HTTP2, TCP, SSL, UDP or GRPC. depending on the chosen load balancer or Traffic Director configuration. Refer to the documentation for the load balancer or for Traffic Director for more information.\n\nMust be set to GRPC when the backend service is referenced by a URL map that is bound to target gRPC proxy.", "enum": [ + "GRPC", "HTTP", "HTTP2", "HTTPS", @@ -29709,6 +31071,7 @@ "", "", "", + "", "" ], "type": "string" @@ -29730,7 +31093,7 @@ "type": "string" }, "sessionAffinity": { - "description": "Type of session affinity to use. The default is NONE. Session affinity is not applicable if the --protocol is UDP.\n\nWhen the loadBalancingScheme is EXTERNAL, possible values are NONE, CLIENT_IP, or GENERATED_COOKIE. You can use GENERATED_COOKIE if the protocol is HTTP or HTTPS.\n\nWhen the loadBalancingScheme is INTERNAL, possible values are NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO.\n\nWhen the loadBalancingScheme is INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED, possible values are NONE, CLIENT_IP, GENERATED_COOKIE, HEADER_FIELD, or HTTP_COOKIE.", + "description": "Type of session affinity to use. The default is NONE. Session affinity is not applicable if the --protocol is UDP.\n\nWhen the loadBalancingScheme is EXTERNAL, possible values are NONE, CLIENT_IP, or GENERATED_COOKIE. You can use GENERATED_COOKIE if the protocol is HTTP or HTTPS.\n\nWhen the loadBalancingScheme is INTERNAL, possible values are NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO.\n\nWhen the loadBalancingScheme is INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED, possible values are NONE, CLIENT_IP, GENERATED_COOKIE, HEADER_FIELD, or HTTP_COOKIE.\n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "enum": [ "CLIENT_IP", "CLIENT_IP_PORT_PROTO", @@ -29752,7 +31115,7 @@ "type": "string" }, "timeoutSec": { - "description": "The backend service timeout has a different meaning depending on the type of load balancer. For more information read, Backend service settings The default is 30 seconds.", + "description": "The backend service timeout has a different meaning depending on the type of load balancer. 
For more information see, Backend service settings The default is 30 seconds.", "format": "int32", "type": "integer" } @@ -29788,6 +31151,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -29810,6 +31180,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -29841,6 +31212,7 @@ "", "", "", + "", "" ], "type": "string" @@ -29876,10 +31248,64 @@ "description": "Message containing Cloud CDN configuration for a backend service.", "id": "BackendServiceCdnPolicy", "properties": { + "bypassCacheOnRequestHeaders": { + "description": "Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings.", + "items": { + "$ref": "BackendServiceCdnPolicyBypassCacheOnRequestHeader" + }, + "type": "array" + }, "cacheKeyPolicy": { "$ref": "CacheKeyPolicy", "description": "The CacheKeyPolicy for this CdnPolicy." }, + "cacheMode": { + "description": "Specifies the cache setting for all responses from this backend. The possible values are:\n\nUSE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server.\n\nFORCE_CACHE_ALL Cache all content, ignoring any \"private\", \"no-store\" or \"no-cache\" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content.\n\nCACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached.", + "enum": [ + "CACHE_ALL_STATIC", + "FORCE_CACHE_ALL", + "INVALID_CACHE_MODE", + "USE_ORIGIN_HEADERS" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, + "clientTtl": { + "description": "Specifies a separate client (e.g. browser client) TTL, separate from the TTL for Cloud CDN's edge caches. Leaving this empty will use the same cache TTL for both Cloud CDN and the client-facing response. The maximum allowed value is 86400s (1 day).", + "format": "int32", + "type": "integer" + }, + "defaultTtl": { + "description": "Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of \"0\" means \"always revalidate\". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.", + "format": "int32", + "type": "integer" + }, + "maxTtl": { + "description": "Specifies the maximum allowed TTL for cached content served by this origin. 
Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of \"0\" means \"always revalidate\". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.", + "format": "int32", + "type": "integer" + }, + "negativeCaching": { + "description": "Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Found), 421 (Misdirected Request), 501 (Not Implemented): 60s These defaults can be overridden in negative_caching_policy", + "type": "boolean" + }, + "negativeCachingPolicy": { + "description": "Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists.", + "items": { + "$ref": "BackendServiceCdnPolicyNegativeCachingPolicy" + }, + "type": "array" + }, + "serveWhileStale": { + "description": "Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default \"max-stale\" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale.", + "format": "int32", + "type": "integer" + }, "signedUrlCacheMaxAgeSec": { "description": "Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a \"Cache-Control: public, max-age=[TTL]\" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered.", "format": "int64", @@ -29895,6 +31321,34 @@ }, "type": "object" }, + "BackendServiceCdnPolicyBypassCacheOnRequestHeader": { + "description": "Bypass the cache when the specified request headers are present, e.g. Pragma or Authorization headers. Values are case insensitive. The presence of such a header overrides the cache_mode setting.", + "id": "BackendServiceCdnPolicyBypassCacheOnRequestHeader", + "properties": { + "headerName": { + "description": "The header field name to match on when bypassing cache. 
Values are case-insensitive.", + "type": "string" + } + }, + "type": "object" + }, + "BackendServiceCdnPolicyNegativeCachingPolicy": { + "description": "Specify CDN TTLs for response error codes.", + "id": "BackendServiceCdnPolicyNegativeCachingPolicy", + "properties": { + "code": { + "description": "The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 308, 404, 405, 410, 421, 451 and 501 are can be specified as values, and you cannot specify a status code more than once.", + "format": "int32", + "type": "integer" + }, + "ttl": { + "description": "The TTL (in seconds) to cache responses with the corresponding status code for. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "BackendServiceFailoverPolicy": { "description": "Applicable only to Failover for Internal TCP/UDP Load Balancing. On failover or failback, this field indicates whether connection draining will be honored. GCP has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).", "id": "BackendServiceFailoverPolicy", @@ -29945,12 +31399,15 @@ "id": "BackendServiceIAP", "properties": { "enabled": { + "description": "Whether the serving infrastructure will authenticate and authorize all incoming requests. If true, the oauth2ClientId and oauth2ClientSecret fields must be non-empty.", "type": "boolean" }, "oauth2ClientId": { + "description": "OAuth2 client ID to use for the authentication flow.", "type": "string" }, "oauth2ClientSecret": { + "description": "OAuth2 client secret to use for the authentication flow. For security reasons, this value cannot be retrieved via the API. Instead, the SHA-256 hash of the value is returned in the oauth2ClientSecretSha256 field.", "type": "string" }, "oauth2ClientSecretSha256": { @@ -30010,6 +31467,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -30041,6 +31499,7 @@ "", "", "", + "", "" ], "type": "string" @@ -30129,6 +31588,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -30160,6 +31620,7 @@ "", "", "", + "", "" ], "type": "string" @@ -30439,6 +31900,10 @@ "description": "Associates `members` with a `role`.", "id": "Binding", "properties": { + "bindingId": { + "description": "A client-specified ID for this binding. Expected to be globally unique to support the internal bindings-by-ID API.", + "type": "string" + }, "condition": { "$ref": "Expr", "description": "The condition that is associated with this binding.\n\nIf the condition evaluates to `true`, then this binding applies to the current request.\n\nIf the condition evaluates to `false`, then this binding does not apply to the current request. 
However, a different role binding might grant the same role to one or more of the members in this binding.\n\nTo learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." @@ -30543,6 +32008,20 @@ "description": "Represents a regional Commitment resource.\n\nCreating a commitment resource means that you are purchasing a committed use contract with an explicit start and end time. You can create commitments based on vCPUs and memory usage and receive discounted rates. For full details, read Signing Up for Committed Use Discounts. (== resource_for {$api_version}.regionCommitments ==)", "id": "Commitment", "properties": { + "category": { + "description": "The category of the commitment. Category MACHINE specifies commitments composed of machine resources such as VCPU or MEMORY, listed in resources. Category LICENSE specifies commitments composed of software licenses, listed in licenseResources. Note that only MACHINE commitments should have a Type specified.", + "enum": [ + "CATEGORY_UNSPECIFIED", + "LICENSE", + "MACHINE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -30565,6 +32044,10 @@ "description": "[Output Only] Type of the resource. Always compute#commitment for commitments.", "type": "string" }, + "licenseResource": { + "$ref": "LicenseResourceCommitment", + "description": "The license specification required as part of a license commitment." + }, "name": { "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -30631,8 +32114,9 @@ "type": "string" }, "type": { - "description": "The type of commitment, which affects the discount rate and the eligible resources. Type MEMORY_OPTIMIZED specifies a commitment that will only apply to memory optimized machines.", + "description": "The type of commitment, which affects the discount rate and the eligible resources. Type MEMORY_OPTIMIZED specifies a commitment that will only apply to memory optimized machines. 
Type ACCELERATOR_OPTIMIZED specifies a commitment that will only apply to accelerator optimized machines.", "enum": [ + "ACCELERATOR_OPTIMIZED", "COMPUTE_OPTIMIZED", "GENERAL_PURPOSE", "GENERAL_PURPOSE_E2", @@ -30648,6 +32132,7 @@ "", "", "", + "", "" ], "type": "string" @@ -30683,6 +32168,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -30705,6 +32197,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -30736,6 +32229,7 @@ "", "", "", + "", "" ], "type": "string" @@ -30817,6 +32311,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -30848,6 +32343,7 @@ "", "", "", + "", "" ], "type": "string" @@ -30911,6 +32407,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -30942,6 +32439,7 @@ "", "", "", + "", "" ], "type": "string" @@ -30984,6 +32482,7 @@ "ATTRIBUTION", "AUTHORITY", "CREDENTIALS_TYPE", + "CREDS_ASSERTION", "JUSTIFICATION_TYPE", "NO_ATTR", "SECURITY_REALM" @@ -30995,6 +32494,7 @@ "", "", "", + "", "" ], "type": "string" @@ -31169,7 +32669,6 @@ "type": "object" }, "CustomerEncryptionKey": { - "description": "Represents a customer-supplied encryption key", "id": "CustomerEncryptionKey", "properties": { "kmsKeyName": { @@ -31280,6 +32779,20 @@ "format": "uint64", "type": "string" }, + "interface": { + "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI.", + "enum": [ + "NVME", + "SCSI", + "UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "kind": { "default": "compute#disk", "description": "[Output Only] Type of the resource. Always compute#disk for disks.", @@ -31320,6 +32833,10 @@ }, "type": "array" }, + "multiWriter": { + "description": "Indicates whether or not the disk can be read/write attached to more than one instance.", + "type": "boolean" + }, "name": { "annotations": { "required": [ @@ -31335,7 +32852,7 @@ "type": "string" }, "physicalBlockSizeBytes": { - "description": "Physical block size of the persistent disk, in bytes. If not present in a request, a default value is used. Currently supported sizes are 4096 and 16384, other sizes may be added in the future. If an unsupported value is requested, the error message will list the supported values for the caller's project.", + "description": "Physical block size of the persistent disk, in bytes. If not present in a request, a default value is used. The currently supported size is 4096, other sizes may be added in the future. If an unsupported value is requested, the error message will list the supported values for the caller's project.", "format": "int64", "type": "string" }, @@ -31362,10 +32879,18 @@ "type": "string" }, "sizeGb": { - "description": "Size of the persistent disk, specified in GB. 
You can specify this field when creating a persistent disk using the sourceImage or sourceSnapshot parameter, or specify it alone to create an empty persistent disk.\n\nIf you specify this field along with sourceImage or sourceSnapshot, the value of sizeGb must not be less than the size of the sourceImage or the size of the snapshot. Acceptable values are 1 to 65536, inclusive.", + "description": "Size, in GB, of the persistent disk. You can specify this field when creating a persistent disk using the sourceImage, sourceSnapshot, or sourceDisk parameter, or specify it alone to create an empty persistent disk.\n\nIf you specify this field along with a source, the value of sizeGb must not be less than the size of the source. Acceptable values are 1 to 65536, inclusive.", "format": "int64", "type": "string" }, + "sourceDisk": { + "description": "The source disk used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk", + "type": "string" + }, + "sourceDiskId": { + "description": "[Output Only] The unique ID of the disk used to create this disk. This value identifies the exact disk that was used to create this persistent disk. For example, if you created the persistent disk from a disk that was later deleted and recreated under the same name, the source disk ID would identify the exact version of the disk that was used.", + "type": "string" + }, "sourceImage": { "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image:\nprojects/debian-cloud/global/images/family/debian-9\n\n\nAlternatively, use a specific version of a public operating system image:\nprojects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD\n\n\nTo create a disk with a custom image that you created, specify the image name in the following format:\nglobal/images/my-custom-image\n\n\nYou can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\nglobal/images/family/my-image-family", "type": "string" @@ -31390,6 +32915,10 @@ "description": "[Output Only] The unique ID of the snapshot used to create this disk. This value identifies the exact snapshot that was used to create this persistent disk. For example, if you created the persistent disk from a snapshot that was later deleted and recreated under the same name, the source snapshot ID would identify the exact version of the snapshot that was used.", "type": "string" }, + "sourceStorageObject": { + "description": "The full Google Cloud Storage URI where the disk image is stored. This file must be a gzip-compressed tarball whose name ends in .tar.gz or virtual machine disk whose name ends in vmdk. Valid URIs may start with gs:// or https://storage.googleapis.com/.", + "type": "string" + }, "status": { "description": "[Output Only] The status of disk creation. CREATING: Disk is provisioning. RESTORING: Source data is being copied into the disk. FAILED: Disk creation failed. READY: Disk is ready for use. 
DELETING: Disk is deleting.", "enum": [ @@ -31421,7 +32950,7 @@ "type": "string" }, "type": { - "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk. For example: projects/project/zones/zone/diskTypes/pd-standard or pd-ssd", + "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk. For example: projects/project/zones/zone/diskTypes/pd-standard or pd-ssd", "type": "string" }, "users": { @@ -31466,6 +32995,168 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "DiskInstantiationConfig": { + "description": "A specification of the desired way to instantiate a disk in the instance template when its created from a source instance.", + "id": "DiskInstantiationConfig", + "properties": { + "autoDelete": { + "description": "Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance).", + "type": "boolean" + }, + "customImage": { + "description": "The custom source image to be used to restore this disk when instantiating this instance template.", + "type": "string" + }, + "deviceName": { + "description": "Specifies the device name of the disk to which the configurations apply to.", + "type": "string" + }, + "instantiateFrom": { + "description": "Specifies whether to include the disk and what image to use. Possible values are: \n- source-image: to use the same image that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. \n- source-image-family: to use the same image family that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. \n- custom-image: to use a user-provided image url for disk creation. Applicable to the boot disk and additional read-write disks. \n- attach-read-only: to attach a read-only disk. Applicable to read-only disks. \n- do-not-include: to exclude a disk from the template. Applicable to additional read-write disks, local SSDs, and read-only disks.", + "enum": [ + "ATTACH_READ_ONLY", + "BLANK", + "CUSTOM_IMAGE", + "DEFAULT", + "DO_NOT_INCLUDE", + "SOURCE_IMAGE", + "SOURCE_IMAGE_FAMILY" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "DiskList": { + "description": "A list of Disk resources.", + "id": "DiskList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of Disk resources.", + "items": { + "$ref": "Disk" + }, + "type": "array" + }, + "kind": { + "default": "compute#diskList", + "description": "[Output Only] Type of resource. Always compute#diskList for lists of disks.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -31488,6 +33179,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -31498,158 +33190,6 @@ ], "enumDescriptions": [ "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "DiskInstantiationConfig": { - "description": "A specification of the desired way to instantiate a disk in the instance template when its created from a source instance.", - "id": "DiskInstantiationConfig", - "properties": { - "autoDelete": { - "description": "Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance).", - "type": "boolean" - }, - "customImage": { - "description": "The custom source image to be used to restore this disk when instantiating this instance template.", - "type": "string" - }, - "deviceName": { - "description": "Specifies the device name of the disk to which the configurations apply to.", - "type": "string" - }, - "instantiateFrom": { - "description": "Specifies whether to include the disk and what image to use. Possible values are: \n- source-image: to use the same image that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. \n- source-image-family: to use the same image family that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. \n- custom-image: to use a user-provided image url for disk creation. Applicable to the boot disk and additional read-write disks. \n- attach-read-only: to attach a read-only disk. Applicable to read-only disks. \n- do-not-include: to exclude a disk from the template. 
Applicable to additional read-write disks, local SSDs, and read-only disks.", - "enum": [ - "ATTACH_READ_ONLY", - "BLANK", - "CUSTOM_IMAGE", - "DEFAULT", - "DO_NOT_INCLUDE", - "SOURCE_IMAGE", - "SOURCE_IMAGE_FAMILY" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "DiskList": { - "description": "A list of Disk resources.", - "id": "DiskList", - "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of Disk resources.", - "items": { - "$ref": "Disk" - }, - "type": "array" - }, - "kind": { - "default": "compute#diskList", - "description": "[Output Only] Type of resource. Always compute#diskList for lists of disks.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ "", "", "", @@ -31800,6 +33340,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -31822,6 +33369,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -31853,6 +33401,7 @@ "", "", "", + "", "" ], "type": "string" @@ -31934,6 +33483,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -31965,6 +33515,7 @@ "", "", "", + "", "" ], "type": "string" @@ -32028,6 +33579,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -32059,6 +33611,7 @@ "", "", "", + "", "" ], "type": "string" @@ -32159,6 +33712,7 @@ 
"NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -32190,6 +33744,7 @@ "", "", "", + "", "" ], "type": "string" @@ -32364,6 +33919,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -32395,6 +33951,7 @@ "", "", "", + "", "" ], "type": "string" @@ -32590,6 +34147,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -32621,6 +34179,7 @@ "", "", "", + "", "" ], "type": "string" @@ -32753,7 +34312,7 @@ "type": "boolean" }, "enableLogging": { - "description": "Deprecated in favor of enable in LogConfig. This field denotes whether to enable logging for a particular firewall rule. If logging is enabled, logs will be exported to Stackdriver.", + "description": "Deprecated in favor of enable in LogConfig. This field denotes whether to enable logging for a particular firewall rule. If logging is enabled, logs will be exported t Cloud Logging.", "type": "boolean" }, "id": { @@ -32768,7 +34327,7 @@ }, "logConfig": { "$ref": "FirewallLogConfig", - "description": "This field denotes the logging options for a particular firewall rule. If logging is enabled, logs will be exported to Stackdriver." + "description": "This field denotes the logging options for a particular firewall rule. If logging is enabled, logs will be exported to Cloud Logging." }, "name": { "annotations": { @@ -32882,6 +34441,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -32913,6 +34473,7 @@ "", "", "", + "", "" ], "type": "string" @@ -32994,7 +34555,7 @@ "id": "ForwardingRule", "properties": { "IPAddress": { - "description": "IP address that this forwarding rule serves. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the target that you specify in the forwarding rule.\n\nIf you don't specify a reserved IP address, an ephemeral IP address is assigned. Methods for specifying an IP address:\n\n* IPv4 dotted decimal, as in `100.1.2.3` * Full URL, as in https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name * Partial URL or by name, as in: * projects/project_id/regions/region/addresses/address-name * regions/region/addresses/address-name * global/addresses/address-name * address-name \n\nThe loadBalancingScheme and the forwarding rule's target determine the type of IP address that you can use. For detailed information, refer to [IP address specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).", + "description": "IP address that this forwarding rule serves. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the target that you specify in the forwarding rule.\n\nIf you don't specify a reserved IP address, an ephemeral IP address is assigned. 
Methods for specifying an IP address:\n\n* IPv4 dotted decimal, as in `100.1.2.3` * Full URL, as in https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name * Partial URL or by name, as in: * projects/project_id/regions/region/addresses/address-name * regions/region/addresses/address-name * global/addresses/address-name * address-name \n\nThe loadBalancingScheme and the forwarding rule's target determine the type of IP address that you can use. For detailed information, refer to [IP address specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).\n\nMust be set to `0.0.0.0` when the target is targetGrpcProxy that has validateForProxyless field set to true.", "type": "string" }, "IPProtocol": { @@ -33101,7 +34662,7 @@ "type": "string" }, "metadataFilters": { - "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. If a match takes place, the relevant configuration is made available to those proxies. Otherwise, all the resources (e.g. TargetHttpProxy, UrlMap) referenced by the ForwardingRule will not be visible to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata.\nmetadataFilters specified here will be applifed before those specified in the UrlMap that this ForwardingRule references.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. When there is a match, the relevant configuration is made available to those proxies. Otherwise, all the resources (e.g. TargetHttpProxy, UrlMap) referenced by the ForwardingRule will not be visible to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. If multiple metadataFilters are specified, all of them need to be satisfied in order to be considered a match.\nmetadataFilters specified here will be applifed before those specified in the UrlMap that this ForwardingRule references.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", "items": { "$ref": "MetadataFilter" }, @@ -33113,7 +34674,7 @@ "type": "string" }, "network": { - "description": "This field is not used for external load balancing.\n\nFor INTERNAL and INTERNAL_SELF_MANAGED load balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used.", + "description": "This field is not used for external load balancing.\n\nFor internal load balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. 
If this field is not specified, the default network will be used.", "type": "string" }, "networkTier": { @@ -33129,7 +34690,7 @@ "type": "string" }, "portRange": { - "description": "When the load balancing scheme is EXTERNAL, INTERNAL_SELF_MANAGED and INTERNAL_MANAGED, you can specify a port_range. Use with a forwarding rule that points to a target proxy or a target pool. Do not use with a forwarding rule that points to a backend service. This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance.\n\nApplicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to ports in the specified range will be forwarded to target. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges.\n\nSome types of forwarding target have constraints on the acceptable ports: \n- TargetHttpProxy: 80, 8080 \n- TargetHttpsProxy: 443 \n- TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetVpnGateway: 500, 4500", + "description": "When the load balancing scheme is EXTERNAL, INTERNAL_SELF_MANAGED and INTERNAL_MANAGED, you can specify a port_range. Use with a forwarding rule that points to a target proxy or a target pool. Do not use with a forwarding rule that points to a backend service. This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetGrpcProxy, TargetVpnGateway, TargetPool, TargetInstance.\n\nApplicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to ports in the specified range will be forwarded to target. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges.\n\nSome types of forwarding target have constraints on the acceptable ports: \n- TargetHttpProxy: 80, 8080 \n- TargetHttpsProxy: 443 \n- TargetGrpcProxy: Any ports \n- TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetVpnGateway: 500, 4500", "type": "string" }, "ports": { @@ -33147,6 +34708,13 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "serviceDirectoryRegistrations": { + "description": "Service Directory resources to register this forwarding rule with. Currently, only supports a single Service Directory resource.", + "items": { + "$ref": "ForwardingRuleServiceDirectoryRegistration" + }, + "type": "array" + }, "serviceLabel": { "description": "An optional prefix to the service name for this Forwarding Rule. If specified, the prefix is the first label of the fully qualified service name.\n\nThe label must be 1-63 characters long, and comply with RFC1035. 
Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.\n\nThis field is only used for internal load balancing.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -33157,11 +34725,11 @@ "type": "string" }, "subnetwork": { - "description": "This field is only used for INTERNAL load balancing.\n\nFor internal load balancing, this field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule.\n\nIf the network specified is in auto subnet mode, this field is optional. However, if the network is in custom subnet mode, a subnetwork must be specified.", + "description": "This field is only used for internal load balancing.\n\nFor internal load balancing, this field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule.\n\nIf the network specified is in auto subnet mode, this field is optional. However, if the network is in custom subnet mode, a subnetwork must be specified.", "type": "string" }, "target": { - "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For INTERNAL_SELF_MANAGED load balancing, only targetHttpProxy is valid, not targetHttpsProxy.", + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must be in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For more information, see the \"Target\" column in [Port specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).", "type": "string" } }, @@ -33195,6 +34763,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -33217,6 +34792,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -33248,6 +34824,7 @@ "", "", "", + "", "" ], "type": "string" @@ -33329,6 +34906,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -33360,6 +34938,7 @@ "", "", "", + "", "" ], "type": "string" @@ -33400,6 +34979,21 @@ }, "type": "object" }, + "ForwardingRuleServiceDirectoryRegistration": { + "description": "Describes the auto-registration of the Forwarding Rule to Service Directory. 
The region and project of the Service Directory resource generated from this registration will be the same as this Forwarding Rule.", + "id": "ForwardingRuleServiceDirectoryRegistration", + "properties": { + "namespace": { + "description": "Service Directory namespace to register the forwarding rule under.", + "type": "string" + }, + "service": { + "description": "Service Directory service to register the forwarding rule under.", + "type": "string" + } + }, + "type": "object" + }, "ForwardingRulesScopedList": { "id": "ForwardingRulesScopedList", "properties": { @@ -33432,6 +35026,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -33463,6 +35058,7 @@ "", "", "", + "", "" ], "type": "string" @@ -33494,6 +35090,39 @@ }, "type": "object" }, + "GRPCHealthCheck": { + "id": "GRPCHealthCheck", + "properties": { + "grpcServiceName": { + "description": "The gRPC service name for the health check. This field is optional. The value of grpc_service_name has the following meanings by convention:\n- Empty service_name means the overall status of all services at the backend.\n- Non-empty service_name means the health of that gRPC service, as defined by the owner of the service.\nThe grpc_service_name can only be ASCII.", + "type": "string" + }, + "port": { + "description": "The port number for the health check request. Must be specified if port_name and port_specification are not set or if port_specification is USE_FIXED_PORT. Valid values are 1 through 65535.", + "format": "int32", + "type": "integer" + }, + "portName": { + "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. The port_name should conform to RFC1035.", + "type": "string" + }, + "portSpecification": { + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in port is used for health checking.\nUSE_NAMED_PORT: The portName is used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, gRPC health check follows behavior specified in port and portName fields.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "GlobalNetworkEndpointGroupsAttachEndpointsRequest": { "id": "GlobalNetworkEndpointGroupsAttachEndpointsRequest", "properties": { @@ -33635,6 +35264,7 @@ "GVNIC", "MULTI_IP_SUBNET", "SECURE_BOOT", + "SEV_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS" @@ -33646,6 +35276,7 @@ "", "", "", + "", "" ], "type": "string" @@ -33813,7 +35444,7 @@ "type": "object" }, "HealthCheck": { - "description": "Represents a Health Check resource.\n\nGoogle Compute Engine has two Health Check resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/healthChecks) * [Regional](/compute/docs/reference/rest/{$api_version}/regionHealthChecks)\n\nInternal HTTP(S) load balancers use regional health checks. All other types of GCP load balancers and managed instance group auto-healing use global health checks. 
For more information, read Health Check Concepts.\n\nTo perform health checks on network load balancers, you must use either httpHealthChecks or httpsHealthChecks.", + "description": "Represents a Health Check resource.\n\nGoogle Compute Engine has two Health Check resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/healthChecks) * [Regional](/compute/docs/reference/rest/{$api_version}/regionHealthChecks)\n\nInternal HTTP(S) load balancers must use regional health checks. Internal TCP/UDP load balancers can use either regional or global health checks. All other types of GCP load balancers and managed instance group auto-healing must use global health checks. For more information, read Health Check Concepts.\n\nTo perform health checks on network load balancers, you must use either httpHealthChecks or httpsHealthChecks.", "id": "HealthCheck", "properties": { "checkIntervalSec": { @@ -33829,6 +35460,9 @@ "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, + "grpcHealthCheck": { + "$ref": "GRPCHealthCheck" + }, "healthyThreshold": { "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", "format": "int32", @@ -33884,6 +35518,7 @@ "type": { "description": "Specifies the type of the healthCheck, either TCP, SSL, HTTP, HTTPS or HTTP2. If not specified, the default is TCP. Exactly one of the protocol-specific health check field must be specified, which must match type field.", "enum": [ + "GRPC", "HTTP", "HTTP2", "HTTPS", @@ -33897,6 +35532,7 @@ "", "", "", + "", "" ], "type": "string" @@ -33959,6 +35595,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -33990,6 +35627,7 @@ "", "", "", + "", "" ], "type": "string" @@ -34079,8 +35717,16 @@ "type": "string" }, "healthStatusAggregationStrategy": { - "description": "Policy for how the results from multiple health checks for the same endpoint are aggregated. \n- NO_AGGREGATION. An EndpointHealth message is returned for each backend in the health check service. \n- AND. If any backend's health check reports UNHEALTHY, then UNHEALTHY is the HealthState of the entire health check service. If all backend's are healthy, the HealthState of the health check service is HEALTHY. .", - "type": "any" + "description": "This field is deprecated. Use health_status_aggregation_policy instead.\n\nPolicy for how the results from multiple health checks for the same endpoint are aggregated. \n- NO_AGGREGATION. An EndpointHealth message is returned for each backend in the health check service. \n- AND. If any backend's health check reports UNHEALTHY, then UNHEALTHY is the HealthState of the entire health check service. If all backend's are healthy, the HealthState of the health check service is HEALTHY. .", + "enum": [ + "AND", + "NO_AGGREGATION" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" }, "id": { "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", @@ -34181,6 +35827,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -34212,6 +35859,7 @@ "", "", "", + "", "" ], "type": "string" @@ -34271,6 +35919,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -34293,6 +35948,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -34324,6 +35980,7 @@ "", "", "", + "", "" ], "type": "string" @@ -34387,6 +36044,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -34418,6 +36076,7 @@ "", "", "", + "", "" ], "type": "string" @@ -34534,7 +36193,7 @@ "type": "string" }, "hosts": { - "description": "The list of host patterns to match. They must be valid hostnames with optional port numbers in the format host:port. * matches any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or ..", + "description": "The list of host patterns to match. They must be valid hostnames with optional port numbers in the format host:port. * matches any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or ..\n* based matching is not supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true.", "items": { "type": "string" }, @@ -34595,6 +36254,25 @@ }, "type": "object" }, + "HttpFilterConfig": { + "description": "HttpFilterConfiguration supplies additional contextual settings for networkservices.HttpFilter resources enabled by Traffic Director.", + "id": "HttpFilterConfig", + "properties": { + "config": { + "description": "The configuration needed to enable the networkservices.HttpFilter resource. The configuration must be YAML formatted and only contain fields defined in the protobuf identified in configTypeUrl", + "type": "string" + }, + "configTypeUrl": { + "description": "The fully qualified versioned proto3 type url of the protobuf that the filter expects for its contextual settings, for example: type.googleapis.com/google.protobuf.Struct", + "type": "string" + }, + "filterName": { + "description": "Name of the networkservices.HttpFilter resource this configuration belongs to. This name must be known to the xDS client. 
Example: envoy.wasm", + "type": "string" + } + }, + "type": "object" + }, "HttpHeaderAction": { "description": "The request and response header transformations that take effect before the request is passed along to the selected backendService.", "id": "HttpHeaderAction", @@ -34639,7 +36317,7 @@ "type": "string" }, "headerName": { - "description": "The name of the HTTP header to match.\nFor matching against the HTTP request's authority, use a headerMatch with the header name \":authority\".\nFor matching a request's method, use the headerName \":method\".", + "description": "The name of the HTTP header to match.\nFor matching against the HTTP request's authority, use a headerMatch with the header name \":authority\".\nFor matching a request's method, use the headerName \":method\".\nWhen the URL map is bound to target gRPC proxy that has validateForProxyless field set to true, only non-binary user-specified custom metadata and the `content-type` header are supported. The following transport-level headers cannot be used in header matching rules: `:authority`, `:method`, `:path`, `:scheme`, `user-agent`, `accept-encoding`, `content-encoding`, `grpc-accept-encoding`, `grpc-encoding`, `grpc-previous-rpc-attempts`, `grpc-tags-bin`, `grpc-timeout` and `grpc-trace-bin.", "type": "string" }, "invertMatch": { @@ -34805,6 +36483,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -34836,6 +36515,7 @@ "", "", "", + "", "" ], "type": "string" @@ -34963,30 +36643,30 @@ "properties": { "corsPolicy": { "$ref": "CorsPolicy", - "description": "The specification for allowing client side cross-origin requests. Please see W3C Recommendation for Cross Origin Resource Sharing" + "description": "The specification for allowing client side cross-origin requests. Please see W3C Recommendation for Cross Origin Resource Sharing \nNot supported when the URL map is bound to target gRPC proxy." }, "faultInjectionPolicy": { "$ref": "HttpFaultInjection", - "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted by the Loadbalancer for a percentage of requests.\ntimeout and retry_policy will be ignored by clients that are configured with a fault_injection_policy." + "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted by the Loadbalancer for a percentage of requests.\ntimeout and retry_policy will be ignored by clients that are configured with a fault_injection_policy.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "requestMirrorPolicy": { "$ref": "RequestMirrorPolicy", - "description": "Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. 
Loadbalancer does not wait for responses from the shadow service. Prior to sending traffic to the shadow service, the host / authority header is suffixed with -shadow." + "description": "Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. Loadbalancer does not wait for responses from the shadow service. Prior to sending traffic to the shadow service, the host / authority header is suffixed with -shadow.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "retryPolicy": { "$ref": "HttpRetryPolicy", - "description": "Specifies the retry policy associated with this route." + "description": "Specifies the retry policy associated with this route.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "timeout": { "$ref": "Duration", - "description": "Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (i.e. end-of-stream) up until the response has been completely processed. Timeout includes all retries.\nIf not specified, will use the largest timeout among all backend services associated with the route." + "description": "Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (i.e. end-of-stream) up until the response has been completely processed. Timeout includes all retries.\nIf not specified, will use the largest timeout among all backend services associated with the route.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "urlRewrite": { "$ref": "UrlRewrite", - "description": "The spec to modify the URL of the request, prior to forwarding the request to the matched service." + "description": "The spec to modify the URL of the request, prior to forwarding the request to the matched service.\nurlRewrite is the only action supported in UrlMaps for external HTTP(S) load balancers.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "weightedBackendServices": { - "description": "A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non 0 number.\nOnce a backendService is identified and before forwarding the request to the backend service, advanced routing actions like Url rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction.", + "description": "A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. 
If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non-zero number.\nOnce a backendService is identified and before forwarding the request to the backend service, advanced routing actions such as URL rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction.", "items": { "$ref": "WeightedBackendService" }, @@ -35005,9 +36685,24 @@ }, "headerAction": { "$ref": "HttpHeaderAction", - "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here are applied before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction" + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here are applied before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction \nNote that headerAction is not supported for Loadbalancers that have their loadBalancingScheme set to EXTERNAL.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." + }, + "httpFilterConfigs": { + "description": "Outbound route specific configuration for networkservices.HttpFilter resources enabled by Traffic Director. httpFilterConfigs only applies for Loadbalancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true.", + "items": { + "$ref": "HttpFilterConfig" + }, + "type": "array" + }, + "httpFilterMetadata": { + "description": "Outbound route specific metadata supplied to networkservices.HttpFilter resources enabled by Traffic Director. httpFilterMetadata only applies for Loadbalancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details.\nThe only configTypeUrl supported is type.googleapis.com/google.protobuf.Struct \nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true.", + "items": { + "$ref": "HttpFilterConfig" + }, + "type": "array" }, "matchRules": { + "description": "The list of criteria for matching attributes of a request to this routeRule. This list has OR semantics: the request matches this routeRule when any of the matchRules are satisfied. However predicates within a given matchRule have AND semantics. All predicates within a matchRule must match for the request to match the rule.", "items": { "$ref": "HttpRouteRuleMatch" }, @@ -35020,7 +36715,7 @@ }, "routeAction": { "$ref": "HttpRouteAction", - "description": "In response to a matching matchRule, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set." + "description": "In response to a matching matchRule, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. 
prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a routeRule's routeAction." }, "service": { "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendService s. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set.", @@ -35028,7 +36723,7 @@ }, "urlRedirect": { "$ref": "HttpRedirectAction", - "description": "When this rule is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set." + "description": "When this rule is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set.\nNot supported when the URL map is bound to target gRPC proxy." } }, "type": "object" @@ -35049,11 +36744,11 @@ "type": "array" }, "ignoreCase": { - "description": "Specifies that prefixMatch and fullPathMatch matches are case sensitive.\nThe default value is false.\nignoreCase must not be used with regexMatch.", + "description": "Specifies that prefixMatch and fullPathMatch matches are case sensitive.\nThe default value is false.\nignoreCase must not be used with regexMatch.\nNot supported when the URL map is bound to target gRPC proxy.", "type": "boolean" }, "metadataFilters": { - "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. If a match takes place, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata.\nmetadataFilters specified here will be applied after those specified in ForwardingRule that refers to the UrlMap this HttpRouteRuleMatch belongs to.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. When there is a match, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. 
If multiple metadataFilters are specified, all of them need to be satisfied in order to be considered a match.\nmetadataFilters specified here will be applied after those specified in ForwardingRule that refers to the UrlMap this HttpRouteRuleMatch belongs to.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true.", "items": { "$ref": "MetadataFilter" }, @@ -35064,7 +36759,7 @@ "type": "string" }, "queryParameterMatches": { - "description": "Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request.", + "description": "Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request.\nNot supported when the URL map is bound to target gRPC proxy.", "items": { "$ref": "HttpQueryParameterMatch" }, @@ -35194,6 +36889,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -35225,6 +36921,7 @@ "", "", "", + "", "" ], "type": "string" @@ -35395,7 +37092,7 @@ "type": "string" }, "sourceImage": { - "description": "URL of the source image used to create this image. This can be a full or valid partial URL. You must provide exactly one of: \n- this property, or \n- the rawDisk.source property, or \n- the sourceDisk property in order to create an image.", + "description": "URL of the source image used to create this image.\n\nIn order to create an image, you must provide the full or partial URL of one of the following: \n- The selfLink URL \n- This property \n- The rawDisk.source URL \n- The sourceDisk URL", "type": "string" }, "sourceImageEncryptionKey": { @@ -35407,7 +37104,7 @@ "type": "string" }, "sourceSnapshot": { - "description": "URL of the source snapshot used to create this image. This can be a full or valid partial URL. You must provide exactly one of: \n- this property, or \n- the sourceImage property, or \n- the rawDisk.source property, or \n- the sourceDisk property in order to create an image.", + "description": "URL of the source snapshot used to create this image.\n\nIn order to create an image, you must provide the full or partial URL of one of the following: \n- The selfLink URL \n- This property \n- The sourceImage URL \n- The rawDisk.source URL \n- The sourceDisk URL", "type": "string" }, "sourceSnapshotEncryptionKey": { @@ -35505,6 +37202,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -35536,6 +37234,7 @@ "", "", "", + "", "" ], "type": "string" @@ -35607,6 +37306,9 @@ "description": "Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes. For more information, see Enabling IP Forwarding.", "type": "boolean" }, + "confidentialInstanceConfig": { + "$ref": "ConfidentialInstanceConfig" + }, "cpuPlatform": { "description": "[Output Only] The CPU platform used by this instance.", "type": "string" @@ -35676,6 +37378,18 @@ "description": "Labels to apply to this instance. 
These can be later modified by the setLabels method.", "type": "object" }, + "lastStartTimestamp": { + "description": "[Output Only] Last start timestamp in RFC3339 text format.", + "type": "string" + }, + "lastStopTimestamp": { + "description": "[Output Only] Last stop timestamp in RFC3339 text format.", + "type": "string" + }, + "lastSuspendedTimestamp": { + "description": "[Output Only] Last suspended timestamp in RFC3339 text format.", + "type": "string" + }, "machineType": { "annotations": { "required": [ @@ -35757,10 +37471,12 @@ "$ref": "ShieldedInstanceIntegrityPolicy" }, "shieldedVmConfig": { - "$ref": "ShieldedVmConfig" + "$ref": "ShieldedVmConfig", + "description": "Deprecating, please use shielded_instance_config." }, "shieldedVmIntegrityPolicy": { - "$ref": "ShieldedVmIntegrityPolicy" + "$ref": "ShieldedVmIntegrityPolicy", + "description": "Deprecating, please use shielded_instance_integrity_policy." }, "sourceMachineImage": { "description": "Source machine image", @@ -35775,7 +37491,7 @@ "type": "boolean" }, "status": { - "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, STOPPED, SUSPENDING, SUSPENDED, and TERMINATED.", + "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see Instance life cycle.", "enum": [ "DEPROVISIONING", "PROVISIONING", @@ -35845,6 +37561,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -35867,6 +37590,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -35898,6 +37622,7 @@ "", "", "", + "", "" ], "type": "string" @@ -36029,6 +37754,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -36051,6 +37783,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -36082,6 +37815,7 @@ "", "", "", + "", "" ], "type": "string" @@ -36163,6 +37897,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -36194,6 +37929,7 @@ "", "", "", + "", "" ], "type": "string" @@ -36454,6 +38190,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -36476,6 +38219,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -36507,6 +38251,7 @@ "", "", "", + "", "" ], "type": "string" @@ -36604,6 +38349,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -36635,6 +38381,7 @@ "", "", "", + "", "" ], "type": "string" @@ -36724,6 +38471,20 @@ "isStateful": { "description": "[Output Only] A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful config even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions. This field is deprecated in favor of has_stateful_config.", "type": "boolean" + }, + "perInstanceConfigs": { + "$ref": "InstanceGroupManagerStatusStatefulPerInstanceConfigs", + "description": "[Output Only] Status of per-instance configs on the instance." + } + }, + "type": "object" + }, + "InstanceGroupManagerStatusStatefulPerInstanceConfigs": { + "id": "InstanceGroupManagerStatusStatefulPerInstanceConfigs", + "properties": { + "allEffective": { + "description": "A bit indicating if all of the group's per-instance configs (listed in the output of a listPerInstanceConfigs API call) have status EFFECTIVE or there are no per-instance-configs.", + "type": "boolean" } }, "type": "object" @@ -36844,6 +38605,10 @@ "description": "InstanceGroupManagers.applyUpdatesToInstances", "id": "InstanceGroupManagersApplyUpdatesRequest", "properties": { + "allInstances": { + "description": "Flag to update all instances instead of specified list of ?instances?. If the flag is set to true then the instances may not be specified in the request.", + "type": "boolean" + }, "instances": { "description": "The list of URLs of one or more instances for which you want to apply updates. 
Each URL can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", "items": { @@ -36997,6 +38762,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -37028,6 +38794,7 @@ "", "", "", + "", "" ], "type": "string" @@ -37133,6 +38900,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -37164,6 +38932,7 @@ "", "", "", + "", "" ], "type": "string" @@ -37311,6 +39080,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -37342,6 +39112,7 @@ "", "", "", + "", "" ], "type": "string" @@ -37436,6 +39207,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -37467,6 +39239,7 @@ "", "", "", + "", "" ], "type": "string" @@ -37566,6 +39339,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -37597,6 +39371,7 @@ "", "", "", + "", "" ], "type": "string" @@ -37678,6 +39453,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -37709,6 +39485,7 @@ "", "", "", + "", "" ], "type": "string" @@ -37831,7 +39608,7 @@ "id": "InstanceProperties", "properties": { "canIpForward": { - "description": "Enables instances created based on this template to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information.", + "description": "Enables instances created based on these properties to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information.", "type": "boolean" }, "confidentialInstanceConfig": { @@ -37839,11 +39616,11 @@ "description": "Specifies the Confidential Instance options." 
}, "description": { - "description": "An optional text description for the instances that are created from this instance template.", + "description": "An optional text description for the instances that are created from these properties.", "type": "string" }, "disks": { - "description": "An array of disks that are associated with the instances that are created from this template.", + "description": "An array of disks that are associated with the instances that are created from these properties.", "items": { "$ref": "AttachedDisk" }, @@ -37854,7 +39631,7 @@ "description": "Display Device properties to enable support for remote display products like: Teradici, VNC and TeamViewer" }, "guestAccelerators": { - "description": "A list of guest accelerator cards' type and count to use for instances created from the instance template.", + "description": "A list of guest accelerator cards' type and count to use for instances created from these properties.", "items": { "$ref": "AcceleratorConfig" }, @@ -37864,7 +39641,7 @@ "additionalProperties": { "type": "string" }, - "description": "Labels to apply to instances that are created from this template.", + "description": "Labels to apply to instances that are created from these properties.", "type": "object" }, "machineType": { @@ -37873,15 +39650,15 @@ "compute.instanceTemplates.insert" ] }, - "description": "The machine type to use for instances that are created from this template.", + "description": "The machine type to use for instances that are created from these properties.", "type": "string" }, "metadata": { "$ref": "Metadata", - "description": "The metadata key/value pairs to assign to instances that are created from this template. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information." + "description": "The metadata key/value pairs to assign to instances that are created from these properties. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information." }, "minCpuPlatform": { - "description": "Minimum cpu/platform to be used by this instance. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\". For more information, read Specifying a Minimum CPU Platform.", + "description": "Minimum cpu/platform to be used by instances. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\". For more information, read Specifying a Minimum CPU Platform.", "type": "string" }, "networkInterfaces": { @@ -37892,7 +39669,7 @@ "type": "array" }, "privateIpv6GoogleAccess": { - "description": "The private IPv6 google access type for the VM. If not specified, use INHERIT_FROM_SUBNETWORK as default.", + "description": "The private IPv6 google access type for VMs. If not specified, use INHERIT_FROM_SUBNETWORK as default.", "enum": [ "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE", "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE", @@ -37907,10 +39684,10 @@ }, "reservationAffinity": { "$ref": "ReservationAffinity", - "description": "Specifies the reservations that this instance can consume from." + "description": "Specifies the reservations that instances can consume from." 
}, "resourcePolicies": { - "description": "Resource policies (names, not ULRs) applied to instances created from this template.", + "description": "Resource policies (names, not ULRs) applied to instances created from these properties.", "items": { "type": "string" }, @@ -37918,10 +39695,10 @@ }, "scheduling": { "$ref": "Scheduling", - "description": "Specifies the scheduling options for the instances that are created from this template." + "description": "Specifies the scheduling options for the instances that are created from these properties." }, "serviceAccounts": { - "description": "A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from this template. Use metadata queries to obtain the access tokens for these instances.", + "description": "A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from these properties. Use metadata queries to obtain the access tokens for these instances.", "items": { "$ref": "ServiceAccount" }, @@ -37932,11 +39709,11 @@ }, "shieldedVmConfig": { "$ref": "ShieldedVmConfig", - "description": "Specifies the Shielded VM options for the instances that are created from this template." + "description": "Specifies the Shielded VM options for the instances that are created from these properties." }, "tags": { "$ref": "Tags", - "description": "A list of tags to apply to the instances that are created from this template. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035." + "description": "A list of tags to apply to the instances that are created from these properties. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035." } }, "type": "object" @@ -38052,6 +39829,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -38083,6 +39861,7 @@ "", "", "", + "", "" ], "type": "string" @@ -38273,6 +40052,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -38304,6 +40084,7 @@ "", "", "", + "", "" ], "type": "string" @@ -38385,6 +40166,20 @@ }, "type": "object" }, + "InstancesSetNameRequest": { + "id": "InstancesSetNameRequest", + "properties": { + "currentName": { + "description": "The current name of this resource, used to prevent conflicts. Provide the latest name when making a request to change name.", + "type": "string" + }, + "name": { + "description": "The name to be applied to the instance. 
Needs to be RFC 1035 compliant.", + "type": "string" + } + }, + "type": "object" + }, "InstancesSetServiceAccountRequest": { "id": "InstancesSetServiceAccountRequest", "properties": { @@ -38817,6 +40612,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -38839,6 +40641,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -38870,6 +40673,7 @@ "", "", "", + "", "" ], "type": "string" @@ -38951,6 +40755,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -38982,6 +40787,7 @@ "", "", "", + "", "" ], "type": "string" @@ -39076,6 +40882,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -39107,6 +40914,7 @@ "", "", "", + "", "" ], "type": "string" @@ -39139,7 +40947,7 @@ "type": "object" }, "InterconnectCircuitInfo": { - "description": "Describes a single physical circuit between the Customer and Google. CircuitInfo objects are created by Google, so all fields are output only. Next id: 4", + "description": "Describes a single physical circuit between the Customer and Google. CircuitInfo objects are created by Google, so all fields are output only.", "id": "InterconnectCircuitInfo", "properties": { "customerDemarcId": { @@ -39334,6 +41142,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -39365,6 +41174,7 @@ "", "", "", + "", "" ], "type": "string" @@ -39550,6 +41360,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -39581,6 +41392,7 @@ "", "", "", + "", "" ], "type": "string" @@ -39645,7 +41457,7 @@ "type": "object" }, "InterconnectOutageNotification": { - "description": "Description of a planned outage on this Interconnect. 
Next id: 9", + "description": "Description of a planned outage on this Interconnect.", "id": "InterconnectOutageNotification", "properties": { "affectedCircuits": { @@ -39869,6 +41681,26 @@ }, "type": "object" }, + "LicenseResourceCommitment": { + "description": "Commitment for a particular license resource.", + "id": "LicenseResourceCommitment", + "properties": { + "amount": { + "description": "The number of licenses purchased.", + "format": "int64", + "type": "string" + }, + "coresPerLicense": { + "description": "Specifies the core range of the instance for which this license applies.", + "type": "string" + }, + "license": { + "description": "Any applicable license URI.", + "type": "string" + } + }, + "type": "object" + }, "LicenseResourceRequirements": { "id": "LicenseResourceRequirements", "properties": { @@ -39929,6 +41761,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -39960,6 +41793,7 @@ "", "", "", + "", "" ], "type": "string" @@ -39991,6 +41825,26 @@ }, "type": "object" }, + "LocalDisk": { + "id": "LocalDisk", + "properties": { + "diskCount": { + "description": "Specifies the number of such disks.", + "format": "int32", + "type": "integer" + }, + "diskSizeGb": { + "description": "Specifies the size of the disk in base-2 GB.", + "format": "int32", + "type": "integer" + }, + "diskType": { + "description": "Specifies the desired disk type on the node. This disk type must be a local storage type (e.g.: local-ssd). Note that for nodeTemplates, this should be the name of the disk type and not its URL.", + "type": "string" + } + }, + "type": "object" + }, "LogConfig": { "description": "Specifies what kind of log the caller must write", "id": "LogConfig", @@ -40104,7 +41958,7 @@ "type": "string" }, "guestFlush": { - "description": "[Input Only] Specifies to create an application consistent machine image by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", + "description": "[Input Only] Whether to attempt an application consistent machine image by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", "type": "boolean" }, "id": { @@ -40169,7 +42023,7 @@ "type": "string" }, "storageLocations": { - "description": "GCS bucket storage location of the machine image (regional or multi-regional).", + "description": "The regional or multi-regional Cloud Storage bucket location where the machine image is stored.", "items": { "type": "string" }, @@ -40233,6 +42087,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -40264,6 +42119,7 @@ "", "", "", + "", "" ], "type": "string" @@ -40323,7 +42179,7 @@ }, "deprecated": { "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this machine type." + "description": "[Output Only] The deprecation status associated with this machine type. Only applicable if the machine type is unavailable." 
}, "description": { "description": "[Output Only] An optional textual description of the resource.", @@ -40407,6 +42263,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -40429,6 +42292,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -40460,6 +42324,7 @@ "", "", "", + "", "" ], "type": "string" @@ -40541,6 +42406,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -40572,6 +42438,7 @@ "", "", "", + "", "" ], "type": "string" @@ -40635,6 +42502,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -40666,6 +42534,7 @@ "", "", "", + "", "" ], "type": "string" @@ -40981,7 +42850,7 @@ "type": "string" }, "autoCreateSubnetworks": { - "description": "When set to true, the VPC network is created in \"auto\" mode. When set to false, the VPC network is created in \"custom\" mode.\n\nAn auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges.", + "description": "Must be set to create a VPC network. If not set, a legacy network is created.\n\nWhen set to true, the VPC network is created in auto mode. When set to false, the VPC network is created in custom mode.\n\nAn auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges.\n\nFor custom mode VPC networks, you can add subnets using the subnetworks insert method.", "type": "boolean" }, "creationTimestamp": { @@ -41007,6 +42876,11 @@ "description": "[Output Only] Type of the resource. Always compute#network for networks.", "type": "string" }, + "mtu": { + "description": "Maximum Transmission Unit in bytes. The minimum value for this field is 1460 and the maximum value is 1500 bytes.", + "format": "int32", + "type": "integer" + }, "name": { "annotations": { "required": [ @@ -41043,7 +42917,7 @@ "type": "object" }, "NetworkEndpoint": { - "description": "The network endpoint. Next ID: 7", + "description": "The network endpoint.", "id": "NetworkEndpoint", "properties": { "annotations": { @@ -41074,7 +42948,7 @@ "type": "object" }, "NetworkEndpointGroup": { - "description": "Represents a collection of network endpoints.\n\nA network endpoint group (NEG) defines how a set of endpoints should be reached, whether they are reachable, and where they are located. For more information about using NEGs, see Setting up internet NEGs or Setting up zonal NEGs. (== resource_for {$api_version}.networkEndpointGroups ==) (== resource_for {$api_version}.globalNetworkEndpointGroups ==)", + "description": "Represents a collection of network endpoints.\n\nA network endpoint group (NEG) defines how a set of endpoints should be reached, whether they are reachable, and where they are located. For more information about using NEGs, see Setting up internet NEGs, Setting up zonal NEGs, or Setting up serverless NEGs. 
(== resource_for {$api_version}.networkEndpointGroups ==) (== resource_for {$api_version}.globalNetworkEndpointGroups ==) (== resource_for {$api_version}.regionNetworkEndpointGroups ==)", "id": "NetworkEndpointGroup", "properties": { "annotations": { @@ -41084,6 +42958,18 @@ "description": "Metadata defined as annotations on the network endpoint group.", "type": "object" }, + "appEngine": { + "$ref": "NetworkEndpointGroupAppEngine", + "description": "Only valid when networkEndpointType is \"SERVERLESS\". Only one of cloudRun, appEngine or cloudFunction may be set." + }, + "cloudFunction": { + "$ref": "NetworkEndpointGroupCloudFunction", + "description": "Only valid when networkEndpointType is \"SERVERLESS\". Only one of cloudRun, appEngine or cloudFunction may be set." + }, + "cloudRun": { + "$ref": "NetworkEndpointGroupCloudRun", + "description": "Only valid when networkEndpointType is \"SERVERLESS\". Only one of cloudRun, appEngine or cloudFunction may be set." + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -41120,19 +43006,27 @@ "type": "string" }, "networkEndpointType": { - "description": "Type of network endpoints in this network endpoint group.", + "description": "Type of network endpoints in this network endpoint group. Can be one of GCE_VM_IP_PORT, NON_GCP_PRIVATE_IP_PORT, INTERNET_FQDN_PORT, INTERNET_IP_PORT, or SERVERLESS.", "enum": [ "GCE_VM_IP_PORT", "INTERNET_FQDN_PORT", - "INTERNET_IP_PORT" + "INTERNET_IP_PORT", + "NON_GCP_PRIVATE_IP_PORT", + "SERVERLESS" ], "enumDescriptions": [ + "", + "", "", "", "" ], "type": "string" }, + "region": { + "description": "[Output Only] The URL of the region where the network endpoint group is located.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -41181,6 +43075,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -41203,6 +43104,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -41234,6 +43136,7 @@ "", "", "", + "", "" ], "type": "string" @@ -41265,6 +43168,59 @@ }, "type": "object" }, + "NetworkEndpointGroupAppEngine": { + "description": "Configuration for an App Engine network endpoint group (NEG). The service is optional, may be provided explicitly or in the URL mask. The version is optional and can only be provided explicitly or in the URL mask when service is present.\n\nNote: App Engine service must be in the same project and located in the same region as the Serverless NEG.", + "id": "NetworkEndpointGroupAppEngine", + "properties": { + "service": { + "description": "Optional serving service.\n\nThe service name must be 1-63 characters long, and comply with RFC1035.\n\nExample value: \"default\", \"my-service\".", + "type": "string" + }, + "urlMask": { + "description": "A template to parse service and version fields from a request URL. 
URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services.\n\nFor example, the request URLs \"foo1-dot-appname.appspot.com/v1\" and \"foo1-dot-appname.appspot.com/v2\" can be backed by the same Serverless NEG with URL mask \"-dot-appname.appspot.com/\". The URL mask will parse them to { service = \"foo1\", version = \"v1\" } and { service = \"foo1\", version = \"v2\" } respectively.", + "type": "string" + }, + "version": { + "description": "Optional serving version.\n\nThe version must be 1-63 characters long, and comply with RFC1035.\n\nExample value: \"v1\", \"v2\".", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEndpointGroupCloudFunction": { + "description": "Configuration for a Cloud Function network endpoint group (NEG). The function must be provided explicitly or in the URL mask.\n\nNote: Cloud Function must be in the same project and located in the same region as the Serverless NEG.", + "id": "NetworkEndpointGroupCloudFunction", + "properties": { + "function": { + "description": "A user-defined name of the Cloud Function.\n\nThe function name is case-sensitive and must be 1-63 characters long.\n\nExample value: \"func1\".", + "type": "string" + }, + "urlMask": { + "description": "A template to parse function field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services.\n\nFor example, request URLs \"mydomain.com/function1\" and \"mydomain.com/function2\" can be backed by the same Serverless NEG with URL mask \"/\". The URL mask will parse them to { function = \"function1\" } and { function = \"function2\" } respectively.", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEndpointGroupCloudRun": { + "description": "Configuration for a Cloud Run network endpoint group (NEG). The service must be provided explicitly or in the URL mask. The tag is optional, may be provided explicitly or in the URL mask.\n\nNote: Cloud Run service must be in the same project and located in the same region as the Serverless NEG.", + "id": "NetworkEndpointGroupCloudRun", + "properties": { + "service": { + "description": "Cloud Run service is the main resource of Cloud Run.\n\nThe service must be 1-63 characters long, and comply with RFC1035.\n\nExample value: \"run-service\".", + "type": "string" + }, + "tag": { + "description": "Optional Cloud Run tag represents the \"named-revision\" to provide additional fine-grained traffic routing information.\n\nThe tag must be 1-63 characters long, and comply with RFC1035.\n\nExample value: \"revision-0010\".", + "type": "string" + }, + "urlMask": { + "description": "A template to parse service and tag fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services.\n\nFor example, request URLs \"foo1.domain.com/bar1\" and \"foo1.domain.com/bar2\" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask \".domain.com/\". 
The URL mask will parse them to { service=\"bar1\", tag=\"foo1\" } and { service=\"bar2\", tag=\"foo2\" } respectively.", + "type": "string" + } + }, + "type": "object" + }, "NetworkEndpointGroupLbNetworkEndpointGroup": { "description": "Load balancing specific fields for network endpoint group.", "id": "NetworkEndpointGroupLbNetworkEndpointGroup", @@ -41338,6 +43294,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -41369,6 +43326,7 @@ "", "", "", + "", "" ], "type": "string" @@ -41505,6 +43463,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -41536,6 +43495,7 @@ "", "", "", + "", "" ], "type": "string" @@ -41599,6 +43559,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -41630,6 +43591,7 @@ "", "", "", + "", "" ], "type": "string" @@ -41779,6 +43741,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -41810,6 +43773,7 @@ "", "", "", + "", "" ], "type": "string" @@ -41877,6 +43841,11 @@ "description": "The URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network.", "type": "string" }, + "peerMtu": { + "description": "Maximum Transmission Unit in bytes.", + "format": "int32", + "type": "integer" + }, "state": { "description": "[Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The peering is `ACTIVE` when there's a matching configuration in the peer network.", "enum": [ @@ -42031,7 +44000,7 @@ "type": "string" }, "maintenancePolicy": { - "description": "Specifies how to handle instances when a node in the group undergoes maintenance.", + "description": "Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT. 
For more information, see Maintenance policies.", "enum": [ "DEFAULT", "MAINTENANCE_POLICY_UNSPECIFIED", @@ -42051,7 +44020,7 @@ "type": "string" }, "nodeTemplate": { - "description": "The URL of the node template to which this node group belongs.", + "description": "URL of the node template to create the node group from.", "type": "string" }, "selfLink": { @@ -42113,6 +44082,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -42135,6 +44111,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -42166,6 +44143,7 @@ "", "", "", + "", "" ], "type": "string" @@ -42201,7 +44179,7 @@ "id": "NodeGroupAutoscalingPolicy", "properties": { "maxNodes": { - "description": "The maximum number of nodes that the group should have.", + "description": "The maximum number of nodes that the group should have. Must be set if autoscaling is enabled. Maximum value allowed is 100.", "format": "int32", "type": "integer" }, @@ -42211,7 +44189,7 @@ "type": "integer" }, "mode": { - "description": "The autoscaling mode.", + "description": "The autoscaling mode. Set to one of: ON, OFF, or ONLY_SCALE_OUT. For more information, see Autoscaler modes.", "enum": [ "MODE_UNSPECIFIED", "OFF", @@ -42279,6 +44257,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -42310,6 +44289,7 @@ "", "", "", + "", "" ], "type": "string" @@ -42344,6 +44324,13 @@ "NodeGroupNode": { "id": "NodeGroupNode", "properties": { + "accelerators": { + "description": "Accelerators for this node.", + "items": { + "$ref": "AcceleratorConfig" + }, + "type": "array" + }, "cpuOvercommitType": { "description": "CPU overcommit.", "enum": [ @@ -42358,6 +44345,13 @@ ], "type": "string" }, + "disks": { + "description": "Local disk configurations.", + "items": { + "$ref": "LocalDisk" + }, + "type": "array" + }, "instances": { "description": "Instances scheduled on this node.", "items": { @@ -42474,6 +44468,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -42505,6 +44500,7 @@ "", "", "", + "", "" ], "type": "string" @@ -42568,6 +44564,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -42599,6 +44596,7 @@ "", "", "", + "", "" ], "type": "string" @@ -42641,9 +44639,15 @@ "type": "object" }, "NodeTemplate": { - "description": "Represent a sole-tenant Node Template resource.\n\nYou can use a template to define properties for nodes in a node group. For more information, read Creating node groups and instances. (== resource_for {$api_version}.nodeTemplates ==) (== NextID: 19 ==)", + "description": "Represent a sole-tenant Node Template resource.\n\nYou can use a template to define properties for nodes in a node group. For more information, read Creating node groups and instances. 
(== resource_for {$api_version}.nodeTemplates ==)", "id": "NodeTemplate", "properties": { + "accelerators": { + "items": { + "$ref": "AcceleratorConfig" + }, + "type": "array" + }, "cpuOvercommitType": { "description": "CPU overcommit.", "enum": [ @@ -42666,6 +44670,12 @@ "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, + "disks": { + "items": { + "$ref": "LocalDisk" + }, + "type": "array" + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", @@ -42758,6 +44768,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -42780,6 +44797,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -42811,6 +44829,7 @@ "", "", "", + "", "" ], "type": "string" @@ -42892,6 +44911,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -42923,6 +44943,7 @@ "", "", "", + "", "" ], "type": "string" @@ -43001,6 +45022,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -43032,6 +45054,7 @@ "", "", "", + "", "" ], "type": "string" @@ -43152,6 +45175,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -43174,6 +45204,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -43205,6 +45236,7 @@ "", "", "", + "", "" ], "type": "string" @@ -43286,6 +45318,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -43317,6 +45350,7 @@ "", "", "", + "", "" ], "type": "string" @@ -43380,6 +45414,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -43411,6 +45446,7 @@ "", "", "", + "", "" ], "type": "string" @@ -43561,6 +45597,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -43592,6 +45629,7 @@ "", "", "", + "", "" ], "type": "string" @@ -43772,6 +45810,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -43803,6 +45842,7 @@ "", "", "", + "", "" ], "type": "string" @@ -43868,6 +45908,13 @@ "description": "[Output Only] 
Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -43890,6 +45937,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -43921,6 +45969,7 @@ "", "", "", + "", "" ], "type": "string" @@ -44002,6 +46051,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -44033,6 +46083,7 @@ "", "", "", + "", "" ], "type": "string" @@ -44096,6 +46147,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -44127,6 +46179,7 @@ "", "", "", + "", "" ], "type": "string" @@ -44405,6 +46458,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -44427,6 +46487,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -44458,6 +46519,7 @@ "", "", "", + "", "" ], "type": "string" @@ -44573,6 +46635,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -44604,6 +46667,7 @@ "", "", "", + "", "" ], "type": "string" @@ -44736,6 +46800,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -44767,6 +46832,7 @@ "", "", "", + "", "" ], "type": "string" @@ -44804,7 +46870,7 @@ "properties": { "defaultRouteAction": { "$ref": "HttpRouteAction", - "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set." + "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a pathMatcher's defaultRouteAction." 
}, "defaultService": { "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.\nAuthorization requires one or more of the following Google IAM permissions on the specified resource default_service: \n- compute.backendBuckets.use \n- compute.backendServices.use", @@ -44812,7 +46878,7 @@ }, "defaultUrlRedirect": { "$ref": "HttpRedirectAction", - "description": "When none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set." + "description": "When none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set.\nNot supported when the URL map is bound to target gRPC proxy." }, "description": { "description": "An optional description of this resource. Provide this property when you create the resource.", @@ -44820,7 +46886,7 @@ }, "headerAction": { "$ref": "HttpHeaderAction", - "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nHeaderAction specified here are applied after the matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap" + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nHeaderAction specified here are applied after the matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap \nNote that headerAction is not supported for Loadbalancers that have their loadBalancingScheme set to EXTERNAL.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "name": { "description": "The name to which this PathMatcher is referred by the HostRule.", @@ -44856,7 +46922,7 @@ }, "routeAction": { "$ref": "HttpRouteAction", - "description": "In response to a matching path, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of routeAction or urlRedirect must be set." + "description": "In response to a matching path, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. 
If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of routeAction or urlRedirect must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a pathRule's routeAction." }, "service": { "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendService s. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set.", @@ -44864,7 +46930,7 @@ }, "urlRedirect": { "$ref": "HttpRedirectAction", - "description": "When a path pattern is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set." + "description": "When a path pattern is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set.\nNot supported when the URL map is bound to target gRPC proxy." } }, "type": "object" @@ -44884,6 +46950,26 @@ "preservedState": { "$ref": "PreservedState", "description": "The intended preserved state for the given instance. Does not contain preserved state generated from a stateful policy." + }, + "status": { + "description": "The status of applying this per-instance config on the corresponding managed instance.", + "enum": [ + "APPLYING", + "DELETING", + "EFFECTIVE", + "NONE", + "UNAPPLIED", + "UNAPPLIED_DELETION" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ], + "type": "string" } }, "type": "object" @@ -45067,7 +47153,7 @@ "description": "The naming prefix for daily usage reports and the Google Cloud Storage bucket where they are stored." }, "xpnProjectStatus": { - "description": "[Output Only] The role this project has in a shared VPC configuration. Currently only HOST projects are differentiated.", + "description": "[Output Only] The role this project has in a shared VPC configuration. 
Currently, only projects with the host role, which is specified by the value HOST, are differentiated.", "enum": [ "HOST", "UNSPECIFIED_XPN_PROJECT_STATUS" @@ -45163,18 +47249,22 @@ "metric": { "description": "[Output Only] Name of the quota metric.", "enum": [ + "A2_CPUS", "AFFINITY_GROUPS", "AUTOSCALERS", "BACKEND_BUCKETS", "BACKEND_SERVICES", "C2_CPUS", "COMMITMENTS", + "COMMITTED_A2_CPUS", "COMMITTED_C2_CPUS", "COMMITTED_CPUS", "COMMITTED_LICENSES", "COMMITTED_LOCAL_SSD_TOTAL_GB", + "COMMITTED_MEMORY_OPTIMIZED_CPUS", "COMMITTED_N2D_CPUS", "COMMITTED_N2_CPUS", + "COMMITTED_NVIDIA_A100_GPUS", "COMMITTED_NVIDIA_K80_GPUS", "COMMITTED_NVIDIA_P100_GPUS", "COMMITTED_NVIDIA_P4_GPUS", @@ -45183,6 +47273,8 @@ "CPUS", "CPUS_ALL_REGIONS", "DISKS_TOTAL_GB", + "EXTERNAL_NETWORK_LB_FORWARDING_RULES", + "EXTERNAL_PROTOCOL_FORWARDING_RULES", "EXTERNAL_VPN_GATEWAYS", "FIREWALLS", "FORWARDING_RULES", @@ -45199,18 +47291,23 @@ "INTERCONNECT_ATTACHMENTS_TOTAL_MBPS", "INTERCONNECT_TOTAL_GBPS", "INTERNAL_ADDRESSES", + "INTERNAL_TRAFFIC_DIRECTOR_FORWARDING_RULES", "IN_PLACE_SNAPSHOTS", "IN_USE_ADDRESSES", "IN_USE_BACKUP_SCHEDULES", "IN_USE_SNAPSHOT_SCHEDULES", "LOCAL_SSD_TOTAL_GB", + "M1_CPUS", + "M2_CPUS", "MACHINE_IMAGES", "N2D_CPUS", "N2_CPUS", "NETWORKS", "NETWORK_ENDPOINT_GROUPS", + "NETWORK_FIREWALL_POLICIES", "NODE_GROUPS", "NODE_TEMPLATES", + "NVIDIA_A100_GPUS", "NVIDIA_K80_GPUS", "NVIDIA_P100_GPUS", "NVIDIA_P100_VWS_GPUS", @@ -45222,6 +47319,7 @@ "PACKET_MIRRORINGS", "PREEMPTIBLE_CPUS", "PREEMPTIBLE_LOCAL_SSD_GB", + "PREEMPTIBLE_NVIDIA_A100_GPUS", "PREEMPTIBLE_NVIDIA_K80_GPUS", "PREEMPTIBLE_NVIDIA_P100_GPUS", "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS", @@ -45354,6 +47452,18 @@ "", "", "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", "" ], "type": "string" @@ -45507,6 +47617,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -45538,6 +47649,7 @@ "", "", "", + "", "" ], "type": "string" @@ -45631,6 +47743,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -45662,6 +47775,7 @@ "", "", "", + "", "" ], "type": "string" @@ -45780,6 +47894,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -45811,6 +47926,7 @@ "", "", "", + "", "" ], "type": "string" @@ -45906,6 +48022,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -45937,6 +48054,7 @@ "", "", "", + "", "" ], "type": "string" @@ -46010,9 +48128,13 @@ "type": "object" }, "RegionInstanceGroupManagersApplyUpdatesRequest": { - "description": "InstanceGroupManagers.applyUpdatesToInstances", + "description": "RegionInstanceGroupManagers.applyUpdatesToInstances", "id": "RegionInstanceGroupManagersApplyUpdatesRequest", "properties": { + "allInstances": { + "description": "Flag to update all instances instead of specified list of ?instances?. If the flag is set to true then the instances may not be specified in the request.", + "type": "boolean" + }, "instances": { "description": "The list of URLs of one or more instances for which you want to apply updates. 
Each URL can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", "items": { @@ -46135,6 +48257,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -46166,6 +48289,7 @@ "", "", "", + "", "" ], "type": "string" @@ -46316,6 +48440,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -46347,6 +48472,7 @@ "", "", "", + "", "" ], "type": "string" @@ -46469,6 +48595,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -46500,6 +48627,7 @@ "", "", "", + "", "" ], "type": "string" @@ -46743,6 +48871,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -46765,6 +48900,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -46796,6 +48932,7 @@ "", "", "", + "", "" ], "type": "string" @@ -46876,6 +49013,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -46907,6 +49045,7 @@ "", "", "", + "", "" ], "type": "string" @@ -46981,6 +49120,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -47012,6 +49152,7 @@ "", "", "", + "", "" ], "type": "string" @@ -47119,6 +49260,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -47150,6 +49292,7 @@ "", "", "", + "", "" ], "type": "string" @@ -47278,6 +49421,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -47300,6 +49450,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -47331,6 +49482,7 @@ "", "", "", + "", "" ], "type": "string" @@ -47483,6 +49635,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -47514,6 +49667,7 @@ "", "", "", + "", "" ], "type": "string" @@ -47610,6 +49764,10 @@ "description": "Specified snapshot properties for scheduled snapshots created by this policy.", "id": "ResourcePolicySnapshotSchedulePolicySnapshotProperties", "properties": { + "chainName": { + "description": "Chain name that the snapshot is created in.", + "type": "string" + 
}, "guestFlush": { "description": "Indication to perform a 'guest aware' snapshot.", "type": "boolean" @@ -47814,6 +49972,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -47845,6 +50004,7 @@ "", "", "", + "", "" ], "type": "string" @@ -47928,6 +50088,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -47959,6 +50120,7 @@ "", "", "", + "", "" ], "type": "string" @@ -47991,7 +50153,7 @@ "type": "object" }, "Router": { - "description": "Represents a Cloud Router resource.\n\nFor more information about Cloud Router, read the the Cloud Router overview.", + "description": "Represents a Cloud Router resource.\n\nFor more information about Cloud Router, read the Cloud Router overview.", "id": "Router", "properties": { "bgp": { @@ -48112,6 +50274,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -48134,6 +50303,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -48165,6 +50335,7 @@ "", "", "", + "", "" ], "type": "string" @@ -48237,7 +50408,7 @@ "type": "integer" }, "keepaliveInterval": { - "description": "The interval in seconds between BGP keepalive messages that are sent to the peer. Hold time is three times the interval at which keepalive messages are sent, and the hold time is the maximum number of seconds allowed to elapse between successive keepalive messages that BGP receives from a peer. BGP will use the smaller of either the local hold time value or the peer's hold time value as the hold time for the BGP connection between the two peers. If set, this value must be between 1 and 120. The default is 20.", + "description": "The interval in seconds between BGP keepalive messages that are sent to the peer.\nNot currently available publicly.\nHold time is three times the interval at which keepalive messages are sent, and the hold time is the maximum number of seconds allowed to elapse between successive keepalive messages that BGP receives from a peer.\nBGP will use the smaller of either the local hold time value or the peer's hold time value as the hold time for the BGP connection between the two peers.\nIf set, this value must be between 1 and 120. The default is 20.", "format": "uint32", "type": "integer" } @@ -48286,10 +50457,10 @@ }, "bfd": { "$ref": "RouterBgpPeerBfd", - "description": "BFD configuration for the BGP peering." + "description": "BFD configuration for the BGP peering.\nNot currently available publicly." }, "enable": { - "description": "The status of the BGP peer connection. If set to FALSE, any active session with the peer is terminated and all associated routing information is removed. If set to TRUE, the peer connection can be established with routing information. 
The default is TRUE.", + "description": "The status of the BGP peer connection.\nNot currently available publicly.\nIf set to FALSE, any active session with the peer is terminated and all associated routing information is removed. If set to TRUE, the peer connection can be established with routing information. The default is TRUE.", "enum": [ "FALSE", "TRUE" @@ -48351,22 +50522,22 @@ "id": "RouterBgpPeerBfd", "properties": { "minReceiveInterval": { - "description": "The minimum interval, in milliseconds, between BFD control packets received from the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the transmit interval of the other router. If set, this value must be between 100 and 30000. The default is 300.", + "description": "The minimum interval, in milliseconds, between BFD control packets received from the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the transmit interval of the other router.\nNot currently available publicly.\nIf set, this value must be between 100 and 30000.\nThe default is 300.", "format": "uint32", "type": "integer" }, "minTransmitInterval": { - "description": "The minimum interval, in milliseconds, between BFD control packets transmitted to the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the corresponding receive interval of the other router. If set, this value must be between 100 and 30000. The default is 300.", + "description": "The minimum interval, in milliseconds, between BFD control packets transmitted to the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the corresponding receive interval of the other router.\nNot currently available publicly.\nIf set, this value must be between 100 and 30000.\nThe default is 300.", "format": "uint32", "type": "integer" }, "multiplier": { - "description": "The number of consecutive BFD packets that must be missed before BFD declares that a peer is unavailable. If set, the value must be a value between 2 and 16. The default is 3.", + "description": "The number of consecutive BFD packets that must be missed before BFD declares that a peer is unavailable.\nNot currently available publicly.\nIf set, the value must be a value between 2 and 16.\nThe default is 3.", "format": "uint32", "type": "integer" }, "sessionInitializationMode": { - "description": "The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The default is PASSIVE.", + "description": "The BFD session initialization mode for this BGP peer.\nNot currently available publicly.\nIf set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. 
The default is PASSIVE.", "enum": [ "ACTIVE", "DISABLED", @@ -48472,6 +50643,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -48503,6 +50675,7 @@ "", "", "", + "", "" ], "type": "string" @@ -48782,7 +50955,7 @@ "type": "object" }, "RouterStatusNatStatus": { - "description": "Status of a NAT contained in this router. Next tag: 9", + "description": "Status of a NAT contained in this router.", "id": "RouterStatusNatStatus", "properties": { "autoAllocatedNatIps": { @@ -48893,6 +51066,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -48924,6 +51098,7 @@ "", "", "", + "", "" ], "type": "string" @@ -49184,7 +51359,7 @@ "type": "object" }, "Scheduling": { - "description": "Sets the scheduling options for an Instance. NextID: 11", + "description": "Sets the scheduling options for an Instance. NextID: 13", "id": "Scheduling", "properties": { "automaticRestart": { @@ -49216,7 +51391,7 @@ "type": "string" }, "preemptible": { - "description": "Defines whether the instance is preemptible. This can only be set during instance creation, it cannot be set or changed after the instance has been created.", + "description": "Defines whether the instance is preemptible. This can only be set during instance creation or while the instance is stopped and therefore, in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states.", "type": "boolean" } }, @@ -49450,6 +51625,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -49481,6 +51657,7 @@ "", "", "", + "", "" ], "type": "string" @@ -49634,7 +51811,7 @@ "type": "array" }, "srcIpRanges": { - "description": "CIDR IP address range.", + "description": "CIDR IP address range. Maximum number of src_ip_ranges allowed is 10.", "items": { "type": "string" }, @@ -49665,11 +51842,15 @@ "id": "SecuritySettings", "properties": { "authentication": { - "description": "A URL referring to a networksecurity.Authentication resource that describes how clients should authenticate with this service's backends. If left blank, communications between services are not encrypted (i.e., the TLS policy is set to OPEN). When sending traffic to this service's backends, the OriginationTls setting of Authentication.TransportAuthentication is applied. Refer to the Authentication and Authentication.TransportAuthentication.OriginationTls resources for additional details. authentication only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "description": "[Deprecated] Use clientTlsPolicy instead.", + "type": "string" + }, + "clientTlsPolicy": { + "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends.\nclientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED.\nIf left blank, communications are not encrypted.\nNote: This field currently has no impact.", "type": "string" }, "subjectAltNames": { - "description": "Optional. 
A list of subject alternate names to verify the subject identity (SAN) in the certificate presented by the server, to authorize the SAN list as identities to run the service represented by this BackendService. If specified, the client will verify that the server certificate's subject alt name matches one of the specified values. Only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "description": "Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service.\nNote that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities.\nOnly applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode).\nNote: This field currently has no impact.", "items": { "type": "string" }, @@ -49692,7 +51873,7 @@ "type": "string" }, "next": { - "description": "[Output Only] The position of the next byte of content from the serial console output. Use this value in the next request as the start parameter.", + "description": "[Output Only] The position of the next byte of content, regardless of whether the content exists, following the output returned in the `contents` property. Use this value in the next request as the start parameter.", "format": "int64", "type": "string" }, @@ -49701,7 +51882,7 @@ "type": "string" }, "start": { - "description": "The starting byte position of the output that was returned. This should match the start parameter sent with the request. If the serial console output exceeds the size of the buffer, older output will be overwritten by newer content and the start values will be mismatched.", + "description": "The starting byte position of the output that was returned. This should match the start parameter sent with the request. If the serial console output exceeds the size of the buffer (1 MB), older output is overwritten by newer content. The output start value will indicate the byte position of the output that was returned, which might be different than the `start` value that was specified in the request.", "format": "int64", "type": "string" } @@ -49899,6 +52080,10 @@ "description": "[Output Only] Set to true if snapshots are automatically created by applying resource policy on the target disk.", "type": "boolean" }, + "chainName": { + "description": "Creates the new snapshot in the snapshot chain labeled with the specified name. The chain name must be 1-63 characters long and comply with RFC1035. This is an uncommon option only for advanced service owners who needs to create separate snapshot chains, for example, for chargeback tracking. 
When you describe your snapshot resource, this field is visible only if it has a non-empty value.", + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -49917,6 +52102,10 @@ "format": "int64", "type": "string" }, + "guestFlush": { + "description": "[Input Only] Whether to attempt an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", + "type": "boolean" + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", @@ -49955,6 +52144,11 @@ "type": "array" }, "name": { + "annotations": { + "required": [ + "compute.snapshots.insert" + ] + }, "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" @@ -49968,7 +52162,7 @@ "description": "Encrypts the snapshot using a customer-supplied encryption key.\n\nAfter you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the snapshot later. For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request.\n\nCustomer-supplied encryption keys do not protect access to metadata of the snapshot.\n\nIf you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." }, "sourceDisk": { - "description": "[Output Only] The source disk used to create this snapshot.", + "description": "The source disk used to create this snapshot.", "type": "string" }, "sourceDiskEncryptionKey": { @@ -50074,6 +52268,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -50105,6 +52300,7 @@ "", "", "", + "", "" ], "type": "string" @@ -50239,11 +52435,11 @@ "type": "object" }, "SslCertificate": { - "description": "Represents an SSL Certificate resource.\n\nGoogle Compute Engine has two SSL Certificate resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/sslCertificates) * [Regional](/compute/docs/reference/rest/{$api_version}/regionSslCertificates)\n\n\n\nThe sslCertificates are used by: \n- external HTTPS load balancers \n- SSL proxy load balancers \n\nThe regionSslCertificates are used by internal HTTPS load balancers.\n\nOptionally, certificate file contents that you upload can contain a set of up to five PEM-encoded certificates. The API call creates an object (sslCertificate) that holds this data. You can use SSL keys and certificates to secure connections to a load balancer. For more information, read Creating and using SSL certificates and SSL certificates quotas and limits. 
(== resource_for {$api_version}.sslCertificates ==) (== resource_for {$api_version}.regionSslCertificates ==)", + "description": "Represents an SSL Certificate resource.\n\nGoogle Compute Engine has two SSL Certificate resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/sslCertificates) * [Regional](/compute/docs/reference/rest/{$api_version}/regionSslCertificates)\n\n\n\nThe sslCertificates are used by: \n- external HTTPS load balancers \n- SSL proxy load balancers \n\nThe regionSslCertificates are used by internal HTTPS load balancers.\n\nOptionally, certificate file contents that you upload can contain a set of up to five PEM-encoded certificates. The API call creates an object (sslCertificate) that holds this data. You can use SSL keys and certificates to secure connections to a load balancer. For more information, read Creating and using SSL certificates, SSL certificates quotas and limits, and Troubleshooting SSL certificates. (== resource_for {$api_version}.sslCertificates ==) (== resource_for {$api_version}.regionSslCertificates ==)", "id": "SslCertificate", "properties": { "certificate": { - "description": "A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert.", + "description": "A value read into memory from a certificate file. The certificate file must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert.", "type": "string" }, "creationTimestamp": { @@ -50278,7 +52474,7 @@ "type": "string" }, "privateKey": { - "description": "A write-only private key in PEM format. Only insert requests will include this field.", + "description": "A value read into memory from a write-only private key file. The private key file must be in PEM format. 
For security, only insert requests include this field.", "type": "string" }, "region": { @@ -50345,6 +52541,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -50367,6 +52570,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -50398,6 +52602,7 @@ "", "", "", + "", "" ], "type": "string" @@ -50479,6 +52684,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -50510,6 +52716,7 @@ "", "", "", + "", "" ], "type": "string" @@ -50647,6 +52854,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -50678,6 +52886,7 @@ "", "", "", + "", "" ], "type": "string" @@ -50758,6 +52967,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -50789,6 +52999,7 @@ "", "", "", + "", "" ], "type": "string" @@ -50935,6 +53146,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -50966,6 +53178,7 @@ "", "", "", + "", "" ], "type": "string" @@ -51085,7 +53298,7 @@ "type": "string" }, "ipCidrRange": { - "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. This field can be set only at resource creation time.", + "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 100.64.0.0/10. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. This field is set at resource creation time. This may be a RFC 1918 IP range, or a privately routed, non-RFC 1918 IP range, not belonging to Google. The range can be expanded after creation using expandIpCidrRange.", "type": "string" }, "ipv6CidrRange": { @@ -51099,7 +53312,7 @@ }, "logConfig": { "$ref": "SubnetworkLogConfig", - "description": "This field denotes the VPC flow logging options for this subnetwork. If logging is enabled, logs are exported to Stackdriver." + "description": "This field denotes the VPC flow logging options for this subnetwork. If logging is enabled, logs are exported to Cloud Logging." }, "name": { "description": "The name of the resource, provided by the client when initially creating the resource. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", @@ -51119,24 +53332,15 @@ "enum": [ "DISABLE_GOOGLE_ACCESS", "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE", - "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE", - "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE_FOR_SERVICE_ACCOUNTS" + "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE" ], "enumDescriptions": [ - "", "", "", "" ], "type": "string" }, - "privateIpv6GoogleAccessServiceAccounts": { - "description": "Deprecated in favor of enable PrivateIpv6GoogleAccess on instance directly. The service accounts can be used to selectively turn on Private IPv6 Google Access only on the VMs primary service account matching the value. This value only takes effect when PrivateIpv6GoogleAccess is ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE_FOR_SERVICE_ACCOUNTS or ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE_FOR_SERVICE_ACCOUNTS.", - "items": { - "type": "string" - }, - "type": "array" - }, "purpose": { "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", "enum": [ @@ -51221,6 +53425,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -51243,6 +53454,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -51274,6 +53486,7 @@ "", "", "", + "", "" ], "type": "string" @@ -51355,6 +53568,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -51386,6 +53600,7 @@ "", "", "", + "", "" ], "type": "string" @@ -51483,7 +53698,7 @@ "id": "SubnetworkSecondaryRange", "properties": { "ipCidrRange": { - "description": "The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported.", + "description": "The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported. 
This may be a RFC 1918 IP range, or a privately, non-RFC 1918 IP range, not belonging to Google.", "type": "string" }, "rangeName": { @@ -51535,6 +53750,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -51566,6 +53782,7 @@ "", "", "", + "", "" ], "type": "string" @@ -51674,6 +53891,175 @@ }, "type": "object" }, + "TargetGrpcProxy": { + "description": "Represents a Target gRPC Proxy resource.\n\nA target gRPC proxy is a component of load balancers intended for load balancing gRPC traffic. Global forwarding rules reference a target gRPC proxy. The Target gRPC Proxy references a URL map which specifies how traffic routes to gRPC backend services. (== resource_for {$api_version}.targetGrpcProxies ==)", + "id": "TargetGrpcProxy", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a TargetGrpcProxy. An up-to-date fingerprint must be provided in order to patch/update the TargetGrpcProxy; otherwise, the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve the TargetGrpcProxy.", + "format": "byte", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource type. The server generates this identifier.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#targetGrpcProxy", + "description": "[Output Only] Type of the resource. Always compute#targetGrpcProxy for target grpc proxies.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.targetGrpcProxies.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL with id for the resource.", + "type": "string" + }, + "urlMap": { + "description": "URL to the UrlMap resource that defines the mapping from URL to the BackendService. The protocol field in the BackendService must be set to GRPC.", + "type": "string" + }, + "validateForProxyless": { + "description": "If true, indicates that the BackendServices referenced by the urlMap may be accessed by gRPC applications without using a sidecar proxy. This will enable configuration checks on urlMap and its referenced BackendServices to not allow unsupported features. A gRPC application must use \"xds:///\" scheme in the target URI of the service it is connecting to. 
If false, indicates that the BackendServices referenced by the urlMap will be accessed by gRPC applications via a sidecar proxy. In this case, a gRPC application must not use \"xds:///\" scheme in the target URI of the service it is connecting to", + "type": "boolean" + } + }, + "type": "object" + }, + "TargetGrpcProxyList": { + "id": "TargetGrpcProxyList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of TargetGrpcProxy resources.", + "items": { + "$ref": "TargetGrpcProxy" + }, + "type": "array" + }, + "kind": { + "default": "compute#targetGrpcProxyList", + "description": "[Output Only] Type of the resource. Always compute#targetGrpcProxy for target grpc proxies.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "TargetHttpProxiesScopedList": { "id": "TargetHttpProxiesScopedList", "properties": { @@ -51706,6 +54092,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -51737,6 +54124,7 @@ "", "", "", + "", "" ], "type": "string" @@ -51780,6 +54168,18 @@ "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a TargetHttpProxy. An up-to-date fingerprint must be provided in order to patch/update the TargetHttpProxy; otherwise, the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve the TargetHttpProxy.", + "format": "byte", + "type": "string" + }, + "httpFilters": { + "description": "URLs to networkservices.HttpFilter resources enabled for xDS clients using this configuration. For example, https://networkservices.googleapis.com/v1alpha1/projects/project/locations/locationhttpFilters/httpFilter Only filters that handle outbound connection and stream events may be specified. These filters work in conjunction with a default set of HTTP filters that may already be configured by Traffic Director. Traffic Director will determine the final location of these filters within xDS configuration based on the name of the HTTP filter. If Traffic Director positions multiple filters at the same location, those filters will be in the same order as specified in this list.\nhttpFilters only applies for loadbalancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details.", + "items": { + "type": "string" + }, + "type": "array" + }, "id": { "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", "format": "uint64", @@ -51842,6 +54242,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -51864,6 +54271,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -51895,6 +54303,7 @@ "", "", "", + "", "" ], "type": "string" @@ -51976,6 +54385,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -52007,6 +54417,7 @@ "", "", "", + "", "" ], "type": "string" @@ -52070,6 +54481,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -52101,6 +54513,7 @@ "", "", "", + "", "" ], "type": "string" @@ -52170,11 +54583,15 @@ "id": "TargetHttpsProxy", "properties": { "authentication": { - "description": "A URL referring to a networksecurity.Authentication resource that describes how the proxy should authenticate inbound traffic. If left blank, communications between services are not encrypted (i.e., the TLS policy is set to OPEN). When terminating inbound traffic to this proxy, the TerminationTls setting of Authentication.TransportAuthentication is applied.\nRefer to the Authentication and Authentication.TransportAuthentication.TerminationTls resources for additional details.\nauthentication only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "description": "[Deprecated] Use serverTlsPolicy instead.", "type": "string" }, "authorization": { - "description": "A URL referring to a networksecurity.Authorization resource that describes how the proxy should authorize inbound traffic. If left blank, access will not be restricted by an authorization policy.\nRefer to the Authorization resource for additional details.\nauthorization only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "description": "[Deprecated] Use authorizationPolicy instead.", + "type": "string" + }, + "authorizationPolicy": { + "description": "Optional. A URL referring to a networksecurity.AuthorizationPolicy resource that describes how the proxy should authorize inbound traffic. If left blank, access will not be restricted by an authorization policy.\nRefer to the AuthorizationPolicy resource for additional details.\nauthorizationPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED.\nNote: This field currently has no impact.", "type": "string" }, "creationTimestamp": { @@ -52185,6 +54602,13 @@ "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, + "httpFilters": { + "description": "URLs to networkservices.HttpFilter resources enabled for xDS clients using this configuration. 
For example, https://networkservices.googleapis.com/beta/projects/project/locations/locationhttpFilters/httpFilter Only filters that handle outbound connection and stream events may be specified. These filters work in conjunction with a default set of HTTP filters that may already be configured by Traffic Director. Traffic Director will determine the final location of these filters within xDS configuration based on the name of the HTTP filter. If Traffic Director positions multiple filters at the same location, those filters will be in the same order as specified in this list.\nhttpFilters only applies for loadbalancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details.", + "items": { + "type": "string" + }, + "type": "array" + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", @@ -52226,6 +54650,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "serverTlsPolicy": { + "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic.\nserverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED.\nIf left blank, communications are not encrypted.\nNote: This field currently has no impact.", + "type": "string" + }, "sslCertificates": { "description": "URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates.", "items": { @@ -52272,6 +54700,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -52294,6 +54729,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -52325,6 +54761,7 @@ "", "", "", + "", "" ], "type": "string" @@ -52406,6 +54843,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -52437,6 +54875,7 @@ "", "", "", + "", "" ], "type": "string" @@ -52509,6 +54948,10 @@ ], "type": "string" }, + "network": { + "description": "The URL of the network this target instance uses to forward traffic. 
If not specified, the traffic will be forwarded to the network that the default network interface belongs to.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -52548,6 +54991,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -52570,6 +55020,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -52601,6 +55052,7 @@ "", "", "", + "", "" ], "type": "string" @@ -52682,6 +55134,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -52713,6 +55166,7 @@ "", "", "", + "", "" ], "type": "string" @@ -52776,6 +55230,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -52807,6 +55262,7 @@ "", "", "", + "", "" ], "type": "string" @@ -52860,7 +55316,7 @@ "type": "number" }, "healthChecks": { - "description": "The URL of the HttpHealthCheck resource. A member instance in this pool is considered healthy if and only if the health checks pass. An empty list means all member instances will be considered healthy at all times. Only HttpHealthChecks are supported. Only one health check may be specified.", + "description": "The URL of the HttpHealthCheck resource. A member instance in this pool is considered healthy if and only if the health checks pass. An empty list means all member instances will be considered healthy at all times. Only legacy HttpHealthChecks are supported. 
Only one health check may be specified.", "items": { "type": "string" }, @@ -52949,6 +55405,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -52971,6 +55434,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -53002,6 +55466,7 @@ "", "", "", + "", "" ], "type": "string" @@ -53100,6 +55565,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -53131,6 +55597,7 @@ "", "", "", + "", "" ], "type": "string" @@ -53246,6 +55713,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -53277,6 +55745,7 @@ "", "", "", + "", "" ], "type": "string" @@ -53469,6 +55938,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -53500,6 +55970,7 @@ "", "", "", + "", "" ], "type": "string" @@ -53659,6 +56130,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -53690,6 +56162,7 @@ "", "", "", + "", "" ], "type": "string" @@ -53843,6 +56316,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -53865,6 +56345,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -53896,6 +56377,7 @@ "", "", "", + "", "" ], "type": "string" @@ -53977,6 +56459,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -54008,6 +56491,7 @@ "", "", "", + "", "" ], "type": "string" @@ -54071,6 +56555,249 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "TestFailure": { + "id": "TestFailure", + "properties": { + "actualService": { + "description": "BackendService or BackendBucket returned by load balancer.", + "type": "string" + }, + "expectedService": { + "description": "Expected BackendService or BackendBucket resource the given URL should be mapped to.", + "type": "string" + }, + "host": { + "description": "Host portion of the URL.", + "type": "string" + }, + "path": { + "description": "Path portion including query parameters in the URL.", + "type": "string" + } + }, + "type": "object" + }, + "TestPermissionsRequest": { + "id": "TestPermissionsRequest", + "properties": { + "permissions": { + "description": "The set of permissions to check for the 'resource'. Permissions with wildcards (such as '*' or 'storage.*') are not allowed.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "TestPermissionsResponse": { + "id": "TestPermissionsResponse", + "properties": { + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "UrlMap": { + "description": "Represents a URL Map resource.\n\nGoogle Compute Engine has two URL Map resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/urlMaps) * [Regional](/compute/docs/reference/rest/{$api_version}/regionUrlMaps)\n\nA URL map resource is a component of certain types of GCP load balancers and Traffic Director.\n\n* urlMaps are used by external HTTP(S) load balancers and Traffic Director. * regionUrlMaps are used by internal HTTP(S) load balancers.\n\nFor a list of supported URL map features by load balancer type, see the Load balancing features: Routing and traffic management table.\n\nFor a list of supported URL map features for Traffic Director, see the Traffic Director features: Routing and traffic management table.\n\nThis resource defines mappings from host names and URL paths to either a backend service or a backend bucket.\n\nTo use the global urlMaps resource, the backend service must have a loadBalancingScheme of either EXTERNAL or INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend service must have a loadBalancingScheme of INTERNAL_MANAGED. For more information, read URL Map Concepts.", + "id": "UrlMap", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "defaultRouteAction": { + "$ref": "HttpRouteAction", + "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. 
prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within defaultRouteAction.\ndefaultRouteAction has no effect when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." + }, + "defaultService": { + "description": "The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.\ndefaultService has no effect when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true.", + "type": "string" + }, + "defaultUrlRedirect": { + "$ref": "HttpRedirectAction", + "description": "When none of the specified hostRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set.\nNot supported when the URL map is bound to target gRPC proxy." + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a UrlMap. An up-to-date fingerprint must be provided in order to update the UrlMap, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a UrlMap.", + "format": "byte", + "type": "string" + }, + "headerAction": { + "$ref": "HttpHeaderAction", + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here take effect after headerAction specified under pathMatcher.\nNote that headerAction is not supported for Loadbalancers that have their loadBalancingScheme set to EXTERNAL.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." + }, + "hostRules": { + "description": "The list of HostRules to use against the URL.", + "items": { + "$ref": "HostRule" + }, + "type": "array" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#urlMap", + "description": "[Output Only] Type of the resource. Always compute#urlMaps for url maps.", + "type": "string" + }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "pathMatchers": { + "description": "The list of named PathMatchers to use against the URL.", + "items": { + "$ref": "PathMatcher" + }, + "type": "array" + }, + "region": { + "description": "[Output Only] URL of the region where the regional URL map resides. This field is not applicable to global URL maps. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "tests": { + "description": "The list of expected URL mapping tests. Request to update this UrlMap will succeed only if all of the test cases pass. You can specify a maximum of 100 tests per UrlMap.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true.", + "items": { + "$ref": "UrlMapTest" + }, + "type": "array" + } + }, + "type": "object" + }, + "UrlMapList": { + "description": "Contains a list of UrlMap resources.", + "id": "UrlMapList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of UrlMap resources.", + "items": { + "$ref": "UrlMap" + }, + "type": "array" + }, + "kind": { + "default": "compute#urlMapList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -54081,241 +56808,6 @@ ], "enumDescriptions": [ "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "TestFailure": { - "id": "TestFailure", - "properties": { - "actualService": { - "type": "string" - }, - "expectedService": { - "type": "string" - }, - "host": { - "type": "string" - }, - "path": { - "type": "string" - } - }, - "type": "object" - }, - "TestPermissionsRequest": { - "id": "TestPermissionsRequest", - "properties": { - "permissions": { - "description": "The set of permissions to check for the 'resource'. Permissions with wildcards (such as '*' or 'storage.*') are not allowed.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "TestPermissionsResponse": { - "id": "TestPermissionsResponse", - "properties": { - "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "UrlMap": { - "description": "Represents a URL Map resource.\n\nGoogle Compute Engine has two URL Map resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/urlMaps) * [Regional](/compute/docs/reference/rest/{$api_version}/regionUrlMaps)\n\nA URL map resource is a component of certain types of GCP load balancers and Traffic Director.\n\n* urlMaps are used by external HTTP(S) load balancers and Traffic Director. * regionUrlMaps are used by internal HTTP(S) load balancers.\n\nThis resource defines mappings from host names and URL paths to either a backend service or a backend bucket.\n\nTo use the global urlMaps resource, the backend service must have a loadBalancingScheme of either EXTERNAL or INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend service must have a loadBalancingScheme of INTERNAL_MANAGED. For more information, read URL Map Concepts.", - "id": "UrlMap", - "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "defaultRouteAction": { - "$ref": "HttpRouteAction", - "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set." 
- }, - "defaultService": { - "description": "The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.", - "type": "string" - }, - "defaultUrlRedirect": { - "$ref": "HttpRedirectAction", - "description": "When none of the specified hostRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set." - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "fingerprint": { - "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a UrlMap. An up-to-date fingerprint must be provided in order to update the UrlMap, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a UrlMap.", - "format": "byte", - "type": "string" - }, - "headerAction": { - "$ref": "HttpHeaderAction", - "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here take effect after headerAction specified under pathMatcher." - }, - "hostRules": { - "description": "The list of HostRules to use against the URL.", - "items": { - "$ref": "HostRule" - }, - "type": "array" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#urlMap", - "description": "[Output Only] Type of the resource. Always compute#urlMaps for url maps.", - "type": "string" - }, - "name": { - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "pathMatchers": { - "description": "The list of named PathMatchers to use against the URL.", - "items": { - "$ref": "PathMatcher" - }, - "type": "array" - }, - "region": { - "description": "[Output Only] URL of the region where the regional URL map resides. This field is not applicable to global URL maps. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "tests": { - "description": "The list of expected URL mapping tests. 
Request to update this UrlMap will succeed only if all of the test cases pass. You can specify a maximum of 100 tests per UrlMap.", - "items": { - "$ref": "UrlMapTest" - }, - "type": "array" - } - }, - "type": "object" - }, - "UrlMapList": { - "description": "Contains a list of UrlMap resources.", - "id": "UrlMapList", - "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of UrlMap resources.", - "items": { - "$ref": "UrlMap" - }, - "type": "array" - }, - "kind": { - "default": "compute#urlMapList", - "description": "Type of resource.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ "", "", "", @@ -54387,7 +56879,7 @@ "type": "string" }, "host": { - "description": "Host portion of the URL.", + "description": "Host portion of the URL. 
If headers contains a host header, then host must also match the header value.", "type": "string" }, "path": { @@ -54395,7 +56887,7 @@ "type": "string" }, "service": { - "description": "Expected BackendService resource the given URL should be mapped to.", + "description": "Expected BackendService or BackendBucket resource the given URL should be mapped to.\nservice cannot be set if expectedRedirectResponseCode is set.", "type": "string" } }, @@ -54456,6 +56948,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -54478,6 +56977,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -54509,6 +57009,7 @@ "", "", "", + "", "" ], "type": "string" @@ -54572,6 +57073,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -54603,6 +57105,7 @@ "", "", "", + "", "" ], "type": "string" @@ -54758,6 +57261,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -54789,6 +57293,7 @@ "", "", "", + "", "" ], "type": "string" @@ -54941,6 +57446,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -54972,6 +57478,7 @@ "", "", "", + "", "" ], "type": "string" @@ -55102,6 +57609,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -55124,6 +57638,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -55155,6 +57670,7 @@ "", "", "", + "", "" ], "type": "string" @@ -55236,6 +57752,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -55267,6 +57784,7 @@ "", "", "", + "", "" ], "type": "string" @@ -55444,6 +57962,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -55475,6 +57994,7 @@ "", "", "", + "", "" ], "type": "string" @@ -55507,7 +58027,7 @@ "type": "object" }, "VpnTunnel": { - "description": "Represents a Cloud VPN Tunnel resource.\n\nFor more information about VPN, read the the Cloud VPN Overview. (== resource_for {$api_version}.vpnTunnels ==)", + "description": "Represents a Cloud VPN Tunnel resource.\n\nFor more information about VPN, read the the Cloud VPN Overview. 
(== resource_for {$api_version}.vpnTunnels ==)", "id": "VpnTunnel", "properties": { "creationTimestamp": { @@ -55688,6 +58208,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -55710,6 +58237,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -55741,6 +58269,7 @@ "", "", "", + "", "" ], "type": "string" @@ -55822,6 +58351,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -55853,6 +58383,7 @@ "", "", "", + "", "" ], "type": "string" @@ -55916,6 +58447,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -55947,6 +58479,7 @@ "", "", "", + "", "" ], "type": "string" @@ -56022,7 +58555,7 @@ }, "headerAction": { "$ref": "HttpHeaderAction", - "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nheaderAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap." + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nheaderAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap.\nNote that headerAction is not supported for Loadbalancers that have their loadBalancingScheme set to EXTERNAL.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "weight": { "description": "Specifies the fraction of traffic sent to backendService, computed as weight / (sum of all weightedBackendService weights in routeAction) .\nThe selection of a backend service is determined only for new traffic. 
Once a user's request has been directed to a backendService, subsequent requests will be sent to the same backendService as determined by the BackendService's session affinity policy.\nThe value must be between 0 and 1000", @@ -56081,6 +58614,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -56112,6 +58646,7 @@ "", "", "", + "", "" ], "type": "string" @@ -56276,6 +58811,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -56307,6 +58843,7 @@ "", "", "", + "", "" ], "type": "string" diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go index 0aa8fbd4147..7a4e2102fb5 100644 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go @@ -185,6 +185,7 @@ func New(client *http.Client) (*Service, error) { s.RegionHealthChecks = NewRegionHealthChecksService(s) s.RegionInstanceGroupManagers = NewRegionInstanceGroupManagersService(s) s.RegionInstanceGroups = NewRegionInstanceGroupsService(s) + s.RegionNetworkEndpointGroups = NewRegionNetworkEndpointGroupsService(s) s.RegionNotificationEndpoints = NewRegionNotificationEndpointsService(s) s.RegionOperations = NewRegionOperationsService(s) s.RegionSslCertificates = NewRegionSslCertificatesService(s) @@ -201,6 +202,7 @@ func New(client *http.Client) (*Service, error) { s.SslCertificates = NewSslCertificatesService(s) s.SslPolicies = NewSslPoliciesService(s) s.Subnetworks = NewSubnetworksService(s) + s.TargetGrpcProxies = NewTargetGrpcProxiesService(s) s.TargetHttpProxies = NewTargetHttpProxiesService(s) s.TargetHttpsProxies = NewTargetHttpsProxiesService(s) s.TargetInstances = NewTargetInstancesService(s) @@ -315,6 +317,8 @@ type Service struct { RegionInstanceGroups *RegionInstanceGroupsService + RegionNetworkEndpointGroups *RegionNetworkEndpointGroupsService + RegionNotificationEndpoints *RegionNotificationEndpointsService RegionOperations *RegionOperationsService @@ -347,6 +351,8 @@ type Service struct { Subnetworks *SubnetworksService + TargetGrpcProxies *TargetGrpcProxiesService + TargetHttpProxies *TargetHttpProxiesService TargetHttpsProxies *TargetHttpsProxiesService @@ -802,6 +808,15 @@ type RegionInstanceGroupsService struct { s *Service } +func NewRegionNetworkEndpointGroupsService(s *Service) *RegionNetworkEndpointGroupsService { + rs := &RegionNetworkEndpointGroupsService{s: s} + return rs +} + +type RegionNetworkEndpointGroupsService struct { + s *Service +} + func NewRegionNotificationEndpointsService(s *Service) *RegionNotificationEndpointsService { rs := &RegionNotificationEndpointsService{s: s} return rs @@ -946,6 +961,15 @@ type SubnetworksService struct { s *Service } +func NewTargetGrpcProxiesService(s *Service) *TargetGrpcProxiesService { + rs := &TargetGrpcProxiesService{s: s} + return rs +} + +type TargetGrpcProxiesService struct { + s *Service +} + func NewTargetHttpProxiesService(s *Service) *TargetHttpProxiesService { rs := &TargetHttpProxiesService{s: s} return rs @@ -1121,14 +1145,14 @@ type AcceleratorType struct { // compute#acceleratorType for accelerator types. Kind string `json:"kind,omitempty"` - // MaximumCardsPerInstance: [Output Only] Maximum accelerator cards - // allowed per instance. 
+ // MaximumCardsPerInstance: [Output Only] Maximum number of accelerator + // cards allowed per instance. MaximumCardsPerInstance int64 `json:"maximumCardsPerInstance,omitempty"` // Name: [Output Only] Name of the resource. Name string `json:"name,omitempty"` - // SelfLink: [Output Only] Server-defined fully-qualified URL for this + // SelfLink: [Output Only] Server-defined, fully qualified URL for this // resource. SelfLink string `json:"selfLink,omitempty"` @@ -1190,6 +1214,9 @@ type AcceleratorTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *AcceleratorTypeAggregatedListWarning `json:"warning,omitempty"` @@ -1244,6 +1271,7 @@ type AcceleratorTypeAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -1400,6 +1428,7 @@ type AcceleratorTypeListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -1535,6 +1564,7 @@ type AcceleratorTypesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -1899,6 +1929,9 @@ type AddressAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *AddressAggregatedListWarning `json:"warning,omitempty"` @@ -1953,6 +1986,7 @@ type AddressAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -2108,6 +2142,7 @@ type AddressListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -2241,6 +2276,7 @@ type AddressesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -2396,7 +2432,7 @@ func (s *AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk) } // AllocationSpecificSKUAllocationReservedInstanceProperties: Properties -// of the SKU instances being reserved. +// of the SKU instances being reserved. Next ID: 9 type AllocationSpecificSKUAllocationReservedInstanceProperties struct { // GuestAccelerators: Specifies accelerator type and count. GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` @@ -2673,6 +2709,10 @@ type AttachedDiskInitializeParams struct { // persistent disks. 
Labels map[string]string `json:"labels,omitempty"` + // MultiWriter: Indicates whether or not the disk can be read/write + // attached to more than one instance. + MultiWriter bool `json:"multiWriter,omitempty"` + // OnUpdateAction: Specifies which action to take on instance update // with this disk. Default is to use the existing disk. // @@ -2784,12 +2824,12 @@ func (s *AttachedDiskInitializeParams) MarshalJSON() ([]byte, error) { // // Example Policy with multiple AuditConfigs: // -// { "audit_configs": [ { "service": "allServices" "audit_log_configs": +// { "audit_configs": [ { "service": "allServices", "audit_log_configs": // [ { "log_type": "DATA_READ", "exempted_members": [ -// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE", }, { -// "log_type": "ADMIN_READ", } ] }, { "service": -// "sampleservice.googleapis.com" "audit_log_configs": [ { "log_type": -// "DATA_READ", }, { "log_type": "DATA_WRITE", "exempted_members": [ +// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { +// "log_type": "ADMIN_READ" } ] }, { "service": +// "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": +// "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ // "user:aliya@example.com" ] } ] } ] } // // For sampleservice, this policy enables DATA_READ, DATA_WRITE and @@ -2836,7 +2876,7 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { // // { "audit_log_configs": [ { "log_type": "DATA_READ", // "exempted_members": [ "user:jose@example.com" ] }, { "log_type": -// "DATA_WRITE", } ] } +// "DATA_WRITE" } ] } // // This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting // jose@example.com from DATA_READ logging. @@ -3067,6 +3107,9 @@ type AutoscalerAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *AutoscalerAggregatedListWarning `json:"warning,omitempty"` @@ -3121,6 +3164,7 @@ type AutoscalerAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -3276,6 +3320,7 @@ type AutoscalerListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -3413,6 +3458,7 @@ type AutoscalerStatusDetails struct { // "MISSING_CUSTOM_METRIC_DATA_POINTS" // "MISSING_LOAD_BALANCING_DATA_POINTS" // "MODE_OFF" + // "MODE_ONLY_SCALE_OUT" // "MODE_ONLY_UP" // "MORE_THAN_ONE_BACKEND_SERVICE" // "NOT_ENOUGH_QUOTA_AVAILABLE" @@ -3502,6 +3548,7 @@ type AutoscalersScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -3625,11 +3672,14 @@ type AutoscalingPolicy struct { // Possible values: // "OFF" // "ON" + // "ONLY_SCALE_OUT" // "ONLY_UP" Mode string `json:"mode,omitempty"` ScaleDownControl *AutoscalingPolicyScaleDownControl `json:"scaleDownControl,omitempty"` + ScaleInControl *AutoscalingPolicyScaleInControl `json:"scaleInControl,omitempty"` + // ForceSendFields is a list of field names (e.g. 
"CoolDownPeriodSec") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -3656,6 +3706,21 @@ func (s *AutoscalingPolicy) MarshalJSON() ([]byte, error) { // AutoscalingPolicyCpuUtilization: CPU utilization policy. type AutoscalingPolicyCpuUtilization struct { + // PredictiveMethod: Indicates whether predictive autoscaling based on + // CPU metric is enabled. Valid values are: + // + // * NONE (default). No predictive method is used. The autoscaler scales + // the group to meet current demand based on real-time metrics. * + // OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability + // by monitoring daily and weekly load patterns and scaling out ahead of + // anticipated demand. + // + // Possible values: + // "NONE" + // "OPTIMIZE_AVAILABILITY" + // "PREDICTIVE_METHOD_UNSPECIFIED" + PredictiveMethod string `json:"predictiveMethod,omitempty"` + // UtilizationTarget: The target CPU utilization that the autoscaler // should maintain. Must be a float value in the range (0, 1]. If not // specified, the default is 0.6. @@ -3671,15 +3736,15 @@ type AutoscalingPolicyCpuUtilization struct { // utilization. UtilizationTarget float64 `json:"utilizationTarget,omitempty"` - // ForceSendFields is a list of field names (e.g. "UtilizationTarget") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "PredictiveMethod") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "UtilizationTarget") to + // NullFields is a list of field names (e.g. "PredictiveMethod") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -3775,8 +3840,7 @@ type AutoscalingPolicyCustomMetricUtilization struct { // decrease proportionally to the metric. // // For example, a good metric to use as a utilization_target is - // compute.googleapis.com/instance/network/received_bytes_count. The - // autoscaler will work to keep this value constant for each of the + // https://www.googleapis.com/compute/v1/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the // instances. UtilizationTarget float64 `json:"utilizationTarget,omitempty"` @@ -3918,6 +3982,46 @@ func (s *AutoscalingPolicyScaleDownControl) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AutoscalingPolicyScaleInControl: Configuration that allows for slower +// scale in so that even if Autoscaler recommends an abrupt scale in of +// a MIG, it will be throttled as specified by the parameters below. +type AutoscalingPolicyScaleInControl struct { + // MaxScaledInReplicas: Maximum allowed number (or %) of VMs that can be + // deducted from the peak recommendation during the window autoscaler + // looks at when computing recommendations. Possibly all these VMs can + // be deleted at once so user service needs to be prepared to lose that + // many VMs in one step. 
+ MaxScaledInReplicas *FixedOrPercent `json:"maxScaledInReplicas,omitempty"` + + // TimeWindowSec: How long back autoscaling should look when computing + // recommendations to include directives regarding slower scale in, as + // described above. + TimeWindowSec int64 `json:"timeWindowSec,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxScaledInReplicas") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaxScaledInReplicas") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AutoscalingPolicyScaleInControl) MarshalJSON() ([]byte, error) { + type NoMethod AutoscalingPolicyScaleInControl + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Backend: Message containing information of one individual backend. type Backend struct { // BalancingMode: Specifies the balancing mode for the backend. @@ -3966,8 +4070,10 @@ type Backend struct { // capacity (based on UTILIZATION, RATE or CONNECTION). Default value is // 1, which means the group will serve up to 100% of its configured // capacity (depending on balancingMode). A setting of 0 means the group - // is completely drained, offering 0% of its available Capacity. Valid - // range is [0.0,1.0]. + // is completely drained, offering 0% of its available capacity. Valid + // range is 0.0 and [0.1,1.0]. You cannot configure a setting larger + // than 0 and smaller than 0.1. You cannot configure a setting of 0 when + // there is only one backend attached to the backend service. // // This cannot be used for internal load balancing. CapacityScaler float64 `json:"capacityScaler,omitempty"` @@ -4157,6 +4263,10 @@ type BackendBucket struct { // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` + // CustomResponseHeaders: Headers that the HTTP/S load balancer should + // add to proxied responses. + CustomResponseHeaders []string `json:"customResponseHeaders,omitempty"` + // Description: An optional textual description of the resource; // provided by the client when the resource is created. Description string `json:"description,omitempty"` @@ -4213,6 +4323,100 @@ func (s *BackendBucket) MarshalJSON() ([]byte, error) { // BackendBucketCdnPolicy: Message containing Cloud CDN configuration // for a backend bucket. type BackendBucketCdnPolicy struct { + // BypassCacheOnRequestHeaders: Bypass the cache when the specified + // request headers are matched - e.g. Pragma or Authorization headers. + // Up to 5 headers can be specified. The cache is bypassed for all + // cdnPolicy.cacheMode settings. + BypassCacheOnRequestHeaders []*BackendBucketCdnPolicyBypassCacheOnRequestHeader `json:"bypassCacheOnRequestHeaders,omitempty"` + + // CacheMode: Specifies the cache setting for all responses from this + // backend. 
The possible values are: + // + // USE_ORIGIN_HEADERS Requires the origin to set valid caching headers + // to cache content. Responses without these headers will not be cached + // at Google's edge, and will require a full trip to the origin on every + // request, potentially impacting performance and increasing load on the + // origin server. + // + // FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" + // or "no-cache" directives in Cache-Control response headers. Warning: + // this may result in Cloud CDN caching private, per-user (user + // identifiable) content. + // + // CACHE_ALL_STATIC Automatically cache static content, including common + // image formats, media (video and audio), and web assets (JavaScript + // and CSS). Requests and responses that are marked as uncacheable, as + // well as dynamic content (including HTML), will not be cached. + // + // Possible values: + // "CACHE_ALL_STATIC" + // "FORCE_CACHE_ALL" + // "INVALID_CACHE_MODE" + // "USE_ORIGIN_HEADERS" + CacheMode string `json:"cacheMode,omitempty"` + + // ClientTtl: Specifies a separate client (e.g. browser client) TTL, + // separate from the TTL for Cloud CDN's edge caches. Leaving this empty + // will use the same cache TTL for both Cloud CDN and the client-facing + // response. The maximum allowed value is 86400s (1 day). + ClientTtl int64 `json:"clientTtl,omitempty"` + + // DefaultTtl: Specifies the default TTL for cached content served by + // this origin for responses that do not have an existing valid TTL + // (max-age or s-max-age). Setting a TTL of "0" means "always + // revalidate". The value of defaultTTL cannot be set to a value greater + // than that of maxTTL, but can be equal. When the cacheMode is set to + // FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all + // responses. The maximum allowed value is 31,622,400s (1 year), noting + // that infrequently accessed objects may be evicted from the cache + // before the defined TTL. + DefaultTtl int64 `json:"defaultTtl,omitempty"` + + // MaxTtl: Specifies the maximum allowed TTL for cached content served + // by this origin. Cache directives that attempt to set a max-age or + // s-maxage higher than this, or an Expires header more than maxTTL + // seconds in the future will be capped at the value of maxTTL, as if it + // were the value of an s-maxage Cache-Control directive. Headers sent + // to the client will not be modified. Setting a TTL of "0" means + // "always revalidate". The maximum allowed value is 31,622,400s (1 + // year), noting that infrequently accessed objects may be evicted from + // the cache before the defined TTL. + MaxTtl int64 `json:"maxTtl,omitempty"` + + // NegativeCaching: Negative caching allows per-status code TTLs to be + // set, in order to apply fine-grained caching for common errors or + // redirects. This can reduce the load on your origin and improve + // end-user experience by reducing response latency. By default, Cloud + // CDN will apply the following default TTLs to these status codes: HTTP + // 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m HTTP 404 + // (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s + // HTTP 405 (Method Not Found), 421 (Misdirected Request), 501 (Not + // Implemented): 60s These defaults can be overridden in + // negative_caching_policy + NegativeCaching bool `json:"negativeCaching,omitempty"` + + // NegativeCachingPolicy: Sets a cache TTL for the specified HTTP status + // code. 
negative_caching must be enabled to configure + // negative_caching_policy. Omitting the policy and leaving + // negative_caching enabled will use Cloud CDN's default cache TTLs. + // Note that when specifying an explicit negative_caching_policy, you + // should take care to specify a cache TTL for all response codes that + // you wish to cache. Cloud CDN will not apply any default negative + // caching when a policy exists. + NegativeCachingPolicy []*BackendBucketCdnPolicyNegativeCachingPolicy `json:"negativeCachingPolicy,omitempty"` + + // ServeWhileStale: Serve existing content from the cache (if available) + // when revalidating content with the origin, or when an error is + // encountered when refreshing the cache. This setting defines the + // default "max-stale" duration for any cached responses that do not + // specify a max-stale directive. Stale responses that exceed the TTL + // configured here will not be served. The default limit (max-stale) is + // 86400s (1 day), which will allow stale content to be served up to + // this limit beyond the max-age (or s-max-age) of a cached response. + // The maximum allowed value is 604800 (1 week). Set this to zero (0) to + // disable serve-while-stale. + ServeWhileStale int64 `json:"serveWhileStale,omitempty"` + // SignedUrlCacheMaxAgeSec: Maximum number of seconds the response to a // signed URL request will be considered fresh. After this time period, // the response will be revalidated before being served. Defaults to 1hr @@ -4228,7 +4432,7 @@ type BackendBucketCdnPolicy struct { SignedUrlKeyNames []string `json:"signedUrlKeyNames,omitempty"` // ForceSendFields is a list of field names (e.g. - // "SignedUrlCacheMaxAgeSec") to unconditionally include in API + // "BypassCacheOnRequestHeaders") to unconditionally include in API // requests. By default, fields with empty values are omitted from API // requests. However, any non-pointer, non-interface field appearing in // ForceSendFields will be sent to the server regardless of whether the @@ -4236,13 +4440,13 @@ type BackendBucketCdnPolicy struct { // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SignedUrlCacheMaxAgeSec") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. + // "BypassCacheOnRequestHeaders") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. NullFields []string `json:"-"` } @@ -4252,6 +4456,76 @@ func (s *BackendBucketCdnPolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BackendBucketCdnPolicyBypassCacheOnRequestHeader: Bypass the cache +// when the specified request headers are present, e.g. Pragma or +// Authorization headers. Values are case insensitive. The presence of +// such a header overrides the cache_mode setting. 
+type BackendBucketCdnPolicyBypassCacheOnRequestHeader struct { + // HeaderName: The header field name to match on when bypassing cache. + // Values are case-insensitive. + HeaderName string `json:"headerName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HeaderName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HeaderName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendBucketCdnPolicyBypassCacheOnRequestHeader) MarshalJSON() ([]byte, error) { + type NoMethod BackendBucketCdnPolicyBypassCacheOnRequestHeader + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendBucketCdnPolicyNegativeCachingPolicy: Specify CDN TTLs for +// response error codes. +type BackendBucketCdnPolicyNegativeCachingPolicy struct { + // Code: The HTTP status code to define a TTL against. Only HTTP status + // codes 300, 301, 308, 404, 405, 410, 421, 451 and 501 are can be + // specified as values, and you cannot specify a status code more than + // once. + Code int64 `json:"code,omitempty"` + + // Ttl: The TTL (in seconds) to cache responses with the corresponding + // status code for. The maximum allowed value is 1800s (30 minutes), + // noting that infrequently accessed objects may be evicted from the + // cache before the defined TTL. + Ttl int64 `json:"ttl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendBucketCdnPolicyNegativeCachingPolicy) MarshalJSON() ([]byte, error) { + type NoMethod BackendBucketCdnPolicyNegativeCachingPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BackendBucketList: Contains a list of BackendBucket resources. 
type BackendBucketList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -4329,6 +4603,7 @@ type BackendBucketListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -4409,8 +4684,13 @@ func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { // BackendService: Represents a Backend Service resource. // -// A backend service contains configuration values for Google Cloud -// Platform load balancing services. +// A backend service defines how Google Cloud load balancers distribute +// traffic. The backend service configuration contains a set of values, +// such as the protocol used to connect to backends, various +// distribution and session settings, health checks, and timeouts. These +// settings provide fine-grained control over how your load balancer +// behaves. Most of the settings have default values that allow for easy +// configuration if you need to get started quickly. // // Backend services in Google Compute Engine can be either regionally or // globally scoped. @@ -4421,13 +4701,22 @@ func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { // [Regional](/compute/docs/reference/rest/{$api_version}/regionBackendSe // rvices) // -// For more information, read Backend Services. +// For more information, see Backend Services. // // (== resource_for {$api_version}.backendService ==) type BackendService struct { - // AffinityCookieTtlSec: If set to 0, the cookie is non-persistent and - // lasts only until the end of the browser session (or equivalent). The - // maximum allowed value is one day (86,400). + // AffinityCookieTtlSec: Lifetime of cookies in seconds. Only applicable + // if the loadBalancingScheme is EXTERNAL, INTERNAL_SELF_MANAGED, or + // INTERNAL_MANAGED, the protocol is HTTP or HTTPS, and the + // sessionAffinity is GENERATED_COOKIE, or HTTP_COOKIE. + // + // If set to 0, the cookie is non-persistent and lasts only until the + // end of the browser session (or equivalent). The maximum allowed value + // is one day (86,400). + // + // Not supported when the backend service is referenced by a URL map + // that is bound to target gRPC proxy that has validateForProxyless + // field set to true. AffinityCookieTtlSec int64 `json:"affinityCookieTtlSec,omitempty"` // Backends: The list of backends that serve this BackendService. @@ -4446,6 +4735,10 @@ type BackendService struct { // // - A global backend service with the load_balancing_scheme set to // INTERNAL_SELF_MANAGED. + // + // Not supported when the backend service is referenced by a URL map + // that is bound to target gRPC proxy that has validateForProxyless + // field set to true. CircuitBreakers *CircuitBreakers `json:"circuitBreakers,omitempty"` ConnectionDraining *ConnectionDraining `json:"connectionDraining,omitempty"` @@ -4465,6 +4758,10 @@ type BackendService struct { // // - A global backend service with the load_balancing_scheme set to // INTERNAL_SELF_MANAGED. + // + // Not supported when the backend service is referenced by a URL map + // that is bound to target gRPC proxy that has validateForProxyless + // field set to true. ConsistentHash *ConsistentHashLoadBalancerSettings `json:"consistentHash,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -4475,6 +4772,10 @@ type BackendService struct { // add to proxied requests. 
CustomRequestHeaders []string `json:"customRequestHeaders,omitempty"` + // CustomResponseHeaders: Headers that the HTTP/S load balancer should + // add to proxied responses. + CustomResponseHeaders []string `json:"customResponseHeaders,omitempty"` + // Description: An optional description of this resource. Provide this // property when you create the resource. Description string `json:"description,omitempty"` @@ -4502,12 +4803,14 @@ type BackendService struct { // HealthChecks: The list of URLs to the healthChecks, httpHealthChecks // (legacy), or httpsHealthChecks (legacy) resource for health checking // this backend service. Not all backend services support legacy health - // checks. See Load balancer guide. Currently at most one health check - // can be specified. Backend services with instance group or zonal NEG - // backends must have a health check. Backend services with internet NEG - // backends must not have a health check. A health check must + // checks. See Load balancer guide. Currently, at most one health check + // can be specified for each backend service. Backend services with + // instance group or zonal NEG backends must have a health check. + // Backend services with internet or serverless NEG backends must not + // have a health check. HealthChecks []string `json:"healthChecks,omitempty"` + // Iap: The configurations for Identity-Aware Proxy on this resource. Iap *BackendServiceIAP `json:"iap,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -4552,8 +4855,7 @@ type BackendService struct { // - MAGLEV: used as a drop in replacement for the ring hash load // balancer. Maglev is not as stable as ring hash but has faster table // lookup build times and host selection times. For more information - // about Maglev, refer to https://ai.google/research/pubs/pub44824 - // + // about Maglev, see https://ai.google/research/pubs/pub44824 // // This field is applicable to either: // - A regional backend service with the service_protocol set to HTTP, @@ -4562,9 +4864,13 @@ type BackendService struct { // - A global backend service with the load_balancing_scheme set to // INTERNAL_SELF_MANAGED. // - // If sessionAffinity is not NONE, and this field is not set to >MAGLEV + // If sessionAffinity is not NONE, and this field is not set to MAGLEV // or RING_HASH, session affinity settings will not take effect. // + // Only the default ROUND_ROBIN policy is supported when the backend + // service is referenced by a URL map that is bound to target gRPC proxy + // that has validateForProxyless field set to true. + // // Possible values: // "INVALID_LB_POLICY" // "LEAST_REQUEST" @@ -4604,6 +4910,10 @@ type BackendService struct { // // - A global backend service with the load_balancing_scheme set to // INTERNAL_SELF_MANAGED. + // + // Not supported when the backend service is referenced by a URL map + // that is bound to target gRPC proxy that has validateForProxyless + // field set to true. OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"` // Port: Deprecated in favor of portName. The TCP port to connect on the @@ -4629,12 +4939,16 @@ type BackendService struct { // Protocol: The protocol this BackendService uses to communicate with // backends. // - // Possible values are HTTP, HTTPS, HTTP2, TCP, SSL, or UDP. depending - // on the chosen load balancer or Traffic Director configuration. Refer - // to the documentation for the load balancer or for Traffic Director - // for more information. 
+ // Possible values are HTTP, HTTPS, HTTP2, TCP, SSL, UDP or GRPC. + // depending on the chosen load balancer or Traffic Director + // configuration. Refer to the documentation for the load balancer or + // for Traffic Director for more information. + // + // Must be set to GRPC when the backend service is referenced by a URL + // map that is bound to target gRPC proxy. // // Possible values: + // "GRPC" // "HTTP" // "HTTP2" // "HTTPS" @@ -4681,6 +4995,10 @@ type BackendService struct { // INTERNAL_MANAGED, possible values are NONE, CLIENT_IP, // GENERATED_COOKIE, HEADER_FIELD, or HTTP_COOKIE. // + // Not supported when the backend service is referenced by a URL map + // that is bound to target gRPC proxy that has validateForProxyless + // field set to true. + // // Possible values: // "CLIENT_IP" // "CLIENT_IP_PORT_PROTO" @@ -4692,7 +5010,7 @@ type BackendService struct { SessionAffinity string `json:"sessionAffinity,omitempty"` // TimeoutSec: The backend service timeout has a different meaning - // depending on the type of load balancer. For more information read, + // depending on the type of load balancer. For more information see, // Backend service settings The default is 30 seconds. TimeoutSec int64 `json:"timeoutSec,omitempty"` @@ -4749,6 +5067,9 @@ type BackendServiceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *BackendServiceAggregatedListWarning `json:"warning,omitempty"` @@ -4803,6 +5124,7 @@ type BackendServiceAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -4884,9 +5206,103 @@ func (s *BackendServiceAggregatedListWarningData) MarshalJSON() ([]byte, error) // BackendServiceCdnPolicy: Message containing Cloud CDN configuration // for a backend service. type BackendServiceCdnPolicy struct { + // BypassCacheOnRequestHeaders: Bypass the cache when the specified + // request headers are matched - e.g. Pragma or Authorization headers. + // Up to 5 headers can be specified. The cache is bypassed for all + // cdnPolicy.cacheMode settings. + BypassCacheOnRequestHeaders []*BackendServiceCdnPolicyBypassCacheOnRequestHeader `json:"bypassCacheOnRequestHeaders,omitempty"` + // CacheKeyPolicy: The CacheKeyPolicy for this CdnPolicy. CacheKeyPolicy *CacheKeyPolicy `json:"cacheKeyPolicy,omitempty"` + // CacheMode: Specifies the cache setting for all responses from this + // backend. The possible values are: + // + // USE_ORIGIN_HEADERS Requires the origin to set valid caching headers + // to cache content. Responses without these headers will not be cached + // at Google's edge, and will require a full trip to the origin on every + // request, potentially impacting performance and increasing load on the + // origin server. + // + // FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" + // or "no-cache" directives in Cache-Control response headers. Warning: + // this may result in Cloud CDN caching private, per-user (user + // identifiable) content. + // + // CACHE_ALL_STATIC Automatically cache static content, including common + // image formats, media (video and audio), and web assets (JavaScript + // and CSS). 
Requests and responses that are marked as uncacheable, as + // well as dynamic content (including HTML), will not be cached. + // + // Possible values: + // "CACHE_ALL_STATIC" + // "FORCE_CACHE_ALL" + // "INVALID_CACHE_MODE" + // "USE_ORIGIN_HEADERS" + CacheMode string `json:"cacheMode,omitempty"` + + // ClientTtl: Specifies a separate client (e.g. browser client) TTL, + // separate from the TTL for Cloud CDN's edge caches. Leaving this empty + // will use the same cache TTL for both Cloud CDN and the client-facing + // response. The maximum allowed value is 86400s (1 day). + ClientTtl int64 `json:"clientTtl,omitempty"` + + // DefaultTtl: Specifies the default TTL for cached content served by + // this origin for responses that do not have an existing valid TTL + // (max-age or s-max-age). Setting a TTL of "0" means "always + // revalidate". The value of defaultTTL cannot be set to a value greater + // than that of maxTTL, but can be equal. When the cacheMode is set to + // FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all + // responses. The maximum allowed value is 31,622,400s (1 year), noting + // that infrequently accessed objects may be evicted from the cache + // before the defined TTL. + DefaultTtl int64 `json:"defaultTtl,omitempty"` + + // MaxTtl: Specifies the maximum allowed TTL for cached content served + // by this origin. Cache directives that attempt to set a max-age or + // s-maxage higher than this, or an Expires header more than maxTTL + // seconds in the future will be capped at the value of maxTTL, as if it + // were the value of an s-maxage Cache-Control directive. Headers sent + // to the client will not be modified. Setting a TTL of "0" means + // "always revalidate". The maximum allowed value is 31,622,400s (1 + // year), noting that infrequently accessed objects may be evicted from + // the cache before the defined TTL. + MaxTtl int64 `json:"maxTtl,omitempty"` + + // NegativeCaching: Negative caching allows per-status code TTLs to be + // set, in order to apply fine-grained caching for common errors or + // redirects. This can reduce the load on your origin and improve + // end-user experience by reducing response latency. By default, Cloud + // CDN will apply the following default TTLs to these status codes: HTTP + // 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m HTTP 404 + // (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s + // HTTP 405 (Method Not Found), 421 (Misdirected Request), 501 (Not + // Implemented): 60s These defaults can be overridden in + // negative_caching_policy + NegativeCaching bool `json:"negativeCaching,omitempty"` + + // NegativeCachingPolicy: Sets a cache TTL for the specified HTTP status + // code. negative_caching must be enabled to configure + // negative_caching_policy. Omitting the policy and leaving + // negative_caching enabled will use Cloud CDN's default cache TTLs. + // Note that when specifying an explicit negative_caching_policy, you + // should take care to specify a cache TTL for all response codes that + // you wish to cache. Cloud CDN will not apply any default negative + // caching when a policy exists. + NegativeCachingPolicy []*BackendServiceCdnPolicyNegativeCachingPolicy `json:"negativeCachingPolicy,omitempty"` + + // ServeWhileStale: Serve existing content from the cache (if available) + // when revalidating content with the origin, or when an error is + // encountered when refreshing the cache. 
This setting defines the + // default "max-stale" duration for any cached responses that do not + // specify a max-stale directive. Stale responses that exceed the TTL + // configured here will not be served. The default limit (max-stale) is + // 86400s (1 day), which will allow stale content to be served up to + // this limit beyond the max-age (or s-max-age) of a cached response. + // The maximum allowed value is 604800 (1 week). Set this to zero (0) to + // disable serve-while-stale. + ServeWhileStale int64 `json:"serveWhileStale,omitempty"` + // SignedUrlCacheMaxAgeSec: Maximum number of seconds the response to a // signed URL request will be considered fresh. After this time period, // the response will be revalidated before being served. Defaults to 1hr @@ -4901,7 +5317,41 @@ type BackendServiceCdnPolicy struct { // request URLs. SignedUrlKeyNames []string `json:"signedUrlKeyNames,omitempty"` - // ForceSendFields is a list of field names (e.g. "CacheKeyPolicy") to + // ForceSendFields is a list of field names (e.g. + // "BypassCacheOnRequestHeaders") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "BypassCacheOnRequestHeaders") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceCdnPolicy) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceCdnPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendServiceCdnPolicyBypassCacheOnRequestHeader: Bypass the cache +// when the specified request headers are present, e.g. Pragma or +// Authorization headers. Values are case insensitive. The presence of +// such a header overrides the cache_mode setting. +type BackendServiceCdnPolicyBypassCacheOnRequestHeader struct { + // HeaderName: The header field name to match on when bypassing cache. + // Values are case-insensitive. + HeaderName string `json:"headerName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HeaderName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -4909,18 +5359,55 @@ type BackendServiceCdnPolicy struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CacheKeyPolicy") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. 
"HeaderName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *BackendServiceCdnPolicy) MarshalJSON() ([]byte, error) { - type NoMethod BackendServiceCdnPolicy +func (s *BackendServiceCdnPolicyBypassCacheOnRequestHeader) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceCdnPolicyBypassCacheOnRequestHeader + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendServiceCdnPolicyNegativeCachingPolicy: Specify CDN TTLs for +// response error codes. +type BackendServiceCdnPolicyNegativeCachingPolicy struct { + // Code: The HTTP status code to define a TTL against. Only HTTP status + // codes 300, 301, 308, 404, 405, 410, 421, 451 and 501 are can be + // specified as values, and you cannot specify a status code more than + // once. + Code int64 `json:"code,omitempty"` + + // Ttl: The TTL (in seconds) to cache responses with the corresponding + // status code for. The maximum allowed value is 1800s (30 minutes), + // noting that infrequently accessed objects may be evicted from the + // cache before the defined TTL. + Ttl int64 `json:"ttl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceCdnPolicyNegativeCachingPolicy) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceCdnPolicyNegativeCachingPolicy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5041,10 +5528,18 @@ func (s *BackendServiceGroupHealth) MarshalJSON() ([]byte, error) { // BackendServiceIAP: Identity-Aware Proxy type BackendServiceIAP struct { + // Enabled: Whether the serving infrastructure will authenticate and + // authorize all incoming requests. If true, the oauth2ClientId and + // oauth2ClientSecret fields must be non-empty. Enabled bool `json:"enabled,omitempty"` + // Oauth2ClientId: OAuth2 client ID to use for the authentication flow. Oauth2ClientId string `json:"oauth2ClientId,omitempty"` + // Oauth2ClientSecret: OAuth2 client secret to use for the + // authentication flow. For security reasons, this value cannot be + // retrieved via the API. Instead, the SHA-256 hash of the value is + // returned in the oauth2ClientSecretSha256 field. 
Oauth2ClientSecret string `json:"oauth2ClientSecret,omitempty"` // Oauth2ClientSecretSha256: [Output Only] SHA256 hash value for the @@ -5152,6 +5647,7 @@ type BackendServiceListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -5364,6 +5860,7 @@ type BackendServicesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -5690,6 +6187,10 @@ func (s *BfdStatusPacketCounts) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { + // BindingId: A client-specified ID for this binding. Expected to be + // globally unique to support the internal bindings-by-ID API. + BindingId string `json:"bindingId,omitempty"` + // Condition: The condition that is associated with this binding. // // If the condition evaluates to `true`, then this binding applies to @@ -5758,7 +6259,7 @@ type Binding struct { // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` - // ForceSendFields is a list of field names (e.g. "Condition") to + // ForceSendFields is a list of field names (e.g. "BindingId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -5766,7 +6267,7 @@ type Binding struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Condition") to include in + // NullFields is a list of field names (e.g. "BindingId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -5924,6 +6425,18 @@ func (s *CircuitBreakers) MarshalJSON() ([]byte, error) { // discounted rates. For full details, read Signing Up for Committed Use // Discounts. (== resource_for {$api_version}.regionCommitments ==) type Commitment struct { + // Category: The category of the commitment. Category MACHINE specifies + // commitments composed of machine resources such as VCPU or MEMORY, + // listed in resources. Category LICENSE specifies commitments composed + // of software licenses, listed in licenseResources. Note that only + // MACHINE commitments should have a Type specified. + // + // Possible values: + // "CATEGORY_UNSPECIFIED" + // "LICENSE" + // "MACHINE" + Category string `json:"category,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -5944,6 +6457,10 @@ type Commitment struct { // for commitments. Kind string `json:"kind,omitempty"` + // LicenseResource: The license specification required as part of a + // license commitment. + LicenseResource *LicenseResourceCommitment `json:"licenseResource,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. 
Specifically, the name must be 1-63 characters long and @@ -5998,9 +6515,12 @@ type Commitment struct { // Type: The type of commitment, which affects the discount rate and the // eligible resources. Type MEMORY_OPTIMIZED specifies a commitment that - // will only apply to memory optimized machines. + // will only apply to memory optimized machines. Type + // ACCELERATOR_OPTIMIZED specifies a commitment that will only apply to + // accelerator optimized machines. // // Possible values: + // "ACCELERATOR_OPTIMIZED" // "COMPUTE_OPTIMIZED" // "GENERAL_PURPOSE" // "GENERAL_PURPOSE_E2" @@ -6014,21 +6534,20 @@ type Commitment struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Category") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Category") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -6061,6 +6580,9 @@ type CommitmentAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *CommitmentAggregatedListWarning `json:"warning,omitempty"` @@ -6115,6 +6637,7 @@ type CommitmentAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -6270,6 +6793,7 @@ type CommitmentListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -6404,6 +6928,7 @@ type CommitmentsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -6491,6 +7016,7 @@ type Condition struct { // "ATTRIBUTION" // "AUTHORITY" // "CREDENTIALS_TYPE" + // "CREDS_ASSERTION" // "JUSTIFICATION_TYPE" // "NO_ATTR" // "SECURITY_REALM" @@ -6759,7 +7285,6 @@ func (s *CorsPolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// CustomerEncryptionKey: Represents a customer-supplied encryption key type CustomerEncryptionKey struct { // KmsKeyName: The name of the encryption key that is stored in Google // Cloud KMS. @@ -6971,6 +7496,15 @@ type Disk struct { // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` + // Interface: Specifies the disk interface to use for attaching this + // disk, which is either SCSI or NVME. The default is SCSI. + // + // Possible values: + // "NVME" + // "SCSI" + // "UNSPECIFIED" + Interface string `json:"interface,omitempty"` + // Kind: [Output Only] Type of the resource. Always compute#disk for // disks. Kind string `json:"kind,omitempty"` @@ -7007,6 +7541,10 @@ type Disk struct { // use. Licenses []string `json:"licenses,omitempty"` + // MultiWriter: Indicates whether or not the disk can be read/write + // attached to more than one instance. + MultiWriter bool `json:"multiWriter,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -7020,10 +7558,10 @@ type Disk struct { Options string `json:"options,omitempty"` // PhysicalBlockSizeBytes: Physical block size of the persistent disk, - // in bytes. If not present in a request, a default value is used. - // Currently supported sizes are 4096 and 16384, other sizes may be - // added in the future. If an unsupported value is requested, the error - // message will list the supported values for the caller's project. + // in bytes. If not present in a request, a default value is used. The + // currently supported size is 4096, other sizes may be added in the + // future. If an unsupported value is requested, the error message will + // list the supported values for the caller's project. PhysicalBlockSizeBytes int64 `json:"physicalBlockSizeBytes,omitempty,string"` // Region: [Output Only] URL of the region where the disk resides. Only @@ -7044,17 +7582,33 @@ type Disk struct { // resource. SelfLink string `json:"selfLink,omitempty"` - // SizeGb: Size of the persistent disk, specified in GB. You can specify - // this field when creating a persistent disk using the sourceImage or - // sourceSnapshot parameter, or specify it alone to create an empty - // persistent disk. 
+ // SizeGb: Size, in GB, of the persistent disk. You can specify this + // field when creating a persistent disk using the sourceImage, + // sourceSnapshot, or sourceDisk parameter, or specify it alone to + // create an empty persistent disk. // - // If you specify this field along with sourceImage or sourceSnapshot, - // the value of sizeGb must not be less than the size of the sourceImage - // or the size of the snapshot. Acceptable values are 1 to 65536, - // inclusive. + // If you specify this field along with a source, the value of sizeGb + // must not be less than the size of the source. Acceptable values are 1 + // to 65536, inclusive. SizeGb int64 `json:"sizeGb,omitempty,string"` + // SourceDisk: The source disk used to create this disk. You can provide + // this as a partial or full URL to the resource. For example, the + // following are valid values: + // - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk + // - projects/project/zones/zone/disks/disk + // - zones/zone/disks/disk + SourceDisk string `json:"sourceDisk,omitempty"` + + // SourceDiskId: [Output Only] The unique ID of the disk used to create + // this disk. This value identifies the exact disk that was used to + // create this persistent disk. For example, if you created the + // persistent disk from a disk that was later deleted and recreated + // under the same name, the source disk ID would identify the exact + // version of the disk that was used. + SourceDiskId string `json:"sourceDiskId,omitempty"` + // SourceImage: The source image used to create this disk. If the source // image is deleted, this field will not be set. // @@ -7120,6 +7674,12 @@ type Disk struct { // version of the snapshot that was used. SourceSnapshotId string `json:"sourceSnapshotId,omitempty"` + // SourceStorageObject: The full Google Cloud Storage URI where the disk + // image is stored. This file must be a gzip-compressed tarball whose + // name ends in .tar.gz or virtual machine disk whose name ends in vmdk. + // Valid URIs may start with gs:// or https://storage.googleapis.com/. + SourceStorageObject string `json:"sourceStorageObject,omitempty"` + // Status: [Output Only] The status of disk creation. CREATING: Disk is // provisioning. RESTORING: Source data is being copied into the disk. // FAILED: Disk creation failed. READY: Disk is ready for use. DELETING: @@ -7142,7 +7702,7 @@ type Disk struct { // Type: URL of the disk type resource describing which disk type to use // to create the disk. Provide this when creating the disk. For example: - // projects/project/zones/zone/diskTypes/pd-standard or pd-ssd + // projects/project/zones/zone/diskTypes/pd-standard or pd-ssd Type string `json:"type,omitempty"` // Users: [Output Only] Links to the users of the disk (attached @@ -7205,6 +7765,9 @@ type DiskAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *DiskAggregatedListWarning `json:"warning,omitempty"` @@ -7259,6 +7822,7 @@ type DiskAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -7479,6 +8043,7 @@ type DiskListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -7715,6 +8280,9 @@ type DiskTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *DiskTypeAggregatedListWarning `json:"warning,omitempty"` @@ -7769,6 +8337,7 @@ type DiskTypeAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -7924,6 +8493,7 @@ type DiskTypeListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -8058,6 +8628,7 @@ type DiskTypesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -8276,6 +8847,7 @@ type DisksScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -8604,6 +9176,7 @@ type ExchangedPeeringRoutesListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -8975,6 +9548,7 @@ type ExternalVpnGatewayListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -9136,7 +9710,7 @@ type Firewall struct { // EnableLogging: Deprecated in favor of enable in LogConfig. This field // denotes whether to enable logging for a particular firewall rule. If - // logging is enabled, logs will be exported to Stackdriver. + // logging is enabled, logs will be exported to Cloud Logging. EnableLogging bool `json:"enableLogging,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -9148,8 +9722,8 @@ type Firewall struct { Kind string `json:"kind,omitempty"` // LogConfig: This field denotes the logging options for a particular - // firewall rule. If logging is enabled, logs will be exported to - // Stackdriver. + // firewall rule. If logging is enabled, logs will be exported to Cloud + // Logging.
LogConfig *FirewallLogConfig `json:"logConfig,omitempty"` // Name: Name of the resource; provided by the client when the resource @@ -9421,6 +9995,7 @@ type FirewallListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -9621,6 +10196,9 @@ type ForwardingRule struct { // refer to [IP address // specifications](/load-balancing/docs/forwarding-rule-concepts#ip_addre // ss_specifications). + // + // Must be set to `0.0.0.0` when the target is targetGrpcProxy that has + // validateForProxyless field set to true. IPAddress string `json:"IPAddress,omitempty"` // IPProtocol: The IP protocol to which this rule applies. For protocol @@ -9759,7 +10337,7 @@ type ForwardingRule struct { // MetadataFilters: Opaque filter criteria used by Loadbalancer to // restrict routing configuration to a limited set of xDS compliant // clients. In their xDS requests to Loadbalancer, xDS clients present - // node metadata. If a match takes place, the relevant configuration is + // node metadata. When there is a match, the relevant configuration is // made available to those proxies. Otherwise, all the resources (e.g. // TargetHttpProxy, UrlMap) referenced by the ForwardingRule will not be // visible to those proxies. @@ -9767,8 +10345,9 @@ type ForwardingRule struct { // set to MATCH_ANY, at least one of the filterLabels must match the // corresponding label provided in the metadata. If its // filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels - // must match with corresponding labels provided in the - // metadata. + // must match with corresponding labels provided in the metadata. If + // multiple metadataFilters are specified, all of them need to be + // satisfied in order to be considered a match. // metadataFilters specified here will be applifed before those // specified in the UrlMap that this ForwardingRule // references. @@ -9787,10 +10366,9 @@ type ForwardingRule struct { // Network: This field is not used for external load balancing. // - // For INTERNAL and INTERNAL_SELF_MANAGED load balancing, this field - // identifies the network that the load balanced IP should belong to for - // this Forwarding Rule. If this field is not specified, the default - // network will be used. + // For internal load balancing, this field identifies the network that + // the load balanced IP should belong to for this Forwarding Rule. If + // this field is not specified, the default network will be used. Network string `json:"network,omitempty"` // NetworkTier: This signifies the networking tier used for configuring @@ -9815,7 +10393,8 @@ type ForwardingRule struct { // or a target pool. Do not use with a forwarding rule that points to a // backend service. This field is used along with the target field for // TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, - // TargetVpnGateway, TargetPool, TargetInstance. + // TargetGrpcProxy, TargetVpnGateway, TargetPool, + // TargetInstance. 
// // Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets // addressed to ports in the specified range will be forwarded to @@ -9826,6 +10405,7 @@ type ForwardingRule struct { // ports: // - TargetHttpProxy: 80, 8080 // - TargetHttpsProxy: 443 + // - TargetGrpcProxy: Any ports // - TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, // 995, 1688, 1883, 5222 // - TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, @@ -9858,6 +10438,11 @@ type ForwardingRule struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // ServiceDirectoryRegistrations: Service Directory resources to + // register this forwarding rule with. Currently, only supports a single + // Service Directory resource. + ServiceDirectoryRegistrations []*ForwardingRuleServiceDirectoryRegistration `json:"serviceDirectoryRegistrations,omitempty"` + // ServiceLabel: An optional prefix to the service name for this // Forwarding Rule. If specified, the prefix is the first label of the // fully qualified service name. @@ -9878,7 +10463,7 @@ type ForwardingRule struct { // This field is only used for internal load balancing. ServiceName string `json:"serviceName,omitempty"` - // Subnetwork: This field is only used for INTERNAL load balancing. + // Subnetwork: This field is only used for internal load balancing. // // For internal load balancing, this field identifies the subnetwork // that the load balanced IP should belong to for this Forwarding @@ -9890,12 +10475,13 @@ type ForwardingRule struct { Subnetwork string `json:"subnetwork,omitempty"` // Target: The URL of the target resource to receive the matched - // traffic. For regional forwarding rules, this target must live in the + // traffic. For regional forwarding rules, this target must be in the // same region as the forwarding rule. For global forwarding rules, this // target must be a global load balancing resource. The forwarded - // traffic must be of a type appropriate to the target object. For - // INTERNAL_SELF_MANAGED load balancing, only targetHttpProxy is valid, - // not targetHttpsProxy. + // traffic must be of a type appropriate to the target object. For more + // information, see the "Target" column in [Port + // specifications](/load-balancing/docs/forwarding-rule-concepts#ip_addre + // ss_specifications). Target string `json:"target,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -9948,6 +10534,9 @@ type ForwardingRuleAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *ForwardingRuleAggregatedListWarning `json:"warning,omitempty"` @@ -10002,6 +10591,7 @@ type ForwardingRuleAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -10157,6 +10747,7 @@ type ForwardingRuleListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -10262,6 +10853,42 @@ func (s *ForwardingRuleReference) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ForwardingRuleServiceDirectoryRegistration: Describes the +// auto-registration of the Forwarding Rule to Service Directory. The +// region and project of the Service Directory resource generated from +// this registration will be the same as this Forwarding Rule. +type ForwardingRuleServiceDirectoryRegistration struct { + // Namespace: Service Directory namespace to register the forwarding + // rule under. + Namespace string `json:"namespace,omitempty"` + + // Service: Service Directory service to register the forwarding rule + // under. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Namespace") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Namespace") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ForwardingRuleServiceDirectoryRegistration) MarshalJSON() ([]byte, error) { + type NoMethod ForwardingRuleServiceDirectoryRegistration + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ForwardingRulesScopedList struct { // ForwardingRules: A list of forwarding rules contained in this scope. ForwardingRules []*ForwardingRule `json:"forwardingRules,omitempty"` @@ -10318,6 +10945,7 @@ type ForwardingRulesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -10396,6 +11024,73 @@ func (s *ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type GRPCHealthCheck struct { + // GrpcServiceName: The gRPC service name for the health check. This + // field is optional. The value of grpc_service_name has the following + // meanings by convention: + // - Empty service_name means the overall status of all services at the + // backend. + // - Non-empty service_name means the health of that gRPC service, as + // defined by the owner of the service. 
+ // The grpc_service_name can only be ASCII. + GrpcServiceName string `json:"grpcServiceName,omitempty"` + + // Port: The port number for the health check request. Must be specified + // if port_name and port_specification are not set or if + // port_specification is USE_FIXED_PORT. Valid values are 1 through + // 65535. + Port int64 `json:"port,omitempty"` + + // PortName: Port name as defined in InstanceGroup#NamedPort#name. If + // both port and port_name are defined, port takes precedence. The + // port_name should conform to RFC1035. + PortName string `json:"portName,omitempty"` + + // PortSpecification: Specifies how port is selected for health + // checking, can be one of following values: + // USE_FIXED_PORT: The port number in port is used for health + // checking. + // USE_NAMED_PORT: The portName is used for health + // checking. + // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for + // each network endpoint is used for health checking. For other + // backends, the port or named port specified in the Backend Service is + // used for health checking. + // + // + // If not specified, gRPC health check follows behavior specified in + // port and portName fields. + // + // Possible values: + // "USE_FIXED_PORT" + // "USE_NAMED_PORT" + // "USE_SERVING_PORT" + PortSpecification string `json:"portSpecification,omitempty"` + + // ForceSendFields is a list of field names (e.g. "GrpcServiceName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "GrpcServiceName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GRPCHealthCheck) MarshalJSON() ([]byte, error) { + type NoMethod GRPCHealthCheck + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type GlobalNetworkEndpointGroupsAttachEndpointsRequest struct { // NetworkEndpoints: The list of network endpoints to be attached. NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` @@ -10655,6 +11350,7 @@ type GuestOsFeature struct { // "GVNIC" // "MULTI_IP_SUBNET" // "SECURE_BOOT" + // "SEV_CAPABLE" // "UEFI_COMPATIBLE" // "VIRTIO_SCSI_MULTIQUEUE" // "WINDOWS" @@ -10920,10 +11616,11 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { // [Regional](/compute/docs/reference/rest/{$api_version}/regionHealthChe // cks) // -// Internal HTTP(S) load balancers use regional health checks. All other -// types of GCP load balancers and managed instance group auto-healing -// use global health checks. For more information, read Health Check -// Concepts. +// Internal HTTP(S) load balancers must use regional health checks. +// Internal TCP/UDP load balancers can use either regional or global +// health checks. All other types of GCP load balancers and managed +// instance group auto-healing must use global health checks. 
For more +// information, read Health Check Concepts. // // To perform health checks on network load balancers, you must use // either httpHealthChecks or httpsHealthChecks. @@ -10940,6 +11637,8 @@ type HealthCheck struct { // property when you create the resource. Description string `json:"description,omitempty"` + GrpcHealthCheck *GRPCHealthCheck `json:"grpcHealthCheck,omitempty"` + // HealthyThreshold: A so-far unhealthy instance will be marked healthy // after this many consecutive successes. The default value is 2. HealthyThreshold int64 `json:"healthyThreshold,omitempty"` @@ -10991,6 +11690,7 @@ type HealthCheck struct { // must match type field. // // Possible values: + // "GRPC" // "HTTP" // "HTTP2" // "HTTPS" @@ -11108,6 +11808,7 @@ type HealthCheckListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -11295,15 +11996,22 @@ type HealthCheckService struct { // "NO_AGGREGATION" HealthStatusAggregationPolicy string `json:"healthStatusAggregationPolicy,omitempty"` - // HealthStatusAggregationStrategy: Policy for how the results from - // multiple health checks for the same endpoint are aggregated. + // HealthStatusAggregationStrategy: This field is deprecated. Use + // health_status_aggregation_policy instead. + // + // Policy for how the results from multiple health checks for the same + // endpoint are aggregated. // - NO_AGGREGATION. An EndpointHealth message is returned for each // backend in the health check service. // - AND. If any backend's health check reports UNHEALTHY, then // UNHEALTHY is the HealthState of the entire health check service. If // all backend's are healthy, the HealthState of the health check // service is HEALTHY. . - HealthStatusAggregationStrategy interface{} `json:"healthStatusAggregationStrategy,omitempty"` + // + // Possible values: + // "AND" + // "NO_AGGREGATION" + HealthStatusAggregationStrategy string `json:"healthStatusAggregationStrategy,omitempty"` // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. @@ -11485,6 +12193,7 @@ type HealthCheckServicesListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -11585,6 +12294,9 @@ type HealthChecksAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *HealthChecksAggregatedListWarning `json:"warning,omitempty"` @@ -11639,6 +12351,7 @@ type HealthChecksAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -11772,6 +12485,7 @@ type HealthChecksScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -11955,7 +12669,10 @@ type HostRule struct { // Hosts: The list of host patterns to match. 
They must be valid // hostnames with optional port numbers in the format host:port. * // matches any string of ([a-z0-9-.]*). In that case, * must be the - // first character and must be followed in the pattern by either - or .. + // first character and must be followed in the pattern by either - or + // .. + // * based matching is not supported when the URL map is bound to target + // gRPC proxy that has validateForProxyless field set to true. Hosts []string `json:"hosts,omitempty"` // PathMatcher: The name of the PathMatcher to use to match the path @@ -12124,6 +12841,49 @@ func (s *HttpFaultInjection) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HttpFilterConfig: HttpFilterConfiguration supplies additional +// contextual settings for networkservices.HttpFilter resources enabled +// by Traffic Director. +type HttpFilterConfig struct { + // Config: The configuration needed to enable the + // networkservices.HttpFilter resource. The configuration must be YAML + // formatted and only contain fields defined in the protobuf identified + // in configTypeUrl + Config string `json:"config,omitempty"` + + // ConfigTypeUrl: The fully qualified versioned proto3 type url of the + // protobuf that the filter expects for its contextual settings, for + // example: type.googleapis.com/google.protobuf.Struct + ConfigTypeUrl string `json:"configTypeUrl,omitempty"` + + // FilterName: Name of the networkservices.HttpFilter resource this + // configuration belongs to. This name must be known to the xDS client. + // Example: envoy.wasm + FilterName string `json:"filterName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Config") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Config") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpFilterConfig) MarshalJSON() ([]byte, error) { + type NoMethod HttpFilterConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HttpHeaderAction: The request and response header transformations // that take effect before the request is passed along to the selected // backendService. @@ -12182,6 +12942,15 @@ type HttpHeaderMatch struct { // For matching against the HTTP request's authority, use a headerMatch // with the header name ":authority". // For matching a request's method, use the headerName ":method". + // When the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true, only non-binary + // user-specified custom metadata and the `content-type` header are + // supported. 
The following transport-level headers cannot be used in + // header matching rules: `:authority`, `:method`, `:path`, `:scheme`, + // `user-agent`, `accept-encoding`, `content-encoding`, + // `grpc-accept-encoding`, `grpc-encoding`, + // `grpc-previous-rpc-attempts`, `grpc-tags-bin`, `grpc-timeout` and + // `grpc-trace-bin. HeaderName string `json:"headerName,omitempty"` // InvertMatch: If set to false, the headerMatch is considered a match @@ -12466,6 +13235,7 @@ type HttpHealthCheckListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -12741,6 +13511,7 @@ type HttpRouteAction struct { // CorsPolicy: The specification for allowing client side cross-origin // requests. Please see W3C Recommendation for Cross Origin Resource // Sharing + // Not supported when the URL map is bound to target gRPC proxy. CorsPolicy *CorsPolicy `json:"corsPolicy,omitempty"` // FaultInjectionPolicy: The specification for fault injection @@ -12752,6 +13523,8 @@ type HttpRouteAction struct { // aborted by the Loadbalancer for a percentage of requests. // timeout and retry_policy will be ignored by clients that are // configured with a fault_injection_policy. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. FaultInjectionPolicy *HttpFaultInjection `json:"faultInjectionPolicy,omitempty"` // RequestMirrorPolicy: Specifies the policy on how requests intended @@ -12759,9 +13532,14 @@ type HttpRouteAction struct { // service. Loadbalancer does not wait for responses from the shadow // service. Prior to sending traffic to the shadow service, the host / // authority header is suffixed with -shadow. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. RequestMirrorPolicy *RequestMirrorPolicy `json:"requestMirrorPolicy,omitempty"` - // RetryPolicy: Specifies the retry policy associated with this route. + // RetryPolicy: Specifies the retry policy associated with this + // route. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. RetryPolicy *HttpRetryPolicy `json:"retryPolicy,omitempty"` // Timeout: Specifies the timeout for the selected route. Timeout is @@ -12770,20 +13548,26 @@ type HttpRouteAction struct { // Timeout includes all retries. // If not specified, will use the largest timeout among all backend // services associated with the route. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. Timeout *Duration `json:"timeout,omitempty"` // UrlRewrite: The spec to modify the URL of the request, prior to // forwarding the request to the matched service. + // urlRewrite is the only action supported in UrlMaps for external + // HTTP(S) load balancers. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. UrlRewrite *UrlRewrite `json:"urlRewrite,omitempty"` // WeightedBackendServices: A list of weighted backend services to send // traffic to when a route match occurs. The weights determine the // fraction of traffic that flows to their corresponding backend // service. If all traffic needs to go to a single backend service, - // there must be one weightedBackendService with weight set to a non 0 - // number. 
+ // there must be one weightedBackendService with weight set to a + // non-zero number. // Once a backendService is identified and before forwarding the request - // to the backend service, advanced routing actions like Url rewrites + // to the backend service, advanced routing actions such as URL rewrites // and header transformations are applied depending on additional // settings specified in this HttpRouteAction. WeightedBackendServices []*WeightedBackendService `json:"weightedBackendServices,omitempty"` @@ -12826,8 +13610,38 @@ type HttpRouteRule struct { // pathMatchers[].headerAction and after // pathMatchers[].routeRules[].routeAction.weightedBackendService.backend // ServiceWeightAction[].headerAction + // Note that headerAction is not supported for Loadbalancers that have + // their loadBalancingScheme set to EXTERNAL. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` + // HttpFilterConfigs: Outbound route specific configuration for + // networkservices.HttpFilter resources enabled by Traffic Director. + // httpFilterConfigs only applies for Loadbalancers with + // loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule + // for more details. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. + HttpFilterConfigs []*HttpFilterConfig `json:"httpFilterConfigs,omitempty"` + + // HttpFilterMetadata: Outbound route specific metadata supplied to + // networkservices.HttpFilter resources enabled by Traffic Director. + // httpFilterMetadata only applies for Loadbalancers with + // loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule + // for more details. + // The only configTypeUrl supported is + // type.googleapis.com/google.protobuf.Struct + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. + HttpFilterMetadata []*HttpFilterConfig `json:"httpFilterMetadata,omitempty"` + + // MatchRules: The list of criteria for matching attributes of a request + // to this routeRule. This list has OR semantics: the request matches + // this routeRule when any of the matchRules are satisfied. However + // predicates within a given matchRule have AND semantics. All + // predicates within a matchRule must match for the request to match the + // rule. MatchRules []*HttpRouteRuleMatch `json:"matchRules,omitempty"` // Priority: For routeRules within a given pathMatcher, priority @@ -12854,6 +13668,8 @@ type HttpRouteRule struct { // cannot contain any weightedBackendServices. // Only one of urlRedirect, service or // routeAction.weightedBackendService must be set. + // UrlMaps for external HTTP(S) load balancers support only the + // urlRewrite action within a routeRule's routeAction. RouteAction *HttpRouteAction `json:"routeAction,omitempty"` // Service: The full or partial URL of the backend service resource to @@ -12869,7 +13685,9 @@ type HttpRouteRule struct { // UrlRedirect: When this rule is matched, the request is redirected to // a URL specified by urlRedirect. - // If urlRedirect is specified, service or routeAction must not be set. + // If urlRedirect is specified, service or routeAction must not be + // set. + // Not supported when the URL map is bound to target gRPC proxy. UrlRedirect *HttpRedirectAction `json:"urlRedirect,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Description") to @@ -12916,24 +13734,28 @@ type HttpRouteRuleMatch struct { // case sensitive. // The default value is false. // ignoreCase must not be used with regexMatch. + // Not supported when the URL map is bound to target gRPC proxy. IgnoreCase bool `json:"ignoreCase,omitempty"` // MetadataFilters: Opaque filter criteria used by Loadbalancer to // restrict routing configuration to a limited set of xDS compliant // clients. In their xDS requests to Loadbalancer, xDS clients present - // node metadata. If a match takes place, the relevant routing + // node metadata. When there is a match, the relevant routing // configuration is made available to those proxies. // For each metadataFilter in this list, if its filterMatchCriteria is // set to MATCH_ANY, at least one of the filterLabels must match the // corresponding label provided in the metadata. If its // filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels - // must match with corresponding labels provided in the - // metadata. + // must match with corresponding labels provided in the metadata. If + // multiple metadataFilters are specified, all of them need to be + // satisfied in order to be considered a match. // metadataFilters specified here will be applied after those specified // in ForwardingRule that refers to the UrlMap this HttpRouteRuleMatch // belongs to. // metadataFilters only applies to Loadbalancers that have their // loadBalancingScheme set to INTERNAL_SELF_MANAGED. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. MetadataFilters []*MetadataFilter `json:"metadataFilters,omitempty"` // PrefixMatch: For satisfying the matchRule condition, the request's @@ -12947,6 +13769,7 @@ type HttpRouteRuleMatch struct { // QueryParameterMatches: Specifies a list of query parameter match // criteria, all of which must match corresponding query parameters in // the request. + // Not supported when the URL map is bound to target gRPC proxy. QueryParameterMatches []*HttpQueryParameterMatch `json:"queryParameterMatches,omitempty"` // RegexMatch: For satisfying the matchRule condition, the path of the @@ -13152,6 +13975,7 @@ type HttpsHealthCheckListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -13352,12 +14176,14 @@ type Image struct { // taken from the current or a previous instance of a given disk name. SourceDiskId string `json:"sourceDiskId,omitempty"` - // SourceImage: URL of the source image used to create this image. This - // can be a full or valid partial URL. You must provide exactly one of: + // SourceImage: URL of the source image used to create this image. // - // - this property, or - // - the rawDisk.source property, or - // - the sourceDisk property in order to create an image. + // In order to create an image, you must provide the full or partial URL + // of one of the following: + // - The selfLink URL + // - This property + // - The rawDisk.source URL + // - The sourceDisk URL SourceImage string `json:"sourceImage,omitempty"` // SourceImageEncryptionKey: The customer-supplied encryption key of the @@ -13370,13 +14196,16 @@ type Image struct { // taken from the current or a previous instance of a given image name. SourceImageId string `json:"sourceImageId,omitempty"` - // SourceSnapshot: URL of the source snapshot used to create this image. 
- // This can be a full or valid partial URL. You must provide exactly one - // of: - // - this property, or - // - the sourceImage property, or - // - the rawDisk.source property, or - // - the sourceDisk property in order to create an image. + // SourceSnapshot: URL of the source snapshot used to create this + // image. + // + // In order to create an image, you must provide the full or partial URL + // of one of the following: + // - The selfLink URL + // - This property + // - The sourceImage URL + // - The rawDisk.source URL + // - The sourceDisk URL SourceSnapshot string `json:"sourceSnapshot,omitempty"` // SourceSnapshotEncryptionKey: The customer-supplied encryption key of @@ -13561,6 +14390,7 @@ type ImageListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -13689,6 +14519,8 @@ type Instance struct { // Enabling IP Forwarding. CanIpForward bool `json:"canIpForward,omitempty"` + ConfidentialInstanceConfig *ConfidentialInstanceConfig `json:"confidentialInstanceConfig,omitempty"` + // CpuPlatform: [Output Only] The CPU platform used by this instance. CpuPlatform string `json:"cpuPlatform,omitempty"` @@ -13759,6 +14591,18 @@ type Instance struct { // by the setLabels method. Labels map[string]string `json:"labels,omitempty"` + // LastStartTimestamp: [Output Only] Last start timestamp in RFC3339 + // text format. + LastStartTimestamp string `json:"lastStartTimestamp,omitempty"` + + // LastStopTimestamp: [Output Only] Last stop timestamp in RFC3339 text + // format. + LastStopTimestamp string `json:"lastStopTimestamp,omitempty"` + + // LastSuspendedTimestamp: [Output Only] Last suspended timestamp in + // RFC3339 text format. + LastSuspendedTimestamp string `json:"lastSuspendedTimestamp,omitempty"` + // MachineType: Full or partial URL of the machine type resource to use // for this instance, in the format: // zones/zone/machineTypes/machine-type. This is provided by the client @@ -13844,8 +14688,11 @@ type Instance struct { ShieldedInstanceIntegrityPolicy *ShieldedInstanceIntegrityPolicy `json:"shieldedInstanceIntegrityPolicy,omitempty"` + // ShieldedVmConfig: Deprecating, please use shielded_instance_config. ShieldedVmConfig *ShieldedVmConfig `json:"shieldedVmConfig,omitempty"` + // ShieldedVmIntegrityPolicy: Deprecating, please use + // shielded_instance_integrity_policy. ShieldedVmIntegrityPolicy *ShieldedVmIntegrityPolicy `json:"shieldedVmIntegrityPolicy,omitempty"` // SourceMachineImage: Source machine image @@ -13860,8 +14707,10 @@ type Instance struct { StartRestricted bool `json:"startRestricted,omitempty"` // Status: [Output Only] The status of the instance. One of the - // following values: PROVISIONING, STAGING, RUNNING, STOPPING, STOPPED, - // SUSPENDING, SUSPENDED, and TERMINATED. + // following values: PROVISIONING, STAGING, RUNNING, STOPPING, + // SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more + // information about the status of the instance, see Instance life + // cycle. // // Possible values: // "DEPROVISIONING" @@ -13943,6 +14792,9 @@ type InstanceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *InstanceAggregatedListWarning `json:"warning,omitempty"` @@ -13997,6 +14849,7 @@ type InstanceAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -14211,6 +15064,9 @@ type InstanceGroupAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *InstanceGroupAggregatedListWarning `json:"warning,omitempty"` @@ -14265,6 +15121,7 @@ type InstanceGroupAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -14421,6 +15278,7 @@ type InstanceGroupListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -14773,6 +15631,9 @@ type InstanceGroupManagerAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *InstanceGroupManagerAggregatedListWarning `json:"warning,omitempty"` @@ -14827,6 +15688,7 @@ type InstanceGroupManagerAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -15021,6 +15883,7 @@ type InstanceGroupManagerListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -15214,6 +16077,10 @@ type InstanceGroupManagerStatusStateful struct { // field is deprecated in favor of has_stateful_config. IsStateful bool `json:"isStateful,omitempty"` + // PerInstanceConfigs: [Output Only] Status of per-instance configs on + // the instance. + PerInstanceConfigs *InstanceGroupManagerStatusStatefulPerInstanceConfigs `json:"perInstanceConfigs,omitempty"` + // ForceSendFields is a list of field names (e.g. "HasStatefulConfig") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -15238,6 +16105,35 @@ func (s *InstanceGroupManagerStatusStateful) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstanceGroupManagerStatusStatefulPerInstanceConfigs struct { + // AllEffective: A bit indicating if all of the group's per-instance + // configs (listed in the output of a listPerInstanceConfigs API call) + // have status EFFECTIVE or there are no per-instance-configs. + AllEffective bool `json:"allEffective,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AllEffective") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AllEffective") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerStatusStatefulPerInstanceConfigs) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagerStatusStatefulPerInstanceConfigs + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroupManagerStatusVersionTarget struct { // IsReached: [Output Only] A bit indicating whether version target has // been reached in this managed instance group, i.e. all instances are @@ -15455,6 +16351,11 @@ func (s *InstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, er // InstanceGroupManagersApplyUpdatesRequest: // InstanceGroupManagers.applyUpdatesToInstances type InstanceGroupManagersApplyUpdatesRequest struct { + // AllInstances: Flag to update all instances instead of specified list + // of ?instances?. If the flag is set to true then the instances may not + // be specified in the request. + AllInstances bool `json:"allInstances,omitempty"` + // Instances: The list of URLs of one or more instances for which you // want to apply updates. Each URL can be a full URL or a partial URL, // such as zones/[ZONE]/instances/[INSTANCE_NAME]. @@ -15494,7 +16395,7 @@ type InstanceGroupManagersApplyUpdatesRequest struct { // "RESTART" MostDisruptiveAllowedAction string `json:"mostDisruptiveAllowedAction,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // ForceSendFields is a list of field names (e.g. "AllInstances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15502,10 +16403,10 @@ type InstanceGroupManagersApplyUpdatesRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "AllInstances") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` @@ -15752,6 +16653,7 @@ type InstanceGroupManagersListPerInstanceConfigsRespWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -16003,6 +16905,7 @@ type InstanceGroupManagersScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -16313,6 +17216,7 @@ type InstanceGroupsListInstancesWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -16510,6 +17414,7 @@ type InstanceGroupsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -16702,6 +17607,7 @@ type InstanceListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -16858,6 +17764,7 @@ type InstanceListReferrersWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -17095,7 +18002,7 @@ func (s *InstanceMoveRequest) MarshalJSON() ([]byte, error) { } type InstanceProperties struct { - // CanIpForward: Enables instances created based on this template to + // CanIpForward: Enables instances created based on these properties to // send packets with source IP addresses other than their own and // receive packets with destination IP addresses other than their own. // If these instances will be used as an IP gateway or it will be set as @@ -17109,11 +18016,11 @@ type InstanceProperties struct { ConfidentialInstanceConfig *ConfidentialInstanceConfig `json:"confidentialInstanceConfig,omitempty"` // Description: An optional text description for the instances that are - // created from this instance template. + // created from these properties. Description string `json:"description,omitempty"` // Disks: An array of disks that are associated with the instances that - // are created from this template. + // are created from these properties. Disks []*AttachedDisk `json:"disks,omitempty"` // DisplayDevice: Display Device properties to enable support for remote @@ -17121,24 +18028,24 @@ type InstanceProperties struct { DisplayDevice *DisplayDevice `json:"displayDevice,omitempty"` // GuestAccelerators: A list of guest accelerator cards' type and count - // to use for instances created from the instance template. + // to use for instances created from these properties. GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` - // Labels: Labels to apply to instances that are created from this - // template. + // Labels: Labels to apply to instances that are created from these + // properties. Labels map[string]string `json:"labels,omitempty"` // MachineType: The machine type to use for instances that are created - // from this template. + // from these properties. 
MachineType string `json:"machineType,omitempty"` // Metadata: The metadata key/value pairs to assign to instances that - // are created from this template. These pairs can consist of custom + // are created from these properties. These pairs can consist of custom // metadata or predefined keys. See Project and instance metadata for // more information. Metadata *Metadata `json:"metadata,omitempty"` - // MinCpuPlatform: Minimum cpu/platform to be used by this instance. The + // MinCpuPlatform: Minimum cpu/platform to be used by instances. The // instance may be scheduled on the specified or newer cpu/platform. // Applicable values are the friendly names of CPU platforms, such as // minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy @@ -17150,8 +18057,8 @@ type InstanceProperties struct { // interface. NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` - // PrivateIpv6GoogleAccess: The private IPv6 google access type for the - // VM. If not specified, use INHERIT_FROM_SUBNETWORK as default. + // PrivateIpv6GoogleAccess: The private IPv6 google access type for VMs. + // If not specified, use INHERIT_FROM_SUBNETWORK as default. // // Possible values: // "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE" @@ -17159,34 +18066,34 @@ type InstanceProperties struct { // "INHERIT_FROM_SUBNETWORK" PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` - // ReservationAffinity: Specifies the reservations that this instance - // can consume from. + // ReservationAffinity: Specifies the reservations that instances can + // consume from. ReservationAffinity *ReservationAffinity `json:"reservationAffinity,omitempty"` // ResourcePolicies: Resource policies (names, not ULRs) applied to - // instances created from this template. + // instances created from these properties. ResourcePolicies []string `json:"resourcePolicies,omitempty"` // Scheduling: Specifies the scheduling options for the instances that - // are created from this template. + // are created from these properties. Scheduling *Scheduling `json:"scheduling,omitempty"` // ServiceAccounts: A list of service accounts with specified scopes. // Access tokens for these service accounts are available to the - // instances that are created from this template. Use metadata queries - // to obtain the access tokens for these instances. + // instances that are created from these properties. Use metadata + // queries to obtain the access tokens for these instances. ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"` ShieldedInstanceConfig *ShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` // ShieldedVmConfig: Specifies the Shielded VM options for the instances - // that are created from this template. + // that are created from these properties. ShieldedVmConfig *ShieldedVmConfig `json:"shieldedVmConfig,omitempty"` // Tags: A list of tags to apply to the instances that are created from - // this template. The tags identify valid sources or targets for network - // firewalls. The setTags method can modify this list of tags. Each tag - // within the list must comply with RFC1035. + // these properties. The tags identify valid sources or targets for + // network firewalls. The setTags method can modify this list of tags. + // Each tag within the list must comply with RFC1035. Tags *Tags `json:"tags,omitempty"` // ForceSendFields is a list of field names (e.g. 
"CanIpForward") to @@ -17395,6 +18302,7 @@ type InstanceTemplateListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -17741,6 +18649,7 @@ type InstancesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -17938,6 +18847,39 @@ func (s *InstancesSetMinCpuPlatformRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstancesSetNameRequest struct { + // CurrentName: The current name of this resource, used to prevent + // conflicts. Provide the latest name when making a request to change + // name. + CurrentName string `json:"currentName,omitempty"` + + // Name: The name to be applied to the instance. Needs to be RFC 1035 + // compliant. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CurrentName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CurrentName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstancesSetNameRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstancesSetNameRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstancesSetServiceAccountRequest struct { // Email: Email address of the service account. Email string `json:"email,omitempty"` @@ -18515,6 +19457,9 @@ type InterconnectAttachmentAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *InterconnectAttachmentAggregatedListWarning `json:"warning,omitempty"` @@ -18569,6 +19514,7 @@ type InterconnectAttachmentAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -18727,6 +19673,7 @@ type InterconnectAttachmentListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -18940,6 +19887,7 @@ type InterconnectAttachmentsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -19020,7 +19968,7 @@ func (s *InterconnectAttachmentsScopedListWarningData) MarshalJSON() ([]byte, er // InterconnectCircuitInfo: Describes a single physical circuit between // the Customer and Google. CircuitInfo objects are created by Google, -// so all fields are output only. Next id: 4 +// so all fields are output only. type InterconnectCircuitInfo struct { // CustomerDemarcId: Customer-side demarc ID for this circuit. CustomerDemarcId string `json:"customerDemarcId,omitempty"` @@ -19364,6 +20312,7 @@ type InterconnectListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -19641,6 +20590,7 @@ type InterconnectLocationListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -19763,7 +20713,7 @@ func (s *InterconnectLocationRegionInfo) MarshalJSON() ([]byte, error) { } // InterconnectOutageNotification: Description of a planned outage on -// this Interconnect. Next id: 9 +// this Interconnect. type InterconnectOutageNotification struct { // AffectedCircuits: If issue_type is IT_PARTIAL_OUTAGE, a list of the // Google-side circuit IDs that will be affected. @@ -20062,6 +21012,42 @@ func (s *LicenseCodeLicenseAlias) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// LicenseResourceCommitment: Commitment for a particular license +// resource. +type LicenseResourceCommitment struct { + // Amount: The number of licenses purchased. + Amount int64 `json:"amount,omitempty,string"` + + // CoresPerLicense: Specifies the core range of the instance for which + // this license applies. + CoresPerLicense string `json:"coresPerLicense,omitempty"` + + // License: Any applicable license URI. + License string `json:"license,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Amount") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Amount") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LicenseResourceCommitment) MarshalJSON() ([]byte, error) { + type NoMethod LicenseResourceCommitment + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type LicenseResourceRequirements struct { // MinGuestCpuCount: Minimum number of guest cpus required to use the // Instance. Enforced at Instance creation and Instance start. @@ -20168,6 +21154,7 @@ type LicensesListResponseWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -20246,6 +21233,42 @@ func (s *LicensesListResponseWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type LocalDisk struct { + // DiskCount: Specifies the number of such disks. + DiskCount int64 `json:"diskCount,omitempty"` + + // DiskSizeGb: Specifies the size of the disk in base-2 GB. + DiskSizeGb int64 `json:"diskSizeGb,omitempty"` + + // DiskType: Specifies the desired disk type on the node. This disk type + // must be a local storage type (e.g.: local-ssd). Note that for + // nodeTemplates, this should be the name of the disk type and not its + // URL. + DiskType string `json:"diskType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DiskCount") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DiskCount") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LocalDisk) MarshalJSON() ([]byte, error) { + type NoMethod LocalDisk + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // LogConfig: Specifies what kind of log the caller must write type LogConfig struct { // CloudAudit: Cloud audit options. @@ -20456,10 +21479,10 @@ type MachineImage struct { // property when you create the resource. Description string `json:"description,omitempty"` - // GuestFlush: [Input Only] Specifies to create an application - // consistent machine image by informing the OS to prepare for the - // snapshot process. Currently only supported on Windows instances using - // the Volume Shadow Copy Service (VSS). + // GuestFlush: [Input Only] Whether to attempt an application consistent + // machine image by informing the OS to prepare for the snapshot + // process. Currently only supported on Windows instances using the + // Volume Shadow Copy Service (VSS). 
GuestFlush bool `json:"guestFlush,omitempty"` // Id: [Output Only] A unique identifier for this machine image. The @@ -20530,8 +21553,8 @@ type MachineImage struct { // "UPLOADING" Status string `json:"status,omitempty"` - // StorageLocations: GCS bucket storage location of the machine image - // (regional or multi-regional). + // StorageLocations: The regional or multi-regional Cloud Storage bucket + // location where the machine image is stored. StorageLocations []string `json:"storageLocations,omitempty"` // TotalStorageBytes: [Output Only] Total size of the storage used by @@ -20643,6 +21666,7 @@ type MachineImageListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -20736,7 +21760,7 @@ type MachineType struct { CreationTimestamp string `json:"creationTimestamp,omitempty"` // Deprecated: [Output Only] The deprecation status associated with this - // machine type. + // machine type. Only applicable if the machine type is unavailable. Deprecated *DeprecationStatus `json:"deprecated,omitempty"` // Description: [Output Only] An optional textual description of the @@ -20866,6 +21890,9 @@ type MachineTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *MachineTypeAggregatedListWarning `json:"warning,omitempty"` @@ -20920,6 +21947,7 @@ type MachineTypeAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -21075,6 +22103,7 @@ type MachineTypeListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -21209,6 +22238,7 @@ type MachineTypesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -21782,13 +22812,18 @@ type Network struct { // client when the network is created. IPv4Range string `json:"IPv4Range,omitempty"` - // AutoCreateSubnetworks: When set to true, the VPC network is created - // in "auto" mode. When set to false, the VPC network is created in - // "custom" mode. + // AutoCreateSubnetworks: Must be set to create a VPC network. If not + // set, a legacy network is created. + // + // When set to true, the VPC network is created in auto mode. When set + // to false, the VPC network is created in custom mode. // // An auto mode VPC network starts with one subnet per region. Each // subnet has a predetermined range as described in Auto mode VPC // network IP ranges. + // + // For custom mode VPC networks, you can add subnets using the + // subnetworks insert method. AutoCreateSubnetworks bool `json:"autoCreateSubnetworks,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -21811,6 +22846,10 @@ type Network struct { // networks. Kind string `json:"kind,omitempty"` + // Mtu: Maximum Transmission Unit in bytes. 
The minimum value for this + // field is 1460 and the maximum value is 1500 bytes. + Mtu int64 `json:"mtu,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -21862,7 +22901,7 @@ func (s *Network) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkEndpoint: The network endpoint. Next ID: 7 +// NetworkEndpoint: The network endpoint. type NetworkEndpoint struct { // Annotations: Metadata defined as annotations on the network endpoint. Annotations map[string]string `json:"annotations,omitempty"` @@ -21920,15 +22959,28 @@ func (s *NetworkEndpoint) MarshalJSON() ([]byte, error) { // // A network endpoint group (NEG) defines how a set of endpoints should // be reached, whether they are reachable, and where they are located. -// For more information about using NEGs, see Setting up internet NEGs -// or Setting up zonal NEGs. (== resource_for -// {$api_version}.networkEndpointGroups ==) (== resource_for -// {$api_version}.globalNetworkEndpointGroups ==) +// For more information about using NEGs, see Setting up internet NEGs, +// Setting up zonal NEGs, or Setting up serverless NEGs. (== +// resource_for {$api_version}.networkEndpointGroups ==) (== +// resource_for {$api_version}.globalNetworkEndpointGroups ==) (== +// resource_for {$api_version}.regionNetworkEndpointGroups ==) type NetworkEndpointGroup struct { // Annotations: Metadata defined as annotations on the network endpoint // group. Annotations map[string]string `json:"annotations,omitempty"` + // AppEngine: Only valid when networkEndpointType is "SERVERLESS". Only + // one of cloudRun, appEngine or cloudFunction may be set. + AppEngine *NetworkEndpointGroupAppEngine `json:"appEngine,omitempty"` + + // CloudFunction: Only valid when networkEndpointType is "SERVERLESS". + // Only one of cloudRun, appEngine or cloudFunction may be set. + CloudFunction *NetworkEndpointGroupCloudFunction `json:"cloudFunction,omitempty"` + + // CloudRun: Only valid when networkEndpointType is "SERVERLESS". Only + // one of cloudRun, appEngine or cloudFunction may be set. + CloudRun *NetworkEndpointGroupCloudRun `json:"cloudRun,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -21968,14 +23020,22 @@ type NetworkEndpointGroup struct { Network string `json:"network,omitempty"` // NetworkEndpointType: Type of network endpoints in this network - // endpoint group. + // endpoint group. Can be one of GCE_VM_IP_PORT, + // NON_GCP_PRIVATE_IP_PORT, INTERNET_FQDN_PORT, INTERNET_IP_PORT, or + // SERVERLESS. // // Possible values: // "GCE_VM_IP_PORT" // "INTERNET_FQDN_PORT" // "INTERNET_IP_PORT" + // "NON_GCP_PRIVATE_IP_PORT" + // "SERVERLESS" NetworkEndpointType string `json:"networkEndpointType,omitempty"` + // Region: [Output Only] The URL of the region where the network + // endpoint group is located. + Region string `json:"region,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -22042,6 +23102,9 @@ type NetworkEndpointGroupAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. 
+ Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *NetworkEndpointGroupAggregatedListWarning `json:"warning,omitempty"` @@ -22096,6 +23159,7 @@ type NetworkEndpointGroupAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -22174,6 +23238,174 @@ func (s *NetworkEndpointGroupAggregatedListWarningData) MarshalJSON() ([]byte, e return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NetworkEndpointGroupAppEngine: Configuration for an App Engine +// network endpoint group (NEG). The service is optional, may be +// provided explicitly or in the URL mask. The version is optional and +// can only be provided explicitly or in the URL mask when service is +// present. +// +// Note: App Engine service must be in the same project and located in +// the same region as the Serverless NEG. +type NetworkEndpointGroupAppEngine struct { + // Service: Optional serving service. + // + // The service name must be 1-63 characters long, and comply with + // RFC1035. + // + // Example value: "default", "my-service". + Service string `json:"service,omitempty"` + + // UrlMask: A template to parse service and version fields from a + // request URL. URL mask allows for routing to multiple App Engine + // services without having to create multiple Network Endpoint Groups + // and backend services. + // + // For example, the request URLs "foo1-dot-appname.appspot.com/v1" and + // "foo1-dot-appname.appspot.com/v2" can be backed by the same + // Serverless NEG with URL mask "-dot-appname.appspot.com/". The URL + // mask will parse them to { service = "foo1", version = "v1" } and { + // service = "foo1", version = "v2" } respectively. + UrlMask string `json:"urlMask,omitempty"` + + // Version: Optional serving version. + // + // The version must be 1-63 characters long, and comply with + // RFC1035. + // + // Example value: "v1", "v2". + Version string `json:"version,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Service") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Service") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkEndpointGroupAppEngine) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupAppEngine + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkEndpointGroupCloudFunction: Configuration for a Cloud Function +// network endpoint group (NEG). The function must be provided +// explicitly or in the URL mask. 
+// +// Note: Cloud Function must be in the same project and located in the +// same region as the Serverless NEG. +type NetworkEndpointGroupCloudFunction struct { + // Function: A user-defined name of the Cloud Function. + // + // The function name is case-sensitive and must be 1-63 characters + // long. + // + // Example value: "func1". + Function string `json:"function,omitempty"` + + // UrlMask: A template to parse function field from a request URL. URL + // mask allows for routing to multiple Cloud Functions without having to + // create multiple Network Endpoint Groups and backend services. + // + // For example, request URLs "mydomain.com/function1" and + // "mydomain.com/function2" can be backed by the same Serverless NEG + // with URL mask "/". The URL mask will parse them to { function = + // "function1" } and { function = "function2" } respectively. + UrlMask string `json:"urlMask,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Function") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Function") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkEndpointGroupCloudFunction) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupCloudFunction + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkEndpointGroupCloudRun: Configuration for a Cloud Run network +// endpoint group (NEG). The service must be provided explicitly or in +// the URL mask. The tag is optional, may be provided explicitly or in +// the URL mask. +// +// Note: Cloud Run service must be in the same project and located in +// the same region as the Serverless NEG. +type NetworkEndpointGroupCloudRun struct { + // Service: Cloud Run service is the main resource of Cloud Run. + // + // The service must be 1-63 characters long, and comply with + // RFC1035. + // + // Example value: "run-service". + Service string `json:"service,omitempty"` + + // Tag: Optional Cloud Run tag represents the "named-revision" to + // provide additional fine-grained traffic routing information. + // + // The tag must be 1-63 characters long, and comply with + // RFC1035. + // + // Example value: "revision-0010". + Tag string `json:"tag,omitempty"` + + // UrlMask: A template to parse service and tag fields from a request + // URL. URL mask allows for routing to multiple Run services without + // having to create multiple network endpoint groups and backend + // services. + // + // For example, request URLs "foo1.domain.com/bar1" and + // "foo1.domain.com/bar2" can be backed by the same Serverless Network + // Endpoint Group (NEG) with URL mask ".domain.com/". The URL mask will + // parse them to { service="bar1", tag="foo1" } and { service="bar2", + // tag="foo2" } respectively. 
+ UrlMask string `json:"urlMask,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Service") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Service") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkEndpointGroupCloudRun) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupCloudRun + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // NetworkEndpointGroupLbNetworkEndpointGroup: Load balancing specific // fields for network endpoint group. type NetworkEndpointGroupLbNetworkEndpointGroup struct { @@ -22295,6 +23527,7 @@ type NetworkEndpointGroupListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -22572,6 +23805,7 @@ type NetworkEndpointGroupsListNetworkEndpointsWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -22709,6 +23943,7 @@ type NetworkEndpointGroupsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -22982,6 +24217,7 @@ type NetworkListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -23115,6 +24351,9 @@ type NetworkPeering struct { // network is in the same project as the current network. Network string `json:"network,omitempty"` + // PeerMtu: Maximum Transmission Unit in bytes. + PeerMtu int64 `json:"peerMtu,omitempty"` + // State: [Output Only] State for the peering, either `ACTIVE` or // `INACTIVE`. The peering is `ACTIVE` when there's a matching // configuration in the peer network. @@ -23395,7 +24634,9 @@ type NodeGroup struct { Kind string `json:"kind,omitempty"` // MaintenancePolicy: Specifies how to handle instances when a node in - // the group undergoes maintenance. + // the group undergoes maintenance. Set to one of: DEFAULT, + // RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is + // DEFAULT. For more information, see Maintenance policies. // // Possible values: // "DEFAULT" @@ -23414,8 +24655,7 @@ type NodeGroup struct { // be a dash. Name string `json:"name,omitempty"` - // NodeTemplate: The URL of the node template to which this node group - // belongs. + // NodeTemplate: URL of the node template to create the node group from. 
NodeTemplate string `json:"nodeTemplate,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. @@ -23486,6 +24726,9 @@ type NodeGroupAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *NodeGroupAggregatedListWarning `json:"warning,omitempty"` @@ -23540,6 +24783,7 @@ type NodeGroupAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -23620,12 +24864,14 @@ func (s *NodeGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NodeGroupAutoscalingPolicy struct { // MaxNodes: The maximum number of nodes that the group should have. + // Must be set if autoscaling is enabled. Maximum value allowed is 100. MaxNodes int64 `json:"maxNodes,omitempty"` // MinNodes: The minimum number of nodes that the group should have. MinNodes int64 `json:"minNodes,omitempty"` - // Mode: The autoscaling mode. + // Mode: The autoscaling mode. Set to one of: ON, OFF, or + // ONLY_SCALE_OUT. For more information, see Autoscaler modes. // // Possible values: // "MODE_UNSPECIFIED" @@ -23734,6 +24980,7 @@ type NodeGroupListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -23813,6 +25060,9 @@ func (s *NodeGroupListWarningData) MarshalJSON() ([]byte, error) { } type NodeGroupNode struct { + // Accelerators: Accelerators for this node. + Accelerators []*AcceleratorConfig `json:"accelerators,omitempty"` + // CpuOvercommitType: CPU overcommit. // // Possible values: @@ -23821,6 +25071,9 @@ type NodeGroupNode struct { // "NONE" CpuOvercommitType string `json:"cpuOvercommitType,omitempty"` + // Disks: Local disk configurations. + Disks []*LocalDisk `json:"disks,omitempty"` + // Instances: Instances scheduled on this node. Instances []string `json:"instances,omitempty"` @@ -23844,21 +25097,20 @@ type NodeGroupNode struct { // "REPAIRING" Status string `json:"status,omitempty"` - // ForceSendFields is a list of field names (e.g. "CpuOvercommitType") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Accelerators") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CpuOvercommitType") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. 
"Accelerators") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -24002,6 +25254,7 @@ type NodeGroupsListNodesWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -24136,6 +25389,7 @@ type NodeGroupsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -24246,8 +25500,10 @@ func (s *NodeGroupsSetNodeTemplateRequest) MarshalJSON() ([]byte, error) { // // You can use a template to define properties for nodes in a node // group. For more information, read Creating node groups and instances. -// (== resource_for {$api_version}.nodeTemplates ==) (== NextID: 19 ==) +// (== resource_for {$api_version}.nodeTemplates ==) type NodeTemplate struct { + Accelerators []*AcceleratorConfig `json:"accelerators,omitempty"` + // CpuOvercommitType: CPU overcommit. // // Possible values: @@ -24264,6 +25520,8 @@ type NodeTemplate struct { // property when you create the resource. Description string `json:"description,omitempty"` + Disks []*LocalDisk `json:"disks,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` @@ -24333,21 +25591,20 @@ type NodeTemplate struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CpuOvercommitType") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Accelerators") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CpuOvercommitType") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Accelerators") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -24381,6 +25638,9 @@ type NodeTemplateAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. 
SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *NodeTemplateAggregatedListWarning `json:"warning,omitempty"` @@ -24435,6 +25695,7 @@ type NodeTemplateAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -24590,6 +25851,7 @@ type NodeTemplateListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -24754,6 +26016,7 @@ type NodeTemplatesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -24936,6 +26199,9 @@ type NodeTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *NodeTypeAggregatedListWarning `json:"warning,omitempty"` @@ -24990,6 +26256,7 @@ type NodeTypeAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -25145,6 +26412,7 @@ type NodeTypeListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -25279,6 +26547,7 @@ type NodeTypesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -25561,6 +26830,7 @@ type NotificationEndpointListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -25878,6 +27148,7 @@ type OperationWarnings struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -25979,6 +27250,9 @@ type OperationAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *OperationAggregatedListWarning `json:"warning,omitempty"` @@ -26033,6 +27307,7 @@ type OperationAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -26188,6 +27463,7 @@ type OperationListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -26322,6 +27598,7 @@ type OperationsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -26721,6 +27998,9 @@ type PacketMirroringAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *PacketMirroringAggregatedListWarning `json:"warning,omitempty"` @@ -26775,6 +28055,7 @@ type PacketMirroringAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -27000,6 +28281,7 @@ type PacketMirroringListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -27275,6 +28557,7 @@ type PacketMirroringsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -27364,7 +28647,10 @@ type PathMatcher struct { // defaultRouteAction specifies any weightedBackendServices, // defaultService must not be set. Conversely if defaultService is set, // defaultRouteAction cannot contain any weightedBackendServices. - // Only one of defaultRouteAction or defaultUrlRedirect must be set. + // Only one of defaultRouteAction or defaultUrlRedirect must be + // set. + // UrlMaps for external HTTP(S) load balancers support only the + // urlRewrite action within a pathMatcher's defaultRouteAction. DefaultRouteAction *HttpRouteAction `json:"defaultRouteAction,omitempty"` // DefaultService: The full or partial URL to the BackendService @@ -27395,6 +28681,7 @@ type PathMatcher struct { // defaultUrlRedirect. // If defaultUrlRedirect is specified, defaultService or // defaultRouteAction must not be set. + // Not supported when the URL map is bound to target gRPC proxy. DefaultUrlRedirect *HttpRedirectAction `json:"defaultUrlRedirect,omitempty"` // Description: An optional description of this resource. Provide this @@ -27405,6 +28692,11 @@ type PathMatcher struct { // need to take effect for the selected backendService. // HeaderAction specified here are applied after the matching // HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap + // + // Note that headerAction is not supported for Loadbalancers that have + // their loadBalancingScheme set to EXTERNAL. 
+ // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` // Name: The name to which this PathMatcher is referred by the HostRule. @@ -27469,6 +28761,8 @@ type PathRule struct { // service must not be set. Conversely if service is set, routeAction // cannot contain any weightedBackendServices. // Only one of routeAction or urlRedirect must be set. + // UrlMaps for external HTTP(S) load balancers support only the + // urlRewrite action within a pathRule's routeAction. RouteAction *HttpRouteAction `json:"routeAction,omitempty"` // Service: The full or partial URL of the backend service resource to @@ -27484,7 +28778,9 @@ type PathRule struct { // UrlRedirect: When a path pattern is matched, the request is // redirected to a URL specified by urlRedirect. - // If urlRedirect is specified, service or routeAction must not be set. + // If urlRedirect is specified, service or routeAction must not be + // set. + // Not supported when the URL map is bound to target gRPC proxy. UrlRedirect *HttpRedirectAction `json:"urlRedirect,omitempty"` // ForceSendFields is a list of field names (e.g. "Paths") to @@ -27531,6 +28827,18 @@ type PerInstanceConfig struct { // Does not contain preserved state generated from a stateful policy. PreservedState *PreservedState `json:"preservedState,omitempty"` + // Status: The status of applying this per-instance config on the + // corresponding managed instance. + // + // Possible values: + // "APPLYING" + // "DELETING" + // "EFFECTIVE" + // "NONE" + // "UNAPPLIED" + // "UNAPPLIED_DELETION" + Status string `json:"status,omitempty"` + // ForceSendFields is a list of field names (e.g. "Fingerprint") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -27861,7 +29169,8 @@ type Project struct { UsageExportLocation *UsageExportLocation `json:"usageExportLocation,omitempty"` // XpnProjectStatus: [Output Only] The role this project has in a shared - // VPC configuration. Currently only HOST projects are differentiated. + // VPC configuration. Currently, only projects with the host role, which + // is specified by the value HOST, are differentiated. // // Possible values: // "HOST" @@ -28064,18 +29373,22 @@ type Quota struct { // Metric: [Output Only] Name of the quota metric. 
// // Possible values: + // "A2_CPUS" // "AFFINITY_GROUPS" // "AUTOSCALERS" // "BACKEND_BUCKETS" // "BACKEND_SERVICES" // "C2_CPUS" // "COMMITMENTS" + // "COMMITTED_A2_CPUS" // "COMMITTED_C2_CPUS" // "COMMITTED_CPUS" // "COMMITTED_LICENSES" // "COMMITTED_LOCAL_SSD_TOTAL_GB" + // "COMMITTED_MEMORY_OPTIMIZED_CPUS" // "COMMITTED_N2D_CPUS" // "COMMITTED_N2_CPUS" + // "COMMITTED_NVIDIA_A100_GPUS" // "COMMITTED_NVIDIA_K80_GPUS" // "COMMITTED_NVIDIA_P100_GPUS" // "COMMITTED_NVIDIA_P4_GPUS" @@ -28084,6 +29397,8 @@ type Quota struct { // "CPUS" // "CPUS_ALL_REGIONS" // "DISKS_TOTAL_GB" + // "EXTERNAL_NETWORK_LB_FORWARDING_RULES" + // "EXTERNAL_PROTOCOL_FORWARDING_RULES" // "EXTERNAL_VPN_GATEWAYS" // "FIREWALLS" // "FORWARDING_RULES" @@ -28100,18 +29415,23 @@ type Quota struct { // "INTERCONNECT_ATTACHMENTS_TOTAL_MBPS" // "INTERCONNECT_TOTAL_GBPS" // "INTERNAL_ADDRESSES" + // "INTERNAL_TRAFFIC_DIRECTOR_FORWARDING_RULES" // "IN_PLACE_SNAPSHOTS" // "IN_USE_ADDRESSES" // "IN_USE_BACKUP_SCHEDULES" // "IN_USE_SNAPSHOT_SCHEDULES" // "LOCAL_SSD_TOTAL_GB" + // "M1_CPUS" + // "M2_CPUS" // "MACHINE_IMAGES" // "N2D_CPUS" // "N2_CPUS" // "NETWORKS" // "NETWORK_ENDPOINT_GROUPS" + // "NETWORK_FIREWALL_POLICIES" // "NODE_GROUPS" // "NODE_TEMPLATES" + // "NVIDIA_A100_GPUS" // "NVIDIA_K80_GPUS" // "NVIDIA_P100_GPUS" // "NVIDIA_P100_VWS_GPUS" @@ -28123,6 +29443,7 @@ type Quota struct { // "PACKET_MIRRORINGS" // "PREEMPTIBLE_CPUS" // "PREEMPTIBLE_LOCAL_SSD_GB" + // "PREEMPTIBLE_NVIDIA_A100_GPUS" // "PREEMPTIBLE_NVIDIA_K80_GPUS" // "PREEMPTIBLE_NVIDIA_P100_GPUS" // "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS" @@ -28397,6 +29718,7 @@ type RegionAutoscalerListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -28580,6 +29902,7 @@ type RegionDiskTypeListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -28819,6 +30142,7 @@ type RegionInstanceGroupListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -29007,6 +30331,7 @@ type RegionInstanceGroupManagerListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -29177,8 +30502,13 @@ func (s *RegionInstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]by } // RegionInstanceGroupManagersApplyUpdatesRequest: -// InstanceGroupManagers.applyUpdatesToInstances +// RegionInstanceGroupManagers.applyUpdatesToInstances type RegionInstanceGroupManagersApplyUpdatesRequest struct { + // AllInstances: Flag to update all instances instead of specified list + // of ?instances?. If the flag is set to true then the instances may not + // be specified in the request. + AllInstances bool `json:"allInstances,omitempty"` + // Instances: The list of URLs of one or more instances for which you // want to apply updates. Each URL can be a full URL or a partial URL, // such as zones/[ZONE]/instances/[INSTANCE_NAME]. 
@@ -29218,7 +30548,7 @@ type RegionInstanceGroupManagersApplyUpdatesRequest struct { // "RESTART" MostDisruptiveAllowedAction string `json:"mostDisruptiveAllowedAction,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // ForceSendFields is a list of field names (e.g. "AllInstances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -29226,10 +30556,10 @@ type RegionInstanceGroupManagersApplyUpdatesRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "AllInstances") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -29405,6 +30735,7 @@ type RegionInstanceGroupManagersListInstanceConfigsRespWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -29718,6 +31049,7 @@ type RegionInstanceGroupsListInstancesWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -29946,6 +31278,7 @@ type RegionListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -30352,6 +31685,9 @@ type ReservationAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *ReservationAggregatedListWarning `json:"warning,omitempty"` @@ -30406,6 +31742,7 @@ type ReservationAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -30560,6 +31897,7 @@ type ReservationListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -30722,6 +32060,7 @@ type ReservationsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -30932,6 +32271,7 @@ type ResourcePoliciesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -31117,6 +32457,9 @@ type ResourcePolicyAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *ResourcePolicyAggregatedListWarning `json:"warning,omitempty"` @@ -31171,6 +32514,7 @@ type ResourcePolicyAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -31450,6 +32794,7 @@ type ResourcePolicyListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -31645,6 +32990,9 @@ func (s *ResourcePolicySnapshotSchedulePolicySchedule) MarshalJSON() ([]byte, er // ResourcePolicySnapshotSchedulePolicySnapshotProperties: Specified // snapshot properties for scheduled snapshots created by this policy. type ResourcePolicySnapshotSchedulePolicySnapshotProperties struct { + // ChainName: Chain name that the snapshot is created in. + ChainName string `json:"chainName,omitempty"` + // GuestFlush: Indication to perform a 'guest aware' snapshot. GuestFlush bool `json:"guestFlush,omitempty"` @@ -31656,7 +33004,7 @@ type ResourcePolicySnapshotSchedulePolicySnapshotProperties struct { // snapshot (regional or multi-regional). StorageLocations []string `json:"storageLocations,omitempty"` - // ForceSendFields is a list of field names (e.g. "GuestFlush") to + // ForceSendFields is a list of field names (e.g. "ChainName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -31664,7 +33012,7 @@ type ResourcePolicySnapshotSchedulePolicySnapshotProperties struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "GuestFlush") to include in + // NullFields is a list of field names (e.g. "ChainName") to include in // API requests with the JSON null value. 
By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -31907,6 +33255,7 @@ type RouteWarnings struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -32061,6 +33410,7 @@ type RouteListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -32141,7 +33491,7 @@ func (s *RouteListWarningData) MarshalJSON() ([]byte, error) { // Router: Represents a Cloud Router resource. // -// For more information about Cloud Router, read the the Cloud Router +// For more information about Cloud Router, read the Cloud Router // overview. type Router struct { // Bgp: BGP information specific to this router. @@ -32280,6 +33630,9 @@ type RouterAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *RouterAggregatedListWarning `json:"warning,omitempty"` @@ -32334,6 +33687,7 @@ type RouterAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -32446,12 +33800,15 @@ type RouterBgp struct { Asn int64 `json:"asn,omitempty"` // KeepaliveInterval: The interval in seconds between BGP keepalive - // messages that are sent to the peer. Hold time is three times the - // interval at which keepalive messages are sent, and the hold time is - // the maximum number of seconds allowed to elapse between successive - // keepalive messages that BGP receives from a peer. BGP will use the - // smaller of either the local hold time value or the peer's hold time - // value as the hold time for the BGP connection between the two peers. + // messages that are sent to the peer. + // Not currently available publicly. + // Hold time is three times the interval at which keepalive messages are + // sent, and the hold time is the maximum number of seconds allowed to + // elapse between successive keepalive messages that BGP receives from a + // peer. + // BGP will use the smaller of either the local hold time value or the + // peer's hold time value as the hold time for the BGP connection + // between the two peers. // If set, this value must be between 1 and 120. The default is 20. KeepaliveInterval int64 `json:"keepaliveInterval,omitempty"` @@ -32516,12 +33873,15 @@ type RouterBgpPeer struct { AdvertisedRoutePriority int64 `json:"advertisedRoutePriority,omitempty"` // Bfd: BFD configuration for the BGP peering. + // Not currently available publicly. Bfd *RouterBgpPeerBfd `json:"bfd,omitempty"` - // Enable: The status of the BGP peer connection. If set to FALSE, any - // active session with the peer is terminated and all associated routing - // information is removed. If set to TRUE, the peer connection can be - // established with routing information. The default is TRUE. + // Enable: The status of the BGP peer connection. + // Not currently available publicly. 
+ // If set to FALSE, any active session with the peer is terminated and + // all associated routing information is removed. If set to TRUE, the + // peer connection can be established with routing information. The + // default is TRUE. // // Possible values: // "FALSE" @@ -32594,29 +33954,36 @@ type RouterBgpPeerBfd struct { // MinReceiveInterval: The minimum interval, in milliseconds, between // BFD control packets received from the peer router. The actual value // is negotiated between the two routers and is equal to the greater of - // this value and the transmit interval of the other router. If set, - // this value must be between 100 and 30000. The default is 300. + // this value and the transmit interval of the other router. + // Not currently available publicly. + // If set, this value must be between 100 and 30000. + // The default is 300. MinReceiveInterval int64 `json:"minReceiveInterval,omitempty"` // MinTransmitInterval: The minimum interval, in milliseconds, between // BFD control packets transmitted to the peer router. The actual value // is negotiated between the two routers and is equal to the greater of // this value and the corresponding receive interval of the other - // router. If set, this value must be between 100 and 30000. The default - // is 300. + // router. + // Not currently available publicly. + // If set, this value must be between 100 and 30000. + // The default is 300. MinTransmitInterval int64 `json:"minTransmitInterval,omitempty"` // Multiplier: The number of consecutive BFD packets that must be missed - // before BFD declares that a peer is unavailable. If set, the value - // must be a value between 2 and 16. The default is 3. + // before BFD declares that a peer is unavailable. + // Not currently available publicly. + // If set, the value must be a value between 2 and 16. + // The default is 3. Multiplier int64 `json:"multiplier,omitempty"` // SessionInitializationMode: The BFD session initialization mode for - // this BGP peer. If set to ACTIVE, the Cloud Router will initiate the - // BFD session for this BGP peer. If set to PASSIVE, the Cloud Router - // will wait for the peer router to initiate the BFD session for this - // BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The - // default is PASSIVE. + // this BGP peer. + // Not currently available publicly. + // If set to ACTIVE, the Cloud Router will initiate the BFD session for + // this BGP peer. If set to PASSIVE, the Cloud Router will wait for the + // peer router to initiate the BFD session for this BGP peer. If set to + // DISABLED, BFD is disabled for this BGP peer. The default is PASSIVE. // // Possible values: // "ACTIVE" @@ -32792,6 +34159,7 @@ type RouterListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -33167,8 +34535,7 @@ func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterStatusNatStatus: Status of a NAT contained in this router. Next -// tag: 9 +// RouterStatusNatStatus: Status of a NAT contained in this router. type RouterStatusNatStatus struct { // AutoAllocatedNatIps: A list of IPs auto-allocated for NAT. 
Example: // ["1.1.1.1", "129.2.16.89"] @@ -33346,6 +34713,7 @@ type RoutersScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -33664,7 +35032,7 @@ func (s *SavedAttachedDisk) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Scheduling: Sets the scheduling options for an Instance. NextID: 11 +// Scheduling: Sets the scheduling options for an Instance. NextID: 13 type Scheduling struct { // AutomaticRestart: Specifies whether the instance should be // automatically restarted if it is terminated by Compute Engine (not @@ -33697,8 +35065,9 @@ type Scheduling struct { OnHostMaintenance string `json:"onHostMaintenance,omitempty"` // Preemptible: Defines whether the instance is preemptible. This can - // only be set during instance creation, it cannot be set or changed - // after the instance has been created. + // only be set during instance creation or while the instance is stopped + // and therefore, in a `TERMINATED` state. See Instance Life Cycle for + // more information on the possible instance states. Preemptible bool `json:"preemptible,omitempty"` // ForceSendFields is a list of field names (e.g. "AutomaticRestart") to @@ -34106,6 +35475,7 @@ type SecurityPolicyListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -34363,7 +35733,8 @@ type SecurityPolicyRuleMatcherConfig struct { // FIREWALL. Layer4Configs []*SecurityPolicyRuleMatcherConfigLayer4Config `json:"layer4Configs,omitempty"` - // SrcIpRanges: CIDR IP address range. + // SrcIpRanges: CIDR IP address range. Maximum number of src_ip_ranges + // allowed is 10. SrcIpRanges []string `json:"srcIpRanges,omitempty"` // ForceSendFields is a list of field names (e.g. "DestIpRanges") to @@ -34434,26 +35805,33 @@ func (s *SecurityPolicyRuleMatcherConfigLayer4Config) MarshalJSON() ([]byte, err // SecuritySettings: The authentication and authorization settings for a // BackendService. type SecuritySettings struct { - // Authentication: A URL referring to a networksecurity.Authentication - // resource that describes how clients should authenticate with this - // service's backends. If left blank, communications between services - // are not encrypted (i.e., the TLS policy is set to OPEN). When sending - // traffic to this service's backends, the OriginationTls setting of - // Authentication.TransportAuthentication is applied. Refer to the - // Authentication and - // Authentication.TransportAuthentication.OriginationTls resources for - // additional details. authentication only applies to a global - // BackendService with the loadBalancingScheme set to - // INTERNAL_SELF_MANAGED. + // Authentication: [Deprecated] Use clientTlsPolicy instead. Authentication string `json:"authentication,omitempty"` - // SubjectAltNames: Optional. A list of subject alternate names to - // verify the subject identity (SAN) in the certificate presented by the - // server, to authorize the SAN list as identities to run the service - // represented by this BackendService. If specified, the client will - // verify that the server certificate's subject alt name matches one of - // the specified values. 
Only applies to a global BackendService with - // the loadBalancingScheme set to INTERNAL_SELF_MANAGED. + // ClientTlsPolicy: Optional. A URL referring to a + // networksecurity.ClientTlsPolicy resource that describes how clients + // should authenticate with this service's backends. + // clientTlsPolicy only applies to a global BackendService with the + // loadBalancingScheme set to INTERNAL_SELF_MANAGED. + // If left blank, communications are not encrypted. + // Note: This field currently has no impact. + ClientTlsPolicy string `json:"clientTlsPolicy,omitempty"` + + // SubjectAltNames: Optional. A list of Subject Alternative Names (SANs) + // that the client verifies during a mutual TLS handshake with an + // server/endpoint for this BackendService. When the server presents its + // X.509 certificate to the client, the client inspects the + // certificate's subjectAltName field. If the field contains one of the + // specified values, the communication continues. Otherwise, it fails. + // This additional check enables the client to verify that the server is + // authorized to run the requested service. + // Note that the contents of the server certificate's subjectAltName + // field are configured by the Public Key Infrastructure which + // provisions server identities. + // Only applies to a global BackendService with loadBalancingScheme set + // to INTERNAL_SELF_MANAGED. Only applies when BackendService has an + // attached clientTlsPolicy with clientCertificate (mTLS mode). + // Note: This field currently has no impact. SubjectAltNames []string `json:"subjectAltNames,omitempty"` // ForceSendFields is a list of field names (e.g. "Authentication") to @@ -34489,9 +35867,10 @@ type SerialPortOutput struct { // compute#serialPortOutput for serial port output. Kind string `json:"kind,omitempty"` - // Next: [Output Only] The position of the next byte of content from the - // serial console output. Use this value in the next request as the - // start parameter. + // Next: [Output Only] The position of the next byte of content, + // regardless of whether the content exists, following the output + // returned in the `contents` property. Use this value in the next + // request as the start parameter. Next int64 `json:"next,omitempty,string"` // SelfLink: [Output Only] Server-defined URL for this resource. @@ -34499,9 +35878,11 @@ type SerialPortOutput struct { // Start: The starting byte position of the output that was returned. // This should match the start parameter sent with the request. If the - // serial console output exceeds the size of the buffer, older output - // will be overwritten by newer content and the start values will be - // mismatched. + // serial console output exceeds the size of the buffer (1 MB), older + // output is overwritten by newer content. The output start value will + // indicate the byte position of the output that was returned, which + // might be different than the `start` value that was specified in the + // request. Start int64 `json:"start,omitempty,string"` // ServerResponse contains the HTTP response code and headers from the @@ -34926,6 +36307,14 @@ type Snapshot struct { // created by applying resource policy on the target disk. AutoCreated bool `json:"autoCreated,omitempty"` + // ChainName: Creates the new snapshot in the snapshot chain labeled + // with the specified name. The chain name must be 1-63 characters long + // and comply with RFC1035. 
This is an uncommon option only for advanced + // service owners who needs to create separate snapshot chains, for + // example, for chargeback tracking. When you describe your snapshot + // resource, this field is visible only if it has a non-empty value. + ChainName string `json:"chainName,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -34941,6 +36330,12 @@ type Snapshot struct { // snapshot to a disk. DownloadBytes int64 `json:"downloadBytes,omitempty,string"` + // GuestFlush: [Input Only] Whether to attempt an application consistent + // snapshot by informing the OS to prepare for the snapshot process. + // Currently only supported on Windows instances using the Volume Shadow + // Copy Service (VSS). + GuestFlush bool `json:"guestFlush,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` @@ -35002,8 +36397,7 @@ type Snapshot struct { // key and you do not need to provide a key to use the snapshot later. SnapshotEncryptionKey *CustomerEncryptionKey `json:"snapshotEncryptionKey,omitempty"` - // SourceDisk: [Output Only] The source disk used to create this - // snapshot. + // SourceDisk: The source disk used to create this snapshot. SourceDisk string `json:"sourceDisk,omitempty"` // SourceDiskEncryptionKey: The customer-supplied encryption key of the @@ -35151,6 +36545,7 @@ type SnapshotListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -35415,14 +36810,15 @@ func (s *SourceInstanceProperties) MarshalJSON() ([]byte, error) { // set of up to five PEM-encoded certificates. The API call creates an // object (sslCertificate) that holds this data. You can use SSL keys // and certificates to secure connections to a load balancer. For more -// information, read Creating and using SSL certificates and SSL -// certificates quotas and limits. (== resource_for -// {$api_version}.sslCertificates ==) (== resource_for -// {$api_version}.regionSslCertificates ==) +// information, read Creating and using SSL certificates, SSL +// certificates quotas and limits, and Troubleshooting SSL +// certificates. (== resource_for {$api_version}.sslCertificates ==) (== +// resource_for {$api_version}.regionSslCertificates ==) type SslCertificate struct { - // Certificate: A local certificate file. The certificate must be in PEM - // format. The certificate chain must be no greater than 5 certs long. - // The chain must include at least one intermediate cert. + // Certificate: A value read into memory from a certificate file. The + // certificate file must be in PEM format. The certificate chain must be + // no greater than 5 certs long. The chain must include at least one + // intermediate cert. Certificate string `json:"certificate,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -35456,8 +36852,9 @@ type SslCertificate struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // PrivateKey: A write-only private key in PEM format. Only insert - // requests will include this field. + // PrivateKey: A value read into memory from a write-only private key + // file. The private key file must be in PEM format. For security, only + // insert requests include this field. 
PrivateKey string `json:"privateKey,omitempty"` // Region: [Output Only] URL of the region where the regional SSL @@ -35536,6 +36933,9 @@ type SslCertificateAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *SslCertificateAggregatedListWarning `json:"warning,omitempty"` @@ -35590,6 +36990,7 @@ type SslCertificateAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -35745,6 +37146,7 @@ type SslCertificateListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -35959,6 +37361,7 @@ type SslCertificatesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -36113,6 +37516,7 @@ type SslPoliciesListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -36354,6 +37758,7 @@ type SslPolicyWarnings struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -36614,9 +38019,11 @@ type Subnetwork struct { // IpCidrRange: The range of internal addresses that are owned by this // subnetwork. Provide this property when you create the subnetwork. For - // example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and + // example, 10.0.0.0/8 or 100.64.0.0/10. Ranges must be unique and // non-overlapping within a network. Only IPv4 is supported. This field - // can be set only at resource creation time. + // is set at resource creation time. This may be a RFC 1918 IP range, or + // a privately routed, non-RFC 1918 IP range, not belonging to Google. + // The range can be expanded after creation using expandIpCidrRange. IpCidrRange string `json:"ipCidrRange,omitempty"` // Ipv6CidrRange: [Output Only] The range of internal IPv6 addresses @@ -36628,7 +38035,8 @@ type Subnetwork struct { Kind string `json:"kind,omitempty"` // LogConfig: This field denotes the VPC flow logging options for this - // subnetwork. If logging is enabled, logs are exported to Stackdriver. + // subnetwork. If logging is enabled, logs are exported to Cloud + // Logging. LogConfig *SubnetworkLogConfig `json:"logConfig,omitempty"` // Name: The name of the resource, provided by the client when initially @@ -36664,18 +38072,8 @@ type Subnetwork struct { // "DISABLE_GOOGLE_ACCESS" // "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE" // "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE" - // "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE_FOR_SERVICE_ACCOUNTS" PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` - // PrivateIpv6GoogleAccessServiceAccounts: Deprecated in favor of enable - // PrivateIpv6GoogleAccess on instance directly. 
The service accounts - // can be used to selectively turn on Private IPv6 Google Access only on - // the VMs primary service account matching the value. This value only - // takes effect when PrivateIpv6GoogleAccess is - // ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE_FOR_SERVICE_ACCOUNTS or - // ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE_FOR_SERVICE_ACCOUNTS. - PrivateIpv6GoogleAccessServiceAccounts []string `json:"privateIpv6GoogleAccessServiceAccounts,omitempty"` - // Purpose: The purpose of the resource. This field can be either // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created @@ -36781,6 +38179,9 @@ type SubnetworkAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *SubnetworkAggregatedListWarning `json:"warning,omitempty"` @@ -36835,6 +38236,7 @@ type SubnetworkAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -36990,6 +38392,7 @@ type SubnetworkListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -37164,7 +38567,8 @@ type SubnetworkSecondaryRange struct { // secondary range. Provide this property when you create the // subnetwork. Ranges must be unique and non-overlapping with all // primary and secondary IP ranges within a network. Only IPv4 is - // supported. + // supported. This may be a RFC 1918 IP range, or a privately, non-RFC + // 1918 IP range, not belonging to Google. IpCidrRange string `json:"ipCidrRange,omitempty"` // RangeName: The name associated with this subnetwork secondary range, @@ -37282,6 +38686,7 @@ type SubnetworksScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -37500,163 +38905,14 @@ func (s *Tags) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpProxiesScopedList struct { - // TargetHttpProxies: A list of TargetHttpProxies contained in this - // scope. - TargetHttpProxies []*TargetHttpProxy `json:"targetHttpProxies,omitempty"` - - // Warning: Informational warning which replaces the list of backend - // services when the list is empty. - Warning *TargetHttpProxiesScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "TargetHttpProxies") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "TargetHttpProxies") to - // include in API requests with the JSON null value. 
By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetHttpProxiesScopedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxiesScopedList - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetHttpProxiesScopedListWarning: Informational warning which -// replaces the list of backend services when the list is empty. -type TargetHttpProxiesScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetHttpProxiesScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetHttpProxiesScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxiesScopedListWarning - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetHttpProxiesScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. 
Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetHttpProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxiesScopedListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetHttpProxy: Represents a Target HTTP Proxy resource. -// -// Google Compute Engine has two Target HTTP Proxy resources: -// -// * -// [Global](/compute/docs/reference/rest/{$api_version}/targetHttpProxies -// ) * -// [Regional](/compute/docs/reference/rest/{$api_version}/regionTargetHtt -// pProxies) -// -// A target HTTP proxy is a component of GCP HTTP load balancers. -// -// * targetHttpProxies are used by external HTTP load balancers and -// Traffic Director. * regionTargetHttpProxies are used by internal HTTP -// load balancers. +// TargetGrpcProxy: Represents a Target gRPC Proxy resource. // -// Forwarding rules reference a target HTTP proxy, and the target proxy -// then references a URL map. For more information, read Using Target -// Proxies and Forwarding rule concepts. (== resource_for -// {$api_version}.targetHttpProxies ==) (== resource_for -// {$api_version}.regionTargetHttpProxies ==) -type TargetHttpProxy struct { +// A target gRPC proxy is a component of load balancers intended for +// load balancing gRPC traffic. Global forwarding rules reference a +// target gRPC proxy. The Target gRPC Proxy references a URL map which +// specifies how traffic routes to gRPC backend services. (== +// resource_for {$api_version}.targetGrpcProxies ==) +type TargetGrpcProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -37665,12 +38921,21 @@ type TargetHttpProxy struct { // property when you create the resource. Description string `json:"description,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a TargetGrpcProxy. 
An up-to-date + // fingerprint must be provided in order to patch/update the + // TargetGrpcProxy; otherwise, the request will fail with error 412 + // conditionNotMet. To see the latest fingerprint, make a get() request + // to retrieve the TargetGrpcProxy. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource type. The + // server generates this identifier. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of resource. Always compute#targetHttpProxy - // for target HTTP proxies. + // Kind: [Output Only] Type of the resource. Always + // compute#targetGrpcProxy for target grpc proxies. Kind string `json:"kind,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -37682,21 +38947,428 @@ type TargetHttpProxy struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // ProxyBind: This field only applies when the loadBalancingScheme is - // INTERNAL_SELF_MANAGED. When set to true the Envoy binds on the IP - // address specified by the forwarding rule. Default is false. - ProxyBind bool `json:"proxyBind,omitempty"` - - // Region: [Output Only] URL of the region where the regional Target - // HTTP Proxy resides. This field is not applicable to global Target - // HTTP Proxies. - Region string `json:"region,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL with id for the + // resource. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // UrlMap: URL to the UrlMap resource that defines the mapping from URL - // to the BackendService. + // to the BackendService. The protocol field in the BackendService must + // be set to GRPC. + UrlMap string `json:"urlMap,omitempty"` + + // ValidateForProxyless: If true, indicates that the BackendServices + // referenced by the urlMap may be accessed by gRPC applications without + // using a sidecar proxy. This will enable configuration checks on + // urlMap and its referenced BackendServices to not allow unsupported + // features. A gRPC application must use "xds:///" scheme in the target + // URI of the service it is connecting to. If false, indicates that the + // BackendServices referenced by the urlMap will be accessed by gRPC + // applications via a sidecar proxy. In this case, a gRPC application + // must not use "xds:///" scheme in the target URI of the service it is + // connecting to + ValidateForProxyless bool `json:"validateForProxyless,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. 
It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetGrpcProxy) MarshalJSON() ([]byte, error) { + type NoMethod TargetGrpcProxy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetGrpcProxyList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetGrpcProxy resources. + Items []*TargetGrpcProxy `json:"items,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#targetGrpcProxy for target grpc proxies. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetGrpcProxyListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetGrpcProxyList) MarshalJSON() ([]byte, error) { + type NoMethod TargetGrpcProxyList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetGrpcProxyListWarning: [Output Only] Informational warning +// message. +type TargetGrpcProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetGrpcProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetGrpcProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetGrpcProxyListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetGrpcProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetGrpcProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetGrpcProxyListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetHttpProxiesScopedList struct { + // TargetHttpProxies: A list of TargetHttpProxies contained in this + // scope. + TargetHttpProxies []*TargetHttpProxy `json:"targetHttpProxies,omitempty"` + + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. + Warning *TargetHttpProxiesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetHttpProxies") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetHttpProxies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpProxiesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxiesScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpProxiesScopedListWarning: Informational warning which +// replaces the list of backend services when the list is empty. +type TargetHttpProxiesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetHttpProxiesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpProxiesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxiesScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetHttpProxiesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxiesScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpProxy: Represents a Target HTTP Proxy resource. 
+// +// Google Compute Engine has two Target HTTP Proxy resources: +// +// * +// [Global](/compute/docs/reference/rest/{$api_version}/targetHttpProxies +// ) * +// [Regional](/compute/docs/reference/rest/{$api_version}/regionTargetHtt +// pProxies) +// +// A target HTTP proxy is a component of GCP HTTP load balancers. +// +// * targetHttpProxies are used by external HTTP load balancers and +// Traffic Director. * regionTargetHttpProxies are used by internal HTTP +// load balancers. +// +// Forwarding rules reference a target HTTP proxy, and the target proxy +// then references a URL map. For more information, read Using Target +// Proxies and Forwarding rule concepts. (== resource_for +// {$api_version}.targetHttpProxies ==) (== resource_for +// {$api_version}.regionTargetHttpProxies ==) +type TargetHttpProxy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a TargetHttpProxy. An up-to-date + // fingerprint must be provided in order to patch/update the + // TargetHttpProxy; otherwise, the request will fail with error 412 + // conditionNotMet. To see the latest fingerprint, make a get() request + // to retrieve the TargetHttpProxy. + Fingerprint string `json:"fingerprint,omitempty"` + + // HttpFilters: URLs to networkservices.HttpFilter resources enabled for + // xDS clients using this configuration. For example, + // https://networkservices.googleapis.com/v1alpha1/projects/project/locations/locationhttpFilters/httpFilter Only filters that handle outbound connection and stream events may be specified. These filters work in conjunction with a default set of HTTP filters that may already be configured by Traffic Director. Traffic Director will determine the final location of these filters within xDS configuration based on the name of the HTTP filter. If Traffic Director positions multiple filters at the same location, those filters will be in the same order as specified in this list. + // httpFilters only applies for loadbalancers with loadBalancingScheme + // set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details. + HttpFilters []string `json:"httpFilters,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#targetHttpProxy + // for target HTTP proxies. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // ProxyBind: This field only applies when the loadBalancingScheme is + // INTERNAL_SELF_MANAGED. 
When set to true the Envoy binds on the IP + // address specified by the forwarding rule. Default is false. + ProxyBind bool `json:"proxyBind,omitempty"` + + // Region: [Output Only] URL of the region where the regional Target + // HTTP Proxy resides. This field is not applicable to global Target + // HTTP Proxies. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // UrlMap: URL to the UrlMap resource that defines the mapping from URL + // to the BackendService. UrlMap string `json:"urlMap,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -37751,6 +39423,9 @@ type TargetHttpProxyAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *TargetHttpProxyAggregatedListWarning `json:"warning,omitempty"` @@ -37805,6 +39480,7 @@ type TargetHttpProxyAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -37961,6 +39637,7 @@ type TargetHttpProxyListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -38096,6 +39773,7 @@ type TargetHttpsProxiesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -38258,30 +39936,23 @@ func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, err // {$api_version}.targetHttpsProxies ==) (== resource_for // {$api_version}.regionTargetHttpsProxies ==) type TargetHttpsProxy struct { - // Authentication: A URL referring to a networksecurity.Authentication - // resource that describes how the proxy should authenticate inbound - // traffic. If left blank, communications between services are not - // encrypted (i.e., the TLS policy is set to OPEN). When terminating - // inbound traffic to this proxy, the TerminationTls setting of - // Authentication.TransportAuthentication is applied. - // Refer to the Authentication and - // Authentication.TransportAuthentication.TerminationTls resources for - // additional details. - // authentication only applies to a global TargetHttpsProxy attached to - // globalForwardingRules with the loadBalancingScheme set to - // INTERNAL_SELF_MANAGED. + // Authentication: [Deprecated] Use serverTlsPolicy instead. Authentication string `json:"authentication,omitempty"` - // Authorization: A URL referring to a networksecurity.Authorization - // resource that describes how the proxy should authorize inbound - // traffic. If left blank, access will not be restricted by an - // authorization policy. - // Refer to the Authorization resource for additional + // Authorization: [Deprecated] Use authorizationPolicy instead. + Authorization string `json:"authorization,omitempty"` + + // AuthorizationPolicy: Optional. 
A URL referring to a + // networksecurity.AuthorizationPolicy resource that describes how the + // proxy should authorize inbound traffic. If left blank, access will + // not be restricted by an authorization policy. + // Refer to the AuthorizationPolicy resource for additional // details. - // authorization only applies to a global TargetHttpsProxy attached to - // globalForwardingRules with the loadBalancingScheme set to + // authorizationPolicy only applies to a global TargetHttpsProxy + // attached to globalForwardingRules with the loadBalancingScheme set to // INTERNAL_SELF_MANAGED. - Authorization string `json:"authorization,omitempty"` + // Note: This field currently has no impact. + AuthorizationPolicy string `json:"authorizationPolicy,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -38291,6 +39962,13 @@ type TargetHttpsProxy struct { // property when you create the resource. Description string `json:"description,omitempty"` + // HttpFilters: URLs to networkservices.HttpFilter resources enabled for + // xDS clients using this configuration. For example, + // https://networkservices.googleapis.com/beta/projects/project/locations/locationhttpFilters/httpFilter Only filters that handle outbound connection and stream events may be specified. These filters work in conjunction with a default set of HTTP filters that may already be configured by Traffic Director. Traffic Director will determine the final location of these filters within xDS configuration based on the name of the HTTP filter. If Traffic Director positions multiple filters at the same location, those filters will be in the same order as specified in this list. + // httpFilters only applies for loadbalancers with loadBalancingScheme + // set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details. + HttpFilters []string `json:"httpFilters,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` @@ -38340,6 +40018,16 @@ type TargetHttpsProxy struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // ServerTlsPolicy: Optional. A URL referring to a + // networksecurity.ServerTlsPolicy resource that describes how the proxy + // should authenticate inbound traffic. + // serverTlsPolicy only applies to a global TargetHttpsProxy attached to + // globalForwardingRules with the loadBalancingScheme set to + // INTERNAL_SELF_MANAGED. + // If left blank, communications are not encrypted. + // Note: This field currently has no impact. + ServerTlsPolicy string `json:"serverTlsPolicy,omitempty"` + // SslCertificates: URLs to SslCertificate resources that are used to // authenticate connections between users and the load balancer. At // least one SSL certificate must be specified. Currently, you may @@ -38412,6 +40100,9 @@ type TargetHttpsProxyAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *TargetHttpsProxyAggregatedListWarning `json:"warning,omitempty"` @@ -38466,6 +40157,7 @@ type TargetHttpsProxyAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -38622,6 +40314,7 @@ type TargetHttpsProxyListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -38750,6 +40443,11 @@ type TargetInstance struct { // "NO_NAT" NatPolicy string `json:"natPolicy,omitempty"` + // Network: The URL of the network this target instance uses to forward + // traffic. If not specified, the traffic will be forwarded to the + // network that the default network interface belongs to. + Network string `json:"network,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -38808,6 +40506,9 @@ type TargetInstanceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *TargetInstanceAggregatedListWarning `json:"warning,omitempty"` @@ -38862,6 +40563,7 @@ type TargetInstanceAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -39017,6 +40719,7 @@ type TargetInstanceListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -39151,6 +40854,7 @@ type TargetInstancesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -39283,8 +40987,8 @@ type TargetPool struct { // HealthChecks: The URL of the HttpHealthCheck resource. A member // instance in this pool is considered healthy if and only if the health // checks pass. An empty list means all member instances will be - // considered healthy at all times. Only HttpHealthChecks are supported. - // Only one health check may be specified. + // considered healthy at all times. Only legacy HttpHealthChecks are + // supported. Only one health check may be specified. HealthChecks []string `json:"healthChecks,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -39402,6 +41106,9 @@ type TargetPoolAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *TargetPoolAggregatedListWarning `json:"warning,omitempty"` @@ -39456,6 +41163,7 @@ type TargetPoolAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -39646,6 +41354,7 @@ type TargetPoolListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -39898,6 +41607,7 @@ type TargetPoolsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -40256,6 +41966,7 @@ type TargetSslProxyListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -40547,6 +42258,7 @@ type TargetTcpProxyListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -40756,6 +42468,9 @@ type TargetVpnGatewayAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *TargetVpnGatewayAggregatedListWarning `json:"warning,omitempty"` @@ -40810,6 +42525,7 @@ type TargetVpnGatewayAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -40966,6 +42682,7 @@ type TargetVpnGatewayListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -41101,6 +42818,7 @@ type TargetVpnGatewaysScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -41180,12 +42898,18 @@ func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { } type TestFailure struct { + // ActualService: BackendService or BackendBucket returned by load + // balancer. ActualService string `json:"actualService,omitempty"` + // ExpectedService: Expected BackendService or BackendBucket resource + // the given URL should be mapped to. ExpectedService string `json:"expectedService,omitempty"` + // Host: Host portion of the URL. Host string `json:"host,omitempty"` + // Path: Path portion including query parameters in the URL. Path string `json:"path,omitempty"` // ForceSendFields is a list of field names (e.g. "ActualService") to @@ -41287,6 +43011,14 @@ func (s *TestPermissionsResponse) MarshalJSON() ([]byte, error) { // Director. * regionUrlMaps are used by internal HTTP(S) load // balancers. 
// +// For a list of supported URL map features by load balancer type, see +// the Load balancing features: Routing and traffic management +// table. +// +// For a list of supported URL map features for Traffic Director, see +// the Traffic Director features: Routing and traffic management +// table. +// // This resource defines mappings from host names and URL paths to // either a backend service or a backend bucket. // @@ -41307,7 +43039,12 @@ type UrlMap struct { // any weightedBackendServices, defaultService must not be set. // Conversely if defaultService is set, defaultRouteAction cannot // contain any weightedBackendServices. - // Only one of defaultRouteAction or defaultUrlRedirect must be set. + // Only one of defaultRouteAction or defaultUrlRedirect must be + // set. + // UrlMaps for external HTTP(S) load balancers support only the + // urlRewrite action within defaultRouteAction. + // defaultRouteAction has no effect when the URL map is bound to target + // gRPC proxy that has validateForProxyless field set to true. DefaultRouteAction *HttpRouteAction `json:"defaultRouteAction,omitempty"` // DefaultService: The full or partial URL of the defaultService @@ -41320,12 +43057,15 @@ type UrlMap struct { // service must not be specified. // Only one of defaultService, defaultUrlRedirect or // defaultRouteAction.weightedBackendService must be set. + // defaultService has no effect when the URL map is bound to target gRPC + // proxy that has validateForProxyless field set to true. DefaultService string `json:"defaultService,omitempty"` // DefaultUrlRedirect: When none of the specified hostRules match, the // request is redirected to a URL specified by defaultUrlRedirect. // If defaultUrlRedirect is specified, defaultService or // defaultRouteAction must not be set. + // Not supported when the URL map is bound to target gRPC proxy. DefaultUrlRedirect *HttpRedirectAction `json:"defaultUrlRedirect,omitempty"` // Description: An optional description of this resource. Provide this @@ -41346,6 +43086,10 @@ type UrlMap struct { // need to take effect for the selected backendService. // The headerAction specified here take effect after headerAction // specified under pathMatcher. + // Note that headerAction is not supported for Loadbalancers that have + // their loadBalancingScheme set to EXTERNAL. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` // HostRules: The list of HostRules to use against the URL. @@ -41383,6 +43127,8 @@ type UrlMap struct { // Tests: The list of expected URL mapping tests. Request to update this // UrlMap will succeed only if all of the test cases pass. You can // specify a maximum of 100 tests per UrlMap. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. Tests []*UrlMapTest `json:"tests,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -41489,6 +43235,7 @@ type UrlMapListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -41598,14 +43345,16 @@ type UrlMapTest struct { // Description: Description of this test case. Description string `json:"description,omitempty"` - // Host: Host portion of the URL. + // Host: Host portion of the URL. 
If headers contains a host header, + // then host must also match the header value. Host string `json:"host,omitempty"` // Path: Path portion of the URL. Path string `json:"path,omitempty"` - // Service: Expected BackendService resource the given URL should be - // mapped to. + // Service: Expected BackendService or BackendBucket resource the given + // URL should be mapped to. + // service cannot be set if expectedRedirectResponseCode is set. Service string `json:"service,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -41692,6 +43441,9 @@ type UrlMapsAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *UrlMapsAggregatedListWarning `json:"warning,omitempty"` @@ -41746,6 +43498,7 @@ type UrlMapsAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -41879,6 +43632,7 @@ type UrlMapsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -42205,6 +43959,7 @@ type UsableSubnetworksAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -42495,6 +44250,7 @@ type VmEndpointNatMappingsListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -42689,6 +44445,9 @@ type VpnGatewayAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *VpnGatewayAggregatedListWarning `json:"warning,omitempty"` @@ -42743,6 +44502,7 @@ type VpnGatewayAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -42898,6 +44658,7 @@ type VpnGatewayListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -43250,6 +45011,7 @@ type VpnGatewaysScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -43330,7 +45092,7 @@ func (s *VpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { // VpnTunnel: Represents a Cloud VPN Tunnel resource. // -// For more information about VPN, read the the Cloud VPN Overview. (== +// For more information about VPN, read the the Cloud VPN Overview. 
(== // resource_for {$api_version}.vpnTunnels ==) type VpnTunnel struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -43548,6 +45310,9 @@ type VpnTunnelAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *VpnTunnelAggregatedListWarning `json:"warning,omitempty"` @@ -43602,6 +45367,7 @@ type VpnTunnelAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -43757,6 +45523,7 @@ type VpnTunnelListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -43890,6 +45657,7 @@ type VpnTunnelsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -44054,6 +45822,10 @@ type WeightedBackendService struct { // need to take effect for the selected backendService. // headerAction specified here take effect before headerAction in the // enclosing HttpRouteRule, PathMatcher and UrlMap. + // Note that headerAction is not supported for Loadbalancers that have + // their loadBalancingScheme set to EXTERNAL. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` // Weight: Specifies the fraction of traffic sent to backendService, @@ -44166,6 +45938,7 @@ type XpnHostListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -44433,6 +46206,7 @@ type ZoneListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -44681,6 +46455,15 @@ func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *Accele return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *AcceleratorTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
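The UrlMap hunks above expand the documentation for test cases: each test's service may now name a BackendService or a BackendBucket, it must not be combined with expectedRedirectResponseCode, and an Update is accepted only if every test still maps as expected. The following is a minimal illustrative sketch, not part of this change, assuming the vendored google.golang.org/api/compute/v1 package; the helper name, project, and resource names are placeholders.

package example

import (
	"log"

	compute "google.golang.org/api/compute/v1"
)

// updateURLMapWithTests shows the documented behavior: the Update request
// succeeds only if all attached UrlMapTest cases pass. svc is assumed to be
// an authenticated *compute.Service.
func updateURLMapWithTests(svc *compute.Service, project string) {
	urlMap := &compute.UrlMap{
		Name:           "web-map",
		DefaultService: "global/backendServices/web-backend",
		Tests: []*compute.UrlMapTest{
			{
				Description: "static assets are served from the bucket backend",
				Host:        "www.example.com",
				Path:        "/static/logo.png",
				// Per the updated comments, Service may reference a
				// BackendService or a BackendBucket, but it cannot be set
				// together with expectedRedirectResponseCode.
				Service: "global/backendBuckets/static-assets",
			},
		},
	}
	if _, err := svc.UrlMaps.Update(project, urlMap.Name, urlMap).Do(); err != nil {
		log.Fatalf("update rejected, a test case failed: %v", err)
	}
}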
@@ -44718,7 +46501,7 @@ func (c *AcceleratorTypesAggregatedListCall) Header() http.Header { func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -44821,6 +46604,11 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/acceleratorTypes", @@ -44916,7 +46704,7 @@ func (c *AcceleratorTypesGetCall) Header() http.Header { func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45036,7 +46824,7 @@ type AcceleratorTypesListCall struct { header_ http.Header } -// List: Retrieves a list of accelerator types available to the +// List: Retrieves a list of accelerator types that are available to the // specified project. func (r *AcceleratorTypesService) List(project string, zone string) *AcceleratorTypesListCall { c := &AcceleratorTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -45110,6 +46898,15 @@ func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypes return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *AcceleratorTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AcceleratorTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
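Many of the list calls in this vendor bump gain a ReturnPartialSuccess opt-in, and the aggregated list responses gain an Unreachables field. The sketch below shows how calling code could use the new behavior; it assumes the vendored google.golang.org/api/compute/v1 package, Application Default Credentials, a placeholder project ID, and that AcceleratorTypeAggregatedList carries the same Unreachables field added to the other aggregated list types in this diff.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Opt in to partial success: instead of failing the whole aggregated
	// list when one scope is unavailable, the response reports that scope
	// in Unreachables and still returns the reachable results.
	call := svc.AcceleratorTypes.AggregatedList("my-project").ReturnPartialSuccess(true)
	err = call.Pages(ctx, func(page *compute.AcceleratorTypeAggregatedList) error {
		for _, scope := range page.Unreachables {
			fmt.Printf("scope %s was unreachable, results are partial\n", scope)
		}
		for zone, scoped := range page.Items {
			fmt.Printf("%s: %d accelerator types\n", zone, len(scoped.AcceleratorTypes))
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}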
@@ -45147,7 +46944,7 @@ func (c *AcceleratorTypesListCall) Header() http.Header { func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45210,7 +47007,7 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato } return ret, nil // { - // "description": "Retrieves a list of accelerator types available to the specified project.", + // "description": "Retrieves a list of accelerator types that are available to the specified project.", // "httpMethod": "GET", // "id": "compute.acceleratorTypes.list", // "parameterOrder": [ @@ -45248,6 +47045,11 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -45387,6 +47189,15 @@ func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggr return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *AddressesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AddressesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -45424,7 +47235,7 @@ func (c *AddressesAggregatedListCall) Header() http.Header { func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45527,6 +47338,11 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/addresses", @@ -45631,7 +47447,7 @@ func (c *AddressesDeleteCall) Header() http.Header { func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45800,7 +47616,7 @@ func (c *AddressesGetCall) Header() http.Header { func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45977,7 +47793,7 @@ func (c *AddressesInsertCall) Header() http.Header { func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46172,6 +47988,15 @@ func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *AddressesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AddressesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -46209,7 +48034,7 @@ func (c *AddressesListCall) Header() http.Header { func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46316,6 +48141,11 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/addresses", @@ -46422,7 +48252,7 @@ func (c *AddressesSetLabelsCall) Header() http.Header { func (c *AddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46590,7 +48420,7 @@ func (c *AddressesTestIamPermissionsCall) Header() http.Header { func (c *AddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46799,6 +48629,15 @@ func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *Autoscalers return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *AutoscalersAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AutoscalersAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -46836,7 +48675,7 @@ func (c *AutoscalersAggregatedListCall) Header() http.Header { func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46939,6 +48778,11 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/autoscalers", @@ -47042,7 +48886,7 @@ func (c *AutoscalersDeleteCall) Header() http.Header { func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47211,7 +49055,7 @@ func (c *AutoscalersGetCall) Header() http.Header { func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47387,7 +49231,7 @@ func (c *AutoscalersInsertCall) Header() http.Header { func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47581,6 +49425,15 @@ func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *AutoscalersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AutoscalersListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -47618,7 +49471,7 @@ func (c *AutoscalersListCall) Header() http.Header { func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47719,6 +49572,11 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "Name of the zone for this request.", // "location": "path", @@ -47837,7 +49695,7 @@ func (c *AutoscalersPatchCall) Header() http.Header { func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48002,7 +49860,7 @@ func (c *AutoscalersTestIamPermissionsCall) Header() http.Header { func (c *AutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48190,7 +50048,7 @@ func (c *AutoscalersUpdateCall) Header() http.Header { func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48372,7 +50230,7 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Header() http.Header { func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48544,7 +50402,7 @@ func (c *BackendBucketsDeleteCall) Header() http.Header { func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48711,7 +50569,7 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Header() http.Header { func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48875,7 +50733,7 @@ func (c *BackendBucketsGetCall) Header() http.Header { func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49040,7 +50898,7 @@ func (c *BackendBucketsInsertCall) Header() http.Header { func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49223,6 +51081,15 @@ func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsList return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *BackendBucketsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *BackendBucketsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -49260,7 +51127,7 @@ func (c *BackendBucketsListCall) Header() http.Header { func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49358,6 +51225,11 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/backendBuckets", @@ -49463,7 +51335,7 @@ func (c *BackendBucketsPatchCall) Header() http.Header { func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49639,7 +51511,7 @@ func (c *BackendBucketsUpdateCall) Header() http.Header { func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49815,7 +51687,7 @@ func (c *BackendServicesAddSignedUrlKeyCall) Header() http.Header { func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50019,6 +51891,15 @@ func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *Backend return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *BackendServicesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *BackendServicesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -50056,7 +51937,7 @@ func (c *BackendServicesAggregatedListCall) Header() http.Header { func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50159,6 +52040,11 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/backendServices", @@ -50261,7 +52147,7 @@ func (c *BackendServicesDeleteCall) Header() http.Header { func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50428,7 +52314,7 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Header() http.Header { func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50593,7 +52479,7 @@ func (c *BackendServicesGetCall) Header() http.Header { func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50746,7 +52632,7 @@ func (c *BackendServicesGetHealthCall) Header() http.Header { func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50861,9 +52747,8 @@ type BackendServicesInsertCall struct { } // Insert: Creates a BackendService resource in the specified project -// using the data included in the request. There are several -// restrictions and guidelines to keep in mind when creating a backend -// service. Read Understanding backend services for more information. +// using the data included in the request. 
For more information, see +// Backend services overview. // For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -50918,7 +52803,7 @@ func (c *BackendServicesInsertCall) Header() http.Header { func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50982,7 +52867,7 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Understanding backend services for more information.", + // "description": "Creates a BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.", // "httpMethod": "POST", // "id": "compute.backendServices.insert", // "parameterOrder": [ @@ -51102,6 +52987,15 @@ func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesLi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *BackendServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *BackendServicesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -51139,7 +53033,7 @@ func (c *BackendServicesListCall) Header() http.Header { func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51237,6 +53131,11 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/backendServices", @@ -51286,11 +53185,9 @@ type BackendServicesPatchCall struct { } // Patch: Patches the specified BackendService resource with the data -// included in the request. There are several Understanding backend -// services to keep in mind when updating a backend service. Read -// Understanding backend services for more information. 
This method -// supports PATCH semantics and uses the JSON merge patch format and -// processing rules. +// included in the request. For more information, see Backend services +// overview. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. // For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -51346,7 +53243,7 @@ func (c *BackendServicesPatchCall) Header() http.Header { func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51411,7 +53308,7 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Patches the specified BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Patches the specified BackendService resource with the data included in the request. For more information, see Backend services overview. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", // "id": "compute.backendServices.patch", // "parameterOrder": [ @@ -51522,7 +53419,7 @@ func (c *BackendServicesSetSecurityPolicyCall) Header() http.Header { func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51678,7 +53575,7 @@ func (c *BackendServicesTestIamPermissionsCall) Header() http.Header { func (c *BackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51795,9 +53692,8 @@ type BackendServicesUpdateCall struct { } // Update: Updates the specified BackendService resource with the data -// included in the request. There are several Understanding backend -// services to keep in mind when updating a backend service. Read -// Understanding backend services for more information. +// included in the request. For more information, see Backend services +// overview. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -51853,7 +53749,7 @@ func (c *BackendServicesUpdateCall) Header() http.Header { func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51918,7 +53814,7 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates the specified BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information.", + // "description": "Updates the specified BackendService resource with the data included in the request. For more information, see Backend services overview.", // "httpMethod": "PUT", // "id": "compute.backendServices.update", // "parameterOrder": [ @@ -52058,6 +53954,15 @@ func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggr return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *DiskTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DiskTypesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -52095,7 +54000,7 @@ func (c *DiskTypesAggregatedListCall) Header() http.Header { func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52198,6 +54103,11 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/diskTypes", @@ -52295,7 +54205,7 @@ func (c *DiskTypesGetCall) Header() http.Header { func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52490,6 +54400,15 @@ func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *DiskTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DiskTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -52527,7 +54446,7 @@ func (c *DiskTypesListCall) Header() http.Header { func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52628,6 +54547,11 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -52741,7 +54665,7 @@ func (c *DisksAddResourcePoliciesCall) Header() http.Header { func (c *DisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52955,6 +54879,15 @@ func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedLi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *DisksAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DisksAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
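The BackendServices hunks above reword the Insert, Patch, and Update descriptions to point at the Backend services overview and to spell out that Patch uses JSON merge-patch semantics. A short sketch of what that means for callers, assuming the vendored google.golang.org/api/compute/v1 package; the helper name and arguments are placeholders.

package example

import (
	"log"

	compute "google.golang.org/api/compute/v1"
)

// patchBackendTimeout demonstrates the PATCH semantics described above: only
// the fields set on the request body are merged into the existing backend
// service; everything else keeps its current value.
func patchBackendTimeout(svc *compute.Service, project, name string) {
	patch := &compute.BackendService{
		// Only TimeoutSec is sent (other fields are zero-valued and
		// omitted), so the merge patch changes nothing else.
		TimeoutSec: 60,
	}
	if _, err := svc.BackendServices.Patch(project, name, patch).Do(); err != nil {
		log.Fatalf("patch failed: %v", err)
	}
}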
@@ -52992,7 +54925,7 @@ func (c *DisksAggregatedListCall) Header() http.Header { func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53095,6 +55028,11 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/disks", @@ -53156,7 +55094,7 @@ func (r *DisksService) CreateSnapshot(project string, zone string, disk string, } // GuestFlush sets the optional parameter "guestFlush": [Input Only] -// Specifies to create an application consistent snapshot by informing +// Whether to attempt an application consistent snapshot by informing // the OS to prepare for the snapshot process. Currently only supported // on Windows instances using the Volume Shadow Copy Service (VSS). func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { @@ -53210,7 +55148,7 @@ func (c *DisksCreateSnapshotCall) Header() http.Header { func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53293,7 +55231,7 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "guestFlush": { - // "description": "[Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", + // "description": "[Input Only] Whether to attempt an application consistent snapshot by informing the OS to prepare for the snapshot process. 
Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", // "location": "query", // "type": "boolean" // }, @@ -53403,7 +55341,7 @@ func (c *DisksDeleteCall) Header() http.Header { func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53572,7 +55510,7 @@ func (c *DisksGetCall) Header() http.Header { func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53747,7 +55685,7 @@ func (c *DisksGetIamPolicyCall) Header() http.Header { func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53874,10 +55812,11 @@ type DisksInsertCall struct { } // Insert: Creates a persistent disk in the specified project using the -// data in the request. You can create a disk with a sourceImage, a -// sourceSnapshot, or create an empty 500 GB data disk by omitting all -// properties. You can also create a disk that is larger than the -// default size by specifying the sizeGb property. +// data in the request. You can create a disk from a source +// (sourceImage, sourceSnapshot, or sourceDisk) or create an empty 500 +// GB data disk by omitting all properties. You can also create a disk +// that is larger than the default size by specifying the sizeGb +// property. // For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -53940,7 +55879,7 @@ func (c *DisksInsertCall) Header() http.Header { func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54005,7 +55944,7 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", + // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk from a source (sourceImage, sourceSnapshot, or sourceDisk) or create an empty 500 GB data disk by omitting all properties. 
You can also create a disk that is larger than the default size by specifying the sizeGb property.", // "httpMethod": "POST", // "id": "compute.disks.insert", // "parameterOrder": [ @@ -54140,6 +56079,15 @@ func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *DisksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DisksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -54177,7 +56125,7 @@ func (c *DisksListCall) Header() http.Header { func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54278,6 +56226,11 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -54389,7 +56342,7 @@ func (c *DisksRemoveResourcePoliciesCall) Header() http.Header { func (c *DisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54576,7 +56529,7 @@ func (c *DisksResizeCall) Header() http.Header { func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54744,7 +56697,7 @@ func (c *DisksSetIamPolicyCall) Header() http.Header { func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54926,7 +56879,7 @@ func (c *DisksSetLabelsCall) Header() http.Header { func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55094,7 +57047,7 @@ func (c *DisksTestIamPermissionsCall) Header() http.Header { func (c 
*DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55272,7 +57225,7 @@ func (c *ExternalVpnGatewaysDeleteCall) Header() http.Header { func (c *ExternalVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55430,7 +57383,7 @@ func (c *ExternalVpnGatewaysGetCall) Header() http.Header { func (c *ExternalVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55595,7 +57548,7 @@ func (c *ExternalVpnGatewaysInsertCall) Header() http.Header { func (c *ExternalVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55778,6 +57731,15 @@ func (c *ExternalVpnGatewaysListCall) PageToken(pageToken string) *ExternalVpnGa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ExternalVpnGatewaysListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ExternalVpnGatewaysListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -55815,7 +57777,7 @@ func (c *ExternalVpnGatewaysListCall) Header() http.Header { func (c *ExternalVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55913,6 +57875,11 @@ func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*Externa // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/externalVpnGateways", @@ -55998,7 +57965,7 @@ func (c *ExternalVpnGatewaysSetLabelsCall) Header() http.Header { func (c *ExternalVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56150,7 +58117,7 @@ func (c *ExternalVpnGatewaysTestIamPermissionsCall) Header() http.Header { func (c *ExternalVpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56320,7 +58287,7 @@ func (c *FirewallsDeleteCall) Header() http.Header { func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56478,7 +58445,7 @@ func (c *FirewallsGetCall) Header() http.Header { func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56644,7 +58611,7 @@ func (c *FirewallsInsertCall) Header() http.Header { func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56828,6 +58795,15 @@ func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *FirewallsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *FirewallsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -56865,7 +58841,7 @@ func (c *FirewallsListCall) Header() http.Header { func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56963,6 +58939,11 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/firewalls", @@ -57069,7 +59050,7 @@ func (c *FirewallsPatchCall) Header() http.Header { func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57226,7 +59207,7 @@ func (c *FirewallsTestIamPermissionsCall) Header() http.Header { func (c *FirewallsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57401,7 +59382,7 @@ func (c *FirewallsUpdateCall) Header() http.Header { func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57606,6 +59587,15 @@ func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *Forward return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ForwardingRulesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ForwardingRulesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -57643,7 +59633,7 @@ func (c *ForwardingRulesAggregatedListCall) Header() http.Header { func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57746,6 +59736,11 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/forwardingRules", @@ -57850,7 +59845,7 @@ func (c *ForwardingRulesDeleteCall) Header() http.Header { func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58019,7 +60014,7 @@ func (c *ForwardingRulesGetCall) Header() http.Header { func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58196,7 +60191,7 @@ func (c *ForwardingRulesInsertCall) Header() http.Header { func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58391,6 +60386,15 @@ func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesLi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ForwardingRulesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ForwardingRulesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -58428,7 +60432,7 @@ func (c *ForwardingRulesListCall) Header() http.Header { func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58535,6 +60539,11 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/forwardingRules", @@ -58643,7 +60652,7 @@ func (c *ForwardingRulesPatchCall) Header() http.Header { func (c *ForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58830,7 +60839,7 @@ func (c *ForwardingRulesSetLabelsCall) Header() http.Header { func (c *ForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59018,7 +61027,7 @@ func (c *ForwardingRulesSetTargetCall) Header() http.Header { func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59186,7 +61195,7 @@ func (c *ForwardingRulesTestIamPermissionsCall) Header() http.Header { func (c *ForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59365,7 +61374,7 @@ func (c *GlobalAddressesDeleteCall) Header() http.Header { func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59524,7 +61533,7 @@ func (c *GlobalAddressesGetCall) Header() http.Header { func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59690,7 
+61699,7 @@ func (c *GlobalAddressesInsertCall) Header() http.Header { func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59873,6 +61882,15 @@ func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesLi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *GlobalAddressesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalAddressesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -59910,7 +61928,7 @@ func (c *GlobalAddressesListCall) Header() http.Header { func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60008,6 +62026,11 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/addresses", @@ -60093,7 +62116,7 @@ func (c *GlobalAddressesSetLabelsCall) Header() http.Header { func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60245,7 +62268,7 @@ func (c *GlobalAddressesTestIamPermissionsCall) Header() http.Header { func (c *GlobalAddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60415,7 +62438,7 @@ func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60574,7 +62597,7 @@ func (c *GlobalForwardingRulesGetCall) Header() http.Header { func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60740,7 +62763,7 @@ func (c *GlobalForwardingRulesInsertCall) Header() http.Header { func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60924,6 +62947,15 @@ func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *GlobalForwardingRulesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalForwardingRulesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -60961,7 +62993,7 @@ func (c *GlobalForwardingRulesListCall) Header() http.Header { func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61059,6 +63091,11 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/forwardingRules", @@ -61165,7 +63202,7 @@ func (c *GlobalForwardingRulesPatchCall) Header() http.Header { func (c *GlobalForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61322,7 +63359,7 @@ func (c *GlobalForwardingRulesSetLabelsCall) Header() http.Header { func (c *GlobalForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61494,7 +63531,7 @@ func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61651,7 +63688,7 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) Header() http.Header { func (c *GlobalForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61823,7 +63860,7 @@ func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.He func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61996,7 +64033,7 @@ func (c *GlobalNetworkEndpointGroupsDeleteCall) Header() http.Header { func (c *GlobalNetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62163,7 +64200,7 @@ func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.He func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62328,7 +64365,7 @@ func (c *GlobalNetworkEndpointGroupsGetCall) Header() http.Header { func (c *GlobalNetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62492,7 +64529,7 @@ func (c *GlobalNetworkEndpointGroupsInsertCall) Header() http.Header { func (c *GlobalNetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62675,6 +64712,15 @@ func (c *GlobalNetworkEndpointGroupsListCall) PageToken(pageToken string) *Globa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *GlobalNetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalNetworkEndpointGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -62712,7 +64758,7 @@ func (c *GlobalNetworkEndpointGroupsListCall) Header() http.Header { func (c *GlobalNetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62810,6 +64856,11 @@ func (c *GlobalNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/networkEndpointGroups", @@ -62931,6 +64982,15 @@ func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToke return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -62958,7 +65018,7 @@ func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Head func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63063,6 +65123,11 @@ func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googlea // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", @@ -63196,6 +65261,15 @@ func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *Global return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *GlobalOperationsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -63233,7 +65307,7 @@ func (c *GlobalOperationsAggregatedListCall) Header() http.Header { func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63336,6 +65410,11 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/operations", @@ -63419,7 +65498,7 @@ func (c *GlobalOperationsDeleteCall) Header() http.Header { func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63545,7 +65624,7 @@ func (c *GlobalOperationsGetCall) Header() http.Header { func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63729,6 +65808,15 @@ func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperations return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *GlobalOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalOperationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -63766,7 +65854,7 @@ func (c *GlobalOperationsListCall) Header() http.Header { func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63864,6 +65952,11 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/operations", @@ -63959,7 +66052,7 @@ func (c *GlobalOperationsWaitCall) Header() http.Header { func (c *GlobalOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64106,7 +66199,7 @@ func (c *GlobalOrganizationOperationsDeleteCall) Header() http.Header { func (c *GlobalOrganizationOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64232,7 +66325,7 @@ func (c *GlobalOrganizationOperationsGetCall) Header() http.Header { func (c *GlobalOrganizationOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64416,6 +66509,15 @@ func (c *GlobalOrganizationOperationsListCall) ParentId(parentId string) *Global return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *GlobalOrganizationOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalOrganizationOperationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -64453,7 +66555,7 @@ func (c *GlobalOrganizationOperationsListCall) Header() http.Header { func (c *GlobalOrganizationOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64543,6 +66645,11 @@ func (c *GlobalOrganizationOperationsListCall) Do(opts ...googleapi.CallOption) // "description": "Parent ID for this request.", // "location": "query", // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "locations/global/operations", @@ -64676,6 +66783,15 @@ func (c *HealthChecksAggregatedListCall) PageToken(pageToken string) *HealthChec return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *HealthChecksAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HealthChecksAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -64713,7 +66829,7 @@ func (c *HealthChecksAggregatedListCall) Header() http.Header { func (c *HealthChecksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64816,6 +66932,11 @@ func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Heal // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/healthChecks", @@ -64917,7 +67038,7 @@ func (c *HealthChecksDeleteCall) Header() http.Header { func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65075,7 +67196,7 @@ func (c *HealthChecksGetCall) Header() http.Header { func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65240,7 +67361,7 @@ func (c *HealthChecksInsertCall) Header() http.Header { func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65423,6 +67544,15 @@ func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *HealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HealthChecksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -65460,7 +67590,7 @@ func (c *HealthChecksListCall) Header() http.Header { func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65558,6 +67688,11 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/healthChecks", @@ -65663,7 +67798,7 @@ func (c *HealthChecksPatchCall) Header() http.Header { func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65820,7 +67955,7 @@ func (c *HealthChecksTestIamPermissionsCall) Header() http.Header { func (c *HealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65992,7 +68127,7 @@ func (c *HealthChecksUpdateCall) Header() http.Header { func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66166,7 +68301,7 @@ func (c *HttpHealthChecksDeleteCall) Header() http.Header { func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66325,7 +68460,7 @@ func (c *HttpHealthChecksGetCall) Header() http.Header { func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66491,7 +68626,7 @@ func (c *HttpHealthChecksInsertCall) Header() http.Header { func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range 
c.header_ { reqHeaders[k] = v } @@ -66675,6 +68810,15 @@ func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecks return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *HttpHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HttpHealthChecksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -66712,7 +68856,7 @@ func (c *HttpHealthChecksListCall) Header() http.Header { func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66810,6 +68954,11 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/httpHealthChecks", @@ -66916,7 +69065,7 @@ func (c *HttpHealthChecksPatchCall) Header() http.Header { func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67073,7 +69222,7 @@ func (c *HttpHealthChecksTestIamPermissionsCall) Header() http.Header { func (c *HttpHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67246,7 +69395,7 @@ func (c *HttpHealthChecksUpdateCall) Header() http.Header { func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67419,7 +69568,7 @@ func (c *HttpsHealthChecksDeleteCall) Header() http.Header { func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67577,7 +69726,7 @@ func (c 
*HttpsHealthChecksGetCall) Header() http.Header { func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67742,7 +69891,7 @@ func (c *HttpsHealthChecksInsertCall) Header() http.Header { func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67925,6 +70074,15 @@ func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChec return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *HttpsHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HttpsHealthChecksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -67962,7 +70120,7 @@ func (c *HttpsHealthChecksListCall) Header() http.Header { func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68060,6 +70218,11 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/httpsHealthChecks", @@ -68165,7 +70328,7 @@ func (c *HttpsHealthChecksPatchCall) Header() http.Header { func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68322,7 +70485,7 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Header() http.Header { func (c *HttpsHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68494,7 +70657,7 @@ func (c *HttpsHealthChecksUpdateCall) Header() http.Header { func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68668,7 +70831,7 @@ func (c *ImagesDeleteCall) Header() http.Header { func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68839,7 +71002,7 @@ func (c *ImagesDeprecateCall) Header() http.Header { func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69006,7 +71169,7 @@ func (c *ImagesGetCall) Header() http.Header { func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69163,7 +71326,7 @@ func (c *ImagesGetFromFamilyCall) Header() http.Header { func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69327,7 +71490,7 @@ func (c *ImagesGetIamPolicyCall) Header() http.Header { func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } 
@@ -69506,7 +71669,7 @@ func (c *ImagesInsertCall) Header() http.Header { func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69703,6 +71866,15 @@ func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ImagesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ImagesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -69740,7 +71912,7 @@ func (c *ImagesListCall) Header() http.Header { func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69838,6 +72010,11 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/images", @@ -69874,6 +72051,183 @@ func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) er } } +// method id "compute.images.patch": + +type ImagesPatchCall struct { + s *Service + project string + image string + image2 *Image + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified image with the data included in the +// request. Only the following fields can be modified: family, +// description, deprecation status. +func (r *ImagesService) Patch(project string, image string, image2 *Image) *ImagesPatchCall { + c := &ImagesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.image = image + c.image2 = image2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ImagesPatchCall) RequestId(requestId string) *ImagesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesPatchCall) Fields(s ...googleapi.Field) *ImagesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesPatchCall) Context(ctx context.Context) *ImagesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ImagesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ImagesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.image2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/{image}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "image": c.image, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.images.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified image with the data included in the request. 
Only the following fields can be modified: family, description, deprecation status.", + // "httpMethod": "PATCH", + // "id": "compute.images.patch", + // "parameterOrder": [ + // "project", + // "image" + // ], + // "parameters": { + // "image": { + // "description": "Name of the image resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/images/{image}", + // "request": { + // "$ref": "Image" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.images.setIamPolicy": type ImagesSetIamPolicyCall struct { @@ -69923,7 +72277,7 @@ func (c *ImagesSetIamPolicyCall) Header() http.Header { func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70075,7 +72429,7 @@ func (c *ImagesSetLabelsCall) Header() http.Header { func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70227,7 +72581,7 @@ func (c *ImagesTestIamPermissionsCall) Header() http.Header { func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70416,7 +72770,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" 
gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70628,6 +72982,15 @@ func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *I return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupManagersAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -70665,7 +73028,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70769,6 +73132,11 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/instanceGroupManagers", @@ -70857,7 +73225,7 @@ func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71040,7 +73408,7 @@ func (c *InstanceGroupManagersCreateInstancesCall) Header() http.Header { func (c *InstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71225,7 +73593,7 @@ func (c *InstanceGroupManagersDeleteCall) Header() http.Header { func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71416,7 +73784,7 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" 
gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71582,7 +73950,7 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Header() http.Header func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71753,7 +74121,7 @@ func (c *InstanceGroupManagersGetCall) Header() http.Header { func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71935,7 +74303,7 @@ func (c *InstanceGroupManagersInsertCall) Header() http.Header { func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72128,6 +74496,15 @@ func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGro return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupManagersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -72165,7 +74542,7 @@ func (c *InstanceGroupManagersListCall) Header() http.Header { func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72266,6 +74643,11 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the managed instance group is located.", // "location": "path", @@ -72321,7 +74703,8 @@ type InstanceGroupManagersListErrorsCall struct { } // ListErrors: Lists all errors thrown by actions on instances for a -// given managed instance group. +// given managed instance group. The filter and orderBy query parameters +// are not supported. 
func (r *InstanceGroupManagersService) ListErrors(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListErrorsCall { c := &InstanceGroupManagersListErrorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -72395,6 +74778,15 @@ func (c *InstanceGroupManagersListErrorsCall) PageToken(pageToken string) *Insta return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupManagersListErrorsCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListErrorsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -72432,7 +74824,7 @@ func (c *InstanceGroupManagersListErrorsCall) Header() http.Header { func (c *InstanceGroupManagersListErrorsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72497,7 +74889,7 @@ func (c *InstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Lists all errors thrown by actions on instances for a given managed instance group.", + // "description": "Lists all errors thrown by actions on instances for a given managed instance group. The filter and orderBy query parameters are not supported.", // "httpMethod": "GET", // "id": "compute.instanceGroupManagers.listErrors", // "parameterOrder": [ @@ -72542,6 +74934,11 @@ func (c *InstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", // "location": "path", @@ -72600,7 +74997,8 @@ type InstanceGroupManagersListManagedInstancesCall struct { // indicates the action that the managed instance group is performing on // the instance. For example, if the group is still creating an // instance, the currentAction is CREATING. If a previous action failed, -// the list displays the errors for that failed action. +// the list displays the errors for that failed action. The orderBy +// query parameter is not supported. func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -72674,6 +75072,15 @@ func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken stri return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. 
The default value is +// false and the logic is the same as today. +func (c *InstanceGroupManagersListManagedInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -72701,7 +75108,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72764,7 +75171,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported.", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.listManagedInstances", // "parameterOrder": [ @@ -72809,6 +75216,11 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the managed instance group is located.", // "location": "path", @@ -72863,7 +75275,8 @@ type InstanceGroupManagersListPerInstanceConfigsCall struct { } // ListPerInstanceConfigs: Lists all of the per-instance configs defined -// for the managed instance group. +// for the managed instance group. The orderBy query parameter is not +// supported. func (r *InstanceGroupManagersService) ListPerInstanceConfigs(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListPerInstanceConfigsCall { c := &InstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -72937,6 +75350,15 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken st return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *InstanceGroupManagersListPerInstanceConfigsCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -72964,7 +75386,7 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { func (c *InstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73027,7 +75449,7 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Lists all of the per-instance configs defined for the managed instance group.", + // "description": "Lists all of the per-instance configs defined for the managed instance group. The orderBy query parameter is not supported.", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.listPerInstanceConfigs", // "parameterOrder": [ @@ -73072,6 +75494,11 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the managed instance group is located. 
It should conform to RFC1035.", // "location": "path", @@ -73188,7 +75615,7 @@ func (c *InstanceGroupManagersPatchCall) Header() http.Header { func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73374,7 +75801,7 @@ func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.Header func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73572,7 +75999,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73775,7 +76202,7 @@ func (c *InstanceGroupManagersResizeCall) Header() http.Header { func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73973,7 +76400,7 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Header() http.Header { func (c *InstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74158,7 +76585,7 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74344,7 +76771,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74534,7 +76961,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") 
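
(Editor's note: the hunks above clarify that `listManagedInstances`, `listErrors`, and `listPerInstanceConfigs` do not support `orderBy` (and `listErrors` also rejects `filter`), while gaining the same `returnPartialSuccess` opt-in. The useful part of a `listManagedInstances` response is each instance's `currentAction`, which reports what the managed instance group is currently doing to it. A small sketch, same assumptions as the earlier examples, with placeholder project, zone, and group names.)

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v0.beta"
)

func main() {
	ctx := context.Background()

	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}

	// Placeholders: project, zone and managed instance group name.
	resp, err := svc.InstanceGroupManagers.
		ListManagedInstances("my-project", "us-central1-a", "my-mig").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("listManagedInstances: %v", err)
	}

	// currentAction is e.g. CREATING, RECREATING, DELETING or NONE; errors
	// from a failed previous action surface on the instance's lastAttempt.
	for _, mi := range resp.ManagedInstances {
		fmt.Printf("%s\t%s\n", mi.Instance, mi.CurrentAction)
	}
}
```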
+ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74700,7 +77127,7 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Header() http.Header { func (c *InstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74886,7 +77313,7 @@ func (c *InstanceGroupManagersUpdateCall) Header() http.Header { func (c *InstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75072,7 +77499,7 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75258,7 +77685,7 @@ func (c *InstanceGroupsAddInstancesCall) Header() http.Header { func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75470,6 +77897,15 @@ func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *Instance return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -75507,7 +77943,7 @@ func (c *InstanceGroupsAggregatedListCall) Header() http.Header { func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75610,6 +78046,11 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/instanceGroups", @@ -75716,7 +78157,7 @@ func (c *InstanceGroupsDeleteCall) Header() http.Header { func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75836,8 +78277,11 @@ type InstanceGroupsGetCall struct { header_ http.Header } -// Get: Returns the specified instance group. Gets a list of available -// instance groups by making a list() request. +// Get: Returns the specified zonal instance group. Get a list of +// available zonal instance groups by making a list() request. +// +// For managed instance groups, use the instanceGroupManagers or +// regionInstanceGroupManagers methods instead. func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75883,7 +78327,7 @@ func (c *InstanceGroupsGetCall) Header() http.Header { func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75947,7 +78391,7 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup } return ret, nil // { - // "description": "Returns the specified instance group. Gets a list of available instance groups by making a list() request.", + // "description": "Returns the specified zonal instance group. Get a list of available zonal instance groups by making a list() request.\n\nFor managed instance groups, use the instanceGroupManagers or regionInstanceGroupManagers methods instead.", // "httpMethod": "GET", // "id": "compute.instanceGroups.get", // "parameterOrder": [ @@ -76057,7 +78501,7 @@ func (c *InstanceGroupsInsertCall) Header() http.Header { func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76176,8 +78620,11 @@ type InstanceGroupsListCall struct { header_ http.Header } -// List: Retrieves the list of instance groups that are located in the -// specified project and zone. +// List: Retrieves the list of zonal instance group resources contained +// within the specified zone. +// +// For managed instance groups, use the instanceGroupManagers or +// regionInstanceGroupManagers methods instead. 
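
(Editor's note: the reworded descriptions above draw a line between the zonal `instanceGroups` methods and managed instance groups, which should be read through `instanceGroupManagers` or `regionInstanceGroupManagers` instead. A short sketch of that split, under the same assumptions and with placeholder names.)

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v0.beta"
)

func main() {
	ctx := context.Background()

	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}

	project, zone := "my-project", "us-central1-a" // placeholders

	// instanceGroups.list covers zonal (including unmanaged) groups...
	err = svc.InstanceGroups.List(project, zone).
		Pages(ctx, func(page *compute.InstanceGroupList) error {
			for _, ig := range page.Items {
				fmt.Printf("instance group %s (size %d)\n", ig.Name, ig.Size)
			}
			return nil
		})
	if err != nil {
		log.Fatalf("instanceGroups.list: %v", err)
	}

	// ...while a managed instance group is read through the
	// instanceGroupManagers (or regionInstanceGroupManagers) methods.
	mig, err := svc.InstanceGroupManagers.Get(project, zone, "my-mig").Context(ctx).Do()
	if err != nil {
		log.Fatalf("instanceGroupManagers.get: %v", err)
	}
	fmt.Printf("managed group %s targets %d instances\n", mig.Name, mig.TargetSize)
}
```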
func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -76250,6 +78697,15 @@ func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsList return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -76287,7 +78743,7 @@ func (c *InstanceGroupsListCall) Header() http.Header { func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76350,7 +78806,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou } return ret, nil // { - // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + // "description": "Retrieves the list of zonal instance group resources contained within the specified zone.\n\nFor managed instance groups, use the instanceGroupManagers or regionInstanceGroupManagers methods instead.", // "httpMethod": "GET", // "id": "compute.instanceGroups.list", // "parameterOrder": [ @@ -76388,6 +78844,11 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the instance group is located.", // "location": "path", @@ -76443,6 +78904,7 @@ type InstanceGroupsListInstancesCall struct { } // ListInstances: Lists the instances in the specified instance group. +// The orderBy query parameter is not supported. func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -76517,6 +78979,15 @@ func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceG return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -76544,7 +79015,7 @@ func (c *InstanceGroupsListInstancesCall) Header() http.Header { func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76610,7 +79081,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins } return ret, nil // { - // "description": "Lists the instances in the specified instance group.", + // "description": "Lists the instances in the specified instance group. The orderBy query parameter is not supported.", // "httpMethod": "POST", // "id": "compute.instanceGroups.listInstances", // "parameterOrder": [ @@ -76655,6 +79126,11 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the instance group is located.", // "location": "path", @@ -76773,7 +79249,7 @@ func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76957,7 +79433,7 @@ func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77123,7 +79599,7 @@ func (c *InstanceGroupsTestIamPermissionsCall) Header() http.Header { func (c *InstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77304,7 +79780,7 @@ func (c *InstanceTemplatesDeleteCall) Header() http.Header { func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77463,7 +79939,7 @@ func (c *InstanceTemplatesGetCall) Header() http.Header { func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77627,7 +80103,7 @@ func (c *InstanceTemplatesGetIamPolicyCall) Header() http.Header { func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77802,7 +80278,7 @@ func (c *InstanceTemplatesInsertCall) Header() http.Header { func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77986,6 +80462,15 @@ func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplat return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceTemplatesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -78023,7 +80508,7 @@ func (c *InstanceTemplatesListCall) Header() http.Header { func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78121,6 +80606,11 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/instanceTemplates", @@ -78206,7 +80696,7 @@ func (c *InstanceTemplatesSetIamPolicyCall) Header() http.Header { func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78358,7 +80848,7 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78534,7 +81024,7 @@ func (c *InstancesAddAccessConfigCall) Header() http.Header { func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78729,7 +81219,7 @@ func (c *InstancesAddResourcePoliciesCall) Header() http.Header { func (c *InstancesAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78944,6 +81434,15 @@ func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggr return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstancesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -78981,7 +81480,7 @@ func (c *InstancesAggregatedListCall) Header() http.Header { func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79084,6 +81583,11 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/instances", @@ -79148,8 +81652,9 @@ func (r *InstancesService) AttachDisk(project string, zone string, instance stri } // ForceAttach sets the optional parameter "forceAttach": Whether to -// force attach the disk even if it's currently attached to another -// instance. +// force attach the regional disk even if it's currently attached to +// another instance. If you try to force attach a zonal disk to an +// instance, you will receive an error. func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttachDiskCall { c.urlParams_.Set("forceAttach", fmt.Sprint(forceAttach)) return c @@ -79201,7 +81706,7 @@ func (c *InstancesAttachDiskCall) Header() http.Header { func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79277,7 +81782,7 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // ], // "parameters": { // "forceAttach": { - // "description": "Whether to force attach the disk even if it's currently attached to another instance.", + // "description": "Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error.", // "location": "query", // "type": "boolean" // }, @@ -79392,7 +81897,7 @@ func (c *InstancesDeleteCall) Header() http.Header { func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79572,7 +82077,7 @@ func (c *InstancesDeleteAccessConfigCall) Header() http.Header { func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79764,7 +82269,7 @@ func (c *InstancesDetachDiskCall) Header() http.Header { func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79941,7 +82446,7 @@ func (c *InstancesGetCall) Header() http.Header { func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80110,7 +82615,7 @@ func (c *InstancesGetEffectiveFirewallsCall) Header() http.Header { func (c *InstancesGetEffectiveFirewallsCall) doRequest(alt string) 
(*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80299,7 +82804,7 @@ func (c *InstancesGetGuestAttributesCall) Header() http.Header { func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80484,7 +82989,7 @@ func (c *InstancesGetIamPolicyCall) Header() http.Header { func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80657,7 +83162,7 @@ func (c *InstancesGetScreenshotCall) Header() http.Header { func (c *InstancesGetScreenshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80796,12 +83301,23 @@ func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialP return c } -// Start sets the optional parameter "start": Returns output starting -// from a specific byte position. Use this to page through output when -// the output is too large to return in a single request. For the -// initial request, leave this field unspecified. For subsequent calls, -// this field should be set to the next value returned in the previous -// call. +// Start sets the optional parameter "start": Specifies the starting +// byte position of the output to return. To start with the first byte +// of output to the specified port, omit this field or set it to +// `0`. +// +// If the output for that byte position is available, this field matches +// the `start` parameter sent with the request. If the amount of serial +// console output exceeds the size of the buffer (1 MB), the oldest +// output is discarded and is no longer available. If the requested +// start position refers to discarded output, the start position is +// adjusted to the oldest output still available, and the adjusted start +// position is returned as the `start` property value. +// +// You can also provide a negative start position, which translates to +// the most recent number of bytes written to the serial port. For +// example, -3 is interpreted as the most recent 3 bytes written to the +// serial console. 
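
(Editor's note: the expanded `start` documentation above describes the 1 MB serial-console buffer: the response's `start` is adjusted forward if older output was discarded, `next` is the position to resume from, and a negative `start` means "the most recent N bytes". A sketch of tailing a console with those semantics, same assumptions as the earlier examples; the project, zone, and instance names are placeholders.)

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	compute "google.golang.org/api/compute/v0.beta"
)

func main() {
	ctx := context.Background()

	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}

	project, zone, instance := "my-project", "us-central1-a", "bootstrap-0" // placeholders

	// A negative start asks for the most recent N bytes; here the last 16 KiB.
	var start int64 = -16 * 1024
	for {
		out, err := svc.Instances.GetSerialPortOutput(project, zone, instance).
			Port(1).
			Start(start).
			Context(ctx).
			Do()
		if err != nil {
			log.Fatalf("getSerialPortOutput: %v", err)
		}
		fmt.Print(out.Contents)

		// Next is the byte position to resume from; if the 1 MB buffer has
		// wrapped, the server adjusts Start to the oldest output still kept.
		start = out.Next
		time.Sleep(5 * time.Second)
	}
}
```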
func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { c.urlParams_.Set("start", fmt.Sprint(start)) return c @@ -80844,7 +83360,7 @@ func (c *InstancesGetSerialPortOutputCall) Header() http.Header { func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80918,7 +83434,7 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Name of the instance for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -80941,7 +83457,7 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se // "type": "string" // }, // "start": { - // "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. For subsequent calls, this field should be set to the next value returned in the previous call.", + // "description": "Specifies the starting byte position of the output to return. To start with the first byte of output to the specified port, omit this field or set it to `0`.\n\nIf the output for that byte position is available, this field matches the `start` parameter sent with the request. If the amount of serial console output exceeds the size of the buffer (1 MB), the oldest output is discarded and is no longer available. If the requested start position refers to discarded output, the start position is adjusted to the oldest output still available, and the adjusted start position is returned as the `start` property value.\n\nYou can also provide a negative start position, which translates to the most recent number of bytes written to the serial port. 
For example, -3 is interpreted as the most recent 3 bytes written to the serial console.", // "format": "int64", // "location": "query", // "type": "string" @@ -81027,7 +83543,7 @@ func (c *InstancesGetShieldedInstanceIdentityCall) Header() http.Header { func (c *InstancesGetShieldedInstanceIdentityCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81195,7 +83711,7 @@ func (c *InstancesGetShieldedVmIdentityCall) Header() http.Header { func (c *InstancesGetShieldedVmIdentityCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81401,7 +83917,7 @@ func (c *InstancesInsertCall) Header() http.Header { func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81606,6 +84122,15 @@ func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstancesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -81643,7 +84168,7 @@ func (c *InstancesListCall) Header() http.Header { func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81744,6 +84269,11 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -81799,9 +84329,11 @@ type InstancesListReferrersCall struct { header_ http.Header } -// ListReferrers: Retrieves the list of referrers to instances contained -// within the specified zone. For more information, read Viewing -// Referrers to VM Instances. +// ListReferrers: Retrieves a list of resources that refer to the VM +// instance specified in the request. 
For example, if the VM instance is +// part of a managed or unmanaged instance group, the referrers list +// includes the instance group. For more information, read Viewing +// referrers to VM instances. func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -81875,6 +84407,15 @@ func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListR return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstancesListReferrersCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesListReferrersCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -81912,7 +84453,7 @@ func (c *InstancesListReferrersCall) Header() http.Header { func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81976,7 +84517,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance } return ret, nil // { - // "description": "Retrieves the list of referrers to instances contained within the specified zone. For more information, read Viewing Referrers to VM Instances.", + // "description": "Retrieves a list of resources that refer to the VM instance specified in the request. For example, if the VM instance is part of a managed or unmanaged instance group, the referrers list includes the instance group. For more information, read Viewing referrers to VM instances.", // "httpMethod": "GET", // "id": "compute.instances.listReferrers", // "parameterOrder": [ @@ -82022,6 +84563,11 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -82133,7 +84679,7 @@ func (c *InstancesRemoveResourcePoliciesCall) Header() http.Header { func (c *InstancesRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82320,7 +84866,7 @@ func (c *InstancesResetCall) Header() http.Header { func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82499,7 +85045,7 @@ func (c *InstancesResumeCall) Header() http.Header { func (c *InstancesResumeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82690,7 +85236,7 @@ func (c *InstancesSetDeletionProtectionCall) Header() http.Header { func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82876,7 +85422,7 @@ func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83051,7 +85597,7 @@ func (c *InstancesSetIamPolicyCall) Header() http.Header { func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83233,7 +85779,7 @@ func (c *InstancesSetLabelsCall) Header() http.Header { func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83420,7 +85966,7 @@ func (c *InstancesSetMachineResourcesCall) Header() http.Header { func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83607,7 +86153,7 @@ func (c *InstancesSetMachineTypeCall) Header() http.Header { func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83795,7 +86341,7 @@ func (c *InstancesSetMetadataCall) Header() http.Header { func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83984,7 +86530,7 @@ func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84101,6 +86647,192 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper } +// method id "compute.instances.setName": + +type InstancesSetNameCall struct { + s *Service + project string + zone string + instance string + instancessetnamerequest *InstancesSetNameRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetName: Sets name of an instance. +func (r *InstancesService) SetName(project string, zone string, instance string, instancessetnamerequest *InstancesSetNameRequest) *InstancesSetNameCall { + c := &InstancesSetNameCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.instancessetnamerequest = instancessetnamerequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetNameCall) RequestId(requestId string) *InstancesSetNameCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSetNameCall) Fields(s ...googleapi.Field) *InstancesSetNameCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSetNameCall) Context(ctx context.Context) *InstancesSetNameCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesSetNameCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesSetNameCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetnamerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setName") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.setName" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetNameCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets name of an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.setName", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setName", + // "request": { + // "$ref": "InstancesSetNameRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.instances.setScheduling": type InstancesSetSchedulingCall struct { @@ -84114,7 +86846,10 @@ type InstancesSetSchedulingCall struct { header_ http.Header } -// SetScheduling: Sets an instance's scheduling options. +// SetScheduling: Sets an instance's scheduling options. You can only +// call this method on a stopped instance, that is, a VM instance that +// is in a `TERMINATED` state. See Instance Life Cycle for more +// information on the possible instance states. // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -84171,7 +86906,7 @@ func (c *InstancesSetSchedulingCall) Header() http.Header { func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84237,7 +86972,7 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Sets an instance's scheduling options.", + // "description": "Sets an instance's scheduling options. You can only call this method on a stopped instance, that is, a VM instance that is in a `TERMINATED` state. 
See Instance Life Cycle for more information on the possible instance states.", // "httpMethod": "POST", // "id": "compute.instances.setScheduling", // "parameterOrder": [ @@ -84359,7 +87094,7 @@ func (c *InstancesSetServiceAccountCall) Header() http.Header { func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84548,7 +87283,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84737,7 +87472,7 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Header() http.Header { func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84925,7 +87660,7 @@ func (c *InstancesSetTagsCall) Header() http.Header { func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85091,7 +87826,7 @@ func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85264,7 +87999,7 @@ func (c *InstancesStartCall) Header() http.Header { func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85444,7 +88179,7 @@ func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85634,7 +88369,7 @@ func (c *InstancesStopCall) Header() http.Header { func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85823,7 +88558,7 @@ func (c *InstancesSuspendCall) Header() http.Header { func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85988,7 +88723,7 @@ func (c *InstancesTestIamPermissionsCall) Header() http.Header { func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86205,7 +88940,7 @@ func (c *InstancesUpdateCall) Header() http.Header { func (c *InstancesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86429,7 +89164,7 @@ func (c *InstancesUpdateAccessConfigCall) Header() http.Header { func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86625,7 +89360,7 @@ func (c *InstancesUpdateDisplayDeviceCall) Header() http.Header { func (c *InstancesUpdateDisplayDeviceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86813,7 +89548,7 @@ func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87009,7 +89744,7 @@ func (c *InstancesUpdateShieldedInstanceConfigCall) Header() http.Header { func (c *InstancesUpdateShieldedInstanceConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87198,7 +89933,7 @@ func (c *InstancesUpdateShieldedVmConfigCall) Header() http.Header { func (c *InstancesUpdateShieldedVmConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87412,6 +90147,15 @@ func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InterconnectAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -87449,7 +90193,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87553,6 +90297,11 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/interconnectAttachments", @@ -87656,7 +90405,7 @@ func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87824,7 +90573,7 @@ func (c *InterconnectAttachmentsGetCall) Header() http.Header { func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88007,7 +90756,7 @@ func (c *InterconnectAttachmentsInsertCall) Header() http.Header { func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88206,6 +90955,15 @@ func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *Interconn return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. 
The default value is +// false and the logic is the same as today. +func (c *InterconnectAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -88243,7 +91001,7 @@ func (c *InterconnectAttachmentsListCall) Header() http.Header { func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88350,6 +91108,11 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/interconnectAttachments", @@ -88457,7 +91220,7 @@ func (c *InterconnectAttachmentsPatchCall) Header() http.Header { func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88644,7 +91407,7 @@ func (c *InterconnectAttachmentsSetLabelsCall) Header() http.Header { func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88812,7 +91575,7 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Header() http.Header { func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88984,7 +91747,7 @@ func (c *InterconnectLocationsGetCall) Header() http.Header { func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89167,6 +91930,15 @@ func (c *InterconnectLocationsListCall) PageToken(pageToken string) *Interconnec return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. 
The default value is +// false and the logic is the same as today. +func (c *InterconnectLocationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectLocationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -89204,7 +91976,7 @@ func (c *InterconnectLocationsListCall) Header() http.Header { func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89302,6 +92074,11 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/interconnectLocations", @@ -89403,7 +92180,7 @@ func (c *InterconnectsDeleteCall) Header() http.Header { func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89561,7 +92338,7 @@ func (c *InterconnectsGetCall) Header() http.Header { func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89718,7 +92495,7 @@ func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89884,7 +92661,7 @@ func (c *InterconnectsInsertCall) Header() http.Header { func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90067,6 +92844,15 @@ func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *InterconnectsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -90104,7 +92890,7 @@ func (c *InterconnectsListCall) Header() http.Header { func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90202,6 +92988,11 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/interconnects", @@ -90307,7 +93098,7 @@ func (c *InterconnectsPatchCall) Header() http.Header { func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90464,7 +93255,7 @@ func (c *InterconnectsSetLabelsCall) Header() http.Header { func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90616,7 +93407,7 @@ func (c *InterconnectsTestIamPermissionsCall) Header() http.Header { func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90780,7 +93571,7 @@ func (c *LicenseCodesGetCall) Header() http.Header { func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90946,7 +93737,7 @@ func (c *LicensesDeleteCall) Header() http.Header { func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91106,7 +93897,7 @@ func (c *LicensesGetCall) Header() 
http.Header { func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91272,7 +94063,7 @@ func (c *LicensesGetIamPolicyCall) Header() http.Header { func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91444,7 +94235,7 @@ func (c *LicensesInsertCall) Header() http.Header { func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91636,6 +94427,15 @@ func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *LicensesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *LicensesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -91673,7 +94473,7 @@ func (c *LicensesListCall) Header() http.Header { func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91771,6 +94571,11 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/licenses", @@ -91858,7 +94663,7 @@ func (c *LicensesSetIamPolicyCall) Header() http.Header { func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92027,7 +94832,7 @@ func (c *MachineImagesDeleteCall) Header() http.Header { func (c *MachineImagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92185,7 +94990,7 @@ func (c *MachineImagesGetCall) Header() http.Header { func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92349,7 +95154,7 @@ func (c *MachineImagesGetIamPolicyCall) Header() http.Header { func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92531,7 +95336,7 @@ func (c *MachineImagesInsertCall) Header() http.Header { func (c *MachineImagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92719,6 +95524,15 @@ func (c *MachineImagesListCall) PageToken(pageToken string) *MachineImagesListCa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *MachineImagesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineImagesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
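A minimal illustrative sketch (not part of the vendored file): the regenerated client adds an opt-in ReturnPartialSuccess parameter to these List calls. Assuming the usual generated-client setup for this package (the import path, project, and zone below are placeholders; the copy vendored here is the beta surface), a caller could opt in roughly like this:

    package main

    import (
        "context"
        "fmt"
        "log"

        compute "google.golang.org/api/compute/v0.beta"
    )

    func main() {
        ctx := context.Background()
        // NewService picks up Application Default Credentials.
        svc, err := compute.NewService(ctx)
        if err != nil {
            log.Fatal(err)
        }
        // Opt in to partial results so the list can still return data when
        // part of the requested scope fails.
        list, err := svc.Instances.List("my-project", "us-central1-a").
            ReturnPartialSuccess(true).
            Do()
        if err != nil {
            log.Fatal(err)
        }
        for _, inst := range list.Items {
            fmt.Println(inst.Name)
        }
    }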
@@ -92756,7 +95570,7 @@ func (c *MachineImagesListCall) Header() http.Header { func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92854,6 +95668,11 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/machineImages", @@ -92939,7 +95758,7 @@ func (c *MachineImagesSetIamPolicyCall) Header() http.Header { func (c *MachineImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93091,7 +95910,7 @@ func (c *MachineImagesTestIamPermissionsCall) Header() http.Header { func (c *MachineImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93292,6 +96111,15 @@ func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTyp return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *MachineTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -93329,7 +96157,7 @@ func (c *MachineTypesAggregatedListCall) Header() http.Header { func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93432,6 +96260,11 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/machineTypes", @@ -93529,7 +96362,7 @@ func (c *MachineTypesGetCall) Header() http.Header { func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93724,6 +96557,15 @@ func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *MachineTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -93761,7 +96603,7 @@ func (c *MachineTypesListCall) Header() http.Header { func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93862,6 +96704,11 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -94001,6 +96848,15 @@ func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *N return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NetworkEndpointGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -94038,7 +96894,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94142,6 +96998,11 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/networkEndpointGroups", @@ -94248,7 +97109,7 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94433,7 +97294,7 @@ func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94610,7 +97471,7 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94785,7 +97646,7 @@ func (c *NetworkEndpointGroupsGetCall) Header() http.Header { func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94959,7 +97820,7 @@ func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95152,6 +98013,15 @@ func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndp return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior 
which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -95189,7 +98059,7 @@ func (c *NetworkEndpointGroupsListCall) Header() http.Header { func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95290,6 +98160,11 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", @@ -95420,6 +98295,15 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken stri return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -95447,7 +98331,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95560,6 +98444,11 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", // "location": "path", @@ -95655,7 +98544,7 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95835,7 +98724,7 @@ func (c *NetworksAddPeeringCall) Header() http.Header { func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96009,7 +98898,7 @@ func (c *NetworksDeleteCall) Header() http.Header { func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96168,7 +99057,7 @@ func (c *NetworksGetCall) Header() http.Header { func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96325,7 +99214,7 @@ func (c *NetworksGetEffectiveFirewallsCall) Header() http.Header { func (c *NetworksGetEffectiveFirewallsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96492,7 +99381,7 @@ func (c *NetworksInsertCall) Header() http.Header { func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96676,6 +99565,15 @@ func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NetworksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -96713,7 +99611,7 @@ func (c *NetworksListCall) Header() http.Header { func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96811,6 +99709,11 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/networks", @@ -96959,6 +99862,15 @@ func (c *NetworksListPeeringRoutesCall) Region(region string) *NetworksListPeeri return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NetworksListPeeringRoutesCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -96996,7 +99908,7 @@ func (c *NetworksListPeeringRoutesCall) Header() http.Header { func (c *NetworksListPeeringRoutesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97126,6 +100038,11 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha // "description": "The region of the request. The response will include all subnet routes, static routes and dynamic routes in the region.", // "location": "query", // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/networks/{network}/listPeeringRoutes", @@ -97231,7 +100148,7 @@ func (c *NetworksPatchCall) Header() http.Header { func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97406,7 +100323,7 @@ func (c *NetworksRemovePeeringCall) Header() http.Header { func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97580,7 +100497,7 @@ func (c *NetworksSwitchToCustomModeCall) Header() http.Header { func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97729,7 +100646,7 @@ func (c *NetworksTestIamPermissionsCall) Header() http.Header { func (c *NetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97903,7 +100820,7 @@ func (c *NetworksUpdatePeeringCall) Header() http.Header { func (c *NetworksUpdatePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98080,7 +100997,7 @@ func (c *NodeGroupsAddNodesCall) Header() http.Header { func (c *NodeGroupsAddNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98294,6 +101211,15 @@ func (c *NodeGroupsAggregatedListCall) PageToken(pageToken string) *NodeGroupsAg return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NodeGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
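The same setter lands on the aggregated-list calls, where partial success matters most: one failing zone or region scope no longer fails the whole aggregation. A sketch of paging through node groups with it enabled, reusing svc, ctx, and the imports from the first sketch; the project ID is a placeholder, and the *compute.NodeGroupAggregatedList callback type follows the generated client's usual pattern rather than appearing in this hunk, so treat it as an assumption:

// listNodeGroupScopes pages through the aggregated node-group listing,
// tolerating scopes that failed to respond.
func listNodeGroupScopes(ctx context.Context, svc *compute.Service, project string) error {
	return svc.NodeGroups.AggregatedList(project).
		ReturnPartialSuccess(true).
		Pages(ctx, func(page *compute.NodeGroupAggregatedList) error {
			for scope, scoped := range page.Items {
				fmt.Printf("%s: %d node groups\n", scope, len(scoped.NodeGroups))
			}
			return nil
		})
}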
@@ -98331,7 +101257,7 @@ func (c *NodeGroupsAggregatedListCall) Header() http.Header { func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98434,6 +101360,11 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/nodeGroups", @@ -98537,7 +101468,7 @@ func (c *NodeGroupsDeleteCall) Header() http.Header { func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98715,7 +101646,7 @@ func (c *NodeGroupsDeleteNodesCall) Header() http.Header { func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98893,7 +101824,7 @@ func (c *NodeGroupsGetCall) Header() http.Header { func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99068,7 +101999,7 @@ func (c *NodeGroupsGetIamPolicyCall) Header() http.Header { func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99251,7 +102182,7 @@ func (c *NodeGroupsInsertCall) Header() http.Header { func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99454,6 +102385,15 @@ func (c *NodeGroupsListCall) PageToken(pageToken string) *NodeGroupsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *NodeGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -99491,7 +102431,7 @@ func (c *NodeGroupsListCall) Header() http.Header { func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99592,6 +102532,11 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -99720,6 +102665,15 @@ func (c *NodeGroupsListNodesCall) PageToken(pageToken string) *NodeGroupsListNod return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NodeGroupsListNodesCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsListNodesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -99747,7 +102701,7 @@ func (c *NodeGroupsListNodesCall) Header() http.Header { func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99854,6 +102808,11 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -99965,7 +102924,7 @@ func (c *NodeGroupsPatchCall) Header() http.Header { func (c *NodeGroupsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100133,7 +103092,7 @@ func (c *NodeGroupsSetIamPolicyCall) Header() http.Header { func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100314,7 +103273,7 @@ func (c *NodeGroupsSetNodeTemplateCall) Header() http.Header { func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100482,7 +103441,7 @@ func (c *NodeGroupsTestIamPermissionsCall) Header() http.Header { func (c *NodeGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100691,6 +103650,15 @@ func (c *NodeTemplatesAggregatedListCall) PageToken(pageToken string) *NodeTempl return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NodeTemplatesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -100728,7 +103696,7 @@ func (c *NodeTemplatesAggregatedListCall) Header() http.Header { func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100831,6 +103799,11 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/nodeTemplates", @@ -100934,7 +103907,7 @@ func (c *NodeTemplatesDeleteCall) Header() http.Header { func (c *NodeTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101103,7 +104076,7 @@ func (c *NodeTemplatesGetCall) Header() http.Header { func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101278,7 +104251,7 @@ func (c *NodeTemplatesGetIamPolicyCall) Header() http.Header { func (c *NodeTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101460,7 +104433,7 @@ func (c *NodeTemplatesInsertCall) Header() http.Header { func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101654,6 +104627,15 @@ func (c *NodeTemplatesListCall) PageToken(pageToken string) *NodeTemplatesListCa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NodeTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTemplatesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -101691,7 +104673,7 @@ func (c *NodeTemplatesListCall) Header() http.Header { func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101798,6 +104780,11 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/nodeTemplates", @@ -101885,7 +104872,7 @@ func (c *NodeTemplatesSetIamPolicyCall) Header() http.Header { func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102048,7 +105035,7 @@ func (c *NodeTemplatesTestIamPermissionsCall) Header() http.Header { func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102257,6 +105244,15 @@ func (c *NodeTypesAggregatedListCall) PageToken(pageToken string) *NodeTypesAggr return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NodeTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTypesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -102294,7 +105290,7 @@ func (c *NodeTypesAggregatedListCall) Header() http.Header { func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102397,6 +105393,11 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/nodeTypes", @@ -102493,7 +105494,7 @@ func (c *NodeTypesGetCall) Header() http.Header { func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102687,6 +105688,15 @@ func (c *NodeTypesListCall) PageToken(pageToken string) *NodeTypesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. 
The default value is +// false and the logic is the same as today. +func (c *NodeTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -102724,7 +105734,7 @@ func (c *NodeTypesListCall) Header() http.Header { func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102825,6 +105835,11 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -102943,7 +105958,7 @@ func (c *OrganizationSecurityPoliciesAddAssociationCall) Header() http.Header { func (c *OrganizationSecurityPoliciesAddAssociationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103112,7 +106127,7 @@ func (c *OrganizationSecurityPoliciesAddRuleCall) Header() http.Header { func (c *OrganizationSecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103281,7 +106296,7 @@ func (c *OrganizationSecurityPoliciesCopyRulesCall) Header() http.Header { func (c *OrganizationSecurityPoliciesCopyRulesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103440,7 +106455,7 @@ func (c *OrganizationSecurityPoliciesDeleteCall) Header() http.Header { func (c *OrganizationSecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103587,7 +106602,7 @@ func (c *OrganizationSecurityPoliciesGetCall) Header() http.Header { func (c *OrganizationSecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" 
gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103739,7 +106754,7 @@ func (c *OrganizationSecurityPoliciesGetAssociationCall) Header() http.Header { func (c *OrganizationSecurityPoliciesGetAssociationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103896,7 +106911,7 @@ func (c *OrganizationSecurityPoliciesGetRuleCall) Header() http.Header { func (c *OrganizationSecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104011,7 +107026,9 @@ func (r *OrganizationSecurityPoliciesService) Insert(securitypolicy *SecurityPol } // ParentId sets the optional parameter "parentId": Parent ID for this -// request. +// request. The ID can be either be "folders/[FOLDER_ID]" if the parent +// is a folder or "organizations/[ORGANIZATION_ID]" if the parent is an +// organization. func (c *OrganizationSecurityPoliciesInsertCall) ParentId(parentId string) *OrganizationSecurityPoliciesInsertCall { c.urlParams_.Set("parentId", parentId) return c @@ -104063,7 +107080,7 @@ func (c *OrganizationSecurityPoliciesInsertCall) Header() http.Header { func (c *OrganizationSecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104129,7 +107146,7 @@ func (c *OrganizationSecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption // "id": "compute.organizationSecurityPolicies.insert", // "parameters": { // "parentId": { - // "description": "Parent ID for this request.", + // "description": "Parent ID for this request. The ID can be either be \"folders/[FOLDER_ID]\" if the parent is a folder or \"organizations/[ORGANIZATION_ID]\" if the parent is an organization.", // "location": "query", // "type": "string" // }, @@ -104243,6 +107260,15 @@ func (c *OrganizationSecurityPoliciesListCall) ParentId(parentId string) *Organi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *OrganizationSecurityPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *OrganizationSecurityPoliciesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -104280,7 +107306,7 @@ func (c *OrganizationSecurityPoliciesListCall) Header() http.Header { func (c *OrganizationSecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104370,6 +107396,11 @@ func (c *OrganizationSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) // "description": "Parent ID for this request.", // "location": "query", // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "locations/global/securityPolicies", @@ -104468,7 +107499,7 @@ func (c *OrganizationSecurityPoliciesListAssociationsCall) Header() http.Header func (c *OrganizationSecurityPoliciesListAssociationsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104621,7 +107652,7 @@ func (c *OrganizationSecurityPoliciesMoveCall) Header() http.Header { func (c *OrganizationSecurityPoliciesMoveCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104783,7 +107814,7 @@ func (c *OrganizationSecurityPoliciesPatchCall) Header() http.Header { func (c *OrganizationSecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104954,7 +107985,7 @@ func (c *OrganizationSecurityPoliciesPatchRuleCall) Header() http.Header { func (c *OrganizationSecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105130,7 +108161,7 @@ func (c *OrganizationSecurityPoliciesRemoveAssociationCall) Header() http.Header func (c *OrganizationSecurityPoliciesRemoveAssociationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105296,7 +108327,7 @@ func (c *OrganizationSecurityPoliciesRemoveRuleCall) Header() http.Header { func (c *OrganizationSecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105489,6 +108520,15 @@ func (c *PacketMirroringsAggregatedListCall) PageToken(pageToken string) *Packet return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *PacketMirroringsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PacketMirroringsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -105526,7 +108566,7 @@ func (c *PacketMirroringsAggregatedListCall) Header() http.Header { func (c *PacketMirroringsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105629,6 +108669,11 @@ func (c *PacketMirroringsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/packetMirrorings", @@ -105732,7 +108777,7 @@ func (c *PacketMirroringsDeleteCall) Header() http.Header { func (c *PacketMirroringsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105900,7 +108945,7 @@ func (c *PacketMirroringsGetCall) Header() http.Header { func (c *PacketMirroringsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106076,7 +109121,7 @@ func (c *PacketMirroringsInsertCall) Header() http.Header { func (c *PacketMirroringsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106270,6 +109315,15 @@ func (c *PacketMirroringsListCall) PageToken(pageToken string) *PacketMirrorings return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *PacketMirroringsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PacketMirroringsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -106307,7 +109361,7 @@ func (c *PacketMirroringsListCall) Header() http.Header { func (c *PacketMirroringsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106414,6 +109468,11 @@ func (c *PacketMirroringsListCall) Do(opts ...googleapi.CallOption) (*PacketMirr // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/packetMirrorings", @@ -106521,7 +109580,7 @@ func (c *PacketMirroringsPatchCall) Header() http.Header { func (c *PacketMirroringsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106689,7 +109748,7 @@ func (c *PacketMirroringsTestIamPermissionsCall) Header() http.Header { func (c *PacketMirroringsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106865,7 +109924,7 @@ func (c *ProjectsDisableXpnHostCall) Header() http.Header { func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107022,7 +110081,7 @@ func (c *ProjectsDisableXpnResourceCall) Header() http.Header { func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107184,7 +110243,7 @@ func (c *ProjectsEnableXpnHostCall) Header() http.Header { func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107342,7 +110401,7 @@ func (c *ProjectsEnableXpnResourceCall) Header() http.Header { func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107497,7 +110556,7 @@ func (c *ProjectsGetCall) Header() http.Header { func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107643,7 +110702,7 @@ func (c *ProjectsGetXpnHostCall) Header() http.Header { func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107816,6 +110875,15 @@ func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXp return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ProjectsGetXpnResourcesCall) ReturnPartialSuccess(returnPartialSuccess bool) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -107853,7 +110921,7 @@ func (c *ProjectsGetXpnResourcesCall) Header() http.Header { func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107951,6 +111019,11 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/getXpnResources", @@ -108071,6 +111144,15 @@ func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnH return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ProjectsListXpnHostsCall) ReturnPartialSuccess(returnPartialSuccess bool) *ProjectsListXpnHostsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -108098,7 +111180,7 @@ func (c *ProjectsListXpnHostsCall) Header() http.Header { func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108198,6 +111280,11 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/listXpnHosts", @@ -108301,7 +111388,7 @@ func (c *ProjectsMoveDiskCall) Header() http.Header { func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108466,7 +111553,7 @@ func (c *ProjectsMoveInstanceCall) Header() http.Header { func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108632,7 +111719,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108799,7 +111886,7 @@ func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header { func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108967,7 +112054,7 @@ func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109136,7 +112223,7 @@ func (c *RegionAutoscalersDeleteCall) Header() http.Header { func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109304,7 +112391,7 @@ func (c *RegionAutoscalersGetCall) Header() http.Header { func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109480,7 +112567,7 @@ func (c *RegionAutoscalersInsertCall) Header() http.Header { func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109674,6 +112761,15 @@ func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscale return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionAutoscalersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionAutoscalersListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -109711,7 +112807,7 @@ func (c *RegionAutoscalersListCall) Header() http.Header { func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109818,6 +112914,11 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/autoscalers", @@ -109930,7 +113031,7 @@ func (c *RegionAutoscalersPatchCall) Header() http.Header { func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110095,7 +113196,7 @@ func (c *RegionAutoscalersTestIamPermissionsCall) Header() http.Header { func (c *RegionAutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110283,7 +113384,7 @@ func (c *RegionAutoscalersUpdateCall) Header() http.Header { func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110464,7 +113565,7 @@ func (c *RegionBackendServicesDeleteCall) Header() http.Header { func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110632,7 +113733,7 
@@ func (c *RegionBackendServicesGetCall) Header() http.Header { func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110791,7 +113892,7 @@ func (c *RegionBackendServicesGetHealthCall) Header() http.Header { func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110916,10 +114017,8 @@ type RegionBackendServicesInsertCall struct { } // Insert: Creates a regional BackendService resource in the specified -// project using the data included in the request. There are several -// restrictions and guidelines to keep in mind when creating a regional -// backend service. Read Understanding backend services for more -// information. +// project using the data included in the request. For more information, +// see Backend services overview. func (r *RegionBackendServicesService) Insert(project string, region string, backendservice *BackendService) *RegionBackendServicesInsertCall { c := &RegionBackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -110974,7 +114073,7 @@ func (c *RegionBackendServicesInsertCall) Header() http.Header { func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111039,7 +114138,7 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Understanding backend services for more information.", + // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.", // "httpMethod": "POST", // "id": "compute.regionBackendServices.insert", // "parameterOrder": [ @@ -111168,6 +114267,15 @@ func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBacke return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionBackendServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionBackendServicesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -111205,7 +114313,7 @@ func (c *RegionBackendServicesListCall) Header() http.Header { func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111312,6 +114420,11 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/backendServices", @@ -111362,11 +114475,9 @@ type RegionBackendServicesPatchCall struct { } // Patch: Updates the specified regional BackendService resource with -// the data included in the request. There are several Understanding -// backend services to keep in mind when updating a backend service. -// Read Understanding backend services for more information. This -// method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. +// the data included in the request. For more information, see +// Understanding backend services This method supports PATCH semantics +// and uses the JSON merge patch format and processing rules. func (r *RegionBackendServicesService) Patch(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesPatchCall { c := &RegionBackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -111422,7 +114533,7 @@ func (c *RegionBackendServicesPatchCall) Header() http.Header { func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111488,7 +114599,7 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the specified regional BackendService resource with the data included in the request. 
For more information, see Understanding backend services This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", // "id": "compute.regionBackendServices.patch", // "parameterOrder": [ @@ -111590,7 +114701,7 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Header() http.Header { func (c *RegionBackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111717,9 +114828,8 @@ type RegionBackendServicesUpdateCall struct { } // Update: Updates the specified regional BackendService resource with -// the data included in the request. There are several Understanding -// backend services to keep in mind when updating a backend service. -// Read Understanding backend services for more information. +// the data included in the request. For more information, see Backend +// services overview. func (r *RegionBackendServicesService) Update(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesUpdateCall { c := &RegionBackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -111775,7 +114885,7 @@ func (c *RegionBackendServicesUpdateCall) Header() http.Header { func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111841,7 +114951,7 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information.", + // "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Backend services overview.", // "httpMethod": "PUT", // "id": "compute.regionBackendServices.update", // "parameterOrder": [ @@ -111988,6 +115098,15 @@ func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *Regio return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionCommitmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
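The reworded Patch and Update descriptions above point at JSON merge-patch semantics for regional backend services: only the fields present in the request body are changed. A minimal sketch under the same assumptions as the previous example (beta Compute client, placeholder project, region, and backend service names); the fingerprint from a prior Get is included to guard against concurrent modification:

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder values for illustration only.
	projectID, region, name := "my-project", "us-central1", "my-backend-service"

	// Read the current resource to obtain its fingerprint, which protects
	// against concurrent modifications.
	current, err := svc.RegionBackendServices.Get(projectID, region, name).Do()
	if err != nil {
		log.Fatal(err)
	}

	// PATCH uses JSON merge-patch semantics: only the fields set here are
	// changed; everything else on the backend service is left untouched.
	patch := &compute.BackendService{
		Fingerprint: current.Fingerprint,
		TimeoutSec:  60,
	}
	op, err := svc.RegionBackendServices.Patch(projectID, region, name, patch).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("patch operation: %s (%s)", op.Name, op.Status)
}

The returned Operation can then be polled until its Status is DONE before relying on the change.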
@@ -112025,7 +115144,7 @@ func (c *RegionCommitmentsAggregatedListCall) Header() http.Header { func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112128,6 +115247,11 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/commitments", @@ -112224,7 +115348,7 @@ func (c *RegionCommitmentsGetCall) Header() http.Header { func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112400,7 +115524,7 @@ func (c *RegionCommitmentsInsertCall) Header() http.Header { func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112594,6 +115718,15 @@ func (c *RegionCommitmentsListCall) PageToken(pageToken string) *RegionCommitmen return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionCommitmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionCommitmentsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -112631,7 +115764,7 @@ func (c *RegionCommitmentsListCall) Header() http.Header { func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112738,6 +115871,11 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/commitments", @@ -112844,7 +115982,7 @@ func (c *RegionCommitmentsUpdateReservationsCall) Header() http.Header { func (c *RegionCommitmentsUpdateReservationsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113021,7 +116159,7 @@ func (c *RegionDiskTypesGetCall) Header() http.Header { func (c *RegionDiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113215,6 +116353,15 @@ func (c *RegionDiskTypesListCall) PageToken(pageToken string) *RegionDiskTypesLi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionDiskTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionDiskTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -113252,7 +116399,7 @@ func (c *RegionDiskTypesListCall) Header() http.Header { func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113359,6 +116506,11 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/diskTypes", @@ -113466,7 +116618,7 @@ func (c *RegionDisksAddResourcePoliciesCall) Header() http.Header { func (c *RegionDisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113652,7 +116804,7 @@ func (c *RegionDisksCreateSnapshotCall) Header() http.Header { func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113839,7 +116991,7 @@ func (c *RegionDisksDeleteCall) Header() http.Header { func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114006,7 +117158,7 @@ func (c *RegionDisksGetCall) Header() http.Header { func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114181,7 +117333,7 @@ func (c *RegionDisksGetIamPolicyCall) Header() http.Header { func (c *RegionDisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114370,7 +117522,7 @@ func (c *RegionDisksInsertCall) Header() http.Header { func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114569,6 +117721,15 @@ func (c *RegionDisksListCall) PageToken(pageToken string) *RegionDisksListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionDisksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionDisksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -114606,7 +117767,7 @@ func (c *RegionDisksListCall) Header() http.Header { func (c *RegionDisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114713,6 +117874,11 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/disks", @@ -114819,7 +117985,7 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Header() http.Header { func (c *RegionDisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115005,7 +118171,7 @@ func (c *RegionDisksResizeCall) Header() http.Header { func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115173,7 +118339,7 @@ func (c *RegionDisksSetIamPolicyCall) Header() http.Header { func (c *RegionDisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115354,7 +118520,7 @@ func (c *RegionDisksSetLabelsCall) Header() http.Header { func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115522,7 +118688,7 @@ func (c *RegionDisksTestIamPermissionsCall) Header() http.Header { func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115702,7 +118868,7 @@ func (c *RegionHealthCheckServicesDeleteCall) Header() http.Header { func (c *RegionHealthCheckServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = 
v } @@ -115869,7 +119035,7 @@ func (c *RegionHealthCheckServicesGetCall) Header() http.Header { func (c *RegionHealthCheckServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116044,7 +119210,7 @@ func (c *RegionHealthCheckServicesInsertCall) Header() http.Header { func (c *RegionHealthCheckServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116238,6 +119404,15 @@ func (c *RegionHealthCheckServicesListCall) PageToken(pageToken string) *RegionH return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionHealthCheckServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionHealthCheckServicesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -116275,7 +119450,7 @@ func (c *RegionHealthCheckServicesListCall) Header() http.Header { func (c *RegionHealthCheckServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116382,6 +119557,11 @@ func (c *RegionHealthCheckServicesListCall) Do(opts ...googleapi.CallOption) (*H // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/healthCheckServices", @@ -116489,7 +119669,7 @@ func (c *RegionHealthCheckServicesPatchCall) Header() http.Header { func (c *RegionHealthCheckServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116672,7 +119852,7 @@ func (c *RegionHealthChecksDeleteCall) Header() http.Header { func (c *RegionHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116841,7 +120021,7 @@ func (c *RegionHealthChecksGetCall) Header() http.Header { func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117017,7 +120197,7 @@ func (c *RegionHealthChecksInsertCall) Header() http.Header { func (c *RegionHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117211,6 +120391,15 @@ func (c *RegionHealthChecksListCall) PageToken(pageToken string) *RegionHealthCh return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionHealthChecksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -117248,7 +120437,7 @@ func (c *RegionHealthChecksListCall) Header() http.Header { func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117355,6 +120544,11 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/healthChecks", @@ -117462,7 +120656,7 @@ func (c *RegionHealthChecksPatchCall) Header() http.Header { func (c *RegionHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117649,7 +120843,7 @@ func (c *RegionHealthChecksUpdateCall) Header() http.Header { func (c *RegionHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117851,7 +121045,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118017,7 +121211,7 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.H func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118200,7 +121394,7 @@ func (c *RegionInstanceGroupManagersCreateInstancesCall) Header() http.Header { func (c *RegionInstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118383,7 +121577,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118575,7 +121769,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118741,7 +121935,7 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Header() http. 
func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118911,7 +122105,7 @@ func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119092,7 +122286,7 @@ func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119285,6 +122479,15 @@ func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *Regio return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupManagersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -119322,7 +122525,7 @@ func (c *RegionInstanceGroupManagersListCall) Header() http.Header { func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119428,6 +122631,11 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( // "location": "path", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/instanceGroupManagers", @@ -119478,7 +122686,8 @@ type RegionInstanceGroupManagersListErrorsCall struct { } // ListErrors: Lists all errors thrown by actions on instances for a -// given regional managed instance group. +// given regional managed instance group. The filter and orderBy query +// parameters are not supported. 
func (r *RegionInstanceGroupManagersService) ListErrors(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListErrorsCall { c := &RegionInstanceGroupManagersListErrorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -119552,6 +122761,15 @@ func (c *RegionInstanceGroupManagersListErrorsCall) PageToken(pageToken string) return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupManagersListErrorsCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -119589,7 +122807,7 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Header() http.Header { func (c *RegionInstanceGroupManagersListErrorsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119655,7 +122873,7 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Lists all errors thrown by actions on instances for a given regional managed instance group.", + // "description": "Lists all errors thrown by actions on instances for a given regional managed instance group. The filter and orderBy query parameters are not supported.", // "httpMethod": "GET", // "id": "compute.regionInstanceGroupManagers.listErrors", // "parameterOrder": [ @@ -119705,6 +122923,11 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOpt // "location": "path", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", @@ -119756,7 +122979,7 @@ type RegionInstanceGroupManagersListManagedInstancesCall struct { // ListManagedInstances: Lists the instances in the managed instance // group and instances that are scheduled to be created. The list // includes any current actions that the group has scheduled for its -// instances. +// instances. The orderBy query parameter is not supported. func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -119830,6 +123053,15 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToke return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. 
The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -119857,7 +123089,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Head func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119920,7 +123152,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported.", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.listManagedInstances", // "parameterOrder": [ @@ -119970,6 +123202,11 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // "location": "path", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", @@ -120019,7 +123256,8 @@ type RegionInstanceGroupManagersListPerInstanceConfigsCall struct { } // ListPerInstanceConfigs: Lists all of the per-instance configs defined -// for the managed instance group. +// for the managed instance group. The orderBy query parameter is not +// supported. func (r *RegionInstanceGroupManagersService) ListPerInstanceConfigs(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c := &RegionInstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -120093,6 +123331,15 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageTo return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -120120,7 +123367,7 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Header() http.He func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120183,7 +123430,7 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl } return ret, nil // { - // "description": "Lists all of the per-instance configs defined for the managed instance group.", + // "description": "Lists all of the per-instance configs defined for the managed instance group. The orderBy query parameter is not supported.", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", // "parameterOrder": [ @@ -120233,6 +123480,11 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl // "location": "path", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", @@ -120344,7 +123596,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120531,7 +123783,7 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.H func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120729,7 +123981,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120924,7 +124176,7 @@ func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121111,7 +124363,7 @@ func (c 
*RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.He func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121297,7 +124549,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Heade func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121483,7 +124735,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121649,7 +124901,7 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Header() http.Header func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121835,7 +125087,7 @@ func (c *RegionInstanceGroupManagersUpdateCall) Header() http.Header { func (c *RegionInstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122022,7 +125274,7 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http. func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122196,7 +125448,7 @@ func (c *RegionInstanceGroupsGetCall) Header() http.Header { func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122388,6 +125640,15 @@ func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstan return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. 
The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -122425,7 +125686,7 @@ func (c *RegionInstanceGroupsListCall) Header() http.Header { func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122531,6 +125792,11 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region // "location": "path", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/instanceGroups", @@ -122583,7 +125849,8 @@ type RegionInstanceGroupsListInstancesCall struct { // ListInstances: Lists the instances in the specified instance group // and displays information about the named ports. Depending on the // specified options, this method can list all instances or only the -// instances that are running. +// instances that are running. The orderBy query parameter is not +// supported. func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -122658,6 +125925,15 @@ func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *Reg return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -122685,7 +125961,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122752,7 +126028,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", + // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. The orderBy query parameter is not supported.", // "httpMethod": "POST", // "id": "compute.regionInstanceGroups.listInstances", // "parameterOrder": [ @@ -122802,6 +126078,11 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "location": "path", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", @@ -122911,7 +126192,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123077,7 +126358,7 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Header() http.Header { func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123190,25 +126471,26 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp } -// method id "compute.regionNotificationEndpoints.delete": +// method id "compute.regionNetworkEndpointGroups.delete": -type RegionNotificationEndpointsDeleteCall struct { +type RegionNetworkEndpointGroupsDeleteCall struct { s *Service project string region string - notificationEndpoint string + networkEndpointGroup string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified NotificationEndpoint in the given -// region -func (r *RegionNotificationEndpointsService) Delete(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsDeleteCall { - c := &RegionNotificationEndpointsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified network endpoint group. 
Note that the +// NEG cannot be deleted if it is configured as a backend of a backend +// service. +func (r *RegionNetworkEndpointGroupsService) Delete(project string, region string, networkEndpointGroup string) *RegionNetworkEndpointGroupsDeleteCall { + c := &RegionNetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.notificationEndpoint = notificationEndpoint + c.networkEndpointGroup = networkEndpointGroup return c } @@ -123226,7 +126508,7 @@ func (r *RegionNotificationEndpointsService) Delete(project string, region strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionNotificationEndpointsDeleteCall) RequestId(requestId string) *RegionNotificationEndpointsDeleteCall { +func (c *RegionNetworkEndpointGroupsDeleteCall) RequestId(requestId string) *RegionNetworkEndpointGroupsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -123234,7 +126516,7 @@ func (c *RegionNotificationEndpointsDeleteCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionNotificationEndpointsDeleteCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsDeleteCall { +func (c *RegionNetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -123242,23 +126524,23 @@ func (c *RegionNotificationEndpointsDeleteCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionNotificationEndpointsDeleteCall) Context(ctx context.Context) *RegionNotificationEndpointsDeleteCall { +func (c *RegionNetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionNotificationEndpointsDeleteCall) Header() http.Header { +func (c *RegionNetworkEndpointGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionNotificationEndpointsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123266,7 +126548,7 @@ func (c *RegionNotificationEndpointsDeleteCall) doRequest(alt string) (*http.Res var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -123276,19 +126558,19 @@ func (c *RegionNotificationEndpointsDeleteCall) doRequest(alt string) (*http.Res googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "notificationEndpoint": c.notificationEndpoint, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionNotificationEndpoints.delete" call. +// Do executes the "compute.regionNetworkEndpointGroups.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionNotificationEndpointsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -123319,19 +126601,18 @@ func (c *RegionNotificationEndpointsDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes the specified NotificationEndpoint in the given region", + // "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", // "httpMethod": "DELETE", - // "id": "compute.regionNotificationEndpoints.delete", + // "id": "compute.regionNetworkEndpointGroups.delete", // "parameterOrder": [ // "project", // "region", - // "notificationEndpoint" + // "networkEndpointGroup" // ], // "parameters": { - // "notificationEndpoint": { - // "description": "Name of the NotificationEndpoint resource to delete.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -123343,9 +126624,8 @@ func (c *RegionNotificationEndpointsDeleteCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region where the network endpoint group is located. 
It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -123355,7 +126635,7 @@ func (c *RegionNotificationEndpointsDeleteCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + // "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", // "response": { // "$ref": "Operation" // }, @@ -123367,33 +126647,33 @@ func (c *RegionNotificationEndpointsDeleteCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionNotificationEndpoints.get": +// method id "compute.regionNetworkEndpointGroups.get": -type RegionNotificationEndpointsGetCall struct { +type RegionNetworkEndpointGroupsGetCall struct { s *Service project string region string - notificationEndpoint string + networkEndpointGroup string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified NotificationEndpoint resource in the given -// region. -func (r *RegionNotificationEndpointsService) Get(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsGetCall { - c := &RegionNotificationEndpointsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified network endpoint group. Gets a list of +// available network endpoint groups by making a list() request. +func (r *RegionNetworkEndpointGroupsService) Get(project string, region string, networkEndpointGroup string) *RegionNetworkEndpointGroupsGetCall { + c := &RegionNetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.notificationEndpoint = notificationEndpoint + c.networkEndpointGroup = networkEndpointGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionNotificationEndpointsGetCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsGetCall { +func (c *RegionNetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -123403,7 +126683,7 @@ func (c *RegionNotificationEndpointsGetCall) Fields(s ...googleapi.Field) *Regio // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionNotificationEndpointsGetCall) IfNoneMatch(entityTag string) *RegionNotificationEndpointsGetCall { +func (c *RegionNetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *RegionNetworkEndpointGroupsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -123411,23 +126691,23 @@ func (c *RegionNotificationEndpointsGetCall) IfNoneMatch(entityTag string) *Regi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionNotificationEndpointsGetCall) Context(ctx context.Context) *RegionNotificationEndpointsGetCall { +func (c *RegionNetworkEndpointGroupsGetCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionNotificationEndpointsGetCall) Header() http.Header { +func (c *RegionNetworkEndpointGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionNotificationEndpointsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123438,7 +126718,7 @@ func (c *RegionNotificationEndpointsGetCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -123448,19 +126728,19 @@ func (c *RegionNotificationEndpointsGetCall) doRequest(alt string) (*http.Respon googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "notificationEndpoint": c.notificationEndpoint, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionNotificationEndpoints.get" call. -// Exactly one of *NotificationEndpoint or error will be non-nil. Any +// Do executes the "compute.regionNetworkEndpointGroups.get" call. +// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *NotificationEndpoint.ServerResponse.Header or (if a response was +// *NetworkEndpointGroup.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionNotificationEndpointsGetCall) Do(opts ...googleapi.CallOption) (*NotificationEndpoint, error) { +func (c *RegionNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -123479,7 +126759,7 @@ func (c *RegionNotificationEndpointsGetCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NotificationEndpoint{ + ret := &NetworkEndpointGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -123491,19 +126771,18 @@ func (c *RegionNotificationEndpointsGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns the specified NotificationEndpoint resource in the given region.", + // "description": "Returns the specified network endpoint group. 
Gets a list of available network endpoint groups by making a list() request.", // "httpMethod": "GET", - // "id": "compute.regionNotificationEndpoints.get", + // "id": "compute.regionNetworkEndpointGroups.get", // "parameterOrder": [ // "project", // "region", - // "notificationEndpoint" + // "networkEndpointGroup" // ], // "parameters": { - // "notificationEndpoint": { - // "description": "Name of the NotificationEndpoint resource to return.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -123515,16 +126794,15 @@ func (c *RegionNotificationEndpointsGetCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + // "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", // "response": { - // "$ref": "NotificationEndpoint" + // "$ref": "NetworkEndpointGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -123535,25 +126813,25 @@ func (c *RegionNotificationEndpointsGetCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.regionNotificationEndpoints.insert": +// method id "compute.regionNetworkEndpointGroups.insert": -type RegionNotificationEndpointsInsertCall struct { +type RegionNetworkEndpointGroupsInsertCall struct { s *Service project string region string - notificationendpoint *NotificationEndpoint + networkendpointgroup *NetworkEndpointGroup urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Create a NotificationEndpoint in the specified project in the -// given region using the parameters that are included in the request. -func (r *RegionNotificationEndpointsService) Insert(project string, region string, notificationendpoint *NotificationEndpoint) *RegionNotificationEndpointsInsertCall { - c := &RegionNotificationEndpointsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a network endpoint group in the specified project +// using the parameters that are included in the request. +func (r *RegionNetworkEndpointGroupsService) Insert(project string, region string, networkendpointgroup *NetworkEndpointGroup) *RegionNetworkEndpointGroupsInsertCall { + c := &RegionNetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.notificationendpoint = notificationendpoint + c.networkendpointgroup = networkendpointgroup return c } @@ -123571,7 +126849,7 @@ func (r *RegionNotificationEndpointsService) Insert(project string, region strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *RegionNotificationEndpointsInsertCall) RequestId(requestId string) *RegionNotificationEndpointsInsertCall { +func (c *RegionNetworkEndpointGroupsInsertCall) RequestId(requestId string) *RegionNetworkEndpointGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -123579,7 +126857,7 @@ func (c *RegionNotificationEndpointsInsertCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionNotificationEndpointsInsertCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsInsertCall { +func (c *RegionNetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -123587,36 +126865,36 @@ func (c *RegionNotificationEndpointsInsertCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionNotificationEndpointsInsertCall) Context(ctx context.Context) *RegionNotificationEndpointsInsertCall { +func (c *RegionNetworkEndpointGroupsInsertCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionNotificationEndpointsInsertCall) Header() http.Header { +func (c *RegionNetworkEndpointGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionNotificationEndpointsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.notificationendpoint) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/notificationEndpoints") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -123630,14 +126908,813 @@ func (c *RegionNotificationEndpointsInsertCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionNotificationEndpoints.insert" call. +// Do executes the "compute.regionNetworkEndpointGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *RegionNotificationEndpointsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + // "httpMethod": "POST", + // "id": "compute.regionNetworkEndpointGroups.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region where you want to create the network endpoint group. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkEndpointGroups", + // "request": { + // "$ref": "NetworkEndpointGroup" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionNetworkEndpointGroups.list": + +type RegionNetworkEndpointGroupsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of regional network endpoint groups +// available to the specified project in the given region. 
+func (r *RegionNetworkEndpointGroupsService) List(project string, region string) *RegionNetworkEndpointGroupsListCall { + c := &RegionNetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *RegionNetworkEndpointGroupsListCall) Filter(filter string) *RegionNetworkEndpointGroupsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionNetworkEndpointGroupsListCall) MaxResults(maxResults int64) *RegionNetworkEndpointGroupsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *RegionNetworkEndpointGroupsListCall) OrderBy(orderBy string) *RegionNetworkEndpointGroupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. 
+func (c *RegionNetworkEndpointGroupsListCall) PageToken(pageToken string) *RegionNetworkEndpointGroupsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionNetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionNetworkEndpointGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionNetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionNetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *RegionNetworkEndpointGroupsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionNetworkEndpointGroupsListCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionNetworkEndpointGroupsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEndpointGroups") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNetworkEndpointGroups.list" call. +// Exactly one of *NetworkEndpointGroupList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *RegionNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NetworkEndpointGroupList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", + // "httpMethod": "GET", + // "id": "compute.regionNetworkEndpointGroups.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkEndpointGroups", + // "response": { + // "$ref": "NetworkEndpointGroupList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionNetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionNotificationEndpoints.delete": + +type RegionNotificationEndpointsDeleteCall struct { + s *Service + project string + region string + notificationEndpoint string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified NotificationEndpoint in the given +// region +func (r *RegionNotificationEndpointsService) Delete(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsDeleteCall { + c := &RegionNotificationEndpointsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.notificationEndpoint = notificationEndpoint + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *RegionNotificationEndpointsDeleteCall) RequestId(requestId string) *RegionNotificationEndpointsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionNotificationEndpointsDeleteCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionNotificationEndpointsDeleteCall) Context(ctx context.Context) *RegionNotificationEndpointsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionNotificationEndpointsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNotificationEndpointsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "notificationEndpoint": c.notificationEndpoint, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNotificationEndpoints.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionNotificationEndpointsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified NotificationEndpoint in the given region", + // "httpMethod": "DELETE", + // "id": "compute.regionNotificationEndpoints.delete", + // "parameterOrder": [ + // "project", + // "region", + // "notificationEndpoint" + // ], + // "parameters": { + // "notificationEndpoint": { + // "description": "Name of the NotificationEndpoint resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionNotificationEndpoints.get": + +type RegionNotificationEndpointsGetCall struct { + s *Service + project string + region string + notificationEndpoint string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified NotificationEndpoint resource in the given +// region. +func (r *RegionNotificationEndpointsService) Get(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsGetCall { + c := &RegionNotificationEndpointsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.notificationEndpoint = notificationEndpoint + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionNotificationEndpointsGetCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionNotificationEndpointsGetCall) IfNoneMatch(entityTag string) *RegionNotificationEndpointsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionNotificationEndpointsGetCall) Context(ctx context.Context) *RegionNotificationEndpointsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionNotificationEndpointsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNotificationEndpointsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "notificationEndpoint": c.notificationEndpoint, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNotificationEndpoints.get" call. +// Exactly one of *NotificationEndpoint or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NotificationEndpoint.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionNotificationEndpointsGetCall) Do(opts ...googleapi.CallOption) (*NotificationEndpoint, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NotificationEndpoint{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified NotificationEndpoint resource in the given region.", + // "httpMethod": "GET", + // "id": "compute.regionNotificationEndpoints.get", + // "parameterOrder": [ + // "project", + // "region", + // "notificationEndpoint" + // ], + // "parameters": { + // "notificationEndpoint": { + // "description": "Name of the NotificationEndpoint resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + // "response": { + // "$ref": "NotificationEndpoint" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionNotificationEndpoints.insert": + +type RegionNotificationEndpointsInsertCall struct { + s *Service + project string + region string + notificationendpoint *NotificationEndpoint + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Create a NotificationEndpoint in the specified project in the +// given region using the parameters that are included in the request. +func (r *RegionNotificationEndpointsService) Insert(project string, region string, notificationendpoint *NotificationEndpoint) *RegionNotificationEndpointsInsertCall { + c := &RegionNotificationEndpointsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.notificationendpoint = notificationendpoint + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionNotificationEndpointsInsertCall) RequestId(requestId string) *RegionNotificationEndpointsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionNotificationEndpointsInsertCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionNotificationEndpointsInsertCall) Context(ctx context.Context) *RegionNotificationEndpointsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionNotificationEndpointsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNotificationEndpointsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.notificationendpoint) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/notificationEndpoints") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNotificationEndpoints.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionNotificationEndpointsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -123797,6 +127874,15 @@ func (c *RegionNotificationEndpointsListCall) PageToken(pageToken string) *Regio return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *RegionNotificationEndpointsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -123834,7 +127920,7 @@ func (c *RegionNotificationEndpointsListCall) Header() http.Header { func (c *RegionNotificationEndpointsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123941,6 +128027,11 @@ func (c *RegionNotificationEndpointsListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/notificationEndpoints", @@ -124026,7 +128117,7 @@ func (c *RegionOperationsDeleteCall) Header() http.Header { func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124162,7 +128253,7 @@ func (c *RegionOperationsGetCall) Header() http.Header { func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124357,6 +128448,15 @@ func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperations return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionOperationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -124394,7 +128494,7 @@ func (c *RegionOperationsListCall) Header() http.Header { func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124501,6 +128601,11 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/operations", @@ -124598,7 +128703,7 @@ func (c *RegionOperationsWaitCall) Header() http.Header { func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124770,7 +128875,7 @@ func (c *RegionSslCertificatesDeleteCall) Header() http.Header { func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124940,7 +129045,7 @@ func (c *RegionSslCertificatesGetCall) Header() http.Header { func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125116,7 +129221,7 @@ func (c *RegionSslCertificatesInsertCall) Header() http.Header { func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125310,6 +129415,15 @@ func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCe return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionSslCertificatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSslCertificatesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -125347,7 +129461,7 @@ func (c *RegionSslCertificatesListCall) Header() http.Header { func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125454,6 +129568,11 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/sslCertificates", @@ -125557,7 +129676,7 @@ func (c *RegionTargetHttpProxiesDeleteCall) Header() http.Header { func (c *RegionTargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125727,7 +129846,7 @@ func (c *RegionTargetHttpProxiesGetCall) Header() http.Header { func (c *RegionTargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125903,7 +130022,7 @@ func (c *RegionTargetHttpProxiesInsertCall) Header() http.Header { func (c *RegionTargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126097,6 +130216,15 @@ func (c *RegionTargetHttpProxiesListCall) PageToken(pageToken string) *RegionTar return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionTargetHttpProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionTargetHttpProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -126134,7 +130262,7 @@ func (c *RegionTargetHttpProxiesListCall) Header() http.Header { func (c *RegionTargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126241,6 +130369,11 @@ func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*Tar // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/targetHttpProxies", @@ -126346,7 +130479,7 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) Header() http.Header { func (c *RegionTargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126530,7 +130663,7 @@ func (c *RegionTargetHttpsProxiesDeleteCall) Header() http.Header { func (c *RegionTargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126700,7 +130833,7 @@ func (c *RegionTargetHttpsProxiesGetCall) Header() http.Header { func (c *RegionTargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126876,7 +131009,7 @@ func (c *RegionTargetHttpsProxiesInsertCall) Header() http.Header { func (c *RegionTargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127070,6 +131203,15 @@ func (c *RegionTargetHttpsProxiesListCall) PageToken(pageToken string) *RegionTa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionTargetHttpsProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionTargetHttpsProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -127107,7 +131249,7 @@ func (c *RegionTargetHttpsProxiesListCall) Header() http.Header { func (c *RegionTargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127214,6 +131356,11 @@ func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*Ta // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/targetHttpsProxies", @@ -127319,7 +131466,7 @@ func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127505,7 +131652,7 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) Header() http.Header { func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127677,7 +131824,7 @@ func (c *RegionUrlMapsDeleteCall) Header() http.Header { func (c *RegionUrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127846,7 +131993,7 @@ func (c *RegionUrlMapsGetCall) Header() http.Header { func (c *RegionUrlMapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128010,7 +132157,7 @@ func (c *RegionUrlMapsInsertCall) Header() http.Header { func (c *RegionUrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128176,7 +132323,7 @@ func (c *RegionUrlMapsInvalidateCacheCall) Header() http.Header { func (c *RegionUrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" 
gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128379,6 +132526,15 @@ func (c *RegionUrlMapsListCall) PageToken(pageToken string) *RegionUrlMapsListCa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionUrlMapsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionUrlMapsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -128416,7 +132572,7 @@ func (c *RegionUrlMapsListCall) Header() http.Header { func (c *RegionUrlMapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128523,6 +132679,11 @@ func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, e // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/urlMaps", @@ -128618,7 +132779,7 @@ func (c *RegionUrlMapsPatchCall) Header() http.Header { func (c *RegionUrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128793,7 +132954,7 @@ func (c *RegionUrlMapsUpdateCall) Header() http.Header { func (c *RegionUrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128962,7 +133123,7 @@ func (c *RegionUrlMapsValidateCall) Header() http.Header { func (c *RegionUrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129133,7 +133294,7 @@ func (c *RegionsGetCall) Header() http.Header { func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129317,6 +133478,15 @@ func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall { return c } +// ReturnPartialSuccess sets the optional 
parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -129354,7 +133524,7 @@ func (c *RegionsListCall) Header() http.Header { func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129452,6 +133622,11 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions", @@ -129584,6 +133759,15 @@ func (c *ReservationsAggregatedListCall) PageToken(pageToken string) *Reservatio return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ReservationsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ReservationsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -129621,7 +133805,7 @@ func (c *ReservationsAggregatedListCall) Header() http.Header { func (c *ReservationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129724,6 +133908,11 @@ func (c *ReservationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Rese // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/reservations", @@ -129827,7 +134016,7 @@ func (c *ReservationsDeleteCall) Header() http.Header { func (c *ReservationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129995,7 +134184,7 @@ func (c *ReservationsGetCall) Header() http.Header { func (c *ReservationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130170,7 +134359,7 @@ func (c *ReservationsGetIamPolicyCall) Header() http.Header { func (c *ReservationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130352,7 +134541,7 @@ func (c *ReservationsInsertCall) Header() http.Header { func (c *ReservationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130546,6 +134735,15 @@ func (c *ReservationsListCall) PageToken(pageToken string) *ReservationsListCall return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ReservationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ReservationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -130583,7 +134781,7 @@ func (c *ReservationsListCall) Header() http.Header { func (c *ReservationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130684,6 +134882,11 @@ func (c *ReservationsListCall) Do(opts ...googleapi.CallOption) (*ReservationLis // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "Name of the zone for this request.", // "location": "path", @@ -130797,7 +135000,7 @@ func (c *ReservationsResizeCall) Header() http.Header { func (c *ReservationsResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130965,7 +135168,7 @@ func (c *ReservationsSetIamPolicyCall) Header() http.Header { func (c *ReservationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131128,7 +135331,7 @@ func (c *ReservationsTestIamPermissionsCall) Header() http.Header { func (c *ReservationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131337,6 +135540,15 @@ func (c *ResourcePoliciesAggregatedListCall) PageToken(pageToken string) *Resour return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ResourcePoliciesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ResourcePoliciesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -131374,7 +135586,7 @@ func (c *ResourcePoliciesAggregatedListCall) Header() http.Header { func (c *ResourcePoliciesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131477,6 +135689,11 @@ func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/resourcePolicies", @@ -131580,7 +135797,7 @@ func (c *ResourcePoliciesDeleteCall) Header() http.Header { func (c *ResourcePoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131748,7 +135965,7 @@ func (c *ResourcePoliciesGetCall) Header() http.Header { func (c *ResourcePoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131923,7 +136140,7 @@ func (c *ResourcePoliciesGetIamPolicyCall) Header() http.Header { func (c *ResourcePoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132104,7 +136321,7 @@ func (c *ResourcePoliciesInsertCall) Header() http.Header { func (c *ResourcePoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132298,6 +136515,15 @@ func (c *ResourcePoliciesListCall) PageToken(pageToken string) *ResourcePolicies return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ResourcePoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ResourcePoliciesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -132335,7 +136561,7 @@ func (c *ResourcePoliciesListCall) Header() http.Header { func (c *ResourcePoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132442,6 +136668,11 @@ func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePo // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/resourcePolicies", @@ -132529,7 +136760,7 @@ func (c *ResourcePoliciesSetIamPolicyCall) Header() http.Header { func (c *ResourcePoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132692,7 +136923,7 @@ func (c *ResourcePoliciesTestIamPermissionsCall) Header() http.Header { func (c *ResourcePoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132901,6 +137132,15 @@ func (c *RoutersAggregatedListCall) PageToken(pageToken string) *RoutersAggregat return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RoutersAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RoutersAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -132938,7 +137178,7 @@ func (c *RoutersAggregatedListCall) Header() http.Header { func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133041,6 +137281,11 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/routers", @@ -133144,7 +137389,7 @@ func (c *RoutersDeleteCall) Header() http.Header { func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133313,7 +137558,7 @@ func (c *RoutersGetCall) Header() http.Header { func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133518,6 +137763,15 @@ func (c *RoutersGetNatMappingInfoCall) PageToken(pageToken string) *RoutersGetNa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RoutersGetNatMappingInfoCall) ReturnPartialSuccess(returnPartialSuccess bool) *RoutersGetNatMappingInfoCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -133555,7 +137809,7 @@ func (c *RoutersGetNatMappingInfoCall) Header() http.Header { func (c *RoutersGetNatMappingInfoCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133670,6 +137924,11 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "router": { // "description": "Name of the Router resource to query for Nat Mapping information of VM endpoints.", // "location": "path", @@ -133772,7 +138031,7 @@ func (c *RoutersGetRouterStatusCall) Header() http.Header { func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133948,7 +138207,7 @@ func (c *RoutersInsertCall) Header() http.Header { func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134142,6 +138401,15 @@ func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RoutersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RoutersListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -134179,7 +138447,7 @@ func (c *RoutersListCall) Header() http.Header { func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134286,6 +138554,11 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/routers", @@ -134393,7 +138666,7 @@ func (c *RoutersPatchCall) Header() http.Header { func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134562,7 +138835,7 @@ func (c *RoutersPreviewCall) Header() http.Header { func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134726,7 +138999,7 @@ func (c *RoutersTestIamPermissionsCall) Header() http.Header { func (c *RoutersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134912,7 +139185,7 @@ func (c *RoutersUpdateCall) Header() http.Header { func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135095,7 +139368,7 @@ func (c *RoutesDeleteCall) Header() http.Header { func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135254,7 +139527,7 @@ func (c *RoutesGetCall) Header() http.Header { func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135420,7 +139693,7 @@ func (c *RoutesInsertCall) Header() http.Header { func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135604,6 +139877,15 @@ func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *RoutesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RoutesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -135641,7 +139923,7 @@ func (c *RoutesListCall) Header() http.Header { func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135739,6 +140021,11 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/routes", @@ -135824,7 +140111,7 @@ func (c *RoutesTestIamPermissionsCall) Header() http.Header { func (c *RoutesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135983,7 +140270,7 @@ func (c *SecurityPoliciesAddRuleCall) Header() http.Header { func (c *SecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136156,7 +140443,7 @@ func (c *SecurityPoliciesDeleteCall) Header() http.Header { func (c *SecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136314,7 +140601,7 @@ func (c *SecurityPoliciesGetCall) Header() http.Header { func (c *SecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136477,7 +140764,7 @@ func (c *SecurityPoliciesGetRuleCall) Header() http.Header { func (c *SecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136655,7 +140942,7 @@ func (c 
*SecurityPoliciesInsertCall) Header() http.Header { func (c *SecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136843,6 +141130,15 @@ func (c *SecurityPoliciesListCall) PageToken(pageToken string) *SecurityPolicies return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SecurityPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SecurityPoliciesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -136880,7 +141176,7 @@ func (c *SecurityPoliciesListCall) Header() http.Header { func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136978,6 +141274,11 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/securityPolicies", @@ -137098,6 +141399,15 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) PageToken(pageToke return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) ReturnPartialSuccess(returnPartialSuccess bool) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -137135,7 +141445,7 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Header() http.Head func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137236,6 +141546,11 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Do(opts ...googlea // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/securityPolicies/listPreconfiguredExpressionSets", @@ -137318,7 +141633,7 @@ func (c *SecurityPoliciesPatchCall) Header() http.Header { func (c *SecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137488,7 +141803,7 @@ func (c *SecurityPoliciesPatchRuleCall) Header() http.Header { func (c *SecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137655,7 +141970,7 @@ func (c *SecurityPoliciesRemoveRuleCall) Header() http.Header { func (c *SecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137805,7 +142120,7 @@ func (c *SecurityPoliciesSetLabelsCall) Header() http.Header { func (c *SecurityPoliciesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137957,7 +142272,7 @@ func (c *SecurityPoliciesTestIamPermissionsCall) Header() http.Header { func (c *SecurityPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138133,7 +142448,7 @@ func (c *SnapshotsDeleteCall) Header() http.Header { func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138292,7 +142607,7 @@ func (c *SnapshotsGetCall) Header() http.Header { func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138456,7 +142771,7 @@ func (c *SnapshotsGetIamPolicyCall) Header() http.Header { func (c *SnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138561,6 +142876,171 @@ func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } +// method id "compute.snapshots.insert": + +type SnapshotsInsertCall struct { + s *Service + project string + snapshot *Snapshot + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a snapshot in the specified project using the data +// included in the request. +func (r *SnapshotsService) Insert(project string, snapshot *Snapshot) *SnapshotsInsertCall { + c := &SnapshotsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.snapshot = snapshot + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SnapshotsInsertCall) RequestId(requestId string) *SnapshotsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SnapshotsInsertCall) Fields(s ...googleapi.Field) *SnapshotsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SnapshotsInsertCall) Context(ctx context.Context) *SnapshotsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SnapshotsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SnapshotsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/snapshots") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.snapshots.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SnapshotsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a snapshot in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.snapshots.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/snapshots", + // "request": { + // "$ref": "Snapshot" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.snapshots.list": type SnapshotsListCall struct { @@ -138646,6 +143126,15 @@ func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SnapshotsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SnapshotsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -138683,7 +143172,7 @@ func (c *SnapshotsListCall) Header() http.Header { func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138781,6 +143270,11 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/snapshots", @@ -138866,7 +143360,7 @@ func (c *SnapshotsSetIamPolicyCall) Header() http.Header { func (c *SnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139018,7 +143512,7 @@ func (c *SnapshotsSetLabelsCall) Header() http.Header { func (c *SnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139170,7 +143664,7 @@ func (c *SnapshotsTestIamPermissionsCall) Header() http.Header { func (c *SnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139371,6 +143865,15 @@ func (c *SslCertificatesAggregatedListCall) PageToken(pageToken string) *SslCert return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SslCertificatesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslCertificatesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -139408,7 +143911,7 @@ func (c *SslCertificatesAggregatedListCall) Header() http.Header { func (c *SslCertificatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139511,6 +144014,11 @@ func (c *SslCertificatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*S // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/sslCertificates", @@ -139612,7 +144120,7 @@ func (c *SslCertificatesDeleteCall) Header() http.Header { func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139770,7 +144278,7 @@ func (c *SslCertificatesGetCall) Header() http.Header { func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139935,7 +144443,7 @@ func (c *SslCertificatesInsertCall) Header() http.Header { func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140118,6 +144626,15 @@ func (c *SslCertificatesListCall) PageToken(pageToken string) *SslCertificatesLi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SslCertificatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslCertificatesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -140155,7 +144672,7 @@ func (c *SslCertificatesListCall) Header() http.Header { func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140253,6 +144770,11 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/sslCertificates", @@ -140338,7 +144860,7 @@ func (c *SslCertificatesTestIamPermissionsCall) Header() http.Header { func (c *SslCertificatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140509,7 +145031,7 @@ func (c *SslPoliciesDeleteCall) Header() http.Header { func (c *SslPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140666,7 +145188,7 @@ func (c *SslPoliciesGetCall) Header() http.Header { func (c *SslPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140830,7 +145352,7 @@ func (c *SslPoliciesInsertCall) Header() http.Header { func (c *SslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -141013,6 +145535,15 @@ func (c *SslPoliciesListCall) PageToken(pageToken string) *SslPoliciesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SslPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslPoliciesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -141050,7 +145581,7 @@ func (c *SslPoliciesListCall) Header() http.Header { func (c *SslPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -141148,6 +145679,11 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/sslPolicies", @@ -141268,6 +145804,15 @@ func (c *SslPoliciesListAvailableFeaturesCall) PageToken(pageToken string) *SslP return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SslPoliciesListAvailableFeaturesCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -141305,7 +145850,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) Header() http.Header { func (c *SslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -141405,6 +145950,11 @@ func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/sslPolicies/listAvailableFeatures", @@ -141488,7 +146038,7 @@ func (c *SslPoliciesPatchCall) Header() http.Header { func (c *SslPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -141644,7 +146194,7 @@ func (c *SslPoliciesTestIamPermissionsCall) Header() http.Header { func (c *SslPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -141844,6 +146394,15 @@ func (c *SubnetworksAggregatedListCall) PageToken(pageToken string) *Subnetworks return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SubnetworksAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SubnetworksAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -141881,7 +146440,7 @@ func (c *SubnetworksAggregatedListCall) Header() http.Header { func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -141984,6 +146543,11 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/subnetworks", @@ -142087,7 +146651,7 @@ func (c *SubnetworksDeleteCall) Header() http.Header { func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -142266,7 +146830,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) Header() http.Header { func (c *SubnetworksExpandIpCidrRangeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -142443,7 +147007,7 @@ func (c *SubnetworksGetCall) Header() http.Header { func (c *SubnetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -142618,7 +147182,7 @@ func (c *SubnetworksGetIamPolicyCall) Header() http.Header { func (c *SubnetworksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -142800,7 +147364,7 @@ func (c *SubnetworksInsertCall) Header() http.Header { func (c *SubnetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -142994,6 +147558,15 @@ func (c *SubnetworksListCall) PageToken(pageToken string) *SubnetworksListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case 
of failure. The default value is +// false and the logic is the same as today. +func (c *SubnetworksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SubnetworksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -143031,7 +147604,7 @@ func (c *SubnetworksListCall) Header() http.Header { func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -143138,6 +147711,11 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/subnetworks", @@ -143186,8 +147764,7 @@ type SubnetworksListUsableCall struct { } // ListUsable: Retrieves an aggregated list of all usable subnetworks in -// the project. The list contains all of the subnetworks in the project -// and the subnetworks that were shared by a Shared VPC host project. +// the project. func (r *SubnetworksService) ListUsable(project string) *SubnetworksListUsableCall { c := &SubnetworksListUsableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -143259,6 +147836,15 @@ func (c *SubnetworksListUsableCall) PageToken(pageToken string) *SubnetworksList return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SubnetworksListUsableCall) ReturnPartialSuccess(returnPartialSuccess bool) *SubnetworksListUsableCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -143296,7 +147882,7 @@ func (c *SubnetworksListUsableCall) Header() http.Header { func (c *SubnetworksListUsableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -143358,7 +147944,7 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub } return ret, nil // { - // "description": "Retrieves an aggregated list of all usable subnetworks in the project. 
The list contains all of the subnetworks in the project and the subnetworks that were shared by a Shared VPC host project.", + // "description": "Retrieves an aggregated list of all usable subnetworks in the project.", // "httpMethod": "GET", // "id": "compute.subnetworks.listUsable", // "parameterOrder": [ @@ -143394,6 +147980,11 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/subnetworks/listUsable", @@ -143517,7 +148108,7 @@ func (c *SubnetworksPatchCall) Header() http.Header { func (c *SubnetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -143691,7 +148282,7 @@ func (c *SubnetworksSetIamPolicyCall) Header() http.Header { func (c *SubnetworksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -143874,7 +148465,7 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Header() http.Header { func (c *SubnetworksSetPrivateIpGoogleAccessCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -143940,13 +148531,678 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access.", + // "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access.", + // "httpMethod": "POST", + // "id": "compute.subnetworks.setPrivateIpGoogleAccess", + // "parameterOrder": [ + // "project", + // "region", + // "subnetwork" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "subnetwork": { + // "description": "Name of the Subnetwork resource.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess", + // "request": { + // "$ref": "SubnetworksSetPrivateIpGoogleAccessRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.subnetworks.testIamPermissions": + +type SubnetworksTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *SubnetworksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *SubnetworksTestIamPermissionsCall { + c := &SubnetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SubnetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *SubnetworksTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SubnetworksTestIamPermissionsCall) Context(ctx context.Context) *SubnetworksTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SubnetworksTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SubnetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/subnetworks/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.subnetworks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.subnetworks.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/subnetworks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetGrpcProxies.delete": + +type TargetGrpcProxiesDeleteCall struct { + s *Service + project string + targetGrpcProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified TargetGrpcProxy in the given scope +func (r *TargetGrpcProxiesService) Delete(project string, targetGrpcProxy string) *TargetGrpcProxiesDeleteCall { + c := &TargetGrpcProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetGrpcProxy = targetGrpcProxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *TargetGrpcProxiesDeleteCall) RequestId(requestId string) *TargetGrpcProxiesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetGrpcProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetGrpcProxiesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetGrpcProxiesDeleteCall) Context(ctx context.Context) *TargetGrpcProxiesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetGrpcProxiesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetGrpcProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetGrpcProxy": c.targetGrpcProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetGrpcProxies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetGrpcProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified TargetGrpcProxy in the given scope", + // "httpMethod": "DELETE", + // "id": "compute.targetGrpcProxies.delete", + // "parameterOrder": [ + // "project", + // "targetGrpcProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetGrpcProxy": { + // "description": "Name of the TargetGrpcProxy resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetGrpcProxies.get": + +type TargetGrpcProxiesGetCall struct { + s *Service + project string + targetGrpcProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified TargetGrpcProxy resource in the given +// scope. +func (r *TargetGrpcProxiesService) Get(project string, targetGrpcProxy string) *TargetGrpcProxiesGetCall { + c := &TargetGrpcProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetGrpcProxy = targetGrpcProxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetGrpcProxiesGetCall) Fields(s ...googleapi.Field) *TargetGrpcProxiesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetGrpcProxiesGetCall) IfNoneMatch(entityTag string) *TargetGrpcProxiesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetGrpcProxiesGetCall) Context(ctx context.Context) *TargetGrpcProxiesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetGrpcProxiesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetGrpcProxiesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetGrpcProxy": c.targetGrpcProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetGrpcProxies.get" call. +// Exactly one of *TargetGrpcProxy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetGrpcProxy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetGrpcProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetGrpcProxy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetGrpcProxy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified TargetGrpcProxy resource in the given scope.", + // "httpMethod": "GET", + // "id": "compute.targetGrpcProxies.get", + // "parameterOrder": [ + // "project", + // "targetGrpcProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetGrpcProxy": { + // "description": "Name of the TargetGrpcProxy resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", + // "response": { + // "$ref": "TargetGrpcProxy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetGrpcProxies.insert": + +type TargetGrpcProxiesInsertCall struct { + s *Service + project string + targetgrpcproxy *TargetGrpcProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a TargetGrpcProxy in the specified project in the +// given scope using the parameters that are included in the request. +func (r *TargetGrpcProxiesService) Insert(project string, targetgrpcproxy *TargetGrpcProxy) *TargetGrpcProxiesInsertCall { + c := &TargetGrpcProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetgrpcproxy = targetgrpcproxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetGrpcProxiesInsertCall) RequestId(requestId string) *TargetGrpcProxiesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *TargetGrpcProxiesInsertCall) Fields(s ...googleapi.Field) *TargetGrpcProxiesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetGrpcProxiesInsertCall) Context(ctx context.Context) *TargetGrpcProxiesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetGrpcProxiesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetGrpcProxiesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetgrpcproxy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/targetGrpcProxies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetGrpcProxies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetGrpcProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a TargetGrpcProxy in the specified project in the given scope using the parameters that are included in the request.", // "httpMethod": "POST", - // "id": "compute.subnetworks.setPrivateIpGoogleAccess", + // "id": "compute.targetGrpcProxies.insert", // "parameterOrder": [ - // "project", - // "region", - // "subnetwork" + // "project" // ], // "parameters": { // "project": { @@ -143956,29 +149212,15 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOptio // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "subnetwork": { - // "description": "Name of the Subnetwork resource.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess", + // "path": "projects/{project}/global/targetGrpcProxies", // "request": { - // "$ref": "SubnetworksSetPrivateIpGoogleAccessRequest" + // "$ref": "TargetGrpcProxy" // }, // "response": { // "$ref": "Operation" @@ -143991,93 +149233,167 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOptio } -// method id "compute.subnetworks.testIamPermissions": +// method id "compute.targetGrpcProxies.list": -type SubnetworksTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetGrpcProxiesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *SubnetworksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *SubnetworksTestIamPermissionsCall { - c := &SubnetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Lists the TargetGrpcProxies for a project in the given scope. +func (r *TargetGrpcProxiesService) List(project string) *TargetGrpcProxiesListCall { + c := &TargetGrpcProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *TargetGrpcProxiesListCall) Filter(filter string) *TargetGrpcProxiesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *TargetGrpcProxiesListCall) MaxResults(maxResults int64) *TargetGrpcProxiesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *TargetGrpcProxiesListCall) OrderBy(orderBy string) *TargetGrpcProxiesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. 
Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *TargetGrpcProxiesListCall) PageToken(pageToken string) *TargetGrpcProxiesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetGrpcProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetGrpcProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SubnetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *SubnetworksTestIamPermissionsCall { +func (c *TargetGrpcProxiesListCall) Fields(s ...googleapi.Field) *TargetGrpcProxiesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetGrpcProxiesListCall) IfNoneMatch(entityTag string) *TargetGrpcProxiesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SubnetworksTestIamPermissionsCall) Context(ctx context.Context) *SubnetworksTestIamPermissionsCall { +func (c *TargetGrpcProxiesListCall) Context(ctx context.Context) *TargetGrpcProxiesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SubnetworksTestIamPermissionsCall) Header() http.Header { +func (c *TargetGrpcProxiesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SubnetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetGrpcProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/subnetworks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/targetGrpcProxies") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.subnetworks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// Do executes the "compute.targetGrpcProxies.list" call. +// Exactly one of *TargetGrpcProxyList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// *TargetGrpcProxyList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *TargetGrpcProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetGrpcProxyList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -144096,7 +149412,7 @@ func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &TargetGrpcProxyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -144108,15 +149424,36 @@ func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.subnetworks.testIamPermissions", + // "description": "Lists the TargetGrpcProxies for a project in the given scope.", + // "httpMethod": "GET", + // "id": "compute.targetGrpcProxies.list", // "parameterOrder": [ - // "project", - // "region", - // "resource" + // "project" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -144124,32 +149461,218 @@ func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/global/targetGrpcProxies", + // "response": { + // "$ref": "TargetGrpcProxyList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetGrpcProxiesListCall) Pages(ctx context.Context, f func(*TargetGrpcProxyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.targetGrpcProxies.patch": + +type TargetGrpcProxiesPatchCall struct { + s *Service + project string + targetGrpcProxy string + targetgrpcproxy *TargetGrpcProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified TargetGrpcProxy resource with the data +// included in the request. This method supports PATCH semantics and +// uses JSON merge patch format and processing rules. 
+func (r *TargetGrpcProxiesService) Patch(project string, targetGrpcProxy string, targetgrpcproxy *TargetGrpcProxy) *TargetGrpcProxiesPatchCall { + c := &TargetGrpcProxiesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetGrpcProxy = targetGrpcProxy + c.targetgrpcproxy = targetgrpcproxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetGrpcProxiesPatchCall) RequestId(requestId string) *TargetGrpcProxiesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetGrpcProxiesPatchCall) Fields(s ...googleapi.Field) *TargetGrpcProxiesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetGrpcProxiesPatchCall) Context(ctx context.Context) *TargetGrpcProxiesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetGrpcProxiesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetGrpcProxiesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetgrpcproxy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetGrpcProxy": c.targetGrpcProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetGrpcProxies.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetGrpcProxiesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified TargetGrpcProxy resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.targetGrpcProxies.patch", + // "parameterOrder": [ + // "project", + // "targetGrpcProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetGrpcProxy": { + // "description": "Name of the TargetGrpcProxy resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/subnetworks/{resource}/testIamPermissions", + // "path": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "TargetGrpcProxy" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -144252,6 +149775,15 @@ func (c *TargetHttpProxiesAggregatedListCall) PageToken(pageToken string) *Targe return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetHttpProxiesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetHttpProxiesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -144289,7 +149821,7 @@ func (c *TargetHttpProxiesAggregatedListCall) Header() http.Header { func (c *TargetHttpProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -144392,6 +149924,11 @@ func (c *TargetHttpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/targetHttpProxies", @@ -144494,7 +150031,7 @@ func (c *TargetHttpProxiesDeleteCall) Header() http.Header { func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -144653,7 +150190,7 @@ func (c *TargetHttpProxiesGetCall) Header() http.Header { func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -144819,7 +150356,7 @@ func (c *TargetHttpProxiesInsertCall) Header() http.Header { func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -145003,6 +150540,15 @@ func (c *TargetHttpProxiesListCall) PageToken(pageToken string) *TargetHttpProxi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetHttpProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetHttpProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -145040,7 +150586,7 @@ func (c *TargetHttpProxiesListCall) Header() http.Header { func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -145138,6 +150684,11 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/targetHttpProxies", @@ -145174,6 +150725,184 @@ func (c *TargetHttpProxiesListCall) Pages(ctx context.Context, f func(*TargetHtt } } +// method id "compute.targetHttpProxies.patch": + +type TargetHttpProxiesPatchCall struct { + s *Service + project string + targetHttpProxy string + targethttpproxy *TargetHttpProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified TargetHttpProxy resource with the data +// included in the request. This method supports PATCH semantics and +// uses JSON merge patch format and processing rules. (== +// suppress_warning http-rest-shadowed ==) +func (r *TargetHttpProxiesService) Patch(project string, targetHttpProxy string, targethttpproxy *TargetHttpProxy) *TargetHttpProxiesPatchCall { + c := &TargetHttpProxiesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpProxy = targetHttpProxy + c.targethttpproxy = targethttpproxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetHttpProxiesPatchCall) RequestId(requestId string) *TargetHttpProxiesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpProxiesPatchCall) Fields(s ...googleapi.Field) *TargetHttpProxiesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpProxiesPatchCall) Context(ctx context.Context) *TargetHttpProxiesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *TargetHttpProxiesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpProxiesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/targetHttpProxies/{targetHttpProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpProxy": c.targetHttpProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetHttpProxies.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpProxiesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified TargetHttpProxy resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "PATCH", + // "id": "compute.targetHttpProxies.patch", + // "parameterOrder": [ + // "project", + // "targetHttpProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", + // "request": { + // "$ref": "TargetHttpProxy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.targetHttpProxies.setUrlMap": type TargetHttpProxiesSetUrlMapCall struct { @@ -145242,7 +150971,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) Header() http.Header { func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -145399,7 +151128,7 @@ func (c *TargetHttpProxiesTestIamPermissionsCall) Header() http.Header { func (c *TargetHttpProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -145600,6 +151329,15 @@ func (c *TargetHttpsProxiesAggregatedListCall) PageToken(pageToken string) *Targ return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetHttpsProxiesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetHttpsProxiesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
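// Editor's note: illustrative usage sketch only, not part of the vendored, generated client.
// It shows how a caller might exercise the new compute.targetHttpProxies.patch method added
// in the hunk above. The project, proxy, and URL-map names are hypothetical placeholders,
// and the import path assumes the v1 surface of this client with Application Default Credentials.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	// Only fields set on the patch body are merged into the existing resource
	// (JSON merge patch semantics, per the generated method documentation).
	patch := &compute.TargetHttpProxy{UrlMap: "global/urlMaps/example-url-map"}
	op, err := svc.TargetHttpProxies.
		Patch("example-project", "example-proxy", patch).
		RequestId("b1d1c9f0-0000-4000-8000-000000000001"). // lets the server ignore duplicate retries
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("patch operation %s: %s", op.Name, op.Status)
}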
@@ -145637,7 +151375,7 @@ func (c *TargetHttpsProxiesAggregatedListCall) Header() http.Header { func (c *TargetHttpsProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -145740,6 +151478,11 @@ func (c *TargetHttpsProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/targetHttpsProxies", @@ -145841,7 +151584,7 @@ func (c *TargetHttpsProxiesDeleteCall) Header() http.Header { func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -145999,7 +151742,7 @@ func (c *TargetHttpsProxiesGetCall) Header() http.Header { func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -146164,7 +151907,7 @@ func (c *TargetHttpsProxiesInsertCall) Header() http.Header { func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -146347,6 +152090,15 @@ func (c *TargetHttpsProxiesListCall) PageToken(pageToken string) *TargetHttpsPro return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetHttpsProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetHttpsProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -146384,7 +152136,7 @@ func (c *TargetHttpsProxiesListCall) Header() http.Header { func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -146482,6 +152234,11 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/targetHttpsProxies", @@ -146585,7 +152342,7 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Header() http.Header { func (c *TargetHttpsProxiesSetQuicOverrideCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -146759,7 +152516,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -146938,7 +152695,7 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) Header() http.Header { func (c *TargetHttpsProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -147112,7 +152869,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Header() http.Header { func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -147269,7 +153026,7 @@ func (c *TargetHttpsProxiesTestIamPermissionsCall) Header() http.Header { func (c *TargetHttpsProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -147470,6 +153227,15 @@ func (c *TargetInstancesAggregatedListCall) PageToken(pageToken string) *TargetI return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides 
partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetInstancesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetInstancesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -147507,7 +153273,7 @@ func (c *TargetInstancesAggregatedListCall) Header() http.Header { func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -147610,6 +153376,11 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/targetInstances", @@ -147714,7 +153485,7 @@ func (c *TargetInstancesDeleteCall) Header() http.Header { func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -147884,7 +153655,7 @@ func (c *TargetInstancesGetCall) Header() http.Header { func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -148061,7 +153832,7 @@ func (c *TargetInstancesInsertCall) Header() http.Header { func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -148256,6 +154027,15 @@ func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesLi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetInstancesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetInstancesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
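// Editor's note: illustrative usage sketch only, not part of the vendored, generated client.
// The repeated additions above generate a ReturnPartialSuccess option on the List and
// AggregatedList calls; per the generated description, setting it opts in to partial results
// when part of the request fails. Project and zone names below are hypothetical, and the
// import path assumes the v1 surface of this client.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Opt in to partial success so a failing scope does not fail the whole listing.
	err = svc.TargetInstances.
		List("example-project", "us-central1-a").
		ReturnPartialSuccess(true).
		Pages(ctx, func(page *compute.TargetInstanceList) error {
			for _, ti := range page.Items {
				log.Printf("target instance: %s", ti.Name)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}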
@@ -148293,7 +154073,7 @@ func (c *TargetInstancesListCall) Header() http.Header { func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -148394,6 +154174,11 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "Name of the zone scoping this request.", // "location": "path", @@ -148487,7 +154272,7 @@ func (c *TargetInstancesTestIamPermissionsCall) Header() http.Header { func (c *TargetInstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -148670,7 +154455,7 @@ func (c *TargetPoolsAddHealthCheckCall) Header() http.Header { func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -148857,7 +154642,7 @@ func (c *TargetPoolsAddInstanceCall) Header() http.Header { func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -149071,6 +154856,15 @@ func (c *TargetPoolsAggregatedListCall) PageToken(pageToken string) *TargetPools return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetPoolsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetPoolsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -149108,7 +154902,7 @@ func (c *TargetPoolsAggregatedListCall) Header() http.Header { func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -149211,6 +155005,11 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/targetPools", @@ -149315,7 +155114,7 @@ func (c *TargetPoolsDeleteCall) Header() http.Header { func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -149485,7 +155284,7 @@ func (c *TargetPoolsGetCall) Header() http.Header { func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -149645,7 +155444,7 @@ func (c *TargetPoolsGetHealthCall) Header() http.Header { func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -149827,7 +155626,7 @@ func (c *TargetPoolsInsertCall) Header() http.Header { func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -150022,6 +155821,15 @@ func (c *TargetPoolsListCall) PageToken(pageToken string) *TargetPoolsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetPoolsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetPoolsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -150059,7 +155867,7 @@ func (c *TargetPoolsListCall) Header() http.Header { func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -150166,6 +155974,11 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/targetPools", @@ -150272,7 +156085,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) Header() http.Header { func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -150459,7 +156272,7 @@ func (c *TargetPoolsRemoveInstanceCall) Header() http.Header { func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -150653,7 +156466,7 @@ func (c *TargetPoolsSetBackupCall) Header() http.Header { func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -150827,7 +156640,7 @@ func (c *TargetPoolsTestIamPermissionsCall) Header() http.Header { func (c *TargetPoolsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -151005,7 +156818,7 @@ func (c *TargetSslProxiesDeleteCall) Header() http.Header { func (c *TargetSslProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -151163,7 +156976,7 @@ func (c *TargetSslProxiesGetCall) Header() http.Header { func (c *TargetSslProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ 
-151328,7 +157141,7 @@ func (c *TargetSslProxiesInsertCall) Header() http.Header { func (c *TargetSslProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -151511,6 +157324,15 @@ func (c *TargetSslProxiesListCall) PageToken(pageToken string) *TargetSslProxies return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetSslProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetSslProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -151548,7 +157370,7 @@ func (c *TargetSslProxiesListCall) Header() http.Header { func (c *TargetSslProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -151646,6 +157468,11 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/targetSslProxies", @@ -151749,7 +157576,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) Header() http.Header { func (c *TargetSslProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -151924,7 +157751,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Header() http.Header { func (c *TargetSslProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -152099,7 +157926,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Header() http.Header { func (c *TargetSslProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -152277,7 +158104,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) Header() http.Header { func (c *TargetSslProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -152433,7 +158260,7 @@ func (c *TargetSslProxiesTestIamPermissionsCall) Header() http.Header { func (c *TargetSslProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -152602,7 +158429,7 @@ func (c *TargetTcpProxiesDeleteCall) Header() http.Header { func (c *TargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -152760,7 +158587,7 @@ func (c *TargetTcpProxiesGetCall) Header() http.Header { func (c *TargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -152925,7 +158752,7 @@ func (c *TargetTcpProxiesInsertCall) Header() http.Header { func (c *TargetTcpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" 
gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -153108,6 +158935,15 @@ func (c *TargetTcpProxiesListCall) PageToken(pageToken string) *TargetTcpProxies return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetTcpProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetTcpProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -153145,7 +158981,7 @@ func (c *TargetTcpProxiesListCall) Header() http.Header { func (c *TargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -153243,6 +159079,11 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/targetTcpProxies", @@ -153346,7 +159187,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Header() http.Header { func (c *TargetTcpProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -153521,7 +159362,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Header() http.Header { func (c *TargetTcpProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -153725,6 +159566,15 @@ func (c *TargetVpnGatewaysAggregatedListCall) PageToken(pageToken string) *Targe return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetVpnGatewaysAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetVpnGatewaysAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -153762,7 +159612,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Header() http.Header { func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -153865,6 +159715,11 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/targetVpnGateways", @@ -153968,7 +159823,7 @@ func (c *TargetVpnGatewaysDeleteCall) Header() http.Header { func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -154137,7 +159992,7 @@ func (c *TargetVpnGatewaysGetCall) Header() http.Header { func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -154313,7 +160168,7 @@ func (c *TargetVpnGatewaysInsertCall) Header() http.Header { func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -154507,6 +160362,15 @@ func (c *TargetVpnGatewaysListCall) PageToken(pageToken string) *TargetVpnGatewa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetVpnGatewaysListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetVpnGatewaysListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -154544,7 +160408,7 @@ func (c *TargetVpnGatewaysListCall) Header() http.Header { func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -154651,6 +160515,11 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/targetVpnGateways", @@ -154757,7 +160626,7 @@ func (c *TargetVpnGatewaysSetLabelsCall) Header() http.Header { func (c *TargetVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -154925,7 +160794,7 @@ func (c *TargetVpnGatewaysTestIamPermissionsCall) Header() http.Header { func (c *TargetVpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -155135,6 +161004,15 @@ func (c *UrlMapsAggregatedListCall) PageToken(pageToken string) *UrlMapsAggregat return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *UrlMapsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *UrlMapsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -155172,7 +161050,7 @@ func (c *UrlMapsAggregatedListCall) Header() http.Header { func (c *UrlMapsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -155275,6 +161153,11 @@ func (c *UrlMapsAggregatedListCall) Do(opts ...googleapi.CallOption) (*UrlMapsAg // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/urlMaps", @@ -155377,7 +161260,7 @@ func (c *UrlMapsDeleteCall) Header() http.Header { func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -155536,7 +161419,7 @@ func (c *UrlMapsGetCall) Header() http.Header { func (c *UrlMapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -155702,7 +161585,7 @@ func (c *UrlMapsInsertCall) Header() http.Header { func (c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -155869,7 +161752,7 @@ func (c *UrlMapsInvalidateCacheCall) Header() http.Header { func (c *UrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -156062,6 +161945,15 @@ func (c *UrlMapsListCall) PageToken(pageToken string) *UrlMapsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *UrlMapsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *UrlMapsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -156099,7 +161991,7 @@ func (c *UrlMapsListCall) Header() http.Header { func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -156197,6 +162089,11 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/urlMaps", @@ -156303,7 +162200,7 @@ func (c *UrlMapsPatchCall) Header() http.Header { func (c *UrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -156460,7 +162357,7 @@ func (c *UrlMapsTestIamPermissionsCall) Header() http.Header { func (c *UrlMapsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -156633,7 +162530,7 @@ func (c *UrlMapsUpdateCall) Header() http.Header { func (c *UrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -156792,7 +162689,7 @@ func (c *UrlMapsValidateCall) Header() http.Header { func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -156991,6 +162888,15 @@ func (c *VpnGatewaysAggregatedListCall) PageToken(pageToken string) *VpnGateways return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *VpnGatewaysAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *VpnGatewaysAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -157028,7 +162934,7 @@ func (c *VpnGatewaysAggregatedListCall) Header() http.Header { func (c *VpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -157131,6 +163037,11 @@ func (c *VpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnGa // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/vpnGateways", @@ -157234,7 +163145,7 @@ func (c *VpnGatewaysDeleteCall) Header() http.Header { func (c *VpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -157403,7 +163314,7 @@ func (c *VpnGatewaysGetCall) Header() http.Header { func (c *VpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -157570,7 +163481,7 @@ func (c *VpnGatewaysGetStatusCall) Header() http.Header { func (c *VpnGatewaysGetStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -157746,7 +163657,7 @@ func (c *VpnGatewaysInsertCall) Header() http.Header { func (c *VpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -157940,6 +163851,15 @@ func (c *VpnGatewaysListCall) PageToken(pageToken string) *VpnGatewaysListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *VpnGatewaysListCall) ReturnPartialSuccess(returnPartialSuccess bool) *VpnGatewaysListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -157977,7 +163897,7 @@ func (c *VpnGatewaysListCall) Header() http.Header { func (c *VpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -158084,6 +164004,11 @@ func (c *VpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*VpnGatewayList, // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/vpnGateways", @@ -158190,7 +164115,7 @@ func (c *VpnGatewaysSetLabelsCall) Header() http.Header { func (c *VpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -158403,6 +164328,15 @@ func (c *VpnTunnelsAggregatedListCall) PageToken(pageToken string) *VpnTunnelsAg return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *VpnTunnelsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *VpnTunnelsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -158440,7 +164374,7 @@ func (c *VpnTunnelsAggregatedListCall) Header() http.Header { func (c *VpnTunnelsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -158543,6 +164477,11 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/aggregated/vpnTunnels", @@ -158646,7 +164585,7 @@ func (c *VpnTunnelsDeleteCall) Header() http.Header { func (c *VpnTunnelsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -158815,7 +164754,7 @@ func (c *VpnTunnelsGetCall) Header() http.Header { func (c *VpnTunnelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -158991,7 +164930,7 @@ func (c *VpnTunnelsInsertCall) Header() http.Header { func (c *VpnTunnelsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -159185,6 +165124,15 @@ func (c *VpnTunnelsListCall) PageToken(pageToken string) *VpnTunnelsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *VpnTunnelsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *VpnTunnelsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -159222,7 +165170,7 @@ func (c *VpnTunnelsListCall) Header() http.Header { func (c *VpnTunnelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -159329,6 +165277,11 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/regions/{region}/vpnTunnels", @@ -159435,7 +165388,7 @@ func (c *VpnTunnelsSetLabelsCall) Header() http.Header { func (c *VpnTunnelsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -159603,7 +165556,7 @@ func (c *VpnTunnelsTestIamPermissionsCall) Header() http.Header { func (c *VpnTunnelsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -159765,7 +165718,7 @@ func (c *ZoneOperationsDeleteCall) Header() http.Header { func (c *ZoneOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -159901,7 +165854,7 @@ func (c *ZoneOperationsGetCall) Header() http.Header { func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -160096,6 +166049,15 @@ func (c *ZoneOperationsListCall) PageToken(pageToken string) *ZoneOperationsList return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ZoneOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ZoneOperationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -160133,7 +166095,7 @@ func (c *ZoneOperationsListCall) Header() http.Header { func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -160234,6 +166196,11 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "Name of the zone for request.", // "location": "path", @@ -160337,7 +166304,7 @@ func (c *ZoneOperationsWaitCall) Header() http.Header { func (c *ZoneOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -160501,7 +166468,7 @@ func (c *ZonesGetCall) Header() http.Header { func (c *ZonesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -160685,6 +166652,15 @@ func (c *ZonesListCall) PageToken(pageToken string) *ZonesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ZonesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ZonesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -160722,7 +166698,7 @@ func (c *ZonesListCall) Header() http.Header { func (c *ZonesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -160820,6 +166796,11 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/zones", diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 9b22f1dcd6a..4a2a4d08542 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -29,7 +29,7 @@ "description": "Creates and runs virtual machines on Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", - "etag": "\"u9GIe6H63LSGq-9_t39K2Zx_EAc/VZADaIUTjNHbxdv5VQ4VUIadXV4\"", + "etag": "\"-2NioU2H8y8siEzrBOV_qzRI6kQ/CWWSZzuCa0kWhbZODrhTJl5Ht_E\"", "icons": { "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" @@ -130,6 +130,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/acceleratorTypes", @@ -185,7 +190,7 @@ ] }, "list": { - "description": "Retrieves a list of accelerator types available to the specified project.", + "description": "Retrieves a list of accelerator types that are available to the specified project.", "httpMethod": "GET", "id": "compute.acceleratorTypes.list", "parameterOrder": [ @@ -223,6 +228,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -287,6 +297,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/addresses", @@ -473,6 +488,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/addresses", @@ -531,6 +551,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/autoscalers", @@ -711,6 +736,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "Name of the zone for this request.", "location": "path", @@ -1053,6 +1083,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/backendBuckets", @@ -1233,6 +1268,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/backendServices", @@ -1398,7 +1438,7 @@ ] }, "insert": { - "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Understanding backend services for more information.", + "description": "Creates a BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.", "httpMethod": "POST", "id": "compute.backendServices.insert", "parameterOrder": [ @@ -1467,6 +1507,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/backendServices", @@ -1480,7 +1525,7 @@ ] }, "patch": { - "description": "Patches the specified BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Patches the specified BackendService resource with the data included in the request. For more information, see Backend services overview. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.backendServices.patch", "parameterOrder": [ @@ -1561,7 +1606,7 @@ ] }, "update": { - "description": "Updates the specified BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. 
Read Understanding backend services for more information.", + "description": "Updates the specified BackendService resource with the data included in the request. For more information, see Backend services overview.", "httpMethod": "PUT", "id": "compute.backendServices.update", "parameterOrder": [ @@ -1647,6 +1692,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/diskTypes", @@ -1740,6 +1790,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -1853,6 +1908,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/disks", @@ -1883,7 +1943,7 @@ "type": "string" }, "guestFlush": { - "description": "[Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", + "description": "[Input Only] Whether to attempt an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", "location": "query", "type": "boolean" }, @@ -2016,6 +2076,12 @@ "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -2049,7 +2115,7 @@ ] }, "insert": { - "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", + "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk from a source (sourceImage, sourceSnapshot, or sourceDisk) or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", "httpMethod": "POST", "id": "compute.disks.insert", "parameterOrder": [ @@ -2133,6 +2199,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -2533,6 +2604,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/externalVpnGateways", @@ -2764,6 +2840,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/firewalls", @@ -2904,6 +2985,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/forwardingRules", @@ -3090,6 +3176,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/forwardingRules", @@ -3346,6 +3437,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/addresses", @@ -3504,6 +3600,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/forwardingRules", @@ -3822,6 +3923,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/networkEndpointGroups", @@ -3878,6 +3984,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", @@ -3936,6 +4047,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/operations", @@ -4049,6 +4165,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/operations", @@ -4141,6 +4262,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/healthChecks", @@ -4295,6 +4421,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/healthChecks", @@ -4535,6 +4666,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/httpHealthChecks", @@ -4775,6 +4911,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/httpsHealthChecks", @@ -5029,6 +5170,12 @@ "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -5132,6 +5279,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/images", @@ -5144,6 +5296,47 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "patch": { + "description": "Patches the specified image with the data included in the request. Only the following fields can be modified: family, description, deprecation status.", + "httpMethod": "PATCH", + "id": "compute.images.patch", + "parameterOrder": [ + "project", + "image" + ], + "parameters": { + "image": { + "description": "Name of the image resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/images/{image}", + "request": { + "$ref": "Image" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setIamPolicy": { "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "httpMethod": "POST", @@ -5346,6 +5539,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/instanceGroupManagers", @@ -5538,6 +5736,48 @@ "https://www.googleapis.com/auth/compute" ] }, + "deletePerInstanceConfigs": { + "description": "Deletes selected per-instance configs for the managed instance group.", + "httpMethod": "POST", + "id": "compute.instanceGroupManagers.deletePerInstanceConfigs", + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", + "request": { + "$ref": "InstanceGroupManagersDeletePerInstanceConfigsReq" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "get": { "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request.", "httpMethod": "GET", @@ -5657,6 +5897,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the managed instance group is located.", "location": "path", @@ -5675,7 +5920,7 @@ ] }, "listErrors": { - "description": "Lists all errors thrown by actions on instances for a given managed instance group.", + "description": "Lists all errors thrown by actions on instances for a given managed instance group. The filter and orderBy query parameters are not supported.", "httpMethod": "GET", "id": "compute.instanceGroupManagers.listErrors", "parameterOrder": [ @@ -5720,6 +5965,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", "location": "path", @@ -5738,7 +5988,7 @@ ] }, "listManagedInstances": { - "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", + "description": "Lists all of the instances in the managed instance group. 
Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported.", "httpMethod": "POST", "id": "compute.instanceGroupManagers.listManagedInstances", "parameterOrder": [ @@ -5783,6 +6033,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the managed instance group is located.", "location": "path", @@ -5800,6 +6055,74 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "listPerInstanceConfigs": { + "description": "Lists all of the per-instance configs defined for the managed instance group. The orderBy query parameter is not supported.", + "httpMethod": "POST", + "id": "compute.instanceGroupManagers.listPerInstanceConfigs", + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "instanceGroupManager": { + "description": "The name of the managed instance group. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, + "zone": { + "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", + "response": { + "$ref": "InstanceGroupManagersListPerInstanceConfigsResp" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "patch": { "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", @@ -5847,10 +6170,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "recreateInstances": { - "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + "patchPerInstanceConfigs": { + "description": "Inserts or patches per-instance configs for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", "httpMethod": "POST", - "id": "compute.instanceGroupManagers.recreateInstances", + "id": "compute.instanceGroupManagers.patchPerInstanceConfigs", "parameterOrder": [ "project", "zone", @@ -5858,7 +6181,7 @@ ], "parameters": { "instanceGroupManager": { - "description": "The name of the managed instance group.", + "description": "The name of the managed instance group. 
It should conform to RFC1035.", "location": "path", "required": true, "type": "string" @@ -5876,15 +6199,15 @@ "type": "string" }, "zone": { - "description": "The name of the zone where the managed instance group is located.", + "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", "location": "path", "required": true, "type": "string" } }, - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", "request": { - "$ref": "InstanceGroupManagersRecreateInstancesRequest" + "$ref": "InstanceGroupManagersPatchPerInstanceConfigsReq" }, "response": { "$ref": "Operation" @@ -5894,15 +6217,62 @@ "https://www.googleapis.com/auth/compute" ] }, - "resize": { - "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + "recreateInstances": { + "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", "httpMethod": "POST", - "id": "compute.instanceGroupManagers.resize", + "id": "compute.instanceGroupManagers.recreateInstances", "parameterOrder": [ "project", "zone", - "instanceGroupManager", - "size" + "instanceGroupManager" + ], + "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone where the managed instance group is located.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + "request": { + "$ref": "InstanceGroupManagersRecreateInstancesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "resize": { + "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + "httpMethod": "POST", + "id": "compute.instanceGroupManagers.resize", + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager", + "size" ], "parameters": { "instanceGroupManager": { @@ -6039,6 +6409,53 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "updatePerInstanceConfigs": { + "description": "Inserts or updates per-instance configs for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", + "httpMethod": "POST", + "id": "compute.instanceGroupManagers.updatePerInstanceConfigs", + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + "request": { + "$ref": "InstanceGroupManagersUpdatePerInstanceConfigsReq" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -6133,6 +6550,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/instanceGroups", @@ -6190,7 +6612,7 @@ ] }, "get": { - "description": "Returns the specified instance group. Gets a list of available instance groups by making a list() request.", + "description": "Returns the specified zonal instance group. Get a list of available zonal instance groups by making a list() request.\n\nFor managed instance groups, use the instanceGroupManagers or regionInstanceGroupManagers methods instead.", "httpMethod": "GET", "id": "compute.instanceGroups.get", "parameterOrder": [ @@ -6270,7 +6692,7 @@ ] }, "list": { - "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + "description": "Retrieves the list of zonal instance group resources contained within the specified zone.\n\nFor managed instance groups, use the instanceGroupManagers or regionInstanceGroupManagers methods instead.", "httpMethod": "GET", "id": "compute.instanceGroups.list", "parameterOrder": [ @@ -6308,6 +6730,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the instance group is located.", "location": "path", @@ -6326,7 +6753,7 @@ ] }, "listInstances": { - "description": "Lists the instances in the specified instance group.", + "description": "Lists the instances in the specified instance group. 
The orderBy query parameter is not supported.", "httpMethod": "POST", "id": "compute.instanceGroups.listInstances", "parameterOrder": [ @@ -6371,6 +6798,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the instance group is located.", "location": "path", @@ -6570,6 +7002,12 @@ "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -6665,6 +7103,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/instanceTemplates", @@ -6901,6 +7344,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/instances", @@ -6924,7 +7372,7 @@ ], "parameters": { "forceAttach": { - "description": "Whether to force attach the disk even if it's currently attached to another instance.", + "description": "Whether to force attach the regional disk even if it's currently attached to another instance. 
If you try to force attach a zonal disk to an instance, you will receive an error.", "location": "query", "type": "boolean" }, @@ -7230,6 +7678,12 @@ "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -7262,6 +7716,48 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getScreenshot": { + "description": "Returns the screenshot from the specified instance.", + "httpMethod": "GET", + "id": "compute.instances.getScreenshot", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name of the instance scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/screenshot", + "response": { + "$ref": "Screenshot" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "getSerialPortOutput": { "description": "Returns the last 1 MB of serial port output from the specified instance.", "httpMethod": "GET", @@ -7273,7 +7769,7 @@ ], "parameters": { "instance": { - "description": "Name of the instance scoping this request.", + "description": "Name of the instance for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -7296,7 +7792,7 @@ "type": "string" }, "start": { - "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. For subsequent calls, this field should be set to the next value returned in the previous call.", + "description": "Specifies the starting byte position of the output to return. To start with the first byte of output to the specified port, omit this field or set it to `0`.\n\nIf the output for that byte position is available, this field matches the `start` parameter sent with the request. If the amount of serial console output exceeds the size of the buffer (1 MB), the oldest output is discarded and is no longer available. If the requested start position refers to discarded output, the start position is adjusted to the oldest output still available, and the adjusted start position is returned as the `start` property value.\n\nYou can also provide a negative start position, which translates to the most recent number of bytes written to the serial port. 
For example, -3 is interpreted as the most recent 3 bytes written to the serial console.", "format": "int64", "location": "query", "type": "string" @@ -7446,6 +7942,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -7465,7 +7966,7 @@ ] }, "listReferrers": { - "description": "Retrieves the list of referrers to instances contained within the specified zone. For more information, read Viewing Referrers to VM Instances.", + "description": "Retrieves a list of resources that refer to the VM instance specified in the request. For example, if the VM instance is part of a managed or unmanaged instance group, the referrers list includes the instance group. For more information, read Viewing referrers to VM instances.", "httpMethod": "GET", "id": "compute.instances.listReferrers", "parameterOrder": [ @@ -7511,6 +8012,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -8027,7 +8533,7 @@ ] }, "setScheduling": { - "description": "Sets an instance's scheduling options.", + "description": "Sets an instance's scheduling options. You can only call this method on a stopped instance, that is, a VM instance that is in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states.", "httpMethod": "POST", "id": "compute.instances.setScheduling", "parameterOrder": [ @@ -8788,6 +9294,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/interconnectAttachments", @@ -8979,6 +9490,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/interconnectAttachments", @@ -9115,6 +9631,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/interconnectLocations", @@ -9307,6 +9828,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/interconnects", @@ -9520,6 +10046,12 @@ "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -9618,6 +10150,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/licenses", @@ -9749,6 +10286,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/machineTypes", @@ -9842,6 +10384,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -9906,6 +10453,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/networkEndpointGroups", @@ -10175,6 +10727,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", "location": "path", @@ -10238,6 +10795,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", "location": "path", @@ -10490,6 +11052,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/networks", @@ -10570,6 +11137,11 @@ "description": "The region of the request. The response will include all subnet routes, static routes and dynamic routes in the region.", "location": "query", "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/networks/{network}/listPeeringRoutes", @@ -10838,6 +11410,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/nodeGroups", @@ -10997,6 +11574,12 @@ "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -11117,6 +11700,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -11182,6 +11770,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -11433,6 +12026,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/nodeTemplates", @@ -11543,6 +12141,12 @@ "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -11661,6 +12265,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/nodeTemplates", @@ -11808,6 +12417,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/nodeTypes", @@ -11901,6 +12515,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -11965,6 +12584,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/packetMirrorings", @@ -12151,6 +12775,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/packetMirrorings", @@ -12475,6 +13104,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/getXpnResources", @@ -12523,6 +13157,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/listXpnHosts", @@ -12883,6 +13522,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/autoscalers", @@ -13126,7 +13770,7 @@ ] }, "insert": { - "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Understanding backend services for more information.", + "description": "Creates a regional BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.", "httpMethod": "POST", "id": "compute.regionBackendServices.insert", "parameterOrder": [ @@ -13211,6 +13855,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/backendServices", @@ -13224,7 +13873,7 @@ ] }, "patch": { - "description": "Updates the specified regional BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Understanding backend services This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.regionBackendServices.patch", "parameterOrder": [ @@ -13273,7 +13922,7 @@ ] }, "update": { - "description": "Updates the specified regional BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information.", + "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Backend services overview.", "httpMethod": "PUT", "id": "compute.regionBackendServices.update", "parameterOrder": [ @@ -13367,6 +14016,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/commitments", @@ -13507,6 +14161,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/commitments", @@ -13610,6 +14269,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/diskTypes", @@ -13821,6 +14485,12 @@ "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -13944,6 +14614,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/disks", @@ -14194,6 +14869,247 @@ } } }, + "regionHealthCheckServices": { + "methods": { + "delete": { + "description": "Deletes the specified regional HealthCheckService.", + "httpMethod": "DELETE", + "id": "compute.regionHealthCheckServices.delete", + "parameterOrder": [ + "project", + "region", + "healthCheckService" + ], + "parameters": { + "healthCheckService": { + "description": "Name of the HealthCheckService to delete. The name must be 1-63 characters long, and comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified regional HealthCheckService resource.", + "httpMethod": "GET", + "id": "compute.regionHealthCheckServices.get", + "parameterOrder": [ + "project", + "region", + "healthCheckService" + ], + "parameters": { + "healthCheckService": { + "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "response": { + "$ref": "HealthCheckService" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a regional HealthCheckService resource in the specified project and region using the data included in the request.", + "httpMethod": "POST", + "id": "compute.regionHealthCheckServices.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthCheckServices", + "request": { + "$ref": "HealthCheckService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Lists all the HealthCheckService resources that have been configured for the specified project in the given region.", + "httpMethod": "GET", + "id": "compute.regionHealthCheckServices.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + } + }, + "path": "{project}/regions/{region}/healthCheckServices", + "response": { + "$ref": "HealthCheckServicesList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Updates the specified regional HealthCheckService resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.regionHealthCheckServices.patch", + "parameterOrder": [ + "project", + "region", + "healthCheckService" + ], + "parameters": { + "healthCheckService": { + "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "request": { + "$ref": "HealthCheckService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, "regionHealthChecks": { "methods": { "delete": { @@ -14370,6 +15286,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/healthChecks", @@ -14711,6 +15632,48 @@ "https://www.googleapis.com/auth/compute" ] }, + "deletePerInstanceConfigs": { + "description": "Deletes selected per-instance configs for the managed instance group.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.deletePerInstanceConfigs", + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request, should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", + "request": { + "$ref": "RegionInstanceGroupManagerDeleteInstanceConfigReq" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "get": { "description": "Returns all of the details about the specified managed instance group.", "httpMethod": "GET", @@ -14835,6 +15798,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/instanceGroupManagers", @@ -14848,7 +15816,7 @@ ] }, "listErrors": { - "description": "Lists all errors thrown by actions on instances for a given regional managed instance group.", + "description": "Lists all errors thrown by actions on instances for a given regional managed instance group. The filter and orderBy query parameters are not supported.", "httpMethod": "GET", "id": "compute.regionInstanceGroupManagers.listErrors", "parameterOrder": [ @@ -14898,6 +15866,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", @@ -14911,7 +15884,7 @@ ] }, "listManagedInstances": { - "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", + "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported.", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.listManagedInstances", "parameterOrder": [ @@ -14961,6 +15934,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", @@ -14973,6 +15951,74 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "listPerInstanceConfigs": { + "description": "Lists all of the per-instance configs defined for the managed instance group. The orderBy query parameter is not supported.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "instanceGroupManager": { + "description": "The name of the managed instance group. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request, should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + } + }, + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", + "response": { + "$ref": "RegionInstanceGroupManagersListInstanceConfigsResp" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "patch": { "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", @@ -15020,6 +16066,53 @@ "https://www.googleapis.com/auth/compute" ] }, + "patchPerInstanceConfigs": { + "description": "Insert or patch (for the ones that already exist) per-instance configs for the managed instance group. perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.patchPerInstanceConfigs", + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group. 
It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request, should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", + "request": { + "$ref": "RegionInstanceGroupManagerPatchInstanceConfigReq" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "recreateInstances": { "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", "httpMethod": "POST", @@ -15213,6 +16306,53 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "updatePerInstanceConfigs": { + "description": "Insert or update (for the ones that already exist) per-instance configs for the managed instance group. perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.updatePerInstanceConfigs", + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group. 
It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request, should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + "request": { + "$ref": "RegionInstanceGroupManagerUpdateInstanceConfigReq" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -15302,6 +16442,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/instanceGroups", @@ -15315,7 +16460,7 @@ ] }, "listInstances": { - "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", + "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. The orderBy query parameter is not supported.", "httpMethod": "POST", "id": "compute.regionInstanceGroups.listInstances", "parameterOrder": [ @@ -15365,6 +16510,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", @@ -15429,22 +16579,21 @@ } } }, - "regionOperations": { + "regionNetworkEndpointGroups": { "methods": { "delete": { - "description": "Deletes the specified region-specific Operations resource.", + "description": "Deletes the specified network endpoint group. 
Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", "httpMethod": "DELETE", - "id": "compute.regionOperations.delete", + "id": "compute.regionNetworkEndpointGroups.delete", "parameterOrder": [ "project", "region", - "operation" + "networkEndpointGroup" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to delete.", + "networkEndpointGroup": { + "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -15456,33 +16605,39 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "{project}/regions/{region}/operations/{operation}", + "path": "{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "response": { + "$ref": "Operation" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, "get": { - "description": "Retrieves the specified region-specific Operations resource.", + "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", "httpMethod": "GET", - "id": "compute.regionOperations.get", + "id": "compute.regionNetworkEndpointGroups.get", "parameterOrder": [ "project", "region", - "operation" + "networkEndpointGroup" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to return.", + "networkEndpointGroup": { + "description": "The name of the network endpoint group. It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -15494,16 +16649,15 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "The name of the region where the network endpoint group is located. 
It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/operations/{operation}", + "path": "{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", "response": { - "$ref": "Operation" + "$ref": "NetworkEndpointGroup" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15511,10 +16665,50 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "insert": { + "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + "httpMethod": "POST", + "id": "compute.regionNetworkEndpointGroups.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where you want to create the network endpoint group. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/networkEndpointGroups", + "request": { + "$ref": "NetworkEndpointGroup" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "list": { - "description": "Retrieves a list of Operation resources contained within the specified region.", + "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", "httpMethod": "GET", - "id": "compute.regionOperations.list", + "id": "compute.regionNetworkEndpointGroups.list", "parameterOrder": [ "project", "region" @@ -15551,58 +16745,20 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/regions/{region}/operations", - "response": { - "$ref": "OperationList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "wait": { - "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. 
This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress.\n\nThis method is called on a best-effort basis. Specifically: \n- In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. \n- If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`.", - "httpMethod": "POST", - "id": "compute.regionOperations.wait", - "parameterOrder": [ - "project", - "region", - "operation" - ], - "parameters": { - "operation": { - "description": "Name of the Operations resource to return.", + "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, - "path": "{project}/regions/{region}/operations/{operation}/wait", + "path": "{project}/regions/{region}/networkEndpointGroups", "response": { - "$ref": "Operation" + "$ref": "NetworkEndpointGroupList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15612,18 +16768,25 @@ } } }, - "regionSslCertificates": { + "regionNotificationEndpoints": { "methods": { "delete": { - "description": "Deletes the specified SslCertificate resource in the region.", + "description": "Deletes the specified NotificationEndpoint in the given region", "httpMethod": "DELETE", - "id": "compute.regionSslCertificates.delete", + "id": "compute.regionNotificationEndpoints.delete", "parameterOrder": [ "project", "region", - "sslCertificate" + "notificationEndpoint" ], "parameters": { + "notificationEndpoint": { + "description": "Name of the NotificationEndpoint resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -15642,16 +16805,9 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "sslCertificate": { - "description": "Name of the SslCertificate resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", + "path": "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", "response": { "$ref": "Operation" }, @@ -15661,15 +16817,22 @@ ] }, "get": { - "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request.", + "description": "Returns the specified NotificationEndpoint resource in the given region.", "httpMethod": "GET", - "id": "compute.regionSslCertificates.get", + "id": "compute.regionNotificationEndpoints.get", "parameterOrder": [ "project", "region", - "sslCertificate" + "notificationEndpoint" ], "parameters": { + "notificationEndpoint": { + "description": "Name of the NotificationEndpoint resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -15683,18 +16846,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "sslCertificate": { - "description": "Name of the SslCertificate resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", + "path": "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", "response": { - "$ref": "SslCertificate" + "$ref": "NotificationEndpoint" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15703,9 +16859,392 @@ ] }, "insert": { - "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", + "description": "Create a NotificationEndpoint in the specified project in the given region using the parameters that are included in the request.", "httpMethod": "POST", - "id": "compute.regionSslCertificates.insert", + "id": "compute.regionNotificationEndpoints.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/notificationEndpoints", + "request": { + "$ref": "NotificationEndpoint" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Lists the NotificationEndpoints for a project in the given region.", + "httpMethod": "GET", + "id": "compute.regionNotificationEndpoints.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + } + }, + "path": "{project}/regions/{region}/notificationEndpoints", + "response": { + "$ref": "NotificationEndpointList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "regionOperations": { + "methods": { + "delete": { + "description": "Deletes the specified region-specific Operations resource.", + "httpMethod": "DELETE", + "id": "compute.regionOperations.delete", + "parameterOrder": [ + "project", + "region", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/operations/{operation}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Retrieves the specified region-specific Operations resource.", + "httpMethod": "GET", + "id": "compute.regionOperations.get", + "parameterOrder": [ + "project", + "region", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/operations/{operation}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "description": "Retrieves a list of 
Operation resources contained within the specified region.", + "httpMethod": "GET", + "id": "compute.regionOperations.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + } + }, + "path": "{project}/regions/{region}/operations", + "response": { + "$ref": "OperationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "wait": { + "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress.\n\nThis method is called on a best-effort basis. Specifically: \n- In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. \n- If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`.", + "httpMethod": "POST", + "id": "compute.regionOperations.wait", + "parameterOrder": [ + "project", + "region", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/operations/{operation}/wait", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "regionSslCertificates": { + "methods": { + "delete": { + "description": "Deletes the specified SslCertificate resource in the region.", + "httpMethod": "DELETE", + "id": "compute.regionSslCertificates.delete", + "parameterOrder": [ + "project", + "region", + "sslCertificate" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "sslCertificate": { + "description": "Name of the SslCertificate resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request.", + "httpMethod": "GET", + "id": "compute.regionSslCertificates.get", + "parameterOrder": [ + "project", + "region", + "sslCertificate" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "sslCertificate": { + "description": "Name of the SslCertificate resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", + "response": { + "$ref": "SslCertificate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", + "httpMethod": "POST", + "id": "compute.regionSslCertificates.insert", "parameterOrder": [ "project", "region" @@ -15788,6 +17327,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/sslCertificates", @@ -15978,6 +17522,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/targetHttpProxies", @@ -16217,6 +17766,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/targetHttpsProxies", @@ -16505,6 +18059,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/urlMaps", @@ -16734,6 +18293,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions", @@ -16792,6 +18356,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/reservations", @@ -16902,6 +18471,12 @@ "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -17014,6 +18589,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "Name of the zone for this request.", "location": "path", @@ -17216,6 +18796,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/resourcePolicies", @@ -17326,6 +18911,12 @@ "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -17444,6 +19035,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/resourcePolicies", @@ -17591,6 +19187,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/routers", @@ -17738,6 +19339,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "router": { "description": "Name of the Router resource to query for Nat Mapping information of VM endpoints.", "location": "path", @@ -17884,6 +19490,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/routers", @@ -18185,6 +19796,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/routes", @@ -18419,6 +20035,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/securityPolicies", @@ -18468,6 +20089,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/securityPolicies/listPreconfiguredExpressionSets", @@ -18686,6 +20312,12 @@ "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -18748,6 +20380,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/snapshots", @@ -18915,6 +20552,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/sslCertificates", @@ -19069,6 +20711,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/sslCertificates", @@ -19225,6 +20872,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/sslPolicies", @@ -19274,6 +20926,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/sslPolicies/listAvailableFeatures", @@ -19372,6 +21029,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/subnetworks", @@ -19531,6 +21193,12 @@ "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -19649,6 +21317,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/subnetworks", @@ -19662,7 +21335,7 @@ ] }, "listUsable": { - "description": "Retrieves an aggregated list of all usable subnetworks in the project. The list contains all of the subnetworks in the project and the subnetworks that were shared by a Shared VPC host project.", + "description": "Retrieves an aggregated list of all usable subnetworks in the project.", "httpMethod": "GET", "id": "compute.subnetworks.listUsable", "parameterOrder": [ @@ -19698,6 +21371,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/subnetworks/listUsable", @@ -19905,6 +21583,210 @@ } } }, + "targetGrpcProxies": { + "methods": { + "delete": { + "description": "Deletes the specified TargetGrpcProxy in the given scope", + "httpMethod": "DELETE", + "id": "compute.targetGrpcProxies.delete", + "parameterOrder": [ + "project", + "targetGrpcProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetGrpcProxy": { + "description": "Name of the TargetGrpcProxy resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/targetGrpcProxies/{targetGrpcProxy}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified TargetGrpcProxy resource in the given scope.", + "httpMethod": "GET", + "id": "compute.targetGrpcProxies.get", + "parameterOrder": [ + "project", + "targetGrpcProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "targetGrpcProxy": { + "description": "Name of the TargetGrpcProxy resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/targetGrpcProxies/{targetGrpcProxy}", + "response": { + "$ref": "TargetGrpcProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a TargetGrpcProxy in the specified project in the given scope using the parameters that are included in the request.", + "httpMethod": "POST", + "id": "compute.targetGrpcProxies.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/targetGrpcProxies", + "request": { + "$ref": "TargetGrpcProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Lists the TargetGrpcProxies for a project in the given scope.", + "httpMethod": "GET", + "id": "compute.targetGrpcProxies.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + } + }, + "path": "{project}/global/targetGrpcProxies", + "response": { + "$ref": "TargetGrpcProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Patches the specified TargetGrpcProxy resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.targetGrpcProxies.patch", + "parameterOrder": [ + "project", + "targetGrpcProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetGrpcProxy": { + "description": "Name of the TargetGrpcProxy resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/targetGrpcProxies/{targetGrpcProxy}", + "request": { + "$ref": "TargetGrpcProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, "targetHttpProxies": { "methods": { "aggregatedList": { @@ -19944,16 +21826,180 @@ "type": "string" }, "project": { - "description": "Name of the project scoping this request.", + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + } + }, + "path": "{project}/aggregated/targetHttpProxies", + "response": { + "$ref": "TargetHttpProxyAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified TargetHttpProxy resource.", + "httpMethod": "DELETE", + "id": "compute.targetHttpProxies.delete", + "parameterOrder": [ + "project", + "targetHttpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified TargetHttpProxy resource. 
Gets a list of available target HTTP proxies by making a list() request.", + "httpMethod": "GET", + "id": "compute.targetHttpProxies.get", + "parameterOrder": [ + "project", + "targetHttpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + "response": { + "$ref": "TargetHttpProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", + "httpMethod": "POST", + "id": "compute.targetHttpProxies.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/targetHttpProxies", + "request": { + "$ref": "TargetHttpProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", + "httpMethod": "GET", + "id": "compute.targetHttpProxies.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, - "path": "{project}/aggregated/targetHttpProxies", + "path": "{project}/global/targetHttpProxies", "response": { - "$ref": "TargetHttpProxyAggregatedList" + "$ref": "TargetHttpProxyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -19961,10 +22007,10 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "description": "Deletes the specified TargetHttpProxy resource.", - "httpMethod": "DELETE", - "id": "compute.targetHttpProxies.delete", + "patch": { + "description": "Patches the specified TargetHttpProxy resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules. 
(== suppress_warning http-rest-shadowed ==)", + "httpMethod": "PATCH", + "id": "compute.targetHttpProxies.patch", "parameterOrder": [ "project", "targetHttpProxy" @@ -19983,40 +22029,7 @@ "type": "string" }, "targetHttpProxy": { - "description": "Name of the TargetHttpProxy resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "description": "Returns the specified TargetHttpProxy resource. Gets a list of available target HTTP proxies by making a list() request.", - "httpMethod": "GET", - "id": "compute.targetHttpProxies.get", - "parameterOrder": [ - "project", - "targetHttpProxy" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy resource to return.", + "description": "Name of the TargetHttpProxy resource to patch.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -20024,37 +22037,6 @@ } }, "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", - "response": { - "$ref": "TargetHttpProxy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", - "httpMethod": "POST", - "id": "compute.targetHttpProxies.insert", - "parameterOrder": [ - "project" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "{project}/global/targetHttpProxies", "request": { "$ref": "TargetHttpProxy" }, @@ -20066,55 +22048,6 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", - "httpMethod": "GET", - "id": "compute.targetHttpProxies.list", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - } - }, - "path": "{project}/global/targetHttpProxies", - "response": { - "$ref": "TargetHttpProxyList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, "setUrlMap": { "description": "Changes the URL map for TargetHttpProxy.", "httpMethod": "POST", @@ -20202,6 +22135,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/targetHttpsProxies", @@ -20356,6 +22294,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/targetHttpsProxies", @@ -20576,6 +22519,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/targetInstances", @@ -20756,6 +22704,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "Name of the zone scoping this request.", "location": "path", @@ -20918,6 +22871,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/targetPools", @@ -21149,6 +23107,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/targetPools", @@ -21460,6 +23423,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/targetSslProxies", @@ -21781,6 +23749,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/targetTcpProxies", @@ -21915,121 +23888,190 @@ "location": "query", "type": "string" }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - } - }, - "path": "{project}/aggregated/targetVpnGateways", - "response": { - "$ref": "TargetVpnGatewayAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "delete": { - "description": "Deletes the specified target VPN gateway.", - "httpMethod": "DELETE", - "id": "compute.targetVpnGateways.delete", - "parameterOrder": [ - "project", - "region", - "targetVpnGateway" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetVpnGateway": { - "description": "Name of the target VPN gateway to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "description": "Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request.", - "httpMethod": "GET", - "id": "compute.targetVpnGateways.get", - "parameterOrder": [ - "project", - "region", - "targetVpnGateway" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "targetVpnGateway": { - "description": "Name of the target VPN gateway to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", - "response": { - "$ref": "TargetVpnGateway" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates a target VPN gateway in the specified project and region using the data included in the request.", - "httpMethod": "POST", - "id": "compute.targetVpnGateways.insert", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + } + }, + "path": "{project}/aggregated/targetVpnGateways", + "response": { + "$ref": "TargetVpnGatewayAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified target VPN gateway.", + "httpMethod": "DELETE", + "id": "compute.targetVpnGateways.delete", + "parameterOrder": [ + "project", + "region", + "targetVpnGateway" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetVpnGateway": { + "description": "Name of the target VPN gateway to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified target VPN gateway. 
Gets a list of available target VPN gateways by making a list() request.", + "httpMethod": "GET", + "id": "compute.targetVpnGateways.get", + "parameterOrder": [ + "project", + "region", + "targetVpnGateway" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "targetVpnGateway": { + "description": "Name of the target VPN gateway to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "response": { + "$ref": "TargetVpnGateway" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a target VPN gateway in the specified project and region using the data included in the request.", + "httpMethod": "POST", + "id": "compute.targetVpnGateways.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetVpnGateways", + "request": { + "$ref": "TargetVpnGateway" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves a list of target VPN gateways available to the specified project and region.", + "httpMethod": "GET", + "id": "compute.targetVpnGateways.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -22044,69 +24086,10 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "{project}/regions/{region}/targetVpnGateways", - "request": { - "$ref": "TargetVpnGateway" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "description": "Retrieves a list of target VPN gateways available to the specified project and region.", - "httpMethod": "GET", - "id": "compute.targetVpnGateways.list", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" + "type": "boolean" } }, "path": "{project}/regions/{region}/targetVpnGateways", @@ -22165,6 +24148,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/urlMaps", @@ -22360,6 +24348,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/urlMaps", @@ -22536,6 +24529,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/vpnGateways", @@ -22764,6 +24762,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/vpnGateways", @@ -22916,6 +24919,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/vpnTunnels", @@ -23102,6 +25110,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/vpnTunnels", @@ -23237,141 +25250,151 @@ "required": true, "type": "string" }, - "zone": { - "description": "Name of the zone for request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/operations", - "response": { - "$ref": "OperationList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "wait": { - "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress.\n\nThis method is called on a best-effort basis. Specifically: \n- In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. \n- If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`.", - "httpMethod": "POST", - "id": "compute.zoneOperations.wait", - "parameterOrder": [ - "project", - "zone", - "operation" - ], - "parameters": { - "operation": { - "description": "Name of the Operations resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "zone": { - "description": "Name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/operations/{operation}/wait", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "zones": { - "methods": { - "get": { - "description": "Returns the specified Zone resource. 
Gets a list of available zones by making a list() request.", - "httpMethod": "GET", - "id": "compute.zones.get", - "parameterOrder": [ - "project", - "zone" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "zone": { - "description": "Name of the zone resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}", - "response": { - "$ref": "Zone" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "list": { - "description": "Retrieves the list of Zone resources available to the specified project.", - "httpMethod": "GET", - "id": "compute.zones.list", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, + "zone": { + "description": "Name of the zone for request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/operations", + "response": { + "$ref": "OperationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "wait": { + "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress.\n\nThis method is called on a best-effort basis. Specifically: \n- In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. \n- If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`.", + "httpMethod": "POST", + "id": "compute.zoneOperations.wait", + "parameterOrder": [ + "project", + "zone", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/operations/{operation}/wait", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "zones": { + "methods": { + "get": { + "description": "Returns the specified Zone resource. 
Gets a list of available zones by making a list() request.", + "httpMethod": "GET", + "id": "compute.zones.get", + "parameterOrder": [ + "project", + "zone" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}", + "response": { + "$ref": "Zone" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "description": "Retrieves the list of Zone resources available to the specified project.", + "httpMethod": "GET", + "id": "compute.zones.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" + "type": "boolean" } }, "path": "{project}/zones", @@ -23387,7 +25410,7 @@ } } }, - "revision": "20200427", + "revision": "20200910", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -23433,7 +25456,7 @@ "type": "string" }, "maximumCardsPerInstance": { - "description": "[Output Only] Maximum accelerator cards allowed per instance.", + "description": "[Output Only] Maximum number of accelerator cards allowed per instance.", "format": "int32", "type": "integer" }, @@ -23443,7 +25466,7 @@ "type": "string" }, "selfLink": { - "description": "[Output Only] Server-defined fully-qualified URL for this resource.", + "description": "[Output Only] Server-defined, fully qualified URL for this resource.", "type": "string" }, "zone": { @@ -23481,6 +25504,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -23503,6 +25533,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -23534,6 +25565,7 @@ "", "", "", + "", "" ], "type": "string" @@ -23615,6 +25647,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -23646,6 +25679,7 @@ "", "", "", + "", "" ], "type": "string" @@ -23709,6 +25743,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -23740,6 +25775,7 @@ "", "", "", + "", "" ], "type": "string" @@ -23913,12 +25949,14 @@ "DNS_RESOLVER", "GCE_ENDPOINT", "NAT_AUTO", + "SHARED_LOADBALANCER_VIP", "VPC_PEERING" ], "enumDescriptions": [ "", "", "", + "", "" ], "type": "string" @@ -23987,6 +26025,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": 
"[Output Only] Informational warning message.", "properties": { @@ -24009,6 +26054,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -24040,6 +26086,7 @@ "", "", "", + "", "" ], "type": "string" @@ -24121,6 +26168,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -24152,6 +26200,7 @@ "", "", "", + "", "" ], "type": "string" @@ -24215,6 +26264,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -24246,6 +26296,7 @@ "", "", "", + "", "" ], "type": "string" @@ -24316,7 +26367,7 @@ "type": "object" }, "AllocationSpecificSKUAllocationReservedInstanceProperties": { - "description": "Properties of the SKU instances being reserved.", + "description": "Properties of the SKU instances being reserved. Next ID: 9", "id": "AllocationSpecificSKUAllocationReservedInstanceProperties", "properties": { "guestAccelerators": { @@ -24534,7 +26585,7 @@ "type": "object" }, "AuditConfig": { - "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n{ \"audit_configs\": [ { \"service\": \"allServices\" \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\", }, { \"log_type\": \"ADMIN_READ\", } ] }, { \"service\": \"sampleservice.googleapis.com\" \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. 
An AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n{ \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -24559,7 +26610,7 @@ "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions. Example:\n\n{ \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\", } ] }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions. Example:\n\n{ \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" } ] }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { @@ -24728,6 +26779,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -24750,6 +26808,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -24781,6 +26840,7 @@ "", "", "", + "", "" ], "type": "string" @@ -24862,6 +26922,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -24893,6 +26954,7 @@ "", "", "", + "", "" ], "type": "string" @@ -24943,6 +27005,7 @@ "MISSING_CUSTOM_METRIC_DATA_POINTS", "MISSING_LOAD_BALANCING_DATA_POINTS", "MODE_OFF", + "MODE_ONLY_SCALE_OUT", "MODE_ONLY_UP", "MORE_THAN_ONE_BACKEND_SERVICE", "NOT_ENOUGH_QUOTA_AVAILABLE", @@ -24969,6 +27032,7 @@ "", "", "", + "", "" ], "type": "string" @@ -25008,6 +27072,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -25039,6 +27104,7 @@ "", "", "", + "", "" ], "type": "string" @@ -25109,14 +27175,19 @@ "enum": [ "OFF", "ON", + "ONLY_SCALE_OUT", "ONLY_UP" ], "enumDescriptions": [ + "", "", "", "" ], "type": "string" + }, + "scaleInControl": { + "$ref": "AutoscalingPolicyScaleInControl" } }, "type": 
"object" @@ -25142,7 +27213,7 @@ "type": "string" }, "utilizationTarget": { - "description": "The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric.\n\nFor example, a good metric to use as a utilization_target is compute.googleapis.com/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.", + "description": "The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric.\n\nFor example, a good metric to use as a utilization_target is https://www.googleapis.com/compute/v1/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.", "format": "double", "type": "number" }, @@ -25175,6 +27246,22 @@ }, "type": "object" }, + "AutoscalingPolicyScaleInControl": { + "description": "Configuration that allows for slower scale in so that even if Autoscaler recommends an abrupt scale in of a MIG, it will be throttled as specified by the parameters below.", + "id": "AutoscalingPolicyScaleInControl", + "properties": { + "maxScaledInReplicas": { + "$ref": "FixedOrPercent", + "description": "Maximum allowed number (or %) of VMs that can be deducted from the peak recommendation during the window autoscaler looks at when computing recommendations. Possibly all these VMs can be deleted at once so user service needs to be prepared to lose that many VMs in one step." + }, + "timeWindowSec": { + "description": "How long back autoscaling should look when computing recommendations to include directives regarding slower scale in, as described above.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "Backend": { "description": "Message containing information of one individual backend.", "id": "Backend", @@ -25194,7 +27281,7 @@ "type": "string" }, "capacityScaler": { - "description": "A multiplier applied to the group's maximum servicing capacity (based on UTILIZATION, RATE or CONNECTION). Default value is 1, which means the group will serve up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available Capacity. Valid range is [0.0,1.0].\n\nThis cannot be used for internal load balancing.", + "description": "A multiplier applied to the group's maximum servicing capacity (based on UTILIZATION, RATE or CONNECTION). Default value is 1, which means the group will serve up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available capacity. Valid range is 0.0 and [0.1,1.0]. You cannot configure a setting larger than 0 and smaller than 0.1. 
You cannot configure a setting of 0 when there is only one backend attached to the backend service.\n\nThis cannot be used for internal load balancing.", "format": "float", "type": "number" }, @@ -25363,6 +27450,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -25394,6 +27482,7 @@ "", "", "", + "", "" ], "type": "string" @@ -25426,11 +27515,11 @@ "type": "object" }, "BackendService": { - "description": "Represents a Backend Service resource.\n\nA backend service contains configuration values for Google Cloud Platform load balancing services.\n\nBackend services in Google Compute Engine can be either regionally or globally scoped.\n\n* [Global](/compute/docs/reference/rest/{$api_version}/backendServices) * [Regional](/compute/docs/reference/rest/{$api_version}/regionBackendServices)\n\nFor more information, read Backend Services.\n\n(== resource_for {$api_version}.backendService ==)", + "description": "Represents a Backend Service resource.\n\nA backend service defines how Google Cloud load balancers distribute traffic. The backend service configuration contains a set of values, such as the protocol used to connect to backends, various distribution and session settings, health checks, and timeouts. These settings provide fine-grained control over how your load balancer behaves. Most of the settings have default values that allow for easy configuration if you need to get started quickly.\n\nBackend services in Google Compute Engine can be either regionally or globally scoped.\n\n* [Global](/compute/docs/reference/rest/{$api_version}/backendServices) * [Regional](/compute/docs/reference/rest/{$api_version}/regionBackendServices)\n\nFor more information, see Backend Services.\n\n(== resource_for {$api_version}.backendService ==)", "id": "BackendService", "properties": { "affinityCookieTtlSec": { - "description": "If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is one day (86,400).", + "description": "Lifetime of cookies in seconds. Only applicable if the loadBalancingScheme is EXTERNAL, INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED, the protocol is HTTP or HTTPS, and the sessionAffinity is GENERATED_COOKIE, or HTTP_COOKIE.\n\nIf set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is one day (86,400).\n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, @@ -25447,14 +27536,14 @@ }, "circuitBreakers": { "$ref": "CircuitBreakers", - "description": "Settings controlling the volume of connections to a backend service. If not set, this feature is considered disabled.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." + "description": "Settings controlling the volume of connections to a backend service. If not set, this feature is considered disabled.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. 
\n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. \n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true." }, "connectionDraining": { "$ref": "ConnectionDraining" }, "consistentHash": { "$ref": "ConsistentHashLoadBalancerSettings", - "description": "Consistent Hash-based load balancing can be used to provide soft session affinity based on HTTP headers, cookies or other properties. This load balancing policy is applicable only for HTTP connections. The affinity to a particular destination host will be lost when one or more hosts are added/removed from the destination service. This field specifies parameters that control consistent hashing. This field is only applicable when localityLbPolicy is set to MAGLEV or RING_HASH.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." + "description": "Consistent Hash-based load balancing can be used to provide soft session affinity based on HTTP headers, cookies or other properties. This load balancing policy is applicable only for HTTP connections. The affinity to a particular destination host will be lost when one or more hosts are added/removed from the destination service. This field specifies parameters that control consistent hashing. This field is only applicable when localityLbPolicy is set to MAGLEV or RING_HASH.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. \n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true." }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", @@ -25485,14 +27574,15 @@ "type": "string" }, "healthChecks": { - "description": "The list of URLs to the healthChecks, httpHealthChecks (legacy), or httpsHealthChecks (legacy) resource for health checking this backend service. Not all backend services support legacy health checks. See Load balancer guide. Currently at most one health check can be specified. Backend services with instance group or zonal NEG backends must have a health check. Backend services with internet NEG backends must not have a health check. A health check must", + "description": "The list of URLs to the healthChecks, httpHealthChecks (legacy), or httpsHealthChecks (legacy) resource for health checking this backend service. Not all backend services support legacy health checks. See Load balancer guide. Currently, at most one health check can be specified for each backend service. Backend services with instance group or zonal NEG backends must have a health check. Backend services with internet or serverless NEG backends must not have a health check.", "items": { "type": "string" }, "type": "array" }, "iap": { - "$ref": "BackendServiceIAP" + "$ref": "BackendServiceIAP", + "description": "The configurations for Identity-Aware Proxy on this resource." }, "id": { "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", @@ -25523,7 +27613,7 @@ "type": "string" }, "localityLbPolicy": { - "description": "The load balancing algorithm used within the scope of the locality. The possible values are: \n- ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. \n- LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. \n- RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. \n- RANDOM: The load balancer selects a random healthy host. \n- ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. \n- MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, refer to https://ai.google/research/pubs/pub44824 \n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. \n\nIf sessionAffinity is not NONE, and this field is not set to \u003eMAGLEV or RING_HASH, session affinity settings will not take effect.", + "description": "The load balancing algorithm used within the scope of the locality. The possible values are: \n- ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. \n- LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. \n- RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. \n- RANDOM: The load balancer selects a random healthy host. \n- ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. \n- MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, see https://ai.google/research/pubs/pub44824 \n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. 
\n\nIf sessionAffinity is not NONE, and this field is not set to MAGLEV or RING_HASH, session affinity settings will not take effect.\n\nOnly the default ROUND_ROBIN policy is supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "enum": [ "INVALID_LB_POLICY", "LEAST_REQUEST", @@ -25559,7 +27649,7 @@ }, "outlierDetection": { "$ref": "OutlierDetection", - "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. If not set, this feature is considered disabled.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." + "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. If not set, this feature is considered disabled.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. \n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true." }, "port": { "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80.\n\nThis cannot be used if the loadBalancingScheme is INTERNAL (Internal TCP/UDP Load Balancing).", @@ -25571,8 +27661,9 @@ "type": "string" }, "protocol": { - "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, HTTP2, TCP, SSL, or UDP. depending on the chosen load balancer or Traffic Director configuration. Refer to the documentation for the load balancer or for Traffic Director for more information.", + "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, HTTP2, TCP, SSL, UDP or GRPC. depending on the chosen load balancer or Traffic Director configuration. Refer to the documentation for the load balancer or for Traffic Director for more information.\n\nMust be set to GRPC when the backend service is referenced by a URL map that is bound to target gRPC proxy.", "enum": [ + "GRPC", "HTTP", "HTTP2", "HTTPS", @@ -25586,6 +27677,7 @@ "", "", "", + "", "" ], "type": "string" @@ -25603,7 +27695,7 @@ "type": "string" }, "sessionAffinity": { - "description": "Type of session affinity to use. The default is NONE. Session affinity is not applicable if the --protocol is UDP.\n\nWhen the loadBalancingScheme is EXTERNAL, possible values are NONE, CLIENT_IP, or GENERATED_COOKIE. You can use GENERATED_COOKIE if the protocol is HTTP or HTTPS.\n\nWhen the loadBalancingScheme is INTERNAL, possible values are NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO.\n\nWhen the loadBalancingScheme is INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED, possible values are NONE, CLIENT_IP, GENERATED_COOKIE, HEADER_FIELD, or HTTP_COOKIE.", + "description": "Type of session affinity to use. The default is NONE. Session affinity is not applicable if the --protocol is UDP.\n\nWhen the loadBalancingScheme is EXTERNAL, possible values are NONE, CLIENT_IP, or GENERATED_COOKIE. 
You can use GENERATED_COOKIE if the protocol is HTTP or HTTPS.\n\nWhen the loadBalancingScheme is INTERNAL, possible values are NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO.\n\nWhen the loadBalancingScheme is INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED, possible values are NONE, CLIENT_IP, GENERATED_COOKIE, HEADER_FIELD, or HTTP_COOKIE.\n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "enum": [ "CLIENT_IP", "CLIENT_IP_PORT_PROTO", @@ -25625,7 +27717,7 @@ "type": "string" }, "timeoutSec": { - "description": "The backend service timeout has a different meaning depending on the type of load balancer. For more information read, Backend service settings The default is 30 seconds.", + "description": "The backend service timeout has a different meaning depending on the type of load balancer. For more information see, Backend service settings The default is 30 seconds.", "format": "int32", "type": "integer" } @@ -25661,6 +27753,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -25683,6 +27782,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -25714,6 +27814,7 @@ "", "", "", + "", "" ], "type": "string" @@ -25791,6 +27892,13 @@ "BackendServiceGroupHealth": { "id": "BackendServiceGroupHealth", "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata defined as annotations on the network endpoint group.", + "type": "object" + }, "healthStatus": { "description": "Health state of the backend instances or endpoints in requested instance or network endpoint group, determined based on configured health checks.", "items": { @@ -25811,12 +27919,15 @@ "id": "BackendServiceIAP", "properties": { "enabled": { + "description": "Whether the serving infrastructure will authenticate and authorize all incoming requests. If true, the oauth2ClientId and oauth2ClientSecret fields must be non-empty.", "type": "boolean" }, "oauth2ClientId": { + "description": "OAuth2 client ID to use for the authentication flow.", "type": "string" }, "oauth2ClientSecret": { + "description": "OAuth2 client secret to use for the authentication flow. For security reasons, this value cannot be retrieved via the API. 
Instead, the SHA-256 hash of the value is returned in the oauth2ClientSecretSha256 field.", "type": "string" }, "oauth2ClientSecretSha256": { @@ -25876,6 +27987,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -25907,6 +28019,7 @@ "", "", "", + "", "" ], "type": "string" @@ -25995,6 +28108,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -26026,6 +28140,7 @@ "", "", "", + "", "" ], "type": "string" @@ -26061,6 +28176,10 @@ "description": "Associates `members` with a `role`.", "id": "Binding", "properties": { + "bindingId": { + "description": "A client-specified ID for this binding. Expected to be globally unique to support the internal bindings-by-ID API.", + "type": "string" + }, "condition": { "$ref": "Expr", "description": "The condition that is associated with this binding.\n\nIf the condition evaluates to `true`, then this binding applies to the current request.\n\nIf the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding.\n\nTo learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." @@ -26161,6 +28280,20 @@ "description": "Represents a regional Commitment resource.\n\nCreating a commitment resource means that you are purchasing a committed use contract with an explicit start and end time. You can create commitments based on vCPUs and memory usage and receive discounted rates. For full details, read Signing Up for Committed Use Discounts. (== resource_for {$api_version}.regionCommitments ==)", "id": "Commitment", "properties": { + "category": { + "description": "The category of the commitment. Category MACHINE specifies commitments composed of machine resources such as VCPU or MEMORY, listed in resources. Category LICENSE specifies commitments composed of software licenses, listed in licenseResources. Note that only MACHINE commitments should have a Type specified.", + "enum": [ + "CATEGORY_UNSPECIFIED", + "LICENSE", + "MACHINE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -26183,6 +28316,10 @@ "description": "[Output Only] Type of the resource. Always compute#commitment for commitments.", "type": "string" }, + "licenseResource": { + "$ref": "LicenseResourceCommitment", + "description": "The license specification required as part of a license commitment." + }, "name": { "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -26279,6 +28416,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -26301,6 +28445,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -26332,6 +28477,7 @@ "", "", "", + "", "" ], "type": "string" @@ -26413,6 +28559,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -26444,6 +28591,7 @@ "", "", "", + "", "" ], "type": "string" @@ -26507,6 +28655,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -26538,6 +28687,7 @@ "", "", "", + "", "" ], "type": "string" @@ -26580,6 +28730,7 @@ "ATTRIBUTION", "AUTHORITY", "CREDENTIALS_TYPE", + "CREDS_ASSERTION", "JUSTIFICATION_TYPE", "NO_ATTR", "SECURITY_REALM" @@ -26591,6 +28742,7 @@ "", "", "", + "", "" ], "type": "string" @@ -26647,6 +28799,17 @@ }, "type": "object" }, + "ConfidentialInstanceConfig": { + "description": "A set of Confidential Instance options.", + "id": "ConfidentialInstanceConfig", + "properties": { + "enableConfidentialCompute": { + "description": "Defines whether the instance should have confidential compute enabled.", + "type": "boolean" + } + }, + "type": "object" + }, "ConnectionDraining": { "description": "Message containing connection draining configuration.", "id": "ConnectionDraining", @@ -26754,7 +28917,6 @@ "type": "object" }, "CustomerEncryptionKey": { - "description": "Represents a customer-supplied encryption key", "id": "CustomerEncryptionKey", "properties": { "kmsKeyName": { @@ -26912,7 +29074,7 @@ "type": "string" }, "physicalBlockSizeBytes": { - "description": "Physical block size of the persistent disk, in bytes. If not present in a request, a default value is used. Currently supported sizes are 4096 and 16384, other sizes may be added in the future. If an unsupported value is requested, the error message will list the supported values for the caller's project.", + "description": "Physical block size of the persistent disk, in bytes. If not present in a request, a default value is used. The currently supported size is 4096, other sizes may be added in the future. If an unsupported value is requested, the error message will list the supported values for the caller's project.", "format": "int64", "type": "string" }, @@ -26939,10 +29101,18 @@ "type": "string" }, "sizeGb": { - "description": "Size of the persistent disk, specified in GB. 
You can specify this field when creating a persistent disk using the sourceImage or sourceSnapshot parameter, or specify it alone to create an empty persistent disk.\n\nIf you specify this field along with sourceImage or sourceSnapshot, the value of sizeGb must not be less than the size of the sourceImage or the size of the snapshot. Acceptable values are 1 to 65536, inclusive.", + "description": "Size, in GB, of the persistent disk. You can specify this field when creating a persistent disk using the sourceImage, sourceSnapshot, or sourceDisk parameter, or specify it alone to create an empty persistent disk.\n\nIf you specify this field along with a source, the value of sizeGb must not be less than the size of the source. Acceptable values are 1 to 65536, inclusive.", "format": "int64", "type": "string" }, + "sourceDisk": { + "description": "The source disk used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk", + "type": "string" + }, + "sourceDiskId": { + "description": "[Output Only] The unique ID of the disk used to create this disk. This value identifies the exact disk that was used to create this persistent disk. For example, if you created the persistent disk from a disk that was later deleted and recreated under the same name, the source disk ID would identify the exact version of the disk that was used.", + "type": "string" + }, "sourceImage": { "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image:\nprojects/debian-cloud/global/images/family/debian-9\n\n\nAlternatively, use a specific version of a public operating system image:\nprojects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD\n\n\nTo create a disk with a custom image that you created, specify the image name in the following format:\nglobal/images/my-custom-image\n\n\nYou can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\nglobal/images/family/my-image-family", "type": "string" @@ -26986,7 +29156,7 @@ "type": "string" }, "type": { - "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk. For example: projects/project/zones/zone/diskTypes/pd-standard or pd-ssd", + "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk. 
For example: projects/project/zones/zone/diskTypes/pd-standard or pd-ssd", "type": "string" }, "users": { @@ -27031,6 +29201,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -27053,6 +29230,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -27084,6 +29262,7 @@ "", "", "", + "", "" ], "type": "string" @@ -27206,6 +29385,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -27237,6 +29417,7 @@ "", "", "", + "", "" ], "type": "string" @@ -27365,6 +29546,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -27387,6 +29575,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -27418,6 +29607,7 @@ "", "", "", + "", "" ], "type": "string" @@ -27499,6 +29689,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -27530,6 +29721,7 @@ "", "", "", + "", "" ], "type": "string" @@ -27593,6 +29785,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -27624,6 +29817,7 @@ "", "", "", + "", "" ], "type": "string" @@ -27724,6 +29918,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -27755,6 +29950,7 @@ "", "", "", + "", "" ], "type": "string" @@ -27928,6 +30124,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -27959,6 +30156,7 @@ "", "", "", + "", "" ], "type": "string" @@ -28154,6 +30352,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -28185,6 +30384,7 @@ "", "", "", + "", "" ], "type": "string" @@ -28328,7 +30528,7 @@ }, "logConfig": { "$ref": "FirewallLogConfig", - "description": "This field denotes the logging options for a particular firewall rule. If logging is enabled, logs will be exported to Stackdriver." + "description": "This field denotes the logging options for a particular firewall rule. If logging is enabled, logs will be exported to Cloud Logging." 
}, "name": { "annotations": { @@ -28442,6 +30642,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -28473,6 +30674,7 @@ "", "", "", + "", "" ], "type": "string" @@ -28511,6 +30713,18 @@ "enable": { "description": "This field denotes whether to enable logging for a particular firewall rule.", "type": "boolean" + }, + "metadata": { + "description": "This field can only be specified for a particular firewall rule if logging is enabled for that rule. This field denotes whether to include or exclude metadata for firewall logs.", + "enum": [ + "EXCLUDE_ALL_METADATA", + "INCLUDE_ALL_METADATA" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" } }, "type": "object" @@ -28542,7 +30756,7 @@ "id": "ForwardingRule", "properties": { "IPAddress": { - "description": "IP address that this forwarding rule serves. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the target that you specify in the forwarding rule.\n\nIf you don't specify a reserved IP address, an ephemeral IP address is assigned. Methods for specifying an IP address:\n\n* IPv4 dotted decimal, as in `100.1.2.3` * Full URL, as in https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name * Partial URL or by name, as in: * projects/project_id/regions/region/addresses/address-name * regions/region/addresses/address-name * global/addresses/address-name * address-name \n\nThe loadBalancingScheme and the forwarding rule's target determine the type of IP address that you can use. For detailed information, refer to [IP address specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).", + "description": "IP address that this forwarding rule serves. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the target that you specify in the forwarding rule.\n\nIf you don't specify a reserved IP address, an ephemeral IP address is assigned. Methods for specifying an IP address:\n\n* IPv4 dotted decimal, as in `100.1.2.3` * Full URL, as in https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name * Partial URL or by name, as in: * projects/project_id/regions/region/addresses/address-name * regions/region/addresses/address-name * global/addresses/address-name * address-name \n\nThe loadBalancingScheme and the forwarding rule's target determine the type of IP address that you can use. For detailed information, refer to [IP address specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).\n\nMust be set to `0.0.0.0` when the target is targetGrpcProxy that has validateForProxyless field set to true.", "type": "string" }, "IPProtocol": { @@ -28637,7 +30851,7 @@ "type": "string" }, "metadataFilters": { - "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. If a match takes place, the relevant configuration is made available to those proxies. Otherwise, all the resources (e.g. 
TargetHttpProxy, UrlMap) referenced by the ForwardingRule will not be visible to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata.\nmetadataFilters specified here will be applifed before those specified in the UrlMap that this ForwardingRule references.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. When there is a match, the relevant configuration is made available to those proxies. Otherwise, all the resources (e.g. TargetHttpProxy, UrlMap) referenced by the ForwardingRule will not be visible to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. If multiple metadataFilters are specified, all of them need to be satisfied in order to be considered a match.\nmetadataFilters specified here will be applifed before those specified in the UrlMap that this ForwardingRule references.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", "items": { "$ref": "MetadataFilter" }, @@ -28649,7 +30863,7 @@ "type": "string" }, "network": { - "description": "This field is not used for external load balancing.\n\nFor INTERNAL and INTERNAL_SELF_MANAGED load balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used.", + "description": "This field is not used for external load balancing.\n\nFor internal load balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used.", "type": "string" }, "networkTier": { @@ -28665,7 +30879,7 @@ "type": "string" }, "portRange": { - "description": "When the load balancing scheme is EXTERNAL, INTERNAL_SELF_MANAGED and INTERNAL_MANAGED, you can specify a port_range. Use with a forwarding rule that points to a target proxy or a target pool. Do not use with a forwarding rule that points to a backend service. This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance.\n\nApplicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to ports in the specified range will be forwarded to target. 
Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges.\n\nSome types of forwarding target have constraints on the acceptable ports: \n- TargetHttpProxy: 80, 8080 \n- TargetHttpsProxy: 443 \n- TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetVpnGateway: 500, 4500", + "description": "When the load balancing scheme is EXTERNAL, INTERNAL_SELF_MANAGED and INTERNAL_MANAGED, you can specify a port_range. Use with a forwarding rule that points to a target proxy or a target pool. Do not use with a forwarding rule that points to a backend service. This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetGrpcProxy, TargetVpnGateway, TargetPool, TargetInstance.\n\nApplicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to ports in the specified range will be forwarded to target. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges.\n\nSome types of forwarding target have constraints on the acceptable ports: \n- TargetHttpProxy: 80, 8080 \n- TargetHttpsProxy: 443 \n- TargetGrpcProxy: Any ports \n- TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetVpnGateway: 500, 4500", "type": "string" }, "ports": { @@ -28693,11 +30907,11 @@ "type": "string" }, "subnetwork": { - "description": "This field is only used for INTERNAL load balancing.\n\nFor internal load balancing, this field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule.\n\nIf the network specified is in auto subnet mode, this field is optional. However, if the network is in custom subnet mode, a subnetwork must be specified.", + "description": "This field is only used for internal load balancing.\n\nFor internal load balancing, this field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule.\n\nIf the network specified is in auto subnet mode, this field is optional. However, if the network is in custom subnet mode, a subnetwork must be specified.", "type": "string" }, "target": { - "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For INTERNAL_SELF_MANAGED load balancing, only targetHttpProxy is valid, not targetHttpsProxy.", + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must be in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. 
For more information, see the \"Target\" column in [Port specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).", "type": "string" } }, @@ -28731,6 +30945,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -28753,6 +30974,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -28784,6 +31006,7 @@ "", "", "", + "", "" ], "type": "string" @@ -28865,6 +31088,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -28896,6 +31120,7 @@ "", "", "", + "", "" ], "type": "string" @@ -28968,6 +31193,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -28999,6 +31225,7 @@ "", "", "", + "", "" ], "type": "string" @@ -29030,6 +31257,39 @@ }, "type": "object" }, + "GRPCHealthCheck": { + "id": "GRPCHealthCheck", + "properties": { + "grpcServiceName": { + "description": "The gRPC service name for the health check. This field is optional. The value of grpc_service_name has the following meanings by convention:\n- Empty service_name means the overall status of all services at the backend.\n- Non-empty service_name means the health of that gRPC service, as defined by the owner of the service.\nThe grpc_service_name can only be ASCII.", + "type": "string" + }, + "port": { + "description": "The port number for the health check request. Must be specified if port_name and port_specification are not set or if port_specification is USE_FIXED_PORT. Valid values are 1 through 65535.", + "format": "int32", + "type": "integer" + }, + "portName": { + "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. The port_name should conform to RFC1035.", + "type": "string" + }, + "portSpecification": { + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in port is used for health checking.\nUSE_NAMED_PORT: The portName is used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. 
For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, gRPC health check follows behavior specified in port and portName fields.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "GlobalNetworkEndpointGroupsAttachEndpointsRequest": { "id": "GlobalNetworkEndpointGroupsAttachEndpointsRequest", "properties": { @@ -29170,6 +31430,7 @@ "FEATURE_TYPE_UNSPECIFIED", "MULTI_IP_SUBNET", "SECURE_BOOT", + "SEV_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS" @@ -29180,6 +31441,7 @@ "", "", "", + "", "" ], "type": "string" @@ -29347,7 +31609,7 @@ "type": "object" }, "HealthCheck": { - "description": "Represents a Health Check resource.\n\nGoogle Compute Engine has two Health Check resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/healthChecks) * [Regional](/compute/docs/reference/rest/{$api_version}/regionHealthChecks)\n\nInternal HTTP(S) load balancers use regional health checks. All other types of GCP load balancers and managed instance group auto-healing use global health checks. For more information, read Health Check Concepts.\n\nTo perform health checks on network load balancers, you must use either httpHealthChecks or httpsHealthChecks.", + "description": "Represents a Health Check resource.\n\nGoogle Compute Engine has two Health Check resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/healthChecks) * [Regional](/compute/docs/reference/rest/{$api_version}/regionHealthChecks)\n\nInternal HTTP(S) load balancers must use regional health checks. Internal TCP/UDP load balancers can use either regional or global health checks. All other types of GCP load balancers and managed instance group auto-healing must use global health checks. For more information, read Health Check Concepts.\n\nTo perform health checks on network load balancers, you must use either httpHealthChecks or httpsHealthChecks.", "id": "HealthCheck", "properties": { "checkIntervalSec": { @@ -29363,6 +31625,9 @@ "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, + "grpcHealthCheck": { + "$ref": "GRPCHealthCheck" + }, "healthyThreshold": { "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", "format": "int32", @@ -29414,6 +31679,7 @@ "type": { "description": "Specifies the type of the healthCheck, either TCP, SSL, HTTP, HTTPS or HTTP2. If not specified, the default is TCP. Exactly one of the protocol-specific health check field must be specified, which must match type field.", "enum": [ + "GRPC", "HTTP", "HTTP2", "HTTPS", @@ -29427,6 +31693,7 @@ "", "", "", + "", "" ], "type": "string" @@ -29489,6 +31756,337 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "HealthCheckReference": { + "description": "A full or valid partial URL to a health check. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project-id/global/httpHealthChecks/health-check \n- projects/project-id/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", + "id": "HealthCheckReference", + "properties": { + "healthCheck": { + "type": "string" + } + }, + "type": "object" + }, + "HealthCheckService": { + "description": "Represents a Health-Check as a Service resource.\n\n(== resource_for {$api_version}.regionHealthCheckServices ==)", + "id": "HealthCheckService", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a HealthCheckService. An up-to-date fingerprint must be provided in order to patch/update the HealthCheckService; Otherwise, the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve the HealthCheckService.", + "format": "byte", + "type": "string" + }, + "healthChecks": { + "description": "List of URLs to the HealthCheck resources. Must have at least one HealthCheck, and not more than 10. HealthCheck resources must have portSpecification=USE_SERVING_PORT. For regional HealthCheckService, the HealthCheck must be regional and in the same region. For global HealthCheckService, HealthCheck must be global. Mix of regional and global HealthChecks is not supported. Multiple regional HealthChecks must belong to the same region. Regional HealthChecks\u003c/code? must belong to the same region as zones of NEGs.", + "items": { + "type": "string" + }, + "type": "array" + }, + "healthStatusAggregationPolicy": { + "description": "Optional. Policy for how the results from multiple health checks for the same endpoint are aggregated. Defaults to NO_AGGREGATION if unspecified. \n- NO_AGGREGATION. An EndpointHealth message is returned for each backend in the health check service. \n- AND. If any backend's health check reports UNHEALTHY, then UNHEALTHY is the HealthState of the entire health check service. 
If all backend's are healthy, the HealthState of the health check service is HEALTHY. .", + "enum": [ + "AND", + "NO_AGGREGATION" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#healthCheckService", + "description": "[Output only] Type of the resource. Always compute#healthCheckServicefor health check services.", + "type": "string" + }, + "name": { + "description": "Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "networkEndpointGroups": { + "description": "List of URLs to the NetworkEndpointGroup resources. Must not have more than 100. For regional HealthCheckService, NEGs must be in zones in the region of the HealthCheckService.", + "items": { + "type": "string" + }, + "type": "array" + }, + "notificationEndpoints": { + "description": "List of URLs to the NotificationEndpoint resources. Must not have more than 10. A list of endpoints for receiving notifications of change in health status. For regional HealthCheckService, NotificationEndpoint must be regional and in the same region. For global HealthCheckService, NotificationEndpoint must be global.", + "items": { + "type": "string" + }, + "type": "array" + }, + "region": { + "description": "[Output Only] URL of the region where the health check service resides. This field is not applicable to global health check services. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + } + }, + "type": "object" + }, + "HealthCheckServiceReference": { + "description": "A full or valid partial URL to a health check service. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project-id/regions/us-west1/healthCheckServices/health-check-service \n- projects/project-id/regions/us-west1/healthCheckServices/health-check-service \n- regions/us-west1/healthCheckServices/health-check-service", + "id": "HealthCheckServiceReference", + "properties": { + "healthCheckService": { + "type": "string" + } + }, + "type": "object" + }, + "HealthCheckServicesList": { + "id": "HealthCheckServicesList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of HealthCheckService resources.", + "items": { + "$ref": "HealthCheckService" + }, + "type": "array" + }, + "kind": { + "default": "compute#healthCheckServicesList", + "description": "[Output Only] Type of the resource. Always compute#healthCheckServicesList for lists of HealthCheckServices.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. 
If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "HealthChecksAggregatedList": { + "id": "HealthChecksAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "HealthChecksScopedList", + "description": "Name of the scope containing this set of HealthChecks." + }, + "description": "A list of HealthChecksScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#healthChecksAggregatedList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -29499,127 +32097,6 @@ ], "enumDescriptions": [ "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "HealthCheckReference": { - "description": "A full or valid partial URL to a health check. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project-id/global/httpHealthChecks/health-check \n- projects/project-id/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", - "id": "HealthCheckReference", - "properties": { - "healthCheck": { - "type": "string" - } - }, - "type": "object" - }, - "HealthChecksAggregatedList": { - "id": "HealthChecksAggregatedList", - "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "additionalProperties": { - "$ref": "HealthChecksScopedList", - "description": "Name of the scope containing this set of HealthChecks." 
- }, - "description": "A list of HealthChecksScopedList resources.", - "type": "object" - }, - "kind": { - "default": "compute#healthChecksAggregatedList", - "description": "Type of resource.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ "", "", "", @@ -29705,6 +32182,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -29736,6 +32214,7 @@ "", "", "", + "", "" ], "type": "string" @@ -29770,6 +32249,13 @@ "HealthStatus": { "id": "HealthStatus", "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata defined as annotations for network endpoint.", + "type": "object" + }, "healthState": { "description": "Health state of the instance.", "enum": [ @@ -29813,6 +32299,10 @@ "$ref": "HealthCheckReference", "description": "URL of the health check associated with the health state of the network endpoint." }, + "healthCheckService": { + "$ref": "HealthCheckServiceReference", + "description": "URL of the health check service associated with the health state of the network endpoint." + }, "healthState": { "description": "Health state of the network endpoint determined based on the health checks configured.", "enum": [ @@ -29841,7 +32331,7 @@ "type": "string" }, "hosts": { - "description": "The list of host patterns to match. They must be valid hostnames with optional port numbers in the format host:port. * matches any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or ..", + "description": "The list of host patterns to match. They must be valid hostnames with optional port numbers in the format host:port. * matches any string of ([a-z0-9-.]*). 
In that case, * must be the first character and must be followed in the pattern by either - or ..\n* based matching is not supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true.", "items": { "type": "string" }, @@ -29946,7 +32436,7 @@ "type": "string" }, "headerName": { - "description": "The name of the HTTP header to match.\nFor matching against the HTTP request's authority, use a headerMatch with the header name \":authority\".\nFor matching a request's method, use the headerName \":method\".", + "description": "The name of the HTTP header to match.\nFor matching against the HTTP request's authority, use a headerMatch with the header name \":authority\".\nFor matching a request's method, use the headerName \":method\".\nWhen the URL map is bound to target gRPC proxy that has validateForProxyless field set to true, only non-binary user-specified custom metadata and the `content-type` header are supported. The following transport-level headers cannot be used in header matching rules: `:authority`, `:method`, `:path`, `:scheme`, `user-agent`, `accept-encoding`, `content-encoding`, `grpc-accept-encoding`, `grpc-encoding`, `grpc-previous-rpc-attempts`, `grpc-tags-bin`, `grpc-timeout` and `grpc-trace-bin.", "type": "string" }, "invertMatch": { @@ -30112,6 +32602,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -30143,6 +32634,7 @@ "", "", "", + "", "" ], "type": "string" @@ -30270,30 +32762,30 @@ "properties": { "corsPolicy": { "$ref": "CorsPolicy", - "description": "The specification for allowing client side cross-origin requests. Please see W3C Recommendation for Cross Origin Resource Sharing" + "description": "The specification for allowing client side cross-origin requests. Please see W3C Recommendation for Cross Origin Resource Sharing \nNot supported when the URL map is bound to target gRPC proxy." }, "faultInjectionPolicy": { "$ref": "HttpFaultInjection", - "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted by the Loadbalancer for a percentage of requests.\ntimeout and retry_policy will be ignored by clients that are configured with a fault_injection_policy." + "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted by the Loadbalancer for a percentage of requests.\ntimeout and retry_policy will be ignored by clients that are configured with a fault_injection_policy.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "requestMirrorPolicy": { "$ref": "RequestMirrorPolicy", - "description": "Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. Loadbalancer does not wait for responses from the shadow service. 
Prior to sending traffic to the shadow service, the host / authority header is suffixed with -shadow." + "description": "Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. Loadbalancer does not wait for responses from the shadow service. Prior to sending traffic to the shadow service, the host / authority header is suffixed with -shadow.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "retryPolicy": { "$ref": "HttpRetryPolicy", - "description": "Specifies the retry policy associated with this route." + "description": "Specifies the retry policy associated with this route.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "timeout": { "$ref": "Duration", - "description": "Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (i.e. end-of-stream) up until the response has been completely processed. Timeout includes all retries.\nIf not specified, will use the largest timeout among all backend services associated with the route." + "description": "Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (i.e. end-of-stream) up until the response has been completely processed. Timeout includes all retries.\nIf not specified, will use the largest timeout among all backend services associated with the route.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "urlRewrite": { "$ref": "UrlRewrite", - "description": "The spec to modify the URL of the request, prior to forwarding the request to the matched service." + "description": "The spec to modify the URL of the request, prior to forwarding the request to the matched service.\nurlRewrite is the only action supported in UrlMaps for external HTTP(S) load balancers.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "weightedBackendServices": { - "description": "A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non 0 number.\nOnce a backendService is identified and before forwarding the request to the backend service, advanced routing actions like Url rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction.", + "description": "A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. 
If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non-zero number.\nOnce a backendService is identified and before forwarding the request to the backend service, advanced routing actions such as URL rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction.", "items": { "$ref": "WeightedBackendService" }, @@ -30312,9 +32804,10 @@ }, "headerAction": { "$ref": "HttpHeaderAction", - "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here are applied before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction" + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here are applied before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction \nNote that headerAction is not supported for Loadbalancers that have their loadBalancingScheme set to EXTERNAL.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "matchRules": { + "description": "The list of criteria for matching attributes of a request to this routeRule. This list has OR semantics: the request matches this routeRule when any of the matchRules are satisfied. However predicates within a given matchRule have AND semantics. All predicates within a matchRule must match for the request to match the rule.", "items": { "$ref": "HttpRouteRuleMatch" }, @@ -30327,7 +32820,7 @@ }, "routeAction": { "$ref": "HttpRouteAction", - "description": "In response to a matching matchRule, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set." + "description": "In response to a matching matchRule, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a routeRule's routeAction." }, "service": { "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendService s. 
Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set.", @@ -30335,7 +32828,7 @@ }, "urlRedirect": { "$ref": "HttpRedirectAction", - "description": "When this rule is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set." + "description": "When this rule is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set.\nNot supported when the URL map is bound to target gRPC proxy." } }, "type": "object" @@ -30356,11 +32849,11 @@ "type": "array" }, "ignoreCase": { - "description": "Specifies that prefixMatch and fullPathMatch matches are case sensitive.\nThe default value is false.\nignoreCase must not be used with regexMatch.", + "description": "Specifies that prefixMatch and fullPathMatch matches are case sensitive.\nThe default value is false.\nignoreCase must not be used with regexMatch.\nNot supported when the URL map is bound to target gRPC proxy.", "type": "boolean" }, "metadataFilters": { - "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. If a match takes place, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata.\nmetadataFilters specified here will be applied after those specified in ForwardingRule that refers to the UrlMap this HttpRouteRuleMatch belongs to.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. When there is a match, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. 
If multiple metadataFilters are specified, all of them need to be satisfied in order to be considered a match.\nmetadataFilters specified here will be applied after those specified in ForwardingRule that refers to the UrlMap this HttpRouteRuleMatch belongs to.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true.", "items": { "$ref": "MetadataFilter" }, @@ -30371,7 +32864,7 @@ "type": "string" }, "queryParameterMatches": { - "description": "Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request.", + "description": "Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request.\nNot supported when the URL map is bound to target gRPC proxy.", "items": { "$ref": "HttpQueryParameterMatch" }, @@ -30501,6 +32994,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -30532,6 +33026,7 @@ "", "", "", + "", "" ], "type": "string" @@ -30702,7 +33197,7 @@ "type": "string" }, "sourceImage": { - "description": "URL of the source image used to create this image. This can be a full or valid partial URL. You must provide exactly one of: \n- this property, or \n- the rawDisk.source property, or \n- the sourceDisk property in order to create an image.", + "description": "URL of the source image used to create this image.\n\nIn order to create an image, you must provide the full or partial URL of one of the following: \n- The selfLink URL \n- This property \n- The rawDisk.source URL \n- The sourceDisk URL", "type": "string" }, "sourceImageEncryptionKey": { @@ -30714,7 +33209,7 @@ "type": "string" }, "sourceSnapshot": { - "description": "URL of the source snapshot used to create this image. This can be a full or valid partial URL. You must provide exactly one of: \n- this property, or \n- the sourceImage property, or \n- the rawDisk.source property, or \n- the sourceDisk property in order to create an image.", + "description": "URL of the source snapshot used to create this image.\n\nIn order to create an image, you must provide the full or partial URL of one of the following: \n- The selfLink URL \n- This property \n- The sourceImage URL \n- The rawDisk.source URL \n- The sourceDisk URL", "type": "string" }, "sourceSnapshotEncryptionKey": { @@ -30812,6 +33307,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -30843,6 +33339,7 @@ "", "", "", + "", "" ], "type": "string" @@ -30914,6 +33411,9 @@ "description": "Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes. For more information, see Enabling IP Forwarding.", "type": "boolean" }, + "confidentialInstanceConfig": { + "$ref": "ConfidentialInstanceConfig" + }, "cpuPlatform": { "description": "[Output Only] The CPU platform used by this instance.", "type": "string" @@ -30979,6 +33479,18 @@ "description": "Labels to apply to this instance. 
These can be later modified by the setLabels method.", "type": "object" }, + "lastStartTimestamp": { + "description": "[Output Only] Last start timestamp in RFC3339 text format.", + "type": "string" + }, + "lastStopTimestamp": { + "description": "[Output Only] Last stop timestamp in RFC3339 text format.", + "type": "string" + }, + "lastSuspendedTimestamp": { + "description": "[Output Only] Last suspended timestamp in RFC3339 text format.", + "type": "string" + }, "machineType": { "annotations": { "required": [ @@ -31013,6 +33525,20 @@ }, "type": "array" }, + "privateIpv6GoogleAccess": { + "description": "The private IPv6 google access type for the VM. If not specified, use INHERIT_FROM_SUBNETWORK as default.", + "enum": [ + "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE", + "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE", + "INHERIT_FROM_SUBNETWORK" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "reservationAffinity": { "$ref": "ReservationAffinity", "description": "Specifies the reservations that this instance can consume from." @@ -31050,7 +33576,7 @@ "type": "boolean" }, "status": { - "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, STOPPED, SUSPENDING, SUSPENDED, and TERMINATED.", + "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see Instance life cycle.", "enum": [ "DEPROVISIONING", "PROVISIONING", @@ -31120,6 +33646,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -31142,6 +33675,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -31173,6 +33707,7 @@ "", "", "", + "", "" ], "type": "string" @@ -31304,6 +33839,127 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "InstanceGroupList": { + "description": "A list of InstanceGroup resources.", + "id": "InstanceGroupList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of InstanceGroup resources.", + "items": { + "$ref": "InstanceGroup" + }, + "type": "array" + }, + "kind": { + "default": "compute#instanceGroupList", + "description": "[Output Only] The resource type, which is always compute#instanceGroupList for instance group lists.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -31326,6 +33982,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -31336,117 +33993,6 @@ ], "enumDescriptions": [ "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "InstanceGroupList": { - "description": "A list of InstanceGroup resources.", - "id": "InstanceGroupList", - "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of InstanceGroup resources.", - "items": { - "$ref": "InstanceGroup" - }, - "type": "array" - }, - "kind": { - "default": "compute#instanceGroupList", - "description": "[Output Only] The resource type, which is always compute#instanceGroupList for instance group lists.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ "", "", "", @@ -31586,6 +34132,10 @@ "description": "[Output Only] The URL for this managed instance group. The server defines this URL.", "type": "string" }, + "statefulPolicy": { + "$ref": "StatefulPolicy", + "description": "Stateful configuration for this Instanced Group Manager" + }, "status": { "$ref": "InstanceGroupManagerStatus", "description": "[Output Only] The status of this managed instance group." @@ -31705,6 +34255,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -31727,6 +34284,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -31758,6 +34316,7 @@ "", "", "", + "", "" ], "type": "string" @@ -31855,6 +34414,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -31886,6 +34446,7 @@ "", "", "", + "", "" ], "type": "string" @@ -31928,6 +34489,10 @@ "description": "[Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified.", "type": "boolean" }, + "stateful": { + "$ref": "InstanceGroupManagerStatusStateful", + "description": "[Output Only] Stateful status of the given Instance Group Manager." + }, "versionTarget": { "$ref": "InstanceGroupManagerStatusVersionTarget", "description": "[Output Only] A status of consistency of Instances' versions with their target version specified by version field on Instance Group Manager." @@ -31935,6 +34500,30 @@ }, "type": "object" }, + "InstanceGroupManagerStatusStateful": { + "id": "InstanceGroupManagerStatusStateful", + "properties": { + "hasStatefulConfig": { + "description": "[Output Only] A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. 
The group might report that it has no stateful config even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions.", + "type": "boolean" + }, + "perInstanceConfigs": { + "$ref": "InstanceGroupManagerStatusStatefulPerInstanceConfigs", + "description": "[Output Only] Status of per-instance configs on the instance." + } + }, + "type": "object" + }, + "InstanceGroupManagerStatusStatefulPerInstanceConfigs": { + "id": "InstanceGroupManagerStatusStatefulPerInstanceConfigs", + "properties": { + "allEffective": { + "description": "A bit indicating if all of the group's per-instance configs (listed in the output of a listPerInstanceConfigs API call) have status EFFECTIVE or there are no per-instance-configs.", + "type": "boolean" + } + }, + "type": "object" + }, "InstanceGroupManagerStatusVersionTarget": { "id": "InstanceGroupManagerStatusVersionTarget", "properties": { @@ -32115,6 +34704,20 @@ }, "type": "object" }, + "InstanceGroupManagersDeletePerInstanceConfigsReq": { + "description": "InstanceGroupManagers.deletePerInstanceConfigs", + "id": "InstanceGroupManagersDeletePerInstanceConfigsReq", + "properties": { + "names": { + "description": "The list of instance names for which we want to delete per-instance configs on this managed instance group.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "InstanceGroupManagersListErrorsResponse": { "id": "InstanceGroupManagersListErrorsResponse", "properties": { @@ -32149,6 +34752,120 @@ }, "type": "object" }, + "InstanceGroupManagersListPerInstanceConfigsResp": { + "id": "InstanceGroupManagersListPerInstanceConfigsResp", + "properties": { + "items": { + "description": "[Output Only] The list of PerInstanceConfig.", + "items": { + "$ref": "PerInstanceConfig" + }, + "type": "array" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "InstanceGroupManagersPatchPerInstanceConfigsReq": { + "description": "InstanceGroupManagers.patchPerInstanceConfigs", + "id": "InstanceGroupManagersPatchPerInstanceConfigsReq", + "properties": { + "perInstanceConfigs": { + "description": "The list of per-instance configs to insert or patch on this managed instance group.", + "items": { + "$ref": "PerInstanceConfig" + }, + "type": "array" + } + }, + "type": "object" + }, "InstanceGroupManagersRecreateInstancesRequest": { "id": "InstanceGroupManagersRecreateInstancesRequest", "properties": { @@ -32194,6 +34911,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -32225,6 +34943,7 @@ "", "", "", + "", "" ], "type": "string" @@ -32284,6 +35003,20 @@ }, "type": "object" }, + "InstanceGroupManagersUpdatePerInstanceConfigsReq": { + "description": "InstanceGroupManagers.updatePerInstanceConfigs", + "id": "InstanceGroupManagersUpdatePerInstanceConfigsReq", + "properties": { + "perInstanceConfigs": { + "description": "The list of per-instance configs to insert or patch on this managed instance group.", + "items": { + "$ref": "PerInstanceConfig" + }, + "type": "array" + } + }, + "type": "object" + }, "InstanceGroupsAddInstancesRequest": { "id": "InstanceGroupsAddInstancesRequest", "properties": { @@ -32346,6 +35079,7 @@ "NEXT_HOP_NOT_RUNNING", 
"NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -32377,6 +35111,7 @@ "", "", "", + "", "" ], "type": "string" @@ -32471,6 +35206,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -32502,6 +35238,7 @@ "", "", "", + "", "" ], "type": "string" @@ -32601,6 +35338,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -32632,6 +35370,7 @@ "", "", "", + "", "" ], "type": "string" @@ -32713,6 +35452,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -32744,6 +35484,7 @@ "", "", "", + "", "" ], "type": "string" @@ -32866,22 +35607,26 @@ "id": "InstanceProperties", "properties": { "canIpForward": { - "description": "Enables instances created based on this template to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information.", + "description": "Enables instances created based on these properties to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information.", "type": "boolean" }, + "confidentialInstanceConfig": { + "$ref": "ConfidentialInstanceConfig", + "description": "Specifies the Confidential Instance options." 
+ }, "description": { - "description": "An optional text description for the instances that are created from this instance template.", + "description": "An optional text description for the instances that are created from these properties.", "type": "string" }, "disks": { - "description": "An array of disks that are associated with the instances that are created from this template.", + "description": "An array of disks that are associated with the instances that are created from these properties.", "items": { "$ref": "AttachedDisk" }, "type": "array" }, "guestAccelerators": { - "description": "A list of guest accelerator cards' type and count to use for instances created from the instance template.", + "description": "A list of guest accelerator cards' type and count to use for instances created from these properties.", "items": { "$ref": "AcceleratorConfig" }, @@ -32891,7 +35636,7 @@ "additionalProperties": { "type": "string" }, - "description": "Labels to apply to instances that are created from this template.", + "description": "Labels to apply to instances that are created from these properties.", "type": "object" }, "machineType": { @@ -32900,15 +35645,15 @@ "compute.instanceTemplates.insert" ] }, - "description": "The machine type to use for instances that are created from this template.", + "description": "The machine type to use for instances that are created from these properties.", "type": "string" }, "metadata": { "$ref": "Metadata", - "description": "The metadata key/value pairs to assign to instances that are created from this template. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information." + "description": "The metadata key/value pairs to assign to instances that are created from these properties. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information." }, "minCpuPlatform": { - "description": "Minimum cpu/platform to be used by this instance. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\". For more information, read Specifying a Minimum CPU Platform.", + "description": "Minimum cpu/platform to be used by instances. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\". For more information, read Specifying a Minimum CPU Platform.", "type": "string" }, "networkInterfaces": { @@ -32918,12 +35663,26 @@ }, "type": "array" }, + "privateIpv6GoogleAccess": { + "description": "The private IPv6 google access type for VMs. If not specified, use INHERIT_FROM_SUBNETWORK as default.", + "enum": [ + "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE", + "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE", + "INHERIT_FROM_SUBNETWORK" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "reservationAffinity": { "$ref": "ReservationAffinity", - "description": "Specifies the reservations that this instance can consume from." + "description": "Specifies the reservations that instances can consume from." 
}, "resourcePolicies": { - "description": "Resource policies (names, not ULRs) applied to instances created from this template.", + "description": "Resource policies (names, not ULRs) applied to instances created from these properties.", "items": { "type": "string" }, @@ -32931,10 +35690,10 @@ }, "scheduling": { "$ref": "Scheduling", - "description": "Specifies the scheduling options for the instances that are created from this template." + "description": "Specifies the scheduling options for the instances that are created from these properties." }, "serviceAccounts": { - "description": "A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from this template. Use metadata queries to obtain the access tokens for these instances.", + "description": "A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from these properties. Use metadata queries to obtain the access tokens for these instances.", "items": { "$ref": "ServiceAccount" }, @@ -32945,7 +35704,7 @@ }, "tags": { "$ref": "Tags", - "description": "A list of tags to apply to the instances that are created from this template. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035." + "description": "A list of tags to apply to the instances that are created from these properties. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035." } }, "type": "object" @@ -33061,6 +35820,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -33092,6 +35852,7 @@ "", "", "", + "", "" ], "type": "string" @@ -33226,6 +35987,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -33257,6 +36019,7 @@ "", "", "", + "", "" ], "type": "string" @@ -33746,6 +36509,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -33768,6 +36538,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -33799,6 +36570,7 @@ "", "", "", + "", "" ], "type": "string" @@ -33880,6 +36652,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -33911,6 +36684,7 @@ "", "", "", + "", "" ], "type": "string" @@ -34005,6 +36779,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -34036,6 +36811,7 @@ "", "", "", + "", "" ], "type": "string" @@ -34068,7 +36844,7 @@ "type": "object" }, "InterconnectCircuitInfo": { - "description": "Describes a single physical 
circuit between the Customer and Google. CircuitInfo objects are created by Google, so all fields are output only. Next id: 4", + "description": "Describes a single physical circuit between the Customer and Google. CircuitInfo objects are created by Google, so all fields are output only.", "id": "InterconnectCircuitInfo", "properties": { "customerDemarcId": { @@ -34263,6 +37039,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -34294,6 +37071,7 @@ "", "", "", + "", "" ], "type": "string" @@ -34479,6 +37257,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -34510,6 +37289,7 @@ "", "", "", + "", "" ], "type": "string" @@ -34574,7 +37354,7 @@ "type": "object" }, "InterconnectOutageNotification": { - "description": "Description of a planned outage on this Interconnect. Next id: 9", + "description": "Description of a planned outage on this Interconnect.", "id": "InterconnectOutageNotification", "properties": { "affectedCircuits": { @@ -34798,6 +37578,26 @@ }, "type": "object" }, + "LicenseResourceCommitment": { + "description": "Commitment for a particular license resource.", + "id": "LicenseResourceCommitment", + "properties": { + "amount": { + "description": "The number of licenses purchased.", + "format": "int64", + "type": "string" + }, + "coresPerLicense": { + "description": "Specifies the core range of the instance for which this license applies.", + "type": "string" + }, + "license": { + "description": "Any applicable license URI.", + "type": "string" + } + }, + "type": "object" + }, "LicenseResourceRequirements": { "id": "LicenseResourceRequirements", "properties": { @@ -34858,6 +37658,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -34889,6 +37690,7 @@ "", "", "", + "", "" ], "type": "string" @@ -35048,7 +37850,7 @@ }, "deprecated": { "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this machine type." + "description": "[Output Only] The deprecation status associated with this machine type. Only applicable if the machine type is unavailable." 
}, "description": { "description": "[Output Only] An optional textual description of the resource.", @@ -35151,6 +37953,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -35173,6 +37982,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -35204,6 +38014,7 @@ "", "", "", + "", "" ], "type": "string" @@ -35285,6 +38096,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -35316,6 +38128,7 @@ "", "", "", + "", "" ], "type": "string" @@ -35379,6 +38192,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -35410,6 +38224,7 @@ "", "", "", + "", "" ], "type": "string" @@ -35519,6 +38334,14 @@ "$ref": "ManagedInstanceLastAttempt", "description": "[Output Only] Information about the last attempt to create or delete the instance." }, + "preservedStateFromConfig": { + "$ref": "PreservedState", + "description": "[Output Only] Preserved state applied from per-instance config for this instance." + }, + "preservedStateFromPolicy": { + "$ref": "PreservedState", + "description": "[Output Only] Preserved state generated based on stateful policy for this instance." + }, "version": { "$ref": "ManagedInstanceVersion", "description": "[Output Only] Intended version of this instance." @@ -35717,7 +38540,7 @@ "type": "string" }, "autoCreateSubnetworks": { - "description": "When set to true, the VPC network is created in \"auto\" mode. When set to false, the VPC network is created in \"custom\" mode.\n\nAn auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges.", + "description": "Must be set to create a VPC network. If not set, a legacy network is created.\n\nWhen set to true, the VPC network is created in auto mode. When set to false, the VPC network is created in custom mode.\n\nAn auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges.\n\nFor custom mode VPC networks, you can add subnets using the subnetworks insert method.", "type": "boolean" }, "creationTimestamp": { @@ -35743,6 +38566,11 @@ "description": "[Output Only] Type of the resource. Always compute#network for networks.", "type": "string" }, + "mtu": { + "description": "Maximum Transmission Unit in bytes. The minimum value for this field is 1460 and the maximum value is 1500 bytes.", + "format": "int32", + "type": "integer" + }, "name": { "annotations": { "required": [ @@ -35779,9 +38607,16 @@ "type": "object" }, "NetworkEndpoint": { - "description": "The network endpoint. 
Next ID: 7", + "description": "The network endpoint.", "id": "NetworkEndpoint", "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata defined as annotations on the network endpoint.", + "type": "object" + }, "fqdn": { "description": "Optional fully qualified domain name of network endpoint. This can only be specified when NetworkEndpointGroup.network_endpoint_type is NON_GCP_FQDN_PORT.", "type": "string" @@ -35803,9 +38638,28 @@ "type": "object" }, "NetworkEndpointGroup": { - "description": "Represents a collection of network endpoints.\n\nA network endpoint group (NEG) defines how a set of endpoints should be reached, whether they are reachable, and where they are located. For more information about using NEGs, see Setting up internet NEGs or Setting up zonal NEGs. (== resource_for {$api_version}.networkEndpointGroups ==) (== resource_for {$api_version}.globalNetworkEndpointGroups ==)", + "description": "Represents a collection of network endpoints.\n\nA network endpoint group (NEG) defines how a set of endpoints should be reached, whether they are reachable, and where they are located. For more information about using NEGs, see Setting up internet NEGs, Setting up zonal NEGs, or Setting up serverless NEGs. (== resource_for {$api_version}.networkEndpointGroups ==) (== resource_for {$api_version}.globalNetworkEndpointGroups ==) (== resource_for {$api_version}.regionNetworkEndpointGroups ==)", "id": "NetworkEndpointGroup", "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata defined as annotations on the network endpoint group.", + "type": "object" + }, + "appEngine": { + "$ref": "NetworkEndpointGroupAppEngine", + "description": "Only valid when networkEndpointType is \"SERVERLESS\". Only one of cloudRun, appEngine or cloudFunction may be set." + }, + "cloudFunction": { + "$ref": "NetworkEndpointGroupCloudFunction", + "description": "Only valid when networkEndpointType is \"SERVERLESS\". Only one of cloudRun, appEngine or cloudFunction may be set." + }, + "cloudRun": { + "$ref": "NetworkEndpointGroupCloudRun", + "description": "Only valid when networkEndpointType is \"SERVERLESS\". Only one of cloudRun, appEngine or cloudFunction may be set." + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -35838,19 +38692,27 @@ "type": "string" }, "networkEndpointType": { - "description": "Type of network endpoints in this network endpoint group.", + "description": "Type of network endpoints in this network endpoint group. 
Can be one of GCE_VM_IP_PORT, NON_GCP_PRIVATE_IP_PORT, INTERNET_FQDN_PORT, INTERNET_IP_PORT, or SERVERLESS.", "enum": [ "GCE_VM_IP_PORT", "INTERNET_FQDN_PORT", - "INTERNET_IP_PORT" + "INTERNET_IP_PORT", + "NON_GCP_PRIVATE_IP_PORT", + "SERVERLESS" ], "enumDescriptions": [ + "", + "", "", "", "" ], "type": "string" }, + "region": { + "description": "[Output Only] The URL of the region where the network endpoint group is located.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -35899,6 +38761,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -35921,6 +38790,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -35952,6 +38822,7 @@ "", "", "", + "", "" ], "type": "string" @@ -35983,6 +38854,59 @@ }, "type": "object" }, + "NetworkEndpointGroupAppEngine": { + "description": "Configuration for an App Engine network endpoint group (NEG). The service is optional, may be provided explicitly or in the URL mask. The version is optional and can only be provided explicitly or in the URL mask when service is present.\n\nNote: App Engine service must be in the same project and located in the same region as the Serverless NEG.", + "id": "NetworkEndpointGroupAppEngine", + "properties": { + "service": { + "description": "Optional serving service.\n\nThe service name must be 1-63 characters long, and comply with RFC1035.\n\nExample value: \"default\", \"my-service\".", + "type": "string" + }, + "urlMask": { + "description": "A template to parse service and version fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services.\n\nFor example, the request URLs \"foo1-dot-appname.appspot.com/v1\" and \"foo1-dot-appname.appspot.com/v2\" can be backed by the same Serverless NEG with URL mask \"-dot-appname.appspot.com/\". The URL mask will parse them to { service = \"foo1\", version = \"v1\" } and { service = \"foo1\", version = \"v2\" } respectively.", + "type": "string" + }, + "version": { + "description": "Optional serving version.\n\nThe version must be 1-63 characters long, and comply with RFC1035.\n\nExample value: \"v1\", \"v2\".", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEndpointGroupCloudFunction": { + "description": "Configuration for a Cloud Function network endpoint group (NEG). The function must be provided explicitly or in the URL mask.\n\nNote: Cloud Function must be in the same project and located in the same region as the Serverless NEG.", + "id": "NetworkEndpointGroupCloudFunction", + "properties": { + "function": { + "description": "A user-defined name of the Cloud Function.\n\nThe function name is case-sensitive and must be 1-63 characters long.\n\nExample value: \"func1\".", + "type": "string" + }, + "urlMask": { + "description": "A template to parse function field from a request URL. 
URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services.\n\nFor example, request URLs \"mydomain.com/function1\" and \"mydomain.com/function2\" can be backed by the same Serverless NEG with URL mask \"/\". The URL mask will parse them to { function = \"function1\" } and { function = \"function2\" } respectively.", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEndpointGroupCloudRun": { + "description": "Configuration for a Cloud Run network endpoint group (NEG). The service must be provided explicitly or in the URL mask. The tag is optional, may be provided explicitly or in the URL mask.\n\nNote: Cloud Run service must be in the same project and located in the same region as the Serverless NEG.", + "id": "NetworkEndpointGroupCloudRun", + "properties": { + "service": { + "description": "Cloud Run service is the main resource of Cloud Run.\n\nThe service must be 1-63 characters long, and comply with RFC1035.\n\nExample value: \"run-service\".", + "type": "string" + }, + "tag": { + "description": "Optional Cloud Run tag represents the \"named-revision\" to provide additional fine-grained traffic routing information.\n\nThe tag must be 1-63 characters long, and comply with RFC1035.\n\nExample value: \"revision-0010\".", + "type": "string" + }, + "urlMask": { + "description": "A template to parse service and tag fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services.\n\nFor example, request URLs \"foo1.domain.com/bar1\" and \"foo1.domain.com/bar2\" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask \".domain.com/\". The URL mask will parse them to { service=\"bar1\", tag=\"foo1\" } and { service=\"bar2\", tag=\"foo2\" } respectively.", + "type": "string" + } + }, + "type": "object" + }, "NetworkEndpointGroupList": { "id": "NetworkEndpointGroupList", "properties": { @@ -36032,6 +38956,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -36063,6 +38988,7 @@ "", "", "", + "", "" ], "type": "string" @@ -36183,6 +39109,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -36214,6 +39141,7 @@ "", "", "", + "", "" ], "type": "string" @@ -36277,6 +39205,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -36308,6 +39237,7 @@ "", "", "", + "", "" ], "type": "string" @@ -36379,6 +39309,10 @@ "format": "byte", "type": "string" }, + "ipv6Address": { + "description": "[Output Only] An IPv6 internal network address for this network interface.", + "type": "string" + }, "kind": { "default": "compute#networkInterface", "description": "[Output Only] Type of the resource. Always compute#networkInterface for network interfaces.", @@ -36453,6 +39387,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -36484,6 +39419,7 @@ "", "", "", + "", "" ], "type": "string" @@ -36551,6 +39487,11 @@ "description": "The URL of the peer network. 
It can be either full URL or partial URL. The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network.", "type": "string" }, + "peerMtu": { + "description": "Maximum Transmission Unit in bytes.", + "format": "int32", + "type": "integer" + }, "state": { "description": "[Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The peering is `ACTIVE` when there's a matching configuration in the peer network.", "enum": [ @@ -36666,7 +39607,7 @@ "type": "string" }, "maintenancePolicy": { - "description": "Specifies how to handle instances when a node in the group undergoes maintenance.", + "description": "Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT. For more information, see Maintenance policies.", "enum": [ "DEFAULT", "MAINTENANCE_POLICY_UNSPECIFIED", @@ -36686,7 +39627,7 @@ "type": "string" }, "nodeTemplate": { - "description": "The URL of the node template to which this node group belongs.", + "description": "URL of the node template to create the node group from.", "type": "string" }, "selfLink": { @@ -36748,6 +39689,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -36770,6 +39718,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -36801,6 +39750,7 @@ "", "", "", + "", "" ], "type": "string" @@ -36836,7 +39786,7 @@ "id": "NodeGroupAutoscalingPolicy", "properties": { "maxNodes": { - "description": "The maximum number of nodes that the group should have.", + "description": "The maximum number of nodes that the group should have. Must be set if autoscaling is enabled. Maximum value allowed is 100.", "format": "int32", "type": "integer" }, @@ -36846,7 +39796,7 @@ "type": "integer" }, "mode": { - "description": "The autoscaling mode.", + "description": "The autoscaling mode. Set to one of: ON, OFF, or ONLY_SCALE_OUT. 
For more information, see Autoscaler modes.", "enum": [ "MODE_UNSPECIFIED", "OFF", @@ -36914,6 +39864,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -36945,6 +39896,7 @@ "", "", "", + "", "" ], "type": "string" @@ -36979,6 +39931,20 @@ "NodeGroupNode": { "id": "NodeGroupNode", "properties": { + "cpuOvercommitType": { + "description": "CPU overcommit.", + "enum": [ + "CPU_OVERCOMMIT_TYPE_UNSPECIFIED", + "ENABLED", + "NONE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "instances": { "description": "Instances scheduled on this node.", "items": { @@ -37095,6 +40061,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -37126,6 +40093,7 @@ "", "", "", + "", "" ], "type": "string" @@ -37189,6 +40157,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -37220,6 +40189,7 @@ "", "", "", + "", "" ], "type": "string" @@ -37262,9 +40232,23 @@ "type": "object" }, "NodeTemplate": { - "description": "Represent a sole-tenant Node Template resource.\n\nYou can use a template to define properties for nodes in a node group. For more information, read Creating node groups and instances. (== resource_for {$api_version}.nodeTemplates ==) (== NextID: 19 ==)", + "description": "Represent a sole-tenant Node Template resource.\n\nYou can use a template to define properties for nodes in a node group. For more information, read Creating node groups and instances. 
(== resource_for {$api_version}.nodeTemplates ==)", "id": "NodeTemplate", "properties": { + "cpuOvercommitType": { + "description": "CPU overcommit.", + "enum": [ + "CPU_OVERCOMMIT_TYPE_UNSPECIFIED", + "ENABLED", + "NONE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -37365,6 +40349,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -37387,6 +40378,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -37418,6 +40410,7 @@ "", "", "", + "", "" ], "type": "string" @@ -37499,6 +40492,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -37530,6 +40524,7 @@ "", "", "", + "", "" ], "type": "string" @@ -37608,6 +40603,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -37639,6 +40635,7 @@ "", "", "", + "", "" ], "type": "string" @@ -37759,6 +40756,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -37781,6 +40785,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -37812,6 +40817,7 @@ "", "", "", + "", "" ], "type": "string" @@ -37893,6 +40899,103 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "NodeTypesScopedList": { + "id": "NodeTypesScopedList", + "properties": { + "nodeTypes": { + "description": "[Output Only] A list of node types contained in this scope.", + "items": { + "$ref": "NodeType" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] An informational warning that appears when the node types list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -37924,6 +41027,7 @@ "", "", "", + "", "" ], "type": "string" @@ -37955,18 +41059,105 @@ }, "type": "object" }, - "NodeTypesScopedList": { - "id": "NodeTypesScopedList", + "NotificationEndpoint": { + "description": "Represents a notification endpoint.\n\nA notification endpoint resource defines an endpoint to receive notifications when there are status changes detected by the associated health check service.\n\nFor more information, see Health checks overview. (== resource_for {$api_version}.notificationEndpoint ==) (== resource_for {$api_version}.regionNotificationEndpoints ==)", + "id": "NotificationEndpoint", "properties": { - "nodeTypes": { - "description": "[Output Only] A list of node types contained in this scope.", + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "grpcSettings": { + "$ref": "NotificationEndpointGrpcSettings", + "description": "Settings of the gRPC notification endpoint including the endpoint URL and the retry duration." + }, + "id": { + "description": "[Output Only] A unique identifier for this resource type. The server generates this identifier.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#notificationEndpoint", + "description": "[Output Only] Type of the resource. Always compute#notificationEndpoint for notification endpoints.", + "type": "string" + }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "region": { + "description": "[Output Only] URL of the region where the notification endpoint resides. This field applies only to the regional resource. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + } + }, + "type": "object" + }, + "NotificationEndpointGrpcSettings": { + "description": "Represents a gRPC setting that describes one gRPC notification endpoint and the retry duration attempting to send notification to this endpoint.", + "id": "NotificationEndpointGrpcSettings", + "properties": { + "authority": { + "description": "Optional. If specified, this field is used to set the authority header by the sender of notifications. See https://tools.ietf.org/html/rfc7540#section-8.1.2.3", + "type": "string" + }, + "endpoint": { + "description": "Endpoint to which gRPC notifications are sent. This must be a valid gRPCLB DNS name.", + "type": "string" + }, + "payloadName": { + "description": "Optional. If specified, this field is used to populate the \"name\" field in gRPC requests.", + "type": "string" + }, + "resendInterval": { + "$ref": "Duration", + "description": "Optional. This field is used to configure how often to send a full update of all non-healthy backends. If unspecified, full updates are not sent. If specified, must be in the range between 600 seconds to 3600 seconds. Nanos are disallowed." + }, + "retryDurationSec": { + "description": "How much time (in seconds) is spent attempting notification retries until a successful response is received. Default is 30s. Limit is 20m (1200s). Must be a positive number.", + "format": "uint32", + "type": "integer" + } + }, + "type": "object" + }, + "NotificationEndpointList": { + "id": "NotificationEndpointList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of NotificationEndpoint resources.", "items": { - "$ref": "NodeType" + "$ref": "NotificationEndpoint" }, "type": "array" }, + "kind": { + "default": "compute#notificationEndpointList", + "description": "[Output Only] Type of the resource. Always compute#notificationEndpoint for notification endpoints.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { - "description": "[Output Only] An informational warning that appears when the node types list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -37987,6 +41178,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -38018,6 +41210,7 @@ "", "", "", + "", "" ], "type": "string" @@ -38198,6 +41391,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -38229,6 +41423,7 @@ "", "", "", + "", "" ], "type": "string" @@ -38294,6 +41489,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -38316,6 +41518,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -38347,6 +41550,7 @@ "", "", "", + "", "" ], "type": "string" @@ -38428,6 +41632,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -38459,6 +41664,7 @@ "", "", "", + "", "" ], "type": "string" @@ -38522,6 +41728,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -38553,6 +41760,7 @@ "", "", "", + "", "" ], "type": "string" @@ -38754,6 +41962,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -38776,6 +41991,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -38807,6 +42023,7 @@ "", "", "", + "", "" ], "type": "string" @@ -38922,6 +42139,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -38953,6 +42171,7 @@ "", "", "", + "", "" ], "type": "string" @@ -39085,6 +42304,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -39116,6 +42336,7 @@ "", "", "", + "", "" ], "type": "string" @@ -39153,7 +42374,7 @@ "properties": { "defaultRouteAction": { "$ref": "HttpRouteAction", - "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. 
Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set." + "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a pathMatcher's defaultRouteAction." }, "defaultService": { "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.\nAuthorization requires one or more of the following Google IAM permissions on the specified resource default_service: \n- compute.backendBuckets.use \n- compute.backendServices.use", @@ -39161,7 +42382,7 @@ }, "defaultUrlRedirect": { "$ref": "HttpRedirectAction", - "description": "When none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set." + "description": "When none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set.\nNot supported when the URL map is bound to target gRPC proxy." }, "description": { "description": "An optional description of this resource. Provide this property when you create the resource.", @@ -39169,7 +42390,7 @@ }, "headerAction": { "$ref": "HttpHeaderAction", - "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nHeaderAction specified here are applied after the matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap" + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nHeaderAction specified here are applied after the matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap \nNote that headerAction is not supported for Loadbalancers that have their loadBalancingScheme set to EXTERNAL.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." 
}, "name": { "description": "The name to which this PathMatcher is referred by the HostRule.", @@ -39205,7 +42426,7 @@ }, "routeAction": { "$ref": "HttpRouteAction", - "description": "In response to a matching path, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of routeAction or urlRedirect must be set." + "description": "In response to a matching path, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of routeAction or urlRedirect must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a pathRule's routeAction." }, "service": { "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendService s. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set.", @@ -39213,7 +42434,7 @@ }, "urlRedirect": { "$ref": "HttpRedirectAction", - "description": "When a path pattern is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set." + "description": "When a path pattern is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set.\nNot supported when the URL map is bound to target gRPC proxy." } }, "type": "object" @@ -39229,6 +42450,30 @@ "name": { "description": "The name of a per-instance config and its corresponding instance. Serves as a merge key during UpdatePerInstanceConfigs operations, that is, if a per-instance config with the same name exists then it will be updated, otherwise a new one will be created for the VM instance with the same name. An attempt to create a per-instance config for a VM instance that either doesn't exist or is not part of the group will result in an error.", "type": "string" + }, + "preservedState": { + "$ref": "PreservedState", + "description": "The intended preserved state for the given instance. Does not contain preserved state generated from a stateful policy." 
+ }, + "status": { + "description": "The status of applying this per-instance config on the corresponding managed instance.", + "enum": [ + "APPLYING", + "DELETING", + "EFFECTIVE", + "NONE", + "UNAPPLIED", + "UNAPPLIED_DELETION" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ], + "type": "string" } }, "type": "object" @@ -39288,6 +42533,61 @@ }, "type": "object" }, + "PreservedState": { + "description": "Preserved state for a given instance.", + "id": "PreservedState", + "properties": { + "disks": { + "additionalProperties": { + "$ref": "PreservedStatePreservedDisk" + }, + "description": "Preserved disks defined for this instance. This map is keyed with the device names of the disks.", + "type": "object" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Preserved metadata defined for this instance.", + "type": "object" + } + }, + "type": "object" + }, + "PreservedStatePreservedDisk": { + "id": "PreservedStatePreservedDisk", + "properties": { + "autoDelete": { + "description": "These stateful disks will never be deleted during autohealing, update, instance recreate operations. This flag is used to configure if the disk should be deleted after it is no longer used by the group, e.g. when the given instance or the whole MIG is deleted. Note: disks attached in READ_ONLY mode cannot be auto-deleted.", + "enum": [ + "NEVER", + "ON_PERMANENT_INSTANCE_DELETION" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "mode": { + "description": "The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode.", + "enum": [ + "READ_ONLY", + "READ_WRITE" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "source": { + "description": "The URL of the disk resource that is stateful and should be attached to the VM instance.", + "type": "string" + } + }, + "type": "object" + }, "Project": { "description": "Represents a Project resource.\n\nA project is used to organize resources in a Google Cloud Platform environment. For more information, read about the Resource Hierarchy. (== resource_for {$api_version}.projects ==)", "id": "Project", @@ -39357,7 +42657,7 @@ "description": "The naming prefix for daily usage reports and the Google Cloud Storage bucket where they are stored." }, "xpnProjectStatus": { - "description": "[Output Only] The role this project has in a shared VPC configuration. Currently only HOST projects are differentiated.", + "description": "[Output Only] The role this project has in a shared VPC configuration. 
Currently, only projects with the host role, which is specified by the value HOST, are differentiated.", "enum": [ "HOST", "UNSPECIFIED_XPN_PROJECT_STATUS" @@ -39453,18 +42753,22 @@ "metric": { "description": "[Output Only] Name of the quota metric.", "enum": [ + "A2_CPUS", "AFFINITY_GROUPS", "AUTOSCALERS", "BACKEND_BUCKETS", "BACKEND_SERVICES", "C2_CPUS", "COMMITMENTS", + "COMMITTED_A2_CPUS", "COMMITTED_C2_CPUS", "COMMITTED_CPUS", "COMMITTED_LICENSES", "COMMITTED_LOCAL_SSD_TOTAL_GB", + "COMMITTED_MEMORY_OPTIMIZED_CPUS", "COMMITTED_N2D_CPUS", "COMMITTED_N2_CPUS", + "COMMITTED_NVIDIA_A100_GPUS", "COMMITTED_NVIDIA_K80_GPUS", "COMMITTED_NVIDIA_P100_GPUS", "COMMITTED_NVIDIA_P4_GPUS", @@ -39473,6 +42777,8 @@ "CPUS", "CPUS_ALL_REGIONS", "DISKS_TOTAL_GB", + "EXTERNAL_NETWORK_LB_FORWARDING_RULES", + "EXTERNAL_PROTOCOL_FORWARDING_RULES", "EXTERNAL_VPN_GATEWAYS", "FIREWALLS", "FORWARDING_RULES", @@ -39489,18 +42795,23 @@ "INTERCONNECT_ATTACHMENTS_TOTAL_MBPS", "INTERCONNECT_TOTAL_GBPS", "INTERNAL_ADDRESSES", + "INTERNAL_TRAFFIC_DIRECTOR_FORWARDING_RULES", "IN_PLACE_SNAPSHOTS", "IN_USE_ADDRESSES", "IN_USE_BACKUP_SCHEDULES", "IN_USE_SNAPSHOT_SCHEDULES", "LOCAL_SSD_TOTAL_GB", + "M1_CPUS", + "M2_CPUS", "MACHINE_IMAGES", "N2D_CPUS", "N2_CPUS", "NETWORKS", "NETWORK_ENDPOINT_GROUPS", + "NETWORK_FIREWALL_POLICIES", "NODE_GROUPS", "NODE_TEMPLATES", + "NVIDIA_A100_GPUS", "NVIDIA_K80_GPUS", "NVIDIA_P100_GPUS", "NVIDIA_P100_VWS_GPUS", @@ -39512,6 +42823,7 @@ "PACKET_MIRRORINGS", "PREEMPTIBLE_CPUS", "PREEMPTIBLE_LOCAL_SSD_GB", + "PREEMPTIBLE_NVIDIA_A100_GPUS", "PREEMPTIBLE_NVIDIA_K80_GPUS", "PREEMPTIBLE_NVIDIA_P100_GPUS", "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS", @@ -39642,6 +42954,18 @@ "", "", "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", "" ], "type": "string" @@ -39795,6 +43119,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -39826,6 +43151,7 @@ "", "", "", + "", "" ], "type": "string" @@ -39906,6 +43232,158 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "RegionDisksAddResourcePoliciesRequest": { + "id": "RegionDisksAddResourcePoliciesRequest", + "properties": { + "resourcePolicies": { + "description": "Resource policies to be added to this disk.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "RegionDisksRemoveResourcePoliciesRequest": { + "id": "RegionDisksRemoveResourcePoliciesRequest", + "properties": { + "resourcePolicies": { + "description": "Resource policies to be removed from this disk.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "RegionDisksResizeRequest": { + "id": "RegionDisksResizeRequest", + "properties": { + "sizeGb": { + "description": "The new size of the regional persistent disk, which is specified in GB.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "RegionInstanceGroupList": { + "description": "Contains a list of InstanceGroup resources.", + "id": "RegionInstanceGroupList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of InstanceGroup resources.", + "items": { + "$ref": "InstanceGroup" + }, + "type": "array" + }, + "kind": { + "default": "compute#regionInstanceGroupList", + "description": "The resource type.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -39937,6 +43415,7 @@ "", "", "", + "", "" ], "type": "string" @@ -39968,24 +43447,12 @@ }, "type": "object" }, - "RegionDisksAddResourcePoliciesRequest": { - "id": "RegionDisksAddResourcePoliciesRequest", + "RegionInstanceGroupManagerDeleteInstanceConfigReq": { + "description": "RegionInstanceGroupManagers.deletePerInstanceConfigs", + "id": "RegionInstanceGroupManagerDeleteInstanceConfigReq", "properties": { - "resourcePolicies": { - "description": "Resource policies to be added to this disk.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "RegionDisksRemoveResourcePoliciesRequest": { - "id": "RegionDisksRemoveResourcePoliciesRequest", - "properties": { - "resourcePolicies": { - "description": "Resource policies to be removed from this disk.", + "names": { + "description": "The list of instance names for which we want to delete per-instance configs on this managed instance group.", "items": { "type": "string" }, @@ -39994,35 +43461,24 @@ }, "type": "object" }, - "RegionDisksResizeRequest": { - "id": "RegionDisksResizeRequest", - "properties": { - "sizeGb": { - "description": "The new size of the regional persistent disk, which is specified in GB.", - "format": "int64", - "type": "string" - } - }, - "type": "object" - }, - "RegionInstanceGroupList": { - "description": "Contains a list of InstanceGroup resources.", - "id": "RegionInstanceGroupList", + "RegionInstanceGroupManagerList": { + "description": "Contains a list of managed instance groups.", + "id": "RegionInstanceGroupManagerList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of InstanceGroup resources.", + "description": "A list of InstanceGroupManager resources.", "items": { - "$ref": "InstanceGroup" + "$ref": "InstanceGroupManager" }, "type": "array" }, "kind": { - "default": "compute#regionInstanceGroupList", - "description": "The resource type.", + "default": "compute#regionInstanceGroupManagerList", + "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerList for a list of managed instance groups that exist in th regional scope.", "type": "string" }, "nextPageToken": { @@ -40055,6 +43511,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -40086,6 +43543,7 @@ "", "", "", + "", "" ], "type": "string" @@ -40117,114 +43575,30 @@ }, "type": "object" }, - "RegionInstanceGroupManagerList": { - "description": "Contains a list of managed instance groups.", - "id": "RegionInstanceGroupManagerList", + "RegionInstanceGroupManagerPatchInstanceConfigReq": { + "description": 
"RegionInstanceGroupManagers.patchPerInstanceConfigs", + "id": "RegionInstanceGroupManagerPatchInstanceConfigReq", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of InstanceGroupManager resources.", + "perInstanceConfigs": { + "description": "The list of per-instance configs to insert or patch on this managed instance group.", "items": { - "$ref": "InstanceGroupManager" + "$ref": "PerInstanceConfig" }, "type": "array" - }, - "kind": { - "default": "compute#regionInstanceGroupManagerList", - "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerList for a list of managed instance groups that exist in th regional scope.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } + } + }, + "type": "object" + }, + "RegionInstanceGroupManagerUpdateInstanceConfigReq": { + "description": "RegionInstanceGroupManagers.updatePerInstanceConfigs", + "id": "RegionInstanceGroupManagerUpdateInstanceConfigReq", + "properties": { + "perInstanceConfigs": { + "description": "The list of per-instance configs to insert or patch on this managed instance group.", + "items": { + "$ref": "PerInstanceConfig" }, - "type": "object" + "type": "array" } }, "type": "object" @@ -40243,7 +43617,7 @@ "type": "object" }, "RegionInstanceGroupManagersApplyUpdatesRequest": { - "description": "InstanceGroupManagers.applyUpdatesToInstances", + "description": "RegionInstanceGroupManagers.applyUpdatesToInstances", "id": "RegionInstanceGroupManagersApplyUpdatesRequest", "properties": { "instances": { @@ -40332,6 +43706,106 @@ }, "type": "object" }, + "RegionInstanceGroupManagersListInstanceConfigsResp": { + "id": "RegionInstanceGroupManagersListInstanceConfigsResp", + "properties": { + "items": { + "description": "[Output Only] The list of PerInstanceConfig.", + "items": { + "$ref": "PerInstanceConfig" + }, + "type": "array" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "RegionInstanceGroupManagersListInstancesResponse": { "id": "RegionInstanceGroupManagersListInstancesResponse", "properties": { @@ -40439,6 +43913,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -40470,6 +43945,7 @@ "", "", "", + "", "" ], "type": "string" @@ -40592,6 +44068,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -40623,6 +44100,7 @@ "", "", "", + "", "" ], "type": "string" @@ -40864,6 +44342,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -40886,6 +44371,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -40917,6 +44403,7 @@ "", "", "", + "", "" ], "type": "string" @@ -40997,6 +44484,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -41028,6 +44516,7 @@ "", "", "", + "", "" ], "type": "string" @@ -41102,6 +44591,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -41133,6 +44623,7 @@ "", "", "", + "", "" ], "type": "string" @@ -41240,6 +44731,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -41271,6 +44763,7 @@ "", "", "", + "", "" ], "type": "string" @@ -41399,6 +44892,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -41421,6 +44921,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + 
"PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -41452,6 +44953,7 @@ "", "", "", + "", "" ], "type": "string" @@ -41604,6 +45106,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -41635,6 +45138,7 @@ "", "", "", + "", "" ], "type": "string" @@ -41931,6 +45435,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -41962,6 +45467,7 @@ "", "", "", + "", "" ], "type": "string" @@ -42045,6 +45551,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -42076,6 +45583,7 @@ "", "", "", + "", "" ], "type": "string" @@ -42108,7 +45616,7 @@ "type": "object" }, "Router": { - "description": "Represents a Cloud Router resource.\n\nFor more information about Cloud Router, read the the Cloud Router overview.", + "description": "Represents a Cloud Router resource.\n\nFor more information about Cloud Router, read the Cloud Router overview.", "id": "Router", "properties": { "bgp": { @@ -42229,6 +45737,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -42251,6 +45766,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -42282,6 +45798,7 @@ "", "", "", + "", "" ], "type": "string" @@ -42533,6 +46050,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -42564,6 +46082,7 @@ "", "", "", + "", "" ], "type": "string" @@ -42840,7 +46359,7 @@ "type": "object" }, "RouterStatusNatStatus": { - "description": "Status of a NAT contained in this router. Next tag: 9", + "description": "Status of a NAT contained in this router.", "id": "RouterStatusNatStatus", "properties": { "autoAllocatedNatIps": { @@ -42951,6 +46470,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -42982,6 +46502,7 @@ "", "", "", + "", "" ], "type": "string" @@ -43129,13 +46650,18 @@ "type": "object" }, "Scheduling": { - "description": "Sets the scheduling options for an Instance. NextID: 11", + "description": "Sets the scheduling options for an Instance. NextID: 13", "id": "Scheduling", "properties": { "automaticRestart": { "description": "Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. 
Preemptible instances cannot be automatically restarted.\n\nBy default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine.", "type": "boolean" }, + "minNodeCpus": { + "description": "The minimum number of virtual CPUs this instance will consume when running on a sole-tenant node.", + "format": "int32", + "type": "integer" + }, "nodeAffinities": { "description": "A set of node affinity and anti-affinity configurations. Refer to Configuring node affinity for more information. Overrides reservationAffinity.", "items": { @@ -43156,7 +46682,7 @@ "type": "string" }, "preemptible": { - "description": "Defines whether the instance is preemptible. This can only be set during instance creation, it cannot be set or changed after the instance has been created.", + "description": "Defines whether the instance is preemptible. This can only be set during instance creation or while the instance is stopped and therefore, in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states.", "type": "boolean" } }, @@ -43194,6 +46720,22 @@ }, "type": "object" }, + "Screenshot": { + "description": "An instance's screenshot.", + "id": "Screenshot", + "properties": { + "contents": { + "description": "[Output Only] The Base64-encoded screenshot data.", + "type": "string" + }, + "kind": { + "default": "compute#screenshot", + "description": "[Output Only] Type of the resource. Always compute#screenshot for the screenshots.", + "type": "string" + } + }, + "type": "object" + }, "SecurityPoliciesListPreconfiguredExpressionSetsResponse": { "id": "SecurityPoliciesListPreconfiguredExpressionSetsResponse", "properties": { @@ -43303,6 +46845,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -43334,6 +46877,7 @@ "", "", "", + "", "" ], "type": "string" @@ -43436,7 +46980,7 @@ "id": "SecurityPolicyRuleMatcherConfig", "properties": { "srcIpRanges": { - "description": "CIDR IP address range.", + "description": "CIDR IP address range. Maximum number of src_ip_ranges allowed is 10.", "items": { "type": "string" }, @@ -43459,7 +47003,7 @@ "type": "string" }, "next": { - "description": "[Output Only] The position of the next byte of content from the serial console output. Use this value in the next request as the start parameter.", + "description": "[Output Only] The position of the next byte of content, regardless of whether the content exists, following the output returned in the `contents` property. Use this value in the next request as the start parameter.", "format": "int64", "type": "string" }, @@ -43468,7 +47012,7 @@ "type": "string" }, "start": { - "description": "The starting byte position of the output that was returned. This should match the start parameter sent with the request. If the serial console output exceeds the size of the buffer, older output will be overwritten by newer content and the start values will be mismatched.", + "description": "The starting byte position of the output that was returned. This should match the start parameter sent with the request. If the serial console output exceeds the size of the buffer (1 MB), older output is overwritten by newer content. 
The output start value will indicate the byte position of the output that was returned, which might be different than the `start` value that was specified in the request.", "format": "int64", "type": "string" } @@ -43670,7 +47214,7 @@ "description": "Encrypts the snapshot using a customer-supplied encryption key.\n\nAfter you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the snapshot later. For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request.\n\nCustomer-supplied encryption keys do not protect access to metadata of the snapshot.\n\nIf you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." }, "sourceDisk": { - "description": "[Output Only] The source disk used to create this snapshot.", + "description": "The source disk used to create this snapshot.", "type": "string" }, "sourceDiskEncryptionKey": { @@ -43776,6 +47320,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -43807,6 +47352,7 @@ "", "", "", + "", "" ], "type": "string" @@ -43853,11 +47399,11 @@ "type": "object" }, "SslCertificate": { - "description": "Represents an SSL Certificate resource.\n\nGoogle Compute Engine has two SSL Certificate resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/sslCertificates) * [Regional](/compute/docs/reference/rest/{$api_version}/regionSslCertificates)\n\n\n\nThe sslCertificates are used by: \n- external HTTPS load balancers \n- SSL proxy load balancers \n\nThe regionSslCertificates are used by internal HTTPS load balancers.\n\nOptionally, certificate file contents that you upload can contain a set of up to five PEM-encoded certificates. The API call creates an object (sslCertificate) that holds this data. You can use SSL keys and certificates to secure connections to a load balancer. For more information, read Creating and using SSL certificates and SSL certificates quotas and limits. (== resource_for {$api_version}.sslCertificates ==) (== resource_for {$api_version}.regionSslCertificates ==)", + "description": "Represents an SSL Certificate resource.\n\nGoogle Compute Engine has two SSL Certificate resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/sslCertificates) * [Regional](/compute/docs/reference/rest/{$api_version}/regionSslCertificates)\n\n\n\nThe sslCertificates are used by: \n- external HTTPS load balancers \n- SSL proxy load balancers \n\nThe regionSslCertificates are used by internal HTTPS load balancers.\n\nOptionally, certificate file contents that you upload can contain a set of up to five PEM-encoded certificates. The API call creates an object (sslCertificate) that holds this data. You can use SSL keys and certificates to secure connections to a load balancer. For more information, read Creating and using SSL certificates, SSL certificates quotas and limits, and Troubleshooting SSL certificates. (== resource_for {$api_version}.sslCertificates ==) (== resource_for {$api_version}.regionSslCertificates ==)", "id": "SslCertificate", "properties": { "certificate": { - "description": "A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. 
The chain must include at least one intermediate cert.", + "description": "A value read into memory from a certificate file. The certificate file must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert.", "type": "string" }, "creationTimestamp": { @@ -43892,7 +47438,7 @@ "type": "string" }, "privateKey": { - "description": "A write-only private key in PEM format. Only insert requests will include this field.", + "description": "A value read into memory from a write-only private key file. The private key file must be in PEM format. For security, only insert requests include this field.", "type": "string" }, "region": { @@ -43959,6 +47505,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -43981,6 +47534,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -44012,6 +47566,7 @@ "", "", "", + "", "" ], "type": "string" @@ -44093,6 +47648,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -44124,6 +47680,7 @@ "", "", "", + "", "" ], "type": "string" @@ -44261,6 +47818,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -44292,6 +47850,7 @@ "", "", "", + "", "" ], "type": "string" @@ -44372,6 +47931,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -44403,6 +47963,7 @@ "", "", "", + "", "" ], "type": "string" @@ -44549,6 +48110,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -44580,6 +48142,7 @@ "", "", "", + "", "" ], "type": "string" @@ -44623,6 +48186,47 @@ }, "type": "object" }, + "StatefulPolicy": { + "id": "StatefulPolicy", + "properties": { + "preservedState": { + "$ref": "StatefulPolicyPreservedState" + } + }, + "type": "object" + }, + "StatefulPolicyPreservedState": { + "description": "Configuration of preserved resources.", + "id": "StatefulPolicyPreservedState", + "properties": { + "disks": { + "additionalProperties": { + "$ref": "StatefulPolicyPreservedStateDiskDevice" + }, + "description": "Disks created on the instances that will be preserved on instance delete, update, etc. This map is keyed with the device names of the disks.", + "type": "object" + } + }, + "type": "object" + }, + "StatefulPolicyPreservedStateDiskDevice": { + "id": "StatefulPolicyPreservedStateDiskDevice", + "properties": { + "autoDelete": { + "description": "These stateful disks will never be deleted during autohealing, update or VM instance recreate operations. This flag is used to configure if the disk should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. 
Note: disks attached in READ_ONLY mode cannot be auto-deleted.", + "enum": [ + "NEVER", + "ON_PERMANENT_INSTANCE_DELETION" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "Subnetwork": { "description": "Represents a Subnetwork resource.\n\nA subnetwork (also known as a subnet) is a logical partition of a Virtual Private Cloud network with one primary IP range and zero or more secondary IP ranges. For more information, read Virtual Private Cloud (VPC) Network. (== resource_for {$api_version}.subnetworks ==)", "id": "Subnetwork", @@ -44654,7 +48258,11 @@ "type": "string" }, "ipCidrRange": { - "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. This field can be set only at resource creation time.", + "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 100.64.0.0/10. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. This field is set at resource creation time. This may be a RFC 1918 IP range, or a privately routed, non-RFC 1918 IP range, not belonging to Google. The range can be expanded after creation using expandIpCidrRange.", + "type": "string" + }, + "ipv6CidrRange": { + "description": "[Output Only] The range of internal IPv6 addresses that are owned by this subnetwork.", "type": "string" }, "kind": { @@ -44664,7 +48272,7 @@ }, "logConfig": { "$ref": "SubnetworkLogConfig", - "description": "This field denotes the VPC flow logging options for this subnetwork. If logging is enabled, logs are exported to Stackdriver." + "description": "This field denotes the VPC flow logging options for this subnetwork. If logging is enabled, logs are exported to Cloud Logging." }, "name": { "description": "The name of the resource, provided by the client when initially creating the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", @@ -44679,6 +48287,20 @@ "description": "Whether the VMs in this subnet can access Google services without assigned external IP addresses. This field can be both set at resource creation time and updated using setPrivateIpGoogleAccess.", "type": "boolean" }, + "privateIpv6GoogleAccess": { + "description": "The private IPv6 google access type for the VMs in this subnet. This is an expanded field of enablePrivateV6Access. If both fields are set, privateIpv6GoogleAccess will take priority.\n\nThis field can be both set at resource creation time and updated using patch.", + "enum": [ + "DISABLE_GOOGLE_ACCESS", + "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE", + "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "purpose": { "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. 
If unspecified, the purpose defaults to PRIVATE_RFC_1918. The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", "enum": [ @@ -44763,6 +48385,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -44785,6 +48414,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -44816,6 +48446,7 @@ "", "", "", + "", "" ], "type": "string" @@ -44897,6 +48528,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -44928,6 +48560,7 @@ "", "", "", + "", "" ], "type": "string" @@ -45025,7 +48658,7 @@ "id": "SubnetworkSecondaryRange", "properties": { "ipCidrRange": { - "description": "The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported.", + "description": "The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported. This may be a RFC 1918 IP range, or a privately, non-RFC 1918 IP range, not belonging to Google.", "type": "string" }, "rangeName": { @@ -45077,6 +48710,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -45108,6 +48742,7 @@ "", "", "", + "", "" ], "type": "string" @@ -45216,6 +48851,175 @@ }, "type": "object" }, + "TargetGrpcProxy": { + "description": "Represents a Target gRPC Proxy resource.\n\nA target gRPC proxy is a component of load balancers intended for load balancing gRPC traffic. Global forwarding rules reference a target gRPC proxy. The Target gRPC Proxy references a URL map which specifies how traffic routes to gRPC backend services. (== resource_for {$api_version}.targetGrpcProxies ==)", + "id": "TargetGrpcProxy", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a TargetGrpcProxy. An up-to-date fingerprint must be provided in order to patch/update the TargetGrpcProxy; otherwise, the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve the TargetGrpcProxy.", + "format": "byte", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource type. 
The server generates this identifier.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#targetGrpcProxy", + "description": "[Output Only] Type of the resource. Always compute#targetGrpcProxy for target grpc proxies.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.targetGrpcProxies.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL with id for the resource.", + "type": "string" + }, + "urlMap": { + "description": "URL to the UrlMap resource that defines the mapping from URL to the BackendService. The protocol field in the BackendService must be set to GRPC.", + "type": "string" + }, + "validateForProxyless": { + "description": "If true, indicates that the BackendServices referenced by the urlMap may be accessed by gRPC applications without using a sidecar proxy. This will enable configuration checks on urlMap and its referenced BackendServices to not allow unsupported features. A gRPC application must use \"xds:///\" scheme in the target URI of the service it is connecting to. If false, indicates that the BackendServices referenced by the urlMap will be accessed by gRPC applications via a sidecar proxy. In this case, a gRPC application must not use \"xds:///\" scheme in the target URI of the service it is connecting to", + "type": "boolean" + } + }, + "type": "object" + }, + "TargetGrpcProxyList": { + "id": "TargetGrpcProxyList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of TargetGrpcProxy resources.", + "items": { + "$ref": "TargetGrpcProxy" + }, + "type": "array" + }, + "kind": { + "default": "compute#targetGrpcProxyList", + "description": "[Output Only] Type of the resource. Always compute#targetGrpcProxy for target grpc proxies.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "TargetHttpProxiesScopedList": { "id": "TargetHttpProxiesScopedList", "properties": { @@ -45248,6 +49052,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -45279,6 +49084,7 @@ "", "", "", + "", "" ], "type": "string" @@ -45322,6 +49128,11 @@ "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a TargetHttpProxy. An up-to-date fingerprint must be provided in order to patch/update the TargetHttpProxy; otherwise, the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve the TargetHttpProxy.", + "format": "byte", + "type": "string" + }, "id": { "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", "format": "uint64", @@ -45379,6 +49190,13 @@ "selfLink": { "description": "[Output Only] Server-defined URL for this resource.", "type": "string" + }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" @@ -45433,6 +49251,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -45464,6 +49283,7 @@ "", "", "", + "", "" ], "type": "string" @@ -45527,6 +49347,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -45558,6 +49379,7 @@ "", "", "", + "", "" ], "type": "string" @@ -45717,6 +49539,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -45739,6 +49568,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -45770,6 +49600,7 @@ "", "", "", + "", "" ], "type": "string" @@ -45851,6 +49682,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -45882,6 +49714,7 @@ "", "", "", + "", "" ], "type": "string" @@ -45993,6 +49826,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -46015,6 +49855,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -46046,6 +49887,7 @@ "", "", "", + "", "" ], "type": "string" @@ -46127,6 +49969,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -46158,6 +50001,7 @@ "", "", "", + "", "" ], "type": "string" @@ -46221,6 +50065,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -46252,6 +50097,7 @@ "", "", "", + "", "" ], "type": "string" @@ -46305,7 +50151,7 @@ "type": "number" }, "healthChecks": { - "description": "The URL of the HttpHealthCheck resource. A member instance in this pool is considered healthy if and only if the health checks pass. An empty list means all member instances will be considered healthy at all times. Only HttpHealthChecks are supported. Only one health check may be specified.", + "description": "The URL of the HttpHealthCheck resource. A member instance in this pool is considered healthy if and only if the health checks pass. An empty list means all member instances will be considered healthy at all times. 
Only legacy HttpHealthChecks are supported. Only one health check may be specified.", "items": { "type": "string" }, @@ -46394,6 +50240,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -46416,6 +50269,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -46447,6 +50301,7 @@ "", "", "", + "", "" ], "type": "string" @@ -46545,6 +50400,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -46576,6 +50432,7 @@ "", "", "", + "", "" ], "type": "string" @@ -46691,6 +50548,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -46722,6 +50580,7 @@ "", "", "", + "", "" ], "type": "string" @@ -46914,6 +50773,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -46945,6 +50805,7 @@ "", "", "", + "", "" ], "type": "string" @@ -47104,6 +50965,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -47135,6 +50997,7 @@ "", "", "", + "", "" ], "type": "string" @@ -47276,6 +51139,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -47298,6 +51168,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -47329,6 +51200,7 @@ "", "", "", + "", "" ], "type": "string" @@ -47410,6 +51282,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -47441,6 +51314,7 @@ "", "", "", + "", "" ], "type": "string" @@ -47504,6 +51378,249 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "TestFailure": { + "id": "TestFailure", + "properties": { + "actualService": { + "description": "BackendService or BackendBucket returned by load balancer.", + "type": "string" + }, + "expectedService": { + "description": "Expected BackendService or BackendBucket resource the given URL should be mapped to.", + "type": "string" + }, + "host": { + "description": "Host portion of the URL.", + "type": "string" + }, + "path": { + "description": "Path portion including query parameters in the URL.", + "type": "string" + } + }, + "type": "object" + }, + "TestPermissionsRequest": { + "id": "TestPermissionsRequest", + "properties": { + "permissions": { + "description": "The set of permissions to check for the 'resource'. Permissions with wildcards (such as '*' or 'storage.*') are not allowed.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "TestPermissionsResponse": { + "id": "TestPermissionsResponse", + "properties": { + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "UrlMap": { + "description": "Represents a URL Map resource.\n\nGoogle Compute Engine has two URL Map resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/urlMaps) * [Regional](/compute/docs/reference/rest/{$api_version}/regionUrlMaps)\n\nA URL map resource is a component of certain types of GCP load balancers and Traffic Director.\n\n* urlMaps are used by external HTTP(S) load balancers and Traffic Director. * regionUrlMaps are used by internal HTTP(S) load balancers.\n\nFor a list of supported URL map features by load balancer type, see the Load balancing features: Routing and traffic management table.\n\nFor a list of supported URL map features for Traffic Director, see the Traffic Director features: Routing and traffic management table.\n\nThis resource defines mappings from host names and URL paths to either a backend service or a backend bucket.\n\nTo use the global urlMaps resource, the backend service must have a loadBalancingScheme of either EXTERNAL or INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend service must have a loadBalancingScheme of INTERNAL_MANAGED. 
For more information, read URL Map Concepts.", + "id": "UrlMap", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "defaultRouteAction": { + "$ref": "HttpRouteAction", + "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within defaultRouteAction.\ndefaultRouteAction has no effect when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." + }, + "defaultService": { + "description": "The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.\ndefaultService has no effect when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true.", + "type": "string" + }, + "defaultUrlRedirect": { + "$ref": "HttpRedirectAction", + "description": "When none of the specified hostRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set.\nNot supported when the URL map is bound to target gRPC proxy." + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a UrlMap. An up-to-date fingerprint must be provided in order to update the UrlMap, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a UrlMap.", + "format": "byte", + "type": "string" + }, + "headerAction": { + "$ref": "HttpHeaderAction", + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here take effect after headerAction specified under pathMatcher.\nNote that headerAction is not supported for Loadbalancers that have their loadBalancingScheme set to EXTERNAL.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." + }, + "hostRules": { + "description": "The list of HostRules to use against the URL.", + "items": { + "$ref": "HostRule" + }, + "type": "array" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#urlMap", + "description": "[Output Only] Type of the resource. Always compute#urlMaps for url maps.", + "type": "string" + }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "pathMatchers": { + "description": "The list of named PathMatchers to use against the URL.", + "items": { + "$ref": "PathMatcher" + }, + "type": "array" + }, + "region": { + "description": "[Output Only] URL of the region where the regional URL map resides. This field is not applicable to global URL maps. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "tests": { + "description": "The list of expected URL mapping tests. Request to update this UrlMap will succeed only if all of the test cases pass. You can specify a maximum of 100 tests per UrlMap.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true.", + "items": { + "$ref": "UrlMapTest" + }, + "type": "array" + } + }, + "type": "object" + }, + "UrlMapList": { + "description": "Contains a list of UrlMap resources.", + "id": "UrlMapList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of UrlMap resources.", + "items": { + "$ref": "UrlMap" + }, + "type": "array" + }, + "kind": { + "default": "compute#urlMapList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -47514,241 +51631,6 @@ ], "enumDescriptions": [ "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "TestFailure": { - "id": "TestFailure", - "properties": { - "actualService": { - "type": "string" - }, - "expectedService": { - "type": "string" - }, - "host": { - "type": "string" - }, - "path": { - "type": "string" - } - }, - "type": "object" - }, - "TestPermissionsRequest": { - "id": "TestPermissionsRequest", - "properties": { - "permissions": { - "description": "The set of permissions to check for the 'resource'. Permissions with wildcards (such as '*' or 'storage.*') are not allowed.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "TestPermissionsResponse": { - "id": "TestPermissionsResponse", - "properties": { - "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "UrlMap": { - "description": "Represents a URL Map resource.\n\nGoogle Compute Engine has two URL Map resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/urlMaps) * [Regional](/compute/docs/reference/rest/{$api_version}/regionUrlMaps)\n\nA URL map resource is a component of certain types of GCP load balancers and Traffic Director.\n\n* urlMaps are used by external HTTP(S) load balancers and Traffic Director. 
* regionUrlMaps are used by internal HTTP(S) load balancers.\n\nThis resource defines mappings from host names and URL paths to either a backend service or a backend bucket.\n\nTo use the global urlMaps resource, the backend service must have a loadBalancingScheme of either EXTERNAL or INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend service must have a loadBalancingScheme of INTERNAL_MANAGED. For more information, read URL Map Concepts.", - "id": "UrlMap", - "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "defaultRouteAction": { - "$ref": "HttpRouteAction", - "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set." - }, - "defaultService": { - "description": "The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.", - "type": "string" - }, - "defaultUrlRedirect": { - "$ref": "HttpRedirectAction", - "description": "When none of the specified hostRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set." - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "fingerprint": { - "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a UrlMap. An up-to-date fingerprint must be provided in order to update the UrlMap, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a UrlMap.", - "format": "byte", - "type": "string" - }, - "headerAction": { - "$ref": "HttpHeaderAction", - "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here take effect after headerAction specified under pathMatcher." - }, - "hostRules": { - "description": "The list of HostRules to use against the URL.", - "items": { - "$ref": "HostRule" - }, - "type": "array" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#urlMap", - "description": "[Output Only] Type of the resource. Always compute#urlMaps for url maps.", - "type": "string" - }, - "name": { - "description": "Name of the resource. 
Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "pathMatchers": { - "description": "The list of named PathMatchers to use against the URL.", - "items": { - "$ref": "PathMatcher" - }, - "type": "array" - }, - "region": { - "description": "[Output Only] URL of the region where the regional URL map resides. This field is not applicable to global URL maps. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "tests": { - "description": "The list of expected URL mapping tests. Request to update this UrlMap will succeed only if all of the test cases pass. You can specify a maximum of 100 tests per UrlMap.", - "items": { - "$ref": "UrlMapTest" - }, - "type": "array" - } - }, - "type": "object" - }, - "UrlMapList": { - "description": "Contains a list of UrlMap resources.", - "id": "UrlMapList", - "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of UrlMap resources.", - "items": { - "$ref": "UrlMap" - }, - "type": "array" - }, - "kind": { - "default": "compute#urlMapList", - "description": "Type of resource.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ "", "", "", @@ -47820,7 +51702,7 @@ "type": "string" }, "host": { - "description": "Host portion of the URL.", + "description": "Host portion of the URL. 
If headers contains a host header, then host must also match the header value.", "type": "string" }, "path": { @@ -47828,7 +51710,7 @@ "type": "string" }, "service": { - "description": "Expected BackendService resource the given URL should be mapped to.", + "description": "Expected BackendService or BackendBucket resource the given URL should be mapped to.\nservice cannot be set if expectedRedirectResponseCode is set.", "type": "string" } }, @@ -47889,6 +51771,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -47911,6 +51800,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -47942,6 +51832,7 @@ "", "", "", + "", "" ], "type": "string" @@ -48005,6 +51896,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -48036,6 +51928,7 @@ "", "", "", + "", "" ], "type": "string" @@ -48191,6 +52084,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -48222,6 +52116,7 @@ "", "", "", + "", "" ], "type": "string" @@ -48374,6 +52269,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -48405,6 +52301,7 @@ "", "", "", + "", "" ], "type": "string" @@ -48535,6 +52432,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -48557,6 +52461,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -48588,6 +52493,7 @@ "", "", "", + "", "" ], "type": "string" @@ -48669,6 +52575,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -48700,6 +52607,7 @@ "", "", "", + "", "" ], "type": "string" @@ -48877,6 +52785,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -48908,6 +52817,7 @@ "", "", "", + "", "" ], "type": "string" @@ -48940,7 +52850,7 @@ "type": "object" }, "VpnTunnel": { - "description": "Represents a Cloud VPN Tunnel resource.\n\nFor more information about VPN, read the the Cloud VPN Overview. (== resource_for {$api_version}.vpnTunnels ==)", + "description": "Represents a Cloud VPN Tunnel resource.\n\nFor more information about VPN, read the the Cloud VPN Overview. 
(== resource_for {$api_version}.vpnTunnels ==)", "id": "VpnTunnel", "properties": { "creationTimestamp": { @@ -49109,6 +53019,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -49131,6 +53048,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -49162,6 +53080,7 @@ "", "", "", + "", "" ], "type": "string" @@ -49243,6 +53162,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -49274,6 +53194,7 @@ "", "", "", + "", "" ], "type": "string" @@ -49337,6 +53258,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -49368,6 +53290,7 @@ "", "", "", + "", "" ], "type": "string" @@ -49443,7 +53366,7 @@ }, "headerAction": { "$ref": "HttpHeaderAction", - "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nheaderAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap." + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nheaderAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap.\nNote that headerAction is not supported for Loadbalancers that have their loadBalancingScheme set to EXTERNAL.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "weight": { "description": "Specifies the fraction of traffic sent to backendService, computed as weight / (sum of all weightedBackendService weights in routeAction) .\nThe selection of a backend service is determined only for new traffic. 
Once a user's request has been directed to a backendService, subsequent requests will be sent to the same backendService as determined by the BackendService's session affinity policy.\nThe value must be between 0 and 1000", @@ -49502,6 +53425,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -49533,6 +53457,7 @@ "", "", "", + "", "" ], "type": "string" @@ -49697,6 +53622,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -49728,6 +53654,7 @@ "", "", "", + "", "" ], "type": "string" diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index e40431f5953..2f65969c2d4 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -178,9 +178,12 @@ func New(client *http.Client) (*Service, error) { s.RegionCommitments = NewRegionCommitmentsService(s) s.RegionDiskTypes = NewRegionDiskTypesService(s) s.RegionDisks = NewRegionDisksService(s) + s.RegionHealthCheckServices = NewRegionHealthCheckServicesService(s) s.RegionHealthChecks = NewRegionHealthChecksService(s) s.RegionInstanceGroupManagers = NewRegionInstanceGroupManagersService(s) s.RegionInstanceGroups = NewRegionInstanceGroupsService(s) + s.RegionNetworkEndpointGroups = NewRegionNetworkEndpointGroupsService(s) + s.RegionNotificationEndpoints = NewRegionNotificationEndpointsService(s) s.RegionOperations = NewRegionOperationsService(s) s.RegionSslCertificates = NewRegionSslCertificatesService(s) s.RegionTargetHttpProxies = NewRegionTargetHttpProxiesService(s) @@ -196,6 +199,7 @@ func New(client *http.Client) (*Service, error) { s.SslCertificates = NewSslCertificatesService(s) s.SslPolicies = NewSslPoliciesService(s) s.Subnetworks = NewSubnetworksService(s) + s.TargetGrpcProxies = NewTargetGrpcProxiesService(s) s.TargetHttpProxies = NewTargetHttpProxiesService(s) s.TargetHttpsProxies = NewTargetHttpsProxiesService(s) s.TargetInstances = NewTargetInstancesService(s) @@ -296,12 +300,18 @@ type Service struct { RegionDisks *RegionDisksService + RegionHealthCheckServices *RegionHealthCheckServicesService + RegionHealthChecks *RegionHealthChecksService RegionInstanceGroupManagers *RegionInstanceGroupManagersService RegionInstanceGroups *RegionInstanceGroupsService + RegionNetworkEndpointGroups *RegionNetworkEndpointGroupsService + + RegionNotificationEndpoints *RegionNotificationEndpointsService + RegionOperations *RegionOperationsService RegionSslCertificates *RegionSslCertificatesService @@ -332,6 +342,8 @@ type Service struct { Subnetworks *SubnetworksService + TargetGrpcProxies *TargetGrpcProxiesService + TargetHttpProxies *TargetHttpProxiesService TargetHttpsProxies *TargetHttpsProxiesService @@ -724,6 +736,15 @@ type RegionDisksService struct { s *Service } +func NewRegionHealthCheckServicesService(s *Service) *RegionHealthCheckServicesService { + rs := &RegionHealthCheckServicesService{s: s} + return rs +} + +type RegionHealthCheckServicesService struct { + s *Service +} + func NewRegionHealthChecksService(s *Service) *RegionHealthChecksService { rs := &RegionHealthChecksService{s: s} return rs @@ -751,6 +772,24 @@ type RegionInstanceGroupsService struct { s *Service } +func NewRegionNetworkEndpointGroupsService(s *Service) 
*RegionNetworkEndpointGroupsService { + rs := &RegionNetworkEndpointGroupsService{s: s} + return rs +} + +type RegionNetworkEndpointGroupsService struct { + s *Service +} + +func NewRegionNotificationEndpointsService(s *Service) *RegionNotificationEndpointsService { + rs := &RegionNotificationEndpointsService{s: s} + return rs +} + +type RegionNotificationEndpointsService struct { + s *Service +} + func NewRegionOperationsService(s *Service) *RegionOperationsService { rs := &RegionOperationsService{s: s} return rs @@ -886,6 +925,15 @@ type SubnetworksService struct { s *Service } +func NewTargetGrpcProxiesService(s *Service) *TargetGrpcProxiesService { + rs := &TargetGrpcProxiesService{s: s} + return rs +} + +type TargetGrpcProxiesService struct { + s *Service +} + func NewTargetHttpProxiesService(s *Service) *TargetHttpProxiesService { rs := &TargetHttpProxiesService{s: s} return rs @@ -1061,14 +1109,14 @@ type AcceleratorType struct { // compute#acceleratorType for accelerator types. Kind string `json:"kind,omitempty"` - // MaximumCardsPerInstance: [Output Only] Maximum accelerator cards - // allowed per instance. + // MaximumCardsPerInstance: [Output Only] Maximum number of accelerator + // cards allowed per instance. MaximumCardsPerInstance int64 `json:"maximumCardsPerInstance,omitempty"` // Name: [Output Only] Name of the resource. Name string `json:"name,omitempty"` - // SelfLink: [Output Only] Server-defined fully-qualified URL for this + // SelfLink: [Output Only] Server-defined, fully qualified URL for this // resource. SelfLink string `json:"selfLink,omitempty"` @@ -1130,6 +1178,9 @@ type AcceleratorTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *AcceleratorTypeAggregatedListWarning `json:"warning,omitempty"` @@ -1184,6 +1235,7 @@ type AcceleratorTypeAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -1340,6 +1392,7 @@ type AcceleratorTypeListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -1475,6 +1528,7 @@ type AcceleratorTypesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -1738,6 +1792,7 @@ type Address struct { // "DNS_RESOLVER" // "GCE_ENDPOINT" // "NAT_AUTO" + // "SHARED_LOADBALANCER_VIP" // "VPC_PEERING" Purpose string `json:"purpose,omitempty"` @@ -1821,6 +1876,9 @@ type AddressAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *AddressAggregatedListWarning `json:"warning,omitempty"` @@ -1875,6 +1933,7 @@ type AddressAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -2030,6 +2089,7 @@ type AddressListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -2163,6 +2223,7 @@ type AddressesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -2318,7 +2379,7 @@ func (s *AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk) } // AllocationSpecificSKUAllocationReservedInstanceProperties: Properties -// of the SKU instances being reserved. +// of the SKU instances being reserved. Next ID: 9 type AllocationSpecificSKUAllocationReservedInstanceProperties struct { // GuestAccelerators: Specifies accelerator type and count. GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` @@ -2698,12 +2759,12 @@ func (s *AttachedDiskInitializeParams) MarshalJSON() ([]byte, error) { // // Example Policy with multiple AuditConfigs: // -// { "audit_configs": [ { "service": "allServices" "audit_log_configs": +// { "audit_configs": [ { "service": "allServices", "audit_log_configs": // [ { "log_type": "DATA_READ", "exempted_members": [ -// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE", }, { -// "log_type": "ADMIN_READ", } ] }, { "service": -// "sampleservice.googleapis.com" "audit_log_configs": [ { "log_type": -// "DATA_READ", }, { "log_type": "DATA_WRITE", "exempted_members": [ +// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { +// "log_type": "ADMIN_READ" } ] }, { "service": +// "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": +// "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ // "user:aliya@example.com" ] } ] } ] } // // For sampleservice, this policy enables DATA_READ, DATA_WRITE and @@ -2750,7 +2811,7 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { // // { "audit_log_configs": [ { "log_type": "DATA_READ", // "exempted_members": [ "user:jose@example.com" ] }, { "log_type": -// "DATA_WRITE", } ] } +// "DATA_WRITE" } ] } // // This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting // jose@example.com from DATA_READ logging. @@ -2981,6 +3042,9 @@ type AutoscalerAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *AutoscalerAggregatedListWarning `json:"warning,omitempty"` @@ -3035,6 +3099,7 @@ type AutoscalerAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -3190,6 +3255,7 @@ type AutoscalerListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -3327,6 +3393,7 @@ type AutoscalerStatusDetails struct { // "MISSING_CUSTOM_METRIC_DATA_POINTS" // "MISSING_LOAD_BALANCING_DATA_POINTS" // "MODE_OFF" + // "MODE_ONLY_SCALE_OUT" // "MODE_ONLY_UP" // "MORE_THAN_ONE_BACKEND_SERVICE" // "NOT_ENOUGH_QUOTA_AVAILABLE" @@ -3416,6 +3483,7 @@ type AutoscalersScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -3539,9 +3607,12 @@ type AutoscalingPolicy struct { // Possible values: // "OFF" // "ON" + // "ONLY_SCALE_OUT" // "ONLY_UP" Mode string `json:"mode,omitempty"` + ScaleInControl *AutoscalingPolicyScaleInControl `json:"scaleInControl,omitempty"` + // ForceSendFields is a list of field names (e.g. "CoolDownPeriodSec") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -3636,8 +3707,7 @@ type AutoscalingPolicyCustomMetricUtilization struct { // decrease proportionally to the metric. // // For example, a good metric to use as a utilization_target is - // compute.googleapis.com/instance/network/received_bytes_count. The - // autoscaler will work to keep this value constant for each of the + // https://www.googleapis.com/compute/v1/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the // instances. UtilizationTarget float64 `json:"utilizationTarget,omitempty"` @@ -3735,6 +3805,46 @@ func (s *AutoscalingPolicyLoadBalancingUtilization) UnmarshalJSON(data []byte) e return nil } +// AutoscalingPolicyScaleInControl: Configuration that allows for slower +// scale in so that even if Autoscaler recommends an abrupt scale in of +// a MIG, it will be throttled as specified by the parameters below. +type AutoscalingPolicyScaleInControl struct { + // MaxScaledInReplicas: Maximum allowed number (or %) of VMs that can be + // deducted from the peak recommendation during the window autoscaler + // looks at when computing recommendations. Possibly all these VMs can + // be deleted at once so user service needs to be prepared to lose that + // many VMs in one step. + MaxScaledInReplicas *FixedOrPercent `json:"maxScaledInReplicas,omitempty"` + + // TimeWindowSec: How long back autoscaling should look when computing + // recommendations to include directives regarding slower scale in, as + // described above. + TimeWindowSec int64 `json:"timeWindowSec,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxScaledInReplicas") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaxScaledInReplicas") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AutoscalingPolicyScaleInControl) MarshalJSON() ([]byte, error) { + type NoMethod AutoscalingPolicyScaleInControl + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Backend: Message containing information of one individual backend. type Backend struct { // BalancingMode: Specifies the balancing mode for the backend. @@ -3783,8 +3893,10 @@ type Backend struct { // capacity (based on UTILIZATION, RATE or CONNECTION). Default value is // 1, which means the group will serve up to 100% of its configured // capacity (depending on balancingMode). A setting of 0 means the group - // is completely drained, offering 0% of its available Capacity. Valid - // range is [0.0,1.0]. + // is completely drained, offering 0% of its available capacity. Valid + // range is 0.0 and [0.1,1.0]. You cannot configure a setting larger + // than 0 and smaller than 0.1. You cannot configure a setting of 0 when + // there is only one backend attached to the backend service. // // This cannot be used for internal load balancing. CapacityScaler float64 `json:"capacityScaler,omitempty"` @@ -4146,6 +4258,7 @@ type BackendBucketListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -4226,8 +4339,13 @@ func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { // BackendService: Represents a Backend Service resource. // -// A backend service contains configuration values for Google Cloud -// Platform load balancing services. +// A backend service defines how Google Cloud load balancers distribute +// traffic. The backend service configuration contains a set of values, +// such as the protocol used to connect to backends, various +// distribution and session settings, health checks, and timeouts. These +// settings provide fine-grained control over how your load balancer +// behaves. Most of the settings have default values that allow for easy +// configuration if you need to get started quickly. // // Backend services in Google Compute Engine can be either regionally or // globally scoped. @@ -4238,13 +4356,22 @@ func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { // [Regional](/compute/docs/reference/rest/{$api_version}/regionBackendSe // rvices) // -// For more information, read Backend Services. +// For more information, see Backend Services. // // (== resource_for {$api_version}.backendService ==) type BackendService struct { - // AffinityCookieTtlSec: If set to 0, the cookie is non-persistent and - // lasts only until the end of the browser session (or equivalent). The - // maximum allowed value is one day (86,400). + // AffinityCookieTtlSec: Lifetime of cookies in seconds. 
Only applicable + // if the loadBalancingScheme is EXTERNAL, INTERNAL_SELF_MANAGED, or + // INTERNAL_MANAGED, the protocol is HTTP or HTTPS, and the + // sessionAffinity is GENERATED_COOKIE, or HTTP_COOKIE. + // + // If set to 0, the cookie is non-persistent and lasts only until the + // end of the browser session (or equivalent). The maximum allowed value + // is one day (86,400). + // + // Not supported when the backend service is referenced by a URL map + // that is bound to target gRPC proxy that has validateForProxyless + // field set to true. AffinityCookieTtlSec int64 `json:"affinityCookieTtlSec,omitempty"` // Backends: The list of backends that serve this BackendService. @@ -4263,6 +4390,10 @@ type BackendService struct { // // - A global backend service with the load_balancing_scheme set to // INTERNAL_SELF_MANAGED. + // + // Not supported when the backend service is referenced by a URL map + // that is bound to target gRPC proxy that has validateForProxyless + // field set to true. CircuitBreakers *CircuitBreakers `json:"circuitBreakers,omitempty"` ConnectionDraining *ConnectionDraining `json:"connectionDraining,omitempty"` @@ -4282,6 +4413,10 @@ type BackendService struct { // // - A global backend service with the load_balancing_scheme set to // INTERNAL_SELF_MANAGED. + // + // Not supported when the backend service is referenced by a URL map + // that is bound to target gRPC proxy that has validateForProxyless + // field set to true. ConsistentHash *ConsistentHashLoadBalancerSettings `json:"consistentHash,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -4319,12 +4454,14 @@ type BackendService struct { // HealthChecks: The list of URLs to the healthChecks, httpHealthChecks // (legacy), or httpsHealthChecks (legacy) resource for health checking // this backend service. Not all backend services support legacy health - // checks. See Load balancer guide. Currently at most one health check - // can be specified. Backend services with instance group or zonal NEG - // backends must have a health check. Backend services with internet NEG - // backends must not have a health check. A health check must + // checks. See Load balancer guide. Currently, at most one health check + // can be specified for each backend service. Backend services with + // instance group or zonal NEG backends must have a health check. + // Backend services with internet or serverless NEG backends must not + // have a health check. HealthChecks []string `json:"healthChecks,omitempty"` + // Iap: The configurations for Identity-Aware Proxy on this resource. Iap *BackendServiceIAP `json:"iap,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -4369,8 +4506,7 @@ type BackendService struct { // - MAGLEV: used as a drop in replacement for the ring hash load // balancer. Maglev is not as stable as ring hash but has faster table // lookup build times and host selection times. For more information - // about Maglev, refer to https://ai.google/research/pubs/pub44824 - // + // about Maglev, see https://ai.google/research/pubs/pub44824 // // This field is applicable to either: // - A regional backend service with the service_protocol set to HTTP, @@ -4379,9 +4515,13 @@ type BackendService struct { // - A global backend service with the load_balancing_scheme set to // INTERNAL_SELF_MANAGED. 
// - // If sessionAffinity is not NONE, and this field is not set to >MAGLEV + // If sessionAffinity is not NONE, and this field is not set to MAGLEV // or RING_HASH, session affinity settings will not take effect. // + // Only the default ROUND_ROBIN policy is supported when the backend + // service is referenced by a URL map that is bound to target gRPC proxy + // that has validateForProxyless field set to true. + // // Possible values: // "INVALID_LB_POLICY" // "LEAST_REQUEST" @@ -4421,6 +4561,10 @@ type BackendService struct { // // - A global backend service with the load_balancing_scheme set to // INTERNAL_SELF_MANAGED. + // + // Not supported when the backend service is referenced by a URL map + // that is bound to target gRPC proxy that has validateForProxyless + // field set to true. OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"` // Port: Deprecated in favor of portName. The TCP port to connect on the @@ -4446,12 +4590,16 @@ type BackendService struct { // Protocol: The protocol this BackendService uses to communicate with // backends. // - // Possible values are HTTP, HTTPS, HTTP2, TCP, SSL, or UDP. depending - // on the chosen load balancer or Traffic Director configuration. Refer - // to the documentation for the load balancer or for Traffic Director - // for more information. + // Possible values are HTTP, HTTPS, HTTP2, TCP, SSL, UDP or GRPC. + // depending on the chosen load balancer or Traffic Director + // configuration. Refer to the documentation for the load balancer or + // for Traffic Director for more information. + // + // Must be set to GRPC when the backend service is referenced by a URL + // map that is bound to target gRPC proxy. // // Possible values: + // "GRPC" // "HTTP" // "HTTP2" // "HTTPS" @@ -4488,6 +4636,10 @@ type BackendService struct { // INTERNAL_MANAGED, possible values are NONE, CLIENT_IP, // GENERATED_COOKIE, HEADER_FIELD, or HTTP_COOKIE. // + // Not supported when the backend service is referenced by a URL map + // that is bound to target gRPC proxy that has validateForProxyless + // field set to true. + // // Possible values: // "CLIENT_IP" // "CLIENT_IP_PORT_PROTO" @@ -4499,7 +4651,7 @@ type BackendService struct { SessionAffinity string `json:"sessionAffinity,omitempty"` // TimeoutSec: The backend service timeout has a different meaning - // depending on the type of load balancer. For more information read, + // depending on the type of load balancer. For more information see, // Backend service settings The default is 30 seconds. TimeoutSec int64 `json:"timeoutSec,omitempty"` @@ -4556,6 +4708,9 @@ type BackendServiceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *BackendServiceAggregatedListWarning `json:"warning,omitempty"` @@ -4610,6 +4765,7 @@ type BackendServiceAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -4806,6 +4962,10 @@ func (s *BackendServiceFailoverPolicy) UnmarshalJSON(data []byte) error { } type BackendServiceGroupHealth struct { + // Annotations: Metadata defined as annotations on the network endpoint + // group. 
+ Annotations map[string]string `json:"annotations,omitempty"` + // HealthStatus: Health state of the backend instances or endpoints in // requested instance or network endpoint group, determined based on // configured health checks. @@ -4819,7 +4979,7 @@ type BackendServiceGroupHealth struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "HealthStatus") to + // ForceSendFields is a list of field names (e.g. "Annotations") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -4827,7 +4987,7 @@ type BackendServiceGroupHealth struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HealthStatus") to include + // NullFields is a list of field names (e.g. "Annotations") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -4844,10 +5004,18 @@ func (s *BackendServiceGroupHealth) MarshalJSON() ([]byte, error) { // BackendServiceIAP: Identity-Aware Proxy type BackendServiceIAP struct { + // Enabled: Whether the serving infrastructure will authenticate and + // authorize all incoming requests. If true, the oauth2ClientId and + // oauth2ClientSecret fields must be non-empty. Enabled bool `json:"enabled,omitempty"` + // Oauth2ClientId: OAuth2 client ID to use for the authentication flow. Oauth2ClientId string `json:"oauth2ClientId,omitempty"` + // Oauth2ClientSecret: OAuth2 client secret to use for the + // authentication flow. For security reasons, this value cannot be + // retrieved via the API. Instead, the SHA-256 hash of the value is + // returned in the oauth2ClientSecretSha256 field. Oauth2ClientSecret string `json:"oauth2ClientSecret,omitempty"` // Oauth2ClientSecretSha256: [Output Only] SHA256 hash value for the @@ -4955,6 +5123,7 @@ type BackendServiceListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -5167,6 +5336,7 @@ type BackendServicesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -5247,6 +5417,10 @@ func (s *BackendServicesScopedListWarningData) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { + // BindingId: A client-specified ID for this binding. Expected to be + // globally unique to support the internal bindings-by-ID API. + BindingId string `json:"bindingId,omitempty"` + // Condition: The condition that is associated with this binding. // // If the condition evaluates to `true`, then this binding applies to @@ -5315,7 +5489,7 @@ type Binding struct { // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` - // ForceSendFields is a list of field names (e.g. "Condition") to + // ForceSendFields is a list of field names (e.g. "BindingId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -5323,7 +5497,7 @@ type Binding struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Condition") to include in + // NullFields is a list of field names (e.g. "BindingId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -5478,6 +5652,18 @@ func (s *CircuitBreakers) MarshalJSON() ([]byte, error) { // discounted rates. For full details, read Signing Up for Committed Use // Discounts. (== resource_for {$api_version}.regionCommitments ==) type Commitment struct { + // Category: The category of the commitment. Category MACHINE specifies + // commitments composed of machine resources such as VCPU or MEMORY, + // listed in resources. Category LICENSE specifies commitments composed + // of software licenses, listed in licenseResources. Note that only + // MACHINE commitments should have a Type specified. + // + // Possible values: + // "CATEGORY_UNSPECIFIED" + // "LICENSE" + // "MACHINE" + Category string `json:"category,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -5498,6 +5684,10 @@ type Commitment struct { // for commitments. Kind string `json:"kind,omitempty"` + // LicenseResource: The license specification required as part of a + // license commitment. + LicenseResource *LicenseResourceCommitment `json:"licenseResource,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -5554,21 +5744,20 @@ type Commitment struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Category") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Category") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } @@ -5601,6 +5790,9 @@ type CommitmentAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *CommitmentAggregatedListWarning `json:"warning,omitempty"` @@ -5655,6 +5847,7 @@ type CommitmentAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -5810,6 +6003,7 @@ type CommitmentListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -5944,6 +6138,7 @@ type CommitmentsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -6031,6 +6226,7 @@ type Condition struct { // "ATTRIBUTION" // "AUTHORITY" // "CREDENTIALS_TYPE" + // "CREDS_ASSERTION" // "JUSTIFICATION_TYPE" // "NO_ATTR" // "SECURITY_REALM" @@ -6087,6 +6283,37 @@ func (s *Condition) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ConfidentialInstanceConfig: A set of Confidential Instance options. +type ConfidentialInstanceConfig struct { + // EnableConfidentialCompute: Defines whether the instance should have + // confidential compute enabled. + EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "EnableConfidentialCompute") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "EnableConfidentialCompute") to include in API requests with the JSON + // null value. By default, fields with empty values are omitted from API + // requests. However, any field with an empty value appearing in + // NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ConfidentialInstanceConfig) MarshalJSON() ([]byte, error) { + type NoMethod ConfidentialInstanceConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ConnectionDraining: Message containing connection draining // configuration. type ConnectionDraining struct { @@ -6268,7 +6495,6 @@ func (s *CorsPolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// CustomerEncryptionKey: Represents a customer-supplied encryption key type CustomerEncryptionKey struct { // KmsKeyName: The name of the encryption key that is stored in Google // Cloud KMS. 
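// Editor's note (illustrative sketch, not part of the vendored diff): the new
// ConfidentialInstanceConfig type added above is a plain options struct. Assuming
// compute "google.golang.org/api/compute/v1" is imported, enabling Confidential VM
// support looks like this; ForceSendFields is only needed if the zero value must be
// sent explicitly, per the field docs above.
func exampleConfidentialConfig() *compute.ConfidentialInstanceConfig {
	return &compute.ConfidentialInstanceConfig{
		EnableConfidentialCompute: true,
	}
}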
@@ -6510,10 +6736,10 @@ type Disk struct { Options string `json:"options,omitempty"` // PhysicalBlockSizeBytes: Physical block size of the persistent disk, - // in bytes. If not present in a request, a default value is used. - // Currently supported sizes are 4096 and 16384, other sizes may be - // added in the future. If an unsupported value is requested, the error - // message will list the supported values for the caller's project. + // in bytes. If not present in a request, a default value is used. The + // currently supported size is 4096, other sizes may be added in the + // future. If an unsupported value is requested, the error message will + // list the supported values for the caller's project. PhysicalBlockSizeBytes int64 `json:"physicalBlockSizeBytes,omitempty,string"` // Region: [Output Only] URL of the region where the disk resides. Only @@ -6534,17 +6760,33 @@ type Disk struct { // resource. SelfLink string `json:"selfLink,omitempty"` - // SizeGb: Size of the persistent disk, specified in GB. You can specify - // this field when creating a persistent disk using the sourceImage or - // sourceSnapshot parameter, or specify it alone to create an empty - // persistent disk. + // SizeGb: Size, in GB, of the persistent disk. You can specify this + // field when creating a persistent disk using the sourceImage, + // sourceSnapshot, or sourceDisk parameter, or specify it alone to + // create an empty persistent disk. // - // If you specify this field along with sourceImage or sourceSnapshot, - // the value of sizeGb must not be less than the size of the sourceImage - // or the size of the snapshot. Acceptable values are 1 to 65536, - // inclusive. + // If you specify this field along with a source, the value of sizeGb + // must not be less than the size of the source. Acceptable values are 1 + // to 65536, inclusive. SizeGb int64 `json:"sizeGb,omitempty,string"` + // SourceDisk: The source disk used to create this disk. You can provide + // this as a partial or full URL to the resource. For example, the + // following are valid values: + // - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk + // - projects/project/zones/zone/disks/disk + // - zones/zone/disks/disk + SourceDisk string `json:"sourceDisk,omitempty"` + + // SourceDiskId: [Output Only] The unique ID of the disk used to create + // this disk. This value identifies the exact disk that was used to + // create this persistent disk. For example, if you created the + // persistent disk from a disk that was later deleted and recreated + // under the same name, the source disk ID would identify the exact + // version of the disk that was used. + SourceDiskId string `json:"sourceDiskId,omitempty"` + // SourceImage: The source image used to create this disk. If the source // image is deleted, this field will not be set. // @@ -6625,7 +6867,7 @@ type Disk struct { // Type: URL of the disk type resource describing which disk type to use // to create the disk. Provide this when creating the disk. For example: - // projects/project/zones/zone/diskTypes/pd-standard or pd-ssd + // projects/project/zones/zone/diskTypes/pd-standard or pd-ssd Type string `json:"type,omitempty"` // Users: [Output Only] Links to the users of the disk (attached @@ -6688,6 +6930,9 @@ type DiskAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. 
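// Editor's note (illustrative sketch, not part of the vendored diff): the new
// SourceDisk field documented above allows creating a disk directly from another
// disk. Assuming compute "google.golang.org/api/compute/v1" and an initialized
// *compute.Service named svc, with hypothetical project/zone/disk names:
func exampleCloneDisk(svc *compute.Service) error {
	disk := &compute.Disk{
		Name:       "data-disk-clone",
		SourceDisk: "projects/my-project/zones/us-central1-a/disks/data-disk", // any of the URL forms above
		SizeGb:     200,                                                       // must not be less than the source disk size
		Type:       "projects/my-project/zones/us-central1-a/diskTypes/pd-ssd",
	}
	_, err := svc.Disks.Insert("my-project", "us-central1-a", disk).Do()
	return err
}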
+ Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *DiskAggregatedListWarning `json:"warning,omitempty"` @@ -6742,6 +6987,7 @@ type DiskAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -6962,6 +7208,7 @@ type DiskListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -7198,6 +7445,9 @@ type DiskTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *DiskTypeAggregatedListWarning `json:"warning,omitempty"` @@ -7252,6 +7502,7 @@ type DiskTypeAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -7407,6 +7658,7 @@ type DiskTypeListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -7541,6 +7793,7 @@ type DiskTypesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -7759,6 +8012,7 @@ type DisksScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -8087,6 +8341,7 @@ type ExchangedPeeringRoutesListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -8458,6 +8713,7 @@ type ExternalVpnGatewayListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -8626,8 +8882,8 @@ type Firewall struct { Kind string `json:"kind,omitempty"` // LogConfig: This field denotes the logging options for a particular - // firewall rule. If logging is enabled, logs will be exported to - // Stackdriver. + // firewall rule. If logging is enabled, logs will be exported to Cloud + // Logging. LogConfig *FirewallLogConfig `json:"logConfig,omitempty"` // Name: Name of the resource; provided by the client when the resource @@ -8899,6 +9155,7 @@ type FirewallListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -8983,6 +9240,15 @@ type FirewallLogConfig struct { // firewall rule. 
Enable bool `json:"enable,omitempty"` + // Metadata: This field can only be specified for a particular firewall + // rule if logging is enabled for that rule. This field denotes whether + // to include or exclude metadata for firewall logs. + // + // Possible values: + // "EXCLUDE_ALL_METADATA" + // "INCLUDE_ALL_METADATA" + Metadata string `json:"metadata,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enable") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -9090,6 +9356,9 @@ type ForwardingRule struct { // refer to [IP address // specifications](/load-balancing/docs/forwarding-rule-concepts#ip_addre // ss_specifications). + // + // Must be set to `0.0.0.0` when the target is targetGrpcProxy that has + // validateForProxyless field set to true. IPAddress string `json:"IPAddress,omitempty"` // IPProtocol: The IP protocol to which this rule applies. For protocol @@ -9211,7 +9480,7 @@ type ForwardingRule struct { // MetadataFilters: Opaque filter criteria used by Loadbalancer to // restrict routing configuration to a limited set of xDS compliant // clients. In their xDS requests to Loadbalancer, xDS clients present - // node metadata. If a match takes place, the relevant configuration is + // node metadata. When there is a match, the relevant configuration is // made available to those proxies. Otherwise, all the resources (e.g. // TargetHttpProxy, UrlMap) referenced by the ForwardingRule will not be // visible to those proxies. @@ -9219,8 +9488,9 @@ type ForwardingRule struct { // set to MATCH_ANY, at least one of the filterLabels must match the // corresponding label provided in the metadata. If its // filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels - // must match with corresponding labels provided in the - // metadata. + // must match with corresponding labels provided in the metadata. If + // multiple metadataFilters are specified, all of them need to be + // satisfied in order to be considered a match. // metadataFilters specified here will be applifed before those // specified in the UrlMap that this ForwardingRule // references. @@ -9239,10 +9509,9 @@ type ForwardingRule struct { // Network: This field is not used for external load balancing. // - // For INTERNAL and INTERNAL_SELF_MANAGED load balancing, this field - // identifies the network that the load balanced IP should belong to for - // this Forwarding Rule. If this field is not specified, the default - // network will be used. + // For internal load balancing, this field identifies the network that + // the load balanced IP should belong to for this Forwarding Rule. If + // this field is not specified, the default network will be used. Network string `json:"network,omitempty"` // NetworkTier: This signifies the networking tier used for configuring @@ -9267,7 +9536,8 @@ type ForwardingRule struct { // or a target pool. Do not use with a forwarding rule that points to a // backend service. This field is used along with the target field for // TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, - // TargetVpnGateway, TargetPool, TargetInstance. + // TargetGrpcProxy, TargetVpnGateway, TargetPool, + // TargetInstance. 
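// Editor's note (illustrative sketch, not part of the vendored diff): the new
// Metadata field on FirewallLogConfig shown above selects how much metadata is
// exported with firewall logs. Assuming compute "google.golang.org/api/compute/v1":
func exampleFirewallLogging() *compute.FirewallLogConfig {
	return &compute.FirewallLogConfig{
		Enable:   true,
		Metadata: "EXCLUDE_ALL_METADATA", // or "INCLUDE_ALL_METADATA"
	}
}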
// // Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets // addressed to ports in the specified range will be forwarded to @@ -9278,6 +9548,7 @@ type ForwardingRule struct { // ports: // - TargetHttpProxy: 80, 8080 // - TargetHttpsProxy: 443 + // - TargetGrpcProxy: Any ports // - TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, // 995, 1688, 1883, 5222 // - TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, @@ -9330,7 +9601,7 @@ type ForwardingRule struct { // This field is only used for internal load balancing. ServiceName string `json:"serviceName,omitempty"` - // Subnetwork: This field is only used for INTERNAL load balancing. + // Subnetwork: This field is only used for internal load balancing. // // For internal load balancing, this field identifies the subnetwork // that the load balanced IP should belong to for this Forwarding @@ -9342,12 +9613,13 @@ type ForwardingRule struct { Subnetwork string `json:"subnetwork,omitempty"` // Target: The URL of the target resource to receive the matched - // traffic. For regional forwarding rules, this target must live in the + // traffic. For regional forwarding rules, this target must be in the // same region as the forwarding rule. For global forwarding rules, this // target must be a global load balancing resource. The forwarded - // traffic must be of a type appropriate to the target object. For - // INTERNAL_SELF_MANAGED load balancing, only targetHttpProxy is valid, - // not targetHttpsProxy. + // traffic must be of a type appropriate to the target object. For more + // information, see the "Target" column in [Port + // specifications](/load-balancing/docs/forwarding-rule-concepts#ip_addre + // ss_specifications). Target string `json:"target,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -9400,6 +9672,9 @@ type ForwardingRuleAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *ForwardingRuleAggregatedListWarning `json:"warning,omitempty"` @@ -9454,6 +9729,7 @@ type ForwardingRuleAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -9609,6 +9885,7 @@ type ForwardingRuleListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -9770,6 +10047,7 @@ type ForwardingRulesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -9848,6 +10126,73 @@ func (s *ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type GRPCHealthCheck struct { + // GrpcServiceName: The gRPC service name for the health check. This + // field is optional. The value of grpc_service_name has the following + // meanings by convention: + // - Empty service_name means the overall status of all services at the + // backend. 
+ // - Non-empty service_name means the health of that gRPC service, as + // defined by the owner of the service. + // The grpc_service_name can only be ASCII. + GrpcServiceName string `json:"grpcServiceName,omitempty"` + + // Port: The port number for the health check request. Must be specified + // if port_name and port_specification are not set or if + // port_specification is USE_FIXED_PORT. Valid values are 1 through + // 65535. + Port int64 `json:"port,omitempty"` + + // PortName: Port name as defined in InstanceGroup#NamedPort#name. If + // both port and port_name are defined, port takes precedence. The + // port_name should conform to RFC1035. + PortName string `json:"portName,omitempty"` + + // PortSpecification: Specifies how port is selected for health + // checking, can be one of following values: + // USE_FIXED_PORT: The port number in port is used for health + // checking. + // USE_NAMED_PORT: The portName is used for health + // checking. + // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for + // each network endpoint is used for health checking. For other + // backends, the port or named port specified in the Backend Service is + // used for health checking. + // + // + // If not specified, gRPC health check follows behavior specified in + // port and portName fields. + // + // Possible values: + // "USE_FIXED_PORT" + // "USE_NAMED_PORT" + // "USE_SERVING_PORT" + PortSpecification string `json:"portSpecification,omitempty"` + + // ForceSendFields is a list of field names (e.g. "GrpcServiceName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "GrpcServiceName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GRPCHealthCheck) MarshalJSON() ([]byte, error) { + type NoMethod GRPCHealthCheck + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type GlobalNetworkEndpointGroupsAttachEndpointsRequest struct { // NetworkEndpoints: The list of network endpoints to be attached. NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` @@ -10106,6 +10451,7 @@ type GuestOsFeature struct { // "FEATURE_TYPE_UNSPECIFIED" // "MULTI_IP_SUBNET" // "SECURE_BOOT" + // "SEV_CAPABLE" // "UEFI_COMPATIBLE" // "VIRTIO_SCSI_MULTIQUEUE" // "WINDOWS" @@ -10371,10 +10717,11 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { // [Regional](/compute/docs/reference/rest/{$api_version}/regionHealthChe // cks) // -// Internal HTTP(S) load balancers use regional health checks. All other -// types of GCP load balancers and managed instance group auto-healing -// use global health checks. For more information, read Health Check -// Concepts. +// Internal HTTP(S) load balancers must use regional health checks. 
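// Editor's note (illustrative sketch, not part of the vendored diff): the new
// GRPCHealthCheck message above is referenced from HealthCheck via the
// GrpcHealthCheck field (added just below, together with the "GRPC" Type value).
// Assuming compute "google.golang.org/api/compute/v1" and hypothetical names:
func exampleGRPCHealthCheck(svc *compute.Service) error {
	hc := &compute.HealthCheck{
		Name: "grpc-hc",
		Type: "GRPC", // must match the configured *HealthCheck block
		GrpcHealthCheck: &compute.GRPCHealthCheck{
			GrpcServiceName:   "",                 // empty = overall status of all services
			PortSpecification: "USE_SERVING_PORT", // health-check each endpoint's serving port
		},
	}
	_, err := svc.HealthChecks.Insert("my-project", hc).Do()
	return err
}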
+// Internal TCP/UDP load balancers can use either regional or global +// health checks. All other types of GCP load balancers and managed +// instance group auto-healing must use global health checks. For more +// information, read Health Check Concepts. // // To perform health checks on network load balancers, you must use // either httpHealthChecks or httpsHealthChecks. @@ -10391,6 +10738,8 @@ type HealthCheck struct { // property when you create the resource. Description string `json:"description,omitempty"` + GrpcHealthCheck *GRPCHealthCheck `json:"grpcHealthCheck,omitempty"` + // HealthyThreshold: A so-far unhealthy instance will be marked healthy // after this many consecutive successes. The default value is 2. HealthyThreshold int64 `json:"healthyThreshold,omitempty"` @@ -10439,6 +10788,7 @@ type HealthCheck struct { // must match type field. // // Possible values: + // "GRPC" // "HTTP" // "HTTP2" // "HTTPS" @@ -10556,6 +10906,7 @@ type HealthCheckListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -10666,15 +11017,166 @@ func (s *HealthCheckReference) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type HealthChecksAggregatedList struct { +// HealthCheckService: Represents a Health-Check as a Service +// resource. +// +// (== resource_for {$api_version}.regionHealthCheckServices ==) +type HealthCheckService struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a HealthCheckService. An + // up-to-date fingerprint must be provided in order to patch/update the + // HealthCheckService; Otherwise, the request will fail with error 412 + // conditionNotMet. To see the latest fingerprint, make a get() request + // to retrieve the HealthCheckService. + Fingerprint string `json:"fingerprint,omitempty"` + + // HealthChecks: List of URLs to the HealthCheck resources. Must have at + // least one HealthCheck, and not more than 10. HealthCheck resources + // must have portSpecification=USE_SERVING_PORT. For regional + // HealthCheckService, the HealthCheck must be regional and in the same + // region. For global HealthCheckService, HealthCheck must be global. + // Mix of regional and global HealthChecks is not supported. Multiple + // regional HealthChecks must belong to the same region. Regional + // HealthChecks`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. 
For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *InstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstanceGroupManagersListPerInstanceConfigsCall) MaxResults(maxResults int64) *InstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *InstanceGroupManagersListPerInstanceConfigsCall) OrderBy(orderBy string) *InstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken string) *InstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupManagersListPerInstanceConfigsCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersListPerInstanceConfigsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.listPerInstanceConfigs" call. +// Exactly one of *InstanceGroupManagersListPerInstanceConfigsResp or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *InstanceGroupManagersListPerInstanceConfigsResp.ServerResponse.Header +// or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListPerInstanceConfigsResp, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupManagersListPerInstanceConfigsResp{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all of the per-instance configs defined for the managed instance group. The orderBy query parameter is not supported.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.listPerInstanceConfigs", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located. 
It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", + // "response": { + // "$ref": "InstanceGroupManagersListPerInstanceConfigsResp" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListPerInstanceConfigsResp) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.instanceGroupManagers.patch": type InstanceGroupManagersPatchCall struct { @@ -66194,7 +69896,7 @@ func (c *InstanceGroupManagersPatchCall) Header() http.Header { func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66309,40 +70011,28 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instanceGroupManagers.recreateInstances": +// method id "compute.instanceGroupManagers.patchPerInstanceConfigs": -type InstanceGroupManagersRecreateInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersPatchPerInstanceConfigsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerspatchperinstanceconfigsreq *InstanceGroupManagersPatchPerInstanceConfigsReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RecreateInstances: Flags the specified instances in the managed -// instance group to be immediately recreated. The instances are deleted -// and recreated using the current instance template for the managed -// instance group. This operation is marked as DONE when the flag is set -// even if the instances have not yet been recreated. You must -// separately verify the status of the recreating action with the -// listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. 
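// Editor's note (illustrative sketch, not part of the vendored diff): putting the
// new listPerInstanceConfigs call shown above together with its Pages helper and
// the new ReturnPartialSuccess option. It assumes the matching
// ListPerInstanceConfigs constructor on InstanceGroupManagersService from this
// same update, imports of "context" and compute "google.golang.org/api/compute/v1",
// and hypothetical project/zone/group names.
func examplePerInstanceConfigs(ctx context.Context, svc *compute.Service) (int, error) {
	call := svc.InstanceGroupManagers.
		ListPerInstanceConfigs("my-project", "us-central1-a", "my-igm").
		MaxResults(100).
		ReturnPartialSuccess(true) // new opt-in partial-success behavior
	pages := 0
	err := call.Pages(ctx, func(page *compute.InstanceGroupManagersListPerInstanceConfigsResp) error {
		pages++ // each page carries the per-instance configs returned for this request
		return nil
	})
	return pages, err
}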
-func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { - c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// PatchPerInstanceConfigs: Inserts or patches per-instance configs for +// the managed instance group. perInstanceConfig.name serves as a key +// used to distinguish whether to perform insert or patch. +func (r *InstanceGroupManagersService) PatchPerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagerspatchperinstanceconfigsreq *InstanceGroupManagersPatchPerInstanceConfigsReq) *InstanceGroupManagersPatchPerInstanceConfigsCall { + c := &InstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest + c.instancegroupmanagerspatchperinstanceconfigsreq = instancegroupmanagerspatchperinstanceconfigsreq return c } @@ -66360,7 +70050,7 @@ func (r *InstanceGroupManagersService) RecreateInstances(project string, zone st // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) RequestId(requestId string) *InstanceGroupManagersPatchPerInstanceConfigsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -66368,7 +70058,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchPerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66376,36 +70066,36 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersPatchPerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerspatchperinstanceconfigsreq) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -66420,14 +70110,14 @@ func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*htt return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.recreateInstances" call. +// Do executes the "compute.instanceGroupManagers.patchPerInstanceConfigs" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66458,9 +70148,9 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Inserts or patches per-instance configs for the managed instance group. 
perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.recreateInstances", + // "id": "compute.instanceGroupManagers.patchPerInstanceConfigs", // "parameterOrder": [ // "project", // "zone", @@ -66468,7 +70158,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "description": "The name of the managed instance group. It should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" @@ -66486,15 +70176,15 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", // "request": { - // "$ref": "InstanceGroupManagersRecreateInstancesRequest" + // "$ref": "InstanceGroupManagersPatchPerInstanceConfigsReq" // }, // "response": { // "$ref": "Operation" @@ -66507,45 +70197,40 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } -// method id "compute.instanceGroupManagers.resize": +// method id "compute.instanceGroupManagers.recreateInstances": -type InstanceGroupManagersResizeCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the managed instance group. If you increase the size, -// the group creates new instances using the current instance template. -// If you decrease the size, the group deletes instances. The resize -// operation is marked DONE when the resize actions are scheduled even -// if the group has not yet added or deleted any instances. You must -// separately verify the status of the creating or deleting actions with -// the listmanagedinstances method. -// -// When resizing down, the instance group arbitrarily chooses the order -// in which VMs are deleted. The group takes into account some VM -// attributes when making the selection including: -// -// + The status of the VM instance. + The health of the VM instance. + -// The instance template version the VM is based on. + For regional -// managed instance groups, the location of the VM instance. -// -// This list is subject to change. +// RecreateInstances: Flags the specified instances in the managed +// instance group to be immediately recreated. The instances are deleted +// and recreated using the current instance template for the managed +// instance group. This operation is marked as DONE when the flag is set +// even if the instances have not yet been recreated. 
You must +// separately verify the status of the recreating action with the +// listmanagedinstances method. // // If the group is part of a backend service that has enabled connection // draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. -func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { - c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { + c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instanceGroupManager = instanceGroupManager - c.urlParams_.Set("size", fmt.Sprint(size)) + c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest return c } @@ -66563,7 +70248,7 @@ func (r *InstanceGroupManagersService) Resize(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceGroupManagersResizeCall { +func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -66571,7 +70256,7 @@ func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { +func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66579,31 +70264,36 @@ func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { +func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersResizeCall) Header() http.Header { +func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -66618,14 +70308,14 @@ func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.resize" call. +// Do executes the "compute.instanceGroupManagers.recreateInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66656,14 +70346,13 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. 
+ For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.resize", + // "id": "compute.instanceGroupManagers.recreateInstances", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager", - // "size" + // "instanceGroupManager" // ], // "parameters": { // "instanceGroupManager": { @@ -66684,13 +70373,6 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope // "location": "query", // "type": "string" // }, - // "size": { - // "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", - // "format": "int32", - // "location": "query", - // "required": true, - // "type": "integer" - // }, // "zone": { // "description": "The name of the zone where the managed instance group is located.", // "location": "path", @@ -66698,7 +70380,10 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "request": { + // "$ref": "InstanceGroupManagersRecreateInstancesRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -66710,28 +70395,45 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroupManagers.setInstanceTemplate": +// method id "compute.instanceGroupManagers.resize": -type InstanceGroupManagersSetInstanceTemplateCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersResizeCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetInstanceTemplate: Specifies the instance template to use when -// creating new instances in this group. The templates for existing -// instances in the group do not change unless you recreate them. 
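The hunks above regenerate the zonal compute.instanceGroupManagers.recreateInstances and resize calls of this vendored client. A minimal sketch of how consuming code might drive both; the project, zone, group name, and instance URL are placeholder values, and compute.NewService is assumed to pick up Application Default Credentials:

```go
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// NewService uses Application Default Credentials when no options are passed.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}

	// Placeholder identifiers, for illustration only.
	project, zone, igm := "my-project", "us-central1-a", "my-igm"

	// Flag one instance for immediate recreation. The returned Operation only
	// records that the flag was set; progress has to be checked separately,
	// e.g. via listManagedInstances.
	req := &compute.InstanceGroupManagersRecreateInstancesRequest{
		Instances: []string{"zones/us-central1-a/instances/my-igm-abcd"},
	}
	op, err := svc.InstanceGroupManagers.RecreateInstances(project, zone, igm, req).
		RequestId("11111111-2222-3333-4444-555555555555"). // idempotency key, any non-zero UUID
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("RecreateInstances: %v", err)
	}
	log.Printf("recreate operation %s: %s", op.Name, op.Status)

	// Resize passes the target size as a plain query parameter.
	op, err = svc.InstanceGroupManagers.Resize(project, zone, igm, 3).Context(ctx).Do()
	if err != nil {
		log.Fatalf("Resize: %v", err)
	}
	log.Printf("resize operation %s: %s", op.Name, op.Status)
}
```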
-func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { - c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Resizes the managed instance group. If you increase the size, +// the group creates new instances using the current instance template. +// If you decrease the size, the group deletes instances. The resize +// operation is marked DONE when the resize actions are scheduled even +// if the group has not yet added or deleted any instances. You must +// separately verify the status of the creating or deleting actions with +// the listmanagedinstances method. +// +// When resizing down, the instance group arbitrarily chooses the order +// in which VMs are deleted. The group takes into account some VM +// attributes when making the selection including: +// +// + The status of the VM instance. + The health of the VM instance. + +// The instance template version the VM is based on. + For regional +// managed instance groups, the location of the VM instance. +// +// This list is subject to change. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or deleted. +func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { + c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest + c.urlParams_.Set("size", fmt.Sprint(size)) return c } @@ -66749,7 +70451,7 @@ func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceGroupManagersResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -66757,7 +70459,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId strin // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66765,36 +70467,31 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Fie // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { +func (c *InstanceGroupManagersResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -66809,14 +70506,14 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*h return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. +// Do executes the "compute.instanceGroupManagers.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66847,13 +70544,14 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them.", + // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. 
You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setInstanceTemplate", + // "id": "compute.instanceGroupManagers.resize", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instanceGroupManager", + // "size" // ], // "parameters": { // "instanceGroupManager": { @@ -66874,6 +70572,13 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call // "location": "query", // "type": "string" // }, + // "size": { + // "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", + // "format": "int32", + // "location": "query", + // "required": true, + // "type": "integer" + // }, // "zone": { // "description": "The name of the zone where the managed instance group is located.", // "location": "path", @@ -66881,10 +70586,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", - // "request": { - // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" - // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", // "response": { // "$ref": "Operation" // }, @@ -66896,32 +70598,28 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call } -// method id "compute.instanceGroupManagers.setTargetPools": +// method id "compute.instanceGroupManagers.setInstanceTemplate": -type InstanceGroupManagersSetTargetPoolsCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTargetPools: Modifies the target pools to which all instances in -// this managed instance group are assigned. The target pools -// automatically apply to all of the instances in the managed instance -// group. This operation is marked DONE when you make the request even -// if the instances have not yet been added to their target pools. The -// change might take some time to apply to all of the instances in the -// group depending on the size of the group. 
-func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { - c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetInstanceTemplate: Specifies the instance template to use when +// creating new instances in this group. The templates for existing +// instances in the group do not change unless you recreate them. +func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { + c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest + c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest return c } @@ -66939,7 +70637,7 @@ func (r *InstanceGroupManagersService) SetTargetPools(project string, zone strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *InstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -66947,7 +70645,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *I // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66955,36 +70653,36 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -66999,14 +70697,204 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.R return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setTargetPools" call. +// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Specifies the instance template to use when creating new instances in this group. 
The templates for existing instances in the group do not change unless you recreate them.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.setInstanceTemplate", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "request": { + // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.setTargetPools": + +type InstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetTargetPools: Modifies the target pools to which all instances in +// this managed instance group are assigned. The target pools +// automatically apply to all of the instances in the managed instance +// group. This operation is marked DONE when you make the request even +// if the instances have not yet been added to their target pools. The +// change might take some time to apply to all of the instances in the +// group depending on the size of the group. 
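The setInstanceTemplate call regenerated above and the setTargetPools call introduced just above are typically used together when rolling a group onto a new template. A sketch under the assumption that the request types carry an InstanceTemplate URL and a TargetPools list, as in the upstream compute/v1 schema; svc is a *compute.Service as in the previous sketch:

```go
package igmops // hypothetical helper package, for illustration

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// rollOntoTemplate points a managed instance group at a new instance template
// and reassigns its target pools. Existing VMs keep the old template until
// they are recreated; the target-pool change is applied asynchronously.
func rollOntoTemplate(ctx context.Context, svc *compute.Service, project, zone, igm, templateURL string, pools []string) error {
	tmplReq := &compute.InstanceGroupManagersSetInstanceTemplateRequest{
		InstanceTemplate: templateURL, // field name assumed from the upstream schema
	}
	if _, err := svc.InstanceGroupManagers.SetInstanceTemplate(project, zone, igm, tmplReq).Context(ctx).Do(); err != nil {
		return err
	}

	tpReq := &compute.InstanceGroupManagersSetTargetPoolsRequest{
		TargetPools: pools, // URLs of the target pools to assign
	}
	_, err := svc.InstanceGroupManagers.SetTargetPools(project, zone, igm, tpReq).Context(ctx).Do()
	return err
}
```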
+func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { + c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *InstanceGroupManagersSetTargetPoolsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.setTargetPools" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67086,6 +70974,192 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio } +// method id "compute.instanceGroupManagers.updatePerInstanceConfigs": + +type InstanceGroupManagersUpdatePerInstanceConfigsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersupdateperinstanceconfigsreq *InstanceGroupManagersUpdatePerInstanceConfigsReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// UpdatePerInstanceConfigs: Inserts or updates per-instance configs for +// the managed instance group. perInstanceConfig.name serves as a key +// used to distinguish whether to perform insert or patch. +func (r *InstanceGroupManagersService) UpdatePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersupdateperinstanceconfigsreq *InstanceGroupManagersUpdatePerInstanceConfigsReq) *InstanceGroupManagersUpdatePerInstanceConfigsCall { + c := &InstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersupdateperinstanceconfigsreq = instancegroupmanagersupdateperinstanceconfigsreq + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requestId string) *InstanceGroupManagersUpdatePerInstanceConfigsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersUpdatePerInstanceConfigsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersUpdatePerInstanceConfigsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersupdateperinstanceconfigsreq) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.updatePerInstanceConfigs" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Inserts or updates per-instance configs for the managed instance group. 
perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.updatePerInstanceConfigs", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + // "request": { + // "$ref": "InstanceGroupManagersUpdatePerInstanceConfigsReq" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.instanceGroups.addInstances": type InstanceGroupsAddInstancesCall struct { @@ -67157,7 +71231,7 @@ func (c *InstanceGroupsAddInstancesCall) Header() http.Header { func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67369,6 +71443,15 @@ func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *Instance return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
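compute.instanceGroupManagers.updatePerInstanceConfigs is new in this regeneration; as the description above notes, perInstanceConfig.name is the key that decides between insert and patch. A sketch assuming the request type exposes a PerInstanceConfigs slice of *compute.PerInstanceConfig values, which is how the upstream schema models it:

```go
package igmops // hypothetical helper package, for illustration

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// upsertPerInstanceConfig inserts or patches a per-instance config; the
// config's Name is the insert-or-patch key.
func upsertPerInstanceConfig(ctx context.Context, svc *compute.Service, project, zone, igm, instanceName string) error {
	req := &compute.InstanceGroupManagersUpdatePerInstanceConfigsReq{
		// PerInstanceConfigs is assumed to follow the upstream schema's field name.
		PerInstanceConfigs: []*compute.PerInstanceConfig{
			{Name: instanceName},
		},
	}
	_, err := svc.InstanceGroupManagers.UpdatePerInstanceConfigs(project, zone, igm, req).Context(ctx).Do()
	return err
}
```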
@@ -67406,7 +71489,7 @@ func (c *InstanceGroupsAggregatedListCall) Header() http.Header { func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67509,6 +71592,11 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/instanceGroups", @@ -67615,7 +71703,7 @@ func (c *InstanceGroupsDeleteCall) Header() http.Header { func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67735,8 +71823,11 @@ type InstanceGroupsGetCall struct { header_ http.Header } -// Get: Returns the specified instance group. Gets a list of available -// instance groups by making a list() request. +// Get: Returns the specified zonal instance group. Get a list of +// available zonal instance groups by making a list() request. +// +// For managed instance groups, use the instanceGroupManagers or +// regionInstanceGroupManagers methods instead. func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -67782,7 +71873,7 @@ func (c *InstanceGroupsGetCall) Header() http.Header { func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67846,7 +71937,7 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup } return ret, nil // { - // "description": "Returns the specified instance group. Gets a list of available instance groups by making a list() request.", + // "description": "Returns the specified zonal instance group. 
Get a list of available zonal instance groups by making a list() request.\n\nFor managed instance groups, use the instanceGroupManagers or regionInstanceGroupManagers methods instead.", // "httpMethod": "GET", // "id": "compute.instanceGroups.get", // "parameterOrder": [ @@ -67956,7 +72047,7 @@ func (c *InstanceGroupsInsertCall) Header() http.Header { func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68075,8 +72166,11 @@ type InstanceGroupsListCall struct { header_ http.Header } -// List: Retrieves the list of instance groups that are located in the -// specified project and zone. +// List: Retrieves the list of zonal instance group resources contained +// within the specified zone. +// +// For managed instance groups, use the instanceGroupManagers or +// regionInstanceGroupManagers methods instead. func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -68149,6 +72243,15 @@ func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsList return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -68186,7 +72289,7 @@ func (c *InstanceGroupsListCall) Header() http.Header { func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68249,7 +72352,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou } return ret, nil // { - // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + // "description": "Retrieves the list of zonal instance group resources contained within the specified zone.\n\nFor managed instance groups, use the instanceGroupManagers or regionInstanceGroupManagers methods instead.", // "httpMethod": "GET", // "id": "compute.instanceGroups.list", // "parameterOrder": [ @@ -68287,6 +72390,11 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the instance group is located.", // "location": "path", @@ -68342,6 +72450,7 @@ type InstanceGroupsListInstancesCall struct { } // ListInstances: Lists the instances in the specified instance group. +// The orderBy query parameter is not supported. func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -68416,6 +72525,15 @@ func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceG return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -68443,7 +72561,7 @@ func (c *InstanceGroupsListInstancesCall) Header() http.Header { func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68509,7 +72627,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins } return ret, nil // { - // "description": "Lists the instances in the specified instance group.", + // "description": "Lists the instances in the specified instance group. The orderBy query parameter is not supported.", // "httpMethod": "POST", // "id": "compute.instanceGroups.listInstances", // "parameterOrder": [ @@ -68554,6 +72672,11 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the instance group is located.", // "location": "path", @@ -68672,7 +72795,7 @@ func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68856,7 +72979,7 @@ func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69039,7 +73162,7 @@ func (c *InstanceTemplatesDeleteCall) Header() http.Header { func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69198,7 +73321,7 @@ func (c *InstanceTemplatesGetCall) Header() http.Header { func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69318,6 +73441,13 @@ func (r *InstanceTemplatesService) GetIamPolicy(project string, resource string) return c } +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *InstanceTemplatesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *InstanceTemplatesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
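Two optional query parameters recur throughout the regenerated calls above: returnPartialSuccess on the list calls and optionsRequestedPolicyVersion on the getIamPolicy calls. A sketch of both, with placeholder resource names:

```go
package igmops // hypothetical helper package, for illustration

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// listWithPartialSuccess lists zonal instance groups while tolerating
// per-scope failures, then reads a template's IAM policy in a newer format.
func listWithPartialSuccess(ctx context.Context, svc *compute.Service, project, zone string) error {
	groups, err := svc.InstanceGroups.List(project, zone).
		ReturnPartialSuccess(true). // opt in to partial results on failure
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, g := range groups.Items {
		_ = g.Name // process each group
	}

	// "my-template" is a placeholder resource name; 3 requests IAM policy
	// format version 3.
	_, err = svc.InstanceTemplates.GetIamPolicy(project, "my-template").
		OptionsRequestedPolicyVersion(3).
		Context(ctx).
		Do()
	return err
}
```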
@@ -69355,7 +73485,7 @@ func (c *InstanceTemplatesGetIamPolicyCall) Header() http.Header { func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69426,6 +73556,12 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P // "resource" // ], // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -69524,7 +73660,7 @@ func (c *InstanceTemplatesInsertCall) Header() http.Header { func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69708,6 +73844,15 @@ func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplat return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceTemplatesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -69745,7 +73890,7 @@ func (c *InstanceTemplatesListCall) Header() http.Header { func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69843,6 +73988,11 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/instanceTemplates", @@ -69928,7 +74078,7 @@ func (c *InstanceTemplatesSetIamPolicyCall) Header() http.Header { func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70080,7 +74230,7 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70256,7 +74406,7 @@ func (c *InstancesAddAccessConfigCall) Header() http.Header { func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70451,7 +74601,7 @@ func (c *InstancesAddResourcePoliciesCall) Header() http.Header { func (c *InstancesAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70666,6 +74816,15 @@ func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggr return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstancesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -70703,7 +74862,7 @@ func (c *InstancesAggregatedListCall) Header() http.Header { func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70806,6 +74965,11 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/instances", @@ -70870,8 +75034,9 @@ func (r *InstancesService) AttachDisk(project string, zone string, instance stri } // ForceAttach sets the optional parameter "forceAttach": Whether to -// force attach the disk even if it's currently attached to another -// instance. +// force attach the regional disk even if it's currently attached to +// another instance. If you try to force attach a zonal disk to an +// instance, you will receive an error. func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttachDiskCall { c.urlParams_.Set("forceAttach", fmt.Sprint(forceAttach)) return c @@ -70923,7 +75088,7 @@ func (c *InstancesAttachDiskCall) Header() http.Header { func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70999,7 +75164,7 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // ], // "parameters": { // "forceAttach": { - // "description": "Whether to force attach the disk even if it's currently attached to another instance.", + // "description": "Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error.", // "location": "query", // "type": "boolean" // }, @@ -71114,7 +75279,7 @@ func (c *InstancesDeleteCall) Header() http.Header { func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71294,7 +75459,7 @@ func (c *InstancesDeleteAccessConfigCall) Header() http.Header { func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71486,7 +75651,7 @@ func (c *InstancesDetachDiskCall) Header() http.Header { func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71663,7 +75828,7 @@ func (c *InstancesGetCall) Header() http.Header { func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71844,7 +76009,7 @@ func (c *InstancesGetGuestAttributesCall) Header() http.Header { func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, error) { 
reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71985,6 +76150,13 @@ func (r *InstancesService) GetIamPolicy(project string, zone string, resource st return c } +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *InstancesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *InstancesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -72022,7 +76194,7 @@ func (c *InstancesGetIamPolicyCall) Header() http.Header { func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72095,6 +76267,12 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "resource" // ], // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -72130,6 +76308,173 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } +// method id "compute.instances.getScreenshot": + +type InstancesGetScreenshotCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetScreenshot: Returns the screenshot from the specified instance. +func (r *InstancesService) GetScreenshot(project string, zone string, instance string) *InstancesGetScreenshotCall { + c := &InstancesGetScreenshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesGetScreenshotCall) Fields(s ...googleapi.Field) *InstancesGetScreenshotCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetScreenshotCall) IfNoneMatch(entityTag string) *InstancesGetScreenshotCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InstancesGetScreenshotCall) Context(ctx context.Context) *InstancesGetScreenshotCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesGetScreenshotCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesGetScreenshotCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/screenshot") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.getScreenshot" call. +// Exactly one of *Screenshot or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Screenshot.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesGetScreenshotCall) Do(opts ...googleapi.CallOption) (*Screenshot, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Screenshot{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the screenshot from the specified instance.", + // "httpMethod": "GET", + // "id": "compute.instances.getScreenshot", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/screenshot", + // "response": { + // "$ref": "Screenshot" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.instances.getSerialPortOutput": type InstancesGetSerialPortOutputCall struct { @@ -72161,12 +76506,23 @@ func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialP return c } -// Start sets the optional parameter "start": Returns output starting -// from a specific byte position. Use this to page through output when -// the output is too large to return in a single request. For the -// initial request, leave this field unspecified. For subsequent calls, -// this field should be set to the next value returned in the previous -// call. +// Start sets the optional parameter "start": Specifies the starting +// byte position of the output to return. To start with the first byte +// of output to the specified port, omit this field or set it to +// `0`. +// +// If the output for that byte position is available, this field matches +// the `start` parameter sent with the request. If the amount of serial +// console output exceeds the size of the buffer (1 MB), the oldest +// output is discarded and is no longer available. If the requested +// start position refers to discarded output, the start position is +// adjusted to the oldest output still available, and the adjusted start +// position is returned as the `start` property value. +// +// You can also provide a negative start position, which translates to +// the most recent number of bytes written to the serial port. For +// example, -3 is interpreted as the most recent 3 bytes written to the +// serial console. 
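Two of the additions above are handy when debugging an instance that is otherwise unreachable: the new compute.instances.getScreenshot call returns a Base64-encoded console screenshot, and the reworked start parameter on getSerialPortOutput accepts a negative offset to fetch only the most recent bytes of console output. A minimal sketch of both, assuming Application Default Credentials; the project/zone/instance names are placeholders, not part of this change:

package main

import (
	"context"
	"encoding/base64"
	"fmt"
	"log"
	"os"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Grab a console screenshot; Contents holds the Base64-encoded image.
	shot, err := svc.Instances.GetScreenshot("my-project", "us-central1-a", "my-instance").Do()
	if err != nil {
		log.Fatal(err)
	}
	img, err := base64.StdEncoding.DecodeString(shot.Contents)
	if err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("screenshot.img", img, 0o644); err != nil {
		log.Fatal(err)
	}

	// Tail roughly the last 4 KiB of serial console output via a negative start offset.
	out, err := svc.Instances.GetSerialPortOutput("my-project", "us-central1-a", "my-instance").
		Port(1).Start(-4096).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(out.Contents)
}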
func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { c.urlParams_.Set("start", fmt.Sprint(start)) return c @@ -72209,7 +76565,7 @@ func (c *InstancesGetSerialPortOutputCall) Header() http.Header { func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72283,7 +76639,7 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Name of the instance for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -72306,7 +76662,7 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se // "type": "string" // }, // "start": { - // "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. For subsequent calls, this field should be set to the next value returned in the previous call.", + // "description": "Specifies the starting byte position of the output to return. To start with the first byte of output to the specified port, omit this field or set it to `0`.\n\nIf the output for that byte position is available, this field matches the `start` parameter sent with the request. If the amount of serial console output exceeds the size of the buffer (1 MB), the oldest output is discarded and is no longer available. If the requested start position refers to discarded output, the start position is adjusted to the oldest output still available, and the adjusted start position is returned as the `start` property value.\n\nYou can also provide a negative start position, which translates to the most recent number of bytes written to the serial port. For example, -3 is interpreted as the most recent 3 bytes written to the serial console.", // "format": "int64", // "location": "query", // "type": "string" @@ -72392,7 +76748,7 @@ func (c *InstancesGetShieldedInstanceIdentityCall) Header() http.Header { func (c *InstancesGetShieldedInstanceIdentityCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72584,7 +76940,7 @@ func (c *InstancesInsertCall) Header() http.Header { func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72784,6 +77140,15 @@ func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. 
The default value is +// false and the logic is the same as today. +func (c *InstancesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -72821,7 +77186,7 @@ func (c *InstancesListCall) Header() http.Header { func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72922,6 +77287,11 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -72977,9 +77347,11 @@ type InstancesListReferrersCall struct { header_ http.Header } -// ListReferrers: Retrieves the list of referrers to instances contained -// within the specified zone. For more information, read Viewing -// Referrers to VM Instances. +// ListReferrers: Retrieves a list of resources that refer to the VM +// instance specified in the request. For example, if the VM instance is +// part of a managed or unmanaged instance group, the referrers list +// includes the instance group. For more information, read Viewing +// referrers to VM instances. func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -73053,6 +77425,15 @@ func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListR return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstancesListReferrersCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesListReferrersCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -73090,7 +77471,7 @@ func (c *InstancesListReferrersCall) Header() http.Header { func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73154,7 +77535,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance } return ret, nil // { - // "description": "Retrieves the list of referrers to instances contained within the specified zone. 
For more information, read Viewing Referrers to VM Instances.", + // "description": "Retrieves a list of resources that refer to the VM instance specified in the request. For example, if the VM instance is part of a managed or unmanaged instance group, the referrers list includes the instance group. For more information, read Viewing referrers to VM instances.", // "httpMethod": "GET", // "id": "compute.instances.listReferrers", // "parameterOrder": [ @@ -73200,6 +77581,11 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -73311,7 +77697,7 @@ func (c *InstancesRemoveResourcePoliciesCall) Header() http.Header { func (c *InstancesRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73498,7 +77884,7 @@ func (c *InstancesResetCall) Header() http.Header { func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73681,7 +78067,7 @@ func (c *InstancesSetDeletionProtectionCall) Header() http.Header { func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73867,7 +78253,7 @@ func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74042,7 +78428,7 @@ func (c *InstancesSetIamPolicyCall) Header() http.Header { func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74224,7 +78610,7 @@ func (c *InstancesSetLabelsCall) Header() http.Header { func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ 
-74411,7 +78797,7 @@ func (c *InstancesSetMachineResourcesCall) Header() http.Header { func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74598,7 +78984,7 @@ func (c *InstancesSetMachineTypeCall) Header() http.Header { func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74786,7 +79172,7 @@ func (c *InstancesSetMetadataCall) Header() http.Header { func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74975,7 +79361,7 @@ func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75105,7 +79491,10 @@ type InstancesSetSchedulingCall struct { header_ http.Header } -// SetScheduling: Sets an instance's scheduling options. +// SetScheduling: Sets an instance's scheduling options. You can only +// call this method on a stopped instance, that is, a VM instance that +// is in a `TERMINATED` state. See Instance Life Cycle for more +// information on the possible instance states. // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -75162,7 +79551,7 @@ func (c *InstancesSetSchedulingCall) Header() http.Header { func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75228,7 +79617,7 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Sets an instance's scheduling options.", + // "description": "Sets an instance's scheduling options. You can only call this method on a stopped instance, that is, a VM instance that is in a `TERMINATED` state. 
See Instance Life Cycle for more information on the possible instance states.", // "httpMethod": "POST", // "id": "compute.instances.setScheduling", // "parameterOrder": [ @@ -75350,7 +79739,7 @@ func (c *InstancesSetServiceAccountCall) Header() http.Header { func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75539,7 +79928,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75727,7 +80116,7 @@ func (c *InstancesSetTagsCall) Header() http.Header { func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75893,7 +80282,7 @@ func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76066,7 +80455,7 @@ func (c *InstancesStartCall) Header() http.Header { func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76246,7 +80635,7 @@ func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76436,7 +80825,7 @@ func (c *InstancesStopCall) Header() http.Header { func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76596,7 +80985,7 @@ func (c *InstancesTestIamPermissionsCall) Header() http.Header { func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76813,7 +81202,7 @@ func (c *InstancesUpdateCall) Header() http.Header { func (c *InstancesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77037,7 +81426,7 @@ func (c *InstancesUpdateAccessConfigCall) Header() http.Header { func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77233,7 +81622,7 @@ func (c *InstancesUpdateDisplayDeviceCall) Header() http.Header { func (c *InstancesUpdateDisplayDeviceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77421,7 +81810,7 @@ func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77617,7 +82006,7 @@ func (c *InstancesUpdateShieldedInstanceConfigCall) Header() http.Header { func (c *InstancesUpdateShieldedInstanceConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77831,6 +82220,15 @@ func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InterconnectAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -77868,7 +82266,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77972,6 +82370,11 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/interconnectAttachments", @@ -78075,7 +82478,7 @@ func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78243,7 +82646,7 @@ func (c *InterconnectAttachmentsGetCall) Header() http.Header { func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78426,7 +82829,7 @@ func (c *InterconnectAttachmentsInsertCall) Header() http.Header { func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78625,6 +83028,15 @@ func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *Interconn return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InterconnectAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
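A few hunks above, the setScheduling description now spells out that the call is only valid on a stopped instance, i.e. one in the TERMINATED state. A hedged sketch of the resulting stop-then-update flow, with placeholder identifiers and a simple operation poll that is not taken from this change; svc is a *compute.Service, and the "time" and compute imports are assumed:

func stopAndSetScheduling(svc *compute.Service) error {
	const project, zone, instance = "my-project", "us-central1-a", "my-instance" // placeholders

	// setScheduling only works on a TERMINATED instance, so stop it first.
	op, err := svc.Instances.Stop(project, zone, instance).Do()
	if err != nil {
		return err
	}
	// Poll the zone operation until the stop completes.
	for op.Status != "DONE" {
		time.Sleep(5 * time.Second)
		if op, err = svc.ZoneOperations.Get(project, zone, op.Name).Do(); err != nil {
			return err
		}
	}

	restart := true
	_, err = svc.Instances.SetScheduling(project, zone, instance, &compute.Scheduling{
		AutomaticRestart:  &restart,
		OnHostMaintenance: "MIGRATE",
	}).Do()
	return err
}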
@@ -78662,7 +83074,7 @@ func (c *InterconnectAttachmentsListCall) Header() http.Header { func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78769,6 +83181,11 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/interconnectAttachments", @@ -78876,7 +83293,7 @@ func (c *InterconnectAttachmentsPatchCall) Header() http.Header { func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79052,7 +83469,7 @@ func (c *InterconnectLocationsGetCall) Header() http.Header { func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79235,6 +83652,15 @@ func (c *InterconnectLocationsListCall) PageToken(pageToken string) *Interconnec return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InterconnectLocationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectLocationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -79272,7 +83698,7 @@ func (c *InterconnectLocationsListCall) Header() http.Header { func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79370,6 +83796,11 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/interconnectLocations", @@ -79471,7 +83902,7 @@ func (c *InterconnectsDeleteCall) Header() http.Header { func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79629,7 +84060,7 @@ func (c *InterconnectsGetCall) Header() http.Header { func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79786,7 +84217,7 @@ func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79952,7 +84383,7 @@ func (c *InterconnectsInsertCall) Header() http.Header { func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80135,6 +84566,15 @@ func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InterconnectsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -80172,7 +84612,7 @@ func (c *InterconnectsListCall) Header() http.Header { func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80270,6 +84710,11 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/interconnects", @@ -80375,7 +84820,7 @@ func (c *InterconnectsPatchCall) Header() http.Header { func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80543,7 +84988,7 @@ func (c *LicenseCodesGetCall) Header() http.Header { func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80692,7 +85137,7 @@ func (c *LicenseCodesTestIamPermissionsCall) Header() http.Header { func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80863,7 +85308,7 @@ func (c *LicensesDeleteCall) Header() http.Header { func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81023,7 +85468,7 @@ func (c *LicensesGetCall) Header() http.Header { func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81145,6 +85590,13 @@ func (r *LicensesService) GetIamPolicy(project string, resource string) *License return c } +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *LicensesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *LicensesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
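The optionsRequestedPolicyVersion parameter added to the GetIamPolicy calls here (and to the instances variant earlier in this file) is how callers ask for version 3 IAM policies, which can carry conditional role bindings. A minimal sketch with placeholder identifiers; svc is a *compute.Service and the "fmt" import is assumed:

func printInstancePolicy(svc *compute.Service) error {
	policy, err := svc.Instances.
		GetIamPolicy("my-project", "us-central1-a", "my-instance"). // placeholders
		OptionsRequestedPolicyVersion(3).                           // request a v3 policy
		Do()
	if err != nil {
		return err
	}
	for _, b := range policy.Bindings {
		fmt.Printf("%s -> %v\n", b.Role, b.Members)
	}
	return nil
}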
@@ -81182,7 +85634,7 @@ func (c *LicensesGetIamPolicyCall) Header() http.Header { func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81253,6 +85705,12 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er // "resource" // ], // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -81348,7 +85806,7 @@ func (c *LicensesInsertCall) Header() http.Header { func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81540,6 +85998,15 @@ func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *LicensesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *LicensesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -81577,7 +86044,7 @@ func (c *LicensesListCall) Header() http.Header { func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81675,6 +86142,11 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/licenses", @@ -81762,7 +86234,7 @@ func (c *LicensesSetIamPolicyCall) Header() http.Header { func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81915,7 +86387,7 @@ func (c *LicensesTestIamPermissionsCall) Header() http.Header { func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82116,6 +86588,15 @@ func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTyp return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *MachineTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -82153,7 +86634,7 @@ func (c *MachineTypesAggregatedListCall) Header() http.Header { func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82256,6 +86737,11 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/machineTypes", @@ -82353,7 +86839,7 @@ func (c *MachineTypesGetCall) Header() http.Header { func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82548,6 +87034,15 @@ func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. 
The default value is +// false and the logic is the same as today. +func (c *MachineTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -82585,7 +87080,7 @@ func (c *MachineTypesListCall) Header() http.Header { func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82686,6 +87181,11 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -82825,6 +87325,15 @@ func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *N return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NetworkEndpointGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -82862,7 +87371,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82966,6 +87475,11 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/networkEndpointGroups", @@ -83072,7 +87586,7 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83257,7 +87771,7 @@ func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83434,7 +87948,7 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83609,7 +88123,7 @@ func (c *NetworkEndpointGroupsGetCall) Header() http.Header { func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83783,7 +88297,7 @@ func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83976,6 +88490,15 @@ func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndp return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -84013,7 +88536,7 @@ func (c *NetworkEndpointGroupsListCall) Header() http.Header { func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84114,6 +88637,11 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", @@ -84244,6 +88772,15 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken stri return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -84271,7 +88808,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84384,6 +88921,11 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", // "location": "path", @@ -84479,7 +89021,7 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84659,7 +89201,7 @@ func (c *NetworksAddPeeringCall) Header() http.Header { func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84833,7 +89375,7 @@ func (c *NetworksDeleteCall) Header() http.Header { func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84992,7 +89534,7 @@ func (c *NetworksGetCall) Header() http.Header { func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85158,7 +89700,7 @@ func (c *NetworksInsertCall) Header() http.Header { func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85342,6 +89884,15 @@ func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NetworksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
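The hunks above and below repeat the same generated change across every list call in this vendored client: a chainable ReturnPartialSuccess setter that maps to the returnPartialSuccess query parameter shown in the JSON descriptors. As a minimal sketch (not part of this diff), assuming the vendored google.golang.org/api/compute/v1 package, Application Default Credentials, and a placeholder project ID, a caller opts in like this:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials by default.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Chain the new option onto the list call; per the descriptor text,
	// the default is false and leaves today's behavior unchanged.
	networks, err := svc.Networks.List("my-project").ReturnPartialSuccess(true).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range networks.Items {
		fmt.Println(n.Name)
	}
}

Omitting the setter keeps the current behavior, since the parameter defaults to false.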
@@ -85379,7 +89930,7 @@ func (c *NetworksListCall) Header() http.Header { func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85477,6 +90028,11 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/networks", @@ -85625,6 +90181,15 @@ func (c *NetworksListPeeringRoutesCall) Region(region string) *NetworksListPeeri return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NetworksListPeeringRoutesCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -85662,7 +90227,7 @@ func (c *NetworksListPeeringRoutesCall) Header() http.Header { func (c *NetworksListPeeringRoutesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85792,6 +90357,11 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha // "description": "The region of the request. The response will include all subnet routes, static routes and dynamic routes in the region.", // "location": "query", // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/networks/{network}/listPeeringRoutes", @@ -85897,7 +90467,7 @@ func (c *NetworksPatchCall) Header() http.Header { func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86072,7 +90642,7 @@ func (c *NetworksRemovePeeringCall) Header() http.Header { func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86246,7 +90816,7 @@ func (c *NetworksSwitchToCustomModeCall) Header() http.Header { func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86416,7 +90986,7 @@ func (c *NetworksUpdatePeeringCall) Header() http.Header { func (c *NetworksUpdatePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86593,7 +91163,7 @@ func (c *NodeGroupsAddNodesCall) Header() http.Header { func (c *NodeGroupsAddNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86807,6 +91377,15 @@ func (c *NodeGroupsAggregatedListCall) PageToken(pageToken string) *NodeGroupsAg return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NodeGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -86844,7 +91423,7 @@ func (c *NodeGroupsAggregatedListCall) Header() http.Header { func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86947,6 +91526,11 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/nodeGroups", @@ -87050,7 +91634,7 @@ func (c *NodeGroupsDeleteCall) Header() http.Header { func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87228,7 +91812,7 @@ func (c *NodeGroupsDeleteNodesCall) Header() http.Header { func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87406,7 +91990,7 @@ func (c *NodeGroupsGetCall) Header() http.Header { func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87537,6 +92121,13 @@ func (r *NodeGroupsService) GetIamPolicy(project string, zone string, resource s return c } +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *NodeGroupsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *NodeGroupsGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
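The GetIamPolicy calls gain an analogous OptionsRequestedPolicyVersion setter. A sketch under the same assumptions (reusing the svc client from the sketch above; project, zone, and node-group names are placeholders, and version 3 is the format GCP uses for policies that carry conditions):

// getNodeGroupPolicy requests the IAM policy for a node group in the
// version-3 format. Project, zone, and resource names are placeholders.
func getNodeGroupPolicy(ctx context.Context, svc *compute.Service) (*compute.Policy, error) {
	return svc.NodeGroups.
		GetIamPolicy("my-project", "us-central1-a", "my-node-group").
		OptionsRequestedPolicyVersion(3).
		Do()
}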
@@ -87574,7 +92165,7 @@ func (c *NodeGroupsGetIamPolicyCall) Header() http.Header { func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87647,6 +92238,12 @@ func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, // "resource" // ], // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -87751,7 +92348,7 @@ func (c *NodeGroupsInsertCall) Header() http.Header { func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87954,6 +92551,15 @@ func (c *NodeGroupsListCall) PageToken(pageToken string) *NodeGroupsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NodeGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -87991,7 +92597,7 @@ func (c *NodeGroupsListCall) Header() http.Header { func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88092,6 +92698,11 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -88220,6 +92831,15 @@ func (c *NodeGroupsListNodesCall) PageToken(pageToken string) *NodeGroupsListNod return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NodeGroupsListNodesCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsListNodesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -88247,7 +92867,7 @@ func (c *NodeGroupsListNodesCall) Header() http.Header { func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88354,6 +92974,11 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -88465,7 +93090,7 @@ func (c *NodeGroupsPatchCall) Header() http.Header { func (c *NodeGroupsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88633,7 +93258,7 @@ func (c *NodeGroupsSetIamPolicyCall) Header() http.Header { func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88814,7 +93439,7 @@ func (c *NodeGroupsSetNodeTemplateCall) Header() http.Header { func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88982,7 +93607,7 @@ func (c *NodeGroupsTestIamPermissionsCall) Header() http.Header { func (c *NodeGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89191,6 +93816,15 @@ func (c *NodeTemplatesAggregatedListCall) PageToken(pageToken string) *NodeTempl return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NodeTemplatesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
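The new option also composes with the generated Pages helper when iterating list results. A sketch with placeholder identifiers, again assuming the compute/v1 client:

// listNodeGroups walks every page of node groups in one zone, opting in
// to partial results in case of failure. Identifiers are placeholders.
func listNodeGroups(ctx context.Context, svc *compute.Service) error {
	call := svc.NodeGroups.List("my-project", "us-central1-a").ReturnPartialSuccess(true)
	return call.Pages(ctx, func(page *compute.NodeGroupList) error {
		for _, ng := range page.Items {
			fmt.Printf("%s (%d nodes)\n", ng.Name, ng.Size)
		}
		return nil
	})
}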
@@ -89228,7 +93862,7 @@ func (c *NodeTemplatesAggregatedListCall) Header() http.Header { func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89331,6 +93965,11 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/nodeTemplates", @@ -89434,7 +94073,7 @@ func (c *NodeTemplatesDeleteCall) Header() http.Header { func (c *NodeTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89603,7 +94242,7 @@ func (c *NodeTemplatesGetCall) Header() http.Header { func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89734,6 +94373,13 @@ func (r *NodeTemplatesService) GetIamPolicy(project string, region string, resou return c } +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *NodeTemplatesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *NodeTemplatesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -89771,7 +94417,7 @@ func (c *NodeTemplatesGetIamPolicyCall) Header() http.Header { func (c *NodeTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89844,6 +94490,12 @@ func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // "resource" // ], // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -89947,7 +94599,7 @@ func (c *NodeTemplatesInsertCall) Header() http.Header { func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90141,6 +94793,15 @@ func (c *NodeTemplatesListCall) PageToken(pageToken string) *NodeTemplatesListCa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NodeTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTemplatesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -90178,7 +94839,7 @@ func (c *NodeTemplatesListCall) Header() http.Header { func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90285,6 +94946,11 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/nodeTemplates", @@ -90372,7 +95038,7 @@ func (c *NodeTemplatesSetIamPolicyCall) Header() http.Header { func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90535,7 +95201,7 @@ func (c *NodeTemplatesTestIamPermissionsCall) Header() http.Header { func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90744,6 +95410,15 @@ func (c *NodeTypesAggregatedListCall) PageToken(pageToken string) *NodeTypesAggr return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NodeTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTypesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -90781,7 +95456,7 @@ func (c *NodeTypesAggregatedListCall) Header() http.Header { func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90884,6 +95559,11 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/nodeTypes", @@ -90980,7 +95660,7 @@ func (c *NodeTypesGetCall) Header() http.Header { func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91174,6 +95854,15 @@ func (c *NodeTypesListCall) PageToken(pageToken string) *NodeTypesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. 
The default value is +// false and the logic is the same as today. +func (c *NodeTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -91211,7 +95900,7 @@ func (c *NodeTypesListCall) Header() http.Header { func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91312,6 +96001,11 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -91450,6 +96144,15 @@ func (c *PacketMirroringsAggregatedListCall) PageToken(pageToken string) *Packet return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *PacketMirroringsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PacketMirroringsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -91487,7 +96190,7 @@ func (c *PacketMirroringsAggregatedListCall) Header() http.Header { func (c *PacketMirroringsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91590,6 +96293,11 @@ func (c *PacketMirroringsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/packetMirrorings", @@ -91693,7 +96401,7 @@ func (c *PacketMirroringsDeleteCall) Header() http.Header { func (c *PacketMirroringsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91861,7 +96569,7 @@ func (c *PacketMirroringsGetCall) Header() http.Header { func (c *PacketMirroringsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92037,7 +96745,7 @@ func (c *PacketMirroringsInsertCall) Header() http.Header { func (c *PacketMirroringsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92231,6 +96939,15 @@ func (c *PacketMirroringsListCall) PageToken(pageToken string) *PacketMirrorings return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *PacketMirroringsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PacketMirroringsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -92268,7 +96985,7 @@ func (c *PacketMirroringsListCall) Header() http.Header { func (c *PacketMirroringsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92375,6 +97092,11 @@ func (c *PacketMirroringsListCall) Do(opts ...googleapi.CallOption) (*PacketMirr // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/packetMirrorings", @@ -92482,7 +97204,7 @@ func (c *PacketMirroringsPatchCall) Header() http.Header { func (c *PacketMirroringsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92650,7 +97372,7 @@ func (c *PacketMirroringsTestIamPermissionsCall) Header() http.Header { func (c *PacketMirroringsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92826,7 +97548,7 @@ func (c *ProjectsDisableXpnHostCall) Header() http.Header { func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92983,7 +97705,7 @@ func (c *ProjectsDisableXpnResourceCall) Header() http.Header { func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93145,7 +97867,7 @@ func (c *ProjectsEnableXpnHostCall) Header() http.Header { func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93303,7 +98025,7 @@ func (c *ProjectsEnableXpnResourceCall) Header() http.Header { func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93458,7 +98180,7 @@ func (c *ProjectsGetCall) Header() http.Header { func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93604,7 +98326,7 @@ func (c *ProjectsGetXpnHostCall) Header() http.Header { func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") 
for k, v := range c.header_ { reqHeaders[k] = v } @@ -93777,6 +98499,15 @@ func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXp return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ProjectsGetXpnResourcesCall) ReturnPartialSuccess(returnPartialSuccess bool) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -93814,7 +98545,7 @@ func (c *ProjectsGetXpnResourcesCall) Header() http.Header { func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93912,6 +98643,11 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/getXpnResources", @@ -94032,6 +98768,15 @@ func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnH return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ProjectsListXpnHostsCall) ReturnPartialSuccess(returnPartialSuccess bool) *ProjectsListXpnHostsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -94059,7 +98804,7 @@ func (c *ProjectsListXpnHostsCall) Header() http.Header { func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94159,6 +98904,11 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/listXpnHosts", @@ -94262,7 +99012,7 @@ func (c *ProjectsMoveDiskCall) Header() http.Header { func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94427,7 +99177,7 @@ func (c *ProjectsMoveInstanceCall) Header() http.Header { func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94593,7 +99343,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94760,7 +99510,7 @@ func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header { func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94928,7 +99678,7 @@ func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95097,7 +99847,7 @@ func (c *RegionAutoscalersDeleteCall) Header() http.Header { func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95265,7 +100015,7 @@ func (c *RegionAutoscalersGetCall) Header() http.Header { func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95441,7 +100191,7 @@ func (c *RegionAutoscalersInsertCall) Header() http.Header { func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" 
gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95635,6 +100385,15 @@ func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscale return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionAutoscalersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionAutoscalersListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -95672,7 +100431,7 @@ func (c *RegionAutoscalersListCall) Header() http.Header { func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95779,6 +100538,11 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/autoscalers", @@ -95891,7 +100655,7 @@ func (c *RegionAutoscalersPatchCall) Header() http.Header { func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96080,7 +100844,7 @@ func (c *RegionAutoscalersUpdateCall) Header() http.Header { func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96261,7 +101025,7 @@ func (c *RegionBackendServicesDeleteCall) Header() http.Header { func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96429,7 +101193,7 @@ func (c *RegionBackendServicesGetCall) Header() http.Header { func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96588,7 +101352,7 @@ func (c *RegionBackendServicesGetHealthCall) Header() http.Header { 
func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96713,10 +101477,8 @@ type RegionBackendServicesInsertCall struct { } // Insert: Creates a regional BackendService resource in the specified -// project using the data included in the request. There are several -// restrictions and guidelines to keep in mind when creating a regional -// backend service. Read Understanding backend services for more -// information. +// project using the data included in the request. For more information, +// see Backend services overview. func (r *RegionBackendServicesService) Insert(project string, region string, backendservice *BackendService) *RegionBackendServicesInsertCall { c := &RegionBackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -96771,7 +101533,7 @@ func (c *RegionBackendServicesInsertCall) Header() http.Header { func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96836,7 +101598,7 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Understanding backend services for more information.", + // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.", // "httpMethod": "POST", // "id": "compute.regionBackendServices.insert", // "parameterOrder": [ @@ -96965,6 +101727,15 @@ func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBacke return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionBackendServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionBackendServicesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -97002,7 +101773,7 @@ func (c *RegionBackendServicesListCall) Header() http.Header { func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97109,6 +101880,11 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/backendServices", @@ -97159,11 +101935,9 @@ type RegionBackendServicesPatchCall struct { } // Patch: Updates the specified regional BackendService resource with -// the data included in the request. There are several Understanding -// backend services to keep in mind when updating a backend service. -// Read Understanding backend services for more information. This -// method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. +// the data included in the request. For more information, see +// Understanding backend services This method supports PATCH semantics +// and uses the JSON merge patch format and processing rules. func (r *RegionBackendServicesService) Patch(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesPatchCall { c := &RegionBackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -97219,7 +101993,7 @@ func (c *RegionBackendServicesPatchCall) Header() http.Header { func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97285,7 +102059,7 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Understanding backend services This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", // "id": "compute.regionBackendServices.patch", // "parameterOrder": [ @@ -97350,9 +102124,8 @@ type RegionBackendServicesUpdateCall struct { } // Update: Updates the specified regional BackendService resource with -// the data included in the request. There are several Understanding -// backend services to keep in mind when updating a backend service. -// Read Understanding backend services for more information. 
+// the data included in the request. For more information, see Backend +// services overview. func (r *RegionBackendServicesService) Update(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesUpdateCall { c := &RegionBackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -97408,7 +102181,7 @@ func (c *RegionBackendServicesUpdateCall) Header() http.Header { func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97474,7 +102247,7 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information.", + // "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Backend services overview.", // "httpMethod": "PUT", // "id": "compute.regionBackendServices.update", // "parameterOrder": [ @@ -97621,6 +102394,15 @@ func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *Regio return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionCommitmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -97658,7 +102440,7 @@ func (c *RegionCommitmentsAggregatedListCall) Header() http.Header { func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97761,6 +102543,11 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/commitments", @@ -97857,7 +102644,7 @@ func (c *RegionCommitmentsGetCall) Header() http.Header { func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98033,7 +102820,7 @@ func (c *RegionCommitmentsInsertCall) Header() http.Header { func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98227,6 +103014,15 @@ func (c *RegionCommitmentsListCall) PageToken(pageToken string) *RegionCommitmen return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionCommitmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionCommitmentsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -98264,7 +103060,7 @@ func (c *RegionCommitmentsListCall) Header() http.Header { func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98371,6 +103167,11 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/commitments", @@ -98467,7 +103268,7 @@ func (c *RegionDiskTypesGetCall) Header() http.Header { func (c *RegionDiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98661,6 +103462,15 @@ func (c *RegionDiskTypesListCall) PageToken(pageToken string) *RegionDiskTypesLi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *RegionDiskTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionDiskTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -98698,7 +103508,7 @@ func (c *RegionDiskTypesListCall) Header() http.Header { func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98805,6 +103615,11 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/diskTypes", @@ -98912,7 +103727,7 @@ func (c *RegionDisksAddResourcePoliciesCall) Header() http.Header { func (c *RegionDisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99098,7 +103913,7 @@ func (c *RegionDisksCreateSnapshotCall) Header() http.Header { func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99285,7 +104100,7 @@ func (c *RegionDisksDeleteCall) Header() http.Header { func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99452,7 +104267,7 @@ func (c *RegionDisksGetCall) Header() http.Header { func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99583,6 +104398,13 @@ func (r *RegionDisksService) GetIamPolicy(project string, region string, resourc return c } +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *RegionDisksGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *RegionDisksGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -99620,7 +104442,7 @@ func (c *RegionDisksGetIamPolicyCall) Header() http.Header { func (c *RegionDisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99693,6 +104515,12 @@ func (c *RegionDisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, // "resource" // ], // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -99803,7 +104631,7 @@ func (c *RegionDisksInsertCall) Header() http.Header { func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100002,6 +104830,15 @@ func (c *RegionDisksListCall) PageToken(pageToken string) *RegionDisksListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionDisksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionDisksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -100039,7 +104876,7 @@ func (c *RegionDisksListCall) Header() http.Header { func (c *RegionDisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100146,6 +104983,11 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/disks", @@ -100252,7 +105094,7 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Header() http.Header { func (c *RegionDisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100438,7 +105280,7 @@ func (c *RegionDisksResizeCall) Header() http.Header { func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100606,7 +105448,7 @@ func (c *RegionDisksSetIamPolicyCall) Header() http.Header { func (c *RegionDisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100787,7 +105629,7 @@ func (c *RegionDisksSetLabelsCall) Header() http.Header { func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100955,7 +105797,7 @@ func (c *RegionDisksTestIamPermissionsCall) Header() http.Header { func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101068,24 +105910,24 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } -// method id "compute.regionHealthChecks.delete": +// method id "compute.regionHealthCheckServices.delete": -type RegionHealthChecksDeleteCall struct { - s *Service - project string - region string - healthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthCheckServicesDeleteCall struct { + s *Service + project string + region string + healthCheckService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HealthCheck resource. -func (r *RegionHealthChecksService) Delete(project string, region string, healthCheck string) *RegionHealthChecksDeleteCall { - c := &RegionHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified regional HealthCheckService. 
+func (r *RegionHealthCheckServicesService) Delete(project string, region string, healthCheckService string) *RegionHealthCheckServicesDeleteCall { + c := &RegionHealthCheckServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthCheck = healthCheck + c.healthCheckService = healthCheckService return c } @@ -101103,7 +105945,7 @@ func (r *RegionHealthChecksService) Delete(project string, region string, health // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionHealthChecksDeleteCall) RequestId(requestId string) *RegionHealthChecksDeleteCall { +func (c *RegionHealthCheckServicesDeleteCall) RequestId(requestId string) *RegionHealthCheckServicesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -101111,7 +105953,7 @@ func (c *RegionHealthChecksDeleteCall) RequestId(requestId string) *RegionHealth // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksDeleteCall) Fields(s ...googleapi.Field) *RegionHealthChecksDeleteCall { +func (c *RegionHealthCheckServicesDeleteCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -101119,23 +105961,23 @@ func (c *RegionHealthChecksDeleteCall) Fields(s ...googleapi.Field) *RegionHealt // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthChecksDeleteCall) Context(ctx context.Context) *RegionHealthChecksDeleteCall { +func (c *RegionHealthCheckServicesDeleteCall) Context(ctx context.Context) *RegionHealthCheckServicesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthChecksDeleteCall) Header() http.Header { +func (c *RegionHealthCheckServicesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthCheckServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101143,7 +105985,7 @@ func (c *RegionHealthChecksDeleteCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthCheckServices/{healthCheckService}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -101151,21 +105993,21 @@ func (c *RegionHealthChecksDeleteCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "healthCheck": c.healthCheck, + "project": c.project, + "region": c.region, + "healthCheckService": c.healthCheckService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.delete" call. +// Do executes the "compute.regionHealthCheckServices.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthCheckServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -101196,19 +106038,18 @@ func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Deletes the specified HealthCheck resource.", + // "description": "Deletes the specified regional HealthCheckService.", // "httpMethod": "DELETE", - // "id": "compute.regionHealthChecks.delete", + // "id": "compute.regionHealthCheckServices.delete", // "parameterOrder": [ // "project", // "region", - // "healthCheck" + // "healthCheckService" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to delete.", + // "healthCheckService": { + // "description": "Name of the HealthCheckService to delete. The name must be 1-63 characters long, and comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -101232,7 +106073,7 @@ func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // } // }, - // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "path": "{project}/regions/{region}/healthCheckServices/{healthCheckService}", // "response": { // "$ref": "Operation" // }, @@ -101244,33 +106085,32 @@ func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.regionHealthChecks.get": +// method id "compute.regionHealthCheckServices.get": -type RegionHealthChecksGetCall struct { - s *Service - project string - region string - healthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionHealthCheckServicesGetCall struct { + s *Service + project string + region string + healthCheckService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified HealthCheck resource. Gets a list of -// available health checks by making a list() request. 
-func (r *RegionHealthChecksService) Get(project string, region string, healthCheck string) *RegionHealthChecksGetCall { - c := &RegionHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified regional HealthCheckService resource. +func (r *RegionHealthCheckServicesService) Get(project string, region string, healthCheckService string) *RegionHealthCheckServicesGetCall { + c := &RegionHealthCheckServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthCheck = healthCheck + c.healthCheckService = healthCheckService return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksGetCall) Fields(s ...googleapi.Field) *RegionHealthChecksGetCall { +func (c *RegionHealthCheckServicesGetCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -101280,7 +106120,7 @@ func (c *RegionHealthChecksGetCall) Fields(s ...googleapi.Field) *RegionHealthCh // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionHealthChecksGetCall) IfNoneMatch(entityTag string) *RegionHealthChecksGetCall { +func (c *RegionHealthCheckServicesGetCall) IfNoneMatch(entityTag string) *RegionHealthCheckServicesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -101288,23 +106128,23 @@ func (c *RegionHealthChecksGetCall) IfNoneMatch(entityTag string) *RegionHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthChecksGetCall) Context(ctx context.Context) *RegionHealthChecksGetCall { +func (c *RegionHealthCheckServicesGetCall) Context(ctx context.Context) *RegionHealthCheckServicesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthChecksGetCall) Header() http.Header { +func (c *RegionHealthCheckServicesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthCheckServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101315,7 +106155,7 @@ func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthCheckServices/{healthCheckService}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -101323,21 +106163,21 @@ func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "healthCheck": c.healthCheck, + "project": c.project, + "region": c.region, + "healthCheckService": c.healthCheckService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.get" call. -// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HealthCheck.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { +// Do executes the "compute.regionHealthCheckServices.get" call. +// Exactly one of *HealthCheckService or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HealthCheckService.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionHealthCheckServicesGetCall) Do(opts ...googleapi.CallOption) (*HealthCheckService, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -101356,7 +106196,7 @@ func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthChe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthCheck{ + ret := &HealthCheckService{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -101368,19 +106208,18 @@ func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthChe } return ret, nil // { - // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + // "description": "Returns the specified regional HealthCheckService resource.", // "httpMethod": "GET", - // "id": "compute.regionHealthChecks.get", + // "id": "compute.regionHealthCheckServices.get", // "parameterOrder": [ // "project", // "region", - // "healthCheck" + // "healthCheckService" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to return.", + // "healthCheckService": { + // "description": "Name of the HealthCheckService to update. 
The name must be 1-63 characters long, and comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -101399,9 +106238,9 @@ func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthChe // "type": "string" // } // }, - // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "path": "{project}/regions/{region}/healthCheckServices/{healthCheckService}", // "response": { - // "$ref": "HealthCheck" + // "$ref": "HealthCheckService" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -101412,25 +106251,25 @@ func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthChe } -// method id "compute.regionHealthChecks.insert": +// method id "compute.regionHealthCheckServices.insert": -type RegionHealthChecksInsertCall struct { - s *Service - project string - region string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthCheckServicesInsertCall struct { + s *Service + project string + region string + healthcheckservice *HealthCheckService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HealthCheck resource in the specified project using -// the data included in the request. -func (r *RegionHealthChecksService) Insert(project string, region string, healthcheck *HealthCheck) *RegionHealthChecksInsertCall { - c := &RegionHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a regional HealthCheckService resource in the +// specified project and region using the data included in the request. +func (r *RegionHealthCheckServicesService) Insert(project string, region string, healthcheckservice *HealthCheckService) *RegionHealthCheckServicesInsertCall { + c := &RegionHealthCheckServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthcheck = healthcheck + c.healthcheckservice = healthcheckservice return c } @@ -101448,7 +106287,7 @@ func (r *RegionHealthChecksService) Insert(project string, region string, health // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionHealthChecksInsertCall) RequestId(requestId string) *RegionHealthChecksInsertCall { +func (c *RegionHealthCheckServicesInsertCall) RequestId(requestId string) *RegionHealthCheckServicesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -101456,7 +106295,7 @@ func (c *RegionHealthChecksInsertCall) RequestId(requestId string) *RegionHealth // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksInsertCall) Fields(s ...googleapi.Field) *RegionHealthChecksInsertCall { +func (c *RegionHealthCheckServicesInsertCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -101464,36 +106303,36 @@ func (c *RegionHealthChecksInsertCall) Fields(s ...googleapi.Field) *RegionHealt // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionHealthChecksInsertCall) Context(ctx context.Context) *RegionHealthChecksInsertCall { +func (c *RegionHealthCheckServicesInsertCall) Context(ctx context.Context) *RegionHealthCheckServicesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthChecksInsertCall) Header() http.Header { +func (c *RegionHealthCheckServicesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthCheckServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheckservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthCheckServices") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -101507,14 +106346,14 @@ func (c *RegionHealthChecksInsertCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.insert" call. +// Do executes the "compute.regionHealthCheckServices.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthCheckServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -101545,9 +106384,9 @@ func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + // "description": "Creates a regional HealthCheckService resource in the specified project and region using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.regionHealthChecks.insert", + // "id": "compute.regionHealthCheckServices.insert", // "parameterOrder": [ // "project", // "region" @@ -101573,9 +106412,9 @@ func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // } // }, - // "path": "{project}/regions/{region}/healthChecks", + // "path": "{project}/regions/{region}/healthCheckServices", // "request": { - // "$ref": "HealthCheck" + // "$ref": "HealthCheckService" // }, // "response": { // "$ref": "Operation" @@ -101588,9 +106427,9 @@ func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.regionHealthChecks.list": +// method id "compute.regionHealthCheckServices.list": -type RegionHealthChecksListCall struct { +type RegionHealthCheckServicesListCall struct { s *Service project string region string @@ -101600,10 +106439,10 @@ type RegionHealthChecksListCall struct { header_ http.Header } -// List: Retrieves the list of HealthCheck resources available to the -// specified project. -func (r *RegionHealthChecksService) List(project string, region string) *RegionHealthChecksListCall { - c := &RegionHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Lists all the HealthCheckService resources that have been +// configured for the specified project in the given region. +func (r *RegionHealthCheckServicesService) List(project string, region string) *RegionHealthCheckServicesListCall { + c := &RegionHealthCheckServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -101632,7 +106471,7 @@ func (r *RegionHealthChecksService) List(project string, region string) *RegionH // expressions explicitly. For example: ``` (cpuPlatform = "Intel // Skylake") OR (cpuPlatform = "Intel Broadwell") AND // (scheduling.automaticRestart = true) ``` -func (c *RegionHealthChecksListCall) Filter(filter string) *RegionHealthChecksListCall { +func (c *RegionHealthCheckServicesListCall) Filter(filter string) *RegionHealthCheckServicesListCall { c.urlParams_.Set("filter", filter) return c } @@ -101643,7 +106482,7 @@ func (c *RegionHealthChecksListCall) Filter(filter string) *RegionHealthChecksLi // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *RegionHealthChecksListCall) MaxResults(maxResults int64) *RegionHealthChecksListCall { +func (c *RegionHealthCheckServicesListCall) MaxResults(maxResults int64) *RegionHealthCheckServicesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -101661,7 +106500,7 @@ func (c *RegionHealthChecksListCall) MaxResults(maxResults int64) *RegionHealthC // // Currently, only sorting by `name` or `creationTimestamp desc` is // supported. 
-func (c *RegionHealthChecksListCall) OrderBy(orderBy string) *RegionHealthChecksListCall { +func (c *RegionHealthCheckServicesListCall) OrderBy(orderBy string) *RegionHealthCheckServicesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -101669,15 +106508,24 @@ func (c *RegionHealthChecksListCall) OrderBy(orderBy string) *RegionHealthChecks // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionHealthChecksListCall) PageToken(pageToken string) *RegionHealthChecksListCall { +func (c *RegionHealthCheckServicesListCall) PageToken(pageToken string) *RegionHealthCheckServicesListCall { c.urlParams_.Set("pageToken", pageToken) return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionHealthCheckServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionHealthCheckServicesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksListCall) Fields(s ...googleapi.Field) *RegionHealthChecksListCall { +func (c *RegionHealthCheckServicesListCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -101687,7 +106535,7 @@ func (c *RegionHealthChecksListCall) Fields(s ...googleapi.Field) *RegionHealthC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionHealthChecksListCall) IfNoneMatch(entityTag string) *RegionHealthChecksListCall { +func (c *RegionHealthCheckServicesListCall) IfNoneMatch(entityTag string) *RegionHealthCheckServicesListCall { c.ifNoneMatch_ = entityTag return c } @@ -101695,23 +106543,23 @@ func (c *RegionHealthChecksListCall) IfNoneMatch(entityTag string) *RegionHealth // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthChecksListCall) Context(ctx context.Context) *RegionHealthChecksListCall { +func (c *RegionHealthCheckServicesListCall) Context(ctx context.Context) *RegionHealthCheckServicesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionHealthChecksListCall) Header() http.Header { +func (c *RegionHealthCheckServicesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthCheckServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101722,7 +106570,7 @@ func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthCheckServices") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -101736,14 +106584,14 @@ func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.list" call. -// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HealthCheckList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionHealthCheckServices.list" call. +// Exactly one of *HealthCheckServicesList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HealthCheckServicesList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { +func (c *RegionHealthCheckServicesListCall) Do(opts ...googleapi.CallOption) (*HealthCheckServicesList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -101762,7 +106610,7 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthCheckList{ + ret := &HealthCheckServicesList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -101774,9 +106622,9 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh } return ret, nil // { - // "description": "Retrieves the list of HealthCheck resources available to the specified project.", + // "description": "Lists all the HealthCheckService resources that have been configured for the specified project in the given region.", // "httpMethod": "GET", - // "id": "compute.regionHealthChecks.list", + // "id": "compute.regionHealthCheckServices.list", // "parameterOrder": [ // "project", // "region" @@ -101818,11 +106666,16 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "{project}/regions/{region}/healthChecks", + // "path": "{project}/regions/{region}/healthCheckServices", // "response": { - // "$ref": "HealthCheckList" + // "$ref": "HealthCheckServicesList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -101836,7 +106689,7 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionHealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { +func (c *RegionHealthCheckServicesListCall) Pages(ctx context.Context, f func(*HealthCheckServicesList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -101854,28 +106707,28 @@ func (c *RegionHealthChecksListCall) Pages(ctx context.Context, f func(*HealthCh } } -// method id "compute.regionHealthChecks.patch": +// method id "compute.regionHealthCheckServices.patch": -type RegionHealthChecksPatchCall struct { - s *Service - project string - region string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthCheckServicesPatchCall struct { + s *Service + project string + region string + healthCheckService string + healthcheckservice *HealthCheckService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HealthCheck resource in the specified project using -// the data included in the request. This method supports PATCH +// Patch: Updates the specified regional HealthCheckService resource +// with the data included in the request. This method supports PATCH // semantics and uses the JSON merge patch format and processing rules. 
-func (r *RegionHealthChecksService) Patch(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksPatchCall { - c := &RegionHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionHealthCheckServicesService) Patch(project string, region string, healthCheckService string, healthcheckservice *HealthCheckService) *RegionHealthCheckServicesPatchCall { + c := &RegionHealthCheckServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthCheck = healthCheck - c.healthcheck = healthcheck + c.healthCheckService = healthCheckService + c.healthcheckservice = healthcheckservice return c } @@ -101893,7 +106746,7 @@ func (r *RegionHealthChecksService) Patch(project string, region string, healthC // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionHealthChecksPatchCall) RequestId(requestId string) *RegionHealthChecksPatchCall { +func (c *RegionHealthCheckServicesPatchCall) RequestId(requestId string) *RegionHealthCheckServicesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -101901,7 +106754,7 @@ func (c *RegionHealthChecksPatchCall) RequestId(requestId string) *RegionHealthC // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksPatchCall) Fields(s ...googleapi.Field) *RegionHealthChecksPatchCall { +func (c *RegionHealthCheckServicesPatchCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -101909,36 +106762,36 @@ func (c *RegionHealthChecksPatchCall) Fields(s ...googleapi.Field) *RegionHealth // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthChecksPatchCall) Context(ctx context.Context) *RegionHealthChecksPatchCall { +func (c *RegionHealthCheckServicesPatchCall) Context(ctx context.Context) *RegionHealthCheckServicesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionHealthChecksPatchCall) Header() http.Header { +func (c *RegionHealthCheckServicesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthCheckServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheckservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthCheckServices/{healthCheckService}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -101946,21 +106799,21 @@ func (c *RegionHealthChecksPatchCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "healthCheck": c.healthCheck, + "project": c.project, + "region": c.region, + "healthCheckService": c.healthCheckService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.patch" call. +// Do executes the "compute.regionHealthCheckServices.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthCheckServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -101991,19 +106844,18 @@ func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the specified regional HealthCheckService resource with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", - // "id": "compute.regionHealthChecks.patch", + // "id": "compute.regionHealthCheckServices.patch", // "parameterOrder": [ // "project", // "region", - // "healthCheck" + // "healthCheckService" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to patch.", + // "healthCheckService": { + // "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -102027,9 +106879,9 @@ func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "path": "{project}/regions/{region}/healthCheckServices/{healthCheckService}", // "request": { - // "$ref": "HealthCheck" + // "$ref": "HealthCheckService" // }, // "response": { // "$ref": "Operation" @@ -102042,27 +106894,24 @@ func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.regionHealthChecks.update": +// method id "compute.regionHealthChecks.delete": -type RegionHealthChecksUpdateCall struct { +type RegionHealthChecksDeleteCall struct { s *Service project string region string healthCheck string - healthcheck *HealthCheck urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Update: Updates a HealthCheck resource in the specified project using -// the data included in the request. -func (r *RegionHealthChecksService) Update(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksUpdateCall { - c := &RegionHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HealthCheck resource. +func (r *RegionHealthChecksService) Delete(project string, region string, healthCheck string) *RegionHealthChecksDeleteCall { + c := &RegionHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.healthCheck = healthCheck - c.healthcheck = healthcheck return c } @@ -102080,7 +106929,7 @@ func (r *RegionHealthChecksService) Update(project string, region string, health // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionHealthChecksUpdateCall) RequestId(requestId string) *RegionHealthChecksUpdateCall { +func (c *RegionHealthChecksDeleteCall) RequestId(requestId string) *RegionHealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -102088,7 +106937,7 @@ func (c *RegionHealthChecksUpdateCall) RequestId(requestId string) *RegionHealth // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksUpdateCall) Fields(s ...googleapi.Field) *RegionHealthChecksUpdateCall { +func (c *RegionHealthChecksDeleteCall) Fields(s ...googleapi.Field) *RegionHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102096,38 +106945,33 @@ func (c *RegionHealthChecksUpdateCall) Fields(s ...googleapi.Field) *RegionHealt // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthChecksUpdateCall) Context(ctx context.Context) *RegionHealthChecksUpdateCall { +func (c *RegionHealthChecksDeleteCall) Context(ctx context.Context) *RegionHealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthChecksUpdateCall) Header() http.Header { +func (c *RegionHealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -102140,14 +106984,14 @@ func (c *RegionHealthChecksUpdateCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.update" call. +// Do executes the "compute.regionHealthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102178,9 +107022,9 @@ func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.regionHealthChecks.update", + // "description": "Deletes the specified HealthCheck resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionHealthChecks.delete", // "parameterOrder": [ // "project", // "region", @@ -102188,7 +107032,7 @@ func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operat // ], // "parameters": { // "healthCheck": { - // "description": "Name of the HealthCheck resource to update.", + // "description": "Name of the HealthCheck resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -102215,9 +107059,6 @@ func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operat // } // }, // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", - // "request": { - // "$ref": "HealthCheck" - // }, // "response": { // "$ref": "Operation" // }, @@ -102229,127 +107070,100 @@ func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.regionInstanceGroupManagers.abandonInstances": +// method id "compute.regionHealthChecks.get": -type RegionInstanceGroupManagersAbandonInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthChecksGetCall struct { + s *Service + project string + region string + healthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// AbandonInstances: Flags the specified instances to be immediately -// removed from the managed instance group. Abandoning an instance does -// not delete the instance, but it does remove the instance from any -// target pools that are applied by the managed instance group. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you abandon. This operation is marked as -// DONE when the action is scheduled even if the instances have not yet -// been removed from the group. You must separately verify the status of -// the abandoning action with the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { - c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HealthCheck resource. Gets a list of +// available health checks by making a list() request. 
+func (r *RegionHealthChecksService) Get(project string, region string, healthCheck string) *RegionHealthChecksGetCall { + c := &RegionHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersabandoninstancesrequest = regioninstancegroupmanagersabandoninstancesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersAbandonInstancesCall { - c.urlParams_.Set("requestId", requestId) + c.healthCheck = healthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionHealthChecksGetCall) Fields(s ...googleapi.Field) *RegionHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionHealthChecksGetCall) IfNoneMatch(entityTag string) *RegionHealthChecksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionHealthChecksGetCall) Context(ctx context.Context) *RegionHealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { +func (c *RegionHealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersabandoninstancesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.abandonInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionHealthChecks.get" call. +// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *HealthCheck.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102368,7 +107182,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &HealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -102380,18 +107194,19 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Flags the specified instances to be immediately removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. 
This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.abandonInstances", + // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regionHealthChecks.get", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "healthCheck" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -102405,58 +107220,69 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", - // "request": { - // "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" - // }, + // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", // "response": { - // "$ref": "Operation" + // "$ref": "HealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.applyUpdatesToInstances": +// method id "compute.regionHealthChecks.insert": -type RegionInstanceGroupManagersApplyUpdatesToInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthChecksInsertCall struct { + s *Service + project string + region string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ApplyUpdatesToInstances: Apply updates to selected instances the -// managed instance group. -func (r *RegionInstanceGroupManagersService) ApplyUpdatesToInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { - c := &RegionInstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HealthCheck resource in the specified project using +// the data included in the request. +func (r *RegionHealthChecksService) Insert(project string, region string, healthcheck *HealthCheck) *RegionHealthChecksInsertCall { + c := &RegionHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersapplyupdatesrequest = regioninstancegroupmanagersapplyupdatesrequest + c.healthcheck = healthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionHealthChecksInsertCall) RequestId(requestId string) *RegionHealthChecksInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { +func (c *RegionHealthChecksInsertCall) Fields(s ...googleapi.Field) *RegionHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102464,36 +107290,36 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...goo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { +func (c *RegionHealthChecksInsertCall) Context(ctx context.Context) *RegionHealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { +func (c *RegionHealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersapplyupdatesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -102501,21 +107327,1035 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt s } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.applyUpdatesToInstances" call. +// Do executes the "compute.regionHealthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.regionHealthChecks.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/healthChecks", + // "request": { + // "$ref": "HealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionHealthChecks.list": + +type RegionHealthChecksListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of HealthCheck resources available to the +// specified project. +func (r *RegionHealthChecksService) List(project string, region string) *RegionHealthChecksListCall { + c := &RegionHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. 
The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *RegionHealthChecksListCall) Filter(filter string) *RegionHealthChecksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionHealthChecksListCall) MaxResults(maxResults int64) *RegionHealthChecksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *RegionHealthChecksListCall) OrderBy(orderBy string) *RegionHealthChecksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionHealthChecksListCall) PageToken(pageToken string) *RegionHealthChecksListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionHealthChecksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionHealthChecksListCall) Fields(s ...googleapi.Field) *RegionHealthChecksListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionHealthChecksListCall) IfNoneMatch(entityTag string) *RegionHealthChecksListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionHealthChecksListCall) Context(ctx context.Context) *RegionHealthChecksListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionHealthChecksListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionHealthChecks.list" call. +// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HealthCheckList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HealthCheckList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of HealthCheck resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.regionHealthChecks.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "{project}/regions/{region}/healthChecks", + // "response": { + // "$ref": "HealthCheckList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionHealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionHealthChecks.patch": + +type RegionHealthChecksPatchCall struct { + s *Service + project string + region string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a HealthCheck resource in the specified project using +// the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +func (r *RegionHealthChecksService) Patch(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksPatchCall { + c := &RegionHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.healthCheck = healthCheck + c.healthcheck = healthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *RegionHealthChecksPatchCall) RequestId(requestId string) *RegionHealthChecksPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionHealthChecksPatchCall) Fields(s ...googleapi.Field) *RegionHealthChecksPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionHealthChecksPatchCall) Context(ctx context.Context) *RegionHealthChecksPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionHealthChecksPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionHealthChecks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.regionHealthChecks.patch", + // "parameterOrder": [ + // "project", + // "region", + // "healthCheck" + // ], + // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "request": { + // "$ref": "HealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionHealthChecks.update": + +type RegionHealthChecksUpdateCall struct { + s *Service + project string + region string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a HealthCheck resource in the specified project using +// the data included in the request. +func (r *RegionHealthChecksService) Update(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksUpdateCall { + c := &RegionHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.healthCheck = healthCheck + c.healthcheck = healthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionHealthChecksUpdateCall) RequestId(requestId string) *RegionHealthChecksUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionHealthChecksUpdateCall) Fields(s ...googleapi.Field) *RegionHealthChecksUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionHealthChecksUpdateCall) Context(ctx context.Context) *RegionHealthChecksUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionHealthChecksUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionHealthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.regionHealthChecks.update", + // "parameterOrder": [ + // "project", + // "region", + // "healthCheck" + // ], + // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "request": { + // "$ref": "HealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceGroupManagers.abandonInstances": + +type RegionInstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AbandonInstances: Flags the specified instances to be immediately +// removed from the managed instance group. Abandoning an instance does +// not delete the instance, but it does remove the instance from any +// target pools that are applied by the managed instance group. 
This +// method reduces the targetSize of the managed instance group by the +// number of instances that you abandon. This operation is marked as +// DONE when the action is scheduled even if the instances have not yet +// been removed from the group. You must separately verify the status of +// the abandoning action with the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { + c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersabandoninstancesrequest = regioninstancegroupmanagersabandoninstancesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersAbandonInstancesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersAbandonInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersAbandonInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersabandoninstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.abandonInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Flags the specified instances to be immediately removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.abandonInstances", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceGroupManagers.applyUpdatesToInstances": + +type RegionInstanceGroupManagersApplyUpdatesToInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ApplyUpdatesToInstances: Apply updates to selected instances the +// managed instance group. +func (r *RegionInstanceGroupManagersService) ApplyUpdatesToInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { + c := &RegionInstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersapplyupdatesrequest = regioninstancegroupmanagersapplyupdatesrequest + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersapplyupdatesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.applyUpdatesToInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102556,7 +108396,2392 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...goog // ], // "parameters": { // "instanceGroupManager": { - // "description": "The name of the managed instance group, should conform to RFC1035.", + // "description": "The name of the managed instance group, should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersApplyUpdatesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceGroupManagers.createInstances": + +type RegionInstanceGroupManagersCreateInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerscreateinstancesrequest *RegionInstanceGroupManagersCreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// CreateInstances: Creates instances with per-instance configs in this +// regional managed instance group. Instances are created using the +// current instance template. The create instances operation is marked +// DONE if the createInstances request is successful. The underlying +// actions take additional time. You must separately verify the status +// of the creating or actions with the listmanagedinstances method. +func (r *RegionInstanceGroupManagersService) CreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagerscreateinstancesrequest *RegionInstanceGroupManagersCreateInstancesRequest) *RegionInstanceGroupManagersCreateInstancesCall { + c := &RegionInstanceGroupManagersCreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerscreateinstancesrequest = regioninstancegroupmanagerscreateinstancesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *RegionInstanceGroupManagersCreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersCreateInstancesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersCreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersCreateInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersCreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersCreateInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersCreateInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerscreateinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.createInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates instances with per-instance configs in this regional managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. You must separately verify the status of the creating or actions with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.createInstances", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region where the managed instance group is located. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersCreateInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceGroupManagers.delete": + +type RegionInstanceGroupManagersDeleteCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified managed instance group and all of the +// instances in that group. 
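The CreateInstances call documented above is marked DONE as soon as the request is accepted, while the instances themselves may still be creating. A minimal, hypothetical sketch of invoking it through this generated client follows; the project, region, group, and instance names are placeholders rather than values taken from this change, and the call chain (builder, Context, Do) simply mirrors the methods shown above.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	req := &compute.RegionInstanceGroupManagersCreateInstancesRequest{
		// Each per-instance config names one instance to create from the
		// group's current instance template.
		Instances: []*compute.PerInstanceConfig{{Name: "example-instance-0"}},
	}
	op, err := svc.RegionInstanceGroupManagers.
		CreateInstances("example-project", "us-central1", "example-igm", req).
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	// op.Status is typically DONE here even though the instances may still be
	// creating, per the method description above; verify separately with
	// listManagedInstances if needed.
	fmt.Println("operation:", op.Name, op.Status)
}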
+func (r *RegionInstanceGroupManagersService) Delete(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersDeleteCall { + c := &RegionInstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersDeleteCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersDeleteCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified managed instance group and all of the instances in that group.", + // "httpMethod": "DELETE", + // "id": "compute.regionInstanceGroupManagers.delete", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceGroupManagers.deleteInstances": + +type RegionInstanceGroupManagersDeleteInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteInstances: Flags the specified instances in the managed +// instance group to be immediately deleted. The instances are also +// removed from any target pools of which they were a member. 
This +// method reduces the targetSize of the managed instance group by the +// number of instances that you delete. The deleteInstances operation is +// marked DONE if the deleteInstances request is successful. The +// underlying actions take additional time. You must separately verify +// the status of the deleting action with the listmanagedinstances +// method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest) *RegionInstanceGroupManagersDeleteInstancesCall { + c := &RegionInstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersdeleteinstancesrequest = regioninstancegroupmanagersdeleteinstancesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteInstancesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersdeleteinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.deleteInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. 
You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.deleteInstances", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceGroupManagers.deletePerInstanceConfigs": + +type RegionInstanceGroupManagersDeletePerInstanceConfigsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerdeleteinstanceconfigreq *RegionInstanceGroupManagerDeleteInstanceConfigReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeletePerInstanceConfigs: Deletes selected per-instance configs for +// the managed instance group. 
+func (r *RegionInstanceGroupManagersService) DeletePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerdeleteinstanceconfigreq *RegionInstanceGroupManagerDeleteInstanceConfigReq) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { + c := &RegionInstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerdeleteinstanceconfigreq = regioninstancegroupmanagerdeleteinstanceconfigreq + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerdeleteinstanceconfigreq) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.deletePerInstanceConfigs" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes selected per-instance configs for the managed instance group.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.deletePerInstanceConfigs", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", + // "request": { + // "$ref": "RegionInstanceGroupManagerDeleteInstanceConfigReq" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceGroupManagers.get": + +type RegionInstanceGroupManagersGetCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns all of the details about the specified managed instance +// group. +func (r *RegionInstanceGroupManagersService) Get(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersGetCall { + c := &RegionInstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
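Read calls such as Get also accept the Fields and IfNoneMatch options described above, which enable partial responses and conditional requests. The sketch below is illustrative only, with placeholder identifiers and a hypothetical cached ETag; googleapi.IsNotModified distinguishes a not-modified result from a real error.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	cachedETag := "example-etag" // hypothetical value saved from a previous response
	igm, err := svc.RegionInstanceGroupManagers.
		Get("example-project", "us-central1", "example-igm").
		Fields("name", "targetSize", "status"). // request only these fields
		IfNoneMatch(cachedETag).
		Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("group unchanged since last fetch")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(igm.Name, igm.TargetSize)
}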
+func (c *RegionInstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersGetCall) Context(ctx context.Context) *RegionInstanceGroupManagersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.get" call. +// Exactly one of *InstanceGroupManager or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupManager.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupManager{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns all of the details about the specified managed instance group.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroupManagers.get", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group to return.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "response": { + // "$ref": "InstanceGroupManager" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionInstanceGroupManagers.insert": + +type RegionInstanceGroupManagersInsertCall struct { + s *Service + project string + region string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a managed instance group using the information that +// you specify in the request. After the group is created, instances in +// the group are created using the specified instance template. This +// operation is marked as DONE when the group is created even if the +// instances in the group have not yet been created. You must separately +// verify the status of the individual instances with the +// listmanagedinstances method. +// +// A regional managed instance group can contain up to 2000 instances. +func (r *RegionInstanceGroupManagersService) Insert(project string, region string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersInsertCall { + c := &RegionInstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instancegroupmanager = instancegroupmanager + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *RegionInstanceGroupManagersInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersInsertCall) Context(ctx context.Context) *RegionInstanceGroupManagersInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers", + // "request": { + // "$ref": "InstanceGroupManager" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceGroupManagers.list": + +type RegionInstanceGroupManagersListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of managed instance groups that are +// contained within the specified region. 
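Like the other mutating calls here, Insert returns an Operation that is DONE once the group resource exists, before its instances are necessarily running. The sketch below, with placeholder names and an instance template that is assumed to already exist, polls that operation with the package's RegionOperations.Get call (not shown in this hunk) until it completes.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	igm := &compute.InstanceGroupManager{
		Name:             "example-igm",
		BaseInstanceName: "example",
		InstanceTemplate: "global/instanceTemplates/example-template", // assumed to exist
		TargetSize:       3,
	}
	op, err := svc.RegionInstanceGroupManagers.
		Insert("example-project", "us-central1", igm).
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	// Poll the regional operation until it reports DONE.
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		op, err = svc.RegionOperations.Get("example-project", "us-central1", op.Name).Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
	}
	fmt.Println("instance group manager created:", igm.Name)
}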
+func (r *RegionInstanceGroupManagersService) List(project string, region string) *RegionInstanceGroupManagersListCall { + c := &RegionInstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. 
+func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupManagersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceGroupManagersListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersListCall) Context(ctx context.Context) *RegionInstanceGroupManagersListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.list" call. +// Exactly one of *RegionInstanceGroupManagerList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *RegionInstanceGroupManagerList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
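The List call combines the Filter, MaxResults, and PageToken options described above, and the generated Pages helper defined a little further below in this file walks nextPageToken automatically. A brief hypothetical sketch with placeholder project and region:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	call := svc.RegionInstanceGroupManagers.
		List("example-project", "us-central1").
		Filter(`name != "example-excluded"`). // example filter expression
		MaxResults(100)
	// Pages follows nextPageToken until every page has been visited.
	err = call.Pages(ctx, func(page *compute.RegionInstanceGroupManagerList) error {
		for _, igm := range page.Items {
			fmt.Println(igm.Name, igm.TargetSize)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}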
+func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagerList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RegionInstanceGroupManagerList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of managed instance groups that are contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroupManagers.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers", + // "response": { + // "$ref": "RegionInstanceGroupManagerList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupManagersListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionInstanceGroupManagers.listErrors": + +type RegionInstanceGroupManagersListErrorsCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// ListErrors: Lists all errors thrown by actions on instances for a +// given regional managed instance group. The filter and orderBy query +// parameters are not supported. +func (r *RegionInstanceGroupManagersService) ListErrors(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListErrorsCall { + c := &RegionInstanceGroupManagersListErrorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. 
+// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *RegionInstanceGroupManagersListErrorsCall) Filter(filter string) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionInstanceGroupManagersListErrorsCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *RegionInstanceGroupManagersListErrorsCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupManagersListErrorsCall) PageToken(pageToken string) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupManagersListErrorsCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersListErrorsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceGroupManagersListErrorsCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListErrorsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersListErrorsCall) Context(ctx context.Context) *RegionInstanceGroupManagersListErrorsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersListErrorsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersListErrorsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.listErrors" call. +// Exactly one of *RegionInstanceGroupManagersListErrorsResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *RegionInstanceGroupManagersListErrorsResponse.ServerResponse.Header +// or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListErrorsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RegionInstanceGroupManagersListErrorsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all errors thrown by actions on instances for a given regional managed instance group. 
The filter and orderBy query parameters are not supported.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroupManagers.listErrors", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It must be a string that meets the requirements in RFC1035, or an unsigned long integer: must match regexp pattern: (?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)|[1-9][0-9]{0,19}.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request. This should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", + // "response": { + // "$ref": "RegionInstanceGroupManagersListErrorsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupManagersListErrorsCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListErrorsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionInstanceGroupManagers.listManagedInstances": + +type RegionInstanceGroupManagersListManagedInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ListManagedInstances: Lists the instances in the managed instance +// group and instances that are scheduled to be created. The list +// includes any current actions that the group has scheduled for its +// instances. The orderBy query parameter is not supported. +func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { + c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. 
For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionInstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersListManagedInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.listManagedInstances" call. +// Exactly one of *RegionInstanceGroupManagersListInstancesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *RegionInstanceGroupManagersListInstancesResponse.ServerResponse.Heade +// r or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstancesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RegionInstanceGroupManagersListInstancesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. 
The orderBy query parameter is not supported.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.listManagedInstances", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "response": { + // "$ref": "RegionInstanceGroupManagersListInstancesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstancesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionInstanceGroupManagers.listPerInstanceConfigs": + +type RegionInstanceGroupManagersListPerInstanceConfigsCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ListPerInstanceConfigs: Lists all of the per-instance configs defined +// for the managed instance group. The orderBy query parameter is not +// supported. +func (r *RegionInstanceGroupManagersService) ListPerInstanceConfigs(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { + c := &RegionInstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. 
You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersListPerInstanceConfigsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.listPerInstanceConfigs" call. +// Exactly one of *RegionInstanceGroupManagersListInstanceConfigsResp or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *RegionInstanceGroupManagersListInstanceConfigsResp.ServerResponse.Hea +// der or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstanceConfigsResp, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RegionInstanceGroupManagersListInstanceConfigsResp{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all of the per-instance configs defined for the managed instance group. The orderBy query parameter is not supported.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", + // "response": { + // "$ref": "RegionInstanceGroupManagersListInstanceConfigsResp" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstanceConfigsResp) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionInstanceGroupManagers.patch": + +type RegionInstanceGroupManagersPatchCall struct { + s *Service + project string + region string + instanceGroupManager string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is patched even if the instances in the group are still in the +// process of being patched. You must separately verify the status of +// the individual instances with the listmanagedinstances method. This +// method supports PATCH semantics and uses the JSON merge patch format +// and processing rules. +func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { + c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersPatchCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.regionInstanceGroupManagers.patch", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", // "location": "path", // "required": true, // "type": "string" @@ -102569,15 +110794,20 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...goog // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "description": "Name of the region scoping this request.", // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", // "request": { - // "$ref": "RegionInstanceGroupManagersApplyUpdatesRequest" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" @@ -102590,31 +110820,29 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...goog } -// method id "compute.regionInstanceGroupManagers.createInstances": +// method id "compute.regionInstanceGroupManagers.patchPerInstanceConfigs": -type RegionInstanceGroupManagersCreateInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerscreateinstancesrequest *RegionInstanceGroupManagersCreateInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersPatchPerInstanceConfigsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// CreateInstances: Creates instances with per-instance configs in this -// regional managed instance group. Instances are created using the -// current instance template. The create instances operation is marked -// DONE if the createInstances request is successful. The underlying -// actions take additional time. You must separately verify the status -// of the creating or actions with the listmanagedinstances method. 
-func (r *RegionInstanceGroupManagersService) CreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagerscreateinstancesrequest *RegionInstanceGroupManagersCreateInstancesRequest) *RegionInstanceGroupManagersCreateInstancesCall { - c := &RegionInstanceGroupManagersCreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// PatchPerInstanceConfigs: Insert or patch (for the ones that already +// exist) per-instance configs for the managed instance group. +// perInstanceConfig.instance serves as a key used to distinguish +// whether to perform insert or patch. +func (r *RegionInstanceGroupManagersService) PatchPerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { + c := &RegionInstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerscreateinstancesrequest = regioninstancegroupmanagerscreateinstancesrequest + c.regioninstancegroupmanagerpatchinstanceconfigreq = regioninstancegroupmanagerpatchinstanceconfigreq return c } @@ -102627,11 +110855,12 @@ func (r *RegionInstanceGroupManagersService) CreateInstances(project string, reg // and the request times out. If you make the request again with the // same request ID, the server can check if original operation with the // same request ID was received, and if so, will ignore the second -// request. +// request. This prevents clients from accidentally creating duplicate +// commitments. // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersCreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersCreateInstancesCall { +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -102639,7 +110868,7 @@ func (c *RegionInstanceGroupManagersCreateInstancesCall) RequestId(requestId str // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersCreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersCreateInstancesCall { +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102647,36 +110876,36 @@ func (c *RegionInstanceGroupManagersCreateInstancesCall) Fields(s ...googleapi.F // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersCreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersCreateInstancesCall { +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersCreateInstancesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerscreateinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerpatchinstanceconfigreq) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -102691,14 +110920,14 @@ func (c *RegionInstanceGroupManagersCreateInstancesCall) doRequest(alt string) ( return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.createInstances" call. +// Do executes the "compute.regionInstanceGroupManagers.patchPerInstanceConfigs" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102729,9 +110958,9 @@ func (c *RegionInstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Creates instances with per-instance configs in this regional managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. You must separately verify the status of the creating or actions with the listmanagedinstances method.", + // "description": "Insert or patch (for the ones that already exist) per-instance configs for the managed instance group. 
perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.createInstances", + // "id": "compute.regionInstanceGroupManagers.patchPerInstanceConfigs", // "parameterOrder": [ // "project", // "region", @@ -102752,20 +110981,20 @@ func (c *RegionInstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.Ca // "type": "string" // }, // "region": { - // "description": "The name of the region where the managed instance group is located. It should conform to RFC1035.", + // "description": "Name of the region scoping this request, should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", // "request": { - // "$ref": "RegionInstanceGroupManagersCreateInstancesRequest" + // "$ref": "RegionInstanceGroupManagerPatchInstanceConfigReq" // }, // "response": { // "$ref": "Operation" @@ -102778,25 +111007,40 @@ func (c *RegionInstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.Ca } -// method id "compute.regionInstanceGroupManagers.delete": +// method id "compute.regionInstanceGroupManagers.recreateInstances": -type RegionInstanceGroupManagersDeleteCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified managed instance group and all of the -// instances in that group. 
-func (r *RegionInstanceGroupManagersService) Delete(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersDeleteCall { - c := &RegionInstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RecreateInstances: Flags the specified instances in the managed +// instance group to be immediately recreated. The instances are deleted +// and recreated using the current instance template for the managed +// instance group. This operation is marked as DONE when the flag is set +// even if the instances have not yet been recreated. You must +// separately verify the status of the recreating action with the +// listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { + c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest return c } @@ -102814,7 +111058,7 @@ func (r *RegionInstanceGroupManagersService) Delete(project string, region strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersDeleteCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -102822,7 +111066,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102830,33 +111074,38 @@ func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersDeleteCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -102869,14 +111118,14 @@ func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.delete" call. +// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102907,9 +111156,9 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes the specified managed instance group and all of the instances in that group.", - // "httpMethod": "DELETE", - // "id": "compute.regionInstanceGroupManagers.delete", + // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. 
You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.recreateInstances", // "parameterOrder": [ // "project", // "region", @@ -102917,7 +111166,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "instanceGroupManager": { - // "description": "Name of the managed instance group to delete.", + // "description": "Name of the managed instance group.", // "location": "path", // "required": true, // "type": "string" @@ -102941,7 +111190,10 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersRecreateRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -102953,42 +111205,37 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroupManagers.deleteInstances": +// method id "compute.regionInstanceGroupManagers.resize": -type RegionInstanceGroupManagersDeleteInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersResizeCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteInstances: Flags the specified instances in the managed -// instance group to be immediately deleted. The instances are also -// removed from any target pools of which they were a member. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you delete. The deleteInstances operation is -// marked DONE if the deleteInstances request is successful. The -// underlying actions take additional time. You must separately verify -// the status of the deleting action with the listmanagedinstances -// method. +// Resize: Changes the intended size of the managed instance group. If +// you increase the size, the group creates new instances using the +// current instance template. If you decrease the size, the group +// deletes one or more instances. +// +// The resize operation is marked DONE if the resize request is +// successful. The underlying actions take additional time. You must +// separately verify the status of the creating or deleting actions with +// the listmanagedinstances method. // // If the group is part of a backend service that has enabled connection // draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. 
-func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest) *RegionInstanceGroupManagersDeleteInstancesCall { - c := &RegionInstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// duration has elapsed before the VM instance is removed or deleted. +func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { + c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersdeleteinstancesrequest = regioninstancegroupmanagersdeleteinstancesrequest + c.urlParams_.Set("size", fmt.Sprint(size)) return c } @@ -103006,7 +111253,7 @@ func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, reg // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteInstancesCall { +func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -103014,7 +111261,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId str // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteInstancesCall { +func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103022,36 +111269,31 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.F // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteInstancesCall { +func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersdeleteinstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -103066,14 +111308,14 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) ( return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.deleteInstances" call. +// Do executes the "compute.regionInstanceGroupManagers.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103104,13 +111346,14 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Changes the intended size of the managed instance group. 
If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances.\n\nThe resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.deleteInstances", + // "id": "compute.regionInstanceGroupManagers.resize", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "instanceGroupManager", + // "size" // ], // "parameters": { // "instanceGroupManager": { @@ -103136,12 +111379,17 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "size": { + // "description": "Number of instances that should exist in this instance group manager.", + // "format": "int32", + // "location": "query", + // "minimum": "0", + // "required": true, + // "type": "integer" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", - // "request": { - // "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", // "response": { // "$ref": "Operation" // }, @@ -103153,80 +111401,93 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca } -// method id "compute.regionInstanceGroupManagers.get": +// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": -type RegionInstanceGroupManagersGetCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns all of the details about the specified managed instance -// group. 
-func (r *RegionInstanceGroupManagersService) Get(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersGetCall { - c := &RegionInstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetInstanceTemplate: Sets the instance template to use when creating +// new instances or recreating instances in this group. Existing +// instances are not affected. +func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersGetCall { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersGetCall) Context(ctx context.Context) *RegionInstanceGroupManagersGetCall { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -103239,14 +111500,14 @@ func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.get" call. -// Exactly one of *InstanceGroupManager or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupManager.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { +// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103265,7 +111526,7 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManager{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -103277,9 +111538,9 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group.", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroupManagers.get", + // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", // "parameterOrder": [ // "project", // "region", @@ -103287,7 +111548,7 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "instanceGroupManager": { - // "description": "Name of the managed instance group to return.", + // "description": "The name of the managed instance group.", // "location": "path", // "required": true, // "type": "string" @@ -103304,47 +111565,50 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "request": { + // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" + // }, // "response": { - // "$ref": "InstanceGroupManager" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionInstanceGroupManagers.insert": +// method id "compute.regionInstanceGroupManagers.setTargetPools": -type RegionInstanceGroupManagersInsertCall struct { - s *Service - project string - region string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a managed instance group using the information that -// you specify in the request. After the group is created, instances in -// the group are created using the specified instance template. This -// operation is marked as DONE when the group is created even if the -// instances in the group have not yet been created. You must separately -// verify the status of the individual instances with the -// listmanagedinstances method. -// -// A regional managed instance group can contain up to 2000 instances. -func (r *RegionInstanceGroupManagersService) Insert(project string, region string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersInsertCall { - c := &RegionInstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTargetPools: Modifies the target pools to which all new instances +// in this group are assigned. Existing instances in the group are not +// affected. +func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { + c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instancegroupmanager = instancegroupmanager + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest return c } @@ -103362,7 +111626,7 @@ func (r *RegionInstanceGroupManagersService) Insert(project string, region strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *RegionInstanceGroupManagersInsertCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *RegionInstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -103370,7 +111634,7 @@ func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersInsertCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103378,36 +111642,36 @@ func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersInsertCall) Context(ctx context.Context) *RegionInstanceGroupManagersInsertCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -103415,20 +111679,21 @@ func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.insert" call. +// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. // Exactly one of *Operation or error will be non-nil. 
Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103459,14 +111724,21 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", + // "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.insert", + // "id": "compute.regionInstanceGroupManagers.setTargetPools", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -103486,9 +111758,9 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" // }, // "response": { // "$ref": "Operation" @@ -103501,96 +111773,219 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroupManagers.list": +// method id "compute.regionInstanceGroupManagers.updatePerInstanceConfigs": -type RegionInstanceGroupManagersListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersUpdatePerInstanceConfigsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of managed instance groups that are -// contained within the specified region. 
-func (r *RegionInstanceGroupManagersService) List(project string, region string) *RegionInstanceGroupManagersListCall { - c := &RegionInstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdatePerInstanceConfigs: Insert or update (for the ones that already +// exist) per-instance configs for the managed instance group. +// perInstanceConfig.instance serves as a key used to distinguish +// whether to perform insert or patch. +func (r *RegionInstanceGroupManagersService) UpdatePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { + c := &RegionInstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerupdateinstanceconfigreq = regioninstancegroupmanagerupdateinstanceconfigreq return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either `=`, `!=`, `>`, or -// `<`. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also filter nested fields. For example, you could specify -// `scheduling.automaticRestart = false` to include instances only if -// they are not scheduled for automatic restarts. You can use filtering -// on nested fields to filter based on resource labels. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example: ``` (scheduling.automaticRestart = -// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression -// is an `AND` expression. However, you can include `AND` and `OR` -// expressions explicitly. For example: ``` (cpuPlatform = "Intel -// Skylake") OR (cpuPlatform = "Intel Broadwell") AND -// (scheduling.automaticRestart = true) ``` -func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("filter", filter) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { + c.urlParams_.Set("requestId", requestId) return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using `orderBy="creationTimestamp desc". This sorts -// results based on the `creationTimestamp` field in reverse -// chronological order (newest result first). Use this to sort resources -// like operations so that the newest operation is returned -// first. -// -// Currently, only sorting by `name` or `creationTimestamp desc` is -// supported. -func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("orderBy", orderBy) +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { + c.ctx_ = ctx return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("pageToken", pageToken) +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerupdateinstanceconfigreq) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.updatePerInstanceConfigs" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Insert or update (for the ones that already exist) per-instance configs for the managed instance group. perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.updatePerInstanceConfigs", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group. 
It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + // "request": { + // "$ref": "RegionInstanceGroupManagerUpdateInstanceConfigReq" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceGroups.get": + +type RegionInstanceGroupsGetCall struct { + s *Service + project string + region string + instanceGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified instance group resource. +func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { + c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroup = instanceGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListCall { +func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103600,7 +111995,7 @@ func (c *RegionInstanceGroupManagersListCall) Fields(s ...googleapi.Field) *Regi // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupManagersListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListCall { +func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -103608,23 +112003,23 @@ func (c *RegionInstanceGroupManagersListCall) IfNoneMatch(entityTag string) *Reg // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersListCall) Context(ctx context.Context) *RegionInstanceGroupManagersListCall { +func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersListCall) Header() http.Header { +func (c *RegionInstanceGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103635,7 +112030,7 @@ func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Respo var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -103643,20 +112038,21 @@ func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Respo } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.list" call. -// Exactly one of *RegionInstanceGroupManagerList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *RegionInstanceGroupManagerList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionInstanceGroups.get" call. +// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *InstanceGroup.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagerList, error) { +func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103675,7 +112071,7 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupManagerList{ + ret := &InstanceGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -103687,35 +112083,19 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves the list of managed instance groups that are contained within the specified region.", + // "description": "Returns the specified instance group resource.", // "httpMethod": "GET", - // "id": "compute.regionInstanceGroupManagers.list", + // "id": "compute.regionInstanceGroups.get", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroup" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "instanceGroup": { + // "description": "Name of the instance group resource to return.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -103732,9 +112112,9 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers", + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}", // "response": { - // "$ref": "RegionInstanceGroupManagerList" + // "$ref": "InstanceGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -103745,47 +112125,24 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupManagersListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroupManagers.listErrors": +// method id "compute.regionInstanceGroups.list": -type RegionInstanceGroupManagersListErrorsCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// ListErrors: Lists all errors thrown by actions on instances for a -// given regional managed instance group. -func (r *RegionInstanceGroupManagersService) ListErrors(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListErrorsCall { - c := &RegionInstanceGroupManagersListErrorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of instance group resources contained within +// the specified region. +func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { + c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager return c } @@ -103812,7 +112169,7 @@ func (r *RegionInstanceGroupManagersService) ListErrors(project string, region s // expressions explicitly. For example: ``` (cpuPlatform = "Intel // Skylake") OR (cpuPlatform = "Intel Broadwell") AND // (scheduling.automaticRestart = true) ``` -func (c *RegionInstanceGroupManagersListErrorsCall) Filter(filter string) *RegionInstanceGroupManagersListErrorsCall { +func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { c.urlParams_.Set("filter", filter) return c } @@ -103823,7 +112180,7 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Filter(filter string) *Regio // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. 
Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *RegionInstanceGroupManagersListErrorsCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListErrorsCall { +func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -103841,7 +112198,7 @@ func (c *RegionInstanceGroupManagersListErrorsCall) MaxResults(maxResults int64) // // Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *RegionInstanceGroupManagersListErrorsCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListErrorsCall { +func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -103849,15 +112206,24 @@ func (c *RegionInstanceGroupManagersListErrorsCall) OrderBy(orderBy string) *Reg // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionInstanceGroupManagersListErrorsCall) PageToken(pageToken string) *RegionInstanceGroupManagersListErrorsCall { +func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { c.urlParams_.Set("pageToken", pageToken) return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersListErrorsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListErrorsCall { +func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103867,7 +112233,7 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Fields(s ...googleapi.Field) // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupManagersListErrorsCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListErrorsCall { +func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { c.ifNoneMatch_ = entityTag return c } @@ -103875,23 +112241,23 @@ func (c *RegionInstanceGroupManagersListErrorsCall) IfNoneMatch(entityTag string // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersListErrorsCall) Context(ctx context.Context) *RegionInstanceGroupManagersListErrorsCall { +func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersListErrorsCall) Header() http.Header { +func (c *RegionInstanceGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersListErrorsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103902,7 +112268,7 @@ func (c *RegionInstanceGroupManagersListErrorsCall) doRequest(alt string) (*http var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -103910,23 +112276,20 @@ func (c *RegionInstanceGroupManagersListErrorsCall) doRequest(alt string) (*http } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.listErrors" call. -// Exactly one of *RegionInstanceGroupManagersListErrorsResponse or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *RegionInstanceGroupManagersListErrorsResponse.ServerResponse.Header -// or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListErrorsResponse, error) { +// Do executes the "compute.regionInstanceGroups.list" call. +// Exactly one of *RegionInstanceGroupList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RegionInstanceGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103945,7 +112308,7 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOpt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupManagersListErrorsResponse{ + ret := &RegionInstanceGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -103957,13 +112320,12 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Lists all errors thrown by actions on instances for a given regional managed instance group.", + // "description": "Retrieves the list of instance group resources contained within the specified region.", // "httpMethod": "GET", - // "id": "compute.regionInstanceGroupManagers.listErrors", + // "id": "compute.regionInstanceGroups.list", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "region" // ], // "parameters": { // "filter": { @@ -103971,12 +112333,6 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOpt // "location": "query", // "type": "string" // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It must be a string that meets the requirements in RFC1035, or an unsigned long integer: must match regexp pattern: (?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)|[1-9][0-9]{0,19}.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -104003,15 +112359,20 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOpt // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request. This should conform to RFC1035.", + // "description": "Name of the region scoping this request.", // "location": "path", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", + // "path": "{project}/regions/{region}/instanceGroups", // "response": { - // "$ref": "RegionInstanceGroupManagersListErrorsResponse" + // "$ref": "RegionInstanceGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -104025,7 +112386,7 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOpt // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *RegionInstanceGroupManagersListErrorsCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListErrorsResponse) error) error { +func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -104043,27 +112404,30 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Pages(ctx context.Context, f } } -// method id "compute.regionInstanceGroupManagers.listManagedInstances": +// method id "compute.regionInstanceGroups.listInstances": -type RegionInstanceGroupManagersListManagedInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupsListInstancesCall struct { + s *Service + project string + region string + instanceGroup string + regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListManagedInstances: Lists the instances in the managed instance -// group and instances that are scheduled to be created. The list -// includes any current actions that the group has scheduled for its -// instances. -func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { - c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListInstances: Lists the instances in the specified instance group +// and displays information about the named ports. Depending on the +// specified options, this method can list all instances or only the +// instances that are running. The orderBy query parameter is not +// supported. +func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { + c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager + c.instanceGroup = instanceGroup + c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest return c } @@ -104090,7 +112454,7 @@ func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string // expressions explicitly. For example: ``` (cpuPlatform = "Intel // Skylake") OR (cpuPlatform = "Intel Broadwell") AND // (scheduling.automaticRestart = true) ``` -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter string) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("filter", filter) return c } @@ -104101,7 +112465,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter stri // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. 
(Default: `500`) -func (c *RegionInstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -104119,7 +112483,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) MaxResults(maxResu // // Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -104127,15 +112491,24 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy st // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("pageToken", pageToken) return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -104143,31 +112516,36 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...google // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Header { +func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -104175,23 +112553,22 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt stri } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.listManagedInstances" call. -// Exactly one of *RegionInstanceGroupManagersListInstancesResponse or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *RegionInstanceGroupManagersListInstancesResponse.ServerResponse.Heade -// r or (if a response was returned at all) in +// Do executes the "compute.regionInstanceGroups.listInstances" call. +// Exactly one of *RegionInstanceGroupsListInstances or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *RegionInstanceGroupsListInstances.ServerResponse.Header or +// (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was // returned. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstancesResponse, error) { +func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104210,7 +112587,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupManagersListInstancesResponse{ + ret := &RegionInstanceGroupsListInstances{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104222,13 +112599,13 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", + // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. The orderBy query parameter is not supported.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.listManagedInstances", + // "id": "compute.regionInstanceGroups.listInstances", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { // "filter": { @@ -104236,8 +112613,8 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // "location": "query", // "type": "string" // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceGroup": { + // "description": "Name of the regional instance group for which we want to list the instances.", // "location": "path", // "required": true, // "type": "string" @@ -104272,11 +112649,19 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // "location": "path", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + // "request": { + // "$ref": "RegionInstanceGroupsListInstancesRequest" + // }, // "response": { - // "$ref": "RegionInstanceGroupManagersListInstancesResponse" + // "$ref": "RegionInstanceGroupsListInstances" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -104290,7 +112675,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstancesResponse) error) error { +func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -104308,32 +112693,27 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Pages(ctx context. 
} } -// method id "compute.regionInstanceGroupManagers.patch": +// method id "compute.regionInstanceGroups.setNamedPorts": -type RegionInstanceGroupManagersPatchCall struct { - s *Service - project string - region string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupsSetNamedPortsCall struct { + s *Service + project string + region string + instanceGroup string + regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is patched even if the instances in the group are still in the -// process of being patched. You must separately verify the status of -// the individual instances with the listmanagedinstances method. This -// method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. -func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { - c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetNamedPorts: Sets the named ports for the specified regional +// instance group. +func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { + c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager + c.instanceGroup = instanceGroup + c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest return c } @@ -104351,7 +112731,7 @@ func (r *RegionInstanceGroupManagersService) Patch(project string, region string // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -104359,7 +112739,7 @@ func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *Regi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -104367,58 +112747,58 @@ func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *Reg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionInstanceGroupManagersPatchCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { +func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.patch" call. +// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104449,17 +112829,17 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.regionInstanceGroupManagers.patch", + // "description": "Sets the named ports for the specified regional instance group.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroups.setNamedPorts", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", + // "instanceGroup": { + // "description": "The name of the regional instance group where the named ports are updated.", // "location": "path", // "required": true, // "type": "string" @@ -104483,9 +112863,9 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" // }, // "response": { // "$ref": "Operation" @@ -104498,40 +112878,26 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroupManagers.recreateInstances": +// method id "compute.regionNetworkEndpointGroups.delete": -type RegionInstanceGroupManagersRecreateInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNetworkEndpointGroupsDeleteCall struct { + s *Service + project string + region string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RecreateInstances: Flags the specified instances in the managed -// instance group to be immediately recreated. The instances are deleted -// and recreated using the current instance template for the managed -// instance group. This operation is marked as DONE when the flag is set -// even if the instances have not yet been recreated. You must -// separately verify the status of the recreating action with the -// listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { - c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified network endpoint group. Note that the +// NEG cannot be deleted if it is configured as a backend of a backend +// service. 
+func (r *RegionNetworkEndpointGroupsService) Delete(project string, region string, networkEndpointGroup string) *RegionNetworkEndpointGroupsDeleteCall { + c := &RegionNetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest + c.networkEndpointGroup = networkEndpointGroup return c } @@ -104549,7 +112915,7 @@ func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, r // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersRecreateInstancesCall { +func (c *RegionNetworkEndpointGroupsDeleteCall) RequestId(requestId string) *RegionNetworkEndpointGroupsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -104557,7 +112923,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId s // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { +func (c *RegionNetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -104565,38 +112931,33 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { +func (c *RegionNetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { +func (c *RegionNetworkEndpointGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -104604,19 +112965,19 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. +// Do executes the "compute.regionNetworkEndpointGroups.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104647,17 +113008,17 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. 
You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.recreateInstances", + // "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", + // "httpMethod": "DELETE", + // "id": "compute.regionNetworkEndpointGroups.delete", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "networkEndpointGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", // "location": "path", // "required": true, // "type": "string" @@ -104670,7 +113031,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", // "required": true, // "type": "string" @@ -104681,10 +113042,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", - // "request": { - // "$ref": "RegionInstanceGroupManagersRecreateRequest" - // }, + // "path": "{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", // "response": { // "$ref": "Operation" // }, @@ -104696,97 +113054,80 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. } -// method id "compute.regionInstanceGroupManagers.resize": +// method id "compute.regionNetworkEndpointGroups.get": -type RegionInstanceGroupManagersResizeCall struct { +type RegionNetworkEndpointGroupsGetCall struct { s *Service project string region string - instanceGroupManager string + networkEndpointGroup string urlParams_ gensupport.URLParams + ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Resize: Changes the intended size of the managed instance group. If -// you increase the size, the group creates new instances using the -// current instance template. If you decrease the size, the group -// deletes one or more instances. -// -// The resize operation is marked DONE if the resize request is -// successful. The underlying actions take additional time. You must -// separately verify the status of the creating or deleting actions with -// the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. 
-func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { - c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified network endpoint group. Gets a list of +// available network endpoint groups by making a list() request. +func (r *RegionNetworkEndpointGroupsService) Get(project string, region string, networkEndpointGroup string) *RegionNetworkEndpointGroupsGetCall { + c := &RegionNetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.urlParams_.Set("size", fmt.Sprint(size)) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeCall { - c.urlParams_.Set("requestId", requestId) + c.networkEndpointGroup = networkEndpointGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { +func (c *RegionNetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionNetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *RegionNetworkEndpointGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { +func (c *RegionNetworkEndpointGroupsGetCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { +func (c *RegionNetworkEndpointGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -104794,19 +113135,19 @@ func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Res googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.resize" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionNetworkEndpointGroups.get" call. +// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroup.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104825,7 +113166,7 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &NetworkEndpointGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104837,18 +113178,17 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. 
If you decrease the size, the group deletes one or more instances.\n\nThe resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.resize", + // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regionNetworkEndpointGroups.get", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager", - // "size" + // "networkEndpointGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group. It should comply with RFC1035.", // "location": "path", // "required": true, // "type": "string" @@ -104861,59 +113201,44 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "size": { - // "description": "Number of instances that should exist in this instance group manager.", - // "format": "int32", - // "location": "query", - // "minimum": "0", - // "required": true, - // "type": "integer" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", + // "path": "{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", // "response": { - // "$ref": "Operation" + // "$ref": "NetworkEndpointGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": +// method id "compute.regionNetworkEndpointGroups.insert": -type RegionInstanceGroupManagersSetInstanceTemplateCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNetworkEndpointGroupsInsertCall struct { + s *Service + project string + region string + networkendpointgroup *NetworkEndpointGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetInstanceTemplate: Sets the instance template to use when creating -// new instances or recreating instances in this group. Existing -// instances are not affected. -func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { - c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a network endpoint group in the specified project +// using the parameters that are included in the request. +func (r *RegionNetworkEndpointGroupsService) Insert(project string, region string, networkendpointgroup *NetworkEndpointGroup) *RegionNetworkEndpointGroupsInsertCall { + c := &RegionNetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest + c.networkendpointgroup = networkendpointgroup return c } @@ -104931,7 +113256,7 @@ func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *RegionInstanceGroupManagersSetInstanceTemplateCall { +func (c *RegionNetworkEndpointGroupsInsertCall) RequestId(requestId string) *RegionNetworkEndpointGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -104939,7 +113264,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { +func (c *RegionNetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -104947,36 +113272,36 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googlea // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { +func (c *RegionNetworkEndpointGroupsInsertCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { +func (c *RegionNetworkEndpointGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -104984,21 +113309,20 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt strin } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. +// Do executes the "compute.regionNetworkEndpointGroups.insert" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -105029,21 +113353,14 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap } return ret, nil // { - // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", + // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", + // "id": "compute.regionNetworkEndpointGroups.insert", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "region" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -105052,7 +113369,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region where you want to create the network endpoint group. It should comply with RFC1035.", // "location": "path", // "required": true, // "type": "string" @@ -105063,9 +113380,9 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "path": "{project}/regions/{region}/networkEndpointGroups", // "request": { - // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" + // "$ref": "NetworkEndpointGroup" // }, // "response": { // "$ref": "Operation" @@ -105078,28 +113395,304 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap } -// method id "compute.regionInstanceGroupManagers.setTargetPools": +// method id "compute.regionNetworkEndpointGroups.list": -type RegionInstanceGroupManagersSetTargetPoolsCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNetworkEndpointGroupsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetTargetPools: Modifies the target pools to which all new instances -// in this group are assigned. Existing instances in the group are not -// affected. 
-func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { - c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of regional network endpoint groups +// available to the specified project in the given region. +func (r *RegionNetworkEndpointGroupsService) List(project string, region string) *RegionNetworkEndpointGroupsListCall { + c := &RegionNetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *RegionNetworkEndpointGroupsListCall) Filter(filter string) *RegionNetworkEndpointGroupsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionNetworkEndpointGroupsListCall) MaxResults(maxResults int64) *RegionNetworkEndpointGroupsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. 
+func (c *RegionNetworkEndpointGroupsListCall) OrderBy(orderBy string) *RegionNetworkEndpointGroupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionNetworkEndpointGroupsListCall) PageToken(pageToken string) *RegionNetworkEndpointGroupsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionNetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionNetworkEndpointGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionNetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionNetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *RegionNetworkEndpointGroupsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionNetworkEndpointGroupsListCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionNetworkEndpointGroupsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/networkEndpointGroups") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNetworkEndpointGroups.list" call. +// Exactly one of *NetworkEndpointGroupList or error will be non-nil. +// Any non-2xx status code is an error. 
Response headers are in either +// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NetworkEndpointGroupList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", + // "httpMethod": "GET", + // "id": "compute.regionNetworkEndpointGroups.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "{project}/regions/{region}/networkEndpointGroups", + // "response": { + // "$ref": "NetworkEndpointGroupList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionNetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionNotificationEndpoints.delete": + +type RegionNotificationEndpointsDeleteCall struct { + s *Service + project string + region string + notificationEndpoint string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified NotificationEndpoint in the given +// region +func (r *RegionNotificationEndpointsService) Delete(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsDeleteCall { + c := &RegionNotificationEndpointsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.notificationEndpoint = notificationEndpoint return c } @@ -105117,7 +113710,7 @@ func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, regi // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *RegionInstanceGroupManagersSetTargetPoolsCall { +func (c *RegionNotificationEndpointsDeleteCall) RequestId(requestId string) *RegionNotificationEndpointsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -105125,7 +113718,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId stri // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { +func (c *RegionNotificationEndpointsDeleteCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -105133,38 +113726,33 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Fi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { +func (c *RegionNotificationEndpointsDeleteCall) Context(ctx context.Context) *RegionNotificationEndpointsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { +func (c *RegionNotificationEndpointsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNotificationEndpointsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -105172,19 +113760,19 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (* googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "notificationEndpoint": c.notificationEndpoint, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. +// Do executes the "compute.regionNotificationEndpoints.delete" call. // Exactly one of *Operation or error will be non-nil. 
Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionNotificationEndpointsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -105215,18 +113803,19 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setTargetPools", + // "description": "Deletes the specified NotificationEndpoint in the given region", + // "httpMethod": "DELETE", + // "id": "compute.regionNotificationEndpoints.delete", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "notificationEndpoint" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", + // "notificationEndpoint": { + // "description": "Name of the NotificationEndpoint resource to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -105240,6 +113829,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -105249,10 +113839,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", - // "request": { - // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" - // }, + // "path": "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", // "response": { // "$ref": "Operation" // }, @@ -105264,32 +113851,33 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal } -// method id "compute.regionInstanceGroups.get": +// method id "compute.regionNotificationEndpoints.get": -type RegionInstanceGroupsGetCall struct { - s *Service - project string - region string - instanceGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionNotificationEndpointsGetCall struct { + s *Service + project string + region string + notificationEndpoint string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance group resource. -func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { - c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified NotificationEndpoint resource in the given +// region. 
+func (r *RegionNotificationEndpointsService) Get(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsGetCall { + c := &RegionNotificationEndpointsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroup = instanceGroup + c.notificationEndpoint = notificationEndpoint return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { +func (c *RegionNotificationEndpointsGetCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -105299,7 +113887,7 @@ func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstan // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { +func (c *RegionNotificationEndpointsGetCall) IfNoneMatch(entityTag string) *RegionNotificationEndpointsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -105307,23 +113895,23 @@ func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInsta // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { +func (c *RegionNotificationEndpointsGetCall) Context(ctx context.Context) *RegionNotificationEndpointsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsGetCall) Header() http.Header { +func (c *RegionNotificationEndpointsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNotificationEndpointsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105334,7 +113922,7 @@ func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -105342,21 +113930,21 @@ func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, + "notificationEndpoint": c.notificationEndpoint, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.get" call. -// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *InstanceGroup.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionNotificationEndpoints.get" call. +// Exactly one of *NotificationEndpoint or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NotificationEndpoint.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { +func (c *RegionNotificationEndpointsGetCall) Do(opts ...googleapi.CallOption) (*NotificationEndpoint, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -105375,7 +113963,7 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroup{ + ret := &NotificationEndpoint{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -105387,18 +113975,19 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc } return ret, nil // { - // "description": "Returns the specified instance group resource.", + // "description": "Returns the specified NotificationEndpoint resource in the given region.", // "httpMethod": "GET", - // "id": "compute.regionInstanceGroups.get", + // "id": "compute.regionNotificationEndpoints.get", // "parameterOrder": [ // "project", // "region", - // "instanceGroup" + // "notificationEndpoint" // ], // "parameters": { - // "instanceGroup": { - // "description": "Name of the instance group resource to return.", + // "notificationEndpoint": { + // "description": "Name of the NotificationEndpoint resource to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -105412,13 +114001,14 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}", + // "path": "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", // "response": { - // "$ref": "InstanceGroup" + // "$ref": "NotificationEndpoint" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -105429,143 +114019,90 @@ func (c *RegionInstanceGroupsGetCall) Do(opts 
...googleapi.CallOption) (*Instanc } -// method id "compute.regionInstanceGroups.list": +// method id "compute.regionNotificationEndpoints.insert": -type RegionInstanceGroupsListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionNotificationEndpointsInsertCall struct { + s *Service + project string + region string + notificationendpoint *NotificationEndpoint + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of instance group resources contained within -// the specified region. -func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { - c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Create a NotificationEndpoint in the specified project in the +// given region using the parameters that are included in the request. +func (r *RegionNotificationEndpointsService) Insert(project string, region string, notificationendpoint *NotificationEndpoint) *RegionNotificationEndpointsInsertCall { + c := &RegionNotificationEndpointsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region + c.notificationendpoint = notificationendpoint return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either `=`, `!=`, `>`, or -// `<`. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. -// -// You can also filter nested fields. For example, you could specify -// `scheduling.automaticRestart = false` to include instances only if -// they are not scheduled for automatic restarts. You can use filtering -// on nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example: ``` (scheduling.automaticRestart = -// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression -// is an `AND` expression. However, you can include `AND` and `OR` -// expressions explicitly. For example: ``` (cpuPlatform = "Intel -// Skylake") OR (cpuPlatform = "Intel Broadwell") AND -// (scheduling.automaticRestart = true) ``` -func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. 
+// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using `orderBy="creationTimestamp desc". This sorts -// results based on the `creationTimestamp` field in reverse -// chronological order (newest result first). Use this to sort resources -// like operations so that the newest operation is returned -// first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by `name` or `creationTimestamp desc` is -// supported. -func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionNotificationEndpointsInsertCall) RequestId(requestId string) *RegionNotificationEndpointsInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { +func (c *RegionNotificationEndpointsInsertCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { +func (c *RegionNotificationEndpointsInsertCall) Context(ctx context.Context) *RegionNotificationEndpointsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupsListCall) Header() http.Header { +func (c *RegionNotificationEndpointsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNotificationEndpointsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.notificationendpoint) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/notificationEndpoints") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -105577,14 +114114,14 @@ func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.list" call. -// Exactly one of *RegionInstanceGroupList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RegionInstanceGroupList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { +// Do executes the "compute.regionNotificationEndpoints.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionNotificationEndpointsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -105603,7 +114140,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -105615,37 +114152,14 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region } return ret, nil // { - // "description": "Retrieves the list of instance group resources contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroups.list", + // "description": "Create a NotificationEndpoint in the specified project in the given region using the parameters that are included in the request.", + // "httpMethod": "POST", + // "id": "compute.regionNotificationEndpoints.insert", // "parameterOrder": [ // "project", // "region" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -105656,67 +114170,49 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups", + // "path": "{project}/regions/{region}/notificationEndpoints", + // "request": { + // "$ref": "NotificationEndpoint" + // }, // "response": { - // "$ref": "RegionInstanceGroupList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroups.listInstances": +// method id "compute.regionNotificationEndpoints.list": -type RegionInstanceGroupsListInstancesCall struct { - s *Service - project string - region string - instanceGroup string - regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNotificationEndpointsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// ListInstances: Lists the instances in the specified instance group -// and displays information about the named ports. Depending on the -// specified options, this method can list all instances or only the -// instances that are running. 
-func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { - c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Lists the NotificationEndpoints for a project in the given +// region. +func (r *RegionNotificationEndpointsService) List(project string, region string) *RegionNotificationEndpointsListCall { + c := &RegionNotificationEndpointsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroup = instanceGroup - c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest return c } @@ -105743,7 +114239,7 @@ func (r *RegionInstanceGroupsService) ListInstances(project string, region strin // expressions explicitly. For example: ``` (cpuPlatform = "Intel // Skylake") OR (cpuPlatform = "Intel Broadwell") AND // (scheduling.automaticRestart = true) ``` -func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { +func (c *RegionNotificationEndpointsListCall) Filter(filter string) *RegionNotificationEndpointsListCall { c.urlParams_.Set("filter", filter) return c } @@ -105754,7 +114250,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionIns // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { +func (c *RegionNotificationEndpointsListCall) MaxResults(maxResults int64) *RegionNotificationEndpointsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -105772,7 +114268,7 @@ func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *Re // // Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { +func (c *RegionNotificationEndpointsListCall) OrderBy(orderBy string) *RegionNotificationEndpointsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -105780,75 +114276,90 @@ func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionI // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { +func (c *RegionNotificationEndpointsListCall) PageToken(pageToken string) *RegionNotificationEndpointsListCall { c.urlParams_.Set("pageToken", pageToken) return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionNotificationEndpointsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { +func (c *RegionNotificationEndpointsListCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionNotificationEndpointsListCall) IfNoneMatch(entityTag string) *RegionNotificationEndpointsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { +func (c *RegionNotificationEndpointsListCall) Context(ctx context.Context) *RegionNotificationEndpointsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { +func (c *RegionNotificationEndpointsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNotificationEndpointsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/notificationEndpoints") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.listInstances" call. -// Exactly one of *RegionInstanceGroupsListInstances or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *RegionInstanceGroupsListInstances.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { +// Do executes the "compute.regionNotificationEndpoints.list" call. +// Exactly one of *NotificationEndpointList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NotificationEndpointList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionNotificationEndpointsListCall) Do(opts ...googleapi.CallOption) (*NotificationEndpointList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -105867,7 +114378,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupsListInstances{ + ret := &NotificationEndpointList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -105879,13 +114390,12 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.listInstances", + // "description": "Lists the NotificationEndpoints for a project in the given region.", + // "httpMethod": "GET", + // "id": "compute.regionNotificationEndpoints.list", // "parameterOrder": [ // "project", - // "region", - // "instanceGroup" + // "region" // ], // "parameters": { // "filter": { @@ -105893,12 +114403,6 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "location": "query", // "type": "string" // }, - // "instanceGroup": { - // "description": "Name of the regional instance group for which we want to list the instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -105927,16 +114431,19 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", - // "request": { - // "$ref": "RegionInstanceGroupsListInstancesRequest" - // }, + // "path": "{project}/regions/{region}/notificationEndpoints", // "response": { - // "$ref": "RegionInstanceGroupsListInstances" + // "$ref": "NotificationEndpointList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -105950,7 +114457,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { +func (c *RegionNotificationEndpointsListCall) Pages(ctx context.Context, f func(*NotificationEndpointList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -105968,191 +114475,6 @@ func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f fun } } -// method id "compute.regionInstanceGroups.setNamedPorts": - -type RegionInstanceGroupsSetNamedPortsCall struct { - s *Service - project string - region string - instanceGroup string - regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetNamedPorts: Sets the named ports for the specified regional -// instance group. -func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { - c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroup = instanceGroup - c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
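The hunk above regenerates the client surface for `compute.regionNotificationEndpoints.list`: the call is a plain GET, gains an `IfNoneMatch` setter for conditional requests, and returns a `NotificationEndpointList` with a `Pages` helper. A minimal sketch of how a consumer of the bumped `google.golang.org/api/compute/v1` module might use it; the project, region, and ETag values are placeholders, and the `Items`/`Name` fields are assumed from the usual shape of generated list types rather than taken from this diff.

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder project/region; the ETag would come from a previous response.
	call := svc.RegionNotificationEndpoints.List("my-project", "us-central1")
	call.IfNoneMatch("previous-etag") // conditional GET support added by this regeneration

	list, err := call.Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("notification endpoints unchanged since last fetch")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	for _, ep := range list.Items { // Items/Name assumed from the generated list type
		fmt.Println(ep.Name)
	}
}
```

With `IfNoneMatch` set, a 304 surfaces as a `*googleapi.Error` from `Do`, which is why the `IsNotModified` check comes before the generic error check.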
-func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the named ports for the specified regional instance group.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.setNamedPorts", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroup" - // ], - // "parameters": { - // "instanceGroup": { - // "description": "The name of the regional instance group where the named ports are updated.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", - // "request": { - // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - // method id "compute.regionOperations.delete": type RegionOperationsDeleteCall struct { @@ -106202,7 +114524,7 @@ func (c *RegionOperationsDeleteCall) Header() http.Header { func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106338,7 +114660,7 @@ func (c *RegionOperationsGetCall) Header() http.Header { func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106533,6 +114855,15 @@ func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperations return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionOperationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -106570,7 +114901,7 @@ func (c *RegionOperationsListCall) Header() http.Header { func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106677,6 +115008,11 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/operations", @@ -106774,7 +115110,7 @@ func (c *RegionOperationsWaitCall) Header() http.Header { func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106946,7 +115282,7 @@ func (c *RegionSslCertificatesDeleteCall) Header() http.Header { func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107116,7 +115452,7 @@ func (c *RegionSslCertificatesGetCall) Header() http.Header { func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107292,7 +115628,7 @@ func (c *RegionSslCertificatesInsertCall) Header() http.Header { func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107486,6 +115822,15 @@ func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCe return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionSslCertificatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSslCertificatesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -107523,7 +115868,7 @@ func (c *RegionSslCertificatesListCall) Header() http.Header { func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107630,6 +115975,11 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/sslCertificates", @@ -107733,7 +116083,7 @@ func (c *RegionTargetHttpProxiesDeleteCall) Header() http.Header { func (c *RegionTargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107903,7 +116253,7 @@ func (c *RegionTargetHttpProxiesGetCall) Header() http.Header { func (c *RegionTargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108079,7 +116429,7 @@ func (c *RegionTargetHttpProxiesInsertCall) Header() http.Header { func (c *RegionTargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108273,6 +116623,15 @@ func (c *RegionTargetHttpProxiesListCall) PageToken(pageToken string) *RegionTar return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionTargetHttpProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionTargetHttpProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -108310,7 +116669,7 @@ func (c *RegionTargetHttpProxiesListCall) Header() http.Header { func (c *RegionTargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108417,6 +116776,11 @@ func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*Tar // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/targetHttpProxies", @@ -108522,7 +116886,7 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) Header() http.Header { func (c *RegionTargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108706,7 +117070,7 @@ func (c *RegionTargetHttpsProxiesDeleteCall) Header() http.Header { func (c *RegionTargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108876,7 +117240,7 @@ func (c *RegionTargetHttpsProxiesGetCall) Header() http.Header { func (c *RegionTargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109052,7 +117416,7 @@ func (c *RegionTargetHttpsProxiesInsertCall) Header() http.Header { func (c *RegionTargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109246,6 +117610,15 @@ func (c *RegionTargetHttpsProxiesListCall) PageToken(pageToken string) *RegionTa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionTargetHttpsProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionTargetHttpsProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -109283,7 +117656,7 @@ func (c *RegionTargetHttpsProxiesListCall) Header() http.Header { func (c *RegionTargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109390,6 +117763,11 @@ func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*Ta // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/targetHttpsProxies", @@ -109495,7 +117873,7 @@ func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109681,7 +118059,7 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) Header() http.Header { func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109853,7 +118231,7 @@ func (c *RegionUrlMapsDeleteCall) Header() http.Header { func (c *RegionUrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110022,7 +118400,7 @@ func (c *RegionUrlMapsGetCall) Header() http.Header { func (c *RegionUrlMapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110186,7 +118564,7 @@ func (c *RegionUrlMapsInsertCall) Header() http.Header { func (c *RegionUrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110380,6 +118758,15 @@ func (c *RegionUrlMapsListCall) PageToken(pageToken string) *RegionUrlMapsListCa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionUrlMapsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionUrlMapsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -110417,7 +118804,7 @@ func (c *RegionUrlMapsListCall) Header() http.Header { func (c *RegionUrlMapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110524,6 +118911,11 @@ func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, e // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/urlMaps", @@ -110619,7 +119011,7 @@ func (c *RegionUrlMapsPatchCall) Header() http.Header { func (c *RegionUrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110794,7 +119186,7 @@ func (c *RegionUrlMapsUpdateCall) Header() http.Header { func (c *RegionUrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110963,7 +119355,7 @@ func (c *RegionUrlMapsValidateCall) Header() http.Header { func (c *RegionUrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111134,7 +119526,7 @@ func (c *RegionsGetCall) Header() http.Header { func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111318,6 +119710,15 @@ func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
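The same regeneration threads a `ReturnPartialSuccess` setter (and the matching `returnPartialSuccess` query parameter) through the region-scoped list calls shown above, `Regions.List` included. A short sketch of opting in while paging, assuming the usual `Items`, `Name`, and `Status` fields on `RegionList`; the project name is a placeholder.

```go
package sketches

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listRegions pages through regions with the new partial-success opt-in, which
// asks the API to return partial results instead of failing the whole request.
func listRegions(ctx context.Context, svc *compute.Service) error {
	return svc.Regions.List("my-project").
		ReturnPartialSuccess(true). // setter added by this regeneration
		Pages(ctx, func(page *compute.RegionList) error {
			for _, r := range page.Items {
				fmt.Printf("%s\t%s\n", r.Name, r.Status)
			}
			return nil
		})
}
```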
@@ -111355,7 +119756,7 @@ func (c *RegionsListCall) Header() http.Header { func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111453,6 +119854,11 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions", @@ -111585,6 +119991,15 @@ func (c *ReservationsAggregatedListCall) PageToken(pageToken string) *Reservatio return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ReservationsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ReservationsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -111622,7 +120037,7 @@ func (c *ReservationsAggregatedListCall) Header() http.Header { func (c *ReservationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111725,6 +120140,11 @@ func (c *ReservationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Rese // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/reservations", @@ -111828,7 +120248,7 @@ func (c *ReservationsDeleteCall) Header() http.Header { func (c *ReservationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111996,7 +120416,7 @@ func (c *ReservationsGetCall) Header() http.Header { func (c *ReservationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112127,6 +120547,13 @@ func (r *ReservationsService) GetIamPolicy(project string, zone string, resource return c } +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *ReservationsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ReservationsGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -112164,7 +120591,7 @@ func (c *ReservationsGetIamPolicyCall) Header() http.Header { func (c *ReservationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112237,6 +120664,12 @@ func (c *ReservationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy // "resource" // ], // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -112340,7 +120773,7 @@ func (c *ReservationsInsertCall) Header() http.Header { func (c *ReservationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112534,6 +120967,15 @@ func (c *ReservationsListCall) PageToken(pageToken string) *ReservationsListCall return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ReservationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ReservationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -112571,7 +121013,7 @@ func (c *ReservationsListCall) Header() http.Header { func (c *ReservationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112672,6 +121114,11 @@ func (c *ReservationsListCall) Do(opts ...googleapi.CallOption) (*ReservationLis // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "Name of the zone for this request.", // "location": "path", @@ -112785,7 +121232,7 @@ func (c *ReservationsResizeCall) Header() http.Header { func (c *ReservationsResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112953,7 +121400,7 @@ func (c *ReservationsSetIamPolicyCall) Header() http.Header { func (c *ReservationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113116,7 +121563,7 @@ func (c *ReservationsTestIamPermissionsCall) Header() http.Header { func (c *ReservationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113325,6 +121772,15 @@ func (c *ResourcePoliciesAggregatedListCall) PageToken(pageToken string) *Resour return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ResourcePoliciesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ResourcePoliciesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -113362,7 +121818,7 @@ func (c *ResourcePoliciesAggregatedListCall) Header() http.Header { func (c *ResourcePoliciesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113465,6 +121921,11 @@ func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/resourcePolicies", @@ -113568,7 +122029,7 @@ func (c *ResourcePoliciesDeleteCall) Header() http.Header { func (c *ResourcePoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113736,7 +122197,7 @@ func (c *ResourcePoliciesGetCall) Header() http.Header { func (c *ResourcePoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113867,6 +122328,13 @@ func (r *ResourcePoliciesService) GetIamPolicy(project string, region string, re return c } +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *ResourcePoliciesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ResourcePoliciesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
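Alongside the list changes, the `GetIamPolicy` calls in this hunk (Reservations and ResourcePolicies) gain an `OptionsRequestedPolicyVersion` setter. A sketch of requesting policy version 3, which IAM requires before conditional role bindings are returned; the project, region, and resource names are placeholders.

```go
package sketches

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// dumpResourcePolicyIAM fetches the IAM policy of a resource policy, asking
// for policy version 3 so that conditional bindings (if any) are included.
func dumpResourcePolicyIAM(ctx context.Context, svc *compute.Service) error {
	policy, err := svc.ResourcePolicies.
		GetIamPolicy("my-project", "us-central1", "my-resource-policy").
		OptionsRequestedPolicyVersion(3). // setter added by this regeneration
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, b := range policy.Bindings {
		fmt.Println(b.Role, b.Members)
	}
	return nil
}
```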
@@ -113904,7 +122372,7 @@ func (c *ResourcePoliciesGetIamPolicyCall) Header() http.Header { func (c *ResourcePoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113977,6 +122445,12 @@ func (c *ResourcePoliciesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Po // "resource" // ], // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -114079,7 +122553,7 @@ func (c *ResourcePoliciesInsertCall) Header() http.Header { func (c *ResourcePoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114273,6 +122747,15 @@ func (c *ResourcePoliciesListCall) PageToken(pageToken string) *ResourcePolicies return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ResourcePoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ResourcePoliciesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -114310,7 +122793,7 @@ func (c *ResourcePoliciesListCall) Header() http.Header { func (c *ResourcePoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114417,6 +122900,11 @@ func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePo // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/resourcePolicies", @@ -114504,7 +122992,7 @@ func (c *ResourcePoliciesSetIamPolicyCall) Header() http.Header { func (c *ResourcePoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114667,7 +123155,7 @@ func (c *ResourcePoliciesTestIamPermissionsCall) Header() http.Header { func (c *ResourcePoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114876,6 +123364,15 @@ func (c *RoutersAggregatedListCall) PageToken(pageToken string) *RoutersAggregat return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RoutersAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RoutersAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -114913,7 +123410,7 @@ func (c *RoutersAggregatedListCall) Header() http.Header { func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115016,6 +123513,11 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/routers", @@ -115119,7 +123621,7 @@ func (c *RoutersDeleteCall) Header() http.Header { func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115288,7 +123790,7 @@ func (c *RoutersGetCall) Header() http.Header { func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115484,6 +123986,15 @@ func (c *RoutersGetNatMappingInfoCall) PageToken(pageToken string) *RoutersGetNa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RoutersGetNatMappingInfoCall) ReturnPartialSuccess(returnPartialSuccess bool) *RoutersGetNatMappingInfoCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -115521,7 +124032,7 @@ func (c *RoutersGetNatMappingInfoCall) Header() http.Header { func (c *RoutersGetNatMappingInfoCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115631,6 +124142,11 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "router": { // "description": "Name of the Router resource to query for Nat Mapping information of VM endpoints.", // "location": "path", @@ -115733,7 +124249,7 @@ func (c *RoutersGetRouterStatusCall) Header() http.Header { func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115909,7 +124425,7 @@ func (c *RoutersInsertCall) Header() http.Header { func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116103,6 +124619,15 @@ func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RoutersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RoutersListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -116140,7 +124665,7 @@ func (c *RoutersListCall) Header() http.Header { func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116247,6 +124772,11 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/routers", @@ -116354,7 +124884,7 @@ func (c *RoutersPatchCall) Header() http.Header { func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116523,7 +125053,7 @@ func (c *RoutersPreviewCall) Header() http.Header { func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116709,7 +125239,7 @@ func (c *RoutersUpdateCall) Header() http.Header { func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116892,7 +125422,7 @@ func (c *RoutesDeleteCall) Header() http.Header { func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117051,7 +125581,7 @@ func (c *RoutesGetCall) Header() http.Header { func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117217,7 +125747,7 @@ func (c *RoutesInsertCall) Header() http.Header { func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117401,6 +125931,15 @@ func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RoutesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RoutesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
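The aggregated-list calls touched earlier in this hunk (Reservations, ResourcePolicies, Routers) gain the same `returnPartialSuccess` opt-in, which is where it matters most: without it, a single unreachable scope can fail the whole aggregation. A sketch of inspecting per-scope warnings when opting in; the `Items`, `Routers`, and `Warning` field names are assumed from the usual shape of the generated aggregated-list types, not taken from this diff, and the project name is a placeholder.

```go
package sketches

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listRoutersEverywhere aggregates routers across all regions, tolerating
// scopes that fail and printing their warnings instead of aborting.
func listRoutersEverywhere(ctx context.Context, svc *compute.Service) error {
	return svc.Routers.AggregatedList("my-project").
		ReturnPartialSuccess(true).
		Pages(ctx, func(page *compute.RouterAggregatedList) error {
			for scope, scoped := range page.Items {
				if scoped.Warning != nil {
					fmt.Printf("%s: warning: %s\n", scope, scoped.Warning.Message)
					continue
				}
				for _, r := range scoped.Routers {
					fmt.Printf("%s: %s\n", scope, r.Name)
				}
			}
			return nil
		})
}
```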
@@ -117438,7 +125977,7 @@ func (c *RoutesListCall) Header() http.Header { func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117536,6 +126075,11 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/routes", @@ -117620,7 +126164,7 @@ func (c *SecurityPoliciesAddRuleCall) Header() http.Header { func (c *SecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117788,7 +126332,7 @@ func (c *SecurityPoliciesDeleteCall) Header() http.Header { func (c *SecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117946,7 +126490,7 @@ func (c *SecurityPoliciesGetCall) Header() http.Header { func (c *SecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118109,7 +126653,7 @@ func (c *SecurityPoliciesGetRuleCall) Header() http.Header { func (c *SecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118280,7 +126824,7 @@ func (c *SecurityPoliciesInsertCall) Header() http.Header { func (c *SecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118463,6 +127007,15 @@ func (c *SecurityPoliciesListCall) PageToken(pageToken string) *SecurityPolicies return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *SecurityPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SecurityPoliciesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -118500,7 +127053,7 @@ func (c *SecurityPoliciesListCall) Header() http.Header { func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118598,264 +127151,283 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // } - // }, - // "path": "{project}/global/securityPolicies", - // "response": { - // "$ref": "SecurityPolicyList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *SecurityPoliciesListCall) Pages(ctx context.Context, f func(*SecurityPolicyList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.securityPolicies.listPreconfiguredExpressionSets": - -type SecurityPoliciesListPreconfiguredExpressionSetsCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// ListPreconfiguredExpressionSets: Gets the current list of -// preconfigured Web Application Firewall (WAF) expressions. -func (r *SecurityPoliciesService) ListPreconfiguredExpressionSets(project string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { - c := &SecurityPoliciesListPreconfiguredExpressionSetsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either `=`, `!=`, `>`, or -// `<`. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. -// -// You can also filter nested fields. For example, you could specify -// `scheduling.automaticRestart = false` to include instances only if -// they are not scheduled for automatic restarts. You can use filtering -// on nested fields to filter based on resource labels. 
-// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example: ``` (scheduling.automaticRestart = -// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression -// is an `AND` expression. However, you can include `AND` and `OR` -// expressions explicitly. For example: ``` (cpuPlatform = "Intel -// Skylake") OR (cpuPlatform = "Intel Broadwell") AND -// (scheduling.automaticRestart = true) ``` -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Filter(filter string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) MaxResults(maxResults int64) *SecurityPoliciesListPreconfiguredExpressionSetsCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using `orderBy="creationTimestamp desc". This sorts -// results based on the `creationTimestamp` field in reverse -// chronological order (newest result first). Use this to sort resources -// like operations so that the newest operation is returned -// first. -// -// Currently, only sorting by `name` or `creationTimestamp desc` is -// supported. -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) OrderBy(orderBy string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) PageToken(pageToken string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Fields(s ...googleapi.Field) *SecurityPoliciesListPreconfiguredExpressionSetsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) IfNoneMatch(entityTag string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Context(ctx context.Context) *SecurityPoliciesListPreconfiguredExpressionSetsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/listPreconfiguredExpressionSets") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.securityPolicies.listPreconfiguredExpressionSets" call. -// Exactly one of -// *SecurityPoliciesListPreconfiguredExpressionSetsResponse or error -// will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *SecurityPoliciesListPreconfiguredExpressionSetsResponse.ServerRespons -// e.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Do(opts ...googleapi.CallOption) (*SecurityPoliciesListPreconfiguredExpressionSetsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &SecurityPoliciesListPreconfiguredExpressionSetsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the current list of preconfigured Web Application Firewall (WAF) expressions.", - // "httpMethod": "GET", - // "id": "compute.securityPolicies.listPreconfiguredExpressionSets", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "{project}/global/securityPolicies", + // "response": { + // "$ref": "SecurityPolicyList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *SecurityPoliciesListCall) Pages(ctx context.Context, f func(*SecurityPolicyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.securityPolicies.listPreconfiguredExpressionSets": + +type SecurityPoliciesListPreconfiguredExpressionSetsCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// ListPreconfiguredExpressionSets: Gets the current list of +// preconfigured Web Application Firewall (WAF) expressions. +func (r *SecurityPoliciesService) ListPreconfiguredExpressionSets(project string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c := &SecurityPoliciesListPreconfiguredExpressionSetsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Filter(filter string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) MaxResults(maxResults int64) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". 
This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) OrderBy(orderBy string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) PageToken(pageToken string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) ReturnPartialSuccess(returnPartialSuccess bool) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Fields(s ...googleapi.Field) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) IfNoneMatch(entityTag string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Context(ctx context.Context) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/listPreconfiguredExpressionSets") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.securityPolicies.listPreconfiguredExpressionSets" call. +// Exactly one of +// *SecurityPoliciesListPreconfiguredExpressionSetsResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *SecurityPoliciesListPreconfiguredExpressionSetsResponse.ServerRespons +// e.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Do(opts ...googleapi.CallOption) (*SecurityPoliciesListPreconfiguredExpressionSetsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SecurityPoliciesListPreconfiguredExpressionSetsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the current list of preconfigured Web Application Firewall (WAF) expressions.", + // "httpMethod": "GET", + // "id": "compute.securityPolicies.listPreconfiguredExpressionSets", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" + // "type": "boolean" // } // }, // "path": "{project}/global/securityPolicies/listPreconfiguredExpressionSets", @@ -118938,7 +127510,7 @@ func (c *SecurityPoliciesPatchCall) Header() http.Header { func (c *SecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119101,7 +127673,7 @@ func (c *SecurityPoliciesPatchRuleCall) Header() http.Header { func (c *SecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119263,7 +127835,7 @@ func (c *SecurityPoliciesRemoveRuleCall) Header() http.Header { func (c *SecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119436,7 +128008,7 @@ func (c *SnapshotsDeleteCall) Header() http.Header { func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119595,7 +128167,7 @@ func (c *SnapshotsGetCall) Header() http.Header { func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119715,6 +128287,13 @@ func (r *SnapshotsService) GetIamPolicy(project string, resource string) *Snapsh return c } +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *SnapshotsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *SnapshotsGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
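The regenerated client threads the new `returnPartialSuccess` and `optionsRequestedPolicyVersion` options through the builder-style call types shown above. A minimal, illustrative sketch of how a caller could opt in (the project ID, snapshot name, and requested policy version are assumptions, not part of this change):

```go
// Illustrative sketch only: exercises the optional parameters added by the
// regenerated compute client. Project ID, snapshot name, and the requested
// policy version are placeholder assumptions.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Opt in to partial results on a list call and iterate every page.
	err = svc.Snapshots.List("my-project").
		ReturnPartialSuccess(true).
		Pages(ctx, func(page *compute.SnapshotList) error {
			for _, s := range page.Items {
				fmt.Println(s.Name)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}

	// Request a specific IAM policy version on a getIamPolicy call.
	policy, err := svc.Snapshots.GetIamPolicy("my-project", "my-snapshot").
		OptionsRequestedPolicyVersion(3).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("policy version:", policy.Version)
}
```

The same `ReturnPartialSuccess` setter is added uniformly across the list and aggregated-list call types in this regeneration, so the pattern above applies to the other resources touched by the diff as well.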
@@ -119752,7 +128331,7 @@ func (c *SnapshotsGetIamPolicyCall) Header() http.Header { func (c *SnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119823,6 +128402,12 @@ func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "resource" // ], // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -119936,6 +128521,15 @@ func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SnapshotsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SnapshotsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -119973,7 +128567,7 @@ func (c *SnapshotsListCall) Header() http.Header { func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120071,6 +128665,11 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/snapshots", @@ -120156,7 +128755,7 @@ func (c *SnapshotsSetIamPolicyCall) Header() http.Header { func (c *SnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120308,7 +128907,7 @@ func (c *SnapshotsSetLabelsCall) Header() http.Header { func (c *SnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120460,7 +129059,7 @@ func (c *SnapshotsTestIamPermissionsCall) Header() http.Header { func (c *SnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120661,6 +129260,15 @@ func (c *SslCertificatesAggregatedListCall) PageToken(pageToken string) *SslCert return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SslCertificatesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslCertificatesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -120698,7 +129306,7 @@ func (c *SslCertificatesAggregatedListCall) Header() http.Header { func (c *SslCertificatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120801,6 +129409,11 @@ func (c *SslCertificatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*S // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/sslCertificates", @@ -120902,7 +129515,7 @@ func (c *SslCertificatesDeleteCall) Header() http.Header { func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121060,7 +129673,7 @@ func (c *SslCertificatesGetCall) Header() http.Header { func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121225,7 +129838,7 @@ func (c *SslCertificatesInsertCall) Header() http.Header { func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121408,6 +130021,15 @@ func (c *SslCertificatesListCall) PageToken(pageToken string) *SslCertificatesLi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SslCertificatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslCertificatesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -121445,7 +130067,7 @@ func (c *SslCertificatesListCall) Header() http.Header { func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121543,6 +130165,11 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/sslCertificates", @@ -121646,7 +130273,7 @@ func (c *SslPoliciesDeleteCall) Header() http.Header { func (c *SslPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121803,7 +130430,7 @@ func (c *SslPoliciesGetCall) Header() http.Header { func (c *SslPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121967,7 +130594,7 @@ func (c *SslPoliciesInsertCall) Header() http.Header { func (c *SslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122150,6 +130777,15 @@ func (c *SslPoliciesListCall) PageToken(pageToken string) *SslPoliciesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SslPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslPoliciesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -122187,7 +130823,7 @@ func (c *SslPoliciesListCall) Header() http.Header { func (c *SslPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122285,263 +130921,282 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // } - // }, - // "path": "{project}/global/sslPolicies", - // "response": { - // "$ref": "SslPoliciesList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *SslPoliciesListCall) Pages(ctx context.Context, f func(*SslPoliciesList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.sslPolicies.listAvailableFeatures": - -type SslPoliciesListAvailableFeaturesCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// ListAvailableFeatures: Lists all features that can be specified in -// the SSL policy when using custom profile. -func (r *SslPoliciesService) ListAvailableFeatures(project string) *SslPoliciesListAvailableFeaturesCall { - c := &SslPoliciesListAvailableFeaturesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either `=`, `!=`, `>`, or -// `<`. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. -// -// You can also filter nested fields. For example, you could specify -// `scheduling.automaticRestart = false` to include instances only if -// they are not scheduled for automatic restarts. You can use filtering -// on nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example: ``` (scheduling.automaticRestart = -// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression -// is an `AND` expression. However, you can include `AND` and `OR` -// expressions explicitly. For example: ``` (cpuPlatform = "Intel -// Skylake") OR (cpuPlatform = "Intel Broadwell") AND -// (scheduling.automaticRestart = true) ``` -func (c *SslPoliciesListAvailableFeaturesCall) Filter(filter string) *SslPoliciesListAvailableFeaturesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *SslPoliciesListAvailableFeaturesCall) MaxResults(maxResults int64) *SslPoliciesListAvailableFeaturesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using `orderBy="creationTimestamp desc". This sorts -// results based on the `creationTimestamp` field in reverse -// chronological order (newest result first). Use this to sort resources -// like operations so that the newest operation is returned -// first. 
-// -// Currently, only sorting by `name` or `creationTimestamp desc` is -// supported. -func (c *SslPoliciesListAvailableFeaturesCall) OrderBy(orderBy string) *SslPoliciesListAvailableFeaturesCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *SslPoliciesListAvailableFeaturesCall) PageToken(pageToken string) *SslPoliciesListAvailableFeaturesCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SslPoliciesListAvailableFeaturesCall) Fields(s ...googleapi.Field) *SslPoliciesListAvailableFeaturesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SslPoliciesListAvailableFeaturesCall) IfNoneMatch(entityTag string) *SslPoliciesListAvailableFeaturesCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SslPoliciesListAvailableFeaturesCall) Context(ctx context.Context) *SslPoliciesListAvailableFeaturesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SslPoliciesListAvailableFeaturesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/listAvailableFeatures") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.sslPolicies.listAvailableFeatures" call. -// Exactly one of *SslPoliciesListAvailableFeaturesResponse or error -// will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *SslPoliciesListAvailableFeaturesResponse.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. 
-func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) (*SslPoliciesListAvailableFeaturesResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &SslPoliciesListAvailableFeaturesResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists all features that can be specified in the SSL policy when using custom profile.", - // "httpMethod": "GET", - // "id": "compute.sslPolicies.listAvailableFeatures", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "{project}/global/sslPolicies", + // "response": { + // "$ref": "SslPoliciesList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *SslPoliciesListCall) Pages(ctx context.Context, f func(*SslPoliciesList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.sslPolicies.listAvailableFeatures": + +type SslPoliciesListAvailableFeaturesCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// ListAvailableFeatures: Lists all features that can be specified in +// the SSL policy when using custom profile. +func (r *SslPoliciesService) ListAvailableFeatures(project string) *SslPoliciesListAvailableFeaturesCall { + c := &SslPoliciesListAvailableFeaturesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *SslPoliciesListAvailableFeaturesCall) Filter(filter string) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *SslPoliciesListAvailableFeaturesCall) MaxResults(maxResults int64) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. 
+// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *SslPoliciesListAvailableFeaturesCall) OrderBy(orderBy string) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *SslPoliciesListAvailableFeaturesCall) PageToken(pageToken string) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SslPoliciesListAvailableFeaturesCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslPoliciesListAvailableFeaturesCall) Fields(s ...googleapi.Field) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SslPoliciesListAvailableFeaturesCall) IfNoneMatch(entityTag string) *SslPoliciesListAvailableFeaturesCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslPoliciesListAvailableFeaturesCall) Context(ctx context.Context) *SslPoliciesListAvailableFeaturesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SslPoliciesListAvailableFeaturesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/listAvailableFeatures") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.sslPolicies.listAvailableFeatures" call. 
+// Exactly one of *SslPoliciesListAvailableFeaturesResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *SslPoliciesListAvailableFeaturesResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) (*SslPoliciesListAvailableFeaturesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SslPoliciesListAvailableFeaturesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all features that can be specified in the SSL policy when using custom profile.", + // "httpMethod": "GET", + // "id": "compute.sslPolicies.listAvailableFeatures", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" + // "type": "boolean" // } // }, // "path": "{project}/global/sslPolicies/listAvailableFeatures", @@ -122625,7 +131280,7 @@ func (c *SslPoliciesPatchCall) Header() http.Header { func (c *SslPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122828,6 +131483,15 @@ func (c *SubnetworksAggregatedListCall) PageToken(pageToken string) *Subnetworks return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *SubnetworksAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SubnetworksAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -122865,7 +131529,7 @@ func (c *SubnetworksAggregatedListCall) Header() http.Header { func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122968,6 +131632,11 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/subnetworks", @@ -123071,7 +131740,7 @@ func (c *SubnetworksDeleteCall) Header() http.Header { func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123250,7 +131919,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) Header() http.Header { func (c *SubnetworksExpandIpCidrRangeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123427,7 +132096,7 @@ func (c *SubnetworksGetCall) Header() http.Header { func (c *SubnetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123558,6 +132227,13 @@ func (r *SubnetworksService) GetIamPolicy(project string, region string, resourc return c } +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *SubnetworksGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *SubnetworksGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
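// ---------------------------------------------------------------------------
// Illustrative usage sketch — not part of the vendored file or of this patch.
// The hunks above add a ReturnPartialSuccess option to the generated list
// calls in this vendor bump. A minimal, hypothetical example of driving one of
// those calls through the generated Pages iterator could look like the
// following; "my-project" and "us-central1" are placeholders, and the snippet
// assumes the vendored google.golang.org/api/compute/v1 and
// google.golang.org/api/option packages with Application Default Credentials.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// Build the generated Compute service client with read-only scope.
	svc, err := compute.NewService(ctx, option.WithScopes(compute.ComputeReadonlyScope))
	if err != nil {
		log.Fatalf("creating compute service: %v", err)
	}

	// List subnetworks page by page, opting in to partial results so a
	// failure in one scope does not fail the whole request.
	call := svc.Subnetworks.List("my-project", "us-central1").
		MaxResults(100).
		ReturnPartialSuccess(true)

	err = call.Pages(ctx, func(page *compute.SubnetworkList) error {
		for _, sn := range page.Items {
			fmt.Printf("%s %s\n", sn.Name, sn.IpCidrRange)
		}
		return nil
	})
	if err != nil {
		log.Fatalf("listing subnetworks: %v", err)
	}
}
// ---------------------------------------------------------------------------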
@@ -123595,7 +132271,7 @@ func (c *SubnetworksGetIamPolicyCall) Header() http.Header { func (c *SubnetworksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123668,6 +132344,12 @@ func (c *SubnetworksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, // "resource" // ], // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -123771,7 +132453,7 @@ func (c *SubnetworksInsertCall) Header() http.Header { func (c *SubnetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123965,6 +132647,15 @@ func (c *SubnetworksListCall) PageToken(pageToken string) *SubnetworksListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SubnetworksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SubnetworksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -124002,7 +132693,7 @@ func (c *SubnetworksListCall) Header() http.Header { func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124109,6 +132800,11 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/subnetworks", @@ -124157,8 +132853,7 @@ type SubnetworksListUsableCall struct { } // ListUsable: Retrieves an aggregated list of all usable subnetworks in -// the project. The list contains all of the subnetworks in the project -// and the subnetworks that were shared by a Shared VPC host project. +// the project. 
func (r *SubnetworksService) ListUsable(project string) *SubnetworksListUsableCall { c := &SubnetworksListUsableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -124230,6 +132925,15 @@ func (c *SubnetworksListUsableCall) PageToken(pageToken string) *SubnetworksList return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SubnetworksListUsableCall) ReturnPartialSuccess(returnPartialSuccess bool) *SubnetworksListUsableCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -124267,7 +132971,7 @@ func (c *SubnetworksListUsableCall) Header() http.Header { func (c *SubnetworksListUsableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124329,7 +133033,7 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub } return ret, nil // { - // "description": "Retrieves an aggregated list of all usable subnetworks in the project. The list contains all of the subnetworks in the project and the subnetworks that were shared by a Shared VPC host project.", + // "description": "Retrieves an aggregated list of all usable subnetworks in the project.", // "httpMethod": "GET", // "id": "compute.subnetworks.listUsable", // "parameterOrder": [ @@ -124365,6 +133069,11 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/subnetworks/listUsable", @@ -124488,7 +133197,7 @@ func (c *SubnetworksPatchCall) Header() http.Header { func (c *SubnetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124662,7 +133371,7 @@ func (c *SubnetworksSetIamPolicyCall) Header() http.Header { func (c *SubnetworksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124845,7 +133554,7 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Header() http.Header { func (c *SubnetworksSetPrivateIpGoogleAccessCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124911,13 +133620,678 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access.", + // "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access.", + // "httpMethod": "POST", + // "id": "compute.subnetworks.setPrivateIpGoogleAccess", + // "parameterOrder": [ + // "project", + // "region", + // "subnetwork" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "subnetwork": { + // "description": "Name of the Subnetwork resource.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess", + // "request": { + // "$ref": "SubnetworksSetPrivateIpGoogleAccessRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.subnetworks.testIamPermissions": + +type SubnetworksTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *SubnetworksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *SubnetworksTestIamPermissionsCall { + c := &SubnetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SubnetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *SubnetworksTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SubnetworksTestIamPermissionsCall) Context(ctx context.Context) *SubnetworksTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SubnetworksTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SubnetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{resource}/testIamPermissions") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.subnetworks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.subnetworks.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/subnetworks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetGrpcProxies.delete": + +type TargetGrpcProxiesDeleteCall struct { + s *Service + project string + targetGrpcProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified TargetGrpcProxy in the given scope +func (r *TargetGrpcProxiesService) Delete(project string, targetGrpcProxy string) *TargetGrpcProxiesDeleteCall { + c := &TargetGrpcProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + 
c.targetGrpcProxy = targetGrpcProxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetGrpcProxiesDeleteCall) RequestId(requestId string) *TargetGrpcProxiesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetGrpcProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetGrpcProxiesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetGrpcProxiesDeleteCall) Context(ctx context.Context) *TargetGrpcProxiesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetGrpcProxiesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetGrpcProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetGrpcProxies/{targetGrpcProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetGrpcProxy": c.targetGrpcProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetGrpcProxies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetGrpcProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified TargetGrpcProxy in the given scope", + // "httpMethod": "DELETE", + // "id": "compute.targetGrpcProxies.delete", + // "parameterOrder": [ + // "project", + // "targetGrpcProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetGrpcProxy": { + // "description": "Name of the TargetGrpcProxy resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetGrpcProxies/{targetGrpcProxy}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetGrpcProxies.get": + +type TargetGrpcProxiesGetCall struct { + s *Service + project string + targetGrpcProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified TargetGrpcProxy resource in the given +// scope. +func (r *TargetGrpcProxiesService) Get(project string, targetGrpcProxy string) *TargetGrpcProxiesGetCall { + c := &TargetGrpcProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetGrpcProxy = targetGrpcProxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetGrpcProxiesGetCall) Fields(s ...googleapi.Field) *TargetGrpcProxiesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetGrpcProxiesGetCall) IfNoneMatch(entityTag string) *TargetGrpcProxiesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetGrpcProxiesGetCall) Context(ctx context.Context) *TargetGrpcProxiesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetGrpcProxiesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetGrpcProxiesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetGrpcProxies/{targetGrpcProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetGrpcProxy": c.targetGrpcProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetGrpcProxies.get" call. +// Exactly one of *TargetGrpcProxy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetGrpcProxy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetGrpcProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetGrpcProxy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetGrpcProxy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified TargetGrpcProxy resource in the given scope.", + // "httpMethod": "GET", + // "id": "compute.targetGrpcProxies.get", + // "parameterOrder": [ + // "project", + // "targetGrpcProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetGrpcProxy": { + // "description": "Name of the TargetGrpcProxy resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetGrpcProxies/{targetGrpcProxy}", + // "response": { + // "$ref": "TargetGrpcProxy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetGrpcProxies.insert": + +type TargetGrpcProxiesInsertCall struct { + s *Service + project string + targetgrpcproxy *TargetGrpcProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a TargetGrpcProxy in the specified project in the +// given scope using the parameters that are included in the request. +func (r *TargetGrpcProxiesService) Insert(project string, targetgrpcproxy *TargetGrpcProxy) *TargetGrpcProxiesInsertCall { + c := &TargetGrpcProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetgrpcproxy = targetgrpcproxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetGrpcProxiesInsertCall) RequestId(requestId string) *TargetGrpcProxiesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *TargetGrpcProxiesInsertCall) Fields(s ...googleapi.Field) *TargetGrpcProxiesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetGrpcProxiesInsertCall) Context(ctx context.Context) *TargetGrpcProxiesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetGrpcProxiesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetGrpcProxiesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetgrpcproxy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetGrpcProxies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetGrpcProxies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetGrpcProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a TargetGrpcProxy in the specified project in the given scope using the parameters that are included in the request.", // "httpMethod": "POST", - // "id": "compute.subnetworks.setPrivateIpGoogleAccess", + // "id": "compute.targetGrpcProxies.insert", // "parameterOrder": [ - // "project", - // "region", - // "subnetwork" + // "project" // ], // "parameters": { // "project": { @@ -124927,29 +134301,15 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOptio // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "subnetwork": { - // "description": "Name of the Subnetwork resource.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess", + // "path": "{project}/global/targetGrpcProxies", // "request": { - // "$ref": "SubnetworksSetPrivateIpGoogleAccessRequest" + // "$ref": "TargetGrpcProxy" // }, // "response": { // "$ref": "Operation" @@ -124962,93 +134322,167 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOptio } -// method id "compute.subnetworks.testIamPermissions": +// method id "compute.targetGrpcProxies.list": -type SubnetworksTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetGrpcProxiesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *SubnetworksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *SubnetworksTestIamPermissionsCall { - c := &SubnetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Lists the TargetGrpcProxies for a project in the given scope. +func (r *TargetGrpcProxiesService) List(project string) *TargetGrpcProxiesListCall { + c := &TargetGrpcProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *TargetGrpcProxiesListCall) Filter(filter string) *TargetGrpcProxiesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *TargetGrpcProxiesListCall) MaxResults(maxResults int64) *TargetGrpcProxiesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *TargetGrpcProxiesListCall) OrderBy(orderBy string) *TargetGrpcProxiesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. 
Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *TargetGrpcProxiesListCall) PageToken(pageToken string) *TargetGrpcProxiesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetGrpcProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetGrpcProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SubnetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *SubnetworksTestIamPermissionsCall { +func (c *TargetGrpcProxiesListCall) Fields(s ...googleapi.Field) *TargetGrpcProxiesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetGrpcProxiesListCall) IfNoneMatch(entityTag string) *TargetGrpcProxiesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SubnetworksTestIamPermissionsCall) Context(ctx context.Context) *SubnetworksTestIamPermissionsCall { +func (c *TargetGrpcProxiesListCall) Context(ctx context.Context) *TargetGrpcProxiesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SubnetworksTestIamPermissionsCall) Header() http.Header { +func (c *TargetGrpcProxiesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SubnetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetGrpcProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetGrpcProxies") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.subnetworks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// Do executes the "compute.targetGrpcProxies.list" call. +// Exactly one of *TargetGrpcProxyList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// *TargetGrpcProxyList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *TargetGrpcProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetGrpcProxyList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -125067,7 +134501,7 @@ func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &TargetGrpcProxyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -125079,15 +134513,36 @@ func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.subnetworks.testIamPermissions", + // "description": "Lists the TargetGrpcProxies for a project in the given scope.", + // "httpMethod": "GET", + // "id": "compute.targetGrpcProxies.list", // "parameterOrder": [ - // "project", - // "region", - // "resource" + // "project" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -125095,32 +134550,218 @@ func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "{project}/global/targetGrpcProxies", + // "response": { + // "$ref": "TargetGrpcProxyList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetGrpcProxiesListCall) Pages(ctx context.Context, f func(*TargetGrpcProxyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.targetGrpcProxies.patch": + +type TargetGrpcProxiesPatchCall struct { + s *Service + project string + targetGrpcProxy string + targetgrpcproxy *TargetGrpcProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified TargetGrpcProxy resource with the data +// included in the request. This method supports PATCH semantics and +// uses JSON merge patch format and processing rules. 
+func (r *TargetGrpcProxiesService) Patch(project string, targetGrpcProxy string, targetgrpcproxy *TargetGrpcProxy) *TargetGrpcProxiesPatchCall { + c := &TargetGrpcProxiesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetGrpcProxy = targetGrpcProxy + c.targetgrpcproxy = targetgrpcproxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetGrpcProxiesPatchCall) RequestId(requestId string) *TargetGrpcProxiesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetGrpcProxiesPatchCall) Fields(s ...googleapi.Field) *TargetGrpcProxiesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetGrpcProxiesPatchCall) Context(ctx context.Context) *TargetGrpcProxiesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetGrpcProxiesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetGrpcProxiesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetgrpcproxy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetGrpcProxies/{targetGrpcProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetGrpcProxy": c.targetGrpcProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetGrpcProxies.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetGrpcProxiesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified TargetGrpcProxy resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.targetGrpcProxies.patch", + // "parameterOrder": [ + // "project", + // "targetGrpcProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetGrpcProxy": { + // "description": "Name of the TargetGrpcProxy resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/subnetworks/{resource}/testIamPermissions", + // "path": "{project}/global/targetGrpcProxies/{targetGrpcProxy}", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "TargetGrpcProxy" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -125223,6 +134864,15 @@ func (c *TargetHttpProxiesAggregatedListCall) PageToken(pageToken string) *Targe return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. 
The default value is +// false and the logic is the same as today. +func (c *TargetHttpProxiesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetHttpProxiesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -125260,7 +134910,7 @@ func (c *TargetHttpProxiesAggregatedListCall) Header() http.Header { func (c *TargetHttpProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125363,6 +135013,11 @@ func (c *TargetHttpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/targetHttpProxies", @@ -125465,7 +135120,7 @@ func (c *TargetHttpProxiesDeleteCall) Header() http.Header { func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125624,7 +135279,7 @@ func (c *TargetHttpProxiesGetCall) Header() http.Header { func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125790,7 +135445,7 @@ func (c *TargetHttpProxiesInsertCall) Header() http.Header { func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125974,6 +135629,15 @@ func (c *TargetHttpProxiesListCall) PageToken(pageToken string) *TargetHttpProxi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetHttpProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetHttpProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
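For orientation, a minimal sketch of driving the new TargetGrpcProxies list call through its generated Pages helper. This assumes the vendored google.golang.org/api/compute/v1 client is imported as compute, that the generated Service struct exposes a TargetGrpcProxies field as the generator normally emits, and that "my-project" is a placeholder project ID; it is an illustration, not part of the vendored diff.

package main

import (
    "context"
    "fmt"
    "log"

    compute "google.golang.org/api/compute/v1"
)

func main() {
    ctx := context.Background()

    // NewService picks up Application Default Credentials.
    svc, err := compute.NewService(ctx)
    if err != nil {
        log.Fatal(err)
    }

    // Pages drives the list call, following nextPageToken until the server
    // returns an empty token or the callback returns a non-nil error.
    err = svc.TargetGrpcProxies.List("my-project").Pages(ctx,
        func(page *compute.TargetGrpcProxyList) error {
            for _, proxy := range page.Items {
                fmt.Println(proxy.Name)
            }
            return nil
        })
    if err != nil {
        log.Fatal(err)
    }
}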
@@ -126011,7 +135675,7 @@ func (c *TargetHttpProxiesListCall) Header() http.Header { func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126109,6 +135773,11 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/targetHttpProxies", @@ -126145,6 +135814,184 @@ func (c *TargetHttpProxiesListCall) Pages(ctx context.Context, f func(*TargetHtt } } +// method id "compute.targetHttpProxies.patch": + +type TargetHttpProxiesPatchCall struct { + s *Service + project string + targetHttpProxy string + targethttpproxy *TargetHttpProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified TargetHttpProxy resource with the data +// included in the request. This method supports PATCH semantics and +// uses JSON merge patch format and processing rules. (== +// suppress_warning http-rest-shadowed ==) +func (r *TargetHttpProxiesService) Patch(project string, targetHttpProxy string, targethttpproxy *TargetHttpProxy) *TargetHttpProxiesPatchCall { + c := &TargetHttpProxiesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpProxy = targetHttpProxy + c.targethttpproxy = targethttpproxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetHttpProxiesPatchCall) RequestId(requestId string) *TargetHttpProxiesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpProxiesPatchCall) Fields(s ...googleapi.Field) *TargetHttpProxiesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *TargetHttpProxiesPatchCall) Context(ctx context.Context) *TargetHttpProxiesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetHttpProxiesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpProxiesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpProxy": c.targetHttpProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetHttpProxies.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpProxiesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified TargetHttpProxy resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "PATCH", + // "id": "compute.targetHttpProxies.patch", + // "parameterOrder": [ + // "project", + // "targetHttpProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + // "request": { + // "$ref": "TargetHttpProxy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.targetHttpProxies.setUrlMap": type TargetHttpProxiesSetUrlMapCall struct { @@ -126213,7 +136060,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) Header() http.Header { func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126418,6 +136265,15 @@ func (c *TargetHttpsProxiesAggregatedListCall) PageToken(pageToken string) *Targ return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetHttpsProxiesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetHttpsProxiesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -126455,7 +136311,7 @@ func (c *TargetHttpsProxiesAggregatedListCall) Header() http.Header { func (c *TargetHttpsProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126558,6 +136414,11 @@ func (c *TargetHttpsProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/targetHttpsProxies", @@ -126659,7 +136520,7 @@ func (c *TargetHttpsProxiesDeleteCall) Header() http.Header { func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126817,7 +136678,7 @@ func (c *TargetHttpsProxiesGetCall) Header() http.Header { func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126982,7 +136843,7 @@ func (c *TargetHttpsProxiesInsertCall) Header() http.Header { func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127165,6 +137026,15 @@ func (c *TargetHttpsProxiesListCall) PageToken(pageToken string) *TargetHttpsPro return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetHttpsProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetHttpsProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -127202,7 +137072,7 @@ func (c *TargetHttpsProxiesListCall) Header() http.Header { func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127300,6 +137170,11 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/targetHttpsProxies", @@ -127403,7 +137278,7 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Header() http.Header { func (c *TargetHttpsProxiesSetQuicOverrideCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127577,7 +137452,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127756,7 +137631,7 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) Header() http.Header { func (c *TargetHttpsProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127930,7 +137805,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Header() http.Header { func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128135,6 +138010,15 @@ func (c *TargetInstancesAggregatedListCall) PageToken(pageToken string) *TargetI return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetInstancesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetInstancesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
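A hedged sketch of the Patch + RequestId pattern documented on the new TargetHttpProxies.Patch call above. The project, proxy, URL map reference, and UUID are placeholders; because the call uses JSON merge-patch semantics, only the fields set on the struct are changed.

package main

import (
    "context"
    "log"

    compute "google.golang.org/api/compute/v1"
)

func main() {
    ctx := context.Background()

    svc, err := compute.NewService(ctx)
    if err != nil {
        log.Fatal(err)
    }

    // Merge-patch semantics: only UrlMap is modified on the proxy.
    patch := &compute.TargetHttpProxy{
        UrlMap: "https://www.googleapis.com/compute/v1/projects/my-project/global/urlMaps/my-url-map",
    }

    // The request ID should be a unique UUID per logical change; if the call
    // is retried with the same ID, the server ignores the duplicate request.
    op, err := svc.TargetHttpProxies.Patch("my-project", "my-proxy", patch).
        RequestId("3e5c2a8e-6c2f-4a5f-9c1e-0b7d2f6a1d42").
        Context(ctx).
        Do()
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("operation %s: %s", op.Name, op.Status)
}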
@@ -128172,7 +138056,7 @@ func (c *TargetInstancesAggregatedListCall) Header() http.Header { func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128275,6 +138159,11 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/targetInstances", @@ -128379,7 +138268,7 @@ func (c *TargetInstancesDeleteCall) Header() http.Header { func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128549,7 +138438,7 @@ func (c *TargetInstancesGetCall) Header() http.Header { func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128726,7 +138615,7 @@ func (c *TargetInstancesInsertCall) Header() http.Header { func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128921,6 +138810,15 @@ func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesLi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetInstancesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetInstancesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -128958,7 +138856,7 @@ func (c *TargetInstancesListCall) Header() http.Header { func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129059,6 +138957,11 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "Name of the zone scoping this request.", // "location": "path", @@ -129171,7 +139074,7 @@ func (c *TargetPoolsAddHealthCheckCall) Header() http.Header { func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129358,7 +139261,7 @@ func (c *TargetPoolsAddInstanceCall) Header() http.Header { func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129572,6 +139475,15 @@ func (c *TargetPoolsAggregatedListCall) PageToken(pageToken string) *TargetPools return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetPoolsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetPoolsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -129609,7 +139521,7 @@ func (c *TargetPoolsAggregatedListCall) Header() http.Header { func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129712,6 +139624,11 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/targetPools", @@ -129816,7 +139733,7 @@ func (c *TargetPoolsDeleteCall) Header() http.Header { func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129986,7 +139903,7 @@ func (c *TargetPoolsGetCall) Header() http.Header { func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130146,7 +140063,7 @@ func (c *TargetPoolsGetHealthCall) Header() http.Header { func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130328,7 +140245,7 @@ func (c *TargetPoolsInsertCall) Header() http.Header { func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130523,6 +140440,15 @@ func (c *TargetPoolsListCall) PageToken(pageToken string) *TargetPoolsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetPoolsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetPoolsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -130560,7 +140486,7 @@ func (c *TargetPoolsListCall) Header() http.Header { func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130667,6 +140593,11 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/targetPools", @@ -130773,7 +140704,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) Header() http.Header { func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130960,7 +140891,7 @@ func (c *TargetPoolsRemoveInstanceCall) Header() http.Header { func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131154,7 +141085,7 @@ func (c *TargetPoolsSetBackupCall) Header() http.Header { func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131342,7 +141273,7 @@ func (c *TargetSslProxiesDeleteCall) Header() http.Header { func (c *TargetSslProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131500,7 +141431,7 @@ func (c *TargetSslProxiesGetCall) Header() http.Header { func (c *TargetSslProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131665,7 +141596,7 @@ func (c *TargetSslProxiesInsertCall) Header() http.Header { func (c *TargetSslProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131848,6 +141779,15 @@ func (c *TargetSslProxiesListCall) PageToken(pageToken string) *TargetSslProxies return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetSslProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetSslProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
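A small sketch of the returnPartialSuccess opt-in on an aggregated list call, using the TargetPools.AggregatedList setter added above. The project name is a placeholder, and the Items map layout follows the standard generated aggregated-list shape (scope key to scoped list).

package main

import (
    "context"
    "fmt"
    "log"

    compute "google.golang.org/api/compute/v1"
)

func main() {
    ctx := context.Background()

    svc, err := compute.NewService(ctx)
    if err != nil {
        log.Fatal(err)
    }

    // ReturnPartialSuccess(true) asks the server to return whatever scopes it
    // could list even if some regions fail, instead of failing the whole call.
    agg, err := svc.TargetPools.AggregatedList("my-project").
        ReturnPartialSuccess(true).
        Context(ctx).
        Do()
    if err != nil {
        log.Fatal(err)
    }

    // Results come back keyed by scope, e.g. "regions/us-central1".
    for scope, scoped := range agg.Items {
        fmt.Printf("%s: %d target pools\n", scope, len(scoped.TargetPools))
    }
}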
@@ -131885,7 +141825,7 @@ func (c *TargetSslProxiesListCall) Header() http.Header { func (c *TargetSslProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131983,6 +141923,11 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/targetSslProxies", @@ -132086,7 +142031,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) Header() http.Header { func (c *TargetSslProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132261,7 +142206,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Header() http.Header { func (c *TargetSslProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132436,7 +142381,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Header() http.Header { func (c *TargetSslProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132614,7 +142559,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) Header() http.Header { func (c *TargetSslProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132786,7 +142731,7 @@ func (c *TargetTcpProxiesDeleteCall) Header() http.Header { func (c *TargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132944,7 +142889,7 @@ func (c *TargetTcpProxiesGetCall) Header() http.Header { func (c *TargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133109,7 +143054,7 @@ func (c *TargetTcpProxiesInsertCall) Header() http.Header { func (c *TargetTcpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133292,6 +143237,15 @@ func (c *TargetTcpProxiesListCall) PageToken(pageToken string) *TargetTcpProxies return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetTcpProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetTcpProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -133329,7 +143283,7 @@ func (c *TargetTcpProxiesListCall) Header() http.Header { func (c *TargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133427,6 +143381,11 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/targetTcpProxies", @@ -133530,7 +143489,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Header() http.Header { func (c *TargetTcpProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133705,7 +143664,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Header() http.Header { func (c *TargetTcpProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133909,6 +143868,15 @@ func (c *TargetVpnGatewaysAggregatedListCall) PageToken(pageToken string) *Targe return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. 
The default value is +// false and the logic is the same as today. +func (c *TargetVpnGatewaysAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetVpnGatewaysAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -133946,7 +143914,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Header() http.Header { func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134049,6 +144017,11 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/targetVpnGateways", @@ -134152,7 +144125,7 @@ func (c *TargetVpnGatewaysDeleteCall) Header() http.Header { func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134321,7 +144294,7 @@ func (c *TargetVpnGatewaysGetCall) Header() http.Header { func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134497,7 +144470,7 @@ func (c *TargetVpnGatewaysInsertCall) Header() http.Header { func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134691,6 +144664,15 @@ func (c *TargetVpnGatewaysListCall) PageToken(pageToken string) *TargetVpnGatewa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetVpnGatewaysListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetVpnGatewaysListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
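Finally, a sketch of the error contract the generated Do methods describe: a 304 surfaces as an error distinguishable with googleapi.IsNotModified, and any other non-2xx status as a *googleapi.Error carrying the code and message. The project, region, gateway name, and ETag are placeholders, and IfNoneMatch is the standard conditional-get setter the generator adds to Get calls.

package main

import (
    "context"
    "log"

    compute "google.golang.org/api/compute/v1"
    "google.golang.org/api/googleapi"
)

func main() {
    ctx := context.Background()

    svc, err := compute.NewService(ctx)
    if err != nil {
        log.Fatal(err)
    }

    gw, err := svc.TargetVpnGateways.Get("my-project", "us-central1", "my-gateway").
        IfNoneMatch("etag-from-a-previous-response").
        Context(ctx).
        Do()
    if err != nil {
        // A 304 is returned as an error; IsNotModified separates it from real failures.
        if googleapi.IsNotModified(err) {
            log.Println("gateway unchanged since the cached ETag")
            return
        }
        // Other non-2xx statuses arrive as *googleapi.Error with code and message.
        if gerr, ok := err.(*googleapi.Error); ok {
            log.Fatalf("compute API error %d: %s", gerr.Code, gerr.Message)
        }
        log.Fatal(err)
    }
    log.Printf("gateway %s status: %s", gw.Name, gw.Status)
}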
@@ -134728,7 +144710,7 @@ func (c *TargetVpnGatewaysListCall) Header() http.Header { func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134835,6 +144817,11 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/targetVpnGateways", @@ -134968,6 +144955,15 @@ func (c *UrlMapsAggregatedListCall) PageToken(pageToken string) *UrlMapsAggregat return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *UrlMapsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *UrlMapsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -135005,7 +145001,7 @@ func (c *UrlMapsAggregatedListCall) Header() http.Header { func (c *UrlMapsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135108,6 +145104,11 @@ func (c *UrlMapsAggregatedListCall) Do(opts ...googleapi.CallOption) (*UrlMapsAg // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/urlMaps", @@ -135210,7 +145211,7 @@ func (c *UrlMapsDeleteCall) Header() http.Header { func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135369,7 +145370,7 @@ func (c *UrlMapsGetCall) Header() http.Header { func (c *UrlMapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135535,7 +145536,7 @@ func (c *UrlMapsInsertCall) Header() http.Header { func (c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135702,7 +145703,7 @@ func (c *UrlMapsInvalidateCacheCall) Header() http.Header { func (c *UrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135895,6 +145896,15 @@ func (c *UrlMapsListCall) PageToken(pageToken string) *UrlMapsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *UrlMapsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *UrlMapsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -135932,7 +145942,7 @@ func (c *UrlMapsListCall) Header() http.Header { func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136030,6 +146040,11 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/urlMaps", @@ -136136,7 +146151,7 @@ func (c *UrlMapsPatchCall) Header() http.Header { func (c *UrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136313,7 +146328,7 @@ func (c *UrlMapsUpdateCall) Header() http.Header { func (c *UrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136472,7 +146487,7 @@ func (c *UrlMapsValidateCall) Header() http.Header { func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136671,6 +146686,15 @@ func (c *VpnGatewaysAggregatedListCall) PageToken(pageToken string) *VpnGateways return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *VpnGatewaysAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *VpnGatewaysAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -136708,7 +146732,7 @@ func (c *VpnGatewaysAggregatedListCall) Header() http.Header { func (c *VpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136811,6 +146835,11 @@ func (c *VpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnGa // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/vpnGateways", @@ -136914,7 +146943,7 @@ func (c *VpnGatewaysDeleteCall) Header() http.Header { func (c *VpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137083,7 +147112,7 @@ func (c *VpnGatewaysGetCall) Header() http.Header { func (c *VpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137250,7 +147279,7 @@ func (c *VpnGatewaysGetStatusCall) Header() http.Header { func (c *VpnGatewaysGetStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137426,7 +147455,7 @@ func (c *VpnGatewaysInsertCall) Header() http.Header { func (c *VpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137620,6 +147649,15 @@ func (c *VpnGatewaysListCall) PageToken(pageToken string) *VpnGatewaysListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *VpnGatewaysListCall) ReturnPartialSuccess(returnPartialSuccess bool) *VpnGatewaysListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -137657,7 +147695,7 @@ func (c *VpnGatewaysListCall) Header() http.Header { func (c *VpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137764,6 +147802,11 @@ func (c *VpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*VpnGatewayList, // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/vpnGateways", @@ -137870,7 +147913,7 @@ func (c *VpnGatewaysSetLabelsCall) Header() http.Header { func (c *VpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138038,7 +148081,7 @@ func (c *VpnGatewaysTestIamPermissionsCall) Header() http.Header { func (c *VpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138247,6 +148290,15 @@ func (c *VpnTunnelsAggregatedListCall) PageToken(pageToken string) *VpnTunnelsAg return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *VpnTunnelsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *VpnTunnelsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -138284,7 +148336,7 @@ func (c *VpnTunnelsAggregatedListCall) Header() http.Header { func (c *VpnTunnelsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138387,6 +148439,11 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/vpnTunnels", @@ -138490,7 +148547,7 @@ func (c *VpnTunnelsDeleteCall) Header() http.Header { func (c *VpnTunnelsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138659,7 +148716,7 @@ func (c *VpnTunnelsGetCall) Header() http.Header { func (c *VpnTunnelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138835,7 +148892,7 @@ func (c *VpnTunnelsInsertCall) Header() http.Header { func (c *VpnTunnelsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139029,6 +149086,15 @@ func (c *VpnTunnelsListCall) PageToken(pageToken string) *VpnTunnelsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *VpnTunnelsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *VpnTunnelsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -139066,7 +149132,7 @@ func (c *VpnTunnelsListCall) Header() http.Header { func (c *VpnTunnelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139173,6 +149239,11 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/vpnTunnels", @@ -139258,7 +149329,7 @@ func (c *ZoneOperationsDeleteCall) Header() http.Header { func (c *ZoneOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139394,7 +149465,7 @@ func (c *ZoneOperationsGetCall) Header() http.Header { func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139589,6 +149660,15 @@ func (c *ZoneOperationsListCall) PageToken(pageToken string) *ZoneOperationsList return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ZoneOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ZoneOperationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -139626,7 +149706,7 @@ func (c *ZoneOperationsListCall) Header() http.Header { func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139727,6 +149807,11 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "Name of the zone for request.", // "location": "path", @@ -139830,7 +149915,7 @@ func (c *ZoneOperationsWaitCall) Header() http.Header { func (c *ZoneOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139994,7 +150079,7 @@ func (c *ZonesGetCall) Header() http.Header { func (c *ZonesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140178,6 +150263,15 @@ func (c *ZonesListCall) PageToken(pageToken string) *ZonesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ZonesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ZonesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -140215,7 +150309,7 @@ func (c *ZonesListCall) Header() http.Header { func (c *ZonesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140313,6 +150407,11 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/zones", diff --git a/vendor/google.golang.org/api/container/v1/container-api.json b/vendor/google.golang.org/api/container/v1/container-api.json index ae0bbf3cc2b..7205da1a5fe 100644 --- a/vendor/google.golang.org/api/container/v1/container-api.json +++ b/vendor/google.golang.org/api/container/v1/container-api.json @@ -121,23 +121,23 @@ ], "parameters": { "filter": { - "description": "Filtering currently only supports equality on the networkProjectId and must\nbe in the form: \"networkProjectId=[PROJECTID]\", where `networkProjectId`\nis the project which owns the listed subnetworks. This defaults to the\nparent project ID.", + "description": "Filtering currently only supports equality on the networkProjectId and must be in the form: \"networkProjectId=[PROJECTID]\", where `networkProjectId` is the project which owns the listed subnetworks. 
This defaults to the parent project ID.", "location": "query", "type": "string" }, "pageSize": { - "description": "The max number of results per page that should be returned. If the number\nof available results is larger than `page_size`, a `next_page_token` is\nreturned which can be used to get the next page of results in subsequent\nrequests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The max number of results per page that should be returned. If the number of available results is larger than `page_size`, a `next_page_token` is returned which can be used to get the next page of results in subsequent requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Specifies a page token to use. Set this to the nextPageToken returned by\nprevious list requests to get the next page of results.", + "description": "Specifies a page token to use. Set this to the nextPageToken returned by previous list requests to get the next page of results.", "location": "query", "type": "string" }, "parent": { - "description": "The parent project where subnetworks are usable.\nSpecified in the format `projects/*`.", + "description": "The parent project where subnetworks are usable. Specified in the format `projects/*`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -168,19 +168,19 @@ ], "parameters": { "name": { - "description": "The name (project and location) of the server config to get,\nspecified in the format `projects/*/locations/*`.", + "description": "The name (project and location) of the server config to get, specified in the format `projects/*/locations/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) to return\noperations for. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" } @@ -207,7 +207,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster id) of the cluster to complete IP\nrotation. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to complete IP rotation. 
Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -226,7 +226,7 @@ ] }, "create": { - "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default\nnetwork](https://cloud.google.com/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe Kubelet creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range the cluster is using.", + "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances. By default, the cluster is created in the project's [default network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks). One firewall is added for the cluster. After cluster creation, the Kubelet creates routes for each node to allow the containers on that node to communicate with all other instances in the cluster. Finally, an entry is added to the project's global metadata indicating which CIDR range the cluster is using.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters", "httpMethod": "POST", "id": "container.projects.locations.clusters.create", @@ -235,7 +235,7 @@ ], "parameters": { "parent": { - "description": "The parent (project and location) where the cluster will be created.\nSpecified in the format `projects/*/locations/*`.", + "description": "The parent (project and location) where the cluster will be created. Specified in the format `projects/*/locations/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -254,7 +254,7 @@ ] }, "delete": { - "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster,\nsuch as load balancer resources, are not deleted if they weren't present\nwhen the cluster was initially created.", + "description": "Deletes the cluster, including the Kubernetes endpoint and all worker nodes. Firewalls and routes that were configured during cluster creation are also deleted. Other Google Compute Engine resources that might be in use by the cluster, such as load balancer resources, are not deleted if they weren't present when the cluster was initially created.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}", "httpMethod": "DELETE", "id": "container.projects.locations.clusters.delete", @@ -263,24 +263,24 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster to delete.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to delete. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to delete.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to delete. 
Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" } @@ -303,24 +303,24 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster to retrieve.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to retrieve. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to retrieve.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to retrieve. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" } @@ -334,7 +334,7 @@ ] }, "getJwks": { - "description": "Gets the public component of the cluster signing keys in\nJSON Web Key format.\nThis API is not yet intended for general use, and is not available for all\nclusters.", + "description": "Gets the public component of the cluster signing keys in JSON Web Key format. 
This API is not yet intended for general use, and is not available for all clusters.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/jwks", "httpMethod": "GET", "id": "container.projects.locations.clusters.getJwks", @@ -343,7 +343,7 @@ ], "parameters": { "parent": { - "description": "The cluster (project, location, cluster id) to get keys for. Specified in\nthe format `projects/*/locations/*/clusters/*`.", + "description": "The cluster (project, location, cluster id) to get keys for. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -356,7 +356,7 @@ } }, "list": { - "description": "Lists all clusters owned by a project in either the specified zone or all\nzones.", + "description": "Lists all clusters owned by a project in either the specified zone or all zones.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters", "httpMethod": "GET", "id": "container.projects.locations.clusters.list", @@ -365,19 +365,19 @@ ], "parameters": { "parent": { - "description": "The parent (project and location) where the clusters will be listed.\nSpecified in the format `projects/*/locations/*`.\nLocation \"-\" matches all zones and all regions.", + "description": "The parent (project and location) where the clusters will be listed. Specified in the format `projects/*/locations/*`. Location \"-\" matches all zones and all regions.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides, or \"-\" for all zones. This field has been deprecated and\nreplaced by the parent field.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides, or \"-\" for all zones. This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" } @@ -400,7 +400,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster) of the cluster to set addons.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set addons. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -428,7 +428,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster id) of the cluster to set legacy abac.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to set legacy abac. 
Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -447,7 +447,7 @@ ] }, "setLocations": { - "description": "Sets the locations for a specific cluster.\nDeprecated. Use\n[projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update)\ninstead.", + "description": "Sets the locations for a specific cluster. Deprecated. Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) instead.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLocations", "httpMethod": "POST", "id": "container.projects.locations.clusters.setLocations", @@ -456,7 +456,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster) of the cluster to set locations.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set locations. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -484,7 +484,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster) of the cluster to set logging.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set logging. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -512,7 +512,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster id) of the cluster to set maintenance\npolicy.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to set maintenance policy. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -531,7 +531,7 @@ ] }, "setMasterAuth": { - "description": "Sets master auth materials. Currently supports changing the admin password\nor a specific cluster, either via password generation or explicitly setting\nthe password.", + "description": "Sets master auth materials. Currently supports changing the admin password or a specific cluster, either via password generation or explicitly setting the password.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setMasterAuth", "httpMethod": "POST", "id": "container.projects.locations.clusters.setMasterAuth", @@ -540,7 +540,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster) of the cluster to set auth.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set auth. 
Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -568,7 +568,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster) of the cluster to set monitoring.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set monitoring. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -596,7 +596,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster id) of the cluster to set networking\npolicy. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to set networking policy. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -624,7 +624,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster id) of the cluster to set labels.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to set labels. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -652,7 +652,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster id) of the cluster to start IP\nrotation. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to start IP rotation. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -680,7 +680,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to update. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -708,7 +708,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to update. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -740,7 +740,7 @@ ], "parameters": { "parent": { - "description": "The parent (project, location, cluster id) where the node pool will be\ncreated. Specified in the format\n`projects/*/locations/*/clusters/*`.", + "description": "The parent (project, location, cluster id) where the node pool will be created. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -768,29 +768,29 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. 
The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "name": { - "description": "The name (project, location, cluster, node pool id) of the node pool to\ndelete. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node pool to delete. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", "required": true, "type": "string" }, "nodePoolId": { - "description": "Deprecated. The name of the node pool to delete.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the node pool to delete. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" } @@ -813,29 +813,29 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "name": { - "description": "The name (project, location, cluster, node pool id) of the node pool to\nget. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node pool to get. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", "required": true, "type": "string" }, "nodePoolId": { - "description": "Deprecated. The name of the node pool.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the node pool. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. 
The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" } @@ -858,24 +858,24 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", + "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" }, "parent": { - "description": "The parent (project, location, cluster id) where the node pools will be\nlisted. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The parent (project, location, cluster id) where the node pools will be listed. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" } @@ -889,7 +889,7 @@ ] }, "rollback": { - "description": "Rolls back a previously Aborted or Failed NodePool upgrade.\nThis makes no changes if the last upgrade successfully completed.", + "description": "Rolls back a previously Aborted or Failed NodePool upgrade. This makes no changes if the last upgrade successfully completed.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:rollback", "httpMethod": "POST", "id": "container.projects.locations.clusters.nodePools.rollback", @@ -898,7 +898,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster, node pool id) of the node poll to\nrollback upgrade.\nSpecified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node poll to rollback upgrade. 
Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", "required": true, @@ -926,7 +926,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster, node pool) of the node pool to set\nautoscaler settings. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool) of the node pool to set autoscaler settings. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", "required": true, @@ -954,7 +954,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster, node pool id) of the node pool to set\nmanagement properties. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node pool to set management properties. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", "required": true, @@ -982,7 +982,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster, node pool id) of the node pool to set\nsize.\nSpecified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node pool to set size. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", "required": true, @@ -1010,7 +1010,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster, node pool) of the node pool to\nupdate. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool) of the node pool to update. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", "required": true, @@ -1033,7 +1033,7 @@ "well-known": { "methods": { "getOpenid-configuration": { - "description": "Gets the OIDC discovery document for the cluster.\nSee the\n[OpenID Connect Discovery 1.0\nspecification](https://openid.net/specs/openid-connect-discovery-1_0.html)\nfor details.\nThis API is not yet intended for general use, and is not available for all\nclusters.", + "description": "Gets the OIDC discovery document for the cluster. See the [OpenID Connect Discovery 1.0 specification](https://openid.net/specs/openid-connect-discovery-1_0.html) for details. This API is not yet intended for general use, and is not available for all clusters.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/.well-known/openid-configuration", "httpMethod": "GET", "id": "container.projects.locations.clusters.well-known.getOpenid-configuration", @@ -1042,7 +1042,7 @@ ], "parameters": { "parent": { - "description": "The cluster (project, location, cluster id) to get the discovery document\nfor. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The cluster (project, location, cluster id) to get the discovery document for. 
Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -1070,7 +1070,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, operation id) of the operation to cancel.\nSpecified in the format `projects/*/locations/*/operations/*`.", + "description": "The name (project, location, operation id) of the operation to cancel. Specified in the format `projects/*/locations/*/operations/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", "required": true, @@ -1098,24 +1098,24 @@ ], "parameters": { "name": { - "description": "The name (project, location, operation id) of the operation to get.\nSpecified in the format `projects/*/locations/*/operations/*`.", + "description": "The name (project, location, operation id) of the operation to get. Specified in the format `projects/*/locations/*/operations/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", "required": true, "type": "string" }, "operationId": { - "description": "Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The server-assigned `name` of the operation. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" } @@ -1138,19 +1138,19 @@ ], "parameters": { "parent": { - "description": "The parent (project and location) where the operations will be listed.\nSpecified in the format `projects/*/locations/*`.\nLocation \"-\" matches all zones and all regions.", + "description": "The parent (project and location) where the operations will be listed. Specified in the format `projects/*/locations/*`. Location \"-\" matches all zones and all regions.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" }, "zone": { - "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) to return\noperations for, or `-` for all zones. This field has been deprecated and\nreplaced by the parent field.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for, or `-` for all zones. This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" } @@ -1180,18 +1180,18 @@ ], "parameters": { "name": { - "description": "The name (project and location) of the server config to get,\nspecified in the format `projects/*/locations/*`.", + "description": "The name (project and location) of the server config to get, specified in the format `projects/*/locations/*`.", "location": "query", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) to return\noperations for. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1221,19 +1221,19 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1262,19 +1262,19 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster. 
This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1292,7 +1292,7 @@ ] }, "create": { - "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default\nnetwork](https://cloud.google.com/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe Kubelet creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range the cluster is using.", + "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances. By default, the cluster is created in the project's [default network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks). One firewall is added for the cluster. After cluster creation, the Kubelet creates routes for each node to allow the containers on that node to communicate with all other instances in the cluster. Finally, an entry is added to the project's global metadata indicating which CIDR range the cluster is using.", "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters", "httpMethod": "POST", "id": "container.projects.zones.clusters.create", @@ -1302,13 +1302,13 @@ ], "parameters": { "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" @@ -1326,7 +1326,7 @@ ] }, "delete": { - "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster,\nsuch as load balancer resources, are not deleted if they weren't present\nwhen the cluster was initially created.", + "description": "Deletes the cluster, including the Kubernetes endpoint and all worker nodes. Firewalls and routes that were configured during cluster creation are also deleted. Other Google Compute Engine resources that might be in use by the cluster, such as load balancer resources, are not deleted if they weren't present when the cluster was initially created.", "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", "httpMethod": "DELETE", "id": "container.projects.zones.clusters.delete", @@ -1337,24 +1337,24 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster to delete.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to delete. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to delete.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to delete. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "query", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1380,24 +1380,24 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster to retrieve.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to retrieve. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to retrieve.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to retrieve. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "query", "type": "string" }, "projectId": { - "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1423,19 +1423,19 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1453,7 +1453,7 @@ ] }, "list": { - "description": "Lists all clusters owned by a project in either the specified zone or all\nzones.", + "description": "Lists all clusters owned by a project in either the specified zone or all zones.", "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters", "httpMethod": "GET", "id": "container.projects.zones.clusters.list", @@ -1463,18 +1463,18 @@ ], "parameters": { "parent": { - "description": "The parent (project and location) where the clusters will be listed.\nSpecified in the format `projects/*/locations/*`.\nLocation \"-\" matches all zones and all regions.", + "description": "The parent (project and location) where the clusters will be listed. Specified in the format `projects/*/locations/*`. Location \"-\" matches all zones and all regions.", "location": "query", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). 
This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides, or \"-\" for all zones. This field has been deprecated and\nreplaced by the parent field.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides, or \"-\" for all zones. This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" @@ -1489,7 +1489,7 @@ ] }, "locations": { - "description": "Sets the locations for a specific cluster.\nDeprecated. Use\n[projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update)\ninstead.", + "description": "Sets the locations for a specific cluster. Deprecated. Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) instead.", "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations", "httpMethod": "POST", "id": "container.projects.zones.clusters.locations", @@ -1500,19 +1500,19 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1541,19 +1541,19 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). 
This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1582,19 +1582,19 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1623,19 +1623,19 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1664,19 +1664,19 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. 
The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1711,13 +1711,13 @@ "type": "string" }, "projectId": { - "description": "Required. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Required. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides.", + "description": "Required. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides.", "location": "path", "required": true, "type": "string" @@ -1735,7 +1735,7 @@ ] }, "setMasterAuth": { - "description": "Sets master auth materials. Currently supports changing the admin password\nor a specific cluster, either via password generation or explicitly setting\nthe password.", + "description": "Sets master auth materials. Currently supports changing the admin password or a specific cluster, either via password generation or explicitly setting the password.", "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMasterAuth", "httpMethod": "POST", "id": "container.projects.zones.clusters.setMasterAuth", @@ -1746,19 +1746,19 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). 
This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1787,19 +1787,19 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1828,19 +1828,19 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1869,19 +1869,19 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. 
The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1915,25 +1915,25 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "nodePoolId": { - "description": "Deprecated. The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the node pool to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1962,19 +1962,19 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", + "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" @@ -2004,30 +2004,30 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "name": { - "description": "The name (project, location, cluster, node pool id) of the node pool to\ndelete. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node pool to delete. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "query", "type": "string" }, "nodePoolId": { - "description": "Deprecated. The name of the node pool to delete.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the node pool to delete. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -2054,30 +2054,30 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster. 
This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "name": { - "description": "The name (project, location, cluster, node pool id) of the node pool to\nget. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node pool to get. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "query", "type": "string" }, "nodePoolId": { - "description": "Deprecated. The name of the node pool.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the node pool. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -2103,24 +2103,24 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", + "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "parent": { - "description": "The parent (project, location, cluster id) where the node pools will be\nlisted. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The parent (project, location, cluster id) where the node pools will be listed. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "query", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" @@ -2135,7 +2135,7 @@ ] }, "rollback": { - "description": "Rolls back a previously Aborted or Failed NodePool upgrade.\nThis makes no changes if the last upgrade successfully completed.", + "description": "Rolls back a previously Aborted or Failed NodePool upgrade. This makes no changes if the last upgrade successfully completed.", "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}:rollback", "httpMethod": "POST", "id": "container.projects.zones.clusters.nodePools.rollback", @@ -2147,25 +2147,25 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster to rollback.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to rollback. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "nodePoolId": { - "description": "Deprecated. The name of the node pool to rollback.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the node pool to rollback. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -2195,25 +2195,25 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "nodePoolId": { - "description": "Deprecated. The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the node pool to update. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). 
This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -2243,25 +2243,25 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "nodePoolId": { - "description": "Deprecated. The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the node pool to update. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -2291,25 +2291,25 @@ ], "parameters": { "clusterId": { - "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "nodePoolId": { - "description": "Deprecated. The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the node pool to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). 
This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -2344,19 +2344,19 @@ ], "parameters": { "operationId": { - "description": "Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The server-assigned `name` of the operation. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\noperation resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the operation resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -2385,24 +2385,24 @@ ], "parameters": { "name": { - "description": "The name (project, location, operation id) of the operation to get.\nSpecified in the format `projects/*/locations/*/operations/*`.", + "description": "The name (project, location, operation id) of the operation to get. Specified in the format `projects/*/locations/*/operations/*`.", "location": "query", "type": "string" }, "operationId": { - "description": "Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The server-assigned `name` of the operation. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. 
This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -2427,18 +2427,18 @@ ], "parameters": { "parent": { - "description": "The parent (project and location) where the operations will be listed.\nSpecified in the format `projects/*/locations/*`.\nLocation \"-\" matches all zones and all regions.", + "description": "The parent (project and location) where the operations will be listed. Specified in the format `projects/*/locations/*`. Location \"-\" matches all zones and all regions.", "location": "query", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) to return\noperations for, or `-` for all zones. This field has been deprecated and\nreplaced by the parent field.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for, or `-` for all zones. This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" @@ -2459,7 +2459,7 @@ } } }, - "revision": "20200501", + "revision": "20200828", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -2472,35 +2472,43 @@ "type": "string" }, "acceleratorType": { - "description": "The accelerator type resource name. List of supported accelerators\n[here](https://cloud.google.com/compute/docs/gpus)", + "description": "The accelerator type resource name. List of supported accelerators [here](https://cloud.google.com/compute/docs/gpus)", "type": "string" } }, "type": "object" }, "AddonsConfig": { - "description": "Configuration for the addons that can be automatically spun up in the\ncluster, enabling additional functionality.", + "description": "Configuration for the addons that can be automatically spun up in the cluster, enabling additional functionality.", "id": "AddonsConfig", "properties": { "cloudRunConfig": { "$ref": "CloudRunConfig", - "description": "Configuration for the Cloud Run addon, which allows the user to use a\nmanaged Knative service." + "description": "Configuration for the Cloud Run addon, which allows the user to use a managed Knative service." 
+ }, + "configConnectorConfig": { + "$ref": "ConfigConnectorConfig", + "description": "Configuration for the ConfigConnector add-on, a Kubernetes extension to manage hosted GCP services through the Kubernetes API" + }, + "dnsCacheConfig": { + "$ref": "DnsCacheConfig", + "description": "Configuration for NodeLocalDNS, a dns cache running on cluster nodes" }, "horizontalPodAutoscaling": { "$ref": "HorizontalPodAutoscaling", - "description": "Configuration for the horizontal pod autoscaling feature, which\nincreases or decreases the number of replica pods a replication controller\nhas based on the resource usage of the existing pods." + "description": "Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods." }, "httpLoadBalancing": { "$ref": "HttpLoadBalancing", - "description": "Configuration for the HTTP (L7) load balancing controller addon, which\nmakes it easy to set up HTTP load balancers for services in a cluster." + "description": "Configuration for the HTTP (L7) load balancing controller addon, which makes it easy to set up HTTP load balancers for services in a cluster." }, "kubernetesDashboard": { "$ref": "KubernetesDashboard", - "description": "Configuration for the Kubernetes Dashboard.\nThis addon is deprecated, and will be disabled in 1.15. It is recommended\nto use the Cloud Console to manage and monitor your Kubernetes clusters,\nworkloads and applications. For more information, see:\nhttps://cloud.google.com/kubernetes-engine/docs/concepts/dashboards" + "description": "Configuration for the Kubernetes Dashboard. This addon is deprecated, and will be disabled in 1.15. It is recommended to use the Cloud Console to manage and monitor your Kubernetes clusters, workloads and applications. For more information, see: https://cloud.google.com/kubernetes-engine/docs/concepts/dashboards" }, "networkPolicyConfig": { "$ref": "NetworkPolicyConfig", - "description": "Configuration for NetworkPolicy. This only tracks whether the addon\nis enabled or not on the Master, it does not track whether network policy\nis enabled for the nodes." + "description": "Configuration for NetworkPolicy. This only tracks whether the addon is enabled or not on the Master, it does not track whether network policy is enabled for the nodes." } }, "type": "object" @@ -2510,50 +2518,71 @@ "id": "AuthenticatorGroupsConfig", "properties": { "enabled": { - "description": "Whether this cluster should return group membership lookups\nduring authentication using a group of security groups.", + "description": "Whether this cluster should return group membership lookups during authentication using a group of security groups.", "type": "boolean" }, "securityGroup": { - "description": "The name of the security group-of-groups to be used. Only relevant\nif enabled = true.", + "description": "The name of the security group-of-groups to be used. 
Only relevant if enabled = true.", "type": "string" } }, "type": "object" }, "AutoUpgradeOptions": { - "description": "AutoUpgradeOptions defines the set of options for the user to control how\nthe Auto Upgrades will proceed.", + "description": "AutoUpgradeOptions defines the set of options for the user to control how the Auto Upgrades will proceed.", "id": "AutoUpgradeOptions", "properties": { "autoUpgradeStartTime": { - "description": "[Output only] This field is set when upgrades are about to commence\nwith the approximate start time for the upgrades, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "description": "[Output only] This field is set when upgrades are about to commence with the approximate start time for the upgrades, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", "type": "string" }, "description": { - "description": "[Output only] This field is set when upgrades are about to commence\nwith the description of the upgrade.", + "description": "[Output only] This field is set when upgrades are about to commence with the description of the upgrade.", "type": "string" } }, "type": "object" }, "AutoprovisioningNodePoolDefaults": { - "description": "AutoprovisioningNodePoolDefaults contains defaults for a node pool created\nby NAP.", + "description": "AutoprovisioningNodePoolDefaults contains defaults for a node pool created by NAP.", "id": "AutoprovisioningNodePoolDefaults", "properties": { + "bootDiskKmsKey": { + "description": "The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption", + "type": "string" + }, + "diskSizeGb": { + "description": "Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB.", + "format": "int32", + "type": "integer" + }, + "diskType": { + "description": "Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd') If unspecified, the default disk type is 'pd-standard'", + "type": "string" + }, "management": { "$ref": "NodeManagement", "description": "Specifies the node management options for NAP created node-pools." }, + "minCpuPlatform": { + "description": "Minimum CPU platform to be used for NAP created node pools. The instance may be scheduled on the specified or newer CPU platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: Intel Haswell or minCpuPlatform: Intel Sandy Bridge. For more information, read [how to specify min CPU platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) To unset the min cpu platform field pass \"automatic\" as field value.", + "type": "string" + }, "oauthScopes": { - "description": "Scopes that are used by NAP when creating node pools. If oauth_scopes are\nspecified, service_account should be empty.", + "description": "Scopes that are used by NAP when creating node pools.", "items": { "type": "string" }, "type": "array" }, "serviceAccount": { - "description": "The Google Cloud Platform Service Account to be used by the node VMs. 
If\nservice_account is specified, scopes should be empty.", + "description": "The Google Cloud Platform Service Account to be used by the node VMs.", "type": "string" }, + "shieldedInstanceConfig": { + "$ref": "ShieldedInstanceConfig", + "description": "Shielded Instance options." + }, "upgradeSettings": { "$ref": "UpgradeSettings", "description": "Specifies the upgrade settings for NAP created node pools" @@ -2577,7 +2606,7 @@ "id": "BinaryAuthorization", "properties": { "enabled": { - "description": "Enable Binary Authorization for this cluster. If enabled, all container\nimages will be validated by Binary Authorization.", + "description": "Enable Binary Authorization for this cluster. If enabled, all container images will be validated by Binary Authorization.", "type": "boolean" } }, @@ -2588,19 +2617,19 @@ "id": "CancelOperationRequest", "properties": { "name": { - "description": "The name (project, location, operation id) of the operation to cancel.\nSpecified in the format `projects/*/locations/*/operations/*`.", + "description": "The name (project, location, operation id) of the operation to cancel. Specified in the format `projects/*/locations/*/operations/*`.", "type": "string" }, "operationId": { - "description": "Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The server-assigned `name` of the operation. This field has been deprecated and replaced by the name field.", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\noperation resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the operation resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -2639,6 +2668,20 @@ "disabled": { "description": "Whether Cloud Run addon is enabled for this cluster.", "type": "boolean" + }, + "loadBalancerType": { + "description": "Which load balancer type is installed for Cloud Run.", + "enum": [ + "LOAD_BALANCER_TYPE_UNSPECIFIED", + "LOAD_BALANCER_TYPE_EXTERNAL", + "LOAD_BALANCER_TYPE_INTERNAL" + ], + "enumDescriptions": [ + "Load balancer type for Cloud Run is unspecified.", + "Install external load balancer for Cloud Run.", + "Install internal load balancer for Cloud Run." + ], + "type": "string" } }, "type": "object" @@ -2664,7 +2707,7 @@ "description": "Configuration for Binary Authorization." }, "clusterIpv4Cidr": { - "description": "The IP address range of the container pods in this cluster, in\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`). 
Leave blank to have\none automatically chosen or specify a `/14` block in `10.0.0.0/8`.", + "description": "The IP address range of the container pods in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`). Leave blank to have one automatically chosen or specify a `/14` block in `10.0.0.0/8`.", "type": "string" }, "conditions": { @@ -2675,7 +2718,7 @@ "type": "array" }, "createTime": { - "description": "[Output only] The time the cluster was created, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "description": "[Output only] The time the cluster was created, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", "type": "string" }, "currentMasterVersion": { @@ -2683,12 +2726,12 @@ "type": "string" }, "currentNodeCount": { - "description": "[Output only] The number of nodes currently in the cluster. Deprecated.\nCall Kubernetes API directly to retrieve node information.", + "description": "[Output only] The number of nodes currently in the cluster. Deprecated. Call Kubernetes API directly to retrieve node information.", "format": "int32", "type": "integer" }, "currentNodeVersion": { - "description": "[Output only] Deprecated, use\n[NodePools.version](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.zones.clusters.nodePools)\ninstead. The current version of the node software components. If they are\ncurrently at multiple versions because they're in the process of being\nupgraded, this reflects the minimum version of all nodes.", + "description": "[Output only] Deprecated, use [NodePools.version](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools) instead. The current version of the node software components. If they are currently at multiple versions because they're in the process of being upgraded, this reflects the minimum version of all nodes.", "type": "string" }, "databaseEncryption": { @@ -2697,14 +2740,14 @@ }, "defaultMaxPodsConstraint": { "$ref": "MaxPodsConstraint", - "description": "The default constraint on the maximum number of pods that can be run\nsimultaneously on a node in the node pool of this cluster. Only honored\nif cluster created with IP Alias support." + "description": "The default constraint on the maximum number of pods that can be run simultaneously on a node in the node pool of this cluster. Only honored if cluster created with IP Alias support." }, "description": { "description": "An optional description of this cluster.", "type": "string" }, "enableKubernetesAlpha": { - "description": "Kubernetes alpha features are enabled on this cluster. This includes alpha\nAPI groups (e.g. v1alpha1) and features that may not be production ready in\nthe kubernetes version of the master and nodes.\nThe cluster has no SLA for uptime and master/node upgrades are disabled.\nAlpha enabled clusters are automatically deleted thirty days after\ncreation.", + "description": "Kubernetes alpha features are enabled on this cluster. This includes alpha API groups (e.g. v1alpha1) and features that may not be production ready in the kubernetes version of the master and nodes. The cluster has no SLA for uptime and master/node upgrades are disabled. 
Alpha enabled clusters are automatically deleted thirty days after creation.", "type": "boolean" }, "enableTpu": { @@ -2712,19 +2755,19 @@ "type": "boolean" }, "endpoint": { - "description": "[Output only] The IP address of this cluster's master endpoint.\nThe endpoint can be accessed from the internet at\n`https://username:password@endpoint/`.\n\nSee the `masterAuth` property of this resource for username and\npassword information.", + "description": "[Output only] The IP address of this cluster's master endpoint. The endpoint can be accessed from the internet at `https://username:password@endpoint/`. See the `masterAuth` property of this resource for username and password information.", "type": "string" }, "expireTime": { - "description": "[Output only] The time the cluster will be automatically\ndeleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "description": "[Output only] The time the cluster will be automatically deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", "type": "string" }, "initialClusterVersion": { - "description": "The initial Kubernetes version for this cluster. Valid versions are those\nfound in validMasterVersions returned by getServerConfig. The version can\nbe upgraded over time; such upgrades are reflected in\ncurrentMasterVersion and currentNodeVersion.\n\nUsers may specify either explicit versions offered by\nKubernetes Engine or version aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"\",\"-\": picks the default Kubernetes version", + "description": "The initial Kubernetes version for this cluster. Valid versions are those found in validMasterVersions returned by getServerConfig. The version can be upgraded over time; such upgrades are reflected in currentMasterVersion and currentNodeVersion. Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - \"latest\": picks the highest valid Kubernetes version - \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version - \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version - \"1.X.Y-gke.N\": picks an explicit Kubernetes version - \"\",\"-\": picks the default Kubernetes version", "type": "string" }, "initialNodeCount": { - "description": "The number of nodes to create in this cluster. You must ensure that your\nCompute Engine \u003ca href=\"/compute/docs/resource-quotas\"\u003eresource quota\u003c/a\u003e\nis sufficient for this number of instances. You must also have available\nfirewall and routes quota.\nFor requests, this field should only be used in lieu of a\n\"node_pool\" object, since this configuration (along with the\n\"node_config\") will be used to create a \"NodePool\" object with an\nauto-generated name. Do not use this and a node_pool at the same time.\n\nThis field is deprecated, use node_pool.initial_node_count instead.", + "description": "The number of nodes to create in this cluster. You must ensure that your Compute Engine [resource quota](https://cloud.google.com/compute/quotas) is sufficient for this number of instances. You must also have available firewall and routes quota. 
For requests, this field should only be used in lieu of a \"node_pool\" object, since this configuration (along with the \"node_config\") will be used to create a \"NodePool\" object with an auto-generated name. Do not use this and a node_pool at the same time. This field is deprecated, use node_pool.initial_node_count instead.", "format": "int32", "type": "integer" }, @@ -2748,18 +2791,18 @@ "description": "Configuration for the legacy ABAC authorization mode." }, "location": { - "description": "[Output only] The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available)\nor\n[region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available)\nin which the cluster resides.", + "description": "[Output only] The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) or [region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) in which the cluster resides.", "type": "string" }, "locations": { - "description": "The list of Google Compute Engine\n[zones](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster's nodes should be located.", + "description": "The list of Google Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available) in which the cluster's nodes should be located.", "items": { "type": "string" }, "type": "array" }, "loggingService": { - "description": "The logging service the cluster should use to write logs.\nCurrently available options:\n\n* `logging.googleapis.com/kubernetes` - The Cloud Logging\nservice with a Kubernetes-native resource model\n* `logging.googleapis.com` - The legacy Cloud Logging service (no longer\n available as of GKE 1.15).\n* `none` - no logs will be exported from the cluster.\n\nIf left as an empty string,`logging.googleapis.com/kubernetes` will be\nused for GKE 1.14+ or `logging.googleapis.com` for earlier versions.", + "description": "The logging service the cluster should use to write logs. Currently available options: * `logging.googleapis.com/kubernetes` - The Cloud Logging service with a Kubernetes-native resource model * `logging.googleapis.com` - The legacy Cloud Logging service (no longer available as of GKE 1.15). * `none` - no logs will be exported from the cluster. If left as an empty string,`logging.googleapis.com/kubernetes` will be used for GKE 1.14+ or `logging.googleapis.com` for earlier versions.", "type": "string" }, "maintenancePolicy": { @@ -2768,22 +2811,22 @@ }, "masterAuth": { "$ref": "MasterAuth", - "description": "The authentication information for accessing the master endpoint.\nIf unspecified, the defaults are used:\nFor clusters before v1.12, if master_auth is unspecified, `username` will\nbe set to \"admin\", a random password will be generated, and a client\ncertificate will be issued." + "description": "The authentication information for accessing the master endpoint. If unspecified, the defaults are used: For clusters before v1.12, if master_auth is unspecified, `username` will be set to \"admin\", a random password will be generated, and a client certificate will be issued." }, "masterAuthorizedNetworksConfig": { "$ref": "MasterAuthorizedNetworksConfig", "description": "The configuration options for master authorized networks feature." 
}, "monitoringService": { - "description": "The monitoring service the cluster should use to write metrics.\nCurrently available options:\n\n* \"monitoring.googleapis.com/kubernetes\" - The Cloud Monitoring\nservice with a Kubernetes-native resource model\n* `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no\n longer available as of GKE 1.15).\n* `none` - No metrics will be exported from the cluster.\n\nIf left as an empty string,`monitoring.googleapis.com/kubernetes` will be\nused for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.", + "description": "The monitoring service the cluster should use to write metrics. Currently available options: * \"monitoring.googleapis.com/kubernetes\" - The Cloud Monitoring service with a Kubernetes-native resource model * `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no longer available as of GKE 1.15). * `none` - No metrics will be exported from the cluster. If left as an empty string,`monitoring.googleapis.com/kubernetes` will be used for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.", "type": "string" }, "name": { - "description": "The name of this cluster. The name must be unique within this project\nand location (e.g. zone or region), and can be up to 40 characters with\nthe following restrictions:\n\n* Lowercase letters, numbers, and hyphens only.\n* Must start with a letter.\n* Must end with a number or a letter.", + "description": "The name of this cluster. The name must be unique within this project and location (e.g. zone or region), and can be up to 40 characters with the following restrictions: * Lowercase letters, numbers, and hyphens only. * Must start with a letter. * Must end with a number or a letter.", "type": "string" }, "network": { - "description": "The name of the Google Compute Engine\n[network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)\nto which the cluster is connected. If left unspecified, the `default`\nnetwork will be used.", + "description": "The name of the Google Compute Engine [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) to which the cluster is connected. If left unspecified, the `default` network will be used.", "type": "string" }, "networkConfig": { @@ -2796,15 +2839,15 @@ }, "nodeConfig": { "$ref": "NodeConfig", - "description": "Parameters used in creating the cluster's nodes.\nFor requests, this field should only be used in lieu of a\n\"node_pool\" object, since this configuration (along with the\n\"initial_node_count\") will be used to create a \"NodePool\" object with an\nauto-generated name. Do not use this and a node_pool at the same time.\nFor responses, this field will be populated with the node configuration of\nthe first node pool. (For configuration of each node pool, see\n`node_pool.config`)\n\nIf unspecified, the defaults are used.\nThis field is deprecated, use node_pool.config instead." + "description": "Parameters used in creating the cluster's nodes. For requests, this field should only be used in lieu of a \"node_pool\" object, since this configuration (along with the \"initial_node_count\") will be used to create a \"NodePool\" object with an auto-generated name. Do not use this and a node_pool at the same time. For responses, this field will be populated with the node configuration of the first node pool. (For configuration of each node pool, see `node_pool.config`) If unspecified, the defaults are used. This field is deprecated, use node_pool.config instead." 
}, "nodeIpv4CidrSize": { - "description": "[Output only] The size of the address space on each node for hosting\ncontainers. This is provisioned from within the `container_ipv4_cidr`\nrange. This field will only be set when cluster is in route-based network\nmode.", + "description": "[Output only] The size of the address space on each node for hosting containers. This is provisioned from within the `container_ipv4_cidr` range. This field will only be set when cluster is in route-based network mode.", "format": "int32", "type": "integer" }, "nodePools": { - "description": "The node pools associated with this cluster.\nThis field should not be set if \"node_config\" or \"initial_node_count\" are\nspecified.", + "description": "The node pools associated with this cluster. This field should not be set if \"node_config\" or \"initial_node_count\" are specified.", "items": { "$ref": "NodePool" }, @@ -2814,23 +2857,27 @@ "$ref": "PrivateClusterConfig", "description": "Configuration for private cluster." }, + "releaseChannel": { + "$ref": "ReleaseChannel", + "description": "Release channel configuration." + }, "resourceLabels": { "additionalProperties": { "type": "string" }, - "description": "The resource labels for the cluster to use to annotate any related\nGoogle Compute Engine resources.", + "description": "The resource labels for the cluster to use to annotate any related Google Compute Engine resources.", "type": "object" }, "resourceUsageExportConfig": { "$ref": "ResourceUsageExportConfig", - "description": "Configuration for exporting resource usages. Resource usage export is\ndisabled when this config is unspecified." + "description": "Configuration for exporting resource usages. Resource usage export is disabled when this config is unspecified." }, "selfLink": { "description": "[Output only] Server-defined URL for the resource.", "type": "string" }, "servicesIpv4Cidr": { - "description": "[Output only] The IP address range of the Kubernetes services in\nthis cluster, in\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `1.2.3.4/29`). Service addresses are\ntypically put in the last `/16` from the container CIDR.", + "description": "[Output only] The IP address range of the Kubernetes services in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). Service addresses are typically put in the last `/16` from the container CIDR.", "type": "string" }, "shieldedNodes": { @@ -2851,24 +2898,24 @@ "enumDescriptions": [ "Not set.", "The PROVISIONING state indicates the cluster is being created.", - "The RUNNING state indicates the cluster has been created and is fully\nusable.", - "The RECONCILING state indicates that some work is actively being done on\nthe cluster, such as upgrading the master or node software. Details can\nbe found in the `statusMessage` field.", + "The RUNNING state indicates the cluster has been created and is fully usable.", + "The RECONCILING state indicates that some work is actively being done on the cluster, such as upgrading the master or node software. Details can be found in the `statusMessage` field.", "The STOPPING state indicates the cluster is being deleted.", - "The ERROR state indicates the cluster may be unusable. Details\ncan be found in the `statusMessage` field.", - "The DEGRADED state indicates the cluster requires user action to restore\nfull functionality. Details can be found in the `statusMessage` field." + "The ERROR state indicates the cluster is unusable. 
It will be automatically deleted. Details can be found in the `statusMessage` field.", + "The DEGRADED state indicates the cluster requires user action to restore full functionality. Details can be found in the `statusMessage` field." ], "type": "string" }, "statusMessage": { - "description": "[Output only] Additional information about the current status of this\ncluster, if available.", + "description": "[Output only] Additional information about the current status of this cluster, if available.", "type": "string" }, "subnetwork": { - "description": "The name of the Google Compute Engine\n[subnetwork](https://cloud.google.com/compute/docs/subnetworks) to which\nthe cluster is connected.", + "description": "The name of the Google Compute Engine [subnetwork](https://cloud.google.com/compute/docs/subnetworks) to which the cluster is connected.", "type": "string" }, "tpuIpv4CidrBlock": { - "description": "[Output only] The IP address range of the Cloud TPUs in this cluster, in\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `1.2.3.4/29`).", + "description": "[Output only] The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`).", "type": "string" }, "verticalPodAutoscaling": { @@ -2877,21 +2924,21 @@ }, "workloadIdentityConfig": { "$ref": "WorkloadIdentityConfig", - "description": "Configuration for the use of Kubernetes Service Accounts in GCP IAM\npolicies." + "description": "Configuration for the use of Kubernetes Service Accounts in GCP IAM policies." }, "zone": { - "description": "[Output only] The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field is deprecated, use location instead.", + "description": "[Output only] The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field is deprecated, use location instead.", "type": "string" } }, "type": "object" }, "ClusterAutoscaling": { - "description": "ClusterAutoscaling contains global, per-cluster information\nrequired by Cluster Autoscaler to automatically adjust\nthe size of the cluster and create/delete\nnode pools based on the current needs.", + "description": "ClusterAutoscaling contains global, per-cluster information required by Cluster Autoscaler to automatically adjust the size of the cluster and create/delete node pools based on the current needs.", "id": "ClusterAutoscaling", "properties": { "autoprovisioningLocations": { - "description": "The list of Google Compute Engine\n[zones](https://cloud.google.com/compute/docs/zones#available) in which the\nNodePool's nodes can be created by NAP.", + "description": "The list of Google Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available) in which the NodePool's nodes can be created by NAP.", "items": { "type": "string" }, @@ -2899,14 +2946,14 @@ }, "autoprovisioningNodePoolDefaults": { "$ref": "AutoprovisioningNodePoolDefaults", - "description": "AutoprovisioningNodePoolDefaults contains defaults for a node pool\ncreated by NAP." + "description": "AutoprovisioningNodePoolDefaults contains defaults for a node pool created by NAP." 
}, "enableNodeAutoprovisioning": { "description": "Enables automatic node pool creation and deletion.", "type": "boolean" }, "resourceLimits": { - "description": "Contains global constraints regarding minimum and maximum\namount of resources in the cluster.", + "description": "Contains global constraints regarding minimum and maximum amount of resources in the cluster.", "items": { "$ref": "ResourceLimit" }, @@ -2916,7 +2963,7 @@ "type": "object" }, "ClusterUpdate": { - "description": "ClusterUpdate describes an update to the cluster. Exactly one update can\nbe applied to a cluster with each request, so at most one field can be\nprovided.", + "description": "ClusterUpdate describes an update to the cluster. Exactly one update can be applied to a cluster with each request, so at most one field can be provided.", "id": "ClusterUpdate", "properties": { "desiredAddonsConfig": { @@ -2935,8 +2982,12 @@ "$ref": "DatabaseEncryption", "description": "Configuration of etcd encryption." }, + "desiredDefaultSnatStatus": { + "$ref": "DefaultSnatStatus", + "description": "The desired status of whether to disable default sNAT for this cluster." + }, "desiredImageType": { - "description": "The desired image type for the node pool.\nNOTE: Set the \"desired_node_pool\" field as well.", + "description": "The desired image type for the node pool. NOTE: Set the \"desired_node_pool\" field as well.", "type": "string" }, "desiredIntraNodeVisibilityConfig": { @@ -2944,14 +2995,14 @@ "description": "The desired config of Intra-node visibility." }, "desiredLocations": { - "description": "The desired list of Google Compute Engine\n[zones](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster's nodes should be located. Changing the locations a cluster is in\nwill result in nodes being either created or removed from the cluster,\ndepending on whether locations are being added or removed.\n\nThis list must always include the cluster's primary zone.", + "description": "The desired list of Google Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available) in which the cluster's nodes should be located. Changing the locations a cluster is in will result in nodes being either created or removed from the cluster, depending on whether locations are being added or removed. This list must always include the cluster's primary zone.", "items": { "type": "string" }, "type": "array" }, "desiredLoggingService": { - "description": "The logging service the cluster should use to write logs.\nCurrently available options:\n\n* `logging.googleapis.com/kubernetes` - The Cloud Logging\nservice with a Kubernetes-native resource model\n* `logging.googleapis.com` - The legacy Cloud Logging service (no longer\n available as of GKE 1.15).\n* `none` - no logs will be exported from the cluster.\n\nIf left as an empty string,`logging.googleapis.com/kubernetes` will be\nused for GKE 1.14+ or `logging.googleapis.com` for earlier versions.", + "description": "The logging service the cluster should use to write logs. Currently available options: * `logging.googleapis.com/kubernetes` - The Cloud Logging service with a Kubernetes-native resource model * `logging.googleapis.com` - The legacy Cloud Logging service (no longer available as of GKE 1.15). * `none` - no logs will be exported from the cluster. 
If left as an empty string,`logging.googleapis.com/kubernetes` will be used for GKE 1.14+ or `logging.googleapis.com` for earlier versions.", "type": "string" }, "desiredMasterAuthorizedNetworksConfig": { @@ -2959,25 +3010,33 @@ "description": "The desired configuration options for master authorized networks feature." }, "desiredMasterVersion": { - "description": "The Kubernetes version to change the master to.\n\nUsers may specify either explicit versions offered by\nKubernetes Engine or version aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"-\": picks the default Kubernetes version", + "description": "The Kubernetes version to change the master to. Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - \"latest\": picks the highest valid Kubernetes version - \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version - \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version - \"1.X.Y-gke.N\": picks an explicit Kubernetes version - \"-\": picks the default Kubernetes version", "type": "string" }, "desiredMonitoringService": { - "description": "The monitoring service the cluster should use to write metrics.\nCurrently available options:\n\n* \"monitoring.googleapis.com/kubernetes\" - The Cloud Monitoring\nservice with a Kubernetes-native resource model\n* `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no\n longer available as of GKE 1.15).\n* `none` - No metrics will be exported from the cluster.\n\nIf left as an empty string,`monitoring.googleapis.com/kubernetes` will be\nused for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.", + "description": "The monitoring service the cluster should use to write metrics. Currently available options: * \"monitoring.googleapis.com/kubernetes\" - The Cloud Monitoring service with a Kubernetes-native resource model * `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no longer available as of GKE 1.15). * `none` - No metrics will be exported from the cluster. If left as an empty string,`monitoring.googleapis.com/kubernetes` will be used for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.", "type": "string" }, "desiredNodePoolAutoscaling": { "$ref": "NodePoolAutoscaling", - "description": "Autoscaler configuration for the node pool specified in\ndesired_node_pool_id. If there is only one pool in the\ncluster and desired_node_pool_id is not provided then\nthe change applies to that single node pool." + "description": "Autoscaler configuration for the node pool specified in desired_node_pool_id. If there is only one pool in the cluster and desired_node_pool_id is not provided then the change applies to that single node pool." }, "desiredNodePoolId": { - "description": "The node pool to be upgraded. This field is mandatory if\n\"desired_node_version\", \"desired_image_family\" or\n\"desired_node_pool_autoscaling\" is specified and there is more than one\nnode pool on the cluster.", + "description": "The node pool to be upgraded. 
This field is mandatory if \"desired_node_version\", \"desired_image_family\" or \"desired_node_pool_autoscaling\" is specified and there is more than one node pool on the cluster.", "type": "string" }, "desiredNodeVersion": { - "description": "The Kubernetes version to change the nodes to (typically an\nupgrade).\n\nUsers may specify either explicit versions offered by\nKubernetes Engine or version aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"-\": picks the Kubernetes master version", + "description": "The Kubernetes version to change the nodes to (typically an upgrade). Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - \"latest\": picks the highest valid Kubernetes version - \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version - \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version - \"1.X.Y-gke.N\": picks an explicit Kubernetes version - \"-\": picks the Kubernetes master version", "type": "string" }, + "desiredPrivateClusterConfig": { + "$ref": "PrivateClusterConfig", + "description": "The desired private cluster configuration." + }, + "desiredReleaseChannel": { + "$ref": "ReleaseChannel", + "description": "The desired release channel configuration." + }, "desiredResourceUsageExportConfig": { "$ref": "ResourceUsageExportConfig", "description": "The desired configuration for exporting resource usage." @@ -3002,30 +3061,41 @@ "id": "CompleteIPRotationRequest", "properties": { "clusterId": { - "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster id) of the cluster to complete IP\nrotation. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to complete IP rotation. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", "type": "string" } }, "type": "object" }, + "ConfigConnectorConfig": { + "description": "Configuration options for the Config Connector add-on.", + "id": "ConfigConnectorConfig", + "properties": { + "enabled": { + "description": "Whether Cloud Connector is enabled for this cluster.", + "type": "boolean" + } + }, + "type": "object" + }, "ConsumptionMeteringConfig": { "description": "Parameters for controlling consumption metering.", "id": "ConsumptionMeteringConfig", "properties": { "enabled": { - "description": "Whether to enable consumption metering for this cluster. If enabled, a\nsecond BigQuery table will be created to hold resource consumption\nrecords.", + "description": "Whether to enable consumption metering for this cluster. If enabled, a second BigQuery table will be created to hold resource consumption records.", "type": "boolean" } }, @@ -3037,18 +3107,18 @@ "properties": { "cluster": { "$ref": "Cluster", - "description": "Required. A [cluster\nresource](https://cloud.google.com/container-engine/reference/rest/v1/projects.zones.clusters)" + "description": "Required. A [cluster resource](https://cloud.google.com/container-engine/reference/rest/v1/projects.locations.clusters)" }, "parent": { - "description": "The parent (project and location) where the cluster will be created.\nSpecified in the format `projects/*/locations/*`.", + "description": "The parent (project and location) where the cluster will be created. Specified in the format `projects/*/locations/*`.", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", "type": "string" } }, @@ -3059,7 +3129,7 @@ "id": "CreateNodePoolRequest", "properties": { "clusterId": { - "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", + "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the parent field.", "type": "string" }, "nodePool": { @@ -3067,15 +3137,15 @@ "description": "Required. The node pool to create." }, "parent": { - "description": "The parent (project, location, cluster id) where the node pool will be\ncreated. Specified in the format\n`projects/*/locations/*/clusters/*`.", + "description": "The parent (project, location, cluster id) where the node pool will be created. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the parent field.", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", "type": "string" } }, @@ -3086,11 +3156,11 @@ "id": "DailyMaintenanceWindow", "properties": { "duration": { - "description": "[Output only] Duration of the time window, automatically chosen to be\nsmallest possible in the given scenario.\nDuration will be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt)\nformat \"PTnHnMnS\".", + "description": "[Output only] Duration of the time window, automatically chosen to be smallest possible in the given scenario. Duration will be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format \"PTnHnMnS\".", "type": "string" }, "startTime": { - "description": "Time within the maintenance window to start the maintenance operations.\nTime format should be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt)\nformat \"HH:MM\", where HH : [00-23] and MM : [00-59] GMT.", + "description": "Time within the maintenance window to start the maintenance operations. Time format should be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format \"HH:MM\", where HH : [00-23] and MM : [00-59] GMT.", "type": "string" } }, @@ -3101,7 +3171,7 @@ "id": "DatabaseEncryption", "properties": { "keyName": { - "description": "Name of CloudKMS key to use for the encryption of secrets in etcd.\nEx. projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key", + "description": "Name of CloudKMS key to use for the encryption of secrets in etcd. Ex. projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key", "type": "string" }, "state": { @@ -3114,15 +3184,37 @@ "enumDescriptions": [ "Should never be set", "Secrets in etcd are encrypted.", - "Secrets in etcd are stored in plain text (at etcd level) - this is\nunrelated to Compute Engine level full disk encryption." + "Secrets in etcd are stored in plain text (at etcd level) - this is unrelated to Compute Engine level full disk encryption." ], "type": "string" } }, "type": "object" }, + "DefaultSnatStatus": { + "description": "DefaultSnatStatus contains the desired state of whether default sNAT should be disabled on the cluster.", + "id": "DefaultSnatStatus", + "properties": { + "disabled": { + "description": "Disables cluster default sNAT rules.", + "type": "boolean" + } + }, + "type": "object" + }, + "DnsCacheConfig": { + "description": "Configuration for NodeLocal DNSCache", + "id": "DnsCacheConfig", + "properties": { + "enabled": { + "description": "Whether NodeLocal DNSCache is enabled for this cluster.", + "type": "boolean" + } + }, + "type": "object" + }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. 
A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" @@ -3133,10 +3225,10 @@ "properties": { "cacheHeader": { "$ref": "HttpCacheControlResponseHeader", - "description": "OnePlatform automatically extracts this field and uses it to set the HTTP\nCache-Control header." + "description": "OnePlatform automatically extracts this field and uses it to set the HTTP Cache-Control header." }, "keys": { - "description": "The public component of the keys used by the cluster to sign token\nrequests.", + "description": "The public component of the keys used by the cluster to sign token requests.", "items": { "$ref": "Jwk" }, @@ -3146,12 +3238,12 @@ "type": "object" }, "GetOpenIDConfigResponse": { - "description": "GetOpenIDConfigResponse is an OIDC discovery document for the cluster.\nSee the OpenID Connect Discovery 1.0 specification for details.", + "description": "GetOpenIDConfigResponse is an OIDC discovery document for the cluster. See the OpenID Connect Discovery 1.0 specification for details.", "id": "GetOpenIDConfigResponse", "properties": { "cacheHeader": { "$ref": "HttpCacheControlResponseHeader", - "description": "OnePlatform automatically extracts this field and uses it to set the HTTP\nCache-Control header." + "description": "OnePlatform automatically extracts this field and uses it to set the HTTP Cache-Control header." }, "claims_supported": { "description": "Supported claims.", @@ -3200,11 +3292,11 @@ "type": "object" }, "HorizontalPodAutoscaling": { - "description": "Configuration options for the horizontal pod autoscaling feature, which\nincreases or decreases the number of replica pods a replication controller\nhas based on the resource usage of the existing pods.", + "description": "Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods.", "id": "HorizontalPodAutoscaling", "properties": { "disabled": { - "description": "Whether the Horizontal Pod Autoscaling feature is enabled in the cluster.\nWhen enabled, it ensures that metrics are collected into Stackdriver\nMonitoring.", + "description": "Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. 
When enabled, it ensures that metrics are collected into Stackdriver Monitoring.", "type": "boolean" } }, @@ -3231,11 +3323,11 @@ "type": "object" }, "HttpLoadBalancing": { - "description": "Configuration options for the HTTP (L7) load balancing controller addon,\nwhich makes it easy to set up HTTP load balancers for services in a cluster.", + "description": "Configuration options for the HTTP (L7) load balancing controller addon, which makes it easy to set up HTTP load balancers for services in a cluster.", "id": "HttpLoadBalancing", "properties": { "disabled": { - "description": "Whether the HTTP Load Balancing controller is enabled in the cluster.\nWhen enabled, it runs a small pod in the cluster that manages the load\nbalancers.", + "description": "Whether the HTTP Load Balancing controller is enabled in the cluster. When enabled, it runs a small pod in the cluster that manages the load balancers.", "type": "boolean" } }, @@ -3250,15 +3342,15 @@ "type": "string" }, "clusterIpv4CidrBlock": { - "description": "The IP address range for the cluster pod IPs. If this field is set, then\n`cluster.cluster_ipv4_cidr` must be left blank.\n\nThis field is only applicable when `use_ip_aliases` is true.\n\nSet to blank to have a range chosen with the default size.\n\nSet to /netmask (e.g. `/14`) to have a range chosen with a specific\nnetmask.\n\nSet to a\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.", + "description": "The IP address range for the cluster pod IPs. If this field is set, then `cluster.cluster_ipv4_cidr` must be left blank. This field is only applicable when `use_ip_aliases` is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. `/14`) to have a range chosen with a specific netmask. Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.", "type": "string" }, "clusterSecondaryRangeName": { - "description": "The name of the secondary range to be used for the cluster CIDR\nblock. The secondary range will be used for pod IP\naddresses. This must be an existing secondary range associated\nwith the cluster subnetwork.\n\nThis field is only applicable with use_ip_aliases is true and\ncreate_subnetwork is false.", + "description": "The name of the secondary range to be used for the cluster CIDR block. The secondary range will be used for pod IP addresses. This must be an existing secondary range associated with the cluster subnetwork. This field is only applicable with use_ip_aliases is true and create_subnetwork is false.", "type": "string" }, "createSubnetwork": { - "description": "Whether a new subnetwork will be created automatically for the cluster.\n\nThis field is only applicable when `use_ip_aliases` is true.", + "description": "Whether a new subnetwork will be created automatically for the cluster. This field is only applicable when `use_ip_aliases` is true.", "type": "boolean" }, "nodeIpv4Cidr": { @@ -3266,7 +3358,7 @@ "type": "string" }, "nodeIpv4CidrBlock": { - "description": "The IP address range of the instance IPs in this cluster.\n\nThis is applicable only if `create_subnetwork` is true.\n\nSet to blank to have a range chosen with the default size.\n\nSet to /netmask (e.g. 
`/14`) to have a range chosen with a specific\nnetmask.\n\nSet to a\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.", + "description": "The IP address range of the instance IPs in this cluster. This is applicable only if `create_subnetwork` is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. `/14`) to have a range chosen with a specific netmask. Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.", "type": "string" }, "servicesIpv4Cidr": { @@ -3274,30 +3366,34 @@ "type": "string" }, "servicesIpv4CidrBlock": { - "description": "The IP address range of the services IPs in this cluster. If blank, a range\nwill be automatically chosen with the default size.\n\nThis field is only applicable when `use_ip_aliases` is true.\n\nSet to blank to have a range chosen with the default size.\n\nSet to /netmask (e.g. `/14`) to have a range chosen with a specific\nnetmask.\n\nSet to a\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.", + "description": "The IP address range of the services IPs in this cluster. If blank, a range will be automatically chosen with the default size. This field is only applicable when `use_ip_aliases` is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. `/14`) to have a range chosen with a specific netmask. Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.", "type": "string" }, "servicesSecondaryRangeName": { - "description": "The name of the secondary range to be used as for the services\nCIDR block. The secondary range will be used for service\nClusterIPs. This must be an existing secondary range associated\nwith the cluster subnetwork.\n\nThis field is only applicable with use_ip_aliases is true and\ncreate_subnetwork is false.", + "description": "The name of the secondary range to be used as for the services CIDR block. The secondary range will be used for service ClusterIPs. This must be an existing secondary range associated with the cluster subnetwork. This field is only applicable with use_ip_aliases is true and create_subnetwork is false.", "type": "string" }, "subnetworkName": { - "description": "A custom subnetwork name to be used if `create_subnetwork` is true. If\nthis field is empty, then an automatic name will be chosen for the new\nsubnetwork.", + "description": "A custom subnetwork name to be used if `create_subnetwork` is true. If this field is empty, then an automatic name will be chosen for the new subnetwork.", "type": "string" }, "tpuIpv4CidrBlock": { - "description": "The IP address range of the Cloud TPUs in this cluster. If unspecified, a\nrange will be automatically chosen with the default size.\n\nThis field is only applicable when `use_ip_aliases` is true.\n\nIf unspecified, the range will use the default size.\n\nSet to /netmask (e.g. 
`/14`) to have a range chosen with a specific\nnetmask.\n\nSet to a\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.", + "description": "The IP address range of the Cloud TPUs in this cluster. If unspecified, a range will be automatically chosen with the default size. This field is only applicable when `use_ip_aliases` is true. If unspecified, the range will use the default size. Set to /netmask (e.g. `/14`) to have a range chosen with a specific netmask. Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.", "type": "string" }, "useIpAliases": { - "description": "Whether alias IPs will be used for pod IPs in the cluster.", + "description": "Whether alias IPs will be used for pod IPs in the cluster. This is used in conjunction with use_routes. It cannot be true if use_routes is true. If both use_ip_aliases and use_routes are false, then the server picks the default IP allocation mode", + "type": "boolean" + }, + "useRoutes": { + "description": "Whether routes will be used for pod IPs in the cluster. This is used in conjunction with use_ip_aliases. It cannot be true if use_ip_aliases is true. If both use_ip_aliases and use_routes are false, then the server picks the default IP allocation mode", "type": "boolean" } }, "type": "object" }, "IntraNodeVisibilityConfig": { - "description": "IntraNodeVisibilityConfig contains the desired config of the intra-node\nvisibility on this cluster.", + "description": "IntraNodeVisibilityConfig contains the desired config of the intra-node visibility on this cluster.", "id": "IntraNodeVisibilityConfig", "properties": { "enabled": { @@ -3362,11 +3458,11 @@ "type": "object" }, "LegacyAbac": { - "description": "Configuration for the legacy Attribute Based Access Control authorization\nmode.", + "description": "Configuration for the legacy Attribute Based Access Control authorization mode.", "id": "LegacyAbac", "properties": { "enabled": { - "description": "Whether the ABAC authorizer is enabled for this cluster. When enabled,\nidentities in the system, including service accounts, nodes, and\ncontrollers, will have statically granted permissions beyond those\nprovided by the RBAC configuration or IAM.", + "description": "Whether the ABAC authorizer is enabled for this cluster. 
When enabled, identities in the system, including service accounts, nodes, and controllers, will have statically granted permissions beyond those provided by the RBAC configuration or IAM.", "type": "boolean" } }, @@ -3377,14 +3473,14 @@ "id": "ListClustersResponse", "properties": { "clusters": { - "description": "A list of clusters in the project in the specified zone, or\nacross all ones.", + "description": "A list of clusters in the project in the specified zone, or across all ones.", "items": { "$ref": "Cluster" }, "type": "array" }, "missingZones": { - "description": "If any zones are listed here, the list of clusters returned\nmay be missing those zones.", + "description": "If any zones are listed here, the list of clusters returned may be missing those zones.", "items": { "type": "string" }, @@ -3412,7 +3508,7 @@ "id": "ListOperationsResponse", "properties": { "missingZones": { - "description": "If any zones are listed here, the list of operations returned\nmay be missing the operations from those zones.", + "description": "If any zones are listed here, the list of operations returned may be missing the operations from those zones.", "items": { "type": "string" }, @@ -3429,11 +3525,11 @@ "type": "object" }, "ListUsableSubnetworksResponse": { - "description": "ListUsableSubnetworksResponse is the response of\nListUsableSubnetworksRequest.", + "description": "ListUsableSubnetworksResponse is the response of ListUsableSubnetworksRequest.", "id": "ListUsableSubnetworksResponse", "properties": { "nextPageToken": { - "description": "This token allows you to get the next page of results for list requests.\nIf the number of results is larger than `page_size`, use the\n`next_page_token` as a value for the query parameter `page_token` in the\nnext request. The value will become empty when there are no more pages.", + "description": "This token allows you to get the next page of results for list requests. If the number of results is larger than `page_size`, use the `next_page_token` as a value for the query parameter `page_token` in the next request. The value will become empty when there are no more pages.", "type": "string" }, "subnetworks": { @@ -3451,7 +3547,7 @@ "id": "MaintenancePolicy", "properties": { "resourceVersion": { - "description": "A hash identifying the version of this policy, so that updates to fields of\nthe policy won't accidentally undo intermediate changes (and so that users\nof the API unaware of some fields won't accidentally remove other fields).\nMake a \u003ccode\u003eget()\u003c/code\u003e request to the cluster to get the current\nresource version and include it with requests to set the policy.", + "description": "A hash identifying the version of this policy, so that updates to fields of the policy won't accidentally undo intermediate changes (and so that users of the API unaware of some fields won't accidentally remove other fields). Make a `get()` request to the cluster to get the current resource version and include it with requests to set the policy.", "type": "string" }, "window": { @@ -3473,53 +3569,53 @@ "additionalProperties": { "$ref": "TimeWindow" }, - "description": "Exceptions to maintenance window. Non-emergency maintenance should not\noccur in these windows.", + "description": "Exceptions to maintenance window. Non-emergency maintenance should not occur in these windows.", "type": "object" }, "recurringWindow": { "$ref": "RecurringTimeWindow", - "description": "RecurringWindow specifies some number of recurring time periods for\nmaintenance to occur. 
The time windows may be overlapping. If no\nmaintenance windows are set, maintenance can occur at any time." + "description": "RecurringWindow specifies some number of recurring time periods for maintenance to occur. The time windows may be overlapping. If no maintenance windows are set, maintenance can occur at any time." } }, "type": "object" }, "MasterAuth": { - "description": "The authentication information for accessing the master endpoint.\nAuthentication can be done using HTTP basic auth or using client\ncertificates.", + "description": "The authentication information for accessing the master endpoint. Authentication can be done using HTTP basic auth or using client certificates.", "id": "MasterAuth", "properties": { "clientCertificate": { - "description": "[Output only] Base64-encoded public certificate used by clients to\nauthenticate to the cluster endpoint.", + "description": "[Output only] Base64-encoded public certificate used by clients to authenticate to the cluster endpoint.", "type": "string" }, "clientCertificateConfig": { "$ref": "ClientCertificateConfig", - "description": "Configuration for client certificate authentication on the cluster. For\nclusters before v1.12, if no configuration is specified, a client\ncertificate is issued." + "description": "Configuration for client certificate authentication on the cluster. For clusters before v1.12, if no configuration is specified, a client certificate is issued." }, "clientKey": { - "description": "[Output only] Base64-encoded private key used by clients to authenticate\nto the cluster endpoint.", + "description": "[Output only] Base64-encoded private key used by clients to authenticate to the cluster endpoint.", "type": "string" }, "clusterCaCertificate": { - "description": "[Output only] Base64-encoded public certificate that is the root of\ntrust for the cluster.", + "description": "[Output only] Base64-encoded public certificate that is the root of trust for the cluster.", "type": "string" }, "password": { - "description": "The password to use for HTTP basic authentication to the master endpoint.\nBecause the master endpoint is open to the Internet, you should create a\nstrong password. If a password is provided for cluster creation, username\nmust be non-empty.", + "description": "The password to use for HTTP basic authentication to the master endpoint. Because the master endpoint is open to the Internet, you should create a strong password. If a password is provided for cluster creation, username must be non-empty. Warning: basic authentication is deprecated, and will be removed in GKE control plane versions 1.19 and newer. For a list of recommended authentication methods, see: https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication", "type": "string" }, "username": { - "description": "The username to use for HTTP basic authentication to the master endpoint.\nFor clusters v1.6.0 and later, basic authentication can be disabled by\nleaving username unspecified (or setting it to the empty string).", + "description": "The username to use for HTTP basic authentication to the master endpoint. For clusters v1.6.0 and later, basic authentication can be disabled by leaving username unspecified (or setting it to the empty string). Warning: basic authentication is deprecated, and will be removed in GKE control plane versions 1.19 and newer. 
For a list of recommended authentication methods, see: https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication", "type": "string" } }, "type": "object" }, "MasterAuthorizedNetworksConfig": { - "description": "Configuration options for the master authorized networks feature. Enabled\nmaster authorized networks will disallow all external traffic to access\nKubernetes master through HTTPS except traffic from the given CIDR blocks,\nGoogle Compute Engine Public IPs and Google Prod IPs.", + "description": "Configuration options for the master authorized networks feature. Enabled master authorized networks will disallow all external traffic to access Kubernetes master through HTTPS except traffic from the given CIDR blocks, Google Compute Engine Public IPs and Google Prod IPs.", "id": "MasterAuthorizedNetworksConfig", "properties": { "cidrBlocks": { - "description": "cidr_blocks define up to 50 external networks that could access\nKubernetes master through HTTPS.", + "description": "cidr_blocks define up to 50 external networks that could access Kubernetes master through HTTPS.", "items": { "$ref": "CidrBlock" }, @@ -3573,23 +3669,27 @@ "description": "NetworkConfig reports the relative names of network \u0026 subnetwork.", "id": "NetworkConfig", "properties": { + "defaultSnatStatus": { + "$ref": "DefaultSnatStatus", + "description": "Whether the cluster disables default in-node sNAT rules. In-node sNAT rules will be disabled when default_snat_status is disabled. When disabled is set to false, default IP masquerade rules will be applied to the nodes to prevent sNAT on cluster internal traffic." + }, "enableIntraNodeVisibility": { - "description": "Whether Intra-node visibility is enabled for this cluster.\nThis makes same node pod to pod traffic visible for VPC network.", + "description": "Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network.", "type": "boolean" }, "network": { - "description": "Output only. The relative name of the Google Compute Engine\nnetwork(https://cloud.google.com/compute/docs/networks-and-firewalls#networks)\nto which the cluster is connected. Example:\nprojects/my-project/global/networks/my-network", + "description": "Output only. The relative name of the Google Compute Engine network(https://cloud.google.com/compute/docs/networks-and-firewalls#networks) to which the cluster is connected. Example: projects/my-project/global/networks/my-network", "type": "string" }, "subnetwork": { - "description": "Output only. The relative name of the Google Compute Engine\n[subnetwork](https://cloud.google.com/compute/docs/vpc) to which the\ncluster is connected. Example:\nprojects/my-project/regions/us-central1/subnetworks/my-subnet", + "description": "Output only. The relative name of the Google Compute Engine [subnetwork](https://cloud.google.com/compute/docs/vpc) to which the cluster is connected. Example: projects/my-project/regions/us-central1/subnetworks/my-subnet", "type": "string" } }, "type": "object" }, "NetworkPolicy": { - "description": "Configuration options for the NetworkPolicy feature.\nhttps://kubernetes.io/docs/concepts/services-networking/networkpolicies/", + "description": "Configuration options for the NetworkPolicy feature. https://kubernetes.io/docs/concepts/services-networking/networkpolicies/", "id": "NetworkPolicy", "properties": { "enabled": { @@ -3612,7 +3712,7 @@ "type": "object" }, "NetworkPolicyConfig": { - "description": "Configuration for NetworkPolicy. 
This only tracks whether the addon\nis enabled or not on the Master, it does not track whether network policy\nis enabled for the nodes.", + "description": "Configuration for NetworkPolicy. This only tracks whether the addon is enabled or not on the Master, it does not track whether network policy is enabled for the nodes.", "id": "NetworkPolicyConfig", "properties": { "disabled": { @@ -3627,73 +3727,81 @@ "id": "NodeConfig", "properties": { "accelerators": { - "description": "A list of hardware accelerators to be attached to each node.\nSee https://cloud.google.com/compute/docs/gpus for more information about\nsupport for GPUs.", + "description": "A list of hardware accelerators to be attached to each node. See https://cloud.google.com/compute/docs/gpus for more information about support for GPUs.", "items": { "$ref": "AcceleratorConfig" }, "type": "array" }, + "bootDiskKmsKey": { + "description": " The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption", + "type": "string" + }, "diskSizeGb": { - "description": "Size of the disk attached to each node, specified in GB.\nThe smallest allowed disk size is 10GB.\n\nIf unspecified, the default disk size is 100GB.", + "description": "Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB.", "format": "int32", "type": "integer" }, "diskType": { - "description": "Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd')\n\nIf unspecified, the default disk type is 'pd-standard'", + "description": "Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd') If unspecified, the default disk type is 'pd-standard'", "type": "string" }, "imageType": { - "description": "The image type to use for this node. Note that for a given image type,\nthe latest version of it will be used.", + "description": "The image type to use for this node. Note that for a given image type, the latest version of it will be used.", "type": "string" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "The map of Kubernetes labels (key/value pairs) to be applied to each node.\nThese will added in addition to any default label(s) that\nKubernetes may apply to the node.\nIn case of conflict in label keys, the applied set may differ depending on\nthe Kubernetes version -- it's best to assume the behavior is undefined\nand conflicts should be avoided.\nFor more information, including usage and the valid values, see:\nhttps://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", + "description": "The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node. In case of conflict in label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided. 
For more information, including usage and the valid values, see: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", "type": "object" }, "localSsdCount": { - "description": "The number of local SSD disks to be attached to the node.\n\nThe limit for this value is dependent upon the maximum number of\ndisks available on a machine per zone. See:\nhttps://cloud.google.com/compute/docs/disks/local-ssd\nfor more information.", + "description": "The number of local SSD disks to be attached to the node. The limit for this value is dependent upon the maximum number of disks available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information.", "format": "int32", "type": "integer" }, "machineType": { - "description": "The name of a Google Compute Engine [machine\ntype](https://cloud.google.com/compute/docs/machine-types) (e.g.\n`n1-standard-1`).\n\nIf unspecified, the default machine type is\n`n1-standard-1`.", + "description": "The name of a Google Compute Engine [machine type](https://cloud.google.com/compute/docs/machine-types) If unspecified, the default machine type is `e2-medium`.", "type": "string" }, "metadata": { "additionalProperties": { "type": "string" }, - "description": "The metadata key/value pairs assigned to instances in the cluster.\n\nKeys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes\nin length. These are reflected as part of a URL in the metadata server.\nAdditionally, to avoid ambiguity, keys must not conflict with any other\nmetadata keys for the project or be one of the reserved keys:\n \"cluster-location\"\n \"cluster-name\"\n \"cluster-uid\"\n \"configure-sh\"\n \"containerd-configure-sh\"\n \"enable-os-login\"\n \"gci-ensure-gke-docker\"\n \"gci-metrics-enabled\"\n \"gci-update-strategy\"\n \"instance-template\"\n \"kube-env\"\n \"startup-script\"\n \"user-data\"\n \"disable-address-manager\"\n \"windows-startup-script-ps1\"\n \"common-psm1\"\n \"k8s-node-setup-psm1\"\n \"install-ssh-psm1\"\n \"user-profile-psm1\"\n \"serial-port-logging-enable\"\n\nValues are free-form strings, and only have meaning as interpreted by\nthe image running in the instance. The only restriction placed on them is\nthat each value's size must be less than or equal to 32 KB.\n\nThe total size of all keys and values must be less than 512 KB.", + "description": "The metadata key/value pairs assigned to instances in the cluster. Keys must conform to the regexp `[a-zA-Z0-9-_]+` and be less than 128 bytes in length. These are reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project or be one of the reserved keys: - \"cluster-location\" - \"cluster-name\" - \"cluster-uid\" - \"configure-sh\" - \"containerd-configure-sh\" - \"enable-os-login\" - \"gci-ensure-gke-docker\" - \"gci-metrics-enabled\" - \"gci-update-strategy\" - \"instance-template\" - \"kube-env\" - \"startup-script\" - \"user-data\" - \"disable-address-manager\" - \"windows-startup-script-ps1\" - \"common-psm1\" - \"k8s-node-setup-psm1\" - \"install-ssh-psm1\" - \"user-profile-psm1\" - \"serial-port-logging-enable\" Values are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on them is that each value's size must be less than or equal to 32 KB. 
The total size of all keys and values must be less than 512 KB.", "type": "object" }, "minCpuPlatform": { - "description": "Minimum CPU platform to be used by this instance. The instance may be\nscheduled on the specified or newer CPU platform. Applicable values are the\nfriendly names of CPU platforms, such as\n\u003ccode\u003eminCpuPlatform: \u0026quot;Intel Haswell\u0026quot;\u003c/code\u003e or\n\u003ccode\u003eminCpuPlatform: \u0026quot;Intel Sandy Bridge\u0026quot;\u003c/code\u003e. For more\ninformation, read [how to specify min CPU\nplatform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)", + "description": "Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform. Applicable values are the friendly names of CPU platforms, such as `minCpuPlatform: \"Intel Haswell\"` or `minCpuPlatform: \"Intel Sandy Bridge\"`. For more information, read [how to specify min CPU platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)", + "type": "string" + }, + "nodeGroup": { + "description": "Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on [sole tenant nodes](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes).", "type": "string" }, "oauthScopes": { - "description": "The set of Google API scopes to be made available on all of the\nnode VMs under the \"default\" service account.\n\nThe following scopes are recommended, but not required, and by default are\nnot included:\n\n* `https://www.googleapis.com/auth/compute` is required for mounting\npersistent storage on your nodes.\n* `https://www.googleapis.com/auth/devstorage.read_only` is required for\ncommunicating with **gcr.io**\n(the [Google Container\nRegistry](https://cloud.google.com/container-registry/)).\n\nIf unspecified, no scopes are added, unless Cloud Logging or Cloud\nMonitoring are enabled, in which case their required scopes will be added.", + "description": "The set of Google API scopes to be made available on all of the node VMs under the \"default\" service account. The following scopes are recommended, but not required, and by default are not included: * `https://www.googleapis.com/auth/compute` is required for mounting persistent storage on your nodes. * `https://www.googleapis.com/auth/devstorage.read_only` is required for communicating with **gcr.io** (the [Google Container Registry](https://cloud.google.com/container-registry/)). If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring are enabled, in which case their required scopes will be added.", "items": { "type": "string" }, "type": "array" }, "preemptible": { - "description": "Whether the nodes are created as preemptible VM instances. See:\nhttps://cloud.google.com/compute/docs/instances/preemptible for more\ninformation about preemptible VM instances.", + "description": "Whether the nodes are created as preemptible VM instances. See: https://cloud.google.com/compute/docs/instances/preemptible for more information about preemptible VM instances.", "type": "boolean" }, "reservationAffinity": { "$ref": "ReservationAffinity", - "description": "The optional reservation affinity. Setting this field will apply\nthe specified [Zonal Compute\nReservation](https://cloud.google.com/compute/docs/instances/reserving-zonal-resources)\nto this node pool." + "description": "The optional reservation affinity. 
Setting this field will apply the specified [Zonal Compute Reservation](https://cloud.google.com/compute/docs/instances/reserving-zonal-resources) to this node pool." }, "sandboxConfig": { "$ref": "SandboxConfig", "description": "Sandbox configuration for this node." }, "serviceAccount": { - "description": "The Google Cloud Platform Service Account to be used by the node VMs.\nSpecify the email address of the Service Account; otherwise, if no Service\nAccount is specified, the \"default\" service account is used.", + "description": "The Google Cloud Platform Service Account to be used by the node VMs. Specify the email address of the Service Account; otherwise, if no Service Account is specified, the \"default\" service account is used.", "type": "string" }, "shieldedInstanceConfig": { @@ -3701,14 +3809,14 @@ "description": "Shielded Instance options." }, "tags": { - "description": "The list of instance tags applied to all nodes. Tags are used to identify\nvalid sources or targets for network firewalls and are specified by\nthe client during cluster or node pool creation. Each tag within the list\nmust comply with RFC1035.", + "description": "The list of instance tags applied to all nodes. Tags are used to identify valid sources or targets for network firewalls and are specified by the client during cluster or node pool creation. Each tag within the list must comply with RFC1035.", "items": { "type": "string" }, "type": "array" }, "taints": { - "description": "List of kubernetes taints to be applied to each node.\n\nFor more information, including usage and the valid values, see:\nhttps://kubernetes.io/docs/concepts/configuration/taint-and-toleration/", + "description": "List of kubernetes taints to be applied to each node. For more information, including usage and the valid values, see: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/", "items": { "$ref": "NodeTaint" }, @@ -3722,15 +3830,15 @@ "type": "object" }, "NodeManagement": { - "description": "NodeManagement defines the set of node management services turned on for the\nnode pool.", + "description": "NodeManagement defines the set of node management services turned on for the node pool.", "id": "NodeManagement", "properties": { "autoRepair": { - "description": "A flag that specifies whether the node auto-repair is enabled for the node\npool. If enabled, the nodes in this node pool will be monitored and, if\nthey fail health checks too many times, an automatic repair action will be\ntriggered.", + "description": "A flag that specifies whether the node auto-repair is enabled for the node pool. If enabled, the nodes in this node pool will be monitored and, if they fail health checks too many times, an automatic repair action will be triggered.", "type": "boolean" }, "autoUpgrade": { - "description": "A flag that specifies whether node auto-upgrade is enabled for the node\npool. If enabled, node auto-upgrade helps keep the nodes in your node pool\nup to date with the latest release version of Kubernetes.", + "description": "A flag that specifies whether node auto-upgrade is enabled for the node pool. If enabled, node auto-upgrade helps keep the nodes in your node pool up to date with the latest release version of Kubernetes.", "type": "boolean" }, "upgradeOptions": { @@ -3741,12 +3849,12 @@ "type": "object" }, "NodePool": { - "description": "NodePool contains the name and configuration for a cluster's node pool.\nNode pools are a set of nodes (i.e. 
VM's), with a common configuration and\nspecification, under the control of the cluster master. They may have a set\nof Kubernetes labels applied to them, which may be used to reference them\nduring pod scheduling. They may also be resized up or down, to accommodate\nthe workload.", + "description": "NodePool contains the name and configuration for a cluster's node pool. Node pools are a set of nodes (i.e. VM's), with a common configuration and specification, under the control of the cluster master. They may have a set of Kubernetes labels applied to them, which may be used to reference them during pod scheduling. They may also be resized up or down, to accommodate the workload.", "id": "NodePool", "properties": { "autoscaling": { "$ref": "NodePoolAutoscaling", - "description": "Autoscaler configuration for this NodePool. Autoscaler is enabled\nonly if a valid configuration is present." + "description": "Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present." }, "conditions": { "description": "Which conditions caused the current node pool state.", @@ -3760,19 +3868,19 @@ "description": "The node configuration of the pool." }, "initialNodeCount": { - "description": "The initial node count for the pool. You must ensure that your\nCompute Engine \u003ca href=\"/compute/docs/resource-quotas\"\u003eresource quota\u003c/a\u003e\nis sufficient for this number of instances. You must also have available\nfirewall and routes quota.", + "description": "The initial node count for the pool. You must ensure that your Compute Engine [resource quota](https://cloud.google.com/compute/quotas) is sufficient for this number of instances. You must also have available firewall and routes quota.", "format": "int32", "type": "integer" }, "instanceGroupUrls": { - "description": "[Output only] The resource URLs of the [managed instance\ngroups](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances)\nassociated with this node pool.", + "description": "[Output only] The resource URLs of the [managed instance groups](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances) associated with this node pool.", "items": { "type": "string" }, "type": "array" }, "locations": { - "description": "The list of Google Compute Engine\n[zones](https://cloud.google.com/compute/docs/zones#available) in which the\nNodePool's nodes should be located.", + "description": "The list of Google Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available) in which the NodePool's nodes should be located.", "items": { "type": "string" }, @@ -3784,7 +3892,7 @@ }, "maxPodsConstraint": { "$ref": "MaxPodsConstraint", - "description": "The constraint on the maximum number of pods that can be run\nsimultaneously on a node in the node pool." + "description": "The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool." }, "name": { "description": "The name of the node pool.", @@ -3813,16 +3921,16 @@ "enumDescriptions": [ "Not set.", "The PROVISIONING state indicates the node pool is being created.", - "The RUNNING state indicates the node pool has been created\nand is fully usable.", - "The RUNNING_WITH_ERROR state indicates the node pool has been created\nand is partially usable. Some error state has occurred and some\nfunctionality may be impaired. 
Customer may need to reissue a request\nor trigger a new update.", - "The RECONCILING state indicates that some work is actively being done on\nthe node pool, such as upgrading node software. Details can\nbe found in the `statusMessage` field.", + "The RUNNING state indicates the node pool has been created and is fully usable.", + "The RUNNING_WITH_ERROR state indicates the node pool has been created and is partially usable. Some error state has occurred and some functionality may be impaired. Customer may need to reissue a request or trigger a new update.", + "The RECONCILING state indicates that some work is actively being done on the node pool, such as upgrading node software. Details can be found in the `statusMessage` field.", "The STOPPING state indicates the node pool is being deleted.", - "The ERROR state indicates the node pool may be unusable. Details\ncan be found in the `statusMessage` field." + "The ERROR state indicates the node pool may be unusable. Details can be found in the `statusMessage` field." ], "type": "string" }, "statusMessage": { - "description": "[Output only] Additional information about the current status of this\nnode pool instance, if available.", + "description": "[Output only] Additional information about the current status of this node pool instance, if available.", "type": "string" }, "upgradeSettings": { @@ -3837,7 +3945,7 @@ "type": "object" }, "NodePoolAutoscaling": { - "description": "NodePoolAutoscaling contains information required by cluster autoscaler to\nadjust the size of the node pool to the current cluster usage.", + "description": "NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage.", "id": "NodePoolAutoscaling", "properties": { "autoprovisioned": { @@ -3849,12 +3957,12 @@ "type": "boolean" }, "maxNodeCount": { - "description": "Maximum number of nodes in the NodePool. Must be \u003e= min_node_count. There\nhas to enough quota to scale up the cluster.", + "description": "Maximum number of nodes in the NodePool. Must be \u003e= min_node_count. There has to enough quota to scale up the cluster.", "format": "int32", "type": "integer" }, "minNodeCount": { - "description": "Minimum number of nodes in the NodePool. Must be \u003e= 1 and \u003c=\nmax_node_count.", + "description": "Minimum number of nodes in the NodePool. Must be \u003e= 1 and \u003c= max_node_count.", "format": "int32", "type": "integer" } @@ -3862,7 +3970,7 @@ "type": "object" }, "NodeTaint": { - "description": "Kubernetes taint is comprised of three fields: key, value, and effect. Effect\ncan only be one of three types: NoSchedule, PreferNoSchedule or NoExecute.\n\nSee\n[here](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration)\nfor more information, including usage and the valid values.", + "description": "Kubernetes taint is comprised of three fields: key, value, and effect. Effect can only be one of three types: NoSchedule, PreferNoSchedule or NoExecute. See [here](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration) for more information, including usage and the valid values.", "id": "NodeTaint", "properties": { "effect": { @@ -3893,7 +4001,7 @@ "type": "object" }, "Operation": { - "description": "This operation resource represents operations that may have happened or are\nhappening on the cluster. All fields are output only.", + "description": "This operation resource represents operations that may have happened or are happening on the cluster. 
All fields are output only.", "id": "Operation", "properties": { "clusterConditions": { @@ -3908,11 +4016,11 @@ "type": "string" }, "endTime": { - "description": "[Output only] The time the operation completed, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "description": "[Output only] The time the operation completed, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", "type": "string" }, "location": { - "description": "[Output only] The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available)\nor\n[region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available)\nin which the cluster resides.", + "description": "[Output only] The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) or [region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) in which the cluster resides.", "type": "string" }, "name": { @@ -3970,14 +4078,15 @@ }, "progress": { "$ref": "OperationProgress", - "description": "Output only. [Output only] Progress information for an operation." + "description": "Output only. [Output only] Progress information for an operation.", + "readOnly": true }, "selfLink": { "description": "Server-defined URL for the resource.", "type": "string" }, "startTime": { - "description": "[Output only] The time the operation started, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "description": "[Output only] The time the operation started, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", "type": "string" }, "status": { @@ -4000,6 +4109,7 @@ }, "statusMessage": { "description": "Output only. If an error has occurred, a textual description of the error.", + "readOnly": true, "type": "string" }, "targetLink": { @@ -4007,7 +4117,7 @@ "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\noperation is taking place. This field is deprecated, use location instead.", + "description": "The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the operation is taking place. This field is deprecated, use location instead.", "type": "string" } }, @@ -4018,14 +4128,14 @@ "id": "OperationProgress", "properties": { "metrics": { - "description": "Progress metric bundle, for example:\n metrics: [{name: \"nodes done\", int_value: 15},\n {name: \"nodes total\", int_value: 32}]\nor\n metrics: [{name: \"progress\", double_value: 0.56},\n {name: \"progress scale\", double_value: 1.0}]", + "description": "Progress metric bundle, for example: metrics: [{name: \"nodes done\", int_value: 15}, {name: \"nodes total\", int_value: 32}] or metrics: [{name: \"progress\", double_value: 0.56}, {name: \"progress scale\", double_value: 1.0}]", "items": { "$ref": "Metric" }, "type": "array" }, "name": { - "description": "A non-parameterized string describing an operation stage.\nUnset for single-stage operations.", + "description": "A non-parameterized string describing an operation stage. Unset for single-stage operations.", "type": "string" }, "stages": { @@ -4036,7 +4146,7 @@ "type": "array" }, "status": { - "description": "Status of an operation stage.\nUnset for single-stage operations.", + "description": "Status of an operation stage. 
Unset for single-stage operations.", "enum": [ "STATUS_UNSPECIFIED", "PENDING", @@ -4065,11 +4175,15 @@ "type": "boolean" }, "enablePrivateNodes": { - "description": "Whether nodes have internal IP addresses only. If enabled, all nodes are\ngiven only RFC 1918 private addresses and communicate with the master via\nprivate networking.", + "description": "Whether nodes have internal IP addresses only. If enabled, all nodes are given only RFC 1918 private addresses and communicate with the master via private networking.", "type": "boolean" }, + "masterGlobalAccessConfig": { + "$ref": "PrivateClusterMasterGlobalAccessConfig", + "description": "Controls master global access settings." + }, "masterIpv4CidrBlock": { - "description": "The IP range in CIDR notation to use for the hosted master network. This\nrange will be used for assigning internal IP addresses to the master or\nset of masters, as well as the ILB VIP. This range must not overlap with\nany other ranges in use within the cluster's network.", + "description": "The IP range in CIDR notation to use for the hosted master network. This range will be used for assigning internal IP addresses to the master or set of masters, as well as the ILB VIP. This range must not overlap with any other ranges in use within the cluster's network.", "type": "string" }, "peeringName": { @@ -4087,12 +4201,23 @@ }, "type": "object" }, + "PrivateClusterMasterGlobalAccessConfig": { + "description": "Configuration for controlling master global access settings.", + "id": "PrivateClusterMasterGlobalAccessConfig", + "properties": { + "enabled": { + "description": "Whenever master is accessible globally or not.", + "type": "boolean" + } + }, + "type": "object" + }, "RecurringTimeWindow": { "description": "Represents an arbitrary window of time that recurs.", "id": "RecurringTimeWindow", "properties": { "recurrence": { - "description": "An RRULE (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for how\nthis window reccurs. They go on for the span of time between the start and\nend time.\n\nFor example, to have something repeat every weekday, you'd use:\n \u003ccode\u003eFREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR\u003c/code\u003e\nTo repeat some window daily (equivalent to the DailyMaintenanceWindow):\n \u003ccode\u003eFREQ=DAILY\u003c/code\u003e\nFor the first weekend of every month:\n \u003ccode\u003eFREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU\u003c/code\u003e\nThis specifies how frequently the window starts. Eg, if you wanted to have\na 9-5 UTC-4 window every weekday, you'd use something like:\n\u003ccode\u003e\n start time = 2019-01-01T09:00:00-0400\n end time = 2019-01-01T17:00:00-0400\n recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR\n\u003c/code\u003e\nWindows can span multiple days. Eg, to make the window encompass every\nweekend from midnight Saturday till the last minute of Sunday UTC:\n\u003ccode\u003e\n start time = 2019-01-05T00:00:00Z\n end time = 2019-01-07T23:59:00Z\n recurrence = FREQ=WEEKLY;BYDAY=SA\n\u003c/code\u003e\nNote the start and end time's specific dates are largely arbitrary except\nto specify duration of the window and when it first starts.\nThe FREQ values of HOURLY, MINUTELY, and SECONDLY are not supported.", + "description": "An RRULE (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for how this window reccurs. They go on for the span of time between the start and end time. 
For example, to have something repeat every weekday, you'd use: `FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR` To repeat some window daily (equivalent to the DailyMaintenanceWindow): `FREQ=DAILY` For the first weekend of every month: `FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU` This specifies how frequently the window starts. Eg, if you wanted to have a 9-5 UTC-4 window every weekday, you'd use something like: ``` start time = 2019-01-01T09:00:00-0400 end time = 2019-01-01T17:00:00-0400 recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR ``` Windows can span multiple days. Eg, to make the window encompass every weekend from midnight Saturday till the last minute of Sunday UTC: ``` start time = 2019-01-05T00:00:00Z end time = 2019-01-07T23:59:00Z recurrence = FREQ=WEEKLY;BYDAY=SA ``` Note the start and end time's specific dates are largely arbitrary except to specify duration of the window and when it first starts. The FREQ values of HOURLY, MINUTELY, and SECONDLY are not supported.", "type": "string" }, "window": { @@ -4102,8 +4227,65 @@ }, "type": "object" }, + "ReleaseChannel": { + "description": "ReleaseChannel indicates which release channel a cluster is subscribed to. Release channels are arranged in order of risk. When a cluster is subscribed to a release channel, Google maintains both the master version and the node version. Node auto-upgrade defaults to true and cannot be disabled.", + "id": "ReleaseChannel", + "properties": { + "channel": { + "description": "channel specifies which release channel the cluster is subscribed to.", + "enum": [ + "UNSPECIFIED", + "RAPID", + "REGULAR", + "STABLE" + ], + "enumDescriptions": [ + "No channel specified.", + "RAPID channel is offered on an early access basis for customers who want to test new releases. WARNING: Versions available in the RAPID Channel may be subject to unresolved issues with no known workaround and are not subject to any SLAs.", + "Clusters subscribed to REGULAR receive versions that are considered GA quality. REGULAR is intended for production users who want to take advantage of new features.", + "Clusters subscribed to STABLE receive versions that are known to be stable and reliable in production." + ], + "type": "string" + } + }, + "type": "object" + }, + "ReleaseChannelConfig": { + "description": "ReleaseChannelConfig exposes configuration for a release channel.", + "id": "ReleaseChannelConfig", + "properties": { + "channel": { + "description": "The release channel this configuration applies to.", + "enum": [ + "UNSPECIFIED", + "RAPID", + "REGULAR", + "STABLE" + ], + "enumDescriptions": [ + "No channel specified.", + "RAPID channel is offered on an early access basis for customers who want to test new releases. WARNING: Versions available in the RAPID Channel may be subject to unresolved issues with no known workaround and are not subject to any SLAs.", + "Clusters subscribed to REGULAR receive versions that are considered GA quality. REGULAR is intended for production users who want to take advantage of new features.", + "Clusters subscribed to STABLE receive versions that are known to be stable and reliable in production." 
+ ], + "type": "string" + }, + "defaultVersion": { + "description": "The default version for newly created clusters on the channel.", + "type": "string" + }, + "validVersions": { + "description": "List of valid versions for the channel.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "ReservationAffinity": { - "description": "[ReservationAffinity](https://cloud.google.com/compute/docs/instances/reserving-zonal-resources)\nis the configuration of desired reservation which instances could take\ncapacity from.", + "description": "[ReservationAffinity](https://cloud.google.com/compute/docs/instances/reserving-zonal-resources) is the configuration of desired reservation which instances could take capacity from.", "id": "ReservationAffinity", "properties": { "consumeReservationType": { @@ -4118,12 +4300,12 @@ "Default value. This should not be used.", "Do not consume from any reserved capacity.", "Consume any reservation available.", - "Must consume from a specific reservation. Must specify key value fields\nfor specifying the reservations." + "Must consume from a specific reservation. Must specify key value fields for specifying the reservations." ], "type": "string" }, "key": { - "description": "Corresponds to the label key of a reservation resource. To target a\nSPECIFIC_RESERVATION by name, specify \"googleapis.com/reservation-name\" as\nthe key and specify the name of your reservation as its value.", + "description": "Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify \"googleapis.com/reservation-name\" as the key and specify the name of your reservation as its value.", "type": "string" }, "values": { @@ -4137,7 +4319,7 @@ "type": "object" }, "ResourceLimit": { - "description": "Contains information about amount of some resource in the cluster.\nFor memory, value should be in GB.", + "description": "Contains information about amount of some resource in the cluster. For memory, value should be in GB.", "id": "ResourceLimit", "properties": { "maximum": { @@ -4170,34 +4352,34 @@ "description": "Configuration to enable resource consumption metering." }, "enableNetworkEgressMetering": { - "description": "Whether to enable network egress metering for this cluster. If enabled, a\ndaemonset will be created in the cluster to meter network egress traffic.", + "description": "Whether to enable network egress metering for this cluster. If enabled, a daemonset will be created in the cluster to meter network egress traffic.", "type": "boolean" } }, "type": "object" }, "RollbackNodePoolUpgradeRequest": { - "description": "RollbackNodePoolUpgradeRequest rollbacks the previously Aborted or Failed\nNodePool upgrade. This will be an no-op if the last upgrade successfully\ncompleted.", + "description": "RollbackNodePoolUpgradeRequest rollbacks the previously Aborted or Failed NodePool upgrade. This will be an no-op if the last upgrade successfully completed.", "id": "RollbackNodePoolUpgradeRequest", "properties": { "clusterId": { - "description": "Deprecated. The name of the cluster to rollback.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to rollback. 
This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster, node pool id) of the node poll to\nrollback upgrade.\nSpecified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node poll to rollback upgrade. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "type": "string" }, "nodePoolId": { - "description": "Deprecated. The name of the node pool to rollback.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the node pool to rollback. This field has been deprecated and replaced by the name field.", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4226,6 +4408,13 @@ "description": "Kubernetes Engine service configuration.", "id": "ServerConfig", "properties": { + "channels": { + "description": "List of release channel configurations.", + "items": { + "$ref": "ReleaseChannelConfig" + }, + "type": "array" + }, "defaultClusterVersion": { "description": "Version of Kubernetes the service deploys by default.", "type": "string" @@ -4242,14 +4431,14 @@ "type": "array" }, "validMasterVersions": { - "description": "List of valid master versions.", + "description": "List of valid master versions, in descending order.", "items": { "type": "string" }, "type": "array" }, "validNodeVersions": { - "description": "List of valid node upgrade target versions.", + "description": "List of valid node upgrade target versions, in descending order.", "items": { "type": "string" }, @@ -4264,45 +4453,45 @@ "properties": { "addonsConfig": { "$ref": "AddonsConfig", - "description": "Required. The desired configurations for the various addons available to run in the\ncluster." + "description": "Required. The desired configurations for the various addons available to run in the cluster." }, "clusterId": { - "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to set addons.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set addons. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, "type": "object" }, "SetLabelsRequest": { - "description": "SetLabelsRequest sets the Google Cloud Platform labels on a Google Container\nEngine cluster, which will in turn set them for Google Compute Engine\nresources used by that cluster", + "description": "SetLabelsRequest sets the Google Cloud Platform labels on a Google Container Engine cluster, which will in turn set them for Google Compute Engine resources used by that cluster", "id": "SetLabelsRequest", "properties": { "clusterId": { - "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "type": "string" }, "labelFingerprint": { - "description": "Required. The fingerprint of the previous set of labels for this resource,\nused to detect conflicts. The fingerprint is initially generated by\nKubernetes Engine and changes after every request to modify or update\nlabels. You must always provide an up-to-date fingerprint hash when\nupdating or changing labels. Make a \u003ccode\u003eget()\u003c/code\u003e request to the\nresource to get the latest fingerprint.", + "description": "Required. The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Kubernetes Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash when updating or changing labels. Make a `get()` request to the resource to get the latest fingerprint.", "type": "string" }, "name": { - "description": "The name (project, location, cluster id) of the cluster to set labels.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to set labels. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "type": "string" }, "resourceLabels": { @@ -4313,18 +4502,18 @@ "type": "object" }, "zone": { - "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, "type": "object" }, "SetLegacyAbacRequest": { - "description": "SetLegacyAbacRequest enables or disables the ABAC authorization mechanism for\na cluster.", + "description": "SetLegacyAbacRequest enables or disables the ABAC authorization mechanism for a cluster.", "id": "SetLegacyAbacRequest", "properties": { "clusterId": { - "description": "Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", "type": "string" }, "enabled": { @@ -4332,15 +4521,15 @@ "type": "boolean" }, "name": { - "description": "The name (project, location, cluster id) of the cluster to set legacy abac.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to set legacy abac. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4351,26 +4540,26 @@ "id": "SetLocationsRequest", "properties": { "clusterId": { - "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "locations": { - "description": "Required. The desired list of Google Compute Engine\n[zones](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster's nodes should be located. Changing the locations a cluster is in\nwill result in nodes being either created or removed from the cluster,\ndepending on whether locations are being added or removed.\n\nThis list must always include the cluster's primary zone.", + "description": "Required. The desired list of Google Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available) in which the cluster's nodes should be located. Changing the locations a cluster is in will result in nodes being either created or removed from the cluster, depending on whether locations are being added or removed. 
This list must always include the cluster's primary zone.", "items": { "type": "string" }, "type": "array" }, "name": { - "description": "The name (project, location, cluster) of the cluster to set locations.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set locations. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4381,23 +4570,23 @@ "id": "SetLoggingServiceRequest", "properties": { "clusterId": { - "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "loggingService": { - "description": "Required. The logging service the cluster should use to write logs.\nCurrently available options:\n\n* `logging.googleapis.com/kubernetes` - The Cloud Logging\nservice with a Kubernetes-native resource model\n* `logging.googleapis.com` - The legacy Cloud Logging service (no longer\n available as of GKE 1.15).\n* `none` - no logs will be exported from the cluster.\n\nIf left as an empty string,`logging.googleapis.com/kubernetes` will be\nused for GKE 1.14+ or `logging.googleapis.com` for earlier versions.", + "description": "Required. The logging service the cluster should use to write logs. Currently available options: * `logging.googleapis.com/kubernetes` - The Cloud Logging service with a Kubernetes-native resource model * `logging.googleapis.com` - The legacy Cloud Logging service (no longer available as of GKE 1.15). * `none` - no logs will be exported from the cluster. If left as an empty string,`logging.googleapis.com/kubernetes` will be used for GKE 1.14+ or `logging.googleapis.com` for earlier versions.", "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to set logging.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set logging. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). 
This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4413,18 +4602,18 @@ }, "maintenancePolicy": { "$ref": "MaintenancePolicy", - "description": "Required. The maintenance policy to be set for the cluster. An empty field\nclears the existing maintenance policy." + "description": "Required. The maintenance policy to be set for the cluster. An empty field clears the existing maintenance policy." }, "name": { - "description": "The name (project, location, cluster id) of the cluster to set maintenance\npolicy.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to set maintenance policy. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Required. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Required. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", "type": "string" }, "zone": { - "description": "Required. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides.", + "description": "Required. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides.", "type": "string" } }, @@ -4446,20 +4635,20 @@ "Operation is unknown and will error out.", "Set the password to a user generated value.", "Generate a new password and set it to that.", - "Set the username. If an empty username is provided, basic authentication\nis disabled for the cluster. If a non-empty username is provided, basic\nauthentication is enabled, with either a provided password or a generated\none." + "Set the username. If an empty username is provided, basic authentication is disabled for the cluster. If a non-empty username is provided, basic authentication is enabled, with either a provided password or a generated one." ], "type": "string" }, "clusterId": { - "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to set auth.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set auth. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. 
The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "update": { @@ -4467,7 +4656,7 @@ "description": "Required. A description of the update." }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4478,23 +4667,23 @@ "id": "SetMonitoringServiceRequest", "properties": { "clusterId": { - "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "monitoringService": { - "description": "Required. The monitoring service the cluster should use to write metrics.\nCurrently available options:\n\n* \"monitoring.googleapis.com/kubernetes\" - The Cloud Monitoring\nservice with a Kubernetes-native resource model\n* `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no\n longer available as of GKE 1.15).\n* `none` - No metrics will be exported from the cluster.\n\nIf left as an empty string,`monitoring.googleapis.com/kubernetes` will be\nused for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.", + "description": "Required. The monitoring service the cluster should use to write metrics. Currently available options: * \"monitoring.googleapis.com/kubernetes\" - The Cloud Monitoring service with a Kubernetes-native resource model * `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no longer available as of GKE 1.15). * `none` - No metrics will be exported from the cluster. If left as an empty string,`monitoring.googleapis.com/kubernetes` will be used for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.", "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to set monitoring.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set monitoring. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4505,11 +4694,11 @@ "id": "SetNetworkPolicyRequest", "properties": { "clusterId": { - "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster id) of the cluster to set networking\npolicy. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to set networking policy. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "networkPolicy": { @@ -4517,11 +4706,11 @@ "description": "Required. Configuration options for the NetworkPolicy feature." }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4536,34 +4725,34 @@ "description": "Required. Autoscaling configuration for the node pool." }, "clusterId": { - "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster, node pool) of the node pool to set\nautoscaler settings. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool) of the node pool to set autoscaler settings. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "type": "string" }, "nodePoolId": { - "description": "Deprecated. The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the node pool to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, "type": "object" }, "SetNodePoolManagementRequest": { - "description": "SetNodePoolManagementRequest sets the node management properties of a node\npool.", + "description": "SetNodePoolManagementRequest sets the node management properties of a node pool.", "id": "SetNodePoolManagementRequest", "properties": { "clusterId": { - "description": "Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", "type": "string" }, "management": { @@ -4571,34 +4760,34 @@ "description": "Required. NodeManagement configuration for the node pool." }, "name": { - "description": "The name (project, location, cluster, node pool id) of the node pool to set\nmanagement properties. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node pool to set management properties. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "type": "string" }, "nodePoolId": { - "description": "Deprecated. The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the node pool to update. This field has been deprecated and replaced by the name field.", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, "type": "object" }, "SetNodePoolSizeRequest": { - "description": "SetNodePoolSizeRequest sets the size a node\npool.", + "description": "SetNodePoolSizeRequest sets the size a node pool.", "id": "SetNodePoolSizeRequest", "properties": { "clusterId": { - "description": "Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to update. 
This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster, node pool id) of the node pool to set\nsize.\nSpecified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node pool to set size. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "type": "string" }, "nodeCount": { @@ -4607,15 +4796,15 @@ "type": "integer" }, "nodePoolId": { - "description": "Deprecated. The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the node pool to update. This field has been deprecated and replaced by the name field.", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4626,11 +4815,11 @@ "id": "ShieldedInstanceConfig", "properties": { "enableIntegrityMonitoring": { - "description": "Defines whether the instance has integrity monitoring enabled.\n\nEnables monitoring and attestation of the boot integrity of the instance.\nThe attestation is performed against the integrity policy baseline. This\nbaseline is initially derived from the implicitly trusted boot image when\nthe instance is created.", + "description": "Defines whether the instance has integrity monitoring enabled. Enables monitoring and attestation of the boot integrity of the instance. The attestation is performed against the integrity policy baseline. This baseline is initially derived from the implicitly trusted boot image when the instance is created.", "type": "boolean" }, "enableSecureBoot": { - "description": "Defines whether the instance has Secure Boot enabled.\n\nSecure Boot helps ensure that the system only runs authentic software by\nverifying the digital signature of all boot components, and halting the\nboot process if signature verification fails.", + "description": "Defines whether the instance has Secure Boot enabled. 
Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails.", "type": "boolean" } }, @@ -4648,19 +4837,19 @@ "type": "object" }, "StartIPRotationRequest": { - "description": "StartIPRotationRequest creates a new IP for the cluster and then performs\na node upgrade on each node pool to point to the new IP.", + "description": "StartIPRotationRequest creates a new IP for the cluster and then performs a node upgrade on each node pool to point to the new IP.", "id": "StartIPRotationRequest", "properties": { "clusterId": { - "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster id) of the cluster to start IP\nrotation. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to start IP rotation. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "type": "string" }, "rotateCredentials": { @@ -4668,14 +4857,14 @@ "type": "boolean" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, "type": "object" }, "StatusCondition": { - "description": "StatusCondition describes why a cluster or a node pool has a certain status\n(e.g., ERROR or DEGRADED).", + "description": "StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).", "id": "StatusCondition", "properties": { "code": { @@ -4690,11 +4879,11 @@ ], "enumDescriptions": [ "UNKNOWN indicates a generic condition.", - "GCE_STOCKOUT indicates that Google Compute Engine resources are\ntemporarily unavailable.", - "GKE_SERVICE_ACCOUNT_DELETED indicates that the user deleted their robot\nservice account.", + "GCE_STOCKOUT indicates that Google Compute Engine resources are temporarily unavailable.", + "GKE_SERVICE_ACCOUNT_DELETED indicates that the user deleted their robot service account.", "Google Compute Engine quota was exceeded.", "Cluster state was manually changed by an SRE due to a system logic error.", - "Unable to perform an encrypt operation against the CloudKMS key used for\netcd level encryption.\nMore codes TBA" + "Unable to perform an encrypt operation against the CloudKMS key used for etcd level encryption. 
More codes TBA" ], "type": "string" }, @@ -4710,7 +4899,7 @@ "id": "TimeWindow", "properties": { "endTime": { - "description": "The time that the window ends. The end time should take place after the\nstart time.", + "description": "The time that the window ends. The end time should take place after the start time.", "format": "google-datetime", "type": "string" }, @@ -4727,15 +4916,15 @@ "id": "UpdateClusterRequest", "properties": { "clusterId": { - "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to update. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "update": { @@ -4743,7 +4932,7 @@ "description": "Required. A description of the update." }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4754,23 +4943,23 @@ "id": "UpdateMasterRequest", "properties": { "clusterId": { - "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "masterVersion": { - "description": "Required. The Kubernetes version to change the master to.\n\nUsers may specify either explicit versions offered by Kubernetes Engine or\nversion aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"-\": picks the default Kubernetes version", + "description": "Required. The Kubernetes version to change the master to. 
Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - \"latest\": picks the highest valid Kubernetes version - \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version - \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version - \"1.X.Y-gke.N\": picks an explicit Kubernetes version - \"-\": picks the default Kubernetes version", "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to update. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4781,7 +4970,7 @@ "id": "UpdateNodePoolRequest", "properties": { "clusterId": { - "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "imageType": { @@ -4789,26 +4978,26 @@ "type": "string" }, "locations": { - "description": "The desired list of Google Compute Engine\n[zones](https://cloud.google.com/compute/docs/zones#available) in which the\nnode pool's nodes should be located. Changing the locations for a node pool\nwill result in nodes being either created or removed from the node pool,\ndepending on whether locations are being added or removed.", + "description": "The desired list of Google Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available) in which the node pool's nodes should be located. Changing the locations for a node pool will result in nodes being either created or removed from the node pool, depending on whether locations are being added or removed.", "items": { "type": "string" }, "type": "array" }, "name": { - "description": "The name (project, location, cluster, node pool) of the node pool to\nupdate. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool) of the node pool to update. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "type": "string" }, "nodePoolId": { - "description": "Deprecated. The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The name of the node pool to upgrade. 
This field has been deprecated and replaced by the name field.", "type": "string" }, "nodeVersion": { - "description": "Required. The Kubernetes version to change the nodes to (typically an\nupgrade).\n\nUsers may specify either explicit versions offered by Kubernetes Engine or\nversion aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"-\": picks the Kubernetes master version", + "description": "Required. The Kubernetes version to change the nodes to (typically an upgrade). Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - \"latest\": picks the highest valid Kubernetes version - \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version - \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version - \"1.X.Y-gke.N\": picks an explicit Kubernetes version - \"-\": picks the Kubernetes master version", "type": "string" }, "projectId": { - "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "upgradeSettings": { @@ -4820,23 +5009,65 @@ "description": "The desired workload metadata config for the node pool." }, "zone": { - "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", + "type": "string" + } + }, + "type": "object" + }, + "UpgradeEvent": { + "description": "UpgradeEvent is a notification sent to customers by the cluster server when a resource is upgrading.", + "id": "UpgradeEvent", + "properties": { + "currentVersion": { + "description": "Required. The current version before the upgrade.", + "type": "string" + }, + "operation": { + "description": "Required. The operation associated with this upgrade.", + "type": "string" + }, + "operationStartTime": { + "description": "Required. The time when the operation was started.", + "format": "google-datetime", + "type": "string" + }, + "resource": { + "description": "Optional. Optional relative path to the resource. For example in node pool upgrades, the relative path of the node pool.", + "type": "string" + }, + "resourceType": { + "description": "Required. The resource type that is upgrading.", + "enum": [ + "UPGRADE_RESOURCE_TYPE_UNSPECIFIED", + "MASTER", + "NODE_POOL" + ], + "enumDescriptions": [ + "Default value. This shouldn't be used.", + "Master / control plane", + "Node pool" + ], + "type": "string" + }, + "targetVersion": { + "description": "Required. 
The target version for the upgrade.", "type": "string" } }, "type": "object" }, "UpgradeSettings": { - "description": "These upgrade settings control the level of parallelism and the level of\ndisruption caused by an upgrade.\n\nmaxUnavailable controls the number of nodes that can be simultaneously\nunavailable.\n\nmaxSurge controls the number of additional nodes that can be added to the\nnode pool temporarily for the time of the upgrade to increase the number of\navailable nodes.\n\n(maxUnavailable + maxSurge) determines the level of parallelism (how many\nnodes are being upgraded at the same time).\n\nNote: upgrades inevitably introduce some disruption since workloads need to\nbe moved from old nodes to new, upgraded ones. Even if maxUnavailable=0,\nthis holds true. (Disruption stays within the limits of\nPodDisruptionBudget, if it is configured.)\n\nConsider a hypothetical node pool with 5 nodes having maxSurge=2,\nmaxUnavailable=1. This means the upgrade process upgrades 3 nodes\nsimultaneously. It creates 2 additional (upgraded) nodes, then it brings\ndown 3 old (not yet upgraded) nodes at the same time. This ensures that\nthere are always at least 4 nodes available.", + "description": "These upgrade settings control the level of parallelism and the level of disruption caused by an upgrade. maxUnavailable controls the number of nodes that can be simultaneously unavailable. maxSurge controls the number of additional nodes that can be added to the node pool temporarily for the time of the upgrade to increase the number of available nodes. (maxUnavailable + maxSurge) determines the level of parallelism (how many nodes are being upgraded at the same time). Note: upgrades inevitably introduce some disruption since workloads need to be moved from old nodes to new, upgraded ones. Even if maxUnavailable=0, this holds true. (Disruption stays within the limits of PodDisruptionBudget, if it is configured.) Consider a hypothetical node pool with 5 nodes having maxSurge=2, maxUnavailable=1. This means the upgrade process upgrades 3 nodes simultaneously. It creates 2 additional (upgraded) nodes, then it brings down 3 old (not yet upgraded) nodes at the same time. This ensures that there are always at least 4 nodes available.", "id": "UpgradeSettings", "properties": { "maxSurge": { - "description": "The maximum number of nodes that can be created beyond the current size\nof the node pool during the upgrade process.", + "description": "The maximum number of nodes that can be created beyond the current size of the node pool during the upgrade process.", "format": "int32", "type": "integer" }, "maxUnavailable": { - "description": "The maximum number of nodes that can be simultaneously unavailable during\nthe upgrade process. A node is considered available if its status is\nReady.", + "description": "The maximum number of nodes that can be simultaneously unavailable during the upgrade process. 
A node is considered available if its status is Ready.", "format": "int32", "type": "integer" } @@ -4844,7 +5075,7 @@ "type": "object" }, "UsableSubnetwork": { - "description": "UsableSubnetwork resource returns the subnetwork name, its associated network\nand the primary CIDR range.", + "description": "UsableSubnetwork resource returns the subnetwork name, its associated network and the primary CIDR range.", "id": "UsableSubnetwork", "properties": { "ipCidrRange": { @@ -4852,7 +5083,7 @@ "type": "string" }, "network": { - "description": "Network Name.\nExample: projects/my-project/global/networks/my-network", + "description": "Network Name. Example: projects/my-project/global/networks/my-network", "type": "string" }, "secondaryIpRanges": { @@ -4863,11 +5094,11 @@ "type": "array" }, "statusMessage": { - "description": "A human readable status message representing the reasons for cases where\nthe caller cannot use the secondary ranges under the subnet. For example if\nthe secondary_ip_ranges is empty due to a permission issue, an insufficient\npermission message will be given by status_message.", + "description": "A human readable status message representing the reasons for cases where the caller cannot use the secondary ranges under the subnet. For example if the secondary_ip_ranges is empty due to a permission issue, an insufficient permission message will be given by status_message.", "type": "string" }, "subnetwork": { - "description": "Subnetwork Name.\nExample: projects/my-project/regions/us-central1/subnetworks/my-subnet", + "description": "Subnetwork Name. Example: projects/my-project/regions/us-central1/subnetworks/my-subnet", "type": "string" } }, @@ -4882,7 +5113,7 @@ "type": "string" }, "rangeName": { - "description": "The name associated with this subnetwork secondary range, used when adding\nan alias IP range to a VM instance.", + "description": "The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance.", "type": "string" }, "status": { @@ -4897,9 +5128,9 @@ "enumDescriptions": [ "UNKNOWN is the zero value of the Status enum. It's not a valid status.", "UNUSED denotes that this range is unclaimed by any cluster.", - "IN_USE_SERVICE denotes that this range is claimed by a cluster for\nservices. It cannot be used for other clusters.", - "IN_USE_SHAREABLE_POD denotes this range was created by the network admin\nand is currently claimed by a cluster for pods. It can only be used by\nother clusters as a pod range.", - "IN_USE_MANAGED_POD denotes this range was created by GKE and is claimed\nfor pods. It cannot be used for other clusters." + "IN_USE_SERVICE denotes that this range is claimed by a cluster for services. It cannot be used for other clusters.", + "IN_USE_SHAREABLE_POD denotes this range was created by the network admin and is currently claimed by a cluster for pods. It can only be used by other clusters as a pod range.", + "IN_USE_MANAGED_POD denotes this range was created by GKE and is claimed for pods. It cannot be used for other clusters." 
], "type": "string" } @@ -4907,7 +5138,7 @@ "type": "object" }, "VerticalPodAutoscaling": { - "description": "VerticalPodAutoscaling contains global, per-cluster information\nrequired by Vertical Pod Autoscaler to automatically adjust\nthe resources of pods controlled by it.", + "description": "VerticalPodAutoscaling contains global, per-cluster information required by Vertical Pod Autoscaler to automatically adjust the resources of pods controlled by it.", "id": "VerticalPodAutoscaling", "properties": { "enabled": { @@ -4918,7 +5149,7 @@ "type": "object" }, "WorkloadIdentityConfig": { - "description": "Configuration for the use of Kubernetes Service Accounts in GCP IAM\npolicies.", + "description": "Configuration for the use of Kubernetes Service Accounts in GCP IAM policies.", "id": "WorkloadIdentityConfig", "properties": { "workloadPool": { @@ -4929,11 +5160,11 @@ "type": "object" }, "WorkloadMetadataConfig": { - "description": "WorkloadMetadataConfig defines the metadata configuration to expose to\nworkloads on the node pool.", + "description": "WorkloadMetadataConfig defines the metadata configuration to expose to workloads on the node pool.", "id": "WorkloadMetadataConfig", "properties": { "mode": { - "description": "Mode is the configuration for how to expose metadata to workloads running\non the node pool.", + "description": "Mode is the configuration for how to expose metadata to workloads running on the node pool.", "enum": [ "MODE_UNSPECIFIED", "GCE_METADATA", @@ -4942,7 +5173,7 @@ "enumDescriptions": [ "Not set.", "Expose all Compute Engine metadata to pods.", - "Run the GKE Metadata Server on this node. The GKE Metadata Server exposes\na metadata API to workloads that is compatible with the V1 Compute\nMetadata APIs exposed by the Compute Engine and App Engine Metadata\nServers. This feature can only be enabled if Workload Identity is enabled\nat the cluster level." + "Run the GKE Metadata Server on this node. The GKE Metadata Server exposes a metadata API to workloads that is compatible with the V1 Compute Metadata APIs exposed by the Compute Engine and App Engine Metadata Servers. This feature can only be enabled if Workload Identity is enabled at the cluster level." ], "type": "string" } diff --git a/vendor/google.golang.org/api/container/v1/container-gen.go b/vendor/google.golang.org/api/container/v1/container-gen.go index 8281c0a4fa4..fbf1267da2f 100644 --- a/vendor/google.golang.org/api/container/v1/container-gen.go +++ b/vendor/google.golang.org/api/container/v1/container-gen.go @@ -75,6 +75,7 @@ const apiId = "container:v1" const apiName = "container" const apiVersion = "v1" const basePath = "https://container.googleapis.com/" +const mtlsBasePath = "https://container.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -90,6 +91,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -282,8 +284,7 @@ type AcceleratorConfig struct { AcceleratorCount int64 `json:"acceleratorCount,omitempty,string"` // AcceleratorType: The accelerator type resource name. 
List of - // supported - // accelerators + // supported accelerators // [here](https://cloud.google.com/compute/docs/gpus) AcceleratorType string `json:"acceleratorType,omitempty"` @@ -312,43 +313,42 @@ func (s *AcceleratorConfig) MarshalJSON() ([]byte, error) { } // AddonsConfig: Configuration for the addons that can be automatically -// spun up in the -// cluster, enabling additional functionality. +// spun up in the cluster, enabling additional functionality. type AddonsConfig struct { // CloudRunConfig: Configuration for the Cloud Run addon, which allows - // the user to use a - // managed Knative service. + // the user to use a managed Knative service. CloudRunConfig *CloudRunConfig `json:"cloudRunConfig,omitempty"` + // ConfigConnectorConfig: Configuration for the ConfigConnector add-on, + // a Kubernetes extension to manage hosted GCP services through the + // Kubernetes API + ConfigConnectorConfig *ConfigConnectorConfig `json:"configConnectorConfig,omitempty"` + + // DnsCacheConfig: Configuration for NodeLocalDNS, a dns cache running + // on cluster nodes + DnsCacheConfig *DnsCacheConfig `json:"dnsCacheConfig,omitempty"` + // HorizontalPodAutoscaling: Configuration for the horizontal pod - // autoscaling feature, which - // increases or decreases the number of replica pods a replication - // controller - // has based on the resource usage of the existing pods. + // autoscaling feature, which increases or decreases the number of + // replica pods a replication controller has based on the resource usage + // of the existing pods. HorizontalPodAutoscaling *HorizontalPodAutoscaling `json:"horizontalPodAutoscaling,omitempty"` // HttpLoadBalancing: Configuration for the HTTP (L7) load balancing - // controller addon, which - // makes it easy to set up HTTP load balancers for services in a - // cluster. + // controller addon, which makes it easy to set up HTTP load balancers + // for services in a cluster. HttpLoadBalancing *HttpLoadBalancing `json:"httpLoadBalancing,omitempty"` - // KubernetesDashboard: Configuration for the Kubernetes Dashboard. - // This addon is deprecated, and will be disabled in 1.15. It is - // recommended + // KubernetesDashboard: Configuration for the Kubernetes Dashboard. This + // addon is deprecated, and will be disabled in 1.15. It is recommended // to use the Cloud Console to manage and monitor your Kubernetes - // clusters, - // workloads and applications. For more information, - // see: - // https://cloud.google.com/kubernetes-engine/docs/concepts/dashboar - // ds + // clusters, workloads and applications. For more information, see: + // https://cloud.google.com/kubernetes-engine/docs/concepts/dashboards KubernetesDashboard *KubernetesDashboard `json:"kubernetesDashboard,omitempty"` // NetworkPolicyConfig: Configuration for NetworkPolicy. This only - // tracks whether the addon - // is enabled or not on the Master, it does not track whether network - // policy - // is enabled for the nodes. + // tracks whether the addon is enabled or not on the Master, it does not + // track whether network policy is enabled for the nodes. NetworkPolicyConfig *NetworkPolicyConfig `json:"networkPolicyConfig,omitempty"` // ForceSendFields is a list of field names (e.g. "CloudRunConfig") to @@ -378,14 +378,12 @@ func (s *AddonsConfig) MarshalJSON() ([]byte, error) { // AuthenticatorGroupsConfig: Configuration for returning group // information from authenticators. 
type AuthenticatorGroupsConfig struct { - // Enabled: Whether this cluster should return group membership - // lookups + // Enabled: Whether this cluster should return group membership lookups // during authentication using a group of security groups. Enabled bool `json:"enabled,omitempty"` // SecurityGroup: The name of the security group-of-groups to be used. - // Only relevant - // if enabled = true. + // Only relevant if enabled = true. SecurityGroup string `json:"securityGroup,omitempty"` // ForceSendFields is a list of field names (e.g. "Enabled") to @@ -412,19 +410,16 @@ func (s *AuthenticatorGroupsConfig) MarshalJSON() ([]byte, error) { } // AutoUpgradeOptions: AutoUpgradeOptions defines the set of options for -// the user to control how -// the Auto Upgrades will proceed. +// the user to control how the Auto Upgrades will proceed. type AutoUpgradeOptions struct { // AutoUpgradeStartTime: [Output only] This field is set when upgrades - // are about to commence - // with the approximate start time for the upgrades, - // in - // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + // are about to commence with the approximate start time for the + // upgrades, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text + // format. AutoUpgradeStartTime string `json:"autoUpgradeStartTime,omitempty"` // Description: [Output only] This field is set when upgrades are about - // to commence - // with the description of the upgrade. + // to commence with the description of the upgrade. Description string `json:"description,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -453,28 +448,55 @@ func (s *AutoUpgradeOptions) MarshalJSON() ([]byte, error) { } // AutoprovisioningNodePoolDefaults: AutoprovisioningNodePoolDefaults -// contains defaults for a node pool created -// by NAP. +// contains defaults for a node pool created by NAP. type AutoprovisioningNodePoolDefaults struct { + // BootDiskKmsKey: The Customer Managed Encryption Key used to encrypt + // the boot disk attached to each node in the node pool. This should be + // of the form + // projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cr + // yptoKeys/[KEY_NAME]. For more information about protecting resources + // with Cloud KMS Keys please see: + // https://cloud.google.com/compute/docs/disks/customer-managed-encryption + BootDiskKmsKey string `json:"bootDiskKmsKey,omitempty"` + + // DiskSizeGb: Size of the disk attached to each node, specified in GB. + // The smallest allowed disk size is 10GB. If unspecified, the default + // disk size is 100GB. + DiskSizeGb int64 `json:"diskSizeGb,omitempty"` + + // DiskType: Type of the disk attached to each node (e.g. 'pd-standard' + // or 'pd-ssd') If unspecified, the default disk type is 'pd-standard' + DiskType string `json:"diskType,omitempty"` + // Management: Specifies the node management options for NAP created // node-pools. Management *NodeManagement `json:"management,omitempty"` - // OauthScopes: Scopes that are used by NAP when creating node pools. If - // oauth_scopes are - // specified, service_account should be empty. + // MinCpuPlatform: Minimum CPU platform to be used for NAP created node + // pools. The instance may be scheduled on the specified or newer CPU + // platform. Applicable values are the friendly names of CPU platforms, + // such as minCpuPlatform: Intel Haswell or minCpuPlatform: Intel Sandy + // Bridge. 
For more information, read [how to specify min CPU + // platform](https://cloud.google.com/compute/docs/instances/specify-min- + // cpu-platform) To unset the min cpu platform field pass "automatic" as + // field value. + MinCpuPlatform string `json:"minCpuPlatform,omitempty"` + + // OauthScopes: Scopes that are used by NAP when creating node pools. OauthScopes []string `json:"oauthScopes,omitempty"` // ServiceAccount: The Google Cloud Platform Service Account to be used - // by the node VMs. If - // service_account is specified, scopes should be empty. + // by the node VMs. ServiceAccount string `json:"serviceAccount,omitempty"` + // ShieldedInstanceConfig: Shielded Instance options. + ShieldedInstanceConfig *ShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` + // UpgradeSettings: Specifies the upgrade settings for NAP created node // pools UpgradeSettings *UpgradeSettings `json:"upgradeSettings,omitempty"` - // ForceSendFields is a list of field names (e.g. "Management") to + // ForceSendFields is a list of field names (e.g. "BootDiskKmsKey") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -482,12 +504,13 @@ type AutoprovisioningNodePoolDefaults struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Management") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "BootDiskKmsKey") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -529,8 +552,7 @@ func (s *BigQueryDestination) MarshalJSON() ([]byte, error) { // BinaryAuthorization: Configuration for Binary Authorization. type BinaryAuthorization struct { // Enabled: Enable Binary Authorization for this cluster. If enabled, - // all container - // images will be validated by Binary Authorization. + // all container images will be validated by Binary Authorization. Enabled bool `json:"enabled,omitempty"` // ForceSendFields is a list of field names (e.g. "Enabled") to @@ -560,29 +582,23 @@ func (s *BinaryAuthorization) MarshalJSON() ([]byte, error) { // operation. type CancelOperationRequest struct { // Name: The name (project, location, operation id) of the operation to - // cancel. - // Specified in the format `projects/*/locations/*/operations/*`. + // cancel. Specified in the format + // `projects/*/locations/*/operations/*`. Name string `json:"name,omitempty"` - // OperationId: Deprecated. The server-assigned `name` of the - // operation. + // OperationId: Deprecated. The server-assigned `name` of the operation. // This field has been deprecated and replaced by the name field. OperationId string `json:"operationId,omitempty"` // ProjectId: Deprecated. 
The Google Developers Console [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // project number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // operation resides. This field has been deprecated and replaced by the - // name - // field. + // which the operation resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "Name") to @@ -676,6 +692,18 @@ type CloudRunConfig struct { // Disabled: Whether Cloud Run addon is enabled for this cluster. Disabled bool `json:"disabled,omitempty"` + // LoadBalancerType: Which load balancer type is installed for Cloud + // Run. + // + // Possible values: + // "LOAD_BALANCER_TYPE_UNSPECIFIED" - Load balancer type for Cloud Run + // is unspecified. + // "LOAD_BALANCER_TYPE_EXTERNAL" - Install external load balancer for + // Cloud Run. + // "LOAD_BALANCER_TYPE_INTERNAL" - Install internal load balancer for + // Cloud Run. + LoadBalancerType string `json:"loadBalancerType,omitempty"` + // ForceSendFields is a list of field names (e.g. "Disabled") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -716,19 +744,16 @@ type Cluster struct { BinaryAuthorization *BinaryAuthorization `json:"binaryAuthorization,omitempty"` // ClusterIpv4Cidr: The IP address range of the container pods in this - // cluster, - // in + // cluster, in // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // - // notation (e.g. `10.96.0.0/14`). Leave blank to have - // one automatically chosen or specify a `/14` block in `10.0.0.0/8`. + // notation (e.g. `10.96.0.0/14`). Leave blank to have one automatically + // chosen or specify a `/14` block in `10.0.0.0/8`. ClusterIpv4Cidr string `json:"clusterIpv4Cidr,omitempty"` // Conditions: Which conditions caused the current cluster state. Conditions []*StatusCondition `json:"conditions,omitempty"` - // CreateTime: [Output only] The time the cluster was created, - // in + // CreateTime: [Output only] The time the cluster was created, in // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. CreateTime string `json:"createTime,omitempty"` @@ -736,103 +761,76 @@ type Cluster struct { // the master endpoint. CurrentMasterVersion string `json:"currentMasterVersion,omitempty"` - // CurrentNodeCount: [Output only] The number of nodes currently in the - // cluster. Deprecated. - // Call Kubernetes API directly to retrieve node information. + // CurrentNodeCount: [Output only] The number of nodes currently in the + // cluster. Deprecated. Call Kubernetes API directly to retrieve node + // information. CurrentNodeCount int64 `json:"currentNodeCount,omitempty"` - // CurrentNodeVersion: [Output only] Deprecated, - // use - // [NodePools.version](https://cloud.google.com/kubernetes-engine/doc - // s/reference/rest/v1/projects.zones.clusters.nodePools) - // instead. The current version of the node software components. 
If they - // are + // CurrentNodeVersion: [Output only] Deprecated, use + // [NodePools.version](https://cloud.google.com/kubernetes-engine/docs/re + // ference/rest/v1/projects.locations.clusters.nodePools) instead. The + // current version of the node software components. If they are // currently at multiple versions because they're in the process of - // being - // upgraded, this reflects the minimum version of all nodes. + // being upgraded, this reflects the minimum version of all nodes. CurrentNodeVersion string `json:"currentNodeVersion,omitempty"` // DatabaseEncryption: Configuration of etcd encryption. DatabaseEncryption *DatabaseEncryption `json:"databaseEncryption,omitempty"` // DefaultMaxPodsConstraint: The default constraint on the maximum - // number of pods that can be run - // simultaneously on a node in the node pool of this cluster. Only - // honored - // if cluster created with IP Alias support. + // number of pods that can be run simultaneously on a node in the node + // pool of this cluster. Only honored if cluster created with IP Alias + // support. DefaultMaxPodsConstraint *MaxPodsConstraint `json:"defaultMaxPodsConstraint,omitempty"` // Description: An optional description of this cluster. Description string `json:"description,omitempty"` // EnableKubernetesAlpha: Kubernetes alpha features are enabled on this - // cluster. This includes alpha - // API groups (e.g. v1alpha1) and features that may not be production - // ready in - // the kubernetes version of the master and nodes. - // The cluster has no SLA for uptime and master/node upgrades are - // disabled. - // Alpha enabled clusters are automatically deleted thirty days - // after - // creation. + // cluster. This includes alpha API groups (e.g. v1alpha1) and features + // that may not be production ready in the kubernetes version of the + // master and nodes. The cluster has no SLA for uptime and master/node + // upgrades are disabled. Alpha enabled clusters are automatically + // deleted thirty days after creation. EnableKubernetesAlpha bool `json:"enableKubernetesAlpha,omitempty"` // EnableTpu: Enable the ability to use Cloud TPUs in this cluster. EnableTpu bool `json:"enableTpu,omitempty"` // Endpoint: [Output only] The IP address of this cluster's master - // endpoint. - // The endpoint can be accessed from the internet - // at - // `https://username:password@endpoint/`. - // - // See the `masterAuth` property of this resource for username - // and - // password information. + // endpoint. The endpoint can be accessed from the internet at + // `https://username:password@endpoint/`. See the `masterAuth` property + // of this resource for username and password information. Endpoint string `json:"endpoint,omitempty"` - // ExpireTime: [Output only] The time the cluster will be - // automatically + // ExpireTime: [Output only] The time the cluster will be automatically // deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text // format. ExpireTime string `json:"expireTime,omitempty"` // InitialClusterVersion: The initial Kubernetes version for this - // cluster. Valid versions are those - // found in validMasterVersions returned by getServerConfig. The - // version can - // be upgraded over time; such upgrades are reflected - // in - // currentMasterVersion and currentNodeVersion. 
- // - // Users may specify either explicit versions offered by - // Kubernetes Engine or version aliases, which have the following - // behavior: - // - // - "latest": picks the highest valid Kubernetes version - // - "1.X": picks the highest valid patch+gke.N patch in the 1.X - // version - // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - // - "1.X.Y-gke.N": picks an explicit Kubernetes version - // - "","-": picks the default Kubernetes version + // cluster. Valid versions are those found in validMasterVersions + // returned by getServerConfig. The version can be upgraded over time; + // such upgrades are reflected in currentMasterVersion and + // currentNodeVersion. Users may specify either explicit versions + // offered by Kubernetes Engine or version aliases, which have the + // following behavior: - "latest": picks the highest valid Kubernetes + // version - "1.X": picks the highest valid patch+gke.N patch in the 1.X + // version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y + // version - "1.X.Y-gke.N": picks an explicit Kubernetes version - + // "","-": picks the default Kubernetes version InitialClusterVersion string `json:"initialClusterVersion,omitempty"` // InitialNodeCount: The number of nodes to create in this cluster. You - // must ensure that your - // Compute Engine resource - // quota - // is sufficient for this number of instances. You must also have - // available - // firewall and routes quota. - // For requests, this field should only be used in lieu of a - // "node_pool" object, since this configuration (along with - // the - // "node_config") will be used to create a "NodePool" object with - // an + // must ensure that your Compute Engine [resource + // quota](https://cloud.google.com/compute/quotas) is sufficient for + // this number of instances. You must also have available firewall and + // routes quota. For requests, this field should only be used in lieu of + // a "node_pool" object, since this configuration (along with the + // "node_config") will be used to create a "NodePool" object with an // auto-generated name. Do not use this and a node_pool at the same - // time. - // - // This field is deprecated, use node_pool.initial_node_count instead. + // time. This field is deprecated, use node_pool.initial_node_count + // instead. InitialNodeCount int64 `json:"initialNodeCount,omitempty"` // InstanceGroupUrls: Deprecated. Use node_pools.instance_group_urls. @@ -848,49 +846,35 @@ type Cluster struct { // LegacyAbac: Configuration for the legacy ABAC authorization mode. LegacyAbac *LegacyAbac `json:"legacyAbac,omitempty"` - // Location: [Output only] The name of the Google Compute - // Engine - // [zone](https://cloud.google.com/compute/docs/regions-zones/regi - // ons-zones#available) - // or - // [region](https://cloud.google.com/compute/docs - // /regions-zones/regions-zones#available) - // in which the cluster resides. + // Location: [Output only] The name of the Google Compute Engine + // [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zon + // es#available) or + // [region](https://cloud.google.com/compute/docs/regions-zones/regions-z + // ones#available) in which the cluster resides. Location string `json:"location,omitempty"` - // Locations: The list of Google Compute - // Engine + // Locations: The list of Google Compute Engine // [zones](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster's nodes should be located. 
+ // which the cluster's nodes should be located. Locations []string `json:"locations,omitempty"` // LoggingService: The logging service the cluster should use to write - // logs. - // Currently available options: - // - // * `logging.googleapis.com/kubernetes` - The Cloud Logging - // service with a Kubernetes-native resource model - // * `logging.googleapis.com` - The legacy Cloud Logging service (no - // longer - // available as of GKE 1.15). - // * `none` - no logs will be exported from the cluster. - // - // If left as an empty string,`logging.googleapis.com/kubernetes` will - // be - // used for GKE 1.14+ or `logging.googleapis.com` for earlier versions. + // logs. Currently available options: * + // `logging.googleapis.com/kubernetes` - The Cloud Logging service with + // a Kubernetes-native resource model * `logging.googleapis.com` - The + // legacy Cloud Logging service (no longer available as of GKE 1.15). * + // `none` - no logs will be exported from the cluster. If left as an + // empty string,`logging.googleapis.com/kubernetes` will be used for GKE + // 1.14+ or `logging.googleapis.com` for earlier versions. LoggingService string `json:"loggingService,omitempty"` // MaintenancePolicy: Configure the maintenance policy for this cluster. MaintenancePolicy *MaintenancePolicy `json:"maintenancePolicy,omitempty"` // MasterAuth: The authentication information for accessing the master - // endpoint. - // If unspecified, the defaults are used: - // For clusters before v1.12, if master_auth is unspecified, `username` - // will - // be set to "admin", a random password will be generated, and a - // client + // endpoint. If unspecified, the defaults are used: For clusters before + // v1.12, if master_auth is unspecified, `username` will be set to + // "admin", a random password will be generated, and a client // certificate will be issued. MasterAuth *MasterAuth `json:"masterAuth,omitempty"` @@ -899,41 +883,27 @@ type Cluster struct { MasterAuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `json:"masterAuthorizedNetworksConfig,omitempty"` // MonitoringService: The monitoring service the cluster should use to - // write metrics. - // Currently available options: - // - // * "monitoring.googleapis.com/kubernetes" - The Cloud - // Monitoring - // service with a Kubernetes-native resource model - // * `monitoring.googleapis.com` - The legacy Cloud Monitoring service - // (no - // longer available as of GKE 1.15). - // * `none` - No metrics will be exported from the cluster. - // - // If left as an empty string,`monitoring.googleapis.com/kubernetes` - // will be - // used for GKE 1.14+ or `monitoring.googleapis.com` for earlier + // write metrics. Currently available options: * + // "monitoring.googleapis.com/kubernetes" - The Cloud Monitoring service + // with a Kubernetes-native resource model * `monitoring.googleapis.com` + // - The legacy Cloud Monitoring service (no longer available as of GKE + // 1.15). * `none` - No metrics will be exported from the cluster. If + // left as an empty string,`monitoring.googleapis.com/kubernetes` will + // be used for GKE 1.14+ or `monitoring.googleapis.com` for earlier // versions. MonitoringService string `json:"monitoringService,omitempty"` // Name: The name of this cluster. The name must be unique within this - // project - // and location (e.g. zone or region), and can be up to 40 characters - // with - // the following restrictions: - // - // * Lowercase letters, numbers, and hyphens only. - // * Must start with a letter. 
- // * Must end with a number or a letter. + // project and location (e.g. zone or region), and can be up to 40 + // characters with the following restrictions: * Lowercase letters, + // numbers, and hyphens only. * Must start with a letter. * Must end + // with a number or a letter. Name string `json:"name,omitempty"` - // Network: The name of the Google Compute - // Engine - // [network](https://cloud.google.com/compute/docs/networks-and-fi - // rewalls#networks) - // to which the cluster is connected. If left unspecified, the - // `default` - // network will be used. + // Network: The name of the Google Compute Engine + // [network](https://cloud.google.com/compute/docs/networks-and-firewalls + // #networks) to which the cluster is connected. If left unspecified, + // the `default` network will be used. Network string `json:"network,omitempty"` // NetworkConfig: Configuration for cluster networking. @@ -942,63 +912,51 @@ type Cluster struct { // NetworkPolicy: Configuration options for the NetworkPolicy feature. NetworkPolicy *NetworkPolicy `json:"networkPolicy,omitempty"` - // NodeConfig: Parameters used in creating the cluster's nodes. - // For requests, this field should only be used in lieu of a - // "node_pool" object, since this configuration (along with - // the + // NodeConfig: Parameters used in creating the cluster's nodes. For + // requests, this field should only be used in lieu of a "node_pool" + // object, since this configuration (along with the // "initial_node_count") will be used to create a "NodePool" object with - // an - // auto-generated name. Do not use this and a node_pool at the same - // time. - // For responses, this field will be populated with the node - // configuration of - // the first node pool. (For configuration of each node pool, - // see - // `node_pool.config`) - // - // If unspecified, the defaults are used. + // an auto-generated name. Do not use this and a node_pool at the same + // time. For responses, this field will be populated with the node + // configuration of the first node pool. (For configuration of each node + // pool, see `node_pool.config`) If unspecified, the defaults are used. // This field is deprecated, use node_pool.config instead. NodeConfig *NodeConfig `json:"nodeConfig,omitempty"` // NodeIpv4CidrSize: [Output only] The size of the address space on each - // node for hosting - // containers. This is provisioned from within the - // `container_ipv4_cidr` - // range. This field will only be set when cluster is in route-based - // network - // mode. + // node for hosting containers. This is provisioned from within the + // `container_ipv4_cidr` range. This field will only be set when cluster + // is in route-based network mode. NodeIpv4CidrSize int64 `json:"nodeIpv4CidrSize,omitempty"` - // NodePools: The node pools associated with this cluster. - // This field should not be set if "node_config" or "initial_node_count" - // are + // NodePools: The node pools associated with this cluster. This field + // should not be set if "node_config" or "initial_node_count" are // specified. NodePools []*NodePool `json:"nodePools,omitempty"` // PrivateClusterConfig: Configuration for private cluster. PrivateClusterConfig *PrivateClusterConfig `json:"privateClusterConfig,omitempty"` + // ReleaseChannel: Release channel configuration. + ReleaseChannel *ReleaseChannel `json:"releaseChannel,omitempty"` + // ResourceLabels: The resource labels for the cluster to use to - // annotate any related - // Google Compute Engine resources. 
+ // annotate any related Google Compute Engine resources. ResourceLabels map[string]string `json:"resourceLabels,omitempty"` // ResourceUsageExportConfig: Configuration for exporting resource - // usages. Resource usage export is - // disabled when this config is unspecified. + // usages. Resource usage export is disabled when this config is + // unspecified. ResourceUsageExportConfig *ResourceUsageExportConfig `json:"resourceUsageExportConfig,omitempty"` // SelfLink: [Output only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` // ServicesIpv4Cidr: [Output only] The IP address range of the - // Kubernetes services in - // this cluster, - // in + // Kubernetes services in this cluster, in // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // - // notation (e.g. `1.2.3.4/29`). Service addresses are - // typically put in the last `/16` from the container CIDR. + // notation (e.g. `1.2.3.4/29`). Service addresses are typically put in + // the last `/16` from the container CIDR. ServicesIpv4Cidr string `json:"servicesIpv4Cidr,omitempty"` // ShieldedNodes: Shielded Nodes configuration. @@ -1011,41 +969,32 @@ type Cluster struct { // "PROVISIONING" - The PROVISIONING state indicates the cluster is // being created. // "RUNNING" - The RUNNING state indicates the cluster has been - // created and is fully - // usable. + // created and is fully usable. // "RECONCILING" - The RECONCILING state indicates that some work is - // actively being done on - // the cluster, such as upgrading the master or node software. Details - // can - // be found in the `statusMessage` field. + // actively being done on the cluster, such as upgrading the master or + // node software. Details can be found in the `statusMessage` field. // "STOPPING" - The STOPPING state indicates the cluster is being // deleted. - // "ERROR" - The ERROR state indicates the cluster may be unusable. - // Details - // can be found in the `statusMessage` field. + // "ERROR" - The ERROR state indicates the cluster is unusable. It + // will be automatically deleted. Details can be found in the + // `statusMessage` field. // "DEGRADED" - The DEGRADED state indicates the cluster requires user - // action to restore - // full functionality. Details can be found in the `statusMessage` - // field. + // action to restore full functionality. Details can be found in the + // `statusMessage` field. Status string `json:"status,omitempty"` // StatusMessage: [Output only] Additional information about the current - // status of this - // cluster, if available. + // status of this cluster, if available. StatusMessage string `json:"statusMessage,omitempty"` - // Subnetwork: The name of the Google Compute - // Engine - // [subnetwork](https://cloud.google.com/compute/docs/subnetworks) - // to which - // the cluster is connected. + // Subnetwork: The name of the Google Compute Engine + // [subnetwork](https://cloud.google.com/compute/docs/subnetworks) to + // which the cluster is connected. Subnetwork string `json:"subnetwork,omitempty"` // TpuIpv4CidrBlock: [Output only] The IP address range of the Cloud - // TPUs in this cluster, - // in + // TPUs in this cluster, in // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // // notation (e.g. `1.2.3.4/29`). 
TpuIpv4CidrBlock string `json:"tpuIpv4CidrBlock,omitempty"` @@ -1054,15 +1003,13 @@ type Cluster struct { VerticalPodAutoscaling *VerticalPodAutoscaling `json:"verticalPodAutoscaling,omitempty"` // WorkloadIdentityConfig: Configuration for the use of Kubernetes - // Service Accounts in GCP IAM - // policies. + // Service Accounts in GCP IAM policies. WorkloadIdentityConfig *WorkloadIdentityConfig `json:"workloadIdentityConfig,omitempty"` - // Zone: [Output only] The name of the Google Compute - // Engine + // Zone: [Output only] The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field is deprecated, use location instead. + // which the cluster resides. This field is deprecated, use location + // instead. Zone string `json:"zone,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1093,21 +1040,17 @@ func (s *Cluster) MarshalJSON() ([]byte, error) { } // ClusterAutoscaling: ClusterAutoscaling contains global, per-cluster -// information -// required by Cluster Autoscaler to automatically adjust -// the size of the cluster and create/delete -// node pools based on the current needs. +// information required by Cluster Autoscaler to automatically adjust +// the size of the cluster and create/delete node pools based on the +// current needs. type ClusterAutoscaling struct { - // AutoprovisioningLocations: The list of Google Compute - // Engine + // AutoprovisioningLocations: The list of Google Compute Engine // [zones](https://cloud.google.com/compute/docs/zones#available) in - // which the - // NodePool's nodes can be created by NAP. + // which the NodePool's nodes can be created by NAP. AutoprovisioningLocations []string `json:"autoprovisioningLocations,omitempty"` // AutoprovisioningNodePoolDefaults: AutoprovisioningNodePoolDefaults - // contains defaults for a node pool - // created by NAP. + // contains defaults for a node pool created by NAP. AutoprovisioningNodePoolDefaults *AutoprovisioningNodePoolDefaults `json:"autoprovisioningNodePoolDefaults,omitempty"` // EnableNodeAutoprovisioning: Enables automatic node pool creation and @@ -1115,8 +1058,7 @@ type ClusterAutoscaling struct { EnableNodeAutoprovisioning bool `json:"enableNodeAutoprovisioning,omitempty"` // ResourceLimits: Contains global constraints regarding minimum and - // maximum - // amount of resources in the cluster. + // maximum amount of resources in the cluster. ResourceLimits []*ResourceLimit `json:"resourceLimits,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1145,10 +1087,8 @@ func (s *ClusterAutoscaling) MarshalJSON() ([]byte, error) { } // ClusterUpdate: ClusterUpdate describes an update to the cluster. -// Exactly one update can -// be applied to a cluster with each request, so at most one field can -// be -// provided. +// Exactly one update can be applied to a cluster with each request, so +// at most one field can be provided. type ClusterUpdate struct { // DesiredAddonsConfig: Configurations for the various addons available // to run in the cluster. @@ -1164,112 +1104,90 @@ type ClusterUpdate struct { // DesiredDatabaseEncryption: Configuration of etcd encryption. DesiredDatabaseEncryption *DatabaseEncryption `json:"desiredDatabaseEncryption,omitempty"` - // DesiredImageType: The desired image type for the node pool. - // NOTE: Set the "desired_node_pool" field as well. 
+ // DesiredDefaultSnatStatus: The desired status of whether to disable + // default sNAT for this cluster. + DesiredDefaultSnatStatus *DefaultSnatStatus `json:"desiredDefaultSnatStatus,omitempty"` + + // DesiredImageType: The desired image type for the node pool. NOTE: Set + // the "desired_node_pool" field as well. DesiredImageType string `json:"desiredImageType,omitempty"` // DesiredIntraNodeVisibilityConfig: The desired config of Intra-node // visibility. DesiredIntraNodeVisibilityConfig *IntraNodeVisibilityConfig `json:"desiredIntraNodeVisibilityConfig,omitempty"` - // DesiredLocations: The desired list of Google Compute - // Engine + // DesiredLocations: The desired list of Google Compute Engine // [zones](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster's nodes should be located. Changing the locations a cluster - // is in - // will result in nodes being either created or removed from the - // cluster, - // depending on whether locations are being added or removed. - // - // This list must always include the cluster's primary zone. + // which the cluster's nodes should be located. Changing the locations a + // cluster is in will result in nodes being either created or removed + // from the cluster, depending on whether locations are being added or + // removed. This list must always include the cluster's primary zone. DesiredLocations []string `json:"desiredLocations,omitempty"` // DesiredLoggingService: The logging service the cluster should use to - // write logs. - // Currently available options: - // - // * `logging.googleapis.com/kubernetes` - The Cloud Logging - // service with a Kubernetes-native resource model - // * `logging.googleapis.com` - The legacy Cloud Logging service (no - // longer - // available as of GKE 1.15). - // * `none` - no logs will be exported from the cluster. - // - // If left as an empty string,`logging.googleapis.com/kubernetes` will - // be - // used for GKE 1.14+ or `logging.googleapis.com` for earlier versions. + // write logs. Currently available options: * + // `logging.googleapis.com/kubernetes` - The Cloud Logging service with + // a Kubernetes-native resource model * `logging.googleapis.com` - The + // legacy Cloud Logging service (no longer available as of GKE 1.15). * + // `none` - no logs will be exported from the cluster. If left as an + // empty string,`logging.googleapis.com/kubernetes` will be used for GKE + // 1.14+ or `logging.googleapis.com` for earlier versions. DesiredLoggingService string `json:"desiredLoggingService,omitempty"` // DesiredMasterAuthorizedNetworksConfig: The desired configuration // options for master authorized networks feature. DesiredMasterAuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `json:"desiredMasterAuthorizedNetworksConfig,omitempty"` - // DesiredMasterVersion: The Kubernetes version to change the master - // to. - // - // Users may specify either explicit versions offered by - // Kubernetes Engine or version aliases, which have the following - // behavior: - // - // - "latest": picks the highest valid Kubernetes version - // - "1.X": picks the highest valid patch+gke.N patch in the 1.X - // version - // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - // - "1.X.Y-gke.N": picks an explicit Kubernetes version - // - "-": picks the default Kubernetes version + // DesiredMasterVersion: The Kubernetes version to change the master to. 
+ // Users may specify either explicit versions offered by Kubernetes + // Engine or version aliases, which have the following behavior: - + // "latest": picks the highest valid Kubernetes version - "1.X": picks + // the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": + // picks the highest valid gke.N patch in the 1.X.Y version - + // "1.X.Y-gke.N": picks an explicit Kubernetes version - "-": picks the + // default Kubernetes version DesiredMasterVersion string `json:"desiredMasterVersion,omitempty"` // DesiredMonitoringService: The monitoring service the cluster should - // use to write metrics. - // Currently available options: - // - // * "monitoring.googleapis.com/kubernetes" - The Cloud - // Monitoring - // service with a Kubernetes-native resource model - // * `monitoring.googleapis.com` - The legacy Cloud Monitoring service - // (no - // longer available as of GKE 1.15). - // * `none` - No metrics will be exported from the cluster. - // - // If left as an empty string,`monitoring.googleapis.com/kubernetes` - // will be - // used for GKE 1.14+ or `monitoring.googleapis.com` for earlier + // use to write metrics. Currently available options: * + // "monitoring.googleapis.com/kubernetes" - The Cloud Monitoring service + // with a Kubernetes-native resource model * `monitoring.googleapis.com` + // - The legacy Cloud Monitoring service (no longer available as of GKE + // 1.15). * `none` - No metrics will be exported from the cluster. If + // left as an empty string,`monitoring.googleapis.com/kubernetes` will + // be used for GKE 1.14+ or `monitoring.googleapis.com` for earlier // versions. DesiredMonitoringService string `json:"desiredMonitoringService,omitempty"` // DesiredNodePoolAutoscaling: Autoscaler configuration for the node - // pool specified in - // desired_node_pool_id. If there is only one pool in the - // cluster and desired_node_pool_id is not provided then - // the change applies to that single node pool. + // pool specified in desired_node_pool_id. If there is only one pool in + // the cluster and desired_node_pool_id is not provided then the change + // applies to that single node pool. DesiredNodePoolAutoscaling *NodePoolAutoscaling `json:"desiredNodePoolAutoscaling,omitempty"` // DesiredNodePoolId: The node pool to be upgraded. This field is - // mandatory if - // "desired_node_version", "desired_image_family" - // or + // mandatory if "desired_node_version", "desired_image_family" or // "desired_node_pool_autoscaling" is specified and there is more than - // one - // node pool on the cluster. + // one node pool on the cluster. DesiredNodePoolId string `json:"desiredNodePoolId,omitempty"` // DesiredNodeVersion: The Kubernetes version to change the nodes to - // (typically an - // upgrade). - // - // Users may specify either explicit versions offered by - // Kubernetes Engine or version aliases, which have the following - // behavior: - // - // - "latest": picks the highest valid Kubernetes version - // - "1.X": picks the highest valid patch+gke.N patch in the 1.X - // version - // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - // - "1.X.Y-gke.N": picks an explicit Kubernetes version - // - "-": picks the Kubernetes master version + // (typically an upgrade). 
Users may specify either explicit versions + // offered by Kubernetes Engine or version aliases, which have the + // following behavior: - "latest": picks the highest valid Kubernetes + // version - "1.X": picks the highest valid patch+gke.N patch in the 1.X + // version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y + // version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "-": + // picks the Kubernetes master version DesiredNodeVersion string `json:"desiredNodeVersion,omitempty"` + // DesiredPrivateClusterConfig: The desired private cluster + // configuration. + DesiredPrivateClusterConfig *PrivateClusterConfig `json:"desiredPrivateClusterConfig,omitempty"` + + // DesiredReleaseChannel: The desired release channel configuration. + DesiredReleaseChannel *ReleaseChannel `json:"desiredReleaseChannel,omitempty"` + // DesiredResourceUsageExportConfig: The desired configuration for // exporting resource usage. DesiredResourceUsageExportConfig *ResourceUsageExportConfig `json:"desiredResourceUsageExportConfig,omitempty"` @@ -1311,30 +1229,25 @@ func (s *ClusterUpdate) MarshalJSON() ([]byte, error) { // CompleteIPRotationRequest: CompleteIPRotationRequest moves the // cluster master back into single-IP mode. type CompleteIPRotationRequest struct { - // ClusterId: Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the name field. + // ClusterId: Deprecated. The name of the cluster. This field has been + // deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Name: The name (project, location, cluster id) of the cluster to - // complete IP - // rotation. Specified in the format + // complete IP rotation. Specified in the format // `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or // project - // number](https://developers.google.com/console/help/new/#projec - // tnumber). - // This field has been deprecated and replaced by the name field. + // number](https://developers.google.com/console/help/new/#projectnumber) + // . This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -1360,14 +1273,41 @@ func (s *CompleteIPRotationRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ConfigConnectorConfig: Configuration options for the Config Connector +// add-on. +type ConfigConnectorConfig struct { + // Enabled: Whether Cloud Connector is enabled for this cluster. + Enabled bool `json:"enabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ConfigConnectorConfig) MarshalJSON() ([]byte, error) { + type NoMethod ConfigConnectorConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ConsumptionMeteringConfig: Parameters for controlling consumption // metering. type ConsumptionMeteringConfig struct { // Enabled: Whether to enable consumption metering for this cluster. If - // enabled, a - // second BigQuery table will be created to hold resource - // consumption - // records. + // enabled, a second BigQuery table will be created to hold resource + // consumption records. Enabled bool `json:"enabled,omitempty"` // ForceSendFields is a list of field names (e.g. "Enabled") to @@ -1395,31 +1335,24 @@ func (s *ConsumptionMeteringConfig) MarshalJSON() ([]byte, error) { // CreateClusterRequest: CreateClusterRequest creates a cluster. type CreateClusterRequest struct { - // Cluster: Required. A - // [cluster - // resource](https://cloud.google.com/container-engine/reference - // /rest/v1/projects.zones.clusters) + // Cluster: Required. A [cluster + // resource](https://cloud.google.com/container-engine/reference/rest/v1/ + // projects.locations.clusters) Cluster *Cluster `json:"cluster,omitempty"` // Parent: The parent (project and location) where the cluster will be - // created. - // Specified in the format `projects/*/locations/*`. + // created. Specified in the format `projects/*/locations/*`. Parent string `json:"parent,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the parent field. + // project number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the parent field. ProjectId string `json:"projectId,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // parent - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the parent field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "Cluster") to @@ -1448,33 +1381,28 @@ func (s *CreateClusterRequest) MarshalJSON() ([]byte, error) { // CreateNodePoolRequest: CreateNodePoolRequest creates a node pool for // a cluster. type CreateNodePoolRequest struct { - // ClusterId: Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the parent field. + // ClusterId: Deprecated. The name of the cluster. This field has been + // deprecated and replaced by the parent field. ClusterId string `json:"clusterId,omitempty"` // NodePool: Required. The node pool to create. 
NodePool *NodePool `json:"nodePool,omitempty"` // Parent: The parent (project, location, cluster id) where the node - // pool will be - // created. Specified in the format + // pool will be created. Specified in the format // `projects/*/locations/*/clusters/*`. Parent string `json:"parent,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or // project - // number](https://developers.google.com/console/help/new/#projec - // tnumber). - // This field has been deprecated and replaced by the parent field. + // number](https://developers.google.com/console/help/new/#projectnumber) + // . This field has been deprecated and replaced by the parent field. ProjectId string `json:"projectId,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // parent - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the parent field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -1504,18 +1432,15 @@ func (s *CreateNodePoolRequest) MarshalJSON() ([]byte, error) { // operations. type DailyMaintenanceWindow struct { // Duration: [Output only] Duration of the time window, automatically - // chosen to be - // smallest possible in the given scenario. - // Duration will be in - // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) - // format "PTnHnMnS". + // chosen to be smallest possible in the given scenario. Duration will + // be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format + // "PTnHnMnS". Duration string `json:"duration,omitempty"` // StartTime: Time within the maintenance window to start the - // maintenance operations. - // Time format should be in - // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) - // format "HH:MM", where HH : [00-23] and MM : [00-59] GMT. + // maintenance operations. Time format should be in + // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format "HH:MM", where + // HH : [00-23] and MM : [00-59] GMT. StartTime string `json:"startTime,omitempty"` // ForceSendFields is a list of field names (e.g. "Duration") to @@ -1544,8 +1469,7 @@ func (s *DailyMaintenanceWindow) MarshalJSON() ([]byte, error) { // DatabaseEncryption: Configuration of etcd encryption. type DatabaseEncryption struct { // KeyName: Name of CloudKMS key to use for the encryption of secrets in - // etcd. - // Ex. + // etcd. Ex. // projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-ke // y KeyName string `json:"keyName,omitempty"` @@ -1556,8 +1480,8 @@ type DatabaseEncryption struct { // "UNKNOWN" - Should never be set // "ENCRYPTED" - Secrets in etcd are encrypted. // "DECRYPTED" - Secrets in etcd are stored in plain text (at etcd - // level) - this is - // unrelated to Compute Engine level full disk encryption. + // level) - this is unrelated to Compute Engine level full disk + // encryption. State string `json:"state,omitempty"` // ForceSendFields is a list of field names (e.g. "KeyName") to @@ -1583,18 +1507,69 @@ func (s *DatabaseEncryption) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DefaultSnatStatus: DefaultSnatStatus contains the desired state of +// whether default sNAT should be disabled on the cluster. 
+type DefaultSnatStatus struct { + // Disabled: Disables cluster default sNAT rules. + Disabled bool `json:"disabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Disabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Disabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DefaultSnatStatus) MarshalJSON() ([]byte, error) { + type NoMethod DefaultSnatStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DnsCacheConfig: Configuration for NodeLocal DNSCache +type DnsCacheConfig struct { + // Enabled: Whether NodeLocal DNSCache is enabled for this cluster. + Enabled bool `json:"enabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DnsCacheConfig) MarshalJSON() ([]byte, error) { + type NoMethod DnsCacheConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. 
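Editorial aside (not part of the vendored diff): the hunk above regenerates the ClusterUpdate message and introduces the DefaultSnatStatus, DnsCacheConfig, and ConfigConnectorConfig types. The short Go sketch below illustrates how these generated structs might be populated and serialized; it assumes the types live in the regenerated google.golang.org/api/container/v1 package as vendored here, and it only touches fields visible in this diff (DesiredDefaultSnatStatus, Disabled, ForceSendFields).

package main

import (
	"encoding/json"
	"fmt"

	container "google.golang.org/api/container/v1" // assumed vendored import path
)

func main() {
	// Keep default sNAT enabled. Disabled == false is the zero value and the
	// field is tagged `omitempty`, so the generated MarshalJSON would normally
	// drop it from the request body; naming it in ForceSendFields (as the
	// generated comments above describe) forces it onto the wire anyway.
	snat := &container.DefaultSnatStatus{
		Disabled:        false,
		ForceSendFields: []string{"Disabled"},
	}

	// DesiredDefaultSnatStatus is one of the ClusterUpdate fields added in
	// this hunk; it carries the desired default sNAT state for an update call.
	update := &container.ClusterUpdate{
		DesiredDefaultSnatStatus: snat,
	}

	body, err := json.Marshal(update)
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	// Expected to print roughly:
	//   {"desiredDefaultSnatStatus":{"disabled":false}}
	fmt.Println(string(body))
}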
@@ -1605,13 +1580,11 @@ type Empty struct { // Key Set as specififed in rfc 7517 type GetJSONWebKeysResponse struct { // CacheHeader: OnePlatform automatically extracts this field and uses - // it to set the HTTP - // Cache-Control header. + // it to set the HTTP Cache-Control header. CacheHeader *HttpCacheControlResponseHeader `json:"cacheHeader,omitempty"` // Keys: The public component of the keys used by the cluster to sign - // token - // requests. + // token requests. Keys []*Jwk `json:"keys,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1642,12 +1615,11 @@ func (s *GetJSONWebKeysResponse) MarshalJSON() ([]byte, error) { } // GetOpenIDConfigResponse: GetOpenIDConfigResponse is an OIDC discovery -// document for the cluster. -// See the OpenID Connect Discovery 1.0 specification for details. +// document for the cluster. See the OpenID Connect Discovery 1.0 +// specification for details. type GetOpenIDConfigResponse struct { // CacheHeader: OnePlatform automatically extracts this field and uses - // it to set the HTTP - // Cache-Control header. + // it to set the HTTP Cache-Control header. CacheHeader *HttpCacheControlResponseHeader `json:"cacheHeader,omitempty"` // ClaimsSupported: Supported claims. @@ -1700,16 +1672,13 @@ func (s *GetOpenIDConfigResponse) MarshalJSON() ([]byte, error) { } // HorizontalPodAutoscaling: Configuration options for the horizontal -// pod autoscaling feature, which -// increases or decreases the number of replica pods a replication -// controller -// has based on the resource usage of the existing pods. +// pod autoscaling feature, which increases or decreases the number of +// replica pods a replication controller has based on the resource usage +// of the existing pods. type HorizontalPodAutoscaling struct { // Disabled: Whether the Horizontal Pod Autoscaling feature is enabled - // in the cluster. - // When enabled, it ensures that metrics are collected into - // Stackdriver - // Monitoring. + // in the cluster. When enabled, it ensures that metrics are collected + // into Stackdriver Monitoring. Disabled bool `json:"disabled,omitempty"` // ForceSendFields is a list of field names (e.g. "Disabled") to @@ -1771,15 +1740,12 @@ func (s *HttpCacheControlResponseHeader) MarshalJSON() ([]byte, error) { } // HttpLoadBalancing: Configuration options for the HTTP (L7) load -// balancing controller addon, -// which makes it easy to set up HTTP load balancers for services in a -// cluster. +// balancing controller addon, which makes it easy to set up HTTP load +// balancers for services in a cluster. type HttpLoadBalancing struct { // Disabled: Whether the HTTP Load Balancing controller is enabled in - // the cluster. - // When enabled, it runs a small pod in the cluster that manages the - // load - // balancers. + // the cluster. When enabled, it runs a small pod in the cluster that + // manages the load balancers. Disabled bool `json:"disabled,omitempty"` // ForceSendFields is a list of field names (e.g. "Disabled") to @@ -1813,68 +1779,41 @@ type IPAllocationPolicy struct { ClusterIpv4Cidr string `json:"clusterIpv4Cidr,omitempty"` // ClusterIpv4CidrBlock: The IP address range for the cluster pod IPs. - // If this field is set, then - // `cluster.cluster_ipv4_cidr` must be left blank. - // - // This field is only applicable when `use_ip_aliases` is true. - // - // Set to blank to have a range chosen with the default size. - // - // Set to /netmask (e.g. `/14`) to have a range chosen with a - // specific - // netmask. 
- // - // Set to - // a + // If this field is set, then `cluster.cluster_ipv4_cidr` must be left + // blank. This field is only applicable when `use_ip_aliases` is true. + // Set to blank to have a range chosen with the default size. Set to + // /netmask (e.g. `/14`) to have a range chosen with a specific netmask. + // Set to a // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks - // (e.g. - // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific - // range - // to use. + // (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a + // specific range to use. ClusterIpv4CidrBlock string `json:"clusterIpv4CidrBlock,omitempty"` // ClusterSecondaryRangeName: The name of the secondary range to be used - // for the cluster CIDR - // block. The secondary range will be used for pod IP - // addresses. This must be an existing secondary range associated - // with the cluster subnetwork. - // - // This field is only applicable with use_ip_aliases is true - // and - // create_subnetwork is false. + // for the cluster CIDR block. The secondary range will be used for pod + // IP addresses. This must be an existing secondary range associated + // with the cluster subnetwork. This field is only applicable with + // use_ip_aliases is true and create_subnetwork is false. ClusterSecondaryRangeName string `json:"clusterSecondaryRangeName,omitempty"` // CreateSubnetwork: Whether a new subnetwork will be created - // automatically for the cluster. - // - // This field is only applicable when `use_ip_aliases` is true. + // automatically for the cluster. This field is only applicable when + // `use_ip_aliases` is true. CreateSubnetwork bool `json:"createSubnetwork,omitempty"` // NodeIpv4Cidr: This field is deprecated, use node_ipv4_cidr_block. NodeIpv4Cidr string `json:"nodeIpv4Cidr,omitempty"` // NodeIpv4CidrBlock: The IP address range of the instance IPs in this - // cluster. - // - // This is applicable only if `create_subnetwork` is true. - // - // Set to blank to have a range chosen with the default size. - // - // Set to /netmask (e.g. `/14`) to have a range chosen with a - // specific - // netmask. - // - // Set to - // a + // cluster. This is applicable only if `create_subnetwork` is true. Set + // to blank to have a range chosen with the default size. Set to + // /netmask (e.g. `/14`) to have a range chosen with a specific netmask. + // Set to a // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks - // (e.g. - // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific - // range - // to use. + // (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a + // specific range to use. NodeIpv4CidrBlock string `json:"nodeIpv4CidrBlock,omitempty"` // ServicesIpv4Cidr: This field is deprecated, use @@ -1882,73 +1821,53 @@ type IPAllocationPolicy struct { ServicesIpv4Cidr string `json:"servicesIpv4Cidr,omitempty"` // ServicesIpv4CidrBlock: The IP address range of the services IPs in - // this cluster. If blank, a range - // will be automatically chosen with the default size. - // - // This field is only applicable when `use_ip_aliases` is true. - // - // Set to blank to have a range chosen with the default size. - // - // Set to /netmask (e.g. `/14`) to have a range chosen with a - // specific - // netmask. - // - // Set to - // a + // this cluster. 
If blank, a range will be automatically chosen with the + // default size. This field is only applicable when `use_ip_aliases` is + // true. Set to blank to have a range chosen with the default size. Set + // to /netmask (e.g. `/14`) to have a range chosen with a specific + // netmask. Set to a // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks - // (e.g. - // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific - // range - // to use. + // (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a + // specific range to use. ServicesIpv4CidrBlock string `json:"servicesIpv4CidrBlock,omitempty"` // ServicesSecondaryRangeName: The name of the secondary range to be - // used as for the services - // CIDR block. The secondary range will be used for service - // ClusterIPs. This must be an existing secondary range associated - // with the cluster subnetwork. - // - // This field is only applicable with use_ip_aliases is true - // and - // create_subnetwork is false. + // used as for the services CIDR block. The secondary range will be used + // for service ClusterIPs. This must be an existing secondary range + // associated with the cluster subnetwork. This field is only applicable + // with use_ip_aliases is true and create_subnetwork is false. ServicesSecondaryRangeName string `json:"servicesSecondaryRangeName,omitempty"` // SubnetworkName: A custom subnetwork name to be used if - // `create_subnetwork` is true. If - // this field is empty, then an automatic name will be chosen for the - // new - // subnetwork. + // `create_subnetwork` is true. If this field is empty, then an + // automatic name will be chosen for the new subnetwork. SubnetworkName string `json:"subnetworkName,omitempty"` // TpuIpv4CidrBlock: The IP address range of the Cloud TPUs in this - // cluster. If unspecified, a - // range will be automatically chosen with the default size. - // - // This field is only applicable when `use_ip_aliases` is true. - // - // If unspecified, the range will use the default size. - // - // Set to /netmask (e.g. `/14`) to have a range chosen with a - // specific - // netmask. - // - // Set to - // a + // cluster. If unspecified, a range will be automatically chosen with + // the default size. This field is only applicable when `use_ip_aliases` + // is true. If unspecified, the range will use the default size. Set to + // /netmask (e.g. `/14`) to have a range chosen with a specific netmask. + // Set to a // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks - // (e.g. - // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific - // range - // to use. + // (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a + // specific range to use. TpuIpv4CidrBlock string `json:"tpuIpv4CidrBlock,omitempty"` // UseIpAliases: Whether alias IPs will be used for pod IPs in the - // cluster. + // cluster. This is used in conjunction with use_routes. It cannot be + // true if use_routes is true. If both use_ip_aliases and use_routes are + // false, then the server picks the default IP allocation mode UseIpAliases bool `json:"useIpAliases,omitempty"` + // UseRoutes: Whether routes will be used for pod IPs in the cluster. + // This is used in conjunction with use_ip_aliases. It cannot be true if + // use_ip_aliases is true. 
If both use_ip_aliases and use_routes are + // false, then the server picks the default IP allocation mode + UseRoutes bool `json:"useRoutes,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClusterIpv4Cidr") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -1974,8 +1893,7 @@ func (s *IPAllocationPolicy) MarshalJSON() ([]byte, error) { } // IntraNodeVisibilityConfig: IntraNodeVisibilityConfig contains the -// desired config of the intra-node -// visibility on this cluster. +// desired config of the intra-node visibility on this cluster. type IntraNodeVisibilityConfig struct { // Enabled: Enables intra node visibility for this cluster. Enabled bool `json:"enabled,omitempty"` @@ -2085,16 +2003,12 @@ func (s *KubernetesDashboard) MarshalJSON() ([]byte, error) { } // LegacyAbac: Configuration for the legacy Attribute Based Access -// Control authorization -// mode. +// Control authorization mode. type LegacyAbac struct { // Enabled: Whether the ABAC authorizer is enabled for this cluster. - // When enabled, - // identities in the system, including service accounts, nodes, - // and - // controllers, will have statically granted permissions beyond - // those - // provided by the RBAC configuration or IAM. + // When enabled, identities in the system, including service accounts, + // nodes, and controllers, will have statically granted permissions + // beyond those provided by the RBAC configuration or IAM. Enabled bool `json:"enabled,omitempty"` // ForceSendFields is a list of field names (e.g. "Enabled") to @@ -2123,14 +2037,12 @@ func (s *LegacyAbac) MarshalJSON() ([]byte, error) { // ListClustersResponse: ListClustersResponse is the result of // ListClustersRequest. type ListClustersResponse struct { - // Clusters: A list of clusters in the project in the specified zone, - // or + // Clusters: A list of clusters in the project in the specified zone, or // across all ones. Clusters []*Cluster `json:"clusters,omitempty"` // MissingZones: If any zones are listed here, the list of clusters - // returned - // may be missing those zones. + // returned may be missing those zones. MissingZones []string `json:"missingZones,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2197,8 +2109,7 @@ func (s *ListNodePoolsResponse) MarshalJSON() ([]byte, error) { // ListOperationsRequest. type ListOperationsResponse struct { // MissingZones: If any zones are listed here, the list of operations - // returned - // may be missing the operations from those zones. + // returned may be missing the operations from those zones. MissingZones []string `json:"missingZones,omitempty"` // Operations: A list of operations in the project in the specified @@ -2233,17 +2144,13 @@ func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { } // ListUsableSubnetworksResponse: ListUsableSubnetworksResponse is the -// response of -// ListUsableSubnetworksRequest. +// response of ListUsableSubnetworksRequest. type ListUsableSubnetworksResponse struct { // NextPageToken: This token allows you to get the next page of results - // for list requests. - // If the number of results is larger than `page_size`, use - // the - // `next_page_token` as a value for the query parameter `page_token` in - // the - // next request. The value will become empty when there are no more - // pages. + // for list requests. 
If the number of results is larger than + // `page_size`, use the `next_page_token` as a value for the query + // parameter `page_token` in the next request. The value will become + // empty when there are no more pages. NextPageToken string `json:"nextPageToken,omitempty"` // Subnetworks: A list of usable subnetworks in the specified network @@ -2281,14 +2188,11 @@ func (s *ListUsableSubnetworksResponse) MarshalJSON() ([]byte, error) { // to be used for the cluster. type MaintenancePolicy struct { // ResourceVersion: A hash identifying the version of this policy, so - // that updates to fields of - // the policy won't accidentally undo intermediate changes (and so that - // users - // of the API unaware of some fields won't accidentally remove other - // fields). - // Make a get() request to the cluster to get the - // current - // resource version and include it with requests to set the policy. + // that updates to fields of the policy won't accidentally undo + // intermediate changes (and so that users of the API unaware of some + // fields won't accidentally remove other fields). Make a `get()` + // request to the cluster to get the current resource version and + // include it with requests to set the policy. ResourceVersion string `json:"resourceVersion,omitempty"` // Window: Specifies the maintenance window in which maintenance may be @@ -2327,15 +2231,13 @@ type MaintenanceWindow struct { DailyMaintenanceWindow *DailyMaintenanceWindow `json:"dailyMaintenanceWindow,omitempty"` // MaintenanceExclusions: Exceptions to maintenance window. - // Non-emergency maintenance should not - // occur in these windows. + // Non-emergency maintenance should not occur in these windows. MaintenanceExclusions map[string]TimeWindow `json:"maintenanceExclusions,omitempty"` // RecurringWindow: RecurringWindow specifies some number of recurring - // time periods for - // maintenance to occur. The time windows may be overlapping. If - // no - // maintenance windows are set, maintenance can occur at any time. + // time periods for maintenance to occur. The time windows may be + // overlapping. If no maintenance windows are set, maintenance can occur + // at any time. RecurringWindow *RecurringTimeWindow `json:"recurringWindow,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -2364,47 +2266,43 @@ func (s *MaintenanceWindow) MarshalJSON() ([]byte, error) { } // MasterAuth: The authentication information for accessing the master -// endpoint. -// Authentication can be done using HTTP basic auth or using -// client -// certificates. +// endpoint. Authentication can be done using HTTP basic auth or using +// client certificates. type MasterAuth struct { // ClientCertificate: [Output only] Base64-encoded public certificate - // used by clients to - // authenticate to the cluster endpoint. + // used by clients to authenticate to the cluster endpoint. ClientCertificate string `json:"clientCertificate,omitempty"` // ClientCertificateConfig: Configuration for client certificate - // authentication on the cluster. For - // clusters before v1.12, if no configuration is specified, a - // client - // certificate is issued. + // authentication on the cluster. For clusters before v1.12, if no + // configuration is specified, a client certificate is issued. ClientCertificateConfig *ClientCertificateConfig `json:"clientCertificateConfig,omitempty"` // ClientKey: [Output only] Base64-encoded private key used by clients - // to authenticate - // to the cluster endpoint. 
+ // to authenticate to the cluster endpoint. ClientKey string `json:"clientKey,omitempty"` // ClusterCaCertificate: [Output only] Base64-encoded public certificate - // that is the root of - // trust for the cluster. + // that is the root of trust for the cluster. ClusterCaCertificate string `json:"clusterCaCertificate,omitempty"` // Password: The password to use for HTTP basic authentication to the - // master endpoint. - // Because the master endpoint is open to the Internet, you should - // create a - // strong password. If a password is provided for cluster creation, - // username - // must be non-empty. + // master endpoint. Because the master endpoint is open to the Internet, + // you should create a strong password. If a password is provided for + // cluster creation, username must be non-empty. Warning: basic + // authentication is deprecated, and will be removed in GKE control + // plane versions 1.19 and newer. For a list of recommended + // authentication methods, see: + // https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication Password string `json:"password,omitempty"` // Username: The username to use for HTTP basic authentication to the - // master endpoint. - // For clusters v1.6.0 and later, basic authentication can be disabled - // by - // leaving username unspecified (or setting it to the empty string). + // master endpoint. For clusters v1.6.0 and later, basic authentication + // can be disabled by leaving username unspecified (or setting it to the + // empty string). Warning: basic authentication is deprecated, and will + // be removed in GKE control plane versions 1.19 and newer. For a list + // of recommended authentication methods, see: + // https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication Username string `json:"username,omitempty"` // ForceSendFields is a list of field names (e.g. "ClientCertificate") @@ -2432,16 +2330,13 @@ func (s *MasterAuth) MarshalJSON() ([]byte, error) { } // MasterAuthorizedNetworksConfig: Configuration options for the master -// authorized networks feature. Enabled -// master authorized networks will disallow all external traffic to -// access -// Kubernetes master through HTTPS except traffic from the given CIDR -// blocks, -// Google Compute Engine Public IPs and Google Prod IPs. +// authorized networks feature. Enabled master authorized networks will +// disallow all external traffic to access Kubernetes master through +// HTTPS except traffic from the given CIDR blocks, Google Compute +// Engine Public IPs and Google Prod IPs. type MasterAuthorizedNetworksConfig struct { // CidrBlocks: cidr_blocks define up to 50 external networks that could - // access - // Kubernetes master through HTTPS. + // access Kubernetes master through HTTPS. CidrBlocks []*CidrBlock `json:"cidrBlocks,omitempty"` // Enabled: Whether or not master authorized networks is enabled. @@ -2554,45 +2449,45 @@ func (s *Metric) UnmarshalJSON(data []byte) error { // NetworkConfig: NetworkConfig reports the relative names of network & // subnetwork. type NetworkConfig struct { + // DefaultSnatStatus: Whether the cluster disables default in-node sNAT + // rules. In-node sNAT rules will be disabled when default_snat_status + // is disabled. When disabled is set to false, default IP masquerade + // rules will be applied to the nodes to prevent sNAT on cluster + // internal traffic. 
+ DefaultSnatStatus *DefaultSnatStatus `json:"defaultSnatStatus,omitempty"` + // EnableIntraNodeVisibility: Whether Intra-node visibility is enabled - // for this cluster. - // This makes same node pod to pod traffic visible for VPC network. + // for this cluster. This makes same node pod to pod traffic visible for + // VPC network. EnableIntraNodeVisibility bool `json:"enableIntraNodeVisibility,omitempty"` - // Network: Output only. The relative name of the Google Compute - // Engine - // network(https://cloud.google.com/compute/docs/networks-and-fire - // walls#networks) - // to which the cluster is connected. - // Example: + // Network: Output only. The relative name of the Google Compute Engine + // network(https://cloud.google.com/compute/docs/networks-and-firewalls#n + // etworks) to which the cluster is connected. Example: // projects/my-project/global/networks/my-network Network string `json:"network,omitempty"` // Subnetwork: Output only. The relative name of the Google Compute - // Engine - // [subnetwork](https://cloud.google.com/compute/docs/vpc) to which - // the - // cluster is connected. - // Example: + // Engine [subnetwork](https://cloud.google.com/compute/docs/vpc) to + // which the cluster is connected. Example: // projects/my-project/regions/us-central1/subnetworks/my-subnet Subnetwork string `json:"subnetwork,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "EnableIntraNodeVisibility") to unconditionally include in API - // requests. By default, fields with empty values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // ForceSendFields is a list of field names (e.g. "DefaultSnatStatus") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. - // "EnableIntraNodeVisibility") to include in API requests with the JSON - // null value. By default, fields with empty values are omitted from API - // requests. However, any field with an empty value appearing in - // NullFields will be sent to the server as null. It is an error if a - // field in this list has a non-empty value. This may be used to include - // null fields in Patch requests. + // NullFields is a list of field names (e.g. "DefaultSnatStatus") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -2602,10 +2497,8 @@ func (s *NetworkConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkPolicy: Configuration options for the NetworkPolicy -// feature. -// https://kubernetes.io/docs/concepts/services-networking/netwo -// rkpolicies/ +// NetworkPolicy: Configuration options for the NetworkPolicy feature. 
+// https://kubernetes.io/docs/concepts/services-networking/networkpolicies/ type NetworkPolicy struct { // Enabled: Whether network policy is enabled on the cluster. Enabled bool `json:"enabled,omitempty"` @@ -2641,10 +2534,8 @@ func (s *NetworkPolicy) MarshalJSON() ([]byte, error) { } // NetworkPolicyConfig: Configuration for NetworkPolicy. This only -// tracks whether the addon -// is enabled or not on the Master, it does not track whether network -// policy -// is enabled for the nodes. +// tracks whether the addon is enabled or not on the Master, it does not +// track whether network policy is enabled for the nodes. type NetworkPolicyConfig struct { // Disabled: Whether NetworkPolicy is enabled for this cluster. Disabled bool `json:"disabled,omitempty"` @@ -2675,191 +2566,134 @@ func (s *NetworkPolicyConfig) MarshalJSON() ([]byte, error) { // NodeConfig: Parameters that describe the nodes in a cluster. type NodeConfig struct { // Accelerators: A list of hardware accelerators to be attached to each - // node. - // See https://cloud.google.com/compute/docs/gpus for more information - // about - // support for GPUs. + // node. See https://cloud.google.com/compute/docs/gpus for more + // information about support for GPUs. Accelerators []*AcceleratorConfig `json:"accelerators,omitempty"` - // DiskSizeGb: Size of the disk attached to each node, specified in - // GB. - // The smallest allowed disk size is 10GB. - // - // If unspecified, the default disk size is 100GB. + // BootDiskKmsKey: The Customer Managed Encryption Key used to encrypt + // the boot disk attached to each node in the node pool. This should be + // of the form + // projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cr + // yptoKeys/[KEY_NAME]. For more information about protecting resources + // with Cloud KMS Keys please see: + // https://cloud.google.com/compute/docs/disks/customer-managed-encryption + BootDiskKmsKey string `json:"bootDiskKmsKey,omitempty"` + + // DiskSizeGb: Size of the disk attached to each node, specified in GB. + // The smallest allowed disk size is 10GB. If unspecified, the default + // disk size is 100GB. DiskSizeGb int64 `json:"diskSizeGb,omitempty"` // DiskType: Type of the disk attached to each node (e.g. 'pd-standard' - // or 'pd-ssd') - // - // If unspecified, the default disk type is 'pd-standard' + // or 'pd-ssd') If unspecified, the default disk type is 'pd-standard' DiskType string `json:"diskType,omitempty"` // ImageType: The image type to use for this node. Note that for a given - // image type, - // the latest version of it will be used. + // image type, the latest version of it will be used. ImageType string `json:"imageType,omitempty"` // Labels: The map of Kubernetes labels (key/value pairs) to be applied - // to each node. - // These will added in addition to any default label(s) that - // Kubernetes may apply to the node. - // In case of conflict in label keys, the applied set may differ - // depending on - // the Kubernetes version -- it's best to assume the behavior is - // undefined - // and conflicts should be avoided. - // For more information, including usage and the valid values, - // see: - // https://kubernetes.io/docs/concepts/overview/working-with-objects - // /labels/ + // to each node. These will added in addition to any default label(s) + // that Kubernetes may apply to the node. 
In case of conflict in label + // keys, the applied set may differ depending on the Kubernetes version + // -- it's best to assume the behavior is undefined and conflicts should + // be avoided. For more information, including usage and the valid + // values, see: + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ Labels map[string]string `json:"labels,omitempty"` // LocalSsdCount: The number of local SSD disks to be attached to the - // node. - // - // The limit for this value is dependent upon the maximum number - // of - // disks available on a machine per zone. - // See: - // https://cloud.google.com/compute/docs/disks/local-ssd - // for more information. + // node. The limit for this value is dependent upon the maximum number + // of disks available on a machine per zone. See: + // https://cloud.google.com/compute/docs/disks/local-ssd for more + // information. LocalSsdCount int64 `json:"localSsdCount,omitempty"` - // MachineType: The name of a Google Compute Engine - // [machine - // type](https://cloud.google.com/compute/docs/machine-types) - // (e.g. - // `n1-standard-1`). - // - // If unspecified, the default machine type is - // `n1-standard-1`. + // MachineType: The name of a Google Compute Engine [machine + // type](https://cloud.google.com/compute/docs/machine-types) If + // unspecified, the default machine type is `e2-medium`. MachineType string `json:"machineType,omitempty"` // Metadata: The metadata key/value pairs assigned to instances in the - // cluster. - // - // Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 - // bytes - // in length. These are reflected as part of a URL in the metadata - // server. - // Additionally, to avoid ambiguity, keys must not conflict with any - // other - // metadata keys for the project or be one of the reserved keys: - // "cluster-location" - // "cluster-name" - // "cluster-uid" - // "configure-sh" - // "containerd-configure-sh" - // "enable-os-login" - // "gci-ensure-gke-docker" - // "gci-metrics-enabled" - // "gci-update-strategy" - // "instance-template" - // "kube-env" - // "startup-script" - // "user-data" - // "disable-address-manager" - // "windows-startup-script-ps1" - // "common-psm1" - // "k8s-node-setup-psm1" - // "install-ssh-psm1" - // "user-profile-psm1" - // "serial-port-logging-enable" - // - // Values are free-form strings, and only have meaning as interpreted - // by - // the image running in the instance. The only restriction placed on - // them is - // that each value's size must be less than or equal to 32 KB. - // - // The total size of all keys and values must be less than 512 KB. + // cluster. Keys must conform to the regexp `[a-zA-Z0-9-_]+` and be less + // than 128 bytes in length. These are reflected as part of a URL in the + // metadata server. Additionally, to avoid ambiguity, keys must not + // conflict with any other metadata keys for the project or be one of + // the reserved keys: - "cluster-location" - "cluster-name" - + // "cluster-uid" - "configure-sh" - "containerd-configure-sh" - + // "enable-os-login" - "gci-ensure-gke-docker" - "gci-metrics-enabled" - + // "gci-update-strategy" - "instance-template" - "kube-env" - + // "startup-script" - "user-data" - "disable-address-manager" - + // "windows-startup-script-ps1" - "common-psm1" - "k8s-node-setup-psm1" + // - "install-ssh-psm1" - "user-profile-psm1" - + // "serial-port-logging-enable" Values are free-form strings, and only + // have meaning as interpreted by the image running in the instance. 
The + // only restriction placed on them is that each value's size must be + // less than or equal to 32 KB. The total size of all keys and values + // must be less than 512 KB. Metadata map[string]string `json:"metadata,omitempty"` // MinCpuPlatform: Minimum CPU platform to be used by this instance. The - // instance may be - // scheduled on the specified or newer CPU platform. Applicable values - // are the - // friendly names of CPU platforms, such as - // minCpuPlatform: "Intel Haswell" - // or - // minCpuPlatform: "Intel Sandy Bridge". For - // more - // information, read [how to specify min - // CPU - // platform](https://cloud.google.com/compute/docs/instances/specify- - // min-cpu-platform) + // instance may be scheduled on the specified or newer CPU platform. + // Applicable values are the friendly names of CPU platforms, such as + // `minCpuPlatform: "Intel Haswell" or `minCpuPlatform: "Intel Sandy + // Bridge". For more information, read [how to specify min CPU + // platform](https://cloud.google.com/compute/docs/instances/specify-min- + // cpu-platform) MinCpuPlatform string `json:"minCpuPlatform,omitempty"` + // NodeGroup: Setting this field will assign instances of this pool to + // run on the specified node group. This is useful for running workloads + // on [sole tenant + // nodes](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes). + NodeGroup string `json:"nodeGroup,omitempty"` + // OauthScopes: The set of Google API scopes to be made available on all - // of the - // node VMs under the "default" service account. - // - // The following scopes are recommended, but not required, and by - // default are - // not included: - // - // * `https://www.googleapis.com/auth/compute` is required for - // mounting - // persistent storage on your nodes. - // * `https://www.googleapis.com/auth/devstorage.read_only` is required - // for - // communicating with **gcr.io** - // (the [Google - // Container - // Registry](https://cloud.google.com/container-registry/)). - // - // I - // f unspecified, no scopes are added, unless Cloud Logging or - // Cloud + // of the node VMs under the "default" service account. The following + // scopes are recommended, but not required, and by default are not + // included: * `https://www.googleapis.com/auth/compute` is required for + // mounting persistent storage on your nodes. * + // `https://www.googleapis.com/auth/devstorage.read_only` is required + // for communicating with **gcr.io** (the [Google Container + // Registry](https://cloud.google.com/container-registry/)). If + // unspecified, no scopes are added, unless Cloud Logging or Cloud // Monitoring are enabled, in which case their required scopes will be // added. OauthScopes []string `json:"oauthScopes,omitempty"` // Preemptible: Whether the nodes are created as preemptible VM - // instances. - // See: - // https://cloud.google.com/compute/docs/instances/preemptible for - // more + // instances. See: + // https://cloud.google.com/compute/docs/instances/preemptible for more // information about preemptible VM instances. Preemptible bool `json:"preemptible,omitempty"` // ReservationAffinity: The optional reservation affinity. Setting this - // field will apply - // the specified [Zonal - // Compute - // Reservation](https://cloud.google.com/compute/docs/instances/r - // eserving-zonal-resources) - // to this node pool. 
+ // field will apply the specified [Zonal Compute + // Reservation](https://cloud.google.com/compute/docs/instances/reserving + // -zonal-resources) to this node pool. ReservationAffinity *ReservationAffinity `json:"reservationAffinity,omitempty"` // SandboxConfig: Sandbox configuration for this node. SandboxConfig *SandboxConfig `json:"sandboxConfig,omitempty"` // ServiceAccount: The Google Cloud Platform Service Account to be used - // by the node VMs. - // Specify the email address of the Service Account; otherwise, if no - // Service - // Account is specified, the "default" service account is used. + // by the node VMs. Specify the email address of the Service Account; + // otherwise, if no Service Account is specified, the "default" service + // account is used. ServiceAccount string `json:"serviceAccount,omitempty"` // ShieldedInstanceConfig: Shielded Instance options. ShieldedInstanceConfig *ShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` // Tags: The list of instance tags applied to all nodes. Tags are used - // to identify - // valid sources or targets for network firewalls and are specified - // by - // the client during cluster or node pool creation. Each tag within the - // list - // must comply with RFC1035. + // to identify valid sources or targets for network firewalls and are + // specified by the client during cluster or node pool creation. Each + // tag within the list must comply with RFC1035. Tags []string `json:"tags,omitempty"` - // Taints: List of kubernetes taints to be applied to each node. - // - // For more information, including usage and the valid values, - // see: - // https://kubernetes.io/docs/concepts/configuration/taint-and-toler - // ation/ + // Taints: List of kubernetes taints to be applied to each node. For + // more information, including usage and the valid values, see: + // https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ Taints []*NodeTaint `json:"taints,omitempty"` // WorkloadMetadataConfig: The workload metadata configuration for this @@ -2890,23 +2724,18 @@ func (s *NodeConfig) MarshalJSON() ([]byte, error) { } // NodeManagement: NodeManagement defines the set of node management -// services turned on for the -// node pool. +// services turned on for the node pool. type NodeManagement struct { // AutoRepair: A flag that specifies whether the node auto-repair is - // enabled for the node - // pool. If enabled, the nodes in this node pool will be monitored and, - // if - // they fail health checks too many times, an automatic repair action - // will be - // triggered. + // enabled for the node pool. If enabled, the nodes in this node pool + // will be monitored and, if they fail health checks too many times, an + // automatic repair action will be triggered. AutoRepair bool `json:"autoRepair,omitempty"` // AutoUpgrade: A flag that specifies whether node auto-upgrade is - // enabled for the node - // pool. If enabled, node auto-upgrade helps keep the nodes in your node - // pool - // up to date with the latest release version of Kubernetes. + // enabled for the node pool. If enabled, node auto-upgrade helps keep + // the nodes in your node pool up to date with the latest release + // version of Kubernetes. AutoUpgrade bool `json:"autoUpgrade,omitempty"` // UpgradeOptions: Specifies the Auto Upgrade knobs for the node pool. @@ -2936,20 +2765,14 @@ func (s *NodeManagement) MarshalJSON() ([]byte, error) { } // NodePool: NodePool contains the name and configuration for a -// cluster's node pool. 
-// Node pools are a set of nodes (i.e. VM's), with a common -// configuration and -// specification, under the control of the cluster master. They may have -// a set -// of Kubernetes labels applied to them, which may be used to reference -// them -// during pod scheduling. They may also be resized up or down, to -// accommodate -// the workload. +// cluster's node pool. Node pools are a set of nodes (i.e. VM's), with +// a common configuration and specification, under the control of the +// cluster master. They may have a set of Kubernetes labels applied to +// them, which may be used to reference them during pod scheduling. They +// may also be resized up or down, to accommodate the workload. type NodePool struct { // Autoscaling: Autoscaler configuration for this NodePool. Autoscaler - // is enabled - // only if a valid configuration is present. + // is enabled only if a valid configuration is present. Autoscaling *NodePoolAutoscaling `json:"autoscaling,omitempty"` // Conditions: Which conditions caused the current node pool state. @@ -2959,34 +2782,28 @@ type NodePool struct { Config *NodeConfig `json:"config,omitempty"` // InitialNodeCount: The initial node count for the pool. You must - // ensure that your - // Compute Engine resource - // quota - // is sufficient for this number of instances. You must also have - // available - // firewall and routes quota. + // ensure that your Compute Engine [resource + // quota](https://cloud.google.com/compute/quotas) is sufficient for + // this number of instances. You must also have available firewall and + // routes quota. InitialNodeCount int64 `json:"initialNodeCount,omitempty"` // InstanceGroupUrls: [Output only] The resource URLs of the [managed // instance - // groups](https://cloud.google.com/compute/docs/instance-groups - // /creating-groups-of-managed-instances) - // associated with this node pool. + // groups](https://cloud.google.com/compute/docs/instance-groups/creating + // -groups-of-managed-instances) associated with this node pool. InstanceGroupUrls []string `json:"instanceGroupUrls,omitempty"` - // Locations: The list of Google Compute - // Engine + // Locations: The list of Google Compute Engine // [zones](https://cloud.google.com/compute/docs/zones#available) in - // which the - // NodePool's nodes should be located. + // which the NodePool's nodes should be located. Locations []string `json:"locations,omitempty"` // Management: NodeManagement configuration for this NodePool. Management *NodeManagement `json:"management,omitempty"` // MaxPodsConstraint: The constraint on the maximum number of pods that - // can be run - // simultaneously on a node in the node pool. + // can be run simultaneously on a node in the node pool. MaxPodsConstraint *MaxPodsConstraint `json:"maxPodsConstraint,omitempty"` // Name: The name of the node pool. @@ -3006,29 +2823,22 @@ type NodePool struct { // "PROVISIONING" - The PROVISIONING state indicates the node pool is // being created. // "RUNNING" - The RUNNING state indicates the node pool has been - // created - // and is fully usable. + // created and is fully usable. // "RUNNING_WITH_ERROR" - The RUNNING_WITH_ERROR state indicates the - // node pool has been created - // and is partially usable. Some error state has occurred and - // some - // functionality may be impaired. Customer may need to reissue a - // request - // or trigger a new update. + // node pool has been created and is partially usable. Some error state + // has occurred and some functionality may be impaired. 
Customer may + // need to reissue a request or trigger a new update. // "RECONCILING" - The RECONCILING state indicates that some work is - // actively being done on - // the node pool, such as upgrading node software. Details can - // be found in the `statusMessage` field. + // actively being done on the node pool, such as upgrading node + // software. Details can be found in the `statusMessage` field. // "STOPPING" - The STOPPING state indicates the node pool is being // deleted. // "ERROR" - The ERROR state indicates the node pool may be unusable. - // Details - // can be found in the `statusMessage` field. + // Details can be found in the `statusMessage` field. Status string `json:"status,omitempty"` // StatusMessage: [Output only] Additional information about the current - // status of this - // node pool instance, if available. + // status of this node pool instance, if available. StatusMessage string `json:"statusMessage,omitempty"` // UpgradeSettings: Upgrade settings control disruption and speed of the @@ -3066,8 +2876,8 @@ func (s *NodePool) MarshalJSON() ([]byte, error) { } // NodePoolAutoscaling: NodePoolAutoscaling contains information -// required by cluster autoscaler to -// adjust the size of the node pool to the current cluster usage. +// required by cluster autoscaler to adjust the size of the node pool to +// the current cluster usage. type NodePoolAutoscaling struct { // Autoprovisioned: Can this node pool be deleted automatically. Autoprovisioned bool `json:"autoprovisioned,omitempty"` @@ -3076,13 +2886,11 @@ type NodePoolAutoscaling struct { Enabled bool `json:"enabled,omitempty"` // MaxNodeCount: Maximum number of nodes in the NodePool. Must be >= - // min_node_count. There - // has to enough quota to scale up the cluster. + // min_node_count. There has to enough quota to scale up the cluster. MaxNodeCount int64 `json:"maxNodeCount,omitempty"` // MinNodeCount: Minimum number of nodes in the NodePool. Must be >= 1 - // and <= - // max_node_count. + // and <= max_node_count. MinNodeCount int64 `json:"minNodeCount,omitempty"` // ForceSendFields is a list of field names (e.g. "Autoprovisioned") to @@ -3110,14 +2918,10 @@ func (s *NodePoolAutoscaling) MarshalJSON() ([]byte, error) { } // NodeTaint: Kubernetes taint is comprised of three fields: key, value, -// and effect. Effect -// can only be one of three types: NoSchedule, PreferNoSchedule or -// NoExecute. -// -// See -// [here](https://kubernetes.io/docs/concepts/configurati -// on/taint-and-toleration) -// for more information, including usage and the valid values. +// and effect. Effect can only be one of three types: NoSchedule, +// PreferNoSchedule or NoExecute. See +// [here](https://kubernetes.io/docs/concepts/configuration/taint-and-tol +// eration) for more information, including usage and the valid values. type NodeTaint struct { // Effect: Effect for taint. // @@ -3158,8 +2962,8 @@ func (s *NodeTaint) MarshalJSON() ([]byte, error) { } // Operation: This operation resource represents operations that may -// have happened or are -// happening on the cluster. All fields are output only. +// have happened or are happening on the cluster. All fields are output +// only. type Operation struct { // ClusterConditions: Which conditions caused the current cluster state. ClusterConditions []*StatusCondition `json:"clusterConditions,omitempty"` @@ -3167,19 +2971,15 @@ type Operation struct { // Detail: Detailed operation progress, if available. 
Detail string `json:"detail,omitempty"` - // EndTime: [Output only] The time the operation completed, - // in + // EndTime: [Output only] The time the operation completed, in // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. EndTime string `json:"endTime,omitempty"` - // Location: [Output only] The name of the Google Compute - // Engine - // [zone](https://cloud.google.com/compute/docs/regions-zones/regi - // ons-zones#available) - // or - // [region](https://cloud.google.com/compute/docs - // /regions-zones/regions-zones#available) - // in which the cluster resides. + // Location: [Output only] The name of the Google Compute Engine + // [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zon + // es#available) or + // [region](https://cloud.google.com/compute/docs/regions-zones/regions-z + // ones#available) in which the cluster resides. Location string `json:"location,omitempty"` // Name: The server-assigned ID for the operation. @@ -3218,8 +3018,7 @@ type Operation struct { // SelfLink: Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // StartTime: [Output only] The time the operation started, - // in + // StartTime: [Output only] The time the operation started, in // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. StartTime string `json:"startTime,omitempty"` @@ -3240,12 +3039,10 @@ type Operation struct { // TargetLink: Server-defined URL for the target of the operation. TargetLink string `json:"targetLink,omitempty"` - // Zone: The name of the Google Compute - // Engine + // Zone: The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // operation is taking place. This field is deprecated, use location - // instead. + // which the operation is taking place. This field is deprecated, use + // location instead. Zone string `json:"zone,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -3279,23 +3076,21 @@ func (s *Operation) MarshalJSON() ([]byte, error) { // OperationProgress: Information about operation (or operation stage) // progress. type OperationProgress struct { - // Metrics: Progress metric bundle, for example: - // metrics: [{name: "nodes done", int_value: 15}, - // {name: "nodes total", int_value: 32}] - // or - // metrics: [{name: "progress", double_value: 0.56}, - // {name: "progress scale", double_value: 1.0}] + // Metrics: Progress metric bundle, for example: metrics: [{name: "nodes + // done", int_value: 15}, {name: "nodes total", int_value: 32}] or + // metrics: [{name: "progress", double_value: 0.56}, {name: "progress + // scale", double_value: 1.0}] Metrics []*Metric `json:"metrics,omitempty"` - // Name: A non-parameterized string describing an operation stage. - // Unset for single-stage operations. + // Name: A non-parameterized string describing an operation stage. Unset + // for single-stage operations. Name string `json:"name,omitempty"` // Stages: Substages of an operation or a stage. Stages []*OperationProgress `json:"stages,omitempty"` - // Status: Status of an operation stage. - // Unset for single-stage operations. + // Status: Status of an operation stage. Unset for single-stage + // operations. // // Possible values: // "STATUS_UNSPECIFIED" - Not set. @@ -3335,19 +3130,18 @@ type PrivateClusterConfig struct { EnablePrivateEndpoint bool `json:"enablePrivateEndpoint,omitempty"` // EnablePrivateNodes: Whether nodes have internal IP addresses only. 
If - // enabled, all nodes are - // given only RFC 1918 private addresses and communicate with the master - // via - // private networking. + // enabled, all nodes are given only RFC 1918 private addresses and + // communicate with the master via private networking. EnablePrivateNodes bool `json:"enablePrivateNodes,omitempty"` + // MasterGlobalAccessConfig: Controls master global access settings. + MasterGlobalAccessConfig *PrivateClusterMasterGlobalAccessConfig `json:"masterGlobalAccessConfig,omitempty"` + // MasterIpv4CidrBlock: The IP range in CIDR notation to use for the - // hosted master network. This - // range will be used for assigning internal IP addresses to the master - // or - // set of masters, as well as the ILB VIP. This range must not overlap - // with - // any other ranges in use within the cluster's network. + // hosted master network. This range will be used for assigning internal + // IP addresses to the master or set of masters, as well as the ILB VIP. + // This range must not overlap with any other ranges in use within the + // cluster's network. MasterIpv4CidrBlock string `json:"masterIpv4CidrBlock,omitempty"` // PeeringName: Output only. The peering name in the customer VPC used @@ -3387,43 +3181,57 @@ func (s *PrivateClusterConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// PrivateClusterMasterGlobalAccessConfig: Configuration for controlling +// master global access settings. +type PrivateClusterMasterGlobalAccessConfig struct { + // Enabled: Whenever master is accessible globally or not. + Enabled bool `json:"enabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PrivateClusterMasterGlobalAccessConfig) MarshalJSON() ([]byte, error) { + type NoMethod PrivateClusterMasterGlobalAccessConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // RecurringTimeWindow: Represents an arbitrary window of time that // recurs. type RecurringTimeWindow struct { // Recurrence: An RRULE - // (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for how - // this window reccurs. They go on for the span of time between the - // start and - // end time. - // - // For example, to have something repeat every weekday, you'd use: - // FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR - // To repeat some window daily (equivalent to the - // DailyMaintenanceWindow): - // FREQ=DAILY - // For the first weekend of every month: - // FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU + // (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for how this + // window reccurs. They go on for the span of time between the start and + // end time. 
For example, to have something repeat every weekday, you'd + // use: `FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR` To repeat some window daily + // (equivalent to the DailyMaintenanceWindow): `FREQ=DAILY` For the + // first weekend of every month: `FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU` // This specifies how frequently the window starts. Eg, if you wanted to - // have - // a 9-5 UTC-4 window every weekday, you'd use something like: - // - // start time = 2019-01-01T09:00:00-0400 - // end time = 2019-01-01T17:00:00-0400 - // recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR - // - // Windows can span multiple days. Eg, to make the window encompass - // every - // weekend from midnight Saturday till the last minute of Sunday - // UTC: - // - // start time = 2019-01-05T00:00:00Z - // end time = 2019-01-07T23:59:00Z - // recurrence = FREQ=WEEKLY;BYDAY=SA - // - // Note the start and end time's specific dates are largely arbitrary - // except - // to specify duration of the window and when it first starts. - // The FREQ values of HOURLY, MINUTELY, and SECONDLY are not supported. + // have a 9-5 UTC-4 window every weekday, you'd use something like: ``` + // start time = 2019-01-01T09:00:00-0400 end time = + // 2019-01-01T17:00:00-0400 recurrence = + // FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR ``` Windows can span multiple days. + // Eg, to make the window encompass every weekend from midnight Saturday + // till the last minute of Sunday UTC: ``` start time = + // 2019-01-05T00:00:00Z end time = 2019-01-07T23:59:00Z recurrence = + // FREQ=WEEKLY;BYDAY=SA ``` Note the start and end time's specific dates + // are largely arbitrary except to specify duration of the window and + // when it first starts. The FREQ values of HOURLY, MINUTELY, and + // SECONDLY are not supported. Recurrence string `json:"recurrence,omitempty"` // Window: The window of the first recurrence. @@ -3452,12 +3260,103 @@ func (s *RecurringTimeWindow) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ReleaseChannel: ReleaseChannel indicates which release channel a +// cluster is subscribed to. Release channels are arranged in order of +// risk. When a cluster is subscribed to a release channel, Google +// maintains both the master version and the node version. Node +// auto-upgrade defaults to true and cannot be disabled. +type ReleaseChannel struct { + // Channel: channel specifies which release channel the cluster is + // subscribed to. + // + // Possible values: + // "UNSPECIFIED" - No channel specified. + // "RAPID" - RAPID channel is offered on an early access basis for + // customers who want to test new releases. WARNING: Versions available + // in the RAPID Channel may be subject to unresolved issues with no + // known workaround and are not subject to any SLAs. + // "REGULAR" - Clusters subscribed to REGULAR receive versions that + // are considered GA quality. REGULAR is intended for production users + // who want to take advantage of new features. + // "STABLE" - Clusters subscribed to STABLE receive versions that are + // known to be stable and reliable in production. + Channel string `json:"channel,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Channel") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Channel") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ReleaseChannel) MarshalJSON() ([]byte, error) { + type NoMethod ReleaseChannel + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ReleaseChannelConfig: ReleaseChannelConfig exposes configuration for +// a release channel. +type ReleaseChannelConfig struct { + // Channel: The release channel this configuration applies to. + // + // Possible values: + // "UNSPECIFIED" - No channel specified. + // "RAPID" - RAPID channel is offered on an early access basis for + // customers who want to test new releases. WARNING: Versions available + // in the RAPID Channel may be subject to unresolved issues with no + // known workaround and are not subject to any SLAs. + // "REGULAR" - Clusters subscribed to REGULAR receive versions that + // are considered GA quality. REGULAR is intended for production users + // who want to take advantage of new features. + // "STABLE" - Clusters subscribed to STABLE receive versions that are + // known to be stable and reliable in production. + Channel string `json:"channel,omitempty"` + + // DefaultVersion: The default version for newly created clusters on the + // channel. + DefaultVersion string `json:"defaultVersion,omitempty"` + + // ValidVersions: List of valid versions for the channel. + ValidVersions []string `json:"validVersions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Channel") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Channel") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ReleaseChannelConfig) MarshalJSON() ([]byte, error) { + type NoMethod ReleaseChannelConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ReservationAffinity: // [ReservationAffinity](https://cloud.google.com/compute/docs/instances/ -// reserving-zonal-resources) -// is the configuration of desired reservation which instances could -// take -// capacity from. +// reserving-zonal-resources) is the configuration of desired +// reservation which instances could take capacity from. type ReservationAffinity struct { // ConsumeReservationType: Corresponds to the type of reservation // consumption. 
@@ -3467,15 +3366,13 @@ type ReservationAffinity struct { // "NO_RESERVATION" - Do not consume from any reserved capacity. // "ANY_RESERVATION" - Consume any reservation available. // "SPECIFIC_RESERVATION" - Must consume from a specific reservation. - // Must specify key value fields - // for specifying the reservations. + // Must specify key value fields for specifying the reservations. ConsumeReservationType string `json:"consumeReservationType,omitempty"` // Key: Corresponds to the label key of a reservation resource. To - // target a - // SPECIFIC_RESERVATION by name, specify - // "googleapis.com/reservation-name" as - // the key and specify the name of your reservation as its value. + // target a SPECIFIC_RESERVATION by name, specify + // "googleapis.com/reservation-name" as the key and specify the name of + // your reservation as its value. Key string `json:"key,omitempty"` // Values: Corresponds to the label value(s) of reservation resource(s). @@ -3507,8 +3404,7 @@ func (s *ReservationAffinity) MarshalJSON() ([]byte, error) { } // ResourceLimit: Contains information about amount of some resource in -// the cluster. -// For memory, value should be in GB. +// the cluster. For memory, value should be in GB. type ResourceLimit struct { // Maximum: Maximum amount of the resource in the cluster. Maximum int64 `json:"maximum,omitempty,string"` @@ -3554,9 +3450,8 @@ type ResourceUsageExportConfig struct { ConsumptionMeteringConfig *ConsumptionMeteringConfig `json:"consumptionMeteringConfig,omitempty"` // EnableNetworkEgressMetering: Whether to enable network egress - // metering for this cluster. If enabled, a - // daemonset will be created in the cluster to meter network egress - // traffic. + // metering for this cluster. If enabled, a daemonset will be created in + // the cluster to meter network egress traffic. EnableNetworkEgressMetering bool `json:"enableNetworkEgressMetering,omitempty"` // ForceSendFields is a list of field names (e.g. "BigqueryDestination") @@ -3584,40 +3479,31 @@ func (s *ResourceUsageExportConfig) MarshalJSON() ([]byte, error) { } // RollbackNodePoolUpgradeRequest: RollbackNodePoolUpgradeRequest -// rollbacks the previously Aborted or Failed -// NodePool upgrade. This will be an no-op if the last upgrade -// successfully -// completed. +// rollbacks the previously Aborted or Failed NodePool upgrade. This +// will be an no-op if the last upgrade successfully completed. type RollbackNodePoolUpgradeRequest struct { - // ClusterId: Deprecated. The name of the cluster to rollback. - // This field has been deprecated and replaced by the name field. + // ClusterId: Deprecated. The name of the cluster to rollback. This + // field has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Name: The name (project, location, cluster, node pool id) of the node - // poll to - // rollback upgrade. - // Specified in the format + // poll to rollback upgrade. Specified in the format // `projects/*/locations/*/clusters/*/nodePools/*`. Name string `json:"name,omitempty"` - // NodePoolId: Deprecated. The name of the node pool to rollback. - // This field has been deprecated and replaced by the name field. + // NodePoolId: Deprecated. The name of the node pool to rollback. This + // field has been deprecated and replaced by the name field. NodePoolId string `json:"nodePoolId,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). 
- // This - // field has been deprecated and replaced by the name field. + // project number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -3678,6 +3564,9 @@ func (s *SandboxConfig) MarshalJSON() ([]byte, error) { // ServerConfig: Kubernetes Engine service configuration. type ServerConfig struct { + // Channels: List of release channel configurations. + Channels []*ReleaseChannelConfig `json:"channels,omitempty"` + // DefaultClusterVersion: Version of Kubernetes the service deploys by // default. DefaultClusterVersion string `json:"defaultClusterVersion,omitempty"` @@ -3688,32 +3577,32 @@ type ServerConfig struct { // ValidImageTypes: List of valid image types. ValidImageTypes []string `json:"validImageTypes,omitempty"` - // ValidMasterVersions: List of valid master versions. + // ValidMasterVersions: List of valid master versions, in descending + // order. ValidMasterVersions []string `json:"validMasterVersions,omitempty"` - // ValidNodeVersions: List of valid node upgrade target versions. + // ValidNodeVersions: List of valid node upgrade target versions, in + // descending order. ValidNodeVersions []string `json:"validNodeVersions,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. - // "DefaultClusterVersion") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // ForceSendFields is a list of field names (e.g. "Channels") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DefaultClusterVersion") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Channels") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -3727,33 +3616,26 @@ func (s *ServerConfig) MarshalJSON() ([]byte, error) { // associated with the cluster. type SetAddonsConfigRequest struct { // AddonsConfig: Required. The desired configurations for the various - // addons available to run in the - // cluster. + // addons available to run in the cluster. AddonsConfig *AddonsConfig `json:"addonsConfig,omitempty"` - // ClusterId: Deprecated. The name of the cluster to upgrade. - // This field has been deprecated and replaced by the name field. + // ClusterId: Deprecated. The name of the cluster to upgrade. This field + // has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Name: The name (project, location, cluster) of the cluster to set - // addons. - // Specified in the format `projects/*/locations/*/clusters/*`. + // addons. Specified in the format `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // project number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "AddonsConfig") to @@ -3780,50 +3662,38 @@ func (s *SetAddonsConfigRequest) MarshalJSON() ([]byte, error) { } // SetLabelsRequest: SetLabelsRequest sets the Google Cloud Platform -// labels on a Google Container -// Engine cluster, which will in turn set them for Google Compute -// Engine -// resources used by that cluster +// labels on a Google Container Engine cluster, which will in turn set +// them for Google Compute Engine resources used by that cluster type SetLabelsRequest struct { - // ClusterId: Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the name field. + // ClusterId: Deprecated. The name of the cluster. This field has been + // deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // LabelFingerprint: Required. The fingerprint of the previous set of - // labels for this resource, - // used to detect conflicts. The fingerprint is initially generated - // by - // Kubernetes Engine and changes after every request to modify or - // update - // labels. You must always provide an up-to-date fingerprint hash - // when - // updating or changing labels. Make a get() request to - // the - // resource to get the latest fingerprint. + // labels for this resource, used to detect conflicts. The fingerprint + // is initially generated by Kubernetes Engine and changes after every + // request to modify or update labels. You must always provide an + // up-to-date fingerprint hash when updating or changing labels. 
Make a + // `get()` request to the resource to get the latest fingerprint. LabelFingerprint string `json:"labelFingerprint,omitempty"` // Name: The name (project, location, cluster id) of the cluster to set - // labels. - // Specified in the format `projects/*/locations/*/clusters/*`. + // labels. Specified in the format `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or // project - // number](https://developers.google.com/console/help/new/#projec - // tnumber). - // This field has been deprecated and replaced by the name field. + // number](https://developers.google.com/console/help/new/#projectnumber) + // . This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` // ResourceLabels: Required. The labels to set for that cluster. ResourceLabels map[string]string `json:"resourceLabels,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -3850,11 +3720,10 @@ func (s *SetLabelsRequest) MarshalJSON() ([]byte, error) { } // SetLegacyAbacRequest: SetLegacyAbacRequest enables or disables the -// ABAC authorization mechanism for -// a cluster. +// ABAC authorization mechanism for a cluster. type SetLegacyAbacRequest struct { - // ClusterId: Deprecated. The name of the cluster to update. - // This field has been deprecated and replaced by the name field. + // ClusterId: Deprecated. The name of the cluster to update. This field + // has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Enabled: Required. Whether ABAC authorization will be enabled in the @@ -3862,24 +3731,19 @@ type SetLegacyAbacRequest struct { Enabled bool `json:"enabled,omitempty"` // Name: The name (project, location, cluster id) of the cluster to set - // legacy abac. - // Specified in the format `projects/*/locations/*/clusters/*`. + // legacy abac. Specified in the format + // `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // project number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. 
"ClusterId") to @@ -3908,42 +3772,32 @@ func (s *SetLegacyAbacRequest) MarshalJSON() ([]byte, error) { // SetLocationsRequest: SetLocationsRequest sets the locations of the // cluster. type SetLocationsRequest struct { - // ClusterId: Deprecated. The name of the cluster to upgrade. - // This field has been deprecated and replaced by the name field. + // ClusterId: Deprecated. The name of the cluster to upgrade. This field + // has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` - // Locations: Required. The desired list of Google Compute - // Engine + // Locations: Required. The desired list of Google Compute Engine // [zones](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster's nodes should be located. Changing the locations a cluster - // is in - // will result in nodes being either created or removed from the - // cluster, - // depending on whether locations are being added or removed. - // - // This list must always include the cluster's primary zone. + // which the cluster's nodes should be located. Changing the locations a + // cluster is in will result in nodes being either created or removed + // from the cluster, depending on whether locations are being added or + // removed. This list must always include the cluster's primary zone. Locations []string `json:"locations,omitempty"` // Name: The name (project, location, cluster) of the cluster to set - // locations. - // Specified in the format `projects/*/locations/*/clusters/*`. + // locations. Specified in the format + // `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // project number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -3972,45 +3826,33 @@ func (s *SetLocationsRequest) MarshalJSON() ([]byte, error) { // SetLoggingServiceRequest: SetLoggingServiceRequest sets the logging // service of a cluster. type SetLoggingServiceRequest struct { - // ClusterId: Deprecated. The name of the cluster to upgrade. - // This field has been deprecated and replaced by the name field. + // ClusterId: Deprecated. The name of the cluster to upgrade. This field + // has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // LoggingService: Required. The logging service the cluster should use - // to write logs. - // Currently available options: - // - // * `logging.googleapis.com/kubernetes` - The Cloud Logging - // service with a Kubernetes-native resource model - // * `logging.googleapis.com` - The legacy Cloud Logging service (no - // longer - // available as of GKE 1.15). - // * `none` - no logs will be exported from the cluster. 
- // - // If left as an empty string,`logging.googleapis.com/kubernetes` will - // be - // used for GKE 1.14+ or `logging.googleapis.com` for earlier versions. + // to write logs. Currently available options: * + // `logging.googleapis.com/kubernetes` - The Cloud Logging service with + // a Kubernetes-native resource model * `logging.googleapis.com` - The + // legacy Cloud Logging service (no longer available as of GKE 1.15). * + // `none` - no logs will be exported from the cluster. If left as an + // empty string,`logging.googleapis.com/kubernetes` will be used for GKE + // 1.14+ or `logging.googleapis.com` for earlier versions. LoggingService string `json:"loggingService,omitempty"` // Name: The name (project, location, cluster) of the cluster to set - // logging. - // Specified in the format `projects/*/locations/*/clusters/*`. + // logging. Specified in the format `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // project number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4043,26 +3885,21 @@ type SetMaintenancePolicyRequest struct { ClusterId string `json:"clusterId,omitempty"` // MaintenancePolicy: Required. The maintenance policy to be set for the - // cluster. An empty field - // clears the existing maintenance policy. + // cluster. An empty field clears the existing maintenance policy. MaintenancePolicy *MaintenancePolicy `json:"maintenancePolicy,omitempty"` // Name: The name (project, location, cluster id) of the cluster to set - // maintenance - // policy. - // Specified in the format `projects/*/locations/*/clusters/*`. + // maintenance policy. Specified in the format + // `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Required. The Google Developers Console [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). + // project number](https://support.google.com/cloud/answer/6158840). ProjectId string `json:"projectId,omitempty"` - // Zone: Required. The name of the Google Compute - // Engine + // Zone: Required. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. + // which the cluster resides. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4098,41 +3935,32 @@ type SetMasterAuthRequest struct { // "UNKNOWN" - Operation is unknown and will error out. // "SET_PASSWORD" - Set the password to a user generated value. // "GENERATE_PASSWORD" - Generate a new password and set it to that. - // "SET_USERNAME" - Set the username. 
If an empty username is - // provided, basic authentication - // is disabled for the cluster. If a non-empty username is provided, - // basic - // authentication is enabled, with either a provided password or a - // generated - // one. + // "SET_USERNAME" - Set the username. If an empty username is + // provided, basic authentication is disabled for the cluster. If a + // non-empty username is provided, basic authentication is enabled, with + // either a provided password or a generated one. Action string `json:"action,omitempty"` - // ClusterId: Deprecated. The name of the cluster to upgrade. - // This field has been deprecated and replaced by the name field. + // ClusterId: Deprecated. The name of the cluster to upgrade. This field + // has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Name: The name (project, location, cluster) of the cluster to set - // auth. - // Specified in the format `projects/*/locations/*/clusters/*`. + // auth. Specified in the format `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // project number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` // Update: Required. A description of the update. Update *MasterAuth `json:"update,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "Action") to @@ -4161,47 +3989,35 @@ func (s *SetMasterAuthRequest) MarshalJSON() ([]byte, error) { // SetMonitoringServiceRequest: SetMonitoringServiceRequest sets the // monitoring service of a cluster. type SetMonitoringServiceRequest struct { - // ClusterId: Deprecated. The name of the cluster to upgrade. - // This field has been deprecated and replaced by the name field. + // ClusterId: Deprecated. The name of the cluster to upgrade. This field + // has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // MonitoringService: Required. The monitoring service the cluster - // should use to write metrics. - // Currently available options: - // - // * "monitoring.googleapis.com/kubernetes" - The Cloud - // Monitoring - // service with a Kubernetes-native resource model - // * `monitoring.googleapis.com` - The legacy Cloud Monitoring service - // (no - // longer available as of GKE 1.15). - // * `none` - No metrics will be exported from the cluster. - // - // If left as an empty string,`monitoring.googleapis.com/kubernetes` - // will be - // used for GKE 1.14+ or `monitoring.googleapis.com` for earlier + // should use to write metrics. 
Currently available options: * + // "monitoring.googleapis.com/kubernetes" - The Cloud Monitoring service + // with a Kubernetes-native resource model * `monitoring.googleapis.com` + // - The legacy Cloud Monitoring service (no longer available as of GKE + // 1.15). * `none` - No metrics will be exported from the cluster. If + // left as an empty string,`monitoring.googleapis.com/kubernetes` will + // be used for GKE 1.14+ or `monitoring.googleapis.com` for earlier // versions. MonitoringService string `json:"monitoringService,omitempty"` // Name: The name (project, location, cluster) of the cluster to set - // monitoring. - // Specified in the format `projects/*/locations/*/clusters/*`. + // monitoring. Specified in the format + // `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // project number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4230,13 +4046,13 @@ func (s *SetMonitoringServiceRequest) MarshalJSON() ([]byte, error) { // SetNetworkPolicyRequest: SetNetworkPolicyRequest enables/disables // network policy for a cluster. type SetNetworkPolicyRequest struct { - // ClusterId: Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the name field. + // ClusterId: Deprecated. The name of the cluster. This field has been + // deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Name: The name (project, location, cluster id) of the cluster to set - // networking - // policy. Specified in the format `projects/*/locations/*/clusters/*`. + // networking policy. Specified in the format + // `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // NetworkPolicy: Required. Configuration options for the NetworkPolicy @@ -4245,18 +4061,14 @@ type SetNetworkPolicyRequest struct { // ProjectId: Deprecated. The Google Developers Console [project ID or // project - // number](https://developers.google.com/console/help/new/#projec - // tnumber). - // This field has been deprecated and replaced by the name field. + // number](https://developers.google.com/console/help/new/#projectnumber) + // . This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. 
Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4288,35 +4100,28 @@ type SetNodePoolAutoscalingRequest struct { // Autoscaling: Required. Autoscaling configuration for the node pool. Autoscaling *NodePoolAutoscaling `json:"autoscaling,omitempty"` - // ClusterId: Deprecated. The name of the cluster to upgrade. - // This field has been deprecated and replaced by the name field. + // ClusterId: Deprecated. The name of the cluster to upgrade. This field + // has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Name: The name (project, location, cluster, node pool) of the node - // pool to set - // autoscaler settings. Specified in the - // format + // pool to set autoscaler settings. Specified in the format // `projects/*/locations/*/clusters/*/nodePools/*`. Name string `json:"name,omitempty"` - // NodePoolId: Deprecated. The name of the node pool to upgrade. - // This field has been deprecated and replaced by the name field. + // NodePoolId: Deprecated. The name of the node pool to upgrade. This + // field has been deprecated and replaced by the name field. NodePoolId string `json:"nodePoolId,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // project number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "Autoscaling") to @@ -4343,41 +4148,33 @@ func (s *SetNodePoolAutoscalingRequest) MarshalJSON() ([]byte, error) { } // SetNodePoolManagementRequest: SetNodePoolManagementRequest sets the -// node management properties of a node -// pool. +// node management properties of a node pool. type SetNodePoolManagementRequest struct { - // ClusterId: Deprecated. The name of the cluster to update. - // This field has been deprecated and replaced by the name field. + // ClusterId: Deprecated. The name of the cluster to update. This field + // has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Management: Required. NodeManagement configuration for the node pool. Management *NodeManagement `json:"management,omitempty"` // Name: The name (project, location, cluster, node pool id) of the node - // pool to set - // management properties. Specified in the - // format + // pool to set management properties. Specified in the format // `projects/*/locations/*/clusters/*/nodePools/*`. Name string `json:"name,omitempty"` - // NodePoolId: Deprecated. The name of the node pool to update. - // This field has been deprecated and replaced by the name field. + // NodePoolId: Deprecated. The name of the node pool to update. This + // field has been deprecated and replaced by the name field. NodePoolId string `json:"nodePoolId,omitempty"` // ProjectId: Deprecated. 
The Google Developers Console [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // project number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4403,42 +4200,34 @@ func (s *SetNodePoolManagementRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SetNodePoolSizeRequest: SetNodePoolSizeRequest sets the size a -// node +// SetNodePoolSizeRequest: SetNodePoolSizeRequest sets the size a node // pool. type SetNodePoolSizeRequest struct { - // ClusterId: Deprecated. The name of the cluster to update. - // This field has been deprecated and replaced by the name field. + // ClusterId: Deprecated. The name of the cluster to update. This field + // has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Name: The name (project, location, cluster, node pool id) of the node - // pool to set - // size. - // Specified in the format + // pool to set size. Specified in the format // `projects/*/locations/*/clusters/*/nodePools/*`. Name string `json:"name,omitempty"` // NodeCount: Required. The desired node count for the pool. NodeCount int64 `json:"nodeCount,omitempty"` - // NodePoolId: Deprecated. The name of the node pool to update. - // This field has been deprecated and replaced by the name field. + // NodePoolId: Deprecated. The name of the node pool to update. This + // field has been deprecated and replaced by the name field. NodePoolId string `json:"nodePoolId,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // project number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4467,25 +4256,16 @@ func (s *SetNodePoolSizeRequest) MarshalJSON() ([]byte, error) { // ShieldedInstanceConfig: A set of Shielded Instance options. type ShieldedInstanceConfig struct { // EnableIntegrityMonitoring: Defines whether the instance has integrity - // monitoring enabled. - // - // Enables monitoring and attestation of the boot integrity of the - // instance. 
- // The attestation is performed against the integrity policy baseline. - // This - // baseline is initially derived from the implicitly trusted boot image - // when - // the instance is created. + // monitoring enabled. Enables monitoring and attestation of the boot + // integrity of the instance. The attestation is performed against the + // integrity policy baseline. This baseline is initially derived from + // the implicitly trusted boot image when the instance is created. EnableIntegrityMonitoring bool `json:"enableIntegrityMonitoring,omitempty"` // EnableSecureBoot: Defines whether the instance has Secure Boot - // enabled. - // - // Secure Boot helps ensure that the system only runs authentic software - // by - // verifying the digital signature of all boot components, and halting - // the - // boot process if signature verification fails. + // enabled. Secure Boot helps ensure that the system only runs authentic + // software by verifying the digital signature of all boot components, + // and halting the boot process if signature verification fails. EnableSecureBoot bool `json:"enableSecureBoot,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -4543,36 +4323,31 @@ func (s *ShieldedNodes) MarshalJSON() ([]byte, error) { } // StartIPRotationRequest: StartIPRotationRequest creates a new IP for -// the cluster and then performs -// a node upgrade on each node pool to point to the new IP. +// the cluster and then performs a node upgrade on each node pool to +// point to the new IP. type StartIPRotationRequest struct { - // ClusterId: Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the name field. + // ClusterId: Deprecated. The name of the cluster. This field has been + // deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Name: The name (project, location, cluster id) of the cluster to - // start IP - // rotation. Specified in the format + // start IP rotation. Specified in the format // `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or // project - // number](https://developers.google.com/console/help/new/#projec - // tnumber). - // This field has been deprecated and replaced by the name field. + // number](https://developers.google.com/console/help/new/#projectnumber) + // . This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` // RotateCredentials: Whether to rotate credentials during IP rotation. RotateCredentials bool `json:"rotateCredentials,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4599,26 +4374,22 @@ func (s *StartIPRotationRequest) MarshalJSON() ([]byte, error) { } // StatusCondition: StatusCondition describes why a cluster or a node -// pool has a certain status -// (e.g., ERROR or DEGRADED). +// pool has a certain status (e.g., ERROR or DEGRADED). 
type StatusCondition struct { // Code: Machine-friendly representation of the condition // // Possible values: // "UNKNOWN" - UNKNOWN indicates a generic condition. // "GCE_STOCKOUT" - GCE_STOCKOUT indicates that Google Compute Engine - // resources are - // temporarily unavailable. + // resources are temporarily unavailable. // "GKE_SERVICE_ACCOUNT_DELETED" - GKE_SERVICE_ACCOUNT_DELETED - // indicates that the user deleted their robot - // service account. + // indicates that the user deleted their robot service account. // "GCE_QUOTA_EXCEEDED" - Google Compute Engine quota was exceeded. // "SET_BY_OPERATOR" - Cluster state was manually changed by an SRE // due to a system logic error. // "CLOUD_KMS_KEY_ERROR" - Unable to perform an encrypt operation - // against the CloudKMS key used for - // etcd level encryption. - // More codes TBA + // against the CloudKMS key used for etcd level encryption. More codes + // TBA Code string `json:"code,omitempty"` // Message: Human-friendly representation of the condition @@ -4650,8 +4421,7 @@ func (s *StatusCondition) MarshalJSON() ([]byte, error) { // TimeWindow: Represents an arbitrary window of time. type TimeWindow struct { // EndTime: The time that the window ends. The end time should take - // place after the - // start time. + // place after the start time. EndTime string `json:"endTime,omitempty"` // StartTime: The time that the window first starts. @@ -4683,32 +4453,26 @@ func (s *TimeWindow) MarshalJSON() ([]byte, error) { // UpdateClusterRequest: UpdateClusterRequest updates the settings of a // cluster. type UpdateClusterRequest struct { - // ClusterId: Deprecated. The name of the cluster to upgrade. - // This field has been deprecated and replaced by the name field. + // ClusterId: Deprecated. The name of the cluster to upgrade. This field + // has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` - // Name: The name (project, location, cluster) of the cluster to - // update. + // Name: The name (project, location, cluster) of the cluster to update. // Specified in the format `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // project number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` // Update: Required. A description of the update. Update *ClusterUpdate `json:"update,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4737,44 +4501,33 @@ func (s *UpdateClusterRequest) MarshalJSON() ([]byte, error) { // UpdateMasterRequest: UpdateMasterRequest updates the master of the // cluster. type UpdateMasterRequest struct { - // ClusterId: Deprecated. The name of the cluster to upgrade. - // This field has been deprecated and replaced by the name field. + // ClusterId: Deprecated. 
The name of the cluster to upgrade. This field + // has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // MasterVersion: Required. The Kubernetes version to change the master - // to. - // - // Users may specify either explicit versions offered by Kubernetes - // Engine or - // version aliases, which have the following behavior: - // - // - "latest": picks the highest valid Kubernetes version - // - "1.X": picks the highest valid patch+gke.N patch in the 1.X - // version - // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - // - "1.X.Y-gke.N": picks an explicit Kubernetes version - // - "-": picks the default Kubernetes version + // to. Users may specify either explicit versions offered by Kubernetes + // Engine or version aliases, which have the following behavior: - + // "latest": picks the highest valid Kubernetes version - "1.X": picks + // the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": + // picks the highest valid gke.N patch in the 1.X.Y version - + // "1.X.Y-gke.N": picks an explicit Kubernetes version - "-": picks the + // default Kubernetes version MasterVersion string `json:"masterVersion,omitempty"` - // Name: The name (project, location, cluster) of the cluster to - // update. + // Name: The name (project, location, cluster) of the cluster to update. // Specified in the format `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // project number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4803,56 +4556,43 @@ func (s *UpdateMasterRequest) MarshalJSON() ([]byte, error) { // UpdateNodePoolRequest: UpdateNodePoolRequests update a node pool's // image and/or version. type UpdateNodePoolRequest struct { - // ClusterId: Deprecated. The name of the cluster to upgrade. - // This field has been deprecated and replaced by the name field. + // ClusterId: Deprecated. The name of the cluster to upgrade. This field + // has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // ImageType: Required. The desired image type for the node pool. ImageType string `json:"imageType,omitempty"` - // Locations: The desired list of Google Compute - // Engine + // Locations: The desired list of Google Compute Engine // [zones](https://cloud.google.com/compute/docs/zones#available) in - // which the - // node pool's nodes should be located. Changing the locations for a - // node pool - // will result in nodes being either created or removed from the node - // pool, - // depending on whether locations are being added or removed. + // which the node pool's nodes should be located. 
Changing the locations + // for a node pool will result in nodes being either created or removed + // from the node pool, depending on whether locations are being added or + // removed. Locations []string `json:"locations,omitempty"` // Name: The name (project, location, cluster, node pool) of the node - // pool to - // update. Specified in the - // format + // pool to update. Specified in the format // `projects/*/locations/*/clusters/*/nodePools/*`. Name string `json:"name,omitempty"` - // NodePoolId: Deprecated. The name of the node pool to upgrade. - // This field has been deprecated and replaced by the name field. + // NodePoolId: Deprecated. The name of the node pool to upgrade. This + // field has been deprecated and replaced by the name field. NodePoolId string `json:"nodePoolId,omitempty"` // NodeVersion: Required. The Kubernetes version to change the nodes to - // (typically an - // upgrade). - // - // Users may specify either explicit versions offered by Kubernetes - // Engine or - // version aliases, which have the following behavior: - // - // - "latest": picks the highest valid Kubernetes version - // - "1.X": picks the highest valid patch+gke.N patch in the 1.X - // version - // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - // - "1.X.Y-gke.N": picks an explicit Kubernetes version - // - "-": picks the Kubernetes master version + // (typically an upgrade). Users may specify either explicit versions + // offered by Kubernetes Engine or version aliases, which have the + // following behavior: - "latest": picks the highest valid Kubernetes + // version - "1.X": picks the highest valid patch+gke.N patch in the 1.X + // version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y + // version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "-": + // picks the Kubernetes master version NodeVersion string `json:"nodeVersion,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // project number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` // UpgradeSettings: Upgrade settings control disruption and speed of the @@ -4863,13 +4603,10 @@ type UpdateNodePoolRequest struct { // node pool. WorkloadMetadataConfig *WorkloadMetadataConfig `json:"workloadMetadataConfig,omitempty"` - // Zone: Deprecated. The name of the Google Compute - // Engine + // Zone: Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4895,52 +4632,84 @@ func (s *UpdateNodePoolRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// UpgradeEvent: UpgradeEvent is a notification sent to customers by the +// cluster server when a resource is upgrading. +type UpgradeEvent struct { + // CurrentVersion: Required. The current version before the upgrade. + CurrentVersion string `json:"currentVersion,omitempty"` + + // Operation: Required. 
The operation associated with this upgrade. + Operation string `json:"operation,omitempty"` + + // OperationStartTime: Required. The time when the operation was + // started. + OperationStartTime string `json:"operationStartTime,omitempty"` + + // Resource: Optional. Optional relative path to the resource. For + // example in node pool upgrades, the relative path of the node pool. + Resource string `json:"resource,omitempty"` + + // ResourceType: Required. The resource type that is upgrading. + // + // Possible values: + // "UPGRADE_RESOURCE_TYPE_UNSPECIFIED" - Default value. This shouldn't + // be used. + // "MASTER" - Master / control plane + // "NODE_POOL" - Node pool + ResourceType string `json:"resourceType,omitempty"` + + // TargetVersion: Required. The target version for the upgrade. + TargetVersion string `json:"targetVersion,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CurrentVersion") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CurrentVersion") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *UpgradeEvent) MarshalJSON() ([]byte, error) { + type NoMethod UpgradeEvent + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // UpgradeSettings: These upgrade settings control the level of -// parallelism and the level of -// disruption caused by an upgrade. -// +// parallelism and the level of disruption caused by an upgrade. // maxUnavailable controls the number of nodes that can be -// simultaneously -// unavailable. -// -// maxSurge controls the number of additional nodes that can be added to -// the -// node pool temporarily for the time of the upgrade to increase the -// number of -// available nodes. -// +// simultaneously unavailable. maxSurge controls the number of +// additional nodes that can be added to the node pool temporarily for +// the time of the upgrade to increase the number of available nodes. // (maxUnavailable + maxSurge) determines the level of parallelism (how -// many -// nodes are being upgraded at the same time). -// -// Note: upgrades inevitably introduce some disruption since workloads -// need to -// be moved from old nodes to new, upgraded ones. Even if -// maxUnavailable=0, -// this holds true. (Disruption stays within the limits -// of -// PodDisruptionBudget, if it is configured.) -// -// Consider a hypothetical node pool with 5 nodes having -// maxSurge=2, -// maxUnavailable=1. This means the upgrade process upgrades 3 -// nodes -// simultaneously. It creates 2 additional (upgraded) nodes, then it -// brings -// down 3 old (not yet upgraded) nodes at the same time. This ensures -// that -// there are always at least 4 nodes available. +// many nodes are being upgraded at the same time). 
Note: upgrades +// inevitably introduce some disruption since workloads need to be moved +// from old nodes to new, upgraded ones. Even if maxUnavailable=0, this +// holds true. (Disruption stays within the limits of +// PodDisruptionBudget, if it is configured.) Consider a hypothetical +// node pool with 5 nodes having maxSurge=2, maxUnavailable=1. This +// means the upgrade process upgrades 3 nodes simultaneously. It creates +// 2 additional (upgraded) nodes, then it brings down 3 old (not yet +// upgraded) nodes at the same time. This ensures that there are always +// at least 4 nodes available. type UpgradeSettings struct { // MaxSurge: The maximum number of nodes that can be created beyond the - // current size - // of the node pool during the upgrade process. + // current size of the node pool during the upgrade process. MaxSurge int64 `json:"maxSurge,omitempty"` // MaxUnavailable: The maximum number of nodes that can be - // simultaneously unavailable during - // the upgrade process. A node is considered available if its status - // is - // Ready. + // simultaneously unavailable during the upgrade process. A node is + // considered available if its status is Ready. MaxUnavailable int64 `json:"maxUnavailable,omitempty"` // ForceSendFields is a list of field names (e.g. "MaxSurge") to @@ -4967,31 +4736,27 @@ func (s *UpgradeSettings) MarshalJSON() ([]byte, error) { } // UsableSubnetwork: UsableSubnetwork resource returns the subnetwork -// name, its associated network -// and the primary CIDR range. +// name, its associated network and the primary CIDR range. type UsableSubnetwork struct { // IpCidrRange: The range of internal addresses that are owned by this // subnetwork. IpCidrRange string `json:"ipCidrRange,omitempty"` - // Network: Network Name. - // Example: projects/my-project/global/networks/my-network + // Network: Network Name. Example: + // projects/my-project/global/networks/my-network Network string `json:"network,omitempty"` // SecondaryIpRanges: Secondary IP ranges. SecondaryIpRanges []*UsableSubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` // StatusMessage: A human readable status message representing the - // reasons for cases where - // the caller cannot use the secondary ranges under the subnet. For - // example if - // the secondary_ip_ranges is empty due to a permission issue, an - // insufficient - // permission message will be given by status_message. + // reasons for cases where the caller cannot use the secondary ranges + // under the subnet. For example if the secondary_ip_ranges is empty due + // to a permission issue, an insufficient permission message will be + // given by status_message. StatusMessage string `json:"statusMessage,omitempty"` - // Subnetwork: Subnetwork Name. - // Example: + // Subnetwork: Subnetwork Name. Example: // projects/my-project/regions/us-central1/subnetworks/my-subnet Subnetwork string `json:"subnetwork,omitempty"` @@ -5026,8 +4791,7 @@ type UsableSubnetworkSecondaryRange struct { IpCidrRange string `json:"ipCidrRange,omitempty"` // RangeName: The name associated with this subnetwork secondary range, - // used when adding - // an alias IP range to a VM instance. + // used when adding an alias IP range to a VM instance. RangeName string `json:"rangeName,omitempty"` // Status: This field is to determine the status of the secondary range @@ -5039,16 +4803,15 @@ type UsableSubnetworkSecondaryRange struct { // "UNUSED" - UNUSED denotes that this range is unclaimed by any // cluster. 
// "IN_USE_SERVICE" - IN_USE_SERVICE denotes that this range is - // claimed by a cluster for - // services. It cannot be used for other clusters. + // claimed by a cluster for services. It cannot be used for other + // clusters. // "IN_USE_SHAREABLE_POD" - IN_USE_SHAREABLE_POD denotes this range - // was created by the network admin - // and is currently claimed by a cluster for pods. It can only be used - // by - // other clusters as a pod range. + // was created by the network admin and is currently claimed by a + // cluster for pods. It can only be used by other clusters as a pod + // range. // "IN_USE_MANAGED_POD" - IN_USE_MANAGED_POD denotes this range was - // created by GKE and is claimed - // for pods. It cannot be used for other clusters. + // created by GKE and is claimed for pods. It cannot be used for other + // clusters. Status string `json:"status,omitempty"` // ForceSendFields is a list of field names (e.g. "IpCidrRange") to @@ -5075,9 +4838,8 @@ func (s *UsableSubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { } // VerticalPodAutoscaling: VerticalPodAutoscaling contains global, -// per-cluster information -// required by Vertical Pod Autoscaler to automatically adjust -// the resources of pods controlled by it. +// per-cluster information required by Vertical Pod Autoscaler to +// automatically adjust the resources of pods controlled by it. type VerticalPodAutoscaling struct { // Enabled: Enables vertical pod autoscaling. Enabled bool `json:"enabled,omitempty"` @@ -5106,8 +4868,7 @@ func (s *VerticalPodAutoscaling) MarshalJSON() ([]byte, error) { } // WorkloadIdentityConfig: Configuration for the use of Kubernetes -// Service Accounts in GCP IAM -// policies. +// Service Accounts in GCP IAM policies. type WorkloadIdentityConfig struct { // WorkloadPool: The workload pool to attach all Kubernetes service // accounts to. @@ -5137,25 +4898,19 @@ func (s *WorkloadIdentityConfig) MarshalJSON() ([]byte, error) { } // WorkloadMetadataConfig: WorkloadMetadataConfig defines the metadata -// configuration to expose to -// workloads on the node pool. +// configuration to expose to workloads on the node pool. type WorkloadMetadataConfig struct { // Mode: Mode is the configuration for how to expose metadata to - // workloads running - // on the node pool. + // workloads running on the node pool. // // Possible values: // "MODE_UNSPECIFIED" - Not set. // "GCE_METADATA" - Expose all Compute Engine metadata to pods. // "GKE_METADATA" - Run the GKE Metadata Server on this node. The GKE - // Metadata Server exposes - // a metadata API to workloads that is compatible with the V1 - // Compute - // Metadata APIs exposed by the Compute Engine and App Engine - // Metadata - // Servers. This feature can only be enabled if Workload Identity is - // enabled - // at the cluster level. + // Metadata Server exposes a metadata API to workloads that is + // compatible with the V1 Compute Metadata APIs exposed by the Compute + // Engine and App Engine Metadata Servers. This feature can only be + // enabled if Workload Identity is enabled at the cluster level. Mode string `json:"mode,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Mode") to @@ -5201,11 +4956,9 @@ func (r *ProjectsAggregatedUsableSubnetworksService) List(parent string) *Projec } // Filter sets the optional parameter "filter": Filtering currently only -// supports equality on the networkProjectId and must -// be in the form: "networkProjectId=[PROJECTID]", where -// `networkProjectId` -// is the project which owns the listed subnetworks. This defaults to -// the +// supports equality on the networkProjectId and must be in the form: +// "networkProjectId=[PROJECTID]", where `networkProjectId` is the +// project which owns the listed subnetworks. This defaults to the // parent project ID. func (c *ProjectsAggregatedUsableSubnetworksListCall) Filter(filter string) *ProjectsAggregatedUsableSubnetworksListCall { c.urlParams_.Set("filter", filter) @@ -5213,11 +4966,9 @@ func (c *ProjectsAggregatedUsableSubnetworksListCall) Filter(filter string) *Pro } // PageSize sets the optional parameter "pageSize": The max number of -// results per page that should be returned. If the number -// of available results is larger than `page_size`, a `next_page_token` -// is -// returned which can be used to get the next page of results in -// subsequent +// results per page that should be returned. If the number of available +// results is larger than `page_size`, a `next_page_token` is returned +// which can be used to get the next page of results in subsequent // requests. Acceptable values are 0 to 500, inclusive. (Default: 500) func (c *ProjectsAggregatedUsableSubnetworksListCall) PageSize(pageSize int64) *ProjectsAggregatedUsableSubnetworksListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) @@ -5225,8 +4976,8 @@ func (c *ProjectsAggregatedUsableSubnetworksListCall) PageSize(pageSize int64) * } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set this to the nextPageToken returned by -// previous list requests to get the next page of results. +// token to use. Set this to the nextPageToken returned by previous list +// requests to get the next page of results. func (c *ProjectsAggregatedUsableSubnetworksListCall) PageToken(pageToken string) *ProjectsAggregatedUsableSubnetworksListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -5269,7 +5020,7 @@ func (c *ProjectsAggregatedUsableSubnetworksListCall) Header() http.Header { func (c *ProjectsAggregatedUsableSubnetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5340,23 +5091,23 @@ func (c *ProjectsAggregatedUsableSubnetworksListCall) Do(opts ...googleapi.CallO // ], // "parameters": { // "filter": { - // "description": "Filtering currently only supports equality on the networkProjectId and must\nbe in the form: \"networkProjectId=[PROJECTID]\", where `networkProjectId`\nis the project which owns the listed subnetworks. This defaults to the\nparent project ID.", + // "description": "Filtering currently only supports equality on the networkProjectId and must be in the form: \"networkProjectId=[PROJECTID]\", where `networkProjectId` is the project which owns the listed subnetworks. This defaults to the parent project ID.", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "The max number of results per page that should be returned. 
If the number\nof available results is larger than `page_size`, a `next_page_token` is\nreturned which can be used to get the next page of results in subsequent\nrequests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The max number of results per page that should be returned. If the number of available results is larger than `page_size`, a `next_page_token` is returned which can be used to get the next page of results in subsequent requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set this to the nextPageToken returned by\nprevious list requests to get the next page of results.", + // "description": "Specifies a page token to use. Set this to the nextPageToken returned by previous list requests to get the next page of results.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "The parent project where subnetworks are usable.\nSpecified in the format `projects/*`.", + // "description": "The parent project where subnetworks are usable. Specified in the format `projects/*`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -5415,24 +5166,19 @@ func (r *ProjectsLocationsService) GetServerConfig(name string) *ProjectsLocatio } // ProjectId sets the optional parameter "projectId": Deprecated. The -// Google Developers Console [project ID or -// project -// number](https://support.google.com/cloud/answer/6158840). -// This -// field has been deprecated and replaced by the name field. +// Google Developers Console [project ID or project +// number](https://support.google.com/cloud/answer/6158840). This field +// has been deprecated and replaced by the name field. func (c *ProjectsLocationsGetServerConfigCall) ProjectId(projectId string) *ProjectsLocationsGetServerConfigCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Deprecated. The name of the -// Google Compute -// Engine +// Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) to -// return -// operations for. This field has been deprecated and replaced by the -// name -// field. +// return operations for. This field has been deprecated and replaced by +// the name field. 
func (c *ProjectsLocationsGetServerConfigCall) Zone(zone string) *ProjectsLocationsGetServerConfigCall { c.urlParams_.Set("zone", zone) return c @@ -5475,7 +5221,7 @@ func (c *ProjectsLocationsGetServerConfigCall) Header() http.Header { func (c *ProjectsLocationsGetServerConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5546,19 +5292,19 @@ func (c *ProjectsLocationsGetServerConfigCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "The name (project and location) of the server config to get,\nspecified in the format `projects/*/locations/*`.", + // "description": "The name (project and location) of the server config to get, specified in the format `projects/*/locations/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) to return\noperations for. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // } @@ -5620,7 +5366,7 @@ func (c *ProjectsLocationsClustersCompleteIpRotationCall) Header() http.Header { func (c *ProjectsLocationsClustersCompleteIpRotationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5693,7 +5439,7 @@ func (c *ProjectsLocationsClustersCompleteIpRotationCall) Do(opts ...googleapi.C // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster id) of the cluster to complete IP\nrotation. Specified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster id) of the cluster to complete IP rotation. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -5726,24 +5472,14 @@ type ProjectsLocationsClustersCreateCall struct { } // Create: Creates a cluster, consisting of the specified number and -// type of Google -// Compute Engine instances. -// -// By default, the cluster is created in the -// project's -// [default -// network](https://cloud.google.com/compute/docs/netw -// orks-and-firewalls#networks). 
-// -// One firewall is added for the cluster. After cluster creation, -// the Kubelet creates routes for each node to allow the containers -// on that node to communicate with all other instances in -// the -// cluster. -// -// Finally, an entry is added to the project's global metadata -// indicating -// which CIDR range the cluster is using. +// type of Google Compute Engine instances. By default, the cluster is +// created in the project's [default +// network](https://cloud.google.com/compute/docs/networks-and-firewalls# +// networks). One firewall is added for the cluster. After cluster +// creation, the Kubelet creates routes for each node to allow the +// containers on that node to communicate with all other instances in +// the cluster. Finally, an entry is added to the project's global +// metadata indicating which CIDR range the cluster is using. func (r *ProjectsLocationsClustersService) Create(parent string, createclusterrequest *CreateClusterRequest) *ProjectsLocationsClustersCreateCall { c := &ProjectsLocationsClustersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -5778,7 +5514,7 @@ func (c *ProjectsLocationsClustersCreateCall) Header() http.Header { func (c *ProjectsLocationsClustersCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5842,7 +5578,7 @@ func (c *ProjectsLocationsClustersCreateCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default\nnetwork](https://cloud.google.com/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe Kubelet creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range the cluster is using.", + // "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances. By default, the cluster is created in the project's [default network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks). One firewall is added for the cluster. After cluster creation, the Kubelet creates routes for each node to allow the containers on that node to communicate with all other instances in the cluster. Finally, an entry is added to the project's global metadata indicating which CIDR range the cluster is using.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters", // "httpMethod": "POST", // "id": "container.projects.locations.clusters.create", @@ -5851,7 +5587,7 @@ func (c *ProjectsLocationsClustersCreateCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "parent": { - // "description": "The parent (project and location) where the cluster will be created.\nSpecified in the format `projects/*/locations/*`.", + // "description": "The parent (project and location) where the cluster will be created. 
Specified in the format `projects/*/locations/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -5883,17 +5619,11 @@ type ProjectsLocationsClustersDeleteCall struct { } // Delete: Deletes the cluster, including the Kubernetes endpoint and -// all worker -// nodes. -// -// Firewalls and routes that were configured during cluster creation -// are also deleted. -// -// Other Google Compute Engine resources that might be in use by the -// cluster, -// such as load balancer resources, are not deleted if they weren't -// present -// when the cluster was initially created. +// all worker nodes. Firewalls and routes that were configured during +// cluster creation are also deleted. Other Google Compute Engine +// resources that might be in use by the cluster, such as load balancer +// resources, are not deleted if they weren't present when the cluster +// was initially created. func (r *ProjectsLocationsClustersService) Delete(name string) *ProjectsLocationsClustersDeleteCall { c := &ProjectsLocationsClustersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5901,32 +5631,27 @@ func (r *ProjectsLocationsClustersService) Delete(name string) *ProjectsLocation } // ClusterId sets the optional parameter "clusterId": Deprecated. The -// name of the cluster to delete. -// This field has been deprecated and replaced by the name field. +// name of the cluster to delete. This field has been deprecated and +// replaced by the name field. func (c *ProjectsLocationsClustersDeleteCall) ClusterId(clusterId string) *ProjectsLocationsClustersDeleteCall { c.urlParams_.Set("clusterId", clusterId) return c } // ProjectId sets the optional parameter "projectId": Deprecated. The -// Google Developers Console [project ID or -// project -// number](https://support.google.com/cloud/answer/6158840). -// This -// field has been deprecated and replaced by the name field. +// Google Developers Console [project ID or project +// number](https://support.google.com/cloud/answer/6158840). This field +// has been deprecated and replaced by the name field. func (c *ProjectsLocationsClustersDeleteCall) ProjectId(projectId string) *ProjectsLocationsClustersDeleteCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Deprecated. The name of the -// Google Compute -// Engine +// Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in -// which the -// cluster resides. This field has been deprecated and replaced by the -// name -// field. +// which the cluster resides. This field has been deprecated and +// replaced by the name field. 
func (c *ProjectsLocationsClustersDeleteCall) Zone(zone string) *ProjectsLocationsClustersDeleteCall { c.urlParams_.Set("zone", zone) return c @@ -5959,7 +5684,7 @@ func (c *ProjectsLocationsClustersDeleteCall) Header() http.Header { func (c *ProjectsLocationsClustersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6018,7 +5743,7 @@ func (c *ProjectsLocationsClustersDeleteCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster,\nsuch as load balancer resources, are not deleted if they weren't present\nwhen the cluster was initially created.", + // "description": "Deletes the cluster, including the Kubernetes endpoint and all worker nodes. Firewalls and routes that were configured during cluster creation are also deleted. Other Google Compute Engine resources that might be in use by the cluster, such as load balancer resources, are not deleted if they weren't present when the cluster was initially created.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}", // "httpMethod": "DELETE", // "id": "container.projects.locations.clusters.delete", @@ -6027,24 +5752,24 @@ func (c *ProjectsLocationsClustersDeleteCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster to delete.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster to delete. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "name": { - // "description": "The name (project, location, cluster) of the cluster to delete.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to delete. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // } @@ -6079,32 +5804,27 @@ func (r *ProjectsLocationsClustersService) Get(name string) *ProjectsLocationsCl } // ClusterId sets the optional parameter "clusterId": Deprecated. The -// name of the cluster to retrieve. -// This field has been deprecated and replaced by the name field. +// name of the cluster to retrieve. This field has been deprecated and +// replaced by the name field. func (c *ProjectsLocationsClustersGetCall) ClusterId(clusterId string) *ProjectsLocationsClustersGetCall { c.urlParams_.Set("clusterId", clusterId) return c } // ProjectId sets the optional parameter "projectId": Deprecated. The -// Google Developers Console [project ID or -// project -// number](https://support.google.com/cloud/answer/6158840). -// This -// field has been deprecated and replaced by the name field. +// Google Developers Console [project ID or project +// number](https://support.google.com/cloud/answer/6158840). This field +// has been deprecated and replaced by the name field. func (c *ProjectsLocationsClustersGetCall) ProjectId(projectId string) *ProjectsLocationsClustersGetCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Deprecated. The name of the -// Google Compute -// Engine +// Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in -// which the -// cluster resides. This field has been deprecated and replaced by the -// name -// field. +// which the cluster resides. This field has been deprecated and +// replaced by the name field. func (c *ProjectsLocationsClustersGetCall) Zone(zone string) *ProjectsLocationsClustersGetCall { c.urlParams_.Set("zone", zone) return c @@ -6147,7 +5867,7 @@ func (c *ProjectsLocationsClustersGetCall) Header() http.Header { func (c *ProjectsLocationsClustersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6218,24 +5938,24 @@ func (c *ProjectsLocationsClustersGetCall) Do(opts ...googleapi.CallOption) (*Cl // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster to retrieve.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster to retrieve. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "name": { - // "description": "The name (project, location, cluster) of the cluster to retrieve.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to retrieve. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). 
This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // } @@ -6262,12 +5982,9 @@ type ProjectsLocationsClustersGetJwksCall struct { header_ http.Header } -// GetJwks: Gets the public component of the cluster signing keys -// in -// JSON Web Key format. -// This API is not yet intended for general use, and is not available -// for all -// clusters. +// GetJwks: Gets the public component of the cluster signing keys in +// JSON Web Key format. This API is not yet intended for general use, +// and is not available for all clusters. func (r *ProjectsLocationsClustersService) GetJwks(parent string) *ProjectsLocationsClustersGetJwksCall { c := &ProjectsLocationsClustersGetJwksCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6311,7 +6028,7 @@ func (c *ProjectsLocationsClustersGetJwksCall) Header() http.Header { func (c *ProjectsLocationsClustersGetJwksCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6373,7 +6090,7 @@ func (c *ProjectsLocationsClustersGetJwksCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Gets the public component of the cluster signing keys in\nJSON Web Key format.\nThis API is not yet intended for general use, and is not available for all\nclusters.", + // "description": "Gets the public component of the cluster signing keys in JSON Web Key format. This API is not yet intended for general use, and is not available for all clusters.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/jwks", // "httpMethod": "GET", // "id": "container.projects.locations.clusters.getJwks", @@ -6382,7 +6099,7 @@ func (c *ProjectsLocationsClustersGetJwksCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "parent": { - // "description": "The cluster (project, location, cluster id) to get keys for. Specified in\nthe format `projects/*/locations/*/clusters/*`.", + // "description": "The cluster (project, location, cluster id) to get keys for. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -6409,8 +6126,7 @@ type ProjectsLocationsClustersListCall struct { } // List: Lists all clusters owned by a project in either the specified -// zone or all -// zones. +// zone or all zones. 
func (r *ProjectsLocationsClustersService) List(parent string) *ProjectsLocationsClustersListCall { c := &ProjectsLocationsClustersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6418,24 +6134,19 @@ func (r *ProjectsLocationsClustersService) List(parent string) *ProjectsLocation } // ProjectId sets the optional parameter "projectId": Deprecated. The -// Google Developers Console [project ID or -// project -// number](https://support.google.com/cloud/answer/6158840). -// This -// field has been deprecated and replaced by the parent field. +// Google Developers Console [project ID or project +// number](https://support.google.com/cloud/answer/6158840). This field +// has been deprecated and replaced by the parent field. func (c *ProjectsLocationsClustersListCall) ProjectId(projectId string) *ProjectsLocationsClustersListCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Deprecated. The name of the -// Google Compute -// Engine +// Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in -// which the -// cluster resides, or "-" for all zones. This field has been deprecated -// and -// replaced by the parent field. +// which the cluster resides, or "-" for all zones. This field has been +// deprecated and replaced by the parent field. func (c *ProjectsLocationsClustersListCall) Zone(zone string) *ProjectsLocationsClustersListCall { c.urlParams_.Set("zone", zone) return c @@ -6478,7 +6189,7 @@ func (c *ProjectsLocationsClustersListCall) Header() http.Header { func (c *ProjectsLocationsClustersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6540,7 +6251,7 @@ func (c *ProjectsLocationsClustersListCall) Do(opts ...googleapi.CallOption) (*L } return ret, nil // { - // "description": "Lists all clusters owned by a project in either the specified zone or all\nzones.", + // "description": "Lists all clusters owned by a project in either the specified zone or all zones.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters", // "httpMethod": "GET", // "id": "container.projects.locations.clusters.list", @@ -6549,19 +6260,19 @@ func (c *ProjectsLocationsClustersListCall) Do(opts ...googleapi.CallOption) (*L // ], // "parameters": { // "parent": { - // "description": "The parent (project and location) where the clusters will be listed.\nSpecified in the format `projects/*/locations/*`.\nLocation \"-\" matches all zones and all regions.", + // "description": "The parent (project and location) where the clusters will be listed. Specified in the format `projects/*/locations/*`. Location \"-\" matches all zones and all regions.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). 
This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides, or \"-\" for all zones. This field has been deprecated and\nreplaced by the parent field.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides, or \"-\" for all zones. This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" // } @@ -6623,7 +6334,7 @@ func (c *ProjectsLocationsClustersSetAddonsCall) Header() http.Header { func (c *ProjectsLocationsClustersSetAddonsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6696,7 +6407,7 @@ func (c *ProjectsLocationsClustersSetAddonsCall) Do(opts ...googleapi.CallOption // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster) of the cluster to set addons.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to set addons. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -6764,7 +6475,7 @@ func (c *ProjectsLocationsClustersSetLegacyAbacCall) Header() http.Header { func (c *ProjectsLocationsClustersSetLegacyAbacCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6837,7 +6548,7 @@ func (c *ProjectsLocationsClustersSetLegacyAbacCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster id) of the cluster to set legacy abac.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster id) of the cluster to set legacy abac. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -6869,12 +6580,10 @@ type ProjectsLocationsClustersSetLocationsCall struct { header_ http.Header } -// SetLocations: Sets the locations for a specific cluster. -// Deprecated. +// SetLocations: Sets the locations for a specific cluster. Deprecated. // Use -// [projects.locations.clusters.update](https://cloud.google.com/kube -// rnetes-engine/docs/reference/rest/v1/projects.locations.clusters/updat -// e) +// [projects.locations.clusters.update](https://cloud.google.com/kubernet +// es-engine/docs/reference/rest/v1/projects.locations.clusters/update) // instead. 
func (r *ProjectsLocationsClustersService) SetLocations(name string, setlocationsrequest *SetLocationsRequest) *ProjectsLocationsClustersSetLocationsCall { c := &ProjectsLocationsClustersSetLocationsCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -6910,7 +6619,7 @@ func (c *ProjectsLocationsClustersSetLocationsCall) Header() http.Header { func (c *ProjectsLocationsClustersSetLocationsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6974,7 +6683,7 @@ func (c *ProjectsLocationsClustersSetLocationsCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Sets the locations for a specific cluster.\nDeprecated. Use\n[projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update)\ninstead.", + // "description": "Sets the locations for a specific cluster. Deprecated. Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) instead.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLocations", // "httpMethod": "POST", // "id": "container.projects.locations.clusters.setLocations", @@ -6983,7 +6692,7 @@ func (c *ProjectsLocationsClustersSetLocationsCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster) of the cluster to set locations.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to set locations. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -7050,7 +6759,7 @@ func (c *ProjectsLocationsClustersSetLoggingCall) Header() http.Header { func (c *ProjectsLocationsClustersSetLoggingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7123,7 +6832,7 @@ func (c *ProjectsLocationsClustersSetLoggingCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster) of the cluster to set logging.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to set logging. 
Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -7190,7 +6899,7 @@ func (c *ProjectsLocationsClustersSetMaintenancePolicyCall) Header() http.Header func (c *ProjectsLocationsClustersSetMaintenancePolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7263,7 +6972,7 @@ func (c *ProjectsLocationsClustersSetMaintenancePolicyCall) Do(opts ...googleapi // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster id) of the cluster to set maintenance\npolicy.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster id) of the cluster to set maintenance policy. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -7296,10 +7005,8 @@ type ProjectsLocationsClustersSetMasterAuthCall struct { } // SetMasterAuth: Sets master auth materials. Currently supports -// changing the admin password -// or a specific cluster, either via password generation or explicitly -// setting -// the password. +// changing the admin password or a specific cluster, either via +// password generation or explicitly setting the password. func (r *ProjectsLocationsClustersService) SetMasterAuth(name string, setmasterauthrequest *SetMasterAuthRequest) *ProjectsLocationsClustersSetMasterAuthCall { c := &ProjectsLocationsClustersSetMasterAuthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -7334,7 +7041,7 @@ func (c *ProjectsLocationsClustersSetMasterAuthCall) Header() http.Header { func (c *ProjectsLocationsClustersSetMasterAuthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7398,7 +7105,7 @@ func (c *ProjectsLocationsClustersSetMasterAuthCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Sets master auth materials. Currently supports changing the admin password\nor a specific cluster, either via password generation or explicitly setting\nthe password.", + // "description": "Sets master auth materials. Currently supports changing the admin password or a specific cluster, either via password generation or explicitly setting the password.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setMasterAuth", // "httpMethod": "POST", // "id": "container.projects.locations.clusters.setMasterAuth", @@ -7407,7 +7114,7 @@ func (c *ProjectsLocationsClustersSetMasterAuthCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster) of the cluster to set auth.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to set auth. 
Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -7474,7 +7181,7 @@ func (c *ProjectsLocationsClustersSetMonitoringCall) Header() http.Header { func (c *ProjectsLocationsClustersSetMonitoringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7547,7 +7254,7 @@ func (c *ProjectsLocationsClustersSetMonitoringCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster) of the cluster to set monitoring.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to set monitoring. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -7614,7 +7321,7 @@ func (c *ProjectsLocationsClustersSetNetworkPolicyCall) Header() http.Header { func (c *ProjectsLocationsClustersSetNetworkPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7687,7 +7394,7 @@ func (c *ProjectsLocationsClustersSetNetworkPolicyCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster id) of the cluster to set networking\npolicy. Specified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster id) of the cluster to set networking policy. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -7754,7 +7461,7 @@ func (c *ProjectsLocationsClustersSetResourceLabelsCall) Header() http.Header { func (c *ProjectsLocationsClustersSetResourceLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7827,7 +7534,7 @@ func (c *ProjectsLocationsClustersSetResourceLabelsCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster id) of the cluster to set labels.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster id) of the cluster to set labels. 
Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -7894,7 +7601,7 @@ func (c *ProjectsLocationsClustersStartIpRotationCall) Header() http.Header { func (c *ProjectsLocationsClustersStartIpRotationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7967,7 +7674,7 @@ func (c *ProjectsLocationsClustersStartIpRotationCall) Do(opts ...googleapi.Call // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster id) of the cluster to start IP\nrotation. Specified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster id) of the cluster to start IP rotation. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -8034,7 +7741,7 @@ func (c *ProjectsLocationsClustersUpdateCall) Header() http.Header { func (c *ProjectsLocationsClustersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8107,7 +7814,7 @@ func (c *ProjectsLocationsClustersUpdateCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to update. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -8174,7 +7881,7 @@ func (c *ProjectsLocationsClustersUpdateMasterCall) Header() http.Header { func (c *ProjectsLocationsClustersUpdateMasterCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8247,7 +7954,7 @@ func (c *ProjectsLocationsClustersUpdateMasterCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to update. 
Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -8314,7 +8021,7 @@ func (c *ProjectsLocationsClustersNodePoolsCreateCall) Header() http.Header { func (c *ProjectsLocationsClustersNodePoolsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8387,7 +8094,7 @@ func (c *ProjectsLocationsClustersNodePoolsCreateCall) Do(opts ...googleapi.Call // ], // "parameters": { // "parent": { - // "description": "The parent (project, location, cluster id) where the node pool will be\ncreated. Specified in the format\n`projects/*/locations/*/clusters/*`.", + // "description": "The parent (project, location, cluster id) where the node pool will be created. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -8426,40 +8133,35 @@ func (r *ProjectsLocationsClustersNodePoolsService) Delete(name string) *Project } // ClusterId sets the optional parameter "clusterId": Deprecated. The -// name of the cluster. -// This field has been deprecated and replaced by the name field. +// name of the cluster. This field has been deprecated and replaced by +// the name field. func (c *ProjectsLocationsClustersNodePoolsDeleteCall) ClusterId(clusterId string) *ProjectsLocationsClustersNodePoolsDeleteCall { c.urlParams_.Set("clusterId", clusterId) return c } // NodePoolId sets the optional parameter "nodePoolId": Deprecated. The -// name of the node pool to delete. -// This field has been deprecated and replaced by the name field. +// name of the node pool to delete. This field has been deprecated and +// replaced by the name field. func (c *ProjectsLocationsClustersNodePoolsDeleteCall) NodePoolId(nodePoolId string) *ProjectsLocationsClustersNodePoolsDeleteCall { c.urlParams_.Set("nodePoolId", nodePoolId) return c } // ProjectId sets the optional parameter "projectId": Deprecated. The -// Google Developers Console [project ID or -// project -// number](https://developers.google.com/console/help/new/#projec -// tnumber). -// This field has been deprecated and replaced by the name field. +// Google Developers Console [project ID or project +// number](https://developers.google.com/console/help/new/#projectnumber) +// . This field has been deprecated and replaced by the name field. func (c *ProjectsLocationsClustersNodePoolsDeleteCall) ProjectId(projectId string) *ProjectsLocationsClustersNodePoolsDeleteCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Deprecated. The name of the -// Google Compute -// Engine +// Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in -// which the -// cluster resides. This field has been deprecated and replaced by the -// name -// field. +// which the cluster resides. This field has been deprecated and +// replaced by the name field. 
func (c *ProjectsLocationsClustersNodePoolsDeleteCall) Zone(zone string) *ProjectsLocationsClustersNodePoolsDeleteCall { c.urlParams_.Set("zone", zone) return c @@ -8492,7 +8194,7 @@ func (c *ProjectsLocationsClustersNodePoolsDeleteCall) Header() http.Header { func (c *ProjectsLocationsClustersNodePoolsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8560,29 +8262,29 @@ func (c *ProjectsLocationsClustersNodePoolsDeleteCall) Do(opts ...googleapi.Call // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "name": { - // "description": "The name (project, location, cluster, node pool id) of the node pool to\ndelete. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool id) of the node pool to delete. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "Deprecated. The name of the node pool to delete.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the node pool to delete. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // } @@ -8617,40 +8319,35 @@ func (r *ProjectsLocationsClustersNodePoolsService) Get(name string) *ProjectsLo } // ClusterId sets the optional parameter "clusterId": Deprecated. The -// name of the cluster. -// This field has been deprecated and replaced by the name field. +// name of the cluster. This field has been deprecated and replaced by +// the name field. 
func (c *ProjectsLocationsClustersNodePoolsGetCall) ClusterId(clusterId string) *ProjectsLocationsClustersNodePoolsGetCall { c.urlParams_.Set("clusterId", clusterId) return c } // NodePoolId sets the optional parameter "nodePoolId": Deprecated. The -// name of the node pool. -// This field has been deprecated and replaced by the name field. +// name of the node pool. This field has been deprecated and replaced by +// the name field. func (c *ProjectsLocationsClustersNodePoolsGetCall) NodePoolId(nodePoolId string) *ProjectsLocationsClustersNodePoolsGetCall { c.urlParams_.Set("nodePoolId", nodePoolId) return c } // ProjectId sets the optional parameter "projectId": Deprecated. The -// Google Developers Console [project ID or -// project -// number](https://developers.google.com/console/help/new/#projec -// tnumber). -// This field has been deprecated and replaced by the name field. +// Google Developers Console [project ID or project +// number](https://developers.google.com/console/help/new/#projectnumber) +// . This field has been deprecated and replaced by the name field. func (c *ProjectsLocationsClustersNodePoolsGetCall) ProjectId(projectId string) *ProjectsLocationsClustersNodePoolsGetCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Deprecated. The name of the -// Google Compute -// Engine +// Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in -// which the -// cluster resides. This field has been deprecated and replaced by the -// name -// field. +// which the cluster resides. This field has been deprecated and +// replaced by the name field. func (c *ProjectsLocationsClustersNodePoolsGetCall) Zone(zone string) *ProjectsLocationsClustersNodePoolsGetCall { c.urlParams_.Set("zone", zone) return c @@ -8693,7 +8390,7 @@ func (c *ProjectsLocationsClustersNodePoolsGetCall) Header() http.Header { func (c *ProjectsLocationsClustersNodePoolsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8764,29 +8461,29 @@ func (c *ProjectsLocationsClustersNodePoolsGetCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "name": { - // "description": "The name (project, location, cluster, node pool id) of the node pool to\nget. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool id) of the node pool to get. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "Deprecated. The name of the node pool.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the node pool. 
This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // } @@ -8821,32 +8518,27 @@ func (r *ProjectsLocationsClustersNodePoolsService) List(parent string) *Project } // ClusterId sets the optional parameter "clusterId": Deprecated. The -// name of the cluster. -// This field has been deprecated and replaced by the parent field. +// name of the cluster. This field has been deprecated and replaced by +// the parent field. func (c *ProjectsLocationsClustersNodePoolsListCall) ClusterId(clusterId string) *ProjectsLocationsClustersNodePoolsListCall { c.urlParams_.Set("clusterId", clusterId) return c } // ProjectId sets the optional parameter "projectId": Deprecated. The -// Google Developers Console [project ID or -// project -// number](https://developers.google.com/console/help/new/#projec -// tnumber). -// This field has been deprecated and replaced by the parent field. +// Google Developers Console [project ID or project +// number](https://developers.google.com/console/help/new/#projectnumber) +// . This field has been deprecated and replaced by the parent field. func (c *ProjectsLocationsClustersNodePoolsListCall) ProjectId(projectId string) *ProjectsLocationsClustersNodePoolsListCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Deprecated. The name of the -// Google Compute -// Engine +// Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in -// which the -// cluster resides. This field has been deprecated and replaced by the -// parent -// field. +// which the cluster resides. This field has been deprecated and +// replaced by the parent field. func (c *ProjectsLocationsClustersNodePoolsListCall) Zone(zone string) *ProjectsLocationsClustersNodePoolsListCall { c.urlParams_.Set("zone", zone) return c @@ -8889,7 +8581,7 @@ func (c *ProjectsLocationsClustersNodePoolsListCall) Header() http.Header { func (c *ProjectsLocationsClustersNodePoolsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8960,24 +8652,24 @@ func (c *ProjectsLocationsClustersNodePoolsListCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. 
The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", + // "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "The parent (project, location, cluster id) where the node pools will be\nlisted. Specified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The parent (project, location, cluster id) where the node pools will be listed. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" // } @@ -9004,8 +8696,7 @@ type ProjectsLocationsClustersNodePoolsRollbackCall struct { header_ http.Header } -// Rollback: Rolls back a previously Aborted or Failed NodePool -// upgrade. +// Rollback: Rolls back a previously Aborted or Failed NodePool upgrade. // This makes no changes if the last upgrade successfully completed. func (r *ProjectsLocationsClustersNodePoolsService) Rollback(name string, rollbacknodepoolupgraderequest *RollbackNodePoolUpgradeRequest) *ProjectsLocationsClustersNodePoolsRollbackCall { c := &ProjectsLocationsClustersNodePoolsRollbackCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -9041,7 +8732,7 @@ func (c *ProjectsLocationsClustersNodePoolsRollbackCall) Header() http.Header { func (c *ProjectsLocationsClustersNodePoolsRollbackCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9105,7 +8796,7 @@ func (c *ProjectsLocationsClustersNodePoolsRollbackCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Rolls back a previously Aborted or Failed NodePool upgrade.\nThis makes no changes if the last upgrade successfully completed.", + // "description": "Rolls back a previously Aborted or Failed NodePool upgrade. 
This makes no changes if the last upgrade successfully completed.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:rollback", // "httpMethod": "POST", // "id": "container.projects.locations.clusters.nodePools.rollback", @@ -9114,7 +8805,7 @@ func (c *ProjectsLocationsClustersNodePoolsRollbackCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster, node pool id) of the node poll to\nrollback upgrade.\nSpecified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool id) of the node poll to rollback upgrade. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", // "required": true, @@ -9182,7 +8873,7 @@ func (c *ProjectsLocationsClustersNodePoolsSetAutoscalingCall) Header() http.Hea func (c *ProjectsLocationsClustersNodePoolsSetAutoscalingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9255,7 +8946,7 @@ func (c *ProjectsLocationsClustersNodePoolsSetAutoscalingCall) Do(opts ...google // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster, node pool) of the node pool to set\nautoscaler settings. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool) of the node pool to set autoscaler settings. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", // "required": true, @@ -9322,7 +9013,7 @@ func (c *ProjectsLocationsClustersNodePoolsSetManagementCall) Header() http.Head func (c *ProjectsLocationsClustersNodePoolsSetManagementCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9395,7 +9086,7 @@ func (c *ProjectsLocationsClustersNodePoolsSetManagementCall) Do(opts ...googlea // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster, node pool id) of the node pool to set\nmanagement properties. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool id) of the node pool to set management properties. 
Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", // "required": true, @@ -9462,7 +9153,7 @@ func (c *ProjectsLocationsClustersNodePoolsSetSizeCall) Header() http.Header { func (c *ProjectsLocationsClustersNodePoolsSetSizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9535,7 +9226,7 @@ func (c *ProjectsLocationsClustersNodePoolsSetSizeCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster, node pool id) of the node pool to set\nsize.\nSpecified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool id) of the node pool to set size. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", // "required": true, @@ -9603,7 +9294,7 @@ func (c *ProjectsLocationsClustersNodePoolsUpdateCall) Header() http.Header { func (c *ProjectsLocationsClustersNodePoolsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9676,7 +9367,7 @@ func (c *ProjectsLocationsClustersNodePoolsUpdateCall) Do(opts ...googleapi.Call // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster, node pool) of the node pool to\nupdate. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool) of the node pool to update. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", // "required": true, @@ -9709,16 +9400,10 @@ type ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall struct { } // GetOpenidConfiguration: Gets the OIDC discovery document for the -// cluster. -// See the -// [OpenID Connect Discovery -// 1.0 -// specification](https://openid.net/specs/openid-connect-discovery-1 -// _0.html) -// for details. -// This API is not yet intended for general use, and is not available -// for all -// clusters. +// cluster. See the [OpenID Connect Discovery 1.0 +// specification](https://openid.net/specs/openid-connect-discovery-1_0.h +// tml) for details. This API is not yet intended for general use, and +// is not available for all clusters. 
func (r *ProjectsLocationsClustersWellKnownService) GetOpenidConfiguration(parent string) *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall { c := &ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -9762,7 +9447,7 @@ func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) Header() func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9824,7 +9509,7 @@ func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) Do(opts . } return ret, nil // { - // "description": "Gets the OIDC discovery document for the cluster.\nSee the\n[OpenID Connect Discovery 1.0\nspecification](https://openid.net/specs/openid-connect-discovery-1_0.html)\nfor details.\nThis API is not yet intended for general use, and is not available for all\nclusters.", + // "description": "Gets the OIDC discovery document for the cluster. See the [OpenID Connect Discovery 1.0 specification](https://openid.net/specs/openid-connect-discovery-1_0.html) for details. This API is not yet intended for general use, and is not available for all clusters.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/.well-known/openid-configuration", // "httpMethod": "GET", // "id": "container.projects.locations.clusters.well-known.getOpenid-configuration", @@ -9833,7 +9518,7 @@ func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) Do(opts . // ], // "parameters": { // "parent": { - // "description": "The cluster (project, location, cluster id) to get the discovery document\nfor. Specified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The cluster (project, location, cluster id) to get the discovery document for. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -9894,7 +9579,7 @@ func (c *ProjectsLocationsOperationsCancelCall) Header() http.Header { func (c *ProjectsLocationsOperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9967,7 +9652,7 @@ func (c *ProjectsLocationsOperationsCancelCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "The name (project, location, operation id) of the operation to cancel.\nSpecified in the format `projects/*/locations/*/operations/*`.", + // "description": "The name (project, location, operation id) of the operation to cancel. Specified in the format `projects/*/locations/*/operations/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", // "required": true, @@ -10007,32 +9692,27 @@ func (r *ProjectsLocationsOperationsService) Get(name string) *ProjectsLocations } // OperationId sets the optional parameter "operationId": Deprecated. -// The server-assigned `name` of the operation. 
-// This field has been deprecated and replaced by the name field. +// The server-assigned `name` of the operation. This field has been +// deprecated and replaced by the name field. func (c *ProjectsLocationsOperationsGetCall) OperationId(operationId string) *ProjectsLocationsOperationsGetCall { c.urlParams_.Set("operationId", operationId) return c } // ProjectId sets the optional parameter "projectId": Deprecated. The -// Google Developers Console [project ID or -// project -// number](https://support.google.com/cloud/answer/6158840). -// This -// field has been deprecated and replaced by the name field. +// Google Developers Console [project ID or project +// number](https://support.google.com/cloud/answer/6158840). This field +// has been deprecated and replaced by the name field. func (c *ProjectsLocationsOperationsGetCall) ProjectId(projectId string) *ProjectsLocationsOperationsGetCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Deprecated. The name of the -// Google Compute -// Engine +// Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in -// which the -// cluster resides. This field has been deprecated and replaced by the -// name -// field. +// which the cluster resides. This field has been deprecated and +// replaced by the name field. func (c *ProjectsLocationsOperationsGetCall) Zone(zone string) *ProjectsLocationsOperationsGetCall { c.urlParams_.Set("zone", zone) return c @@ -10075,7 +9755,7 @@ func (c *ProjectsLocationsOperationsGetCall) Header() http.Header { func (c *ProjectsLocationsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10146,24 +9826,24 @@ func (c *ProjectsLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "name": { - // "description": "The name (project, location, operation id) of the operation to get.\nSpecified in the format `projects/*/locations/*/operations/*`.", + // "description": "The name (project, location, operation id) of the operation to get. Specified in the format `projects/*/locations/*/operations/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", // "required": true, // "type": "string" // }, // "operationId": { - // "description": "Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The server-assigned `name` of the operation. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. 
This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // } @@ -10199,24 +9879,19 @@ func (r *ProjectsLocationsOperationsService) List(parent string) *ProjectsLocati } // ProjectId sets the optional parameter "projectId": Deprecated. The -// Google Developers Console [project ID or -// project -// number](https://support.google.com/cloud/answer/6158840). -// This -// field has been deprecated and replaced by the parent field. +// Google Developers Console [project ID or project +// number](https://support.google.com/cloud/answer/6158840). This field +// has been deprecated and replaced by the parent field. func (c *ProjectsLocationsOperationsListCall) ProjectId(projectId string) *ProjectsLocationsOperationsListCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Deprecated. The name of the -// Google Compute -// Engine +// Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) to -// return -// operations for, or `-` for all zones. This field has been deprecated -// and -// replaced by the parent field. +// return operations for, or `-` for all zones. This field has been +// deprecated and replaced by the parent field. func (c *ProjectsLocationsOperationsListCall) Zone(zone string) *ProjectsLocationsOperationsListCall { c.urlParams_.Set("zone", zone) return c @@ -10259,7 +9934,7 @@ func (c *ProjectsLocationsOperationsListCall) Header() http.Header { func (c *ProjectsLocationsOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10330,19 +10005,19 @@ func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "parent": { - // "description": "The parent (project and location) where the operations will be listed.\nSpecified in the format `projects/*/locations/*`.\nLocation \"-\" matches all zones and all regions.", + // "description": "The parent (project and location) where the operations will be listed. Specified in the format `projects/*/locations/*`. Location \"-\" matches all zones and all regions.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) to return\noperations for, or `-` for all zones. This field has been deprecated and\nreplaced by the parent field.", + // "description": "Deprecated. 
The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for, or `-` for all zones. This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" // } @@ -10380,8 +10055,8 @@ func (r *ProjectsZonesService) GetServerconfig(projectId string, zone string) *P } // Name sets the optional parameter "name": The name (project and -// location) of the server config to get, -// specified in the format `projects/*/locations/*`. +// location) of the server config to get, specified in the format +// `projects/*/locations/*`. func (c *ProjectsZonesGetServerconfigCall) Name(name string) *ProjectsZonesGetServerconfigCall { c.urlParams_.Set("name", name) return c @@ -10424,7 +10099,7 @@ func (c *ProjectsZonesGetServerconfigCall) Header() http.Header { func (c *ProjectsZonesGetServerconfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10497,18 +10172,18 @@ func (c *ProjectsZonesGetServerconfigCall) Do(opts ...googleapi.CallOption) (*Se // ], // "parameters": { // "name": { - // "description": "The name (project and location) of the server config to get,\nspecified in the format `projects/*/locations/*`.", + // "description": "The name (project and location) of the server config to get, specified in the format `projects/*/locations/*`.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) to return\noperations for. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -10575,7 +10250,7 @@ func (c *ProjectsZonesClustersAddonsCall) Header() http.Header { func (c *ProjectsZonesClustersAddonsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10652,19 +10327,19 @@ func (c *ProjectsZonesClustersAddonsCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster to upgrade. 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -10734,7 +10409,7 @@ func (c *ProjectsZonesClustersCompleteIpRotationCall) Header() http.Header { func (c *ProjectsZonesClustersCompleteIpRotationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10811,19 +10486,19 @@ func (c *ProjectsZonesClustersCompleteIpRotationCall) Do(opts ...googleapi.CallO // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -10856,24 +10531,14 @@ type ProjectsZonesClustersCreateCall struct { } // Create: Creates a cluster, consisting of the specified number and -// type of Google -// Compute Engine instances. -// -// By default, the cluster is created in the -// project's -// [default -// network](https://cloud.google.com/compute/docs/netw -// orks-and-firewalls#networks). 
-// -// One firewall is added for the cluster. After cluster creation, -// the Kubelet creates routes for each node to allow the containers -// on that node to communicate with all other instances in -// the -// cluster. -// -// Finally, an entry is added to the project's global metadata -// indicating -// which CIDR range the cluster is using. +// type of Google Compute Engine instances. By default, the cluster is +// created in the project's [default +// network](https://cloud.google.com/compute/docs/networks-and-firewalls# +// networks). One firewall is added for the cluster. After cluster +// creation, the Kubelet creates routes for each node to allow the +// containers on that node to communicate with all other instances in +// the cluster. Finally, an entry is added to the project's global +// metadata indicating which CIDR range the cluster is using. func (r *ProjectsZonesClustersService) Create(projectId string, zone string, createclusterrequest *CreateClusterRequest) *ProjectsZonesClustersCreateCall { c := &ProjectsZonesClustersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -10909,7 +10574,7 @@ func (c *ProjectsZonesClustersCreateCall) Header() http.Header { func (c *ProjectsZonesClustersCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10974,7 +10639,7 @@ func (c *ProjectsZonesClustersCreateCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default\nnetwork](https://cloud.google.com/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe Kubelet creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range the cluster is using.", + // "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances. By default, the cluster is created in the project's [default network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks). One firewall is added for the cluster. After cluster creation, the Kubelet creates routes for each node to allow the containers on that node to communicate with all other instances in the cluster. Finally, an entry is added to the project's global metadata indicating which CIDR range the cluster is using.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters", // "httpMethod": "POST", // "id": "container.projects.zones.clusters.create", @@ -10984,13 +10649,13 @@ func (c *ProjectsZonesClustersCreateCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). 
This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" @@ -11023,17 +10688,11 @@ type ProjectsZonesClustersDeleteCall struct { } // Delete: Deletes the cluster, including the Kubernetes endpoint and -// all worker -// nodes. -// -// Firewalls and routes that were configured during cluster creation -// are also deleted. -// -// Other Google Compute Engine resources that might be in use by the -// cluster, -// such as load balancer resources, are not deleted if they weren't -// present -// when the cluster was initially created. +// all worker nodes. Firewalls and routes that were configured during +// cluster creation are also deleted. Other Google Compute Engine +// resources that might be in use by the cluster, such as load balancer +// resources, are not deleted if they weren't present when the cluster +// was initially created. func (r *ProjectsZonesClustersService) Delete(projectId string, zone string, clusterId string) *ProjectsZonesClustersDeleteCall { c := &ProjectsZonesClustersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -11043,8 +10702,8 @@ func (r *ProjectsZonesClustersService) Delete(projectId string, zone string, clu } // Name sets the optional parameter "name": The name (project, location, -// cluster) of the cluster to delete. -// Specified in the format `projects/*/locations/*/clusters/*`. +// cluster) of the cluster to delete. Specified in the format +// `projects/*/locations/*/clusters/*`. func (c *ProjectsZonesClustersDeleteCall) Name(name string) *ProjectsZonesClustersDeleteCall { c.urlParams_.Set("name", name) return c @@ -11077,7 +10736,7 @@ func (c *ProjectsZonesClustersDeleteCall) Header() http.Header { func (c *ProjectsZonesClustersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11138,7 +10797,7 @@ func (c *ProjectsZonesClustersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster,\nsuch as load balancer resources, are not deleted if they weren't present\nwhen the cluster was initially created.", + // "description": "Deletes the cluster, including the Kubernetes endpoint and all worker nodes. Firewalls and routes that were configured during cluster creation are also deleted. 
Other Google Compute Engine resources that might be in use by the cluster, such as load balancer resources, are not deleted if they weren't present when the cluster was initially created.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", // "httpMethod": "DELETE", // "id": "container.projects.zones.clusters.delete", @@ -11149,24 +10808,24 @@ func (c *ProjectsZonesClustersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster to delete.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster to delete. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "name": { - // "description": "The name (project, location, cluster) of the cluster to delete.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to delete. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -11206,8 +10865,8 @@ func (r *ProjectsZonesClustersService) Get(projectId string, zone string, cluste } // Name sets the optional parameter "name": The name (project, location, -// cluster) of the cluster to retrieve. -// Specified in the format `projects/*/locations/*/clusters/*`. +// cluster) of the cluster to retrieve. Specified in the format +// `projects/*/locations/*/clusters/*`. func (c *ProjectsZonesClustersGetCall) Name(name string) *ProjectsZonesClustersGetCall { c.urlParams_.Set("name", name) return c @@ -11250,7 +10909,7 @@ func (c *ProjectsZonesClustersGetCall) Header() http.Header { func (c *ProjectsZonesClustersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11325,24 +10984,24 @@ func (c *ProjectsZonesClustersGetCall) Do(opts ...googleapi.CallOption) (*Cluste // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster to retrieve.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster to retrieve. 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "name": { - // "description": "The name (project, location, cluster) of the cluster to retrieve.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to retrieve. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -11410,7 +11069,7 @@ func (c *ProjectsZonesClustersLegacyAbacCall) Header() http.Header { func (c *ProjectsZonesClustersLegacyAbacCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11487,19 +11146,19 @@ func (c *ProjectsZonesClustersLegacyAbacCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -11532,8 +11191,7 @@ type ProjectsZonesClustersListCall struct { } // List: Lists all clusters owned by a project in either the specified -// zone or all -// zones. +// zone or all zones. func (r *ProjectsZonesClustersService) List(projectId string, zone string) *ProjectsZonesClustersListCall { c := &ProjectsZonesClustersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -11542,9 +11200,9 @@ func (r *ProjectsZonesClustersService) List(projectId string, zone string) *Proj } // Parent sets the optional parameter "parent": The parent (project and -// location) where the clusters will be listed. -// Specified in the format `projects/*/locations/*`. -// Location "-" matches all zones and all regions. +// location) where the clusters will be listed. Specified in the format +// `projects/*/locations/*`. Location "-" matches all zones and all +// regions. func (c *ProjectsZonesClustersListCall) Parent(parent string) *ProjectsZonesClustersListCall { c.urlParams_.Set("parent", parent) return c @@ -11587,7 +11245,7 @@ func (c *ProjectsZonesClustersListCall) Header() http.Header { func (c *ProjectsZonesClustersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11650,7 +11308,7 @@ func (c *ProjectsZonesClustersListCall) Do(opts ...googleapi.CallOption) (*ListC } return ret, nil // { - // "description": "Lists all clusters owned by a project in either the specified zone or all\nzones.", + // "description": "Lists all clusters owned by a project in either the specified zone or all zones.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters", // "httpMethod": "GET", // "id": "container.projects.zones.clusters.list", @@ -11660,18 +11318,18 @@ func (c *ProjectsZonesClustersListCall) Do(opts ...googleapi.CallOption) (*ListC // ], // "parameters": { // "parent": { - // "description": "The parent (project and location) where the clusters will be listed.\nSpecified in the format `projects/*/locations/*`.\nLocation \"-\" matches all zones and all regions.", + // "description": "The parent (project and location) where the clusters will be listed. Specified in the format `projects/*/locations/*`. Location \"-\" matches all zones and all regions.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides, or \"-\" for all zones. This field has been deprecated and\nreplaced by the parent field.", + // "description": "Deprecated. 
The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides, or \"-\" for all zones. This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" @@ -11701,12 +11359,9 @@ type ProjectsZonesClustersLocationsCall struct { header_ http.Header } -// Locations: Sets the locations for a specific cluster. -// Deprecated. -// Use -// [projects.locations.clusters.update](https://cloud.google.com/kube -// rnetes-engine/docs/reference/rest/v1/projects.locations.clusters/updat -// e) +// Locations: Sets the locations for a specific cluster. Deprecated. Use +// [projects.locations.clusters.update](https://cloud.google.com/kubernet +// es-engine/docs/reference/rest/v1/projects.locations.clusters/update) // instead. func (r *ProjectsZonesClustersService) Locations(projectId string, zone string, clusterId string, setlocationsrequest *SetLocationsRequest) *ProjectsZonesClustersLocationsCall { c := &ProjectsZonesClustersLocationsCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -11744,7 +11399,7 @@ func (c *ProjectsZonesClustersLocationsCall) Header() http.Header { func (c *ProjectsZonesClustersLocationsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11810,7 +11465,7 @@ func (c *ProjectsZonesClustersLocationsCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Sets the locations for a specific cluster.\nDeprecated. Use\n[projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update)\ninstead.", + // "description": "Sets the locations for a specific cluster. Deprecated. Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) instead.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations", // "httpMethod": "POST", // "id": "container.projects.zones.clusters.locations", @@ -11821,19 +11476,19 @@ func (c *ProjectsZonesClustersLocationsCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. 
This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -11903,7 +11558,7 @@ func (c *ProjectsZonesClustersLoggingCall) Header() http.Header { func (c *ProjectsZonesClustersLoggingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11980,19 +11635,19 @@ func (c *ProjectsZonesClustersLoggingCall) Do(opts ...googleapi.CallOption) (*Op // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -12062,7 +11717,7 @@ func (c *ProjectsZonesClustersMasterCall) Header() http.Header { func (c *ProjectsZonesClustersMasterCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12139,19 +11794,19 @@ func (c *ProjectsZonesClustersMasterCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. 
The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -12221,7 +11876,7 @@ func (c *ProjectsZonesClustersMonitoringCall) Header() http.Header { func (c *ProjectsZonesClustersMonitoringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12298,19 +11953,19 @@ func (c *ProjectsZonesClustersMonitoringCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -12380,7 +12035,7 @@ func (c *ProjectsZonesClustersResourceLabelsCall) Header() http.Header { func (c *ProjectsZonesClustersResourceLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12457,19 +12112,19 @@ func (c *ProjectsZonesClustersResourceLabelsCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster. 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -12539,7 +12194,7 @@ func (c *ProjectsZonesClustersSetMaintenancePolicyCall) Header() http.Header { func (c *ProjectsZonesClustersSetMaintenancePolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12622,13 +12277,13 @@ func (c *ProjectsZonesClustersSetMaintenancePolicyCall) Do(opts ...googleapi.Cal // "type": "string" // }, // "projectId": { - // "description": "Required. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Required. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides.", + // "description": "Required. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides.", // "location": "path", // "required": true, // "type": "string" @@ -12662,10 +12317,8 @@ type ProjectsZonesClustersSetMasterAuthCall struct { } // SetMasterAuth: Sets master auth materials. Currently supports -// changing the admin password -// or a specific cluster, either via password generation or explicitly -// setting -// the password. +// changing the admin password or a specific cluster, either via +// password generation or explicitly setting the password. 
func (r *ProjectsZonesClustersService) SetMasterAuth(projectId string, zone string, clusterId string, setmasterauthrequest *SetMasterAuthRequest) *ProjectsZonesClustersSetMasterAuthCall { c := &ProjectsZonesClustersSetMasterAuthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -12702,7 +12355,7 @@ func (c *ProjectsZonesClustersSetMasterAuthCall) Header() http.Header { func (c *ProjectsZonesClustersSetMasterAuthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12768,7 +12421,7 @@ func (c *ProjectsZonesClustersSetMasterAuthCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Sets master auth materials. Currently supports changing the admin password\nor a specific cluster, either via password generation or explicitly setting\nthe password.", + // "description": "Sets master auth materials. Currently supports changing the admin password or a specific cluster, either via password generation or explicitly setting the password.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMasterAuth", // "httpMethod": "POST", // "id": "container.projects.zones.clusters.setMasterAuth", @@ -12779,19 +12432,19 @@ func (c *ProjectsZonesClustersSetMasterAuthCall) Do(opts ...googleapi.CallOption // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -12861,7 +12514,7 @@ func (c *ProjectsZonesClustersSetNetworkPolicyCall) Header() http.Header { func (c *ProjectsZonesClustersSetNetworkPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12938,19 +12591,19 @@ func (c *ProjectsZonesClustersSetNetworkPolicyCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -13020,7 +12673,7 @@ func (c *ProjectsZonesClustersStartIpRotationCall) Header() http.Header { func (c *ProjectsZonesClustersStartIpRotationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13097,19 +12750,19 @@ func (c *ProjectsZonesClustersStartIpRotationCall) Do(opts ...googleapi.CallOpti // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -13179,7 +12832,7 @@ func (c *ProjectsZonesClustersUpdateCall) Header() http.Header { func (c *ProjectsZonesClustersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13256,19 +12909,19 @@ func (c *ProjectsZonesClustersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -13341,7 +12994,7 @@ func (c *ProjectsZonesClustersNodePoolsAutoscalingCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsAutoscalingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13420,25 +13073,25 @@ func (c *ProjectsZonesClustersNodePoolsAutoscalingCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster to upgrade. 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "Deprecated. The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the node pool to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -13508,7 +13161,7 @@ func (c *ProjectsZonesClustersNodePoolsCreateCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13585,19 +13238,19 @@ func (c *ProjectsZonesClustersNodePoolsCreateCall) Do(opts ...googleapi.CallOpti // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", + // "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" @@ -13641,10 +13294,8 @@ func (r *ProjectsZonesClustersNodePoolsService) Delete(projectId string, zone st } // Name sets the optional parameter "name": The name (project, location, -// cluster, node pool id) of the node pool to -// delete. Specified in the -// format -// `projects/*/locations/*/clusters/*/nodePools/*`. +// cluster, node pool id) of the node pool to delete. Specified in the +// format `projects/*/locations/*/clusters/*/nodePools/*`. func (c *ProjectsZonesClustersNodePoolsDeleteCall) Name(name string) *ProjectsZonesClustersNodePoolsDeleteCall { c.urlParams_.Set("name", name) return c @@ -13677,7 +13328,7 @@ func (c *ProjectsZonesClustersNodePoolsDeleteCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13751,30 +13402,30 @@ func (c *ProjectsZonesClustersNodePoolsDeleteCall) Do(opts ...googleapi.CallOpti // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "name": { - // "description": "The name (project, location, cluster, node pool id) of the node pool to\ndelete. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool id) of the node pool to delete. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "query", // "type": "string" // }, // "nodePoolId": { - // "description": "Deprecated. The name of the node pool to delete.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the node pool to delete. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -13816,10 +13467,8 @@ func (r *ProjectsZonesClustersNodePoolsService) Get(projectId string, zone strin } // Name sets the optional parameter "name": The name (project, location, -// cluster, node pool id) of the node pool to -// get. Specified in the -// format -// `projects/*/locations/*/clusters/*/nodePools/*`. +// cluster, node pool id) of the node pool to get. Specified in the +// format `projects/*/locations/*/clusters/*/nodePools/*`. func (c *ProjectsZonesClustersNodePoolsGetCall) Name(name string) *ProjectsZonesClustersNodePoolsGetCall { c.urlParams_.Set("name", name) return c @@ -13862,7 +13511,7 @@ func (c *ProjectsZonesClustersNodePoolsGetCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13939,30 +13588,30 @@ func (c *ProjectsZonesClustersNodePoolsGetCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "name": { - // "description": "The name (project, location, cluster, node pool id) of the node pool to\nget. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool id) of the node pool to get. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "query", // "type": "string" // }, // "nodePoolId": { - // "description": "Deprecated. The name of the node pool.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the node pool. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -14002,8 +13651,8 @@ func (r *ProjectsZonesClustersNodePoolsService) List(projectId string, zone stri } // Parent sets the optional parameter "parent": The parent (project, -// location, cluster id) where the node pools will be -// listed. Specified in the format `projects/*/locations/*/clusters/*`. +// location, cluster id) where the node pools will be listed. Specified +// in the format `projects/*/locations/*/clusters/*`. func (c *ProjectsZonesClustersNodePoolsListCall) Parent(parent string) *ProjectsZonesClustersNodePoolsListCall { c.urlParams_.Set("parent", parent) return c @@ -14046,7 +13695,7 @@ func (c *ProjectsZonesClustersNodePoolsListCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14121,24 +13770,24 @@ func (c *ProjectsZonesClustersNodePoolsListCall) Do(opts ...googleapi.CallOption // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", + // "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The parent (project, location, cluster id) where the node pools will be\nlisted. Specified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The parent (project, location, cluster id) where the node pools will be listed. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" @@ -14169,8 +13818,7 @@ type ProjectsZonesClustersNodePoolsRollbackCall struct { header_ http.Header } -// Rollback: Rolls back a previously Aborted or Failed NodePool -// upgrade. +// Rollback: Rolls back a previously Aborted or Failed NodePool upgrade. // This makes no changes if the last upgrade successfully completed. 
func (r *ProjectsZonesClustersNodePoolsService) Rollback(projectId string, zone string, clusterId string, nodePoolId string, rollbacknodepoolupgraderequest *RollbackNodePoolUpgradeRequest) *ProjectsZonesClustersNodePoolsRollbackCall { c := &ProjectsZonesClustersNodePoolsRollbackCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -14209,7 +13857,7 @@ func (c *ProjectsZonesClustersNodePoolsRollbackCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsRollbackCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14276,7 +13924,7 @@ func (c *ProjectsZonesClustersNodePoolsRollbackCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Rolls back a previously Aborted or Failed NodePool upgrade.\nThis makes no changes if the last upgrade successfully completed.", + // "description": "Rolls back a previously Aborted or Failed NodePool upgrade. This makes no changes if the last upgrade successfully completed.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}:rollback", // "httpMethod": "POST", // "id": "container.projects.zones.clusters.nodePools.rollback", @@ -14288,25 +13936,25 @@ func (c *ProjectsZonesClustersNodePoolsRollbackCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster to rollback.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster to rollback. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "Deprecated. The name of the node pool to rollback.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the node pool to rollback. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -14378,7 +14026,7 @@ func (c *ProjectsZonesClustersNodePoolsSetManagementCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsSetManagementCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14457,25 +14105,25 @@ func (c *ProjectsZonesClustersNodePoolsSetManagementCall) Do(opts ...googleapi.C // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "Deprecated. The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the node pool to update. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -14547,7 +14195,7 @@ func (c *ProjectsZonesClustersNodePoolsSetSizeCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsSetSizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14626,25 +14274,25 @@ func (c *ProjectsZonesClustersNodePoolsSetSizeCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "Deprecated. 
The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the node pool to update. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -14717,7 +14365,7 @@ func (c *ProjectsZonesClustersNodePoolsUpdateCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14796,25 +14444,25 @@ func (c *ProjectsZonesClustersNodePoolsUpdateCall) Do(opts ...googleapi.CallOpti // ], // "parameters": { // "clusterId": { - // "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "Deprecated. The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The name of the node pool to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. 
The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -14884,7 +14532,7 @@ func (c *ProjectsZonesOperationsCancelCall) Header() http.Header { func (c *ProjectsZonesOperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14961,19 +14609,19 @@ func (c *ProjectsZonesOperationsCancelCall) Do(opts ...googleapi.CallOption) (*E // ], // "parameters": { // "operationId": { - // "description": "Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The server-assigned `name` of the operation. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\noperation resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the operation resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -15016,8 +14664,8 @@ func (r *ProjectsZonesOperationsService) Get(projectId string, zone string, oper } // Name sets the optional parameter "name": The name (project, location, -// operation id) of the operation to get. -// Specified in the format `projects/*/locations/*/operations/*`. +// operation id) of the operation to get. Specified in the format +// `projects/*/locations/*/operations/*`. 
func (c *ProjectsZonesOperationsGetCall) Name(name string) *ProjectsZonesOperationsGetCall { c.urlParams_.Set("name", name) return c @@ -15060,7 +14708,7 @@ func (c *ProjectsZonesOperationsGetCall) Header() http.Header { func (c *ProjectsZonesOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15135,24 +14783,24 @@ func (c *ProjectsZonesOperationsGetCall) Do(opts ...googleapi.CallOption) (*Oper // ], // "parameters": { // "name": { - // "description": "The name (project, location, operation id) of the operation to get.\nSpecified in the format `projects/*/locations/*/operations/*`.", + // "description": "The name (project, location, operation id) of the operation to get. Specified in the format `projects/*/locations/*/operations/*`.", // "location": "query", // "type": "string" // }, // "operationId": { - // "description": "Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The server-assigned `name` of the operation. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -15191,9 +14839,9 @@ func (r *ProjectsZonesOperationsService) List(projectId string, zone string) *Pr } // Parent sets the optional parameter "parent": The parent (project and -// location) where the operations will be listed. -// Specified in the format `projects/*/locations/*`. -// Location "-" matches all zones and all regions. +// location) where the operations will be listed. Specified in the +// format `projects/*/locations/*`. Location "-" matches all zones and +// all regions. 
func (c *ProjectsZonesOperationsListCall) Parent(parent string) *ProjectsZonesOperationsListCall { c.urlParams_.Set("parent", parent) return c @@ -15236,7 +14884,7 @@ func (c *ProjectsZonesOperationsListCall) Header() http.Header { func (c *ProjectsZonesOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15309,18 +14957,18 @@ func (c *ProjectsZonesOperationsListCall) Do(opts ...googleapi.CallOption) (*Lis // ], // "parameters": { // "parent": { - // "description": "The parent (project and location) where the operations will be listed.\nSpecified in the format `projects/*/locations/*`.\nLocation \"-\" matches all zones and all regions.", + // "description": "The parent (project and location) where the operations will be listed. Specified in the format `projects/*/locations/*`. Location \"-\" matches all zones and all regions.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + // "description": "Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) to return\noperations for, or `-` for all zones. This field has been deprecated and\nreplaced by the parent field.", + // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for, or `-` for all zones. This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" diff --git a/vendor/google.golang.org/api/container/v1beta1/container-api.json b/vendor/google.golang.org/api/container/v1beta1/container-api.json index 047a9b2c657..3439a86d8c4 100644 --- a/vendor/google.golang.org/api/container/v1beta1/container-api.json +++ b/vendor/google.golang.org/api/container/v1beta1/container-api.json @@ -121,23 +121,23 @@ ], "parameters": { "filter": { - "description": "Filtering currently only supports equality on the networkProjectId and must\nbe in the form: \"networkProjectId=[PROJECTID]\", where `networkProjectId`\nis the project which owns the listed subnetworks. This defaults to the\nparent project ID.", + "description": "Filtering currently only supports equality on the networkProjectId and must be in the form: \"networkProjectId=[PROJECTID]\", where `networkProjectId` is the project which owns the listed subnetworks. This defaults to the parent project ID.", "location": "query", "type": "string" }, "pageSize": { - "description": "The max number of results per page that should be returned. If the number\nof available results is larger than `page_size`, a `next_page_token` is\nreturned which can be used to get the next page of results in subsequent\nrequests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + "description": "The max number of results per page that should be returned. If the number of available results is larger than `page_size`, a `next_page_token` is returned which can be used to get the next page of results in subsequent requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Specifies a page token to use. Set this to the nextPageToken returned by\nprevious list requests to get the next page of results.", + "description": "Specifies a page token to use. Set this to the nextPageToken returned by previous list requests to get the next page of results.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The parent project where subnetworks are usable.\nSpecified in the format `projects/*`.", + "description": "Required. The parent project where subnetworks are usable. Specified in the format `projects/*`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -168,19 +168,19 @@ ], "parameters": { "name": { - "description": "The name (project and location) of the server config to get,\nspecified in the format `projects/*/locations/*`.", + "description": "The name (project and location) of the server config to get, specified in the format `projects/*/locations/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) to return\noperations for. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" } @@ -203,7 +203,7 @@ ], "parameters": { "parent": { - "description": "Required. Contains the name of the resource requested.\nSpecified in the format `projects/*`.", + "description": "Required. Contains the name of the resource requested. Specified in the format `projects/*`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -232,7 +232,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster id) of the cluster to complete IP\nrotation. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to complete IP rotation. 
Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -251,7 +251,7 @@ ] }, "create": { - "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default\nnetwork](https://cloud.google.com/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe Kubelet creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range the cluster is using.", + "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances. By default, the cluster is created in the project's [default network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks). One firewall is added for the cluster. After cluster creation, the Kubelet creates routes for each node to allow the containers on that node to communicate with all other instances in the cluster. Finally, an entry is added to the project's global metadata indicating which CIDR range the cluster is using.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/clusters", "httpMethod": "POST", "id": "container.projects.locations.clusters.create", @@ -260,7 +260,7 @@ ], "parameters": { "parent": { - "description": "The parent (project and location) where the cluster will be created.\nSpecified in the format `projects/*/locations/*`.", + "description": "The parent (project and location) where the cluster will be created. Specified in the format `projects/*/locations/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -279,7 +279,7 @@ ] }, "delete": { - "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster,\nsuch as load balancer resources, are not deleted if they weren't present\nwhen the cluster was initially created.", + "description": "Deletes the cluster, including the Kubernetes endpoint and all worker nodes. Firewalls and routes that were configured during cluster creation are also deleted. Other Google Compute Engine resources that might be in use by the cluster, such as load balancer resources, are not deleted if they weren't present when the cluster was initially created.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}", "httpMethod": "DELETE", "id": "container.projects.locations.clusters.delete", @@ -288,24 +288,24 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to delete.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to delete. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to delete.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to delete. 
Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" } @@ -328,24 +328,24 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to retrieve.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to retrieve. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to retrieve.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to retrieve. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" } @@ -359,7 +359,7 @@ ] }, "getJwks": { - "description": "Gets the public component of the cluster signing keys in\nJSON Web Key format.\nThis API is not yet intended for general use, and is not available for all\nclusters.", + "description": "Gets the public component of the cluster signing keys in JSON Web Key format. 
This API is not yet intended for general use, and is not available for all clusters.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/jwks", "httpMethod": "GET", "id": "container.projects.locations.clusters.getJwks", @@ -368,7 +368,7 @@ ], "parameters": { "parent": { - "description": "The cluster (project, location, cluster id) to get keys for. Specified in\nthe format `projects/*/locations/*/clusters/*`.", + "description": "The cluster (project, location, cluster id) to get keys for. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -381,7 +381,7 @@ } }, "list": { - "description": "Lists all clusters owned by a project in either the specified zone or all\nzones.", + "description": "Lists all clusters owned by a project in either the specified zone or all zones.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/clusters", "httpMethod": "GET", "id": "container.projects.locations.clusters.list", @@ -390,19 +390,19 @@ ], "parameters": { "parent": { - "description": "The parent (project and location) where the clusters will be listed.\nSpecified in the format `projects/*/locations/*`.\nLocation \"-\" matches all zones and all regions.", + "description": "The parent (project and location) where the clusters will be listed. Specified in the format `projects/*/locations/*`. Location \"-\" matches all zones and all regions.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides, or \"-\" for all zones. This field has been deprecated and\nreplaced by the parent field.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides, or \"-\" for all zones. This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" } @@ -425,7 +425,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster) of the cluster to set addons.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set addons. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -453,7 +453,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster id) of the cluster to set legacy abac.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to set legacy abac. 
Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -472,7 +472,7 @@ ] }, "setLocations": { - "description": "Sets the locations for a specific cluster.\nDeprecated. Use\n[projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters/update)\ninstead.", + "description": "Sets the locations for a specific cluster. Deprecated. Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters/update) instead.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLocations", "httpMethod": "POST", "id": "container.projects.locations.clusters.setLocations", @@ -481,7 +481,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster) of the cluster to set locations.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set locations. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -509,7 +509,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster) of the cluster to set logging.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set logging. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -537,7 +537,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster id) of the cluster to set maintenance\npolicy.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to set maintenance policy. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -556,7 +556,7 @@ ] }, "setMasterAuth": { - "description": "Sets master auth materials. Currently supports changing the admin password\nor a specific cluster, either via password generation or explicitly setting\nthe password.", + "description": "Sets master auth materials. Currently supports changing the admin password or a specific cluster, either via password generation or explicitly setting the password.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setMasterAuth", "httpMethod": "POST", "id": "container.projects.locations.clusters.setMasterAuth", @@ -565,7 +565,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster) of the cluster to set auth.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set auth. 
Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -593,7 +593,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster) of the cluster to set monitoring.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set monitoring. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -621,7 +621,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster id) of the cluster to set networking\npolicy. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to set networking policy. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -649,7 +649,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster id) of the cluster to set labels.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to set labels. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -677,7 +677,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster id) of the cluster to start IP\nrotation. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to start IP rotation. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -705,7 +705,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to update. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -733,7 +733,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to update. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -765,7 +765,7 @@ ], "parameters": { "parent": { - "description": "The parent (project, location, cluster id) where the node pool will be\ncreated. Specified in the format\n`projects/*/locations/*/clusters/*`.", + "description": "The parent (project, location, cluster id) where the node pool will be created. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -793,29 +793,29 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. 
The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "name": { - "description": "The name (project, location, cluster, node pool id) of the node pool to\ndelete. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node pool to delete. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", "required": true, "type": "string" }, "nodePoolId": { - "description": "Required. Deprecated. The name of the node pool to delete.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the node pool to delete. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" } @@ -838,29 +838,29 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "name": { - "description": "The name (project, location, cluster, node pool id) of the node pool to\nget. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node pool to get. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", "required": true, "type": "string" }, "nodePoolId": { - "description": "Required. Deprecated. The name of the node pool.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the node pool. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "projectId": { - "description": "Required. Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" } @@ -883,24 +883,24 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", + "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" }, "parent": { - "description": "The parent (project, location, cluster id) where the node pools will be\nlisted. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The parent (project, location, cluster id) where the node pools will be listed. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" } @@ -914,7 +914,7 @@ ] }, "rollback": { - "description": "Rolls back a previously Aborted or Failed NodePool upgrade.\nThis makes no changes if the last upgrade successfully completed.", + "description": "Rolls back a previously Aborted or Failed NodePool upgrade. 
This makes no changes if the last upgrade successfully completed.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:rollback", "httpMethod": "POST", "id": "container.projects.locations.clusters.nodePools.rollback", @@ -923,7 +923,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster, node pool id) of the node poll to\nrollback upgrade.\nSpecified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node poll to rollback upgrade. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", "required": true, @@ -951,7 +951,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster, node pool) of the node pool to set\nautoscaler settings. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool) of the node pool to set autoscaler settings. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", "required": true, @@ -979,7 +979,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster, node pool id) of the node pool to set\nmanagement properties. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node pool to set management properties. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", "required": true, @@ -1007,7 +1007,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster, node pool id) of the node pool to set\nsize.\nSpecified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node pool to set size. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", "required": true, @@ -1035,7 +1035,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, cluster, node pool) of the node pool to\nupdate. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool) of the node pool to update. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", "required": true, @@ -1058,7 +1058,7 @@ "well-known": { "methods": { "getOpenid-configuration": { - "description": "Gets the OIDC discovery document for the cluster.\nSee the\n[OpenID Connect Discovery 1.0\nspecification](https://openid.net/specs/openid-connect-discovery-1_0.html)\nfor details.\nThis API is not yet intended for general use, and is not available for all\nclusters.", + "description": "Gets the OIDC discovery document for the cluster. See the [OpenID Connect Discovery 1.0 specification](https://openid.net/specs/openid-connect-discovery-1_0.html) for details. 
This API is not yet intended for general use, and is not available for all clusters.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/.well-known/openid-configuration", "httpMethod": "GET", "id": "container.projects.locations.clusters.well-known.getOpenid-configuration", @@ -1067,7 +1067,7 @@ ], "parameters": { "parent": { - "description": "The cluster (project, location, cluster id) to get the discovery document\nfor. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The cluster (project, location, cluster id) to get the discovery document for. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", "required": true, @@ -1095,7 +1095,7 @@ ], "parameters": { "name": { - "description": "The name (project, location, operation id) of the operation to cancel.\nSpecified in the format `projects/*/locations/*/operations/*`.", + "description": "The name (project, location, operation id) of the operation to cancel. Specified in the format `projects/*/locations/*/operations/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", "required": true, @@ -1123,24 +1123,24 @@ ], "parameters": { "name": { - "description": "The name (project, location, operation id) of the operation to get.\nSpecified in the format `projects/*/locations/*/operations/*`.", + "description": "The name (project, location, operation id) of the operation to get. Specified in the format `projects/*/locations/*/operations/*`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", "required": true, "type": "string" }, "operationId": { - "description": "Required. Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The server-assigned `name` of the operation. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" } @@ -1163,19 +1163,19 @@ ], "parameters": { "parent": { - "description": "The parent (project and location) where the operations will be listed.\nSpecified in the format `projects/*/locations/*`.\nLocation \"-\" matches all zones and all regions.", + "description": "The parent (project and location) where the operations will be listed. Specified in the format `projects/*/locations/*`. 
Location \"-\" matches all zones and all regions.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) to return\noperations for, or `-` for all zones. This field has been deprecated and\nreplaced by the parent field.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for, or `-` for all zones. This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" } @@ -1205,18 +1205,18 @@ ], "parameters": { "name": { - "description": "The name (project and location) of the server config to get,\nspecified in the format `projects/*/locations/*`.", + "description": "The name (project and location) of the server config to get, specified in the format `projects/*/locations/*`.", "location": "query", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) to return\noperations for. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1246,19 +1246,19 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. 
Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1287,19 +1287,19 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1317,7 +1317,7 @@ ] }, "create": { - "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default\nnetwork](https://cloud.google.com/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe Kubelet creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range the cluster is using.", + "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances. By default, the cluster is created in the project's [default network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks). One firewall is added for the cluster. After cluster creation, the Kubelet creates routes for each node to allow the containers on that node to communicate with all other instances in the cluster. Finally, an entry is added to the project's global metadata indicating which CIDR range the cluster is using.", "flatPath": "v1beta1/projects/{projectId}/zones/{zone}/clusters", "httpMethod": "POST", "id": "container.projects.zones.clusters.create", @@ -1327,13 +1327,13 @@ ], "parameters": { "projectId": { - "description": "Required. Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" @@ -1351,7 +1351,7 @@ ] }, "delete": { - "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster,\nsuch as load balancer resources, are not deleted if they weren't present\nwhen the cluster was initially created.", + "description": "Deletes the cluster, including the Kubernetes endpoint and all worker nodes. Firewalls and routes that were configured during cluster creation are also deleted. Other Google Compute Engine resources that might be in use by the cluster, such as load balancer resources, are not deleted if they weren't present when the cluster was initially created.", "flatPath": "v1beta1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", "httpMethod": "DELETE", "id": "container.projects.zones.clusters.delete", @@ -1362,24 +1362,24 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to delete.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to delete. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to delete.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to delete. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "query", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. 
The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1405,24 +1405,24 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to retrieve.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to retrieve. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to retrieve.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to retrieve. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "query", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1448,19 +1448,19 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1478,7 +1478,7 @@ ] }, "list": { - "description": "Lists all clusters owned by a project in either the specified zone or all\nzones.", + "description": "Lists all clusters owned by a project in either the specified zone or all zones.", "flatPath": "v1beta1/projects/{projectId}/zones/{zone}/clusters", "httpMethod": "GET", "id": "container.projects.zones.clusters.list", @@ -1488,18 +1488,18 @@ ], "parameters": { "parent": { - "description": "The parent (project and location) where the clusters will be listed.\nSpecified in the format `projects/*/locations/*`.\nLocation \"-\" matches all zones and all regions.", + "description": "The parent (project and location) where the clusters will be listed. Specified in the format `projects/*/locations/*`. Location \"-\" matches all zones and all regions.", "location": "query", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides, or \"-\" for all zones. This field has been deprecated and\nreplaced by the parent field.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides, or \"-\" for all zones. This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" @@ -1514,7 +1514,7 @@ ] }, "locations": { - "description": "Sets the locations for a specific cluster.\nDeprecated. Use\n[projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters/update)\ninstead.", + "description": "Sets the locations for a specific cluster. Deprecated. Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters/update) instead.", "flatPath": "v1beta1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations", "httpMethod": "POST", "id": "container.projects.zones.clusters.locations", @@ -1525,19 +1525,19 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). 
This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1566,19 +1566,19 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1607,19 +1607,19 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1648,19 +1648,19 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1689,19 +1689,19 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1736,13 +1736,13 @@ "type": "string" }, "projectId": { - "description": "Required. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Required. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. 
The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides.", + "description": "Required. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides.", "location": "path", "required": true, "type": "string" @@ -1760,7 +1760,7 @@ ] }, "setMasterAuth": { - "description": "Sets master auth materials. Currently supports changing the admin password\nor a specific cluster, either via password generation or explicitly setting\nthe password.", + "description": "Sets master auth materials. Currently supports changing the admin password or a specific cluster, either via password generation or explicitly setting the password.", "flatPath": "v1beta1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMasterAuth", "httpMethod": "POST", "id": "container.projects.zones.clusters.setMasterAuth", @@ -1771,19 +1771,19 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1812,19 +1812,19 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. 
This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1853,19 +1853,19 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1894,19 +1894,19 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1940,25 +1940,25 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to upgrade. 
This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "nodePoolId": { - "description": "Required. Deprecated. The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the node pool to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1987,19 +1987,19 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", + "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" @@ -2029,30 +2029,30 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "name": { - "description": "The name (project, location, cluster, node pool id) of the node pool to\ndelete. 
Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node pool to delete. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "query", "type": "string" }, "nodePoolId": { - "description": "Required. Deprecated. The name of the node pool to delete.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the node pool to delete. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -2079,30 +2079,30 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "name": { - "description": "The name (project, location, cluster, node pool id) of the node pool to\nget. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node pool to get. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "location": "query", "type": "string" }, "nodePoolId": { - "description": "Required. Deprecated. The name of the node pool.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the node pool. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. 
This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -2128,24 +2128,24 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", + "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "parent": { - "description": "The parent (project, location, cluster id) where the node pools will be\nlisted. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The parent (project, location, cluster id) where the node pools will be listed. Specified in the format `projects/*/locations/*/clusters/*`.", "location": "query", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" @@ -2160,7 +2160,7 @@ ] }, "rollback": { - "description": "Rolls back a previously Aborted or Failed NodePool upgrade.\nThis makes no changes if the last upgrade successfully completed.", + "description": "Rolls back a previously Aborted or Failed NodePool upgrade. This makes no changes if the last upgrade successfully completed.", "flatPath": "v1beta1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}:rollback", "httpMethod": "POST", "id": "container.projects.zones.clusters.nodePools.rollback", @@ -2172,25 +2172,25 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to rollback.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to rollback. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "nodePoolId": { - "description": "Required. Deprecated. The name of the node pool to rollback.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the node pool to rollback. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -2220,25 +2220,25 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "nodePoolId": { - "description": "Required. Deprecated. The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the node pool to update. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -2268,25 +2268,25 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "nodePoolId": { - "description": "Required. Deprecated. The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the node pool to update. 
This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -2316,25 +2316,25 @@ ], "parameters": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "nodePoolId": { - "description": "Required. Deprecated. The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the node pool to upgrade. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -2369,19 +2369,19 @@ ], "parameters": { "operationId": { - "description": "Required. Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The server-assigned `name` of the operation. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\noperation resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the operation resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -2410,24 +2410,24 @@ ], "parameters": { "name": { - "description": "The name (project, location, operation id) of the operation to get.\nSpecified in the format `projects/*/locations/*/operations/*`.", + "description": "The name (project, location, operation id) of the operation to get. Specified in the format `projects/*/locations/*/operations/*`.", "location": "query", "type": "string" }, "operationId": { - "description": "Required. Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The server-assigned `name` of the operation. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -2452,18 +2452,18 @@ ], "parameters": { "parent": { - "description": "The parent (project and location) where the operations will be listed.\nSpecified in the format `projects/*/locations/*`.\nLocation \"-\" matches all zones and all regions.", + "description": "The parent (project and location) where the operations will be listed. Specified in the format `projects/*/locations/*`. Location \"-\" matches all zones and all regions.", "location": "query", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + "description": "Required. 
Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) to return\noperations for, or `-` for all zones. This field has been deprecated and\nreplaced by the parent field.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for, or `-` for all zones. This field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" @@ -2484,7 +2484,7 @@ } } }, - "revision": "20200501", + "revision": "20200928", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -2497,23 +2497,23 @@ "type": "string" }, "acceleratorType": { - "description": "The accelerator type resource name. List of supported accelerators\n[here](https://cloud.google.com/compute/docs/gpus)", + "description": "The accelerator type resource name. List of supported accelerators [here](https://cloud.google.com/compute/docs/gpus)", "type": "string" } }, "type": "object" }, "AddonsConfig": { - "description": "Configuration for the addons that can be automatically spun up in the\ncluster, enabling additional functionality.", + "description": "Configuration for the addons that can be automatically spun up in the cluster, enabling additional functionality.", "id": "AddonsConfig", "properties": { "cloudRunConfig": { "$ref": "CloudRunConfig", - "description": "Configuration for the Cloud Run addon. The `IstioConfig` addon must be\nenabled in order to enable Cloud Run addon. This option can only be enabled\nat cluster creation time." + "description": "Configuration for the Cloud Run addon. The `IstioConfig` addon must be enabled in order to enable Cloud Run addon. This option can only be enabled at cluster creation time." }, "configConnectorConfig": { "$ref": "ConfigConnectorConfig", - "description": "Configuration for the ConfigConnector add-on, a Kubernetes\nextension to manage hosted GCP services through the Kubernetes API" + "description": "Configuration for the ConfigConnector add-on, a Kubernetes extension to manage hosted GCP services through the Kubernetes API" }, "dnsCacheConfig": { "$ref": "DnsCacheConfig", @@ -2525,27 +2525,27 @@ }, "horizontalPodAutoscaling": { "$ref": "HorizontalPodAutoscaling", - "description": "Configuration for the horizontal pod autoscaling feature, which\nincreases or decreases the number of replica pods a replication controller\nhas based on the resource usage of the existing pods." + "description": "Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods." }, "httpLoadBalancing": { "$ref": "HttpLoadBalancing", - "description": "Configuration for the HTTP (L7) load balancing controller addon, which\nmakes it easy to set up HTTP load balancers for services in a cluster." + "description": "Configuration for the HTTP (L7) load balancing controller addon, which makes it easy to set up HTTP load balancers for services in a cluster." 
}, "istioConfig": { "$ref": "IstioConfig", - "description": "Configuration for Istio, an open platform to connect, manage, and secure\nmicroservices." + "description": "Configuration for Istio, an open platform to connect, manage, and secure microservices." }, "kalmConfig": { "$ref": "KalmConfig", - "description": "Configuration for the KALM addon, which manages the lifecycle of k8s\napplications." + "description": "Configuration for the KALM addon, which manages the lifecycle of k8s applications." }, "kubernetesDashboard": { "$ref": "KubernetesDashboard", - "description": "Configuration for the Kubernetes Dashboard.\nThis addon is deprecated, and will be disabled in 1.15. It is recommended\nto use the Cloud Console to manage and monitor your Kubernetes clusters,\nworkloads and applications. For more information, see:\nhttps://cloud.google.com/kubernetes-engine/docs/concepts/dashboards" + "description": "Configuration for the Kubernetes Dashboard. This addon is deprecated, and will be disabled in 1.15. It is recommended to use the Cloud Console to manage and monitor your Kubernetes clusters, workloads and applications. For more information, see: https://cloud.google.com/kubernetes-engine/docs/concepts/dashboards" }, "networkPolicyConfig": { "$ref": "NetworkPolicyConfig", - "description": "Configuration for NetworkPolicy. This only tracks whether the addon\nis enabled or not on the Master, it does not track whether network policy\nis enabled for the nodes." + "description": "Configuration for NetworkPolicy. This only tracks whether the addon is enabled or not on the Master, it does not track whether network policy is enabled for the nodes." } }, "type": "object" @@ -2555,63 +2555,80 @@ "id": "AuthenticatorGroupsConfig", "properties": { "enabled": { - "description": "Whether this cluster should return group membership lookups\nduring authentication using a group of security groups.", + "description": "Whether this cluster should return group membership lookups during authentication using a group of security groups.", "type": "boolean" }, "securityGroup": { - "description": "The name of the security group-of-groups to be used. Only relevant\nif enabled = true.", + "description": "The name of the security group-of-groups to be used. 
Only relevant if enabled = true.", "type": "string" } }, "type": "object" }, "AutoUpgradeOptions": { - "description": "AutoUpgradeOptions defines the set of options for the user to control how\nthe Auto Upgrades will proceed.", + "description": "AutoUpgradeOptions defines the set of options for the user to control how the Auto Upgrades will proceed.", "id": "AutoUpgradeOptions", "properties": { "autoUpgradeStartTime": { - "description": "[Output only] This field is set when upgrades are about to commence\nwith the approximate start time for the upgrades, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "description": "[Output only] This field is set when upgrades are about to commence with the approximate start time for the upgrades, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", "type": "string" }, "description": { - "description": "[Output only] This field is set when upgrades are about to commence\nwith the description of the upgrade.", + "description": "[Output only] This field is set when upgrades are about to commence with the description of the upgrade.", "type": "string" } }, "type": "object" }, "AutoprovisioningNodePoolDefaults": { - "description": "AutoprovisioningNodePoolDefaults contains defaults for a node pool created\nby NAP.", + "description": "AutoprovisioningNodePoolDefaults contains defaults for a node pool created by NAP.", "id": "AutoprovisioningNodePoolDefaults", "properties": { + "bootDiskKmsKey": { + "description": " The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption", + "type": "string" + }, + "diskSizeGb": { + "description": "Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB.", + "format": "int32", + "type": "integer" + }, + "diskType": { + "description": "Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd') If unspecified, the default disk type is 'pd-standard'", + "type": "string" + }, "management": { "$ref": "NodeManagement", - "description": "Specifies the node management options for NAP created node-pools." + "description": "NodeManagement configuration for this NodePool." }, "minCpuPlatform": { - "description": "Minimum CPU platform to be used for NAP created node pools.\nThe instance may be scheduled on the specified or newer CPU platform.\nApplicable values are the friendly names of CPU platforms, such as\n\u003ccode\u003eminCpuPlatform: \u0026quot;Intel Haswell\u0026quot;\u003c/code\u003e or\n\u003ccode\u003eminCpuPlatform: \u0026quot;Intel Sandy Bridge\u0026quot;\u003c/code\u003e. For more\ninformation, read [how to specify min CPU\nplatform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)\nTo unset the min cpu platform field pass \"automatic\" as field value.", + "description": "Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform. Applicable values are the friendly names of CPU platforms, such as `minCpuPlatform: \"Intel Haswell\"` or `minCpuPlatform: \"Intel Sandy Bridge\"`. 
For more information, read [how to specify min CPU platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) To unset the min cpu platform field pass \"automatic\" as field value.", "type": "string" }, "oauthScopes": { - "description": "Scopes that are used by NAP when creating node pools. If oauth_scopes are\nspecified, service_account should be empty.", + "description": "The set of Google API scopes to be made available on all of the node VMs under the \"default\" service account. The following scopes are recommended, but not required, and by default are not included: * `https://www.googleapis.com/auth/compute` is required for mounting persistent storage on your nodes. * `https://www.googleapis.com/auth/devstorage.read_only` is required for communicating with **gcr.io** (the [Google Container Registry](https://cloud.google.com/container-registry/)). If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring are enabled, in which case their required scopes will be added.", "items": { "type": "string" }, "type": "array" }, "serviceAccount": { - "description": "The Google Cloud Platform Service Account to be used by the node VMs. If\nservice_account is specified, scopes should be empty.", + "description": "The Google Cloud Platform Service Account to be used by the node VMs. Specify the email address of the Service Account; otherwise, if no Service Account is specified, the \"default\" service account is used.", "type": "string" }, + "shieldedInstanceConfig": { + "$ref": "ShieldedInstanceConfig", + "description": "Shielded Instance options." + }, "upgradeSettings": { "$ref": "UpgradeSettings", - "description": "Specifies the upgrade settings for NAP created node pools" + "description": "Upgrade settings control disruption and speed of the upgrade." } }, "type": "object" }, "AvailableVersion": { - "description": "AvailableVersion is an additional Kubernetes versions offered\nto users who subscribed to the release channel.", + "description": "Deprecated.", "id": "AvailableVersion", "properties": { "reason": { @@ -2641,7 +2658,7 @@ "id": "BinaryAuthorization", "properties": { "enabled": { - "description": "Enable Binary Authorization for this cluster. If enabled, all container\nimages will be validated by Google Binauthz.", + "description": "Enable Binary Authorization for this cluster. If enabled, all container images will be validated by Google Binauthz.", "type": "boolean" } }, @@ -2652,19 +2669,19 @@ "id": "CancelOperationRequest", "properties": { "name": { - "description": "The name (project, location, operation id) of the operation to cancel.\nSpecified in the format `projects/*/locations/*/operations/*`.", + "description": "The name (project, location, operation id) of the operation to cancel. Specified in the format `projects/*/locations/*/operations/*`.", "type": "string" }, "operationId": { - "description": "Required. Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The server-assigned `name` of the operation. This field has been deprecated and replaced by the name field.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. 
The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\noperation resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the operation resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -2703,6 +2720,20 @@ "disabled": { "description": "Whether Cloud Run addon is enabled for this cluster.", "type": "boolean" + }, + "loadBalancerType": { + "description": "Which load balancer type is installed for Cloud Run.", + "enum": [ + "LOAD_BALANCER_TYPE_UNSPECIFIED", + "LOAD_BALANCER_TYPE_EXTERNAL", + "LOAD_BALANCER_TYPE_INTERNAL" + ], + "enumDescriptions": [ + "Load balancer type for Cloud Run is unspecified.", + "Install external load balancer for Cloud Run.", + "Install internal load balancer for Cloud Run." + ], + "type": "string" } }, "type": "object" @@ -2728,7 +2759,7 @@ "description": "Configuration for Binary Authorization." }, "clusterIpv4Cidr": { - "description": "The IP address range of the container pods in this cluster, in\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`). Leave blank to have\none automatically chosen or specify a `/14` block in `10.0.0.0/8`.", + "description": "The IP address range of the container pods in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`). Leave blank to have one automatically chosen or specify a `/14` block in `10.0.0.0/8`.", "type": "string" }, "clusterTelemetry": { @@ -2742,8 +2773,12 @@ }, "type": "array" }, + "confidentialNodes": { + "$ref": "ConfidentialNodes", + "description": "Configuration of Confidential Nodes" + }, "createTime": { - "description": "[Output only] The time the cluster was created, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "description": "[Output only] The time the cluster was created, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", "type": "string" }, "currentMasterVersion": { @@ -2751,12 +2786,12 @@ "type": "string" }, "currentNodeCount": { - "description": "[Output only] The number of nodes currently in the cluster. Deprecated.\nCall Kubernetes API directly to retrieve node information.", + "description": "[Output only] The number of nodes currently in the cluster. Deprecated. Call Kubernetes API directly to retrieve node information.", "format": "int32", "type": "integer" }, "currentNodeVersion": { - "description": "[Output only] Deprecated, use\n[NodePool.version](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters.nodePools)\ninstead. The current version of the node software components.\nIf they are currently at multiple versions because they're in the process\nof being upgraded, this reflects the minimum version of all nodes.", + "description": "[Output only] Deprecated, use [NodePool.version](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters.nodePools) instead. The current version of the node software components. 
If they are currently at multiple versions because they're in the process of being upgraded, this reflects the minimum version of all nodes.", "type": "string" }, "databaseEncryption": { @@ -2765,34 +2800,34 @@ }, "defaultMaxPodsConstraint": { "$ref": "MaxPodsConstraint", - "description": "The default constraint on the maximum number of pods that can be run\nsimultaneously on a node in the node pool of this cluster. Only honored\nif cluster created with IP Alias support." + "description": "The default constraint on the maximum number of pods that can be run simultaneously on a node in the node pool of this cluster. Only honored if cluster created with IP Alias support." }, "description": { "description": "An optional description of this cluster.", "type": "string" }, "enableKubernetesAlpha": { - "description": "Kubernetes alpha features are enabled on this cluster. This includes alpha\nAPI groups (e.g. v1beta1) and features that may not be production ready in\nthe kubernetes version of the master and nodes.\nThe cluster has no SLA for uptime and master/node upgrades are disabled.\nAlpha enabled clusters are automatically deleted thirty days after\ncreation.", + "description": "Kubernetes alpha features are enabled on this cluster. This includes alpha API groups (e.g. v1beta1) and features that may not be production ready in the kubernetes version of the master and nodes. The cluster has no SLA for uptime and master/node upgrades are disabled. Alpha enabled clusters are automatically deleted thirty days after creation.", "type": "boolean" }, "enableTpu": { - "description": "Enable the ability to use Cloud TPUs in this cluster.\nThis field is deprecated, use tpu_config.enabled instead.", + "description": "Enable the ability to use Cloud TPUs in this cluster. This field is deprecated, use tpu_config.enabled instead.", "type": "boolean" }, "endpoint": { - "description": "[Output only] The IP address of this cluster's master endpoint.\nThe endpoint can be accessed from the internet at\n`https://username:password@endpoint/`.\n\nSee the `masterAuth` property of this resource for username and\npassword information.", + "description": "[Output only] The IP address of this cluster's master endpoint. The endpoint can be accessed from the internet at `https://username:password@endpoint/`. See the `masterAuth` property of this resource for username and password information.", "type": "string" }, "expireTime": { - "description": "[Output only] The time the cluster will be automatically\ndeleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "description": "[Output only] The time the cluster will be automatically deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", "type": "string" }, "initialClusterVersion": { - "description": "The initial Kubernetes version for this cluster. Valid versions are those\nfound in validMasterVersions returned by getServerConfig. 
The version can\nbe upgraded over time; such upgrades are reflected in\ncurrentMasterVersion and currentNodeVersion.\n\nUsers may specify either explicit versions offered by\nKubernetes Engine or version aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"\",\"-\": picks the default Kubernetes version", + "description": "The initial Kubernetes version for this cluster. Valid versions are those found in validMasterVersions returned by getServerConfig. The version can be upgraded over time; such upgrades are reflected in currentMasterVersion and currentNodeVersion. Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - \"latest\": picks the highest valid Kubernetes version - \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version - \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version - \"1.X.Y-gke.N\": picks an explicit Kubernetes version - \"\",\"-\": picks the default Kubernetes version", "type": "string" }, "initialNodeCount": { - "description": "The number of nodes to create in this cluster. You must ensure that your\nCompute Engine \u003ca href=\"/compute/docs/resource-quotas\"\u003eresource quota\u003c/a\u003e\nis sufficient for this number of instances. You must also have available\nfirewall and routes quota.\nFor requests, this field should only be used in lieu of a\n\"node_pool\" object, since this configuration (along with the\n\"node_config\") will be used to create a \"NodePool\" object with an\nauto-generated name. Do not use this and a node_pool at the same time.\n\nThis field is deprecated, use node_pool.initial_node_count instead.", + "description": "The number of nodes to create in this cluster. You must ensure that your Compute Engine [resource quota](https://cloud.google.com/compute/quotas) is sufficient for this number of instances. You must also have available firewall and routes quota. For requests, this field should only be used in lieu of a \"node_pool\" object, since this configuration (along with the \"node_config\") will be used to create a \"NodePool\" object with an auto-generated name. Do not use this and a node_pool at the same time. This field is deprecated, use node_pool.initial_node_count instead.", "format": "int32", "type": "integer" }, @@ -2816,46 +2851,50 @@ "description": "Configuration for the legacy ABAC authorization mode." 
}, "location": { - "description": "[Output only] The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available)\nor\n[region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available)\nin which the cluster resides.", + "description": "[Output only] The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) or [region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) in which the cluster resides.", "type": "string" }, "locations": { - "description": "The list of Google Compute Engine\n[zones](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster's nodes should be located.", + "description": "The list of Google Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available) in which the cluster's nodes should be located.", "items": { "type": "string" }, "type": "array" }, "loggingService": { - "description": "The logging service the cluster should use to write logs.\nCurrently available options:\n\n* `logging.googleapis.com/kubernetes` - The Cloud Logging\nservice with a Kubernetes-native resource model\n* `logging.googleapis.com` - The legacy Cloud Logging service (no longer\n available as of GKE 1.15).\n* `none` - no logs will be exported from the cluster.\n\nIf left as an empty string,`logging.googleapis.com/kubernetes` will be\nused for GKE 1.14+ or `logging.googleapis.com` for earlier versions.", + "description": "The logging service the cluster should use to write logs. Currently available options: * `logging.googleapis.com/kubernetes` - The Cloud Logging service with a Kubernetes-native resource model * `logging.googleapis.com` - The legacy Cloud Logging service (no longer available as of GKE 1.15). * `none` - no logs will be exported from the cluster. If left as an empty string,`logging.googleapis.com/kubernetes` will be used for GKE 1.14+ or `logging.googleapis.com` for earlier versions.", "type": "string" }, "maintenancePolicy": { "$ref": "MaintenancePolicy", "description": "Configure the maintenance policy for this cluster." }, + "master": { + "$ref": "Master", + "description": "Configuration for master components." + }, "masterAuth": { "$ref": "MasterAuth", - "description": "The authentication information for accessing the master endpoint.\nIf unspecified, the defaults are used:\nFor clusters before v1.12, if master_auth is unspecified, `username` will\nbe set to \"admin\", a random password will be generated, and a client\ncertificate will be issued." + "description": "The authentication information for accessing the master endpoint. If unspecified, the defaults are used: For clusters before v1.12, if master_auth is unspecified, `username` will be set to \"admin\", a random password will be generated, and a client certificate will be issued." }, "masterAuthorizedNetworksConfig": { "$ref": "MasterAuthorizedNetworksConfig", "description": "The configuration options for master authorized networks feature." }, "masterIpv4CidrBlock": { - "description": "The IP prefix in CIDR notation to use for the hosted master network.\nThis prefix will be used for assigning private IP addresses to the\nmaster or set of masters, as well as the ILB VIP.\nThis field is deprecated, use\nprivate_cluster_config.master_ipv4_cidr_block instead.", + "description": "The IP prefix in CIDR notation to use for the hosted master network. 
This prefix will be used for assigning private IP addresses to the master or set of masters, as well as the ILB VIP. This field is deprecated, use private_cluster_config.master_ipv4_cidr_block instead.", "type": "string" }, "monitoringService": { - "description": "The monitoring service the cluster should use to write metrics.\nCurrently available options:\n\n* \"monitoring.googleapis.com/kubernetes\" - The Cloud Monitoring\nservice with a Kubernetes-native resource model\n* `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no\n longer available as of GKE 1.15).\n* `none` - No metrics will be exported from the cluster.\n\nIf left as an empty string,`monitoring.googleapis.com/kubernetes` will be\nused for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.", + "description": "The monitoring service the cluster should use to write metrics. Currently available options: * \"monitoring.googleapis.com/kubernetes\" - The Cloud Monitoring service with a Kubernetes-native resource model * `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no longer available as of GKE 1.15). * `none` - No metrics will be exported from the cluster. If left as an empty string,`monitoring.googleapis.com/kubernetes` will be used for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.", "type": "string" }, "name": { - "description": "The name of this cluster. The name must be unique within this project\nand location (e.g. zone or region), and can be up to 40 characters with\nthe following restrictions:\n\n* Lowercase letters, numbers, and hyphens only.\n* Must start with a letter.\n* Must end with a number or a letter.", + "description": "The name of this cluster. The name must be unique within this project and location (e.g. zone or region), and can be up to 40 characters with the following restrictions: * Lowercase letters, numbers, and hyphens only. * Must start with a letter. * Must end with a number or a letter.", "type": "string" }, "network": { - "description": "The name of the Google Compute Engine\n[network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)\nto which the cluster is connected. If left unspecified, the `default`\nnetwork will be used. On output this shows the network ID instead of the\nname.", + "description": "The name of the Google Compute Engine [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) to which the cluster is connected. If left unspecified, the `default` network will be used. On output this shows the network ID instead of the name.", "type": "string" }, "networkConfig": { @@ -2868,26 +2907,30 @@ }, "nodeConfig": { "$ref": "NodeConfig", - "description": "Parameters used in creating the cluster's nodes.\nFor requests, this field should only be used in lieu of a\n\"node_pool\" object, since this configuration (along with the\n\"initial_node_count\") will be used to create a \"NodePool\" object with an\nauto-generated name. Do not use this and a node_pool at the same time.\nFor responses, this field will be populated with the node configuration of\nthe first node pool. (For configuration of each node pool, see\n`node_pool.config`)\n\nIf unspecified, the defaults are used.\nThis field is deprecated, use node_pool.config instead." + "description": "Parameters used in creating the cluster's nodes. 
For requests, this field should only be used in lieu of a \"node_pool\" object, since this configuration (along with the \"initial_node_count\") will be used to create a \"NodePool\" object with an auto-generated name. Do not use this and a node_pool at the same time. For responses, this field will be populated with the node configuration of the first node pool. (For configuration of each node pool, see `node_pool.config`) If unspecified, the defaults are used. This field is deprecated, use node_pool.config instead." }, "nodeIpv4CidrSize": { - "description": "[Output only] The size of the address space on each node for hosting\ncontainers. This is provisioned from within the `container_ipv4_cidr`\nrange. This field will only be set when cluster is in route-based network\nmode.", + "description": "[Output only] The size of the address space on each node for hosting containers. This is provisioned from within the `container_ipv4_cidr` range. This field will only be set when cluster is in route-based network mode.", "format": "int32", "type": "integer" }, "nodePools": { - "description": "The node pools associated with this cluster.\nThis field should not be set if \"node_config\" or \"initial_node_count\" are\nspecified.", + "description": "The node pools associated with this cluster. This field should not be set if \"node_config\" or \"initial_node_count\" are specified.", "items": { "$ref": "NodePool" }, "type": "array" }, + "notificationConfig": { + "$ref": "NotificationConfig", + "description": "Notification configuration of the cluster." + }, "podSecurityPolicyConfig": { "$ref": "PodSecurityPolicyConfig", "description": "Configuration for the PodSecurityPolicy feature." }, "privateCluster": { - "description": "If this is a private cluster setup. Private clusters are clusters that, by\ndefault have no external IP addresses on the nodes and where nodes and the\nmaster communicate over private IP addresses.\nThis field is deprecated, use private_cluster_config.enable_private_nodes\ninstead.", + "description": "If this is a private cluster setup. Private clusters are clusters that, by default have no external IP addresses on the nodes and where nodes and the master communicate over private IP addresses. This field is deprecated, use private_cluster_config.enable_private_nodes instead.", "type": "boolean" }, "privateClusterConfig": { @@ -2902,19 +2945,19 @@ "additionalProperties": { "type": "string" }, - "description": "The resource labels for the cluster to use to annotate any related\nGoogle Compute Engine resources.", + "description": "The resource labels for the cluster to use to annotate any related Google Compute Engine resources.", "type": "object" }, "resourceUsageExportConfig": { "$ref": "ResourceUsageExportConfig", - "description": "Configuration for exporting resource usages. Resource usage export is\ndisabled when this config unspecified." + "description": "Configuration for exporting resource usages. Resource usage export is disabled when this config unspecified." }, "selfLink": { "description": "[Output only] Server-defined URL for the resource.", "type": "string" }, "servicesIpv4Cidr": { - "description": "[Output only] The IP address range of the Kubernetes services in\nthis cluster, in\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `1.2.3.4/29`). 
Service addresses are\ntypically put in the last `/16` from the container CIDR.", + "description": "[Output only] The IP address range of the Kubernetes services in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). Service addresses are typically put in the last `/16` from the container CIDR.", "type": "string" }, "shieldedNodes": { @@ -2935,20 +2978,20 @@ "enumDescriptions": [ "Not set.", "The PROVISIONING state indicates the cluster is being created.", - "The RUNNING state indicates the cluster has been created and is fully\nusable.", - "The RECONCILING state indicates that some work is actively being done on\nthe cluster, such as upgrading the master or node software. Details can\nbe found in the `statusMessage` field.", + "The RUNNING state indicates the cluster has been created and is fully usable.", + "The RECONCILING state indicates that some work is actively being done on the cluster, such as upgrading the master or node software. Details can be found in the `statusMessage` field.", "The STOPPING state indicates the cluster is being deleted.", - "The ERROR state indicates the cluster may be unusable. Details\ncan be found in the `statusMessage` field.", - "The DEGRADED state indicates the cluster requires user action to restore\nfull functionality. Details can be found in the `statusMessage` field." + "The ERROR state indicates the cluster may be unusable. Details can be found in the `statusMessage` field.", + "The DEGRADED state indicates the cluster requires user action to restore full functionality. Details can be found in the `statusMessage` field." ], "type": "string" }, "statusMessage": { - "description": "[Output only] Additional information about the current status of this\ncluster, if available.", + "description": "[Output only] Additional information about the current status of this cluster, if available.", "type": "string" }, "subnetwork": { - "description": "The name of the Google Compute Engine\n[subnetwork](https://cloud.google.com/compute/docs/subnetworks) to which\nthe cluster is connected. On output this shows the subnetwork ID instead of\nthe name.", + "description": "The name of the Google Compute Engine [subnetwork](https://cloud.google.com/compute/docs/subnetworks) to which the cluster is connected. On output this shows the subnetwork ID instead of the name.", "type": "string" }, "tpuConfig": { @@ -2956,7 +2999,7 @@ "description": "Configuration for Cloud TPU support;" }, "tpuIpv4CidrBlock": { - "description": "[Output only] The IP address range of the Cloud TPUs in this cluster, in\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `1.2.3.4/29`).", + "description": "[Output only] The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`).", "type": "string" }, "verticalPodAutoscaling": { @@ -2965,21 +3008,21 @@ }, "workloadIdentityConfig": { "$ref": "WorkloadIdentityConfig", - "description": "Configuration for the use of Kubernetes Service Accounts in GCP IAM\npolicies." + "description": "Configuration for the use of Kubernetes Service Accounts in GCP IAM policies." }, "zone": { - "description": "[Output only] The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. 
This field is deprecated, use location instead.", + "description": "[Output only] The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field is deprecated, use location instead.", "type": "string" } }, "type": "object" }, "ClusterAutoscaling": { - "description": "ClusterAutoscaling contains global, per-cluster information\nrequired by Cluster Autoscaler to automatically adjust\nthe size of the cluster and create/delete\nnode pools based on the current needs.", + "description": "ClusterAutoscaling contains global, per-cluster information required by Cluster Autoscaler to automatically adjust the size of the cluster and create/delete node pools based on the current needs.", "id": "ClusterAutoscaling", "properties": { "autoprovisioningLocations": { - "description": "The list of Google Compute Engine\n[zones](https://cloud.google.com/compute/docs/zones#available) in which the\nNodePool's nodes can be created by NAP.", + "description": "The list of Google Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available) in which the NodePool's nodes can be created by NAP.", "items": { "type": "string" }, @@ -2987,7 +3030,7 @@ }, "autoprovisioningNodePoolDefaults": { "$ref": "AutoprovisioningNodePoolDefaults", - "description": "AutoprovisioningNodePoolDefaults contains defaults for a node pool\ncreated by NAP." + "description": "AutoprovisioningNodePoolDefaults contains defaults for a node pool created by NAP." }, "autoscalingProfile": { "description": "Defines autoscaling behaviour.", @@ -3008,7 +3051,7 @@ "type": "boolean" }, "resourceLimits": { - "description": "Contains global constraints regarding minimum and maximum\namount of resources in the cluster.", + "description": "Contains global constraints regarding minimum and maximum amount of resources in the cluster.", "items": { "$ref": "ResourceLimit" }, @@ -3041,7 +3084,7 @@ "type": "object" }, "ClusterUpdate": { - "description": "ClusterUpdate describes an update to the cluster. Exactly one update can\nbe applied to a cluster with each request, so at most one field can be\nprovided.", + "description": "ClusterUpdate describes an update to the cluster. Exactly one update can be applied to a cluster with each request, so at most one field can be provided.", "id": "ClusterUpdate", "properties": { "desiredAddonsConfig": { @@ -3064,8 +3107,26 @@ "$ref": "DatabaseEncryption", "description": "Configuration of etcd encryption." }, + "desiredDatapathProvider": { + "description": "The desired datapath provider for the cluster.", + "enum": [ + "DATAPATH_PROVIDER_UNSPECIFIED", + "LEGACY_DATAPATH", + "ADVANCED_DATAPATH" + ], + "enumDescriptions": [ + "Default value.", + "Use the IPTables implementation based on kube-proxy.", + "Use the eBPF based GKE Dataplane V2 with additional features. See the [GKE Dataplane V2 documentation](https://cloud.google.com/kubernetes-enginw/docs/how-to/dataplane-v2) for more." + ], + "type": "string" + }, + "desiredDefaultSnatStatus": { + "$ref": "DefaultSnatStatus", + "description": "The desired status of whether to disable default sNAT for this cluster." + }, "desiredImageType": { - "description": "The desired image type for the node pool.\nNOTE: Set the \"desired_node_pool\" field as well.", + "description": "The desired image type for the node pool. 
NOTE: Set the \"desired_node_pool\" field as well.", "type": "string" }, "desiredIntraNodeVisibilityConfig": { @@ -3073,40 +3134,48 @@ "description": "The desired config of Intra-node visibility." }, "desiredLocations": { - "description": "The desired list of Google Compute Engine\n[zones](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster's nodes should be located. Changing the locations a cluster is in\nwill result in nodes being either created or removed from the cluster,\ndepending on whether locations are being added or removed.\n\nThis list must always include the cluster's primary zone.", + "description": "The desired list of Google Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available) in which the cluster's nodes should be located. Changing the locations a cluster is in will result in nodes being either created or removed from the cluster, depending on whether locations are being added or removed. This list must always include the cluster's primary zone.", "items": { "type": "string" }, "type": "array" }, "desiredLoggingService": { - "description": "The logging service the cluster should use to write logs.\nCurrently available options:\n\n* `logging.googleapis.com/kubernetes` - The Cloud Logging\nservice with a Kubernetes-native resource model\n* `logging.googleapis.com` - The legacy Cloud Logging service (no longer\n available as of GKE 1.15).\n* `none` - no logs will be exported from the cluster.\n\nIf left as an empty string,`logging.googleapis.com/kubernetes` will be\nused for GKE 1.14+ or `logging.googleapis.com` for earlier versions.", + "description": "The logging service the cluster should use to write logs. Currently available options: * `logging.googleapis.com/kubernetes` - The Cloud Logging service with a Kubernetes-native resource model * `logging.googleapis.com` - The legacy Cloud Logging service (no longer available as of GKE 1.15). * `none` - no logs will be exported from the cluster. If left as an empty string,`logging.googleapis.com/kubernetes` will be used for GKE 1.14+ or `logging.googleapis.com` for earlier versions.", "type": "string" }, + "desiredMaster": { + "$ref": "Master", + "description": "Configuration for master components." + }, "desiredMasterAuthorizedNetworksConfig": { "$ref": "MasterAuthorizedNetworksConfig", "description": "The desired configuration options for master authorized networks feature." }, "desiredMasterVersion": { - "description": "The Kubernetes version to change the master to. The only valid value is the\nlatest supported version.\n\nUsers may specify either explicit versions offered by\nKubernetes Engine or version aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"-\": picks the default Kubernetes version", + "description": "The Kubernetes version to change the master to. The only valid value is the latest supported version. 
Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - \"latest\": picks the highest valid Kubernetes version - \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version - \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version - \"1.X.Y-gke.N\": picks an explicit Kubernetes version - \"-\": picks the default Kubernetes version", "type": "string" }, "desiredMonitoringService": { - "description": "The monitoring service the cluster should use to write metrics.\nCurrently available options:\n\n* \"monitoring.googleapis.com/kubernetes\" - The Cloud Monitoring\nservice with a Kubernetes-native resource model\n* `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no\n longer available as of GKE 1.15).\n* `none` - No metrics will be exported from the cluster.\n\nIf left as an empty string,`monitoring.googleapis.com/kubernetes` will be\nused for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.", + "description": "The monitoring service the cluster should use to write metrics. Currently available options: * \"monitoring.googleapis.com/kubernetes\" - The Cloud Monitoring service with a Kubernetes-native resource model * `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no longer available as of GKE 1.15). * `none` - No metrics will be exported from the cluster. If left as an empty string,`monitoring.googleapis.com/kubernetes` will be used for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.", "type": "string" }, "desiredNodePoolAutoscaling": { "$ref": "NodePoolAutoscaling", - "description": "Autoscaler configuration for the node pool specified in\ndesired_node_pool_id. If there is only one pool in the\ncluster and desired_node_pool_id is not provided then\nthe change applies to that single node pool." + "description": "Autoscaler configuration for the node pool specified in desired_node_pool_id. If there is only one pool in the cluster and desired_node_pool_id is not provided then the change applies to that single node pool." }, "desiredNodePoolId": { - "description": "The node pool to be upgraded. This field is mandatory if\n\"desired_node_version\", \"desired_image_family\",\n\"desired_node_pool_autoscaling\", or \"desired_workload_metadata_config\"\nis specified and there is more than one node pool on the cluster.", + "description": "The node pool to be upgraded. This field is mandatory if \"desired_node_version\", \"desired_image_family\", \"desired_node_pool_autoscaling\", or \"desired_workload_metadata_config\" is specified and there is more than one node pool on the cluster.", "type": "string" }, "desiredNodeVersion": { - "description": "The Kubernetes version to change the nodes to (typically an\nupgrade).\n\nUsers may specify either explicit versions offered by\nKubernetes Engine or version aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"-\": picks the Kubernetes master version", + "description": "The Kubernetes version to change the nodes to (typically an upgrade). 
Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - \"latest\": picks the highest valid Kubernetes version - \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version - \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version - \"1.X.Y-gke.N\": picks an explicit Kubernetes version - \"-\": picks the Kubernetes master version", "type": "string" }, + "desiredNotificationConfig": { + "$ref": "NotificationConfig", + "description": "The desired notification configuration." + }, "desiredPodSecurityPolicyConfig": { "$ref": "PodSecurityPolicyConfig", "description": "The desired configuration options for the PodSecurityPolicy feature." @@ -3147,24 +3216,35 @@ "id": "CompleteIPRotationRequest", "properties": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster id) of the cluster to complete IP\nrotation. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to complete IP rotation. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, "type": "object" }, + "ConfidentialNodes": { + "description": "ConfidentialNodes is configuration for the confidential nodes feature, which makes nodes run on confidential VMs.", + "id": "ConfidentialNodes", + "properties": { + "enabled": { + "description": "Whether Confidential Nodes feature is enabled for all nodes in this cluster.", + "type": "boolean" + } + }, + "type": "object" + }, "ConfigConnectorConfig": { "description": "Configuration options for the Config Connector add-on.", "id": "ConfigConnectorConfig", @@ -3181,7 +3261,7 @@ "id": "ConsumptionMeteringConfig", "properties": { "enabled": { - "description": "Whether to enable consumption metering for this cluster. If enabled, a\nsecond BigQuery table will be created to hold resource consumption\nrecords.", + "description": "Whether to enable consumption metering for this cluster. If enabled, a second BigQuery table will be created to hold resource consumption records.", "type": "boolean" } }, @@ -3193,18 +3273,18 @@ "properties": { "cluster": { "$ref": "Cluster", - "description": "Required. 
A [cluster\nresource](https://cloud.google.com/container-engine/reference/rest/v1beta1/projects.zones.clusters)" + "description": "Required. A [cluster resource](https://cloud.google.com/container-engine/reference/rest/v1beta1/projects.locations.clusters)" }, "parent": { - "description": "The parent (project and location) where the cluster will be created.\nSpecified in the format `projects/*/locations/*`.", + "description": "The parent (project and location) where the cluster will be created. Specified in the format `projects/*/locations/*`.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", "type": "string" } }, @@ -3215,7 +3295,7 @@ "id": "CreateNodePoolRequest", "properties": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", + "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the parent field.", "type": "string" }, "nodePool": { @@ -3223,15 +3303,15 @@ "description": "Required. The node pool to create." }, "parent": { - "description": "The parent (project, location, cluster id) where the node pool will be\ncreated. Specified in the format\n`projects/*/locations/*/clusters/*`.", + "description": "The parent (project, location, cluster id) where the node pool will be created. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the parent field.", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the parent field.", "type": "string" } }, @@ -3242,11 +3322,11 @@ "id": "DailyMaintenanceWindow", "properties": { "duration": { - "description": "[Output only] Duration of the time window, automatically chosen to be\nsmallest possible in the given scenario.", + "description": "[Output only] Duration of the time window, automatically chosen to be smallest possible in the given scenario.", "type": "string" }, "startTime": { - "description": "Time within the maintenance window to start the maintenance operations.\nIt must be in format \"HH:MM\", where HH : [00-23] and MM : [00-59] GMT.", + "description": "Time within the maintenance window to start the maintenance operations. It must be in format \"HH:MM\", where HH : [00-23] and MM : [00-59] GMT.", "type": "string" } }, @@ -3257,7 +3337,7 @@ "id": "DatabaseEncryption", "properties": { "keyName": { - "description": "Name of CloudKMS key to use for the encryption of secrets in etcd.\nEx. projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key", + "description": "Name of CloudKMS key to use for the encryption of secrets in etcd. Ex. projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key", "type": "string" }, "state": { @@ -3270,13 +3350,24 @@ "enumDescriptions": [ "Should never be set", "Secrets in etcd are encrypted.", - "Secrets in etcd are stored in plain text (at etcd level) - this is\nunrelated to Compute Engine level full disk encryption." + "Secrets in etcd are stored in plain text (at etcd level) - this is unrelated to Compute Engine level full disk encryption." ], "type": "string" } }, "type": "object" }, + "DefaultSnatStatus": { + "description": "DefaultSnatStatus contains the desired state of whether default sNAT should be disabled on the cluster.", + "id": "DefaultSnatStatus", + "properties": { + "disabled": { + "description": "Disables cluster default sNAT rules.", + "type": "boolean" + } + }, + "type": "object" + }, "DnsCacheConfig": { "description": "Configuration for NodeLocal DNSCache", "id": "DnsCacheConfig", @@ -3289,13 +3380,13 @@ "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" }, "GcePersistentDiskCsiDriverConfig": { - "description": "Configuration for the Compute Engine PD CSI driver. This option can only be\nenabled at cluster creation time.", + "description": "Configuration for the Compute Engine PD CSI driver. This option can only be enabled at cluster creation time.", "id": "GcePersistentDiskCsiDriverConfig", "properties": { "enabled": { @@ -3311,10 +3402,10 @@ "properties": { "cacheHeader": { "$ref": "HttpCacheControlResponseHeader", - "description": "OnePlatform automatically extracts this field and uses it to set the HTTP\nCache-Control header." 
+ "description": "OnePlatform automatically extracts this field and uses it to set the HTTP Cache-Control header." }, "keys": { - "description": "The public component of the keys used by the cluster to sign token\nrequests.", + "description": "The public component of the keys used by the cluster to sign token requests.", "items": { "$ref": "Jwk" }, @@ -3324,12 +3415,12 @@ "type": "object" }, "GetOpenIDConfigResponse": { - "description": "GetOpenIDConfigResponse is an OIDC discovery document for the cluster.\nSee the OpenID Connect Discovery 1.0 specification for details.", + "description": "GetOpenIDConfigResponse is an OIDC discovery document for the cluster. See the OpenID Connect Discovery 1.0 specification for details.", "id": "GetOpenIDConfigResponse", "properties": { "cacheHeader": { "$ref": "HttpCacheControlResponseHeader", - "description": "OnePlatform automatically extracts this field and uses it to set the HTTP\nCache-Control header." + "description": "OnePlatform automatically extracts this field and uses it to set the HTTP Cache-Control header." }, "claims_supported": { "description": "Supported claims.", @@ -3378,11 +3469,11 @@ "type": "object" }, "HorizontalPodAutoscaling": { - "description": "Configuration options for the horizontal pod autoscaling feature, which\nincreases or decreases the number of replica pods a replication controller\nhas based on the resource usage of the existing pods.", + "description": "Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods.", "id": "HorizontalPodAutoscaling", "properties": { "disabled": { - "description": "Whether the Horizontal Pod Autoscaling feature is enabled in the cluster.\nWhen enabled, it ensures that metrics are collected into Stackdriver\nMonitoring.", + "description": "Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that metrics are collected into Stackdriver Monitoring.", "type": "boolean" } }, @@ -3409,11 +3500,11 @@ "type": "object" }, "HttpLoadBalancing": { - "description": "Configuration options for the HTTP (L7) load balancing controller addon,\nwhich makes it easy to set up HTTP load balancers for services in a cluster.", + "description": "Configuration options for the HTTP (L7) load balancing controller addon, which makes it easy to set up HTTP load balancers for services in a cluster.", "id": "HttpLoadBalancing", "properties": { "disabled": { - "description": "Whether the HTTP Load Balancing controller is enabled in the cluster.\nWhen enabled, it runs a small pod in the cluster that manages the load\nbalancers.", + "description": "Whether the HTTP Load Balancing controller is enabled in the cluster. When enabled, it runs a small pod in the cluster that manages the load balancers.", "type": "boolean" } }, @@ -3424,7 +3515,7 @@ "id": "IPAllocationPolicy", "properties": { "allowRouteOverlap": { - "description": "If true, allow allocation of cluster CIDR ranges that overlap with certain\nkinds of network routes. By default we do not allow cluster CIDR ranges to\nintersect with any user declared routes. With allow_route_overlap == true,\nwe allow overlapping with CIDR ranges that are larger than the cluster CIDR\nrange.\n\nIf this field is set to true, then cluster and services CIDRs must be\nfully-specified (e.g. 
`10.96.0.0/14`, but not `/14`), which means:\n1) When `use_ip_aliases` is true, `cluster_ipv4_cidr_block` and\n `services_ipv4_cidr_block` must be fully-specified.\n2) When `use_ip_aliases` is false, `cluster.cluster_ipv4_cidr` muse be\n fully-specified.", + "description": "If true, allow allocation of cluster CIDR ranges that overlap with certain kinds of network routes. By default we do not allow cluster CIDR ranges to intersect with any user declared routes. With allow_route_overlap == true, we allow overlapping with CIDR ranges that are larger than the cluster CIDR range. If this field is set to true, then cluster and services CIDRs must be fully-specified (e.g. `10.96.0.0/14`, but not `/14`), which means: 1) When `use_ip_aliases` is true, `cluster_ipv4_cidr_block` and `services_ipv4_cidr_block` must be fully-specified. 2) When `use_ip_aliases` is false, `cluster.cluster_ipv4_cidr` muse be fully-specified.", "type": "boolean" }, "clusterIpv4Cidr": { @@ -3432,15 +3523,15 @@ "type": "string" }, "clusterIpv4CidrBlock": { - "description": "The IP address range for the cluster pod IPs. If this field is set, then\n`cluster.cluster_ipv4_cidr` must be left blank.\n\nThis field is only applicable when `use_ip_aliases` is true.\n\nSet to blank to have a range chosen with the default size.\n\nSet to /netmask (e.g. `/14`) to have a range chosen with a specific\nnetmask.\n\nSet to a\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.", + "description": "The IP address range for the cluster pod IPs. If this field is set, then `cluster.cluster_ipv4_cidr` must be left blank. This field is only applicable when `use_ip_aliases` is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. `/14`) to have a range chosen with a specific netmask. Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.", "type": "string" }, "clusterSecondaryRangeName": { - "description": "The name of the secondary range to be used for the cluster CIDR\nblock. The secondary range will be used for pod IP\naddresses. This must be an existing secondary range associated\nwith the cluster subnetwork.\n\nThis field is only applicable with use_ip_aliases and\ncreate_subnetwork is false.", + "description": "The name of the secondary range to be used for the cluster CIDR block. The secondary range will be used for pod IP addresses. This must be an existing secondary range associated with the cluster subnetwork. This field is only applicable with use_ip_aliases and create_subnetwork is false.", "type": "string" }, "createSubnetwork": { - "description": "Whether a new subnetwork will be created automatically for the cluster.\n\nThis field is only applicable when `use_ip_aliases` is true.", + "description": "Whether a new subnetwork will be created automatically for the cluster. This field is only applicable when `use_ip_aliases` is true.", "type": "boolean" }, "nodeIpv4Cidr": { @@ -3448,7 +3539,7 @@ "type": "string" }, "nodeIpv4CidrBlock": { - "description": "The IP address range of the instance IPs in this cluster.\n\nThis is applicable only if `create_subnetwork` is true.\n\nSet to blank to have a range chosen with the default size.\n\nSet to /netmask (e.g. 
`/14`) to have a range chosen with a specific\nnetmask.\n\nSet to a\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.", + "description": "The IP address range of the instance IPs in this cluster. This is applicable only if `create_subnetwork` is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. `/14`) to have a range chosen with a specific netmask. Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.", "type": "string" }, "servicesIpv4Cidr": { @@ -3456,34 +3547,34 @@ "type": "string" }, "servicesIpv4CidrBlock": { - "description": "The IP address range of the services IPs in this cluster. If blank, a range\nwill be automatically chosen with the default size.\n\nThis field is only applicable when `use_ip_aliases` is true.\n\nSet to blank to have a range chosen with the default size.\n\nSet to /netmask (e.g. `/14`) to have a range chosen with a specific\nnetmask.\n\nSet to a\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.", + "description": "The IP address range of the services IPs in this cluster. If blank, a range will be automatically chosen with the default size. This field is only applicable when `use_ip_aliases` is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. `/14`) to have a range chosen with a specific netmask. Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.", "type": "string" }, "servicesSecondaryRangeName": { - "description": "The name of the secondary range to be used as for the services\nCIDR block. The secondary range will be used for service\nClusterIPs. This must be an existing secondary range associated\nwith the cluster subnetwork.\n\nThis field is only applicable with use_ip_aliases and\ncreate_subnetwork is false.", + "description": "The name of the secondary range to be used as for the services CIDR block. The secondary range will be used for service ClusterIPs. This must be an existing secondary range associated with the cluster subnetwork. This field is only applicable with use_ip_aliases and create_subnetwork is false.", "type": "string" }, "subnetworkName": { - "description": "A custom subnetwork name to be used if `create_subnetwork` is true. If\nthis field is empty, then an automatic name will be chosen for the new\nsubnetwork.", + "description": "A custom subnetwork name to be used if `create_subnetwork` is true. If this field is empty, then an automatic name will be chosen for the new subnetwork.", "type": "string" }, "tpuIpv4CidrBlock": { - "description": "The IP address range of the Cloud TPUs in this cluster. If unspecified, a\nrange will be automatically chosen with the default size.\n\nThis field is only applicable when `use_ip_aliases` is true.\n\nIf unspecified, the range will use the default size.\n\nSet to /netmask (e.g. 
`/14`) to have a range chosen with a specific\nnetmask.\n\nSet to a\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.\nThis field is deprecated, use cluster.tpu_config.ipv4_cidr_block instead.", + "description": "The IP address range of the Cloud TPUs in this cluster. If unspecified, a range will be automatically chosen with the default size. This field is only applicable when `use_ip_aliases` is true. If unspecified, the range will use the default size. Set to /netmask (e.g. `/14`) to have a range chosen with a specific netmask. Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. This field is deprecated, use cluster.tpu_config.ipv4_cidr_block instead.", "type": "string" }, "useIpAliases": { - "description": "Whether alias IPs will be used for pod IPs in the cluster.\nThis is used in conjunction with use_routes. It cannot\nbe true if use_routes is true. If both use_ip_aliases and use_routes are\nfalse, then the server picks the default IP allocation mode", + "description": "Whether alias IPs will be used for pod IPs in the cluster. This is used in conjunction with use_routes. It cannot be true if use_routes is true. If both use_ip_aliases and use_routes are false, then the server picks the default IP allocation mode", "type": "boolean" }, "useRoutes": { - "description": "Whether routes will be used for pod IPs in the cluster.\nThis is used in conjunction with use_ip_aliases. It cannot be true if\nuse_ip_aliases is true. If both use_ip_aliases and use_routes are false,\nthen the server picks the default IP allocation mode", + "description": "Whether routes will be used for pod IPs in the cluster. This is used in conjunction with use_ip_aliases. It cannot be true if use_ip_aliases is true. If both use_ip_aliases and use_routes are false, then the server picks the default IP allocation mode", "type": "boolean" } }, "type": "object" }, "IntraNodeVisibilityConfig": { - "description": "IntraNodeVisibilityConfig contains the desired config of the intra-node\nvisibility on this cluster.", + "description": "IntraNodeVisibilityConfig contains the desired config of the intra-node visibility on this cluster.", "id": "IntraNodeVisibilityConfig", "properties": { "enabled": { @@ -3582,29 +3673,43 @@ "type": "object" }, "LegacyAbac": { - "description": "Configuration for the legacy Attribute Based Access Control authorization\nmode.", + "description": "Configuration for the legacy Attribute Based Access Control authorization mode.", "id": "LegacyAbac", "properties": { "enabled": { - "description": "Whether the ABAC authorizer is enabled for this cluster. When enabled,\nidentities in the system, including service accounts, nodes, and\ncontrollers, will have statically granted permissions beyond those\nprovided by the RBAC configuration or IAM.", + "description": "Whether the ABAC authorizer is enabled for this cluster. 
When enabled, identities in the system, including service accounts, nodes, and controllers, will have statically granted permissions beyond those provided by the RBAC configuration or IAM.", "type": "boolean" } }, "type": "object" }, + "LinuxNodeConfig": { + "description": "Parameters that can be configured on Linux nodes.", + "id": "LinuxNodeConfig", + "properties": { + "sysctls": { + "additionalProperties": { + "type": "string" + }, + "description": "The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.netdev_max_backlog net.core.rmem_max net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse", + "type": "object" + } + }, + "type": "object" + }, "ListClustersResponse": { "description": "ListClustersResponse is the result of ListClustersRequest.", "id": "ListClustersResponse", "properties": { "clusters": { - "description": "A list of clusters in the project in the specified zone, or\nacross all ones.", + "description": "A list of clusters in the project in the specified zone, or across all ones.", "items": { "$ref": "Cluster" }, "type": "array" }, "missingZones": { - "description": "If any zones are listed here, the list of clusters returned\nmay be missing those zones.", + "description": "If any zones are listed here, the list of clusters returned may be missing those zones.", "items": { "type": "string" }, @@ -3614,7 +3719,7 @@ "type": "object" }, "ListLocationsResponse": { - "description": "ListLocationsResponse returns the list of all GKE locations and their\nrecommendation state.", + "description": "ListLocationsResponse returns the list of all GKE locations and their recommendation state.", "id": "ListLocationsResponse", "properties": { "locations": { @@ -3625,7 +3730,7 @@ "type": "array" }, "nextPageToken": { - "description": "Only return ListLocationsResponse that occur after the page_token. This\nvalue should be populated from the ListLocationsResponse.next_page_token if\nthat response token was set (which happens when listing more Locations than\nfit in a single ListLocationsResponse).", + "description": "Only return ListLocationsResponse that occur after the page_token. This value should be populated from the ListLocationsResponse.next_page_token if that response token was set (which happens when listing more Locations than fit in a single ListLocationsResponse).", "type": "string" } }, @@ -3650,7 +3755,7 @@ "id": "ListOperationsResponse", "properties": { "missingZones": { - "description": "If any zones are listed here, the list of operations returned\nmay be missing the operations from those zones.", + "description": "If any zones are listed here, the list of operations returned may be missing the operations from those zones.", "items": { "type": "string" }, @@ -3667,11 +3772,11 @@ "type": "object" }, "ListUsableSubnetworksResponse": { - "description": "ListUsableSubnetworksResponse is the response of\nListUsableSubnetworksRequest.", + "description": "ListUsableSubnetworksResponse is the response of ListUsableSubnetworksRequest.", "id": "ListUsableSubnetworksResponse", "properties": { "nextPageToken": { - "description": "This token allows you to get the next page of results for list requests.\nIf the number of results is larger than `page_size`, use the\n`next_page_token` as a value for the query parameter `page_token` in the\nnext request. 
The value will become empty when there are no more pages.", + "description": "This token allows you to get the next page of results for list requests. If the number of results is larger than `page_size`, use the `next_page_token` as a value for the query parameter `page_token` in the next request. The value will become empty when there are no more pages.", "type": "string" }, "subnetworks": { @@ -3685,11 +3790,11 @@ "type": "object" }, "Location": { - "description": "Location returns the location name, and if the location is recommended\nfor GKE cluster scheduling.", + "description": "Location returns the location name, and if the location is recommended for GKE cluster scheduling.", "id": "Location", "properties": { "name": { - "description": "Contains the name of the resource requested.\nSpecified in the format `projects/*/locations/*`.", + "description": "Contains the name of the resource requested. Specified in the format `projects/*/locations/*`.", "type": "string" }, "recommended": { @@ -3697,7 +3802,7 @@ "type": "boolean" }, "type": { - "description": "Contains the type of location this Location is for.\nRegional or Zonal.", + "description": "Contains the type of location this Location is for. Regional or Zonal.", "enum": [ "LOCATION_TYPE_UNSPECIFIED", "ZONE", @@ -3718,7 +3823,7 @@ "id": "MaintenancePolicy", "properties": { "resourceVersion": { - "description": "A hash identifying the version of this policy, so that updates to fields of\nthe policy won't accidentally undo intermediate changes (and so that users\nof the API unaware of some fields won't accidentally remove other fields).\nMake a \u003ccode\u003eget()\u003c/code\u003e request to the cluster to get the current\nresource version and include it with requests to set the policy.", + "description": "A hash identifying the version of this policy, so that updates to fields of the policy won't accidentally undo intermediate changes (and so that users of the API unaware of some fields won't accidentally remove other fields). Make a `get()` request to the cluster to get the current resource version and include it with requests to set the policy.", "type": "string" }, "window": { @@ -3740,52 +3845,58 @@ "additionalProperties": { "$ref": "TimeWindow" }, - "description": "Exceptions to maintenance window. Non-emergency maintenance should not\noccur in these windows.", + "description": "Exceptions to maintenance window. Non-emergency maintenance should not occur in these windows.", "type": "object" }, "recurringWindow": { "$ref": "RecurringTimeWindow", - "description": "RecurringWindow specifies some number of recurring time periods for\nmaintenance to occur. The time windows may be overlapping. If no\nmaintenance windows are set, maintenance can occur at any time." + "description": "RecurringWindow specifies some number of recurring time periods for maintenance to occur. The time windows may be overlapping. If no maintenance windows are set, maintenance can occur at any time." } }, "type": "object" }, + "Master": { + "description": "Master is the configuration for components on master.", + "id": "Master", + "properties": {}, + "type": "object" + }, "MasterAuth": { - "description": "The authentication information for accessing the master endpoint.\nAuthentication can be done using HTTP basic auth or using client\ncertificates.", + "description": "The authentication information for accessing the master endpoint. 
Authentication can be done using HTTP basic auth or using client certificates.", "id": "MasterAuth", "properties": { "clientCertificate": { - "description": "[Output only] Base64-encoded public certificate used by clients to\nauthenticate to the cluster endpoint.", + "description": "[Output only] Base64-encoded public certificate used by clients to authenticate to the cluster endpoint.", "type": "string" }, "clientCertificateConfig": { "$ref": "ClientCertificateConfig", - "description": "Configuration for client certificate authentication on the cluster. For\nclusters before v1.12, if no configuration is specified, a client\ncertificate is issued." + "description": "Configuration for client certificate authentication on the cluster. For clusters before v1.12, if no configuration is specified, a client certificate is issued." }, "clientKey": { - "description": "[Output only] Base64-encoded private key used by clients to authenticate\nto the cluster endpoint.", + "description": "[Output only] Base64-encoded private key used by clients to authenticate to the cluster endpoint.", "type": "string" }, "clusterCaCertificate": { "type": "string" }, "password": { - "description": "The password to use for HTTP basic authentication to the master endpoint.\nBecause the master endpoint is open to the Internet, you should create a\nstrong password. If a password is provided for cluster creation, username\nmust be non-empty.", + "description": "The password to use for HTTP basic authentication to the master endpoint. Because the master endpoint is open to the Internet, you should create a strong password. If a password is provided for cluster creation, username must be non-empty. Warning: basic authentication is deprecated, and will be removed in GKE control plane versions 1.19 and newer. For a list of recommended authentication methods, see: https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication", "type": "string" }, "username": { - "description": "The username to use for HTTP basic authentication to the master endpoint.\nFor clusters v1.6.0 and later, basic authentication can be disabled by\nleaving username unspecified (or setting it to the empty string).", + "description": "The username to use for HTTP basic authentication to the master endpoint. For clusters v1.6.0 and later, basic authentication can be disabled by leaving username unspecified (or setting it to the empty string). Warning: basic authentication is deprecated, and will be removed in GKE control plane versions 1.19 and newer. For a list of recommended authentication methods, see: https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication", "type": "string" } }, "type": "object" }, "MasterAuthorizedNetworksConfig": { - "description": "Configuration options for the master authorized networks feature. Enabled\nmaster authorized networks will disallow all external traffic to access\nKubernetes master through HTTPS except traffic from the given CIDR blocks,\nGoogle Compute Engine Public IPs and Google Prod IPs.", + "description": "Configuration options for the master authorized networks feature. 
Enabled master authorized networks will disallow all external traffic to access Kubernetes master through HTTPS except traffic from the given CIDR blocks, Google Compute Engine Public IPs and Google Prod IPs.", "id": "MasterAuthorizedNetworksConfig", "properties": { "cidrBlocks": { - "description": "cidr_blocks define up to 10 external networks that could access\nKubernetes master through HTTPS.", + "description": "cidr_blocks define up to 10 external networks that could access Kubernetes master through HTTPS.", "items": { "$ref": "CidrBlock" }, @@ -3839,23 +3950,41 @@ "description": "NetworkConfig reports the relative names of network \u0026 subnetwork.", "id": "NetworkConfig", "properties": { + "datapathProvider": { + "description": "The desired datapath provider for this cluster. By default, uses the IPTables-based kube-proxy implementation.", + "enum": [ + "DATAPATH_PROVIDER_UNSPECIFIED", + "LEGACY_DATAPATH", + "ADVANCED_DATAPATH" + ], + "enumDescriptions": [ + "Default value.", + "Use the IPTables implementation based on kube-proxy.", + "Use the eBPF based GKE Dataplane V2 with additional features. See the [GKE Dataplane V2 documentation](https://cloud.google.com/kubernetes-enginw/docs/how-to/dataplane-v2) for more." + ], + "type": "string" + }, + "defaultSnatStatus": { + "$ref": "DefaultSnatStatus", + "description": "Whether the cluster disables default in-node sNAT rules. In-node sNAT rules will be disabled when default_snat_status is disabled. When disabled is set to false, default IP masquerade rules will be applied to the nodes to prevent sNAT on cluster internal traffic." + }, "enableIntraNodeVisibility": { - "description": "Whether Intra-node visibility is enabled for this cluster.\nThis makes same node pod to pod traffic visible for VPC network.", + "description": "Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network.", "type": "boolean" }, "network": { - "description": "Output only. The relative name of the Google Compute Engine\nnetwork(https://cloud.google.com/compute/docs/networks-and-firewalls#networks)\nto which the cluster is connected. Example:\nprojects/my-project/global/networks/my-network", + "description": "Output only. The relative name of the Google Compute Engine network(https://cloud.google.com/compute/docs/networks-and-firewalls#networks) to which the cluster is connected. Example: projects/my-project/global/networks/my-network", "type": "string" }, "subnetwork": { - "description": "Output only. The relative name of the Google Compute Engine\n[subnetwork](https://cloud.google.com/compute/docs/vpc) to which the\ncluster is connected. Example:\nprojects/my-project/regions/us-central1/subnetworks/my-subnet", + "description": "Output only. The relative name of the Google Compute Engine [subnetwork](https://cloud.google.com/compute/docs/vpc) to which the cluster is connected. Example: projects/my-project/regions/us-central1/subnetworks/my-subnet", "type": "string" } }, "type": "object" }, "NetworkPolicy": { - "description": "Configuration options for the NetworkPolicy feature.\nhttps://kubernetes.io/docs/concepts/services-networking/networkpolicies/", + "description": "Configuration options for the NetworkPolicy feature. https://kubernetes.io/docs/concepts/services-networking/networkpolicies/", "id": "NetworkPolicy", "properties": { "enabled": { @@ -3878,7 +4007,7 @@ "type": "object" }, "NetworkPolicyConfig": { - "description": "Configuration for NetworkPolicy. 
This only tracks whether the addon\nis enabled or not on the Master, it does not track whether network policy\nis enabled for the nodes.", + "description": "Configuration for NetworkPolicy. This only tracks whether the addon is enabled or not on the Master, it does not track whether network policy is enabled for the nodes.", "id": "NetworkPolicyConfig", "properties": { "disabled": { @@ -3893,77 +4022,89 @@ "id": "NodeConfig", "properties": { "accelerators": { - "description": "A list of hardware accelerators to be attached to each node.\nSee https://cloud.google.com/compute/docs/gpus for more information about\nsupport for GPUs.", + "description": "A list of hardware accelerators to be attached to each node. See https://cloud.google.com/compute/docs/gpus for more information about support for GPUs.", "items": { "$ref": "AcceleratorConfig" }, "type": "array" }, "bootDiskKmsKey": { - "description": "\nThe Customer Managed Encryption Key used to encrypt the boot disk attached\nto each node in the node pool. This should be of the form\nprojects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME].\nFor more information about protecting resources with Cloud KMS Keys please\nsee:\nhttps://cloud.google.com/compute/docs/disks/customer-managed-encryption", + "description": " The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption", "type": "string" }, "diskSizeGb": { - "description": "Size of the disk attached to each node, specified in GB.\nThe smallest allowed disk size is 10GB.\n\nIf unspecified, the default disk size is 100GB.", + "description": "Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB.", "format": "int32", "type": "integer" }, "diskType": { - "description": "Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd')\n\nIf unspecified, the default disk type is 'pd-standard'", + "description": "Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd') If unspecified, the default disk type is 'pd-standard'", "type": "string" }, "imageType": { - "description": "The image type to use for this node. Note that for a given image type,\nthe latest version of it will be used.", + "description": "The image type to use for this node. Note that for a given image type, the latest version of it will be used.", "type": "string" }, + "kubeletConfig": { + "$ref": "NodeKubeletConfig", + "description": "Node kubelet configs." + }, "labels": { "additionalProperties": { "type": "string" }, - "description": "The map of Kubernetes labels (key/value pairs) to be applied to each node.\nThese will added in addition to any default label(s) that\nKubernetes may apply to the node.\nIn case of conflict in label keys, the applied set may differ depending on\nthe Kubernetes version -- it's best to assume the behavior is undefined\nand conflicts should be avoided.\nFor more information, including usage and the valid values, see:\nhttps://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", + "description": "The map of Kubernetes labels (key/value pairs) to be applied to each node. 
These will added in addition to any default label(s) that Kubernetes may apply to the node. In case of conflict in label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided. For more information, including usage and the valid values, see: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", "type": "object" }, + "linuxNodeConfig": { + "$ref": "LinuxNodeConfig", + "description": "Parameters that can be configured on Linux nodes." + }, "localSsdCount": { - "description": "The number of local SSD disks to be attached to the node.\n\nThe limit for this value is dependent upon the maximum number of\ndisks available on a machine per zone. See:\nhttps://cloud.google.com/compute/docs/disks/local-ssd\nfor more information.", + "description": "The number of local SSD disks to be attached to the node. The limit for this value is dependent upon the maximum number of disks available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information.", "format": "int32", "type": "integer" }, "machineType": { - "description": "The name of a Google Compute Engine [machine\ntype](https://cloud.google.com/compute/docs/machine-types) (e.g.\n`n1-standard-1`).\n\nIf unspecified, the default machine type is\n`n1-standard-1`.", + "description": "The name of a Google Compute Engine [machine type](https://cloud.google.com/compute/docs/machine-types). If unspecified, the default machine type is `e2-medium`.", "type": "string" }, "metadata": { "additionalProperties": { "type": "string" }, - "description": "The metadata key/value pairs assigned to instances in the cluster.\n\nKeys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes\nin length. These are reflected as part of a URL in the metadata server.\nAdditionally, to avoid ambiguity, keys must not conflict with any other\nmetadata keys for the project or be one of the reserved keys:\n \"cluster-location\"\n \"cluster-name\"\n \"cluster-uid\"\n \"configure-sh\"\n \"containerd-configure-sh\"\n \"enable-oslogin\"\n \"gci-ensure-gke-docker\"\n \"gci-metrics-enabled\"\n \"gci-update-strategy\"\n \"instance-template\"\n \"kube-env\"\n \"startup-script\"\n \"user-data\"\n \"disable-address-manager\"\n \"windows-startup-script-ps1\"\n \"common-psm1\"\n \"k8s-node-setup-psm1\"\n \"install-ssh-psm1\"\n \"user-profile-psm1\"\n \"serial-port-logging-enable\"\nValues are free-form strings, and only have meaning as interpreted by\nthe image running in the instance. The only restriction placed on them is\nthat each value's size must be less than or equal to 32 KB.\n\nThe total size of all keys and values must be less than 512 KB.", + "description": "The metadata key/value pairs assigned to instances in the cluster. Keys must conform to the regexp `[a-zA-Z0-9-_]+` and be less than 128 bytes in length. These are reflected as part of a URL in the metadata server. 
Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project or be one of the reserved keys: - \"cluster-location\" - \"cluster-name\" - \"cluster-uid\" - \"configure-sh\" - \"containerd-configure-sh\" - \"enable-oslogin\" - \"gci-ensure-gke-docker\" - \"gci-metrics-enabled\" - \"gci-update-strategy\" - \"instance-template\" - \"kube-env\" - \"startup-script\" - \"user-data\" - \"disable-address-manager\" - \"windows-startup-script-ps1\" - \"common-psm1\" - \"k8s-node-setup-psm1\" - \"install-ssh-psm1\" - \"user-profile-psm1\" - \"serial-port-logging-enable\" Values are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on them is that each value's size must be less than or equal to 32 KB. The total size of all keys and values must be less than 512 KB.", "type": "object" }, "minCpuPlatform": { - "description": "Minimum CPU platform to be used by this instance. The instance may be\nscheduled on the specified or newer CPU platform. Applicable values are the\nfriendly names of CPU platforms, such as\n\u003ccode\u003eminCpuPlatform: \u0026quot;Intel Haswell\u0026quot;\u003c/code\u003e or\n\u003ccode\u003eminCpuPlatform: \u0026quot;Intel Sandy Bridge\u0026quot;\u003c/code\u003e. For more\ninformation, read [how to specify min CPU\nplatform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)", + "description": "Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform. Applicable values are the friendly names of CPU platforms, such as `minCpuPlatform: \"Intel Haswell\"` or `minCpuPlatform: \"Intel Sandy Bridge\"`. For more information, read [how to specify min CPU platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)", + "type": "string" + }, + "nodeGroup": { + "description": "Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on [sole tenant nodes](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes).", "type": "string" }, "oauthScopes": { - "description": "The set of Google API scopes to be made available on all of the\nnode VMs under the \"default\" service account.\n\nThe following scopes are recommended, but not required, and by default are\nnot included:\n\n* `https://www.googleapis.com/auth/compute` is required for mounting\npersistent storage on your nodes.\n* `https://www.googleapis.com/auth/devstorage.read_only` is required for\ncommunicating with **gcr.io**\n(the [Google Container\nRegistry](https://cloud.google.com/container-registry/)).\n\nIf unspecified, no scopes are added, unless Cloud Logging or Cloud\nMonitoring are enabled, in which case their required scopes will be added.", + "description": "The set of Google API scopes to be made available on all of the node VMs under the \"default\" service account. The following scopes are recommended, but not required, and by default are not included: * `https://www.googleapis.com/auth/compute` is required for mounting persistent storage on your nodes. * `https://www.googleapis.com/auth/devstorage.read_only` is required for communicating with **gcr.io** (the [Google Container Registry](https://cloud.google.com/container-registry/)). 
If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring are enabled, in which case their required scopes will be added.", "items": { "type": "string" }, "type": "array" }, "preemptible": { - "description": "Whether the nodes are created as preemptible VM instances. See:\nhttps://cloud.google.com/compute/docs/instances/preemptible for more\ninforamtion about preemptible VM instances.", + "description": "Whether the nodes are created as preemptible VM instances. See: https://cloud.google.com/compute/docs/instances/preemptible for more inforamtion about preemptible VM instances.", "type": "boolean" }, "reservationAffinity": { "$ref": "ReservationAffinity", - "description": "The optional reservation affinity. Setting this field will apply\nthe specified [Zonal Compute\nReservation](https://cloud.google.com/compute/docs/instances/reserving-zonal-resources)\nto this node pool." + "description": "The optional reservation affinity. Setting this field will apply the specified [Zonal Compute Reservation](https://cloud.google.com/compute/docs/instances/reserving-zonal-resources) to this node pool." }, "sandboxConfig": { "$ref": "SandboxConfig", "description": "Sandbox configuration for this node." }, "serviceAccount": { - "description": "The Google Cloud Platform Service Account to be used by the node VMs.\nSpecify the email address of the Service Account; otherwise, if no Service\nAccount is specified, the \"default\" service account is used.", + "description": "The Google Cloud Platform Service Account to be used by the node VMs. Specify the email address of the Service Account; otherwise, if no Service Account is specified, the \"default\" service account is used.", "type": "string" }, "shieldedInstanceConfig": { @@ -3971,14 +4112,14 @@ "description": "Shielded Instance options." }, "tags": { - "description": "The list of instance tags applied to all nodes. Tags are used to identify\nvalid sources or targets for network firewalls and are specified by\nthe client during cluster or node pool creation. Each tag within the list\nmust comply with RFC1035.", + "description": "The list of instance tags applied to all nodes. Tags are used to identify valid sources or targets for network firewalls and are specified by the client during cluster or node pool creation. Each tag within the list must comply with RFC1035.", "items": { "type": "string" }, "type": "array" }, "taints": { - "description": "List of kubernetes taints to be applied to each node.\n\nFor more information, including usage and the valid values, see:\nhttps://kubernetes.io/docs/concepts/configuration/taint-and-toleration/", + "description": "List of kubernetes taints to be applied to each node. For more information, including usage and the valid values, see: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/", "items": { "$ref": "NodeTaint" }, @@ -3991,8 +4132,27 @@ }, "type": "object" }, + "NodeKubeletConfig": { + "description": "Node kubelet configs.", + "id": "NodeKubeletConfig", + "properties": { + "cpuCfsQuota": { + "description": "Enable CPU CFS quota enforcement for containers that specify CPU limits. This option is enabled by default which makes kubelet use CFS quota (https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt) to enforce container CPU limits. Otherwise, CPU limits will not be enforced at all. Disable this option to mitigate CPU throttling problems while still having your pods to be in Guaranteed QoS class by specifying the CPU limits. 
The default value is 'true' if unspecified.", + "type": "boolean" + }, + "cpuCfsQuotaPeriod": { + "description": "Set the CPU CFS quota period value 'cpu.cfs_period_us'. The string must be a sequence of decimal numbers, each with optional fraction and a unit suffix, such as \"300ms\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\". The value must be a positive duration.", + "type": "string" + }, + "cpuManagerPolicy": { + "description": "Control the CPU management policy on the node. See https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/ The following values are allowed. - \"none\": the default, which represents the existing scheduling behavior. - \"static\": allows pods with certain resource characteristics to be granted increased CPU affinity and exclusivity on the node. The default value is 'none' if unspecified.", + "type": "string" + } + }, + "type": "object" + }, "NodeManagement": { - "description": "NodeManagement defines the set of node management services turned on for the\nnode pool.", + "description": "NodeManagement defines the set of node management services turned on for the node pool.", "id": "NodeManagement", "properties": { "autoRepair": { @@ -4011,12 +4171,12 @@ "type": "object" }, "NodePool": { - "description": "NodePool contains the name and configuration for a cluster's node pool.\nNode pools are a set of nodes (i.e. VM's), with a common configuration and\nspecification, under the control of the cluster master. They may have a set\nof Kubernetes labels applied to them, which may be used to reference them\nduring pod scheduling. They may also be resized up or down, to accommodate\nthe workload.", + "description": "NodePool contains the name and configuration for a cluster's node pool. Node pools are a set of nodes (i.e. VM's), with a common configuration and specification, under the control of the cluster master. They may have a set of Kubernetes labels applied to them, which may be used to reference them during pod scheduling. They may also be resized up or down, to accommodate the workload.", "id": "NodePool", "properties": { "autoscaling": { "$ref": "NodePoolAutoscaling", - "description": "Autoscaler configuration for this NodePool. Autoscaler is enabled\nonly if a valid configuration is present." + "description": "Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present." }, "conditions": { "description": "Which conditions caused the current node pool state.", @@ -4030,19 +4190,19 @@ "description": "The node configuration of the pool." }, "initialNodeCount": { - "description": "The initial node count for the pool. You must ensure that your\nCompute Engine \u003ca href=\"/compute/docs/resource-quotas\"\u003eresource quota\u003c/a\u003e\nis sufficient for this number of instances. You must also have available\nfirewall and routes quota.", + "description": "The initial node count for the pool. You must ensure that your Compute Engine [resource quota](https://cloud.google.com/compute/quotas) is sufficient for this number of instances. 
You must also have available firewall and routes quota.", "format": "int32", "type": "integer" }, "instanceGroupUrls": { - "description": "[Output only] The resource URLs of the [managed instance\ngroups](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances)\nassociated with this node pool.", + "description": "[Output only] The resource URLs of the [managed instance groups](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances) associated with this node pool.", "items": { "type": "string" }, "type": "array" }, "locations": { - "description": "The list of Google Compute Engine\n[zones](https://cloud.google.com/compute/docs/zones#available) in which the\nNodePool's nodes should be located.", + "description": "The list of Google Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available) in which the NodePool's nodes should be located.", "items": { "type": "string" }, @@ -4054,7 +4214,7 @@ }, "maxPodsConstraint": { "$ref": "MaxPodsConstraint", - "description": "The constraint on the maximum number of pods that can be run\nsimultaneously on a node in the node pool." + "description": "The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool." }, "name": { "description": "The name of the node pool.", @@ -4083,16 +4243,16 @@ "enumDescriptions": [ "Not set.", "The PROVISIONING state indicates the node pool is being created.", - "The RUNNING state indicates the node pool has been created\nand is fully usable.", - "The RUNNING_WITH_ERROR state indicates the node pool has been created\nand is partially usable. Some error state has occurred and some\nfunctionality may be impaired. Customer may need to reissue a request\nor trigger a new update.", - "The RECONCILING state indicates that some work is actively being done on\nthe node pool, such as upgrading node software. Details can\nbe found in the `statusMessage` field.", + "The RUNNING state indicates the node pool has been created and is fully usable.", + "The RUNNING_WITH_ERROR state indicates the node pool has been created and is partially usable. Some error state has occurred and some functionality may be impaired. Customer may need to reissue a request or trigger a new update.", + "The RECONCILING state indicates that some work is actively being done on the node pool, such as upgrading node software. Details can be found in the `statusMessage` field.", "The STOPPING state indicates the node pool is being deleted.", - "The ERROR state indicates the node pool may be unusable. Details\ncan be found in the `statusMessage` field." + "The ERROR state indicates the node pool may be unusable. Details can be found in the `statusMessage` field." 
], "type": "string" }, "statusMessage": { - "description": "[Output only] Additional information about the current status of this\nnode pool instance, if available.", + "description": "[Output only] Additional information about the current status of this node pool instance, if available.", "type": "string" }, "upgradeSettings": { @@ -4107,7 +4267,7 @@ "type": "object" }, "NodePoolAutoscaling": { - "description": "NodePoolAutoscaling contains information required by cluster autoscaler to\nadjust the size of the node pool to the current cluster usage.", + "description": "NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage.", "id": "NodePoolAutoscaling", "properties": { "autoprovisioned": { @@ -4119,12 +4279,12 @@ "type": "boolean" }, "maxNodeCount": { - "description": "Maximum number of nodes in the NodePool. Must be \u003e= min_node_count. There\nhas to enough quota to scale up the cluster.", + "description": "Maximum number of nodes in the NodePool. Must be \u003e= min_node_count. There has to enough quota to scale up the cluster.", "format": "int32", "type": "integer" }, "minNodeCount": { - "description": "Minimum number of nodes in the NodePool. Must be \u003e= 1 and \u003c=\nmax_node_count.", + "description": "Minimum number of nodes in the NodePool. Must be \u003e= 1 and \u003c= max_node_count.", "format": "int32", "type": "integer" } @@ -4132,7 +4292,7 @@ "type": "object" }, "NodeTaint": { - "description": "Kubernetes taint is comprised of three fields: key, value, and effect. Effect\ncan only be one of three types: NoSchedule, PreferNoSchedule or NoExecute.\n\nSee\n[here](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration)\nfor more information, including usage and the valid values.", + "description": "Kubernetes taint is comprised of three fields: key, value, and effect. Effect can only be one of three types: NoSchedule, PreferNoSchedule or NoExecute. See [here](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration) for more information, including usage and the valid values.", "id": "NodeTaint", "properties": { "effect": { @@ -4162,12 +4322,23 @@ }, "type": "object" }, + "NotificationConfig": { + "description": "NotificationConfig is the configuration of notifications.", + "id": "NotificationConfig", + "properties": { + "pubsub": { + "$ref": "PubSub", + "description": "Notification config for Pub/Sub." + } + }, + "type": "object" + }, "Operation": { - "description": "This operation resource represents operations that may have happened or are\nhappening on the cluster. All fields are output only.", + "description": "This operation resource represents operations that may have happened or are happening on the cluster. All fields are output only.", "id": "Operation", "properties": { "clusterConditions": { - "description": "Which conditions caused the current cluster state.", + "description": "Which conditions caused the current cluster state. Deprecated. Use field error instead.", "items": { "$ref": "StatusCondition" }, @@ -4178,11 +4349,15 @@ "type": "string" }, "endTime": { - "description": "[Output only] The time the operation completed, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "description": "[Output only] The time the operation completed, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", "type": "string" }, + "error": { + "$ref": "Status", + "description": "The error result of the operation in case of failure." 
+ }, "location": { - "description": "[Output only] The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available)\nor\n[region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available)\nin which the cluster resides.", + "description": "[Output only] The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) or [region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) in which the cluster resides.", "type": "string" }, "name": { @@ -4190,7 +4365,7 @@ "type": "string" }, "nodepoolConditions": { - "description": "Which conditions caused the current node pool state.", + "description": "Which conditions caused the current node pool state. Deprecated. Use field error instead.", "items": { "$ref": "StatusCondition" }, @@ -4240,14 +4415,15 @@ }, "progress": { "$ref": "OperationProgress", - "description": "Output only. [Output only] Progress information for an operation." + "description": "Output only. [Output only] Progress information for an operation.", + "readOnly": true }, "selfLink": { "description": "Server-defined URL for the resource.", "type": "string" }, "startTime": { - "description": "[Output only] The time the operation started, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "description": "[Output only] The time the operation started, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", "type": "string" }, "status": { @@ -4269,7 +4445,8 @@ "type": "string" }, "statusMessage": { - "description": "Output only. If an error has occurred, a textual description of the error.", + "description": "Output only. If an error has occurred, a textual description of the error. Deprecated. Use field error instead.", + "readOnly": true, "type": "string" }, "targetLink": { @@ -4277,7 +4454,7 @@ "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\noperation is taking place. This field is deprecated, use location instead.", + "description": "The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the operation is taking place. This field is deprecated, use location instead.", "type": "string" } }, @@ -4288,14 +4465,14 @@ "id": "OperationProgress", "properties": { "metrics": { - "description": "Progress metric bundle, for example:\n metrics: [{name: \"nodes done\", int_value: 15},\n {name: \"nodes total\", int_value: 32}]\nor\n metrics: [{name: \"progress\", double_value: 0.56},\n {name: \"progress scale\", double_value: 1.0}]", + "description": "Progress metric bundle, for example: metrics: [{name: \"nodes done\", int_value: 15}, {name: \"nodes total\", int_value: 32}] or metrics: [{name: \"progress\", double_value: 0.56}, {name: \"progress scale\", double_value: 1.0}]", "items": { "$ref": "Metric" }, "type": "array" }, "name": { - "description": "A non-parameterized string describing an operation stage.\nUnset for single-stage operations.", + "description": "A non-parameterized string describing an operation stage. Unset for single-stage operations.", "type": "string" }, "stages": { @@ -4306,7 +4483,7 @@ "type": "array" }, "status": { - "description": "Status of an operation stage.\nUnset for single-stage operations.", + "description": "Status of an operation stage. 
Unset for single-stage operations.", "enum": [ "STATUS_UNSPECIFIED", "PENDING", @@ -4331,7 +4508,7 @@ "id": "PodSecurityPolicyConfig", "properties": { "enabled": { - "description": "Enable the PodSecurityPolicy controller for this cluster. If enabled, pods\nmust be valid under a PodSecurityPolicy to be created.", + "description": "Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created.", "type": "boolean" } }, @@ -4346,7 +4523,7 @@ "type": "boolean" }, "enablePrivateNodes": { - "description": "Whether nodes have internal IP addresses only. If enabled, all nodes are\ngiven only RFC 1918 private addresses and communicate with the master via\nprivate networking.", + "description": "Whether nodes have internal IP addresses only. If enabled, all nodes are given only RFC 1918 private addresses and communicate with the master via private networking.", "type": "boolean" }, "masterGlobalAccessConfig": { @@ -4354,7 +4531,7 @@ "description": "Controls master global access settings." }, "masterIpv4CidrBlock": { - "description": "The IP range in CIDR notation to use for the hosted master network. This\nrange will be used for assigning internal IP addresses to the master or\nset of masters, as well as the ILB VIP. This range must not overlap with\nany other ranges in use within the cluster's network.", + "description": "The IP range in CIDR notation to use for the hosted master network. This range will be used for assigning internal IP addresses to the master or set of masters, as well as the ILB VIP. This range must not overlap with any other ranges in use within the cluster's network.", "type": "string" }, "peeringName": { @@ -4383,12 +4560,27 @@ }, "type": "object" }, + "PubSub": { + "description": "Pub/Sub specific notification config.", + "id": "PubSub", + "properties": { + "enabled": { + "description": "Enable notifications for Pub/Sub.", + "type": "boolean" + }, + "topic": { + "description": "The desired Pub/Sub topic to which notifications will be sent by GKE. Format is `projects/{project}/topics/{topic}`.", + "type": "string" + } + }, + "type": "object" + }, "RecurringTimeWindow": { "description": "Represents an arbitrary window of time that recurs.", "id": "RecurringTimeWindow", "properties": { "recurrence": { - "description": "An RRULE (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for how\nthis window reccurs. They go on for the span of time between the start and\nend time.\n\nFor example, to have something repeat every weekday, you'd use:\n \u003ccode\u003eFREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR\u003c/code\u003e\nTo repeat some window daily (equivalent to the DailyMaintenanceWindow):\n \u003ccode\u003eFREQ=DAILY\u003c/code\u003e\nFor the first weekend of every month:\n \u003ccode\u003eFREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU\u003c/code\u003e\nThis specifies how frequently the window starts. Eg, if you wanted to have\na 9-5 UTC-4 window every weekday, you'd use something like:\n\u003ccode\u003e\n start time = 2019-01-01T09:00:00-0400\n end time = 2019-01-01T17:00:00-0400\n recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR\n\u003c/code\u003e\nWindows can span multiple days. 
Eg, to make the window encompass every\nweekend from midnight Saturday till the last minute of Sunday UTC:\n\u003ccode\u003e\n start time = 2019-01-05T00:00:00Z\n end time = 2019-01-07T23:59:00Z\n recurrence = FREQ=WEEKLY;BYDAY=SA\n\u003c/code\u003e\nNote the start and end time's specific dates are largely arbitrary except\nto specify duration of the window and when it first starts.\nThe FREQ values of HOURLY, MINUTELY, and SECONDLY are not supported.", + "description": "An RRULE (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for how this window reccurs. They go on for the span of time between the start and end time. For example, to have something repeat every weekday, you'd use: `FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR` To repeat some window daily (equivalent to the DailyMaintenanceWindow): `FREQ=DAILY` For the first weekend of every month: `FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU` This specifies how frequently the window starts. Eg, if you wanted to have a 9-5 UTC-4 window every weekday, you'd use something like: ``` start time = 2019-01-01T09:00:00-0400 end time = 2019-01-01T17:00:00-0400 recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR ``` Windows can span multiple days. Eg, to make the window encompass every weekend from midnight Saturday till the last minute of Sunday UTC: ``` start time = 2019-01-05T00:00:00Z end time = 2019-01-07T23:59:00Z recurrence = FREQ=WEEKLY;BYDAY=SA ``` Note the start and end time's specific dates are largely arbitrary except to specify duration of the window and when it first starts. The FREQ values of HOURLY, MINUTELY, and SECONDLY are not supported.", "type": "string" }, "window": { @@ -4399,7 +4591,7 @@ "type": "object" }, "ReleaseChannel": { - "description": "ReleaseChannel indicates which release channel a cluster is\nsubscribed to. Release channels are arranged in order of risk and\nfrequency of updates.\n\nWhen a cluster is subscribed to a release channel, Google maintains\nboth the master version and the node version. Node auto-upgrade\ndefaults to true and cannot be disabled. Updates to version related\nfields (e.g. current_master_version) return an error.", + "description": "ReleaseChannel indicates which release channel a cluster is subscribed to. Release channels are arranged in order of risk. When a cluster is subscribed to a release channel, Google maintains both the master version and the node version. Node auto-upgrade defaults to true and cannot be disabled.", "id": "ReleaseChannel", "properties": { "channel": { @@ -4412,9 +4604,9 @@ ], "enumDescriptions": [ "No channel specified.", - "RAPID channel is offered on an early access basis for customers who want\nto test new releases before they are qualified for production use or\ngeneral availability. New upgrades will occur roughly weekly.\n\nWARNING: Versions available in the RAPID Channel may be subject to\nunresolved issues with no known workaround and are not for use with\nproduction workloads or subject to any SLAs.", - "Clusters subscribed to REGULAR receive versions that are considered GA\nquality. REGULAR is intended for production users who want to take\nadvantage of new features. New upgrades will occur roughly every few\nweeks.", - "Clusters subscribed to STABLE receive versions that are known to be\nstable and reliable in production. STABLE is intended for production\nusers who need stability above all else, or for whom frequent upgrades\nare too risky. New upgrades will occur roughly every few months." 
+ "RAPID channel is offered on an early access basis for customers who want to test new releases. WARNING: Versions available in the RAPID Channel may be subject to unresolved issues with no known workaround and are not subject to any SLAs.", + "Clusters subscribed to REGULAR receive versions that are considered GA quality. REGULAR is intended for production users who want to take advantage of new features.", + "Clusters subscribed to STABLE receive versions that are known to be stable and reliable in production." ], "type": "string" } @@ -4426,7 +4618,7 @@ "id": "ReleaseChannelConfig", "properties": { "availableVersions": { - "description": "List of available versions for the release channel.", + "description": "Deprecated. This field has been deprecated and replaced with the valid_versions field.", "items": { "$ref": "AvailableVersion" }, @@ -4442,21 +4634,28 @@ ], "enumDescriptions": [ "No channel specified.", - "RAPID channel is offered on an early access basis for customers who want\nto test new releases before they are qualified for production use or\ngeneral availability. New upgrades will occur roughly weekly.\n\nWARNING: Versions available in the RAPID Channel may be subject to\nunresolved issues with no known workaround and are not for use with\nproduction workloads or subject to any SLAs.", - "Clusters subscribed to REGULAR receive versions that are considered GA\nquality. REGULAR is intended for production users who want to take\nadvantage of new features. New upgrades will occur roughly every few\nweeks.", - "Clusters subscribed to STABLE receive versions that are known to be\nstable and reliable in production. STABLE is intended for production\nusers who need stability above all else, or for whom frequent upgrades\nare too risky. New upgrades will occur roughly every few months." + "RAPID channel is offered on an early access basis for customers who want to test new releases. WARNING: Versions available in the RAPID Channel may be subject to unresolved issues with no known workaround and are not subject to any SLAs.", + "Clusters subscribed to REGULAR receive versions that are considered GA quality. REGULAR is intended for production users who want to take advantage of new features.", + "Clusters subscribed to STABLE receive versions that are known to be stable and reliable in production." ], "type": "string" }, "defaultVersion": { "description": "The default version for newly created clusters on the channel.", "type": "string" + }, + "validVersions": { + "description": "List of valid versions for the channel.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" }, "ReservationAffinity": { - "description": "[ReservationAffinity](https://cloud.google.com/compute/docs/instances/reserving-zonal-resources)\nis the configuration of desired reservation which instances could take\ncapacity from.", + "description": "[ReservationAffinity](https://cloud.google.com/compute/docs/instances/reserving-zonal-resources) is the configuration of desired reservation which instances could take capacity from.", "id": "ReservationAffinity", "properties": { "consumeReservationType": { @@ -4471,12 +4670,12 @@ "Default value. This should not be used.", "Do not consume from any reserved capacity.", "Consume any reservation available.", - "Must consume from a specific reservation. Must specify key value fields\nfor specifying the reservations." + "Must consume from a specific reservation. Must specify key value fields for specifying the reservations." 
], "type": "string" }, "key": { - "description": "Corresponds to the label key of a reservation resource. To target a\nSPECIFIC_RESERVATION by name, specify \"googleapis.com/reservation-name\" as\nthe key and specify the name of your reservation as its value.", + "description": "Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify \"googleapis.com/reservation-name\" as the key and specify the name of your reservation as its value.", "type": "string" }, "values": { @@ -4490,7 +4689,7 @@ "type": "object" }, "ResourceLimit": { - "description": "Contains information about amount of some resource in the cluster.\nFor memory, value should be in GB.", + "description": "Contains information about amount of some resource in the cluster. For memory, value should be in GB.", "id": "ResourceLimit", "properties": { "maximum": { @@ -4523,34 +4722,34 @@ "description": "Configuration to enable resource consumption metering." }, "enableNetworkEgressMetering": { - "description": "Whether to enable network egress metering for this cluster. If enabled, a\ndaemonset will be created in the cluster to meter network egress traffic.", + "description": "Whether to enable network egress metering for this cluster. If enabled, a daemonset will be created in the cluster to meter network egress traffic.", "type": "boolean" } }, "type": "object" }, "RollbackNodePoolUpgradeRequest": { - "description": "RollbackNodePoolUpgradeRequest rollbacks the previously Aborted or Failed\nNodePool upgrade. This will be an no-op if the last upgrade successfully\ncompleted.", + "description": "RollbackNodePoolUpgradeRequest rollbacks the previously Aborted or Failed NodePool upgrade. This will be an no-op if the last upgrade successfully completed.", "id": "RollbackNodePoolUpgradeRequest", "properties": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to rollback.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to rollback. This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster, node pool id) of the node poll to\nrollback upgrade.\nSpecified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node poll to rollback upgrade. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "type": "string" }, "nodePoolId": { - "description": "Required. Deprecated. The name of the node pool to rollback.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the node pool to rollback. This field has been deprecated and replaced by the name field.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. 
This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4606,14 +4805,14 @@ "type": "array" }, "validMasterVersions": { - "description": "List of valid master versions.", + "description": "List of valid master versions, in descending order.", "items": { "type": "string" }, "type": "array" }, "validNodeVersions": { - "description": "List of valid node upgrade target versions.", + "description": "List of valid node upgrade target versions, in descending order.", "items": { "type": "string" }, @@ -4628,45 +4827,45 @@ "properties": { "addonsConfig": { "$ref": "AddonsConfig", - "description": "Required. The desired configurations for the various addons available to run in the\ncluster." + "description": "Required. The desired configurations for the various addons available to run in the cluster." }, "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to set addons.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set addons. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, "type": "object" }, "SetLabelsRequest": { - "description": "SetLabelsRequest sets the Google Cloud Platform labels on a Google Container\nEngine cluster, which will in turn set them for Google Compute Engine\nresources used by that cluster", + "description": "SetLabelsRequest sets the Google Cloud Platform labels on a Google Container Engine cluster, which will in turn set them for Google Compute Engine resources used by that cluster", "id": "SetLabelsRequest", "properties": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "type": "string" }, "labelFingerprint": { - "description": "Required. 
The fingerprint of the previous set of labels for this resource,\nused to detect conflicts. The fingerprint is initially generated by\nKubernetes Engine and changes after every request to modify or update\nlabels. You must always provide an up-to-date fingerprint hash when\nupdating or changing labels. Make a \u003ccode\u003eget()\u003c/code\u003e request to the\nresource to get the latest fingerprint.", + "description": "Required. The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Kubernetes Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash when updating or changing labels. Make a `get()` request to the resource to get the latest fingerprint.", "type": "string" }, "name": { - "description": "The name (project, location, cluster id) of the cluster to set labels.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to set labels. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "type": "string" }, "resourceLabels": { @@ -4677,18 +4876,18 @@ "type": "object" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, "type": "object" }, "SetLegacyAbacRequest": { - "description": "SetLegacyAbacRequest enables or disables the ABAC authorization mechanism for\na cluster.", + "description": "SetLegacyAbacRequest enables or disables the ABAC authorization mechanism for a cluster.", "id": "SetLegacyAbacRequest", "properties": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", "type": "string" }, "enabled": { @@ -4696,15 +4895,15 @@ "type": "boolean" }, "name": { - "description": "The name (project, location, cluster id) of the cluster to set legacy abac.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to set legacy abac. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. 
The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4715,26 +4914,26 @@ "id": "SetLocationsRequest", "properties": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "locations": { - "description": "Required. The desired list of Google Compute Engine\n[zones](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster's nodes should be located. Changing the locations a cluster is in\nwill result in nodes being either created or removed from the cluster,\ndepending on whether locations are being added or removed.\n\nThis list must always include the cluster's primary zone.", + "description": "Required. The desired list of Google Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available) in which the cluster's nodes should be located. Changing the locations a cluster is in will result in nodes being either created or removed from the cluster, depending on whether locations are being added or removed. This list must always include the cluster's primary zone.", "items": { "type": "string" }, "type": "array" }, "name": { - "description": "The name (project, location, cluster) of the cluster to set locations.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set locations. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4745,23 +4944,23 @@ "id": "SetLoggingServiceRequest", "properties": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. 
Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "loggingService": { - "description": "Required. The logging service the cluster should use to write logs.\nCurrently available options:\n\n* `logging.googleapis.com/kubernetes` - The Cloud Logging\nservice with a Kubernetes-native resource model\n* `logging.googleapis.com` - The legacy Cloud Logging service (no longer\n available as of GKE 1.15).\n* `none` - no logs will be exported from the cluster.\n\nIf left as an empty string,`logging.googleapis.com/kubernetes` will be\nused for GKE 1.14+ or `logging.googleapis.com` for earlier versions.", + "description": "Required. The logging service the cluster should use to write logs. Currently available options: * `logging.googleapis.com/kubernetes` - The Cloud Logging service with a Kubernetes-native resource model * `logging.googleapis.com` - The legacy Cloud Logging service (no longer available as of GKE 1.15). * `none` - no logs will be exported from the cluster. If left as an empty string,`logging.googleapis.com/kubernetes` will be used for GKE 1.14+ or `logging.googleapis.com` for earlier versions.", "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to set logging.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set logging. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4777,18 +4976,18 @@ }, "maintenancePolicy": { "$ref": "MaintenancePolicy", - "description": "Required. The maintenance policy to be set for the cluster. An empty field\nclears the existing maintenance policy." + "description": "Required. The maintenance policy to be set for the cluster. An empty field clears the existing maintenance policy." }, "name": { - "description": "The name (project, location, cluster id) of the cluster to set maintenance\npolicy.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to set maintenance policy. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Required. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Required. 
The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", "type": "string" }, "zone": { - "description": "Required. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides.", + "description": "Required. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides.", "type": "string" } }, @@ -4810,20 +5009,20 @@ "Operation is unknown and will error out.", "Set the password to a user generated value.", "Generate a new password and set it to that.", - "Set the username. If an empty username is provided, basic authentication\nis disabled for the cluster. If a non-empty username is provided, basic\nauthentication is enabled, with either a provided password or a generated\none." + "Set the username. If an empty username is provided, basic authentication is disabled for the cluster. If a non-empty username is provided, basic authentication is enabled, with either a provided password or a generated one." ], "type": "string" }, "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to set auth.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set auth. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "update": { @@ -4831,7 +5030,7 @@ "description": "Required. A description of the update." }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4842,23 +5041,23 @@ "id": "SetMonitoringServiceRequest", "properties": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "monitoringService": { - "description": "Required. 
The monitoring service the cluster should use to write metrics.\nCurrently available options:\n\n* \"monitoring.googleapis.com/kubernetes\" - The Cloud Monitoring\nservice with a Kubernetes-native resource model\n* `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no\n longer available as of GKE 1.15).\n* `none` - No metrics will be exported from the cluster.\n\nIf left as an empty string,`monitoring.googleapis.com/kubernetes` will be\nused for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.", + "description": "Required. The monitoring service the cluster should use to write metrics. Currently available options: * \"monitoring.googleapis.com/kubernetes\" - The Cloud Monitoring service with a Kubernetes-native resource model * `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no longer available as of GKE 1.15). * `none` - No metrics will be exported from the cluster. If left as an empty string,`monitoring.googleapis.com/kubernetes` will be used for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.", "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to set monitoring.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to set monitoring. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4869,11 +5068,11 @@ "id": "SetNetworkPolicyRequest", "properties": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster id) of the cluster to set networking\npolicy. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to set networking policy. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "networkPolicy": { @@ -4881,11 +5080,11 @@ "description": "Required. Configuration options for the NetworkPolicy feature." }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. 
The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4900,34 +5099,34 @@ "description": "Required. Autoscaling configuration for the node pool." }, "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster, node pool) of the node pool to set\nautoscaler settings. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool) of the node pool to set autoscaler settings. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "type": "string" }, "nodePoolId": { - "description": "Required. Deprecated. The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the node pool to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, "type": "object" }, "SetNodePoolManagementRequest": { - "description": "SetNodePoolManagementRequest sets the node management properties of a node\npool.", + "description": "SetNodePoolManagementRequest sets the node management properties of a node pool.", "id": "SetNodePoolManagementRequest", "properties": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", "type": "string" }, "management": { @@ -4935,34 +5134,34 @@ "description": "Required. 
NodeManagement configuration for the node pool." }, "name": { - "description": "The name (project, location, cluster, node pool id) of the node pool to set\nmanagement properties. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node pool to set management properties. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "type": "string" }, "nodePoolId": { - "description": "Required. Deprecated. The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the node pool to update. This field has been deprecated and replaced by the name field.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, "type": "object" }, "SetNodePoolSizeRequest": { - "description": "SetNodePoolSizeRequest sets the size a node\npool.", + "description": "SetNodePoolSizeRequest sets the size a node pool.", "id": "SetNodePoolSizeRequest", "properties": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster, node pool id) of the node pool to set\nsize.\nSpecified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool id) of the node pool to set size. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "type": "string" }, "nodeCount": { @@ -4971,15 +5170,15 @@ "type": "integer" }, "nodePoolId": { - "description": "Required. Deprecated. The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the node pool to update. This field has been deprecated and replaced by the name field.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). 
This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -4990,11 +5189,11 @@ "id": "ShieldedInstanceConfig", "properties": { "enableIntegrityMonitoring": { - "description": "Defines whether the instance has integrity monitoring enabled.\n\nEnables monitoring and attestation of the boot integrity of the instance.\nThe attestation is performed against the integrity policy baseline. This\nbaseline is initially derived from the implicitly trusted boot image when\nthe instance is created.", + "description": "Defines whether the instance has integrity monitoring enabled. Enables monitoring and attestation of the boot integrity of the instance. The attestation is performed against the integrity policy baseline. This baseline is initially derived from the implicitly trusted boot image when the instance is created.", "type": "boolean" }, "enableSecureBoot": { - "description": "Defines whether the instance has Secure Boot enabled.\n\nSecure Boot helps ensure that the system only runs authentic software by\nverifying the digital signature of all boot components, and halting the\nboot process if signature verification fails.", + "description": "Defines whether the instance has Secure Boot enabled. Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails.", "type": "boolean" } }, @@ -5012,19 +5211,19 @@ "type": "object" }, "StartIPRotationRequest": { - "description": "StartIPRotationRequest creates a new IP for the cluster and then performs\na node upgrade on each node pool to point to the new IP.", + "description": "StartIPRotationRequest creates a new IP for the cluster and then performs a node upgrade on each node pool to point to the new IP.", "id": "StartIPRotationRequest", "properties": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster id) of the cluster to start IP\nrotation. Specified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster id) of the cluster to start IP rotation. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). 
This field has been deprecated and replaced by the name field.", "type": "string" }, "rotateCredentials": { @@ -5032,18 +5231,87 @@ "type": "boolean" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", + "type": "string" + } + }, + "type": "object" + }, + "Status": { + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", + "id": "Status", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "details": { + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, "type": "object" }, "StatusCondition": { - "description": "StatusCondition describes why a cluster or a node pool has a certain status\n(e.g., ERROR or DEGRADED).", + "description": "StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED).", "id": "StatusCondition", "properties": { + "canonicalCode": { + "description": "Canonical code of the condition.", + "enum": [ + "OK", + "CANCELLED", + "UNKNOWN", + "INVALID_ARGUMENT", + "DEADLINE_EXCEEDED", + "NOT_FOUND", + "ALREADY_EXISTS", + "PERMISSION_DENIED", + "UNAUTHENTICATED", + "RESOURCE_EXHAUSTED", + "FAILED_PRECONDITION", + "ABORTED", + "OUT_OF_RANGE", + "UNIMPLEMENTED", + "INTERNAL", + "UNAVAILABLE", + "DATA_LOSS" + ], + "enumDescriptions": [ + "Not an error; returned on success HTTP Mapping: 200 OK", + "The operation was cancelled, typically by the caller. HTTP Mapping: 499 Client Closed Request", + "Unknown error. For example, this error may be returned when a `Status` value received from another address space belongs to an error space that is not known in this address space. Also errors raised by APIs that do not return enough error information may be converted to this error. HTTP Mapping: 500 Internal Server Error", + "The client specified an invalid argument. Note that this differs from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments that are problematic regardless of the state of the system (e.g., a malformed file name). HTTP Mapping: 400 Bad Request", + "The deadline expired before the operation could complete. 
For operations that change the state of the system, this error may be returned even if the operation has completed successfully. For example, a successful response from a server could have been delayed long enough for the deadline to expire. HTTP Mapping: 504 Gateway Timeout", + "Some requested entity (e.g., file or directory) was not found. Note to server developers: if a request is denied for an entire class of users, such as gradual feature rollout or undocumented allowlist, `NOT_FOUND` may be used. If a request is denied for some users within a class of users, such as user-based access control, `PERMISSION_DENIED` must be used. HTTP Mapping: 404 Not Found", + "The entity that a client attempted to create (e.g., file or directory) already exists. HTTP Mapping: 409 Conflict", + "The caller does not have permission to execute the specified operation. `PERMISSION_DENIED` must not be used for rejections caused by exhausting some resource (use `RESOURCE_EXHAUSTED` instead for those errors). `PERMISSION_DENIED` must not be used if the caller can not be identified (use `UNAUTHENTICATED` instead for those errors). This error code does not imply the request is valid or the requested entity exists or satisfies other pre-conditions. HTTP Mapping: 403 Forbidden", + "The request does not have valid authentication credentials for the operation. HTTP Mapping: 401 Unauthorized", + "Some resource has been exhausted, perhaps a per-user quota, or perhaps the entire file system is out of space. HTTP Mapping: 429 Too Many Requests", + "The operation was rejected because the system is not in a state required for the operation's execution. For example, the directory to be deleted is non-empty, an rmdir operation is applied to a non-directory, etc. Service implementors can use the following guidelines to decide between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: (a) Use `UNAVAILABLE` if the client can retry just the failing call. (b) Use `ABORTED` if the client should retry at a higher level (e.g., when a client-specified test-and-set fails, indicating the client should restart a read-modify-write sequence). (c) Use `FAILED_PRECONDITION` if the client should not retry until the system state has been explicitly fixed. E.g., if an \"rmdir\" fails because the directory is non-empty, `FAILED_PRECONDITION` should be returned since the client should not retry unless the files are deleted from the directory. HTTP Mapping: 400 Bad Request", + "The operation was aborted, typically due to a concurrency issue such as a sequencer check failure or transaction abort. See the guidelines above for deciding between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. HTTP Mapping: 409 Conflict", + "The operation was attempted past the valid range. E.g., seeking or reading past end-of-file. Unlike `INVALID_ARGUMENT`, this error indicates a problem that may be fixed if the system state changes. For example, a 32-bit file system will generate `INVALID_ARGUMENT` if asked to read at an offset that is not in the range [0,2^32-1], but it will generate `OUT_OF_RANGE` if asked to read from an offset past the current file size. There is a fair bit of overlap between `FAILED_PRECONDITION` and `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific error) when it applies so that callers who are iterating through a space can easily look for an `OUT_OF_RANGE` error to detect when they are done. HTTP Mapping: 400 Bad Request", + "The operation is not implemented or is not supported/enabled in this service. 
HTTP Mapping: 501 Not Implemented", + "Internal errors. This means that some invariants expected by the underlying system have been broken. This error code is reserved for serious errors. HTTP Mapping: 500 Internal Server Error", + "The service is currently unavailable. This is most likely a transient condition, which can be corrected by retrying with a backoff. Note that it is not always safe to retry non-idempotent operations. See the guidelines above for deciding between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. HTTP Mapping: 503 Service Unavailable", + "Unrecoverable data loss or corruption. HTTP Mapping: 500 Internal Server Error" + ], + "type": "string" + }, "code": { - "description": "Machine-friendly representation of the condition", + "description": "Machine-friendly representation of the condition Deprecated. Use canonical_code instead.", "enum": [ "UNKNOWN", "GCE_STOCKOUT", @@ -5054,11 +5322,11 @@ ], "enumDescriptions": [ "UNKNOWN indicates a generic condition.", - "GCE_STOCKOUT indicates that Google Compute Engine resources are\ntemporarily unavailable.", - "GKE_SERVICE_ACCOUNT_DELETED indicates that the user deleted their robot\nservice account.", + "GCE_STOCKOUT indicates that Google Compute Engine resources are temporarily unavailable.", + "GKE_SERVICE_ACCOUNT_DELETED indicates that the user deleted their robot service account.", "Google Compute Engine quota was exceeded.", "Cluster state was manually changed by an SRE due to a system logic error.", - "Unable to perform an encrypt operation against the CloudKMS key used for\netcd level encryption.\nMore codes TBA" + "Unable to perform an encrypt operation against the CloudKMS key used for etcd level encryption. More codes TBA" ], "type": "string" }, @@ -5074,7 +5342,7 @@ "id": "TimeWindow", "properties": { "endTime": { - "description": "The time that the window ends. The end time should take place after the\nstart time.", + "description": "The time that the window ends. The end time should take place after the start time.", "format": "google-datetime", "type": "string" }, @@ -5110,15 +5378,15 @@ "id": "UpdateClusterRequest", "properties": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to update. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "update": { @@ -5126,7 +5394,7 @@ "description": "Required. A description of the update." }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. 
This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -5137,23 +5405,23 @@ "id": "UpdateMasterRequest", "properties": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "masterVersion": { - "description": "Required. The Kubernetes version to change the master to.\n\nUsers may specify either explicit versions offered by\nKubernetes Engine or version aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"-\": picks the default Kubernetes version", + "description": "Required. The Kubernetes version to change the master to. Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - \"latest\": picks the highest valid Kubernetes version - \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version - \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version - \"1.X.Y-gke.N\": picks an explicit Kubernetes version - \"-\": picks the default Kubernetes version", "type": "string" }, "name": { - "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + "description": "The name (project, location, cluster) of the cluster to update. Specified in the format `projects/*/locations/*/clusters/*`.", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { - "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -5164,34 +5432,42 @@ "id": "UpdateNodePoolRequest", "properties": { "clusterId": { - "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "imageType": { "description": "Required. 
The desired image type for the node pool.", "type": "string" }, + "kubeletConfig": { + "$ref": "NodeKubeletConfig", + "description": "Node kubelet configs." + }, + "linuxNodeConfig": { + "$ref": "LinuxNodeConfig", + "description": "Parameters that can be configured on Linux nodes." + }, "locations": { - "description": "The desired list of Google Compute Engine\n[zones](https://cloud.google.com/compute/docs/zones#available) in which the\nnode pool's nodes should be located. Changing the locations for a node pool\nwill result in nodes being either created or removed from the node pool,\ndepending on whether locations are being added or removed.", + "description": "The desired list of Google Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available) in which the node pool's nodes should be located. Changing the locations for a node pool will result in nodes being either created or removed from the node pool, depending on whether locations are being added or removed.", "items": { "type": "string" }, "type": "array" }, "name": { - "description": "The name (project, location, cluster, node pool) of the node pool to\nupdate. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + "description": "The name (project, location, cluster, node pool) of the node pool to update. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", "type": "string" }, "nodePoolId": { - "description": "Required. Deprecated. The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The name of the node pool to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "nodeVersion": { - "description": "Required. The Kubernetes version to change the nodes to (typically an\nupgrade).\n\nUsers may specify either explicit versions offered by Kubernetes Engine or\nversion aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"-\": picks the Kubernetes master version", + "description": "Required. The Kubernetes version to change the nodes to (typically an upgrade). Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - \"latest\": picks the highest valid Kubernetes version - \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version - \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version - \"1.X.Y-gke.N\": picks an explicit Kubernetes version - \"-\": picks the Kubernetes master version", "type": "string" }, "projectId": { - "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", "type": "string" }, "upgradeSettings": { @@ -5203,23 +5479,65 @@ "description": "The desired workload metadata config for the node pool." }, "zone": { - "description": "Required. Deprecated. 
The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", + "type": "string" + } + }, + "type": "object" + }, + "UpgradeEvent": { + "description": "UpgradeEvent is a notification sent to customers by the cluster server when a resource is upgrading.", + "id": "UpgradeEvent", + "properties": { + "currentVersion": { + "description": "Required. The current version before the upgrade.", + "type": "string" + }, + "operation": { + "description": "Required. The operation associated with this upgrade.", + "type": "string" + }, + "operationStartTime": { + "description": "Required. The time when the operation was started.", + "format": "google-datetime", + "type": "string" + }, + "resource": { + "description": "Optional. Optional relative path to the resource. For example in node pool upgrades, the relative path of the node pool.", + "type": "string" + }, + "resourceType": { + "description": "Required. The resource type that is upgrading.", + "enum": [ + "UPGRADE_RESOURCE_TYPE_UNSPECIFIED", + "MASTER", + "NODE_POOL" + ], + "enumDescriptions": [ + "Default value. This shouldn't be used.", + "Master / control plane", + "Node pool" + ], + "type": "string" + }, + "targetVersion": { + "description": "Required. The target version for the upgrade.", "type": "string" } }, "type": "object" }, "UpgradeSettings": { - "description": "These upgrade settings control the level of parallelism and the level of\ndisruption caused by an upgrade.\n\nmaxUnavailable controls the number of nodes that can be simultaneously\nunavailable.\n\nmaxSurge controls the number of additional nodes that can be added to the\nnode pool temporarily for the time of the upgrade to increase the number of\navailable nodes.\n\n(maxUnavailable + maxSurge) determines the level of parallelism (how many\nnodes are being upgraded at the same time).\n\nNote: upgrades inevitably introduce some disruption since workloads need to\nbe moved from old nodes to new, upgraded ones. Even if maxUnavailable=0,\nthis holds true. (Disruption stays within the limits of\nPodDisruptionBudget, if it is configured.)\n\nConsider a hypothetical node pool with 5 nodes having maxSurge=2,\nmaxUnavailable=1. This means the upgrade process upgrades 3 nodes\nsimultaneously. It creates 2 additional (upgraded) nodes, then it brings\ndown 3 old (not yet upgraded) nodes at the same time. This ensures that\nthere are always at least 4 nodes available.", + "description": "These upgrade settings control the level of parallelism and the level of disruption caused by an upgrade. maxUnavailable controls the number of nodes that can be simultaneously unavailable. maxSurge controls the number of additional nodes that can be added to the node pool temporarily for the time of the upgrade to increase the number of available nodes. (maxUnavailable + maxSurge) determines the level of parallelism (how many nodes are being upgraded at the same time). Note: upgrades inevitably introduce some disruption since workloads need to be moved from old nodes to new, upgraded ones. Even if maxUnavailable=0, this holds true. (Disruption stays within the limits of PodDisruptionBudget, if it is configured.) 
Consider a hypothetical node pool with 5 nodes having maxSurge=2, maxUnavailable=1. This means the upgrade process upgrades 3 nodes simultaneously. It creates 2 additional (upgraded) nodes, then it brings down 3 old (not yet upgraded) nodes at the same time. This ensures that there are always at least 4 nodes available.", "id": "UpgradeSettings", "properties": { "maxSurge": { - "description": "The maximum number of nodes that can be created beyond the current size\nof the node pool during the upgrade process.", + "description": "The maximum number of nodes that can be created beyond the current size of the node pool during the upgrade process.", "format": "int32", "type": "integer" }, "maxUnavailable": { - "description": "The maximum number of nodes that can be simultaneously unavailable during\nthe upgrade process. A node is considered available if its status is\nReady.", + "description": "The maximum number of nodes that can be simultaneously unavailable during the upgrade process. A node is considered available if its status is Ready.", "format": "int32", "type": "integer" } @@ -5227,7 +5545,7 @@ "type": "object" }, "UsableSubnetwork": { - "description": "UsableSubnetwork resource returns the subnetwork name, its associated network\nand the primary CIDR range.", + "description": "UsableSubnetwork resource returns the subnetwork name, its associated network and the primary CIDR range.", "id": "UsableSubnetwork", "properties": { "ipCidrRange": { @@ -5235,7 +5553,7 @@ "type": "string" }, "network": { - "description": "Network Name.\nExample: projects/my-project/global/networks/my-network", + "description": "Network Name. Example: projects/my-project/global/networks/my-network", "type": "string" }, "secondaryIpRanges": { @@ -5246,11 +5564,11 @@ "type": "array" }, "statusMessage": { - "description": "A human readable status message representing the reasons for cases where\nthe caller cannot use the secondary ranges under the subnet. For example if\nthe secondary_ip_ranges is empty due to a permission issue, an insufficient\npermission message will be given by status_message.", + "description": "A human readable status message representing the reasons for cases where the caller cannot use the secondary ranges under the subnet. For example if the secondary_ip_ranges is empty due to a permission issue, an insufficient permission message will be given by status_message.", "type": "string" }, "subnetwork": { - "description": "Subnetwork Name.\nExample: projects/my-project/regions/us-central1/subnetworks/my-subnet", + "description": "Subnetwork Name. Example: projects/my-project/regions/us-central1/subnetworks/my-subnet", "type": "string" } }, @@ -5265,7 +5583,7 @@ "type": "string" }, "rangeName": { - "description": "The name associated with this subnetwork secondary range, used when adding\nan alias IP range to a VM instance.", + "description": "The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance.", "type": "string" }, "status": { @@ -5280,9 +5598,9 @@ "enumDescriptions": [ "UNKNOWN is the zero value of the Status enum. It's not a valid status.", "UNUSED denotes that this range is unclaimed by any cluster.", - "IN_USE_SERVICE denotes that this range is claimed by a cluster for\nservices. It cannot be used for other clusters.", - "IN_USE_SHAREABLE_POD denotes this range was created by the network admin\nand is currently claimed by a cluster for pods. 
It can only be used by\nother clusters as a pod range.", - "IN_USE_MANAGED_POD denotes this range was created by GKE and is claimed\nfor pods. It cannot be used for other clusters." + "IN_USE_SERVICE denotes that this range is claimed by a cluster for services. It cannot be used for other clusters.", + "IN_USE_SHAREABLE_POD denotes this range was created by the network admin and is currently claimed by a cluster for pods. It can only be used by other clusters as a pod range.", + "IN_USE_MANAGED_POD denotes this range was created by GKE and is claimed for pods. It cannot be used for other clusters." ], "type": "string" } @@ -5290,7 +5608,7 @@ "type": "object" }, "VerticalPodAutoscaling": { - "description": "VerticalPodAutoscaling contains global, per-cluster information\nrequired by Vertical Pod Autoscaler to automatically adjust\nthe resources of pods controlled by it.", + "description": "VerticalPodAutoscaling contains global, per-cluster information required by Vertical Pod Autoscaler to automatically adjust the resources of pods controlled by it.", "id": "VerticalPodAutoscaling", "properties": { "enabled": { @@ -5301,13 +5619,17 @@ "type": "object" }, "WorkloadIdentityConfig": { - "description": "Configuration for the use of Kubernetes Service Accounts in GCP IAM\npolicies.", + "description": "Configuration for the use of Kubernetes Service Accounts in GCP IAM policies.", "id": "WorkloadIdentityConfig", "properties": { "identityNamespace": { "description": "IAM Identity Namespace to attach all Kubernetes Service Accounts to.", "type": "string" }, + "identityProvider": { + "description": "identity provider is the third party identity provider.", + "type": "string" + }, "workloadPool": { "description": "The workload pool to attach all Kubernetes service accounts to.", "type": "string" @@ -5316,11 +5638,11 @@ "type": "object" }, "WorkloadMetadataConfig": { - "description": "WorkloadMetadataConfig defines the metadata configuration to expose to\nworkloads on the node pool.", + "description": "WorkloadMetadataConfig defines the metadata configuration to expose to workloads on the node pool.", "id": "WorkloadMetadataConfig", "properties": { "mode": { - "description": "Mode is the configuration for how to expose metadata to workloads running\non the node pool.", + "description": "Mode is the configuration for how to expose metadata to workloads running on the node pool.", "enum": [ "MODE_UNSPECIFIED", "GCE_METADATA", @@ -5329,12 +5651,12 @@ "enumDescriptions": [ "Not set.", "Expose all Compute Engine metadata to pods.", - "Run the GKE Metadata Server on this node. The GKE Metadata Server exposes\na metadata API to workloads that is compatible with the V1 Compute\nMetadata APIs exposed by the Compute Engine and App Engine Metadata\nServers. This feature can only be enabled if Workload Identity is enabled\nat the cluster level." + "Run the GKE Metadata Server on this node. The GKE Metadata Server exposes a metadata API to workloads that is compatible with the V1 Compute Metadata APIs exposed by the Compute Engine and App Engine Metadata Servers. This feature can only be enabled if Workload Identity is enabled at the cluster level." 
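For reference, the surge-upgrade arithmetic spelled out in the UpgradeSettings schema above can be reproduced in a few lines of Go. This is only a minimal sketch of the documented behavior using the numbers from the schema text; the helper name is hypothetical and is not part of the generated client.

package main

import "fmt"

// upgradeParallelism mirrors the UpgradeSettings rule described above:
// (maxSurge + maxUnavailable) nodes may be upgraded at the same time, and
// at least (poolSize - maxUnavailable) nodes stay available throughout.
func upgradeParallelism(poolSize, maxSurge, maxUnavailable int64) (parallel, minAvailable int64) {
	return maxSurge + maxUnavailable, poolSize - maxUnavailable
}

func main() {
	// The hypothetical pool from the schema text: 5 nodes, maxSurge=2, maxUnavailable=1.
	parallel, minAvailable := upgradeParallelism(5, 2, 1)
	fmt.Printf("up to %d nodes upgraded at once, at least %d always available\n", parallel, minAvailable)
	// Prints: up to 3 nodes upgraded at once, at least 4 always available
}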
], "type": "string" }, "nodeMetadata": { - "description": "NodeMetadata is the configuration for how to expose metadata to the\nworkloads running on the node.", + "description": "NodeMetadata is the configuration for how to expose metadata to the workloads running on the node.", "enum": [ "UNSPECIFIED", "SECURE", @@ -5343,9 +5665,9 @@ ], "enumDescriptions": [ "Not set.", - "Prevent workloads not in hostNetwork from accessing certain VM metadata,\nspecifically kube-env, which contains Kubelet credentials, and the\ninstance identity token.\n\nMetadata concealment is a temporary security solution available while the\nbootstrapping process for cluster nodes is being redesigned with\nsignificant security improvements. This feature is scheduled to be\ndeprecated in the future and later removed.", + "Prevent workloads not in hostNetwork from accessing certain VM metadata, specifically kube-env, which contains Kubelet credentials, and the instance identity token. Metadata concealment is a temporary security solution available while the bootstrapping process for cluster nodes is being redesigned with significant security improvements. This feature is scheduled to be deprecated in the future and later removed.", "Expose all VM metadata to pods.", - "Run the GKE Metadata Server on this node. The GKE Metadata Server exposes\na metadata API to workloads that is compatible with the V1 Compute\nMetadata APIs exposed by the Compute Engine and App Engine Metadata\nServers. This feature can only be enabled if Workload Identity is enabled\nat the cluster level." + "Run the GKE Metadata Server on this node. The GKE Metadata Server exposes a metadata API to workloads that is compatible with the V1 Compute Metadata APIs exposed by the Compute Engine and App Engine Metadata Servers. This feature can only be enabled if Workload Identity is enabled at the cluster level." ], "type": "string" } diff --git a/vendor/google.golang.org/api/container/v1beta1/container-gen.go b/vendor/google.golang.org/api/container/v1beta1/container-gen.go index c5d57fa0e4b..d53230b9a30 100644 --- a/vendor/google.golang.org/api/container/v1beta1/container-gen.go +++ b/vendor/google.golang.org/api/container/v1beta1/container-gen.go @@ -75,6 +75,7 @@ const apiId = "container:v1beta1" const apiName = "container" const apiVersion = "v1beta1" const basePath = "https://container.googleapis.com/" +const mtlsBasePath = "https://container.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -90,6 +91,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -282,8 +284,7 @@ type AcceleratorConfig struct { AcceleratorCount int64 `json:"acceleratorCount,omitempty,string"` // AcceleratorType: The accelerator type resource name. List of - // supported - // accelerators + // supported accelerators // [here](https://cloud.google.com/compute/docs/gpus) AcceleratorType string `json:"acceleratorType,omitempty"` @@ -312,19 +313,16 @@ func (s *AcceleratorConfig) MarshalJSON() ([]byte, error) { } // AddonsConfig: Configuration for the addons that can be automatically -// spun up in the -// cluster, enabling additional functionality. 
+// spun up in the cluster, enabling additional functionality. type AddonsConfig struct { // CloudRunConfig: Configuration for the Cloud Run addon. The - // `IstioConfig` addon must be - // enabled in order to enable Cloud Run addon. This option can only be - // enabled - // at cluster creation time. + // `IstioConfig` addon must be enabled in order to enable Cloud Run + // addon. This option can only be enabled at cluster creation time. CloudRunConfig *CloudRunConfig `json:"cloudRunConfig,omitempty"` // ConfigConnectorConfig: Configuration for the ConfigConnector add-on, - // a Kubernetes - // extension to manage hosted GCP services through the Kubernetes API + // a Kubernetes extension to manage hosted GCP services through the + // Kubernetes API ConfigConnectorConfig *ConfigConnectorConfig `json:"configConnectorConfig,omitempty"` // DnsCacheConfig: Configuration for NodeLocalDNS, a dns cache running @@ -336,44 +334,34 @@ type AddonsConfig struct { GcePersistentDiskCsiDriverConfig *GcePersistentDiskCsiDriverConfig `json:"gcePersistentDiskCsiDriverConfig,omitempty"` // HorizontalPodAutoscaling: Configuration for the horizontal pod - // autoscaling feature, which - // increases or decreases the number of replica pods a replication - // controller - // has based on the resource usage of the existing pods. + // autoscaling feature, which increases or decreases the number of + // replica pods a replication controller has based on the resource usage + // of the existing pods. HorizontalPodAutoscaling *HorizontalPodAutoscaling `json:"horizontalPodAutoscaling,omitempty"` // HttpLoadBalancing: Configuration for the HTTP (L7) load balancing - // controller addon, which - // makes it easy to set up HTTP load balancers for services in a - // cluster. + // controller addon, which makes it easy to set up HTTP load balancers + // for services in a cluster. HttpLoadBalancing *HttpLoadBalancing `json:"httpLoadBalancing,omitempty"` // IstioConfig: Configuration for Istio, an open platform to connect, - // manage, and secure - // microservices. + // manage, and secure microservices. IstioConfig *IstioConfig `json:"istioConfig,omitempty"` // KalmConfig: Configuration for the KALM addon, which manages the - // lifecycle of k8s - // applications. + // lifecycle of k8s applications. KalmConfig *KalmConfig `json:"kalmConfig,omitempty"` - // KubernetesDashboard: Configuration for the Kubernetes Dashboard. - // This addon is deprecated, and will be disabled in 1.15. It is - // recommended + // KubernetesDashboard: Configuration for the Kubernetes Dashboard. This + // addon is deprecated, and will be disabled in 1.15. It is recommended // to use the Cloud Console to manage and monitor your Kubernetes - // clusters, - // workloads and applications. For more information, - // see: - // https://cloud.google.com/kubernetes-engine/docs/concepts/dashboar - // ds + // clusters, workloads and applications. For more information, see: + // https://cloud.google.com/kubernetes-engine/docs/concepts/dashboards KubernetesDashboard *KubernetesDashboard `json:"kubernetesDashboard,omitempty"` // NetworkPolicyConfig: Configuration for NetworkPolicy. This only - // tracks whether the addon - // is enabled or not on the Master, it does not track whether network - // policy - // is enabled for the nodes. + // tracks whether the addon is enabled or not on the Master, it does not + // track whether network policy is enabled for the nodes. 
NetworkPolicyConfig *NetworkPolicyConfig `json:"networkPolicyConfig,omitempty"` // ForceSendFields is a list of field names (e.g. "CloudRunConfig") to @@ -403,14 +391,12 @@ func (s *AddonsConfig) MarshalJSON() ([]byte, error) { // AuthenticatorGroupsConfig: Configuration for returning group // information from authenticators. type AuthenticatorGroupsConfig struct { - // Enabled: Whether this cluster should return group membership - // lookups + // Enabled: Whether this cluster should return group membership lookups // during authentication using a group of security groups. Enabled bool `json:"enabled,omitempty"` // SecurityGroup: The name of the security group-of-groups to be used. - // Only relevant - // if enabled = true. + // Only relevant if enabled = true. SecurityGroup string `json:"securityGroup,omitempty"` // ForceSendFields is a list of field names (e.g. "Enabled") to @@ -437,19 +423,16 @@ func (s *AuthenticatorGroupsConfig) MarshalJSON() ([]byte, error) { } // AutoUpgradeOptions: AutoUpgradeOptions defines the set of options for -// the user to control how -// the Auto Upgrades will proceed. +// the user to control how the Auto Upgrades will proceed. type AutoUpgradeOptions struct { // AutoUpgradeStartTime: [Output only] This field is set when upgrades - // are about to commence - // with the approximate start time for the upgrades, - // in - // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + // are about to commence with the approximate start time for the + // upgrades, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text + // format. AutoUpgradeStartTime string `json:"autoUpgradeStartTime,omitempty"` // Description: [Output only] This field is set when upgrades are about - // to commence - // with the description of the upgrade. + // to commence with the description of the upgrade. Description string `json:"description,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -478,45 +461,66 @@ func (s *AutoUpgradeOptions) MarshalJSON() ([]byte, error) { } // AutoprovisioningNodePoolDefaults: AutoprovisioningNodePoolDefaults -// contains defaults for a node pool created -// by NAP. +// contains defaults for a node pool created by NAP. type AutoprovisioningNodePoolDefaults struct { - // Management: Specifies the node management options for NAP created - // node-pools. + // BootDiskKmsKey: The Customer Managed Encryption Key used to encrypt + // the boot disk attached to each node in the node pool. This should be + // of the form + // projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cr + // yptoKeys/[KEY_NAME]. For more information about protecting resources + // with Cloud KMS Keys please see: + // https://cloud.google.com/compute/docs/disks/customer-managed-encryption + BootDiskKmsKey string `json:"bootDiskKmsKey,omitempty"` + + // DiskSizeGb: Size of the disk attached to each node, specified in GB. + // The smallest allowed disk size is 10GB. If unspecified, the default + // disk size is 100GB. + DiskSizeGb int64 `json:"diskSizeGb,omitempty"` + + // DiskType: Type of the disk attached to each node (e.g. 'pd-standard' + // or 'pd-ssd') If unspecified, the default disk type is 'pd-standard' + DiskType string `json:"diskType,omitempty"` + + // Management: NodeManagement configuration for this NodePool. Management *NodeManagement `json:"management,omitempty"` - // MinCpuPlatform: Minimum CPU platform to be used for NAP created node - // pools. - // The instance may be scheduled on the specified or newer CPU - // platform. 
- // Applicable values are the friendly names of CPU platforms, such - // as - // minCpuPlatform: "Intel Haswell" - // or - // minCpuPlatform: "Intel Sandy Bridge". For - // more - // information, read [how to specify min - // CPU - // platform](https://cloud.google.com/compute/docs/instances/specify- - // min-cpu-platform) - // To unset the min cpu platform field pass "automatic" as field value. + // MinCpuPlatform: Minimum CPU platform to be used by this instance. The + // instance may be scheduled on the specified or newer CPU platform. + // Applicable values are the friendly names of CPU platforms, such as + // `minCpuPlatform: "Intel Haswell" or `minCpuPlatform: "Intel Sandy + // Bridge". For more information, read [how to specify min CPU + // platform](https://cloud.google.com/compute/docs/instances/specify-min- + // cpu-platform) To unset the min cpu platform field pass "automatic" as + // field value. MinCpuPlatform string `json:"minCpuPlatform,omitempty"` - // OauthScopes: Scopes that are used by NAP when creating node pools. If - // oauth_scopes are - // specified, service_account should be empty. + // OauthScopes: The set of Google API scopes to be made available on all + // of the node VMs under the "default" service account. The following + // scopes are recommended, but not required, and by default are not + // included: * `https://www.googleapis.com/auth/compute` is required for + // mounting persistent storage on your nodes. * + // `https://www.googleapis.com/auth/devstorage.read_only` is required + // for communicating with **gcr.io** (the [Google Container + // Registry](https://cloud.google.com/container-registry/)). If + // unspecified, no scopes are added, unless Cloud Logging or Cloud + // Monitoring are enabled, in which case their required scopes will be + // added. OauthScopes []string `json:"oauthScopes,omitempty"` // ServiceAccount: The Google Cloud Platform Service Account to be used - // by the node VMs. If - // service_account is specified, scopes should be empty. + // by the node VMs. Specify the email address of the Service Account; + // otherwise, if no Service Account is specified, the "default" service + // account is used. ServiceAccount string `json:"serviceAccount,omitempty"` - // UpgradeSettings: Specifies the upgrade settings for NAP created node - // pools + // ShieldedInstanceConfig: Shielded Instance options. + ShieldedInstanceConfig *ShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` + + // UpgradeSettings: Upgrade settings control disruption and speed of the + // upgrade. UpgradeSettings *UpgradeSettings `json:"upgradeSettings,omitempty"` - // ForceSendFields is a list of field names (e.g. "Management") to + // ForceSendFields is a list of field names (e.g. "BootDiskKmsKey") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -524,12 +528,13 @@ type AutoprovisioningNodePoolDefaults struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Management") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. 
- // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "BootDiskKmsKey") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -539,9 +544,7 @@ func (s *AutoprovisioningNodePoolDefaults) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AvailableVersion: AvailableVersion is an additional Kubernetes -// versions offered -// to users who subscribed to the release channel. +// AvailableVersion: Deprecated. type AvailableVersion struct { // Reason: Reason for availability. Reason string `json:"reason,omitempty"` @@ -604,8 +607,7 @@ func (s *BigQueryDestination) MarshalJSON() ([]byte, error) { // BinaryAuthorization: Configuration for Binary Authorization. type BinaryAuthorization struct { // Enabled: Enable Binary Authorization for this cluster. If enabled, - // all container - // images will be validated by Google Binauthz. + // all container images will be validated by Google Binauthz. Enabled bool `json:"enabled,omitempty"` // ForceSendFields is a list of field names (e.g. "Enabled") to @@ -635,30 +637,25 @@ func (s *BinaryAuthorization) MarshalJSON() ([]byte, error) { // operation. type CancelOperationRequest struct { // Name: The name (project, location, operation id) of the operation to - // cancel. - // Specified in the format `projects/*/locations/*/operations/*`. + // cancel. Specified in the format + // `projects/*/locations/*/operations/*`. Name string `json:"name,omitempty"` // OperationId: Required. Deprecated. The server-assigned `name` of the - // operation. - // This field has been deprecated and replaced by the name field. + // operation. This field has been deprecated and replaced by the name + // field. OperationId string `json:"operationId,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://support.google.com/cloud/answer/6158840). This field + // has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // operation resides. This field has been deprecated and replaced by the - // name - // field. + // which the operation resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "Name") to @@ -752,6 +749,18 @@ type CloudRunConfig struct { // Disabled: Whether Cloud Run addon is enabled for this cluster. Disabled bool `json:"disabled,omitempty"` + // LoadBalancerType: Which load balancer type is installed for Cloud + // Run. + // + // Possible values: + // "LOAD_BALANCER_TYPE_UNSPECIFIED" - Load balancer type for Cloud Run + // is unspecified. 
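The new AutoprovisioningNodePoolDefaults fields introduced by this vendor bump (BootDiskKmsKey, DiskSizeGb, DiskType, ShieldedInstanceConfig and UpgradeSettings) are plain struct fields on the generated type, so a caller populates them as a literal. The following is only an illustrative sketch; the project, key-ring path and values are placeholders, not recommendations.

package main

import (
	"fmt"

	container "google.golang.org/api/container/v1beta1"
)

func main() {
	// Hypothetical defaults for node pools created by node auto-provisioning (NAP).
	defaults := &container.AutoprovisioningNodePoolDefaults{
		// CMEK key protecting each node's boot disk (placeholder resource name).
		BootDiskKmsKey: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key",
		DiskSizeGb:     100,           // the documented default size
		DiskType:       "pd-standard", // the documented default type
		ShieldedInstanceConfig: &container.ShieldedInstanceConfig{
			EnableIntegrityMonitoring: true,
			EnableSecureBoot:          true,
		},
		UpgradeSettings: &container.UpgradeSettings{MaxSurge: 2, MaxUnavailable: 1},
	}
	fmt.Printf("%+v\n", defaults)
}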
+ // "LOAD_BALANCER_TYPE_EXTERNAL" - Install external load balancer for + // Cloud Run. + // "LOAD_BALANCER_TYPE_INTERNAL" - Install internal load balancer for + // Cloud Run. + LoadBalancerType string `json:"loadBalancerType,omitempty"` + // ForceSendFields is a list of field names (e.g. "Disabled") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -792,12 +801,10 @@ type Cluster struct { BinaryAuthorization *BinaryAuthorization `json:"binaryAuthorization,omitempty"` // ClusterIpv4Cidr: The IP address range of the container pods in this - // cluster, - // in + // cluster, in // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // - // notation (e.g. `10.96.0.0/14`). Leave blank to have - // one automatically chosen or specify a `/14` block in `10.0.0.0/8`. + // notation (e.g. `10.96.0.0/14`). Leave blank to have one automatically + // chosen or specify a `/14` block in `10.0.0.0/8`. ClusterIpv4Cidr string `json:"clusterIpv4Cidr,omitempty"` // ClusterTelemetry: Telemetry integration for the cluster. @@ -806,8 +813,10 @@ type Cluster struct { // Conditions: Which conditions caused the current cluster state. Conditions []*StatusCondition `json:"conditions,omitempty"` - // CreateTime: [Output only] The time the cluster was created, - // in + // ConfidentialNodes: Configuration of Confidential Nodes + ConfidentialNodes *ConfidentialNodes `json:"confidentialNodes,omitempty"` + + // CreateTime: [Output only] The time the cluster was created, in // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. CreateTime string `json:"createTime,omitempty"` @@ -815,104 +824,77 @@ type Cluster struct { // the master endpoint. CurrentMasterVersion string `json:"currentMasterVersion,omitempty"` - // CurrentNodeCount: [Output only] The number of nodes currently in the - // cluster. Deprecated. - // Call Kubernetes API directly to retrieve node information. + // CurrentNodeCount: [Output only] The number of nodes currently in the + // cluster. Deprecated. Call Kubernetes API directly to retrieve node + // information. CurrentNodeCount int64 `json:"currentNodeCount,omitempty"` - // CurrentNodeVersion: [Output only] Deprecated, - // use - // [NodePool.version](https://cloud.google.com/kubernetes-engine/docs - // /reference/rest/v1beta1/projects.locations.clusters.nodePools) - // instead - // . The current version of the node software components. - // If they are currently at multiple versions because they're in the - // process - // of being upgraded, this reflects the minimum version of all nodes. + // CurrentNodeVersion: [Output only] Deprecated, use + // [NodePool.version](https://cloud.google.com/kubernetes-engine/docs/ref + // erence/rest/v1beta1/projects.locations.clusters.nodePools) instead. + // The current version of the node software components. If they are + // currently at multiple versions because they're in the process of + // being upgraded, this reflects the minimum version of all nodes. CurrentNodeVersion string `json:"currentNodeVersion,omitempty"` // DatabaseEncryption: Configuration of etcd encryption. DatabaseEncryption *DatabaseEncryption `json:"databaseEncryption,omitempty"` // DefaultMaxPodsConstraint: The default constraint on the maximum - // number of pods that can be run - // simultaneously on a node in the node pool of this cluster. Only - // honored - // if cluster created with IP Alias support. 
+ // number of pods that can be run simultaneously on a node in the node + // pool of this cluster. Only honored if cluster created with IP Alias + // support. DefaultMaxPodsConstraint *MaxPodsConstraint `json:"defaultMaxPodsConstraint,omitempty"` // Description: An optional description of this cluster. Description string `json:"description,omitempty"` // EnableKubernetesAlpha: Kubernetes alpha features are enabled on this - // cluster. This includes alpha - // API groups (e.g. v1beta1) and features that may not be production - // ready in - // the kubernetes version of the master and nodes. - // The cluster has no SLA for uptime and master/node upgrades are - // disabled. - // Alpha enabled clusters are automatically deleted thirty days - // after - // creation. + // cluster. This includes alpha API groups (e.g. v1beta1) and features + // that may not be production ready in the kubernetes version of the + // master and nodes. The cluster has no SLA for uptime and master/node + // upgrades are disabled. Alpha enabled clusters are automatically + // deleted thirty days after creation. EnableKubernetesAlpha bool `json:"enableKubernetesAlpha,omitempty"` - // EnableTpu: Enable the ability to use Cloud TPUs in this cluster. - // This field is deprecated, use tpu_config.enabled instead. + // EnableTpu: Enable the ability to use Cloud TPUs in this cluster. This + // field is deprecated, use tpu_config.enabled instead. EnableTpu bool `json:"enableTpu,omitempty"` // Endpoint: [Output only] The IP address of this cluster's master - // endpoint. - // The endpoint can be accessed from the internet - // at - // `https://username:password@endpoint/`. - // - // See the `masterAuth` property of this resource for username - // and - // password information. + // endpoint. The endpoint can be accessed from the internet at + // `https://username:password@endpoint/`. See the `masterAuth` property + // of this resource for username and password information. Endpoint string `json:"endpoint,omitempty"` - // ExpireTime: [Output only] The time the cluster will be - // automatically + // ExpireTime: [Output only] The time the cluster will be automatically // deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text // format. ExpireTime string `json:"expireTime,omitempty"` // InitialClusterVersion: The initial Kubernetes version for this - // cluster. Valid versions are those - // found in validMasterVersions returned by getServerConfig. The - // version can - // be upgraded over time; such upgrades are reflected - // in - // currentMasterVersion and currentNodeVersion. - // - // Users may specify either explicit versions offered by - // Kubernetes Engine or version aliases, which have the following - // behavior: - // - // - "latest": picks the highest valid Kubernetes version - // - "1.X": picks the highest valid patch+gke.N patch in the 1.X - // version - // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - // - "1.X.Y-gke.N": picks an explicit Kubernetes version - // - "","-": picks the default Kubernetes version + // cluster. Valid versions are those found in validMasterVersions + // returned by getServerConfig. The version can be upgraded over time; + // such upgrades are reflected in currentMasterVersion and + // currentNodeVersion. 
Users may specify either explicit versions + // offered by Kubernetes Engine or version aliases, which have the + // following behavior: - "latest": picks the highest valid Kubernetes + // version - "1.X": picks the highest valid patch+gke.N patch in the 1.X + // version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y + // version - "1.X.Y-gke.N": picks an explicit Kubernetes version - + // "","-": picks the default Kubernetes version InitialClusterVersion string `json:"initialClusterVersion,omitempty"` // InitialNodeCount: The number of nodes to create in this cluster. You - // must ensure that your - // Compute Engine resource - // quota - // is sufficient for this number of instances. You must also have - // available - // firewall and routes quota. - // For requests, this field should only be used in lieu of a - // "node_pool" object, since this configuration (along with - // the - // "node_config") will be used to create a "NodePool" object with - // an + // must ensure that your Compute Engine [resource + // quota](https://cloud.google.com/compute/quotas) is sufficient for + // this number of instances. You must also have available firewall and + // routes quota. For requests, this field should only be used in lieu of + // a "node_pool" object, since this configuration (along with the + // "node_config") will be used to create a "NodePool" object with an // auto-generated name. Do not use this and a node_pool at the same - // time. - // - // This field is deprecated, use node_pool.initial_node_count instead. + // time. This field is deprecated, use node_pool.initial_node_count + // instead. InitialNodeCount int64 `json:"initialNodeCount,omitempty"` // InstanceGroupUrls: Deprecated. Use node_pools.instance_group_urls. @@ -928,49 +910,38 @@ type Cluster struct { // LegacyAbac: Configuration for the legacy ABAC authorization mode. LegacyAbac *LegacyAbac `json:"legacyAbac,omitempty"` - // Location: [Output only] The name of the Google Compute - // Engine - // [zone](https://cloud.google.com/compute/docs/regions-zones/regi - // ons-zones#available) - // or - // [region](https://cloud.google.com/compute/docs - // /regions-zones/regions-zones#available) - // in which the cluster resides. + // Location: [Output only] The name of the Google Compute Engine + // [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zon + // es#available) or + // [region](https://cloud.google.com/compute/docs/regions-zones/regions-z + // ones#available) in which the cluster resides. Location string `json:"location,omitempty"` - // Locations: The list of Google Compute - // Engine + // Locations: The list of Google Compute Engine // [zones](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster's nodes should be located. + // which the cluster's nodes should be located. Locations []string `json:"locations,omitempty"` // LoggingService: The logging service the cluster should use to write - // logs. - // Currently available options: - // - // * `logging.googleapis.com/kubernetes` - The Cloud Logging - // service with a Kubernetes-native resource model - // * `logging.googleapis.com` - The legacy Cloud Logging service (no - // longer - // available as of GKE 1.15). - // * `none` - no logs will be exported from the cluster. - // - // If left as an empty string,`logging.googleapis.com/kubernetes` will - // be - // used for GKE 1.14+ or `logging.googleapis.com` for earlier versions. + // logs. 
Currently available options: * + // `logging.googleapis.com/kubernetes` - The Cloud Logging service with + // a Kubernetes-native resource model * `logging.googleapis.com` - The + // legacy Cloud Logging service (no longer available as of GKE 1.15). * + // `none` - no logs will be exported from the cluster. If left as an + // empty string,`logging.googleapis.com/kubernetes` will be used for GKE + // 1.14+ or `logging.googleapis.com` for earlier versions. LoggingService string `json:"loggingService,omitempty"` // MaintenancePolicy: Configure the maintenance policy for this cluster. MaintenancePolicy *MaintenancePolicy `json:"maintenancePolicy,omitempty"` + // Master: Configuration for master components. + Master *Master `json:"master,omitempty"` + // MasterAuth: The authentication information for accessing the master - // endpoint. - // If unspecified, the defaults are used: - // For clusters before v1.12, if master_auth is unspecified, `username` - // will - // be set to "admin", a random password will be generated, and a - // client + // endpoint. If unspecified, the defaults are used: For clusters before + // v1.12, if master_auth is unspecified, `username` will be set to + // "admin", a random password will be generated, and a client // certificate will be issued. MasterAuth *MasterAuth `json:"masterAuth,omitempty"` @@ -979,53 +950,35 @@ type Cluster struct { MasterAuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `json:"masterAuthorizedNetworksConfig,omitempty"` // MasterIpv4CidrBlock: The IP prefix in CIDR notation to use for the - // hosted master network. - // This prefix will be used for assigning private IP addresses to - // the - // master or set of masters, as well as the ILB VIP. - // This field is deprecated, - // use + // hosted master network. This prefix will be used for assigning private + // IP addresses to the master or set of masters, as well as the ILB VIP. + // This field is deprecated, use // private_cluster_config.master_ipv4_cidr_block instead. MasterIpv4CidrBlock string `json:"masterIpv4CidrBlock,omitempty"` // MonitoringService: The monitoring service the cluster should use to - // write metrics. - // Currently available options: - // - // * "monitoring.googleapis.com/kubernetes" - The Cloud - // Monitoring - // service with a Kubernetes-native resource model - // * `monitoring.googleapis.com` - The legacy Cloud Monitoring service - // (no - // longer available as of GKE 1.15). - // * `none` - No metrics will be exported from the cluster. - // - // If left as an empty string,`monitoring.googleapis.com/kubernetes` - // will be - // used for GKE 1.14+ or `monitoring.googleapis.com` for earlier + // write metrics. Currently available options: * + // "monitoring.googleapis.com/kubernetes" - The Cloud Monitoring service + // with a Kubernetes-native resource model * `monitoring.googleapis.com` + // - The legacy Cloud Monitoring service (no longer available as of GKE + // 1.15). * `none` - No metrics will be exported from the cluster. If + // left as an empty string,`monitoring.googleapis.com/kubernetes` will + // be used for GKE 1.14+ or `monitoring.googleapis.com` for earlier // versions. MonitoringService string `json:"monitoringService,omitempty"` // Name: The name of this cluster. The name must be unique within this - // project - // and location (e.g. zone or region), and can be up to 40 characters - // with - // the following restrictions: - // - // * Lowercase letters, numbers, and hyphens only. - // * Must start with a letter. 
- // * Must end with a number or a letter. + // project and location (e.g. zone or region), and can be up to 40 + // characters with the following restrictions: * Lowercase letters, + // numbers, and hyphens only. * Must start with a letter. * Must end + // with a number or a letter. Name string `json:"name,omitempty"` - // Network: The name of the Google Compute - // Engine - // [network](https://cloud.google.com/compute/docs/networks-and-fi - // rewalls#networks) - // to which the cluster is connected. If left unspecified, the - // `default` - // network will be used. On output this shows the network ID instead of - // the - // name. + // Network: The name of the Google Compute Engine + // [network](https://cloud.google.com/compute/docs/networks-and-firewalls + // #networks) to which the cluster is connected. If left unspecified, + // the `default` network will be used. On output this shows the network + // ID instead of the name. Network string `json:"network,omitempty"` // NetworkConfig: Configuration for cluster networking. @@ -1034,51 +987,40 @@ type Cluster struct { // NetworkPolicy: Configuration options for the NetworkPolicy feature. NetworkPolicy *NetworkPolicy `json:"networkPolicy,omitempty"` - // NodeConfig: Parameters used in creating the cluster's nodes. - // For requests, this field should only be used in lieu of a - // "node_pool" object, since this configuration (along with - // the + // NodeConfig: Parameters used in creating the cluster's nodes. For + // requests, this field should only be used in lieu of a "node_pool" + // object, since this configuration (along with the // "initial_node_count") will be used to create a "NodePool" object with - // an - // auto-generated name. Do not use this and a node_pool at the same - // time. - // For responses, this field will be populated with the node - // configuration of - // the first node pool. (For configuration of each node pool, - // see - // `node_pool.config`) - // - // If unspecified, the defaults are used. + // an auto-generated name. Do not use this and a node_pool at the same + // time. For responses, this field will be populated with the node + // configuration of the first node pool. (For configuration of each node + // pool, see `node_pool.config`) If unspecified, the defaults are used. // This field is deprecated, use node_pool.config instead. NodeConfig *NodeConfig `json:"nodeConfig,omitempty"` // NodeIpv4CidrSize: [Output only] The size of the address space on each - // node for hosting - // containers. This is provisioned from within the - // `container_ipv4_cidr` - // range. This field will only be set when cluster is in route-based - // network - // mode. + // node for hosting containers. This is provisioned from within the + // `container_ipv4_cidr` range. This field will only be set when cluster + // is in route-based network mode. NodeIpv4CidrSize int64 `json:"nodeIpv4CidrSize,omitempty"` - // NodePools: The node pools associated with this cluster. - // This field should not be set if "node_config" or "initial_node_count" - // are + // NodePools: The node pools associated with this cluster. This field + // should not be set if "node_config" or "initial_node_count" are // specified. NodePools []*NodePool `json:"nodePools,omitempty"` + // NotificationConfig: Notification configuration of the cluster. + NotificationConfig *NotificationConfig `json:"notificationConfig,omitempty"` + // PodSecurityPolicyConfig: Configuration for the PodSecurityPolicy // feature. 
PodSecurityPolicyConfig *PodSecurityPolicyConfig `json:"podSecurityPolicyConfig,omitempty"` // PrivateCluster: If this is a private cluster setup. Private clusters - // are clusters that, by - // default have no external IP addresses on the nodes and where nodes - // and the - // master communicate over private IP addresses. - // This field is deprecated, use - // private_cluster_config.enable_private_nodes - // instead. + // are clusters that, by default have no external IP addresses on the + // nodes and where nodes and the master communicate over private IP + // addresses. This field is deprecated, use + // private_cluster_config.enable_private_nodes instead. PrivateCluster bool `json:"privateCluster,omitempty"` // PrivateClusterConfig: Configuration for private cluster. @@ -1088,26 +1030,22 @@ type Cluster struct { ReleaseChannel *ReleaseChannel `json:"releaseChannel,omitempty"` // ResourceLabels: The resource labels for the cluster to use to - // annotate any related - // Google Compute Engine resources. + // annotate any related Google Compute Engine resources. ResourceLabels map[string]string `json:"resourceLabels,omitempty"` // ResourceUsageExportConfig: Configuration for exporting resource - // usages. Resource usage export is - // disabled when this config unspecified. + // usages. Resource usage export is disabled when this config + // unspecified. ResourceUsageExportConfig *ResourceUsageExportConfig `json:"resourceUsageExportConfig,omitempty"` // SelfLink: [Output only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` // ServicesIpv4Cidr: [Output only] The IP address range of the - // Kubernetes services in - // this cluster, - // in + // Kubernetes services in this cluster, in // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // - // notation (e.g. `1.2.3.4/29`). Service addresses are - // typically put in the last `/16` from the container CIDR. + // notation (e.g. `1.2.3.4/29`). Service addresses are typically put in + // the last `/16` from the container CIDR. ServicesIpv4Cidr string `json:"servicesIpv4Cidr,omitempty"` // ShieldedNodes: Shielded Nodes configuration. @@ -1120,46 +1058,35 @@ type Cluster struct { // "PROVISIONING" - The PROVISIONING state indicates the cluster is // being created. // "RUNNING" - The RUNNING state indicates the cluster has been - // created and is fully - // usable. + // created and is fully usable. // "RECONCILING" - The RECONCILING state indicates that some work is - // actively being done on - // the cluster, such as upgrading the master or node software. Details - // can - // be found in the `statusMessage` field. + // actively being done on the cluster, such as upgrading the master or + // node software. Details can be found in the `statusMessage` field. // "STOPPING" - The STOPPING state indicates the cluster is being // deleted. // "ERROR" - The ERROR state indicates the cluster may be unusable. - // Details - // can be found in the `statusMessage` field. + // Details can be found in the `statusMessage` field. // "DEGRADED" - The DEGRADED state indicates the cluster requires user - // action to restore - // full functionality. Details can be found in the `statusMessage` - // field. + // action to restore full functionality. Details can be found in the + // `statusMessage` field. Status string `json:"status,omitempty"` // StatusMessage: [Output only] Additional information about the current - // status of this - // cluster, if available. + // status of this cluster, if available. 
StatusMessage string `json:"statusMessage,omitempty"` - // Subnetwork: The name of the Google Compute - // Engine - // [subnetwork](https://cloud.google.com/compute/docs/subnetworks) - // to which - // the cluster is connected. On output this shows the subnetwork ID - // instead of - // the name. + // Subnetwork: The name of the Google Compute Engine + // [subnetwork](https://cloud.google.com/compute/docs/subnetworks) to + // which the cluster is connected. On output this shows the subnetwork + // ID instead of the name. Subnetwork string `json:"subnetwork,omitempty"` // TpuConfig: Configuration for Cloud TPU support; TpuConfig *TpuConfig `json:"tpuConfig,omitempty"` // TpuIpv4CidrBlock: [Output only] The IP address range of the Cloud - // TPUs in this cluster, - // in + // TPUs in this cluster, in // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // // notation (e.g. `1.2.3.4/29`). TpuIpv4CidrBlock string `json:"tpuIpv4CidrBlock,omitempty"` @@ -1168,15 +1095,13 @@ type Cluster struct { VerticalPodAutoscaling *VerticalPodAutoscaling `json:"verticalPodAutoscaling,omitempty"` // WorkloadIdentityConfig: Configuration for the use of Kubernetes - // Service Accounts in GCP IAM - // policies. + // Service Accounts in GCP IAM policies. WorkloadIdentityConfig *WorkloadIdentityConfig `json:"workloadIdentityConfig,omitempty"` - // Zone: [Output only] The name of the Google Compute - // Engine + // Zone: [Output only] The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field is deprecated, use location instead. + // which the cluster resides. This field is deprecated, use location + // instead. Zone string `json:"zone,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1207,21 +1132,17 @@ func (s *Cluster) MarshalJSON() ([]byte, error) { } // ClusterAutoscaling: ClusterAutoscaling contains global, per-cluster -// information -// required by Cluster Autoscaler to automatically adjust -// the size of the cluster and create/delete -// node pools based on the current needs. +// information required by Cluster Autoscaler to automatically adjust +// the size of the cluster and create/delete node pools based on the +// current needs. type ClusterAutoscaling struct { - // AutoprovisioningLocations: The list of Google Compute - // Engine + // AutoprovisioningLocations: The list of Google Compute Engine // [zones](https://cloud.google.com/compute/docs/zones#available) in - // which the - // NodePool's nodes can be created by NAP. + // which the NodePool's nodes can be created by NAP. AutoprovisioningLocations []string `json:"autoprovisioningLocations,omitempty"` // AutoprovisioningNodePoolDefaults: AutoprovisioningNodePoolDefaults - // contains defaults for a node pool - // created by NAP. + // contains defaults for a node pool created by NAP. AutoprovisioningNodePoolDefaults *AutoprovisioningNodePoolDefaults `json:"autoprovisioningNodePoolDefaults,omitempty"` // AutoscalingProfile: Defines autoscaling behaviour. @@ -1238,8 +1159,7 @@ type ClusterAutoscaling struct { EnableNodeAutoprovisioning bool `json:"enableNodeAutoprovisioning,omitempty"` // ResourceLimits: Contains global constraints regarding minimum and - // maximum - // amount of resources in the cluster. + // maximum amount of resources in the cluster. ResourceLimits []*ResourceLimit `json:"resourceLimits,omitempty"` // ForceSendFields is a list of field names (e.g. 
@@ -1302,10 +1222,8 @@ func (s *ClusterTelemetry) MarshalJSON() ([]byte, error) { } // ClusterUpdate: ClusterUpdate describes an update to the cluster. -// Exactly one update can -// be applied to a cluster with each request, so at most one field can -// be -// provided. +// Exactly one update can be applied to a cluster with each request, so +// at most one field can be provided. type ClusterUpdate struct { // DesiredAddonsConfig: Configurations for the various addons available // to run in the cluster. @@ -1325,113 +1243,104 @@ type ClusterUpdate struct { // DesiredDatabaseEncryption: Configuration of etcd encryption. DesiredDatabaseEncryption *DatabaseEncryption `json:"desiredDatabaseEncryption,omitempty"` - // DesiredImageType: The desired image type for the node pool. - // NOTE: Set the "desired_node_pool" field as well. + // DesiredDatapathProvider: The desired datapath provider for the + // cluster. + // + // Possible values: + // "DATAPATH_PROVIDER_UNSPECIFIED" - Default value. + // "LEGACY_DATAPATH" - Use the IPTables implementation based on + // kube-proxy. + // "ADVANCED_DATAPATH" - Use the eBPF based GKE Dataplane V2 with + // additional features. See the [GKE Dataplane V2 + // documentation](https://cloud.google.com/kubernetes-enginw/docs/how-to/ + // dataplane-v2) for more. + DesiredDatapathProvider string `json:"desiredDatapathProvider,omitempty"` + + // DesiredDefaultSnatStatus: The desired status of whether to disable + // default sNAT for this cluster. + DesiredDefaultSnatStatus *DefaultSnatStatus `json:"desiredDefaultSnatStatus,omitempty"` + + // DesiredImageType: The desired image type for the node pool. NOTE: Set + // the "desired_node_pool" field as well. DesiredImageType string `json:"desiredImageType,omitempty"` // DesiredIntraNodeVisibilityConfig: The desired config of Intra-node // visibility. DesiredIntraNodeVisibilityConfig *IntraNodeVisibilityConfig `json:"desiredIntraNodeVisibilityConfig,omitempty"` - // DesiredLocations: The desired list of Google Compute - // Engine + // DesiredLocations: The desired list of Google Compute Engine // [zones](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster's nodes should be located. Changing the locations a cluster - // is in - // will result in nodes being either created or removed from the - // cluster, - // depending on whether locations are being added or removed. - // - // This list must always include the cluster's primary zone. + // which the cluster's nodes should be located. Changing the locations a + // cluster is in will result in nodes being either created or removed + // from the cluster, depending on whether locations are being added or + // removed. This list must always include the cluster's primary zone. DesiredLocations []string `json:"desiredLocations,omitempty"` // DesiredLoggingService: The logging service the cluster should use to - // write logs. - // Currently available options: - // - // * `logging.googleapis.com/kubernetes` - The Cloud Logging - // service with a Kubernetes-native resource model - // * `logging.googleapis.com` - The legacy Cloud Logging service (no - // longer - // available as of GKE 1.15). - // * `none` - no logs will be exported from the cluster. - // - // If left as an empty string,`logging.googleapis.com/kubernetes` will - // be - // used for GKE 1.14+ or `logging.googleapis.com` for earlier versions. + // write logs. 
Currently available options: * + // `logging.googleapis.com/kubernetes` - The Cloud Logging service with + // a Kubernetes-native resource model * `logging.googleapis.com` - The + // legacy Cloud Logging service (no longer available as of GKE 1.15). * + // `none` - no logs will be exported from the cluster. If left as an + // empty string,`logging.googleapis.com/kubernetes` will be used for GKE + // 1.14+ or `logging.googleapis.com` for earlier versions. DesiredLoggingService string `json:"desiredLoggingService,omitempty"` + // DesiredMaster: Configuration for master components. + DesiredMaster *Master `json:"desiredMaster,omitempty"` + // DesiredMasterAuthorizedNetworksConfig: The desired configuration // options for master authorized networks feature. DesiredMasterAuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `json:"desiredMasterAuthorizedNetworksConfig,omitempty"` // DesiredMasterVersion: The Kubernetes version to change the master to. - // The only valid value is the - // latest supported version. - // - // Users may specify either explicit versions offered by - // Kubernetes Engine or version aliases, which have the following - // behavior: - // - // - "latest": picks the highest valid Kubernetes version - // - "1.X": picks the highest valid patch+gke.N patch in the 1.X + // The only valid value is the latest supported version. Users may + // specify either explicit versions offered by Kubernetes Engine or + // version aliases, which have the following behavior: - "latest": picks + // the highest valid Kubernetes version - "1.X": picks the highest valid + // patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest + // valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an + // explicit Kubernetes version - "-": picks the default Kubernetes // version - // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - // - "1.X.Y-gke.N": picks an explicit Kubernetes version - // - "-": picks the default Kubernetes version DesiredMasterVersion string `json:"desiredMasterVersion,omitempty"` // DesiredMonitoringService: The monitoring service the cluster should - // use to write metrics. - // Currently available options: - // - // * "monitoring.googleapis.com/kubernetes" - The Cloud - // Monitoring - // service with a Kubernetes-native resource model - // * `monitoring.googleapis.com` - The legacy Cloud Monitoring service - // (no - // longer available as of GKE 1.15). - // * `none` - No metrics will be exported from the cluster. - // - // If left as an empty string,`monitoring.googleapis.com/kubernetes` - // will be - // used for GKE 1.14+ or `monitoring.googleapis.com` for earlier + // use to write metrics. Currently available options: * + // "monitoring.googleapis.com/kubernetes" - The Cloud Monitoring service + // with a Kubernetes-native resource model * `monitoring.googleapis.com` + // - The legacy Cloud Monitoring service (no longer available as of GKE + // 1.15). * `none` - No metrics will be exported from the cluster. If + // left as an empty string,`monitoring.googleapis.com/kubernetes` will + // be used for GKE 1.14+ or `monitoring.googleapis.com` for earlier // versions. DesiredMonitoringService string `json:"desiredMonitoringService,omitempty"` // DesiredNodePoolAutoscaling: Autoscaler configuration for the node - // pool specified in - // desired_node_pool_id. If there is only one pool in the - // cluster and desired_node_pool_id is not provided then - // the change applies to that single node pool. 
+ // pool specified in desired_node_pool_id. If there is only one pool in + // the cluster and desired_node_pool_id is not provided then the change + // applies to that single node pool. DesiredNodePoolAutoscaling *NodePoolAutoscaling `json:"desiredNodePoolAutoscaling,omitempty"` // DesiredNodePoolId: The node pool to be upgraded. This field is - // mandatory if - // "desired_node_version", - // "desired_image_family", + // mandatory if "desired_node_version", "desired_image_family", // "desired_node_pool_autoscaling", or - // "desired_workload_metadata_config" - // is specified and there is more than one node pool on the cluster. + // "desired_workload_metadata_config" is specified and there is more + // than one node pool on the cluster. DesiredNodePoolId string `json:"desiredNodePoolId,omitempty"` // DesiredNodeVersion: The Kubernetes version to change the nodes to - // (typically an - // upgrade). - // - // Users may specify either explicit versions offered by - // Kubernetes Engine or version aliases, which have the following - // behavior: - // - // - "latest": picks the highest valid Kubernetes version - // - "1.X": picks the highest valid patch+gke.N patch in the 1.X - // version - // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - // - "1.X.Y-gke.N": picks an explicit Kubernetes version - // - "-": picks the Kubernetes master version + // (typically an upgrade). Users may specify either explicit versions + // offered by Kubernetes Engine or version aliases, which have the + // following behavior: - "latest": picks the highest valid Kubernetes + // version - "1.X": picks the highest valid patch+gke.N patch in the 1.X + // version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y + // version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "-": + // picks the Kubernetes master version DesiredNodeVersion string `json:"desiredNodeVersion,omitempty"` + // DesiredNotificationConfig: The desired notification configuration. + DesiredNotificationConfig *NotificationConfig `json:"desiredNotificationConfig,omitempty"` + // DesiredPodSecurityPolicyConfig: The desired configuration options for // the PodSecurityPolicy feature. DesiredPodSecurityPolicyConfig *PodSecurityPolicyConfig `json:"desiredPodSecurityPolicyConfig,omitempty"` @@ -1487,31 +1396,25 @@ func (s *ClusterUpdate) MarshalJSON() ([]byte, error) { // CompleteIPRotationRequest: CompleteIPRotationRequest moves the // cluster master back into single-IP mode. type CompleteIPRotationRequest struct { - // ClusterId: Required. Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the name field. + // ClusterId: Required. Deprecated. The name of the cluster. This field + // has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Name: The name (project, location, cluster id) of the cluster to - // complete IP - // rotation. Specified in the format + // complete IP rotation. Specified in the format // `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://developers.google.com/console/help/new/#projec - // tnumber). - // This field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber) + // . This field has been deprecated and replaced by the name field. 
ProjectId string `json:"projectId,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -1537,6 +1440,37 @@ func (s *CompleteIPRotationRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ConfidentialNodes: ConfidentialNodes is configuration for the +// confidential nodes feature, which makes nodes run on confidential +// VMs. +type ConfidentialNodes struct { + // Enabled: Whether Confidential Nodes feature is enabled for all nodes + // in this cluster. + Enabled bool `json:"enabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ConfidentialNodes) MarshalJSON() ([]byte, error) { + type NoMethod ConfidentialNodes + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ConfigConnectorConfig: Configuration options for the Config Connector // add-on. type ConfigConnectorConfig struct { @@ -1570,10 +1504,8 @@ func (s *ConfigConnectorConfig) MarshalJSON() ([]byte, error) { // metering. type ConsumptionMeteringConfig struct { // Enabled: Whether to enable consumption metering for this cluster. If - // enabled, a - // second BigQuery table will be created to hold resource - // consumption - // records. + // enabled, a second BigQuery table will be created to hold resource + // consumption records. Enabled bool `json:"enabled,omitempty"` // ForceSendFields is a list of field names (e.g. "Enabled") to @@ -1601,32 +1533,25 @@ func (s *ConsumptionMeteringConfig) MarshalJSON() ([]byte, error) { // CreateClusterRequest: CreateClusterRequest creates a cluster. type CreateClusterRequest struct { - // Cluster: Required. A - // [cluster - // resource](https://cloud.google.com/container-engine/reference - // /rest/v1beta1/projects.zones.clusters) + // Cluster: Required. A [cluster + // resource](https://cloud.google.com/container-engine/reference/rest/v1b + // eta1/projects.locations.clusters) Cluster *Cluster `json:"cluster,omitempty"` // Parent: The parent (project and location) where the cluster will be - // created. - // Specified in the format `projects/*/locations/*`. + // created. Specified in the format `projects/*/locations/*`. 
Parent string `json:"parent,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the parent field. + // [project ID or project + // number](https://support.google.com/cloud/answer/6158840). This field + // has been deprecated and replaced by the parent field. ProjectId string `json:"projectId,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // parent - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the parent field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "Cluster") to @@ -1655,34 +1580,28 @@ func (s *CreateClusterRequest) MarshalJSON() ([]byte, error) { // CreateNodePoolRequest: CreateNodePoolRequest creates a node pool for // a cluster. type CreateNodePoolRequest struct { - // ClusterId: Required. Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the parent field. + // ClusterId: Required. Deprecated. The name of the cluster. This field + // has been deprecated and replaced by the parent field. ClusterId string `json:"clusterId,omitempty"` // NodePool: Required. The node pool to create. NodePool *NodePool `json:"nodePool,omitempty"` // Parent: The parent (project, location, cluster id) where the node - // pool will be - // created. Specified in the format + // pool will be created. Specified in the format // `projects/*/locations/*/clusters/*`. Parent string `json:"parent,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://developers.google.com/console/help/new/#projec - // tnumber). - // This field has been deprecated and replaced by the parent field. + // [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber) + // . This field has been deprecated and replaced by the parent field. ProjectId string `json:"projectId,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // parent - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the parent field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -1712,14 +1631,12 @@ func (s *CreateNodePoolRequest) MarshalJSON() ([]byte, error) { // operations. type DailyMaintenanceWindow struct { // Duration: [Output only] Duration of the time window, automatically - // chosen to be - // smallest possible in the given scenario. + // chosen to be smallest possible in the given scenario. Duration string `json:"duration,omitempty"` // StartTime: Time within the maintenance window to start the - // maintenance operations. - // It must be in format "HH:MM", where HH : [00-23] and MM : [00-59] - // GMT. + // maintenance operations. It must be in format "HH:MM", where HH : + // [00-23] and MM : [00-59] GMT. 
StartTime string `json:"startTime,omitempty"` // ForceSendFields is a list of field names (e.g. "Duration") to @@ -1748,8 +1665,7 @@ func (s *DailyMaintenanceWindow) MarshalJSON() ([]byte, error) { // DatabaseEncryption: Configuration of etcd encryption. type DatabaseEncryption struct { // KeyName: Name of CloudKMS key to use for the encryption of secrets in - // etcd. - // Ex. + // etcd. Ex. // projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-ke // y KeyName string `json:"keyName,omitempty"` @@ -1760,8 +1676,8 @@ type DatabaseEncryption struct { // "UNKNOWN" - Should never be set // "ENCRYPTED" - Secrets in etcd are encrypted. // "DECRYPTED" - Secrets in etcd are stored in plain text (at etcd - // level) - this is - // unrelated to Compute Engine level full disk encryption. + // level) - this is unrelated to Compute Engine level full disk + // encryption. State string `json:"state,omitempty"` // ForceSendFields is a list of field names (e.g. "KeyName") to @@ -1787,6 +1703,35 @@ func (s *DatabaseEncryption) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DefaultSnatStatus: DefaultSnatStatus contains the desired state of +// whether default sNAT should be disabled on the cluster. +type DefaultSnatStatus struct { + // Disabled: Disables cluster default sNAT rules. + Disabled bool `json:"disabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Disabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Disabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DefaultSnatStatus) MarshalJSON() ([]byte, error) { + type NoMethod DefaultSnatStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DnsCacheConfig: Configuration for NodeLocal DNSCache type DnsCacheConfig struct { // Enabled: Whether NodeLocal DNSCache is enabled for this cluster. @@ -1816,17 +1761,11 @@ func (s *DnsCacheConfig) MarshalJSON() ([]byte, error) { } // Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. 
@@ -1834,8 +1773,8 @@ type Empty struct { } // GcePersistentDiskCsiDriverConfig: Configuration for the Compute -// Engine PD CSI driver. This option can only be -// enabled at cluster creation time. +// Engine PD CSI driver. This option can only be enabled at cluster +// creation time. type GcePersistentDiskCsiDriverConfig struct { // Enabled: Whether the Compute Engine PD CSI driver is enabled for this // cluster. @@ -1868,13 +1807,11 @@ func (s *GcePersistentDiskCsiDriverConfig) MarshalJSON() ([]byte, error) { // Key Set as specififed in rfc 7517 type GetJSONWebKeysResponse struct { // CacheHeader: OnePlatform automatically extracts this field and uses - // it to set the HTTP - // Cache-Control header. + // it to set the HTTP Cache-Control header. CacheHeader *HttpCacheControlResponseHeader `json:"cacheHeader,omitempty"` // Keys: The public component of the keys used by the cluster to sign - // token - // requests. + // token requests. Keys []*Jwk `json:"keys,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1905,12 +1842,11 @@ func (s *GetJSONWebKeysResponse) MarshalJSON() ([]byte, error) { } // GetOpenIDConfigResponse: GetOpenIDConfigResponse is an OIDC discovery -// document for the cluster. -// See the OpenID Connect Discovery 1.0 specification for details. +// document for the cluster. See the OpenID Connect Discovery 1.0 +// specification for details. type GetOpenIDConfigResponse struct { // CacheHeader: OnePlatform automatically extracts this field and uses - // it to set the HTTP - // Cache-Control header. + // it to set the HTTP Cache-Control header. CacheHeader *HttpCacheControlResponseHeader `json:"cacheHeader,omitempty"` // ClaimsSupported: Supported claims. @@ -1963,16 +1899,13 @@ func (s *GetOpenIDConfigResponse) MarshalJSON() ([]byte, error) { } // HorizontalPodAutoscaling: Configuration options for the horizontal -// pod autoscaling feature, which -// increases or decreases the number of replica pods a replication -// controller -// has based on the resource usage of the existing pods. +// pod autoscaling feature, which increases or decreases the number of +// replica pods a replication controller has based on the resource usage +// of the existing pods. type HorizontalPodAutoscaling struct { // Disabled: Whether the Horizontal Pod Autoscaling feature is enabled - // in the cluster. - // When enabled, it ensures that metrics are collected into - // Stackdriver - // Monitoring. + // in the cluster. When enabled, it ensures that metrics are collected + // into Stackdriver Monitoring. Disabled bool `json:"disabled,omitempty"` // ForceSendFields is a list of field names (e.g. "Disabled") to @@ -2034,15 +1967,12 @@ func (s *HttpCacheControlResponseHeader) MarshalJSON() ([]byte, error) { } // HttpLoadBalancing: Configuration options for the HTTP (L7) load -// balancing controller addon, -// which makes it easy to set up HTTP load balancers for services in a -// cluster. +// balancing controller addon, which makes it easy to set up HTTP load +// balancers for services in a cluster. type HttpLoadBalancing struct { // Disabled: Whether the HTTP Load Balancing controller is enabled in - // the cluster. - // When enabled, it runs a small pod in the cluster that manages the - // load - // balancers. + // the cluster. When enabled, it runs a small pod in the cluster that + // manages the load balancers. Disabled bool `json:"disabled,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Disabled") to @@ -2072,23 +2002,16 @@ func (s *HttpLoadBalancing) MarshalJSON() ([]byte, error) { // allocated in the cluster. type IPAllocationPolicy struct { // AllowRouteOverlap: If true, allow allocation of cluster CIDR ranges - // that overlap with certain - // kinds of network routes. By default we do not allow cluster CIDR - // ranges to - // intersect with any user declared routes. With allow_route_overlap == - // true, - // we allow overlapping with CIDR ranges that are larger than the - // cluster CIDR - // range. - // - // If this field is set to true, then cluster and services CIDRs must - // be - // fully-specified (e.g. `10.96.0.0/14`, but not `/14`), which means: - // 1) When `use_ip_aliases` is true, `cluster_ipv4_cidr_block` and - // `services_ipv4_cidr_block` must be fully-specified. - // 2) When `use_ip_aliases` is false, `cluster.cluster_ipv4_cidr` muse - // be - // fully-specified. + // that overlap with certain kinds of network routes. By default we do + // not allow cluster CIDR ranges to intersect with any user declared + // routes. With allow_route_overlap == true, we allow overlapping with + // CIDR ranges that are larger than the cluster CIDR range. If this + // field is set to true, then cluster and services CIDRs must be + // fully-specified (e.g. `10.96.0.0/14`, but not `/14`), which means: 1) + // When `use_ip_aliases` is true, `cluster_ipv4_cidr_block` and + // `services_ipv4_cidr_block` must be fully-specified. 2) When + // `use_ip_aliases` is false, `cluster.cluster_ipv4_cidr` muse be + // fully-specified. AllowRouteOverlap bool `json:"allowRouteOverlap,omitempty"` // ClusterIpv4Cidr: This field is deprecated, use @@ -2096,68 +2019,41 @@ type IPAllocationPolicy struct { ClusterIpv4Cidr string `json:"clusterIpv4Cidr,omitempty"` // ClusterIpv4CidrBlock: The IP address range for the cluster pod IPs. - // If this field is set, then - // `cluster.cluster_ipv4_cidr` must be left blank. - // - // This field is only applicable when `use_ip_aliases` is true. - // - // Set to blank to have a range chosen with the default size. - // - // Set to /netmask (e.g. `/14`) to have a range chosen with a - // specific - // netmask. - // - // Set to - // a + // If this field is set, then `cluster.cluster_ipv4_cidr` must be left + // blank. This field is only applicable when `use_ip_aliases` is true. + // Set to blank to have a range chosen with the default size. Set to + // /netmask (e.g. `/14`) to have a range chosen with a specific netmask. + // Set to a // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks - // (e.g. - // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific - // range - // to use. + // (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a + // specific range to use. ClusterIpv4CidrBlock string `json:"clusterIpv4CidrBlock,omitempty"` // ClusterSecondaryRangeName: The name of the secondary range to be used - // for the cluster CIDR - // block. The secondary range will be used for pod IP - // addresses. This must be an existing secondary range associated - // with the cluster subnetwork. - // - // This field is only applicable with use_ip_aliases - // and - // create_subnetwork is false. + // for the cluster CIDR block. The secondary range will be used for pod + // IP addresses. This must be an existing secondary range associated + // with the cluster subnetwork. 
This field is only applicable with + // use_ip_aliases and create_subnetwork is false. ClusterSecondaryRangeName string `json:"clusterSecondaryRangeName,omitempty"` // CreateSubnetwork: Whether a new subnetwork will be created - // automatically for the cluster. - // - // This field is only applicable when `use_ip_aliases` is true. + // automatically for the cluster. This field is only applicable when + // `use_ip_aliases` is true. CreateSubnetwork bool `json:"createSubnetwork,omitempty"` // NodeIpv4Cidr: This field is deprecated, use node_ipv4_cidr_block. NodeIpv4Cidr string `json:"nodeIpv4Cidr,omitempty"` // NodeIpv4CidrBlock: The IP address range of the instance IPs in this - // cluster. - // - // This is applicable only if `create_subnetwork` is true. - // - // Set to blank to have a range chosen with the default size. - // - // Set to /netmask (e.g. `/14`) to have a range chosen with a - // specific - // netmask. - // - // Set to - // a + // cluster. This is applicable only if `create_subnetwork` is true. Set + // to blank to have a range chosen with the default size. Set to + // /netmask (e.g. `/14`) to have a range chosen with a specific netmask. + // Set to a // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks - // (e.g. - // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific - // range - // to use. + // (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a + // specific range to use. NodeIpv4CidrBlock string `json:"nodeIpv4CidrBlock,omitempty"` // ServicesIpv4Cidr: This field is deprecated, use @@ -2165,86 +2061,52 @@ type IPAllocationPolicy struct { ServicesIpv4Cidr string `json:"servicesIpv4Cidr,omitempty"` // ServicesIpv4CidrBlock: The IP address range of the services IPs in - // this cluster. If blank, a range - // will be automatically chosen with the default size. - // - // This field is only applicable when `use_ip_aliases` is true. - // - // Set to blank to have a range chosen with the default size. - // - // Set to /netmask (e.g. `/14`) to have a range chosen with a - // specific - // netmask. - // - // Set to - // a + // this cluster. If blank, a range will be automatically chosen with the + // default size. This field is only applicable when `use_ip_aliases` is + // true. Set to blank to have a range chosen with the default size. Set + // to /netmask (e.g. `/14`) to have a range chosen with a specific + // netmask. Set to a // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks - // (e.g. - // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific - // range - // to use. + // (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a + // specific range to use. ServicesIpv4CidrBlock string `json:"servicesIpv4CidrBlock,omitempty"` // ServicesSecondaryRangeName: The name of the secondary range to be - // used as for the services - // CIDR block. The secondary range will be used for service - // ClusterIPs. This must be an existing secondary range associated - // with the cluster subnetwork. - // - // This field is only applicable with use_ip_aliases - // and - // create_subnetwork is false. + // used as for the services CIDR block. The secondary range will be used + // for service ClusterIPs. This must be an existing secondary range + // associated with the cluster subnetwork. 
This field is only applicable + // with use_ip_aliases and create_subnetwork is false. ServicesSecondaryRangeName string `json:"servicesSecondaryRangeName,omitempty"` // SubnetworkName: A custom subnetwork name to be used if - // `create_subnetwork` is true. If - // this field is empty, then an automatic name will be chosen for the - // new - // subnetwork. + // `create_subnetwork` is true. If this field is empty, then an + // automatic name will be chosen for the new subnetwork. SubnetworkName string `json:"subnetworkName,omitempty"` // TpuIpv4CidrBlock: The IP address range of the Cloud TPUs in this - // cluster. If unspecified, a - // range will be automatically chosen with the default size. - // - // This field is only applicable when `use_ip_aliases` is true. - // - // If unspecified, the range will use the default size. - // - // Set to /netmask (e.g. `/14`) to have a range chosen with a - // specific - // netmask. - // - // Set to - // a + // cluster. If unspecified, a range will be automatically chosen with + // the default size. This field is only applicable when `use_ip_aliases` + // is true. If unspecified, the range will use the default size. Set to + // /netmask (e.g. `/14`) to have a range chosen with a specific netmask. + // Set to a // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks - // (e.g. - // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific - // range - // to use. - // This field is deprecated, use cluster.tpu_config.ipv4_cidr_block - // instead. + // (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a + // specific range to use. This field is deprecated, use + // cluster.tpu_config.ipv4_cidr_block instead. TpuIpv4CidrBlock string `json:"tpuIpv4CidrBlock,omitempty"` // UseIpAliases: Whether alias IPs will be used for pod IPs in the - // cluster. - // This is used in conjunction with use_routes. It cannot - // be true if use_routes is true. If both use_ip_aliases and use_routes - // are + // cluster. This is used in conjunction with use_routes. It cannot be + // true if use_routes is true. If both use_ip_aliases and use_routes are // false, then the server picks the default IP allocation mode UseIpAliases bool `json:"useIpAliases,omitempty"` - // UseRoutes: Whether routes will be used for pod IPs in the - // cluster. - // This is used in conjunction with use_ip_aliases. It cannot be true - // if + // UseRoutes: Whether routes will be used for pod IPs in the cluster. + // This is used in conjunction with use_ip_aliases. It cannot be true if // use_ip_aliases is true. If both use_ip_aliases and use_routes are - // false, - // then the server picks the default IP allocation mode + // false, then the server picks the default IP allocation mode UseRoutes bool `json:"useRoutes,omitempty"` // ForceSendFields is a list of field names (e.g. "AllowRouteOverlap") @@ -2272,8 +2134,7 @@ func (s *IPAllocationPolicy) MarshalJSON() ([]byte, error) { } // IntraNodeVisibilityConfig: IntraNodeVisibilityConfig contains the -// desired config of the intra-node -// visibility on this cluster. +// desired config of the intra-node visibility on this cluster. type IntraNodeVisibilityConfig struct { // Enabled: Enables intra node visibility for this cluster. 
Enabled bool `json:"enabled,omitempty"` @@ -2446,16 +2307,12 @@ func (s *KubernetesDashboard) MarshalJSON() ([]byte, error) { } // LegacyAbac: Configuration for the legacy Attribute Based Access -// Control authorization -// mode. +// Control authorization mode. type LegacyAbac struct { // Enabled: Whether the ABAC authorizer is enabled for this cluster. - // When enabled, - // identities in the system, including service accounts, nodes, - // and - // controllers, will have statically granted permissions beyond - // those - // provided by the RBAC configuration or IAM. + // When enabled, identities in the system, including service accounts, + // nodes, and controllers, will have statically granted permissions + // beyond those provided by the RBAC configuration or IAM. Enabled bool `json:"enabled,omitempty"` // ForceSendFields is a list of field names (e.g. "Enabled") to @@ -2481,17 +2338,48 @@ func (s *LegacyAbac) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// LinuxNodeConfig: Parameters that can be configured on Linux nodes. +type LinuxNodeConfig struct { + // Sysctls: The Linux kernel parameters to be applied to the nodes and + // all pods running on the nodes. The following parameters are + // supported. net.core.netdev_max_backlog net.core.rmem_max + // net.core.wmem_default net.core.wmem_max net.core.optmem_max + // net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem + // net.ipv4.tcp_tw_reuse + Sysctls map[string]string `json:"sysctls,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Sysctls") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Sysctls") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LinuxNodeConfig) MarshalJSON() ([]byte, error) { + type NoMethod LinuxNodeConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ListClustersResponse: ListClustersResponse is the result of // ListClustersRequest. type ListClustersResponse struct { - // Clusters: A list of clusters in the project in the specified zone, - // or + // Clusters: A list of clusters in the project in the specified zone, or // across all ones. Clusters []*Cluster `json:"clusters,omitempty"` // MissingZones: If any zones are listed here, the list of clusters - // returned - // may be missing those zones. + // returned may be missing those zones. MissingZones []string `json:"missingZones,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2522,19 +2410,16 @@ func (s *ListClustersResponse) MarshalJSON() ([]byte, error) { } // ListLocationsResponse: ListLocationsResponse returns the list of all -// GKE locations and their -// recommendation state. +// GKE locations and their recommendation state. 
type ListLocationsResponse struct { // Locations: A full list of GKE locations. Locations []*Location `json:"locations,omitempty"` // NextPageToken: Only return ListLocationsResponse that occur after the - // page_token. This - // value should be populated from the - // ListLocationsResponse.next_page_token if - // that response token was set (which happens when listing more - // Locations than - // fit in a single ListLocationsResponse). + // page_token. This value should be populated from the + // ListLocationsResponse.next_page_token if that response token was set + // (which happens when listing more Locations than fit in a single + // ListLocationsResponse). NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2601,8 +2486,7 @@ func (s *ListNodePoolsResponse) MarshalJSON() ([]byte, error) { // ListOperationsRequest. type ListOperationsResponse struct { // MissingZones: If any zones are listed here, the list of operations - // returned - // may be missing the operations from those zones. + // returned may be missing the operations from those zones. MissingZones []string `json:"missingZones,omitempty"` // Operations: A list of operations in the project in the specified @@ -2637,17 +2521,13 @@ func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { } // ListUsableSubnetworksResponse: ListUsableSubnetworksResponse is the -// response of -// ListUsableSubnetworksRequest. +// response of ListUsableSubnetworksRequest. type ListUsableSubnetworksResponse struct { // NextPageToken: This token allows you to get the next page of results - // for list requests. - // If the number of results is larger than `page_size`, use - // the - // `next_page_token` as a value for the query parameter `page_token` in - // the - // next request. The value will become empty when there are no more - // pages. + // for list requests. If the number of results is larger than + // `page_size`, use the `next_page_token` as a value for the query + // parameter `page_token` in the next request. The value will become + // empty when there are no more pages. NextPageToken string `json:"nextPageToken,omitempty"` // Subnetworks: A list of usable subnetworks in the specified network @@ -2682,19 +2562,18 @@ func (s *ListUsableSubnetworksResponse) MarshalJSON() ([]byte, error) { } // Location: Location returns the location name, and if the location is -// recommended -// for GKE cluster scheduling. +// recommended for GKE cluster scheduling. type Location struct { - // Name: Contains the name of the resource requested. - // Specified in the format `projects/*/locations/*`. + // Name: Contains the name of the resource requested. Specified in the + // format `projects/*/locations/*`. Name string `json:"name,omitempty"` // Recommended: Whether the location is recomended for GKE cluster // scheduling. Recommended bool `json:"recommended,omitempty"` - // Type: Contains the type of location this Location is for. - // Regional or Zonal. + // Type: Contains the type of location this Location is for. Regional or + // Zonal. // // Possible values: // "LOCATION_TYPE_UNSPECIFIED" - LOCATION_TYPE_UNSPECIFIED means the @@ -2730,14 +2609,11 @@ func (s *Location) MarshalJSON() ([]byte, error) { // to be used for the cluster. 
type MaintenancePolicy struct { // ResourceVersion: A hash identifying the version of this policy, so - // that updates to fields of - // the policy won't accidentally undo intermediate changes (and so that - // users - // of the API unaware of some fields won't accidentally remove other - // fields). - // Make a get() request to the cluster to get the - // current - // resource version and include it with requests to set the policy. + // that updates to fields of the policy won't accidentally undo + // intermediate changes (and so that users of the API unaware of some + // fields won't accidentally remove other fields). Make a `get()` + // request to the cluster to get the current resource version and + // include it with requests to set the policy. ResourceVersion string `json:"resourceVersion,omitempty"` // Window: Specifies the maintenance window in which maintenance may be @@ -2776,15 +2652,13 @@ type MaintenanceWindow struct { DailyMaintenanceWindow *DailyMaintenanceWindow `json:"dailyMaintenanceWindow,omitempty"` // MaintenanceExclusions: Exceptions to maintenance window. - // Non-emergency maintenance should not - // occur in these windows. + // Non-emergency maintenance should not occur in these windows. MaintenanceExclusions map[string]TimeWindow `json:"maintenanceExclusions,omitempty"` // RecurringWindow: RecurringWindow specifies some number of recurring - // time periods for - // maintenance to occur. The time windows may be overlapping. If - // no - // maintenance windows are set, maintenance can occur at any time. + // time periods for maintenance to occur. The time windows may be + // overlapping. If no maintenance windows are set, maintenance can occur + // at any time. RecurringWindow *RecurringTimeWindow `json:"recurringWindow,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -2812,45 +2686,46 @@ func (s *MaintenanceWindow) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Master: Master is the configuration for components on master. +type Master struct { +} + // MasterAuth: The authentication information for accessing the master -// endpoint. -// Authentication can be done using HTTP basic auth or using -// client -// certificates. +// endpoint. Authentication can be done using HTTP basic auth or using +// client certificates. type MasterAuth struct { // ClientCertificate: [Output only] Base64-encoded public certificate - // used by clients to - // authenticate to the cluster endpoint. + // used by clients to authenticate to the cluster endpoint. ClientCertificate string `json:"clientCertificate,omitempty"` // ClientCertificateConfig: Configuration for client certificate - // authentication on the cluster. For - // clusters before v1.12, if no configuration is specified, a - // client - // certificate is issued. + // authentication on the cluster. For clusters before v1.12, if no + // configuration is specified, a client certificate is issued. ClientCertificateConfig *ClientCertificateConfig `json:"clientCertificateConfig,omitempty"` // ClientKey: [Output only] Base64-encoded private key used by clients - // to authenticate - // to the cluster endpoint. + // to authenticate to the cluster endpoint. ClientKey string `json:"clientKey,omitempty"` ClusterCaCertificate string `json:"clusterCaCertificate,omitempty"` // Password: The password to use for HTTP basic authentication to the - // master endpoint. 
- // Because the master endpoint is open to the Internet, you should - // create a - // strong password. If a password is provided for cluster creation, - // username - // must be non-empty. + // master endpoint. Because the master endpoint is open to the Internet, + // you should create a strong password. If a password is provided for + // cluster creation, username must be non-empty. Warning: basic + // authentication is deprecated, and will be removed in GKE control + // plane versions 1.19 and newer. For a list of recommended + // authentication methods, see: + // https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication Password string `json:"password,omitempty"` // Username: The username to use for HTTP basic authentication to the - // master endpoint. - // For clusters v1.6.0 and later, basic authentication can be disabled - // by - // leaving username unspecified (or setting it to the empty string). + // master endpoint. For clusters v1.6.0 and later, basic authentication + // can be disabled by leaving username unspecified (or setting it to the + // empty string). Warning: basic authentication is deprecated, and will + // be removed in GKE control plane versions 1.19 and newer. For a list + // of recommended authentication methods, see: + // https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication Username string `json:"username,omitempty"` // ForceSendFields is a list of field names (e.g. "ClientCertificate") @@ -2878,16 +2753,13 @@ func (s *MasterAuth) MarshalJSON() ([]byte, error) { } // MasterAuthorizedNetworksConfig: Configuration options for the master -// authorized networks feature. Enabled -// master authorized networks will disallow all external traffic to -// access -// Kubernetes master through HTTPS except traffic from the given CIDR -// blocks, -// Google Compute Engine Public IPs and Google Prod IPs. +// authorized networks feature. Enabled master authorized networks will +// disallow all external traffic to access Kubernetes master through +// HTTPS except traffic from the given CIDR blocks, Google Compute +// Engine Public IPs and Google Prod IPs. type MasterAuthorizedNetworksConfig struct { // CidrBlocks: cidr_blocks define up to 10 external networks that could - // access - // Kubernetes master through HTTPS. + // access Kubernetes master through HTTPS. CidrBlocks []*CidrBlock `json:"cidrBlocks,omitempty"` // Enabled: Whether or not master authorized networks is enabled. @@ -3000,45 +2872,58 @@ func (s *Metric) UnmarshalJSON(data []byte) error { // NetworkConfig: NetworkConfig reports the relative names of network & // subnetwork. type NetworkConfig struct { + // DatapathProvider: The desired datapath provider for this cluster. By + // default, uses the IPTables-based kube-proxy implementation. + // + // Possible values: + // "DATAPATH_PROVIDER_UNSPECIFIED" - Default value. + // "LEGACY_DATAPATH" - Use the IPTables implementation based on + // kube-proxy. + // "ADVANCED_DATAPATH" - Use the eBPF based GKE Dataplane V2 with + // additional features. See the [GKE Dataplane V2 + // documentation](https://cloud.google.com/kubernetes-enginw/docs/how-to/ + // dataplane-v2) for more. + DatapathProvider string `json:"datapathProvider,omitempty"` + + // DefaultSnatStatus: Whether the cluster disables default in-node sNAT + // rules. In-node sNAT rules will be disabled when default_snat_status + // is disabled. 
When disabled is set to false, default IP masquerade + // rules will be applied to the nodes to prevent sNAT on cluster + // internal traffic. + DefaultSnatStatus *DefaultSnatStatus `json:"defaultSnatStatus,omitempty"` + // EnableIntraNodeVisibility: Whether Intra-node visibility is enabled - // for this cluster. - // This makes same node pod to pod traffic visible for VPC network. + // for this cluster. This makes same node pod to pod traffic visible for + // VPC network. EnableIntraNodeVisibility bool `json:"enableIntraNodeVisibility,omitempty"` - // Network: Output only. The relative name of the Google Compute - // Engine - // network(https://cloud.google.com/compute/docs/networks-and-fire - // walls#networks) - // to which the cluster is connected. - // Example: + // Network: Output only. The relative name of the Google Compute Engine + // network(https://cloud.google.com/compute/docs/networks-and-firewalls#n + // etworks) to which the cluster is connected. Example: // projects/my-project/global/networks/my-network Network string `json:"network,omitempty"` // Subnetwork: Output only. The relative name of the Google Compute - // Engine - // [subnetwork](https://cloud.google.com/compute/docs/vpc) to which - // the - // cluster is connected. - // Example: + // Engine [subnetwork](https://cloud.google.com/compute/docs/vpc) to + // which the cluster is connected. Example: // projects/my-project/regions/us-central1/subnetworks/my-subnet Subnetwork string `json:"subnetwork,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "EnableIntraNodeVisibility") to unconditionally include in API - // requests. By default, fields with empty values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // ForceSendFields is a list of field names (e.g. "DatapathProvider") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. - // "EnableIntraNodeVisibility") to include in API requests with the JSON - // null value. By default, fields with empty values are omitted from API - // requests. However, any field with an empty value appearing in - // NullFields will be sent to the server as null. It is an error if a - // field in this list has a non-empty value. This may be used to include - // null fields in Patch requests. + // NullFields is a list of field names (e.g. "DatapathProvider") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -3048,10 +2933,8 @@ func (s *NetworkConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkPolicy: Configuration options for the NetworkPolicy -// feature. 
-// https://kubernetes.io/docs/concepts/services-networking/netwo -// rkpolicies/ +// NetworkPolicy: Configuration options for the NetworkPolicy feature. +// https://kubernetes.io/docs/concepts/services-networking/networkpolicies/ type NetworkPolicy struct { // Enabled: Whether network policy is enabled on the cluster. Enabled bool `json:"enabled,omitempty"` @@ -3087,10 +2970,8 @@ func (s *NetworkPolicy) MarshalJSON() ([]byte, error) { } // NetworkPolicyConfig: Configuration for NetworkPolicy. This only -// tracks whether the addon -// is enabled or not on the Master, it does not track whether network -// policy -// is enabled for the nodes. +// tracks whether the addon is enabled or not on the Master, it does not +// track whether network policy is enabled for the nodes. type NetworkPolicyConfig struct { // Disabled: Whether NetworkPolicy is enabled for this cluster. Disabled bool `json:"disabled,omitempty"` @@ -3121,204 +3002,140 @@ func (s *NetworkPolicyConfig) MarshalJSON() ([]byte, error) { // NodeConfig: Parameters that describe the nodes in a cluster. type NodeConfig struct { // Accelerators: A list of hardware accelerators to be attached to each - // node. - // See https://cloud.google.com/compute/docs/gpus for more information - // about - // support for GPUs. + // node. See https://cloud.google.com/compute/docs/gpus for more + // information about support for GPUs. Accelerators []*AcceleratorConfig `json:"accelerators,omitempty"` - // BootDiskKmsKey: - // The Customer Managed Encryption Key used to encrypt the boot disk - // attached - // to each node in the node pool. This should be of the - // form - // projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAM - // E]/cryptoKeys/[KEY_NAME]. - // For more information about protecting resources with Cloud KMS Keys - // please - // see: - // https://cloud.google.com/compute/docs/disks/customer-manag - // ed-encryption + // BootDiskKmsKey: The Customer Managed Encryption Key used to encrypt + // the boot disk attached to each node in the node pool. This should be + // of the form + // projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cr + // yptoKeys/[KEY_NAME]. For more information about protecting resources + // with Cloud KMS Keys please see: + // https://cloud.google.com/compute/docs/disks/customer-managed-encryption BootDiskKmsKey string `json:"bootDiskKmsKey,omitempty"` - // DiskSizeGb: Size of the disk attached to each node, specified in - // GB. - // The smallest allowed disk size is 10GB. - // - // If unspecified, the default disk size is 100GB. + // DiskSizeGb: Size of the disk attached to each node, specified in GB. + // The smallest allowed disk size is 10GB. If unspecified, the default + // disk size is 100GB. DiskSizeGb int64 `json:"diskSizeGb,omitempty"` // DiskType: Type of the disk attached to each node (e.g. 'pd-standard' - // or 'pd-ssd') - // - // If unspecified, the default disk type is 'pd-standard' + // or 'pd-ssd') If unspecified, the default disk type is 'pd-standard' DiskType string `json:"diskType,omitempty"` // ImageType: The image type to use for this node. Note that for a given - // image type, - // the latest version of it will be used. + // image type, the latest version of it will be used. ImageType string `json:"imageType,omitempty"` + // KubeletConfig: Node kubelet configs. + KubeletConfig *NodeKubeletConfig `json:"kubeletConfig,omitempty"` + // Labels: The map of Kubernetes labels (key/value pairs) to be applied - // to each node. 
- // These will added in addition to any default label(s) that - // Kubernetes may apply to the node. - // In case of conflict in label keys, the applied set may differ - // depending on - // the Kubernetes version -- it's best to assume the behavior is - // undefined - // and conflicts should be avoided. - // For more information, including usage and the valid values, - // see: - // https://kubernetes.io/docs/concepts/overview/working-with-objects - // /labels/ + // to each node. These will added in addition to any default label(s) + // that Kubernetes may apply to the node. In case of conflict in label + // keys, the applied set may differ depending on the Kubernetes version + // -- it's best to assume the behavior is undefined and conflicts should + // be avoided. For more information, including usage and the valid + // values, see: + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ Labels map[string]string `json:"labels,omitempty"` + // LinuxNodeConfig: Parameters that can be configured on Linux nodes. + LinuxNodeConfig *LinuxNodeConfig `json:"linuxNodeConfig,omitempty"` + // LocalSsdCount: The number of local SSD disks to be attached to the - // node. - // - // The limit for this value is dependent upon the maximum number - // of - // disks available on a machine per zone. - // See: - // https://cloud.google.com/compute/docs/disks/local-ssd - // for more information. + // node. The limit for this value is dependent upon the maximum number + // of disks available on a machine per zone. See: + // https://cloud.google.com/compute/docs/disks/local-ssd for more + // information. LocalSsdCount int64 `json:"localSsdCount,omitempty"` - // MachineType: The name of a Google Compute Engine - // [machine - // type](https://cloud.google.com/compute/docs/machine-types) - // (e.g. - // `n1-standard-1`). - // - // If unspecified, the default machine type is - // `n1-standard-1`. + // MachineType: The name of a Google Compute Engine [machine + // type](https://cloud.google.com/compute/docs/machine-types). If + // unspecified, the default machine type is `e2-medium`. MachineType string `json:"machineType,omitempty"` // Metadata: The metadata key/value pairs assigned to instances in the - // cluster. - // - // Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 - // bytes - // in length. These are reflected as part of a URL in the metadata - // server. - // Additionally, to avoid ambiguity, keys must not conflict with any - // other - // metadata keys for the project or be one of the reserved keys: - // "cluster-location" - // "cluster-name" - // "cluster-uid" - // "configure-sh" - // "containerd-configure-sh" - // "enable-oslogin" - // "gci-ensure-gke-docker" - // "gci-metrics-enabled" - // "gci-update-strategy" - // "instance-template" - // "kube-env" - // "startup-script" - // "user-data" - // "disable-address-manager" - // "windows-startup-script-ps1" - // "common-psm1" - // "k8s-node-setup-psm1" - // "install-ssh-psm1" - // "user-profile-psm1" - // "serial-port-logging-enable" - // Values are free-form strings, and only have meaning as interpreted - // by - // the image running in the instance. The only restriction placed on - // them is - // that each value's size must be less than or equal to 32 KB. - // - // The total size of all keys and values must be less than 512 KB. + // cluster. Keys must conform to the regexp `[a-zA-Z0-9-_]+` and be less + // than 128 bytes in length. These are reflected as part of a URL in the + // metadata server. 
Additionally, to avoid ambiguity, keys must not + // conflict with any other metadata keys for the project or be one of + // the reserved keys: - "cluster-location" - "cluster-name" - + // "cluster-uid" - "configure-sh" - "containerd-configure-sh" - + // "enable-oslogin" - "gci-ensure-gke-docker" - "gci-metrics-enabled" - + // "gci-update-strategy" - "instance-template" - "kube-env" - + // "startup-script" - "user-data" - "disable-address-manager" - + // "windows-startup-script-ps1" - "common-psm1" - "k8s-node-setup-psm1" + // - "install-ssh-psm1" - "user-profile-psm1" - + // "serial-port-logging-enable" Values are free-form strings, and only + // have meaning as interpreted by the image running in the instance. The + // only restriction placed on them is that each value's size must be + // less than or equal to 32 KB. The total size of all keys and values + // must be less than 512 KB. Metadata map[string]string `json:"metadata,omitempty"` // MinCpuPlatform: Minimum CPU platform to be used by this instance. The - // instance may be - // scheduled on the specified or newer CPU platform. Applicable values - // are the - // friendly names of CPU platforms, such as - // minCpuPlatform: "Intel Haswell" - // or - // minCpuPlatform: "Intel Sandy Bridge". For - // more - // information, read [how to specify min - // CPU - // platform](https://cloud.google.com/compute/docs/instances/specify- - // min-cpu-platform) + // instance may be scheduled on the specified or newer CPU platform. + // Applicable values are the friendly names of CPU platforms, such as + // `minCpuPlatform: "Intel Haswell" or `minCpuPlatform: "Intel Sandy + // Bridge". For more information, read [how to specify min CPU + // platform](https://cloud.google.com/compute/docs/instances/specify-min- + // cpu-platform) MinCpuPlatform string `json:"minCpuPlatform,omitempty"` + // NodeGroup: Setting this field will assign instances of this pool to + // run on the specified node group. This is useful for running workloads + // on [sole tenant + // nodes](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes). + NodeGroup string `json:"nodeGroup,omitempty"` + // OauthScopes: The set of Google API scopes to be made available on all - // of the - // node VMs under the "default" service account. - // - // The following scopes are recommended, but not required, and by - // default are - // not included: - // - // * `https://www.googleapis.com/auth/compute` is required for - // mounting - // persistent storage on your nodes. - // * `https://www.googleapis.com/auth/devstorage.read_only` is required - // for - // communicating with **gcr.io** - // (the [Google - // Container - // Registry](https://cloud.google.com/container-registry/)). - // - // I - // f unspecified, no scopes are added, unless Cloud Logging or - // Cloud + // of the node VMs under the "default" service account. The following + // scopes are recommended, but not required, and by default are not + // included: * `https://www.googleapis.com/auth/compute` is required for + // mounting persistent storage on your nodes. * + // `https://www.googleapis.com/auth/devstorage.read_only` is required + // for communicating with **gcr.io** (the [Google Container + // Registry](https://cloud.google.com/container-registry/)). If + // unspecified, no scopes are added, unless Cloud Logging or Cloud // Monitoring are enabled, in which case their required scopes will be // added. 
OauthScopes []string `json:"oauthScopes,omitempty"` // Preemptible: Whether the nodes are created as preemptible VM - // instances. - // See: - // https://cloud.google.com/compute/docs/instances/preemptible for - // more + // instances. See: + // https://cloud.google.com/compute/docs/instances/preemptible for more // inforamtion about preemptible VM instances. Preemptible bool `json:"preemptible,omitempty"` // ReservationAffinity: The optional reservation affinity. Setting this - // field will apply - // the specified [Zonal - // Compute - // Reservation](https://cloud.google.com/compute/docs/instances/r - // eserving-zonal-resources) - // to this node pool. + // field will apply the specified [Zonal Compute + // Reservation](https://cloud.google.com/compute/docs/instances/reserving + // -zonal-resources) to this node pool. ReservationAffinity *ReservationAffinity `json:"reservationAffinity,omitempty"` // SandboxConfig: Sandbox configuration for this node. SandboxConfig *SandboxConfig `json:"sandboxConfig,omitempty"` // ServiceAccount: The Google Cloud Platform Service Account to be used - // by the node VMs. - // Specify the email address of the Service Account; otherwise, if no - // Service - // Account is specified, the "default" service account is used. + // by the node VMs. Specify the email address of the Service Account; + // otherwise, if no Service Account is specified, the "default" service + // account is used. ServiceAccount string `json:"serviceAccount,omitempty"` // ShieldedInstanceConfig: Shielded Instance options. ShieldedInstanceConfig *ShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` // Tags: The list of instance tags applied to all nodes. Tags are used - // to identify - // valid sources or targets for network firewalls and are specified - // by - // the client during cluster or node pool creation. Each tag within the - // list - // must comply with RFC1035. + // to identify valid sources or targets for network firewalls and are + // specified by the client during cluster or node pool creation. Each + // tag within the list must comply with RFC1035. Tags []string `json:"tags,omitempty"` - // Taints: List of kubernetes taints to be applied to each node. - // - // For more information, including usage and the valid values, - // see: - // https://kubernetes.io/docs/concepts/configuration/taint-and-toler - // ation/ + // Taints: List of kubernetes taints to be applied to each node. For + // more information, including usage and the valid values, see: + // https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ Taints []*NodeTaint `json:"taints,omitempty"` // WorkloadMetadataConfig: The workload metadata configuration for this @@ -3348,9 +3165,56 @@ func (s *NodeConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NodeKubeletConfig: Node kubelet configs. +type NodeKubeletConfig struct { + // CpuCfsQuota: Enable CPU CFS quota enforcement for containers that + // specify CPU limits. This option is enabled by default which makes + // kubelet use CFS quota + // (https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt) to + // enforce container CPU limits. Otherwise, CPU limits will not be + // enforced at all. Disable this option to mitigate CPU throttling + // problems while still having your pods to be in Guaranteed QoS class + // by specifying the CPU limits. The default value is 'true' if + // unspecified. 
+ CpuCfsQuota bool `json:"cpuCfsQuota,omitempty"` + + // CpuCfsQuotaPeriod: Set the CPU CFS quota period value + // 'cpu.cfs_period_us'. The string must be a sequence of decimal + // numbers, each with optional fraction and a unit suffix, such as + // "300ms". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", + // "h". The value must be a positive duration. + CpuCfsQuotaPeriod string `json:"cpuCfsQuotaPeriod,omitempty"` + + // CpuManagerPolicy: Control the CPU management policy on the node. See + // https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/ The following values are allowed. - "none": the default, which represents the existing scheduling behavior. - "static": allows pods with certain resource characteristics to be granted increased CPU affinity and exclusivity on the node. The default value is 'none' if + // unspecified. + CpuManagerPolicy string `json:"cpuManagerPolicy,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CpuCfsQuota") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CpuCfsQuota") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NodeKubeletConfig) MarshalJSON() ([]byte, error) { + type NoMethod NodeKubeletConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // NodeManagement: NodeManagement defines the set of node management -// services turned on for the -// node pool. +// services turned on for the node pool. type NodeManagement struct { // AutoRepair: Whether the nodes will be automatically repaired. AutoRepair bool `json:"autoRepair,omitempty"` @@ -3385,20 +3249,14 @@ func (s *NodeManagement) MarshalJSON() ([]byte, error) { } // NodePool: NodePool contains the name and configuration for a -// cluster's node pool. -// Node pools are a set of nodes (i.e. VM's), with a common -// configuration and -// specification, under the control of the cluster master. They may have -// a set -// of Kubernetes labels applied to them, which may be used to reference -// them -// during pod scheduling. They may also be resized up or down, to -// accommodate -// the workload. +// cluster's node pool. Node pools are a set of nodes (i.e. VM's), with +// a common configuration and specification, under the control of the +// cluster master. They may have a set of Kubernetes labels applied to +// them, which may be used to reference them during pod scheduling. They +// may also be resized up or down, to accommodate the workload. type NodePool struct { // Autoscaling: Autoscaler configuration for this NodePool. Autoscaler - // is enabled - // only if a valid configuration is present. + // is enabled only if a valid configuration is present. 
Autoscaling *NodePoolAutoscaling `json:"autoscaling,omitempty"` // Conditions: Which conditions caused the current node pool state. @@ -3408,34 +3266,28 @@ type NodePool struct { Config *NodeConfig `json:"config,omitempty"` // InitialNodeCount: The initial node count for the pool. You must - // ensure that your - // Compute Engine resource - // quota - // is sufficient for this number of instances. You must also have - // available - // firewall and routes quota. + // ensure that your Compute Engine [resource + // quota](https://cloud.google.com/compute/quotas) is sufficient for + // this number of instances. You must also have available firewall and + // routes quota. InitialNodeCount int64 `json:"initialNodeCount,omitempty"` // InstanceGroupUrls: [Output only] The resource URLs of the [managed // instance - // groups](https://cloud.google.com/compute/docs/instance-groups - // /creating-groups-of-managed-instances) - // associated with this node pool. + // groups](https://cloud.google.com/compute/docs/instance-groups/creating + // -groups-of-managed-instances) associated with this node pool. InstanceGroupUrls []string `json:"instanceGroupUrls,omitempty"` - // Locations: The list of Google Compute - // Engine + // Locations: The list of Google Compute Engine // [zones](https://cloud.google.com/compute/docs/zones#available) in - // which the - // NodePool's nodes should be located. + // which the NodePool's nodes should be located. Locations []string `json:"locations,omitempty"` // Management: NodeManagement configuration for this NodePool. Management *NodeManagement `json:"management,omitempty"` // MaxPodsConstraint: The constraint on the maximum number of pods that - // can be run - // simultaneously on a node in the node pool. + // can be run simultaneously on a node in the node pool. MaxPodsConstraint *MaxPodsConstraint `json:"maxPodsConstraint,omitempty"` // Name: The name of the node pool. @@ -3455,29 +3307,22 @@ type NodePool struct { // "PROVISIONING" - The PROVISIONING state indicates the node pool is // being created. // "RUNNING" - The RUNNING state indicates the node pool has been - // created - // and is fully usable. + // created and is fully usable. // "RUNNING_WITH_ERROR" - The RUNNING_WITH_ERROR state indicates the - // node pool has been created - // and is partially usable. Some error state has occurred and - // some - // functionality may be impaired. Customer may need to reissue a - // request - // or trigger a new update. + // node pool has been created and is partially usable. Some error state + // has occurred and some functionality may be impaired. Customer may + // need to reissue a request or trigger a new update. // "RECONCILING" - The RECONCILING state indicates that some work is - // actively being done on - // the node pool, such as upgrading node software. Details can - // be found in the `statusMessage` field. + // actively being done on the node pool, such as upgrading node + // software. Details can be found in the `statusMessage` field. // "STOPPING" - The STOPPING state indicates the node pool is being // deleted. // "ERROR" - The ERROR state indicates the node pool may be unusable. - // Details - // can be found in the `statusMessage` field. + // Details can be found in the `statusMessage` field. Status string `json:"status,omitempty"` // StatusMessage: [Output only] Additional information about the current - // status of this - // node pool instance, if available. + // status of this node pool instance, if available. 
StatusMessage string `json:"statusMessage,omitempty"` // UpgradeSettings: Upgrade settings control disruption and speed of the @@ -3515,8 +3360,8 @@ func (s *NodePool) MarshalJSON() ([]byte, error) { } // NodePoolAutoscaling: NodePoolAutoscaling contains information -// required by cluster autoscaler to -// adjust the size of the node pool to the current cluster usage. +// required by cluster autoscaler to adjust the size of the node pool to +// the current cluster usage. type NodePoolAutoscaling struct { // Autoprovisioned: Can this node pool be deleted automatically. Autoprovisioned bool `json:"autoprovisioned,omitempty"` @@ -3525,13 +3370,11 @@ type NodePoolAutoscaling struct { Enabled bool `json:"enabled,omitempty"` // MaxNodeCount: Maximum number of nodes in the NodePool. Must be >= - // min_node_count. There - // has to enough quota to scale up the cluster. + // min_node_count. There has to enough quota to scale up the cluster. MaxNodeCount int64 `json:"maxNodeCount,omitempty"` // MinNodeCount: Minimum number of nodes in the NodePool. Must be >= 1 - // and <= - // max_node_count. + // and <= max_node_count. MinNodeCount int64 `json:"minNodeCount,omitempty"` // ForceSendFields is a list of field names (e.g. "Autoprovisioned") to @@ -3559,14 +3402,10 @@ func (s *NodePoolAutoscaling) MarshalJSON() ([]byte, error) { } // NodeTaint: Kubernetes taint is comprised of three fields: key, value, -// and effect. Effect -// can only be one of three types: NoSchedule, PreferNoSchedule or -// NoExecute. -// -// See -// [here](https://kubernetes.io/docs/concepts/configurati -// on/taint-and-toleration) -// for more information, including usage and the valid values. +// and effect. Effect can only be one of three types: NoSchedule, +// PreferNoSchedule or NoExecute. See +// [here](https://kubernetes.io/docs/concepts/configuration/taint-and-tol +// eration) for more information, including usage and the valid values. type NodeTaint struct { // Effect: Effect for taint. // @@ -3606,36 +3445,65 @@ func (s *NodeTaint) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NotificationConfig: NotificationConfig is the configuration of +// notifications. +type NotificationConfig struct { + // Pubsub: Notification config for Pub/Sub. + Pubsub *PubSub `json:"pubsub,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Pubsub") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Pubsub") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *NotificationConfig) MarshalJSON() ([]byte, error) { + type NoMethod NotificationConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Operation: This operation resource represents operations that may -// have happened or are -// happening on the cluster. All fields are output only. +// have happened or are happening on the cluster. All fields are output +// only. type Operation struct { // ClusterConditions: Which conditions caused the current cluster state. + // Deprecated. Use field error instead. ClusterConditions []*StatusCondition `json:"clusterConditions,omitempty"` // Detail: Detailed operation progress, if available. Detail string `json:"detail,omitempty"` - // EndTime: [Output only] The time the operation completed, - // in + // EndTime: [Output only] The time the operation completed, in // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. EndTime string `json:"endTime,omitempty"` - // Location: [Output only] The name of the Google Compute - // Engine - // [zone](https://cloud.google.com/compute/docs/regions-zones/regi - // ons-zones#available) - // or - // [region](https://cloud.google.com/compute/docs - // /regions-zones/regions-zones#available) - // in which the cluster resides. + // Error: The error result of the operation in case of failure. + Error *Status `json:"error,omitempty"` + + // Location: [Output only] The name of the Google Compute Engine + // [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zon + // es#available) or + // [region](https://cloud.google.com/compute/docs/regions-zones/regions-z + // ones#available) in which the cluster resides. Location string `json:"location,omitempty"` // Name: The server-assigned ID for the operation. Name string `json:"name,omitempty"` // NodepoolConditions: Which conditions caused the current node pool - // state. + // state. Deprecated. Use field error instead. NodepoolConditions []*StatusCondition `json:"nodepoolConditions,omitempty"` // OperationType: The operation type. @@ -3667,8 +3535,7 @@ type Operation struct { // SelfLink: Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // StartTime: [Output only] The time the operation started, - // in + // StartTime: [Output only] The time the operation started, in // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. StartTime string `json:"startTime,omitempty"` @@ -3683,18 +3550,16 @@ type Operation struct { Status string `json:"status,omitempty"` // StatusMessage: Output only. If an error has occurred, a textual - // description of the error. + // description of the error. Deprecated. Use field error instead. StatusMessage string `json:"statusMessage,omitempty"` // TargetLink: Server-defined URL for the target of the operation. TargetLink string `json:"targetLink,omitempty"` - // Zone: The name of the Google Compute - // Engine + // Zone: The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // operation is taking place. This field is deprecated, use location - // instead. + // which the operation is taking place. This field is deprecated, use + // location instead. Zone string `json:"zone,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -3728,23 +3593,21 @@ func (s *Operation) MarshalJSON() ([]byte, error) { // OperationProgress: Information about operation (or operation stage) // progress. 
type OperationProgress struct { - // Metrics: Progress metric bundle, for example: - // metrics: [{name: "nodes done", int_value: 15}, - // {name: "nodes total", int_value: 32}] - // or - // metrics: [{name: "progress", double_value: 0.56}, - // {name: "progress scale", double_value: 1.0}] + // Metrics: Progress metric bundle, for example: metrics: [{name: "nodes + // done", int_value: 15}, {name: "nodes total", int_value: 32}] or + // metrics: [{name: "progress", double_value: 0.56}, {name: "progress + // scale", double_value: 1.0}] Metrics []*Metric `json:"metrics,omitempty"` - // Name: A non-parameterized string describing an operation stage. - // Unset for single-stage operations. + // Name: A non-parameterized string describing an operation stage. Unset + // for single-stage operations. Name string `json:"name,omitempty"` // Stages: Substages of an operation or a stage. Stages []*OperationProgress `json:"stages,omitempty"` - // Status: Status of an operation stage. - // Unset for single-stage operations. + // Status: Status of an operation stage. Unset for single-stage + // operations. // // Possible values: // "STATUS_UNSPECIFIED" - Not set. @@ -3781,8 +3644,7 @@ func (s *OperationProgress) MarshalJSON() ([]byte, error) { // feature. type PodSecurityPolicyConfig struct { // Enabled: Enable the PodSecurityPolicy controller for this cluster. If - // enabled, pods - // must be valid under a PodSecurityPolicy to be created. + // enabled, pods must be valid under a PodSecurityPolicy to be created. Enabled bool `json:"enabled,omitempty"` // ForceSendFields is a list of field names (e.g. "Enabled") to @@ -3815,22 +3677,18 @@ type PrivateClusterConfig struct { EnablePrivateEndpoint bool `json:"enablePrivateEndpoint,omitempty"` // EnablePrivateNodes: Whether nodes have internal IP addresses only. If - // enabled, all nodes are - // given only RFC 1918 private addresses and communicate with the master - // via - // private networking. + // enabled, all nodes are given only RFC 1918 private addresses and + // communicate with the master via private networking. EnablePrivateNodes bool `json:"enablePrivateNodes,omitempty"` // MasterGlobalAccessConfig: Controls master global access settings. MasterGlobalAccessConfig *PrivateClusterMasterGlobalAccessConfig `json:"masterGlobalAccessConfig,omitempty"` // MasterIpv4CidrBlock: The IP range in CIDR notation to use for the - // hosted master network. This - // range will be used for assigning internal IP addresses to the master - // or - // set of masters, as well as the ILB VIP. This range must not overlap - // with - // any other ranges in use within the cluster's network. + // hosted master network. This range will be used for assigning internal + // IP addresses to the master or set of masters, as well as the ILB VIP. + // This range must not overlap with any other ranges in use within the + // cluster's network. MasterIpv4CidrBlock string `json:"masterIpv4CidrBlock,omitempty"` // PeeringName: Output only. The peering name in the customer VPC used @@ -3899,43 +3757,60 @@ func (s *PrivateClusterMasterGlobalAccessConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// PubSub: Pub/Sub specific notification config. +type PubSub struct { + // Enabled: Enable notifications for Pub/Sub. + Enabled bool `json:"enabled,omitempty"` + + // Topic: The desired Pub/Sub topic to which notifications will be sent + // by GKE. Format is `projects/{project}/topics/{topic}`. 
+ Topic string `json:"topic,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PubSub) MarshalJSON() ([]byte, error) { + type NoMethod PubSub + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // RecurringTimeWindow: Represents an arbitrary window of time that // recurs. type RecurringTimeWindow struct { // Recurrence: An RRULE - // (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for how - // this window reccurs. They go on for the span of time between the - // start and - // end time. - // - // For example, to have something repeat every weekday, you'd use: - // FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR - // To repeat some window daily (equivalent to the - // DailyMaintenanceWindow): - // FREQ=DAILY - // For the first weekend of every month: - // FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU + // (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for how this + // window reccurs. They go on for the span of time between the start and + // end time. For example, to have something repeat every weekday, you'd + // use: `FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR` To repeat some window daily + // (equivalent to the DailyMaintenanceWindow): `FREQ=DAILY` For the + // first weekend of every month: `FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU` // This specifies how frequently the window starts. Eg, if you wanted to - // have - // a 9-5 UTC-4 window every weekday, you'd use something like: - // - // start time = 2019-01-01T09:00:00-0400 - // end time = 2019-01-01T17:00:00-0400 - // recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR - // - // Windows can span multiple days. Eg, to make the window encompass - // every - // weekend from midnight Saturday till the last minute of Sunday - // UTC: - // - // start time = 2019-01-05T00:00:00Z - // end time = 2019-01-07T23:59:00Z - // recurrence = FREQ=WEEKLY;BYDAY=SA - // - // Note the start and end time's specific dates are largely arbitrary - // except - // to specify duration of the window and when it first starts. - // The FREQ values of HOURLY, MINUTELY, and SECONDLY are not supported. + // have a 9-5 UTC-4 window every weekday, you'd use something like: ``` + // start time = 2019-01-01T09:00:00-0400 end time = + // 2019-01-01T17:00:00-0400 recurrence = + // FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR ``` Windows can span multiple days. 
+ // Eg, to make the window encompass every weekend from midnight Saturday + // till the last minute of Sunday UTC: ``` start time = + // 2019-01-05T00:00:00Z end time = 2019-01-07T23:59:00Z recurrence = + // FREQ=WEEKLY;BYDAY=SA ``` Note the start and end time's specific dates + // are largely arbitrary except to specify duration of the window and + // when it first starts. The FREQ values of HOURLY, MINUTELY, and + // SECONDLY are not supported. Recurrence string `json:"recurrence,omitempty"` // Window: The window of the first recurrence. @@ -3965,18 +3840,10 @@ func (s *RecurringTimeWindow) MarshalJSON() ([]byte, error) { } // ReleaseChannel: ReleaseChannel indicates which release channel a -// cluster is -// subscribed to. Release channels are arranged in order of risk -// and -// frequency of updates. -// -// When a cluster is subscribed to a release channel, Google -// maintains -// both the master version and the node version. Node -// auto-upgrade -// defaults to true and cannot be disabled. Updates to version -// related -// fields (e.g. current_master_version) return an error. +// cluster is subscribed to. Release channels are arranged in order of +// risk. When a cluster is subscribed to a release channel, Google +// maintains both the master version and the node version. Node +// auto-upgrade defaults to true and cannot be disabled. type ReleaseChannel struct { // Channel: channel specifies which release channel the cluster is // subscribed to. @@ -3984,31 +3851,14 @@ type ReleaseChannel struct { // Possible values: // "UNSPECIFIED" - No channel specified. // "RAPID" - RAPID channel is offered on an early access basis for - // customers who want - // to test new releases before they are qualified for production use - // or - // general availability. New upgrades will occur roughly - // weekly. - // - // WARNING: Versions available in the RAPID Channel may be subject - // to - // unresolved issues with no known workaround and are not for use - // with - // production workloads or subject to any SLAs. + // customers who want to test new releases. WARNING: Versions available + // in the RAPID Channel may be subject to unresolved issues with no + // known workaround and are not subject to any SLAs. // "REGULAR" - Clusters subscribed to REGULAR receive versions that - // are considered GA - // quality. REGULAR is intended for production users who want to - // take - // advantage of new features. New upgrades will occur roughly every - // few - // weeks. + // are considered GA quality. REGULAR is intended for production users + // who want to take advantage of new features. // "STABLE" - Clusters subscribed to STABLE receive versions that are - // known to be - // stable and reliable in production. STABLE is intended for - // production - // users who need stability above all else, or for whom frequent - // upgrades - // are too risky. New upgrades will occur roughly every few months. + // known to be stable and reliable in production. Channel string `json:"channel,omitempty"` // ForceSendFields is a list of field names (e.g. "Channel") to @@ -4037,8 +3887,8 @@ func (s *ReleaseChannel) MarshalJSON() ([]byte, error) { // ReleaseChannelConfig: ReleaseChannelConfig exposes configuration for // a release channel. type ReleaseChannelConfig struct { - // AvailableVersions: List of available versions for the release - // channel. + // AvailableVersions: Deprecated. This field has been deprecated and + // replaced with the valid_versions field. 
AvailableVersions []*AvailableVersion `json:"availableVersions,omitempty"` // Channel: The release channel this configuration applies to. @@ -4046,37 +3896,23 @@ type ReleaseChannelConfig struct { // Possible values: // "UNSPECIFIED" - No channel specified. // "RAPID" - RAPID channel is offered on an early access basis for - // customers who want - // to test new releases before they are qualified for production use - // or - // general availability. New upgrades will occur roughly - // weekly. - // - // WARNING: Versions available in the RAPID Channel may be subject - // to - // unresolved issues with no known workaround and are not for use - // with - // production workloads or subject to any SLAs. + // customers who want to test new releases. WARNING: Versions available + // in the RAPID Channel may be subject to unresolved issues with no + // known workaround and are not subject to any SLAs. // "REGULAR" - Clusters subscribed to REGULAR receive versions that - // are considered GA - // quality. REGULAR is intended for production users who want to - // take - // advantage of new features. New upgrades will occur roughly every - // few - // weeks. + // are considered GA quality. REGULAR is intended for production users + // who want to take advantage of new features. // "STABLE" - Clusters subscribed to STABLE receive versions that are - // known to be - // stable and reliable in production. STABLE is intended for - // production - // users who need stability above all else, or for whom frequent - // upgrades - // are too risky. New upgrades will occur roughly every few months. + // known to be stable and reliable in production. Channel string `json:"channel,omitempty"` // DefaultVersion: The default version for newly created clusters on the // channel. DefaultVersion string `json:"defaultVersion,omitempty"` + // ValidVersions: List of valid versions for the channel. + ValidVersions []string `json:"validVersions,omitempty"` + // ForceSendFields is a list of field names (e.g. "AvailableVersions") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -4103,10 +3939,8 @@ func (s *ReleaseChannelConfig) MarshalJSON() ([]byte, error) { // ReservationAffinity: // [ReservationAffinity](https://cloud.google.com/compute/docs/instances/ -// reserving-zonal-resources) -// is the configuration of desired reservation which instances could -// take -// capacity from. +// reserving-zonal-resources) is the configuration of desired +// reservation which instances could take capacity from. type ReservationAffinity struct { // ConsumeReservationType: Corresponds to the type of reservation // consumption. @@ -4116,15 +3950,13 @@ type ReservationAffinity struct { // "NO_RESERVATION" - Do not consume from any reserved capacity. // "ANY_RESERVATION" - Consume any reservation available. // "SPECIFIC_RESERVATION" - Must consume from a specific reservation. - // Must specify key value fields - // for specifying the reservations. + // Must specify key value fields for specifying the reservations. ConsumeReservationType string `json:"consumeReservationType,omitempty"` // Key: Corresponds to the label key of a reservation resource. To - // target a - // SPECIFIC_RESERVATION by name, specify - // "googleapis.com/reservation-name" as - // the key and specify the name of your reservation as its value. 
+ // target a SPECIFIC_RESERVATION by name, specify + // "googleapis.com/reservation-name" as the key and specify the name of + // your reservation as its value. Key string `json:"key,omitempty"` // Values: Corresponds to the label value(s) of reservation resource(s). @@ -4156,8 +3988,7 @@ func (s *ReservationAffinity) MarshalJSON() ([]byte, error) { } // ResourceLimit: Contains information about amount of some resource in -// the cluster. -// For memory, value should be in GB. +// the cluster. For memory, value should be in GB. type ResourceLimit struct { // Maximum: Maximum amount of the resource in the cluster. Maximum int64 `json:"maximum,omitempty,string"` @@ -4203,9 +4034,8 @@ type ResourceUsageExportConfig struct { ConsumptionMeteringConfig *ConsumptionMeteringConfig `json:"consumptionMeteringConfig,omitempty"` // EnableNetworkEgressMetering: Whether to enable network egress - // metering for this cluster. If enabled, a - // daemonset will be created in the cluster to meter network egress - // traffic. + // metering for this cluster. If enabled, a daemonset will be created in + // the cluster to meter network egress traffic. EnableNetworkEgressMetering bool `json:"enableNetworkEgressMetering,omitempty"` // ForceSendFields is a list of field names (e.g. "BigqueryDestination") @@ -4233,43 +4063,33 @@ func (s *ResourceUsageExportConfig) MarshalJSON() ([]byte, error) { } // RollbackNodePoolUpgradeRequest: RollbackNodePoolUpgradeRequest -// rollbacks the previously Aborted or Failed -// NodePool upgrade. This will be an no-op if the last upgrade -// successfully -// completed. +// rollbacks the previously Aborted or Failed NodePool upgrade. This +// will be an no-op if the last upgrade successfully completed. type RollbackNodePoolUpgradeRequest struct { - // ClusterId: Required. Deprecated. The name of the cluster to - // rollback. + // ClusterId: Required. Deprecated. The name of the cluster to rollback. // This field has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Name: The name (project, location, cluster, node pool id) of the node - // poll to - // rollback upgrade. - // Specified in the format + // poll to rollback upgrade. Specified in the format // `projects/*/locations/*/clusters/*/nodePools/*`. Name string `json:"name,omitempty"` // NodePoolId: Required. Deprecated. The name of the node pool to - // rollback. - // This field has been deprecated and replaced by the name field. + // rollback. This field has been deprecated and replaced by the name + // field. NodePoolId string `json:"nodePoolId,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://support.google.com/cloud/answer/6158840). This field + // has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. 
Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4346,10 +4166,12 @@ type ServerConfig struct { // ValidImageTypes: List of valid image types. ValidImageTypes []string `json:"validImageTypes,omitempty"` - // ValidMasterVersions: List of valid master versions. + // ValidMasterVersions: List of valid master versions, in descending + // order. ValidMasterVersions []string `json:"validMasterVersions,omitempty"` - // ValidNodeVersions: List of valid node upgrade target versions. + // ValidNodeVersions: List of valid node upgrade target versions, in + // descending order. ValidNodeVersions []string `json:"validNodeVersions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -4383,35 +4205,27 @@ func (s *ServerConfig) MarshalJSON() ([]byte, error) { // with the cluster. type SetAddonsConfigRequest struct { // AddonsConfig: Required. The desired configurations for the various - // addons available to run in the - // cluster. + // addons available to run in the cluster. AddonsConfig *AddonsConfig `json:"addonsConfig,omitempty"` - // ClusterId: Required. Deprecated. The name of the cluster to - // upgrade. + // ClusterId: Required. Deprecated. The name of the cluster to upgrade. // This field has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Name: The name (project, location, cluster) of the cluster to set - // addons. - // Specified in the format `projects/*/locations/*/clusters/*`. + // addons. Specified in the format `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://support.google.com/cloud/answer/6158840). This field + // has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "AddonsConfig") to @@ -4438,51 +4252,38 @@ func (s *SetAddonsConfigRequest) MarshalJSON() ([]byte, error) { } // SetLabelsRequest: SetLabelsRequest sets the Google Cloud Platform -// labels on a Google Container -// Engine cluster, which will in turn set them for Google Compute -// Engine -// resources used by that cluster +// labels on a Google Container Engine cluster, which will in turn set +// them for Google Compute Engine resources used by that cluster type SetLabelsRequest struct { - // ClusterId: Required. Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the name field. + // ClusterId: Required. Deprecated. The name of the cluster. This field + // has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // LabelFingerprint: Required. The fingerprint of the previous set of - // labels for this resource, - // used to detect conflicts. 
The fingerprint is initially generated - // by - // Kubernetes Engine and changes after every request to modify or - // update - // labels. You must always provide an up-to-date fingerprint hash - // when - // updating or changing labels. Make a get() request to - // the - // resource to get the latest fingerprint. + // labels for this resource, used to detect conflicts. The fingerprint + // is initially generated by Kubernetes Engine and changes after every + // request to modify or update labels. You must always provide an + // up-to-date fingerprint hash when updating or changing labels. Make a + // `get()` request to the resource to get the latest fingerprint. LabelFingerprint string `json:"labelFingerprint,omitempty"` // Name: The name (project, location, cluster id) of the cluster to set - // labels. - // Specified in the format `projects/*/locations/*/clusters/*`. + // labels. Specified in the format `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://developers.google.com/console/help/new/#projec - // tnumber). - // This field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber) + // . This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` // ResourceLabels: Required. The labels to set for that cluster. ResourceLabels map[string]string `json:"resourceLabels,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4509,11 +4310,9 @@ func (s *SetLabelsRequest) MarshalJSON() ([]byte, error) { } // SetLegacyAbacRequest: SetLegacyAbacRequest enables or disables the -// ABAC authorization mechanism for -// a cluster. +// ABAC authorization mechanism for a cluster. type SetLegacyAbacRequest struct { - // ClusterId: Required. Deprecated. The name of the cluster to - // update. + // ClusterId: Required. Deprecated. The name of the cluster to update. // This field has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` @@ -4522,25 +4321,20 @@ type SetLegacyAbacRequest struct { Enabled bool `json:"enabled,omitempty"` // Name: The name (project, location, cluster id) of the cluster to set - // legacy abac. - // Specified in the format `projects/*/locations/*/clusters/*`. + // legacy abac. Specified in the format + // `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://support.google.com/cloud/answer/6158840). This field + // has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Required. Deprecated. 
The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4569,44 +4363,33 @@ func (s *SetLegacyAbacRequest) MarshalJSON() ([]byte, error) { // SetLocationsRequest: SetLocationsRequest sets the locations of the // cluster. type SetLocationsRequest struct { - // ClusterId: Required. Deprecated. The name of the cluster to - // upgrade. + // ClusterId: Required. Deprecated. The name of the cluster to upgrade. // This field has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` - // Locations: Required. The desired list of Google Compute - // Engine + // Locations: Required. The desired list of Google Compute Engine // [zones](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster's nodes should be located. Changing the locations a cluster - // is in - // will result in nodes being either created or removed from the - // cluster, - // depending on whether locations are being added or removed. - // - // This list must always include the cluster's primary zone. + // which the cluster's nodes should be located. Changing the locations a + // cluster is in will result in nodes being either created or removed + // from the cluster, depending on whether locations are being added or + // removed. This list must always include the cluster's primary zone. Locations []string `json:"locations,omitempty"` // Name: The name (project, location, cluster) of the cluster to set - // locations. - // Specified in the format `projects/*/locations/*/clusters/*`. + // locations. Specified in the format + // `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://support.google.com/cloud/answer/6158840). This field + // has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4635,47 +4418,34 @@ func (s *SetLocationsRequest) MarshalJSON() ([]byte, error) { // SetLoggingServiceRequest: SetLoggingServiceRequest sets the logging // service of a cluster. type SetLoggingServiceRequest struct { - // ClusterId: Required. Deprecated. The name of the cluster to - // upgrade. + // ClusterId: Required. Deprecated. The name of the cluster to upgrade. // This field has been deprecated and replaced by the name field. 
ClusterId string `json:"clusterId,omitempty"` // LoggingService: Required. The logging service the cluster should use - // to write logs. - // Currently available options: - // - // * `logging.googleapis.com/kubernetes` - The Cloud Logging - // service with a Kubernetes-native resource model - // * `logging.googleapis.com` - The legacy Cloud Logging service (no - // longer - // available as of GKE 1.15). - // * `none` - no logs will be exported from the cluster. - // - // If left as an empty string,`logging.googleapis.com/kubernetes` will - // be - // used for GKE 1.14+ or `logging.googleapis.com` for earlier versions. + // to write logs. Currently available options: * + // `logging.googleapis.com/kubernetes` - The Cloud Logging service with + // a Kubernetes-native resource model * `logging.googleapis.com` - The + // legacy Cloud Logging service (no longer available as of GKE 1.15). * + // `none` - no logs will be exported from the cluster. If left as an + // empty string,`logging.googleapis.com/kubernetes` will be used for GKE + // 1.14+ or `logging.googleapis.com` for earlier versions. LoggingService string `json:"loggingService,omitempty"` // Name: The name (project, location, cluster) of the cluster to set - // logging. - // Specified in the format `projects/*/locations/*/clusters/*`. + // logging. Specified in the format `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://support.google.com/cloud/answer/6158840). This field + // has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4708,26 +4478,21 @@ type SetMaintenancePolicyRequest struct { ClusterId string `json:"clusterId,omitempty"` // MaintenancePolicy: Required. The maintenance policy to be set for the - // cluster. An empty field - // clears the existing maintenance policy. + // cluster. An empty field clears the existing maintenance policy. MaintenancePolicy *MaintenancePolicy `json:"maintenancePolicy,omitempty"` // Name: The name (project, location, cluster id) of the cluster to set - // maintenance - // policy. - // Specified in the format `projects/*/locations/*/clusters/*`. + // maintenance policy. Specified in the format + // `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Required. The Google Developers Console [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). + // project number](https://support.google.com/cloud/answer/6158840). ProjectId string `json:"projectId,omitempty"` - // Zone: Required. The name of the Google Compute - // Engine + // Zone: Required. 
The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. + // which the cluster resides. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4763,43 +4528,33 @@ type SetMasterAuthRequest struct { // "UNKNOWN" - Operation is unknown and will error out. // "SET_PASSWORD" - Set the password to a user generated value. // "GENERATE_PASSWORD" - Generate a new password and set it to that. - // "SET_USERNAME" - Set the username. If an empty username is - // provided, basic authentication - // is disabled for the cluster. If a non-empty username is provided, - // basic - // authentication is enabled, with either a provided password or a - // generated - // one. + // "SET_USERNAME" - Set the username. If an empty username is + // provided, basic authentication is disabled for the cluster. If a + // non-empty username is provided, basic authentication is enabled, with + // either a provided password or a generated one. Action string `json:"action,omitempty"` - // ClusterId: Required. Deprecated. The name of the cluster to - // upgrade. + // ClusterId: Required. Deprecated. The name of the cluster to upgrade. // This field has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Name: The name (project, location, cluster) of the cluster to set - // auth. - // Specified in the format `projects/*/locations/*/clusters/*`. + // auth. Specified in the format `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://support.google.com/cloud/answer/6158840). This field + // has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` // Update: Required. A description of the update. Update *MasterAuth `json:"update,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "Action") to @@ -4828,49 +4583,36 @@ func (s *SetMasterAuthRequest) MarshalJSON() ([]byte, error) { // SetMonitoringServiceRequest: SetMonitoringServiceRequest sets the // monitoring service of a cluster. type SetMonitoringServiceRequest struct { - // ClusterId: Required. Deprecated. The name of the cluster to - // upgrade. + // ClusterId: Required. Deprecated. The name of the cluster to upgrade. // This field has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // MonitoringService: Required. The monitoring service the cluster - // should use to write metrics. 
- // Currently available options: - // - // * "monitoring.googleapis.com/kubernetes" - The Cloud - // Monitoring - // service with a Kubernetes-native resource model - // * `monitoring.googleapis.com` - The legacy Cloud Monitoring service - // (no - // longer available as of GKE 1.15). - // * `none` - No metrics will be exported from the cluster. - // - // If left as an empty string,`monitoring.googleapis.com/kubernetes` - // will be - // used for GKE 1.14+ or `monitoring.googleapis.com` for earlier + // should use to write metrics. Currently available options: * + // "monitoring.googleapis.com/kubernetes" - The Cloud Monitoring service + // with a Kubernetes-native resource model * `monitoring.googleapis.com` + // - The legacy Cloud Monitoring service (no longer available as of GKE + // 1.15). * `none` - No metrics will be exported from the cluster. If + // left as an empty string,`monitoring.googleapis.com/kubernetes` will + // be used for GKE 1.14+ or `monitoring.googleapis.com` for earlier // versions. MonitoringService string `json:"monitoringService,omitempty"` // Name: The name (project, location, cluster) of the cluster to set - // monitoring. - // Specified in the format `projects/*/locations/*/clusters/*`. + // monitoring. Specified in the format + // `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://support.google.com/cloud/answer/6158840). This field + // has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4899,13 +4641,13 @@ func (s *SetMonitoringServiceRequest) MarshalJSON() ([]byte, error) { // SetNetworkPolicyRequest: SetNetworkPolicyRequest enables/disables // network policy for a cluster. type SetNetworkPolicyRequest struct { - // ClusterId: Required. Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the name field. + // ClusterId: Required. Deprecated. The name of the cluster. This field + // has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Name: The name (project, location, cluster id) of the cluster to set - // networking - // policy. Specified in the format `projects/*/locations/*/clusters/*`. + // networking policy. Specified in the format + // `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // NetworkPolicy: Required. Configuration options for the NetworkPolicy @@ -4913,20 +4655,15 @@ type SetNetworkPolicyRequest struct { NetworkPolicy *NetworkPolicy `json:"networkPolicy,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://developers.google.com/console/help/new/#projec - // tnumber). 
- // This field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber) + // . This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -4958,38 +4695,30 @@ type SetNodePoolAutoscalingRequest struct { // Autoscaling: Required. Autoscaling configuration for the node pool. Autoscaling *NodePoolAutoscaling `json:"autoscaling,omitempty"` - // ClusterId: Required. Deprecated. The name of the cluster to - // upgrade. + // ClusterId: Required. Deprecated. The name of the cluster to upgrade. // This field has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Name: The name (project, location, cluster, node pool) of the node - // pool to set - // autoscaler settings. Specified in the - // format + // pool to set autoscaler settings. Specified in the format // `projects/*/locations/*/clusters/*/nodePools/*`. Name string `json:"name,omitempty"` // NodePoolId: Required. Deprecated. The name of the node pool to - // upgrade. - // This field has been deprecated and replaced by the name field. + // upgrade. This field has been deprecated and replaced by the name + // field. NodePoolId string `json:"nodePoolId,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://support.google.com/cloud/answer/6158840). This field + // has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "Autoscaling") to @@ -5016,11 +4745,9 @@ func (s *SetNodePoolAutoscalingRequest) MarshalJSON() ([]byte, error) { } // SetNodePoolManagementRequest: SetNodePoolManagementRequest sets the -// node management properties of a node -// pool. +// node management properties of a node pool. type SetNodePoolManagementRequest struct { - // ClusterId: Required. Deprecated. The name of the cluster to - // update. + // ClusterId: Required. Deprecated. The name of the cluster to update. // This field has been deprecated and replaced by the name field. 
ClusterId string `json:"clusterId,omitempty"` @@ -5028,32 +4755,25 @@ type SetNodePoolManagementRequest struct { Management *NodeManagement `json:"management,omitempty"` // Name: The name (project, location, cluster, node pool id) of the node - // pool to set - // management properties. Specified in the - // format + // pool to set management properties. Specified in the format // `projects/*/locations/*/clusters/*/nodePools/*`. Name string `json:"name,omitempty"` // NodePoolId: Required. Deprecated. The name of the node pool to - // update. - // This field has been deprecated and replaced by the name field. + // update. This field has been deprecated and replaced by the name + // field. NodePoolId string `json:"nodePoolId,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://support.google.com/cloud/answer/6158840). This field + // has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -5079,19 +4799,15 @@ func (s *SetNodePoolManagementRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SetNodePoolSizeRequest: SetNodePoolSizeRequest sets the size a -// node +// SetNodePoolSizeRequest: SetNodePoolSizeRequest sets the size a node // pool. type SetNodePoolSizeRequest struct { - // ClusterId: Required. Deprecated. The name of the cluster to - // update. + // ClusterId: Required. Deprecated. The name of the cluster to update. // This field has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Name: The name (project, location, cluster, node pool id) of the node - // pool to set - // size. - // Specified in the format + // pool to set size. Specified in the format // `projects/*/locations/*/clusters/*/nodePools/*`. Name string `json:"name,omitempty"` @@ -5099,25 +4815,20 @@ type SetNodePoolSizeRequest struct { NodeCount int64 `json:"nodeCount,omitempty"` // NodePoolId: Required. Deprecated. The name of the node pool to - // update. - // This field has been deprecated and replaced by the name field. + // update. This field has been deprecated and replaced by the name + // field. NodePoolId string `json:"nodePoolId,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://support.google.com/cloud/answer/6158840). This field + // has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. 
The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -5146,25 +4857,16 @@ func (s *SetNodePoolSizeRequest) MarshalJSON() ([]byte, error) { // ShieldedInstanceConfig: A set of Shielded Instance options. type ShieldedInstanceConfig struct { // EnableIntegrityMonitoring: Defines whether the instance has integrity - // monitoring enabled. - // - // Enables monitoring and attestation of the boot integrity of the - // instance. - // The attestation is performed against the integrity policy baseline. - // This - // baseline is initially derived from the implicitly trusted boot image - // when - // the instance is created. + // monitoring enabled. Enables monitoring and attestation of the boot + // integrity of the instance. The attestation is performed against the + // integrity policy baseline. This baseline is initially derived from + // the implicitly trusted boot image when the instance is created. EnableIntegrityMonitoring bool `json:"enableIntegrityMonitoring,omitempty"` // EnableSecureBoot: Defines whether the instance has Secure Boot - // enabled. - // - // Secure Boot helps ensure that the system only runs authentic software - // by - // verifying the digital signature of all boot components, and halting - // the - // boot process if signature verification fails. + // enabled. Secure Boot helps ensure that the system only runs authentic + // software by verifying the digital signature of all boot components, + // and halting the boot process if signature verification fails. EnableSecureBoot bool `json:"enableSecureBoot,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -5222,37 +4924,31 @@ func (s *ShieldedNodes) MarshalJSON() ([]byte, error) { } // StartIPRotationRequest: StartIPRotationRequest creates a new IP for -// the cluster and then performs -// a node upgrade on each node pool to point to the new IP. +// the cluster and then performs a node upgrade on each node pool to +// point to the new IP. type StartIPRotationRequest struct { - // ClusterId: Required. Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the name field. + // ClusterId: Required. Deprecated. The name of the cluster. This field + // has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // Name: The name (project, location, cluster id) of the cluster to - // start IP - // rotation. Specified in the format + // start IP rotation. Specified in the format // `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://developers.google.com/console/help/new/#projec - // tnumber). - // This field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber) + // . This field has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` // RotateCredentials: Whether to rotate credentials during IP rotation. RotateCredentials bool `json:"rotateCredentials,omitempty"` - // Zone: Required. Deprecated. 
The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -5278,33 +4974,166 @@ func (s *StartIPRotationRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Status: The `Status` type defines a logical error model that is +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). +type Status struct { + // Code: The status code, which should be an enum value of + // google.rpc.Code. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Status) MarshalJSON() ([]byte, error) { + type NoMethod Status + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // StatusCondition: StatusCondition describes why a cluster or a node -// pool has a certain status -// (e.g., ERROR or DEGRADED). +// pool has a certain status (e.g., ERROR or DEGRADED). type StatusCondition struct { - // Code: Machine-friendly representation of the condition + // CanonicalCode: Canonical code of the condition. + // + // Possible values: + // "OK" - Not an error; returned on success HTTP Mapping: 200 OK + // "CANCELLED" - The operation was cancelled, typically by the caller. + // HTTP Mapping: 499 Client Closed Request + // "UNKNOWN" - Unknown error. For example, this error may be returned + // when a `Status` value received from another address space belongs to + // an error space that is not known in this address space. 
Also errors + // raised by APIs that do not return enough error information may be + // converted to this error. HTTP Mapping: 500 Internal Server Error + // "INVALID_ARGUMENT" - The client specified an invalid argument. Note + // that this differs from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` + // indicates arguments that are problematic regardless of the state of + // the system (e.g., a malformed file name). HTTP Mapping: 400 Bad + // Request + // "DEADLINE_EXCEEDED" - The deadline expired before the operation + // could complete. For operations that change the state of the system, + // this error may be returned even if the operation has completed + // successfully. For example, a successful response from a server could + // have been delayed long enough for the deadline to expire. HTTP + // Mapping: 504 Gateway Timeout + // "NOT_FOUND" - Some requested entity (e.g., file or directory) was + // not found. Note to server developers: if a request is denied for an + // entire class of users, such as gradual feature rollout or + // undocumented allowlist, `NOT_FOUND` may be used. If a request is + // denied for some users within a class of users, such as user-based + // access control, `PERMISSION_DENIED` must be used. HTTP Mapping: 404 + // Not Found + // "ALREADY_EXISTS" - The entity that a client attempted to create + // (e.g., file or directory) already exists. HTTP Mapping: 409 Conflict + // "PERMISSION_DENIED" - The caller does not have permission to + // execute the specified operation. `PERMISSION_DENIED` must not be used + // for rejections caused by exhausting some resource (use + // `RESOURCE_EXHAUSTED` instead for those errors). `PERMISSION_DENIED` + // must not be used if the caller can not be identified (use + // `UNAUTHENTICATED` instead for those errors). This error code does not + // imply the request is valid or the requested entity exists or + // satisfies other pre-conditions. HTTP Mapping: 403 Forbidden + // "UNAUTHENTICATED" - The request does not have valid authentication + // credentials for the operation. HTTP Mapping: 401 Unauthorized + // "RESOURCE_EXHAUSTED" - Some resource has been exhausted, perhaps a + // per-user quota, or perhaps the entire file system is out of space. + // HTTP Mapping: 429 Too Many Requests + // "FAILED_PRECONDITION" - The operation was rejected because the + // system is not in a state required for the operation's execution. For + // example, the directory to be deleted is non-empty, an rmdir operation + // is applied to a non-directory, etc. Service implementors can use the + // following guidelines to decide between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`: (a) Use `UNAVAILABLE` if the client can + // retry just the failing call. (b) Use `ABORTED` if the client should + // retry at a higher level (e.g., when a client-specified test-and-set + // fails, indicating the client should restart a read-modify-write + // sequence). (c) Use `FAILED_PRECONDITION` if the client should not + // retry until the system state has been explicitly fixed. E.g., if an + // "rmdir" fails because the directory is non-empty, + // `FAILED_PRECONDITION` should be returned since the client should not + // retry unless the files are deleted from the directory. HTTP Mapping: + // 400 Bad Request + // "ABORTED" - The operation was aborted, typically due to a + // concurrency issue such as a sequencer check failure or transaction + // abort. See the guidelines above for deciding between + // `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. 
HTTP Mapping: + // 409 Conflict + // "OUT_OF_RANGE" - The operation was attempted past the valid range. + // E.g., seeking or reading past end-of-file. Unlike `INVALID_ARGUMENT`, + // this error indicates a problem that may be fixed if the system state + // changes. For example, a 32-bit file system will generate + // `INVALID_ARGUMENT` if asked to read at an offset that is not in the + // range [0,2^32-1], but it will generate `OUT_OF_RANGE` if asked to + // read from an offset past the current file size. There is a fair bit + // of overlap between `FAILED_PRECONDITION` and `OUT_OF_RANGE`. We + // recommend using `OUT_OF_RANGE` (the more specific error) when it + // applies so that callers who are iterating through a space can easily + // look for an `OUT_OF_RANGE` error to detect when they are done. HTTP + // Mapping: 400 Bad Request + // "UNIMPLEMENTED" - The operation is not implemented or is not + // supported/enabled in this service. HTTP Mapping: 501 Not Implemented + // "INTERNAL" - Internal errors. This means that some invariants + // expected by the underlying system have been broken. This error code + // is reserved for serious errors. HTTP Mapping: 500 Internal Server + // Error + // "UNAVAILABLE" - The service is currently unavailable. This is most + // likely a transient condition, which can be corrected by retrying with + // a backoff. Note that it is not always safe to retry non-idempotent + // operations. See the guidelines above for deciding between + // `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. HTTP Mapping: + // 503 Service Unavailable + // "DATA_LOSS" - Unrecoverable data loss or corruption. HTTP Mapping: + // 500 Internal Server Error + CanonicalCode string `json:"canonicalCode,omitempty"` + + // Code: Machine-friendly representation of the condition Deprecated. + // Use canonical_code instead. // // Possible values: // "UNKNOWN" - UNKNOWN indicates a generic condition. // "GCE_STOCKOUT" - GCE_STOCKOUT indicates that Google Compute Engine - // resources are - // temporarily unavailable. + // resources are temporarily unavailable. // "GKE_SERVICE_ACCOUNT_DELETED" - GKE_SERVICE_ACCOUNT_DELETED - // indicates that the user deleted their robot - // service account. + // indicates that the user deleted their robot service account. // "GCE_QUOTA_EXCEEDED" - Google Compute Engine quota was exceeded. // "SET_BY_OPERATOR" - Cluster state was manually changed by an SRE // due to a system logic error. // "CLOUD_KMS_KEY_ERROR" - Unable to perform an encrypt operation - // against the CloudKMS key used for - // etcd level encryption. - // More codes TBA + // against the CloudKMS key used for etcd level encryption. More codes + // TBA Code string `json:"code,omitempty"` // Message: Human-friendly representation of the condition Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "CanonicalCode") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -5312,10 +5141,10 @@ type StatusCondition struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "CanonicalCode") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -5330,8 +5159,7 @@ func (s *StatusCondition) MarshalJSON() ([]byte, error) { // TimeWindow: Represents an arbitrary window of time. type TimeWindow struct { // EndTime: The time that the window ends. The end time should take - // place after the - // start time. + // place after the start time. EndTime string `json:"endTime,omitempty"` // StartTime: The time that the window first starts. @@ -5398,34 +5226,27 @@ func (s *TpuConfig) MarshalJSON() ([]byte, error) { // UpdateClusterRequest: UpdateClusterRequest updates the settings of a // cluster. type UpdateClusterRequest struct { - // ClusterId: Required. Deprecated. The name of the cluster to - // upgrade. + // ClusterId: Required. Deprecated. The name of the cluster to upgrade. // This field has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` - // Name: The name (project, location, cluster) of the cluster to - // update. + // Name: The name (project, location, cluster) of the cluster to update. // Specified in the format `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://support.google.com/cloud/answer/6158840). This field + // has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` // Update: Required. A description of the update. Update *ClusterUpdate `json:"update,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -5454,46 +5275,34 @@ func (s *UpdateClusterRequest) MarshalJSON() ([]byte, error) { // UpdateMasterRequest: UpdateMasterRequest updates the master of the // cluster. type UpdateMasterRequest struct { - // ClusterId: Required. Deprecated. The name of the cluster to - // upgrade. + // ClusterId: Required. Deprecated. The name of the cluster to upgrade. // This field has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // MasterVersion: Required. The Kubernetes version to change the master - // to. 
- // - // Users may specify either explicit versions offered by - // Kubernetes Engine or version aliases, which have the following - // behavior: - // - // - "latest": picks the highest valid Kubernetes version - // - "1.X": picks the highest valid patch+gke.N patch in the 1.X - // version - // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - // - "1.X.Y-gke.N": picks an explicit Kubernetes version - // - "-": picks the default Kubernetes version + // to. Users may specify either explicit versions offered by Kubernetes + // Engine or version aliases, which have the following behavior: - + // "latest": picks the highest valid Kubernetes version - "1.X": picks + // the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": + // picks the highest valid gke.N patch in the 1.X.Y version - + // "1.X.Y-gke.N": picks an explicit Kubernetes version - "-": picks the + // default Kubernetes version MasterVersion string `json:"masterVersion,omitempty"` - // Name: The name (project, location, cluster) of the cluster to - // update. + // Name: The name (project, location, cluster) of the cluster to update. // Specified in the format `projects/*/locations/*/clusters/*`. Name string `json:"name,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://support.google.com/cloud/answer/6158840). This field + // has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -5522,59 +5331,51 @@ func (s *UpdateMasterRequest) MarshalJSON() ([]byte, error) { // UpdateNodePoolRequest: SetNodePoolVersionRequest updates the version // of a node pool. type UpdateNodePoolRequest struct { - // ClusterId: Required. Deprecated. The name of the cluster to - // upgrade. + // ClusterId: Required. Deprecated. The name of the cluster to upgrade. // This field has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` // ImageType: Required. The desired image type for the node pool. ImageType string `json:"imageType,omitempty"` - // Locations: The desired list of Google Compute - // Engine + // KubeletConfig: Node kubelet configs. + KubeletConfig *NodeKubeletConfig `json:"kubeletConfig,omitempty"` + + // LinuxNodeConfig: Parameters that can be configured on Linux nodes. + LinuxNodeConfig *LinuxNodeConfig `json:"linuxNodeConfig,omitempty"` + + // Locations: The desired list of Google Compute Engine // [zones](https://cloud.google.com/compute/docs/zones#available) in - // which the - // node pool's nodes should be located. Changing the locations for a - // node pool - // will result in nodes being either created or removed from the node - // pool, - // depending on whether locations are being added or removed. + // which the node pool's nodes should be located. 
Changing the locations + // for a node pool will result in nodes being either created or removed + // from the node pool, depending on whether locations are being added or + // removed. Locations []string `json:"locations,omitempty"` // Name: The name (project, location, cluster, node pool) of the node - // pool to - // update. Specified in the - // format + // pool to update. Specified in the format // `projects/*/locations/*/clusters/*/nodePools/*`. Name string `json:"name,omitempty"` // NodePoolId: Required. Deprecated. The name of the node pool to - // upgrade. - // This field has been deprecated and replaced by the name field. + // upgrade. This field has been deprecated and replaced by the name + // field. NodePoolId string `json:"nodePoolId,omitempty"` // NodeVersion: Required. The Kubernetes version to change the nodes to - // (typically an - // upgrade). - // - // Users may specify either explicit versions offered by Kubernetes - // Engine or - // version aliases, which have the following behavior: - // - // - "latest": picks the highest valid Kubernetes version - // - "1.X": picks the highest valid patch+gke.N patch in the 1.X - // version - // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - // - "1.X.Y-gke.N": picks an explicit Kubernetes version - // - "-": picks the Kubernetes master version + // (typically an upgrade). Users may specify either explicit versions + // offered by Kubernetes Engine or version aliases, which have the + // following behavior: - "latest": picks the highest valid Kubernetes + // version - "1.X": picks the highest valid patch+gke.N patch in the 1.X + // version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y + // version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "-": + // picks the Kubernetes master version NodeVersion string `json:"nodeVersion,omitempty"` // ProjectId: Required. Deprecated. The Google Developers Console - // [project ID or - // project - // number](https://support.google.com/cloud/answer/6158840). - // This - // field has been deprecated and replaced by the name field. + // [project ID or project + // number](https://support.google.com/cloud/answer/6158840). This field + // has been deprecated and replaced by the name field. ProjectId string `json:"projectId,omitempty"` // UpgradeSettings: Upgrade settings control disruption and speed of the @@ -5585,13 +5386,10 @@ type UpdateNodePoolRequest struct { // node pool. WorkloadMetadataConfig *WorkloadMetadataConfig `json:"workloadMetadataConfig,omitempty"` - // Zone: Required. Deprecated. The name of the Google Compute - // Engine + // Zone: Required. Deprecated. The name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in - // which the - // cluster resides. This field has been deprecated and replaced by the - // name - // field. + // which the cluster resides. This field has been deprecated and + // replaced by the name field. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterId") to @@ -5617,52 +5415,84 @@ func (s *UpdateNodePoolRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// UpgradeEvent: UpgradeEvent is a notification sent to customers by the +// cluster server when a resource is upgrading. +type UpgradeEvent struct { + // CurrentVersion: Required. The current version before the upgrade. + CurrentVersion string `json:"currentVersion,omitempty"` + + // Operation: Required. 
The operation associated with this upgrade. + Operation string `json:"operation,omitempty"` + + // OperationStartTime: Required. The time when the operation was + // started. + OperationStartTime string `json:"operationStartTime,omitempty"` + + // Resource: Optional. Optional relative path to the resource. For + // example in node pool upgrades, the relative path of the node pool. + Resource string `json:"resource,omitempty"` + + // ResourceType: Required. The resource type that is upgrading. + // + // Possible values: + // "UPGRADE_RESOURCE_TYPE_UNSPECIFIED" - Default value. This shouldn't + // be used. + // "MASTER" - Master / control plane + // "NODE_POOL" - Node pool + ResourceType string `json:"resourceType,omitempty"` + + // TargetVersion: Required. The target version for the upgrade. + TargetVersion string `json:"targetVersion,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CurrentVersion") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CurrentVersion") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *UpgradeEvent) MarshalJSON() ([]byte, error) { + type NoMethod UpgradeEvent + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // UpgradeSettings: These upgrade settings control the level of -// parallelism and the level of -// disruption caused by an upgrade. -// +// parallelism and the level of disruption caused by an upgrade. // maxUnavailable controls the number of nodes that can be -// simultaneously -// unavailable. -// -// maxSurge controls the number of additional nodes that can be added to -// the -// node pool temporarily for the time of the upgrade to increase the -// number of -// available nodes. -// +// simultaneously unavailable. maxSurge controls the number of +// additional nodes that can be added to the node pool temporarily for +// the time of the upgrade to increase the number of available nodes. // (maxUnavailable + maxSurge) determines the level of parallelism (how -// many -// nodes are being upgraded at the same time). -// -// Note: upgrades inevitably introduce some disruption since workloads -// need to -// be moved from old nodes to new, upgraded ones. Even if -// maxUnavailable=0, -// this holds true. (Disruption stays within the limits -// of -// PodDisruptionBudget, if it is configured.) -// -// Consider a hypothetical node pool with 5 nodes having -// maxSurge=2, -// maxUnavailable=1. This means the upgrade process upgrades 3 -// nodes -// simultaneously. It creates 2 additional (upgraded) nodes, then it -// brings -// down 3 old (not yet upgraded) nodes at the same time. This ensures -// that -// there are always at least 4 nodes available. +// many nodes are being upgraded at the same time). 
Note: upgrades +// inevitably introduce some disruption since workloads need to be moved +// from old nodes to new, upgraded ones. Even if maxUnavailable=0, this +// holds true. (Disruption stays within the limits of +// PodDisruptionBudget, if it is configured.) Consider a hypothetical +// node pool with 5 nodes having maxSurge=2, maxUnavailable=1. This +// means the upgrade process upgrades 3 nodes simultaneously. It creates +// 2 additional (upgraded) nodes, then it brings down 3 old (not yet +// upgraded) nodes at the same time. This ensures that there are always +// at least 4 nodes available. type UpgradeSettings struct { // MaxSurge: The maximum number of nodes that can be created beyond the - // current size - // of the node pool during the upgrade process. + // current size of the node pool during the upgrade process. MaxSurge int64 `json:"maxSurge,omitempty"` // MaxUnavailable: The maximum number of nodes that can be - // simultaneously unavailable during - // the upgrade process. A node is considered available if its status - // is - // Ready. + // simultaneously unavailable during the upgrade process. A node is + // considered available if its status is Ready. MaxUnavailable int64 `json:"maxUnavailable,omitempty"` // ForceSendFields is a list of field names (e.g. "MaxSurge") to @@ -5689,31 +5519,27 @@ func (s *UpgradeSettings) MarshalJSON() ([]byte, error) { } // UsableSubnetwork: UsableSubnetwork resource returns the subnetwork -// name, its associated network -// and the primary CIDR range. +// name, its associated network and the primary CIDR range. type UsableSubnetwork struct { // IpCidrRange: The range of internal addresses that are owned by this // subnetwork. IpCidrRange string `json:"ipCidrRange,omitempty"` - // Network: Network Name. - // Example: projects/my-project/global/networks/my-network + // Network: Network Name. Example: + // projects/my-project/global/networks/my-network Network string `json:"network,omitempty"` // SecondaryIpRanges: Secondary IP ranges. SecondaryIpRanges []*UsableSubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` // StatusMessage: A human readable status message representing the - // reasons for cases where - // the caller cannot use the secondary ranges under the subnet. For - // example if - // the secondary_ip_ranges is empty due to a permission issue, an - // insufficient - // permission message will be given by status_message. + // reasons for cases where the caller cannot use the secondary ranges + // under the subnet. For example if the secondary_ip_ranges is empty due + // to a permission issue, an insufficient permission message will be + // given by status_message. StatusMessage string `json:"statusMessage,omitempty"` - // Subnetwork: Subnetwork Name. - // Example: + // Subnetwork: Subnetwork Name. Example: // projects/my-project/regions/us-central1/subnetworks/my-subnet Subnetwork string `json:"subnetwork,omitempty"` @@ -5748,8 +5574,7 @@ type UsableSubnetworkSecondaryRange struct { IpCidrRange string `json:"ipCidrRange,omitempty"` // RangeName: The name associated with this subnetwork secondary range, - // used when adding - // an alias IP range to a VM instance. + // used when adding an alias IP range to a VM instance. RangeName string `json:"rangeName,omitempty"` // Status: This field is to determine the status of the secondary range @@ -5761,16 +5586,15 @@ type UsableSubnetworkSecondaryRange struct { // "UNUSED" - UNUSED denotes that this range is unclaimed by any // cluster. 
// "IN_USE_SERVICE" - IN_USE_SERVICE denotes that this range is - // claimed by a cluster for - // services. It cannot be used for other clusters. + // claimed by a cluster for services. It cannot be used for other + // clusters. // "IN_USE_SHAREABLE_POD" - IN_USE_SHAREABLE_POD denotes this range - // was created by the network admin - // and is currently claimed by a cluster for pods. It can only be used - // by - // other clusters as a pod range. + // was created by the network admin and is currently claimed by a + // cluster for pods. It can only be used by other clusters as a pod + // range. // "IN_USE_MANAGED_POD" - IN_USE_MANAGED_POD denotes this range was - // created by GKE and is claimed - // for pods. It cannot be used for other clusters. + // created by GKE and is claimed for pods. It cannot be used for other + // clusters. Status string `json:"status,omitempty"` // ForceSendFields is a list of field names (e.g. "IpCidrRange") to @@ -5797,9 +5621,8 @@ func (s *UsableSubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { } // VerticalPodAutoscaling: VerticalPodAutoscaling contains global, -// per-cluster information -// required by Vertical Pod Autoscaler to automatically adjust -// the resources of pods controlled by it. +// per-cluster information required by Vertical Pod Autoscaler to +// automatically adjust the resources of pods controlled by it. type VerticalPodAutoscaling struct { // Enabled: Enables vertical pod autoscaling. Enabled bool `json:"enabled,omitempty"` @@ -5828,13 +5651,16 @@ func (s *VerticalPodAutoscaling) MarshalJSON() ([]byte, error) { } // WorkloadIdentityConfig: Configuration for the use of Kubernetes -// Service Accounts in GCP IAM -// policies. +// Service Accounts in GCP IAM policies. type WorkloadIdentityConfig struct { // IdentityNamespace: IAM Identity Namespace to attach all Kubernetes // Service Accounts to. IdentityNamespace string `json:"identityNamespace,omitempty"` + // IdentityProvider: identity provider is the third party identity + // provider. + IdentityProvider string `json:"identityProvider,omitempty"` + // WorkloadPool: The workload pool to attach all Kubernetes service // accounts to. WorkloadPool string `json:"workloadPool,omitempty"` @@ -5864,56 +5690,39 @@ func (s *WorkloadIdentityConfig) MarshalJSON() ([]byte, error) { } // WorkloadMetadataConfig: WorkloadMetadataConfig defines the metadata -// configuration to expose to -// workloads on the node pool. +// configuration to expose to workloads on the node pool. type WorkloadMetadataConfig struct { // Mode: Mode is the configuration for how to expose metadata to - // workloads running - // on the node pool. + // workloads running on the node pool. // // Possible values: // "MODE_UNSPECIFIED" - Not set. // "GCE_METADATA" - Expose all Compute Engine metadata to pods. // "GKE_METADATA" - Run the GKE Metadata Server on this node. The GKE - // Metadata Server exposes - // a metadata API to workloads that is compatible with the V1 - // Compute - // Metadata APIs exposed by the Compute Engine and App Engine - // Metadata - // Servers. This feature can only be enabled if Workload Identity is - // enabled - // at the cluster level. + // Metadata Server exposes a metadata API to workloads that is + // compatible with the V1 Compute Metadata APIs exposed by the Compute + // Engine and App Engine Metadata Servers. This feature can only be + // enabled if Workload Identity is enabled at the cluster level. 
Mode string `json:"mode,omitempty"` // NodeMetadata: NodeMetadata is the configuration for how to expose - // metadata to the - // workloads running on the node. + // metadata to the workloads running on the node. // // Possible values: // "UNSPECIFIED" - Not set. // "SECURE" - Prevent workloads not in hostNetwork from accessing - // certain VM metadata, - // specifically kube-env, which contains Kubelet credentials, and - // the - // instance identity token. - // - // Metadata concealment is a temporary security solution available while - // the - // bootstrapping process for cluster nodes is being redesigned - // with - // significant security improvements. This feature is scheduled to - // be - // deprecated in the future and later removed. + // certain VM metadata, specifically kube-env, which contains Kubelet + // credentials, and the instance identity token. Metadata concealment is + // a temporary security solution available while the bootstrapping + // process for cluster nodes is being redesigned with significant + // security improvements. This feature is scheduled to be deprecated in + // the future and later removed. // "EXPOSE" - Expose all VM metadata to pods. // "GKE_METADATA_SERVER" - Run the GKE Metadata Server on this node. - // The GKE Metadata Server exposes - // a metadata API to workloads that is compatible with the V1 - // Compute - // Metadata APIs exposed by the Compute Engine and App Engine - // Metadata - // Servers. This feature can only be enabled if Workload Identity is - // enabled - // at the cluster level. + // The GKE Metadata Server exposes a metadata API to workloads that is + // compatible with the V1 Compute Metadata APIs exposed by the Compute + // Engine and App Engine Metadata Servers. This feature can only be + // enabled if Workload Identity is enabled at the cluster level. NodeMetadata string `json:"nodeMetadata,omitempty"` // ForceSendFields is a list of field names (e.g. "Mode") to @@ -5959,11 +5768,9 @@ func (r *ProjectsAggregatedUsableSubnetworksService) List(parent string) *Projec } // Filter sets the optional parameter "filter": Filtering currently only -// supports equality on the networkProjectId and must -// be in the form: "networkProjectId=[PROJECTID]", where -// `networkProjectId` -// is the project which owns the listed subnetworks. This defaults to -// the +// supports equality on the networkProjectId and must be in the form: +// "networkProjectId=[PROJECTID]", where `networkProjectId` is the +// project which owns the listed subnetworks. This defaults to the // parent project ID. func (c *ProjectsAggregatedUsableSubnetworksListCall) Filter(filter string) *ProjectsAggregatedUsableSubnetworksListCall { c.urlParams_.Set("filter", filter) @@ -5971,11 +5778,9 @@ func (c *ProjectsAggregatedUsableSubnetworksListCall) Filter(filter string) *Pro } // PageSize sets the optional parameter "pageSize": The max number of -// results per page that should be returned. If the number -// of available results is larger than `page_size`, a `next_page_token` -// is -// returned which can be used to get the next page of results in -// subsequent +// results per page that should be returned. If the number of available +// results is larger than `page_size`, a `next_page_token` is returned +// which can be used to get the next page of results in subsequent // requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500) func (c *ProjectsAggregatedUsableSubnetworksListCall) PageSize(pageSize int64) *ProjectsAggregatedUsableSubnetworksListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) @@ -5983,8 +5788,8 @@ func (c *ProjectsAggregatedUsableSubnetworksListCall) PageSize(pageSize int64) * } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set this to the nextPageToken returned by -// previous list requests to get the next page of results. +// token to use. Set this to the nextPageToken returned by previous list +// requests to get the next page of results. func (c *ProjectsAggregatedUsableSubnetworksListCall) PageToken(pageToken string) *ProjectsAggregatedUsableSubnetworksListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -6027,7 +5832,7 @@ func (c *ProjectsAggregatedUsableSubnetworksListCall) Header() http.Header { func (c *ProjectsAggregatedUsableSubnetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6098,23 +5903,23 @@ func (c *ProjectsAggregatedUsableSubnetworksListCall) Do(opts ...googleapi.CallO // ], // "parameters": { // "filter": { - // "description": "Filtering currently only supports equality on the networkProjectId and must\nbe in the form: \"networkProjectId=[PROJECTID]\", where `networkProjectId`\nis the project which owns the listed subnetworks. This defaults to the\nparent project ID.", + // "description": "Filtering currently only supports equality on the networkProjectId and must be in the form: \"networkProjectId=[PROJECTID]\", where `networkProjectId` is the project which owns the listed subnetworks. This defaults to the parent project ID.", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "The max number of results per page that should be returned. If the number\nof available results is larger than `page_size`, a `next_page_token` is\nreturned which can be used to get the next page of results in subsequent\nrequests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The max number of results per page that should be returned. If the number of available results is larger than `page_size`, a `next_page_token` is returned which can be used to get the next page of results in subsequent requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set this to the nextPageToken returned by\nprevious list requests to get the next page of results.", + // "description": "Specifies a page token to use. Set this to the nextPageToken returned by previous list requests to get the next page of results.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The parent project where subnetworks are usable.\nSpecified in the format `projects/*`.", + // "description": "Required. The parent project where subnetworks are usable. 
Specified in the format `projects/*`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -6173,24 +5978,19 @@ func (r *ProjectsLocationsService) GetServerConfig(name string) *ProjectsLocatio } // ProjectId sets the optional parameter "projectId": Required. -// Deprecated. The Google Developers Console [project ID or -// project -// number](https://support.google.com/cloud/answer/6158840). -// This -// field has been deprecated and replaced by the name field. +// Deprecated. The Google Developers Console [project ID or project +// number](https://support.google.com/cloud/answer/6158840). This field +// has been deprecated and replaced by the name field. func (c *ProjectsLocationsGetServerConfigCall) ProjectId(projectId string) *ProjectsLocationsGetServerConfigCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Required. Deprecated. The -// name of the Google Compute -// Engine +// name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) to -// return -// operations for. This field has been deprecated and replaced by the -// name -// field. +// return operations for. This field has been deprecated and replaced by +// the name field. func (c *ProjectsLocationsGetServerConfigCall) Zone(zone string) *ProjectsLocationsGetServerConfigCall { c.urlParams_.Set("zone", zone) return c @@ -6233,7 +6033,7 @@ func (c *ProjectsLocationsGetServerConfigCall) Header() http.Header { func (c *ProjectsLocationsGetServerConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6304,19 +6104,19 @@ func (c *ProjectsLocationsGetServerConfigCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "The name (project and location) of the server config to get,\nspecified in the format `projects/*/locations/*`.", + // "description": "The name (project and location) of the server config to get, specified in the format `projects/*/locations/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) to return\noperations for. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for. 
This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // } @@ -6387,7 +6187,7 @@ func (c *ProjectsLocationsListCall) Header() http.Header { func (c *ProjectsLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6458,7 +6258,7 @@ func (c *ProjectsLocationsListCall) Do(opts ...googleapi.CallOption) (*ListLocat // ], // "parameters": { // "parent": { - // "description": "Required. Contains the name of the resource requested.\nSpecified in the format `projects/*`.", + // "description": "Required. Contains the name of the resource requested. Specified in the format `projects/*`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -6522,7 +6322,7 @@ func (c *ProjectsLocationsClustersCompleteIpRotationCall) Header() http.Header { func (c *ProjectsLocationsClustersCompleteIpRotationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6595,7 +6395,7 @@ func (c *ProjectsLocationsClustersCompleteIpRotationCall) Do(opts ...googleapi.C // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster id) of the cluster to complete IP\nrotation. Specified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster id) of the cluster to complete IP rotation. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -6628,24 +6428,14 @@ type ProjectsLocationsClustersCreateCall struct { } // Create: Creates a cluster, consisting of the specified number and -// type of Google -// Compute Engine instances. -// -// By default, the cluster is created in the -// project's -// [default -// network](https://cloud.google.com/compute/docs/netw -// orks-and-firewalls#networks). -// -// One firewall is added for the cluster. After cluster creation, -// the Kubelet creates routes for each node to allow the containers -// on that node to communicate with all other instances in -// the -// cluster. -// -// Finally, an entry is added to the project's global metadata -// indicating -// which CIDR range the cluster is using. +// type of Google Compute Engine instances. By default, the cluster is +// created in the project's [default +// network](https://cloud.google.com/compute/docs/networks-and-firewalls# +// networks). One firewall is added for the cluster. After cluster +// creation, the Kubelet creates routes for each node to allow the +// containers on that node to communicate with all other instances in +// the cluster. Finally, an entry is added to the project's global +// metadata indicating which CIDR range the cluster is using. 
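// Illustrative sketch: how a caller might drive the generated client reflowed
// above to create a cluster. The project, location, and cluster names are
// hypothetical, and the Cluster fields used here (Name, InitialNodeCount) are
// assumptions about the wider v1beta1 surface rather than anything this hunk
// itself confirms; error handling is kept minimal.

import (
	"context"

	container "google.golang.org/api/container/v1beta1"
)

func createExampleCluster(ctx context.Context) error {
	// NewService uses Application Default Credentials by default.
	svc, err := container.NewService(ctx)
	if err != nil {
		return err
	}
	req := &container.CreateClusterRequest{
		Cluster: &container.Cluster{
			Name:             "example-cluster", // hypothetical
			InitialNodeCount: 3,
		},
	}
	// The parent selects the project and location, as the doc comment above notes.
	_, err = svc.Projects.Locations.Clusters.Create(
		"projects/my-project/locations/us-central1", req).Context(ctx).Do()
	return err
}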
func (r *ProjectsLocationsClustersService) Create(parent string, createclusterrequest *CreateClusterRequest) *ProjectsLocationsClustersCreateCall { c := &ProjectsLocationsClustersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6680,7 +6470,7 @@ func (c *ProjectsLocationsClustersCreateCall) Header() http.Header { func (c *ProjectsLocationsClustersCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6744,7 +6534,7 @@ func (c *ProjectsLocationsClustersCreateCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default\nnetwork](https://cloud.google.com/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe Kubelet creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range the cluster is using.", + // "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances. By default, the cluster is created in the project's [default network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks). One firewall is added for the cluster. After cluster creation, the Kubelet creates routes for each node to allow the containers on that node to communicate with all other instances in the cluster. Finally, an entry is added to the project's global metadata indicating which CIDR range the cluster is using.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/clusters", // "httpMethod": "POST", // "id": "container.projects.locations.clusters.create", @@ -6753,7 +6543,7 @@ func (c *ProjectsLocationsClustersCreateCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "parent": { - // "description": "The parent (project and location) where the cluster will be created.\nSpecified in the format `projects/*/locations/*`.", + // "description": "The parent (project and location) where the cluster will be created. Specified in the format `projects/*/locations/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -6785,17 +6575,11 @@ type ProjectsLocationsClustersDeleteCall struct { } // Delete: Deletes the cluster, including the Kubernetes endpoint and -// all worker -// nodes. -// -// Firewalls and routes that were configured during cluster creation -// are also deleted. -// -// Other Google Compute Engine resources that might be in use by the -// cluster, -// such as load balancer resources, are not deleted if they weren't -// present -// when the cluster was initially created. +// all worker nodes. Firewalls and routes that were configured during +// cluster creation are also deleted. Other Google Compute Engine +// resources that might be in use by the cluster, such as load balancer +// resources, are not deleted if they weren't present when the cluster +// was initially created. 
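// A matching sketch for the Delete call documented above, reusing the svc and
// ctx from the previous example; the fully qualified cluster name is
// hypothetical. Delete returns a long-running Operation, which a caller could
// poll until the firewall and route cleanup described above has finished.

func deleteExampleCluster(ctx context.Context, svc *container.Service) error {
	op, err := svc.Projects.Locations.Clusters.Delete(
		"projects/my-project/locations/us-central1/clusters/example-cluster").Context(ctx).Do()
	if err != nil {
		return err
	}
	_ = op.Name // operation name, usable for polling
	return nil
}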
func (r *ProjectsLocationsClustersService) Delete(name string) *ProjectsLocationsClustersDeleteCall { c := &ProjectsLocationsClustersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -6803,32 +6587,27 @@ func (r *ProjectsLocationsClustersService) Delete(name string) *ProjectsLocation } // ClusterId sets the optional parameter "clusterId": Required. -// Deprecated. The name of the cluster to delete. -// This field has been deprecated and replaced by the name field. +// Deprecated. The name of the cluster to delete. This field has been +// deprecated and replaced by the name field. func (c *ProjectsLocationsClustersDeleteCall) ClusterId(clusterId string) *ProjectsLocationsClustersDeleteCall { c.urlParams_.Set("clusterId", clusterId) return c } // ProjectId sets the optional parameter "projectId": Required. -// Deprecated. The Google Developers Console [project ID or -// project -// number](https://support.google.com/cloud/answer/6158840). -// This -// field has been deprecated and replaced by the name field. +// Deprecated. The Google Developers Console [project ID or project +// number](https://support.google.com/cloud/answer/6158840). This field +// has been deprecated and replaced by the name field. func (c *ProjectsLocationsClustersDeleteCall) ProjectId(projectId string) *ProjectsLocationsClustersDeleteCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Required. Deprecated. The -// name of the Google Compute -// Engine +// name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in -// which the -// cluster resides. This field has been deprecated and replaced by the -// name -// field. +// which the cluster resides. This field has been deprecated and +// replaced by the name field. func (c *ProjectsLocationsClustersDeleteCall) Zone(zone string) *ProjectsLocationsClustersDeleteCall { c.urlParams_.Set("zone", zone) return c @@ -6861,7 +6640,7 @@ func (c *ProjectsLocationsClustersDeleteCall) Header() http.Header { func (c *ProjectsLocationsClustersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6920,7 +6699,7 @@ func (c *ProjectsLocationsClustersDeleteCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster,\nsuch as load balancer resources, are not deleted if they weren't present\nwhen the cluster was initially created.", + // "description": "Deletes the cluster, including the Kubernetes endpoint and all worker nodes. Firewalls and routes that were configured during cluster creation are also deleted. 
Other Google Compute Engine resources that might be in use by the cluster, such as load balancer resources, are not deleted if they weren't present when the cluster was initially created.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}", // "httpMethod": "DELETE", // "id": "container.projects.locations.clusters.delete", @@ -6929,24 +6708,24 @@ func (c *ProjectsLocationsClustersDeleteCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster to delete.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster to delete. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "name": { - // "description": "The name (project, location, cluster) of the cluster to delete.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to delete. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // } @@ -6981,32 +6760,27 @@ func (r *ProjectsLocationsClustersService) Get(name string) *ProjectsLocationsCl } // ClusterId sets the optional parameter "clusterId": Required. -// Deprecated. The name of the cluster to retrieve. -// This field has been deprecated and replaced by the name field. +// Deprecated. The name of the cluster to retrieve. This field has been +// deprecated and replaced by the name field. func (c *ProjectsLocationsClustersGetCall) ClusterId(clusterId string) *ProjectsLocationsClustersGetCall { c.urlParams_.Set("clusterId", clusterId) return c } // ProjectId sets the optional parameter "projectId": Required. -// Deprecated. The Google Developers Console [project ID or -// project -// number](https://support.google.com/cloud/answer/6158840). -// This -// field has been deprecated and replaced by the name field. +// Deprecated. The Google Developers Console [project ID or project +// number](https://support.google.com/cloud/answer/6158840). This field +// has been deprecated and replaced by the name field. 
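// The projectId/zone/clusterId query parameters above are all marked
// deprecated in favor of the single name path parameter. A small sketch of
// the preferred form, with a hypothetical cluster name; reading Status from
// the returned Cluster is an assumption about the response type, not
// something shown in this hunk.

func exampleClusterStatus(ctx context.Context, svc *container.Service) (string, error) {
	cluster, err := svc.Projects.Locations.Clusters.Get(
		"projects/my-project/locations/us-central1/clusters/example-cluster").Context(ctx).Do()
	if err != nil {
		return "", err
	}
	return cluster.Status, nil
}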
func (c *ProjectsLocationsClustersGetCall) ProjectId(projectId string) *ProjectsLocationsClustersGetCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Required. Deprecated. The -// name of the Google Compute -// Engine +// name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in -// which the -// cluster resides. This field has been deprecated and replaced by the -// name -// field. +// which the cluster resides. This field has been deprecated and +// replaced by the name field. func (c *ProjectsLocationsClustersGetCall) Zone(zone string) *ProjectsLocationsClustersGetCall { c.urlParams_.Set("zone", zone) return c @@ -7049,7 +6823,7 @@ func (c *ProjectsLocationsClustersGetCall) Header() http.Header { func (c *ProjectsLocationsClustersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7120,24 +6894,24 @@ func (c *ProjectsLocationsClustersGetCall) Do(opts ...googleapi.CallOption) (*Cl // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster to retrieve.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster to retrieve. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "name": { - // "description": "The name (project, location, cluster) of the cluster to retrieve.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to retrieve. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // } @@ -7164,12 +6938,9 @@ type ProjectsLocationsClustersGetJwksCall struct { header_ http.Header } -// GetJwks: Gets the public component of the cluster signing keys -// in -// JSON Web Key format. -// This API is not yet intended for general use, and is not available -// for all -// clusters. +// GetJwks: Gets the public component of the cluster signing keys in +// JSON Web Key format. 
This API is not yet intended for general use, +// and is not available for all clusters. func (r *ProjectsLocationsClustersService) GetJwks(parent string) *ProjectsLocationsClustersGetJwksCall { c := &ProjectsLocationsClustersGetJwksCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -7213,7 +6984,7 @@ func (c *ProjectsLocationsClustersGetJwksCall) Header() http.Header { func (c *ProjectsLocationsClustersGetJwksCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7275,7 +7046,7 @@ func (c *ProjectsLocationsClustersGetJwksCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Gets the public component of the cluster signing keys in\nJSON Web Key format.\nThis API is not yet intended for general use, and is not available for all\nclusters.", + // "description": "Gets the public component of the cluster signing keys in JSON Web Key format. This API is not yet intended for general use, and is not available for all clusters.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/jwks", // "httpMethod": "GET", // "id": "container.projects.locations.clusters.getJwks", @@ -7284,7 +7055,7 @@ func (c *ProjectsLocationsClustersGetJwksCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "parent": { - // "description": "The cluster (project, location, cluster id) to get keys for. Specified in\nthe format `projects/*/locations/*/clusters/*`.", + // "description": "The cluster (project, location, cluster id) to get keys for. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -7311,8 +7082,7 @@ type ProjectsLocationsClustersListCall struct { } // List: Lists all clusters owned by a project in either the specified -// zone or all -// zones. +// zone or all zones. func (r *ProjectsLocationsClustersService) List(parent string) *ProjectsLocationsClustersListCall { c := &ProjectsLocationsClustersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -7320,24 +7090,19 @@ func (r *ProjectsLocationsClustersService) List(parent string) *ProjectsLocation } // ProjectId sets the optional parameter "projectId": Required. -// Deprecated. The Google Developers Console [project ID or -// project -// number](https://support.google.com/cloud/answer/6158840). -// This -// field has been deprecated and replaced by the parent field. +// Deprecated. The Google Developers Console [project ID or project +// number](https://support.google.com/cloud/answer/6158840). This field +// has been deprecated and replaced by the parent field. func (c *ProjectsLocationsClustersListCall) ProjectId(projectId string) *ProjectsLocationsClustersListCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Required. Deprecated. The -// name of the Google Compute -// Engine +// name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in -// which the -// cluster resides, or "-" for all zones. This field has been deprecated -// and -// replaced by the parent field. +// which the cluster resides, or "-" for all zones. 
This field has been +// deprecated and replaced by the parent field. func (c *ProjectsLocationsClustersListCall) Zone(zone string) *ProjectsLocationsClustersListCall { c.urlParams_.Set("zone", zone) return c @@ -7380,7 +7145,7 @@ func (c *ProjectsLocationsClustersListCall) Header() http.Header { func (c *ProjectsLocationsClustersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7442,7 +7207,7 @@ func (c *ProjectsLocationsClustersListCall) Do(opts ...googleapi.CallOption) (*L } return ret, nil // { - // "description": "Lists all clusters owned by a project in either the specified zone or all\nzones.", + // "description": "Lists all clusters owned by a project in either the specified zone or all zones.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/clusters", // "httpMethod": "GET", // "id": "container.projects.locations.clusters.list", @@ -7451,19 +7216,19 @@ func (c *ProjectsLocationsClustersListCall) Do(opts ...googleapi.CallOption) (*L // ], // "parameters": { // "parent": { - // "description": "The parent (project and location) where the clusters will be listed.\nSpecified in the format `projects/*/locations/*`.\nLocation \"-\" matches all zones and all regions.", + // "description": "The parent (project and location) where the clusters will be listed. Specified in the format `projects/*/locations/*`. Location \"-\" matches all zones and all regions.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides, or \"-\" for all zones. This field has been deprecated and\nreplaced by the parent field.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides, or \"-\" for all zones. 
This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" // } @@ -7525,7 +7290,7 @@ func (c *ProjectsLocationsClustersSetAddonsCall) Header() http.Header { func (c *ProjectsLocationsClustersSetAddonsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7598,7 +7363,7 @@ func (c *ProjectsLocationsClustersSetAddonsCall) Do(opts ...googleapi.CallOption // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster) of the cluster to set addons.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to set addons. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -7666,7 +7431,7 @@ func (c *ProjectsLocationsClustersSetLegacyAbacCall) Header() http.Header { func (c *ProjectsLocationsClustersSetLegacyAbacCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7739,7 +7504,7 @@ func (c *ProjectsLocationsClustersSetLegacyAbacCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster id) of the cluster to set legacy abac.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster id) of the cluster to set legacy abac. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -7771,13 +7536,11 @@ type ProjectsLocationsClustersSetLocationsCall struct { header_ http.Header } -// SetLocations: Sets the locations for a specific cluster. -// Deprecated. +// SetLocations: Sets the locations for a specific cluster. Deprecated. // Use -// [projects.locations.clusters.update](https://cloud.google.com/kube -// rnetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters/ -// update) -// instead. +// [projects.locations.clusters.update](https://cloud.google.com/kubernet +// es-engine/docs/reference/rest/v1beta1/projects.locations.clusters/upda +// te) instead. 
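// The doc comment above deprecates SetLocations in favor of
// projects.locations.clusters.update. A hedged sketch of the replacement,
// assuming (per the REST reference, not this hunk) that ClusterUpdate exposes
// a DesiredLocations field; the names and zones are hypothetical.

func moveExampleClusterLocations(ctx context.Context, svc *container.Service) error {
	req := &container.UpdateClusterRequest{
		Update: &container.ClusterUpdate{
			DesiredLocations: []string{"us-central1-a", "us-central1-b"}, // assumed field
		},
	}
	_, err := svc.Projects.Locations.Clusters.Update(
		"projects/my-project/locations/us-central1/clusters/example-cluster", req).Context(ctx).Do()
	return err
}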
func (r *ProjectsLocationsClustersService) SetLocations(name string, setlocationsrequest *SetLocationsRequest) *ProjectsLocationsClustersSetLocationsCall { c := &ProjectsLocationsClustersSetLocationsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -7812,7 +7575,7 @@ func (c *ProjectsLocationsClustersSetLocationsCall) Header() http.Header { func (c *ProjectsLocationsClustersSetLocationsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7876,7 +7639,7 @@ func (c *ProjectsLocationsClustersSetLocationsCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Sets the locations for a specific cluster.\nDeprecated. Use\n[projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters/update)\ninstead.", + // "description": "Sets the locations for a specific cluster. Deprecated. Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters/update) instead.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLocations", // "httpMethod": "POST", // "id": "container.projects.locations.clusters.setLocations", @@ -7885,7 +7648,7 @@ func (c *ProjectsLocationsClustersSetLocationsCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster) of the cluster to set locations.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to set locations. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -7952,7 +7715,7 @@ func (c *ProjectsLocationsClustersSetLoggingCall) Header() http.Header { func (c *ProjectsLocationsClustersSetLoggingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8025,7 +7788,7 @@ func (c *ProjectsLocationsClustersSetLoggingCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster) of the cluster to set logging.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to set logging. 
Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -8092,7 +7855,7 @@ func (c *ProjectsLocationsClustersSetMaintenancePolicyCall) Header() http.Header func (c *ProjectsLocationsClustersSetMaintenancePolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8165,7 +7928,7 @@ func (c *ProjectsLocationsClustersSetMaintenancePolicyCall) Do(opts ...googleapi // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster id) of the cluster to set maintenance\npolicy.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster id) of the cluster to set maintenance policy. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -8198,10 +7961,8 @@ type ProjectsLocationsClustersSetMasterAuthCall struct { } // SetMasterAuth: Sets master auth materials. Currently supports -// changing the admin password -// or a specific cluster, either via password generation or explicitly -// setting -// the password. +// changing the admin password or a specific cluster, either via +// password generation or explicitly setting the password. func (r *ProjectsLocationsClustersService) SetMasterAuth(name string, setmasterauthrequest *SetMasterAuthRequest) *ProjectsLocationsClustersSetMasterAuthCall { c := &ProjectsLocationsClustersSetMasterAuthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -8236,7 +7997,7 @@ func (c *ProjectsLocationsClustersSetMasterAuthCall) Header() http.Header { func (c *ProjectsLocationsClustersSetMasterAuthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8300,7 +8061,7 @@ func (c *ProjectsLocationsClustersSetMasterAuthCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Sets master auth materials. Currently supports changing the admin password\nor a specific cluster, either via password generation or explicitly setting\nthe password.", + // "description": "Sets master auth materials. Currently supports changing the admin password or a specific cluster, either via password generation or explicitly setting the password.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setMasterAuth", // "httpMethod": "POST", // "id": "container.projects.locations.clusters.setMasterAuth", @@ -8309,7 +8070,7 @@ func (c *ProjectsLocationsClustersSetMasterAuthCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster) of the cluster to set auth.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to set auth. 
Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -8376,7 +8137,7 @@ func (c *ProjectsLocationsClustersSetMonitoringCall) Header() http.Header { func (c *ProjectsLocationsClustersSetMonitoringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8449,7 +8210,7 @@ func (c *ProjectsLocationsClustersSetMonitoringCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster) of the cluster to set monitoring.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to set monitoring. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -8516,7 +8277,7 @@ func (c *ProjectsLocationsClustersSetNetworkPolicyCall) Header() http.Header { func (c *ProjectsLocationsClustersSetNetworkPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8589,7 +8350,7 @@ func (c *ProjectsLocationsClustersSetNetworkPolicyCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster id) of the cluster to set networking\npolicy. Specified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster id) of the cluster to set networking policy. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -8656,7 +8417,7 @@ func (c *ProjectsLocationsClustersSetResourceLabelsCall) Header() http.Header { func (c *ProjectsLocationsClustersSetResourceLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8729,7 +8490,7 @@ func (c *ProjectsLocationsClustersSetResourceLabelsCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster id) of the cluster to set labels.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster id) of the cluster to set labels. 
Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -8796,7 +8557,7 @@ func (c *ProjectsLocationsClustersStartIpRotationCall) Header() http.Header { func (c *ProjectsLocationsClustersStartIpRotationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8869,7 +8630,7 @@ func (c *ProjectsLocationsClustersStartIpRotationCall) Do(opts ...googleapi.Call // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster id) of the cluster to start IP\nrotation. Specified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster id) of the cluster to start IP rotation. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -8936,7 +8697,7 @@ func (c *ProjectsLocationsClustersUpdateCall) Header() http.Header { func (c *ProjectsLocationsClustersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9009,7 +8770,7 @@ func (c *ProjectsLocationsClustersUpdateCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to update. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -9076,7 +8837,7 @@ func (c *ProjectsLocationsClustersUpdateMasterCall) Header() http.Header { func (c *ProjectsLocationsClustersUpdateMasterCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9149,7 +8910,7 @@ func (c *ProjectsLocationsClustersUpdateMasterCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to update. 
Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -9216,7 +8977,7 @@ func (c *ProjectsLocationsClustersNodePoolsCreateCall) Header() http.Header { func (c *ProjectsLocationsClustersNodePoolsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9289,7 +9050,7 @@ func (c *ProjectsLocationsClustersNodePoolsCreateCall) Do(opts ...googleapi.Call // ], // "parameters": { // "parent": { - // "description": "The parent (project, location, cluster id) where the node pool will be\ncreated. Specified in the format\n`projects/*/locations/*/clusters/*`.", + // "description": "The parent (project, location, cluster id) where the node pool will be created. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -9328,40 +9089,35 @@ func (r *ProjectsLocationsClustersNodePoolsService) Delete(name string) *Project } // ClusterId sets the optional parameter "clusterId": Required. -// Deprecated. The name of the cluster. -// This field has been deprecated and replaced by the name field. +// Deprecated. The name of the cluster. This field has been deprecated +// and replaced by the name field. func (c *ProjectsLocationsClustersNodePoolsDeleteCall) ClusterId(clusterId string) *ProjectsLocationsClustersNodePoolsDeleteCall { c.urlParams_.Set("clusterId", clusterId) return c } // NodePoolId sets the optional parameter "nodePoolId": Required. -// Deprecated. The name of the node pool to delete. -// This field has been deprecated and replaced by the name field. +// Deprecated. The name of the node pool to delete. This field has been +// deprecated and replaced by the name field. func (c *ProjectsLocationsClustersNodePoolsDeleteCall) NodePoolId(nodePoolId string) *ProjectsLocationsClustersNodePoolsDeleteCall { c.urlParams_.Set("nodePoolId", nodePoolId) return c } // ProjectId sets the optional parameter "projectId": Required. -// Deprecated. The Google Developers Console [project ID or -// project -// number](https://developers.google.com/console/help/new/#projec -// tnumber). -// This field has been deprecated and replaced by the name field. +// Deprecated. The Google Developers Console [project ID or project +// number](https://developers.google.com/console/help/new/#projectnumber) +// . This field has been deprecated and replaced by the name field. func (c *ProjectsLocationsClustersNodePoolsDeleteCall) ProjectId(projectId string) *ProjectsLocationsClustersNodePoolsDeleteCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Required. Deprecated. The -// name of the Google Compute -// Engine +// name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in -// which the -// cluster resides. This field has been deprecated and replaced by the -// name -// field. +// which the cluster resides. This field has been deprecated and +// replaced by the name field. 
func (c *ProjectsLocationsClustersNodePoolsDeleteCall) Zone(zone string) *ProjectsLocationsClustersNodePoolsDeleteCall { c.urlParams_.Set("zone", zone) return c @@ -9394,7 +9150,7 @@ func (c *ProjectsLocationsClustersNodePoolsDeleteCall) Header() http.Header { func (c *ProjectsLocationsClustersNodePoolsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9462,29 +9218,29 @@ func (c *ProjectsLocationsClustersNodePoolsDeleteCall) Do(opts ...googleapi.Call // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "name": { - // "description": "The name (project, location, cluster, node pool id) of the node pool to\ndelete. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool id) of the node pool to delete. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "Required. Deprecated. The name of the node pool to delete.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the node pool to delete. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // } @@ -9519,40 +9275,35 @@ func (r *ProjectsLocationsClustersNodePoolsService) Get(name string) *ProjectsLo } // ClusterId sets the optional parameter "clusterId": Required. -// Deprecated. The name of the cluster. -// This field has been deprecated and replaced by the name field. +// Deprecated. The name of the cluster. This field has been deprecated +// and replaced by the name field. 
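// The node-pool calls follow the same pattern: the deprecated clusterId,
// nodePoolId, projectId, and zone parameters are superseded by one fully
// qualified name. A minimal sketch of Get using that form; the node pool path
// is hypothetical.

func exampleNodePool(ctx context.Context, svc *container.Service) (*container.NodePool, error) {
	return svc.Projects.Locations.Clusters.NodePools.Get(
		"projects/my-project/locations/us-central1/clusters/example-cluster/nodePools/default-pool").Context(ctx).Do()
}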
func (c *ProjectsLocationsClustersNodePoolsGetCall) ClusterId(clusterId string) *ProjectsLocationsClustersNodePoolsGetCall { c.urlParams_.Set("clusterId", clusterId) return c } // NodePoolId sets the optional parameter "nodePoolId": Required. -// Deprecated. The name of the node pool. -// This field has been deprecated and replaced by the name field. +// Deprecated. The name of the node pool. This field has been deprecated +// and replaced by the name field. func (c *ProjectsLocationsClustersNodePoolsGetCall) NodePoolId(nodePoolId string) *ProjectsLocationsClustersNodePoolsGetCall { c.urlParams_.Set("nodePoolId", nodePoolId) return c } // ProjectId sets the optional parameter "projectId": Required. -// Deprecated. The Google Developers Console [project ID or -// project -// number](https://developers.google.com/console/help/new/#projec -// tnumber). -// This field has been deprecated and replaced by the name field. +// Deprecated. The Google Developers Console [project ID or project +// number](https://developers.google.com/console/help/new/#projectnumber) +// . This field has been deprecated and replaced by the name field. func (c *ProjectsLocationsClustersNodePoolsGetCall) ProjectId(projectId string) *ProjectsLocationsClustersNodePoolsGetCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Required. Deprecated. The -// name of the Google Compute -// Engine +// name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in -// which the -// cluster resides. This field has been deprecated and replaced by the -// name -// field. +// which the cluster resides. This field has been deprecated and +// replaced by the name field. func (c *ProjectsLocationsClustersNodePoolsGetCall) Zone(zone string) *ProjectsLocationsClustersNodePoolsGetCall { c.urlParams_.Set("zone", zone) return c @@ -9595,7 +9346,7 @@ func (c *ProjectsLocationsClustersNodePoolsGetCall) Header() http.Header { func (c *ProjectsLocationsClustersNodePoolsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9666,29 +9417,29 @@ func (c *ProjectsLocationsClustersNodePoolsGetCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "name": { - // "description": "The name (project, location, cluster, node pool id) of the node pool to\nget. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool id) of the node pool to get. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "Required. Deprecated. The name of the node pool.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the node pool. 
This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // } @@ -9723,32 +9474,27 @@ func (r *ProjectsLocationsClustersNodePoolsService) List(parent string) *Project } // ClusterId sets the optional parameter "clusterId": Required. -// Deprecated. The name of the cluster. -// This field has been deprecated and replaced by the parent field. +// Deprecated. The name of the cluster. This field has been deprecated +// and replaced by the parent field. func (c *ProjectsLocationsClustersNodePoolsListCall) ClusterId(clusterId string) *ProjectsLocationsClustersNodePoolsListCall { c.urlParams_.Set("clusterId", clusterId) return c } // ProjectId sets the optional parameter "projectId": Required. -// Deprecated. The Google Developers Console [project ID or -// project -// number](https://developers.google.com/console/help/new/#projec -// tnumber). -// This field has been deprecated and replaced by the parent field. +// Deprecated. The Google Developers Console [project ID or project +// number](https://developers.google.com/console/help/new/#projectnumber) +// . This field has been deprecated and replaced by the parent field. func (c *ProjectsLocationsClustersNodePoolsListCall) ProjectId(projectId string) *ProjectsLocationsClustersNodePoolsListCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Required. Deprecated. The -// name of the Google Compute -// Engine +// name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in -// which the -// cluster resides. This field has been deprecated and replaced by the -// parent -// field. +// which the cluster resides. This field has been deprecated and +// replaced by the parent field. 
func (c *ProjectsLocationsClustersNodePoolsListCall) Zone(zone string) *ProjectsLocationsClustersNodePoolsListCall { c.urlParams_.Set("zone", zone) return c @@ -9791,7 +9537,7 @@ func (c *ProjectsLocationsClustersNodePoolsListCall) Header() http.Header { func (c *ProjectsLocationsClustersNodePoolsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9862,24 +9608,24 @@ func (c *ProjectsLocationsClustersNodePoolsListCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", + // "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "The parent (project, location, cluster id) where the node pools will be\nlisted. Specified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The parent (project, location, cluster id) where the node pools will be listed. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" // } @@ -9906,8 +9652,7 @@ type ProjectsLocationsClustersNodePoolsRollbackCall struct { header_ http.Header } -// Rollback: Rolls back a previously Aborted or Failed NodePool -// upgrade. +// Rollback: Rolls back a previously Aborted or Failed NodePool upgrade. // This makes no changes if the last upgrade successfully completed. 
func (r *ProjectsLocationsClustersNodePoolsService) Rollback(name string, rollbacknodepoolupgraderequest *RollbackNodePoolUpgradeRequest) *ProjectsLocationsClustersNodePoolsRollbackCall { c := &ProjectsLocationsClustersNodePoolsRollbackCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -9943,7 +9688,7 @@ func (c *ProjectsLocationsClustersNodePoolsRollbackCall) Header() http.Header { func (c *ProjectsLocationsClustersNodePoolsRollbackCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10007,7 +9752,7 @@ func (c *ProjectsLocationsClustersNodePoolsRollbackCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Rolls back a previously Aborted or Failed NodePool upgrade.\nThis makes no changes if the last upgrade successfully completed.", + // "description": "Rolls back a previously Aborted or Failed NodePool upgrade. This makes no changes if the last upgrade successfully completed.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:rollback", // "httpMethod": "POST", // "id": "container.projects.locations.clusters.nodePools.rollback", @@ -10016,7 +9761,7 @@ func (c *ProjectsLocationsClustersNodePoolsRollbackCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster, node pool id) of the node poll to\nrollback upgrade.\nSpecified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool id) of the node poll to rollback upgrade. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", // "required": true, @@ -10084,7 +9829,7 @@ func (c *ProjectsLocationsClustersNodePoolsSetAutoscalingCall) Header() http.Hea func (c *ProjectsLocationsClustersNodePoolsSetAutoscalingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10157,7 +9902,7 @@ func (c *ProjectsLocationsClustersNodePoolsSetAutoscalingCall) Do(opts ...google // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster, node pool) of the node pool to set\nautoscaler settings. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool) of the node pool to set autoscaler settings. 
Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", // "required": true, @@ -10224,7 +9969,7 @@ func (c *ProjectsLocationsClustersNodePoolsSetManagementCall) Header() http.Head func (c *ProjectsLocationsClustersNodePoolsSetManagementCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10297,7 +10042,7 @@ func (c *ProjectsLocationsClustersNodePoolsSetManagementCall) Do(opts ...googlea // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster, node pool id) of the node pool to set\nmanagement properties. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool id) of the node pool to set management properties. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", // "required": true, @@ -10364,7 +10109,7 @@ func (c *ProjectsLocationsClustersNodePoolsSetSizeCall) Header() http.Header { func (c *ProjectsLocationsClustersNodePoolsSetSizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10437,7 +10182,7 @@ func (c *ProjectsLocationsClustersNodePoolsSetSizeCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster, node pool id) of the node pool to set\nsize.\nSpecified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool id) of the node pool to set size. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", // "required": true, @@ -10505,7 +10250,7 @@ func (c *ProjectsLocationsClustersNodePoolsUpdateCall) Header() http.Header { func (c *ProjectsLocationsClustersNodePoolsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10578,7 +10323,7 @@ func (c *ProjectsLocationsClustersNodePoolsUpdateCall) Do(opts ...googleapi.Call // ], // "parameters": { // "name": { - // "description": "The name (project, location, cluster, node pool) of the node pool to\nupdate. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool) of the node pool to update. 
Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", // "required": true, @@ -10611,16 +10356,10 @@ type ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall struct { } // GetOpenidConfiguration: Gets the OIDC discovery document for the -// cluster. -// See the -// [OpenID Connect Discovery -// 1.0 -// specification](https://openid.net/specs/openid-connect-discovery-1 -// _0.html) -// for details. -// This API is not yet intended for general use, and is not available -// for all -// clusters. +// cluster. See the [OpenID Connect Discovery 1.0 +// specification](https://openid.net/specs/openid-connect-discovery-1_0.h +// tml) for details. This API is not yet intended for general use, and +// is not available for all clusters. func (r *ProjectsLocationsClustersWellKnownService) GetOpenidConfiguration(parent string) *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall { c := &ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -10664,7 +10403,7 @@ func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) Header() func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10726,7 +10465,7 @@ func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) Do(opts . } return ret, nil // { - // "description": "Gets the OIDC discovery document for the cluster.\nSee the\n[OpenID Connect Discovery 1.0\nspecification](https://openid.net/specs/openid-connect-discovery-1_0.html)\nfor details.\nThis API is not yet intended for general use, and is not available for all\nclusters.", + // "description": "Gets the OIDC discovery document for the cluster. See the [OpenID Connect Discovery 1.0 specification](https://openid.net/specs/openid-connect-discovery-1_0.html) for details. This API is not yet intended for general use, and is not available for all clusters.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/.well-known/openid-configuration", // "httpMethod": "GET", // "id": "container.projects.locations.clusters.well-known.getOpenid-configuration", @@ -10735,7 +10474,7 @@ func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) Do(opts . // ], // "parameters": { // "parent": { - // "description": "The cluster (project, location, cluster id) to get the discovery document\nfor. Specified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The cluster (project, location, cluster id) to get the discovery document for. 
Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", // "required": true, @@ -10796,7 +10535,7 @@ func (c *ProjectsLocationsOperationsCancelCall) Header() http.Header { func (c *ProjectsLocationsOperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10869,7 +10608,7 @@ func (c *ProjectsLocationsOperationsCancelCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "The name (project, location, operation id) of the operation to cancel.\nSpecified in the format `projects/*/locations/*/operations/*`.", + // "description": "The name (project, location, operation id) of the operation to cancel. Specified in the format `projects/*/locations/*/operations/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", // "required": true, @@ -10909,32 +10648,27 @@ func (r *ProjectsLocationsOperationsService) Get(name string) *ProjectsLocations } // OperationId sets the optional parameter "operationId": Required. -// Deprecated. The server-assigned `name` of the operation. -// This field has been deprecated and replaced by the name field. +// Deprecated. The server-assigned `name` of the operation. This field +// has been deprecated and replaced by the name field. func (c *ProjectsLocationsOperationsGetCall) OperationId(operationId string) *ProjectsLocationsOperationsGetCall { c.urlParams_.Set("operationId", operationId) return c } // ProjectId sets the optional parameter "projectId": Required. -// Deprecated. The Google Developers Console [project ID or -// project -// number](https://support.google.com/cloud/answer/6158840). -// This -// field has been deprecated and replaced by the name field. +// Deprecated. The Google Developers Console [project ID or project +// number](https://support.google.com/cloud/answer/6158840). This field +// has been deprecated and replaced by the name field. func (c *ProjectsLocationsOperationsGetCall) ProjectId(projectId string) *ProjectsLocationsOperationsGetCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Required. Deprecated. The -// name of the Google Compute -// Engine +// name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) in -// which the -// cluster resides. This field has been deprecated and replaced by the -// name -// field. +// which the cluster resides. This field has been deprecated and +// replaced by the name field. 
func (c *ProjectsLocationsOperationsGetCall) Zone(zone string) *ProjectsLocationsOperationsGetCall { c.urlParams_.Set("zone", zone) return c @@ -10977,7 +10711,7 @@ func (c *ProjectsLocationsOperationsGetCall) Header() http.Header { func (c *ProjectsLocationsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11048,24 +10782,24 @@ func (c *ProjectsLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "name": { - // "description": "The name (project, location, operation id) of the operation to get.\nSpecified in the format `projects/*/locations/*/operations/*`.", + // "description": "The name (project, location, operation id) of the operation to get. Specified in the format `projects/*/locations/*/operations/*`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", // "required": true, // "type": "string" // }, // "operationId": { - // "description": "Required. Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The server-assigned `name` of the operation. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // } @@ -11101,24 +10835,19 @@ func (r *ProjectsLocationsOperationsService) List(parent string) *ProjectsLocati } // ProjectId sets the optional parameter "projectId": Required. -// Deprecated. The Google Developers Console [project ID or -// project -// number](https://support.google.com/cloud/answer/6158840). -// This -// field has been deprecated and replaced by the parent field. +// Deprecated. The Google Developers Console [project ID or project +// number](https://support.google.com/cloud/answer/6158840). This field +// has been deprecated and replaced by the parent field. func (c *ProjectsLocationsOperationsListCall) ProjectId(projectId string) *ProjectsLocationsOperationsListCall { c.urlParams_.Set("projectId", projectId) return c } // Zone sets the optional parameter "zone": Required. Deprecated. 
The -// name of the Google Compute -// Engine +// name of the Google Compute Engine // [zone](https://cloud.google.com/compute/docs/zones#available) to -// return -// operations for, or `-` for all zones. This field has been deprecated -// and -// replaced by the parent field. +// return operations for, or `-` for all zones. This field has been +// deprecated and replaced by the parent field. func (c *ProjectsLocationsOperationsListCall) Zone(zone string) *ProjectsLocationsOperationsListCall { c.urlParams_.Set("zone", zone) return c @@ -11161,7 +10890,7 @@ func (c *ProjectsLocationsOperationsListCall) Header() http.Header { func (c *ProjectsLocationsOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11232,19 +10961,19 @@ func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "parent": { - // "description": "The parent (project and location) where the operations will be listed.\nSpecified in the format `projects/*/locations/*`.\nLocation \"-\" matches all zones and all regions.", + // "description": "The parent (project and location) where the operations will be listed. Specified in the format `projects/*/locations/*`. Location \"-\" matches all zones and all regions.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) to return\noperations for, or `-` for all zones. This field has been deprecated and\nreplaced by the parent field.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for, or `-` for all zones. This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" // } @@ -11282,8 +11011,8 @@ func (r *ProjectsZonesService) GetServerconfig(projectId string, zone string) *P } // Name sets the optional parameter "name": The name (project and -// location) of the server config to get, -// specified in the format `projects/*/locations/*`. +// location) of the server config to get, specified in the format +// `projects/*/locations/*`. 
func (c *ProjectsZonesGetServerconfigCall) Name(name string) *ProjectsZonesGetServerconfigCall { c.urlParams_.Set("name", name) return c @@ -11326,7 +11055,7 @@ func (c *ProjectsZonesGetServerconfigCall) Header() http.Header { func (c *ProjectsZonesGetServerconfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11399,18 +11128,18 @@ func (c *ProjectsZonesGetServerconfigCall) Do(opts ...googleapi.CallOption) (*Se // ], // "parameters": { // "name": { - // "description": "The name (project and location) of the server config to get,\nspecified in the format `projects/*/locations/*`.", + // "description": "The name (project and location) of the server config to get, specified in the format `projects/*/locations/*`.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) to return\noperations for. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -11477,7 +11206,7 @@ func (c *ProjectsZonesClustersAddonsCall) Header() http.Header { func (c *ProjectsZonesClustersAddonsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11554,19 +11283,19 @@ func (c *ProjectsZonesClustersAddonsCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -11636,7 +11365,7 @@ func (c *ProjectsZonesClustersCompleteIpRotationCall) Header() http.Header { func (c *ProjectsZonesClustersCompleteIpRotationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11713,19 +11442,19 @@ func (c *ProjectsZonesClustersCompleteIpRotationCall) Do(opts ...googleapi.CallO // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -11758,24 +11487,14 @@ type ProjectsZonesClustersCreateCall struct { } // Create: Creates a cluster, consisting of the specified number and -// type of Google -// Compute Engine instances. -// -// By default, the cluster is created in the -// project's -// [default -// network](https://cloud.google.com/compute/docs/netw -// orks-and-firewalls#networks). -// -// One firewall is added for the cluster. After cluster creation, -// the Kubelet creates routes for each node to allow the containers -// on that node to communicate with all other instances in -// the -// cluster. -// -// Finally, an entry is added to the project's global metadata -// indicating -// which CIDR range the cluster is using. +// type of Google Compute Engine instances. 
By default, the cluster is +// created in the project's [default +// network](https://cloud.google.com/compute/docs/networks-and-firewalls# +// networks). One firewall is added for the cluster. After cluster +// creation, the Kubelet creates routes for each node to allow the +// containers on that node to communicate with all other instances in +// the cluster. Finally, an entry is added to the project's global +// metadata indicating which CIDR range the cluster is using. func (r *ProjectsZonesClustersService) Create(projectId string, zone string, createclusterrequest *CreateClusterRequest) *ProjectsZonesClustersCreateCall { c := &ProjectsZonesClustersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -11811,7 +11530,7 @@ func (c *ProjectsZonesClustersCreateCall) Header() http.Header { func (c *ProjectsZonesClustersCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11876,7 +11595,7 @@ func (c *ProjectsZonesClustersCreateCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default\nnetwork](https://cloud.google.com/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe Kubelet creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range the cluster is using.", + // "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances. By default, the cluster is created in the project's [default network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks). One firewall is added for the cluster. After cluster creation, the Kubelet creates routes for each node to allow the containers on that node to communicate with all other instances in the cluster. Finally, an entry is added to the project's global metadata indicating which CIDR range the cluster is using.", // "flatPath": "v1beta1/projects/{projectId}/zones/{zone}/clusters", // "httpMethod": "POST", // "id": "container.projects.zones.clusters.create", @@ -11886,13 +11605,13 @@ func (c *ProjectsZonesClustersCreateCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. 
This field has been deprecated and replaced by the parent\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" @@ -11925,17 +11644,11 @@ type ProjectsZonesClustersDeleteCall struct { } // Delete: Deletes the cluster, including the Kubernetes endpoint and -// all worker -// nodes. -// -// Firewalls and routes that were configured during cluster creation -// are also deleted. -// -// Other Google Compute Engine resources that might be in use by the -// cluster, -// such as load balancer resources, are not deleted if they weren't -// present -// when the cluster was initially created. +// all worker nodes. Firewalls and routes that were configured during +// cluster creation are also deleted. Other Google Compute Engine +// resources that might be in use by the cluster, such as load balancer +// resources, are not deleted if they weren't present when the cluster +// was initially created. func (r *ProjectsZonesClustersService) Delete(projectId string, zone string, clusterId string) *ProjectsZonesClustersDeleteCall { c := &ProjectsZonesClustersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -11945,8 +11658,8 @@ func (r *ProjectsZonesClustersService) Delete(projectId string, zone string, clu } // Name sets the optional parameter "name": The name (project, location, -// cluster) of the cluster to delete. -// Specified in the format `projects/*/locations/*/clusters/*`. +// cluster) of the cluster to delete. Specified in the format +// `projects/*/locations/*/clusters/*`. func (c *ProjectsZonesClustersDeleteCall) Name(name string) *ProjectsZonesClustersDeleteCall { c.urlParams_.Set("name", name) return c @@ -11979,7 +11692,7 @@ func (c *ProjectsZonesClustersDeleteCall) Header() http.Header { func (c *ProjectsZonesClustersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12040,7 +11753,7 @@ func (c *ProjectsZonesClustersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster,\nsuch as load balancer resources, are not deleted if they weren't present\nwhen the cluster was initially created.", + // "description": "Deletes the cluster, including the Kubernetes endpoint and all worker nodes. Firewalls and routes that were configured during cluster creation are also deleted. 
Other Google Compute Engine resources that might be in use by the cluster, such as load balancer resources, are not deleted if they weren't present when the cluster was initially created.", // "flatPath": "v1beta1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", // "httpMethod": "DELETE", // "id": "container.projects.zones.clusters.delete", @@ -12051,24 +11764,24 @@ func (c *ProjectsZonesClustersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster to delete.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster to delete. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "name": { - // "description": "The name (project, location, cluster) of the cluster to delete.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to delete. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -12108,8 +11821,8 @@ func (r *ProjectsZonesClustersService) Get(projectId string, zone string, cluste } // Name sets the optional parameter "name": The name (project, location, -// cluster) of the cluster to retrieve. -// Specified in the format `projects/*/locations/*/clusters/*`. +// cluster) of the cluster to retrieve. Specified in the format +// `projects/*/locations/*/clusters/*`. func (c *ProjectsZonesClustersGetCall) Name(name string) *ProjectsZonesClustersGetCall { c.urlParams_.Set("name", name) return c @@ -12152,7 +11865,7 @@ func (c *ProjectsZonesClustersGetCall) Header() http.Header { func (c *ProjectsZonesClustersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12227,24 +11940,24 @@ func (c *ProjectsZonesClustersGetCall) Do(opts ...googleapi.CallOption) (*Cluste // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. 
The name of the cluster to retrieve.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster to retrieve. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "name": { - // "description": "The name (project, location, cluster) of the cluster to retrieve.\nSpecified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The name (project, location, cluster) of the cluster to retrieve. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -12312,7 +12025,7 @@ func (c *ProjectsZonesClustersLegacyAbacCall) Header() http.Header { func (c *ProjectsZonesClustersLegacyAbacCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12389,19 +12102,19 @@ func (c *ProjectsZonesClustersLegacyAbacCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. 
The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -12434,8 +12147,7 @@ type ProjectsZonesClustersListCall struct { } // List: Lists all clusters owned by a project in either the specified -// zone or all -// zones. +// zone or all zones. func (r *ProjectsZonesClustersService) List(projectId string, zone string) *ProjectsZonesClustersListCall { c := &ProjectsZonesClustersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -12444,9 +12156,9 @@ func (r *ProjectsZonesClustersService) List(projectId string, zone string) *Proj } // Parent sets the optional parameter "parent": The parent (project and -// location) where the clusters will be listed. -// Specified in the format `projects/*/locations/*`. -// Location "-" matches all zones and all regions. +// location) where the clusters will be listed. Specified in the format +// `projects/*/locations/*`. Location "-" matches all zones and all +// regions. func (c *ProjectsZonesClustersListCall) Parent(parent string) *ProjectsZonesClustersListCall { c.urlParams_.Set("parent", parent) return c @@ -12489,7 +12201,7 @@ func (c *ProjectsZonesClustersListCall) Header() http.Header { func (c *ProjectsZonesClustersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12552,7 +12264,7 @@ func (c *ProjectsZonesClustersListCall) Do(opts ...googleapi.CallOption) (*ListC } return ret, nil // { - // "description": "Lists all clusters owned by a project in either the specified zone or all\nzones.", + // "description": "Lists all clusters owned by a project in either the specified zone or all zones.", // "flatPath": "v1beta1/projects/{projectId}/zones/{zone}/clusters", // "httpMethod": "GET", // "id": "container.projects.zones.clusters.list", @@ -12562,18 +12274,18 @@ func (c *ProjectsZonesClustersListCall) Do(opts ...googleapi.CallOption) (*ListC // ], // "parameters": { // "parent": { - // "description": "The parent (project and location) where the clusters will be listed.\nSpecified in the format `projects/*/locations/*`.\nLocation \"-\" matches all zones and all regions.", + // "description": "The parent (project and location) where the clusters will be listed. Specified in the format `projects/*/locations/*`. Location \"-\" matches all zones and all regions.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides, or \"-\" for all zones. 
This field has been deprecated and\nreplaced by the parent field.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides, or \"-\" for all zones. This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" @@ -12603,13 +12315,10 @@ type ProjectsZonesClustersLocationsCall struct { header_ http.Header } -// Locations: Sets the locations for a specific cluster. -// Deprecated. -// Use -// [projects.locations.clusters.update](https://cloud.google.com/kube -// rnetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters/ -// update) -// instead. +// Locations: Sets the locations for a specific cluster. Deprecated. Use +// [projects.locations.clusters.update](https://cloud.google.com/kubernet +// es-engine/docs/reference/rest/v1beta1/projects.locations.clusters/upda +// te) instead. func (r *ProjectsZonesClustersService) Locations(projectId string, zone string, clusterId string, setlocationsrequest *SetLocationsRequest) *ProjectsZonesClustersLocationsCall { c := &ProjectsZonesClustersLocationsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -12646,7 +12355,7 @@ func (c *ProjectsZonesClustersLocationsCall) Header() http.Header { func (c *ProjectsZonesClustersLocationsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12712,7 +12421,7 @@ func (c *ProjectsZonesClustersLocationsCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Sets the locations for a specific cluster.\nDeprecated. Use\n[projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters/update)\ninstead.", + // "description": "Sets the locations for a specific cluster. Deprecated. Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters/update) instead.", // "flatPath": "v1beta1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations", // "httpMethod": "POST", // "id": "container.projects.zones.clusters.locations", @@ -12723,19 +12432,19 @@ func (c *ProjectsZonesClustersLocationsCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -12805,7 +12514,7 @@ func (c *ProjectsZonesClustersLoggingCall) Header() http.Header { func (c *ProjectsZonesClustersLoggingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12882,19 +12591,19 @@ func (c *ProjectsZonesClustersLoggingCall) Do(opts ...googleapi.CallOption) (*Op // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -12964,7 +12673,7 @@ func (c *ProjectsZonesClustersMasterCall) Header() http.Header { func (c *ProjectsZonesClustersMasterCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13041,19 +12750,19 @@ func (c *ProjectsZonesClustersMasterCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster to upgrade. 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -13123,7 +12832,7 @@ func (c *ProjectsZonesClustersMonitoringCall) Header() http.Header { func (c *ProjectsZonesClustersMonitoringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13200,19 +12909,19 @@ func (c *ProjectsZonesClustersMonitoringCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -13282,7 +12991,7 @@ func (c *ProjectsZonesClustersResourceLabelsCall) Header() http.Header { func (c *ProjectsZonesClustersResourceLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13359,19 +13068,19 @@ func (c *ProjectsZonesClustersResourceLabelsCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -13441,7 +13150,7 @@ func (c *ProjectsZonesClustersSetMaintenancePolicyCall) Header() http.Header { func (c *ProjectsZonesClustersSetMaintenancePolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13524,13 +13233,13 @@ func (c *ProjectsZonesClustersSetMaintenancePolicyCall) Do(opts ...googleapi.Cal // "type": "string" // }, // "projectId": { - // "description": "Required. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Required. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides.", + // "description": "Required. 
The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides.", // "location": "path", // "required": true, // "type": "string" @@ -13564,10 +13273,8 @@ type ProjectsZonesClustersSetMasterAuthCall struct { } // SetMasterAuth: Sets master auth materials. Currently supports -// changing the admin password -// or a specific cluster, either via password generation or explicitly -// setting -// the password. +// changing the admin password or a specific cluster, either via +// password generation or explicitly setting the password. func (r *ProjectsZonesClustersService) SetMasterAuth(projectId string, zone string, clusterId string, setmasterauthrequest *SetMasterAuthRequest) *ProjectsZonesClustersSetMasterAuthCall { c := &ProjectsZonesClustersSetMasterAuthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -13604,7 +13311,7 @@ func (c *ProjectsZonesClustersSetMasterAuthCall) Header() http.Header { func (c *ProjectsZonesClustersSetMasterAuthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13670,7 +13377,7 @@ func (c *ProjectsZonesClustersSetMasterAuthCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Sets master auth materials. Currently supports changing the admin password\nor a specific cluster, either via password generation or explicitly setting\nthe password.", + // "description": "Sets master auth materials. Currently supports changing the admin password or a specific cluster, either via password generation or explicitly setting the password.", // "flatPath": "v1beta1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMasterAuth", // "httpMethod": "POST", // "id": "container.projects.zones.clusters.setMasterAuth", @@ -13681,19 +13388,19 @@ func (c *ProjectsZonesClustersSetMasterAuthCall) Do(opts ...googleapi.CallOption // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -13763,7 +13470,7 @@ func (c *ProjectsZonesClustersSetNetworkPolicyCall) Header() http.Header { func (c *ProjectsZonesClustersSetNetworkPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13840,19 +13547,19 @@ func (c *ProjectsZonesClustersSetNetworkPolicyCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -13922,7 +13629,7 @@ func (c *ProjectsZonesClustersStartIpRotationCall) Header() http.Header { func (c *ProjectsZonesClustersStartIpRotationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13999,19 +13706,19 @@ func (c *ProjectsZonesClustersStartIpRotationCall) Do(opts ...googleapi.CallOpti // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -14081,7 +13788,7 @@ func (c *ProjectsZonesClustersUpdateCall) Header() http.Header { func (c *ProjectsZonesClustersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14158,19 +13865,19 @@ func (c *ProjectsZonesClustersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -14242,7 +13949,7 @@ func (c *ProjectsZonesClustersNodePoolsAutoscalingCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsAutoscalingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14321,25 +14028,25 @@ func (c *ProjectsZonesClustersNodePoolsAutoscalingCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. 
The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "Required. Deprecated. The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the node pool to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -14409,7 +14116,7 @@ func (c *ProjectsZonesClustersNodePoolsCreateCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14486,19 +14193,19 @@ func (c *ProjectsZonesClustersNodePoolsCreateCall) Do(opts ...googleapi.CallOpti // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", + // "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + // "description": "Required. Deprecated. 
The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" @@ -14542,10 +14249,8 @@ func (r *ProjectsZonesClustersNodePoolsService) Delete(projectId string, zone st } // Name sets the optional parameter "name": The name (project, location, -// cluster, node pool id) of the node pool to -// delete. Specified in the -// format -// `projects/*/locations/*/clusters/*/nodePools/*`. +// cluster, node pool id) of the node pool to delete. Specified in the +// format `projects/*/locations/*/clusters/*/nodePools/*`. func (c *ProjectsZonesClustersNodePoolsDeleteCall) Name(name string) *ProjectsZonesClustersNodePoolsDeleteCall { c.urlParams_.Set("name", name) return c @@ -14578,7 +14283,7 @@ func (c *ProjectsZonesClustersNodePoolsDeleteCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14652,30 +14357,30 @@ func (c *ProjectsZonesClustersNodePoolsDeleteCall) Do(opts ...googleapi.CallOpti // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "name": { - // "description": "The name (project, location, cluster, node pool id) of the node pool to\ndelete. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool id) of the node pool to delete. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "query", // "type": "string" // }, // "nodePoolId": { - // "description": "Required. Deprecated. The name of the node pool to delete.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the node pool to delete. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. 
The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -14717,10 +14422,8 @@ func (r *ProjectsZonesClustersNodePoolsService) Get(projectId string, zone strin } // Name sets the optional parameter "name": The name (project, location, -// cluster, node pool id) of the node pool to -// get. Specified in the -// format -// `projects/*/locations/*/clusters/*/nodePools/*`. +// cluster, node pool id) of the node pool to get. Specified in the +// format `projects/*/locations/*/clusters/*/nodePools/*`. func (c *ProjectsZonesClustersNodePoolsGetCall) Name(name string) *ProjectsZonesClustersNodePoolsGetCall { c.urlParams_.Set("name", name) return c @@ -14763,7 +14466,7 @@ func (c *ProjectsZonesClustersNodePoolsGetCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14840,30 +14543,30 @@ func (c *ProjectsZonesClustersNodePoolsGetCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "name": { - // "description": "The name (project, location, cluster, node pool id) of the node pool to\nget. Specified in the format\n`projects/*/locations/*/clusters/*/nodePools/*`.", + // "description": "The name (project, location, cluster, node pool id) of the node pool to get. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`.", // "location": "query", // "type": "string" // }, // "nodePoolId": { - // "description": "Required. Deprecated. The name of the node pool.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the node pool. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -14903,8 +14606,8 @@ func (r *ProjectsZonesClustersNodePoolsService) List(projectId string, zone stri } // Parent sets the optional parameter "parent": The parent (project, -// location, cluster id) where the node pools will be -// listed. Specified in the format `projects/*/locations/*/clusters/*`. +// location, cluster id) where the node pools will be listed. Specified +// in the format `projects/*/locations/*/clusters/*`. func (c *ProjectsZonesClustersNodePoolsListCall) Parent(parent string) *ProjectsZonesClustersNodePoolsListCall { c.urlParams_.Set("parent", parent) return c @@ -14947,7 +14650,7 @@ func (c *ProjectsZonesClustersNodePoolsListCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15022,24 +14725,24 @@ func (c *ProjectsZonesClustersNodePoolsListCall) Do(opts ...googleapi.CallOption // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", + // "description": "Required. Deprecated. The name of the cluster. This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The parent (project, location, cluster id) where the node pools will be\nlisted. Specified in the format `projects/*/locations/*/clusters/*`.", + // "description": "The parent (project, location, cluster id) where the node pools will be listed. Specified in the format `projects/*/locations/*/clusters/*`.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber). This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the parent\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" @@ -15070,8 +14773,7 @@ type ProjectsZonesClustersNodePoolsRollbackCall struct { header_ http.Header } -// Rollback: Rolls back a previously Aborted or Failed NodePool -// upgrade. +// Rollback: Rolls back a previously Aborted or Failed NodePool upgrade. // This makes no changes if the last upgrade successfully completed. 
func (r *ProjectsZonesClustersNodePoolsService) Rollback(projectId string, zone string, clusterId string, nodePoolId string, rollbacknodepoolupgraderequest *RollbackNodePoolUpgradeRequest) *ProjectsZonesClustersNodePoolsRollbackCall { c := &ProjectsZonesClustersNodePoolsRollbackCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -15110,7 +14812,7 @@ func (c *ProjectsZonesClustersNodePoolsRollbackCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsRollbackCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15177,7 +14879,7 @@ func (c *ProjectsZonesClustersNodePoolsRollbackCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Rolls back a previously Aborted or Failed NodePool upgrade.\nThis makes no changes if the last upgrade successfully completed.", + // "description": "Rolls back a previously Aborted or Failed NodePool upgrade. This makes no changes if the last upgrade successfully completed.", // "flatPath": "v1beta1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}:rollback", // "httpMethod": "POST", // "id": "container.projects.zones.clusters.nodePools.rollback", @@ -15189,25 +14891,25 @@ func (c *ProjectsZonesClustersNodePoolsRollbackCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster to rollback.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster to rollback. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "Required. Deprecated. The name of the node pool to rollback.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the node pool to rollback. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -15279,7 +14981,7 @@ func (c *ProjectsZonesClustersNodePoolsSetManagementCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsSetManagementCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15358,25 +15060,25 @@ func (c *ProjectsZonesClustersNodePoolsSetManagementCall) Do(opts ...googleapi.C // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "Required. Deprecated. The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the node pool to update. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -15448,7 +15150,7 @@ func (c *ProjectsZonesClustersNodePoolsSetSizeCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsSetSizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15527,25 +15229,25 @@ func (c *ProjectsZonesClustersNodePoolsSetSizeCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster to update. 
This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "Required. Deprecated. The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the node pool to update. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -15618,7 +15320,7 @@ func (c *ProjectsZonesClustersNodePoolsUpdateCall) Header() http.Header { func (c *ProjectsZonesClustersNodePoolsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15697,25 +15399,25 @@ func (c *ProjectsZonesClustersNodePoolsUpdateCall) Do(opts ...googleapi.CallOpti // ], // "parameters": { // "clusterId": { - // "description": "Required. Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "Required. Deprecated. The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The name of the node pool to upgrade. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. 
The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -15785,7 +15487,7 @@ func (c *ProjectsZonesOperationsCancelCall) Header() http.Header { func (c *ProjectsZonesOperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15862,19 +15564,19 @@ func (c *ProjectsZonesOperationsCancelCall) Do(opts ...googleapi.CallOption) (*E // ], // "parameters": { // "operationId": { - // "description": "Required. Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The server-assigned `name` of the operation. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\noperation resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the operation resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -15917,8 +15619,8 @@ func (r *ProjectsZonesOperationsService) Get(projectId string, zone string, oper } // Name sets the optional parameter "name": The name (project, location, -// operation id) of the operation to get. -// Specified in the format `projects/*/locations/*/operations/*`. +// operation id) of the operation to get. Specified in the format +// `projects/*/locations/*/operations/*`. 
func (c *ProjectsZonesOperationsGetCall) Name(name string) *ProjectsZonesOperationsGetCall { c.urlParams_.Set("name", name) return c @@ -15961,7 +15663,7 @@ func (c *ProjectsZonesOperationsGetCall) Header() http.Header { func (c *ProjectsZonesOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -16036,24 +15738,24 @@ func (c *ProjectsZonesOperationsGetCall) Do(opts ...googleapi.CallOption) (*Oper // ], // "parameters": { // "name": { - // "description": "The name (project, location, operation id) of the operation to get.\nSpecified in the format `projects/*/locations/*/operations/*`.", + // "description": "The name (project, location, operation id) of the operation to get. Specified in the format `projects/*/locations/*/operations/*`.", // "location": "query", // "type": "string" // }, // "operationId": { - // "description": "Required. Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The server-assigned `name` of the operation. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) in which the\ncluster resides. This field has been deprecated and replaced by the name\nfield.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -16092,9 +15794,9 @@ func (r *ProjectsZonesOperationsService) List(projectId string, zone string) *Pr } // Parent sets the optional parameter "parent": The parent (project and -// location) where the operations will be listed. -// Specified in the format `projects/*/locations/*`. -// Location "-" matches all zones and all regions. +// location) where the operations will be listed. Specified in the +// format `projects/*/locations/*`. Location "-" matches all zones and +// all regions. 
func (c *ProjectsZonesOperationsListCall) Parent(parent string) *ProjectsZonesOperationsListCall { c.urlParams_.Set("parent", parent) return c @@ -16137,7 +15839,7 @@ func (c *ProjectsZonesOperationsListCall) Header() http.Header { func (c *ProjectsZonesOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -16210,18 +15912,18 @@ func (c *ProjectsZonesOperationsListCall) Do(opts ...googleapi.CallOption) (*Lis // ], // "parameters": { // "parent": { - // "description": "The parent (project and location) where the operations will be listed.\nSpecified in the format `projects/*/locations/*`.\nLocation \"-\" matches all zones and all regions.", + // "description": "The parent (project and location) where the operations will be listed. Specified in the format `projects/*/locations/*`. Location \"-\" matches all zones and all regions.", // "location": "query", // "type": "string" // }, // "projectId": { - // "description": "Required. Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + // "description": "Required. Deprecated. The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840). This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "Required. Deprecated. The name of the Google Compute Engine\n[zone](https://cloud.google.com/compute/docs/zones#available) to return\noperations for, or `-` for all zones. This field has been deprecated and\nreplaced by the parent field.", + // "description": "Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for, or `-` for all zones. This field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" diff --git a/vendor/google.golang.org/api/dataflow/v1b3/dataflow-api.json b/vendor/google.golang.org/api/dataflow/v1b3/dataflow-api.json index 4b47ff7cd51..cdbf58664eb 100644 --- a/vendor/google.golang.org/api/dataflow/v1b3/dataflow-api.json +++ b/vendor/google.golang.org/api/dataflow/v1b3/dataflow-api.json @@ -183,6 +183,184 @@ } }, "resources": { + "catalogTemplates": { + "methods": { + "commit": { + "description": "Creates a new TemplateVersion (Important: not new Template) entry in the spanner table. Requires project_id and display_name (template).", + "flatPath": "v1b3/projects/{projectsId}/catalogTemplates/{catalogTemplatesId}:commit", + "httpMethod": "POST", + "id": "dataflow.projects.catalogTemplates.commit", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The location of the template, name includes project_id and display_name. Commit using project_id(pid1) and display_name(tid1). 
Format: projects/{pid1}/catalogTemplates/{tid1}", + "location": "path", + "pattern": "^projects/[^/]+/catalogTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1b3/{+name}:commit", + "request": { + "$ref": "CommitTemplateVersionRequest" + }, + "response": { + "$ref": "TemplateVersion" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email" + ] + }, + "delete": { + "description": "Deletes an existing Template. Do nothing if Template does not exist.", + "flatPath": "v1b3/projects/{projectsId}/catalogTemplates/{catalogTemplatesId}", + "httpMethod": "DELETE", + "id": "dataflow.projects.catalogTemplates.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "name includes project_id and display_name. Delete by project_id(pid1) and display_name(tid1). Format: projects/{pid1}/catalogTemplates/{tid1}", + "location": "path", + "pattern": "^projects/[^/]+/catalogTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1b3/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email" + ] + }, + "get": { + "description": "Get TemplateVersion using project_id and display_name with an optional version_id field. Get latest (has tag \"latest\") TemplateVersion if version_id not set.", + "flatPath": "v1b3/projects/{projectsId}/catalogTemplates/{catalogTemplatesId}", + "httpMethod": "GET", + "id": "dataflow.projects.catalogTemplates.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name includes project_id and display_name. version_id is optional. Get the latest TemplateVersion if version_id not set. Get by project_id(pid1) and display_name(tid1): Format: projects/{pid1}/catalogTemplates/{tid1} Get by project_id(pid1), display_name(tid1), and version_id(vid1): Format: projects/{pid1}/catalogTemplates/{tid1@vid}", + "location": "path", + "pattern": "^projects/[^/]+/catalogTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1b3/{+name}", + "response": { + "$ref": "TemplateVersion" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email" + ] + }, + "label": { + "description": "Updates the label of the TemplateVersion. Label can be duplicated in Template, so either add or remove the label in the TemplateVersion.", + "flatPath": "v1b3/projects/{projectsId}/catalogTemplates/{catalogTemplatesId}:label", + "httpMethod": "POST", + "id": "dataflow.projects.catalogTemplates.label", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name includes project_id, display_name, and version_id. Updates by project_id(pid1), display_name(tid1), and version_id(vid1): Format: projects/{pid1}/catalogTemplates/{tid1@vid}", + "location": "path", + "pattern": "^projects/[^/]+/catalogTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1b3/{+name}:label", + "request": { + "$ref": "ModifyTemplateVersionLabelRequest" + }, + "response": { + "$ref": "ModifyTemplateVersionLabelResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email" + ] + }, + "tag": { + "description": "Updates the tag of the TemplateVersion, and tag is unique in Template. 
If tag exists in another TemplateVersion in the Template, updates the tag to this TemplateVersion will remove it from the old TemplateVersion and add it to this TemplateVersion. If request is remove_only (remove_only = true), remove the tag from this TemplateVersion.", + "flatPath": "v1b3/projects/{projectsId}/catalogTemplates/{catalogTemplatesId}:tag", + "httpMethod": "POST", + "id": "dataflow.projects.catalogTemplates.tag", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name includes project_id, display_name, and version_id. Updates by project_id(pid1), display_name(tid1), and version_id(vid1): Format: projects/{pid1}/catalogTemplates/{tid1@vid}", + "location": "path", + "pattern": "^projects/[^/]+/catalogTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1b3/{+name}:tag", + "request": { + "$ref": "ModifyTemplateVersionTagRequest" + }, + "response": { + "$ref": "ModifyTemplateVersionTagResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email" + ] + } + }, + "resources": { + "templateVersions": { + "methods": { + "create": { + "description": "Creates a new Template with TemplateVersion. Requires project_id(projects) and template display_name(catalogTemplates). The template display_name is set by the user.", + "flatPath": "v1b3/projects/{projectsId}/catalogTemplates/{catalogTemplatesId}/templateVersions", + "httpMethod": "POST", + "id": "dataflow.projects.catalogTemplates.templateVersions.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The parent project and template that the TemplateVersion will be created under. Create using project_id(pid1) and display_name(tid1). Format: projects/{pid1}/catalogTemplates/{tid1}", + "location": "path", + "pattern": "^projects/[^/]+/catalogTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1b3/{+parent}/templateVersions", + "request": { + "$ref": "CreateTemplateVersionRequest" + }, + "response": { + "$ref": "TemplateVersion" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email" + ] + } + } + } + } + }, "jobs": { "methods": { "aggregated": { @@ -202,22 +380,28 @@ "TERMINATED", "ACTIVE" ], + "enumDescriptions": [ + "The filter isn't specified, or is unknown. This returns all jobs ordered on descending `JobUuid`.", + "Returns all running jobs first ordered on creation timestamp, then returns all terminated jobs ordered on the termination timestamp.", + "Filters the jobs that have a terminated state, ordered on the termination timestamp. Example terminated states: `JOB_STATE_STOPPED`, `JOB_STATE_UPDATED`, `JOB_STATE_DRAINED`, etc.", + "Filters the jobs that are running ordered on the creation timestamp." + ], "location": "query", "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", "location": "query", "type": "string" }, "pageSize": { - "description": "If there are many jobs, limit response to at most this many.\nThe actual number of jobs returned will be the lesser of max_responses\nand an unspecified server-defined limit.", + "description": "If there are many jobs, limit response to at most this many. 
The actual number of jobs returned will be the lesser of max_responses and an unspecified server-defined limit.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Set this to the 'next_page_token' field of a previous response\nto request additional results in a long list.", + "description": "Set this to the 'next_page_token' field of a previous response to request additional results in a long list.", "location": "query", "type": "string" }, @@ -235,6 +419,12 @@ "JOB_VIEW_ALL", "JOB_VIEW_DESCRIPTION" ], + "enumDescriptions": [ + "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", + "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", + "Request all information available for this job.", + "Request summary info and limited job description data for steps, labels and environment." + ], "location": "query", "type": "string" } @@ -251,7 +441,7 @@ ] }, "create": { - "description": "Creates a Cloud Dataflow job.\n\nTo create a job, we recommend using `projects.locations.jobs.create` with a\n[regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.create` is not recommended, as your job will always start\nin `us-central1`.", + "description": "Creates a Cloud Dataflow job. To create a job, we recommend using `projects.locations.jobs.create` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.jobs.create` is not recommended, as your job will always start in `us-central1`.", "flatPath": "v1b3/projects/{projectId}/jobs", "httpMethod": "POST", "id": "dataflow.projects.jobs.create", @@ -260,7 +450,7 @@ ], "parameters": { "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", "location": "query", "type": "string" }, @@ -283,6 +473,12 @@ "JOB_VIEW_ALL", "JOB_VIEW_DESCRIPTION" ], + "enumDescriptions": [ + "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", + "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", + "Request all information available for this job.", + "Request summary info and limited job description data for steps, labels and environment." + ], "location": "query", "type": "string" } @@ -302,7 +498,7 @@ ] }, "get": { - "description": "Gets the state of the specified Cloud Dataflow job.\n\nTo get the state of a job, we recommend using `projects.locations.jobs.get`\nwith a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.get` is not recommended, as you can only get the state of\njobs that are running in `us-central1`.", + "description": "Gets the state of the specified Cloud Dataflow job. To get the state of a job, we recommend using `projects.locations.jobs.get` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). 
Using `projects.jobs.get` is not recommended, as you can only get the state of jobs that are running in `us-central1`.", "flatPath": "v1b3/projects/{projectId}/jobs/{jobId}", "httpMethod": "GET", "id": "dataflow.projects.jobs.get", @@ -318,7 +514,7 @@ "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", "location": "query", "type": "string" }, @@ -336,6 +532,12 @@ "JOB_VIEW_ALL", "JOB_VIEW_DESCRIPTION" ], + "enumDescriptions": [ + "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", + "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", + "Request all information available for this job.", + "Request summary info and limited job description data for steps, labels and environment." + ], "location": "query", "type": "string" } @@ -352,7 +554,7 @@ ] }, "getMetrics": { - "description": "Request the job status.\n\nTo request the status of a job, we recommend using\n`projects.locations.jobs.getMetrics` with a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.getMetrics` is not recommended, as you can only request the\nstatus of jobs that are running in `us-central1`.", + "description": "Request the job status. To request the status of a job, we recommend using `projects.locations.jobs.getMetrics` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.jobs.getMetrics` is not recommended, as you can only request the status of jobs that are running in `us-central1`.", "flatPath": "v1b3/projects/{projectId}/jobs/{jobId}/metrics", "httpMethod": "GET", "id": "dataflow.projects.jobs.getMetrics", @@ -362,13 +564,13 @@ ], "parameters": { "jobId": { - "description": "The job to get messages for.", + "description": "The job to get metrics for.", "location": "path", "required": true, "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the job specified by job_id.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", "location": "query", "type": "string" }, @@ -379,7 +581,7 @@ "type": "string" }, "startTime": { - "description": "Return only metric data that has changed since this time.\nDefault is to return all information about all metrics for the job.", + "description": "Return only metric data that has changed since this time. Default is to return all information about all metrics for the job.", "format": "google-datetime", "location": "query", "type": "string" @@ -397,7 +599,7 @@ ] }, "list": { - "description": "List the jobs of a project.\n\nTo list the jobs of a project in a region, we recommend using\n`projects.locations.jobs.get` with a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). To\nlist the all jobs across all regions, use `projects.jobs.aggregated`. 
Using\n`projects.jobs.list` is not recommended, as you can only get the list of\njobs that are running in `us-central1`.", + "description": "List the jobs of a project. To list the jobs of a project in a region, we recommend using `projects.locations.jobs.list` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). To list the all jobs across all regions, use `projects.jobs.aggregated`. Using `projects.jobs.list` is not recommended, as you can only get the list of jobs that are running in `us-central1`.", "flatPath": "v1b3/projects/{projectId}/jobs", "httpMethod": "GET", "id": "dataflow.projects.jobs.list", @@ -413,22 +615,28 @@ "TERMINATED", "ACTIVE" ], + "enumDescriptions": [ + "The filter isn't specified, or is unknown. This returns all jobs ordered on descending `JobUuid`.", + "Returns all running jobs first ordered on creation timestamp, then returns all terminated jobs ordered on the termination timestamp.", + "Filters the jobs that have a terminated state, ordered on the termination timestamp. Example terminated states: `JOB_STATE_STOPPED`, `JOB_STATE_UPDATED`, `JOB_STATE_DRAINED`, etc.", + "Filters the jobs that are running ordered on the creation timestamp." + ], "location": "query", "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", "location": "query", "type": "string" }, "pageSize": { - "description": "If there are many jobs, limit response to at most this many.\nThe actual number of jobs returned will be the lesser of max_responses\nand an unspecified server-defined limit.", + "description": "If there are many jobs, limit response to at most this many. The actual number of jobs returned will be the lesser of max_responses and an unspecified server-defined limit.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Set this to the 'next_page_token' field of a previous response\nto request additional results in a long list.", + "description": "Set this to the 'next_page_token' field of a previous response to request additional results in a long list.", "location": "query", "type": "string" }, @@ -446,6 +654,12 @@ "JOB_VIEW_ALL", "JOB_VIEW_DESCRIPTION" ], + "enumDescriptions": [ + "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", + "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", + "Request all information available for this job.", + "Request summary info and limited job description data for steps, labels and environment." + ], "location": "query", "type": "string" } @@ -499,7 +713,7 @@ ] }, "update": { - "description": "Updates the state of an existing Cloud Dataflow job.\n\nTo update the state of an existing job, we recommend using\n`projects.locations.jobs.update` with a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.update` is not recommended, as you can only update the state\nof jobs that are running in `us-central1`.", + "description": "Updates the state of an existing Cloud Dataflow job. 
To update the state of an existing job, we recommend using `projects.locations.jobs.update` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.jobs.update` is not recommended, as you can only update the state of jobs that are running in `us-central1`.", "flatPath": "v1b3/projects/{projectId}/jobs/{jobId}", "httpMethod": "PUT", "id": "dataflow.projects.jobs.update", @@ -515,7 +729,7 @@ "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", "location": "query", "type": "string" }, @@ -623,7 +837,7 @@ "messages": { "methods": { "list": { - "description": "Request the job status.\n\nTo request the status of a job, we recommend using\n`projects.locations.jobs.messages.list` with a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.messages.list` is not recommended, as you can only request\nthe status of jobs that are running in `us-central1`.", + "description": "Request the job status. To request the status of a job, we recommend using `projects.locations.jobs.messages.list` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.jobs.messages.list` is not recommended, as you can only request the status of jobs that are running in `us-central1`.", "flatPath": "v1b3/projects/{projectId}/jobs/{jobId}/messages", "httpMethod": "GET", "id": "dataflow.projects.jobs.messages.list", @@ -633,7 +847,7 @@ ], "parameters": { "endTime": { - "description": "Return only messages with timestamps \u003c end_time. The default is now\n(i.e. return up to the latest messages available).", + "description": "Return only messages with timestamps \u003c end_time. The default is now (i.e. return up to the latest messages available).", "format": "google-datetime", "location": "query", "type": "string" @@ -645,7 +859,7 @@ "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the job specified by job_id.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", "location": "query", "type": "string" }, @@ -659,17 +873,25 @@ "JOB_MESSAGE_WARNING", "JOB_MESSAGE_ERROR" ], + "enumDescriptions": [ + "The message importance isn't specified, or is unknown.", + "The message is at the 'debug' level: typically only useful for software engineers working on the code the job is running. Typically, Dataflow pipeline runners do not display log messages at this level by default.", + "The message is at the 'detailed' level: somewhat verbose, but potentially useful to users. Typically, Dataflow pipeline runners do not display log messages at this level by default. These messages are displayed by default in the Dataflow monitoring UI.", + "The message is at the 'basic' level: useful for keeping track of the execution of a Dataflow pipeline. 
Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI.", + "The message is at the 'warning' level: indicating a condition pertaining to a job which may require human intervention. Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI.", + "The message is at the 'error' level: indicating a condition preventing a job from succeeding. Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI." + ], "location": "query", "type": "string" }, "pageSize": { - "description": "If specified, determines the maximum number of messages to\nreturn. If unspecified, the service may choose an appropriate\ndefault, or may return an arbitrarily large number of results.", + "description": "If specified, determines the maximum number of messages to return. If unspecified, the service may choose an appropriate default, or may return an arbitrarily large number of results.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "If supplied, this should be the value of next_page_token returned\nby an earlier call. This will cause the next page of results to\nbe returned.", + "description": "If supplied, this should be the value of next_page_token returned by an earlier call. This will cause the next page of results to be returned.", "location": "query", "type": "string" }, @@ -680,7 +902,7 @@ "type": "string" }, "startTime": { - "description": "If specified, return only messages with timestamps \u003e= start_time.\nThe default is the job creation time (i.e. beginning of messages).", + "description": "If specified, return only messages with timestamps \u003e= start_time. The default is the job creation time (i.e. beginning of messages).", "format": "google-datetime", "location": "query", "type": "string" @@ -792,7 +1014,7 @@ ], "parameters": { "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the job.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job.", "location": "path", "required": true, "type": "string" @@ -833,7 +1055,7 @@ ], "parameters": { "location": { - "description": "Required. The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to\nwhich to direct the request. E.g., us-central1, us-west1.", + "description": "Required. The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request. E.g., us-central1, us-west1.", "location": "path", "required": true, "type": "string" @@ -864,7 +1086,7 @@ "jobs": { "methods": { "create": { - "description": "Creates a Cloud Dataflow job.\n\nTo create a job, we recommend using `projects.locations.jobs.create` with a\n[regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.create` is not recommended, as your job will always start\nin `us-central1`.", + "description": "Creates a Cloud Dataflow job. To create a job, we recommend using `projects.locations.jobs.create` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). 
Using `projects.jobs.create` is not recommended, as your job will always start in `us-central1`.", "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs", "httpMethod": "POST", "id": "dataflow.projects.locations.jobs.create", @@ -874,7 +1096,7 @@ ], "parameters": { "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", "location": "path", "required": true, "type": "string" @@ -898,6 +1120,12 @@ "JOB_VIEW_ALL", "JOB_VIEW_DESCRIPTION" ], + "enumDescriptions": [ + "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", + "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", + "Request all information available for this job.", + "Request summary info and limited job description data for steps, labels and environment." + ], "location": "query", "type": "string" } @@ -917,7 +1145,7 @@ ] }, "get": { - "description": "Gets the state of the specified Cloud Dataflow job.\n\nTo get the state of a job, we recommend using `projects.locations.jobs.get`\nwith a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.get` is not recommended, as you can only get the state of\njobs that are running in `us-central1`.", + "description": "Gets the state of the specified Cloud Dataflow job. To get the state of a job, we recommend using `projects.locations.jobs.get` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.jobs.get` is not recommended, as you can only get the state of jobs that are running in `us-central1`.", "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}", "httpMethod": "GET", "id": "dataflow.projects.locations.jobs.get", @@ -934,7 +1162,7 @@ "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", "location": "path", "required": true, "type": "string" @@ -953,6 +1181,12 @@ "JOB_VIEW_ALL", "JOB_VIEW_DESCRIPTION" ], + "enumDescriptions": [ + "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", + "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", + "Request all information available for this job.", + "Request summary info and limited job description data for steps, labels and environment." + ], "location": "query", "type": "string" } @@ -968,8 +1202,60 @@ "https://www.googleapis.com/auth/userinfo.email" ] }, + "getExecutionDetails": { + "description": "Request detailed information about the execution status of the job. EXPERIMENTAL. 
This API is subject to change or removal without notice.", + "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/executionDetails", + "httpMethod": "GET", + "id": "dataflow.projects.locations.jobs.getExecutionDetails", + "parameterOrder": [ + "projectId", + "location", + "jobId" + ], + "parameters": { + "jobId": { + "description": "The job to get execution details for.", + "location": "path", + "required": true, + "type": "string" + }, + "location": { + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", + "location": "path", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "If specified, determines the maximum number of stages to return. If unspecified, the service may choose an appropriate default, or may return an arbitrarily large number of results.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "If supplied, this should be the value of next_page_token returned by an earlier call. This will cause the next page of results to be returned.", + "location": "query", + "type": "string" + }, + "projectId": { + "description": "A project id.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/executionDetails", + "response": { + "$ref": "JobExecutionDetails" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly", + "https://www.googleapis.com/auth/userinfo.email" + ] + }, "getMetrics": { - "description": "Request the job status.\n\nTo request the status of a job, we recommend using\n`projects.locations.jobs.getMetrics` with a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.getMetrics` is not recommended, as you can only request the\nstatus of jobs that are running in `us-central1`.", + "description": "Request the job status. To request the status of a job, we recommend using `projects.locations.jobs.getMetrics` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.jobs.getMetrics` is not recommended, as you can only request the status of jobs that are running in `us-central1`.", "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/metrics", "httpMethod": "GET", "id": "dataflow.projects.locations.jobs.getMetrics", @@ -980,13 +1266,13 @@ ], "parameters": { "jobId": { - "description": "The job to get messages for.", + "description": "The job to get metrics for.", "location": "path", "required": true, "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the job specified by job_id.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", "location": "path", "required": true, "type": "string" @@ -998,7 +1284,7 @@ "type": "string" }, "startTime": { - "description": "Return only metric data that has changed since this time.\nDefault is to return all information about all metrics for the job.", + "description": "Return only metric data that has changed since this time. 
Default is to return all information about all metrics for the job.", "format": "google-datetime", "location": "query", "type": "string" @@ -1016,7 +1302,7 @@ ] }, "list": { - "description": "List the jobs of a project.\n\nTo list the jobs of a project in a region, we recommend using\n`projects.locations.jobs.get` with a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). To\nlist the all jobs across all regions, use `projects.jobs.aggregated`. Using\n`projects.jobs.list` is not recommended, as you can only get the list of\njobs that are running in `us-central1`.", + "description": "List the jobs of a project. To list the jobs of a project in a region, we recommend using `projects.locations.jobs.list` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). To list the all jobs across all regions, use `projects.jobs.aggregated`. Using `projects.jobs.list` is not recommended, as you can only get the list of jobs that are running in `us-central1`.", "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs", "httpMethod": "GET", "id": "dataflow.projects.locations.jobs.list", @@ -1033,23 +1319,29 @@ "TERMINATED", "ACTIVE" ], + "enumDescriptions": [ + "The filter isn't specified, or is unknown. This returns all jobs ordered on descending `JobUuid`.", + "Returns all running jobs first ordered on creation timestamp, then returns all terminated jobs ordered on the termination timestamp.", + "Filters the jobs that have a terminated state, ordered on the termination timestamp. Example terminated states: `JOB_STATE_STOPPED`, `JOB_STATE_UPDATED`, `JOB_STATE_DRAINED`, etc.", + "Filters the jobs that are running ordered on the creation timestamp." + ], "location": "query", "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", "location": "path", "required": true, "type": "string" }, "pageSize": { - "description": "If there are many jobs, limit response to at most this many.\nThe actual number of jobs returned will be the lesser of max_responses\nand an unspecified server-defined limit.", + "description": "If there are many jobs, limit response to at most this many. The actual number of jobs returned will be the lesser of max_responses and an unspecified server-defined limit.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Set this to the 'next_page_token' field of a previous response\nto request additional results in a long list.", + "description": "Set this to the 'next_page_token' field of a previous response to request additional results in a long list.", "location": "query", "type": "string" }, @@ -1067,6 +1359,12 @@ "JOB_VIEW_ALL", "JOB_VIEW_DESCRIPTION" ], + "enumDescriptions": [ + "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", + "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", + "Request all information available for this job.", + "Request summary info and limited job description data for steps, labels and environment." 
+ ], "location": "query", "type": "string" } @@ -1127,7 +1425,7 @@ ] }, "update": { - "description": "Updates the state of an existing Cloud Dataflow job.\n\nTo update the state of an existing job, we recommend using\n`projects.locations.jobs.update` with a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.update` is not recommended, as you can only update the state\nof jobs that are running in `us-central1`.", + "description": "Updates the state of an existing Cloud Dataflow job. To update the state of an existing job, we recommend using `projects.locations.jobs.update` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.jobs.update` is not recommended, as you can only update the state of jobs that are running in `us-central1`.", "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}", "httpMethod": "PUT", "id": "dataflow.projects.locations.jobs.update", @@ -1144,7 +1442,7 @@ "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", "location": "path", "required": true, "type": "string" @@ -1192,7 +1490,7 @@ "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the job specified by job_id.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", "location": "path", "required": true, "type": "string" @@ -1236,7 +1534,7 @@ "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the job specified by job_id.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", "location": "path", "required": true, "type": "string" @@ -1267,7 +1565,7 @@ "messages": { "methods": { "list": { - "description": "Request the job status.\n\nTo request the status of a job, we recommend using\n`projects.locations.jobs.messages.list` with a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.messages.list` is not recommended, as you can only request\nthe status of jobs that are running in `us-central1`.", + "description": "Request the job status. To request the status of a job, we recommend using `projects.locations.jobs.messages.list` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.jobs.messages.list` is not recommended, as you can only request the status of jobs that are running in `us-central1`.", "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/messages", "httpMethod": "GET", "id": "dataflow.projects.locations.jobs.messages.list", @@ -1278,7 +1576,7 @@ ], "parameters": { "endTime": { - "description": "Return only messages with timestamps \u003c end_time. The default is now\n(i.e. return up to the latest messages available).", + "description": "Return only messages with timestamps \u003c end_time. The default is now (i.e. 
return up to the latest messages available).", "format": "google-datetime", "location": "query", "type": "string" @@ -1290,7 +1588,7 @@ "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the job specified by job_id.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", "location": "path", "required": true, "type": "string" @@ -1305,17 +1603,25 @@ "JOB_MESSAGE_WARNING", "JOB_MESSAGE_ERROR" ], + "enumDescriptions": [ + "The message importance isn't specified, or is unknown.", + "The message is at the 'debug' level: typically only useful for software engineers working on the code the job is running. Typically, Dataflow pipeline runners do not display log messages at this level by default.", + "The message is at the 'detailed' level: somewhat verbose, but potentially useful to users. Typically, Dataflow pipeline runners do not display log messages at this level by default. These messages are displayed by default in the Dataflow monitoring UI.", + "The message is at the 'basic' level: useful for keeping track of the execution of a Dataflow pipeline. Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI.", + "The message is at the 'warning' level: indicating a condition pertaining to a job which may require human intervention. Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI.", + "The message is at the 'error' level: indicating a condition preventing a job from succeeding. Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI." + ], "location": "query", "type": "string" }, "pageSize": { - "description": "If specified, determines the maximum number of messages to\nreturn. If unspecified, the service may choose an appropriate\ndefault, or may return an arbitrarily large number of results.", + "description": "If specified, determines the maximum number of messages to return. If unspecified, the service may choose an appropriate default, or may return an arbitrarily large number of results.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "If supplied, this should be the value of next_page_token returned\nby an earlier call. This will cause the next page of results to\nbe returned.", + "description": "If supplied, this should be the value of next_page_token returned by an earlier call. This will cause the next page of results to be returned.", "location": "query", "type": "string" }, @@ -1326,7 +1632,7 @@ "type": "string" }, "startTime": { - "description": "If specified, return only messages with timestamps \u003e= start_time.\nThe default is the job creation time (i.e. beginning of messages).", + "description": "If specified, return only messages with timestamps \u003e= start_time. The default is the job creation time (i.e. beginning of messages).", "format": "google-datetime", "location": "query", "type": "string" @@ -1390,6 +1696,81 @@ } } }, + "stages": { + "methods": { + "getExecutionDetails": { + "description": "Request detailed information about the execution status of a stage of the job. EXPERIMENTAL. 
This API is subject to change or removal without notice.", + "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/stages/{stageId}/executionDetails", + "httpMethod": "GET", + "id": "dataflow.projects.locations.jobs.stages.getExecutionDetails", + "parameterOrder": [ + "projectId", + "location", + "jobId", + "stageId" + ], + "parameters": { + "endTime": { + "description": "Upper time bound of work items to include, by start time.", + "format": "google-datetime", + "location": "query", + "type": "string" + }, + "jobId": { + "description": "The job to get execution details for.", + "location": "path", + "required": true, + "type": "string" + }, + "location": { + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", + "location": "path", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "If specified, determines the maximum number of work items to return. If unspecified, the service may choose an appropriate default, or may return an arbitrarily large number of results.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "If supplied, this should be the value of next_page_token returned by an earlier call. This will cause the next page of results to be returned.", + "location": "query", + "type": "string" + }, + "projectId": { + "description": "A project id.", + "location": "path", + "required": true, + "type": "string" + }, + "stageId": { + "description": "The stage for which to fetch information.", + "location": "path", + "required": true, + "type": "string" + }, + "startTime": { + "description": "Lower time bound of work items to include, by start time.", + "format": "google-datetime", + "location": "query", + "type": "string" + } + }, + "path": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/stages/{stageId}/executionDetails", + "response": { + "$ref": "StageExecutionDetails" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly", + "https://www.googleapis.com/auth/userinfo.email" + ] + } + } + }, "workItems": { "methods": { "lease": { @@ -1410,7 +1791,7 @@ "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the WorkItem's job.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the WorkItem's job.", "location": "path", "required": true, "type": "string" @@ -1454,7 +1835,7 @@ "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the WorkItem's job.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the WorkItem's job.", "location": "path", "required": true, "type": "string" @@ -1612,7 +1993,7 @@ "sql": { "methods": { "validate": { - "description": "Validates a GoogleSQL query for Cloud Dataflow syntax. Will always\nconfirm the given query parses correctly, and if able to look up\nschema information from DataCatalog, will validate that the query\nanalyzes properly as well.", + "description": "Validates a GoogleSQL query for Cloud Dataflow syntax. 
Will always confirm the given query parses correctly, and if able to look up schema information from DataCatalog, will validate that the query analyzes properly as well.", "flatPath": "v1b3/projects/{projectId}/locations/{location}/sql:validate", "httpMethod": "GET", "id": "dataflow.projects.locations.sql.validate", @@ -1622,7 +2003,7 @@ ], "parameters": { "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to\nwhich to direct the request.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request.", "location": "path", "required": true, "type": "string" @@ -1663,7 +2044,7 @@ ], "parameters": { "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to\nwhich to direct the request.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request.", "location": "path", "required": true, "type": "string" @@ -1700,12 +2081,12 @@ ], "parameters": { "gcsPath": { - "description": "Required. A Cloud Storage path to the template from which to\ncreate the job.\nMust be valid Cloud Storage URL, beginning with 'gs://'.", + "description": "Required. A Cloud Storage path to the template from which to create the job. Must be valid Cloud Storage URL, beginning with 'gs://'.", "location": "query", "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to\nwhich to direct the request.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request.", "location": "path", "required": true, "type": "string" @@ -1721,6 +2102,9 @@ "enum": [ "METADATA_ONLY" ], + "enumDescriptions": [ + "Template view that retrieves only the metadata associated with the template." + ], "location": "query", "type": "string" } @@ -1747,22 +2131,22 @@ ], "parameters": { "dynamicTemplate.gcsPath": { - "description": "Path to dynamic template spec file on GCS.\nThe file must be a Json serialized DynamicTemplateFieSpec object.", + "description": "Path to dynamic template spec file on GCS. The file must be a Json serialized DynamicTemplateFieSpec object.", "location": "query", "type": "string" }, "dynamicTemplate.stagingLocation": { - "description": "Cloud Storage path for staging dependencies.\nMust be a valid Cloud Storage URL, beginning with `gs://`.", + "description": "Cloud Storage path for staging dependencies. Must be a valid Cloud Storage URL, beginning with `gs://`.", "location": "query", "type": "string" }, "gcsPath": { - "description": "A Cloud Storage path to the template from which to create\nthe job.\nMust be valid Cloud Storage URL, beginning with 'gs://'.", + "description": "A Cloud Storage path to the template from which to create the job. 
Must be valid Cloud Storage URL, beginning with 'gs://'.", "location": "query", "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to\nwhich to direct the request.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request.", "location": "path", "required": true, "type": "string" @@ -1774,7 +2158,7 @@ "type": "string" }, "validateOnly": { - "description": "If true, the request is validated but not actually executed.\nDefaults to false.", + "description": "If true, the request is validated but not actually executed. Defaults to false.", "location": "query", "type": "boolean" } @@ -1877,6 +2261,47 @@ } } }, + "templateVersions": { + "methods": { + "list": { + "description": "List TemplateVersions using project_id and an optional display_name field. List all the TemplateVersions in the Template if display set. List all the TemplateVersions in the Project if display_name not set.", + "flatPath": "v1b3/projects/{projectsId}/templateVersions", + "httpMethod": "GET", + "id": "dataflow.projects.templateVersions.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of TemplateVersions to return per page.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The page token, received from a previous ListTemplateVersions call. Provide this to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "parent includes project_id, and display_name is optional. List by project_id(pid1) and display_name(tid1). Format: projects/{pid1}/catalogTemplates/{tid1} List by project_id(pid1). Format: projects/{pid1}", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1b3/{+parent}/templateVersions", + "response": { + "$ref": "ListTemplateVersionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email" + ] + } + } + }, "templates": { "methods": { "create": { @@ -1919,12 +2344,12 @@ ], "parameters": { "gcsPath": { - "description": "Required. A Cloud Storage path to the template from which to\ncreate the job.\nMust be valid Cloud Storage URL, beginning with 'gs://'.", + "description": "Required. A Cloud Storage path to the template from which to create the job. Must be valid Cloud Storage URL, beginning with 'gs://'.", "location": "query", "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to\nwhich to direct the request.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request.", "location": "query", "type": "string" }, @@ -1939,6 +2364,9 @@ "enum": [ "METADATA_ONLY" ], + "enumDescriptions": [ + "Template view that retrieves only the metadata associated with the template." + ], "location": "query", "type": "string" } @@ -1964,22 +2392,22 @@ ], "parameters": { "dynamicTemplate.gcsPath": { - "description": "Path to dynamic template spec file on GCS.\nThe file must be a Json serialized DynamicTemplateFieSpec object.", + "description": "Path to dynamic template spec file on GCS. 
The file must be a Json serialized DynamicTemplateFieSpec object.", "location": "query", "type": "string" }, "dynamicTemplate.stagingLocation": { - "description": "Cloud Storage path for staging dependencies.\nMust be a valid Cloud Storage URL, beginning with `gs://`.", + "description": "Cloud Storage path for staging dependencies. Must be a valid Cloud Storage URL, beginning with `gs://`.", "location": "query", "type": "string" }, "gcsPath": { - "description": "A Cloud Storage path to the template from which to create\nthe job.\nMust be valid Cloud Storage URL, beginning with 'gs://'.", + "description": "A Cloud Storage path to the template from which to create the job. Must be valid Cloud Storage URL, beginning with 'gs://'.", "location": "query", "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to\nwhich to direct the request.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request.", "location": "query", "type": "string" }, @@ -1990,7 +2418,7 @@ "type": "string" }, "validateOnly": { - "description": "If true, the request is validated but not actually executed.\nDefaults to false.", + "description": "If true, the request is validated but not actually executed. Defaults to false.", "location": "query", "type": "boolean" } @@ -2014,7 +2442,7 @@ } } }, - "revision": "20200319", + "revision": "20200916", "rootUrl": "https://dataflow.googleapis.com/", "schemas": { "ApproximateProgress": { @@ -2044,10 +2472,10 @@ "properties": { "consumedParallelism": { "$ref": "ReportedParallelism", - "description": "Total amount of parallelism in the portion of input of this task that has\nalready been consumed and is no longer active. In the first two examples\nabove (see remaining_parallelism), the value should be 29 or 2\nrespectively. The sum of remaining_parallelism and consumed_parallelism\nshould equal the total amount of parallelism in this work item. If\nspecified, must be finite." + "description": "Total amount of parallelism in the portion of input of this task that has already been consumed and is no longer active. In the first two examples above (see remaining_parallelism), the value should be 29 or 2 respectively. The sum of remaining_parallelism and consumed_parallelism should equal the total amount of parallelism in this work item. If specified, must be finite." }, "fractionConsumed": { - "description": "Completion as fraction of the input consumed, from 0.0 (beginning, nothing\nconsumed), to 1.0 (end of the input, entire input consumed).", + "description": "Completion as fraction of the input consumed, from 0.0 (beginning, nothing consumed), to 1.0 (end of the input, entire input consumed).", "format": "double", "type": "number" }, @@ -2057,7 +2485,7 @@ }, "remainingParallelism": { "$ref": "ReportedParallelism", - "description": "Total amount of parallelism in the input of this task that remains,\n(i.e. can be delegated to this task and any new tasks via dynamic\nsplitting). Always at least 1 for non-finished work items and 0 for\nfinished.\n\n\"Amount of parallelism\" refers to how many non-empty parts of the input\ncan be read in parallel. This does not necessarily equal number\nof records. 
An input that can be read in parallel down to the\nindividual records is called \"perfectly splittable\".\nAn example of non-perfectly parallelizable input is a block-compressed\nfile format where a block of records has to be read as a whole,\nbut different blocks can be read in parallel.\n\nExamples:\n* If we are processing record #30 (starting at 1) out of 50 in a perfectly\n splittable 50-record input, this value should be 21 (20 remaining + 1\n current).\n* If we are reading through block 3 in a block-compressed file consisting\n of 5 blocks, this value should be 3 (since blocks 4 and 5 can be\n processed in parallel by new tasks via dynamic splitting and the current\n task remains processing block 3).\n* If we are reading through the last block in a block-compressed file,\n or reading or processing the last record in a perfectly splittable\n input, this value should be 1, because apart from the current task, no\n additional remainder can be split off." + "description": "Total amount of parallelism in the input of this task that remains, (i.e. can be delegated to this task and any new tasks via dynamic splitting). Always at least 1 for non-finished work items and 0 for finished. \"Amount of parallelism\" refers to how many non-empty parts of the input can be read in parallel. This does not necessarily equal number of records. An input that can be read in parallel down to the individual records is called \"perfectly splittable\". An example of non-perfectly parallelizable input is a block-compressed file format where a block of records has to be read as a whole, but different blocks can be read in parallel. Examples: * If we are processing record #30 (starting at 1) out of 50 in a perfectly splittable 50-record input, this value should be 21 (20 remaining + 1 current). * If we are reading through block 3 in a block-compressed file consisting of 5 blocks, this value should be 3 (since blocks 4 and 5 can be processed in parallel by new tasks via dynamic splitting and the current task remains processing block 3). * If we are reading through the last block in a block-compressed file, or reading or processing the last record in a perfectly splittable input, this value should be 1, because apart from the current task, no additional remainder can be split off." } }, "type": "object" @@ -2067,12 +2495,12 @@ "id": "ApproximateSplitRequest", "properties": { "fractionConsumed": { - "description": "A fraction at which to split the work item, from 0.0 (beginning of the\ninput) to 1.0 (end of the input).", + "description": "A fraction at which to split the work item, from 0.0 (beginning of the input) to 1.0 (end of the input).", "format": "double", "type": "number" }, "fractionOfRemainder": { - "description": "The fraction of the remainder of work to split the work item at, from 0.0\n(split at the current position) to 1.0 (end of the input).", + "description": "The fraction of the remainder of work to split the work item at, from 0.0 (split at the current position) to 1.0 (end of the input).", "format": "double", "type": "number" }, @@ -2083,8 +2511,27 @@ }, "type": "object" }, + "Artifact": { + "description": "Job information for templates.", + "id": "Artifact", + "properties": { + "containerSpec": { + "$ref": "ContainerSpec", + "description": "Container image path set for flex Template." + }, + "jobGraphGcsPath": { + "description": "job_graph_gcs_path set for legacy Template.", + "type": "string" + }, + "metadata": { + "$ref": "TemplateMetadata", + "description": "Metadata set for legacy Template." 
+ } + }, + "type": "object" + }, "AutoscalingEvent": { - "description": "A structured message reporting an autoscaling decision made by the Dataflow\nservice.", + "description": "A structured message reporting an autoscaling decision made by the Dataflow service.", "id": "AutoscalingEvent", "properties": { "currentNumWorkers": { @@ -2094,7 +2541,7 @@ }, "description": { "$ref": "StructuredMessage", - "description": "A message describing why the system decided to adjust the current\nnumber of workers, why it failed, or why the system decided to\nnot make any changes to the number of workers." + "description": "A message describing why the system decided to adjust the current number of workers, why it failed, or why the system decided to not make any changes to the number of workers." }, "eventType": { "description": "The type of autoscaling event to report.", @@ -2106,11 +2553,11 @@ "NO_CHANGE" ], "enumDescriptions": [ - "Default type for the enum. Value should never be returned.", - "The TARGET_NUM_WORKERS_CHANGED type should be used when the target\nworker pool size has changed at the start of an actuation. An event\nshould always be specified as TARGET_NUM_WORKERS_CHANGED if it reflects\na change in the target_num_workers.", - "The CURRENT_NUM_WORKERS_CHANGED type should be used when actual worker\npool size has been changed, but the target_num_workers has not changed.", - "The ACTUATION_FAILURE type should be used when we want to report\nan error to the user indicating why the current number of workers\nin the pool could not be changed.\nDisplayed in the current status and history widgets.", - "Used when we want to report to the user a reason why we are\nnot currently adjusting the number of workers.\nShould specify both target_num_workers, current_num_workers and a\ndecision_message." + "Default type for the enum. Value should never be returned.", + "The TARGET_NUM_WORKERS_CHANGED type should be used when the target worker pool size has changed at the start of an actuation. An event should always be specified as TARGET_NUM_WORKERS_CHANGED if it reflects a change in the target_num_workers.", + "The CURRENT_NUM_WORKERS_CHANGED type should be used when actual worker pool size has been changed, but the target_num_workers has not changed.", + "The ACTUATION_FAILURE type should be used when we want to report an error to the user indicating why the current number of workers in the pool could not be changed. Displayed in the current status and history widgets.", + "Used when we want to report to the user a reason why we are not currently adjusting the number of workers. Should specify both target_num_workers, current_num_workers and a decision_message." 
], "type": "string" }, @@ -2120,12 +2567,12 @@ "type": "string" }, "time": { - "description": "The time this event was emitted to indicate a new target or current\nnum_workers value.", + "description": "The time this event was emitted to indicate a new target or current num_workers value.", "format": "google-datetime", "type": "string" }, "workerPool": { - "description": "A short and friendly name for the worker pool this event refers to,\npopulated from the value of PoolStageRelation::user_pool_name.", + "description": "A short and friendly name for the worker pool this event refers to, populated from the value of PoolStageRelation::user_pool_name.", "type": "string" } }, @@ -2204,7 +2651,7 @@ "id": "CPUTime", "properties": { "rate": { - "description": "Average CPU utilization rate (% non-idle cpu / second) since previous\nsample.", + "description": "Average CPU utilization rate (% non-idle cpu / second) since previous sample.", "format": "double", "type": "number" }, @@ -2214,15 +2661,26 @@ "type": "string" }, "totalMs": { - "description": "Total active CPU time across all cores (ie., non-idle) in milliseconds\nsince start-up.", + "description": "Total active CPU time across all cores (i.e., non-idle) in milliseconds since start-up.", "format": "uint64", "type": "string" } }, "type": "object" }, + "CommitTemplateVersionRequest": { + "description": "Commit will add a new TemplateVersion to an existing template.", + "id": "CommitTemplateVersionRequest", + "properties": { + "templateVersion": { + "$ref": "TemplateVersion", + "description": "TemplateVersion object to create." + } + }, + "type": "object" + }, "ComponentSource": { - "description": "Description of an interstitial value between transforms in an execution\nstage.", + "description": "Description of an interstitial value between transforms in an execution stage.", "id": "ComponentSource", "properties": { "name": { @@ -2230,7 +2688,7 @@ "type": "string" }, "originalTransformOrCollection": { - "description": "User name for the original user transform or collection with which this\nsource is most closely associated.", + "description": "User name for the original user transform or collection with which this source is most closely associated.", "type": "string" }, "userName": { @@ -2249,7 +2707,7 @@ "type": "string" }, "originalTransform": { - "description": "User name for the original user transform with which this transform is\nmost closely associated.", + "description": "User name for the original user transform with which this transform is most closely associated.", "type": "string" }, "userName": { @@ -2303,7 +2761,7 @@ "type": "object" }, "ConcatPosition": { - "description": "A position that encapsulates an inner position and an index for the inner\nposition. A ConcatPosition can be used by a reader of a source that\nencapsulates a set of other sources.", + "description": "A position that encapsulates an inner position and an index for the inner position. A ConcatPosition can be used by a reader of a source that encapsulates a set of other sources.", "id": "ConcatPosition", "properties": { "index": { @@ -2405,7 +2863,7 @@ "type": "object" }, "CounterStructuredName": { - "description": "Identifies a counter within a per-job namespace. Counters whose structured\nnames are the same get merged into a single value for the job.", + "description": "Identifies a counter within a per-job namespace. 
Counters whose structured names are the same get merged into a single value for the job.", "id": "CounterStructuredName", "properties": { "componentStepName": { @@ -2417,12 +2875,12 @@ "type": "string" }, "inputIndex": { - "description": "Index of an input collection that's being read from/written to as a side\ninput.\nThe index identifies a step's side inputs starting by 1 (e.g. the first\nside input has input_index 1, the third has input_index 3).\nSide inputs are identified by a pair of (original_step_name, input_index).\nThis field helps uniquely identify them.", + "description": "Index of an input collection that's being read from/written to as a side input. The index identifies a step's side inputs starting by 1 (e.g. the first side input has input_index 1, the third has input_index 3). Side inputs are identified by a pair of (original_step_name, input_index). This field helps uniquely identify them.", "format": "int32", "type": "integer" }, "name": { - "description": "Counter name. Not necessarily globally-unique, but unique within the\ncontext of the other fields.\nRequired.", + "description": "Counter name. Not necessarily globally-unique, but unique within the context of the other fields. Required.", "type": "string" }, "origin": { @@ -2442,11 +2900,11 @@ "type": "string" }, "originalRequestingStepName": { - "description": "The step name requesting an operation, such as GBK.\nI.e. the ParDo causing a read/write from shuffle to occur, or a\nread from side inputs.", + "description": "The step name requesting an operation, such as GBK. I.e. the ParDo causing a read/write from shuffle to occur, or a read from side inputs.", "type": "string" }, "originalStepName": { - "description": "System generated name of the original step in the user's graph, before\noptimization.", + "description": "System generated name of the original step in the user's graph, before optimization.", "type": "string" }, "portion": { @@ -2471,7 +2929,7 @@ "type": "object" }, "CounterStructuredNameAndMetadata": { - "description": "A single message which encapsulates structured name and metadata for a given\ncounter.", + "description": "A single message which encapsulates structured name and metadata for a given counter.", "id": "CounterStructuredNameAndMetadata", "properties": { "metadata": { @@ -2494,7 +2952,7 @@ "type": "boolean" }, "cumulative": { - "description": "True if this counter is reported as the total cumulative aggregate\nvalue accumulated since the worker started working on this WorkItem.\nBy default this is false, indicating that this counter is reported\nas a delta.", + "description": "True if this counter is reported as the total cumulative aggregate value accumulated since the worker started working on this WorkItem. By default this is false, indicating that this counter is reported as a delta.", "type": "boolean" }, "distribution": { @@ -2539,7 +2997,7 @@ "description": "Counter name and aggregation type." }, "shortId": { - "description": "The service-generated short identifier for this counter.\nThe short_id -\u003e (name, metadata) mapping is constant for the lifetime of\na job.", + "description": "The service-generated short identifier for this counter. The short_id -\u003e (name, metadata) mapping is constant for the lifetime of a job.", "format": "int64", "type": "string" }, @@ -2563,7 +3021,7 @@ "description": "The runtime environment for the job." }, "gcsPath": { - "description": "Required. 
A Cloud Storage path to the template from which to\ncreate the job.\nMust be a valid Cloud Storage URL, beginning with `gs://`.", + "description": "Required. A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with `gs://`.", "type": "string" }, "jobName": { @@ -2571,7 +3029,7 @@ "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to\nwhich to direct the request.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request.", "type": "string" }, "parameters": { @@ -2584,6 +3042,17 @@ }, "type": "object" }, + "CreateTemplateVersionRequest": { + "description": "Creates a new Template with TemplateVersions.", + "id": "CreateTemplateVersionRequest", + "properties": { + "templateVersion": { + "$ref": "TemplateVersion", + "description": "The TemplateVersion object to create." + } + }, + "type": "object" + }, "CustomSourceLocation": { "description": "Identifies the location of a custom souce.", "id": "CustomSourceLocation", @@ -2600,14 +3069,14 @@ "id": "DataDiskAssignment", "properties": { "dataDisks": { - "description": "Mounted data disks. The order is important a data disk's 0-based index in\nthis list defines which persistent directory the disk is mounted to, for\nexample the list of { \"myproject-1014-104817-4c2-harness-0-disk-0\" },\n{ \"myproject-1014-104817-4c2-harness-0-disk-1\" }.", + "description": "Mounted data disks. The order is important a data disk's 0-based index in this list defines which persistent directory the disk is mounted to, for example the list of { \"myproject-1014-104817-4c2-harness-0-disk-0\" }, { \"myproject-1014-104817-4c2-harness-0-disk-1\" }.", "items": { "type": "string" }, "type": "array" }, "vmInstance": { - "description": "VM instance name the data disks mounted to, for example\n\"myproject-1014-104817-4c2-harness-0\".", + "description": "VM instance name the data disks mounted to, for example \"myproject-1014-104817-4c2-harness-0\".", "type": "string" } }, @@ -2635,7 +3104,7 @@ "type": "object" }, "DerivedSource": { - "description": "Specification of one of the bundles produced as a result of splitting\na Source (e.g. when executing a SourceSplitRequest, or when\nsplitting an active task using WorkItemStatus.dynamic_source_split),\nrelative to the source being split.", + "description": "Specification of one of the bundles produced as a result of splitting a Source (e.g. when executing a SourceSplitRequest, or when splitting an active task using WorkItemStatus.dynamic_source_split), relative to the source being split.", "id": "DerivedSource", "properties": { "derivationMode": { @@ -2666,7 +3135,7 @@ "id": "Disk", "properties": { "diskType": { - "description": "Disk storage type, as defined by Google Compute Engine. This\nmust be a disk type appropriate to the project and zone in which\nthe workers will run. If unknown or unspecified, the service\nwill attempt to choose a reasonable default.\n\nFor example, the standard persistent disk type is a resource name\ntypically ending in \"pd-standard\". If SSD persistent disks are\navailable, the resource name typically ends with \"pd-ssd\". 
The\nactual valid values are defined the Google Compute Engine API,\nnot by the Cloud Dataflow API; consult the Google Compute Engine\ndocumentation for more information about determining the set of\navailable disk types for a particular project and zone.\n\nGoogle Compute Engine Disk types are local to a particular\nproject in a particular zone, and so the resource name will\ntypically look something like this:\n\ncompute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard", + "description": "Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in \"pd-standard\". If SSD persistent disks are available, the resource name typically ends with \"pd-ssd\". The actual valid values are defined the Google Compute Engine API, not by the Cloud Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard", "type": "string" }, "mountPoint": { @@ -2674,7 +3143,7 @@ "type": "string" }, "sizeGb": { - "description": "Size of disk in GB. If zero or unspecified, the service will\nattempt to choose a reasonable default.", + "description": "Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default.", "format": "int32", "type": "integer" } @@ -2709,7 +3178,7 @@ "type": "string" }, "key": { - "description": "The key identifying the display data.\nThis is intended to be used as a label for the display data\nwhen viewed in a dax monitoring system.", + "description": "The key identifying the display data. This is intended to be used as a label for the display data when viewed in a dax monitoring system.", "type": "string" }, "label": { @@ -2717,11 +3186,11 @@ "type": "string" }, "namespace": { - "description": "The namespace for the key. This is usually a class name or programming\nlanguage namespace (i.e. python module) which defines the display data.\nThis allows a dax monitoring system to specially handle the data\nand perform custom rendering.", + "description": "The namespace for the key. This is usually a class name or programming language namespace (i.e. python module) which defines the display data. This allows a dax monitoring system to specially handle the data and perform custom rendering.", "type": "string" }, "shortStrValue": { - "description": "A possible additional shorter value to display.\nFor example a java_class_name_value of com.mypackage.MyDoFn\nwill be stored with MyDoFn as the short_str_value and\ncom.mypackage.MyDoFn as the java_class_name value.\nshort_str_value can be displayed and java_class_name_value\nwill be displayed as a tooltip.", + "description": "A possible additional shorter value to display. For example a java_class_name_value of com.mypackage.MyDoFn will be stored with MyDoFn as the short_str_value and com.mypackage.MyDoFn as the java_class_name value. 
short_str_value can be displayed and java_class_name_value will be displayed as a tooltip.", "type": "string" }, "strValue": { @@ -2762,7 +3231,7 @@ }, "sum": { "$ref": "SplitInt64", - "description": "Use an int64 since we'd prefer the added precision. If overflow is a common\nproblem we can detect it and use an additional int64 or a double." + "description": "Use an int64 since we'd prefer the added precision. If overflow is a common problem we can detect it and use an additional int64 or a double." }, "sumOfSquares": { "description": "Use a double since the sum of squares is likely to overflow int64.", @@ -2773,30 +3242,36 @@ "type": "object" }, "DynamicSourceSplit": { - "description": "When a task splits using WorkItemStatus.dynamic_source_split, this\nmessage describes the two parts of the split relative to the\ndescription of the current task's input.", + "description": "When a task splits using WorkItemStatus.dynamic_source_split, this message describes the two parts of the split relative to the description of the current task's input.", "id": "DynamicSourceSplit", "properties": { "primary": { "$ref": "DerivedSource", - "description": "Primary part (continued to be processed by worker).\nSpecified relative to the previously-current source.\nBecomes current." + "description": "Primary part (continued to be processed by worker). Specified relative to the previously-current source. Becomes current." }, "residual": { "$ref": "DerivedSource", - "description": "Residual part (returned to the pool of work).\nSpecified relative to the previously-current source." + "description": "Residual part (returned to the pool of work). Specified relative to the previously-current source." } }, "type": "object" }, + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", + "id": "Empty", + "properties": {}, + "type": "object" + }, "Environment": { "description": "Describes the environment in which a Dataflow Job runs.", "id": "Environment", "properties": { "clusterManagerApiService": { - "description": "The type of cluster manager API to use. If unknown or\nunspecified, the service will attempt to choose a reasonable\ndefault. This should be in the form of the API service name,\ne.g. \"compute.googleapis.com\".", + "description": "The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. \"compute.googleapis.com\".", "type": "string" }, "dataset": { - "description": "The dataset for the current project where various workflow\nrelated tables are stored.\n\nThe supported resource type is:\n\nGoogle BigQuery:\n bigquery.googleapis.com/{dataset}", + "description": "The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}", "type": "string" }, "experiments": { @@ -2833,7 +3308,7 @@ "description": "Properties of the object.", "type": "any" }, - "description": "The Cloud Dataflow SDK pipeline options specified by the user. 
These\noptions are passed through the service and are used to recreate the\nSDK pipeline options on the worker in a language agnostic and platform\nindependent way.", + "description": "The Cloud Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way.", "type": "object" }, "serviceAccountEmail": { @@ -2841,11 +3316,11 @@ "type": "string" }, "serviceKmsKeyName": { - "description": "If set, contains the Cloud KMS key identifier used to encrypt data\nat rest, AKA a Customer Managed Encryption Key (CMEK).\n\nFormat:\n projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY", + "description": "If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY", "type": "string" }, "tempStoragePrefix": { - "description": "The prefix of the resources the system should use for temporary\nstorage. The system will append the suffix \"/temp-{JOBNAME} to\nthis resource prefix, where {JOBNAME} is the value of the\njob_name field. The resulting bucket and object prefix is used\nas the prefix of the resources used to store temporary data\nneeded during the job execution. NOTE: This will override the\nvalue in taskrunner_settings.\nThe supported resource type is:\n\nGoogle Cloud Storage:\n\n storage.googleapis.com/{bucket}/{object}\n bucket.storage.googleapis.com/{object}", + "description": "The prefix of the resources the system should use for temporary storage. The system will append the suffix \"/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}", "type": "string" }, "userAgent": { @@ -2861,22 +3336,22 @@ "description": "Properties of the object.", "type": "any" }, - "description": "A structure describing which components and their versions of the service\nare required in order to run the job.", + "description": "A structure describing which components and their versions of the service are required in order to run the job.", "type": "object" }, "workerPools": { - "description": "The worker pools. At least one \"harness\" worker pool must be\nspecified in order for the job to have workers.", + "description": "The worker pools. At least one \"harness\" worker pool must be specified in order for the job to have workers.", "items": { "$ref": "WorkerPool" }, "type": "array" }, "workerRegion": { - "description": "The Compute Engine region\n(https://cloud.google.com/compute/docs/regions-zones/regions-zones) in\nwhich worker processing should occur, e.g. \"us-west1\". Mutually exclusive\nwith worker_zone. If neither worker_region nor worker_zone is specified,\ndefault to the control plane's region.", + "description": "The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. \"us-west1\". Mutually exclusive with worker_zone. 
If neither worker_region nor worker_zone is specified, default to the control plane's region.", "type": "string" }, "workerZone": { - "description": "The Compute Engine zone\n(https://cloud.google.com/compute/docs/regions-zones/regions-zones) in\nwhich worker processing should occur, e.g. \"us-west1-a\". Mutually exclusive\nwith worker_region. If neither worker_region nor worker_zone is specified,\na zone in the control plane's region is chosen based on available capacity.", + "description": "The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. \"us-west1-a\". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane's region is chosen based on available capacity.", "type": "string" } }, @@ -2913,17 +3388,17 @@ ], "enumDescriptions": [ "The job's run state isn't specified.", - "`JOB_STATE_STOPPED` indicates that the job has not\nyet started to run.", + "`JOB_STATE_STOPPED` indicates that the job has not yet started to run.", "`JOB_STATE_RUNNING` indicates that the job is currently running.", - "`JOB_STATE_DONE` indicates that the job has successfully completed.\nThis is a terminal job state. This state may be set by the Cloud Dataflow\nservice, as a transition from `JOB_STATE_RUNNING`. It may also be set via a\nCloud Dataflow `UpdateJob` call, if the job has not yet reached a terminal\nstate.", - "`JOB_STATE_FAILED` indicates that the job has failed. This is a\nterminal job state. This state may only be set by the Cloud Dataflow\nservice, and only as a transition from `JOB_STATE_RUNNING`.", - "`JOB_STATE_CANCELLED` indicates that the job has been explicitly\ncancelled. This is a terminal job state. This state may only be\nset via a Cloud Dataflow `UpdateJob` call, and only if the job has not\nyet reached another terminal state.", - "`JOB_STATE_UPDATED` indicates that the job was successfully updated,\nmeaning that this job was stopped and another job was started, inheriting\nstate from this one. This is a terminal job state. This state may only be\nset by the Cloud Dataflow service, and only as a transition from\n`JOB_STATE_RUNNING`.", - "`JOB_STATE_DRAINING` indicates that the job is in the process of draining.\nA draining job has stopped pulling from its input sources and is processing\nany data that remains in-flight. This state may be set via a Cloud Dataflow\n`UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs\nthat are draining may only transition to `JOB_STATE_DRAINED`,\n`JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`.", - "`JOB_STATE_DRAINED` indicates that the job has been drained.\nA drained job terminated by stopping pulling from its input sources and\nprocessing any data that remained in-flight when draining was requested.\nThis state is a terminal state, may only be set by the Cloud Dataflow\nservice, and only as a transition from `JOB_STATE_DRAINING`.", - "`JOB_STATE_PENDING` indicates that the job has been created but is not yet\nrunning. Jobs that are pending may only transition to `JOB_STATE_RUNNING`,\nor `JOB_STATE_FAILED`.", - "`JOB_STATE_CANCELLING` indicates that the job has been explicitly cancelled\nand is in the process of stopping. Jobs that are cancelling may only\ntransition to `JOB_STATE_CANCELLED` or `JOB_STATE_FAILED`.", - "`JOB_STATE_QUEUED` indicates that the job has been created but is being\ndelayed until launch. 
Jobs that are queued may only transition to\n`JOB_STATE_PENDING` or `JOB_STATE_CANCELLED`." + "`JOB_STATE_DONE` indicates that the job has successfully completed. This is a terminal job state. This state may be set by the Cloud Dataflow service, as a transition from `JOB_STATE_RUNNING`. It may also be set via a Cloud Dataflow `UpdateJob` call, if the job has not yet reached a terminal state.", + "`JOB_STATE_FAILED` indicates that the job has failed. This is a terminal job state. This state may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_RUNNING`.", + "`JOB_STATE_CANCELLED` indicates that the job has been explicitly cancelled. This is a terminal job state. This state may only be set via a Cloud Dataflow `UpdateJob` call, and only if the job has not yet reached another terminal state.", + "`JOB_STATE_UPDATED` indicates that the job was successfully updated, meaning that this job was stopped and another job was started, inheriting state from this one. This is a terminal job state. This state may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_RUNNING`.", + "`JOB_STATE_DRAINING` indicates that the job is in the process of draining. A draining job has stopped pulling from its input sources and is processing any data that remains in-flight. This state may be set via a Cloud Dataflow `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs that are draining may only transition to `JOB_STATE_DRAINED`, `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`.", + "`JOB_STATE_DRAINED` indicates that the job has been drained. A drained job terminated by stopping pulling from its input sources and processing any data that remained in-flight when draining was requested. This state is a terminal state, may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_DRAINING`.", + "`JOB_STATE_PENDING` indicates that the job has been created but is not yet running. Jobs that are pending may only transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`.", + "`JOB_STATE_CANCELLING` indicates that the job has been explicitly cancelled and is in the process of stopping. Jobs that are cancelling may only transition to `JOB_STATE_CANCELLED` or `JOB_STATE_FAILED`.", + "`JOB_STATE_QUEUED` indicates that the job has been created but is being delayed until launch. Jobs that are queued may only transition to `JOB_STATE_PENDING` or `JOB_STATE_CANCELLED`." ], "type": "string" } @@ -2931,7 +3406,7 @@ "type": "object" }, "ExecutionStageSummary": { - "description": "Description of the composing transforms, names/ids, and input/outputs of a\nstage of execution. Some composing transforms and sources may have been\ngenerated by the Dataflow service during execution planning.", + "description": "Description of the composing transforms, names/ids, and input/outputs of a stage of execution. 
Some composing transforms and sources may have been generated by the Dataflow service during execution planning.", "id": "ExecutionStageSummary", "properties": { "componentSource": { @@ -3000,11 +3475,11 @@ "type": "object" }, "FailedLocation": { - "description": "Indicates which [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) failed\nto respond to a request for data.", + "description": "Indicates which [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) failed to respond to a request for data.", "id": "FailedLocation", "properties": { "name": { - "description": "The name of the [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\nfailed to respond.", + "description": "The name of the [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that failed to respond.", "type": "string" } }, @@ -3017,20 +3492,105 @@ "filePattern": { "description": "File Pattern used to access files by the connector.", "type": "string" - } - }, - "type": "object" - }, - "FlattenInstruction": { - "description": "An instruction that copies its inputs (zero or more) to its (single) output.", - "id": "FlattenInstruction", - "properties": { - "inputs": { - "description": "Describes the inputs to the flatten instruction.", - "items": { - "$ref": "InstructionInput" - }, - "type": "array" + } + }, + "type": "object" + }, + "FlattenInstruction": { + "description": "An instruction that copies its inputs (zero or more) to its (single) output.", + "id": "FlattenInstruction", + "properties": { + "inputs": { + "description": "Describes the inputs to the flatten instruction.", + "items": { + "$ref": "InstructionInput" + }, + "type": "array" + } + }, + "type": "object" + }, + "FlexTemplateRuntimeEnvironment": { + "description": "The environment values to be set at runtime for flex template.", + "id": "FlexTemplateRuntimeEnvironment", + "properties": { + "additionalExperiments": { + "description": "Additional experiment flags for the job.", + "items": { + "type": "string" + }, + "type": "array" + }, + "additionalUserLabels": { + "additionalProperties": { + "type": "string" + }, + "description": "Additional user labels to be specified for the job. Keys and values must follow the restrictions specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions) page. An object containing a list of \"key\": value pairs. Example: { \"name\": \"wrench\", \"mass\": \"1kg\", \"count\": \"3\" }.", + "type": "object" + }, + "enableStreamingEngine": { + "description": "Whether to enable Streaming Engine for the job.", + "type": "boolean" + }, + "ipConfiguration": { + "description": "Configuration for VM IPs.", + "enum": [ + "WORKER_IP_UNSPECIFIED", + "WORKER_IP_PUBLIC", + "WORKER_IP_PRIVATE" + ], + "enumDescriptions": [ + "The configuration is unknown, or unspecified.", + "Workers should have public IP addresses.", + "Workers should have private IP addresses." + ], + "type": "string" + }, + "kmsKeyName": { + "description": "Name for the Cloud KMS key for the job. Key format is: projects//locations//keyRings//cryptoKeys/", + "type": "string" + }, + "machineType": { + "description": "The machine type to use for the job. 
Defaults to the value from the template if not specified.", + "type": "string" + }, + "maxWorkers": { + "description": "The maximum number of Google Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.", + "format": "int32", + "type": "integer" + }, + "network": { + "description": "Network to which VMs will be assigned. If empty or unspecified, the service will use the network \"default\".", + "type": "string" + }, + "numWorkers": { + "description": "The initial number of Google Compute Engine instances for the job.", + "format": "int32", + "type": "integer" + }, + "serviceAccountEmail": { + "description": "The email address of the service account to run the job as.", + "type": "string" + }, + "subnetwork": { + "description": "Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form \"https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK\" or \"regions/REGION/subnetworks/SUBNETWORK\". If the subnetwork is located in a Shared VPC network, you must use the complete URL.", + "type": "string" + }, + "tempLocation": { + "description": "The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with `gs://`.", + "type": "string" + }, + "workerRegion": { + "description": "The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. \"us-west1\". Mutually exclusive with worker_zone. If neither worker_region nor worker_zone is specified, default to the control plane's region.", + "type": "string" + }, + "workerZone": { + "description": "The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. \"us-west1-a\". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane's region is chosen based on available capacity. If both `worker_zone` and `zone` are set, `worker_zone` takes precedence.", + "type": "string" + }, + "zone": { + "description": "The Compute Engine [availability zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones) for launching worker instances to run your pipeline. In the future, worker_zone will take precedence.", + "type": "string" } }, "type": "object" @@ -3071,11 +3631,11 @@ "id": "GetDebugConfigRequest", "properties": { "componentId": { - "description": "The internal component id for which debug configuration is\nrequested.", + "description": "The internal component id for which debug configuration is requested.", "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the job specified by job_id.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", "type": "string" }, "workerId": { @@ -3102,7 +3662,7 @@ "properties": { "metadata": { "$ref": "TemplateMetadata", - "description": "The template metadata describing the template name, available\nparameters, etc." + "description": "The template metadata describing the template name, available parameters, etc." 
}, "runtimeMetadata": { "$ref": "RuntimeMetadata", @@ -3110,7 +3670,7 @@ }, "status": { "$ref": "Status", - "description": "The status of the get template request. Any problems with the\nrequest will be indicated in the error_details." + "description": "The status of the get template request. Any problems with the request will be indicated in the error_details." }, "templateType": { "description": "Template Type.", @@ -3130,11 +3690,11 @@ "type": "object" }, "Histogram": { - "description": "Histogram of value counts for a distribution.\n\nBuckets have an inclusive lower bound and exclusive upper bound and use\n\"1,2,5 bucketing\": The first bucket range is from [0,1) and all subsequent\nbucket boundaries are powers of ten multiplied by 1, 2, or 5. Thus, bucket\nboundaries are 0, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, ...\nNegative values are not supported.", + "description": "Histogram of value counts for a distribution. Buckets have an inclusive lower bound and exclusive upper bound and use \"1,2,5 bucketing\": The first bucket range is from [0,1) and all subsequent bucket boundaries are powers of ten multiplied by 1, 2, or 5. Thus, bucket boundaries are 0, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, ... Negative values are not supported.", "id": "Histogram", "properties": { "bucketCounts": { - "description": "Counts of values in each bucket. For efficiency, prefix and trailing\nbuckets with count = 0 are elided. Buckets can store the full range of\nvalues of an unsigned long, with ULLONG_MAX falling into the 59th bucket\nwith range [1e19, 2e19).", + "description": "Counts of values in each bucket. For efficiency, prefix and trailing buckets with count = 0 are elided. Buckets can store the full range of values of an unsigned long, with ULLONG_MAX falling into the 59th bucket with range [1e19, 2e19).", "items": { "format": "int64", "type": "string" @@ -3142,7 +3702,7 @@ "type": "array" }, "firstBucketOffset": { - "description": "Starting index of first stored bucket. The non-inclusive upper-bound of\nthe ith bucket is given by:\n pow(10,(i-first_bucket_offset)/3) * (1,2,5)[(i-first_bucket_offset)%3]", + "description": "Starting index of first stored bucket. The non-inclusive upper-bound of the ith bucket is given by: pow(10,(i-first_bucket_offset)/3) * (1,2,5)[(i-first_bucket_offset)%3]", "format": "int32", "type": "integer" } @@ -3159,7 +3719,7 @@ "type": "string" }, "systemName": { - "description": "System-defined name of the step containing this hot key.\nUnique across the workflow.", + "description": "System-defined name of the step containing this hot key. Unique across the workflow.", "type": "string" }, "userStepName": { @@ -3170,7 +3730,7 @@ "type": "object" }, "InstructionInput": { - "description": "An input of an instruction, as a reference to an output of a\nproducer instruction.", + "description": "An input of an instruction, as a reference to an output of a producer instruction.", "id": "InstructionInput", "properties": { "outputNum": { @@ -3179,7 +3739,7 @@ "type": "integer" }, "producerInstructionIndex": { - "description": "The index (origin zero) of the parallel instruction that produces\nthe output to be consumed by this input. This index is relative\nto the list of instructions in this input's instruction's\ncontaining MapTask.", + "description": "The index (origin zero) of the parallel instruction that produces the output to be consumed by this input. 
This index is relative to the list of instructions in this input's instruction's containing MapTask.", "format": "int32", "type": "integer" } @@ -3203,19 +3763,19 @@ "type": "string" }, "onlyCountKeyBytes": { - "description": "For system-generated byte and mean byte metrics, certain instructions\nshould only report the key size.", + "description": "For system-generated byte and mean byte metrics, certain instructions should only report the key size.", "type": "boolean" }, "onlyCountValueBytes": { - "description": "For system-generated byte and mean byte metrics, certain instructions\nshould only report the value size.", + "description": "For system-generated byte and mean byte metrics, certain instructions should only report the value size.", "type": "boolean" }, "originalName": { - "description": "System-defined name for this output in the original workflow graph.\nOutputs that do not contribute to an original instruction do not set this.", + "description": "System-defined name for this output in the original workflow graph. Outputs that do not contribute to an original instruction do not set this.", "type": "string" }, "systemName": { - "description": "System-defined name of this output.\nUnique across the workflow.", + "description": "System-defined name of this output. Unique across the workflow.", "type": "string" } }, @@ -3271,20 +3831,20 @@ "id": "Job", "properties": { "clientRequestId": { - "description": "The client's unique identifier of the job, re-used across retried attempts.\nIf this field is set, the service will ensure its uniqueness.\nThe request to create a job will fail if the service has knowledge of a\npreviously submitted job with the same client's ID and job name.\nThe caller may use this field to ensure idempotence of job\ncreation across retried attempts to create a job.\nBy default, the field is empty and, in that case, the service ignores it.", + "description": "The client's unique identifier of the job, re-used across retried attempts. If this field is set, the service will ensure its uniqueness. The request to create a job will fail if the service has knowledge of a previously submitted job with the same client's ID and job name. The caller may use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it.", "type": "string" }, "createTime": { - "description": "The timestamp when the job was initially created. Immutable and set by the\nCloud Dataflow service.", + "description": "The timestamp when the job was initially created. Immutable and set by the Cloud Dataflow service.", "format": "google-datetime", "type": "string" }, "createdFromSnapshotId": { - "description": "If this is specified, the job's initial state is populated from the given\nsnapshot.", + "description": "If this is specified, the job's initial state is populated from the given snapshot.", "type": "string" }, "currentState": { - "description": "The current state of the job.\n\nJobs are created in the `JOB_STATE_STOPPED` state unless otherwise\nspecified.\n\nA job in the `JOB_STATE_RUNNING` state may asynchronously enter a\nterminal state. After a job has reached a terminal state, no\nfurther state updates may be made.\n\nThis field may be mutated by the Cloud Dataflow service;\ncallers cannot mutate it.", + "description": "The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. 
A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Cloud Dataflow service; callers cannot mutate it.", "enum": [ "JOB_STATE_UNKNOWN", "JOB_STATE_STOPPED", @@ -3301,17 +3861,17 @@ ], "enumDescriptions": [ "The job's run state isn't specified.", - "`JOB_STATE_STOPPED` indicates that the job has not\nyet started to run.", + "`JOB_STATE_STOPPED` indicates that the job has not yet started to run.", "`JOB_STATE_RUNNING` indicates that the job is currently running.", - "`JOB_STATE_DONE` indicates that the job has successfully completed.\nThis is a terminal job state. This state may be set by the Cloud Dataflow\nservice, as a transition from `JOB_STATE_RUNNING`. It may also be set via a\nCloud Dataflow `UpdateJob` call, if the job has not yet reached a terminal\nstate.", - "`JOB_STATE_FAILED` indicates that the job has failed. This is a\nterminal job state. This state may only be set by the Cloud Dataflow\nservice, and only as a transition from `JOB_STATE_RUNNING`.", - "`JOB_STATE_CANCELLED` indicates that the job has been explicitly\ncancelled. This is a terminal job state. This state may only be\nset via a Cloud Dataflow `UpdateJob` call, and only if the job has not\nyet reached another terminal state.", - "`JOB_STATE_UPDATED` indicates that the job was successfully updated,\nmeaning that this job was stopped and another job was started, inheriting\nstate from this one. This is a terminal job state. This state may only be\nset by the Cloud Dataflow service, and only as a transition from\n`JOB_STATE_RUNNING`.", - "`JOB_STATE_DRAINING` indicates that the job is in the process of draining.\nA draining job has stopped pulling from its input sources and is processing\nany data that remains in-flight. This state may be set via a Cloud Dataflow\n`UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs\nthat are draining may only transition to `JOB_STATE_DRAINED`,\n`JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`.", - "`JOB_STATE_DRAINED` indicates that the job has been drained.\nA drained job terminated by stopping pulling from its input sources and\nprocessing any data that remained in-flight when draining was requested.\nThis state is a terminal state, may only be set by the Cloud Dataflow\nservice, and only as a transition from `JOB_STATE_DRAINING`.", - "`JOB_STATE_PENDING` indicates that the job has been created but is not yet\nrunning. Jobs that are pending may only transition to `JOB_STATE_RUNNING`,\nor `JOB_STATE_FAILED`.", - "`JOB_STATE_CANCELLING` indicates that the job has been explicitly cancelled\nand is in the process of stopping. Jobs that are cancelling may only\ntransition to `JOB_STATE_CANCELLED` or `JOB_STATE_FAILED`.", - "`JOB_STATE_QUEUED` indicates that the job has been created but is being\ndelayed until launch. Jobs that are queued may only transition to\n`JOB_STATE_PENDING` or `JOB_STATE_CANCELLED`." + "`JOB_STATE_DONE` indicates that the job has successfully completed. This is a terminal job state. This state may be set by the Cloud Dataflow service, as a transition from `JOB_STATE_RUNNING`. It may also be set via a Cloud Dataflow `UpdateJob` call, if the job has not yet reached a terminal state.", + "`JOB_STATE_FAILED` indicates that the job has failed. This is a terminal job state. 
This state may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_RUNNING`.", + "`JOB_STATE_CANCELLED` indicates that the job has been explicitly cancelled. This is a terminal job state. This state may only be set via a Cloud Dataflow `UpdateJob` call, and only if the job has not yet reached another terminal state.", + "`JOB_STATE_UPDATED` indicates that the job was successfully updated, meaning that this job was stopped and another job was started, inheriting state from this one. This is a terminal job state. This state may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_RUNNING`.", + "`JOB_STATE_DRAINING` indicates that the job is in the process of draining. A draining job has stopped pulling from its input sources and is processing any data that remains in-flight. This state may be set via a Cloud Dataflow `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs that are draining may only transition to `JOB_STATE_DRAINED`, `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`.", + "`JOB_STATE_DRAINED` indicates that the job has been drained. A drained job terminated by stopping pulling from its input sources and processing any data that remained in-flight when draining was requested. This state is a terminal state, may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_DRAINING`.", + "`JOB_STATE_PENDING` indicates that the job has been created but is not yet running. Jobs that are pending may only transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`.", + "`JOB_STATE_CANCELLING` indicates that the job has been explicitly cancelled and is in the process of stopping. Jobs that are cancelling may only transition to `JOB_STATE_CANCELLED` or `JOB_STATE_FAILED`.", + "`JOB_STATE_QUEUED` indicates that the job has been created but is being delayed until launch. Jobs that are queued may only transition to `JOB_STATE_PENDING` or `JOB_STATE_CANCELLED`." ], "type": "string" }, @@ -3329,46 +3889,46 @@ "description": "Deprecated." }, "id": { - "description": "The unique ID of this job.\n\nThis field is set by the Cloud Dataflow service when the Job is\ncreated, and is immutable for the life of the job.", + "description": "The unique ID of this job. This field is set by the Cloud Dataflow service when the Job is created, and is immutable for the life of the job.", "type": "string" }, "jobMetadata": { "$ref": "JobMetadata", - "description": "This field is populated by the Dataflow service to support filtering jobs\nby the metadata values provided here. Populated for ListJobs and all GetJob\nviews SUMMARY and higher." + "description": "This field is populated by the Dataflow service to support filtering jobs by the metadata values provided here. Populated for ListJobs and all GetJob views SUMMARY and higher." }, "labels": { "additionalProperties": { "type": "string" }, - "description": "User-defined labels for this job.\n\nThe labels map can contain no more than 64 entries. Entries of the labels\nmap are UTF8 strings that comply with the following restrictions:\n\n* Keys must conform to regexp: \\p{Ll}\\p{Lo}{0,62}\n* Values must conform to regexp: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63}\n* Both keys and values are additionally constrained to be \u003c= 128 bytes in\nsize.", + "description": "User-defined labels for this job. The labels map can contain no more than 64 entries. 
Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \\p{Ll}\\p{Lo}{0,62} * Values must conform to regexp: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63} * Both keys and values are additionally constrained to be \u003c= 128 bytes in size.", "type": "object" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", "type": "string" }, "name": { - "description": "The user-specified Cloud Dataflow job name.\n\nOnly one Job with a given name may exist in a project at any\ngiven time. If a caller attempts to create a Job with the same\nname as an already-existing Job, the attempt returns the\nexisting Job.\n\nThe name must match the regular expression\n`[a-z]([-a-z0-9]{0,38}[a-z0-9])?`", + "description": "The user-specified Cloud Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt returns the existing Job. The name must match the regular expression `[a-z]([-a-z0-9]{0,38}[a-z0-9])?`", "type": "string" }, "pipelineDescription": { "$ref": "PipelineDescription", - "description": "Preliminary field: The format of this data may change at any time.\nA description of the user pipeline and stages through which it is executed.\nCreated by Cloud Dataflow service. Only retrieved with\nJOB_VIEW_DESCRIPTION or JOB_VIEW_ALL." + "description": "Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL." }, "projectId": { "description": "The ID of the Cloud Platform project that the job belongs to.", "type": "string" }, "replaceJobId": { - "description": "If this job is an update of an existing job, this field is the job ID\nof the job it replaced.\n\nWhen sending a `CreateJobRequest`, you can update a job by specifying it\nhere. The job named here is stopped, and its intermediate state is\ntransferred to this job.", + "description": "If this job is an update of an existing job, this field is the job ID of the job it replaced. When sending a `CreateJobRequest`, you can update a job by specifying it here. The job named here is stopped, and its intermediate state is transferred to this job.", "type": "string" }, "replacedByJobId": { - "description": "If another job is an update of this job (and thus, this job is in\n`JOB_STATE_UPDATED`), this field contains the ID of that job.", + "description": "If another job is an update of this job (and thus, this job is in `JOB_STATE_UPDATED`), this field contains the ID of that job.", "type": "string" }, "requestedState": { - "description": "The job's requested state.\n\n`UpdateJob` may be used to switch between the `JOB_STATE_STOPPED` and\n`JOB_STATE_RUNNING` states, by setting requested_state. `UpdateJob` may\nalso be used to directly set a job's requested state to\n`JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the\njob if it has not already reached a terminal state.", + "description": "The job's requested state. `UpdateJob` may be used to switch between the `JOB_STATE_STOPPED` and `JOB_STATE_RUNNING` states, by setting requested_state. 
`UpdateJob` may also be used to directly set a job's requested state to `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the job if it has not already reached a terminal state.", "enum": [ "JOB_STATE_UNKNOWN", "JOB_STATE_STOPPED", @@ -3385,34 +3945,34 @@ ], "enumDescriptions": [ "The job's run state isn't specified.", - "`JOB_STATE_STOPPED` indicates that the job has not\nyet started to run.", + "`JOB_STATE_STOPPED` indicates that the job has not yet started to run.", "`JOB_STATE_RUNNING` indicates that the job is currently running.", - "`JOB_STATE_DONE` indicates that the job has successfully completed.\nThis is a terminal job state. This state may be set by the Cloud Dataflow\nservice, as a transition from `JOB_STATE_RUNNING`. It may also be set via a\nCloud Dataflow `UpdateJob` call, if the job has not yet reached a terminal\nstate.", - "`JOB_STATE_FAILED` indicates that the job has failed. This is a\nterminal job state. This state may only be set by the Cloud Dataflow\nservice, and only as a transition from `JOB_STATE_RUNNING`.", - "`JOB_STATE_CANCELLED` indicates that the job has been explicitly\ncancelled. This is a terminal job state. This state may only be\nset via a Cloud Dataflow `UpdateJob` call, and only if the job has not\nyet reached another terminal state.", - "`JOB_STATE_UPDATED` indicates that the job was successfully updated,\nmeaning that this job was stopped and another job was started, inheriting\nstate from this one. This is a terminal job state. This state may only be\nset by the Cloud Dataflow service, and only as a transition from\n`JOB_STATE_RUNNING`.", - "`JOB_STATE_DRAINING` indicates that the job is in the process of draining.\nA draining job has stopped pulling from its input sources and is processing\nany data that remains in-flight. This state may be set via a Cloud Dataflow\n`UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs\nthat are draining may only transition to `JOB_STATE_DRAINED`,\n`JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`.", - "`JOB_STATE_DRAINED` indicates that the job has been drained.\nA drained job terminated by stopping pulling from its input sources and\nprocessing any data that remained in-flight when draining was requested.\nThis state is a terminal state, may only be set by the Cloud Dataflow\nservice, and only as a transition from `JOB_STATE_DRAINING`.", - "`JOB_STATE_PENDING` indicates that the job has been created but is not yet\nrunning. Jobs that are pending may only transition to `JOB_STATE_RUNNING`,\nor `JOB_STATE_FAILED`.", - "`JOB_STATE_CANCELLING` indicates that the job has been explicitly cancelled\nand is in the process of stopping. Jobs that are cancelling may only\ntransition to `JOB_STATE_CANCELLED` or `JOB_STATE_FAILED`.", - "`JOB_STATE_QUEUED` indicates that the job has been created but is being\ndelayed until launch. Jobs that are queued may only transition to\n`JOB_STATE_PENDING` or `JOB_STATE_CANCELLED`." + "`JOB_STATE_DONE` indicates that the job has successfully completed. This is a terminal job state. This state may be set by the Cloud Dataflow service, as a transition from `JOB_STATE_RUNNING`. It may also be set via a Cloud Dataflow `UpdateJob` call, if the job has not yet reached a terminal state.", + "`JOB_STATE_FAILED` indicates that the job has failed. This is a terminal job state. This state may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_RUNNING`.", + "`JOB_STATE_CANCELLED` indicates that the job has been explicitly cancelled. 
This is a terminal job state. This state may only be set via a Cloud Dataflow `UpdateJob` call, and only if the job has not yet reached another terminal state.", + "`JOB_STATE_UPDATED` indicates that the job was successfully updated, meaning that this job was stopped and another job was started, inheriting state from this one. This is a terminal job state. This state may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_RUNNING`.", + "`JOB_STATE_DRAINING` indicates that the job is in the process of draining. A draining job has stopped pulling from its input sources and is processing any data that remains in-flight. This state may be set via a Cloud Dataflow `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs that are draining may only transition to `JOB_STATE_DRAINED`, `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`.", + "`JOB_STATE_DRAINED` indicates that the job has been drained. A drained job terminated by stopping pulling from its input sources and processing any data that remained in-flight when draining was requested. This state is a terminal state, may only be set by the Cloud Dataflow service, and only as a transition from `JOB_STATE_DRAINING`.", + "`JOB_STATE_PENDING` indicates that the job has been created but is not yet running. Jobs that are pending may only transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`.", + "`JOB_STATE_CANCELLING` indicates that the job has been explicitly cancelled and is in the process of stopping. Jobs that are cancelling may only transition to `JOB_STATE_CANCELLED` or `JOB_STATE_FAILED`.", + "`JOB_STATE_QUEUED` indicates that the job has been created but is being delayed until launch. Jobs that are queued may only transition to `JOB_STATE_PENDING` or `JOB_STATE_CANCELLED`." ], "type": "string" }, "stageStates": { - "description": "This field may be mutated by the Cloud Dataflow service;\ncallers cannot mutate it.", + "description": "This field may be mutated by the Cloud Dataflow service; callers cannot mutate it.", "items": { "$ref": "ExecutionStageState" }, "type": "array" }, "startTime": { - "description": "The timestamp when the job was started (transitioned to JOB_STATE_PENDING).\nFlexible resource scheduling jobs are started with some delay after job\ncreation, so start_time is unset before start and is updated when the\njob is started by the Cloud Dataflow service. For other jobs, start_time\nalways equals to create_time and is immutable and set by the Cloud Dataflow\nservice.", + "description": "The timestamp when the job was started (transitioned to JOB_STATE_PENDING). Flexible resource scheduling jobs are started with some delay after job creation, so start_time is unset before start and is updated when the job is started by the Cloud Dataflow service. For other jobs, start_time always equals to create_time and is immutable and set by the Cloud Dataflow service.", "format": "google-datetime", "type": "string" }, "steps": { - "description": "Exactly one of step or steps_location should be specified.\n\nThe top-level steps that constitute the entire job.", + "description": "Exactly one of step or steps_location should be specified. The top-level steps that constitute the entire job. Only retrieved with JOB_VIEW_ALL.", "items": { "$ref": "Step" }, @@ -3423,7 +3983,7 @@ "type": "string" }, "tempFiles": { - "description": "A set of files the system should be aware of that are used\nfor temporary storage. 
These temporary files will be\nremoved on job completion.\nNo duplicates are allowed.\nNo file patterns are supported.\n\nThe supported files are:\n\nGoogle Cloud Storage:\n\n storage.googleapis.com/{bucket}/{object}\n bucket.storage.googleapis.com/{object}", + "description": "A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}", "items": { "type": "string" }, @@ -3433,7 +3993,7 @@ "additionalProperties": { "type": "string" }, - "description": "The map of transform name prefixes of the job to be replaced to the\ncorresponding name prefixes of the new job.", + "description": "The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.", "type": "object" }, "type": { @@ -3445,16 +4005,34 @@ ], "enumDescriptions": [ "The type of the job is unspecified, or unknown.", - "A batch job with a well-defined end point: data is read, data is\nprocessed, data is written, and the job is done.", - "A continuously streaming job with no end: data is read,\nprocessed, and written continuously." + "A batch job with a well-defined end point: data is read, data is processed, data is written, and the job is done.", + "A continuously streaming job with no end: data is read, processed, and written continuously." ], "type": "string" } }, "type": "object" }, + "JobExecutionDetails": { + "description": "Information about the execution of a job.", + "id": "JobExecutionDetails", + "properties": { + "nextPageToken": { + "description": "If present, this response does not contain all requested tasks. To obtain the next page of results, repeat the request with page_token set to this value.", + "type": "string" + }, + "stages": { + "description": "The stages of the job execution.", + "items": { + "$ref": "StageSummary" + }, + "type": "array" + } + }, + "type": "object" + }, "JobExecutionInfo": { - "description": "Additional information about how a Cloud Dataflow job will be executed that\nisn't contained in the submitted job.", + "description": "Additional information about how a Cloud Dataflow job will be executed that isn't contained in the submitted job.", "id": "JobExecutionInfo", "properties": { "stages": { @@ -3468,11 +4046,11 @@ "type": "object" }, "JobExecutionStageInfo": { - "description": "Contains information about how a particular\ngoogle.dataflow.v1beta3.Step will be executed.", + "description": "Contains information about how a particular google.dataflow.v1beta3.Step will be executed.", "id": "JobExecutionStageInfo", "properties": { "stepName": { - "description": "The steps associated with the execution stage.\nNote that stages may have several steps, and that a given step\nmight be run by more than one stage.", + "description": "The steps associated with the execution stage. 
Note that stages may have several steps, and that a given step might be run by more than one stage.", "items": { "type": "string" }, @@ -3501,11 +4079,11 @@ ], "enumDescriptions": [ "The message importance isn't specified, or is unknown.", - "The message is at the 'debug' level: typically only useful for\nsoftware engineers working on the code the job is running.\nTypically, Dataflow pipeline runners do not display log messages\nat this level by default.", - "The message is at the 'detailed' level: somewhat verbose, but\npotentially useful to users. Typically, Dataflow pipeline\nrunners do not display log messages at this level by default.\nThese messages are displayed by default in the Dataflow\nmonitoring UI.", - "The message is at the 'basic' level: useful for keeping\ntrack of the execution of a Dataflow pipeline. Typically,\nDataflow pipeline runners display log messages at this level by\ndefault, and these messages are displayed by default in the\nDataflow monitoring UI.", - "The message is at the 'warning' level: indicating a condition\npertaining to a job which may require human intervention.\nTypically, Dataflow pipeline runners display log messages at this\nlevel by default, and these messages are displayed by default in\nthe Dataflow monitoring UI.", - "The message is at the 'error' level: indicating a condition\npreventing a job from succeeding. Typically, Dataflow pipeline\nrunners display log messages at this level by default, and these\nmessages are displayed by default in the Dataflow monitoring UI." + "The message is at the 'debug' level: typically only useful for software engineers working on the code the job is running. Typically, Dataflow pipeline runners do not display log messages at this level by default.", + "The message is at the 'detailed' level: somewhat verbose, but potentially useful to users. Typically, Dataflow pipeline runners do not display log messages at this level by default. These messages are displayed by default in the Dataflow monitoring UI.", + "The message is at the 'basic' level: useful for keeping track of the execution of a Dataflow pipeline. Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI.", + "The message is at the 'warning' level: indicating a condition pertaining to a job which may require human intervention. Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI.", + "The message is at the 'error' level: indicating a condition preventing a job from succeeding. Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI." ], "type": "string" }, @@ -3522,7 +4100,7 @@ "type": "object" }, "JobMetadata": { - "description": "Metadata available primarily for filtering jobs. Will be included in the\nListJob response and Job SUMMARY view.", + "description": "Metadata available primarily for filtering jobs. Will be included in the ListJob response and Job SUMMARY view.", "id": "JobMetadata", "properties": { "bigTableDetails": { @@ -3575,7 +4153,7 @@ "type": "object" }, "JobMetrics": { - "description": "JobMetrics contains a collection of metrics describing the detailed progress\nof a Dataflow job. 
Metrics correspond to user-defined and system-defined\nmetrics in the job.\n\nThis resource captures only the most recent values of each metric;\ntime-series data can be queried for them (under the same metric names)\nfrom Cloud Monitoring.", + "description": "JobMetrics contains a collection of metrics describing the detailed progress of a Dataflow job. Metrics correspond to user-defined and system-defined metrics in the job. This resource captures only the most recent values of each metric; time-series data can be queried for them (under the same metric names) from Cloud Monitoring.", "id": "JobMetrics", "properties": { "metricTime": { @@ -3594,11 +4172,11 @@ "type": "object" }, "KeyRangeDataDiskAssignment": { - "description": "Data disk assignment information for a specific key-range of a sharded\ncomputation.\nCurrently we only support UTF-8 character splits to simplify encoding into\nJSON.", + "description": "Data disk assignment information for a specific key-range of a sharded computation. Currently we only support UTF-8 character splits to simplify encoding into JSON.", "id": "KeyRangeDataDiskAssignment", "properties": { "dataDisk": { - "description": "The name of the data disk where data for this range is stored.\nThis name is local to the Google Cloud Platform project and uniquely\nidentifies the disk within that project, for example\n\"myproject-1014-104817-4c2-harness-0-disk-1\".", + "description": "The name of the data disk where data for this range is stored. This name is local to the Google Cloud Platform project and uniquely identifies the disk within that project, for example \"myproject-1014-104817-4c2-harness-0-disk-1\".", "type": "string" }, "end": { @@ -3613,19 +4191,19 @@ "type": "object" }, "KeyRangeLocation": { - "description": "Location information for a specific key-range of a sharded computation.\nCurrently we only support UTF-8 character splits to simplify encoding into\nJSON.", + "description": "Location information for a specific key-range of a sharded computation. Currently we only support UTF-8 character splits to simplify encoding into JSON.", "id": "KeyRangeLocation", "properties": { "dataDisk": { - "description": "The name of the data disk where data for this range is stored.\nThis name is local to the Google Cloud Platform project and uniquely\nidentifies the disk within that project, for example\n\"myproject-1014-104817-4c2-harness-0-disk-1\".", + "description": "The name of the data disk where data for this range is stored. This name is local to the Google Cloud Platform project and uniquely identifies the disk within that project, for example \"myproject-1014-104817-4c2-harness-0-disk-1\".", "type": "string" }, "deliveryEndpoint": { - "description": "The physical location of this range assignment to be used for\nstreaming computation cross-worker message delivery.", + "description": "The physical location of this range assignment to be used for streaming computation cross-worker message delivery.", "type": "string" }, "deprecatedPersistentDirectory": { - "description": "DEPRECATED. The location of the persistent state for this range, as a\npersistent directory in the worker local filesystem.", + "description": "DEPRECATED. 
The location of the persistent state for this range, as a persistent directory in the worker local filesystem.", "type": "string" }, "end": { @@ -3651,15 +4229,26 @@ "description": "Gcs path to a file with json serialized ContainerSpec as content.", "type": "string" }, + "environment": { + "$ref": "FlexTemplateRuntimeEnvironment", + "description": "The runtime environment for the FlexTemplate job" + }, "jobName": { "description": "Required. The job name to use for the created job.", "type": "string" }, + "launchOptions": { + "additionalProperties": { + "type": "string" + }, + "description": "Launch options for this flex template job. This is a common set of options across languages and templates. This should not be used to pass job parameters.", + "type": "object" + }, "parameters": { "additionalProperties": { "type": "string" }, - "description": "The parameters for FlexTemplate.\nEx. {\"num_workers\":\"5\"}", + "description": "The parameters for FlexTemplate. Ex. {\"num_workers\":\"5\"}", "type": "object" } }, @@ -3674,7 +4263,7 @@ "description": "Required. Parameter to launch a job form Flex Template." }, "validateOnly": { - "description": "If true, the request is validated but not actually executed.\nDefaults to false.", + "description": "If true, the request is validated but not actually executed. Defaults to false.", "type": "boolean" } }, @@ -3686,7 +4275,7 @@ "properties": { "job": { "$ref": "Job", - "description": "The job that was launched, if the request was not a dry run and\nthe job was successfully launched." + "description": "The job that was launched, if the request was not a dry run and the job was successfully launched." } }, "type": "object" @@ -3714,11 +4303,11 @@ "additionalProperties": { "type": "string" }, - "description": "Only applicable when updating a pipeline. Map of transform name prefixes of\nthe job to be replaced to the corresponding name prefixes of the new job.", + "description": "Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.", "type": "object" }, "update": { - "description": "If set, replace the existing pipeline with the name specified by jobName\nwith this pipeline, preserving state.", + "description": "If set, replace the existing pipeline with the name specified by jobName with this pipeline, preserving state.", "type": "boolean" } }, @@ -3730,7 +4319,7 @@ "properties": { "job": { "$ref": "Job", - "description": "The job that was launched, if the request was not a dry run and\nthe job was successfully launched." + "description": "The job that was launched, if the request was not a dry run and the job was successfully launched." } }, "type": "object" @@ -3745,7 +4334,7 @@ "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the WorkItem's job.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the WorkItem's job.", "type": "string" }, "requestedLeaseDuration": { @@ -3769,14 +4358,14 @@ "type": "array" }, "workerCapabilities": { - "description": "Worker capabilities. WorkItems might be limited to workers with specific\ncapabilities.", + "description": "Worker capabilities. 
WorkItems might be limited to workers with specific capabilities.", "items": { "type": "string" }, "type": "array" }, "workerId": { - "description": "Identifies the worker leasing work -- typically the ID of the\nvirtual machine running the worker.", + "description": "Identifies the worker leasing work -- typically the ID of the virtual machine running the worker.", "type": "string" } }, @@ -3830,11 +4419,11 @@ "type": "object" }, "ListJobsResponse": { - "description": "Response to a request to list Cloud Dataflow jobs in a project. This might\nbe a partial response, depending on the page size in the ListJobsRequest.\nHowever, if the project does not have any jobs, an instance of\nListJobsResponse is not returned and the requests's response\nbody is empty {}.", + "description": "Response to a request to list Cloud Dataflow jobs in a project. This might be a partial response, depending on the page size in the ListJobsRequest. However, if the project does not have any jobs, an instance of ListJobsResponse is not returned and the requests's response body is empty {}.", "id": "ListJobsResponse", "properties": { "failedLocation": { - "description": "Zero or more messages describing the [regional endpoints]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\nfailed to respond.", + "description": "Zero or more messages describing the [regional endpoints] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that failed to respond.", "items": { "$ref": "FailedLocation" }, @@ -3868,12 +4457,30 @@ }, "type": "object" }, + "ListTemplateVersionsResponse": { + "description": "Respond a list of TemplateVersions.", + "id": "ListTemplateVersionsResponse", + "properties": { + "nextPageToken": { + "description": "A token that can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", + "type": "string" + }, + "templateVersions": { + "description": "A list of TemplateVersions.", + "items": { + "$ref": "TemplateVersion" + }, + "type": "array" + } + }, + "type": "object" + }, "MapTask": { - "description": "MapTask consists of an ordered set of instructions, each of which\ndescribes one particular low-level operation for the worker to\nperform in order to accomplish the MapTask's WorkItem.\n\nEach instruction must appear in the list before any instructions which\ndepends on its output.", + "description": "MapTask consists of an ordered set of instructions, each of which describes one particular low-level operation for the worker to perform in order to accomplish the MapTask's WorkItem. Each instruction must appear in the list before any instructions which depends on its output.", "id": "MapTask", "properties": { "counterPrefix": { - "description": "Counter prefix that can be used to prefix counters. Not currently used in\nDataflow.", + "description": "Counter prefix that can be used to prefix counters. Not currently used in Dataflow.", "type": "string" }, "instructions": { @@ -3884,18 +4491,18 @@ "type": "array" }, "stageName": { - "description": "System-defined name of the stage containing this MapTask.\nUnique across the workflow.", + "description": "System-defined name of the stage containing this MapTask. Unique across the workflow.", "type": "string" }, "systemName": { - "description": "System-defined name of this MapTask.\nUnique across the workflow.", + "description": "System-defined name of this MapTask. 
Unique across the workflow.", "type": "string" } }, "type": "object" }, "MemInfo": { - "description": "Information about the memory usage of a worker or a container within a\nworker.", + "description": "Information about the memory usage of a worker or a container within a worker.", "id": "MemInfo", "properties": { "currentLimitBytes": { @@ -3922,11 +4529,11 @@ "type": "object" }, "MetricShortId": { - "description": "The metric short id is returned to the user alongside an offset into\nReportWorkItemStatusRequest", + "description": "The metric short id is returned to the user alongside an offset into ReportWorkItemStatusRequest", "id": "MetricShortId", "properties": { "metricIndex": { - "description": "The index of the corresponding metric in\nthe ReportWorkItemStatusRequest. Required.", + "description": "The index of the corresponding metric in the ReportWorkItemStatusRequest. Required.", "format": "int32", "type": "integer" }, @@ -3939,14 +4546,14 @@ "type": "object" }, "MetricStructuredName": { - "description": "Identifies a metric, by describing the source which generated the\nmetric.", + "description": "Identifies a metric, by describing the source which generated the metric.", "id": "MetricStructuredName", "properties": { "context": { "additionalProperties": { "type": "string" }, - "description": "Zero or more labeled fields which identify the part of the job this\nmetric is associated with, such as the name of a step or collection.\n\nFor example, built-in counters associated with steps will have\ncontext['step'] = \u003cstep-name\u003e. Counters associated with PCollections\nin the SDK will have context['pcollection'] = \u003cpcollection-name\u003e.", + "description": "Zero or more labeled fields which identify the part of the job this metric is associated with, such as the name of a step or collection. For example, built-in counters associated with steps will have context['step'] = . Counters associated with PCollections in the SDK will have context['pcollection'] = .", "type": "object" }, "name": { @@ -3954,7 +4561,7 @@ "type": "string" }, "origin": { - "description": "Origin (namespace) of metric name. May be blank for user-define metrics;\nwill be \"dataflow\" for metrics defined by the Dataflow service or SDK.", + "description": "Origin (namespace) of metric name. May be blank for user-define metrics; will be \"dataflow\" for metrics defined by the Dataflow service or SDK.", "type": "string" } }, @@ -3965,7 +4572,7 @@ "id": "MetricUpdate", "properties": { "cumulative": { - "description": "True if this metric is reported as the total cumulative aggregate\nvalue accumulated since the worker started working on this WorkItem.\nBy default this is false, indicating that this metric is reported\nas a delta that is not associated with any WorkItem.", + "description": "True if this metric is reported as the total cumulative aggregate value accumulated since the worker started working on this WorkItem. By default this is false, indicating that this metric is reported as a delta that is not associated with any WorkItem.", "type": "boolean" }, "distribution": { @@ -3973,23 +4580,23 @@ "type": "any" }, "gauge": { - "description": "A struct value describing properties of a Gauge.\nMetrics of gauge type show the value of a metric across time, and is\naggregated based on the newest value.", + "description": "A struct value describing properties of a Gauge. 
Metrics of gauge type show the value of a metric across time, and is aggregated based on the newest value.", "type": "any" }, "internal": { - "description": "Worker-computed aggregate value for internal use by the Dataflow\nservice.", + "description": "Worker-computed aggregate value for internal use by the Dataflow service.", "type": "any" }, "kind": { - "description": "Metric aggregation kind. The possible metric aggregation kinds are\n\"Sum\", \"Max\", \"Min\", \"Mean\", \"Set\", \"And\", \"Or\", and \"Distribution\".\nThe specified aggregation kind is case-insensitive.\n\nIf omitted, this is not an aggregated value but instead\na single metric sample value.", + "description": "Metric aggregation kind. The possible metric aggregation kinds are \"Sum\", \"Max\", \"Min\", \"Mean\", \"Set\", \"And\", \"Or\", and \"Distribution\". The specified aggregation kind is case-insensitive. If omitted, this is not an aggregated value but instead a single metric sample value.", "type": "string" }, "meanCount": { - "description": "Worker-computed aggregate value for the \"Mean\" aggregation kind.\nThis holds the count of the aggregated values and is used in combination\nwith mean_sum above to obtain the actual mean aggregate value.\nThe only possible value type is Long.", + "description": "Worker-computed aggregate value for the \"Mean\" aggregation kind. This holds the count of the aggregated values and is used in combination with mean_sum above to obtain the actual mean aggregate value. The only possible value type is Long.", "type": "any" }, "meanSum": { - "description": "Worker-computed aggregate value for the \"Mean\" aggregation kind.\nThis holds the sum of the aggregated values and is used in combination\nwith mean_count below to obtain the actual mean aggregate value.\nThe only possible value types are Long and Double.", + "description": "Worker-computed aggregate value for the \"Mean\" aggregation kind. This holds the sum of the aggregated values and is used in combination with mean_count below to obtain the actual mean aggregate value. The only possible value types are Long and Double.", "type": "any" }, "name": { @@ -3997,27 +4604,99 @@ "description": "Name of the metric." }, "scalar": { - "description": "Worker-computed aggregate value for aggregation kinds \"Sum\", \"Max\", \"Min\",\n\"And\", and \"Or\". The possible value types are Long, Double, and Boolean.", + "description": "Worker-computed aggregate value for aggregation kinds \"Sum\", \"Max\", \"Min\", \"And\", and \"Or\". The possible value types are Long, Double, and Boolean.", "type": "any" }, "set": { - "description": "Worker-computed aggregate value for the \"Set\" aggregation kind. The only\npossible value type is a list of Values whose type can be Long, Double,\nor String, according to the metric's type. All Values in the list must\nbe of the same type.", + "description": "Worker-computed aggregate value for the \"Set\" aggregation kind. The only possible value type is a list of Values whose type can be Long, Double, or String, according to the metric's type. All Values in the list must be of the same type.", "type": "any" }, "updateTime": { - "description": "Timestamp associated with the metric value. Optional when workers are\nreporting work progress; it will be filled in responses from the\nmetrics API.", + "description": "Timestamp associated with the metric value. 
Optional when workers are reporting work progress; it will be filled in responses from the metrics API.", "format": "google-datetime", "type": "string" } }, "type": "object" }, + "ModifyTemplateVersionLabelRequest": { + "description": "Either add the label to TemplateVersion or remove it from the TemplateVersion.", + "id": "ModifyTemplateVersionLabelRequest", + "properties": { + "key": { + "description": "The label key for update.", + "type": "string" + }, + "op": { + "description": "Requests for add label to TemplateVersion or remove label from TemplateVersion.", + "enum": [ + "OPERATION_UNSPECIFIED", + "ADD", + "REMOVE" + ], + "enumDescriptions": [ + "Default value.", + "Add the label to the TemplateVersion object.", + "Remove the label from the TemplateVersion object." + ], + "type": "string" + }, + "value": { + "description": "The label value for update.", + "type": "string" + } + }, + "type": "object" + }, + "ModifyTemplateVersionLabelResponse": { + "description": "Respond the labels in the TemplateVersion.", + "id": "ModifyTemplateVersionLabelResponse", + "properties": { + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "All the label in the TemplateVersion.", + "type": "object" + } + }, + "type": "object" + }, + "ModifyTemplateVersionTagRequest": { + "description": "Add a tag to the current TemplateVersion. If tag exist in another TemplateVersion in the Template, remove the tag before add it to the current TemplateVersion. If remove_only set, remove the tag from the current TemplateVersion.", + "id": "ModifyTemplateVersionTagRequest", + "properties": { + "removeOnly": { + "description": "The flag that indicates if the request is only for remove tag from TemplateVersion.", + "type": "boolean" + }, + "tag": { + "description": "The tag for update.", + "type": "string" + } + }, + "type": "object" + }, + "ModifyTemplateVersionTagResponse": { + "description": "Respond the current tags in the TemplateVersion.", + "id": "ModifyTemplateVersionTagResponse", + "properties": { + "tags": { + "description": "All the tags in the TemplateVersion.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "MountedDataDisk": { "description": "Describes mounted data disk.", "id": "MountedDataDisk", "properties": { "dataDisk": { - "description": "The name of the data disk.\nThis name is local to the Google Cloud Platform project and uniquely\nidentifies the disk within that project, for example\n\"myproject-1014-104817-4c2-harness-0-disk-1\".", + "description": "The name of the data disk. This name is local to the Google Cloud Platform project and uniquely identifies the disk within that project, for example \"myproject-1014-104817-4c2-harness-0-disk-1\".", "type": "string" } }, @@ -4028,7 +4707,7 @@ "id": "MultiOutputInfo", "properties": { "tag": { - "description": "The id of the tag the user code will emit to this output by; this\nshould correspond to the tag of some SideInputInfo.", + "description": "The id of the tag the user code will emit to this output by; this should correspond to the tag of some SideInputInfo.", "type": "string" } }, @@ -4074,11 +4753,11 @@ "type": "object" }, "Package": { - "description": "The packages that must be installed in order for a worker to run the\nsteps of the Cloud Dataflow job that will be assigned to its worker\npool.\n\nThis is the mechanism by which the Cloud Dataflow SDK causes code to\nbe loaded onto the workers. 
For example, the Cloud Dataflow Java SDK\nmight use this to install jars containing the user's code and all of the\nvarious dependencies (libraries, data files, etc.) required in order\nfor that code to run.", + "description": "The packages that must be installed in order for a worker to run the steps of the Cloud Dataflow job that will be assigned to its worker pool. This is the mechanism by which the Cloud Dataflow SDK causes code to be loaded onto the workers. For example, the Cloud Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc.) required in order for that code to run.", "id": "Package", "properties": { "location": { - "description": "The resource to read the package from. The supported resource type is:\n\nGoogle Cloud Storage:\n\n storage.googleapis.com/{bucket}\n bucket.storage.googleapis.com/", + "description": "The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/", "type": "string" }, "name": { @@ -4089,7 +4768,7 @@ "type": "object" }, "ParDoInstruction": { - "description": "An instruction that does a ParDo operation.\nTakes one main input and zero or more side inputs, and produces\nzero or more outputs.\nRuns user code.", + "description": "An instruction that does a ParDo operation. Takes one main input and zero or more side inputs, and produces zero or more outputs. Runs user code.", "id": "ParDoInstruction", "properties": { "input": { @@ -4097,7 +4776,7 @@ "description": "The input." }, "multiOutputInfos": { - "description": "Information about each of the outputs, if user_fn is a MultiDoFn.", + "description": "Information about each of the outputs, if user_fn is a MultiDoFn.", "items": { "$ref": "MultiOutputInfo" }, @@ -4162,7 +4841,7 @@ "description": "Additional information for Read instructions." }, "systemName": { - "description": "System-defined name of this operation.\nUnique across the workflow.", + "description": "System-defined name of this operation. Unique across the workflow.", "type": "string" }, "write": { @@ -4208,7 +4887,7 @@ "type": "string" }, "paramType": { - "description": "Optional. The type of the parameter.\nUsed for selecting input picker.", + "description": "Optional. The type of the parameter. Used for selecting input picker.", "enum": [ "DEFAULT", "TEXT", @@ -4246,7 +4925,7 @@ "type": "object" }, "PartialGroupByKeyInstruction": { - "description": "An instruction that does a partial group-by-key.\nOne input and one output.", + "description": "An instruction that does a partial group-by-key. 
One input and one output.", "id": "PartialGroupByKeyInstruction", "properties": { "input": { @@ -4262,11 +4941,11 @@ "type": "object" }, "originalCombineValuesInputStoreName": { - "description": "If this instruction includes a combining function this is the name of the\nintermediate store between the GBK and the CombineValues.", + "description": "If this instruction includes a combining function this is the name of the intermediate store between the GBK and the CombineValues.", "type": "string" }, "originalCombineValuesStepName": { - "description": "If this instruction includes a combining function, this is the name of the\nCombineValues instruction lifted into this instruction.", + "description": "If this instruction includes a combining function, this is the name of the CombineValues instruction lifted into this instruction.", "type": "string" }, "sideInputs": { @@ -4288,7 +4967,7 @@ "type": "object" }, "PipelineDescription": { - "description": "A descriptive representation of submitted pipeline as well as the executed\nform. This data is provided by the Dataflow service for ease of visualizing\nthe pipeline and interpreting Dataflow provided metrics.", + "description": "A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics.", "id": "PipelineDescription", "properties": { "displayData": { @@ -4315,8 +4994,25 @@ }, "type": "object" }, + "Point": { + "description": "A point in the timeseries.", + "id": "Point", + "properties": { + "time": { + "description": "The timestamp of the point.", + "format": "google-datetime", + "type": "string" + }, + "value": { + "description": "The value of the point.", + "format": "double", + "type": "number" + } + }, + "type": "object" + }, "Position": { - "description": "Position defines a position within a collection of data. The value\ncan be either the end position, a key (used with ordered\ncollections), a byte offset, or a record index.", + "description": "Position defines a position within a collection of data. The value can be either the end position, a key (used with ordered collections), a byte offset, or a record index.", "id": "Position", "properties": { "byteOffset": { @@ -4329,7 +5025,7 @@ "description": "CloudPosition is a concat position." }, "end": { - "description": "Position is past all other positions. Also useful for the end\nposition of an unbounded range.", + "description": "Position is past all other positions. Also useful for the end position of an unbounded range.", "type": "boolean" }, "key": { @@ -4342,12 +5038,31 @@ "type": "string" }, "shufflePosition": { - "description": "CloudPosition is a base64 encoded BatchShufflePosition (with FIXED\nsharding).", + "description": "CloudPosition is a base64 encoded BatchShufflePosition (with FIXED sharding).", "type": "string" } }, "type": "object" }, + "ProgressTimeseries": { + "description": "Information about the progress of some component of job execution.", + "id": "ProgressTimeseries", + "properties": { + "currentProgress": { + "description": "The current progress of the component, in the range [0,1].", + "format": "double", + "type": "number" + }, + "dataPoints": { + "description": "History of progress for the component. 
Points are sorted by time.", + "items": { + "$ref": "Point" + }, + "type": "array" + } + }, + "type": "object" + }, "PubSubIODetails": { "description": "Metadata for a PubSub connector used by the job.", "id": "PubSubIODetails", @@ -4364,7 +5079,7 @@ "type": "object" }, "PubsubLocation": { - "description": "Identifies a pubsub location to use for transferring data into or\nout of a streaming Dataflow job.", + "description": "Identifies a pubsub location to use for transferring data into or out of a streaming Dataflow job.", "id": "PubsubLocation", "properties": { "dropLateData": { @@ -4372,23 +5087,23 @@ "type": "boolean" }, "idLabel": { - "description": "If set, contains a pubsub label from which to extract record ids.\nIf left empty, record deduplication will be strictly best effort.", + "description": "If set, contains a pubsub label from which to extract record ids. If left empty, record deduplication will be strictly best effort.", "type": "string" }, "subscription": { - "description": "A pubsub subscription, in the form of\n\"pubsub.googleapis.com/subscriptions/\u003cproject-id\u003e/\u003csubscription-name\u003e\"", + "description": "A pubsub subscription, in the form of \"pubsub.googleapis.com/subscriptions//\"", "type": "string" }, "timestampLabel": { - "description": "If set, contains a pubsub label from which to extract record timestamps.\nIf left empty, record timestamps will be generated upon arrival.", + "description": "If set, contains a pubsub label from which to extract record timestamps. If left empty, record timestamps will be generated upon arrival.", "type": "string" }, "topic": { - "description": "A pubsub topic, in the form of\n\"pubsub.googleapis.com/topics/\u003cproject-id\u003e/\u003ctopic-name\u003e\"", + "description": "A pubsub topic, in the form of \"pubsub.googleapis.com/topics//\"", "type": "string" }, "trackingSubscription": { - "description": "If set, specifies the pubsub subscription that will be used for tracking\ncustom time timestamps for watermark estimation.", + "description": "If set, specifies the pubsub subscription that will be used for tracking custom time timestamps for watermark estimation.", "type": "string" }, "withAttributes": { @@ -4418,8 +5133,30 @@ }, "type": "object" }, + "QueryInfo": { + "description": "Information about a validated query.", + "id": "QueryInfo", + "properties": { + "queryProperty": { + "description": "Includes an entry for each satisfied QueryProperty.", + "items": { + "enum": [ + "QUERY_PROPERTY_UNSPECIFIED", + "HAS_UNBOUNDED_SOURCE" + ], + "enumDescriptions": [ + "The query property is unknown or unspecified.", + "Indicates this query reads from \u003e= 1 unbounded source." + ], + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "ReadInstruction": { - "description": "An instruction that reads records.\nTakes no inputs, produces one output.", + "description": "An instruction that reads records. 
Takes no inputs, produces one output.", "id": "ReadInstruction", "properties": { "source": { @@ -4439,7 +5176,7 @@ "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the WorkItem's job.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the WorkItem's job.", "type": "string" }, "unifiedWorkerRequest": { @@ -4451,14 +5188,14 @@ "type": "object" }, "workItemStatuses": { - "description": "The order is unimportant, except that the order of the\nWorkItemServiceState messages in the ReportWorkItemStatusResponse\ncorresponds to the order of WorkItemStatus messages here.", + "description": "The order is unimportant, except that the order of the WorkItemServiceState messages in the ReportWorkItemStatusResponse corresponds to the order of WorkItemStatus messages here.", "items": { "$ref": "WorkItemStatus" }, "type": "array" }, "workerId": { - "description": "The ID of the worker reporting the WorkItem status. If this\ndoes not match the ID of the worker which the Dataflow service\nbelieves currently has the lease on the WorkItem, the report\nwill be dropped (with an error response).", + "description": "The ID of the worker reporting the WorkItem status. If this does not match the ID of the worker which the Dataflow service believes currently has the lease on the WorkItem, the report will be dropped (with an error response).", "type": "string" } }, @@ -4477,7 +5214,7 @@ "type": "object" }, "workItemServiceStates": { - "description": "A set of messages indicating the service-side state for each\nWorkItem whose status was reported, in the same order as the\nWorkItemStatus messages in the ReportWorkItemStatusRequest which\nresulting in this response.", + "description": "A set of messages indicating the service-side state for each WorkItem whose status was reported, in the same order as the WorkItemStatus messages in the ReportWorkItemStatusRequest which resulting in this response.", "items": { "$ref": "WorkItemServiceState" }, @@ -4487,11 +5224,11 @@ "type": "object" }, "ReportedParallelism": { - "description": "Represents the level of parallelism in a WorkItem's input,\nreported by the worker.", + "description": "Represents the level of parallelism in a WorkItem's input, reported by the worker.", "id": "ReportedParallelism", "properties": { "isInfinite": { - "description": "Specifies whether the parallelism is infinite. If true, \"value\" is\nignored.\nInfinite parallelism means the service will assume that the work item\ncan always be split into more non-empty work items by dynamic splitting.\nThis is a work-around for lack of support for infinity by the current\nJSON-based Java RPC stack.", + "description": "Specifies whether the parallelism is infinite. If true, \"value\" is ignored. Infinite parallelism means the service will assume that the work item can always be split into more non-empty work items by dynamic splitting. This is a work-around for lack of support for infinity by the current JSON-based Java RPC stack.", "type": "boolean" }, "value": { @@ -4503,14 +5240,14 @@ "type": "object" }, "ResourceUtilizationReport": { - "description": "Worker metrics exported from workers. This contains resource utilization\nmetrics accumulated from a variety of sources. For more information, see\ngo/df-resource-signals.", + "description": "Worker metrics exported from workers. 
This contains resource utilization metrics accumulated from a variety of sources. For more information, see go/df-resource-signals.", "id": "ResourceUtilizationReport", "properties": { "containers": { "additionalProperties": { "$ref": "ResourceUtilizationReport" }, - "description": "Per container information.\nKey: container name.", + "description": "Per container information. Key: container name.", "type": "object" }, "cpuTime": { @@ -4551,11 +5288,15 @@ "additionalProperties": { "type": "string" }, - "description": "Additional user labels to be specified for the job.\nKeys and values should follow the restrictions specified in the [labeling\nrestrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions)\npage.", + "description": "Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions) page. An object containing a list of \"key\": value pairs. Example: { \"name\": \"wrench\", \"mass\": \"1kg\", \"count\": \"3\" }.", "type": "object" }, "bypassTempDirValidation": { - "description": "Whether to bypass the safety checks for the job's temporary directory.\nUse with caution.", + "description": "Whether to bypass the safety checks for the job's temporary directory. Use with caution.", + "type": "boolean" + }, + "enableStreamingEngine": { + "description": "Whether to enable Streaming Engine for the job.", "type": "boolean" }, "ipConfiguration": { @@ -4573,20 +5314,20 @@ "type": "string" }, "kmsKeyName": { - "description": "Optional. Name for the Cloud KMS key for the job.\nKey format is:\nprojects/\u003cproject\u003e/locations/\u003clocation\u003e/keyRings/\u003ckeyring\u003e/cryptoKeys/\u003ckey\u003e", + "description": "Name for the Cloud KMS key for the job. Key format is: projects//locations//keyRings//cryptoKeys/", "type": "string" }, "machineType": { - "description": "The machine type to use for the job. Defaults to the value from the\ntemplate if not specified.", + "description": "The machine type to use for the job. Defaults to the value from the template if not specified.", "type": "string" }, "maxWorkers": { - "description": "The maximum number of Google Compute Engine instances to be made\navailable to your pipeline during execution, from 1 to 1000.", + "description": "The maximum number of Google Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.", "format": "int32", "type": "integer" }, "network": { - "description": "Network to which VMs will be assigned. If empty or unspecified,\nthe service will use the network \"default\".", + "description": "Network to which VMs will be assigned. If empty or unspecified, the service will use the network \"default\".", "type": "string" }, "numWorkers": { @@ -4599,23 +5340,23 @@ "type": "string" }, "subnetwork": { - "description": "Subnetwork to which VMs will be assigned, if desired. Expected to be of\nthe form \"regions/REGION/subnetworks/SUBNETWORK\".", + "description": "Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form \"https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK\" or \"regions/REGION/subnetworks/SUBNETWORK\". 
If the subnetwork is located in a Shared VPC network, you must use the complete URL.", "type": "string" }, "tempLocation": { - "description": "The Cloud Storage path to use for temporary files.\nMust be a valid Cloud Storage URL, beginning with `gs://`.", + "description": "The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with `gs://`.", "type": "string" }, "workerRegion": { - "description": "The Compute Engine region\n(https://cloud.google.com/compute/docs/regions-zones/regions-zones) in\nwhich worker processing should occur, e.g. \"us-west1\". Mutually exclusive\nwith worker_zone. If neither worker_region nor worker_zone is specified,\ndefault to the control plane's region.", + "description": "The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. \"us-west1\". Mutually exclusive with worker_zone. If neither worker_region nor worker_zone is specified, default to the control plane's region.", "type": "string" }, "workerZone": { - "description": "The Compute Engine zone\n(https://cloud.google.com/compute/docs/regions-zones/regions-zones) in\nwhich worker processing should occur, e.g. \"us-west1-a\". Mutually exclusive\nwith worker_region. If neither worker_region nor worker_zone is specified,\na zone in the control plane's region is chosen based on available capacity.\nIf both `worker_zone` and `zone` are set, `worker_zone` takes precedence.", + "description": "The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. \"us-west1-a\". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane's region is chosen based on available capacity. If both `worker_zone` and `zone` are set, `worker_zone` takes precedence.", "type": "string" }, "zone": { - "description": "The Compute Engine [availability\nzone](https://cloud.google.com/compute/docs/regions-zones/regions-zones)\nfor launching worker instances to run your pipeline.\nIn the future, worker_zone will take precedence.", + "description": "The Compute Engine [availability zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones) for launching worker instances to run your pipeline. In the future, worker_zone will take precedence.", "type": "string" } }, @@ -4673,7 +5414,7 @@ "type": "string" }, "useSingleCorePerContainer": { - "description": "If true, recommends the Dataflow service to use only one core per SDK\ncontainer instance with this image. If false (or unset) recommends using\nmore than one core per SDK container instance with this image for\nefficiency. Note that Dataflow service may choose to override this property\nif needed.", + "description": "If true, recommends the Dataflow service to use only one core per SDK container instance with this image. If false (or unset) recommends using more than one core per SDK container instance with this image for efficiency. 
Note that Dataflow service may choose to override this property if needed.", "type": "boolean" } }, @@ -4696,7 +5437,7 @@ "Cloud Dataflow is unaware of this version.", "This is a known version of an SDK, and is supported.", "A newer version of the SDK family exists, and an update is recommended.", - "This version of the SDK is deprecated and will eventually be no\nlonger supported.", + "This version of the SDK is deprecated and will eventually be no longer supported.", "Support for this SDK version has ended and it should no longer be used." ], "type": "string" @@ -4725,7 +5466,7 @@ "type": "string" }, "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the job specified by job_id.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", "type": "string" }, "workerId": { @@ -4736,7 +5477,7 @@ "type": "object" }, "SendDebugCaptureResponse": { - "description": "Response to a send capture request.\nnothing", + "description": "Response to a send capture request. nothing", "id": "SendDebugCaptureResponse", "properties": {}, "type": "object" @@ -4746,7 +5487,7 @@ "id": "SendWorkerMessagesRequest", "properties": { "location": { - "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the job.", + "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job.", "type": "string" }, "workerMessages": { @@ -4796,11 +5537,11 @@ "type": "array" }, "stageName": { - "description": "System-defined name of the stage containing the SeqDo operation.\nUnique across the workflow.", + "description": "System-defined name of the stage containing the SeqDo operation. Unique across the workflow.", "type": "string" }, "systemName": { - "description": "System-defined name of the SeqDo operation.\nUnique across the workflow.", + "description": "System-defined name of the SeqDo operation. Unique across the workflow.", "type": "string" }, "userFn": { @@ -4858,14 +5599,14 @@ "type": "object" }, "sources": { - "description": "The source(s) to read element(s) from to get the value of this side input.\nIf more than one source, then the elements are taken from the\nsources, in the specified order if order matters.\nAt least one source is required.", + "description": "The source(s) to read element(s) from to get the value of this side input. If more than one source, then the elements are taken from the sources, in the specified order if order matters. At least one source is required.", "items": { "$ref": "Source" }, "type": "array" }, "tag": { - "description": "The id of the tag the user code will access this side input by;\nthis should correspond to the tag of some MultiOutputInfo.", + "description": "The id of the tag the user code will access this side input by; this should correspond to the tag of some MultiOutputInfo.", "type": "string" } }, @@ -4908,7 +5649,7 @@ "type": "string" }, "diskSizeBytes": { - "description": "The disk byte size of the snapshot. Only available for snapshots in READY\nstate.", + "description": "The disk byte size of the snapshot. 
Only available for snapshots in READY state.", "format": "int64", "type": "string" }, @@ -4943,7 +5684,7 @@ ], "enumDescriptions": [ "Unknown state.", - "Snapshot intent to create has been persisted, snapshotting of state has not\nyet started.", + "Snapshot intent to create has been persisted, snapshotting of state has not yet started.", "Snapshotting is being performed.", "Snapshot has been created and is ready to be used.", "Snapshot failed to be created.", @@ -4988,7 +5729,7 @@ "id": "Source", "properties": { "baseSpecs": { - "description": "While splitting, sources may specify the produced bundles\nas differences against another source, in order to save backend-side\nmemory and allow bigger jobs. For details, see SourceSplitRequest.\nTo support this use case, the full set of parameters of the source\nis logically obtained by taking the latest explicitly specified value\nof each parameter in the order:\nbase_specs (later items win), spec (overrides anything in base_specs).", + "description": "While splitting, sources may specify the produced bundles as differences against another source, in order to save backend-side memory and allow bigger jobs. For details, see SourceSplitRequest. To support this use case, the full set of parameters of the source is logically obtained by taking the latest explicitly specified value of each parameter in the order: base_specs (later items win), spec (overrides anything in base_specs).", "items": { "additionalProperties": { "description": "Properties of the object.", @@ -5007,12 +5748,12 @@ "type": "object" }, "doesNotNeedSplitting": { - "description": "Setting this value to true hints to the framework that the source\ndoesn't need splitting, and using SourceSplitRequest on it would\nyield SOURCE_SPLIT_OUTCOME_USE_CURRENT.\n\nE.g. a file splitter may set this to true when splitting a single file\ninto a set of byte ranges of appropriate size, and set this\nto false when splitting a filepattern into individual files.\nHowever, for efficiency, a file splitter may decide to produce\nfile subranges directly from the filepattern to avoid a splitting\nround-trip.\n\nSee SourceSplitRequest for an overview of the splitting process.\n\nThis field is meaningful only in the Source objects populated\nby the user (e.g. when filling in a DerivedSource).\nSource objects supplied by the framework to the user don't have\nthis field populated.", + "description": "Setting this value to true hints to the framework that the source doesn't need splitting, and using SourceSplitRequest on it would yield SOURCE_SPLIT_OUTCOME_USE_CURRENT. E.g. a file splitter may set this to true when splitting a single file into a set of byte ranges of appropriate size, and set this to false when splitting a filepattern into individual files. However, for efficiency, a file splitter may decide to produce file subranges directly from the filepattern to avoid a splitting round-trip. See SourceSplitRequest for an overview of the splitting process. This field is meaningful only in the Source objects populated by the user (e.g. when filling in a DerivedSource). Source objects supplied by the framework to the user don't have this field populated.", "type": "boolean" }, "metadata": { "$ref": "SourceMetadata", - "description": "Optionally, metadata for this source can be supplied right away,\navoiding a SourceGetMetadataOperation roundtrip\n(see SourceOperationRequest).\n\nThis field is meaningful only in the Source objects populated\nby the user (e.g. 
when filling in a DerivedSource).\nSource objects supplied by the framework to the user don't have\nthis field populated." + "description": "Optionally, metadata for this source can be supplied right away, avoiding a SourceGetMetadataOperation roundtrip (see SourceOperationRequest). This field is meaningful only in the Source objects populated by the user (e.g. when filling in a DerivedSource). Source objects supplied by the framework to the user don't have this field populated." }, "spec": { "additionalProperties": { @@ -5071,27 +5812,27 @@ "type": "object" }, "SourceMetadata": { - "description": "Metadata about a Source useful for automatically optimizing\nand tuning the pipeline, etc.", + "description": "Metadata about a Source useful for automatically optimizing and tuning the pipeline, etc.", "id": "SourceMetadata", "properties": { "estimatedSizeBytes": { - "description": "An estimate of the total size (in bytes) of the data that would be\nread from this source. This estimate is in terms of external storage\nsize, before any decompression or other processing done by the reader.", + "description": "An estimate of the total size (in bytes) of the data that would be read from this source. This estimate is in terms of external storage size, before any decompression or other processing done by the reader.", "format": "int64", "type": "string" }, "infinite": { - "description": "Specifies that the size of this source is known to be infinite\n(this is a streaming source).", + "description": "Specifies that the size of this source is known to be infinite (this is a streaming source).", "type": "boolean" }, "producesSortedKeys": { - "description": "Whether this source is known to produce key/value pairs with\nthe (encoded) keys in lexicographically sorted order.", + "description": "Whether this source is known to produce key/value pairs with the (encoded) keys in lexicographically sorted order.", "type": "boolean" } }, "type": "object" }, "SourceOperationRequest": { - "description": "A work item that represents the different operations that can be\nperformed on a user-defined Source specification.", + "description": "A work item that represents the different operations that can be performed on a user-defined Source specification.", "id": "SourceOperationRequest", "properties": { "getMetadata": { @@ -5103,7 +5844,7 @@ "type": "string" }, "originalName": { - "description": "System-defined name for the Read instruction for this source\nin the original workflow graph.", + "description": "System-defined name for the Read instruction for this source in the original workflow graph.", "type": "string" }, "split": { @@ -5111,18 +5852,18 @@ "description": "Information about a request to split a source." }, "stageName": { - "description": "System-defined name of the stage containing the source operation.\nUnique across the workflow.", + "description": "System-defined name of the stage containing the source operation. Unique across the workflow.", "type": "string" }, "systemName": { - "description": "System-defined name of the Read instruction for this source.\nUnique across the workflow.", + "description": "System-defined name of the Read instruction for this source. 
Unique across the workflow.", "type": "string" } }, "type": "object" }, "SourceOperationResponse": { - "description": "The result of a SourceOperationRequest, specified in\nReportWorkItemStatusRequest.source_operation when the work item\nis completed.", + "description": "The result of a SourceOperationRequest, specified in ReportWorkItemStatusRequest.source_operation when the work item is completed.", "id": "SourceOperationResponse", "properties": { "getMetadata": { @@ -5137,11 +5878,11 @@ "type": "object" }, "SourceSplitOptions": { - "description": "Hints for splitting a Source into bundles (parts for parallel\nprocessing) using SourceSplitRequest.", + "description": "Hints for splitting a Source into bundles (parts for parallel processing) using SourceSplitRequest.", "id": "SourceSplitOptions", "properties": { "desiredBundleSizeBytes": { - "description": "The source should be split into a set of bundles where the estimated size\nof each is approximately this many bytes.", + "description": "The source should be split into a set of bundles where the estimated size of each is approximately this many bytes.", "format": "int64", "type": "string" }, @@ -5154,7 +5895,7 @@ "type": "object" }, "SourceSplitRequest": { - "description": "Represents the operation to split a high-level Source specification\ninto bundles (parts for parallel processing).\n\nAt a high level, splitting of a source into bundles happens as follows:\nSourceSplitRequest is applied to the source. If it returns\nSOURCE_SPLIT_OUTCOME_USE_CURRENT, no further splitting happens and the source\nis used \"as is\". Otherwise, splitting is applied recursively to each\nproduced DerivedSource.\n\nAs an optimization, for any Source, if its does_not_need_splitting is\ntrue, the framework assumes that splitting this source would return\nSOURCE_SPLIT_OUTCOME_USE_CURRENT, and doesn't initiate a SourceSplitRequest.\nThis applies both to the initial source being split and to bundles\nproduced from it.", + "description": "Represents the operation to split a high-level Source specification into bundles (parts for parallel processing). At a high level, splitting of a source into bundles happens as follows: SourceSplitRequest is applied to the source. If it returns SOURCE_SPLIT_OUTCOME_USE_CURRENT, no further splitting happens and the source is used \"as is\". Otherwise, splitting is applied recursively to each produced DerivedSource. As an optimization, for any Source, if its does_not_need_splitting is true, the framework assumes that splitting this source would return SOURCE_SPLIT_OUTCOME_USE_CURRENT, and doesn't initiate a SourceSplitRequest. This applies both to the initial source being split and to bundles produced from it.", "id": "SourceSplitRequest", "properties": { "options": { @@ -5173,14 +5914,14 @@ "id": "SourceSplitResponse", "properties": { "bundles": { - "description": "If outcome is SPLITTING_HAPPENED, then this is a list of bundles\ninto which the source was split. Otherwise this field is ignored.\nThis list can be empty, which means the source represents an empty input.", + "description": "If outcome is SPLITTING_HAPPENED, then this is a list of bundles into which the source was split. Otherwise this field is ignored. 
This list can be empty, which means the source represents an empty input.", "items": { "$ref": "DerivedSource" }, "type": "array" }, "outcome": { - "description": "Indicates whether splitting happened and produced a list of bundles.\nIf this is USE_CURRENT_SOURCE_AS_IS, the current source should\nbe processed \"as is\" without splitting. \"bundles\" is ignored in this case.\nIf this is SPLITTING_HAPPENED, then \"bundles\" contains a list of\nbundles into which the source was split.", + "description": "Indicates whether splitting happened and produced a list of bundles. If this is USE_CURRENT_SOURCE_AS_IS, the current source should be processed \"as is\" without splitting. \"bundles\" is ignored in this case. If this is SPLITTING_HAPPENED, then \"bundles\" contains a list of bundles into which the source was split.", "enum": [ "SOURCE_SPLIT_OUTCOME_UNKNOWN", "SOURCE_SPLIT_OUTCOME_USE_CURRENT", @@ -5250,7 +5991,7 @@ "type": "object" }, "SplitInt64": { - "description": "A representation of an int64, n, that is immune to precision loss when\nencoded in JSON.", + "description": "A representation of an int64, n, that is immune to precision loss when encoded in JSON.", "id": "SplitInt64", "properties": { "highBits": { @@ -5266,6 +6007,24 @@ }, "type": "object" }, + "StageExecutionDetails": { + "description": "Information about the workers and work items within a stage.", + "id": "StageExecutionDetails", + "properties": { + "nextPageToken": { + "description": "If present, this response does not contain all requested tasks. To obtain the next page of results, repeat the request with page_token set to this value.", + "type": "string" + }, + "workers": { + "description": "Workers that have done work on the stage.", + "items": { + "$ref": "WorkerDetails" + }, + "type": "array" + } + }, + "type": "object" + }, "StageSource": { "description": "Description of an input or output of an execution stage.", "id": "StageSource", @@ -5275,7 +6034,7 @@ "type": "string" }, "originalTransformOrCollection": { - "description": "User name for the original user transform or collection with which this\nsource is most closely associated.", + "description": "User name for the original user transform or collection with which this source is most closely associated.", "type": "string" }, "sizeBytes": { @@ -5290,6 +6049,58 @@ }, "type": "object" }, + "StageSummary": { + "description": "Information about a particular execution stage of a job.", + "id": "StageSummary", + "properties": { + "endTime": { + "description": "End time of this stage. If the work item is completed, this is the actual end time of the stage. Otherwise, it is the predicted end time.", + "format": "google-datetime", + "type": "string" + }, + "metrics": { + "description": "Metrics for this stage.", + "items": { + "$ref": "MetricUpdate" + }, + "type": "array" + }, + "progress": { + "$ref": "ProgressTimeseries", + "description": "Progress for this stage. Only applicable to Batch jobs." 
+ }, + "stageId": { + "description": "ID of this stage", + "type": "string" + }, + "startTime": { + "description": "Start time of this stage.", + "format": "google-datetime", + "type": "string" + }, + "state": { + "description": "State of this stage.", + "enum": [ + "EXECUTION_STATE_UNKNOWN", + "EXECUTION_STATE_NOT_STARTED", + "EXECUTION_STATE_RUNNING", + "EXECUTION_STATE_SUCCEEDED", + "EXECUTION_STATE_FAILED", + "EXECUTION_STATE_CANCELLED" + ], + "enumDescriptions": [ + "The component state is unknown or unspecified.", + "The component is not yet running.", + "The component is currently running.", + "The component succeeded.", + "The component failed.", + "Execution of the component was cancelled." + ], + "type": "string" + } + }, + "type": "object" + }, "StateFamilyConfig": { "description": "State family configuration.", "id": "StateFamilyConfig", @@ -5306,7 +6117,7 @@ "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -5315,7 +6126,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -5326,14 +6137,14 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, "type": "object" }, "Step": { - "description": "Defines a particular step within a Cloud Dataflow job.\n\nA job consists of multiple steps, each of which performs some\nspecific operation as part of the overall job. 
Data is typically\npassed from one step to another as part of the job.\n\nHere's an example of a sequence of steps which together implement a\nMap-Reduce job:\n\n * Read a collection of data from some source, parsing the\n collection's elements.\n\n * Validate the elements.\n\n * Apply a user-defined function to map each element to some value\n and extract an element-specific key value.\n\n * Group elements with the same key into a single element with\n that key, transforming a multiply-keyed collection into a\n uniquely-keyed collection.\n\n * Write the elements out to some data sink.\n\nNote that the Cloud Dataflow service may be used to run many different\ntypes of jobs, not just Map-Reduce.", + "description": "Defines a particular step within a Cloud Dataflow job. A job consists of multiple steps, each of which performs some specific operation as part of the overall job. Data is typically passed from one step to another as part of the job. Here's an example of a sequence of steps which together implement a Map-Reduce job: * Read a collection of data from some source, parsing the collection's elements. * Validate the elements. * Apply a user-defined function to map each element to some value and extract an element-specific key value. * Group elements with the same key into a single element with that key, transforming a multiply-keyed collection into a uniquely-keyed collection. * Write the elements out to some data sink. Note that the Cloud Dataflow service may be used to run many different types of jobs, not just Map-Reduce.", "id": "Step", "properties": { "kind": { @@ -5341,7 +6152,7 @@ "type": "string" }, "name": { - "description": "The name that identifies the step. This must be unique for each\nstep with respect to all other steps in the Cloud Dataflow job.", + "description": "The name that identifies the step. This must be unique for each step with respect to all other steps in the Cloud Dataflow job.", "type": "string" }, "properties": { @@ -5349,14 +6160,14 @@ "description": "Properties of the object.", "type": "any" }, - "description": "Named properties associated with the step. Each kind of\npredefined step has its own required set of properties.\nMust be provided on Create. Only retrieved with JOB_VIEW_ALL.", + "description": "Named properties associated with the step. Each kind of predefined step has its own required set of properties. Must be provided on Create. Only retrieved with JOB_VIEW_ALL.", "type": "object" } }, "type": "object" }, "StreamLocation": { - "description": "Describes a stream of data, either as input to be processed or as\noutput of a streaming Dataflow job.", + "description": "Describes a stream of data, either as input to be processed or as output of a streaming Dataflow job.", "id": "StreamLocation", "properties": { "customSourceLocation": { @@ -5373,7 +6184,7 @@ }, "streamingStageLocation": { "$ref": "StreamingStageLocation", - "description": "The stream is part of another computation within the current\nstreaming Dataflow job." + "description": "The stream is part of another computation within the current streaming Dataflow job." 
} }, "type": "object" @@ -5420,14 +6231,14 @@ "additionalProperties": { "type": "string" }, - "description": "Map from user name of stateful transforms in this stage to their state\nfamily.", + "description": "Map from user name of stateful transforms in this stage to their state family.", "type": "object" } }, "type": "object" }, "StreamingComputationRanges": { - "description": "Describes full or partial data disk assignment information of the computation\nranges.", + "description": "Describes full or partial data disk assignment information of the computation ranges.", "id": "StreamingComputationRanges", "properties": { "computationId": { @@ -5445,7 +6256,7 @@ "type": "object" }, "StreamingComputationTask": { - "description": "A task which describes what action should be performed for the specified\nstreaming computation ranges.", + "description": "A task which describes what action should be performed for the specified streaming computation ranges.", "id": "StreamingComputationTask", "properties": { "computationRanges": { @@ -5513,11 +6324,11 @@ "type": "object" }, "windmillServiceEndpoint": { - "description": "If present, the worker must use this endpoint to communicate with Windmill\nService dispatchers, otherwise the worker must continue to use whatever\nendpoint it had been using.", + "description": "If present, the worker must use this endpoint to communicate with Windmill Service dispatchers, otherwise the worker must continue to use whatever endpoint it had been using.", "type": "string" }, "windmillServicePort": { - "description": "If present, the worker must use this port to communicate with Windmill\nService dispatchers. Only applicable when windmill_service_endpoint is\nspecified.", + "description": "If present, the worker must use this port to communicate with Windmill Service dispatchers. Only applicable when windmill_service_endpoint is specified.", "format": "int64", "type": "string" } @@ -5533,7 +6344,7 @@ "type": "boolean" }, "receiveWorkPort": { - "description": "The TCP port on which the worker should listen for messages from\nother streaming computation workers.", + "description": "The TCP port on which the worker should listen for messages from other streaming computation workers.", "format": "int32", "type": "integer" }, @@ -5546,7 +6357,7 @@ "description": "The global topology of the streaming Dataflow job." 
}, "workerHarnessPort": { - "description": "The TCP port used by the worker to communicate with the Dataflow\nworker harness.", + "description": "The TCP port used by the worker to communicate with the Dataflow worker harness.", "format": "int32", "type": "integer" } @@ -5569,11 +6380,11 @@ "type": "object" }, "StreamingStageLocation": { - "description": "Identifies the location of a streaming computation stage, for\nstage-to-stage communication.", + "description": "Identifies the location of a streaming computation stage, for stage-to-stage communication.", "id": "StreamingStageLocation", "properties": { "streamId": { - "description": "Identifies the particular stream within the streaming Dataflow\njob.", + "description": "Identifies the particular stream within the streaming Dataflow job.", "type": "string" } }, @@ -5594,11 +6405,11 @@ "type": "object" }, "StructuredMessage": { - "description": "A rich message format, including a human readable string, a key for\nidentifying the message, and structured data associated with the message for\nprogrammatic consumption.", + "description": "A rich message format, including a human readable string, a key for identifying the message, and structured data associated with the message for programmatic consumption.", "id": "StructuredMessage", "properties": { "messageKey": { - "description": "Identifier for this message type. Used by external systems to\ninternationalize or personalize message.", + "description": "Identifier for this message type. Used by external systems to internationalize or personalize message.", "type": "string" }, "messageText": { @@ -5628,7 +6439,7 @@ "type": "string" }, "baseUrl": { - "description": "The base URL for the taskrunner to use when accessing Google Cloud APIs.\n\nWhen workers access Google Cloud APIs, they logically do so via\nrelative URLs. If this field is specified, it supplies the base\nURL to use for resolving these relative URLs. The normative\nalgorithm used is defined by RFC 1808, \"Relative Uniform Resource\nLocators\".\n\nIf not specified, the default value is \"http://www.googleapis.com/\"", + "description": "The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, \"Relative Uniform Resource Locators\". If not specified, the default value is \"http://www.googleapis.com/\"", "type": "string" }, "commandlinesFileName": { @@ -5656,15 +6467,15 @@ "type": "string" }, "logToSerialconsole": { - "description": "Whether to send taskrunner log info to Google Compute Engine VM serial\nconsole.", + "description": "Whether to send taskrunner log info to Google Compute Engine VM serial console.", "type": "boolean" }, "logUploadLocation": { - "description": "Indicates where to put logs. If this is not specified, the logs\nwill not be uploaded.\n\nThe supported resource type is:\n\nGoogle Cloud Storage:\n storage.googleapis.com/{bucket}/{object}\n bucket.storage.googleapis.com/{object}", + "description": "Indicates where to put logs. If this is not specified, the logs will not be uploaded. 
The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}", "type": "string" }, "oauthScopes": { - "description": "The OAuth2 scopes to be requested by the taskrunner in order to\naccess the Cloud Dataflow API.", + "description": "The OAuth2 scopes to be requested by the taskrunner in order to access the Cloud Dataflow API.", "items": { "type": "string" }, @@ -5679,15 +6490,15 @@ "type": "string" }, "taskGroup": { - "description": "The UNIX group ID on the worker VM to use for tasks launched by\ntaskrunner; e.g. \"wheel\".", + "description": "The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. \"wheel\".", "type": "string" }, "taskUser": { - "description": "The UNIX user ID on the worker VM to use for tasks launched by\ntaskrunner; e.g. \"root\".", + "description": "The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. \"root\".", "type": "string" }, "tempStoragePrefix": { - "description": "The prefix of the resources the taskrunner should use for\ntemporary storage.\n\nThe supported resource type is:\n\nGoogle Cloud Storage:\n storage.googleapis.com/{bucket}/{object}\n bucket.storage.googleapis.com/{object}", + "description": "The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}", "type": "string" }, "vmId": { @@ -5723,8 +6534,68 @@ }, "type": "object" }, + "TemplateVersion": { + "description": "///////////////////////////////////////////////////////////////////////////// //// Template Catalog is used to organize user TemplateVersions. //// TemplateVersions that have the same project_id and display_name are //// belong to the same Template. //// Templates with the same project_id belong to the same Project. //// TemplateVersion may have labels and multiple labels are allowed. //// Duplicated labels in the same `TemplateVersion` are not allowed. //// TemplateVersion may have tags and multiple tags are allowed. Duplicated //// tags in the same `Template` are not allowed!", + "id": "TemplateVersion", + "properties": { + "artifact": { + "$ref": "Artifact", + "description": "Job graph and metadata if it is a legacy Template. Container image path and metadata if it is flex Template." + }, + "createTime": { + "description": "Creation time of this TemplateVersion.", + "format": "google-datetime", + "type": "string" + }, + "description": { + "description": "Template description from the user.", + "type": "string" + }, + "displayName": { + "description": "A customized name for Template. Multiple TemplateVersions per Template.", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels for the Template Version. Labels can be duplicate within Template.", + "type": "object" + }, + "projectId": { + "description": "A unique project_id. Multiple Templates per Project.", + "type": "string" + }, + "tags": { + "description": "Alias for version_id, helps locate a TemplateVersion.", + "items": { + "type": "string" + }, + "type": "array" + }, + "type": { + "description": "Either LEGACY or FLEX. This should match with the type of artifact.", + "enum": [ + "TEMPLATE_TYPE_UNSPECIFIED", + "LEGACY", + "FLEX" + ], + "enumDescriptions": [ + "Default value. Not a useful zero case.", + "Legacy Template.", + "Flex Template." 
+ ], + "type": "string" + }, + "versionId": { + "description": "An auto generated version_id for TemplateVersion.", + "type": "string" + } + }, + "type": "object" + }, "TopologyConfig": { - "description": "Global topology of the streaming Dataflow job, including all\ncomputations and their sharded locations.", + "description": "Global topology of the streaming Dataflow job, including all computations and their sharded locations.", "id": "TopologyConfig", "properties": { "computations": { @@ -5814,7 +6685,7 @@ "type": "string" }, "outputCollectionName": { - "description": "User names for all collection outputs to this transform.", + "description": "User names for all collection outputs to this transform.", "items": { "type": "string" }, @@ -5830,12 +6701,16 @@ "errorMessage": { "description": "Will be empty if validation succeeds.", "type": "string" + }, + "queryInfo": { + "$ref": "QueryInfo", + "description": "Information about the validated query. Not defined if validation fails." } }, "type": "object" }, "WorkItem": { - "description": "WorkItem represents basic information about a WorkItem to be executed\nin the cloud.", + "description": "WorkItem represents basic information about a WorkItem to be executed in the cloud.", "id": "WorkItem", "properties": { "configuration": { @@ -5866,7 +6741,7 @@ "description": "Additional information for MapTask WorkItems." }, "packages": { - "description": "Any required packages that need to be fetched in order to execute\nthis WorkItem.", + "description": "Any required packages that need to be fetched in order to execute this WorkItem.", "items": { "$ref": "Package" }, @@ -5908,21 +6783,81 @@ }, "type": "object" }, + "WorkItemDetails": { + "description": "Information about an individual work item execution.", + "id": "WorkItemDetails", + "properties": { + "attemptId": { + "description": "Attempt ID of this work item", + "type": "string" + }, + "endTime": { + "description": "End time of this work item attempt. If the work item is completed, this is the actual end time of the work item. Otherwise, it is the predicted end time.", + "format": "google-datetime", + "type": "string" + }, + "metrics": { + "description": "Metrics for this work item.", + "items": { + "$ref": "MetricUpdate" + }, + "type": "array" + }, + "progress": { + "$ref": "ProgressTimeseries", + "description": "Progress of this work item." + }, + "startTime": { + "description": "Start time of this work item attempt.", + "format": "google-datetime", + "type": "string" + }, + "state": { + "description": "State of this work item.", + "enum": [ + "EXECUTION_STATE_UNKNOWN", + "EXECUTION_STATE_NOT_STARTED", + "EXECUTION_STATE_RUNNING", + "EXECUTION_STATE_SUCCEEDED", + "EXECUTION_STATE_FAILED", + "EXECUTION_STATE_CANCELLED" + ], + "enumDescriptions": [ + "The component state is unknown or unspecified.", + "The component is not yet running.", + "The component is currently running.", + "The component succeeded.", + "The component failed.", + "Execution of the component was cancelled." 
+ ], + "type": "string" + }, + "taskId": { + "description": "Name of this work item.", + "type": "string" + } + }, + "type": "object" + }, "WorkItemServiceState": { - "description": "The Dataflow service's idea of the current state of a WorkItem\nbeing processed by a worker.", + "description": "The Dataflow service's idea of the current state of a WorkItem being processed by a worker.", "id": "WorkItemServiceState", "properties": { + "completeWorkStatus": { + "$ref": "Status", + "description": "If set, a request to complete the work item with the given status. This will not be set to OK, unless supported by the specific kind of WorkItem. It can be used for the backend to indicate a WorkItem must terminate, e.g., for aborting work." + }, "harnessData": { "additionalProperties": { "description": "Properties of the object.", "type": "any" }, - "description": "Other data returned by the service, specific to the particular\nworker harness.", + "description": "Other data returned by the service, specific to the particular worker harness.", "type": "object" }, "hotKeyDetection": { "$ref": "HotKeyDetection", - "description": "A hot key is a symptom of poor data distribution in which there are enough\nelements mapped to a single key to impact pipeline performance. When\npresent, this field includes metadata associated with any hot key." + "description": "A hot key is a symptom of poor data distribution in which there are enough elements mapped to a single key to impact pipeline performance. When present, this field includes metadata associated with any hot key." }, "leaseExpireTime": { "description": "Time at which the current lease will expire.", @@ -5930,14 +6865,14 @@ "type": "string" }, "metricShortId": { - "description": "The short ids that workers should use in subsequent metric updates.\nWorkers should strive to use short ids whenever possible, but it is ok\nto request the short_id again if a worker lost track of it\n(e.g. if the worker is recovering from a crash).\nNOTE: it is possible that the response may have short ids for a subset\nof the metrics.", + "description": "The short ids that workers should use in subsequent metric updates. Workers should strive to use short ids whenever possible, but it is ok to request the short_id again if a worker lost track of it (e.g. if the worker is recovering from a crash). NOTE: it is possible that the response may have short ids for a subset of the metrics.", "items": { "$ref": "MetricShortId" }, "type": "array" }, "nextReportIndex": { - "description": "The index value to use for the next report sent by the worker.\nNote: If the report call fails for whatever reason, the worker should\nreuse this index for subsequent report attempts.", + "description": "The index value to use for the next report sent by the worker. Note: If the report call fails for whatever reason, the worker should reuse this index for subsequent report attempts.", "format": "int64", "type": "string" }, @@ -5948,7 +6883,7 @@ }, "splitRequest": { "$ref": "ApproximateSplitRequest", - "description": "The progress point in the WorkItem where the Dataflow service\nsuggests that the worker truncate the task." + "description": "The progress point in the WorkItem where the Dataflow service suggests that the worker truncate the task." }, "suggestedStopPoint": { "$ref": "ApproximateProgress", @@ -5981,7 +6916,7 @@ "description": "See documentation of stop_position." }, "errors": { - "description": "Specifies errors which occurred during processing. 
If errors are\nprovided, and completed = true, then the WorkItem is considered\nto have failed.", + "description": "Specifies errors which occurred during processing. If errors are provided, and completed = true, then the WorkItem is considered to have failed.", "items": { "$ref": "Status" }, @@ -5999,7 +6934,7 @@ "description": "DEPRECATED in favor of reported_progress." }, "reportIndex": { - "description": "The report index. When a WorkItem is leased, the lease will\ncontain an initial report index. When a WorkItem's status is\nreported to the system, the report should be sent with\nthat report index, and the response will contain the index the\nworker should use for the next report. Reports received with\nunexpected index values will be rejected by the service.\n\nIn order to preserve idempotency, the worker should not alter the\ncontents of a report, even if the worker must submit the same\nreport multiple times before getting back a response. The worker\nshould not submit a subsequent report until the response for the\nprevious report had been received from the service.", + "description": "The report index. When a WorkItem is leased, the lease will contain an initial report index. When a WorkItem's status is reported to the system, the report should be sent with that report index, and the response will contain the index the worker should use for the next report. Reports received with unexpected index values will be rejected by the service. In order to preserve idempotency, the worker should not alter the contents of a report, even if the worker must submit the same report multiple times before getting back a response. The worker should not submit a subsequent report until the response for the previous report had been received from the service.", "format": "int64", "type": "string" }, @@ -6018,11 +6953,11 @@ }, "sourceOperationResponse": { "$ref": "SourceOperationResponse", - "description": "If the work item represented a SourceOperationRequest, and the work\nis completed, contains the result of the operation." + "description": "If the work item represented a SourceOperationRequest, and the work is completed, contains the result of the operation." }, "stopPosition": { "$ref": "Position", - "description": "A worker may split an active map task in two parts, \"primary\" and\n\"residual\", continuing to process the primary part and returning the\nresidual part into the pool of available work.\nThis event is called a \"dynamic split\" and is critical to the dynamic\nwork rebalancing feature. The two obtained sub-tasks are called\n\"parts\" of the split.\nThe parts, if concatenated, must represent the same input as would\nbe read by the current task if the split did not happen.\nThe exact way in which the original task is decomposed into the two\nparts is specified either as a position demarcating them\n(stop_position), or explicitly as two DerivedSources, if this\ntask consumes a user-defined source type (dynamic_source_split).\n\nThe \"current\" task is adjusted as a result of the split: after a task\nwith range [A, B) sends a stop_position update at C, its range is\nconsidered to be [A, C), e.g.:\n* Progress should be interpreted relative to the new range, e.g.\n \"75% completed\" means \"75% of [A, C) completed\"\n* The worker should interpret proposed_stop_position relative to the\n new range, e.g. 
\"split at 68%\" should be interpreted as\n \"split at 68% of [A, C)\".\n* If the worker chooses to split again using stop_position, only\n stop_positions in [A, C) will be accepted.\n* Etc.\ndynamic_source_split has similar semantics: e.g., if a task with\nsource S splits using dynamic_source_split into {P, R}\n(where P and R must be together equivalent to S), then subsequent\nprogress and proposed_stop_position should be interpreted relative\nto P, and in a potential subsequent dynamic_source_split into {P', R'},\nP' and R' must be together equivalent to P, etc." + "description": "A worker may split an active map task in two parts, \"primary\" and \"residual\", continuing to process the primary part and returning the residual part into the pool of available work. This event is called a \"dynamic split\" and is critical to the dynamic work rebalancing feature. The two obtained sub-tasks are called \"parts\" of the split. The parts, if concatenated, must represent the same input as would be read by the current task if the split did not happen. The exact way in which the original task is decomposed into the two parts is specified either as a position demarcating them (stop_position), or explicitly as two DerivedSources, if this task consumes a user-defined source type (dynamic_source_split). The \"current\" task is adjusted as a result of the split: after a task with range [A, B) sends a stop_position update at C, its range is considered to be [A, C), e.g.: * Progress should be interpreted relative to the new range, e.g. \"75% completed\" means \"75% of [A, C) completed\" * The worker should interpret proposed_stop_position relative to the new range, e.g. \"split at 68%\" should be interpreted as \"split at 68% of [A, C)\". * If the worker chooses to split again using stop_position, only stop_positions in [A, C) will be accepted. * Etc. dynamic_source_split has similar semantics: e.g., if a task with source S splits using dynamic_source_split into {P, R} (where P and R must be together equivalent to S), then subsequent progress and proposed_stop_position should be interpreted relative to P, and in a potential subsequent dynamic_source_split into {P', R'}, P' and R' must be together equivalent to P, etc." }, "totalThrottlerWaitTimeSeconds": { "description": "Total time the worker spent being throttled by external systems.", @@ -6036,8 +6971,26 @@ }, "type": "object" }, + "WorkerDetails": { + "description": "Information about a worker", + "id": "WorkerDetails", + "properties": { + "workItems": { + "description": "Work items processed by this worker, sorted by time.", + "items": { + "$ref": "WorkItemDetails" + }, + "type": "array" + }, + "workerName": { + "description": "Name of this worker", + "type": "string" + } + }, + "type": "object" + }, "WorkerHealthReport": { - "description": "WorkerHealthReport contains information about the health of a worker.\n\nThe VM should be identified by the labels attached to the WorkerMessage that\nthis health ping belongs to.", + "description": "WorkerHealthReport contains information about the health of a worker. The VM should be identified by the labels attached to the WorkerMessage that this health ping belongs to.", "id": "WorkerHealthReport", "properties": { "msg": { @@ -6045,7 +6998,7 @@ "type": "string" }, "pods": { - "description": "The pods running on the worker. 
See:\nhttp://kubernetes.io/v1.1/docs/api-reference/v1/definitions.html#_v1_pod\n\nThis field is used by the worker to send the status of the indvidual\ncontainers running on each worker.", + "description": "The pods running on the worker. See: http://kubernetes.io/v1.1/docs/api-reference/v1/definitions.html#_v1_pod This field is used by the worker to send the status of the indvidual containers running on each worker.", "items": { "additionalProperties": { "description": "Properties of the object.", @@ -6056,12 +7009,12 @@ "type": "array" }, "reportInterval": { - "description": "The interval at which the worker is sending health reports.\nThe default value of 0 should be interpreted as the field is not being\nexplicitly set by the worker.", + "description": "The interval at which the worker is sending health reports. The default value of 0 should be interpreted as the field is not being explicitly set by the worker.", "format": "google-duration", "type": "string" }, "vmIsBroken": { - "description": "Whether the VM is in a permanently broken state.\nBroken VMs should be abandoned or deleted ASAP to avoid assigning or\ncompleting any work.", + "description": "Whether the VM is in a permanently broken state. Broken VMs should be abandoned or deleted ASAP to avoid assigning or completing any work.", "type": "boolean" }, "vmIsHealthy": { @@ -6077,11 +7030,11 @@ "type": "object" }, "WorkerHealthReportResponse": { - "description": "WorkerHealthReportResponse contains information returned to the worker\nin response to a health ping.", + "description": "WorkerHealthReportResponse contains information returned to the worker in response to a health ping.", "id": "WorkerHealthReportResponse", "properties": { "reportInterval": { - "description": "A positive value indicates the worker should change its reporting interval\nto the specified value.\n\nThe default value of zero means no change in report rate is requested by\nthe server.", + "description": "A positive value indicates the worker should change its reporting interval to the specified value. The default value of zero means no change in report rate is requested by the server.", "format": "google-duration", "type": "string" } @@ -6089,11 +7042,11 @@ "type": "object" }, "WorkerLifecycleEvent": { - "description": "A report of an event in a worker's lifecycle.\nThe proto contains one event, because the worker is expected to\nasynchronously send each message immediately after the event.\nDue to this asynchrony, messages may arrive out of order (or missing), and it\nis up to the consumer to interpret.\nThe timestamp of the event is in the enclosing WorkerMessage proto.", + "description": "A report of an event in a worker's lifecycle. The proto contains one event, because the worker is expected to asynchronously send each message immediately after the event. Due to this asynchrony, messages may arrive out of order (or missing), and it is up to the consumer to interpret. The timestamp of the event is in the enclosing WorkerMessage proto.", "id": "WorkerLifecycleEvent", "properties": { "containerStartTime": { - "description": "The start time of this container. All events will report this so that\nevents can be grouped together across container/VM restarts.", + "description": "The start time of this container. 
All events will report this so that events can be grouped together across container/VM restarts.", "format": "google-datetime", "type": "string" }, @@ -6112,7 +7065,7 @@ "enumDescriptions": [ "Invalid event.", "The time the VM started.", - "Our container code starts running. Multiple containers could be\ndistinguished with WorkerMessage.labels if desired.", + "Our container code starts running. Multiple containers could be distinguished with WorkerMessage.labels if desired.", "The worker has a functional external network connection.", "Started downloading staging files.", "Finished downloading all staging files.", @@ -6125,7 +7078,7 @@ "additionalProperties": { "type": "string" }, - "description": "Other stats that can accompany an event. E.g.\n{ \"downloaded_bytes\" : \"123456\" }", + "description": "Other stats that can accompany an event. E.g. { \"downloaded_bytes\" : \"123456\" }", "type": "object" } }, @@ -6139,7 +7092,7 @@ "additionalProperties": { "type": "string" }, - "description": "Labels are used to group WorkerMessages.\nFor example, a worker_message about a particular container\nmight have the labels:\n{ \"JOB_ID\": \"2015-04-22\",\n \"WORKER_ID\": \"wordcount-vm-2015…\"\n \"CONTAINER_TYPE\": \"worker\",\n \"CONTAINER_ID\": \"ac1234def\"}\nLabel tags typically correspond to Label enum values. However, for ease\nof development other strings can be used as tags. LABEL_UNSPECIFIED should\nnot be used here.", + "description": "Labels are used to group WorkerMessages. For example, a worker_message about a particular container might have the labels: { \"JOB_ID\": \"2015-04-22\", \"WORKER_ID\": \"wordcount-vm-2015…\" \"CONTAINER_TYPE\": \"worker\", \"CONTAINER_ID\": \"ac1234def\"} Label tags typically correspond to Label enum values. However, for ease of development other strings can be used as tags. LABEL_UNSPECIFIED should not be used here.", "type": "object" }, "time": { @@ -6171,11 +7124,11 @@ "type": "object" }, "WorkerMessageCode": { - "description": "A message code is used to report status and error messages to the service.\nThe message codes are intended to be machine readable. The service will\ntake care of translating these into user understandable messages if\nnecessary.\n\nExample use cases:\n 1. Worker processes reporting successful startup.\n 2. Worker processes reporting specific errors (e.g. package staging\n failure).", + "description": "A message code is used to report status and error messages to the service. The message codes are intended to be machine readable. The service will take care of translating these into user understandable messages if necessary. Example use cases: 1. Worker processes reporting successful startup. 2. Worker processes reporting specific errors (e.g. package staging failure).", "id": "WorkerMessageCode", "properties": { "code": { - "description": "The code is a string intended for consumption by a machine that identifies\nthe type of message being sent.\nExamples:\n 1. \"HARNESS_STARTED\" might be used to indicate the worker harness has\n started.\n 2. \"GCS_DOWNLOAD_ERROR\" might be used to indicate an error downloading\n a GCS file as part of the boot process of one of the worker containers.\n\nThis is a string and not an enum to make it easy to add new codes without\nwaiting for an API change.", + "description": "The code is a string intended for consumption by a machine that identifies the type of message being sent. Examples: 1. \"HARNESS_STARTED\" might be used to indicate the worker harness has started. 2. 
\"GCS_DOWNLOAD_ERROR\" might be used to indicate an error downloading a GCS file as part of the boot process of one of the worker containers. This is a string and not an enum to make it easy to add new codes without waiting for an API change.", "type": "string" }, "parameters": { @@ -6183,14 +7136,14 @@ "description": "Properties of the object.", "type": "any" }, - "description": "Parameters contains specific information about the code.\n\nThis is a struct to allow parameters of different types.\n\nExamples:\n 1. For a \"HARNESS_STARTED\" message parameters might provide the name\n of the worker and additional data like timing information.\n 2. For a \"GCS_DOWNLOAD_ERROR\" parameters might contain fields listing\n the GCS objects being downloaded and fields containing errors.\n\nIn general complex data structures should be avoided. If a worker\nneeds to send a specific and complicated data structure then please\nconsider defining a new proto and adding it to the data oneof in\nWorkerMessageResponse.\n\nConventions:\n Parameters should only be used for information that isn't typically passed\n as a label.\n hostname and other worker identifiers should almost always be passed\n as labels since they will be included on most messages.", + "description": "Parameters contains specific information about the code. This is a struct to allow parameters of different types. Examples: 1. For a \"HARNESS_STARTED\" message parameters might provide the name of the worker and additional data like timing information. 2. For a \"GCS_DOWNLOAD_ERROR\" parameters might contain fields listing the GCS objects being downloaded and fields containing errors. In general complex data structures should be avoided. If a worker needs to send a specific and complicated data structure then please consider defining a new proto and adding it to the data oneof in WorkerMessageResponse. Conventions: Parameters should only be used for information that isn't typically passed as a label. hostname and other worker identifiers should almost always be passed as labels since they will be included on most messages.", "type": "object" } }, "type": "object" }, "WorkerMessageResponse": { - "description": "A worker_message response allows the server to pass information to the\nsender.", + "description": "A worker_message response allows the server to pass information to the sender.", "id": "WorkerMessageResponse", "properties": { "workerHealthReportResponse": { @@ -6209,7 +7162,7 @@ "type": "object" }, "WorkerPool": { - "description": "Describes one particular pool of Cloud Dataflow workers to be\ninstantiated by the Cloud Dataflow service in order to perform the\ncomputations required by a job. Note that a workflow job may use\nmultiple pools, in order to match the various computational\nrequirements of the various stages of the job.", + "description": "Describes one particular pool of Cloud Dataflow workers to be instantiated by the Cloud Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.", "id": "WorkerPool", "properties": { "autoscalingSettings": { @@ -6224,7 +7177,7 @@ "type": "array" }, "defaultPackageSet": { - "description": "The default package set to install. This allows the service to\nselect a default set of packages which are useful to worker\nharnesses written in a particular language.", + "description": "The default package set to install. 
This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.", "enum": [ "DEFAULT_PACKAGE_SET_UNKNOWN", "DEFAULT_PACKAGE_SET_NONE", @@ -6233,14 +7186,14 @@ ], "enumDescriptions": [ "The default set of packages to stage is unknown, or unspecified.", - "Indicates that no packages should be staged at the worker unless\nexplicitly specified by the job.", + "Indicates that no packages should be staged at the worker unless explicitly specified by the job.", "Stage packages typically useful to workers written in Java.", "Stage pacakges typically useful to workers written in Python." ], "type": "string" }, "diskSizeGb": { - "description": "Size of root disk for VMs, in GB. If zero or unspecified, the service will\nattempt to choose a reasonable default.", + "description": "Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.", "format": "int32", "type": "integer" }, @@ -6249,7 +7202,7 @@ "type": "string" }, "diskType": { - "description": "Type of root disk for VMs. If empty or unspecified, the service will\nattempt to choose a reasonable default.", + "description": "Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.", "type": "string" }, "ipConfiguration": { @@ -6267,11 +7220,11 @@ "type": "string" }, "kind": { - "description": "The kind of the worker pool; currently only `harness` and `shuffle`\nare supported.", + "description": "The kind of the worker pool; currently only `harness` and `shuffle` are supported.", "type": "string" }, "machineType": { - "description": "Machine type (e.g. \"n1-standard-1\"). If empty or unspecified, the\nservice will attempt to choose a reasonable default.", + "description": "Machine type (e.g. \"n1-standard-1\"). If empty or unspecified, the service will attempt to choose a reasonable default.", "type": "string" }, "metadata": { @@ -6282,21 +7235,21 @@ "type": "object" }, "network": { - "description": "Network to which VMs will be assigned. If empty or unspecified,\nthe service will use the network \"default\".", + "description": "Network to which VMs will be assigned. If empty or unspecified, the service will use the network \"default\".", "type": "string" }, "numThreadsPerWorker": { - "description": "The number of threads per worker harness. If empty or unspecified, the\nservice will choose a number of threads (according to the number of cores\non the selected machine type for batch, or 1 by convention for streaming).", + "description": "The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).", "format": "int32", "type": "integer" }, "numWorkers": { - "description": "Number of Google Compute Engine workers in this pool needed to\nexecute the job. If zero or unspecified, the service will\nattempt to choose a reasonable default.", + "description": "Number of Google Compute Engine workers in this pool needed to execute the job. 
If zero or unspecified, the service will attempt to choose a reasonable default.", "format": "int32", "type": "integer" }, "onHostMaintenance": { - "description": "The action to take on host maintenance, as defined by the Google\nCompute Engine API.", + "description": "The action to take on host maintenance, as defined by the Google Compute Engine API.", "type": "string" }, "packages": { @@ -6315,22 +7268,22 @@ "type": "object" }, "sdkHarnessContainerImages": { - "description": "Set of SDK harness containers needed to execute this pipeline. This will\nonly be set in the Fn API path. For non-cross-language pipelines this\nshould have only one entry. Cross-language pipelines will have two or more\nentries.", + "description": "Set of SDK harness containers needed to execute this pipeline. This will only be set in the Fn API path. For non-cross-language pipelines this should have only one entry. Cross-language pipelines will have two or more entries.", "items": { "$ref": "SdkHarnessContainerImage" }, "type": "array" }, "subnetwork": { - "description": "Subnetwork to which VMs will be assigned, if desired. Expected to be of\nthe form \"regions/REGION/subnetworks/SUBNETWORK\".", + "description": "Subnetwork to which VMs will be assigned, if desired. Expected to be of the form \"regions/REGION/subnetworks/SUBNETWORK\".", "type": "string" }, "taskrunnerSettings": { "$ref": "TaskRunnerSettings", - "description": "Settings passed through to Google Compute Engine workers when\nusing the standard Dataflow task runner. Users should ignore\nthis field." + "description": "Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field." }, "teardownPolicy": { - "description": "Sets the policy for determining when to turndown worker pool.\nAllowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and\n`TEARDOWN_NEVER`.\n`TEARDOWN_ALWAYS` means workers are always torn down regardless of whether\nthe job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down\nif the job succeeds. `TEARDOWN_NEVER` means the workers are never torn\ndown.\n\nIf the workers are not torn down by the service, they will\ncontinue to run and use Google Compute Engine VM resources in the\nuser's project until they are explicitly terminated by the user.\nBecause of this, Google recommends using the `TEARDOWN_ALWAYS`\npolicy except for small, manually supervised test jobs.\n\nIf unknown or unspecified, the service will attempt to choose a reasonable\ndefault.", + "description": "Sets the policy for determining when to turndown worker pool. Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and `TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the `TEARDOWN_ALWAYS` policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.", "enum": [ "TEARDOWN_POLICY_UNKNOWN", "TEARDOWN_ALWAYS", @@ -6340,17 +7293,17 @@ "enumDescriptions": [ "The teardown policy isn't specified, or is unknown.", "Always teardown the resource.", - "Teardown the resource on success. 
This is useful for debugging\nfailures.", - "Never teardown the resource. This is useful for debugging and\ndevelopment." + "Teardown the resource on success. This is useful for debugging failures.", + "Never teardown the resource. This is useful for debugging and development." ], "type": "string" }, "workerHarnessContainerImage": { - "description": "Required. Docker container image that executes the Cloud Dataflow worker\nharness, residing in Google Container Registry.\n\nDeprecated for the Fn API path. Use sdk_harness_container_images instead.", + "description": "Required. Docker container image that executes the Cloud Dataflow worker harness, residing in Google Container Registry. Deprecated for the Fn API path. Use sdk_harness_container_images instead.", "type": "string" }, "zone": { - "description": "Zone to run the worker pools in. If empty or unspecified, the service\nwill attempt to choose a reasonable default.", + "description": "Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.", "type": "string" } }, @@ -6361,7 +7314,7 @@ "id": "WorkerSettings", "properties": { "baseUrl": { - "description": "The base URL for accessing Google Cloud APIs.\n\nWhen workers access Google Cloud APIs, they logically do so via\nrelative URLs. If this field is specified, it supplies the base\nURL to use for resolving these relative URLs. The normative\nalgorithm used is defined by RFC 1808, \"Relative Uniform Resource\nLocators\".\n\nIf not specified, the default value is \"http://www.googleapis.com/\"", + "description": "The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, \"Relative Uniform Resource Locators\". If not specified, the default value is \"http://www.googleapis.com/\"", "type": "string" }, "reportingEnabled": { @@ -6369,15 +7322,15 @@ "type": "boolean" }, "servicePath": { - "description": "The Cloud Dataflow service path relative to the root URL, for example,\n\"dataflow/v1b3/projects\".", + "description": "The Cloud Dataflow service path relative to the root URL, for example, \"dataflow/v1b3/projects\".", "type": "string" }, "shuffleServicePath": { - "description": "The Shuffle service path relative to the root URL, for example,\n\"shuffle/v1beta1\".", + "description": "The Shuffle service path relative to the root URL, for example, \"shuffle/v1beta1\".", "type": "string" }, "tempStoragePrefix": { - "description": "The prefix of the resources the system should use for temporary\nstorage.\n\nThe supported resource type is:\n\nGoogle Cloud Storage:\n\n storage.googleapis.com/{bucket}/{object}\n bucket.storage.googleapis.com/{object}", + "description": "The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}", "type": "string" }, "workerId": { @@ -6388,11 +7341,11 @@ "type": "object" }, "WorkerShutdownNotice": { - "description": "Shutdown notification from workers. This is to be sent by the shutdown\nscript of the worker VM so that the backend knows that the VM is being\nshut down.", + "description": "Shutdown notification from workers. 
This is to be sent by the shutdown script of the worker VM so that the backend knows that the VM is being shut down.", "id": "WorkerShutdownNotice", "properties": { "reason": { - "description": "The reason for the worker shutdown.\nCurrent possible values are:\n \"UNKNOWN\": shutdown reason is unknown.\n \"PREEMPTION\": shutdown reason is preemption.\nOther possible reasons may be added in the future.", + "description": "The reason for the worker shutdown. Current possible values are: \"UNKNOWN\": shutdown reason is unknown. \"PREEMPTION\": shutdown reason is preemption. Other possible reasons may be added in the future.", "type": "string" } }, @@ -6405,7 +7358,7 @@ "type": "object" }, "WriteInstruction": { - "description": "An instruction that writes records.\nTakes one input, produces no outputs.", + "description": "An instruction that writes records. Takes one input, produces no outputs.", "id": "WriteInstruction", "properties": { "input": { diff --git a/vendor/google.golang.org/api/dataflow/v1b3/dataflow-gen.go b/vendor/google.golang.org/api/dataflow/v1b3/dataflow-gen.go index 5be5e4f3376..3a2fe9fa0e2 100644 --- a/vendor/google.golang.org/api/dataflow/v1b3/dataflow-gen.go +++ b/vendor/google.golang.org/api/dataflow/v1b3/dataflow-gen.go @@ -79,6 +79,7 @@ const apiId = "dataflow:v1b3" const apiName = "dataflow" const apiVersion = "v1b3" const basePath = "https://dataflow.googleapis.com/" +const mtlsBasePath = "https://dataflow.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -106,6 +107,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) 
if err != nil { return nil, err @@ -151,9 +153,11 @@ func (s *Service) userAgent() string { func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} + rs.CatalogTemplates = NewProjectsCatalogTemplatesService(s) rs.Jobs = NewProjectsJobsService(s) rs.Locations = NewProjectsLocationsService(s) rs.Snapshots = NewProjectsSnapshotsService(s) + rs.TemplateVersions = NewProjectsTemplateVersionsService(s) rs.Templates = NewProjectsTemplatesService(s) return rs } @@ -161,15 +165,40 @@ func NewProjectsService(s *Service) *ProjectsService { type ProjectsService struct { s *Service + CatalogTemplates *ProjectsCatalogTemplatesService + Jobs *ProjectsJobsService Locations *ProjectsLocationsService Snapshots *ProjectsSnapshotsService + TemplateVersions *ProjectsTemplateVersionsService + Templates *ProjectsTemplatesService } +func NewProjectsCatalogTemplatesService(s *Service) *ProjectsCatalogTemplatesService { + rs := &ProjectsCatalogTemplatesService{s: s} + rs.TemplateVersions = NewProjectsCatalogTemplatesTemplateVersionsService(s) + return rs +} + +type ProjectsCatalogTemplatesService struct { + s *Service + + TemplateVersions *ProjectsCatalogTemplatesTemplateVersionsService +} + +func NewProjectsCatalogTemplatesTemplateVersionsService(s *Service) *ProjectsCatalogTemplatesTemplateVersionsService { + rs := &ProjectsCatalogTemplatesTemplateVersionsService{s: s} + return rs +} + +type ProjectsCatalogTemplatesTemplateVersionsService struct { + s *Service +} + func NewProjectsJobsService(s *Service) *ProjectsJobsService { rs := &ProjectsJobsService{s: s} rs.Debug = NewProjectsJobsDebugService(s) @@ -253,6 +282,7 @@ func NewProjectsLocationsJobsService(s *Service) *ProjectsLocationsJobsService { rs.Debug = NewProjectsLocationsJobsDebugService(s) rs.Messages = NewProjectsLocationsJobsMessagesService(s) rs.Snapshots = NewProjectsLocationsJobsSnapshotsService(s) + rs.Stages = NewProjectsLocationsJobsStagesService(s) rs.WorkItems = NewProjectsLocationsJobsWorkItemsService(s) return rs } @@ -266,6 +296,8 @@ type ProjectsLocationsJobsService struct { Snapshots *ProjectsLocationsJobsSnapshotsService + Stages *ProjectsLocationsJobsStagesService + WorkItems *ProjectsLocationsJobsWorkItemsService } @@ -296,6 +328,15 @@ type ProjectsLocationsJobsSnapshotsService struct { s *Service } +func NewProjectsLocationsJobsStagesService(s *Service) *ProjectsLocationsJobsStagesService { + rs := &ProjectsLocationsJobsStagesService{s: s} + return rs +} + +type ProjectsLocationsJobsStagesService struct { + s *Service +} + func NewProjectsLocationsJobsWorkItemsService(s *Service) *ProjectsLocationsJobsWorkItemsService { rs := &ProjectsLocationsJobsWorkItemsService{s: s} return rs @@ -341,6 +382,15 @@ type ProjectsSnapshotsService struct { s *Service } +func NewProjectsTemplateVersionsService(s *Service) *ProjectsTemplateVersionsService { + rs := &ProjectsTemplateVersionsService{s: s} + return rs +} + +type ProjectsTemplateVersionsService struct { + s *Service +} + func NewProjectsTemplatesService(s *Service) *ProjectsTemplatesService { rs := &ProjectsTemplatesService{s: s} return rs @@ -404,63 +454,42 @@ func (s *ApproximateProgress) UnmarshalJSON(data []byte) error { // a worker. type ApproximateReportedProgress struct { // ConsumedParallelism: Total amount of parallelism in the portion of - // input of this task that has - // already been consumed and is no longer active. In the first two - // examples - // above (see remaining_parallelism), the value should be 29 or - // 2 - // respectively. 
The sum of remaining_parallelism and - // consumed_parallelism - // should equal the total amount of parallelism in this work item. - // If - // specified, must be finite. + // input of this task that has already been consumed and is no longer + // active. In the first two examples above (see remaining_parallelism), + // the value should be 29 or 2 respectively. The sum of + // remaining_parallelism and consumed_parallelism should equal the total + // amount of parallelism in this work item. If specified, must be + // finite. ConsumedParallelism *ReportedParallelism `json:"consumedParallelism,omitempty"` // FractionConsumed: Completion as fraction of the input consumed, from - // 0.0 (beginning, nothing - // consumed), to 1.0 (end of the input, entire input consumed). + // 0.0 (beginning, nothing consumed), to 1.0 (end of the input, entire + // input consumed). FractionConsumed float64 `json:"fractionConsumed,omitempty"` // Position: A Position within the work to represent a progress. Position *Position `json:"position,omitempty"` // RemainingParallelism: Total amount of parallelism in the input of - // this task that remains, - // (i.e. can be delegated to this task and any new tasks via - // dynamic - // splitting). Always at least 1 for non-finished work items and 0 - // for - // finished. - // - // "Amount of parallelism" refers to how many non-empty parts of the - // input - // can be read in parallel. This does not necessarily equal number - // of records. An input that can be read in parallel down to - // the - // individual records is called "perfectly splittable". - // An example of non-perfectly parallelizable input is a - // block-compressed - // file format where a block of records has to be read as a whole, - // but different blocks can be read in parallel. - // - // Examples: - // * If we are processing record #30 (starting at 1) out of 50 in a - // perfectly - // splittable 50-record input, this value should be 21 (20 remaining + - // 1 - // current). - // * If we are reading through block 3 in a block-compressed file - // consisting - // of 5 blocks, this value should be 3 (since blocks 4 and 5 can be - // processed in parallel by new tasks via dynamic splitting and the - // current - // task remains processing block 3). - // * If we are reading through the last block in a block-compressed - // file, - // or reading or processing the last record in a perfectly splittable - // input, this value should be 1, because apart from the current task, - // no - // additional remainder can be split off. + // this task that remains, (i.e. can be delegated to this task and any + // new tasks via dynamic splitting). Always at least 1 for non-finished + // work items and 0 for finished. "Amount of parallelism" refers to how + // many non-empty parts of the input can be read in parallel. This does + // not necessarily equal number of records. An input that can be read in + // parallel down to the individual records is called "perfectly + // splittable". An example of non-perfectly parallelizable input is a + // block-compressed file format where a block of records has to be read + // as a whole, but different blocks can be read in parallel. Examples: * + // If we are processing record #30 (starting at 1) out of 50 in a + // perfectly splittable 50-record input, this value should be 21 (20 + // remaining + 1 current). 
* If we are reading through block 3 in a + // block-compressed file consisting of 5 blocks, this value should be 3 + // (since blocks 4 and 5 can be processed in parallel by new tasks via + // dynamic splitting and the current task remains processing block 3). * + // If we are reading through the last block in a block-compressed file, + // or reading or processing the last record in a perfectly splittable + // input, this value should be 1, because apart from the current task, + // no additional remainder can be split off. RemainingParallelism *ReportedParallelism `json:"remainingParallelism,omitempty"` // ForceSendFields is a list of field names (e.g. "ConsumedParallelism") @@ -505,13 +534,12 @@ func (s *ApproximateReportedProgress) UnmarshalJSON(data []byte) error { // dynamically split the WorkItem. type ApproximateSplitRequest struct { // FractionConsumed: A fraction at which to split the work item, from - // 0.0 (beginning of the - // input) to 1.0 (end of the input). + // 0.0 (beginning of the input) to 1.0 (end of the input). FractionConsumed float64 `json:"fractionConsumed,omitempty"` // FractionOfRemainder: The fraction of the remainder of work to split - // the work item at, from 0.0 - // (split at the current position) to 1.0 (end of the input). + // the work item at, from 0.0 (split at the current position) to 1.0 + // (end of the input). FractionOfRemainder float64 `json:"fractionOfRemainder,omitempty"` // Position: A Position at which to split the work item. @@ -557,46 +585,71 @@ func (s *ApproximateSplitRequest) UnmarshalJSON(data []byte) error { return nil } +// Artifact: Job information for templates. +type Artifact struct { + // ContainerSpec: Container image path set for flex Template. + ContainerSpec *ContainerSpec `json:"containerSpec,omitempty"` + + // JobGraphGcsPath: job_graph_gcs_path set for legacy Template. + JobGraphGcsPath string `json:"jobGraphGcsPath,omitempty"` + + // Metadata: Metadata set for legacy Template. + Metadata *TemplateMetadata `json:"metadata,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ContainerSpec") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ContainerSpec") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Artifact) MarshalJSON() ([]byte, error) { + type NoMethod Artifact + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AutoscalingEvent: A structured message reporting an autoscaling -// decision made by the Dataflow -// service. +// decision made by the Dataflow service. type AutoscalingEvent struct { // CurrentNumWorkers: The current number of workers the job has. 
CurrentNumWorkers int64 `json:"currentNumWorkers,omitempty,string"` // Description: A message describing why the system decided to adjust - // the current - // number of workers, why it failed, or why the system decided to - // not make any changes to the number of workers. + // the current number of workers, why it failed, or why the system + // decided to not make any changes to the number of workers. Description *StructuredMessage `json:"description,omitempty"` // EventType: The type of autoscaling event to report. // // Possible values: - // "TYPE_UNKNOWN" - Default type for the enum. Value should never be + // "TYPE_UNKNOWN" - Default type for the enum. Value should never be // returned. // "TARGET_NUM_WORKERS_CHANGED" - The TARGET_NUM_WORKERS_CHANGED type - // should be used when the target - // worker pool size has changed at the start of an actuation. An - // event - // should always be specified as TARGET_NUM_WORKERS_CHANGED if it - // reflects - // a change in the target_num_workers. + // should be used when the target worker pool size has changed at the + // start of an actuation. An event should always be specified as + // TARGET_NUM_WORKERS_CHANGED if it reflects a change in the + // target_num_workers. // "CURRENT_NUM_WORKERS_CHANGED" - The CURRENT_NUM_WORKERS_CHANGED - // type should be used when actual worker - // pool size has been changed, but the target_num_workers has not - // changed. + // type should be used when actual worker pool size has been changed, + // but the target_num_workers has not changed. // "ACTUATION_FAILURE" - The ACTUATION_FAILURE type should be used - // when we want to report - // an error to the user indicating why the current number of workers - // in the pool could not be changed. - // Displayed in the current status and history widgets. + // when we want to report an error to the user indicating why the + // current number of workers in the pool could not be changed. Displayed + // in the current status and history widgets. // "NO_CHANGE" - Used when we want to report to the user a reason why - // we are - // not currently adjusting the number of workers. - // Should specify both target_num_workers, current_num_workers and - // a - // decision_message. + // we are not currently adjusting the number of workers. Should specify + // both target_num_workers, current_num_workers and a decision_message. EventType string `json:"eventType,omitempty"` // TargetNumWorkers: The target number of workers the worker pool wants @@ -604,13 +657,12 @@ type AutoscalingEvent struct { TargetNumWorkers int64 `json:"targetNumWorkers,omitempty,string"` // Time: The time this event was emitted to indicate a new target or - // current - // num_workers value. + // current num_workers value. Time string `json:"time,omitempty"` // WorkerPool: A short and friendly name for the worker pool this event - // refers to, - // populated from the value of PoolStageRelation::user_pool_name. + // refers to, populated from the value of + // PoolStageRelation::user_pool_name. WorkerPool string `json:"workerPool,omitempty"` // ForceSendFields is a list of field names (e.g. "CurrentNumWorkers") @@ -749,16 +801,14 @@ func (s *BigTableIODetails) MarshalJSON() ([]byte, error) { // CPUTime: Modeled after information exposed by /proc/stat. type CPUTime struct { // Rate: Average CPU utilization rate (% non-idle cpu / second) since - // previous - // sample. + // previous sample. Rate float64 `json:"rate,omitempty"` // Timestamp: Timestamp of the measurement. 
Timestamp string `json:"timestamp,omitempty"` // TotalMs: Total active CPU time across all cores (ie., non-idle) in - // milliseconds - // since start-up. + // milliseconds since start-up. TotalMs uint64 `json:"totalMs,omitempty,string"` // ForceSendFields is a list of field names (e.g. "Rate") to @@ -798,16 +848,45 @@ func (s *CPUTime) UnmarshalJSON(data []byte) error { return nil } +// CommitTemplateVersionRequest: Commit will add a new TemplateVersion +// to an existing template. +type CommitTemplateVersionRequest struct { + // TemplateVersion: TemplateVersion obejct to create. + TemplateVersion *TemplateVersion `json:"templateVersion,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TemplateVersion") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TemplateVersion") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *CommitTemplateVersionRequest) MarshalJSON() ([]byte, error) { + type NoMethod CommitTemplateVersionRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ComponentSource: Description of an interstitial value between -// transforms in an execution -// stage. +// transforms in an execution stage. type ComponentSource struct { // Name: Dataflow service generated name for this source. Name string `json:"name,omitempty"` // OriginalTransformOrCollection: User name for the original user - // transform or collection with which this - // source is most closely associated. + // transform or collection with which this source is most closely + // associated. OriginalTransformOrCollection string `json:"originalTransformOrCollection,omitempty"` // UserName: Human-readable name for this transform; may be user or @@ -844,8 +923,7 @@ type ComponentTransform struct { Name string `json:"name,omitempty"` // OriginalTransform: User name for the original user transform with - // which this transform is - // most closely associated. + // which this transform is most closely associated. OriginalTransform string `json:"originalTransform,omitempty"` // UserName: Human-readable name for this transform; may be user or @@ -920,10 +998,8 @@ func (s *ComputationTopology) MarshalJSON() ([]byte, error) { } // ConcatPosition: A position that encapsulates an inner position and an -// index for the inner -// position. A ConcatPosition can be used by a reader of a source -// that -// encapsulates a set of other sources. +// index for the inner position. A ConcatPosition can be used by a +// reader of a source that encapsulates a set of other sources. type ConcatPosition struct { // Index: Index of the inner source. 
Index int64 `json:"index,omitempty"` @@ -1055,8 +1131,8 @@ func (s *CounterMetadata) MarshalJSON() ([]byte, error) { } // CounterStructuredName: Identifies a counter within a per-job -// namespace. Counters whose structured -// names are the same get merged into a single value for the job. +// namespace. Counters whose structured names are the same get merged +// into a single value for the job. type CounterStructuredName struct { // ComponentStepName: Name of the optimized step being executed by the // workers. @@ -1067,20 +1143,15 @@ type CounterStructuredName struct { ExecutionStepName string `json:"executionStepName,omitempty"` // InputIndex: Index of an input collection that's being read - // from/written to as a side - // input. - // The index identifies a step's side inputs starting by 1 (e.g. the - // first - // side input has input_index 1, the third has input_index 3). - // Side inputs are identified by a pair of (original_step_name, - // input_index). - // This field helps uniquely identify them. + // from/written to as a side input. The index identifies a step's side + // inputs starting by 1 (e.g. the first side input has input_index 1, + // the third has input_index 3). Side inputs are identified by a pair of + // (original_step_name, input_index). This field helps uniquely identify + // them. InputIndex int64 `json:"inputIndex,omitempty"` // Name: Counter name. Not necessarily globally-unique, but unique - // within the - // context of the other fields. - // Required. + // within the context of the other fields. Required. Name string `json:"name,omitempty"` // Origin: One of the standard Origins defined above. @@ -1095,14 +1166,12 @@ type CounterStructuredName struct { OriginNamespace string `json:"originNamespace,omitempty"` // OriginalRequestingStepName: The step name requesting an operation, - // such as GBK. - // I.e. the ParDo causing a read/write from shuffle to occur, or a - // read from side inputs. + // such as GBK. I.e. the ParDo causing a read/write from shuffle to + // occur, or a read from side inputs. OriginalRequestingStepName string `json:"originalRequestingStepName,omitempty"` // OriginalStepName: System generated name of the original step in the - // user's graph, before - // optimization. + // user's graph, before optimization. OriginalStepName string `json:"originalStepName,omitempty"` // Portion: Portion of this counter, either key or value. @@ -1141,8 +1210,7 @@ func (s *CounterStructuredName) MarshalJSON() ([]byte, error) { } // CounterStructuredNameAndMetadata: A single message which encapsulates -// structured name and metadata for a given -// counter. +// structured name and metadata for a given counter. type CounterStructuredNameAndMetadata struct { // Metadata: Metadata associated with a counter Metadata *CounterMetadata `json:"metadata,omitempty"` @@ -1179,11 +1247,9 @@ type CounterUpdate struct { Boolean bool `json:"boolean,omitempty"` // Cumulative: True if this counter is reported as the total cumulative - // aggregate - // value accumulated since the worker started working on this - // WorkItem. - // By default this is false, indicating that this counter is reported - // as a delta. + // aggregate value accumulated since the worker started working on this + // WorkItem. By default this is false, indicating that this counter is + // reported as a delta. Cumulative bool `json:"cumulative,omitempty"` // Distribution: Distribution data @@ -1217,9 +1283,8 @@ type CounterUpdate struct { // NameAndKind: Counter name and aggregation type. 
NameAndKind *NameAndKind `json:"nameAndKind,omitempty"` - // ShortId: The service-generated short identifier for this counter. - // The short_id -> (name, metadata) mapping is constant for the lifetime - // of + // ShortId: The service-generated short identifier for this counter. The + // short_id -> (name, metadata) mapping is constant for the lifetime of // a job. ShortId int64 `json:"shortId,omitempty,string"` @@ -1272,20 +1337,17 @@ type CreateJobFromTemplateRequest struct { // Environment: The runtime environment for the job. Environment *RuntimeEnvironment `json:"environment,omitempty"` - // GcsPath: Required. A Cloud Storage path to the template from which - // to - // create the job. - // Must be a valid Cloud Storage URL, beginning with `gs://`. + // GcsPath: Required. A Cloud Storage path to the template from which to + // create the job. Must be a valid Cloud Storage URL, beginning with + // `gs://`. GcsPath string `json:"gcsPath,omitempty"` // JobName: Required. The job name to use for the created job. JobName string `json:"jobName,omitempty"` - // Location: The [regional - // endpoint] - // (https://cloud.google.com/dataflow/docs/concepts/regional-en - // dpoints) to - // which to direct the request. + // Location: The [regional endpoint] + // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) + // to which to direct the request. Location string `json:"location,omitempty"` // Parameters: The runtime parameters to pass to the job. @@ -1314,6 +1376,36 @@ func (s *CreateJobFromTemplateRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// CreateTemplateVersionRequest: Creates a new Template with +// TemplateVersions. +type CreateTemplateVersionRequest struct { + // TemplateVersion: The TemplateVersion object to create. + TemplateVersion *TemplateVersion `json:"templateVersion,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TemplateVersion") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TemplateVersion") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *CreateTemplateVersionRequest) MarshalJSON() ([]byte, error) { + type NoMethod CreateTemplateVersionRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // CustomSourceLocation: Identifies the location of a custom souce. type CustomSourceLocation struct { // Stateful: Whether this source is stateful. @@ -1345,16 +1437,13 @@ func (s *CustomSourceLocation) MarshalJSON() ([]byte, error) { // DataDiskAssignment: Data disk assignment for a given VM instance. type DataDiskAssignment struct { // DataDisks: Mounted data disks. 
The order is important a data disk's - // 0-based index in - // this list defines which persistent directory the disk is mounted to, - // for - // example the list of { "myproject-1014-104817-4c2-harness-0-disk-0" - // }, - // { "myproject-1014-104817-4c2-harness-0-disk-1" }. + // 0-based index in this list defines which persistent directory the + // disk is mounted to, for example the list of { + // "myproject-1014-104817-4c2-harness-0-disk-0" }, { + // "myproject-1014-104817-4c2-harness-0-disk-1" }. DataDisks []string `json:"dataDisks,omitempty"` - // VmInstance: VM instance name the data disks mounted to, for - // example + // VmInstance: VM instance name the data disks mounted to, for example // "myproject-1014-104817-4c2-harness-0". VmInstance string `json:"vmInstance,omitempty"` @@ -1421,11 +1510,10 @@ type DeleteSnapshotResponse struct { } // DerivedSource: Specification of one of the bundles produced as a -// result of splitting -// a Source (e.g. when executing a SourceSplitRequest, or when -// splitting an active task using -// WorkItemStatus.dynamic_source_split), -// relative to the source being split. +// result of splitting a Source (e.g. when executing a +// SourceSplitRequest, or when splitting an active task using +// WorkItemStatus.dynamic_source_split), relative to the source being +// split. type DerivedSource struct { // DerivationMode: What source to base the produced source on (if any). // @@ -1470,38 +1558,26 @@ func (s *DerivedSource) MarshalJSON() ([]byte, error) { // Disk: Describes the data disk used by a workflow job. type Disk struct { // DiskType: Disk storage type, as defined by Google Compute Engine. - // This - // must be a disk type appropriate to the project and zone in which - // the workers will run. If unknown or unspecified, the service - // will attempt to choose a reasonable default. - // - // For example, the standard persistent disk type is a resource - // name - // typically ending in "pd-standard". If SSD persistent disks - // are - // available, the resource name typically ends with "pd-ssd". - // The - // actual valid values are defined the Google Compute Engine API, - // not by the Cloud Dataflow API; consult the Google Compute - // Engine - // documentation for more information about determining the set - // of - // available disk types for a particular project and zone. - // - // Google Compute Engine Disk types are local to a particular - // project in a particular zone, and so the resource name will - // typically look something like - // this: - // - // compute.googleapis.com/projects/project-id/zones/zone/diskTypes - // /pd-standard + // This must be a disk type appropriate to the project and zone in which + // the workers will run. If unknown or unspecified, the service will + // attempt to choose a reasonable default. For example, the standard + // persistent disk type is a resource name typically ending in + // "pd-standard". If SSD persistent disks are available, the resource + // name typically ends with "pd-ssd". The actual valid values are + // defined the Google Compute Engine API, not by the Cloud Dataflow API; + // consult the Google Compute Engine documentation for more information + // about determining the set of available disk types for a particular + // project and zone. 
Google Compute Engine Disk types are local to a + // particular project in a particular zone, and so the resource name + // will typically look something like this: + // compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-sta + // ndard DiskType string `json:"diskType,omitempty"` // MountPoint: Directory in a VM where disk is mounted. MountPoint string `json:"mountPoint,omitempty"` - // SizeGb: Size of disk in GB. If zero or unspecified, the service - // will + // SizeGb: Size of disk in GB. If zero or unspecified, the service will // attempt to choose a reasonable default. SizeGb int64 `json:"sizeGb,omitempty"` @@ -1546,29 +1622,25 @@ type DisplayData struct { // JavaClassValue: Contains value if the data is of java class type. JavaClassValue string `json:"javaClassValue,omitempty"` - // Key: The key identifying the display data. - // This is intended to be used as a label for the display data - // when viewed in a dax monitoring system. + // Key: The key identifying the display data. This is intended to be + // used as a label for the display data when viewed in a dax monitoring + // system. Key string `json:"key,omitempty"` // Label: An optional label to display in a dax UI for the element. Label string `json:"label,omitempty"` // Namespace: The namespace for the key. This is usually a class name or - // programming - // language namespace (i.e. python module) which defines the display - // data. - // This allows a dax monitoring system to specially handle the data - // and perform custom rendering. + // programming language namespace (i.e. python module) which defines the + // display data. This allows a dax monitoring system to specially handle + // the data and perform custom rendering. Namespace string `json:"namespace,omitempty"` - // ShortStrValue: A possible additional shorter value to display. - // For example a java_class_name_value of com.mypackage.MyDoFn - // will be stored with MyDoFn as the short_str_value - // and - // com.mypackage.MyDoFn as the java_class_name value. - // short_str_value can be displayed and java_class_name_value - // will be displayed as a tooltip. + // ShortStrValue: A possible additional shorter value to display. For + // example a java_class_name_value of com.mypackage.MyDoFn will be + // stored with MyDoFn as the short_str_value and com.mypackage.MyDoFn as + // the java_class_name value. short_str_value can be displayed and + // java_class_name_value will be displayed as a tooltip. ShortStrValue string `json:"shortStrValue,omitempty"` // StrValue: Contains value if the data is of string type. @@ -1633,8 +1705,8 @@ type DistributionUpdate struct { Min *SplitInt64 `json:"min,omitempty"` // Sum: Use an int64 since we'd prefer the added precision. If overflow - // is a common - // problem we can detect it and use an additional int64 or a double. + // is a common problem we can detect it and use an additional int64 or a + // double. Sum *SplitInt64 `json:"sum,omitempty"` // SumOfSquares: Use a double since the sum of squares is likely to @@ -1679,19 +1751,16 @@ func (s *DistributionUpdate) UnmarshalJSON(data []byte) error { } // DynamicSourceSplit: When a task splits using -// WorkItemStatus.dynamic_source_split, this -// message describes the two parts of the split relative to -// the -// description of the current task's input. +// WorkItemStatus.dynamic_source_split, this message describes the two +// parts of the split relative to the description of the current task's +// input. 
type DynamicSourceSplit struct { - // Primary: Primary part (continued to be processed by - // worker). - // Specified relative to the previously-current source. - // Becomes current. + // Primary: Primary part (continued to be processed by worker). + // Specified relative to the previously-current source. Becomes current. Primary *DerivedSource `json:"primary,omitempty"` - // Residual: Residual part (returned to the pool of work). - // Specified relative to the previously-current source. + // Residual: Residual part (returned to the pool of work). Specified + // relative to the previously-current source. Residual *DerivedSource `json:"residual,omitempty"` // ForceSendFields is a list of field names (e.g. "Primary") to @@ -1717,23 +1786,29 @@ func (s *DynamicSourceSplit) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Empty: A generic empty message that you can re-use to avoid defining +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. +type Empty struct { + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` +} + // Environment: Describes the environment in which a Dataflow Job runs. type Environment struct { - // ClusterManagerApiService: The type of cluster manager API to use. If - // unknown or - // unspecified, the service will attempt to choose a reasonable - // default. This should be in the form of the API service name, - // e.g. "compute.googleapis.com". + // ClusterManagerApiService: The type of cluster manager API to use. If + // unknown or unspecified, the service will attempt to choose a + // reasonable default. This should be in the form of the API service + // name, e.g. "compute.googleapis.com". ClusterManagerApiService string `json:"clusterManagerApiService,omitempty"` - // Dataset: The dataset for the current project where various - // workflow - // related tables are stored. - // - // The supported resource type is: - // - // Google BigQuery: - // bigquery.googleapis.com/{dataset} + // Dataset: The dataset for the current project where various workflow + // related tables are stored. The supported resource type is: Google + // BigQuery: bigquery.googleapis.com/{dataset} Dataset string `json:"dataset,omitempty"` // Experiments: The list of experiments to enable. @@ -1752,12 +1827,9 @@ type Environment struct { InternalExperiments googleapi.RawMessage `json:"internalExperiments,omitempty"` // SdkPipelineOptions: The Cloud Dataflow SDK pipeline options specified - // by the user. These - // options are passed through the service and are used to recreate - // the - // SDK pipeline options on the worker in a language agnostic and - // platform - // independent way. + // by the user. These options are passed through the service and are + // used to recreate the SDK pipeline options on the worker in a language + // agnostic and platform independent way. SdkPipelineOptions googleapi.RawMessage `json:"sdkPipelineOptions,omitempty"` // ServiceAccountEmail: Identity to run virtual machines as. 
Defaults to @@ -1765,65 +1837,47 @@ type Environment struct { ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"` // ServiceKmsKeyName: If set, contains the Cloud KMS key identifier used - // to encrypt data - // at rest, AKA a Customer Managed Encryption Key (CMEK). - // - // Format: - // + // to encrypt data at rest, AKA a Customer Managed Encryption Key + // (CMEK). Format: // projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KE // Y ServiceKmsKeyName string `json:"serviceKmsKeyName,omitempty"` // TempStoragePrefix: The prefix of the resources the system should use - // for temporary - // storage. The system will append the suffix "/temp-{JOBNAME} to - // this resource prefix, where {JOBNAME} is the value of the - // job_name field. The resulting bucket and object prefix is used - // as the prefix of the resources used to store temporary data - // needed during the job execution. NOTE: This will override the - // value in taskrunner_settings. - // The supported resource type is: - // - // Google Cloud Storage: - // - // storage.googleapis.com/{bucket}/{object} - // bucket.storage.googleapis.com/{object} + // for temporary storage. The system will append the suffix + // "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the + // value of the job_name field. The resulting bucket and object prefix + // is used as the prefix of the resources used to store temporary data + // needed during the job execution. NOTE: This will override the value + // in taskrunner_settings. The supported resource type is: Google Cloud + // Storage: storage.googleapis.com/{bucket}/{object} + // bucket.storage.googleapis.com/{object} TempStoragePrefix string `json:"tempStoragePrefix,omitempty"` // UserAgent: A description of the process that generated the request. UserAgent googleapi.RawMessage `json:"userAgent,omitempty"` // Version: A structure describing which components and their versions - // of the service - // are required in order to run the job. + // of the service are required in order to run the job. Version googleapi.RawMessage `json:"version,omitempty"` // WorkerPools: The worker pools. At least one "harness" worker pool - // must be - // specified in order for the job to have workers. + // must be specified in order for the job to have workers. WorkerPools []*WorkerPool `json:"workerPools,omitempty"` - // WorkerRegion: The Compute Engine - // region - // (https://cloud.google.com/compute/docs/regions-zones/regions-zo - // nes) in - // which worker processing should occur, e.g. "us-west1". Mutually - // exclusive - // with worker_zone. If neither worker_region nor worker_zone is - // specified, - // default to the control plane's region. + // WorkerRegion: The Compute Engine region + // (https://cloud.google.com/compute/docs/regions-zones/regions-zones) + // in which worker processing should occur, e.g. "us-west1". Mutually + // exclusive with worker_zone. If neither worker_region nor worker_zone + // is specified, default to the control plane's region. WorkerRegion string `json:"workerRegion,omitempty"` - // WorkerZone: The Compute Engine - // zone - // (https://cloud.google.com/compute/docs/regions-zones/regions-zone - // s) in - // which worker processing should occur, e.g. "us-west1-a". Mutually - // exclusive - // with worker_region. If neither worker_region nor worker_zone is - // specified, - // a zone in the control plane's region is chosen based on available - // capacity. 
+ // WorkerZone: The Compute Engine zone + // (https://cloud.google.com/compute/docs/regions-zones/regions-zones) + // in which worker processing should occur, e.g. "us-west1-a". Mutually + // exclusive with worker_region. If neither worker_region nor + // worker_zone is specified, a zone in the control plane's region is + // chosen based on available capacity. WorkerZone string `json:"workerZone,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1867,74 +1921,50 @@ type ExecutionStageState struct { // Possible values: // "JOB_STATE_UNKNOWN" - The job's run state isn't specified. // "JOB_STATE_STOPPED" - `JOB_STATE_STOPPED` indicates that the job - // has not - // yet started to run. + // has not yet started to run. // "JOB_STATE_RUNNING" - `JOB_STATE_RUNNING` indicates that the job is // currently running. // "JOB_STATE_DONE" - `JOB_STATE_DONE` indicates that the job has - // successfully completed. - // This is a terminal job state. This state may be set by the Cloud - // Dataflow - // service, as a transition from `JOB_STATE_RUNNING`. It may also be set - // via a - // Cloud Dataflow `UpdateJob` call, if the job has not yet reached a - // terminal - // state. + // successfully completed. This is a terminal job state. This state may + // be set by the Cloud Dataflow service, as a transition from + // `JOB_STATE_RUNNING`. It may also be set via a Cloud Dataflow + // `UpdateJob` call, if the job has not yet reached a terminal state. // "JOB_STATE_FAILED" - `JOB_STATE_FAILED` indicates that the job has - // failed. This is a - // terminal job state. This state may only be set by the Cloud - // Dataflow - // service, and only as a transition from `JOB_STATE_RUNNING`. + // failed. This is a terminal job state. This state may only be set by + // the Cloud Dataflow service, and only as a transition from + // `JOB_STATE_RUNNING`. // "JOB_STATE_CANCELLED" - `JOB_STATE_CANCELLED` indicates that the - // job has been explicitly - // cancelled. This is a terminal job state. This state may only be - // set via a Cloud Dataflow `UpdateJob` call, and only if the job has - // not - // yet reached another terminal state. + // job has been explicitly cancelled. This is a terminal job state. This + // state may only be set via a Cloud Dataflow `UpdateJob` call, and only + // if the job has not yet reached another terminal state. // "JOB_STATE_UPDATED" - `JOB_STATE_UPDATED` indicates that the job - // was successfully updated, - // meaning that this job was stopped and another job was started, - // inheriting - // state from this one. This is a terminal job state. This state may - // only be - // set by the Cloud Dataflow service, and only as a transition - // from - // `JOB_STATE_RUNNING`. + // was successfully updated, meaning that this job was stopped and + // another job was started, inheriting state from this one. This is a + // terminal job state. This state may only be set by the Cloud Dataflow + // service, and only as a transition from `JOB_STATE_RUNNING`. // "JOB_STATE_DRAINING" - `JOB_STATE_DRAINING` indicates that the job - // is in the process of draining. - // A draining job has stopped pulling from its input sources and is - // processing - // any data that remains in-flight. This state may be set via a Cloud - // Dataflow - // `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. - // Jobs - // that are draining may only transition to - // `JOB_STATE_DRAINED`, + // is in the process of draining. 
A draining job has stopped pulling + // from its input sources and is processing any data that remains + // in-flight. This state may be set via a Cloud Dataflow `UpdateJob` + // call, but only as a transition from `JOB_STATE_RUNNING`. Jobs that + // are draining may only transition to `JOB_STATE_DRAINED`, // `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`. // "JOB_STATE_DRAINED" - `JOB_STATE_DRAINED` indicates that the job - // has been drained. - // A drained job terminated by stopping pulling from its input sources - // and - // processing any data that remained in-flight when draining was - // requested. - // This state is a terminal state, may only be set by the Cloud - // Dataflow - // service, and only as a transition from `JOB_STATE_DRAINING`. + // has been drained. A drained job terminated by stopping pulling from + // its input sources and processing any data that remained in-flight + // when draining was requested. This state is a terminal state, may only + // be set by the Cloud Dataflow service, and only as a transition from + // `JOB_STATE_DRAINING`. // "JOB_STATE_PENDING" - `JOB_STATE_PENDING` indicates that the job - // has been created but is not yet - // running. Jobs that are pending may only transition to - // `JOB_STATE_RUNNING`, - // or `JOB_STATE_FAILED`. + // has been created but is not yet running. Jobs that are pending may + // only transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`. // "JOB_STATE_CANCELLING" - `JOB_STATE_CANCELLING` indicates that the - // job has been explicitly cancelled - // and is in the process of stopping. Jobs that are cancelling may - // only - // transition to `JOB_STATE_CANCELLED` or `JOB_STATE_FAILED`. + // job has been explicitly cancelled and is in the process of stopping. + // Jobs that are cancelling may only transition to `JOB_STATE_CANCELLED` + // or `JOB_STATE_FAILED`. // "JOB_STATE_QUEUED" - `JOB_STATE_QUEUED` indicates that the job has - // been created but is being - // delayed until launch. Jobs that are queued may only transition - // to - // `JOB_STATE_PENDING` or `JOB_STATE_CANCELLED`. + // been created but is being delayed until launch. Jobs that are queued + // may only transition to `JOB_STATE_PENDING` or `JOB_STATE_CANCELLED`. ExecutionStageState string `json:"executionStageState,omitempty"` // ForceSendFields is a list of field names (e.g. "CurrentStateTime") to @@ -1962,10 +1992,9 @@ func (s *ExecutionStageState) MarshalJSON() ([]byte, error) { } // ExecutionStageSummary: Description of the composing transforms, -// names/ids, and input/outputs of a -// stage of execution. Some composing transforms and sources may have -// been -// generated by the Dataflow service during execution planning. +// names/ids, and input/outputs of a stage of execution. Some composing +// transforms and sources may have been generated by the Dataflow +// service during execution planning. type ExecutionStageSummary struct { // ComponentSource: Collections produced and consumed by component // transforms of this stage. @@ -2026,17 +2055,13 @@ func (s *ExecutionStageSummary) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// FailedLocation: Indicates which [regional -// endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints) failed -// to respond to a request for data. +// FailedLocation: Indicates which [regional endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) +// failed to respond to a request for data. 
type FailedLocation struct { - // Name: The name of the [regional - // endpoint] - // (https://cloud.google.com/dataflow/docs/concepts/regional-en - // dpoints) that - // failed to respond. + // Name: The name of the [regional endpoint] + // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) + // that failed to respond. Name string `json:"name,omitempty"` // ForceSendFields is a list of field names (e.g. "Name") to @@ -2119,6 +2144,117 @@ func (s *FlattenInstruction) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// FlexTemplateRuntimeEnvironment: The environment values to be set at +// runtime for flex template. +type FlexTemplateRuntimeEnvironment struct { + // AdditionalExperiments: Additional experiment flags for the job. + AdditionalExperiments []string `json:"additionalExperiments,omitempty"` + + // AdditionalUserLabels: Additional user labels to be specified for the + // job. Keys and values must follow the restrictions specified in the + // [labeling + // restrictions](https://cloud.google.com/compute/docs/labeling-resources + // #restrictions) page. An object containing a list of "key": value + // pairs. Example: { "name": "wrench", "mass": "1kg", "count": "3" }. + AdditionalUserLabels map[string]string `json:"additionalUserLabels,omitempty"` + + // EnableStreamingEngine: Whether to enable Streaming Engine for the + // job. + EnableStreamingEngine bool `json:"enableStreamingEngine,omitempty"` + + // IpConfiguration: Configuration for VM IPs. + // + // Possible values: + // "WORKER_IP_UNSPECIFIED" - The configuration is unknown, or + // unspecified. + // "WORKER_IP_PUBLIC" - Workers should have public IP addresses. + // "WORKER_IP_PRIVATE" - Workers should have private IP addresses. + IpConfiguration string `json:"ipConfiguration,omitempty"` + + // KmsKeyName: Name for the Cloud KMS key for the job. Key format is: + // projects//locations//keyRings//cryptoKeys/ + KmsKeyName string `json:"kmsKeyName,omitempty"` + + // MachineType: The machine type to use for the job. Defaults to the + // value from the template if not specified. + MachineType string `json:"machineType,omitempty"` + + // MaxWorkers: The maximum number of Google Compute Engine instances to + // be made available to your pipeline during execution, from 1 to 1000. + MaxWorkers int64 `json:"maxWorkers,omitempty"` + + // Network: Network to which VMs will be assigned. If empty or + // unspecified, the service will use the network "default". + Network string `json:"network,omitempty"` + + // NumWorkers: The initial number of Google Compute Engine instances for + // the job. + NumWorkers int64 `json:"numWorkers,omitempty"` + + // ServiceAccountEmail: The email address of the service account to run + // the job as. + ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"` + + // Subnetwork: Subnetwork to which VMs will be assigned, if desired. You + // can specify a subnetwork using either a complete URL or an + // abbreviated path. Expected to be of the form + // "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/region + // s/REGION/subnetworks/SUBNETWORK" or + // "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located + // in a Shared VPC network, you must use the complete URL. + Subnetwork string `json:"subnetwork,omitempty"` + + // TempLocation: The Cloud Storage path to use for temporary files. Must + // be a valid Cloud Storage URL, beginning with `gs://`. 
+ TempLocation string `json:"tempLocation,omitempty"` + + // WorkerRegion: The Compute Engine region + // (https://cloud.google.com/compute/docs/regions-zones/regions-zones) + // in which worker processing should occur, e.g. "us-west1". Mutually + // exclusive with worker_zone. If neither worker_region nor worker_zone + // is specified, default to the control plane's region. + WorkerRegion string `json:"workerRegion,omitempty"` + + // WorkerZone: The Compute Engine zone + // (https://cloud.google.com/compute/docs/regions-zones/regions-zones) + // in which worker processing should occur, e.g. "us-west1-a". Mutually + // exclusive with worker_region. If neither worker_region nor + // worker_zone is specified, a zone in the control plane's region is + // chosen based on available capacity. If both `worker_zone` and `zone` + // are set, `worker_zone` takes precedence. + WorkerZone string `json:"workerZone,omitempty"` + + // Zone: The Compute Engine [availability + // zone](https://cloud.google.com/compute/docs/regions-zones/regions-zone + // s) for launching worker instances to run your pipeline. In the + // future, worker_zone will take precedence. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "AdditionalExperiments") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AdditionalExperiments") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *FlexTemplateRuntimeEnvironment) MarshalJSON() ([]byte, error) { + type NoMethod FlexTemplateRuntimeEnvironment + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // FloatingPointList: A metric value representing a list of floating // point numbers. type FloatingPointList struct { @@ -2198,15 +2334,12 @@ func (s *FloatingPointMean) UnmarshalJSON(data []byte) error { // component. type GetDebugConfigRequest struct { // ComponentId: The internal component id for which debug configuration - // is - // requested. + // is requested. ComponentId string `json:"componentId,omitempty"` - // Location: The [regional - // endpoint] - // (https://cloud.google.com/dataflow/docs/concepts/regional-en - // dpoints) that - // contains the job specified by job_id. + // Location: The [regional endpoint] + // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) + // that contains the job specified by job_id. Location string `json:"location,omitempty"` // WorkerId: The worker id, i.e., VM hostname. @@ -2271,16 +2404,14 @@ func (s *GetDebugConfigResponse) MarshalJSON() ([]byte, error) { // GetTemplateResponse: The response to a GetTemplate request. type GetTemplateResponse struct { // Metadata: The template metadata describing the template name, - // available - // parameters, etc. + // available parameters, etc. 
Metadata *TemplateMetadata `json:"metadata,omitempty"` // RuntimeMetadata: Describes the runtime metadata with SDKInfo and // available parameters. RuntimeMetadata *RuntimeMetadata `json:"runtimeMetadata,omitempty"` - // Status: The status of the get template request. Any problems with - // the + // Status: The status of the get template request. Any problems with the // request will be indicated in the error_details. Status *Status `json:"status,omitempty"` @@ -2319,31 +2450,22 @@ func (s *GetTemplateResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Histogram: Histogram of value counts for a distribution. -// -// Buckets have an inclusive lower bound and exclusive upper bound and -// use -// "1,2,5 bucketing": The first bucket range is from [0,1) and all -// subsequent +// Histogram: Histogram of value counts for a distribution. Buckets have +// an inclusive lower bound and exclusive upper bound and use "1,2,5 +// bucketing": The first bucket range is from [0,1) and all subsequent // bucket boundaries are powers of ten multiplied by 1, 2, or 5. Thus, -// bucket -// boundaries are 0, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, -// ... -// Negative values are not supported. +// bucket boundaries are 0, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, +// ... Negative values are not supported. type Histogram struct { // BucketCounts: Counts of values in each bucket. For efficiency, prefix - // and trailing - // buckets with count = 0 are elided. Buckets can store the full range - // of - // values of an unsigned long, with ULLONG_MAX falling into the 59th - // bucket - // with range [1e19, 2e19). + // and trailing buckets with count = 0 are elided. Buckets can store the + // full range of values of an unsigned long, with ULLONG_MAX falling + // into the 59th bucket with range [1e19, 2e19). BucketCounts googleapi.Int64s `json:"bucketCounts,omitempty"` // FirstBucketOffset: Starting index of first stored bucket. The - // non-inclusive upper-bound of - // the ith bucket is given by: - // pow(10,(i-first_bucket_offset)/3) * + // non-inclusive upper-bound of the ith bucket is given by: + // pow(10,(i-first_bucket_offset)/3) * // (1,2,5)[(i-first_bucket_offset)%3] FirstBucketOffset int64 `json:"firstBucketOffset,omitempty"` @@ -2377,8 +2499,7 @@ type HotKeyDetection struct { // detected. HotKeyAge string `json:"hotKeyAge,omitempty"` - // SystemName: System-defined name of the step containing this hot - // key. + // SystemName: System-defined name of the step containing this hot key. // Unique across the workflow. SystemName string `json:"systemName,omitempty"` @@ -2410,17 +2531,15 @@ func (s *HotKeyDetection) MarshalJSON() ([]byte, error) { } // InstructionInput: An input of an instruction, as a reference to an -// output of a -// producer instruction. +// output of a producer instruction. type InstructionInput struct { // OutputNum: The output index (origin zero) within the producer. OutputNum int64 `json:"outputNum,omitempty"` // ProducerInstructionIndex: The index (origin zero) of the parallel - // instruction that produces - // the output to be consumed by this input. This index is relative - // to the list of instructions in this input's instruction's - // containing MapTask. + // instruction that produces the output to be consumed by this input. + // This index is relative to the list of instructions in this input's + // instruction's containing MapTask. 
ProducerInstructionIndex int64 `json:"producerInstructionIndex,omitempty"` // ForceSendFields is a list of field names (e.g. "OutputNum") to @@ -2455,23 +2574,20 @@ type InstructionOutput struct { Name string `json:"name,omitempty"` // OnlyCountKeyBytes: For system-generated byte and mean byte metrics, - // certain instructions - // should only report the key size. + // certain instructions should only report the key size. OnlyCountKeyBytes bool `json:"onlyCountKeyBytes,omitempty"` // OnlyCountValueBytes: For system-generated byte and mean byte metrics, - // certain instructions - // should only report the value size. + // certain instructions should only report the value size. OnlyCountValueBytes bool `json:"onlyCountValueBytes,omitempty"` // OriginalName: System-defined name for this output in the original - // workflow graph. - // Outputs that do not contribute to an original instruction do not set - // this. + // workflow graph. Outputs that do not contribute to an original + // instruction do not set this. OriginalName string `json:"originalName,omitempty"` - // SystemName: System-defined name of this output. - // Unique across the workflow. + // SystemName: System-defined name of this output. Unique across the + // workflow. SystemName string `json:"systemName,omitempty"` // ForceSendFields is a list of field names (e.g. "Codec") to @@ -2592,112 +2708,77 @@ func (s *IntegerMean) MarshalJSON() ([]byte, error) { // Job: Defines a job to be run by the Cloud Dataflow service. type Job struct { // ClientRequestId: The client's unique identifier of the job, re-used - // across retried attempts. - // If this field is set, the service will ensure its uniqueness. - // The request to create a job will fail if the service has knowledge of - // a - // previously submitted job with the same client's ID and job name. - // The caller may use this field to ensure idempotence of job - // creation across retried attempts to create a job. + // across retried attempts. If this field is set, the service will + // ensure its uniqueness. The request to create a job will fail if the + // service has knowledge of a previously submitted job with the same + // client's ID and job name. The caller may use this field to ensure + // idempotence of job creation across retried attempts to create a job. // By default, the field is empty and, in that case, the service ignores // it. ClientRequestId string `json:"clientRequestId,omitempty"` // CreateTime: The timestamp when the job was initially created. - // Immutable and set by the - // Cloud Dataflow service. + // Immutable and set by the Cloud Dataflow service. CreateTime string `json:"createTime,omitempty"` // CreatedFromSnapshotId: If this is specified, the job's initial state - // is populated from the given - // snapshot. + // is populated from the given snapshot. CreatedFromSnapshotId string `json:"createdFromSnapshotId,omitempty"` - // CurrentState: The current state of the job. - // - // Jobs are created in the `JOB_STATE_STOPPED` state unless - // otherwise - // specified. - // - // A job in the `JOB_STATE_RUNNING` state may asynchronously enter - // a - // terminal state. After a job has reached a terminal state, no - // further state updates may be made. - // - // This field may be mutated by the Cloud Dataflow service; + // CurrentState: The current state of the job. Jobs are created in the + // `JOB_STATE_STOPPED` state unless otherwise specified. A job in the + // `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. 
+ // After a job has reached a terminal state, no further state updates + // may be made. This field may be mutated by the Cloud Dataflow service; // callers cannot mutate it. // // Possible values: // "JOB_STATE_UNKNOWN" - The job's run state isn't specified. // "JOB_STATE_STOPPED" - `JOB_STATE_STOPPED` indicates that the job - // has not - // yet started to run. + // has not yet started to run. // "JOB_STATE_RUNNING" - `JOB_STATE_RUNNING` indicates that the job is // currently running. // "JOB_STATE_DONE" - `JOB_STATE_DONE` indicates that the job has - // successfully completed. - // This is a terminal job state. This state may be set by the Cloud - // Dataflow - // service, as a transition from `JOB_STATE_RUNNING`. It may also be set - // via a - // Cloud Dataflow `UpdateJob` call, if the job has not yet reached a - // terminal - // state. + // successfully completed. This is a terminal job state. This state may + // be set by the Cloud Dataflow service, as a transition from + // `JOB_STATE_RUNNING`. It may also be set via a Cloud Dataflow + // `UpdateJob` call, if the job has not yet reached a terminal state. // "JOB_STATE_FAILED" - `JOB_STATE_FAILED` indicates that the job has - // failed. This is a - // terminal job state. This state may only be set by the Cloud - // Dataflow - // service, and only as a transition from `JOB_STATE_RUNNING`. + // failed. This is a terminal job state. This state may only be set by + // the Cloud Dataflow service, and only as a transition from + // `JOB_STATE_RUNNING`. // "JOB_STATE_CANCELLED" - `JOB_STATE_CANCELLED` indicates that the - // job has been explicitly - // cancelled. This is a terminal job state. This state may only be - // set via a Cloud Dataflow `UpdateJob` call, and only if the job has - // not - // yet reached another terminal state. + // job has been explicitly cancelled. This is a terminal job state. This + // state may only be set via a Cloud Dataflow `UpdateJob` call, and only + // if the job has not yet reached another terminal state. // "JOB_STATE_UPDATED" - `JOB_STATE_UPDATED` indicates that the job - // was successfully updated, - // meaning that this job was stopped and another job was started, - // inheriting - // state from this one. This is a terminal job state. This state may - // only be - // set by the Cloud Dataflow service, and only as a transition - // from - // `JOB_STATE_RUNNING`. + // was successfully updated, meaning that this job was stopped and + // another job was started, inheriting state from this one. This is a + // terminal job state. This state may only be set by the Cloud Dataflow + // service, and only as a transition from `JOB_STATE_RUNNING`. // "JOB_STATE_DRAINING" - `JOB_STATE_DRAINING` indicates that the job - // is in the process of draining. - // A draining job has stopped pulling from its input sources and is - // processing - // any data that remains in-flight. This state may be set via a Cloud - // Dataflow - // `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. - // Jobs - // that are draining may only transition to - // `JOB_STATE_DRAINED`, + // is in the process of draining. A draining job has stopped pulling + // from its input sources and is processing any data that remains + // in-flight. This state may be set via a Cloud Dataflow `UpdateJob` + // call, but only as a transition from `JOB_STATE_RUNNING`. Jobs that + // are draining may only transition to `JOB_STATE_DRAINED`, // `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`. 
// "JOB_STATE_DRAINED" - `JOB_STATE_DRAINED` indicates that the job - // has been drained. - // A drained job terminated by stopping pulling from its input sources - // and - // processing any data that remained in-flight when draining was - // requested. - // This state is a terminal state, may only be set by the Cloud - // Dataflow - // service, and only as a transition from `JOB_STATE_DRAINING`. + // has been drained. A drained job terminated by stopping pulling from + // its input sources and processing any data that remained in-flight + // when draining was requested. This state is a terminal state, may only + // be set by the Cloud Dataflow service, and only as a transition from + // `JOB_STATE_DRAINING`. // "JOB_STATE_PENDING" - `JOB_STATE_PENDING` indicates that the job - // has been created but is not yet - // running. Jobs that are pending may only transition to - // `JOB_STATE_RUNNING`, - // or `JOB_STATE_FAILED`. + // has been created but is not yet running. Jobs that are pending may + // only transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`. // "JOB_STATE_CANCELLING" - `JOB_STATE_CANCELLING` indicates that the - // job has been explicitly cancelled - // and is in the process of stopping. Jobs that are cancelling may - // only - // transition to `JOB_STATE_CANCELLED` or `JOB_STATE_FAILED`. + // job has been explicitly cancelled and is in the process of stopping. + // Jobs that are cancelling may only transition to `JOB_STATE_CANCELLED` + // or `JOB_STATE_FAILED`. // "JOB_STATE_QUEUED" - `JOB_STATE_QUEUED` indicates that the job has - // been created but is being - // delayed until launch. Jobs that are queued may only transition - // to - // `JOB_STATE_PENDING` or `JOB_STATE_CANCELLED`. + // been created but is being delayed until launch. Jobs that are queued + // may only transition to `JOB_STATE_PENDING` or `JOB_STATE_CANCELLED`. CurrentState string `json:"currentState,omitempty"` // CurrentStateTime: The timestamp associated with the current state. @@ -2709,59 +2790,40 @@ type Job struct { // ExecutionInfo: Deprecated. ExecutionInfo *JobExecutionInfo `json:"executionInfo,omitempty"` - // Id: The unique ID of this job. - // - // This field is set by the Cloud Dataflow service when the Job - // is - // created, and is immutable for the life of the job. + // Id: The unique ID of this job. This field is set by the Cloud + // Dataflow service when the Job is created, and is immutable for the + // life of the job. Id string `json:"id,omitempty"` // JobMetadata: This field is populated by the Dataflow service to - // support filtering jobs - // by the metadata values provided here. Populated for ListJobs and all - // GetJob - // views SUMMARY and higher. + // support filtering jobs by the metadata values provided here. + // Populated for ListJobs and all GetJob views SUMMARY and higher. JobMetadata *JobMetadata `json:"jobMetadata,omitempty"` - // Labels: User-defined labels for this job. - // - // The labels map can contain no more than 64 entries. Entries of the - // labels - // map are UTF8 strings that comply with the following restrictions: - // - // * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} - // * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} - // * Both keys and values are additionally constrained to be <= 128 - // bytes in - // size. + // Labels: User-defined labels for this job. The labels map can contain + // no more than 64 entries. 
Entries of the labels map are UTF8 strings + // that comply with the following restrictions: * Keys must conform to + // regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: + // [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally + // constrained to be <= 128 bytes in size. Labels map[string]string `json:"labels,omitempty"` - // Location: The [regional - // endpoint] - // (https://cloud.google.com/dataflow/docs/concepts/regional-en - // dpoints) that - // contains this job. + // Location: The [regional endpoint] + // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) + // that contains this job. Location string `json:"location,omitempty"` - // Name: The user-specified Cloud Dataflow job name. - // - // Only one Job with a given name may exist in a project at any - // given time. If a caller attempts to create a Job with the same - // name as an already-existing Job, the attempt returns the - // existing Job. - // - // The name must match the regular - // expression - // `[a-z]([-a-z0-9]{0,38}[a-z0-9])?` + // Name: The user-specified Cloud Dataflow job name. Only one Job with a + // given name may exist in a project at any given time. If a caller + // attempts to create a Job with the same name as an already-existing + // Job, the attempt returns the existing Job. The name must match the + // regular expression `[a-z]([-a-z0-9]{0,38}[a-z0-9])?` Name string `json:"name,omitempty"` // PipelineDescription: Preliminary field: The format of this data may - // change at any time. - // A description of the user pipeline and stages through which it is - // executed. - // Created by Cloud Dataflow service. Only retrieved - // with - // JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. + // change at any time. A description of the user pipeline and stages + // through which it is executed. Created by Cloud Dataflow service. Only + // retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. PipelineDescription *PipelineDescription `json:"pipelineDescription,omitempty"` // ProjectId: The ID of the Cloud Platform project that the job belongs @@ -2769,151 +2831,103 @@ type Job struct { ProjectId string `json:"projectId,omitempty"` // ReplaceJobId: If this job is an update of an existing job, this field - // is the job ID - // of the job it replaced. - // - // When sending a `CreateJobRequest`, you can update a job by specifying - // it - // here. The job named here is stopped, and its intermediate state - // is - // transferred to this job. + // is the job ID of the job it replaced. When sending a + // `CreateJobRequest`, you can update a job by specifying it here. The + // job named here is stopped, and its intermediate state is transferred + // to this job. ReplaceJobId string `json:"replaceJobId,omitempty"` // ReplacedByJobId: If another job is an update of this job (and thus, - // this job is in - // `JOB_STATE_UPDATED`), this field contains the ID of that job. + // this job is in `JOB_STATE_UPDATED`), this field contains the ID of + // that job. ReplacedByJobId string `json:"replacedByJobId,omitempty"` - // RequestedState: The job's requested state. - // - // `UpdateJob` may be used to switch between the `JOB_STATE_STOPPED` - // and - // `JOB_STATE_RUNNING` states, by setting requested_state. `UpdateJob` - // may - // also be used to directly set a job's requested state - // to - // `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating - // the - // job if it has not already reached a terminal state. + // RequestedState: The job's requested state. 
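The Name and Labels constraints quoted above lend themselves to a quick client-side check before submitting a job. A minimal sketch, applying the regular expressions exactly as they appear in the field comments; the helper names are illustrative and not part of the generated package:

package main

import (
	"fmt"
	"regexp"
)

var (
	// Patterns copied verbatim from the Name and Labels field comments above.
	jobNameRE    = regexp.MustCompile(`^[a-z]([-a-z0-9]{0,38}[a-z0-9])?$`)
	labelKeyRE   = regexp.MustCompile(`^\p{Ll}\p{Lo}{0,62}$`)
	labelValueRE = regexp.MustCompile(`^[\p{Ll}\p{Lo}\p{N}_-]{0,63}$`)
)

func validateLabels(labels map[string]string) error {
	if len(labels) > 64 {
		return fmt.Errorf("labels map has %d entries; at most 64 are allowed", len(labels))
	}
	for k, v := range labels {
		if len(k) > 128 || len(v) > 128 {
			return fmt.Errorf("label %q: keys and values must be <= 128 bytes", k)
		}
		if !labelKeyRE.MatchString(k) || !labelValueRE.MatchString(v) {
			return fmt.Errorf("label %q=%q does not match the documented patterns", k, v)
		}
	}
	return nil
}

func main() {
	fmt.Println(jobNameRE.MatchString("wordcount-2021")) // true
	if err := validateLabels(map[string]string{"team": "data"}); err != nil {
		fmt.Println("labels rejected:", err)
	}
}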
`UpdateJob` may be used to + // switch between the `JOB_STATE_STOPPED` and `JOB_STATE_RUNNING` + // states, by setting requested_state. `UpdateJob` may also be used to + // directly set a job's requested state to `JOB_STATE_CANCELLED` or + // `JOB_STATE_DONE`, irrevocably terminating the job if it has not + // already reached a terminal state. // // Possible values: // "JOB_STATE_UNKNOWN" - The job's run state isn't specified. // "JOB_STATE_STOPPED" - `JOB_STATE_STOPPED` indicates that the job - // has not - // yet started to run. + // has not yet started to run. // "JOB_STATE_RUNNING" - `JOB_STATE_RUNNING` indicates that the job is // currently running. // "JOB_STATE_DONE" - `JOB_STATE_DONE` indicates that the job has - // successfully completed. - // This is a terminal job state. This state may be set by the Cloud - // Dataflow - // service, as a transition from `JOB_STATE_RUNNING`. It may also be set - // via a - // Cloud Dataflow `UpdateJob` call, if the job has not yet reached a - // terminal - // state. + // successfully completed. This is a terminal job state. This state may + // be set by the Cloud Dataflow service, as a transition from + // `JOB_STATE_RUNNING`. It may also be set via a Cloud Dataflow + // `UpdateJob` call, if the job has not yet reached a terminal state. // "JOB_STATE_FAILED" - `JOB_STATE_FAILED` indicates that the job has - // failed. This is a - // terminal job state. This state may only be set by the Cloud - // Dataflow - // service, and only as a transition from `JOB_STATE_RUNNING`. + // failed. This is a terminal job state. This state may only be set by + // the Cloud Dataflow service, and only as a transition from + // `JOB_STATE_RUNNING`. // "JOB_STATE_CANCELLED" - `JOB_STATE_CANCELLED` indicates that the - // job has been explicitly - // cancelled. This is a terminal job state. This state may only be - // set via a Cloud Dataflow `UpdateJob` call, and only if the job has - // not - // yet reached another terminal state. + // job has been explicitly cancelled. This is a terminal job state. This + // state may only be set via a Cloud Dataflow `UpdateJob` call, and only + // if the job has not yet reached another terminal state. // "JOB_STATE_UPDATED" - `JOB_STATE_UPDATED` indicates that the job - // was successfully updated, - // meaning that this job was stopped and another job was started, - // inheriting - // state from this one. This is a terminal job state. This state may - // only be - // set by the Cloud Dataflow service, and only as a transition - // from - // `JOB_STATE_RUNNING`. + // was successfully updated, meaning that this job was stopped and + // another job was started, inheriting state from this one. This is a + // terminal job state. This state may only be set by the Cloud Dataflow + // service, and only as a transition from `JOB_STATE_RUNNING`. // "JOB_STATE_DRAINING" - `JOB_STATE_DRAINING` indicates that the job - // is in the process of draining. - // A draining job has stopped pulling from its input sources and is - // processing - // any data that remains in-flight. This state may be set via a Cloud - // Dataflow - // `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. - // Jobs - // that are draining may only transition to - // `JOB_STATE_DRAINED`, + // is in the process of draining. A draining job has stopped pulling + // from its input sources and is processing any data that remains + // in-flight. This state may be set via a Cloud Dataflow `UpdateJob` + // call, but only as a transition from `JOB_STATE_RUNNING`. 
Jobs that + // are draining may only transition to `JOB_STATE_DRAINED`, // `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`. // "JOB_STATE_DRAINED" - `JOB_STATE_DRAINED` indicates that the job - // has been drained. - // A drained job terminated by stopping pulling from its input sources - // and - // processing any data that remained in-flight when draining was - // requested. - // This state is a terminal state, may only be set by the Cloud - // Dataflow - // service, and only as a transition from `JOB_STATE_DRAINING`. + // has been drained. A drained job terminated by stopping pulling from + // its input sources and processing any data that remained in-flight + // when draining was requested. This state is a terminal state, may only + // be set by the Cloud Dataflow service, and only as a transition from + // `JOB_STATE_DRAINING`. // "JOB_STATE_PENDING" - `JOB_STATE_PENDING` indicates that the job - // has been created but is not yet - // running. Jobs that are pending may only transition to - // `JOB_STATE_RUNNING`, - // or `JOB_STATE_FAILED`. + // has been created but is not yet running. Jobs that are pending may + // only transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`. // "JOB_STATE_CANCELLING" - `JOB_STATE_CANCELLING` indicates that the - // job has been explicitly cancelled - // and is in the process of stopping. Jobs that are cancelling may - // only - // transition to `JOB_STATE_CANCELLED` or `JOB_STATE_FAILED`. + // job has been explicitly cancelled and is in the process of stopping. + // Jobs that are cancelling may only transition to `JOB_STATE_CANCELLED` + // or `JOB_STATE_FAILED`. // "JOB_STATE_QUEUED" - `JOB_STATE_QUEUED` indicates that the job has - // been created but is being - // delayed until launch. Jobs that are queued may only transition - // to - // `JOB_STATE_PENDING` or `JOB_STATE_CANCELLED`. + // been created but is being delayed until launch. Jobs that are queued + // may only transition to `JOB_STATE_PENDING` or `JOB_STATE_CANCELLED`. RequestedState string `json:"requestedState,omitempty"` - // StageStates: This field may be mutated by the Cloud Dataflow - // service; + // StageStates: This field may be mutated by the Cloud Dataflow service; // callers cannot mutate it. StageStates []*ExecutionStageState `json:"stageStates,omitempty"` // StartTime: The timestamp when the job was started (transitioned to - // JOB_STATE_PENDING). - // Flexible resource scheduling jobs are started with some delay after - // job - // creation, so start_time is unset before start and is updated when - // the - // job is started by the Cloud Dataflow service. For other jobs, - // start_time - // always equals to create_time and is immutable and set by the Cloud - // Dataflow - // service. + // JOB_STATE_PENDING). Flexible resource scheduling jobs are started + // with some delay after job creation, so start_time is unset before + // start and is updated when the job is started by the Cloud Dataflow + // service. For other jobs, start_time always equals to create_time and + // is immutable and set by the Cloud Dataflow service. StartTime string `json:"startTime,omitempty"` - // Steps: Exactly one of step or steps_location should be - // specified. - // - // The top-level steps that constitute the entire job. + // Steps: Exactly one of step or steps_location should be specified. The + // top-level steps that constitute the entire job. Only retrieved with + // JOB_VIEW_ALL. Steps []*Step `json:"steps,omitempty"` // StepsLocation: The GCS location where the steps are stored. 
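When polling CurrentState it can help to encode the terminal-state semantics spelled out in the comments above. A small sketch, not part of the generated client, that simply lists the five states the documentation marks as terminal:

package main

import "fmt"

// isTerminalJobState reports whether further state updates are possible,
// per the job-state descriptions above: DONE, FAILED, CANCELLED, UPDATED,
// and DRAINED are each documented as terminal.
func isTerminalJobState(state string) bool {
	switch state {
	case "JOB_STATE_DONE", "JOB_STATE_FAILED", "JOB_STATE_CANCELLED",
		"JOB_STATE_UPDATED", "JOB_STATE_DRAINED":
		return true
	}
	return false
}

func main() {
	fmt.Println(isTerminalJobState("JOB_STATE_RUNNING")) // false
	fmt.Println(isTerminalJobState("JOB_STATE_DRAINED")) // true
}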
StepsLocation string `json:"stepsLocation,omitempty"` - // TempFiles: A set of files the system should be aware of that are - // used - // for temporary storage. These temporary files will be - // removed on job completion. - // No duplicates are allowed. - // No file patterns are supported. - // - // The supported files are: - // - // Google Cloud Storage: - // - // storage.googleapis.com/{bucket}/{object} - // bucket.storage.googleapis.com/{object} + // TempFiles: A set of files the system should be aware of that are used + // for temporary storage. These temporary files will be removed on job + // completion. No duplicates are allowed. No file patterns are + // supported. The supported files are: Google Cloud Storage: + // storage.googleapis.com/{bucket}/{object} + // bucket.storage.googleapis.com/{object} TempFiles []string `json:"tempFiles,omitempty"` // TransformNameMapping: The map of transform name prefixes of the job - // to be replaced to the - // corresponding name prefixes of the new job. + // to be replaced to the corresponding name prefixes of the new job. TransformNameMapping map[string]string `json:"transformNameMapping,omitempty"` // Type: The type of Cloud Dataflow job. @@ -2922,11 +2936,9 @@ type Job struct { // "JOB_TYPE_UNKNOWN" - The type of the job is unspecified, or // unknown. // "JOB_TYPE_BATCH" - A batch job with a well-defined end point: data - // is read, data is - // processed, data is written, and the job is done. + // is read, data is processed, data is written, and the job is done. // "JOB_TYPE_STREAMING" - A continuously streaming job with no end: - // data is read, - // processed, and written continuously. + // data is read, processed, and written continuously. Type string `json:"type,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2957,9 +2969,45 @@ func (s *Job) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// JobExecutionDetails: Information about the execution of a job. +type JobExecutionDetails struct { + // NextPageToken: If present, this response does not contain all + // requested tasks. To obtain the next page of results, repeat the + // request with page_token set to this value. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Stages: The stages of the job execution. + Stages []*StageSummary `json:"stages,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *JobExecutionDetails) MarshalJSON() ([]byte, error) { + type NoMethod JobExecutionDetails + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // JobExecutionInfo: Additional information about how a Cloud Dataflow -// job will be executed that -// isn't contained in the submitted job. +// job will be executed that isn't contained in the submitted job. type JobExecutionInfo struct { // Stages: A mapping from each stage to the information about that // stage. @@ -2988,13 +3036,12 @@ func (s *JobExecutionInfo) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// JobExecutionStageInfo: Contains information about how a -// particular +// JobExecutionStageInfo: Contains information about how a particular // google.dataflow.v1beta3.Step will be executed. type JobExecutionStageInfo struct { - // StepName: The steps associated with the execution stage. - // Note that stages may have several steps, and that a given step - // might be run by more than one stage. + // StepName: The steps associated with the execution stage. Note that + // stages may have several steps, and that a given step might be run by + // more than one stage. StepName []string `json:"stepName,omitempty"` // ForceSendFields is a list of field names (e.g. "StepName") to @@ -3031,37 +3078,29 @@ type JobMessage struct { // "JOB_MESSAGE_IMPORTANCE_UNKNOWN" - The message importance isn't // specified, or is unknown. // "JOB_MESSAGE_DEBUG" - The message is at the 'debug' level: - // typically only useful for - // software engineers working on the code the job is running. - // Typically, Dataflow pipeline runners do not display log messages - // at this level by default. + // typically only useful for software engineers working on the code the + // job is running. Typically, Dataflow pipeline runners do not display + // log messages at this level by default. // "JOB_MESSAGE_DETAILED" - The message is at the 'detailed' level: - // somewhat verbose, but - // potentially useful to users. Typically, Dataflow pipeline - // runners do not display log messages at this level by default. - // These messages are displayed by default in the Dataflow + // somewhat verbose, but potentially useful to users. Typically, + // Dataflow pipeline runners do not display log messages at this level + // by default. These messages are displayed by default in the Dataflow // monitoring UI. // "JOB_MESSAGE_BASIC" - The message is at the 'basic' level: useful - // for keeping - // track of the execution of a Dataflow pipeline. Typically, - // Dataflow pipeline runners display log messages at this level - // by - // default, and these messages are displayed by default in the - // Dataflow monitoring UI. + // for keeping track of the execution of a Dataflow pipeline. Typically, + // Dataflow pipeline runners display log messages at this level by + // default, and these messages are displayed by default in the Dataflow + // monitoring UI. // "JOB_MESSAGE_WARNING" - The message is at the 'warning' level: - // indicating a condition - // pertaining to a job which may require human intervention. - // Typically, Dataflow pipeline runners display log messages at - // this - // level by default, and these messages are displayed by default in - // the Dataflow monitoring UI. + // indicating a condition pertaining to a job which may require human + // intervention. 
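The JobExecutionDetails message added above is paginated, so a caller is expected to follow NextPageToken until it comes back empty. A sketch under two assumptions: the vendored import path is google.golang.org/api/dataflow/v1b3, and fetchPage is a hypothetical stand-in for whatever call returns one page (StageSummary is referenced by the new struct and assumed to be defined alongside it):

package main

import (
	"fmt"

	dataflow "google.golang.org/api/dataflow/v1b3"
)

// allStages follows NextPageToken until the service stops returning one.
func allStages(fetchPage func(pageToken string) (*dataflow.JobExecutionDetails, error)) ([]*dataflow.StageSummary, error) {
	var stages []*dataflow.StageSummary
	token := ""
	for {
		resp, err := fetchPage(token)
		if err != nil {
			return nil, err
		}
		stages = append(stages, resp.Stages...)
		// An empty NextPageToken means this response contained the last page.
		if resp.NextPageToken == "" {
			return stages, nil
		}
		token = resp.NextPageToken
	}
}

func main() {
	// A toy single-page fetcher standing in for a real API call.
	fetch := func(pageToken string) (*dataflow.JobExecutionDetails, error) {
		return &dataflow.JobExecutionDetails{
			Stages: []*dataflow.StageSummary{{}},
		}, nil
	}
	stages, _ := allStages(fetch)
	fmt.Println(len(stages)) // 1
}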
Typically, Dataflow pipeline runners display log + // messages at this level by default, and these messages are displayed + // by default in the Dataflow monitoring UI. // "JOB_MESSAGE_ERROR" - The message is at the 'error' level: - // indicating a condition - // preventing a job from succeeding. Typically, Dataflow - // pipeline - // runners display log messages at this level by default, and - // these - // messages are displayed by default in the Dataflow monitoring UI. + // indicating a condition preventing a job from succeeding. Typically, + // Dataflow pipeline runners display log messages at this level by + // default, and these messages are displayed by default in the Dataflow + // monitoring UI. MessageImportance string `json:"messageImportance,omitempty"` // MessageText: The text of the message. @@ -3094,8 +3133,7 @@ func (s *JobMessage) MarshalJSON() ([]byte, error) { } // JobMetadata: Metadata available primarily for filtering jobs. Will be -// included in the -// ListJob response and Job SUMMARY view. +// included in the ListJob response and Job SUMMARY view. type JobMetadata struct { // BigTableDetails: Identification of a BigTable source used in the // Dataflow job. @@ -3149,16 +3187,11 @@ func (s *JobMetadata) MarshalJSON() ([]byte, error) { } // JobMetrics: JobMetrics contains a collection of metrics describing -// the detailed progress -// of a Dataflow job. Metrics correspond to user-defined and -// system-defined -// metrics in the job. -// -// This resource captures only the most recent values of each -// metric; -// time-series data can be queried for them (under the same metric -// names) -// from Cloud Monitoring. +// the detailed progress of a Dataflow job. Metrics correspond to +// user-defined and system-defined metrics in the job. This resource +// captures only the most recent values of each metric; time-series data +// can be queried for them (under the same metric names) from Cloud +// Monitoring. type JobMetrics struct { // MetricTime: Timestamp as of which metric values are current. MetricTime string `json:"metricTime,omitempty"` @@ -3194,18 +3227,12 @@ func (s *JobMetrics) MarshalJSON() ([]byte, error) { } // KeyRangeDataDiskAssignment: Data disk assignment information for a -// specific key-range of a sharded -// computation. -// Currently we only support UTF-8 character splits to simplify encoding -// into -// JSON. +// specific key-range of a sharded computation. Currently we only +// support UTF-8 character splits to simplify encoding into JSON. type KeyRangeDataDiskAssignment struct { // DataDisk: The name of the data disk where data for this range is - // stored. - // This name is local to the Google Cloud Platform project and - // uniquely - // identifies the disk within that project, for - // example + // stored. This name is local to the Google Cloud Platform project and + // uniquely identifies the disk within that project, for example // "myproject-1014-104817-4c2-harness-0-disk-1". DataDisk string `json:"dataDisk,omitempty"` @@ -3239,28 +3266,22 @@ func (s *KeyRangeDataDiskAssignment) MarshalJSON() ([]byte, error) { } // KeyRangeLocation: Location information for a specific key-range of a -// sharded computation. -// Currently we only support UTF-8 character splits to simplify encoding -// into -// JSON. +// sharded computation. Currently we only support UTF-8 character splits +// to simplify encoding into JSON. type KeyRangeLocation struct { // DataDisk: The name of the data disk where data for this range is - // stored. 
- // This name is local to the Google Cloud Platform project and - // uniquely - // identifies the disk within that project, for - // example + // stored. This name is local to the Google Cloud Platform project and + // uniquely identifies the disk within that project, for example // "myproject-1014-104817-4c2-harness-0-disk-1". DataDisk string `json:"dataDisk,omitempty"` // DeliveryEndpoint: The physical location of this range assignment to - // be used for - // streaming computation cross-worker message delivery. + // be used for streaming computation cross-worker message delivery. DeliveryEndpoint string `json:"deliveryEndpoint,omitempty"` // DeprecatedPersistentDirectory: DEPRECATED. The location of the - // persistent state for this range, as a - // persistent directory in the worker local filesystem. + // persistent state for this range, as a persistent directory in the + // worker local filesystem. DeprecatedPersistentDirectory string `json:"deprecatedPersistentDirectory,omitempty"` // End: The end (exclusive) of the key range. @@ -3301,11 +3322,18 @@ type LaunchFlexTemplateParameter struct { // ContainerSpec as content. ContainerSpecGcsPath string `json:"containerSpecGcsPath,omitempty"` + // Environment: The runtime environment for the FlexTemplate job + Environment *FlexTemplateRuntimeEnvironment `json:"environment,omitempty"` + // JobName: Required. The job name to use for the created job. JobName string `json:"jobName,omitempty"` - // Parameters: The parameters for FlexTemplate. - // Ex. {"num_workers":"5"} + // LaunchOptions: Launch options for this flex template job. This is a + // common set of options across languages and templates. This should not + // be used to pass job parameters. + LaunchOptions map[string]string `json:"launchOptions,omitempty"` + + // Parameters: The parameters for FlexTemplate. Ex. {"num_workers":"5"} Parameters map[string]string `json:"parameters,omitempty"` // ForceSendFields is a list of field names (e.g. "ContainerSpec") to @@ -3339,8 +3367,7 @@ type LaunchFlexTemplateRequest struct { LaunchParameter *LaunchFlexTemplateParameter `json:"launchParameter,omitempty"` // ValidateOnly: If true, the request is validated but not actually - // executed. - // Defaults to false. + // executed. Defaults to false. ValidateOnly bool `json:"validateOnly,omitempty"` // ForceSendFields is a list of field names (e.g. "LaunchParameter") to @@ -3370,8 +3397,7 @@ func (s *LaunchFlexTemplateRequest) MarshalJSON() ([]byte, error) { // LaunchFlexTemplateResponse: Response to the request to launch a job // from Flex Template. type LaunchFlexTemplateResponse struct { - // Job: The job that was launched, if the request was not a dry run - // and + // Job: The job that was launched, if the request was not a dry run and // the job was successfully launched. Job *Job `json:"job,omitempty"` @@ -3415,14 +3441,12 @@ type LaunchTemplateParameters struct { Parameters map[string]string `json:"parameters,omitempty"` // TransformNameMapping: Only applicable when updating a pipeline. Map - // of transform name prefixes of - // the job to be replaced to the corresponding name prefixes of the new - // job. + // of transform name prefixes of the job to be replaced to the + // corresponding name prefixes of the new job. TransformNameMapping map[string]string `json:"transformNameMapping,omitempty"` // Update: If set, replace the existing pipeline with the name specified - // by jobName - // with this pipeline, preserving state. + // by jobName with this pipeline, preserving state. 
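The LaunchFlexTemplateParameter comments above draw a line between Parameters (pipeline parameters such as "num_workers") and the new LaunchOptions map (language/template-agnostic launch options that must not carry job parameters). A hedged sketch of assembling a request, with placeholder paths and an illustrative launch-option key, assuming the google.golang.org/api/dataflow/v1b3 import path:

package main

import (
	"encoding/json"
	"fmt"

	dataflow "google.golang.org/api/dataflow/v1b3"
)

func main() {
	req := &dataflow.LaunchFlexTemplateRequest{
		LaunchParameter: &dataflow.LaunchFlexTemplateParameter{
			JobName:              "example-flex-job",
			ContainerSpecGcsPath: "gs://example-bucket/templates/spec.json",
			// Pipeline parameters, e.g. {"num_workers":"5"} per the comment above.
			Parameters: map[string]string{"num_workers": "5"},
			// Launch options are common across languages/templates; the key
			// below is illustrative only.
			LaunchOptions: map[string]string{"example-option": "value"},
		},
		// Validate the request without actually launching the job.
		ValidateOnly: true,
	}
	b, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(b))
}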
Update bool `json:"update,omitempty"` // ForceSendFields is a list of field names (e.g. "Environment") to @@ -3450,8 +3474,7 @@ func (s *LaunchTemplateParameters) MarshalJSON() ([]byte, error) { // LaunchTemplateResponse: Response to the request to launch a template. type LaunchTemplateResponse struct { - // Job: The job that was launched, if the request was not a dry run - // and + // Job: The job that was launched, if the request was not a dry run and // the job was successfully launched. Job *Job `json:"job,omitempty"` @@ -3487,11 +3510,9 @@ type LeaseWorkItemRequest struct { // CurrentWorkerTime: The current timestamp at the worker. CurrentWorkerTime string `json:"currentWorkerTime,omitempty"` - // Location: The [regional - // endpoint] - // (https://cloud.google.com/dataflow/docs/concepts/regional-en - // dpoints) that - // contains the WorkItem's job. + // Location: The [regional endpoint] + // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) + // that contains the WorkItem's job. Location string `json:"location,omitempty"` // RequestedLeaseDuration: The initial lease period. @@ -3505,13 +3526,11 @@ type LeaseWorkItemRequest struct { WorkItemTypes []string `json:"workItemTypes,omitempty"` // WorkerCapabilities: Worker capabilities. WorkItems might be limited - // to workers with specific - // capabilities. + // to workers with specific capabilities. WorkerCapabilities []string `json:"workerCapabilities,omitempty"` // WorkerId: Identifies the worker leasing work -- typically the ID of - // the - // virtual machine running the worker. + // the virtual machine running the worker. WorkerId string `json:"workerId,omitempty"` // ForceSendFields is a list of field names (e.g. "CurrentWorkerTime") @@ -3617,19 +3636,15 @@ func (s *ListJobMessagesResponse) MarshalJSON() ([]byte, error) { } // ListJobsResponse: Response to a request to list Cloud Dataflow jobs -// in a project. This might -// be a partial response, depending on the page size in the -// ListJobsRequest. -// However, if the project does not have any jobs, an instance -// of -// ListJobsResponse is not returned and the requests's response -// body is empty {}. +// in a project. This might be a partial response, depending on the page +// size in the ListJobsRequest. However, if the project does not have +// any jobs, an instance of ListJobsResponse is not returned and the +// requests's response body is empty {}. type ListJobsResponse struct { // FailedLocation: Zero or more messages describing the [regional // endpoints] - // (https://cloud.google.com/dataflow/docs/concepts/regional-e - // ndpoints) that - // failed to respond. + // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) + // that failed to respond. FailedLocation []*FailedLocation `json:"failedLocation,omitempty"` // Jobs: A subset of the requested job information. @@ -3699,31 +3714,62 @@ func (s *ListSnapshotsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ListTemplateVersionsResponse: Respond a list of TemplateVersions. +type ListTemplateVersionsResponse struct { + // NextPageToken: A token that can be sent as `page_token` to retrieve + // the next page. If this field is omitted, there are no subsequent + // pages. + NextPageToken string `json:"nextPageToken,omitempty"` + + // TemplateVersions: A list of TemplateVersions. 
+ TemplateVersions []*TemplateVersion `json:"templateVersions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListTemplateVersionsResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListTemplateVersionsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // MapTask: MapTask consists of an ordered set of instructions, each of -// which -// describes one particular low-level operation for the worker -// to -// perform in order to accomplish the MapTask's WorkItem. -// -// Each instruction must appear in the list before any instructions -// which +// which describes one particular low-level operation for the worker to +// perform in order to accomplish the MapTask's WorkItem. Each +// instruction must appear in the list before any instructions which // depends on its output. type MapTask struct { // CounterPrefix: Counter prefix that can be used to prefix counters. - // Not currently used in - // Dataflow. + // Not currently used in Dataflow. CounterPrefix string `json:"counterPrefix,omitempty"` // Instructions: The instructions in the MapTask. Instructions []*ParallelInstruction `json:"instructions,omitempty"` - // StageName: System-defined name of the stage containing this - // MapTask. + // StageName: System-defined name of the stage containing this MapTask. // Unique across the workflow. StageName string `json:"stageName,omitempty"` - // SystemName: System-defined name of this MapTask. - // Unique across the workflow. + // SystemName: System-defined name of this MapTask. Unique across the + // workflow. SystemName string `json:"systemName,omitempty"` // ForceSendFields is a list of field names (e.g. "CounterPrefix") to @@ -3750,8 +3796,7 @@ func (s *MapTask) MarshalJSON() ([]byte, error) { } // MemInfo: Information about the memory usage of a worker or a -// container within a -// worker. +// container within a worker. type MemInfo struct { // CurrentLimitBytes: Instantenous memory limit in bytes. CurrentLimitBytes uint64 `json:"currentLimitBytes,omitempty,string"` @@ -3790,11 +3835,10 @@ func (s *MemInfo) MarshalJSON() ([]byte, error) { } // MetricShortId: The metric short id is returned to the user alongside -// an offset into -// ReportWorkItemStatusRequest +// an offset into ReportWorkItemStatusRequest type MetricShortId struct { - // MetricIndex: The index of the corresponding metric in - // the ReportWorkItemStatusRequest. Required. 
+ // MetricIndex: The index of the corresponding metric in the + // ReportWorkItemStatusRequest. Required. MetricIndex int64 `json:"metricIndex,omitempty"` // ShortId: The service-generated short identifier for the metric. @@ -3824,28 +3868,21 @@ func (s *MetricShortId) MarshalJSON() ([]byte, error) { } // MetricStructuredName: Identifies a metric, by describing the source -// which generated the -// metric. +// which generated the metric. type MetricStructuredName struct { // Context: Zero or more labeled fields which identify the part of the - // job this - // metric is associated with, such as the name of a step or - // collection. - // - // For example, built-in counters associated with steps will - // have - // context['step'] = . Counters associated with - // PCollections - // in the SDK will have context['pcollection'] = . + // job this metric is associated with, such as the name of a step or + // collection. For example, built-in counters associated with steps will + // have context['step'] = . Counters associated with PCollections in the + // SDK will have context['pcollection'] = . Context map[string]string `json:"context,omitempty"` // Name: Worker-defined metric name. Name string `json:"name,omitempty"` // Origin: Origin (namespace) of metric name. May be blank for - // user-define metrics; - // will be "dataflow" for metrics defined by the Dataflow service or - // SDK. + // user-define metrics; will be "dataflow" for metrics defined by the + // Dataflow service or SDK. Origin string `json:"origin,omitempty"` // ForceSendFields is a list of field names (e.g. "Context") to @@ -3874,77 +3911,60 @@ func (s *MetricStructuredName) MarshalJSON() ([]byte, error) { // MetricUpdate: Describes the state of a metric. type MetricUpdate struct { // Cumulative: True if this metric is reported as the total cumulative - // aggregate - // value accumulated since the worker started working on this - // WorkItem. - // By default this is false, indicating that this metric is reported - // as a delta that is not associated with any WorkItem. + // aggregate value accumulated since the worker started working on this + // WorkItem. By default this is false, indicating that this metric is + // reported as a delta that is not associated with any WorkItem. Cumulative bool `json:"cumulative,omitempty"` // Distribution: A struct value describing properties of a distribution // of numeric values. Distribution interface{} `json:"distribution,omitempty"` - // Gauge: A struct value describing properties of a Gauge. - // Metrics of gauge type show the value of a metric across time, and - // is - // aggregated based on the newest value. + // Gauge: A struct value describing properties of a Gauge. Metrics of + // gauge type show the value of a metric across time, and is aggregated + // based on the newest value. Gauge interface{} `json:"gauge,omitempty"` // Internal: Worker-computed aggregate value for internal use by the - // Dataflow - // service. + // Dataflow service. Internal interface{} `json:"internal,omitempty"` - // Kind: Metric aggregation kind. The possible metric aggregation kinds - // are - // "Sum", "Max", "Min", "Mean", "Set", "And", "Or", and - // "Distribution". - // The specified aggregation kind is case-insensitive. - // - // If omitted, this is not an aggregated value but instead - // a single metric sample value. + // Kind: Metric aggregation kind. The possible metric aggregation kinds + // are "Sum", "Max", "Min", "Mean", "Set", "And", "Or", and + // "Distribution". 
The specified aggregation kind is case-insensitive. + // If omitted, this is not an aggregated value but instead a single + // metric sample value. Kind string `json:"kind,omitempty"` // MeanCount: Worker-computed aggregate value for the "Mean" aggregation - // kind. - // This holds the count of the aggregated values and is used in - // combination - // with mean_sum above to obtain the actual mean aggregate value. - // The only possible value type is Long. + // kind. This holds the count of the aggregated values and is used in + // combination with mean_sum above to obtain the actual mean aggregate + // value. The only possible value type is Long. MeanCount interface{} `json:"meanCount,omitempty"` // MeanSum: Worker-computed aggregate value for the "Mean" aggregation - // kind. - // This holds the sum of the aggregated values and is used in - // combination - // with mean_count below to obtain the actual mean aggregate value. - // The only possible value types are Long and Double. + // kind. This holds the sum of the aggregated values and is used in + // combination with mean_count below to obtain the actual mean aggregate + // value. The only possible value types are Long and Double. MeanSum interface{} `json:"meanSum,omitempty"` // Name: Name of the metric. Name *MetricStructuredName `json:"name,omitempty"` // Scalar: Worker-computed aggregate value for aggregation kinds "Sum", - // "Max", "Min", - // "And", and "Or". The possible value types are Long, Double, and - // Boolean. + // "Max", "Min", "And", and "Or". The possible value types are Long, + // Double, and Boolean. Scalar interface{} `json:"scalar,omitempty"` // Set: Worker-computed aggregate value for the "Set" aggregation kind. - // The only - // possible value type is a list of Values whose type can be Long, - // Double, - // or String, according to the metric's type. All Values in the list - // must - // be of the same type. + // The only possible value type is a list of Values whose type can be + // Long, Double, or String, according to the metric's type. All Values + // in the list must be of the same type. Set interface{} `json:"set,omitempty"` // UpdateTime: Timestamp associated with the metric value. Optional when - // workers are - // reporting work progress; it will be filled in responses from - // the - // metrics API. + // workers are reporting work progress; it will be filled in responses + // from the metrics API. UpdateTime string `json:"updateTime,omitempty"` // ForceSendFields is a list of field names (e.g. "Cumulative") to @@ -3970,14 +3990,154 @@ func (s *MetricUpdate) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ModifyTemplateVersionLabelRequest: Either add the label to +// TemplateVersion or remove it from the TemplateVersion. +type ModifyTemplateVersionLabelRequest struct { + // Key: The label key for update. + Key string `json:"key,omitempty"` + + // Op: Requests for add label to TemplateVersion or remove label from + // TemplateVersion. + // + // Possible values: + // "OPERATION_UNSPECIFIED" - Default value. + // "ADD" - Add the label to the TemplateVersion object. + // "REMOVE" - Remove the label from the TemplateVersion object. + Op string `json:"op,omitempty"` + + // Value: The label value for update. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
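Per the MetricUpdate comments above, a "Mean" aggregate is reported as MeanSum plus MeanCount rather than a Scalar value. A sketch with illustrative metric names and values, assuming the google.golang.org/api/dataflow/v1b3 import path:

package main

import (
	"encoding/json"
	"fmt"

	dataflow "google.golang.org/api/dataflow/v1b3"
)

func main() {
	update := &dataflow.MetricUpdate{
		Name: &dataflow.MetricStructuredName{
			Name:    "element_latency_ms",
			Origin:  "dataflow", // "dataflow" for service/SDK metrics; blank for user metrics
			Context: map[string]string{"step": "ParseEvents"},
		},
		Kind:       "Mean",
		MeanSum:    int64(4200), // sum of the aggregated values (Long)
		MeanCount:  int64(100),  // count of the aggregated values (Long)
		Cumulative: true,        // total since the worker started on this WorkItem
	}
	b, _ := json.Marshal(update)
	fmt.Println(string(b))
}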
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ModifyTemplateVersionLabelRequest) MarshalJSON() ([]byte, error) { + type NoMethod ModifyTemplateVersionLabelRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ModifyTemplateVersionLabelResponse: Respond the labels in the +// TemplateVersion. +type ModifyTemplateVersionLabelResponse struct { + // Labels: All the label in the TemplateVersion. + Labels map[string]string `json:"labels,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Labels") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Labels") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ModifyTemplateVersionLabelResponse) MarshalJSON() ([]byte, error) { + type NoMethod ModifyTemplateVersionLabelResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ModifyTemplateVersionTagRequest: Add a tag to the current +// TemplateVersion. If tag exist in another TemplateVersion in the +// Template, remove the tag before add it to the current +// TemplateVersion. If remove_only set, remove the tag from the current +// TemplateVersion. +type ModifyTemplateVersionTagRequest struct { + // RemoveOnly: The flag that indicates if the request is only for remove + // tag from TemplateVersion. + RemoveOnly bool `json:"removeOnly,omitempty"` + + // Tag: The tag for update. + Tag string `json:"tag,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RemoveOnly") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"RemoveOnly") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ModifyTemplateVersionTagRequest) MarshalJSON() ([]byte, error) { + type NoMethod ModifyTemplateVersionTagRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ModifyTemplateVersionTagResponse: Respond the current tags in the +// TemplateVersion. +type ModifyTemplateVersionTagResponse struct { + // Tags: All the tags in the TemplateVersion. + Tags []string `json:"tags,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Tags") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Tags") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ModifyTemplateVersionTagResponse) MarshalJSON() ([]byte, error) { + type NoMethod ModifyTemplateVersionTagResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // MountedDataDisk: Describes mounted data disk. type MountedDataDisk struct { - // DataDisk: The name of the data disk. - // This name is local to the Google Cloud Platform project and - // uniquely - // identifies the disk within that project, for - // example - // "myproject-1014-104817-4c2-harness-0-disk-1". + // DataDisk: The name of the data disk. This name is local to the Google + // Cloud Platform project and uniquely identifies the disk within that + // project, for example "myproject-1014-104817-4c2-harness-0-disk-1". DataDisk string `json:"dataDisk,omitempty"` // ForceSendFields is a list of field names (e.g. "DataDisk") to @@ -4006,8 +4166,7 @@ func (s *MountedDataDisk) MarshalJSON() ([]byte, error) { // MultiOutputInfo: Information about an output of a multi-output DoFn. type MultiOutputInfo struct { // Tag: The id of the tag the user code will emit to this output by; - // this - // should correspond to the tag of some SideInputInfo. + // this should correspond to the tag of some SideInputInfo. Tag string `json:"tag,omitempty"` // ForceSendFields is a list of field names (e.g. "Tag") to @@ -4081,28 +4240,16 @@ func (s *NameAndKind) MarshalJSON() ([]byte, error) { } // Package: The packages that must be installed in order for a worker to -// run the -// steps of the Cloud Dataflow job that will be assigned to its -// worker -// pool. 
-// -// This is the mechanism by which the Cloud Dataflow SDK causes code -// to -// be loaded onto the workers. For example, the Cloud Dataflow Java -// SDK -// might use this to install jars containing the user's code and all of -// the -// various dependencies (libraries, data files, etc.) required in -// order -// for that code to run. +// run the steps of the Cloud Dataflow job that will be assigned to its +// worker pool. This is the mechanism by which the Cloud Dataflow SDK +// causes code to be loaded onto the workers. For example, the Cloud +// Dataflow Java SDK might use this to install jars containing the +// user's code and all of the various dependencies (libraries, data +// files, etc.) required in order for that code to run. type Package struct { // Location: The resource to read the package from. The supported - // resource type is: - // - // Google Cloud Storage: - // - // storage.googleapis.com/{bucket} - // bucket.storage.googleapis.com/ + // resource type is: Google Cloud Storage: + // storage.googleapis.com/{bucket} bucket.storage.googleapis.com/ Location string `json:"location,omitempty"` // Name: The name of the package. @@ -4131,16 +4278,15 @@ func (s *Package) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ParDoInstruction: An instruction that does a ParDo operation. -// Takes one main input and zero or more side inputs, and produces -// zero or more outputs. -// Runs user code. +// ParDoInstruction: An instruction that does a ParDo operation. Takes +// one main input and zero or more side inputs, and produces zero or +// more outputs. Runs user code. type ParDoInstruction struct { // Input: The input. Input *InstructionInput `json:"input,omitempty"` // MultiOutputInfos: Information about each of the outputs, if user_fn - // is a MultiDoFn. + // is a MultiDoFn. MultiOutputInfos []*MultiOutputInfo `json:"multiOutputInfos,omitempty"` // NumOutputs: The number of outputs. @@ -4201,8 +4347,8 @@ type ParallelInstruction struct { // Read: Additional information for Read instructions. Read *ReadInstruction `json:"read,omitempty"` - // SystemName: System-defined name of this operation. - // Unique across the workflow. + // SystemName: System-defined name of this operation. Unique across the + // workflow. SystemName string `json:"systemName,omitempty"` // Write: Additional information for Write instructions. @@ -4277,8 +4423,8 @@ type ParameterMetadata struct { // Name: Required. The name of the parameter. Name string `json:"name,omitempty"` - // ParamType: Optional. The type of the parameter. - // Used for selecting input picker. + // ParamType: Optional. The type of the parameter. Used for selecting + // input picker. // // Possible values: // "DEFAULT" - Default input type. @@ -4327,8 +4473,7 @@ func (s *ParameterMetadata) MarshalJSON() ([]byte, error) { } // PartialGroupByKeyInstruction: An instruction that does a partial -// group-by-key. -// One input and one output. +// group-by-key. One input and one output. type PartialGroupByKeyInstruction struct { // Input: Describes the input to the partial group-by-key instruction. Input *InstructionInput `json:"input,omitempty"` @@ -4338,13 +4483,13 @@ type PartialGroupByKeyInstruction struct { InputElementCodec googleapi.RawMessage `json:"inputElementCodec,omitempty"` // OriginalCombineValuesInputStoreName: If this instruction includes a - // combining function this is the name of the - // intermediate store between the GBK and the CombineValues. 
+ // combining function this is the name of the intermediate store between + // the GBK and the CombineValues. OriginalCombineValuesInputStoreName string `json:"originalCombineValuesInputStoreName,omitempty"` // OriginalCombineValuesStepName: If this instruction includes a - // combining function, this is the name of the - // CombineValues instruction lifted into this instruction. + // combining function, this is the name of the CombineValues instruction + // lifted into this instruction. OriginalCombineValuesStepName string `json:"originalCombineValuesStepName,omitempty"` // SideInputs: Zero or more side inputs. @@ -4377,10 +4522,9 @@ func (s *PartialGroupByKeyInstruction) MarshalJSON() ([]byte, error) { } // PipelineDescription: A descriptive representation of submitted -// pipeline as well as the executed -// form. This data is provided by the Dataflow service for ease of -// visualizing -// the pipeline and interpreting Dataflow provided metrics. +// pipeline as well as the executed form. This data is provided by the +// Dataflow service for ease of visualizing the pipeline and +// interpreting Dataflow provided metrics. type PipelineDescription struct { // DisplayData: Pipeline level display data. DisplayData []*DisplayData `json:"displayData,omitempty"` @@ -4416,10 +4560,53 @@ func (s *PipelineDescription) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Point: A point in the timeseries. +type Point struct { + // Time: The timestamp of the point. + Time string `json:"time,omitempty"` + + // Value: The value of the point. + Value float64 `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Time") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Time") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Point) MarshalJSON() ([]byte, error) { + type NoMethod Point + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *Point) UnmarshalJSON(data []byte) error { + type NoMethod Point + var s1 struct { + Value gensupport.JSONFloat64 `json:"value"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Value = float64(s1.Value) + return nil +} + // Position: Position defines a position within a collection of data. -// The value -// can be either the end position, a key (used with -// ordered +// The value can be either the end position, a key (used with ordered // collections), a byte offset, or a record index. type Position struct { // ByteOffset: Position is a byte offset. @@ -4428,8 +4615,7 @@ type Position struct { // ConcatPosition: CloudPosition is a concat position. 
ConcatPosition *ConcatPosition `json:"concatPosition,omitempty"` - // End: Position is past all other positions. Also useful for the - // end + // End: Position is past all other positions. Also useful for the end // position of an unbounded range. End bool `json:"end,omitempty"` @@ -4440,8 +4626,7 @@ type Position struct { RecordIndex int64 `json:"recordIndex,omitempty,string"` // ShufflePosition: CloudPosition is a base64 encoded - // BatchShufflePosition (with FIXED - // sharding). + // BatchShufflePosition (with FIXED sharding). ShufflePosition string `json:"shufflePosition,omitempty"` // ForceSendFields is a list of field names (e.g. "ByteOffset") to @@ -4467,6 +4652,55 @@ func (s *Position) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ProgressTimeseries: Information about the progress of some component +// of job execution. +type ProgressTimeseries struct { + // CurrentProgress: The current progress of the component, in the range + // [0,1]. + CurrentProgress float64 `json:"currentProgress,omitempty"` + + // DataPoints: History of progress for the component. Points are sorted + // by time. + DataPoints []*Point `json:"dataPoints,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CurrentProgress") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CurrentProgress") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ProgressTimeseries) MarshalJSON() ([]byte, error) { + type NoMethod ProgressTimeseries + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *ProgressTimeseries) UnmarshalJSON(data []byte) error { + type NoMethod ProgressTimeseries + var s1 struct { + CurrentProgress gensupport.JSONFloat64 `json:"currentProgress"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.CurrentProgress = float64(s1.CurrentProgress) + return nil +} + // PubSubIODetails: Metadata for a PubSub connector used by the job. type PubSubIODetails struct { // Subscription: Subscription used in the connection. @@ -4499,37 +4733,33 @@ func (s *PubSubIODetails) MarshalJSON() ([]byte, error) { } // PubsubLocation: Identifies a pubsub location to use for transferring -// data into or -// out of a streaming Dataflow job. +// data into or out of a streaming Dataflow job. type PubsubLocation struct { // DropLateData: Indicates whether the pipeline allows late-arriving // data. DropLateData bool `json:"dropLateData,omitempty"` // IdLabel: If set, contains a pubsub label from which to extract record - // ids. - // If left empty, record deduplication will be strictly best effort. + // ids. If left empty, record deduplication will be strictly best + // effort. 
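Because the new Point and ProgressTimeseries types above carry custom UnmarshalJSON hooks built on gensupport.JSONFloat64, their float fields decode through plain encoding/json. A sketch with an illustrative payload, assuming the google.golang.org/api/dataflow/v1b3 import path:

package main

import (
	"encoding/json"
	"fmt"

	dataflow "google.golang.org/api/dataflow/v1b3"
)

func main() {
	payload := []byte(`{
	  "currentProgress": 0.42,
	  "dataPoints": [
	    {"time": "2021-01-15T10:00:00Z", "value": 0.10},
	    {"time": "2021-01-15T10:05:00Z", "value": 0.42}
	  ]
	}`)

	var ts dataflow.ProgressTimeseries
	if err := json.Unmarshal(payload, &ts); err != nil {
		panic(err)
	}
	fmt.Println(ts.CurrentProgress, len(ts.DataPoints)) // 0.42 2
}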
IdLabel string `json:"idLabel,omitempty"` - // Subscription: A pubsub subscription, in the form - // of - // "pubsub.googleapis.com/subscriptions//" + // Subscription: A pubsub subscription, in the form of + // "pubsub.googleapis.com/subscriptions//" Subscription string `json:"subscription,omitempty"` // TimestampLabel: If set, contains a pubsub label from which to extract - // record timestamps. - // If left empty, record timestamps will be generated upon arrival. + // record timestamps. If left empty, record timestamps will be generated + // upon arrival. TimestampLabel string `json:"timestampLabel,omitempty"` - // Topic: A pubsub topic, in the form - // of - // "pubsub.googleapis.com/topics//" + // Topic: A pubsub topic, in the form of + // "pubsub.googleapis.com/topics//" Topic string `json:"topic,omitempty"` // TrackingSubscription: If set, specifies the pubsub subscription that - // will be used for tracking - // custom time timestamps for watermark estimation. + // will be used for tracking custom time timestamps for watermark + // estimation. TrackingSubscription string `json:"trackingSubscription,omitempty"` // WithAttributes: If true, then the client has requested to get pubsub @@ -4593,8 +4823,42 @@ func (s *PubsubSnapshotMetadata) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ReadInstruction: An instruction that reads records. -// Takes no inputs, produces one output. +// QueryInfo: Information about a validated query. +type QueryInfo struct { + // QueryProperty: Includes an entry for each satisfied QueryProperty. + // + // Possible values: + // "QUERY_PROPERTY_UNSPECIFIED" - The query property is unknown or + // unspecified. + // "HAS_UNBOUNDED_SOURCE" - Indicates this query reads from >= 1 + // unbounded source. + QueryProperty []string `json:"queryProperty,omitempty"` + + // ForceSendFields is a list of field names (e.g. "QueryProperty") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "QueryProperty") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *QueryInfo) MarshalJSON() ([]byte, error) { + type NoMethod QueryInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ReadInstruction: An instruction that reads records. Takes no inputs, +// produces one output. type ReadInstruction struct { // Source: The source to read from. Source *Source `json:"source,omitempty"` @@ -4628,11 +4892,9 @@ type ReportWorkItemStatusRequest struct { // CurrentWorkerTime: The current timestamp at the worker. CurrentWorkerTime string `json:"currentWorkerTime,omitempty"` - // Location: The [regional - // endpoint] - // (https://cloud.google.com/dataflow/docs/concepts/regional-en - // dpoints) that - // contains the WorkItem's job. 
+ // Location: The [regional endpoint] + // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) + // that contains the WorkItem's job. Location string `json:"location,omitempty"` // UnifiedWorkerRequest: Untranslated bag-of-bytes @@ -4640,18 +4902,14 @@ type ReportWorkItemStatusRequest struct { UnifiedWorkerRequest googleapi.RawMessage `json:"unifiedWorkerRequest,omitempty"` // WorkItemStatuses: The order is unimportant, except that the order of - // the - // WorkItemServiceState messages in the - // ReportWorkItemStatusResponse + // the WorkItemServiceState messages in the ReportWorkItemStatusResponse // corresponds to the order of WorkItemStatus messages here. WorkItemStatuses []*WorkItemStatus `json:"workItemStatuses,omitempty"` - // WorkerId: The ID of the worker reporting the WorkItem status. If - // this - // does not match the ID of the worker which the Dataflow - // service - // believes currently has the lease on the WorkItem, the report - // will be dropped (with an error response). + // WorkerId: The ID of the worker reporting the WorkItem status. If this + // does not match the ID of the worker which the Dataflow service + // believes currently has the lease on the WorkItem, the report will be + // dropped (with an error response). WorkerId string `json:"workerId,omitempty"` // ForceSendFields is a list of field names (e.g. "CurrentWorkerTime") @@ -4686,12 +4944,9 @@ type ReportWorkItemStatusResponse struct { UnifiedWorkerResponse googleapi.RawMessage `json:"unifiedWorkerResponse,omitempty"` // WorkItemServiceStates: A set of messages indicating the service-side - // state for each - // WorkItem whose status was reported, in the same order as - // the - // WorkItemStatus messages in the ReportWorkItemStatusRequest - // which - // resulting in this response. + // state for each WorkItem whose status was reported, in the same order + // as the WorkItemStatus messages in the ReportWorkItemStatusRequest + // which resulting in this response. WorkItemServiceStates []*WorkItemServiceState `json:"workItemServiceStates,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -4724,19 +4979,13 @@ func (s *ReportWorkItemStatusResponse) MarshalJSON() ([]byte, error) { } // ReportedParallelism: Represents the level of parallelism in a -// WorkItem's input, -// reported by the worker. +// WorkItem's input, reported by the worker. type ReportedParallelism struct { // IsInfinite: Specifies whether the parallelism is infinite. If true, - // "value" is - // ignored. - // Infinite parallelism means the service will assume that the work - // item - // can always be split into more non-empty work items by dynamic - // splitting. - // This is a work-around for lack of support for infinity by the - // current - // JSON-based Java RPC stack. + // "value" is ignored. Infinite parallelism means the service will + // assume that the work item can always be split into more non-empty + // work items by dynamic splitting. This is a work-around for lack of + // support for infinity by the current JSON-based Java RPC stack. IsInfinite bool `json:"isInfinite,omitempty"` // Value: Specifies the level of parallelism in case it is finite. @@ -4780,13 +5029,10 @@ func (s *ReportedParallelism) UnmarshalJSON(data []byte) error { } // ResourceUtilizationReport: Worker metrics exported from workers. This -// contains resource utilization -// metrics accumulated from a variety of sources. For more information, -// see -// go/df-resource-signals. 
+// contains resource utilization metrics accumulated from a variety of +// sources. For more information, see go/df-resource-signals. type ResourceUtilizationReport struct { - // Containers: Per container information. - // Key: container name. + // Containers: Per container information. Key: container name. Containers map[string]ResourceUtilizationReport `json:"containers,omitempty"` // CpuTime: CPU utilization samples. @@ -4829,19 +5075,21 @@ type RuntimeEnvironment struct { AdditionalExperiments []string `json:"additionalExperiments,omitempty"` // AdditionalUserLabels: Additional user labels to be specified for the - // job. - // Keys and values should follow the restrictions specified in the + // job. Keys and values should follow the restrictions specified in the // [labeling - // restrictions](https://cloud.google.com/compute/docs/labeling - // -resources#restrictions) - // page. + // restrictions](https://cloud.google.com/compute/docs/labeling-resources + // #restrictions) page. An object containing a list of "key": value + // pairs. Example: { "name": "wrench", "mass": "1kg", "count": "3" }. AdditionalUserLabels map[string]string `json:"additionalUserLabels,omitempty"` // BypassTempDirValidation: Whether to bypass the safety checks for the - // job's temporary directory. - // Use with caution. + // job's temporary directory. Use with caution. BypassTempDirValidation bool `json:"bypassTempDirValidation,omitempty"` + // EnableStreamingEngine: Whether to enable Streaming Engine for the + // job. + EnableStreamingEngine bool `json:"enableStreamingEngine,omitempty"` + // IpConfiguration: Configuration for VM IPs. // // Possible values: @@ -4851,26 +5099,20 @@ type RuntimeEnvironment struct { // "WORKER_IP_PRIVATE" - Workers should have private IP addresses. IpConfiguration string `json:"ipConfiguration,omitempty"` - // KmsKeyName: Optional. Name for the Cloud KMS key for the job. - // Key format - // is: - // projects//locations//keyRings//cryptoK - // eys/ + // KmsKeyName: Name for the Cloud KMS key for the job. Key format is: + // projects//locations//keyRings//cryptoKeys/ KmsKeyName string `json:"kmsKeyName,omitempty"` // MachineType: The machine type to use for the job. Defaults to the - // value from the - // template if not specified. + // value from the template if not specified. MachineType string `json:"machineType,omitempty"` // MaxWorkers: The maximum number of Google Compute Engine instances to - // be made - // available to your pipeline during execution, from 1 to 1000. + // be made available to your pipeline during execution, from 1 to 1000. MaxWorkers int64 `json:"maxWorkers,omitempty"` - // Network: Network to which VMs will be assigned. If empty or - // unspecified, - // the service will use the network "default". + // Network: Network to which VMs will be assigned. If empty or + // unspecified, the service will use the network "default". Network string `json:"network,omitempty"` // NumWorkers: The initial number of Google Compute Engine instnaces for @@ -4881,46 +5123,39 @@ type RuntimeEnvironment struct { // the job as. ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"` - // Subnetwork: Subnetwork to which VMs will be assigned, if desired. - // Expected to be of - // the form "regions/REGION/subnetworks/SUBNETWORK". + // Subnetwork: Subnetwork to which VMs will be assigned, if desired. You + // can specify a subnetwork using either a complete URL or an + // abbreviated path. 
Expected to be of the form + // "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/region + // s/REGION/subnetworks/SUBNETWORK" or + // "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located + // in a Shared VPC network, you must use the complete URL. Subnetwork string `json:"subnetwork,omitempty"` - // TempLocation: The Cloud Storage path to use for temporary files. - // Must be a valid Cloud Storage URL, beginning with `gs://`. + // TempLocation: The Cloud Storage path to use for temporary files. Must + // be a valid Cloud Storage URL, beginning with `gs://`. TempLocation string `json:"tempLocation,omitempty"` - // WorkerRegion: The Compute Engine - // region - // (https://cloud.google.com/compute/docs/regions-zones/regions-zo - // nes) in - // which worker processing should occur, e.g. "us-west1". Mutually - // exclusive - // with worker_zone. If neither worker_region nor worker_zone is - // specified, - // default to the control plane's region. + // WorkerRegion: The Compute Engine region + // (https://cloud.google.com/compute/docs/regions-zones/regions-zones) + // in which worker processing should occur, e.g. "us-west1". Mutually + // exclusive with worker_zone. If neither worker_region nor worker_zone + // is specified, default to the control plane's region. WorkerRegion string `json:"workerRegion,omitempty"` - // WorkerZone: The Compute Engine - // zone - // (https://cloud.google.com/compute/docs/regions-zones/regions-zone - // s) in - // which worker processing should occur, e.g. "us-west1-a". Mutually - // exclusive - // with worker_region. If neither worker_region nor worker_zone is - // specified, - // a zone in the control plane's region is chosen based on available - // capacity. - // If both `worker_zone` and `zone` are set, `worker_zone` takes - // precedence. + // WorkerZone: The Compute Engine zone + // (https://cloud.google.com/compute/docs/regions-zones/regions-zones) + // in which worker processing should occur, e.g. "us-west1-a". Mutually + // exclusive with worker_region. If neither worker_region nor + // worker_zone is specified, a zone in the control plane's region is + // chosen based on available capacity. If both `worker_zone` and `zone` + // are set, `worker_zone` takes precedence. WorkerZone string `json:"workerZone,omitempty"` - // Zone: The Compute Engine - // [availability - // zone](https://cloud.google.com/compute/docs/regions-zone - // s/regions-zones) - // for launching worker instances to run your pipeline. - // In the future, worker_zone will take precedence. + // Zone: The Compute Engine [availability + // zone](https://cloud.google.com/compute/docs/regions-zones/regions-zone + // s) for launching worker instances to run your pipeline. In the + // future, worker_zone will take precedence. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -5023,14 +5258,10 @@ type SdkHarnessContainerImage struct { ContainerImage string `json:"containerImage,omitempty"` // UseSingleCorePerContainer: If true, recommends the Dataflow service - // to use only one core per SDK - // container instance with this image. If false (or unset) recommends - // using - // more than one core per SDK container instance with this image - // for - // efficiency. Note that Dataflow service may choose to override this - // property - // if needed. + // to use only one core per SDK container instance with this image. 
If + // false (or unset) recommends using more than one core per SDK + // container instance with this image for efficiency. Note that Dataflow + // service may choose to override this property if needed. UseSingleCorePerContainer bool `json:"useSingleCorePerContainer,omitempty"` // ForceSendFields is a list of field names (e.g. "ContainerImage") to @@ -5067,8 +5298,7 @@ type SdkVersion struct { // "STALE" - A newer version of the SDK family exists, and an update // is recommended. // "DEPRECATED" - This version of the SDK is deprecated and will - // eventually be no - // longer supported. + // eventually be no longer supported. // "UNSUPPORTED" - Support for this SDK version has ended and it // should no longer be used. SdkSupportStatus string `json:"sdkSupportStatus,omitempty"` @@ -5113,11 +5343,9 @@ type SendDebugCaptureRequest struct { // Data: The encoded debug information. Data string `json:"data,omitempty"` - // Location: The [regional - // endpoint] - // (https://cloud.google.com/dataflow/docs/concepts/regional-en - // dpoints) that - // contains the job specified by job_id. + // Location: The [regional endpoint] + // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) + // that contains the job specified by job_id. Location string `json:"location,omitempty"` // WorkerId: The worker id, i.e., VM hostname. @@ -5146,8 +5374,7 @@ func (s *SendDebugCaptureRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SendDebugCaptureResponse: Response to a send capture request. -// nothing +// SendDebugCaptureResponse: Response to a send capture request. nothing type SendDebugCaptureResponse struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -5157,11 +5384,9 @@ type SendDebugCaptureResponse struct { // SendWorkerMessagesRequest: A request for sending worker messages to // the service. type SendWorkerMessagesRequest struct { - // Location: The [regional - // endpoint] - // (https://cloud.google.com/dataflow/docs/concepts/regional-en - // dpoints) that - // contains the job. + // Location: The [regional endpoint] + // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) + // that contains the job. Location string `json:"location,omitempty"` // WorkerMessages: The WorkerMessages to send. @@ -5236,12 +5461,11 @@ type SeqMapTask struct { OutputInfos []*SeqMapTaskOutputInfo `json:"outputInfos,omitempty"` // StageName: System-defined name of the stage containing the SeqDo - // operation. - // Unique across the workflow. + // operation. Unique across the workflow. StageName string `json:"stageName,omitempty"` - // SystemName: System-defined name of the SeqDo operation. - // Unique across the workflow. + // SystemName: System-defined name of the SeqDo operation. Unique across + // the workflow. SystemName string `json:"systemName,omitempty"` // UserFn: The user function to invoke. @@ -5341,15 +5565,12 @@ type SideInputInfo struct { Kind googleapi.RawMessage `json:"kind,omitempty"` // Sources: The source(s) to read element(s) from to get the value of - // this side input. - // If more than one source, then the elements are taken from - // the - // sources, in the specified order if order matters. - // At least one source is required. + // this side input. If more than one source, then the elements are taken + // from the sources, in the specified order if order matters. At least + // one source is required. 
Sources []*Source `json:"sources,omitempty"` - // Tag: The id of the tag the user code will access this side input - // by; + // Tag: The id of the tag the user code will access this side input by; // this should correspond to the tag of some MultiOutputInfo. Tag string `json:"tag,omitempty"` @@ -5416,8 +5637,7 @@ type Snapshot struct { Description string `json:"description,omitempty"` // DiskSizeBytes: The disk byte size of the snapshot. Only available for - // snapshots in READY - // state. + // snapshots in READY state. DiskSizeBytes int64 `json:"diskSizeBytes,omitempty,string"` // Id: The unique ID of this snapshot. @@ -5437,8 +5657,7 @@ type Snapshot struct { // Possible values: // "UNKNOWN_SNAPSHOT_STATE" - Unknown state. // "PENDING" - Snapshot intent to create has been persisted, - // snapshotting of state has not - // yet started. + // snapshotting of state has not yet started. // "RUNNING" - Snapshotting is being performed. // "READY" - Snapshot has been created and is ready to be used. // "FAILED" - Snapshot failed to be created. @@ -5516,55 +5735,39 @@ func (s *SnapshotJobRequest) MarshalJSON() ([]byte, error) { // Source: A source that records can be read and decoded from. type Source struct { - // BaseSpecs: While splitting, sources may specify the produced - // bundles - // as differences against another source, in order to save - // backend-side - // memory and allow bigger jobs. For details, see SourceSplitRequest. - // To support this use case, the full set of parameters of the source - // is logically obtained by taking the latest explicitly specified - // value - // of each parameter in the order: - // base_specs (later items win), spec (overrides anything in - // base_specs). + // BaseSpecs: While splitting, sources may specify the produced bundles + // as differences against another source, in order to save backend-side + // memory and allow bigger jobs. For details, see SourceSplitRequest. To + // support this use case, the full set of parameters of the source is + // logically obtained by taking the latest explicitly specified value of + // each parameter in the order: base_specs (later items win), spec + // (overrides anything in base_specs). BaseSpecs []googleapi.RawMessage `json:"baseSpecs,omitempty"` // Codec: The codec to use to decode data read from the source. Codec googleapi.RawMessage `json:"codec,omitempty"` // DoesNotNeedSplitting: Setting this value to true hints to the - // framework that the source - // doesn't need splitting, and using SourceSplitRequest on it - // would - // yield SOURCE_SPLIT_OUTCOME_USE_CURRENT. - // - // E.g. a file splitter may set this to true when splitting a single - // file - // into a set of byte ranges of appropriate size, and set this - // to false when splitting a filepattern into individual files. - // However, for efficiency, a file splitter may decide to produce - // file subranges directly from the filepattern to avoid a - // splitting - // round-trip. - // - // See SourceSplitRequest for an overview of the splitting - // process. - // - // This field is meaningful only in the Source objects populated - // by the user (e.g. when filling in a DerivedSource). - // Source objects supplied by the framework to the user don't have - // this field populated. + // framework that the source doesn't need splitting, and using + // SourceSplitRequest on it would yield + // SOURCE_SPLIT_OUTCOME_USE_CURRENT. E.g. 
a file splitter may set this + // to true when splitting a single file into a set of byte ranges of + // appropriate size, and set this to false when splitting a filepattern + // into individual files. However, for efficiency, a file splitter may + // decide to produce file subranges directly from the filepattern to + // avoid a splitting round-trip. See SourceSplitRequest for an overview + // of the splitting process. This field is meaningful only in the Source + // objects populated by the user (e.g. when filling in a DerivedSource). + // Source objects supplied by the framework to the user don't have this + // field populated. DoesNotNeedSplitting bool `json:"doesNotNeedSplitting,omitempty"` // Metadata: Optionally, metadata for this source can be supplied right - // away, - // avoiding a SourceGetMetadataOperation roundtrip - // (see SourceOperationRequest). - // - // This field is meaningful only in the Source objects populated - // by the user (e.g. when filling in a DerivedSource). - // Source objects supplied by the framework to the user don't have - // this field populated. + // away, avoiding a SourceGetMetadataOperation roundtrip (see + // SourceOperationRequest). This field is meaningful only in the Source + // objects populated by the user (e.g. when filling in a DerivedSource). + // Source objects supplied by the framework to the user don't have this + // field populated. Metadata *SourceMetadata `json:"metadata,omitempty"` // Spec: The source to read from, plus its parameters. @@ -5690,25 +5893,20 @@ func (s *SourceGetMetadataResponse) MarshalJSON() ([]byte, error) { } // SourceMetadata: Metadata about a Source useful for automatically -// optimizing -// and tuning the pipeline, etc. +// optimizing and tuning the pipeline, etc. type SourceMetadata struct { // EstimatedSizeBytes: An estimate of the total size (in bytes) of the - // data that would be - // read from this source. This estimate is in terms of external - // storage - // size, before any decompression or other processing done by the - // reader. + // data that would be read from this source. This estimate is in terms + // of external storage size, before any decompression or other + // processing done by the reader. EstimatedSizeBytes int64 `json:"estimatedSizeBytes,omitempty,string"` // Infinite: Specifies that the size of this source is known to be - // infinite - // (this is a streaming source). + // infinite (this is a streaming source). Infinite bool `json:"infinite,omitempty"` // ProducesSortedKeys: Whether this source is known to produce key/value - // pairs with - // the (encoded) keys in lexicographically sorted order. + // pairs with the (encoded) keys in lexicographically sorted order. ProducesSortedKeys bool `json:"producesSortedKeys,omitempty"` // ForceSendFields is a list of field names (e.g. "EstimatedSizeBytes") @@ -5736,8 +5934,8 @@ func (s *SourceMetadata) MarshalJSON() ([]byte, error) { } // SourceOperationRequest: A work item that represents the different -// operations that can be -// performed on a user-defined Source specification. +// operations that can be performed on a user-defined Source +// specification. type SourceOperationRequest struct { // GetMetadata: Information about a request to get metadata about a // source. @@ -5747,21 +5945,18 @@ type SourceOperationRequest struct { Name string `json:"name,omitempty"` // OriginalName: System-defined name for the Read instruction for this - // source - // in the original workflow graph. + // source in the original workflow graph. 
OriginalName string `json:"originalName,omitempty"` // Split: Information about a request to split a source. Split *SourceSplitRequest `json:"split,omitempty"` // StageName: System-defined name of the stage containing the source - // operation. - // Unique across the workflow. + // operation. Unique across the workflow. StageName string `json:"stageName,omitempty"` // SystemName: System-defined name of the Read instruction for this - // source. - // Unique across the workflow. + // source. Unique across the workflow. SystemName string `json:"systemName,omitempty"` // ForceSendFields is a list of field names (e.g. "GetMetadata") to @@ -5788,9 +5983,8 @@ func (s *SourceOperationRequest) MarshalJSON() ([]byte, error) { } // SourceOperationResponse: The result of a SourceOperationRequest, -// specified in -// ReportWorkItemStatusRequest.source_operation when the work item -// is completed. +// specified in ReportWorkItemStatusRequest.source_operation when the +// work item is completed. type SourceOperationResponse struct { // GetMetadata: A response to a request to get metadata about a source. GetMetadata *SourceGetMetadataResponse `json:"getMetadata,omitempty"` @@ -5822,12 +6016,11 @@ func (s *SourceOperationResponse) MarshalJSON() ([]byte, error) { } // SourceSplitOptions: Hints for splitting a Source into bundles (parts -// for parallel -// processing) using SourceSplitRequest. +// for parallel processing) using SourceSplitRequest. type SourceSplitOptions struct { // DesiredBundleSizeBytes: The source should be split into a set of - // bundles where the estimated size - // of each is approximately this many bytes. + // bundles where the estimated size of each is approximately this many + // bytes. DesiredBundleSizeBytes int64 `json:"desiredBundleSizeBytes,omitempty,string"` // DesiredShardSizeBytes: DEPRECATED in favor of @@ -5860,28 +6053,17 @@ func (s *SourceSplitOptions) MarshalJSON() ([]byte, error) { } // SourceSplitRequest: Represents the operation to split a high-level -// Source specification -// into bundles (parts for parallel processing). -// -// At a high level, splitting of a source into bundles happens as -// follows: -// SourceSplitRequest is applied to the source. If it -// returns +// Source specification into bundles (parts for parallel processing). At +// a high level, splitting of a source into bundles happens as follows: +// SourceSplitRequest is applied to the source. If it returns // SOURCE_SPLIT_OUTCOME_USE_CURRENT, no further splitting happens and -// the source -// is used "as is". Otherwise, splitting is applied recursively to -// each -// produced DerivedSource. -// -// As an optimization, for any Source, if its does_not_need_splitting -// is -// true, the framework assumes that splitting this source would -// return +// the source is used "as is". Otherwise, splitting is applied +// recursively to each produced DerivedSource. As an optimization, for +// any Source, if its does_not_need_splitting is true, the framework +// assumes that splitting this source would return // SOURCE_SPLIT_OUTCOME_USE_CURRENT, and doesn't initiate a -// SourceSplitRequest. -// This applies both to the initial source being split and to -// bundles -// produced from it. +// SourceSplitRequest. This applies both to the initial source being +// split and to bundles produced from it. type SourceSplitRequest struct { // Options: Hints for tuning the splitting process. 
Options *SourceSplitOptions `json:"options,omitempty"` @@ -5915,21 +6097,16 @@ func (s *SourceSplitRequest) MarshalJSON() ([]byte, error) { // SourceSplitResponse: The response to a SourceSplitRequest. type SourceSplitResponse struct { // Bundles: If outcome is SPLITTING_HAPPENED, then this is a list of - // bundles - // into which the source was split. Otherwise this field is - // ignored. - // This list can be empty, which means the source represents an empty - // input. + // bundles into which the source was split. Otherwise this field is + // ignored. This list can be empty, which means the source represents an + // empty input. Bundles []*DerivedSource `json:"bundles,omitempty"` // Outcome: Indicates whether splitting happened and produced a list of - // bundles. - // If this is USE_CURRENT_SOURCE_AS_IS, the current source should - // be processed "as is" without splitting. "bundles" is ignored in this - // case. - // If this is SPLITTING_HAPPENED, then "bundles" contains a list - // of - // bundles into which the source was split. + // bundles. If this is USE_CURRENT_SOURCE_AS_IS, the current source + // should be processed "as is" without splitting. "bundles" is ignored + // in this case. If this is SPLITTING_HAPPENED, then "bundles" contains + // a list of bundles into which the source was split. // // Possible values: // "SOURCE_SPLIT_OUTCOME_UNKNOWN" - The source split outcome is @@ -6043,8 +6220,7 @@ func (s *SpannerIODetails) MarshalJSON() ([]byte, error) { } // SplitInt64: A representation of an int64, n, that is immune to -// precision loss when -// encoded in JSON. +// precision loss when encoded in JSON. type SplitInt64 struct { // HighBits: The high order bits, including the sign: n >> 32. HighBits int64 `json:"highBits,omitempty"` @@ -6075,17 +6251,55 @@ func (s *SplitInt64) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// StageSource: Description of an input or output of an execution stage. -type StageSource struct { - // Name: Dataflow service generated name for this source. - Name string `json:"name,omitempty"` +// StageExecutionDetails: Information about the workers and work items +// within a stage. +type StageExecutionDetails struct { + // NextPageToken: If present, this response does not contain all + // requested tasks. To obtain the next page of results, repeat the + // request with page_token set to this value. + NextPageToken string `json:"nextPageToken,omitempty"` - // OriginalTransformOrCollection: User name for the original user - // transform or collection with which this - // source is most closely associated. - OriginalTransformOrCollection string `json:"originalTransformOrCollection,omitempty"` + // Workers: Workers that have done work on the stage. + Workers []*WorkerDetails `json:"workers,omitempty"` - // SizeBytes: Size of the source, if measurable. + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *StageExecutionDetails) MarshalJSON() ([]byte, error) { + type NoMethod StageExecutionDetails + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// StageSource: Description of an input or output of an execution stage. +type StageSource struct { + // Name: Dataflow service generated name for this source. + Name string `json:"name,omitempty"` + + // OriginalTransformOrCollection: User name for the original user + // transform or collection with which this source is most closely + // associated. + OriginalTransformOrCollection string `json:"originalTransformOrCollection,omitempty"` + + // SizeBytes: Size of the source, if measurable. SizeBytes int64 `json:"sizeBytes,omitempty,string"` // UserName: Human-readable name for this source; may be user or system @@ -6115,6 +6329,62 @@ func (s *StageSource) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// StageSummary: Information about a particular execution stage of a +// job. +type StageSummary struct { + // EndTime: End time of this stage. If the work item is completed, this + // is the actual end time of the stage. Otherwise, it is the predicted + // end time. + EndTime string `json:"endTime,omitempty"` + + // Metrics: Metrics for this stage. + Metrics []*MetricUpdate `json:"metrics,omitempty"` + + // Progress: Progress for this stage. Only applicable to Batch jobs. + Progress *ProgressTimeseries `json:"progress,omitempty"` + + // StageId: ID of this stage + StageId string `json:"stageId,omitempty"` + + // StartTime: Start time of this stage. + StartTime string `json:"startTime,omitempty"` + + // State: State of this stage. + // + // Possible values: + // "EXECUTION_STATE_UNKNOWN" - The component state is unknown or + // unspecified. + // "EXECUTION_STATE_NOT_STARTED" - The component is not yet running. + // "EXECUTION_STATE_RUNNING" - The component is currently running. + // "EXECUTION_STATE_SUCCEEDED" - The component succeeded. + // "EXECUTION_STATE_FAILED" - The component failed. + // "EXECUTION_STATE_CANCELLED" - Execution of the component was + // cancelled. + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *StageSummary) MarshalJSON() ([]byte, error) { + type NoMethod StageSummary + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // StateFamilyConfig: State family configuration. type StateFamilyConfig struct { // IsRead: If true, this family corresponds to a read operation. @@ -6147,32 +6417,24 @@ func (s *StateFamilyConfig) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. -// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. -// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -6198,48 +6460,30 @@ func (s *Status) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Step: Defines a particular step within a Cloud Dataflow job. -// -// A job consists of multiple steps, each of which performs -// some -// specific operation as part of the overall job. Data is -// typically -// passed from one step to another as part of the job. -// -// Here's an example of a sequence of steps which together implement -// a -// Map-Reduce job: -// -// * Read a collection of data from some source, parsing the -// collection's elements. -// -// * Validate the elements. -// -// * Apply a user-defined function to map each element to some value -// and extract an element-specific key value. -// -// * Group elements with the same key into a single element with -// that key, transforming a multiply-keyed collection into a -// uniquely-keyed collection. -// -// * Write the elements out to some data sink. -// -// Note that the Cloud Dataflow service may be used to run many -// different -// types of jobs, not just Map-Reduce. +// Step: Defines a particular step within a Cloud Dataflow job. A job +// consists of multiple steps, each of which performs some specific +// operation as part of the overall job. 
Data is typically passed from +// one step to another as part of the job. Here's an example of a +// sequence of steps which together implement a Map-Reduce job: * Read a +// collection of data from some source, parsing the collection's +// elements. * Validate the elements. * Apply a user-defined function to +// map each element to some value and extract an element-specific key +// value. * Group elements with the same key into a single element with +// that key, transforming a multiply-keyed collection into a +// uniquely-keyed collection. * Write the elements out to some data +// sink. Note that the Cloud Dataflow service may be used to run many +// different types of jobs, not just Map-Reduce. type Step struct { // Kind: The kind of step in the Cloud Dataflow job. Kind string `json:"kind,omitempty"` - // Name: The name that identifies the step. This must be unique for - // each + // Name: The name that identifies the step. This must be unique for each // step with respect to all other steps in the Cloud Dataflow job. Name string `json:"name,omitempty"` - // Properties: Named properties associated with the step. Each kind - // of - // predefined step has its own required set of properties. - // Must be provided on Create. Only retrieved with JOB_VIEW_ALL. + // Properties: Named properties associated with the step. Each kind of + // predefined step has its own required set of properties. Must be + // provided on Create. Only retrieved with JOB_VIEW_ALL. Properties googleapi.RawMessage `json:"properties,omitempty"` // ForceSendFields is a list of field names (e.g. "Kind") to @@ -6266,8 +6510,7 @@ func (s *Step) MarshalJSON() ([]byte, error) { } // StreamLocation: Describes a stream of data, either as input to be -// processed or as -// output of a streaming Dataflow job. +// processed or as output of a streaming Dataflow job. type StreamLocation struct { // CustomSourceLocation: The stream is a custom source. CustomSourceLocation *CustomSourceLocation `json:"customSourceLocation,omitempty"` @@ -6279,8 +6522,7 @@ type StreamLocation struct { SideInputLocation *StreamingSideInputLocation `json:"sideInputLocation,omitempty"` // StreamingStageLocation: The stream is part of another computation - // within the current - // streaming Dataflow job. + // within the current streaming Dataflow job. StreamingStageLocation *StreamingStageLocation `json:"streamingStageLocation,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -6359,8 +6601,7 @@ type StreamingComputationConfig struct { SystemName string `json:"systemName,omitempty"` // TransformUserNameToStateFamily: Map from user name of stateful - // transforms in this stage to their state - // family. + // transforms in this stage to their state family. TransformUserNameToStateFamily map[string]string `json:"transformUserNameToStateFamily,omitempty"` // ForceSendFields is a list of field names (e.g. "ComputationId") to @@ -6387,8 +6628,7 @@ func (s *StreamingComputationConfig) MarshalJSON() ([]byte, error) { } // StreamingComputationRanges: Describes full or partial data disk -// assignment information of the computation -// ranges. +// assignment information of the computation ranges. type StreamingComputationRanges struct { // ComputationId: The ID of the computation. 
ComputationId string `json:"computationId,omitempty"` @@ -6421,8 +6661,7 @@ func (s *StreamingComputationRanges) MarshalJSON() ([]byte, error) { } // StreamingComputationTask: A task which describes what action should -// be performed for the specified -// streaming computation ranges. +// be performed for the specified streaming computation ranges. type StreamingComputationTask struct { // ComputationRanges: Contains ranges of a streaming computation this // task should apply to. @@ -6490,17 +6729,13 @@ type StreamingConfigTask struct { UserStepToStateFamilyNameMap map[string]string `json:"userStepToStateFamilyNameMap,omitempty"` // WindmillServiceEndpoint: If present, the worker must use this - // endpoint to communicate with Windmill - // Service dispatchers, otherwise the worker must continue to use - // whatever - // endpoint it had been using. + // endpoint to communicate with Windmill Service dispatchers, otherwise + // the worker must continue to use whatever endpoint it had been using. WindmillServiceEndpoint string `json:"windmillServiceEndpoint,omitempty"` // WindmillServicePort: If present, the worker must use this port to - // communicate with Windmill - // Service dispatchers. Only applicable when windmill_service_endpoint - // is - // specified. + // communicate with Windmill Service dispatchers. Only applicable when + // windmill_service_endpoint is specified. WindmillServicePort int64 `json:"windmillServicePort,omitempty,string"` // ForceSendFields is a list of field names (e.g. @@ -6535,8 +6770,7 @@ type StreamingSetupTask struct { Drain bool `json:"drain,omitempty"` // ReceiveWorkPort: The TCP port on which the worker should listen for - // messages from - // other streaming computation workers. + // messages from other streaming computation workers. ReceiveWorkPort int64 `json:"receiveWorkPort,omitempty"` // SnapshotConfig: Configures streaming appliance snapshot. @@ -6547,8 +6781,7 @@ type StreamingSetupTask struct { StreamingComputationTopology *TopologyConfig `json:"streamingComputationTopology,omitempty"` // WorkerHarnessPort: The TCP port used by the worker to communicate - // with the Dataflow - // worker harness. + // with the Dataflow worker harness. WorkerHarnessPort int64 `json:"workerHarnessPort,omitempty"` // ForceSendFields is a list of field names (e.g. "Drain") to @@ -6609,12 +6842,10 @@ func (s *StreamingSideInputLocation) MarshalJSON() ([]byte, error) { } // StreamingStageLocation: Identifies the location of a streaming -// computation stage, for -// stage-to-stage communication. +// computation stage, for stage-to-stage communication. type StreamingStageLocation struct { // StreamId: Identifies the particular stream within the streaming - // Dataflow - // job. + // Dataflow job. StreamId string `json:"streamId,omitempty"` // ForceSendFields is a list of field names (e.g. "StreamId") to @@ -6669,14 +6900,11 @@ func (s *StringList) MarshalJSON() ([]byte, error) { } // StructuredMessage: A rich message format, including a human readable -// string, a key for -// identifying the message, and structured data associated with the -// message for -// programmatic consumption. +// string, a key for identifying the message, and structured data +// associated with the message for programmatic consumption. type StructuredMessage struct { - // MessageKey: Identifier for this message type. Used by external - // systems to - // internationalize or personalize message. + // MessageKey: Identifier for this message type. 
Used by external + // systems to internationalize or personalize message. MessageKey string `json:"messageKey,omitempty"` // MessageText: Human-readable version of message. @@ -6718,18 +6946,12 @@ type TaskRunnerSettings struct { BaseTaskDir string `json:"baseTaskDir,omitempty"` // BaseUrl: The base URL for the taskrunner to use when accessing Google - // Cloud APIs. - // - // When workers access Google Cloud APIs, they logically do so - // via - // relative URLs. If this field is specified, it supplies the base - // URL to use for resolving these relative URLs. The - // normative - // algorithm used is defined by RFC 1808, "Relative Uniform - // Resource - // Locators". - // - // If not specified, the default value is "http://www.googleapis.com/" + // Cloud APIs. When workers access Google Cloud APIs, they logically do + // so via relative URLs. If this field is specified, it supplies the + // base URL to use for resolving these relative URLs. The normative + // algorithm used is defined by RFC 1808, "Relative Uniform Resource + // Locators". If not specified, the default value is + // "http://www.googleapis.com/" BaseUrl string `json:"baseUrl,omitempty"` // CommandlinesFileName: The file to store preprocessing commands in. @@ -6752,24 +6974,17 @@ type TaskRunnerSettings struct { LogDir string `json:"logDir,omitempty"` // LogToSerialconsole: Whether to send taskrunner log info to Google - // Compute Engine VM serial - // console. + // Compute Engine VM serial console. LogToSerialconsole bool `json:"logToSerialconsole,omitempty"` - // LogUploadLocation: Indicates where to put logs. If this is not - // specified, the logs - // will not be uploaded. - // - // The supported resource type is: - // - // Google Cloud Storage: - // storage.googleapis.com/{bucket}/{object} - // bucket.storage.googleapis.com/{object} + // LogUploadLocation: Indicates where to put logs. If this is not + // specified, the logs will not be uploaded. The supported resource type + // is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} + // bucket.storage.googleapis.com/{object} LogUploadLocation string `json:"logUploadLocation,omitempty"` // OauthScopes: The OAuth2 scopes to be requested by the taskrunner in - // order to - // access the Cloud Dataflow API. + // order to access the Cloud Dataflow API. OauthScopes []string `json:"oauthScopes,omitempty"` // ParallelWorkerSettings: The settings to pass to the parallel worker @@ -6780,24 +6995,17 @@ type TaskRunnerSettings struct { StreamingWorkerMainClass string `json:"streamingWorkerMainClass,omitempty"` // TaskGroup: The UNIX group ID on the worker VM to use for tasks - // launched by - // taskrunner; e.g. "wheel". + // launched by taskrunner; e.g. "wheel". TaskGroup string `json:"taskGroup,omitempty"` // TaskUser: The UNIX user ID on the worker VM to use for tasks launched - // by - // taskrunner; e.g. "root". + // by taskrunner; e.g. "root". TaskUser string `json:"taskUser,omitempty"` // TempStoragePrefix: The prefix of the resources the taskrunner should - // use for - // temporary storage. - // - // The supported resource type is: - // - // Google Cloud Storage: - // storage.googleapis.com/{bucket}/{object} - // bucket.storage.googleapis.com/{object} + // use for temporary storage. The supported resource type is: Google + // Cloud Storage: storage.googleapis.com/{bucket}/{object} + // bucket.storage.googleapis.com/{object} TempStoragePrefix string `json:"tempStoragePrefix,omitempty"` // VmId: The ID string of the VM. 
@@ -6864,9 +7072,83 @@ func (s *TemplateMetadata) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// TemplateVersion: +// ////////////////////////////////////////////////////////////////////// +// /////// //// Template Catalog is used to organize user +// TemplateVersions. //// TemplateVersions that have the same project_id +// and display_name are //// belong to the same Template. //// Templates +// with the same project_id belong to the same Project. //// +// TemplateVersion may have labels and multiple labels are allowed. //// +// Duplicated labels in the same `TemplateVersion` are not allowed. //// +// TemplateVersion may have tags and multiple tags are allowed. +// Duplicated //// tags in the same `Template` are not allowed! +type TemplateVersion struct { + // Artifact: Job graph and metadata if it is a legacy Template. + // Container image path and metadata if it is flex Template. + Artifact *Artifact `json:"artifact,omitempty"` + + // CreateTime: Creation time of this TemplateVersion. + CreateTime string `json:"createTime,omitempty"` + + // Description: Template description from the user. + Description string `json:"description,omitempty"` + + // DisplayName: A customized name for Template. Multiple + // TemplateVersions per Template. + DisplayName string `json:"displayName,omitempty"` + + // Labels: Labels for the Template Version. Labels can be duplicate + // within Template. + Labels map[string]string `json:"labels,omitempty"` + + // ProjectId: A unique project_id. Multiple Templates per Project. + ProjectId string `json:"projectId,omitempty"` + + // Tags: Alias for version_id, helps locate a TemplateVersion. + Tags []string `json:"tags,omitempty"` + + // Type: Either LEGACY or FLEX. This should match with the type of + // artifact. + // + // Possible values: + // "TEMPLATE_TYPE_UNSPECIFIED" - Default value. Not a useful zero + // case. + // "LEGACY" - Legacy Template. + // "FLEX" - Flex Template. + Type string `json:"type,omitempty"` + + // VersionId: An auto generated version_id for TemplateVersion. + VersionId string `json:"versionId,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Artifact") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Artifact") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TemplateVersion) MarshalJSON() ([]byte, error) { + type NoMethod TemplateVersion + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // TopologyConfig: Global topology of the streaming Dataflow job, -// including all -// computations and their sharded locations. 
+// including all computations and their sharded locations. type TopologyConfig struct { // Computations: The computations associated with a streaming Dataflow // job. @@ -6941,7 +7223,7 @@ type TransformSummary struct { // Name: User provided name for this transform instance. Name string `json:"name,omitempty"` - // OutputCollectionName: User names for all collection outputs to this + // OutputCollectionName: User names for all collection outputs to this // transform. OutputCollectionName []string `json:"outputCollectionName,omitempty"` @@ -6973,6 +7255,10 @@ type ValidateResponse struct { // ErrorMessage: Will be empty if validation succeeds. ErrorMessage string `json:"errorMessage,omitempty"` + // QueryInfo: Information about the validated query. Not defined if + // validation fails. + QueryInfo *QueryInfo `json:"queryInfo,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -7001,8 +7287,7 @@ func (s *ValidateResponse) MarshalJSON() ([]byte, error) { } // WorkItem: WorkItem represents basic information about a WorkItem to -// be executed -// in the cloud. +// be executed in the cloud. type WorkItem struct { // Configuration: Work item-specific configuration as an opaque blob. Configuration string `json:"configuration,omitempty"` @@ -7024,8 +7309,7 @@ type WorkItem struct { MapTask *MapTask `json:"mapTask,omitempty"` // Packages: Any required packages that need to be fetched in order to - // execute - // this WorkItem. + // execute this WorkItem. Packages []*Package `json:"packages,omitempty"` // ProjectId: Identifies the cloud project this WorkItem belongs to. @@ -7079,49 +7363,104 @@ func (s *WorkItem) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// WorkItemDetails: Information about an individual work item execution. +type WorkItemDetails struct { + // AttemptId: Attempt ID of this work item + AttemptId string `json:"attemptId,omitempty"` + + // EndTime: End time of this work item attempt. If the work item is + // completed, this is the actual end time of the work item. Otherwise, + // it is the predicted end time. + EndTime string `json:"endTime,omitempty"` + + // Metrics: Metrics for this work item. + Metrics []*MetricUpdate `json:"metrics,omitempty"` + + // Progress: Progress of this work item. + Progress *ProgressTimeseries `json:"progress,omitempty"` + + // StartTime: Start time of this work item attempt. + StartTime string `json:"startTime,omitempty"` + + // State: State of this work item. + // + // Possible values: + // "EXECUTION_STATE_UNKNOWN" - The component state is unknown or + // unspecified. + // "EXECUTION_STATE_NOT_STARTED" - The component is not yet running. + // "EXECUTION_STATE_RUNNING" - The component is currently running. + // "EXECUTION_STATE_SUCCEEDED" - The component succeeded. + // "EXECUTION_STATE_FAILED" - The component failed. + // "EXECUTION_STATE_CANCELLED" - Execution of the component was + // cancelled. + State string `json:"state,omitempty"` + + // TaskId: Name of this work item. + TaskId string `json:"taskId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AttemptId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AttemptId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *WorkItemDetails) MarshalJSON() ([]byte, error) { + type NoMethod WorkItemDetails + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // WorkItemServiceState: The Dataflow service's idea of the current -// state of a WorkItem -// being processed by a worker. +// state of a WorkItem being processed by a worker. type WorkItemServiceState struct { + // CompleteWorkStatus: If set, a request to complete the work item with + // the given status. This will not be set to OK, unless supported by the + // specific kind of WorkItem. It can be used for the backend to indicate + // a WorkItem must terminate, e.g., for aborting work. + CompleteWorkStatus *Status `json:"completeWorkStatus,omitempty"` + // HarnessData: Other data returned by the service, specific to the - // particular - // worker harness. + // particular worker harness. HarnessData googleapi.RawMessage `json:"harnessData,omitempty"` // HotKeyDetection: A hot key is a symptom of poor data distribution in - // which there are enough - // elements mapped to a single key to impact pipeline performance. - // When - // present, this field includes metadata associated with any hot key. + // which there are enough elements mapped to a single key to impact + // pipeline performance. When present, this field includes metadata + // associated with any hot key. HotKeyDetection *HotKeyDetection `json:"hotKeyDetection,omitempty"` // LeaseExpireTime: Time at which the current lease will expire. LeaseExpireTime string `json:"leaseExpireTime,omitempty"` // MetricShortId: The short ids that workers should use in subsequent - // metric updates. - // Workers should strive to use short ids whenever possible, but it is - // ok - // to request the short_id again if a worker lost track of it - // (e.g. if the worker is recovering from a crash). - // NOTE: it is possible that the response may have short ids for a - // subset - // of the metrics. + // metric updates. Workers should strive to use short ids whenever + // possible, but it is ok to request the short_id again if a worker lost + // track of it (e.g. if the worker is recovering from a crash). NOTE: it + // is possible that the response may have short ids for a subset of the + // metrics. MetricShortId []*MetricShortId `json:"metricShortId,omitempty"` // NextReportIndex: The index value to use for the next report sent by - // the worker. - // Note: If the report call fails for whatever reason, the worker - // should - // reuse this index for subsequent report attempts. + // the worker. Note: If the report call fails for whatever reason, the + // worker should reuse this index for subsequent report attempts. NextReportIndex int64 `json:"nextReportIndex,omitempty,string"` // ReportStatusInterval: New recommended reporting interval. 
ReportStatusInterval string `json:"reportStatusInterval,omitempty"` // SplitRequest: The progress point in the WorkItem where the Dataflow - // service - // suggests that the worker truncate the task. + // service suggests that the worker truncate the task. SplitRequest *ApproximateSplitRequest `json:"splitRequest,omitempty"` // SuggestedStopPoint: DEPRECATED in favor of split_request. @@ -7130,20 +7469,21 @@ type WorkItemServiceState struct { // SuggestedStopPosition: Obsolete, always empty. SuggestedStopPosition *Position `json:"suggestedStopPosition,omitempty"` - // ForceSendFields is a list of field names (e.g. "HarnessData") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CompleteWorkStatus") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HarnessData") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "CompleteWorkStatus") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -7166,9 +7506,8 @@ type WorkItemStatus struct { // DynamicSourceSplit: See documentation of stop_position. DynamicSourceSplit *DynamicSourceSplit `json:"dynamicSourceSplit,omitempty"` - // Errors: Specifies errors which occurred during processing. If errors - // are - // provided, and completed = true, then the WorkItem is considered + // Errors: Specifies errors which occurred during processing. If errors + // are provided, and completed = true, then the WorkItem is considered // to have failed. Errors []*Status `json:"errors,omitempty"` @@ -7178,24 +7517,17 @@ type WorkItemStatus struct { // Progress: DEPRECATED in favor of reported_progress. Progress *ApproximateProgress `json:"progress,omitempty"` - // ReportIndex: The report index. When a WorkItem is leased, the lease - // will - // contain an initial report index. When a WorkItem's status - // is - // reported to the system, the report should be sent with - // that report index, and the response will contain the index the - // worker should use for the next report. Reports received - // with - // unexpected index values will be rejected by the service. - // - // In order to preserve idempotency, the worker should not alter - // the - // contents of a report, even if the worker must submit the same - // report multiple times before getting back a response. The - // worker - // should not submit a subsequent report until the response for - // the - // previous report had been received from the service. 
+ // ReportIndex: The report index. When a WorkItem is leased, the lease + // will contain an initial report index. When a WorkItem's status is + // reported to the system, the report should be sent with that report + // index, and the response will contain the index the worker should use + // for the next report. Reports received with unexpected index values + // will be rejected by the service. In order to preserve idempotency, + // the worker should not alter the contents of a report, even if the + // worker must submit the same report multiple times before getting back + // a response. The worker should not submit a subsequent report until + // the response for the previous report had been received from the + // service. ReportIndex int64 `json:"reportIndex,omitempty,string"` // ReportedProgress: The worker's progress through this WorkItem. @@ -7209,51 +7541,35 @@ type WorkItemStatus struct { SourceFork *SourceFork `json:"sourceFork,omitempty"` // SourceOperationResponse: If the work item represented a - // SourceOperationRequest, and the work - // is completed, contains the result of the operation. + // SourceOperationRequest, and the work is completed, contains the + // result of the operation. SourceOperationResponse *SourceOperationResponse `json:"sourceOperationResponse,omitempty"` // StopPosition: A worker may split an active map task in two parts, - // "primary" and - // "residual", continuing to process the primary part and returning - // the - // residual part into the pool of available work. - // This event is called a "dynamic split" and is critical to the - // dynamic - // work rebalancing feature. The two obtained sub-tasks are - // called - // "parts" of the split. - // The parts, if concatenated, must represent the same input as would - // be read by the current task if the split did not happen. - // The exact way in which the original task is decomposed into the - // two - // parts is specified either as a position demarcating - // them - // (stop_position), or explicitly as two DerivedSources, if this - // task consumes a user-defined source type (dynamic_source_split). - // - // The "current" task is adjusted as a result of the split: after a - // task - // with range [A, B) sends a stop_position update at C, its range - // is - // considered to be [A, C), e.g.: - // * Progress should be interpreted relative to the new range, e.g. - // "75% completed" means "75% of [A, C) completed" - // * The worker should interpret proposed_stop_position relative to the - // new range, e.g. "split at 68%" should be interpreted as - // "split at 68% of [A, C)". - // * If the worker chooses to split again using stop_position, only - // stop_positions in [A, C) will be accepted. - // * Etc. - // dynamic_source_split has similar semantics: e.g., if a task - // with - // source S splits using dynamic_source_split into {P, R} - // (where P and R must be together equivalent to S), then - // subsequent - // progress and proposed_stop_position should be interpreted relative - // to P, and in a potential subsequent dynamic_source_split into {P', - // R'}, - // P' and R' must be together equivalent to P, etc. + // "primary" and "residual", continuing to process the primary part and + // returning the residual part into the pool of available work. This + // event is called a "dynamic split" and is critical to the dynamic work + // rebalancing feature. The two obtained sub-tasks are called "parts" of + // the split. 
The parts, if concatenated, must represent the same input + // as would be read by the current task if the split did not happen. The + // exact way in which the original task is decomposed into the two parts + // is specified either as a position demarcating them (stop_position), + // or explicitly as two DerivedSources, if this task consumes a + // user-defined source type (dynamic_source_split). The "current" task + // is adjusted as a result of the split: after a task with range [A, B) + // sends a stop_position update at C, its range is considered to be [A, + // C), e.g.: * Progress should be interpreted relative to the new range, + // e.g. "75% completed" means "75% of [A, C) completed" * The worker + // should interpret proposed_stop_position relative to the new range, + // e.g. "split at 68%" should be interpreted as "split at 68% of [A, + // C)". * If the worker chooses to split again using stop_position, only + // stop_positions in [A, C) will be accepted. * Etc. + // dynamic_source_split has similar semantics: e.g., if a task with + // source S splits using dynamic_source_split into {P, R} (where P and R + // must be together equivalent to S), then subsequent progress and + // proposed_stop_position should be interpreted relative to P, and in a + // potential subsequent dynamic_source_split into {P', R'}, P' and R' + // must be together equivalent to P, etc. StopPosition *Position `json:"stopPosition,omitempty"` // TotalThrottlerWaitTimeSeconds: Total time the worker spent being @@ -7300,36 +7616,56 @@ func (s *WorkItemStatus) UnmarshalJSON(data []byte) error { return nil } +// WorkerDetails: Information about a worker +type WorkerDetails struct { + // WorkItems: Work items processed by this worker, sorted by time. + WorkItems []*WorkItemDetails `json:"workItems,omitempty"` + + // WorkerName: Name of this worker + WorkerName string `json:"workerName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "WorkItems") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "WorkItems") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *WorkerDetails) MarshalJSON() ([]byte, error) { + type NoMethod WorkerDetails + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // WorkerHealthReport: WorkerHealthReport contains information about the -// health of a worker. -// -// The VM should be identified by the labels attached to the -// WorkerMessage that -// this health ping belongs to. +// health of a worker. The VM should be identified by the labels +// attached to the WorkerMessage that this health ping belongs to. type WorkerHealthReport struct { // Msg: A message describing any unusual health reports. Msg string `json:"msg,omitempty"` - // Pods: The pods running on the worker. 
- // See: - // http://kubernetes.io/v1.1/docs/api-reference/v1/definitions.html# - // _v1_pod - // - // This field is used by the worker to send the status of the - // indvidual - // containers running on each worker. + // Pods: The pods running on the worker. See: + // http://kubernetes.io/v1.1/docs/api-reference/v1/definitions.html#_v1_pod This field is used by the worker to send the status of the indvidual containers running on each + // worker. Pods []googleapi.RawMessage `json:"pods,omitempty"` // ReportInterval: The interval at which the worker is sending health - // reports. - // The default value of 0 should be interpreted as the field is not - // being - // explicitly set by the worker. + // reports. The default value of 0 should be interpreted as the field is + // not being explicitly set by the worker. ReportInterval string `json:"reportInterval,omitempty"` - // VmIsBroken: Whether the VM is in a permanently broken state. - // Broken VMs should be abandoned or deleted ASAP to avoid assigning - // or + // VmIsBroken: Whether the VM is in a permanently broken state. Broken + // VMs should be abandoned or deleted ASAP to avoid assigning or // completing any work. VmIsBroken bool `json:"vmIsBroken,omitempty"` @@ -7363,16 +7699,11 @@ func (s *WorkerHealthReport) MarshalJSON() ([]byte, error) { } // WorkerHealthReportResponse: WorkerHealthReportResponse contains -// information returned to the worker -// in response to a health ping. +// information returned to the worker in response to a health ping. type WorkerHealthReportResponse struct { // ReportInterval: A positive value indicates the worker should change - // its reporting interval - // to the specified value. - // - // The default value of zero means no change in report rate is requested - // by - // the server. + // its reporting interval to the specified value. The default value of + // zero means no change in report rate is requested by the server. ReportInterval string `json:"reportInterval,omitempty"` // ForceSendFields is a list of field names (e.g. "ReportInterval") to @@ -7399,19 +7730,16 @@ func (s *WorkerHealthReportResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// WorkerLifecycleEvent: A report of an event in a worker's -// lifecycle. -// The proto contains one event, because the worker is expected -// to -// asynchronously send each message immediately after the event. -// Due to this asynchrony, messages may arrive out of order (or -// missing), and it -// is up to the consumer to interpret. -// The timestamp of the event is in the enclosing WorkerMessage proto. +// WorkerLifecycleEvent: A report of an event in a worker's lifecycle. +// The proto contains one event, because the worker is expected to +// asynchronously send each message immediately after the event. Due to +// this asynchrony, messages may arrive out of order (or missing), and +// it is up to the consumer to interpret. The timestamp of the event is +// in the enclosing WorkerMessage proto. type WorkerLifecycleEvent struct { // ContainerStartTime: The start time of this container. All events will - // report this so that - // events can be grouped together across container/VM restarts. + // report this so that events can be grouped together across + // container/VM restarts. ContainerStartTime string `json:"containerStartTime,omitempty"` // Event: The event being reported. @@ -7420,8 +7748,8 @@ type WorkerLifecycleEvent struct { // "UNKNOWN_EVENT" - Invalid event. 
// "OS_START" - The time the VM started. // "CONTAINER_START" - Our container code starts running. Multiple - // containers could be - // distinguished with WorkerMessage.labels if desired. + // containers could be distinguished with WorkerMessage.labels if + // desired. // "NETWORK_UP" - The worker has a functional external network // connection. // "STAGING_FILES_DOWNLOAD_START" - Started downloading staging files. @@ -7432,8 +7760,8 @@ type WorkerLifecycleEvent struct { // "SDK_INSTALL_FINISH" - Finished installing SDK. Event string `json:"event,omitempty"` - // Metadata: Other stats that can accompany an event. E.g. - // { "downloaded_bytes" : "123456" } + // Metadata: Other stats that can accompany an event. E.g. { + // "downloaded_bytes" : "123456" } Metadata map[string]string `json:"metadata,omitempty"` // ForceSendFields is a list of field names (e.g. "ContainerStartTime") @@ -7463,18 +7791,13 @@ func (s *WorkerLifecycleEvent) MarshalJSON() ([]byte, error) { // WorkerMessage: WorkerMessage provides information to the backend // about a worker. type WorkerMessage struct { - // Labels: Labels are used to group WorkerMessages. - // For example, a worker_message about a particular container - // might have the labels: - // { "JOB_ID": "2015-04-22", - // "WORKER_ID": "wordcount-vm-2015…" - // "CONTAINER_TYPE": "worker", - // "CONTAINER_ID": "ac1234def"} - // Label tags typically correspond to Label enum values. However, for - // ease - // of development other strings can be used as tags. LABEL_UNSPECIFIED - // should - // not be used here. + // Labels: Labels are used to group WorkerMessages. For example, a + // worker_message about a particular container might have the labels: { + // "JOB_ID": "2015-04-22", "WORKER_ID": "wordcount-vm-2015…" + // "CONTAINER_TYPE": "worker", "CONTAINER_ID": "ac1234def"} Label tags + // typically correspond to Label enum values. However, for ease of + // development other strings can be used as tags. LABEL_UNSPECIFIED + // should not be used here. Labels map[string]string `json:"labels,omitempty"` // Time: The timestamp of the worker_message. @@ -7519,65 +7842,34 @@ func (s *WorkerMessage) MarshalJSON() ([]byte, error) { } // WorkerMessageCode: A message code is used to report status and error -// messages to the service. -// The message codes are intended to be machine readable. The service -// will -// take care of translating these into user understandable messages -// if -// necessary. -// -// Example use cases: -// 1. Worker processes reporting successful startup. -// 2. Worker processes reporting specific errors (e.g. package -// staging -// failure). +// messages to the service. The message codes are intended to be machine +// readable. The service will take care of translating these into user +// understandable messages if necessary. Example use cases: 1. Worker +// processes reporting successful startup. 2. Worker processes reporting +// specific errors (e.g. package staging failure). type WorkerMessageCode struct { // Code: The code is a string intended for consumption by a machine that - // identifies - // the type of message being sent. - // Examples: - // 1. "HARNESS_STARTED" might be used to indicate the worker harness - // has - // started. - // 2. "GCS_DOWNLOAD_ERROR" might be used to indicate an error - // downloading - // a GCS file as part of the boot process of one of the worker - // containers. - // - // This is a string and not an enum to make it easy to add new codes - // without - // waiting for an API change. 
+ // identifies the type of message being sent. Examples: 1. + // "HARNESS_STARTED" might be used to indicate the worker harness has + // started. 2. "GCS_DOWNLOAD_ERROR" might be used to indicate an error + // downloading a GCS file as part of the boot process of one of the + // worker containers. This is a string and not an enum to make it easy + // to add new codes without waiting for an API change. Code string `json:"code,omitempty"` - // Parameters: Parameters contains specific information about the - // code. - // - // This is a struct to allow parameters of different types. - // - // Examples: - // 1. For a "HARNESS_STARTED" message parameters might provide the - // name - // of the worker and additional data like timing information. - // 2. For a "GCS_DOWNLOAD_ERROR" parameters might contain fields - // listing - // the GCS objects being downloaded and fields containing - // errors. - // - // In general complex data structures should be avoided. If a - // worker - // needs to send a specific and complicated data structure then - // please - // consider defining a new proto and adding it to the data oneof - // in - // WorkerMessageResponse. - // - // Conventions: - // Parameters should only be used for information that isn't typically - // passed - // as a label. - // hostname and other worker identifiers should almost always be - // passed - // as labels since they will be included on most messages. + // Parameters: Parameters contains specific information about the code. + // This is a struct to allow parameters of different types. Examples: 1. + // For a "HARNESS_STARTED" message parameters might provide the name of + // the worker and additional data like timing information. 2. For a + // "GCS_DOWNLOAD_ERROR" parameters might contain fields listing the GCS + // objects being downloaded and fields containing errors. In general + // complex data structures should be avoided. If a worker needs to send + // a specific and complicated data structure then please consider + // defining a new proto and adding it to the data oneof in + // WorkerMessageResponse. Conventions: Parameters should only be used + // for information that isn't typically passed as a label. hostname and + // other worker identifiers should almost always be passed as labels + // since they will be included on most messages. Parameters googleapi.RawMessage `json:"parameters,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -7604,8 +7896,7 @@ func (s *WorkerMessageCode) MarshalJSON() ([]byte, error) { } // WorkerMessageResponse: A worker_message response allows the server to -// pass information to the -// sender. +// pass information to the sender. type WorkerMessageResponse struct { // WorkerHealthReportResponse: The service's response to a worker's // health report. @@ -7645,13 +7936,9 @@ func (s *WorkerMessageResponse) MarshalJSON() ([]byte, error) { } // WorkerPool: Describes one particular pool of Cloud Dataflow workers -// to be -// instantiated by the Cloud Dataflow service in order to perform -// the -// computations required by a job. Note that a workflow job may -// use -// multiple pools, in order to match the various -// computational +// to be instantiated by the Cloud Dataflow service in order to perform +// the computations required by a job. Note that a workflow job may use +// multiple pools, in order to match the various computational // requirements of the various stages of the job. 
type WorkerPool struct { // AutoscalingSettings: Settings for autoscaling of this WorkerPool. @@ -7660,34 +7947,30 @@ type WorkerPool struct { // DataDisks: Data disks that are used by a VM in this workflow. DataDisks []*Disk `json:"dataDisks,omitempty"` - // DefaultPackageSet: The default package set to install. This allows - // the service to - // select a default set of packages which are useful to worker - // harnesses written in a particular language. + // DefaultPackageSet: The default package set to install. This allows + // the service to select a default set of packages which are useful to + // worker harnesses written in a particular language. // // Possible values: // "DEFAULT_PACKAGE_SET_UNKNOWN" - The default set of packages to // stage is unknown, or unspecified. // "DEFAULT_PACKAGE_SET_NONE" - Indicates that no packages should be - // staged at the worker unless - // explicitly specified by the job. + // staged at the worker unless explicitly specified by the job. // "DEFAULT_PACKAGE_SET_JAVA" - Stage packages typically useful to // workers written in Java. // "DEFAULT_PACKAGE_SET_PYTHON" - Stage pacakges typically useful to // workers written in Python. DefaultPackageSet string `json:"defaultPackageSet,omitempty"` - // DiskSizeGb: Size of root disk for VMs, in GB. If zero or - // unspecified, the service will - // attempt to choose a reasonable default. + // DiskSizeGb: Size of root disk for VMs, in GB. If zero or unspecified, + // the service will attempt to choose a reasonable default. DiskSizeGb int64 `json:"diskSizeGb,omitempty"` // DiskSourceImage: Fully qualified source image for disks. DiskSourceImage string `json:"diskSourceImage,omitempty"` - // DiskType: Type of root disk for VMs. If empty or unspecified, the - // service will - // attempt to choose a reasonable default. + // DiskType: Type of root disk for VMs. If empty or unspecified, the + // service will attempt to choose a reasonable default. DiskType string `json:"diskType,omitempty"` // IpConfiguration: Configuration for VM IPs. @@ -7700,40 +7983,33 @@ type WorkerPool struct { IpConfiguration string `json:"ipConfiguration,omitempty"` // Kind: The kind of the worker pool; currently only `harness` and - // `shuffle` - // are supported. + // `shuffle` are supported. Kind string `json:"kind,omitempty"` - // MachineType: Machine type (e.g. "n1-standard-1"). If empty or - // unspecified, the - // service will attempt to choose a reasonable default. + // MachineType: Machine type (e.g. "n1-standard-1"). If empty or + // unspecified, the service will attempt to choose a reasonable default. MachineType string `json:"machineType,omitempty"` // Metadata: Metadata to set on the Google Compute Engine VMs. Metadata map[string]string `json:"metadata,omitempty"` - // Network: Network to which VMs will be assigned. If empty or - // unspecified, - // the service will use the network "default". + // Network: Network to which VMs will be assigned. If empty or + // unspecified, the service will use the network "default". Network string `json:"network,omitempty"` // NumThreadsPerWorker: The number of threads per worker harness. If - // empty or unspecified, the - // service will choose a number of threads (according to the number of - // cores - // on the selected machine type for batch, or 1 by convention for - // streaming). + // empty or unspecified, the service will choose a number of threads + // (according to the number of cores on the selected machine type for + // batch, or 1 by convention for streaming). 
NumThreadsPerWorker int64 `json:"numThreadsPerWorker,omitempty"` // NumWorkers: Number of Google Compute Engine workers in this pool - // needed to - // execute the job. If zero or unspecified, the service will + // needed to execute the job. If zero or unspecified, the service will // attempt to choose a reasonable default. NumWorkers int64 `json:"numWorkers,omitempty"` // OnHostMaintenance: The action to take on host maintenance, as defined - // by the Google - // Compute Engine API. + // by the Google Compute Engine API. OnHostMaintenance string `json:"onHostMaintenance,omitempty"` // Packages: Packages to be installed on workers. @@ -7743,73 +8019,51 @@ type WorkerPool struct { PoolArgs googleapi.RawMessage `json:"poolArgs,omitempty"` // SdkHarnessContainerImages: Set of SDK harness containers needed to - // execute this pipeline. This will - // only be set in the Fn API path. For non-cross-language pipelines - // this - // should have only one entry. Cross-language pipelines will have two or - // more - // entries. + // execute this pipeline. This will only be set in the Fn API path. For + // non-cross-language pipelines this should have only one entry. + // Cross-language pipelines will have two or more entries. SdkHarnessContainerImages []*SdkHarnessContainerImage `json:"sdkHarnessContainerImages,omitempty"` // Subnetwork: Subnetwork to which VMs will be assigned, if desired. - // Expected to be of - // the form "regions/REGION/subnetworks/SUBNETWORK". + // Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK". Subnetwork string `json:"subnetwork,omitempty"` // TaskrunnerSettings: Settings passed through to Google Compute Engine - // workers when - // using the standard Dataflow task runner. Users should ignore - // this field. + // workers when using the standard Dataflow task runner. Users should + // ignore this field. TaskrunnerSettings *TaskRunnerSettings `json:"taskrunnerSettings,omitempty"` // TeardownPolicy: Sets the policy for determining when to turndown - // worker pool. - // Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, - // and - // `TEARDOWN_NEVER`. - // `TEARDOWN_ALWAYS` means workers are always torn down regardless of - // whether - // the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn - // down - // if the job succeeds. `TEARDOWN_NEVER` means the workers are never - // torn - // down. - // - // If the workers are not torn down by the service, they will - // continue to run and use Google Compute Engine VM resources in - // the - // user's project until they are explicitly terminated by the - // user. - // Because of this, Google recommends using the `TEARDOWN_ALWAYS` - // policy except for small, manually supervised test jobs. - // - // If unknown or unspecified, the service will attempt to choose a - // reasonable - // default. + // worker pool. Allowed values are: `TEARDOWN_ALWAYS`, + // `TEARDOWN_ON_SUCCESS`, and `TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means + // workers are always torn down regardless of whether the job succeeds. + // `TEARDOWN_ON_SUCCESS` means workers are torn down if the job + // succeeds. `TEARDOWN_NEVER` means the workers are never torn down. If + // the workers are not torn down by the service, they will continue to + // run and use Google Compute Engine VM resources in the user's project + // until they are explicitly terminated by the user. Because of this, + // Google recommends using the `TEARDOWN_ALWAYS` policy except for + // small, manually supervised test jobs. 
If unknown or unspecified, the + // service will attempt to choose a reasonable default. // // Possible values: // "TEARDOWN_POLICY_UNKNOWN" - The teardown policy isn't specified, or // is unknown. // "TEARDOWN_ALWAYS" - Always teardown the resource. // "TEARDOWN_ON_SUCCESS" - Teardown the resource on success. This is - // useful for debugging - // failures. + // useful for debugging failures. // "TEARDOWN_NEVER" - Never teardown the resource. This is useful for - // debugging and - // development. + // debugging and development. TeardownPolicy string `json:"teardownPolicy,omitempty"` // WorkerHarnessContainerImage: Required. Docker container image that - // executes the Cloud Dataflow worker - // harness, residing in Google Container Registry. - // - // Deprecated for the Fn API path. Use sdk_harness_container_images - // instead. + // executes the Cloud Dataflow worker harness, residing in Google + // Container Registry. Deprecated for the Fn API path. Use + // sdk_harness_container_images instead. WorkerHarnessContainerImage string `json:"workerHarnessContainerImage,omitempty"` - // Zone: Zone to run the worker pools in. If empty or unspecified, the - // service - // will attempt to choose a reasonable default. + // Zone: Zone to run the worker pools in. If empty or unspecified, the + // service will attempt to choose a reasonable default. Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. "AutoscalingSettings") @@ -7838,18 +8092,12 @@ func (s *WorkerPool) MarshalJSON() ([]byte, error) { // WorkerSettings: Provides data to pass through to the worker harness. type WorkerSettings struct { - // BaseUrl: The base URL for accessing Google Cloud APIs. - // - // When workers access Google Cloud APIs, they logically do so - // via - // relative URLs. If this field is specified, it supplies the base - // URL to use for resolving these relative URLs. The - // normative - // algorithm used is defined by RFC 1808, "Relative Uniform - // Resource - // Locators". - // - // If not specified, the default value is "http://www.googleapis.com/" + // BaseUrl: The base URL for accessing Google Cloud APIs. When workers + // access Google Cloud APIs, they logically do so via relative URLs. If + // this field is specified, it supplies the base URL to use for + // resolving these relative URLs. The normative algorithm used is + // defined by RFC 1808, "Relative Uniform Resource Locators". If not + // specified, the default value is "http://www.googleapis.com/" BaseUrl string `json:"baseUrl,omitempty"` // ReportingEnabled: Whether to send work progress updates to the @@ -7857,25 +8105,17 @@ type WorkerSettings struct { ReportingEnabled bool `json:"reportingEnabled,omitempty"` // ServicePath: The Cloud Dataflow service path relative to the root - // URL, for example, - // "dataflow/v1b3/projects". + // URL, for example, "dataflow/v1b3/projects". ServicePath string `json:"servicePath,omitempty"` // ShuffleServicePath: The Shuffle service path relative to the root - // URL, for example, - // "shuffle/v1beta1". + // URL, for example, "shuffle/v1beta1". ShuffleServicePath string `json:"shuffleServicePath,omitempty"` // TempStoragePrefix: The prefix of the resources the system should use - // for temporary - // storage. - // - // The supported resource type is: - // - // Google Cloud Storage: - // - // storage.googleapis.com/{bucket}/{object} - // bucket.storage.googleapis.com/{object} + // for temporary storage. 
The supported resource type is: Google Cloud + // Storage: storage.googleapis.com/{bucket}/{object} + // bucket.storage.googleapis.com/{object} TempStoragePrefix string `json:"tempStoragePrefix,omitempty"` // WorkerId: The ID of the worker running this pipeline. @@ -7905,16 +8145,13 @@ func (s *WorkerSettings) MarshalJSON() ([]byte, error) { } // WorkerShutdownNotice: Shutdown notification from workers. This is to -// be sent by the shutdown -// script of the worker VM so that the backend knows that the VM is -// being -// shut down. +// be sent by the shutdown script of the worker VM so that the backend +// knows that the VM is being shut down. type WorkerShutdownNotice struct { - // Reason: The reason for the worker shutdown. - // Current possible values are: - // "UNKNOWN": shutdown reason is unknown. - // "PREEMPTION": shutdown reason is preemption. - // Other possible reasons may be added in the future. + // Reason: The reason for the worker shutdown. Current possible values + // are: "UNKNOWN": shutdown reason is unknown. "PREEMPTION": shutdown + // reason is preemption. Other possible reasons may be added in the + // future. Reason string `json:"reason,omitempty"` // ForceSendFields is a list of field names (e.g. "Reason") to @@ -7945,8 +8182,8 @@ func (s *WorkerShutdownNotice) MarshalJSON() ([]byte, error) { type WorkerShutdownNoticeResponse struct { } -// WriteInstruction: An instruction that writes records. -// Takes one input, produces no outputs. +// WriteInstruction: An instruction that writes records. Takes one +// input, produces no outputs. type WriteInstruction struct { // Input: The input. Input *InstructionInput `json:"input,omitempty"` @@ -8035,7 +8272,7 @@ func (c *ProjectsDeleteSnapshotsCall) Header() http.Header { func (c *ProjectsDeleteSnapshotsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8179,7 +8416,7 @@ func (c *ProjectsWorkerMessagesCall) Header() http.Header { func (c *ProjectsWorkerMessagesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8275,60 +8512,919 @@ func (c *ProjectsWorkerMessagesCall) Do(opts ...googleapi.CallOption) (*SendWork } -// method id "dataflow.projects.jobs.aggregated": - -type ProjectsJobsAggregatedCall struct { - s *Service - projectId string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} +// method id "dataflow.projects.catalogTemplates.commit": -// Aggregated: List the jobs of a project across all regions. -func (r *ProjectsJobsService) Aggregated(projectId string) *ProjectsJobsAggregatedCall { - c := &ProjectsJobsAggregatedCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.projectId = projectId - return c +type ProjectsCatalogTemplatesCommitCall struct { + s *Service + name string + committemplateversionrequest *CommitTemplateVersionRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Filter sets the optional parameter "filter": The kind of filter to -// use. 
-// -// Possible values: -// "UNKNOWN" -// "ALL" -// "TERMINATED" -// "ACTIVE" -func (c *ProjectsJobsAggregatedCall) Filter(filter string) *ProjectsJobsAggregatedCall { - c.urlParams_.Set("filter", filter) +// Commit: Creates a new TemplateVersion (Important: not new Template) +// entry in the spanner table. Requires project_id and display_name +// (template). +func (r *ProjectsCatalogTemplatesService) Commit(name string, committemplateversionrequest *CommitTemplateVersionRequest) *ProjectsCatalogTemplatesCommitCall { + c := &ProjectsCatalogTemplatesCommitCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.committemplateversionrequest = committemplateversionrequest return c } -// Location sets the optional parameter "location": The [regional -// endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints) that -// contains this job. -func (c *ProjectsJobsAggregatedCall) Location(location string) *ProjectsJobsAggregatedCall { - c.urlParams_.Set("location", location) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsCatalogTemplatesCommitCall) Fields(s ...googleapi.Field) *ProjectsCatalogTemplatesCommitCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// PageSize sets the optional parameter "pageSize": If there are many -// jobs, limit response to at most this many. -// The actual number of jobs returned will be the lesser of -// max_responses -// and an unspecified server-defined limit. -func (c *ProjectsJobsAggregatedCall) PageSize(pageSize int64) *ProjectsJobsAggregatedCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsCatalogTemplatesCommitCall) Context(ctx context.Context) *ProjectsCatalogTemplatesCommitCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsCatalogTemplatesCommitCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsCatalogTemplatesCommitCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.committemplateversionrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1b3/{+name}:commit") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataflow.projects.catalogTemplates.commit" call. +// Exactly one of *TemplateVersion or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *TemplateVersion.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsCatalogTemplatesCommitCall) Do(opts ...googleapi.CallOption) (*TemplateVersion, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TemplateVersion{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new TemplateVersion (Important: not new Template) entry in the spanner table. Requires project_id and display_name (template).", + // "flatPath": "v1b3/projects/{projectsId}/catalogTemplates/{catalogTemplatesId}:commit", + // "httpMethod": "POST", + // "id": "dataflow.projects.catalogTemplates.commit", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The location of the template, name includes project_id and display_name. Commit using project_id(pid1) and display_name(tid1). Format: projects/{pid1}/catalogTemplates/{tid1}", + // "location": "path", + // "pattern": "^projects/[^/]+/catalogTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1b3/{+name}:commit", + // "request": { + // "$ref": "CommitTemplateVersionRequest" + // }, + // "response": { + // "$ref": "TemplateVersion" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/userinfo.email" + // ] + // } + +} + +// method id "dataflow.projects.catalogTemplates.delete": + +type ProjectsCatalogTemplatesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an existing Template. Do nothing if Template does not +// exist. +func (r *ProjectsCatalogTemplatesService) Delete(name string) *ProjectsCatalogTemplatesDeleteCall { + c := &ProjectsCatalogTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsCatalogTemplatesDeleteCall) Fields(s ...googleapi.Field) *ProjectsCatalogTemplatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsCatalogTemplatesDeleteCall) Context(ctx context.Context) *ProjectsCatalogTemplatesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsCatalogTemplatesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsCatalogTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1b3/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataflow.projects.catalogTemplates.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsCatalogTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes an existing Template. Do nothing if Template does not exist.", + // "flatPath": "v1b3/projects/{projectsId}/catalogTemplates/{catalogTemplatesId}", + // "httpMethod": "DELETE", + // "id": "dataflow.projects.catalogTemplates.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "name includes project_id and display_name. Delete by project_id(pid1) and display_name(tid1). Format: projects/{pid1}/catalogTemplates/{tid1}", + // "location": "path", + // "pattern": "^projects/[^/]+/catalogTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1b3/{+name}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/userinfo.email" + // ] + // } + +} + +// method id "dataflow.projects.catalogTemplates.get": + +type ProjectsCatalogTemplatesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Get TemplateVersion using project_id and display_name with an +// optional version_id field. Get latest (has tag "latest") +// TemplateVersion if version_id not set. 
+func (r *ProjectsCatalogTemplatesService) Get(name string) *ProjectsCatalogTemplatesGetCall { + c := &ProjectsCatalogTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsCatalogTemplatesGetCall) Fields(s ...googleapi.Field) *ProjectsCatalogTemplatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsCatalogTemplatesGetCall) IfNoneMatch(entityTag string) *ProjectsCatalogTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsCatalogTemplatesGetCall) Context(ctx context.Context) *ProjectsCatalogTemplatesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsCatalogTemplatesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsCatalogTemplatesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1b3/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataflow.projects.catalogTemplates.get" call. +// Exactly one of *TemplateVersion or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TemplateVersion.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsCatalogTemplatesGetCall) Do(opts ...googleapi.CallOption) (*TemplateVersion, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TemplateVersion{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Get TemplateVersion using project_id and display_name with an optional version_id field. Get latest (has tag \"latest\") TemplateVersion if version_id not set.", + // "flatPath": "v1b3/projects/{projectsId}/catalogTemplates/{catalogTemplatesId}", + // "httpMethod": "GET", + // "id": "dataflow.projects.catalogTemplates.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name includes project_id and display_name. version_id is optional. Get the latest TemplateVersion if version_id not set. Get by project_id(pid1) and display_name(tid1): Format: projects/{pid1}/catalogTemplates/{tid1} Get by project_id(pid1), display_name(tid1), and version_id(vid1): Format: projects/{pid1}/catalogTemplates/{tid1@vid}", + // "location": "path", + // "pattern": "^projects/[^/]+/catalogTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1b3/{+name}", + // "response": { + // "$ref": "TemplateVersion" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/userinfo.email" + // ] + // } + +} + +// method id "dataflow.projects.catalogTemplates.label": + +type ProjectsCatalogTemplatesLabelCall struct { + s *Service + name string + modifytemplateversionlabelrequest *ModifyTemplateVersionLabelRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Label: Updates the label of the TemplateVersion. Label can be +// duplicated in Template, so either add or remove the label in the +// TemplateVersion. +func (r *ProjectsCatalogTemplatesService) Label(name string, modifytemplateversionlabelrequest *ModifyTemplateVersionLabelRequest) *ProjectsCatalogTemplatesLabelCall { + c := &ProjectsCatalogTemplatesLabelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.modifytemplateversionlabelrequest = modifytemplateversionlabelrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsCatalogTemplatesLabelCall) Fields(s ...googleapi.Field) *ProjectsCatalogTemplatesLabelCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsCatalogTemplatesLabelCall) Context(ctx context.Context) *ProjectsCatalogTemplatesLabelCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsCatalogTemplatesLabelCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsCatalogTemplatesLabelCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifytemplateversionlabelrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1b3/{+name}:label") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataflow.projects.catalogTemplates.label" call. +// Exactly one of *ModifyTemplateVersionLabelResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ModifyTemplateVersionLabelResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsCatalogTemplatesLabelCall) Do(opts ...googleapi.CallOption) (*ModifyTemplateVersionLabelResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ModifyTemplateVersionLabelResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the label of the TemplateVersion. Label can be duplicated in Template, so either add or remove the label in the TemplateVersion.", + // "flatPath": "v1b3/projects/{projectsId}/catalogTemplates/{catalogTemplatesId}:label", + // "httpMethod": "POST", + // "id": "dataflow.projects.catalogTemplates.label", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name includes project_id, display_name, and version_id. 
Updates by project_id(pid1), display_name(tid1), and version_id(vid1): Format: projects/{pid1}/catalogTemplates/{tid1@vid}", + // "location": "path", + // "pattern": "^projects/[^/]+/catalogTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1b3/{+name}:label", + // "request": { + // "$ref": "ModifyTemplateVersionLabelRequest" + // }, + // "response": { + // "$ref": "ModifyTemplateVersionLabelResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/userinfo.email" + // ] + // } + +} + +// method id "dataflow.projects.catalogTemplates.tag": + +type ProjectsCatalogTemplatesTagCall struct { + s *Service + name string + modifytemplateversiontagrequest *ModifyTemplateVersionTagRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Tag: Updates the tag of the TemplateVersion, and tag is unique in +// Template. If tag exists in another TemplateVersion in the Template, +// updates the tag to this TemplateVersion will remove it from the old +// TemplateVersion and add it to this TemplateVersion. If request is +// remove_only (remove_only = true), remove the tag from this +// TemplateVersion. +func (r *ProjectsCatalogTemplatesService) Tag(name string, modifytemplateversiontagrequest *ModifyTemplateVersionTagRequest) *ProjectsCatalogTemplatesTagCall { + c := &ProjectsCatalogTemplatesTagCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.modifytemplateversiontagrequest = modifytemplateversiontagrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsCatalogTemplatesTagCall) Fields(s ...googleapi.Field) *ProjectsCatalogTemplatesTagCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsCatalogTemplatesTagCall) Context(ctx context.Context) *ProjectsCatalogTemplatesTagCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsCatalogTemplatesTagCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsCatalogTemplatesTagCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifytemplateversiontagrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1b3/{+name}:tag") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataflow.projects.catalogTemplates.tag" call. 
+// Exactly one of *ModifyTemplateVersionTagResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ModifyTemplateVersionTagResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsCatalogTemplatesTagCall) Do(opts ...googleapi.CallOption) (*ModifyTemplateVersionTagResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ModifyTemplateVersionTagResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the tag of the TemplateVersion, and tag is unique in Template. If tag exists in another TemplateVersion in the Template, updates the tag to this TemplateVersion will remove it from the old TemplateVersion and add it to this TemplateVersion. If request is remove_only (remove_only = true), remove the tag from this TemplateVersion.", + // "flatPath": "v1b3/projects/{projectsId}/catalogTemplates/{catalogTemplatesId}:tag", + // "httpMethod": "POST", + // "id": "dataflow.projects.catalogTemplates.tag", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name includes project_id, display_name, and version_id. Updates by project_id(pid1), display_name(tid1), and version_id(vid1): Format: projects/{pid1}/catalogTemplates/{tid1@vid}", + // "location": "path", + // "pattern": "^projects/[^/]+/catalogTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1b3/{+name}:tag", + // "request": { + // "$ref": "ModifyTemplateVersionTagRequest" + // }, + // "response": { + // "$ref": "ModifyTemplateVersionTagResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/userinfo.email" + // ] + // } + +} + +// method id "dataflow.projects.catalogTemplates.templateVersions.create": + +type ProjectsCatalogTemplatesTemplateVersionsCreateCall struct { + s *Service + parent string + createtemplateversionrequest *CreateTemplateVersionRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new Template with TemplateVersion. Requires +// project_id(projects) and template display_name(catalogTemplates). The +// template display_name is set by the user. +func (r *ProjectsCatalogTemplatesTemplateVersionsService) Create(parent string, createtemplateversionrequest *CreateTemplateVersionRequest) *ProjectsCatalogTemplatesTemplateVersionsCreateCall { + c := &ProjectsCatalogTemplatesTemplateVersionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.createtemplateversionrequest = createtemplateversionrequest + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsCatalogTemplatesTemplateVersionsCreateCall) Fields(s ...googleapi.Field) *ProjectsCatalogTemplatesTemplateVersionsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsCatalogTemplatesTemplateVersionsCreateCall) Context(ctx context.Context) *ProjectsCatalogTemplatesTemplateVersionsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsCatalogTemplatesTemplateVersionsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsCatalogTemplatesTemplateVersionsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.createtemplateversionrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1b3/{+parent}/templateVersions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataflow.projects.catalogTemplates.templateVersions.create" call. +// Exactly one of *TemplateVersion or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TemplateVersion.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsCatalogTemplatesTemplateVersionsCreateCall) Do(opts ...googleapi.CallOption) (*TemplateVersion, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TemplateVersion{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new Template with TemplateVersion. Requires project_id(projects) and template display_name(catalogTemplates). 
The template display_name is set by the user.", + // "flatPath": "v1b3/projects/{projectsId}/catalogTemplates/{catalogTemplatesId}/templateVersions", + // "httpMethod": "POST", + // "id": "dataflow.projects.catalogTemplates.templateVersions.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The parent project and template that the TemplateVersion will be created under. Create using project_id(pid1) and display_name(tid1). Format: projects/{pid1}/catalogTemplates/{tid1}", + // "location": "path", + // "pattern": "^projects/[^/]+/catalogTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1b3/{+parent}/templateVersions", + // "request": { + // "$ref": "CreateTemplateVersionRequest" + // }, + // "response": { + // "$ref": "TemplateVersion" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/userinfo.email" + // ] + // } + +} + +// method id "dataflow.projects.jobs.aggregated": + +type ProjectsJobsAggregatedCall struct { + s *Service + projectId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Aggregated: List the jobs of a project across all regions. +func (r *ProjectsJobsService) Aggregated(projectId string) *ProjectsJobsAggregatedCall { + c := &ProjectsJobsAggregatedCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + return c +} + +// Filter sets the optional parameter "filter": The kind of filter to +// use. +// +// Possible values: +// "UNKNOWN" - The filter isn't specified, or is unknown. This returns +// all jobs ordered on descending `JobUuid`. +// "ALL" - Returns all running jobs first ordered on creation +// timestamp, then returns all terminated jobs ordered on the +// termination timestamp. +// "TERMINATED" - Filters the jobs that have a terminated state, +// ordered on the termination timestamp. Example terminated states: +// `JOB_STATE_STOPPED`, `JOB_STATE_UPDATED`, `JOB_STATE_DRAINED`, etc. +// "ACTIVE" - Filters the jobs that are running ordered on the +// creation timestamp. +func (c *ProjectsJobsAggregatedCall) Filter(filter string) *ProjectsJobsAggregatedCall { + c.urlParams_.Set("filter", filter) + return c +} + +// Location sets the optional parameter "location": The [regional +// endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) +// that contains this job. +func (c *ProjectsJobsAggregatedCall) Location(location string) *ProjectsJobsAggregatedCall { + c.urlParams_.Set("location", location) + return c +} + +// PageSize sets the optional parameter "pageSize": If there are many +// jobs, limit response to at most this many. The actual number of jobs +// returned will be the lesser of max_responses and an unspecified +// server-defined limit. +func (c *ProjectsJobsAggregatedCall) PageSize(pageSize int64) *ProjectsJobsAggregatedCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Set this to the -// 'next_page_token' field of a previous response -// to request additional results in a long list. +// 'next_page_token' field of a previous response to request additional +// results in a long list. 
func (c *ProjectsJobsAggregatedCall) PageToken(pageToken string) *ProjectsJobsAggregatedCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -8338,10 +9434,15 @@ func (c *ProjectsJobsAggregatedCall) PageToken(pageToken string) *ProjectsJobsAg // requested in response. Default is `JOB_VIEW_SUMMARY`. // // Possible values: -// "JOB_VIEW_UNKNOWN" -// "JOB_VIEW_SUMMARY" -// "JOB_VIEW_ALL" -// "JOB_VIEW_DESCRIPTION" +// "JOB_VIEW_UNKNOWN" - The job view to return isn't specified, or is +// unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` +// information, and may contain additional information. +// "JOB_VIEW_SUMMARY" - Request summary information only: Project ID, +// Job ID, job name, job type, job status, start/end time, and Cloud SDK +// version details. +// "JOB_VIEW_ALL" - Request all information available for this job. +// "JOB_VIEW_DESCRIPTION" - Request summary info and limited job +// description data for steps, labels and environment. func (c *ProjectsJobsAggregatedCall) View(view string) *ProjectsJobsAggregatedCall { c.urlParams_.Set("view", view) return c @@ -8384,7 +9485,7 @@ func (c *ProjectsJobsAggregatedCall) Header() http.Header { func (c *ProjectsJobsAggregatedCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8462,22 +9563,28 @@ func (c *ProjectsJobsAggregatedCall) Do(opts ...googleapi.CallOption) (*ListJobs // "TERMINATED", // "ACTIVE" // ], + // "enumDescriptions": [ + // "The filter isn't specified, or is unknown. This returns all jobs ordered on descending `JobUuid`.", + // "Returns all running jobs first ordered on creation timestamp, then returns all terminated jobs ordered on the termination timestamp.", + // "Filters the jobs that have a terminated state, ordered on the termination timestamp. Example terminated states: `JOB_STATE_STOPPED`, `JOB_STATE_UPDATED`, `JOB_STATE_DRAINED`, etc.", + // "Filters the jobs that are running ordered on the creation timestamp." + // ], // "location": "query", // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "If there are many jobs, limit response to at most this many.\nThe actual number of jobs returned will be the lesser of max_responses\nand an unspecified server-defined limit.", + // "description": "If there are many jobs, limit response to at most this many. 
The actual number of jobs returned will be the lesser of max_responses and an unspecified server-defined limit.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Set this to the 'next_page_token' field of a previous response\nto request additional results in a long list.", + // "description": "Set this to the 'next_page_token' field of a previous response to request additional results in a long list.", // "location": "query", // "type": "string" // }, @@ -8495,6 +9602,12 @@ func (c *ProjectsJobsAggregatedCall) Do(opts ...googleapi.CallOption) (*ListJobs // "JOB_VIEW_ALL", // "JOB_VIEW_DESCRIPTION" // ], + // "enumDescriptions": [ + // "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", + // "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", + // "Request all information available for this job.", + // "Request summary info and limited job description data for steps, labels and environment." + // ], // "location": "query", // "type": "string" // } @@ -8545,17 +9658,11 @@ type ProjectsJobsCreateCall struct { header_ http.Header } -// Create: Creates a Cloud Dataflow job. -// -// To create a job, we recommend using `projects.locations.jobs.create` -// with a -// [regional -// endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints). Using -// `projects.jobs.create` is not recommended, as your job will always -// start -// in `us-central1`. +// Create: Creates a Cloud Dataflow job. To create a job, we recommend +// using `projects.locations.jobs.create` with a [regional endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). +// Using `projects.jobs.create` is not recommended, as your job will +// always start in `us-central1`. func (r *ProjectsJobsService) Create(projectId string, job *Job) *ProjectsJobsCreateCall { c := &ProjectsJobsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -8565,9 +9672,8 @@ func (r *ProjectsJobsService) Create(projectId string, job *Job) *ProjectsJobsCr // Location sets the optional parameter "location": The [regional // endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints) that -// contains this job. +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) +// that contains this job. func (c *ProjectsJobsCreateCall) Location(location string) *ProjectsJobsCreateCall { c.urlParams_.Set("location", location) return c @@ -8584,10 +9690,15 @@ func (c *ProjectsJobsCreateCall) ReplaceJobId(replaceJobId string) *ProjectsJobs // requested in response. // // Possible values: -// "JOB_VIEW_UNKNOWN" -// "JOB_VIEW_SUMMARY" -// "JOB_VIEW_ALL" -// "JOB_VIEW_DESCRIPTION" +// "JOB_VIEW_UNKNOWN" - The job view to return isn't specified, or is +// unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` +// information, and may contain additional information. +// "JOB_VIEW_SUMMARY" - Request summary information only: Project ID, +// Job ID, job name, job type, job status, start/end time, and Cloud SDK +// version details. +// "JOB_VIEW_ALL" - Request all information available for this job. +// "JOB_VIEW_DESCRIPTION" - Request summary info and limited job +// description data for steps, labels and environment. 
func (c *ProjectsJobsCreateCall) View(view string) *ProjectsJobsCreateCall { c.urlParams_.Set("view", view) return c @@ -8620,7 +9731,7 @@ func (c *ProjectsJobsCreateCall) Header() http.Header { func (c *ProjectsJobsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8684,7 +9795,7 @@ func (c *ProjectsJobsCreateCall) Do(opts ...googleapi.CallOption) (*Job, error) } return ret, nil // { - // "description": "Creates a Cloud Dataflow job.\n\nTo create a job, we recommend using `projects.locations.jobs.create` with a\n[regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.create` is not recommended, as your job will always start\nin `us-central1`.", + // "description": "Creates a Cloud Dataflow job. To create a job, we recommend using `projects.locations.jobs.create` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.jobs.create` is not recommended, as your job will always start in `us-central1`.", // "flatPath": "v1b3/projects/{projectId}/jobs", // "httpMethod": "POST", // "id": "dataflow.projects.jobs.create", @@ -8693,7 +9804,7 @@ func (c *ProjectsJobsCreateCall) Do(opts ...googleapi.CallOption) (*Job, error) // ], // "parameters": { // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", // "location": "query", // "type": "string" // }, @@ -8716,6 +9827,12 @@ func (c *ProjectsJobsCreateCall) Do(opts ...googleapi.CallOption) (*Job, error) // "JOB_VIEW_ALL", // "JOB_VIEW_DESCRIPTION" // ], + // "enumDescriptions": [ + // "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", + // "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", + // "Request all information available for this job.", + // "Request summary info and limited job description data for steps, labels and environment." + // ], // "location": "query", // "type": "string" // } @@ -8749,17 +9866,12 @@ type ProjectsJobsGetCall struct { header_ http.Header } -// Get: Gets the state of the specified Cloud Dataflow job. -// -// To get the state of a job, we recommend using -// `projects.locations.jobs.get` -// with a [regional -// endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints). Using -// `projects.jobs.get` is not recommended, as you can only get the state -// of -// jobs that are running in `us-central1`. +// Get: Gets the state of the specified Cloud Dataflow job. To get the +// state of a job, we recommend using `projects.locations.jobs.get` with +// a [regional endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). +// Using `projects.jobs.get` is not recommended, as you can only get the +// state of jobs that are running in `us-central1`. 
func (r *ProjectsJobsService) Get(projectId string, jobId string) *ProjectsJobsGetCall { c := &ProjectsJobsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -8769,9 +9881,8 @@ func (r *ProjectsJobsService) Get(projectId string, jobId string) *ProjectsJobsG // Location sets the optional parameter "location": The [regional // endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints) that -// contains this job. +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) +// that contains this job. func (c *ProjectsJobsGetCall) Location(location string) *ProjectsJobsGetCall { c.urlParams_.Set("location", location) return c @@ -8781,10 +9892,15 @@ func (c *ProjectsJobsGetCall) Location(location string) *ProjectsJobsGetCall { // requested in response. // // Possible values: -// "JOB_VIEW_UNKNOWN" -// "JOB_VIEW_SUMMARY" -// "JOB_VIEW_ALL" -// "JOB_VIEW_DESCRIPTION" +// "JOB_VIEW_UNKNOWN" - The job view to return isn't specified, or is +// unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` +// information, and may contain additional information. +// "JOB_VIEW_SUMMARY" - Request summary information only: Project ID, +// Job ID, job name, job type, job status, start/end time, and Cloud SDK +// version details. +// "JOB_VIEW_ALL" - Request all information available for this job. +// "JOB_VIEW_DESCRIPTION" - Request summary info and limited job +// description data for steps, labels and environment. func (c *ProjectsJobsGetCall) View(view string) *ProjectsJobsGetCall { c.urlParams_.Set("view", view) return c @@ -8827,7 +9943,7 @@ func (c *ProjectsJobsGetCall) Header() http.Header { func (c *ProjectsJobsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8890,7 +10006,7 @@ func (c *ProjectsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, error) { } return ret, nil // { - // "description": "Gets the state of the specified Cloud Dataflow job.\n\nTo get the state of a job, we recommend using `projects.locations.jobs.get`\nwith a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.get` is not recommended, as you can only get the state of\njobs that are running in `us-central1`.", + // "description": "Gets the state of the specified Cloud Dataflow job. To get the state of a job, we recommend using `projects.locations.jobs.get` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). 
Using `projects.jobs.get` is not recommended, as you can only get the state of jobs that are running in `us-central1`.", // "flatPath": "v1b3/projects/{projectId}/jobs/{jobId}", // "httpMethod": "GET", // "id": "dataflow.projects.jobs.get", @@ -8906,7 +10022,7 @@ func (c *ProjectsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, error) { // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", // "location": "query", // "type": "string" // }, @@ -8924,6 +10040,12 @@ func (c *ProjectsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, error) { // "JOB_VIEW_ALL", // "JOB_VIEW_DESCRIPTION" // ], + // "enumDescriptions": [ + // "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", + // "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", + // "Request all information available for this job.", + // "Request summary info and limited job description data for steps, labels and environment." + // ], // "location": "query", // "type": "string" // } @@ -8954,17 +10076,12 @@ type ProjectsJobsGetMetricsCall struct { header_ http.Header } -// GetMetrics: Request the job status. -// -// To request the status of a job, we recommend -// using -// `projects.locations.jobs.getMetrics` with a [regional -// endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints). Using -// `projects.jobs.getMetrics` is not recommended, as you can only -// request the -// status of jobs that are running in `us-central1`. +// GetMetrics: Request the job status. To request the status of a job, +// we recommend using `projects.locations.jobs.getMetrics` with a +// [regional endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). +// Using `projects.jobs.getMetrics` is not recommended, as you can only +// request the status of jobs that are running in `us-central1`. func (r *ProjectsJobsService) GetMetrics(projectId string, jobId string) *ProjectsJobsGetMetricsCall { c := &ProjectsJobsGetMetricsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -8974,17 +10091,16 @@ func (r *ProjectsJobsService) GetMetrics(projectId string, jobId string) *Projec // Location sets the optional parameter "location": The [regional // endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints) that -// contains the job specified by job_id. +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) +// that contains the job specified by job_id. func (c *ProjectsJobsGetMetricsCall) Location(location string) *ProjectsJobsGetMetricsCall { c.urlParams_.Set("location", location) return c } // StartTime sets the optional parameter "startTime": Return only metric -// data that has changed since this time. -// Default is to return all information about all metrics for the job. +// data that has changed since this time. Default is to return all +// information about all metrics for the job. 
func (c *ProjectsJobsGetMetricsCall) StartTime(startTime string) *ProjectsJobsGetMetricsCall { c.urlParams_.Set("startTime", startTime) return c @@ -9027,7 +10143,7 @@ func (c *ProjectsJobsGetMetricsCall) Header() http.Header { func (c *ProjectsJobsGetMetricsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9090,7 +10206,7 @@ func (c *ProjectsJobsGetMetricsCall) Do(opts ...googleapi.CallOption) (*JobMetri } return ret, nil // { - // "description": "Request the job status.\n\nTo request the status of a job, we recommend using\n`projects.locations.jobs.getMetrics` with a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.getMetrics` is not recommended, as you can only request the\nstatus of jobs that are running in `us-central1`.", + // "description": "Request the job status. To request the status of a job, we recommend using `projects.locations.jobs.getMetrics` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.jobs.getMetrics` is not recommended, as you can only request the status of jobs that are running in `us-central1`.", // "flatPath": "v1b3/projects/{projectId}/jobs/{jobId}/metrics", // "httpMethod": "GET", // "id": "dataflow.projects.jobs.getMetrics", @@ -9100,13 +10216,13 @@ func (c *ProjectsJobsGetMetricsCall) Do(opts ...googleapi.CallOption) (*JobMetri // ], // "parameters": { // "jobId": { - // "description": "The job to get messages for.", + // "description": "The job to get metrics for.", // "location": "path", // "required": true, // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the job specified by job_id.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", // "location": "query", // "type": "string" // }, @@ -9117,7 +10233,7 @@ func (c *ProjectsJobsGetMetricsCall) Do(opts ...googleapi.CallOption) (*JobMetri // "type": "string" // }, // "startTime": { - // "description": "Return only metric data that has changed since this time.\nDefault is to return all information about all metrics for the job.", + // "description": "Return only metric data that has changed since this time. Default is to return all information about all metrics for the job.", // "format": "google-datetime", // "location": "query", // "type": "string" @@ -9148,19 +10264,14 @@ type ProjectsJobsListCall struct { header_ http.Header } -// List: List the jobs of a project. -// -// To list the jobs of a project in a region, we recommend -// using -// `projects.locations.jobs.get` with a [regional -// endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints). To -// list the all jobs across all regions, use `projects.jobs.aggregated`. -// Using -// `projects.jobs.list` is not recommended, as you can only get the list -// of -// jobs that are running in `us-central1`. +// List: List the jobs of a project. 
To list the jobs of a project in a +// region, we recommend using `projects.locations.jobs.list` with a +// [regional endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). +// To list the all jobs across all regions, use +// `projects.jobs.aggregated`. Using `projects.jobs.list` is not +// recommended, as you can only get the list of jobs that are running in +// `us-central1`. func (r *ProjectsJobsService) List(projectId string) *ProjectsJobsListCall { c := &ProjectsJobsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -9171,10 +10282,16 @@ func (r *ProjectsJobsService) List(projectId string) *ProjectsJobsListCall { // use. // // Possible values: -// "UNKNOWN" -// "ALL" -// "TERMINATED" -// "ACTIVE" +// "UNKNOWN" - The filter isn't specified, or is unknown. This returns +// all jobs ordered on descending `JobUuid`. +// "ALL" - Returns all running jobs first ordered on creation +// timestamp, then returns all terminated jobs ordered on the +// termination timestamp. +// "TERMINATED" - Filters the jobs that have a terminated state, +// ordered on the termination timestamp. Example terminated states: +// `JOB_STATE_STOPPED`, `JOB_STATE_UPDATED`, `JOB_STATE_DRAINED`, etc. +// "ACTIVE" - Filters the jobs that are running ordered on the +// creation timestamp. func (c *ProjectsJobsListCall) Filter(filter string) *ProjectsJobsListCall { c.urlParams_.Set("filter", filter) return c @@ -9182,27 +10299,25 @@ func (c *ProjectsJobsListCall) Filter(filter string) *ProjectsJobsListCall { // Location sets the optional parameter "location": The [regional // endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints) that -// contains this job. +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) +// that contains this job. func (c *ProjectsJobsListCall) Location(location string) *ProjectsJobsListCall { c.urlParams_.Set("location", location) return c } // PageSize sets the optional parameter "pageSize": If there are many -// jobs, limit response to at most this many. -// The actual number of jobs returned will be the lesser of -// max_responses -// and an unspecified server-defined limit. +// jobs, limit response to at most this many. The actual number of jobs +// returned will be the lesser of max_responses and an unspecified +// server-defined limit. func (c *ProjectsJobsListCall) PageSize(pageSize int64) *ProjectsJobsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Set this to the -// 'next_page_token' field of a previous response -// to request additional results in a long list. +// 'next_page_token' field of a previous response to request additional +// results in a long list. func (c *ProjectsJobsListCall) PageToken(pageToken string) *ProjectsJobsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -9212,10 +10327,15 @@ func (c *ProjectsJobsListCall) PageToken(pageToken string) *ProjectsJobsListCall // requested in response. Default is `JOB_VIEW_SUMMARY`. // // Possible values: -// "JOB_VIEW_UNKNOWN" -// "JOB_VIEW_SUMMARY" -// "JOB_VIEW_ALL" -// "JOB_VIEW_DESCRIPTION" +// "JOB_VIEW_UNKNOWN" - The job view to return isn't specified, or is +// unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` +// information, and may contain additional information. 
+// "JOB_VIEW_SUMMARY" - Request summary information only: Project ID, +// Job ID, job name, job type, job status, start/end time, and Cloud SDK +// version details. +// "JOB_VIEW_ALL" - Request all information available for this job. +// "JOB_VIEW_DESCRIPTION" - Request summary info and limited job +// description data for steps, labels and environment. func (c *ProjectsJobsListCall) View(view string) *ProjectsJobsListCall { c.urlParams_.Set("view", view) return c @@ -9258,7 +10378,7 @@ func (c *ProjectsJobsListCall) Header() http.Header { func (c *ProjectsJobsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9320,7 +10440,7 @@ func (c *ProjectsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJobsRespon } return ret, nil // { - // "description": "List the jobs of a project.\n\nTo list the jobs of a project in a region, we recommend using\n`projects.locations.jobs.get` with a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). To\nlist the all jobs across all regions, use `projects.jobs.aggregated`. Using\n`projects.jobs.list` is not recommended, as you can only get the list of\njobs that are running in `us-central1`.", + // "description": "List the jobs of a project. To list the jobs of a project in a region, we recommend using `projects.locations.jobs.list` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). To list the all jobs across all regions, use `projects.jobs.aggregated`. Using `projects.jobs.list` is not recommended, as you can only get the list of jobs that are running in `us-central1`.", // "flatPath": "v1b3/projects/{projectId}/jobs", // "httpMethod": "GET", // "id": "dataflow.projects.jobs.list", @@ -9336,22 +10456,28 @@ func (c *ProjectsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJobsRespon // "TERMINATED", // "ACTIVE" // ], + // "enumDescriptions": [ + // "The filter isn't specified, or is unknown. This returns all jobs ordered on descending `JobUuid`.", + // "Returns all running jobs first ordered on creation timestamp, then returns all terminated jobs ordered on the termination timestamp.", + // "Filters the jobs that have a terminated state, ordered on the termination timestamp. Example terminated states: `JOB_STATE_STOPPED`, `JOB_STATE_UPDATED`, `JOB_STATE_DRAINED`, etc.", + // "Filters the jobs that are running ordered on the creation timestamp." + // ], // "location": "query", // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "If there are many jobs, limit response to at most this many.\nThe actual number of jobs returned will be the lesser of max_responses\nand an unspecified server-defined limit.", + // "description": "If there are many jobs, limit response to at most this many. 
The actual number of jobs returned will be the lesser of max_responses and an unspecified server-defined limit.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Set this to the 'next_page_token' field of a previous response\nto request additional results in a long list.", + // "description": "Set this to the 'next_page_token' field of a previous response to request additional results in a long list.", // "location": "query", // "type": "string" // }, @@ -9369,6 +10495,12 @@ func (c *ProjectsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJobsRespon // "JOB_VIEW_ALL", // "JOB_VIEW_DESCRIPTION" // ], + // "enumDescriptions": [ + // "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", + // "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", + // "Request all information available for this job.", + // "Request summary info and limited job description data for steps, labels and environment." + // ], // "location": "query", // "type": "string" // } @@ -9456,7 +10588,7 @@ func (c *ProjectsJobsSnapshotCall) Header() http.Header { func (c *ProjectsJobsSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9572,17 +10704,12 @@ type ProjectsJobsUpdateCall struct { header_ http.Header } -// Update: Updates the state of an existing Cloud Dataflow job. -// -// To update the state of an existing job, we recommend -// using -// `projects.locations.jobs.update` with a [regional -// endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints). Using -// `projects.jobs.update` is not recommended, as you can only update the -// state -// of jobs that are running in `us-central1`. +// Update: Updates the state of an existing Cloud Dataflow job. To +// update the state of an existing job, we recommend using +// `projects.locations.jobs.update` with a [regional endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). +// Using `projects.jobs.update` is not recommended, as you can only +// update the state of jobs that are running in `us-central1`. func (r *ProjectsJobsService) Update(projectId string, jobId string, job *Job) *ProjectsJobsUpdateCall { c := &ProjectsJobsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -9593,9 +10720,8 @@ func (r *ProjectsJobsService) Update(projectId string, jobId string, job *Job) * // Location sets the optional parameter "location": The [regional // endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints) that -// contains this job. +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) +// that contains this job. 
func (c *ProjectsJobsUpdateCall) Location(location string) *ProjectsJobsUpdateCall { c.urlParams_.Set("location", location) return c @@ -9628,7 +10754,7 @@ func (c *ProjectsJobsUpdateCall) Header() http.Header { func (c *ProjectsJobsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9693,7 +10819,7 @@ func (c *ProjectsJobsUpdateCall) Do(opts ...googleapi.CallOption) (*Job, error) } return ret, nil // { - // "description": "Updates the state of an existing Cloud Dataflow job.\n\nTo update the state of an existing job, we recommend using\n`projects.locations.jobs.update` with a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.update` is not recommended, as you can only update the state\nof jobs that are running in `us-central1`.", + // "description": "Updates the state of an existing Cloud Dataflow job. To update the state of an existing job, we recommend using `projects.locations.jobs.update` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.jobs.update` is not recommended, as you can only update the state of jobs that are running in `us-central1`.", // "flatPath": "v1b3/projects/{projectId}/jobs/{jobId}", // "httpMethod": "PUT", // "id": "dataflow.projects.jobs.update", @@ -9709,7 +10835,7 @@ func (c *ProjectsJobsUpdateCall) Do(opts ...googleapi.CallOption) (*Job, error) // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", // "location": "query", // "type": "string" // }, @@ -9786,7 +10912,7 @@ func (c *ProjectsJobsDebugGetConfigCall) Header() http.Header { func (c *ProjectsJobsDebugGetConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9938,7 +11064,7 @@ func (c *ProjectsJobsDebugSendCaptureCall) Header() http.Header { func (c *ProjectsJobsDebugSendCaptureCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10054,17 +11180,12 @@ type ProjectsJobsMessagesListCall struct { header_ http.Header } -// List: Request the job status. -// -// To request the status of a job, we recommend -// using -// `projects.locations.jobs.messages.list` with a [regional -// endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints). Using -// `projects.jobs.messages.list` is not recommended, as you can only -// request -// the status of jobs that are running in `us-central1`. +// List: Request the job status. 
To request the status of a job, we +// recommend using `projects.locations.jobs.messages.list` with a +// [regional endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). +// Using `projects.jobs.messages.list` is not recommended, as you can +// only request the status of jobs that are running in `us-central1`. func (r *ProjectsJobsMessagesService) List(projectId string, jobId string) *ProjectsJobsMessagesListCall { c := &ProjectsJobsMessagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -10073,8 +11194,8 @@ func (r *ProjectsJobsMessagesService) List(projectId string, jobId string) *Proj } // EndTime sets the optional parameter "endTime": Return only messages -// with timestamps < end_time. The default is now -// (i.e. return up to the latest messages available). +// with timestamps < end_time. The default is now (i.e. return up to the +// latest messages available). func (c *ProjectsJobsMessagesListCall) EndTime(endTime string) *ProjectsJobsMessagesListCall { c.urlParams_.Set("endTime", endTime) return c @@ -10082,9 +11203,8 @@ func (c *ProjectsJobsMessagesListCall) EndTime(endTime string) *ProjectsJobsMess // Location sets the optional parameter "location": The [regional // endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints) that -// contains the job specified by job_id. +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) +// that contains the job specified by job_id. func (c *ProjectsJobsMessagesListCall) Location(location string) *ProjectsJobsMessagesListCall { c.urlParams_.Set("location", location) return c @@ -10094,39 +11214,57 @@ func (c *ProjectsJobsMessagesListCall) Location(location string) *ProjectsJobsMe // Filter to only get messages with importance >= level // // Possible values: -// "JOB_MESSAGE_IMPORTANCE_UNKNOWN" -// "JOB_MESSAGE_DEBUG" -// "JOB_MESSAGE_DETAILED" -// "JOB_MESSAGE_BASIC" -// "JOB_MESSAGE_WARNING" -// "JOB_MESSAGE_ERROR" +// "JOB_MESSAGE_IMPORTANCE_UNKNOWN" - The message importance isn't +// specified, or is unknown. +// "JOB_MESSAGE_DEBUG" - The message is at the 'debug' level: +// typically only useful for software engineers working on the code the +// job is running. Typically, Dataflow pipeline runners do not display +// log messages at this level by default. +// "JOB_MESSAGE_DETAILED" - The message is at the 'detailed' level: +// somewhat verbose, but potentially useful to users. Typically, +// Dataflow pipeline runners do not display log messages at this level +// by default. These messages are displayed by default in the Dataflow +// monitoring UI. +// "JOB_MESSAGE_BASIC" - The message is at the 'basic' level: useful +// for keeping track of the execution of a Dataflow pipeline. Typically, +// Dataflow pipeline runners display log messages at this level by +// default, and these messages are displayed by default in the Dataflow +// monitoring UI. +// "JOB_MESSAGE_WARNING" - The message is at the 'warning' level: +// indicating a condition pertaining to a job which may require human +// intervention. Typically, Dataflow pipeline runners display log +// messages at this level by default, and these messages are displayed +// by default in the Dataflow monitoring UI. +// "JOB_MESSAGE_ERROR" - The message is at the 'error' level: +// indicating a condition preventing a job from succeeding. 
Typically, +// Dataflow pipeline runners display log messages at this level by +// default, and these messages are displayed by default in the Dataflow +// monitoring UI. func (c *ProjectsJobsMessagesListCall) MinimumImportance(minimumImportance string) *ProjectsJobsMessagesListCall { c.urlParams_.Set("minimumImportance", minimumImportance) return c } // PageSize sets the optional parameter "pageSize": If specified, -// determines the maximum number of messages to -// return. If unspecified, the service may choose an -// appropriate -// default, or may return an arbitrarily large number of results. +// determines the maximum number of messages to return. If unspecified, +// the service may choose an appropriate default, or may return an +// arbitrarily large number of results. func (c *ProjectsJobsMessagesListCall) PageSize(pageSize int64) *ProjectsJobsMessagesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": If supplied, this -// should be the value of next_page_token returned -// by an earlier call. This will cause the next page of results to -// be returned. +// should be the value of next_page_token returned by an earlier call. +// This will cause the next page of results to be returned. func (c *ProjectsJobsMessagesListCall) PageToken(pageToken string) *ProjectsJobsMessagesListCall { c.urlParams_.Set("pageToken", pageToken) return c } // StartTime sets the optional parameter "startTime": If specified, -// return only messages with timestamps >= start_time. -// The default is the job creation time (i.e. beginning of messages). +// return only messages with timestamps >= start_time. The default is +// the job creation time (i.e. beginning of messages). func (c *ProjectsJobsMessagesListCall) StartTime(startTime string) *ProjectsJobsMessagesListCall { c.urlParams_.Set("startTime", startTime) return c @@ -10169,7 +11307,7 @@ func (c *ProjectsJobsMessagesListCall) Header() http.Header { func (c *ProjectsJobsMessagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10232,7 +11370,7 @@ func (c *ProjectsJobsMessagesListCall) Do(opts ...googleapi.CallOption) (*ListJo } return ret, nil // { - // "description": "Request the job status.\n\nTo request the status of a job, we recommend using\n`projects.locations.jobs.messages.list` with a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.messages.list` is not recommended, as you can only request\nthe status of jobs that are running in `us-central1`.", + // "description": "Request the job status. To request the status of a job, we recommend using `projects.locations.jobs.messages.list` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). 
Using `projects.jobs.messages.list` is not recommended, as you can only request the status of jobs that are running in `us-central1`.", // "flatPath": "v1b3/projects/{projectId}/jobs/{jobId}/messages", // "httpMethod": "GET", // "id": "dataflow.projects.jobs.messages.list", @@ -10242,7 +11380,7 @@ func (c *ProjectsJobsMessagesListCall) Do(opts ...googleapi.CallOption) (*ListJo // ], // "parameters": { // "endTime": { - // "description": "Return only messages with timestamps \u003c end_time. The default is now\n(i.e. return up to the latest messages available).", + // "description": "Return only messages with timestamps \u003c end_time. The default is now (i.e. return up to the latest messages available).", // "format": "google-datetime", // "location": "query", // "type": "string" @@ -10254,7 +11392,7 @@ func (c *ProjectsJobsMessagesListCall) Do(opts ...googleapi.CallOption) (*ListJo // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the job specified by job_id.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", // "location": "query", // "type": "string" // }, @@ -10268,17 +11406,25 @@ func (c *ProjectsJobsMessagesListCall) Do(opts ...googleapi.CallOption) (*ListJo // "JOB_MESSAGE_WARNING", // "JOB_MESSAGE_ERROR" // ], + // "enumDescriptions": [ + // "The message importance isn't specified, or is unknown.", + // "The message is at the 'debug' level: typically only useful for software engineers working on the code the job is running. Typically, Dataflow pipeline runners do not display log messages at this level by default.", + // "The message is at the 'detailed' level: somewhat verbose, but potentially useful to users. Typically, Dataflow pipeline runners do not display log messages at this level by default. These messages are displayed by default in the Dataflow monitoring UI.", + // "The message is at the 'basic' level: useful for keeping track of the execution of a Dataflow pipeline. Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI.", + // "The message is at the 'warning' level: indicating a condition pertaining to a job which may require human intervention. Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI.", + // "The message is at the 'error' level: indicating a condition preventing a job from succeeding. Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI." + // ], // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "If specified, determines the maximum number of messages to\nreturn. If unspecified, the service may choose an appropriate\ndefault, or may return an arbitrarily large number of results.", + // "description": "If specified, determines the maximum number of messages to return. If unspecified, the service may choose an appropriate default, or may return an arbitrarily large number of results.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "If supplied, this should be the value of next_page_token returned\nby an earlier call. 
This will cause the next page of results to\nbe returned.", + // "description": "If supplied, this should be the value of next_page_token returned by an earlier call. This will cause the next page of results to be returned.", // "location": "query", // "type": "string" // }, @@ -10289,7 +11435,7 @@ func (c *ProjectsJobsMessagesListCall) Do(opts ...googleapi.CallOption) (*ListJo // "type": "string" // }, // "startTime": { - // "description": "If specified, return only messages with timestamps \u003e= start_time.\nThe default is the job creation time (i.e. beginning of messages).", + // "description": "If specified, return only messages with timestamps \u003e= start_time. The default is the job creation time (i.e. beginning of messages).", // "format": "google-datetime", // "location": "query", // "type": "string" @@ -10378,7 +11524,7 @@ func (c *ProjectsJobsWorkItemsLeaseCall) Header() http.Header { func (c *ProjectsJobsWorkItemsLeaseCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10531,7 +11677,7 @@ func (c *ProjectsJobsWorkItemsReportStatusCall) Header() http.Header { func (c *ProjectsJobsWorkItemsReportStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10683,7 +11829,7 @@ func (c *ProjectsLocationsWorkerMessagesCall) Header() http.Header { func (c *ProjectsLocationsWorkerMessagesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10758,7 +11904,7 @@ func (c *ProjectsLocationsWorkerMessagesCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the job.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job.", // "location": "path", // "required": true, // "type": "string" @@ -10835,7 +11981,7 @@ func (c *ProjectsLocationsFlexTemplatesLaunchCall) Header() http.Header { func (c *ProjectsLocationsFlexTemplatesLaunchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10910,7 +12056,7 @@ func (c *ProjectsLocationsFlexTemplatesLaunchCall) Do(opts ...googleapi.CallOpti // ], // "parameters": { // "location": { - // "description": "Required. The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to\nwhich to direct the request. E.g., us-central1, us-west1.", + // "description": "Required. 
The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request. E.g., us-central1, us-west1.", // "location": "path", // "required": true, // "type": "string" @@ -10951,17 +12097,11 @@ type ProjectsLocationsJobsCreateCall struct { header_ http.Header } -// Create: Creates a Cloud Dataflow job. -// -// To create a job, we recommend using `projects.locations.jobs.create` -// with a -// [regional -// endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints). Using -// `projects.jobs.create` is not recommended, as your job will always -// start -// in `us-central1`. +// Create: Creates a Cloud Dataflow job. To create a job, we recommend +// using `projects.locations.jobs.create` with a [regional endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). +// Using `projects.jobs.create` is not recommended, as your job will +// always start in `us-central1`. func (r *ProjectsLocationsJobsService) Create(projectId string, location string, job *Job) *ProjectsLocationsJobsCreateCall { c := &ProjectsLocationsJobsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -10981,10 +12121,15 @@ func (c *ProjectsLocationsJobsCreateCall) ReplaceJobId(replaceJobId string) *Pro // requested in response. // // Possible values: -// "JOB_VIEW_UNKNOWN" -// "JOB_VIEW_SUMMARY" -// "JOB_VIEW_ALL" -// "JOB_VIEW_DESCRIPTION" +// "JOB_VIEW_UNKNOWN" - The job view to return isn't specified, or is +// unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` +// information, and may contain additional information. +// "JOB_VIEW_SUMMARY" - Request summary information only: Project ID, +// Job ID, job name, job type, job status, start/end time, and Cloud SDK +// version details. +// "JOB_VIEW_ALL" - Request all information available for this job. +// "JOB_VIEW_DESCRIPTION" - Request summary info and limited job +// description data for steps, labels and environment. func (c *ProjectsLocationsJobsCreateCall) View(view string) *ProjectsLocationsJobsCreateCall { c.urlParams_.Set("view", view) return c @@ -11017,7 +12162,7 @@ func (c *ProjectsLocationsJobsCreateCall) Header() http.Header { func (c *ProjectsLocationsJobsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11082,7 +12227,7 @@ func (c *ProjectsLocationsJobsCreateCall) Do(opts ...googleapi.CallOption) (*Job } return ret, nil // { - // "description": "Creates a Cloud Dataflow job.\n\nTo create a job, we recommend using `projects.locations.jobs.create` with a\n[regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.create` is not recommended, as your job will always start\nin `us-central1`.", + // "description": "Creates a Cloud Dataflow job. To create a job, we recommend using `projects.locations.jobs.create` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). 
Using `projects.jobs.create` is not recommended, as your job will always start in `us-central1`.", // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs", // "httpMethod": "POST", // "id": "dataflow.projects.locations.jobs.create", @@ -11092,7 +12237,7 @@ func (c *ProjectsLocationsJobsCreateCall) Do(opts ...googleapi.CallOption) (*Job // ], // "parameters": { // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", // "location": "path", // "required": true, // "type": "string" @@ -11116,14 +12261,226 @@ func (c *ProjectsLocationsJobsCreateCall) Do(opts ...googleapi.CallOption) (*Job // "JOB_VIEW_ALL", // "JOB_VIEW_DESCRIPTION" // ], + // "enumDescriptions": [ + // "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", + // "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", + // "Request all information available for this job.", + // "Request summary info and limited job description data for steps, labels and environment." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1b3/projects/{projectId}/locations/{location}/jobs", + // "request": { + // "$ref": "Job" + // }, + // "response": { + // "$ref": "Job" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly", + // "https://www.googleapis.com/auth/userinfo.email" + // ] + // } + +} + +// method id "dataflow.projects.locations.jobs.get": + +type ProjectsLocationsJobsGetCall struct { + s *Service + projectId string + location string + jobId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the state of the specified Cloud Dataflow job. To get the +// state of a job, we recommend using `projects.locations.jobs.get` with +// a [regional endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). +// Using `projects.jobs.get` is not recommended, as you can only get the +// state of jobs that are running in `us-central1`. +func (r *ProjectsLocationsJobsService) Get(projectId string, location string, jobId string) *ProjectsLocationsJobsGetCall { + c := &ProjectsLocationsJobsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.location = location + c.jobId = jobId + return c +} + +// View sets the optional parameter "view": The level of information +// requested in response. +// +// Possible values: +// "JOB_VIEW_UNKNOWN" - The job view to return isn't specified, or is +// unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` +// information, and may contain additional information. +// "JOB_VIEW_SUMMARY" - Request summary information only: Project ID, +// Job ID, job name, job type, job status, start/end time, and Cloud SDK +// version details. +// "JOB_VIEW_ALL" - Request all information available for this job. +// "JOB_VIEW_DESCRIPTION" - Request summary info and limited job +// description data for steps, labels and environment. 
+func (c *ProjectsLocationsJobsGetCall) View(view string) *ProjectsLocationsJobsGetCall { + c.urlParams_.Set("view", view) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsJobsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsJobsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsJobsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsJobsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsJobsGetCall) Context(ctx context.Context) *ProjectsLocationsJobsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsJobsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsJobsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "location": c.location, + "jobId": c.jobId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataflow.projects.locations.jobs.get" call. +// Exactly one of *Job or error will be non-nil. Any non-2xx status code +// is an error. Response headers are in either +// *Job.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsLocationsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Job{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the state of the specified Cloud Dataflow job. To get the state of a job, we recommend using `projects.locations.jobs.get` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.jobs.get` is not recommended, as you can only get the state of jobs that are running in `us-central1`.", + // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}", + // "httpMethod": "GET", + // "id": "dataflow.projects.locations.jobs.get", + // "parameterOrder": [ + // "projectId", + // "location", + // "jobId" + // ], + // "parameters": { + // "jobId": { + // "description": "The job ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "location": { + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The ID of the Cloud Platform project that the job belongs to.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "view": { + // "description": "The level of information requested in response.", + // "enum": [ + // "JOB_VIEW_UNKNOWN", + // "JOB_VIEW_SUMMARY", + // "JOB_VIEW_ALL", + // "JOB_VIEW_DESCRIPTION" + // ], + // "enumDescriptions": [ + // "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", + // "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", + // "Request all information available for this job.", + // "Request summary info and limited job description data for steps, labels and environment." + // ], // "location": "query", // "type": "string" // } // }, - // "path": "v1b3/projects/{projectId}/locations/{location}/jobs", - // "request": { - // "$ref": "Job" - // }, + // "path": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}", // "response": { // "$ref": "Job" // }, @@ -11137,9 +12494,9 @@ func (c *ProjectsLocationsJobsCreateCall) Do(opts ...googleapi.CallOption) (*Job } -// method id "dataflow.projects.locations.jobs.get": +// method id "dataflow.projects.locations.jobs.getExecutionDetails": -type ProjectsLocationsJobsGetCall struct { +type ProjectsLocationsJobsGetExecutionDetailsCall struct { s *Service projectId string location string @@ -11150,42 +12507,38 @@ type ProjectsLocationsJobsGetCall struct { header_ http.Header } -// Get: Gets the state of the specified Cloud Dataflow job. -// -// To get the state of a job, we recommend using -// `projects.locations.jobs.get` -// with a [regional -// endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints). 
Using -// `projects.jobs.get` is not recommended, as you can only get the state -// of -// jobs that are running in `us-central1`. -func (r *ProjectsLocationsJobsService) Get(projectId string, location string, jobId string) *ProjectsLocationsJobsGetCall { - c := &ProjectsLocationsJobsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetExecutionDetails: Request detailed information about the execution +// status of the job. EXPERIMENTAL. This API is subject to change or +// removal without notice. +func (r *ProjectsLocationsJobsService) GetExecutionDetails(projectId string, location string, jobId string) *ProjectsLocationsJobsGetExecutionDetailsCall { + c := &ProjectsLocationsJobsGetExecutionDetailsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId c.location = location c.jobId = jobId return c } -// View sets the optional parameter "view": The level of information -// requested in response. -// -// Possible values: -// "JOB_VIEW_UNKNOWN" -// "JOB_VIEW_SUMMARY" -// "JOB_VIEW_ALL" -// "JOB_VIEW_DESCRIPTION" -func (c *ProjectsLocationsJobsGetCall) View(view string) *ProjectsLocationsJobsGetCall { - c.urlParams_.Set("view", view) +// PageSize sets the optional parameter "pageSize": If specified, +// determines the maximum number of stages to return. If unspecified, +// the service may choose an appropriate default, or may return an +// arbitrarily large number of results. +func (c *ProjectsLocationsJobsGetExecutionDetailsCall) PageSize(pageSize int64) *ProjectsLocationsJobsGetExecutionDetailsCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If supplied, this +// should be the value of next_page_token returned by an earlier call. +// This will cause the next page of results to be returned. +func (c *ProjectsLocationsJobsGetExecutionDetailsCall) PageToken(pageToken string) *ProjectsLocationsJobsGetExecutionDetailsCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsLocationsJobsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsJobsGetCall { +func (c *ProjectsLocationsJobsGetExecutionDetailsCall) Fields(s ...googleapi.Field) *ProjectsLocationsJobsGetExecutionDetailsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -11195,7 +12548,7 @@ func (c *ProjectsLocationsJobsGetCall) Fields(s ...googleapi.Field) *ProjectsLoc // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ProjectsLocationsJobsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsJobsGetCall { +func (c *ProjectsLocationsJobsGetExecutionDetailsCall) IfNoneMatch(entityTag string) *ProjectsLocationsJobsGetExecutionDetailsCall { c.ifNoneMatch_ = entityTag return c } @@ -11203,23 +12556,23 @@ func (c *ProjectsLocationsJobsGetCall) IfNoneMatch(entityTag string) *ProjectsLo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
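// Illustrative usage sketch: paging through the experimental execution-details
// call with the Pages helper defined later in this file. Assumes an existing
// *dataflow.Service client (for example from dataflow.NewService) and
// placeholder project/location values; error handling is abbreviated.
func pageExecutionDetails(ctx context.Context, svc *dataflow.Service, jobID string) error {
	call := svc.Projects.Locations.Jobs.
		GetExecutionDetails("my-project", "us-central1", jobID).
		PageSize(50)
	// Pages calls Do repeatedly and follows next_page_token until exhausted.
	return call.Pages(ctx, func(page *dataflow.JobExecutionDetails) error {
		// Inspect the page here; returning a non-nil error stops the iteration.
		_ = page
		return nil
	})
}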
-func (c *ProjectsLocationsJobsGetCall) Context(ctx context.Context) *ProjectsLocationsJobsGetCall { +func (c *ProjectsLocationsJobsGetExecutionDetailsCall) Context(ctx context.Context) *ProjectsLocationsJobsGetExecutionDetailsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsLocationsJobsGetCall) Header() http.Header { +func (c *ProjectsLocationsJobsGetExecutionDetailsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsJobsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsJobsGetExecutionDetailsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11230,7 +12583,7 @@ func (c *ProjectsLocationsJobsGetCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/executionDetails") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -11245,14 +12598,14 @@ func (c *ProjectsLocationsJobsGetCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataflow.projects.locations.jobs.get" call. -// Exactly one of *Job or error will be non-nil. Any non-2xx status code -// is an error. Response headers are in either -// *Job.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *ProjectsLocationsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, error) { +// Do executes the "dataflow.projects.locations.jobs.getExecutionDetails" call. +// Exactly one of *JobExecutionDetails or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *JobExecutionDetails.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsJobsGetExecutionDetailsCall) Do(opts ...googleapi.CallOption) (*JobExecutionDetails, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -11271,7 +12624,7 @@ func (c *ProjectsLocationsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Job{ + ret := &JobExecutionDetails{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -11283,10 +12636,10 @@ func (c *ProjectsLocationsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, e } return ret, nil // { - // "description": "Gets the state of the specified Cloud Dataflow job.\n\nTo get the state of a job, we recommend using `projects.locations.jobs.get`\nwith a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.get` is not recommended, as you can only get the state of\njobs that are running in `us-central1`.", - // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}", + // "description": "Request detailed information about the execution status of the job. EXPERIMENTAL. This API is subject to change or removal without notice.", + // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/executionDetails", // "httpMethod": "GET", - // "id": "dataflow.projects.locations.jobs.get", + // "id": "dataflow.projects.locations.jobs.getExecutionDetails", // "parameterOrder": [ // "projectId", // "location", @@ -11294,38 +12647,38 @@ func (c *ProjectsLocationsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, e // ], // "parameters": { // "jobId": { - // "description": "The job ID.", + // "description": "The job to get execution details for.", // "location": "path", // "required": true, // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", // "location": "path", // "required": true, // "type": "string" // }, + // "pageSize": { + // "description": "If specified, determines the maximum number of stages to return. If unspecified, the service may choose an appropriate default, or may return an arbitrarily large number of results.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "If supplied, this should be the value of next_page_token returned by an earlier call. 
This will cause the next page of results to be returned.", + // "location": "query", + // "type": "string" + // }, // "projectId": { - // "description": "The ID of the Cloud Platform project that the job belongs to.", + // "description": "A project id.", // "location": "path", // "required": true, // "type": "string" - // }, - // "view": { - // "description": "The level of information requested in response.", - // "enum": [ - // "JOB_VIEW_UNKNOWN", - // "JOB_VIEW_SUMMARY", - // "JOB_VIEW_ALL", - // "JOB_VIEW_DESCRIPTION" - // ], - // "location": "query", - // "type": "string" // } // }, - // "path": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}", + // "path": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/executionDetails", // "response": { - // "$ref": "Job" + // "$ref": "JobExecutionDetails" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -11337,6 +12690,27 @@ func (c *ProjectsLocationsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, e } +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsJobsGetExecutionDetailsCall) Pages(ctx context.Context, f func(*JobExecutionDetails) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "dataflow.projects.locations.jobs.getMetrics": type ProjectsLocationsJobsGetMetricsCall struct { @@ -11350,17 +12724,12 @@ type ProjectsLocationsJobsGetMetricsCall struct { header_ http.Header } -// GetMetrics: Request the job status. -// -// To request the status of a job, we recommend -// using -// `projects.locations.jobs.getMetrics` with a [regional -// endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints). Using -// `projects.jobs.getMetrics` is not recommended, as you can only -// request the -// status of jobs that are running in `us-central1`. +// GetMetrics: Request the job status. To request the status of a job, +// we recommend using `projects.locations.jobs.getMetrics` with a +// [regional endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). +// Using `projects.jobs.getMetrics` is not recommended, as you can only +// request the status of jobs that are running in `us-central1`. func (r *ProjectsLocationsJobsService) GetMetrics(projectId string, location string, jobId string) *ProjectsLocationsJobsGetMetricsCall { c := &ProjectsLocationsJobsGetMetricsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -11370,8 +12739,8 @@ func (r *ProjectsLocationsJobsService) GetMetrics(projectId string, location str } // StartTime sets the optional parameter "startTime": Return only metric -// data that has changed since this time. -// Default is to return all information about all metrics for the job. +// data that has changed since this time. Default is to return all +// information about all metrics for the job. 
func (c *ProjectsLocationsJobsGetMetricsCall) StartTime(startTime string) *ProjectsLocationsJobsGetMetricsCall { c.urlParams_.Set("startTime", startTime) return c @@ -11414,7 +12783,7 @@ func (c *ProjectsLocationsJobsGetMetricsCall) Header() http.Header { func (c *ProjectsLocationsJobsGetMetricsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11478,7 +12847,7 @@ func (c *ProjectsLocationsJobsGetMetricsCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Request the job status.\n\nTo request the status of a job, we recommend using\n`projects.locations.jobs.getMetrics` with a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.getMetrics` is not recommended, as you can only request the\nstatus of jobs that are running in `us-central1`.", + // "description": "Request the job status. To request the status of a job, we recommend using `projects.locations.jobs.getMetrics` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.jobs.getMetrics` is not recommended, as you can only request the status of jobs that are running in `us-central1`.", // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/metrics", // "httpMethod": "GET", // "id": "dataflow.projects.locations.jobs.getMetrics", @@ -11489,13 +12858,13 @@ func (c *ProjectsLocationsJobsGetMetricsCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "jobId": { - // "description": "The job to get messages for.", + // "description": "The job to get metrics for.", // "location": "path", // "required": true, // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the job specified by job_id.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", // "location": "path", // "required": true, // "type": "string" @@ -11507,7 +12876,7 @@ func (c *ProjectsLocationsJobsGetMetricsCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "startTime": { - // "description": "Return only metric data that has changed since this time.\nDefault is to return all information about all metrics for the job.", + // "description": "Return only metric data that has changed since this time. Default is to return all information about all metrics for the job.", // "format": "google-datetime", // "location": "query", // "type": "string" @@ -11539,19 +12908,14 @@ type ProjectsLocationsJobsListCall struct { header_ http.Header } -// List: List the jobs of a project. -// -// To list the jobs of a project in a region, we recommend -// using -// `projects.locations.jobs.get` with a [regional -// endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints). To -// list the all jobs across all regions, use `projects.jobs.aggregated`. -// Using -// `projects.jobs.list` is not recommended, as you can only get the list -// of -// jobs that are running in `us-central1`. +// List: List the jobs of a project. 
To list the jobs of a project in a +// region, we recommend using `projects.locations.jobs.list` with a +// [regional endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). +// To list the all jobs across all regions, use +// `projects.jobs.aggregated`. Using `projects.jobs.list` is not +// recommended, as you can only get the list of jobs that are running in +// `us-central1`. func (r *ProjectsLocationsJobsService) List(projectId string, location string) *ProjectsLocationsJobsListCall { c := &ProjectsLocationsJobsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -11563,28 +12927,33 @@ func (r *ProjectsLocationsJobsService) List(projectId string, location string) * // use. // // Possible values: -// "UNKNOWN" -// "ALL" -// "TERMINATED" -// "ACTIVE" +// "UNKNOWN" - The filter isn't specified, or is unknown. This returns +// all jobs ordered on descending `JobUuid`. +// "ALL" - Returns all running jobs first ordered on creation +// timestamp, then returns all terminated jobs ordered on the +// termination timestamp. +// "TERMINATED" - Filters the jobs that have a terminated state, +// ordered on the termination timestamp. Example terminated states: +// `JOB_STATE_STOPPED`, `JOB_STATE_UPDATED`, `JOB_STATE_DRAINED`, etc. +// "ACTIVE" - Filters the jobs that are running ordered on the +// creation timestamp. func (c *ProjectsLocationsJobsListCall) Filter(filter string) *ProjectsLocationsJobsListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": If there are many -// jobs, limit response to at most this many. -// The actual number of jobs returned will be the lesser of -// max_responses -// and an unspecified server-defined limit. +// jobs, limit response to at most this many. The actual number of jobs +// returned will be the lesser of max_responses and an unspecified +// server-defined limit. func (c *ProjectsLocationsJobsListCall) PageSize(pageSize int64) *ProjectsLocationsJobsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Set this to the -// 'next_page_token' field of a previous response -// to request additional results in a long list. +// 'next_page_token' field of a previous response to request additional +// results in a long list. func (c *ProjectsLocationsJobsListCall) PageToken(pageToken string) *ProjectsLocationsJobsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -11594,10 +12963,15 @@ func (c *ProjectsLocationsJobsListCall) PageToken(pageToken string) *ProjectsLoc // requested in response. Default is `JOB_VIEW_SUMMARY`. // // Possible values: -// "JOB_VIEW_UNKNOWN" -// "JOB_VIEW_SUMMARY" -// "JOB_VIEW_ALL" -// "JOB_VIEW_DESCRIPTION" +// "JOB_VIEW_UNKNOWN" - The job view to return isn't specified, or is +// unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` +// information, and may contain additional information. +// "JOB_VIEW_SUMMARY" - Request summary information only: Project ID, +// Job ID, job name, job type, job status, start/end time, and Cloud SDK +// version details. +// "JOB_VIEW_ALL" - Request all information available for this job. +// "JOB_VIEW_DESCRIPTION" - Request summary info and limited job +// description data for steps, labels and environment. 
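// Illustrative usage sketch: listing a project's active jobs in one region
// with the recommended regional form, paging through results. Assumes an
// existing *dataflow.Service client, placeholder project/location values, and
// the Jobs field of the generated ListJobsResponse type; error handling is
// abbreviated.
func listActiveJobs(ctx context.Context, svc *dataflow.Service) ([]*dataflow.Job, error) {
	var jobs []*dataflow.Job
	err := svc.Projects.Locations.Jobs.
		List("my-project", "us-central1").
		Filter("ACTIVE"). // running jobs only, ordered by creation timestamp
		PageSize(100).
		Pages(ctx, func(page *dataflow.ListJobsResponse) error {
			jobs = append(jobs, page.Jobs...)
			return nil
		})
	return jobs, err
}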
func (c *ProjectsLocationsJobsListCall) View(view string) *ProjectsLocationsJobsListCall { c.urlParams_.Set("view", view) return c @@ -11640,7 +13014,7 @@ func (c *ProjectsLocationsJobsListCall) Header() http.Header { func (c *ProjectsLocationsJobsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11703,7 +13077,7 @@ func (c *ProjectsLocationsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJ } return ret, nil // { - // "description": "List the jobs of a project.\n\nTo list the jobs of a project in a region, we recommend using\n`projects.locations.jobs.get` with a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). To\nlist the all jobs across all regions, use `projects.jobs.aggregated`. Using\n`projects.jobs.list` is not recommended, as you can only get the list of\njobs that are running in `us-central1`.", + // "description": "List the jobs of a project. To list the jobs of a project in a region, we recommend using `projects.locations.jobs.list` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). To list the all jobs across all regions, use `projects.jobs.aggregated`. Using `projects.jobs.list` is not recommended, as you can only get the list of jobs that are running in `us-central1`.", // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs", // "httpMethod": "GET", // "id": "dataflow.projects.locations.jobs.list", @@ -11720,23 +13094,29 @@ func (c *ProjectsLocationsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJ // "TERMINATED", // "ACTIVE" // ], + // "enumDescriptions": [ + // "The filter isn't specified, or is unknown. This returns all jobs ordered on descending `JobUuid`.", + // "Returns all running jobs first ordered on creation timestamp, then returns all terminated jobs ordered on the termination timestamp.", + // "Filters the jobs that have a terminated state, ordered on the termination timestamp. Example terminated states: `JOB_STATE_STOPPED`, `JOB_STATE_UPDATED`, `JOB_STATE_DRAINED`, etc.", + // "Filters the jobs that are running ordered on the creation timestamp." + // ], // "location": "query", // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", // "location": "path", // "required": true, // "type": "string" // }, // "pageSize": { - // "description": "If there are many jobs, limit response to at most this many.\nThe actual number of jobs returned will be the lesser of max_responses\nand an unspecified server-defined limit.", + // "description": "If there are many jobs, limit response to at most this many. 
The actual number of jobs returned will be the lesser of max_responses and an unspecified server-defined limit.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Set this to the 'next_page_token' field of a previous response\nto request additional results in a long list.", + // "description": "Set this to the 'next_page_token' field of a previous response to request additional results in a long list.", // "location": "query", // "type": "string" // }, @@ -11754,6 +13134,12 @@ func (c *ProjectsLocationsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJ // "JOB_VIEW_ALL", // "JOB_VIEW_DESCRIPTION" // ], + // "enumDescriptions": [ + // "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", + // "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", + // "Request all information available for this job.", + // "Request summary info and limited job description data for steps, labels and environment." + // ], // "location": "query", // "type": "string" // } @@ -11843,7 +13229,7 @@ func (c *ProjectsLocationsJobsSnapshotCall) Header() http.Header { func (c *ProjectsLocationsJobsSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11968,17 +13354,12 @@ type ProjectsLocationsJobsUpdateCall struct { header_ http.Header } -// Update: Updates the state of an existing Cloud Dataflow job. -// -// To update the state of an existing job, we recommend -// using -// `projects.locations.jobs.update` with a [regional -// endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints). Using -// `projects.jobs.update` is not recommended, as you can only update the -// state -// of jobs that are running in `us-central1`. +// Update: Updates the state of an existing Cloud Dataflow job. To +// update the state of an existing job, we recommend using +// `projects.locations.jobs.update` with a [regional endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). +// Using `projects.jobs.update` is not recommended, as you can only +// update the state of jobs that are running in `us-central1`. 
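// Illustrative usage sketch: updating a job's state with the regional form
// described above. One common use is requesting cancellation by setting the
// job's requested state; the RequestedState field and JOB_STATE_CANCELLED
// value follow the generated package and are assumptions insofar as they are
// not shown in this hunk. Placeholder project/location values; error handling
// is abbreviated.
func cancelJob(svc *dataflow.Service, jobID string) (*dataflow.Job, error) {
	update := &dataflow.Job{RequestedState: "JOB_STATE_CANCELLED"}
	return svc.Projects.Locations.Jobs.
		Update("my-project", "us-central1", jobID, update).
		Do()
}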
func (r *ProjectsLocationsJobsService) Update(projectId string, location string, jobId string, job *Job) *ProjectsLocationsJobsUpdateCall { c := &ProjectsLocationsJobsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -12015,7 +13396,7 @@ func (c *ProjectsLocationsJobsUpdateCall) Header() http.Header { func (c *ProjectsLocationsJobsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12081,7 +13462,7 @@ func (c *ProjectsLocationsJobsUpdateCall) Do(opts ...googleapi.CallOption) (*Job } return ret, nil // { - // "description": "Updates the state of an existing Cloud Dataflow job.\n\nTo update the state of an existing job, we recommend using\n`projects.locations.jobs.update` with a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using\n`projects.jobs.update` is not recommended, as you can only update the state\nof jobs that are running in `us-central1`.", + // "description": "Updates the state of an existing Cloud Dataflow job. To update the state of an existing job, we recommend using `projects.locations.jobs.update` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.jobs.update` is not recommended, as you can only update the state of jobs that are running in `us-central1`.", // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}", // "httpMethod": "PUT", // "id": "dataflow.projects.locations.jobs.update", @@ -12098,7 +13479,7 @@ func (c *ProjectsLocationsJobsUpdateCall) Do(opts ...googleapi.CallOption) (*Job // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains this job.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.", // "location": "path", // "required": true, // "type": "string" @@ -12178,7 +13559,7 @@ func (c *ProjectsLocationsJobsDebugGetConfigCall) Header() http.Header { func (c *ProjectsLocationsJobsDebugGetConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12261,7 +13642,7 @@ func (c *ProjectsLocationsJobsDebugGetConfigCall) Do(opts ...googleapi.CallOptio // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the job specified by job_id.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", // "location": "path", // "required": true, // "type": "string" @@ -12340,7 +13721,7 @@ func (c *ProjectsLocationsJobsDebugSendCaptureCall) Header() http.Header { func (c *ProjectsLocationsJobsDebugSendCaptureCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") 
+ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12423,7 +13804,7 @@ func (c *ProjectsLocationsJobsDebugSendCaptureCall) Do(opts ...googleapi.CallOpt // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the job specified by job_id.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", // "location": "path", // "required": true, // "type": "string" @@ -12465,17 +13846,12 @@ type ProjectsLocationsJobsMessagesListCall struct { header_ http.Header } -// List: Request the job status. -// -// To request the status of a job, we recommend -// using -// `projects.locations.jobs.messages.list` with a [regional -// endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints). Using -// `projects.jobs.messages.list` is not recommended, as you can only -// request -// the status of jobs that are running in `us-central1`. +// List: Request the job status. To request the status of a job, we +// recommend using `projects.locations.jobs.messages.list` with a +// [regional endpoint] +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). +// Using `projects.jobs.messages.list` is not recommended, as you can +// only request the status of jobs that are running in `us-central1`. func (r *ProjectsLocationsJobsMessagesService) List(projectId string, location string, jobId string) *ProjectsLocationsJobsMessagesListCall { c := &ProjectsLocationsJobsMessagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -12485,8 +13861,8 @@ func (r *ProjectsLocationsJobsMessagesService) List(projectId string, location s } // EndTime sets the optional parameter "endTime": Return only messages -// with timestamps < end_time. The default is now -// (i.e. return up to the latest messages available). +// with timestamps < end_time. The default is now (i.e. return up to the +// latest messages available). func (c *ProjectsLocationsJobsMessagesListCall) EndTime(endTime string) *ProjectsLocationsJobsMessagesListCall { c.urlParams_.Set("endTime", endTime) return c @@ -12496,39 +13872,57 @@ func (c *ProjectsLocationsJobsMessagesListCall) EndTime(endTime string) *Project // Filter to only get messages with importance >= level // // Possible values: -// "JOB_MESSAGE_IMPORTANCE_UNKNOWN" -// "JOB_MESSAGE_DEBUG" -// "JOB_MESSAGE_DETAILED" -// "JOB_MESSAGE_BASIC" -// "JOB_MESSAGE_WARNING" -// "JOB_MESSAGE_ERROR" +// "JOB_MESSAGE_IMPORTANCE_UNKNOWN" - The message importance isn't +// specified, or is unknown. +// "JOB_MESSAGE_DEBUG" - The message is at the 'debug' level: +// typically only useful for software engineers working on the code the +// job is running. Typically, Dataflow pipeline runners do not display +// log messages at this level by default. +// "JOB_MESSAGE_DETAILED" - The message is at the 'detailed' level: +// somewhat verbose, but potentially useful to users. Typically, +// Dataflow pipeline runners do not display log messages at this level +// by default. These messages are displayed by default in the Dataflow +// monitoring UI. +// "JOB_MESSAGE_BASIC" - The message is at the 'basic' level: useful +// for keeping track of the execution of a Dataflow pipeline. 
Typically, +// Dataflow pipeline runners display log messages at this level by +// default, and these messages are displayed by default in the Dataflow +// monitoring UI. +// "JOB_MESSAGE_WARNING" - The message is at the 'warning' level: +// indicating a condition pertaining to a job which may require human +// intervention. Typically, Dataflow pipeline runners display log +// messages at this level by default, and these messages are displayed +// by default in the Dataflow monitoring UI. +// "JOB_MESSAGE_ERROR" - The message is at the 'error' level: +// indicating a condition preventing a job from succeeding. Typically, +// Dataflow pipeline runners display log messages at this level by +// default, and these messages are displayed by default in the Dataflow +// monitoring UI. func (c *ProjectsLocationsJobsMessagesListCall) MinimumImportance(minimumImportance string) *ProjectsLocationsJobsMessagesListCall { c.urlParams_.Set("minimumImportance", minimumImportance) return c } // PageSize sets the optional parameter "pageSize": If specified, -// determines the maximum number of messages to -// return. If unspecified, the service may choose an -// appropriate -// default, or may return an arbitrarily large number of results. +// determines the maximum number of messages to return. If unspecified, +// the service may choose an appropriate default, or may return an +// arbitrarily large number of results. func (c *ProjectsLocationsJobsMessagesListCall) PageSize(pageSize int64) *ProjectsLocationsJobsMessagesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": If supplied, this -// should be the value of next_page_token returned -// by an earlier call. This will cause the next page of results to -// be returned. +// should be the value of next_page_token returned by an earlier call. +// This will cause the next page of results to be returned. func (c *ProjectsLocationsJobsMessagesListCall) PageToken(pageToken string) *ProjectsLocationsJobsMessagesListCall { c.urlParams_.Set("pageToken", pageToken) return c } // StartTime sets the optional parameter "startTime": If specified, -// return only messages with timestamps >= start_time. -// The default is the job creation time (i.e. beginning of messages). +// return only messages with timestamps >= start_time. The default is +// the job creation time (i.e. beginning of messages). func (c *ProjectsLocationsJobsMessagesListCall) StartTime(startTime string) *ProjectsLocationsJobsMessagesListCall { c.urlParams_.Set("startTime", startTime) return c @@ -12571,7 +13965,7 @@ func (c *ProjectsLocationsJobsMessagesListCall) Header() http.Header { func (c *ProjectsLocationsJobsMessagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12635,7 +14029,7 @@ func (c *ProjectsLocationsJobsMessagesListCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Request the job status.\n\nTo request the status of a job, we recommend using\n`projects.locations.jobs.messages.list` with a [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). 
Using\n`projects.jobs.messages.list` is not recommended, as you can only request\nthe status of jobs that are running in `us-central1`.", + // "description": "Request the job status. To request the status of a job, we recommend using `projects.locations.jobs.messages.list` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.jobs.messages.list` is not recommended, as you can only request the status of jobs that are running in `us-central1`.", // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/messages", // "httpMethod": "GET", // "id": "dataflow.projects.locations.jobs.messages.list", @@ -12646,7 +14040,7 @@ func (c *ProjectsLocationsJobsMessagesListCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "endTime": { - // "description": "Return only messages with timestamps \u003c end_time. The default is now\n(i.e. return up to the latest messages available).", + // "description": "Return only messages with timestamps \u003c end_time. The default is now (i.e. return up to the latest messages available).", // "format": "google-datetime", // "location": "query", // "type": "string" @@ -12658,7 +14052,7 @@ func (c *ProjectsLocationsJobsMessagesListCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the job specified by job_id.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", // "location": "path", // "required": true, // "type": "string" @@ -12673,17 +14067,25 @@ func (c *ProjectsLocationsJobsMessagesListCall) Do(opts ...googleapi.CallOption) // "JOB_MESSAGE_WARNING", // "JOB_MESSAGE_ERROR" // ], + // "enumDescriptions": [ + // "The message importance isn't specified, or is unknown.", + // "The message is at the 'debug' level: typically only useful for software engineers working on the code the job is running. Typically, Dataflow pipeline runners do not display log messages at this level by default.", + // "The message is at the 'detailed' level: somewhat verbose, but potentially useful to users. Typically, Dataflow pipeline runners do not display log messages at this level by default. These messages are displayed by default in the Dataflow monitoring UI.", + // "The message is at the 'basic' level: useful for keeping track of the execution of a Dataflow pipeline. Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI.", + // "The message is at the 'warning' level: indicating a condition pertaining to a job which may require human intervention. Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI.", + // "The message is at the 'error' level: indicating a condition preventing a job from succeeding. Typically, Dataflow pipeline runners display log messages at this level by default, and these messages are displayed by default in the Dataflow monitoring UI." + // ], // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "If specified, determines the maximum number of messages to\nreturn. 
If unspecified, the service may choose an appropriate\ndefault, or may return an arbitrarily large number of results.", + // "description": "If specified, determines the maximum number of messages to return. If unspecified, the service may choose an appropriate default, or may return an arbitrarily large number of results.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "If supplied, this should be the value of next_page_token returned\nby an earlier call. This will cause the next page of results to\nbe returned.", + // "description": "If supplied, this should be the value of next_page_token returned by an earlier call. This will cause the next page of results to be returned.", // "location": "query", // "type": "string" // }, @@ -12694,7 +14096,7 @@ func (c *ProjectsLocationsJobsMessagesListCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "startTime": { - // "description": "If specified, return only messages with timestamps \u003e= start_time.\nThe default is the job creation time (i.e. beginning of messages).", + // "description": "If specified, return only messages with timestamps \u003e= start_time. The default is the job creation time (i.e. beginning of messages).", // "format": "google-datetime", // "location": "query", // "type": "string" @@ -12735,32 +14137,233 @@ func (c *ProjectsLocationsJobsMessagesListCall) Pages(ctx context.Context, f fun } } -// method id "dataflow.projects.locations.jobs.snapshots.list": +// method id "dataflow.projects.locations.jobs.snapshots.list": + +type ProjectsLocationsJobsSnapshotsListCall struct { + s *Service + projectId string + location string + jobId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists snapshots. +func (r *ProjectsLocationsJobsSnapshotsService) List(projectId string, location string, jobId string) *ProjectsLocationsJobsSnapshotsListCall { + c := &ProjectsLocationsJobsSnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.location = location + c.jobId = jobId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsJobsSnapshotsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsJobsSnapshotsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsJobsSnapshotsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsJobsSnapshotsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsJobsSnapshotsListCall) Context(ctx context.Context) *ProjectsLocationsJobsSnapshotsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
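// Illustrative usage sketch: listing the snapshots created from a job via the
// call defined below. Assumes an existing *dataflow.Service client and
// placeholder project/location values; error handling is abbreviated.
func listJobSnapshots(svc *dataflow.Service, jobID string) (*dataflow.ListSnapshotsResponse, error) {
	return svc.Projects.Locations.Jobs.Snapshots.
		List("my-project", "us-central1", jobID).
		Do()
}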
+func (c *ProjectsLocationsJobsSnapshotsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsJobsSnapshotsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/snapshots") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "location": c.location, + "jobId": c.jobId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataflow.projects.locations.jobs.snapshots.list" call. +// Exactly one of *ListSnapshotsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListSnapshotsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsJobsSnapshotsListCall) Do(opts ...googleapi.CallOption) (*ListSnapshotsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListSnapshotsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists snapshots.", + // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/snapshots", + // "httpMethod": "GET", + // "id": "dataflow.projects.locations.jobs.snapshots.list", + // "parameterOrder": [ + // "projectId", + // "location", + // "jobId" + // ], + // "parameters": { + // "jobId": { + // "description": "If specified, list snapshots created from this job.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "location": { + // "description": "The location to list snapshots in.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The project ID to list snapshots for.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/snapshots", + // "response": { + // "$ref": "ListSnapshotsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly", + // 
"https://www.googleapis.com/auth/userinfo.email" + // ] + // } + +} + +// method id "dataflow.projects.locations.jobs.stages.getExecutionDetails": -type ProjectsLocationsJobsSnapshotsListCall struct { +type ProjectsLocationsJobsStagesGetExecutionDetailsCall struct { s *Service projectId string location string jobId string + stageId string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Lists snapshots. -func (r *ProjectsLocationsJobsSnapshotsService) List(projectId string, location string, jobId string) *ProjectsLocationsJobsSnapshotsListCall { - c := &ProjectsLocationsJobsSnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetExecutionDetails: Request detailed information about the execution +// status of a stage of the job. EXPERIMENTAL. This API is subject to +// change or removal without notice. +func (r *ProjectsLocationsJobsStagesService) GetExecutionDetails(projectId string, location string, jobId string, stageId string) *ProjectsLocationsJobsStagesGetExecutionDetailsCall { + c := &ProjectsLocationsJobsStagesGetExecutionDetailsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId c.location = location c.jobId = jobId + c.stageId = stageId + return c +} + +// EndTime sets the optional parameter "endTime": Upper time bound of +// work items to include, by start time. +func (c *ProjectsLocationsJobsStagesGetExecutionDetailsCall) EndTime(endTime string) *ProjectsLocationsJobsStagesGetExecutionDetailsCall { + c.urlParams_.Set("endTime", endTime) + return c +} + +// PageSize sets the optional parameter "pageSize": If specified, +// determines the maximum number of work items to return. If +// unspecified, the service may choose an appropriate default, or may +// return an arbitrarily large number of results. +func (c *ProjectsLocationsJobsStagesGetExecutionDetailsCall) PageSize(pageSize int64) *ProjectsLocationsJobsStagesGetExecutionDetailsCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If supplied, this +// should be the value of next_page_token returned by an earlier call. +// This will cause the next page of results to be returned. +func (c *ProjectsLocationsJobsStagesGetExecutionDetailsCall) PageToken(pageToken string) *ProjectsLocationsJobsStagesGetExecutionDetailsCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// StartTime sets the optional parameter "startTime": Lower time bound +// of work items to include, by start time. +func (c *ProjectsLocationsJobsStagesGetExecutionDetailsCall) StartTime(startTime string) *ProjectsLocationsJobsStagesGetExecutionDetailsCall { + c.urlParams_.Set("startTime", startTime) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsLocationsJobsSnapshotsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsJobsSnapshotsListCall { +func (c *ProjectsLocationsJobsStagesGetExecutionDetailsCall) Fields(s ...googleapi.Field) *ProjectsLocationsJobsStagesGetExecutionDetailsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -12770,7 +14373,7 @@ func (c *ProjectsLocationsJobsSnapshotsListCall) Fields(s ...googleapi.Field) *P // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *ProjectsLocationsJobsSnapshotsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsJobsSnapshotsListCall { +func (c *ProjectsLocationsJobsStagesGetExecutionDetailsCall) IfNoneMatch(entityTag string) *ProjectsLocationsJobsStagesGetExecutionDetailsCall { c.ifNoneMatch_ = entityTag return c } @@ -12778,23 +14381,23 @@ func (c *ProjectsLocationsJobsSnapshotsListCall) IfNoneMatch(entityTag string) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsLocationsJobsSnapshotsListCall) Context(ctx context.Context) *ProjectsLocationsJobsSnapshotsListCall { +func (c *ProjectsLocationsJobsStagesGetExecutionDetailsCall) Context(ctx context.Context) *ProjectsLocationsJobsStagesGetExecutionDetailsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsLocationsJobsSnapshotsListCall) Header() http.Header { +func (c *ProjectsLocationsJobsStagesGetExecutionDetailsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsJobsSnapshotsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsJobsStagesGetExecutionDetailsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12805,7 +14408,7 @@ func (c *ProjectsLocationsJobsSnapshotsListCall) doRequest(alt string) (*http.Re var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/snapshots") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/stages/{stageId}/executionDetails") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -12816,18 +14419,19 @@ func (c *ProjectsLocationsJobsSnapshotsListCall) doRequest(alt string) (*http.Re "projectId": c.projectId, "location": c.location, "jobId": c.jobId, + "stageId": c.stageId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dataflow.projects.locations.jobs.snapshots.list" call. -// Exactly one of *ListSnapshotsResponse or error will be non-nil. Any +// Do executes the "dataflow.projects.locations.jobs.stages.getExecutionDetails" call. +// Exactly one of *StageExecutionDetails or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *ListSnapshotsResponse.ServerResponse.Header or (if a response was +// *StageExecutionDetails.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ProjectsLocationsJobsSnapshotsListCall) Do(opts ...googleapi.CallOption) (*ListSnapshotsResponse, error) { +func (c *ProjectsLocationsJobsStagesGetExecutionDetailsCall) Do(opts ...googleapi.CallOption) (*StageExecutionDetails, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -12846,7 +14450,7 @@ func (c *ProjectsLocationsJobsSnapshotsListCall) Do(opts ...googleapi.CallOption if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListSnapshotsResponse{ + ret := &StageExecutionDetails{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -12858,38 +14462,68 @@ func (c *ProjectsLocationsJobsSnapshotsListCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Lists snapshots.", - // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/snapshots", + // "description": "Request detailed information about the execution status of a stage of the job. EXPERIMENTAL. This API is subject to change or removal without notice.", + // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/stages/{stageId}/executionDetails", // "httpMethod": "GET", - // "id": "dataflow.projects.locations.jobs.snapshots.list", + // "id": "dataflow.projects.locations.jobs.stages.getExecutionDetails", // "parameterOrder": [ // "projectId", // "location", - // "jobId" + // "jobId", + // "stageId" // ], // "parameters": { + // "endTime": { + // "description": "Upper time bound of work items to include, by start time.", + // "format": "google-datetime", + // "location": "query", + // "type": "string" + // }, // "jobId": { - // "description": "If specified, list snapshots created from this job.", + // "description": "The job to get execution details for.", // "location": "path", // "required": true, // "type": "string" // }, // "location": { - // "description": "The location to list snapshots in.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id.", // "location": "path", // "required": true, // "type": "string" // }, + // "pageSize": { + // "description": "If specified, determines the maximum number of work items to return. If unspecified, the service may choose an appropriate default, or may return an arbitrarily large number of results.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "If supplied, this should be the value of next_page_token returned by an earlier call. 
This will cause the next page of results to be returned.", + // "location": "query", + // "type": "string" + // }, // "projectId": { - // "description": "The project ID to list snapshots for.", + // "description": "A project id.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "stageId": { + // "description": "The stage for which to fetch information.", // "location": "path", // "required": true, // "type": "string" + // }, + // "startTime": { + // "description": "Lower time bound of work items to include, by start time.", + // "format": "google-datetime", + // "location": "query", + // "type": "string" // } // }, - // "path": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/snapshots", + // "path": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/stages/{stageId}/executionDetails", // "response": { - // "$ref": "ListSnapshotsResponse" + // "$ref": "StageExecutionDetails" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -12901,6 +14535,27 @@ func (c *ProjectsLocationsJobsSnapshotsListCall) Do(opts ...googleapi.CallOption } +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsJobsStagesGetExecutionDetailsCall) Pages(ctx context.Context, f func(*StageExecutionDetails) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "dataflow.projects.locations.jobs.workItems.lease": type ProjectsLocationsJobsWorkItemsLeaseCall struct { @@ -12951,7 +14606,7 @@ func (c *ProjectsLocationsJobsWorkItemsLeaseCall) Header() http.Header { func (c *ProjectsLocationsJobsWorkItemsLeaseCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13034,7 +14689,7 @@ func (c *ProjectsLocationsJobsWorkItemsLeaseCall) Do(opts ...googleapi.CallOptio // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the WorkItem's job.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the WorkItem's job.", // "location": "path", // "required": true, // "type": "string" @@ -13114,7 +14769,7 @@ func (c *ProjectsLocationsJobsWorkItemsReportStatusCall) Header() http.Header { func (c *ProjectsLocationsJobsWorkItemsReportStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13197,7 +14852,7 @@ func (c *ProjectsLocationsJobsWorkItemsReportStatusCall) Do(opts ...googleapi.Ca // "type": "string" // }, // "location": { - // "description": "The [regional 
endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that\ncontains the WorkItem's job.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the WorkItem's job.", // "location": "path", // "required": true, // "type": "string" @@ -13274,7 +14929,7 @@ func (c *ProjectsLocationsSnapshotsDeleteCall) Header() http.Header { func (c *ProjectsLocationsSnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13437,7 +15092,7 @@ func (c *ProjectsLocationsSnapshotsGetCall) Header() http.Header { func (c *ProjectsLocationsSnapshotsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13608,7 +15263,7 @@ func (c *ProjectsLocationsSnapshotsListCall) Header() http.Header { func (c *ProjectsLocationsSnapshotsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13725,11 +15380,8 @@ type ProjectsLocationsSqlValidateCall struct { } // Validate: Validates a GoogleSQL query for Cloud Dataflow syntax. Will -// always -// confirm the given query parses correctly, and if able to look -// up -// schema information from DataCatalog, will validate that the -// query +// always confirm the given query parses correctly, and if able to look +// up schema information from DataCatalog, will validate that the query // analyzes properly as well. func (r *ProjectsLocationsSqlService) Validate(projectId string, location string) *ProjectsLocationsSqlValidateCall { c := &ProjectsLocationsSqlValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -13781,7 +15433,7 @@ func (c *ProjectsLocationsSqlValidateCall) Header() http.Header { func (c *ProjectsLocationsSqlValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13844,7 +15496,7 @@ func (c *ProjectsLocationsSqlValidateCall) Do(opts ...googleapi.CallOption) (*Va } return ret, nil // { - // "description": "Validates a GoogleSQL query for Cloud Dataflow syntax. Will always\nconfirm the given query parses correctly, and if able to look up\nschema information from DataCatalog, will validate that the query\nanalyzes properly as well.", + // "description": "Validates a GoogleSQL query for Cloud Dataflow syntax. 
Will always confirm the given query parses correctly, and if able to look up schema information from DataCatalog, will validate that the query analyzes properly as well.", // "flatPath": "v1b3/projects/{projectId}/locations/{location}/sql:validate", // "httpMethod": "GET", // "id": "dataflow.projects.locations.sql.validate", @@ -13854,7 +15506,7 @@ func (c *ProjectsLocationsSqlValidateCall) Do(opts ...googleapi.CallOption) (*Va // ], // "parameters": { // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to\nwhich to direct the request.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request.", // "location": "path", // "required": true, // "type": "string" @@ -13931,7 +15583,7 @@ func (c *ProjectsLocationsTemplatesCreateCall) Header() http.Header { func (c *ProjectsLocationsTemplatesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14006,7 +15658,7 @@ func (c *ProjectsLocationsTemplatesCreateCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to\nwhich to direct the request.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request.", // "location": "path", // "required": true, // "type": "string" @@ -14056,9 +15708,8 @@ func (r *ProjectsLocationsTemplatesService) Get(projectId string, location strin } // GcsPath sets the optional parameter "gcsPath": Required. A Cloud -// Storage path to the template from which to -// create the job. -// Must be valid Cloud Storage URL, beginning with 'gs://'. +// Storage path to the template from which to create the job. Must be +// valid Cloud Storage URL, beginning with 'gs://'. func (c *ProjectsLocationsTemplatesGetCall) GcsPath(gcsPath string) *ProjectsLocationsTemplatesGetCall { c.urlParams_.Set("gcsPath", gcsPath) return c @@ -14068,7 +15719,8 @@ func (c *ProjectsLocationsTemplatesGetCall) GcsPath(gcsPath string) *ProjectsLoc // Defaults to METADATA_ONLY. // // Possible values: -// "METADATA_ONLY" +// "METADATA_ONLY" - Template view that retrieves only the metadata +// associated with the template. func (c *ProjectsLocationsTemplatesGetCall) View(view string) *ProjectsLocationsTemplatesGetCall { c.urlParams_.Set("view", view) return c @@ -14111,7 +15763,7 @@ func (c *ProjectsLocationsTemplatesGetCall) Header() http.Header { func (c *ProjectsLocationsTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14184,12 +15836,12 @@ func (c *ProjectsLocationsTemplatesGetCall) Do(opts ...googleapi.CallOption) (*G // ], // "parameters": { // "gcsPath": { - // "description": "Required. 
A Cloud Storage path to the template from which to\ncreate the job.\nMust be valid Cloud Storage URL, beginning with 'gs://'.", + // "description": "Required. A Cloud Storage path to the template from which to create the job. Must be valid Cloud Storage URL, beginning with 'gs://'.", // "location": "query", // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to\nwhich to direct the request.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request.", // "location": "path", // "required": true, // "type": "string" @@ -14205,6 +15857,9 @@ func (c *ProjectsLocationsTemplatesGetCall) Do(opts ...googleapi.CallOption) (*G // "enum": [ // "METADATA_ONLY" // ], + // "enumDescriptions": [ + // "Template view that retrieves only the metadata associated with the template." + // ], // "location": "query", // "type": "string" // } @@ -14245,8 +15900,7 @@ func (r *ProjectsLocationsTemplatesService) Launch(projectId string, location st } // DynamicTemplateGcsPath sets the optional parameter -// "dynamicTemplate.gcsPath": Path to dynamic template spec file on -// GCS. +// "dynamicTemplate.gcsPath": Path to dynamic template spec file on GCS. // The file must be a Json serialized DynamicTemplateFieSpec object. func (c *ProjectsLocationsTemplatesLaunchCall) DynamicTemplateGcsPath(dynamicTemplateGcsPath string) *ProjectsLocationsTemplatesLaunchCall { c.urlParams_.Set("dynamicTemplate.gcsPath", dynamicTemplateGcsPath) @@ -14255,25 +15909,23 @@ func (c *ProjectsLocationsTemplatesLaunchCall) DynamicTemplateGcsPath(dynamicTem // DynamicTemplateStagingLocation sets the optional parameter // "dynamicTemplate.stagingLocation": Cloud Storage path for staging -// dependencies. -// Must be a valid Cloud Storage URL, beginning with `gs://`. +// dependencies. Must be a valid Cloud Storage URL, beginning with +// `gs://`. func (c *ProjectsLocationsTemplatesLaunchCall) DynamicTemplateStagingLocation(dynamicTemplateStagingLocation string) *ProjectsLocationsTemplatesLaunchCall { c.urlParams_.Set("dynamicTemplate.stagingLocation", dynamicTemplateStagingLocation) return c } // GcsPath sets the optional parameter "gcsPath": A Cloud Storage path -// to the template from which to create -// the job. -// Must be valid Cloud Storage URL, beginning with 'gs://'. +// to the template from which to create the job. Must be valid Cloud +// Storage URL, beginning with 'gs://'. func (c *ProjectsLocationsTemplatesLaunchCall) GcsPath(gcsPath string) *ProjectsLocationsTemplatesLaunchCall { c.urlParams_.Set("gcsPath", gcsPath) return c } // ValidateOnly sets the optional parameter "validateOnly": If true, the -// request is validated but not actually executed. -// Defaults to false. +// request is validated but not actually executed. Defaults to false. 
func (c *ProjectsLocationsTemplatesLaunchCall) ValidateOnly(validateOnly bool) *ProjectsLocationsTemplatesLaunchCall { c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) return c @@ -14306,7 +15958,7 @@ func (c *ProjectsLocationsTemplatesLaunchCall) Header() http.Header { func (c *ProjectsLocationsTemplatesLaunchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14381,22 +16033,22 @@ func (c *ProjectsLocationsTemplatesLaunchCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "dynamicTemplate.gcsPath": { - // "description": "Path to dynamic template spec file on GCS.\nThe file must be a Json serialized DynamicTemplateFieSpec object.", + // "description": "Path to dynamic template spec file on GCS. The file must be a Json serialized DynamicTemplateFieSpec object.", // "location": "query", // "type": "string" // }, // "dynamicTemplate.stagingLocation": { - // "description": "Cloud Storage path for staging dependencies.\nMust be a valid Cloud Storage URL, beginning with `gs://`.", + // "description": "Cloud Storage path for staging dependencies. Must be a valid Cloud Storage URL, beginning with `gs://`.", // "location": "query", // "type": "string" // }, // "gcsPath": { - // "description": "A Cloud Storage path to the template from which to create\nthe job.\nMust be valid Cloud Storage URL, beginning with 'gs://'.", + // "description": "A Cloud Storage path to the template from which to create the job. Must be valid Cloud Storage URL, beginning with 'gs://'.", // "location": "query", // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to\nwhich to direct the request.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request.", // "location": "path", // "required": true, // "type": "string" @@ -14408,7 +16060,7 @@ func (c *ProjectsLocationsTemplatesLaunchCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "validateOnly": { - // "description": "If true, the request is validated but not actually executed.\nDefaults to false.", + // "description": "If true, the request is validated but not actually executed. 
Defaults to false.", // "location": "query", // "type": "boolean" // } @@ -14494,7 +16146,7 @@ func (c *ProjectsSnapshotsGetCall) Header() http.Header { func (c *ProjectsSnapshotsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14667,7 +16319,7 @@ func (c *ProjectsSnapshotsListCall) Header() http.Header { func (c *ProjectsSnapshotsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14768,6 +16420,201 @@ func (c *ProjectsSnapshotsListCall) Do(opts ...googleapi.CallOption) (*ListSnaps } +// method id "dataflow.projects.templateVersions.list": + +type ProjectsTemplateVersionsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: List TemplateVersions using project_id and an optional +// display_name field. List all the TemplateVersions in the Template if +// display set. List all the TemplateVersions in the Project if +// display_name not set. +func (r *ProjectsTemplateVersionsService) List(parent string) *ProjectsTemplateVersionsListCall { + c := &ProjectsTemplateVersionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of TemplateVersions to return per page. +func (c *ProjectsTemplateVersionsListCall) PageSize(pageSize int64) *ProjectsTemplateVersionsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The page token, +// received from a previous ListTemplateVersions call. Provide this to +// retrieve the subsequent page. +func (c *ProjectsTemplateVersionsListCall) PageToken(pageToken string) *ProjectsTemplateVersionsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsTemplateVersionsListCall) Fields(s ...googleapi.Field) *ProjectsTemplateVersionsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsTemplateVersionsListCall) IfNoneMatch(entityTag string) *ProjectsTemplateVersionsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsTemplateVersionsListCall) Context(ctx context.Context) *ProjectsTemplateVersionsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsTemplateVersionsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsTemplateVersionsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1b3/{+parent}/templateVersions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dataflow.projects.templateVersions.list" call. +// Exactly one of *ListTemplateVersionsResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ListTemplateVersionsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsTemplateVersionsListCall) Do(opts ...googleapi.CallOption) (*ListTemplateVersionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListTemplateVersionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "List TemplateVersions using project_id and an optional display_name field. List all the TemplateVersions in the Template if display set. List all the TemplateVersions in the Project if display_name not set.", + // "flatPath": "v1b3/projects/{projectsId}/templateVersions", + // "httpMethod": "GET", + // "id": "dataflow.projects.templateVersions.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "The maximum number of TemplateVersions to return per page.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The page token, received from a previous ListTemplateVersions call. Provide this to retrieve the subsequent page.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "parent includes project_id, and display_name is optional. List by project_id(pid1) and display_name(tid1). 
Format: projects/{pid1}/catalogTemplates/{tid1} List by project_id(pid1). Format: projects/{pid1}", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1b3/{+parent}/templateVersions", + // "response": { + // "$ref": "ListTemplateVersionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/userinfo.email" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsTemplateVersionsListCall) Pages(ctx context.Context, f func(*ListTemplateVersionsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "dataflow.projects.templates.create": type ProjectsTemplatesCreateCall struct { @@ -14814,7 +16661,7 @@ func (c *ProjectsTemplatesCreateCall) Header() http.Header { func (c *ProjectsTemplatesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14929,9 +16776,8 @@ func (r *ProjectsTemplatesService) Get(projectId string) *ProjectsTemplatesGetCa } // GcsPath sets the optional parameter "gcsPath": Required. A Cloud -// Storage path to the template from which to -// create the job. -// Must be valid Cloud Storage URL, beginning with 'gs://'. +// Storage path to the template from which to create the job. Must be +// valid Cloud Storage URL, beginning with 'gs://'. func (c *ProjectsTemplatesGetCall) GcsPath(gcsPath string) *ProjectsTemplatesGetCall { c.urlParams_.Set("gcsPath", gcsPath) return c @@ -14939,9 +16785,8 @@ func (c *ProjectsTemplatesGetCall) GcsPath(gcsPath string) *ProjectsTemplatesGet // Location sets the optional parameter "location": The [regional // endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints) to -// which to direct the request. +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) +// to which to direct the request. func (c *ProjectsTemplatesGetCall) Location(location string) *ProjectsTemplatesGetCall { c.urlParams_.Set("location", location) return c @@ -14951,7 +16796,8 @@ func (c *ProjectsTemplatesGetCall) Location(location string) *ProjectsTemplatesG // Defaults to METADATA_ONLY. // // Possible values: -// "METADATA_ONLY" +// "METADATA_ONLY" - Template view that retrieves only the metadata +// associated with the template. 
func (c *ProjectsTemplatesGetCall) View(view string) *ProjectsTemplatesGetCall { c.urlParams_.Set("view", view) return c @@ -14994,7 +16840,7 @@ func (c *ProjectsTemplatesGetCall) Header() http.Header { func (c *ProjectsTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15065,12 +16911,12 @@ func (c *ProjectsTemplatesGetCall) Do(opts ...googleapi.CallOption) (*GetTemplat // ], // "parameters": { // "gcsPath": { - // "description": "Required. A Cloud Storage path to the template from which to\ncreate the job.\nMust be valid Cloud Storage URL, beginning with 'gs://'.", + // "description": "Required. A Cloud Storage path to the template from which to create the job. Must be valid Cloud Storage URL, beginning with 'gs://'.", // "location": "query", // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to\nwhich to direct the request.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request.", // "location": "query", // "type": "string" // }, @@ -15085,6 +16931,9 @@ func (c *ProjectsTemplatesGetCall) Do(opts ...googleapi.CallOption) (*GetTemplat // "enum": [ // "METADATA_ONLY" // ], + // "enumDescriptions": [ + // "Template view that retrieves only the metadata associated with the template." + // ], // "location": "query", // "type": "string" // } @@ -15123,8 +16972,7 @@ func (r *ProjectsTemplatesService) Launch(projectId string, launchtemplateparame } // DynamicTemplateGcsPath sets the optional parameter -// "dynamicTemplate.gcsPath": Path to dynamic template spec file on -// GCS. +// "dynamicTemplate.gcsPath": Path to dynamic template spec file on GCS. // The file must be a Json serialized DynamicTemplateFieSpec object. func (c *ProjectsTemplatesLaunchCall) DynamicTemplateGcsPath(dynamicTemplateGcsPath string) *ProjectsTemplatesLaunchCall { c.urlParams_.Set("dynamicTemplate.gcsPath", dynamicTemplateGcsPath) @@ -15133,17 +16981,16 @@ func (c *ProjectsTemplatesLaunchCall) DynamicTemplateGcsPath(dynamicTemplateGcsP // DynamicTemplateStagingLocation sets the optional parameter // "dynamicTemplate.stagingLocation": Cloud Storage path for staging -// dependencies. -// Must be a valid Cloud Storage URL, beginning with `gs://`. +// dependencies. Must be a valid Cloud Storage URL, beginning with +// `gs://`. func (c *ProjectsTemplatesLaunchCall) DynamicTemplateStagingLocation(dynamicTemplateStagingLocation string) *ProjectsTemplatesLaunchCall { c.urlParams_.Set("dynamicTemplate.stagingLocation", dynamicTemplateStagingLocation) return c } // GcsPath sets the optional parameter "gcsPath": A Cloud Storage path -// to the template from which to create -// the job. -// Must be valid Cloud Storage URL, beginning with 'gs://'. +// to the template from which to create the job. Must be valid Cloud +// Storage URL, beginning with 'gs://'. 
func (c *ProjectsTemplatesLaunchCall) GcsPath(gcsPath string) *ProjectsTemplatesLaunchCall { c.urlParams_.Set("gcsPath", gcsPath) return c @@ -15151,17 +16998,15 @@ func (c *ProjectsTemplatesLaunchCall) GcsPath(gcsPath string) *ProjectsTemplates // Location sets the optional parameter "location": The [regional // endpoint] -// (https://cloud.google.com/dataflow/docs/concepts/regional-en -// dpoints) to -// which to direct the request. +// (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) +// to which to direct the request. func (c *ProjectsTemplatesLaunchCall) Location(location string) *ProjectsTemplatesLaunchCall { c.urlParams_.Set("location", location) return c } // ValidateOnly sets the optional parameter "validateOnly": If true, the -// request is validated but not actually executed. -// Defaults to false. +// request is validated but not actually executed. Defaults to false. func (c *ProjectsTemplatesLaunchCall) ValidateOnly(validateOnly bool) *ProjectsTemplatesLaunchCall { c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) return c @@ -15194,7 +17039,7 @@ func (c *ProjectsTemplatesLaunchCall) Header() http.Header { func (c *ProjectsTemplatesLaunchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15267,22 +17112,22 @@ func (c *ProjectsTemplatesLaunchCall) Do(opts ...googleapi.CallOption) (*LaunchT // ], // "parameters": { // "dynamicTemplate.gcsPath": { - // "description": "Path to dynamic template spec file on GCS.\nThe file must be a Json serialized DynamicTemplateFieSpec object.", + // "description": "Path to dynamic template spec file on GCS. The file must be a Json serialized DynamicTemplateFieSpec object.", // "location": "query", // "type": "string" // }, // "dynamicTemplate.stagingLocation": { - // "description": "Cloud Storage path for staging dependencies.\nMust be a valid Cloud Storage URL, beginning with `gs://`.", + // "description": "Cloud Storage path for staging dependencies. Must be a valid Cloud Storage URL, beginning with `gs://`.", // "location": "query", // "type": "string" // }, // "gcsPath": { - // "description": "A Cloud Storage path to the template from which to create\nthe job.\nMust be valid Cloud Storage URL, beginning with 'gs://'.", + // "description": "A Cloud Storage path to the template from which to create the job. Must be valid Cloud Storage URL, beginning with 'gs://'.", // "location": "query", // "type": "string" // }, // "location": { - // "description": "The [regional endpoint]\n(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to\nwhich to direct the request.", + // "description": "The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request.", // "location": "query", // "type": "string" // }, @@ -15293,7 +17138,7 @@ func (c *ProjectsTemplatesLaunchCall) Do(opts ...googleapi.CallOption) (*LaunchT // "type": "string" // }, // "validateOnly": { - // "description": "If true, the request is validated but not actually executed.\nDefaults to false.", + // "description": "If true, the request is validated but not actually executed. 
Defaults to false.", // "location": "query", // "type": "boolean" // } diff --git a/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json b/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json index a5ca0752494..6200c09cfdc 100644 --- a/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json +++ b/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json @@ -121,7 +121,7 @@ ], "parameters": { "parent": { - "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.create, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.autoscalingPolicies.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -149,7 +149,7 @@ ], "parameters": { "name": { - "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/autoscalingPolicies/[^/]+$", "required": true, @@ -174,7 +174,7 @@ ], "parameters": { "name": { - "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/autoscalingPolicies/[^/]+$", "required": true, @@ -238,7 +238,7 @@ "type": "string" }, "parent": { - "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -254,7 +254,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/autoscalingPolicies/{autoscalingPoliciesId}:setIamPolicy", "httpMethod": "POST", "id": "dataproc.projects.locations.autoscalingPolicies.setIamPolicy", @@ -319,7 +319,7 @@ ], "parameters": { "name": { - "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/autoscalingPolicies/[^/]+$", "required": true, @@ -351,7 +351,7 @@ ], "parameters": { "parent": { - "description": "Required. 
The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,create, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -379,7 +379,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, @@ -410,7 +410,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, @@ -469,7 +469,7 @@ ], "parameters": { "name": { - "description": "Required. 
The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, @@ -497,7 +497,7 @@ ], "parameters": { "parent": { - "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -541,7 +541,7 @@ "type": "string" }, "parent": { - "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -557,7 +557,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy", "httpMethod": "POST", "id": "dataproc.projects.locations.workflowTemplates.setIamPolicy", @@ -622,7 +622,7 @@ ], "parameters": { "name": { - "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, @@ -658,7 +658,7 @@ ], "parameters": { "parent": { - "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.create, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.autoscalingPolicies.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+$", "required": true, @@ -686,7 +686,7 @@ ], "parameters": { "name": { - "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, @@ -711,7 +711,7 @@ ], "parameters": { "name": { - "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, @@ -775,7 +775,7 @@ "type": "string" }, "parent": { - "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+$", "required": true, @@ -791,7 +791,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:setIamPolicy", "httpMethod": "POST", "id": "dataproc.projects.regions.autoscalingPolicies.setIamPolicy", @@ -856,7 +856,7 @@ ], "parameters": { "name": { - "description": "Output only. 
The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, @@ -1160,7 +1160,7 @@ "type": "string" }, "updateMask": { - "description": "Required. Specifies the path, relative to Cluster, of the field to update. For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as config.worker_config.num_instances, and the PATCH request body would specify the new value, as follows:\n{\n \"config\":{\n \"workerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\nSimilarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the PATCH request body would be set as follows:\n{\n \"config\":{\n \"secondaryWorkerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\n\u003cstrong\u003eNote:\u003c/strong\u003e Currently, only the following fields can be updated:\u003ctable\u003e \u003ctbody\u003e \u003ctr\u003e \u003ctd\u003e\u003cstrong\u003eMask\u003c/strong\u003e\u003c/td\u003e \u003ctd\u003e\u003cstrong\u003ePurpose\u003c/strong\u003e\u003c/td\u003e \u003c/tr\u003e \u003ctr\u003e \u003ctd\u003e\u003cstrong\u003e\u003cem\u003elabels\u003c/em\u003e\u003c/strong\u003e\u003c/td\u003e \u003ctd\u003eUpdate labels\u003c/td\u003e \u003c/tr\u003e \u003ctr\u003e \u003ctd\u003e\u003cstrong\u003e\u003cem\u003econfig.worker_config.num_instances\u003c/em\u003e\u003c/strong\u003e\u003c/td\u003e \u003ctd\u003eResize primary worker group\u003c/td\u003e \u003c/tr\u003e \u003ctr\u003e \u003ctd\u003e\u003cstrong\u003e\u003cem\u003econfig.secondary_worker_config.num_instances\u003c/em\u003e\u003c/strong\u003e\u003c/td\u003e \u003ctd\u003eResize secondary worker group\u003c/td\u003e \u003c/tr\u003e \u003ctr\u003e \u003ctd\u003econfig.autoscaling_config.policy_uri\u003c/td\u003e\u003ctd\u003eUse, stop using, or change autoscaling policies\u003c/td\u003e \u003c/tr\u003e \u003c/tbody\u003e \u003c/table\u003e", + "description": "Required. Specifies the path, relative to Cluster, of the field to update. 
For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as config.worker_config.num_instances, and the PATCH request body would specify the new value, as follows: { \"config\":{ \"workerConfig\":{ \"numInstances\":\"5\" } } } Similarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the PATCH request body would be set as follows: { \"config\":{ \"secondaryWorkerConfig\":{ \"numInstances\":\"5\" } } } *Note:* Currently, only the following fields can be updated: *Mask* *Purpose* *labels* Update labels *config.worker_config.num_instances* Resize primary worker group *config.secondary_worker_config.num_instances* Resize secondary worker group config.autoscaling_config.policy_uri Use, stop using, or change autoscaling policies ", "format": "google-fieldmask", "location": "query", "type": "string" @@ -1178,7 +1178,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:setIamPolicy", "httpMethod": "POST", "id": "dataproc.projects.regions.clusters.setIamPolicy", @@ -1409,6 +1409,11 @@ "ACTIVE", "NON_ACTIVE" ], + "enumDescriptions": [ + "Match all jobs, regardless of state.", + "Only match jobs in non-terminal states: PENDING, RUNNING, or CANCEL_PENDING.", + "Only match jobs in terminal states: CANCELLED, DONE, or ERROR." + ], "location": "query", "type": "string" }, @@ -1474,7 +1479,7 @@ "type": "string" }, "updateMask": { - "description": "Required. Specifies the path, relative to \u003ccode\u003eJob\u003c/code\u003e, of the field to update. For example, to update the labels of a Job the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be specified as \u003ccode\u003elabels\u003c/code\u003e, and the PATCH request body would specify the new value. \u003cstrong\u003eNote:\u003c/strong\u003e Currently, \u003ccode\u003elabels\u003c/code\u003e is the only field that can be updated.", + "description": "Required. Specifies the path, relative to Job, of the field to update. For example, to update the labels of a Job the update_mask parameter would be specified as labels, and the PATCH request body would specify the new value. *Note:* Currently, labels is the only field that can be updated.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -1492,7 +1497,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:setIamPolicy", "httpMethod": "POST", "id": "dataproc.projects.regions.jobs.setIamPolicy", @@ -1764,7 +1769,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:setIamPolicy", "httpMethod": "POST", "id": "dataproc.projects.regions.operations.setIamPolicy", @@ -1833,7 +1838,7 @@ ], "parameters": { "parent": { - "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,create, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+$", "required": true, @@ -1861,7 +1866,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", "required": true, @@ -1892,7 +1897,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", "required": true, @@ -1951,7 +1956,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", "required": true, @@ -1979,7 +1984,7 @@ ], "parameters": { "parent": { - "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+$", "required": true, @@ -2023,7 +2028,7 @@ "type": "string" }, "parent": { - "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+$", "required": true, @@ -2039,7 +2044,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy", "httpMethod": "POST", "id": "dataproc.projects.regions.workflowTemplates.setIamPolicy", @@ -2104,7 +2109,7 @@ ], "parameters": { "name": { - "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", "required": true, @@ -2129,7 +2134,7 @@ } } }, - "revision": "20200409", + "revision": "20200925", "rootUrl": "https://dataproc.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -2142,7 +2147,7 @@ "type": "integer" }, "acceleratorTypeUri": { - "description": "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes).Examples:\nhttps://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80\nprojects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80\nnvidia-tesla-k80Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.", + "description": "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. 
See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes).Examples: https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80 projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80 nvidia-tesla-k80Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.", "type": "string" } }, @@ -2153,7 +2158,7 @@ "id": "AutoscalingConfig", "properties": { "policyUri": { - "description": "Optional. The autoscaling policy used by the cluster.Only resource names including projectid and location (region) are valid. Examples:\nhttps://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]\nprojects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Dataproc region.", + "description": "Optional. The autoscaling policy used by the cluster.Only resource names including projectid and location (region) are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Dataproc region.", "type": "string" } }, @@ -2171,7 +2176,8 @@ "type": "string" }, "name": { - "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "readOnly": true, "type": "string" }, "secondaryWorkerConfig": { @@ -2211,7 +2217,7 @@ "type": "string" }, "scaleDownFactor": { - "description": "Required. Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job.Bounds: 0.0, 1.0.", + "description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. 
See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.", "format": "double", "type": "number" }, @@ -2221,7 +2227,7 @@ "type": "number" }, "scaleUpFactor": { - "description": "Required. Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.", + "description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.", "format": "double", "type": "number" }, @@ -2237,12 +2243,16 @@ "description": "Associates members with a role.", "id": "Binding", "properties": { + "bindingId": { + "description": "A client-specified ID for this binding. Expected to be globally unique to support the internal bindings-by-ID API.", + "type": "string" + }, "condition": { "$ref": "Expr", - "description": "The condition that is associated with this binding. NOTE: An unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently." + "description": "The condition that is associated with this binding.If the condition evaluates to true, then this binding applies to the current request.If the condition evaluates to false, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource. members can have the following values:\nallUsers: A special identifier that represents anyone who is on the internet; with or without a Google account.\nallAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account.\nuser:{emailid}: An email address that represents a specific Google account. For example, alice@example.com .\nserviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.\ngroup:{emailid}: An email address that represents a Google group. For example, admins@example.com.\ndeleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding.\ndeleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. 
For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding.\ndeleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.\ndomain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.", + "description": "Specifies the identities requesting access for a Cloud Platform resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.", "items": { "type": "string" }, @@ -2271,6 +2281,7 @@ }, "clusterUuid": { "description": "Output only. A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster.", + "readOnly": true, "type": "string" }, "config": { @@ -2286,7 +2297,8 @@ }, "metrics": { "$ref": "ClusterMetrics", - "description": "Output only. Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release." + "description": "Output only. Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release.", + "readOnly": true }, "projectId": { "description": "Required. The Google Cloud Platform project ID that the cluster belongs to.", @@ -2294,13 +2306,15 @@ }, "status": { "$ref": "ClusterStatus", - "description": "Output only. Cluster status." 
+ "description": "Output only. Cluster status.", + "readOnly": true }, "statusHistory": { "description": "Output only. The previous cluster status.", "items": { "$ref": "ClusterStatus" }, + "readOnly": true, "type": "array" } }, @@ -2322,12 +2336,16 @@ "$ref": "EncryptionConfig", "description": "Optional. Encryption settings for the cluster." }, + "endpointConfig": { + "$ref": "EndpointConfig", + "description": "Optional. Port/endpoint configuration for this cluster" + }, "gceClusterConfig": { "$ref": "GceClusterConfig", "description": "Optional. The shared Compute Engine config settings for all instances in a cluster." }, "initializationActions": { - "description": "Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget):\nROLE=$(curl -H Metadata-Flavor:Google\nhttp://metadata/computeMetadata/v1/instance/attributes/dataproc-role)\nif [[ \"${ROLE}\" == 'Master' ]]; then\n ... master specific actions ...\nelse\n ... worker specific actions ...\nfi\n", + "description": "Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi ", "items": { "$ref": "NodeInitializationAction" }, @@ -2353,6 +2371,10 @@ "$ref": "SoftwareConfig", "description": "Optional. The config settings for software inside the cluster." }, + "tempBucket": { + "description": "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.", + "type": "string" + }, "workerConfig": { "$ref": "InstanceGroupConfig", "description": "Optional. The Compute Engine config settings for worker instances in a cluster." @@ -2389,14 +2411,17 @@ "properties": { "done": { "description": "Output only. Indicates the operation is done.", + "readOnly": true, "type": "boolean" }, "error": { "description": "Output only. Error, if operation failed.", + "readOnly": true, "type": "string" }, "operationId": { "description": "Output only. The id of the cluster operation.", + "readOnly": true, "type": "string" } }, @@ -2408,14 +2433,17 @@ "properties": { "clusterName": { "description": "Output only. Name of the cluster for the operation.", + "readOnly": true, "type": "string" }, "clusterUuid": { "description": "Output only. Cluster UUID for the operation.", + "readOnly": true, "type": "string" }, "description": { "description": "Output only. Short description of operation.", + "readOnly": true, "type": "string" }, "labels": { @@ -2423,21 +2451,25 @@ "type": "string" }, "description": "Output only. 
Labels associated with the operation", + "readOnly": true, "type": "object" }, "operationType": { "description": "Output only. The operation type.", + "readOnly": true, "type": "string" }, "status": { "$ref": "ClusterOperationStatus", - "description": "Output only. Current operation status." + "description": "Output only. Current operation status.", + "readOnly": true }, "statusHistory": { "description": "Output only. The previous operation status.", "items": { "$ref": "ClusterOperationStatus" }, + "readOnly": true, "type": "array" }, "warnings": { @@ -2445,6 +2477,7 @@ "items": { "type": "string" }, + "readOnly": true, "type": "array" } }, @@ -2456,10 +2489,12 @@ "properties": { "details": { "description": "Output only. A message containing any operation metadata details.", + "readOnly": true, "type": "string" }, "innerState": { "description": "Output only. A message containing the detailed operation state.", + "readOnly": true, "type": "string" }, "state": { @@ -2476,11 +2511,13 @@ "The operation is running.", "The operation is done; either cancelled or completed." ], + "readOnly": true, "type": "string" }, "stateStartTime": { "description": "Output only. The time this state was entered.", "format": "google-datetime", + "readOnly": true, "type": "string" } }, @@ -2510,6 +2547,7 @@ "properties": { "detail": { "description": "Optional. Output only. Details of cluster's state.", + "readOnly": true, "type": "string" }, "state": { @@ -2530,11 +2568,13 @@ "The cluster is being deleted. It cannot be used.", "The cluster is being updated. It continues to accept and process jobs." ], + "readOnly": true, "type": "string" }, "stateStartTime": { "description": "Output only. Time when this state was entered (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).", "format": "google-datetime", + "readOnly": true, "type": "string" }, "substate": { @@ -2549,6 +2589,7 @@ "The cluster is known to be in an unhealthy state (for example, critical daemons are not running or HDFS capacity is exhausted).Applies to RUNNING state.", "The agent-reported status is out of date (may occur if Dataproc loses communication with Agent).Applies to RUNNING state." ], + "readOnly": true, "type": "string" } }, @@ -2566,6 +2607,7 @@ "properties": { "outputUri": { "description": "Output only. The Cloud Storage URI of the diagnostic output. The output report is a plain text file with a summary of collected diagnostics.", + "readOnly": true, "type": "string" } }, @@ -2593,7 +2635,7 @@ "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. 
For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}.", "id": "Empty", "properties": {}, "type": "object" @@ -2609,8 +2651,27 @@ }, "type": "object" }, + "EndpointConfig": { + "description": "Endpoint config for this cluster", + "id": "EndpointConfig", + "properties": { + "enableHttpPortAccess": { + "description": "Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.", + "type": "boolean" + }, + "httpPorts": { + "additionalProperties": { + "type": "string" + }, + "description": "Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.", + "readOnly": true, + "type": "object" + } + }, + "type": "object" + }, "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec.Example (Comparison):\ntitle: \"Summary size limit\"\ndescription: \"Determines if a summary is less than 100 chars\"\nexpression: \"document.summary.size() \u003c 100\"\nExample (Equality):\ntitle: \"Requestor is owner\"\ndescription: \"Determines if requestor is the document owner\"\nexpression: \"document.owner == request.auth.claims.email\"\nExample (Logic):\ntitle: \"Public documents\"\ndescription: \"Determine whether the document should be publicly visible\"\nexpression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\nExample (Data Manipulation):\ntitle: \"Notification string\"\ndescription: \"Create a notification string with a timestamp.\"\nexpression: \"'New message received at ' + string(document.create_time)\"\nThe exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec.Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { @@ -2648,7 +2709,27 @@ "type": "object" }, "networkUri": { - "description": "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the \"default\" network of the project is used, if it exists. 
Cannot be a \"Custom Subnet Network\" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. Examples:\nhttps://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default\nprojects/[project_id]/regions/global/default\ndefault", + "description": "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default projects/[project_id]/regions/global/default default", + "type": "string" + }, + "nodeGroupAffinity": { + "$ref": "NodeGroupAffinity", + "description": "Optional. Node Group Affinity for sole-tenant clusters." + }, + "privateIpv6GoogleAccess": { + "description": "Optional. The type of IPv6 access for a cluster.", + "enum": [ + "PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED", + "INHERIT_FROM_SUBNETWORK", + "OUTBOUND", + "BIDIRECTIONAL" + ], + "enumDescriptions": [ + "If unspecified, Compute Engine default behavior will apply, which is the same as INHERIT_FROM_SUBNETWORK.", + "Private access to and from Google Services configuration inherited from the subnetwork configuration. This is the default Compute Engine behavior.", + "Enables outbound private IPv6 access to Google Services from the Dataproc cluster.", + "Enables bidirectional private IPv6 access between Google Services and the Dataproc cluster." + ], "type": "string" }, "reservationAffinity": { @@ -2656,18 +2737,18 @@ "description": "Optional. Reservation Affinity for consuming Zonal reservation." }, "serviceAccount": { - "description": "Optional. The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services.If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", + "description": "Optional. The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services.If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", "type": "string" }, "serviceAccountScopes": { - "description": "Optional. The URIs of service account scopes to be included in Compute Engine instances. 
The following base set of scopes is always included:\nhttps://www.googleapis.com/auth/cloud.useraccounts.readonly\nhttps://www.googleapis.com/auth/devstorage.read_write\nhttps://www.googleapis.com/auth/logging.writeIf no scopes are specified, the following defaults are also provided:\nhttps://www.googleapis.com/auth/bigquery\nhttps://www.googleapis.com/auth/bigtable.admin.table\nhttps://www.googleapis.com/auth/bigtable.data\nhttps://www.googleapis.com/auth/devstorage.full_control", + "description": "Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly https://www.googleapis.com/auth/devstorage.read_write https://www.googleapis.com/auth/logging.writeIf no scopes are specified, the following defaults are also provided: https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/bigtable.admin.table https://www.googleapis.com/auth/bigtable.data https://www.googleapis.com/auth/devstorage.full_control", "items": { "type": "string" }, "type": "array" }, "subnetworkUri": { - "description": "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri.A full URL, partial URI, or short name are valid. Examples:\nhttps://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0\nprojects/[project_id]/regions/us-east1/subnetworks/sub0\nsub0", + "description": "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0 projects/[project_id]/regions/us-east1/subnetworks/sub0 sub0", "type": "string" }, "tags": { @@ -2678,7 +2759,7 @@ "type": "array" }, "zoneUri": { - "description": "Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the \"global\" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present.A full URL, partial URI, or short name are valid. Examples:\nhttps://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]\nprojects/[project_id]/zones/[zone]\nus-central1-f", + "description": "Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the \"global\" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] us-central1-f", "type": "string" } }, @@ -2690,7 +2771,7 @@ "properties": { "options": { "$ref": "GetPolicyOptions", - "description": "OPTIONAL: A GetPolicyOptions object for specifying options to GetIamPolicy. This field is only used by Cloud IAM." + "description": "OPTIONAL: A GetPolicyOptions object for specifying options to GetIamPolicy." } }, "type": "object" @@ -2700,7 +2781,7 @@ "id": "GetPolicyOptions", "properties": { "requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. 
Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.", + "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -2748,7 +2829,7 @@ "type": "string" }, "mainJarFileUri": { - "description": "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'", + "description": "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'", "type": "string" }, "properties": { @@ -2839,7 +2920,7 @@ "description": "Optional. Disk option config settings." }, "imageUri": { - "description": "Optional. The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples:\nhttps://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]\nprojects/[project_id]/global/images/[image-id]\nimage-idImage family examples. Dataproc will use the most recent image from the family:\nhttps://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]\nprojects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.", + "description": "Optional. The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.", "type": "string" }, "instanceNames": { @@ -2847,22 +2928,33 @@ "items": { "type": "string" }, + "readOnly": true, + "type": "array" + }, + "instanceReferences": { + "description": "Output only. List of references to Compute Engine instances.", + "items": { + "$ref": "InstanceReference" + }, + "readOnly": true, "type": "array" }, "isPreemptible": { "description": "Output only. Specifies that this instance group contains preemptible instances.", + "readOnly": true, "type": "boolean" }, "machineTypeUri": { - "description": "Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. 
Examples:\nhttps://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2\nprojects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2\nn1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.", + "description": "Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.", "type": "string" }, "managedGroupConfig": { "$ref": "ManagedGroupConfig", - "description": "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups." + "description": "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + "readOnly": true }, "minCpuPlatform": { - "description": "Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -\u0026gt; Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + "description": "Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -\u003e Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", "type": "string" }, "numInstances": { @@ -2887,6 +2979,21 @@ }, "type": "object" }, + "InstanceReference": { + "description": "A reference to a Compute Engine instance.", + "id": "InstanceReference", + "properties": { + "instanceId": { + "description": "The unique identifier of the Compute Engine instance.", + "type": "string" + }, + "instanceName": { + "description": "The user-friendly name of the Compute Engine instance.", + "type": "string" + } + }, + "type": "object" + }, "InstantiateWorkflowTemplateRequest": { "description": "A request to instantiate a workflow template.", "id": "InstantiateWorkflowTemplateRequest", @@ -2895,7 +3002,7 @@ "additionalProperties": { "type": "string" }, - "description": "Optional. Map from parameter names to values that should be used for those parameters. Values may not exceed 100 characters.", + "description": "Optional. Map from parameter names to values that should be used for those parameters. Values may not exceed 1000 characters.", "type": "object" }, "requestId": { @@ -2916,14 +3023,17 @@ "properties": { "done": { "description": "Output only. Indicates whether the job is completed. If the value is false, the job is still in progress. If true, the job is completed, and status.state field will indicate if it was successful, failed, or cancelled.", + "readOnly": true, "type": "boolean" }, "driverControlFilesUri": { "description": "Output only. If present, the location of miscellaneous control files which may be used as part of job setup and handling. 
If not present, control files may be placed in the same location as driver_output_uri.", + "readOnly": true, "type": "string" }, "driverOutputResourceUri": { "description": "Output only. A URI pointing to the location of the stdout of the job's driver program.", + "readOnly": true, "type": "string" }, "hadoopJob": { @@ -2936,6 +3046,7 @@ }, "jobUuid": { "description": "Output only. A UUID that uniquely identifies a job within the project over time. This is in contrast to a user-settable reference.job_id that may be reused over time.", + "readOnly": true, "type": "string" }, "labels": { @@ -2963,7 +3074,7 @@ }, "reference": { "$ref": "JobReference", - "description": "Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a \u003ccode\u003ejob_id\u003c/code\u003e." + "description": "Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id." }, "scheduling": { "$ref": "JobScheduling", @@ -2983,13 +3094,15 @@ }, "status": { "$ref": "JobStatus", - "description": "Output only. The job status. Additional application-specific status information may be contained in the \u003ccode\u003etype_job\u003c/code\u003e and \u003ccode\u003eyarn_applications\u003c/code\u003e fields." + "description": "Output only. The job status. Additional application-specific status information may be contained in the type_job and yarn_applications fields.", + "readOnly": true }, "statusHistory": { "description": "Output only. The previous job status.", "items": { "$ref": "JobStatus" }, + "readOnly": true, "type": "array" }, "yarnApplications": { @@ -2997,6 +3110,7 @@ "items": { "$ref": "YarnApplication" }, + "readOnly": true, "type": "array" } }, @@ -3008,20 +3122,24 @@ "properties": { "jobId": { "description": "Output only. The job id.", + "readOnly": true, "type": "string" }, "operationType": { "description": "Output only. Operation type.", + "readOnly": true, "type": "string" }, "startTime": { "description": "Output only. Job submission time.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "status": { "$ref": "JobStatus", - "description": "Output only. Most recent job status." + "description": "Output only. Most recent job status.", + "readOnly": true } }, "type": "object" @@ -3036,6 +3154,7 @@ }, "clusterUuid": { "description": "Output only. A cluster UUID generated by the Dataproc service when the job is submitted.", + "readOnly": true, "type": "string" } }, @@ -3050,7 +3169,7 @@ "type": "string" }, "projectId": { - "description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", + "description": "Optional. The ID of the Google Cloud Platform project that the job belongs to. If specified, must match the request project ID.", "type": "string" } }, @@ -3061,7 +3180,7 @@ "id": "JobScheduling", "properties": { "maxFailuresPerHour": { - "description": "Optional. Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window.Maximum value is 10.", + "description": "Optional. 
Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed.A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window.Maximum value is 10.", "format": "int32", "type": "integer" } @@ -3073,7 +3192,8 @@ "id": "JobStatus", "properties": { "details": { - "description": "Optional. Output only. Job state details, such as an error description if the state is \u003ccode\u003eERROR\u003c/code\u003e.", + "description": "Optional. Output only. Job state details, such as an error description if the state is ERROR.", + "readOnly": true, "type": "string" }, "state": { @@ -3102,11 +3222,13 @@ "The job has completed, but encountered an error.", "Job attempt has failed. The detail field contains failure details for this attempt.Applies to restartable jobs only." ], + "readOnly": true, "type": "string" }, "stateStartTime": { "description": "Output only. The time when this state was entered.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "substate": { @@ -3123,6 +3245,7 @@ "The Job has been received and is awaiting execution (it may be waiting for a condition to be met). See the \"details\" field for the reason for the delay.Applies to RUNNING state.", "The agent-reported status is out of date, which may be caused by a loss of communication between the agent and Dataproc. If the agent does not send a timely update, the job will fail.Applies to RUNNING state." ], + "readOnly": true, "type": "string" } }, @@ -3218,6 +3341,7 @@ "idleStartTime": { "description": "Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).", "format": "google-datetime", + "readOnly": true, "type": "string" } }, @@ -3229,6 +3353,7 @@ "properties": { "nextPageToken": { "description": "Output only. This token is included in the response if there are more results to fetch.", + "readOnly": true, "type": "string" }, "policies": { @@ -3236,6 +3361,7 @@ "items": { "$ref": "AutoscalingPolicy" }, + "readOnly": true, "type": "array" } }, @@ -3250,10 +3376,12 @@ "items": { "$ref": "Cluster" }, + "readOnly": true, "type": "array" }, "nextPageToken": { "description": "Output only. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListClustersRequest.", + "readOnly": true, "type": "string" } }, @@ -3268,10 +3396,11 @@ "items": { "$ref": "Job" }, + "readOnly": true, "type": "array" }, "nextPageToken": { - "description": "Optional. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent \u003ccode\u003eListJobsRequest\u003c/code\u003e.", + "description": "Optional. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListJobsRequest.", "type": "string" } }, @@ -3300,7 +3429,8 @@ "id": "ListWorkflowTemplatesResponse", "properties": { "nextPageToken": { - "description": "Output only. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent \u003ccode\u003eListWorkflowTemplatesRequest\u003c/code\u003e.", + "description": "Output only. 
This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListWorkflowTemplatesRequest.", + "readOnly": true, "type": "string" }, "templates": { @@ -3308,6 +3438,7 @@ "items": { "$ref": "WorkflowTemplate" }, + "readOnly": true, "type": "array" } }, @@ -3330,9 +3461,20 @@ "FATAL", "OFF" ], + "enumDescriptions": [ + "Level is unspecified. Use default level for log4j.", + "Use ALL level for log4j.", + "Use TRACE level for log4j.", + "Use DEBUG level for log4j.", + "Use INFO level for log4j.", + "Use WARN level for log4j.", + "Use ERROR level for log4j.", + "Use FATAL level for log4j.", + "Turn off log4j." + ], "type": "string" }, - "description": "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + "description": "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", "type": "object" } }, @@ -3366,10 +3508,23 @@ "properties": { "instanceGroupManagerName": { "description": "Output only. The name of the Instance Group Manager for this group.", + "readOnly": true, "type": "string" }, "instanceTemplateName": { "description": "Output only. The name of the Instance Template used for the Managed Instance Group.", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "NodeGroupAffinity": { + "description": "Node Group Affinity for clusters using sole-tenant node groups.", + "id": "NodeGroupAffinity", + "properties": { + "nodeGroupUri": { + "description": "Required. The name of a single node group (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) a cluster will be created on.", "type": "string" } }, @@ -3547,7 +3702,7 @@ "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources.A Policy is a collection of bindings. A binding binds one or more members to a single role. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A role is a named list of permissions; each role can be an IAM predefined role or a user-created custom role.Optionally, a binding can specify a condition, which is a logical expression that allows access to a resource only if the expression evaluates to true. 
A condition can add constraints based on attributes of the request, the resource, or both.JSON example:\n{\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\"user:eve@example.com\"],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n}\nYAML example:\nbindings:\n- members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n- members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n- etag: BwWWja0YfJA=\n- version: 3\nFor a description of IAM and its features, see the IAM documentation (https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources.A Policy is a collection of bindings. A binding binds one or more members to a single role. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A role is a named list of permissions; each role can be an IAM predefined role or a user-created custom role.For some types of Google Cloud resources, a binding can also specify a condition, which is a logical expression that allows access to a resource only if the expression evaluates to true. A condition can add constraints based on attributes of the request, the resource, or both. 
To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).JSON example: { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } YAML example: bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the IAM documentation (https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "bindings": { @@ -3563,7 +3718,7 @@ "type": "string" }, "version": { - "description": "Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations:\nGetting a policy that includes a conditional role binding\nAdding a conditional role binding to a policy\nChanging a conditional role binding in a policy\nRemoving any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.", + "description": "Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -3616,7 +3771,7 @@ "id": "PySparkJob", "properties": { "archiveUris": { - "description": "Optional. 
HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.", + "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", "items": { "type": "string" }, @@ -3630,7 +3785,7 @@ "type": "array" }, "fileUris": { - "description": "Optional. HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.", + "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", "items": { "type": "string" }, @@ -3673,7 +3828,7 @@ "id": "QueryList", "properties": { "queries": { - "description": "Required. The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob:\n\"hiveJob\": {\n \"queryList\": {\n \"queries\": [\n \"query1\",\n \"query2\",\n \"query3;query4\",\n ]\n }\n}\n", + "description": "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } } ", "items": { "type": "string" }, @@ -3757,30 +3912,38 @@ "id": "SoftwareConfig", "properties": { "imageVersion": { - "description": "Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), such as \"1.2\" (including a subminor version, such as \"1.2.29\"), or the \"preview\" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.", + "description": "Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as \"1.2\" (including a subminor version, such as \"1.2.29\"), or the \"preview\" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.", "type": "string" }, "optionalComponents": { "description": "Optional. The set of components to activate on the cluster.", - "enumDescriptions": [ - "Unspecified component.", - "The Anaconda python distribution.", - "The Hive Web HCatalog (the REST service for accessing HCatalog).", - "The Jupyter Notebook.", - "The Presto query engine.", - "The Zeppelin notebook.", - "The Zookeeper service." - ], "items": { "enum": [ "COMPONENT_UNSPECIFIED", "ANACONDA", + "DOCKER", + "FLINK", "HIVE_WEBHCAT", "JUPYTER", "PRESTO", + "RANGER", + "SOLR", "ZEPPELIN", "ZOOKEEPER" ], + "enumDescriptions": [ + "Unspecified component. 
Specifying this will cause Cluster creation to fail.", + "The Anaconda python distribution.", + "Docker", + "Flink", + "The Hive Web HCatalog (the REST service for accessing HCatalog).", + "The Jupyter Notebook.", + "The Presto query engine.", + "The Ranger service.", + "The Solr service.", + "The Zeppelin notebook.", + "The Zookeeper service." + ], "type": "string" }, "type": "array" @@ -3789,7 +3952,7 @@ "additionalProperties": { "type": "string" }, - "description": "Optional. The properties to set on daemon config files.Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings:\ncapacity-scheduler: capacity-scheduler.xml\ncore: core-site.xml\ndistcp: distcp-default.xml\nhdfs: hdfs-site.xml\nhive: hive-site.xml\nmapred: mapred-site.xml\npig: pig.properties\nspark: spark-defaults.conf\nyarn: yarn-site.xmlFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", + "description": "Optional. The properties to set on daemon config files.Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf yarn: yarn-site.xmlFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", "type": "object" } }, @@ -3800,7 +3963,7 @@ "id": "SparkJob", "properties": { "archiveUris": { - "description": "Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", "items": { "type": "string" }, @@ -3814,7 +3977,7 @@ "type": "array" }, "fileUris": { - "description": "Optional. HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.", + "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", "items": { "type": "string" }, @@ -3854,7 +4017,7 @@ "id": "SparkRJob", "properties": { "archiveUris": { - "description": "Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", "items": { "type": "string" }, @@ -3868,7 +4031,7 @@ "type": "array" }, "fileUris": { - "description": "Optional. HCFS URIs of files to be copied to the working directory of R drivers and distributed tasks. Useful for naively parallel tasks.", + "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", "items": { "type": "string" }, @@ -3983,7 +4146,7 @@ "type": "string" }, "fields": { - "description": "Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths.A field path is similar in syntax to a google.protobuf.FieldMask. 
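The archive/file URI descriptions above (for PySpark, Spark, and SparkR jobs) now state that archives are extracted into, and files placed in, the working directory of each executor. A minimal sketch of a PySpark job using those fields together with the JobScheduling restart cap documented earlier, assuming the vendored google.golang.org/api/dataproc/v1 package; MainPythonFileUri and the gs:// URIs are illustrative assumptions, not taken from this diff:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	dataproc "google.golang.org/api/dataproc/v1"
)

func main() {
	// A PySpark job whose archive is extracted into each executor's working
	// directory, with the driver allowed at most two restarts per hour
	// before the job is reported failed (the documented maximum is 10).
	job := &dataproc.PySparkJob{
		MainPythonFileUri: "gs://my-bucket/jobs/wordcount.py", // hypothetical URI
		ArchiveUris:       []string{"gs://my-bucket/deps/venv.tar.gz"},
		FileUris:          []string{"gs://my-bucket/conf/settings.ini"},
		Args:              []string{"--input", "gs://my-bucket/input/"},
	}
	sched := &dataproc.JobScheduling{MaxFailuresPerHour: 2}

	out, err := json.MarshalIndent(map[string]interface{}{
		"pysparkJob": job,
		"scheduling": sched,
	}, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}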
For example, a field path that references the zone field of a workflow template's cluster selector would be specified as placement.clusterSelector.zone.Also, field paths can reference fields using the following syntax:\nValues in maps can be referenced by key:\nlabels'key'\nplacement.clusterSelector.clusterLabels'key'\nplacement.managedCluster.labels'key'\nplacement.clusterSelector.clusterLabels'key'\njobs'step-id'.labels'key'\nJobs in the jobs list can be referenced by step-id:\njobs'step-id'.hadoopJob.mainJarFileUri\njobs'step-id'.hiveJob.queryFileUri\njobs'step-id'.pySparkJob.mainPythonFileUri\njobs'step-id'.hadoopJob.jarFileUris0\njobs'step-id'.hadoopJob.archiveUris0\njobs'step-id'.hadoopJob.fileUris0\njobs'step-id'.pySparkJob.pythonFileUris0\nItems in repeated fields can be referenced by a zero-based index:\njobs'step-id'.sparkJob.args0\nOther examples:\njobs'step-id'.hadoopJob.properties'key'\njobs'step-id'.hadoopJob.args0\njobs'step-id'.hiveJob.scriptVariables'key'\njobs'step-id'.hadoopJob.mainJarFileUri\nplacement.clusterSelector.zoneIt may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid:\nplacement.clusterSelector.clusterLabels\njobs'step-id'.sparkJob.args", + "description": "Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths.A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as placement.clusterSelector.zone.Also, field paths can reference fields using the following syntax: Values in maps can be referenced by key: labels'key' placement.clusterSelector.clusterLabels'key' placement.managedCluster.labels'key' placement.clusterSelector.clusterLabels'key' jobs'step-id'.labels'key' Jobs in the jobs list can be referenced by step-id: jobs'step-id'.hadoopJob.mainJarFileUri jobs'step-id'.hiveJob.queryFileUri jobs'step-id'.pySparkJob.mainPythonFileUri jobs'step-id'.hadoopJob.jarFileUris0 jobs'step-id'.hadoopJob.archiveUris0 jobs'step-id'.hadoopJob.fileUris0 jobs'step-id'.pySparkJob.pythonFileUris0 Items in repeated fields can be referenced by a zero-based index: jobs'step-id'.sparkJob.args0 Other examples: jobs'step-id'.hadoopJob.properties'key' jobs'step-id'.hadoopJob.args0 jobs'step-id'.hiveJob.scriptVariables'key' jobs'step-id'.hadoopJob.mainJarFileUri placement.clusterSelector.zoneIt may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: placement.clusterSelector.clusterLabels jobs'step-id'.sparkJob.args", "items": { "type": "string" }, @@ -4051,6 +4214,7 @@ "items": { "$ref": "WorkflowNode" }, + "readOnly": true, "type": "array" } }, @@ -4062,28 +4226,34 @@ "properties": { "clusterName": { "description": "Output only. The name of the target cluster.", + "readOnly": true, "type": "string" }, "clusterUuid": { "description": "Output only. The UUID of target cluster.", + "readOnly": true, "type": "string" }, "createCluster": { "$ref": "ClusterOperation", - "description": "Output only. The create cluster operation metadata." + "description": "Output only. 
The create cluster operation metadata.", + "readOnly": true }, "deleteCluster": { "$ref": "ClusterOperation", - "description": "Output only. The delete cluster operation metadata." + "description": "Output only. The delete cluster operation metadata.", + "readOnly": true }, "endTime": { "description": "Output only. Workflow end time.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "graph": { "$ref": "WorkflowGraph", - "description": "Output only. The workflow graph." + "description": "Output only. The workflow graph.", + "readOnly": true }, "parameters": { "additionalProperties": { @@ -4095,6 +4265,7 @@ "startTime": { "description": "Output only. Workflow start time.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "state": { @@ -4111,15 +4282,18 @@ "The operation is running.", "The operation is done; either cancelled or completed." ], + "readOnly": true, "type": "string" }, "template": { - "description": "Output only. The resource name of the workflow template as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Output only. The resource name of the workflow template as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "readOnly": true, "type": "string" }, "version": { "description": "Output only. The version of template at the time of workflow instantiation.", "format": "int32", + "readOnly": true, "type": "integer" } }, @@ -4131,10 +4305,12 @@ "properties": { "error": { "description": "Output only. The error detail.", + "readOnly": true, "type": "string" }, "jobId": { "description": "Output only. The job id; populated after the node enters RUNNING state.", + "readOnly": true, "type": "string" }, "prerequisiteStepIds": { @@ -4142,6 +4318,7 @@ "items": { "type": "string" }, + "readOnly": true, "type": "array" }, "state": { @@ -4162,10 +4339,12 @@ "The node completed successfully.", "The node failed. A node can be marked FAILED because its ancestor or peer failed." ], + "readOnly": true, "type": "string" }, "stepId": { "description": "Output only. The name of the node.", + "readOnly": true, "type": "string" } }, @@ -4178,6 +4357,7 @@ "createTime": { "description": "Output only. The time template was created.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "id": { @@ -4198,11 +4378,12 @@ "type": "object" }, "name": { - "description": "Output only. 
The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "readOnly": true, "type": "string" }, "parameters": { - "description": "Optional. emplate parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.", + "description": "Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.", "items": { "$ref": "TemplateParameter" }, @@ -4215,6 +4396,7 @@ "updateTime": { "description": "Output only. The time template was last updated.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "version": { @@ -4241,7 +4423,7 @@ "type": "object" }, "YarnApplication": { - "description": "A YARN application created by a job. Application information is a subset of \u003ccode\u003eorg.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto\u003c/code\u003e.Beta Feature: This report is available for testing purposes only. It may be changed before final release.", + "description": "A YARN application created by a job. Application information is a subset of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.Beta Feature: This report is available for testing purposes only. It may be changed before final release.", "id": "YarnApplication", "properties": { "name": { diff --git a/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go b/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go index f980e5a04ce..3fca8d6f1af 100644 --- a/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go +++ b/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go @@ -77,6 +77,7 @@ const apiId = "dataproc:v1" const apiName = "dataproc" const apiVersion = "v1" const basePath = "https://dataproc.googleapis.com/" +const mtlsBasePath = "https://dataproc.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -92,6 +93,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) 
if err != nil { return nil, err @@ -265,16 +267,8 @@ type AcceleratorConfig struct { // Engine AcceleratorTypes // (https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes // ).Examples: - // https://www.googleapis.com/compute/beta/projects/[project_ - // id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80 - // projects/[proje - // ct_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80 - // nvidia-tesla - // -k80Auto Zone Exception: If you are using the Dataproc Auto Zone - // Placement - // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/ - // auto-zone#using_auto_zone_placement) feature, you must use the short - // name of the accelerator type resource, for example, nvidia-tesla-k80. + // https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80 projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80 nvidia-tesla-k80Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, + // nvidia-tesla-k80. AcceleratorTypeUri string `json:"acceleratorTypeUri,omitempty"` // ForceSendFields is a list of field names (e.g. "AcceleratorCount") to @@ -307,11 +301,7 @@ type AutoscalingConfig struct { // PolicyUri: Optional. The autoscaling policy used by the cluster.Only // resource names including projectid and location (region) are valid. // Examples: - // https://www.googleapis.com/compute/v1/projects/[project_id]/ - // locations/[dataproc_region]/autoscalingPolicies/[policy_id] - // projects/[ - // project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id - // ]Note that the policy must be in the same project and Dataproc + // https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Dataproc // region. PolicyUri string `json:"policyUri,omitempty"` @@ -350,12 +340,11 @@ type AutoscalingPolicy struct { Id string `json:"id,omitempty"` // Name: Output only. The "resource name" of the autoscaling policy, as - // described in https://cloud.google.com/apis/design/resource_names. - // For projects.regions.autoscalingPolicies, the resource name of the - // policy has the following format: + // described in https://cloud.google.com/apis/design/resource_names. For + // projects.regions.autoscalingPolicies, the resource name of the policy + // has the following format: // projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} - // - // For projects.locations.autoscalingPolicies, the resource name of the + // For projects.locations.autoscalingPolicies, the resource name of the // policy has the following format: // projects/{project_id}/locations/{location}/autoscalingPolicies/{policy // _id} @@ -441,12 +430,15 @@ type BasicYarnAutoscalingConfig struct { // 0s, 1d. GracefulDecommissionTimeout string `json:"gracefulDecommissionTimeout,omitempty"` - // ScaleDownFactor: Required. Fraction of average pending memory in the - // last cooldown period for which to remove workers. A scale-down factor - // of 1 will result in scaling down so that there is no available memory - // remaining after the update (more aggressive scaling). 
A scale-down - // factor of 0 disables removing workers, which can be beneficial for - // autoscaling a single job.Bounds: 0.0, 1.0. + // ScaleDownFactor: Required. Fraction of average YARN pending memory in + // the last cooldown period for which to remove workers. A scale-down + // factor of 1 will result in scaling down so that there is no available + // memory remaining after the update (more aggressive scaling). A + // scale-down factor of 0 disables removing workers, which can be + // beneficial for autoscaling a single job. See How autoscaling works + // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/ + // autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, + // 1.0. ScaleDownFactor float64 `json:"scaleDownFactor,omitempty"` // ScaleDownMinWorkerFraction: Optional. Minimum scale-down threshold as @@ -457,12 +449,15 @@ type BasicYarnAutoscalingConfig struct { // recommended change.Bounds: 0.0, 1.0. Default: 0.0. ScaleDownMinWorkerFraction float64 `json:"scaleDownMinWorkerFraction,omitempty"` - // ScaleUpFactor: Required. Fraction of average pending memory in the - // last cooldown period for which to add workers. A scale-up factor of - // 1.0 will result in scaling up so that there is no pending memory + // ScaleUpFactor: Required. Fraction of average YARN pending memory in + // the last cooldown period for which to add workers. A scale-up factor + // of 1.0 will result in scaling up so that there is no pending memory // remaining after the update (more aggressive scaling). A scale-up // factor closer to 0 will result in a smaller magnitude of scaling up - // (less aggressive scaling).Bounds: 0.0, 1.0. + // (less aggressive scaling). See How autoscaling works + // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/ + // autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, + // 1.0. ScaleUpFactor float64 `json:"scaleUpFactor,omitempty"` // ScaleUpMinWorkerFraction: Optional. Minimum scale-up threshold as a @@ -520,55 +515,58 @@ func (s *BasicYarnAutoscalingConfig) UnmarshalJSON(data []byte) error { // Binding: Associates members with a role. type Binding struct { - // Condition: The condition that is associated with this binding. NOTE: - // An unsatisfied condition will not allow user access via current - // binding. Different bindings, including their conditions, are examined - // independently. + // BindingId: A client-specified ID for this binding. Expected to be + // globally unique to support the internal bindings-by-ID API. + BindingId string `json:"bindingId,omitempty"` + + // Condition: The condition that is associated with this binding.If the + // condition evaluates to true, then this binding applies to the current + // request.If the condition evaluates to false, then this binding does + // not apply to the current request. However, a different role binding + // might grant the same role to one or more of the members in this + // binding.To learn which resources support conditions in their IAM + // policies, see the IAM documentation + // (https://cloud.google.com/iam/help/conditions/resource-policies). Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud - // Platform resource. members can have the following values: - // allUsers: A special identifier that represents anyone who is on the - // internet; with or without a Google account. 
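The ScaleUpFactor and ScaleDownFactor comments above describe how the YARN autoscaler translates average pending memory in the last cooldown period into worker changes. A hedged sketch of a BasicYarnAutoscalingConfig that scales up aggressively and down conservatively; the concrete factors and timeout are illustrative assumptions:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	dataproc "google.golang.org/api/dataproc/v1"
)

func main() {
	// Scale up for all of the average pending YARN memory, but only reclaim
	// half of it when scaling down, and give decommissioning nodes an hour.
	yarn := &dataproc.BasicYarnAutoscalingConfig{
		GracefulDecommissionTimeout: "3600s",
		ScaleUpFactor:               1.0,
		ScaleDownFactor:             0.5,
	}

	out, err := json.MarshalIndent(yarn, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}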
- // allAuthenticatedUsers: A special identifier that represents anyone - // who is authenticated with a Google account or a service - // account. - // user:{emailid}: An email address that represents a specific Google - // account. For example, alice@example.com . - // serviceAccount:{emailid}: An email address that represents a service - // account. For example, - // my-other-app@appspot.gserviceaccount.com. - // group:{emailid}: An email address that represents a Google group. - // For example, - // admins@example.com. - // deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique - // identifier) representing a user that has been recently deleted. For - // example, alice@example.com?uid=123456789012345678901. If the user is + // Platform resource. members can have the following values: allUsers: A + // special identifier that represents anyone who is on the internet; + // with or without a Google account. allAuthenticatedUsers: A special + // identifier that represents anyone who is authenticated with a Google + // account or a service account. user:{emailid}: An email address that + // represents a specific Google account. For example, alice@example.com + // . serviceAccount:{emailid}: An email address that represents a + // service account. For example, + // my-other-app@appspot.gserviceaccount.com. group:{emailid}: An email + // address that represents a Google group. For example, + // admins@example.com. deleted:user:{emailid}?uid={uniqueid}: An email + // address (plus unique identifier) representing a user that has been + // recently deleted. For example, + // alice@example.com?uid=123456789012345678901. If the user is // recovered, this value reverts to user:{emailid} and the recovered - // user retains the role in the - // binding. + // user retains the role in the binding. // deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address - // (plus unique identifier) representing a service account that has - // been recently deleted. For example, + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, // my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. // If the service account is undeleted, this value reverts to // serviceAccount:{emailid} and the undeleted service account retains - // the role in the binding. - // deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique - // identifier) representing a Google group that has been recently - // deleted. For example, admins@example.com?uid=123456789012345678901. - // If the group is recovered, this value reverts to group:{emailid} and - // the recovered group retains the role in the - // binding. - // domain:{domain}: The G Suite domain (primary) that represents all the - // users of that domain. For example, google.com or example.com. + // the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An + // email address (plus unique identifier) representing a Google group + // that has been recently deleted. For example, + // admins@example.com?uid=123456789012345678901. If the group is + // recovered, this value reverts to group:{emailid} and the recovered + // group retains the role in the binding. domain:{domain}: The G Suite + // domain (primary) that represents all the users of that domain. For + // example, google.com or example.com. Members []string `json:"members,omitempty"` // Role: Role that is assigned to members. For example, roles/viewer, // roles/editor, or roles/owner. 
Role string `json:"role,omitempty"` - // ForceSendFields is a list of field names (e.g. "Condition") to + // ForceSendFields is a list of field names (e.g. "BindingId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -576,7 +574,7 @@ type Binding struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Condition") to include in + // NullFields is a list of field names (e.g. "BindingId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -682,6 +680,10 @@ type ClusterConfig struct { // EncryptionConfig: Optional. Encryption settings for the cluster. EncryptionConfig *EncryptionConfig `json:"encryptionConfig,omitempty"` + // EndpointConfig: Optional. Port/endpoint configuration for this + // cluster + EndpointConfig *EndpointConfig `json:"endpointConfig,omitempty"` + // GceClusterConfig: Optional. The shared Compute Engine config settings // for all instances in a cluster. GceClusterConfig *GceClusterConfig `json:"gceClusterConfig,omitempty"` @@ -690,17 +692,10 @@ type ClusterConfig struct { // after config is completed. By default, executables are run on master // and all worker nodes. You can test a node's role metadata to run an // executable on a master or worker node, as shown below using curl (you - // can also use wget): - // ROLE=$(curl -H - // Metadata-Flavor:Google - // http://metadata/computeMetadata/v1/instance/att - // ributes/dataproc-role) - // if [[ "${ROLE}" == 'Master' ]]; then - // ... master specific actions ... - // else - // ... worker specific actions ... - // fi - // + // can also use wget): ROLE=$(curl -H Metadata-Flavor:Google + // http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + // if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... + // else ... worker specific actions ... fi InitializationActions []*NodeInitializationAction `json:"initializationActions,omitempty"` // LifecycleConfig: Optional. Lifecycle setting for the cluster. @@ -721,6 +716,16 @@ type ClusterConfig struct { // cluster. SoftwareConfig *SoftwareConfig `json:"softwareConfig,omitempty"` + // TempBucket: Optional. A Cloud Storage bucket used to store ephemeral + // cluster and jobs data, such as Spark and MapReduce history files. If + // you do not specify a temp bucket, Dataproc will determine a Cloud + // Storage location (US, ASIA, or EU) for your cluster's temp bucket + // according to the Compute Engine zone where your cluster is deployed, + // and then create and manage this project-level, per-location bucket. + // The default bucket has a TTL of 90 days, but you can use any TTL (or + // none) if you specify a bucket. + TempBucket string `json:"tempBucket,omitempty"` + // WorkerConfig: Optional. The Compute Engine config settings for worker // instances in a cluster. WorkerConfig *InstanceGroupConfig `json:"workerConfig,omitempty"` @@ -1090,12 +1095,9 @@ func (s *DiskConfig) MarshalJSON() ([]byte, error) { // Empty: A generic empty message that you can re-use to avoid defining // duplicated empty messages in your APIs. A typical example is to use // it as the request or the response type of an API method. 
For -// instance: -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// The JSON representation for Empty is empty JSON object {}. +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for Empty is empty +// JSON object {}. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -1132,34 +1134,60 @@ func (s *EncryptionConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// EndpointConfig: Endpoint config for this cluster +type EndpointConfig struct { + // EnableHttpPortAccess: Optional. If true, enable http access to + // specific ports on the cluster from external sources. Defaults to + // false. + EnableHttpPortAccess bool `json:"enableHttpPortAccess,omitempty"` + + // HttpPorts: Output only. The map of port descriptions to URLs. Will + // only be populated if enable_http_port_access is true. + HttpPorts map[string]string `json:"httpPorts,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "EnableHttpPortAccess") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EnableHttpPortAccess") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *EndpointConfig) MarshalJSON() ([]byte, error) { + type NoMethod EndpointConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Expr: Represents a textual expression in the Common Expression // Language (CEL) syntax. CEL is a C-like expression language. 
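EndpointConfig is new in this update: EnableHttpPortAccess requests HTTP access to specific cluster ports from external sources, while HttpPorts is output only and is filled in by the service. A minimal sketch of wiring it, plus the new TempBucket field, into a ClusterConfig; the bucket name is a placeholder assumption:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	dataproc "google.golang.org/api/dataproc/v1"
)

func main() {
	// Opt in to external HTTP port access and pin the ephemeral-data bucket
	// instead of letting Dataproc create a per-location default.
	cfg := &dataproc.ClusterConfig{
		EndpointConfig: &dataproc.EndpointConfig{
			EnableHttpPortAccess: true,
		},
		TempBucket: "my-dataproc-temp-bucket", // hypothetical bucket name
	}

	out, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
	// HttpPorts is output only: it appears in responses, not in requests.
}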
The // syntax and semantics of CEL are documented at -// https://github.com/google/cel-spec.Example (Comparison): -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 -// chars" -// expression: "document.summary.size() < 100" -// Example (Equality): -// title: "Requestor is owner" -// description: "Determines if requestor is the document -// owner" -// expression: "document.owner == request.auth.claims.email" -// Example (Logic): -// title: "Public documents" +// https://github.com/google/cel-spec.Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" // description: "Determine whether the document should be publicly -// visible" -// expression: "document.type != 'private' && document.type != -// 'internal'" -// Example (Data Manipulation): -// title: "Notification string" -// description: "Create a notification string with a -// timestamp." +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." // expression: "'New message received at ' + -// string(document.create_time)" -// The exact variables and functions that may be referenced within an -// expression are determined by the service that evaluates it. See the -// service documentation for additional information. +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. type Expr struct { // Description: Optional. Description of the expression. This is a // longer text which describes the expression, e.g. when hovered over it @@ -1228,21 +1256,38 @@ type GceClusterConfig struct { // (https://cloud.google.com/compute/docs/subnetworks) for more // information).A full URL, partial URI, or short name are valid. // Examples: - // https://www.googleapis.com/compute/v1/projects/[project_id]/ - // regions/global/default - // projects/[project_id]/regions/global/default - // de - // fault + // https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default projects/[project_id]/regions/global/default + // default NetworkUri string `json:"networkUri,omitempty"` + // NodeGroupAffinity: Optional. Node Group Affinity for sole-tenant + // clusters. + NodeGroupAffinity *NodeGroupAffinity `json:"nodeGroupAffinity,omitempty"` + + // PrivateIpv6GoogleAccess: Optional. The type of IPv6 access for a + // cluster. + // + // Possible values: + // "PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED" - If unspecified, Compute + // Engine default behavior will apply, which is the same as + // INHERIT_FROM_SUBNETWORK. + // "INHERIT_FROM_SUBNETWORK" - Private access to and from Google + // Services configuration inherited from the subnetwork configuration. + // This is the default Compute Engine behavior. + // "OUTBOUND" - Enables outbound private IPv6 access to Google + // Services from the Dataproc cluster. + // "BIDIRECTIONAL" - Enables bidirectional private IPv6 access between + // Google Services and the Dataproc cluster. 
+ PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` + // ReservationAffinity: Optional. Reservation Affinity for consuming // Zonal reservation. ReservationAffinity *ReservationAffinity `json:"reservationAffinity,omitempty"` // ServiceAccount: Optional. The Dataproc service account // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/ - // service-accounts#service_accounts_in_cloud_dataproc) (also see VM - // Data Plane identity + // service-accounts#service_accounts_in_dataproc) (also see VM Data + // Plane identity // (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principa // ls#vm_service_account_data_plane_identity)) used by Dataproc cluster // VM instances to access Google Cloud Platform services.If not @@ -1253,31 +1298,21 @@ type GceClusterConfig struct { // ServiceAccountScopes: Optional. The URIs of service account scopes to // be included in Compute Engine instances. The following base set of - // scopes is always - // included: + // scopes is always included: // https://www.googleapis.com/auth/cloud.useraccounts.readonly - // // https://www.googleapis.com/auth/devstorage.read_write - // https://www.goog - // leapis.com/auth/logging.writeIf no scopes are specified, the - // following defaults are also - // provided: + // https://www.googleapis.com/auth/logging.writeIf no scopes are + // specified, the following defaults are also provided: // https://www.googleapis.com/auth/bigquery - // https://www.googlea - // pis.com/auth/bigtable.admin.table - // https://www.googleapis.com/auth/bigt - // able.data + // https://www.googleapis.com/auth/bigtable.admin.table + // https://www.googleapis.com/auth/bigtable.data // https://www.googleapis.com/auth/devstorage.full_control ServiceAccountScopes []string `json:"serviceAccountScopes,omitempty"` // SubnetworkUri: Optional. The Compute Engine subnetwork to be used for // machine communications. Cannot be specified with network_uri.A full - // URL, partial URI, or short name are valid. - // Examples: - // https://www.googleapis.com/compute/v1/projects/[project_id]/ - // regions/us-east1/subnetworks/sub0 - // projects/[project_id]/regions/us-eas - // t1/subnetworks/sub0 + // URL, partial URI, or short name are valid. Examples: + // https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0 projects/[project_id]/regions/us-east1/subnetworks/sub0 // sub0 SubnetworkUri string `json:"subnetworkUri,omitempty"` @@ -1291,11 +1326,8 @@ type GceClusterConfig struct { // If omitted in a non-global Dataproc region, the service will pick a // zone in the corresponding Compute Engine region. On a get request, // zone will always be present.A full URL, partial URI, or short name - // are valid. - // Examples: - // https://www.googleapis.com/compute/v1/projects/[project_id]/ - // zones/[zone] - // projects/[project_id]/zones/[zone] + // are valid. Examples: + // https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] // us-central1-f ZoneUri string `json:"zoneUri,omitempty"` @@ -1326,7 +1358,7 @@ func (s *GceClusterConfig) MarshalJSON() ([]byte, error) { // GetIamPolicyRequest: Request message for GetIamPolicy method. type GetIamPolicyRequest struct { // Options: OPTIONAL: A GetPolicyOptions object for specifying options - // to GetIamPolicy. This field is only used by Cloud IAM. + // to GetIamPolicy. Options *GetPolicyOptions `json:"options,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Options") to @@ -1358,7 +1390,10 @@ type GetPolicyOptions struct { // returned.Valid values are 0, 1, and 3. Requests specifying an invalid // value will be rejected.Requests for policies with any conditional // bindings must specify version 3. Policies without any conditional - // bindings may specify any valid value or leave the field unset. + // bindings may specify any valid value or leave the field unset.To + // learn which resources support conditions in their IAM policies, see + // the IAM documentation + // (https://cloud.google.com/iam/help/conditions/resource-policies). RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1580,21 +1615,9 @@ type InstanceGroupConfig struct { // ImageUri: Optional. The Compute Engine image resource used for // cluster instances.The URI can represent an image or image - // family.Image - // examples: - // https://www.googleapis.com/compute/beta/projects/[project_id - // ]/global/images/[image-id] - // projects/[project_id]/global/images/[image- - // id] - // image-idImage family examples. Dataproc will use the most recent - // image from the - // family: - // https://www.googleapis.com/compute/beta/projects/[project_id]/ - // global/images/family/[custom-image-family-name] - // projects/[project_id]/ - // global/images/family/[custom-image-family-name]If the URI is - // unspecified, it will be inferred from SoftwareConfig.image_version or - // the system default. + // family.Image examples: + // https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system + // default. ImageUri string `json:"imageUri,omitempty"` // InstanceNames: Output only. The list of instance names. Dataproc @@ -1602,6 +1625,10 @@ type InstanceGroupConfig struct { // group. InstanceNames []string `json:"instanceNames,omitempty"` + // InstanceReferences: Output only. List of references to Compute Engine + // instances. + InstanceReferences []*InstanceReference `json:"instanceReferences,omitempty"` + // IsPreemptible: Output only. Specifies that this instance group // contains preemptible instances. IsPreemptible bool `json:"isPreemptible,omitempty"` @@ -1609,15 +1636,8 @@ type InstanceGroupConfig struct { // MachineTypeUri: Optional. The Compute Engine machine type used for // cluster instances.A full URL, partial URI, or short name are valid. // Examples: - // https://www.googleapis.com/compute/v1/projects/[project_id]/ - // zones/us-east1-a/machineTypes/n1-standard-2 - // projects/[project_id]/zone - // s/us-east1-a/machineTypes/n1-standard-2 - // n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto - // Zone Placement - // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/ - // auto-zone#using_auto_zone_placement) feature, you must use the short - // name of the machine type resource, for example, n1-standard-2. 
+ // https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, + // n1-standard-2. MachineTypeUri string `json:"machineTypeUri,omitempty"` // ManagedGroupConfig: Output only. The config for Compute Engine @@ -1626,7 +1646,7 @@ type InstanceGroupConfig struct { ManagedGroupConfig *ManagedGroupConfig `json:"managedGroupConfig,omitempty"` // MinCpuPlatform: Optional. Specifies the minimum cpu platform for the - // Instance Group. See Dataproc -> Minimum CPU Platform + // Instance Group. See Dataproc -> Minimum CPU Platform // (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min- // cpu). MinCpuPlatform string `json:"minCpuPlatform,omitempty"` @@ -1673,11 +1693,42 @@ func (s *InstanceGroupConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceReference: A reference to a Compute Engine instance. +type InstanceReference struct { + // InstanceId: The unique identifier of the Compute Engine instance. + InstanceId string `json:"instanceId,omitempty"` + + // InstanceName: The user-friendly name of the Compute Engine instance. + InstanceName string `json:"instanceName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InstanceId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceReference) MarshalJSON() ([]byte, error) { + type NoMethod InstanceReference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstantiateWorkflowTemplateRequest: A request to instantiate a // workflow template. type InstantiateWorkflowTemplateRequest struct { // Parameters: Optional. Map from parameter names to values that should - // be used for those parameters. Values may not exceed 100 characters. + // be used for those parameters. Values may not exceed 1000 characters. Parameters map[string]string `json:"parameters,omitempty"` // RequestId: Optional. A tag that prevents multiple concurrent workflow @@ -1772,7 +1823,7 @@ type Job struct { // Reference: Optional. The fully qualified reference to the job, which // can be used to obtain the equivalent REST path of the job resource. // If this property is not specified when a job is created, the server - // generates a job_id. + // generates a job_id. Reference *JobReference `json:"reference,omitempty"` // Scheduling: Optional. 
Job scheduling configuration. @@ -1788,8 +1839,8 @@ type Job struct { SparkSqlJob *SparkSqlJob `json:"sparkSqlJob,omitempty"` // Status: Output only. The job status. Additional application-specific - // status information may be contained in the type_job and - // yarn_applications fields. + // status information may be contained in the type_job and + // yarn_applications fields. Status *JobStatus `json:"status,omitempty"` // StatusHistory: Output only. The previous job status. @@ -1906,8 +1957,8 @@ type JobReference struct { // provided by the server. JobId string `json:"jobId,omitempty"` - // ProjectId: Required. The ID of the Google Cloud Platform project that - // the job belongs to. + // ProjectId: Optional. The ID of the Google Cloud Platform project that + // the job belongs to. If specified, must match the request project ID. ProjectId string `json:"projectId,omitempty"` // ForceSendFields is a list of field names (e.g. "JobId") to @@ -1936,9 +1987,9 @@ func (s *JobReference) MarshalJSON() ([]byte, error) { // JobScheduling: Job scheduling options. type JobScheduling struct { // MaxFailuresPerHour: Optional. Maximum number of times per hour a - // driver may be restarted as a result of driver terminating with - // non-zero code before job is reported failed.A job may be reported as - // thrashing if driver exits with non-zero code 4 times within 10 minute + // driver may be restarted as a result of driver exiting with non-zero + // code before job is reported failed.A job may be reported as thrashing + // if driver exits with non-zero code 4 times within 10 minute // window.Maximum value is 10. MaxFailuresPerHour int64 `json:"maxFailuresPerHour,omitempty"` @@ -1969,7 +2020,7 @@ func (s *JobScheduling) MarshalJSON() ([]byte, error) { // JobStatus: Dataproc job status. type JobStatus struct { // Details: Optional. Output only. Job state details, such as an error - // description if the state is ERROR. + // description if the state is ERROR. Details string `json:"details,omitempty"` // State: Output only. A state message specifying the overall job state. @@ -2268,8 +2319,7 @@ type ListJobsResponse struct { // NextPageToken: Optional. This token is included in the response if // there are more results to fetch. To fetch additional results, provide - // this value as the page_token in a subsequent - // ListJobsRequest. + // this value as the page_token in a subsequent ListJobsRequest. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2342,7 +2392,7 @@ type ListWorkflowTemplatesResponse struct { // NextPageToken: Output only. This token is included in the response if // there are more results to fetch. To fetch additional results, provide // this value as the page_token in a subsequent - // ListWorkflowTemplatesRequest. + // ListWorkflowTemplatesRequest. NextPageToken string `json:"nextPageToken,omitempty"` // Templates: Output only. WorkflowTemplates list. @@ -2485,6 +2535,37 @@ func (s *ManagedGroupConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NodeGroupAffinity: Node Group Affinity for clusters using sole-tenant +// node groups. +type NodeGroupAffinity struct { + // NodeGroupUri: Required. The name of a single node group + // (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) + // a cluster will be created on. + NodeGroupUri string `json:"nodeGroupUri,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"NodeGroupUri") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NodeGroupUri") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NodeGroupAffinity) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupAffinity + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // NodeInitializationAction: Specifies an executable to run on a fully // configured node and a timeout period for executable completion. type NodeInitializationAction struct { @@ -2753,56 +2834,33 @@ func (s *PigJob) MarshalJSON() ([]byte, error) { // single role. Members can be user accounts, service accounts, Google // groups, and domains (such as G Suite). A role is a named list of // permissions; each role can be an IAM predefined role or a -// user-created custom role.Optionally, a binding can specify a -// condition, which is a logical expression that allows access to a -// resource only if the expression evaluates to true. A condition can -// add constraints based on attributes of the request, the resource, or -// both.JSON example: -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": ["user:eve@example.com"], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// YAML example: -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// - etag: BwWWja0YfJA= -// - version: 3 -// For a description of IAM and its features, see the IAM documentation -// (https://cloud.google.com/iam/docs/). +// user-created custom role.For some types of Google Cloud resources, a +// binding can also specify a condition, which is a logical expression +// that allows access to a resource only if the expression evaluates to +// true. A condition can add constraints based on attributes of the +// request, the resource, or both. 
To learn which resources support +// conditions in their IAM policies, see the IAM documentation +// (https://cloud.google.com/iam/help/conditions/resource-policies).JSON +// example: { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } YAML example: bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: +// 3 For a description of IAM and its features, see the IAM +// documentation (https://cloud.google.com/iam/docs/). type Policy struct { // Bindings: Associates a list of members to a role. Optionally, may // specify a condition that determines how and when the bindings are @@ -2826,18 +2884,19 @@ type Policy struct { // Version: Specifies the format of the policy.Valid values are 0, 1, // and 3. Requests that specify an invalid value are rejected.Any // operation that affects conditional role bindings must specify version - // 3. This requirement applies to the following operations: - // Getting a policy that includes a conditional role binding - // Adding a conditional role binding to a policy - // Changing a conditional role binding in a policy - // Removing any role binding, with or without a condition, from a policy - // that includes conditionsImportant: If you use IAM Conditions, you - // must include the etag field whenever you call setIamPolicy. If you - // omit this field, then IAM allows you to overwrite a version 3 policy - // with a version 1 policy, and all of the conditions in the version 3 - // policy are lost.If a policy does not include any conditions, - // operations on that policy may specify any valid version or leave the - // field unset. + // 3. This requirement applies to the following operations: Getting a + // policy that includes a conditional role binding Adding a conditional + // role binding to a policy Changing a conditional role binding in a + // policy Removing any role binding, with or without a condition, from a + // policy that includes conditionsImportant: If you use IAM Conditions, + // you must include the etag field whenever you call setIamPolicy. If + // you omit this field, then IAM allows you to overwrite a version 3 + // policy with a version 1 policy, and all of the conditions in the + // version 3 policy are lost.If a policy does not include any + // conditions, operations on that policy may specify any valid version + // or leave the field unset.To learn which resources support conditions + // in their IAM policies, see the IAM documentation + // (https://cloud.google.com/iam/help/conditions/resource-policies). 
Version int64 `json:"version,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2927,8 +2986,9 @@ func (s *PrestoJob) MarshalJSON() ([]byte, error) { // (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) // applications on YARN. type PySparkJob struct { - // ArchiveUris: Optional. HCFS URIs of archives to be extracted in the - // working directory of .jar, .tar, .tar.gz, .tgz, and .zip. + // ArchiveUris: Optional. HCFS URIs of archives to be extracted into the + // working directory of each executor. Supported file types: .jar, .tar, + // .tar.gz, .tgz, and .zip. ArchiveUris []string `json:"archiveUris,omitempty"` // Args: Optional. The arguments to pass to the driver. Do not include @@ -2936,9 +2996,8 @@ type PySparkJob struct { // collision may occur that causes an incorrect job submission. Args []string `json:"args,omitempty"` - // FileUris: Optional. HCFS URIs of files to be copied to the working - // directory of Python drivers and distributed tasks. Useful for naively - // parallel tasks. + // FileUris: Optional. HCFS URIs of files to be placed in the working + // directory of each executor. Useful for naively parallel tasks. FileUris []string `json:"fileUris,omitempty"` // JarFileUris: Optional. HCFS URIs of jar files to add to the @@ -2987,21 +3046,12 @@ func (s *PySparkJob) MarshalJSON() ([]byte, error) { // QueryList: A list of queries to run on a cluster. type QueryList struct { - // Queries: Required. The queries to execute. You do not need to - // terminate a query with a semicolon. Multiple queries can be specified + // Queries: Required. The queries to execute. You do not need to end a + // query expression with a semicolon. Multiple queries can be specified // in one string by separating each with a semicolon. Here is an example - // of an Cloud Dataproc API snippet that uses a QueryList to specify a - // HiveJob: - // "hiveJob": { - // "queryList": { - // "queries": [ - // "query1", - // "query2", - // "query3;query4", - // ] - // } - // } - // + // of a Dataproc API snippet that uses a QueryList to specify a HiveJob: + // "hiveJob": { "queryList": { "queries": [ "query1", "query2", + // "query3;query4", ] } } Queries []string `json:"queries,omitempty"` // ForceSendFields is a list of field names (e.g. "Queries") to @@ -3168,8 +3218,8 @@ type SoftwareConfig struct { // ImageVersion: Optional. The version of software inside the cluster. // It must be one of the supported Dataproc Versions // (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-v - // ersions#supported_cloud_dataproc_versions), such as "1.2" (including - // a subminor version, such as "1.2.29"), or the "preview" version + // ersions#supported_dataproc_versions), such as "1.2" (including a + // subminor version, such as "1.2.29"), or the "preview" version // (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-v // ersions#other_versions). If unspecified, it defaults to the latest // Debian version. @@ -3179,12 +3229,17 @@ type SoftwareConfig struct { // the cluster. // // Possible values: - // "COMPONENT_UNSPECIFIED" - Unspecified component. + // "COMPONENT_UNSPECIFIED" - Unspecified component. Specifying this + // will cause Cluster creation to fail. // "ANACONDA" - The Anaconda python distribution. + // "DOCKER" - Docker + // "FLINK" - Flink // "HIVE_WEBHCAT" - The Hive Web HCatalog (the REST service for // accessing HCatalog). // "JUPYTER" - The Jupyter Notebook. // "PRESTO" - The Presto query engine. 
+ // "RANGER" - The Ranger service. + // "SOLR" - The Solr service. // "ZEPPELIN" - The Zeppelin notebook. // "ZOOKEEPER" - The Zookeeper service. OptionalComponents []string `json:"optionalComponents,omitempty"` @@ -3192,16 +3247,11 @@ type SoftwareConfig struct { // Properties: Optional. The properties to set on daemon config // files.Property keys are specified in prefix:property format, for // example core:hadoop.tmp.dir. The following are supported prefixes and - // their mappings: - // capacity-scheduler: capacity-scheduler.xml - // core: core-site.xml - // distcp: distcp-default.xml - // hdfs: hdfs-site.xml - // hive: hive-site.xml - // mapred: mapred-site.xml - // pig: pig.properties - // spark: spark-defaults.conf - // yarn: yarn-site.xmlFor more information, see Cluster properties + // their mappings: capacity-scheduler: capacity-scheduler.xml core: + // core-site.xml distcp: distcp-default.xml hdfs: hdfs-site.xml hive: + // hive-site.xml mapred: mapred-site.xml pig: pig.properties spark: + // spark-defaults.conf yarn: yarn-site.xmlFor more information, see + // Cluster properties // (https://cloud.google.com/dataproc/docs/concepts/cluster-properties). Properties map[string]string `json:"properties,omitempty"` @@ -3231,9 +3281,9 @@ func (s *SoftwareConfig) MarshalJSON() ([]byte, error) { // SparkJob: A Dataproc job for running Apache Spark // (http://spark.apache.org/) applications on YARN. type SparkJob struct { - // ArchiveUris: Optional. HCFS URIs of archives to be extracted in the - // working directory of Spark drivers and tasks. Supported file types: - // .jar, .tar, .tar.gz, .tgz, and .zip. + // ArchiveUris: Optional. HCFS URIs of archives to be extracted into the + // working directory of each executor. Supported file types: .jar, .tar, + // .tar.gz, .tgz, and .zip. ArchiveUris []string `json:"archiveUris,omitempty"` // Args: Optional. The arguments to pass to the driver. Do not include @@ -3241,9 +3291,8 @@ type SparkJob struct { // collision may occur that causes an incorrect job submission. Args []string `json:"args,omitempty"` - // FileUris: Optional. HCFS URIs of files to be copied to the working - // directory of Spark drivers and distributed tasks. Useful for naively - // parallel tasks. + // FileUris: Optional. HCFS URIs of files to be placed in the working + // directory of each executor. Useful for naively parallel tasks. FileUris []string `json:"fileUris,omitempty"` // JarFileUris: Optional. HCFS URIs of jar files to add to the @@ -3295,9 +3344,9 @@ func (s *SparkJob) MarshalJSON() ([]byte, error) { // (https://spark.apache.org/docs/latest/sparkr.html) applications on // YARN. type SparkRJob struct { - // ArchiveUris: Optional. HCFS URIs of archives to be extracted in the - // working directory of Spark drivers and tasks. Supported file types: - // .jar, .tar, .tar.gz, .tgz, and .zip. + // ArchiveUris: Optional. HCFS URIs of archives to be extracted into the + // working directory of each executor. Supported file types: .jar, .tar, + // .tar.gz, .tgz, and .zip. ArchiveUris []string `json:"archiveUris,omitempty"` // Args: Optional. The arguments to pass to the driver. Do not include @@ -3305,9 +3354,8 @@ type SparkRJob struct { // collision may occur that causes an incorrect job submission. Args []string `json:"args,omitempty"` - // FileUris: Optional. HCFS URIs of files to be copied to the working - // directory of R drivers and distributed tasks. Useful for naively - // parallel tasks. + // FileUris: Optional. 
HCFS URIs of files to be placed in the working + // directory of each executor. Useful for naively parallel tasks. FileUris []string `json:"fileUris,omitempty"` // LoggingConfig: Optional. The runtime log config for job execution. @@ -3491,48 +3539,29 @@ type TemplateParameter struct { // google.protobuf.FieldMask. For example, a field path that references // the zone field of a workflow template's cluster selector would be // specified as placement.clusterSelector.zone.Also, field paths can - // reference fields using the following syntax: - // Values in maps can be referenced by - // key: - // labels'key' + // reference fields using the following syntax: Values in maps can be + // referenced by key: labels'key' // placement.clusterSelector.clusterLabels'key' - // placemen - // t.managedCluster.labels'key' - // placement.clusterSelector.clusterLabels'k - // ey' - // jobs'step-id'.labels'key' - // Jobs in the jobs list can be referenced by - // step-id: - // jobs'step-id'.hadoopJob.mainJarFileUri - // jobs'step-id'.hiveJob. - // queryFileUri + // placement.managedCluster.labels'key' + // placement.clusterSelector.clusterLabels'key' + // jobs'step-id'.labels'key' Jobs in the jobs list can be referenced by + // step-id: jobs'step-id'.hadoopJob.mainJarFileUri + // jobs'step-id'.hiveJob.queryFileUri // jobs'step-id'.pySparkJob.mainPythonFileUri - // jobs'step-id'. - // hadoopJob.jarFileUris0 + // jobs'step-id'.hadoopJob.jarFileUris0 // jobs'step-id'.hadoopJob.archiveUris0 - // jobs'step- - // id'.hadoopJob.fileUris0 - // jobs'step-id'.pySparkJob.pythonFileUris0 - // Items - // in repeated fields can be referenced by a zero-based - // index: - // jobs'step-id'.sparkJob.args0 - // Other - // examples: - // jobs'step-id'.hadoopJob.properties'key' - // jobs'step-id'.hadoop - // Job.args0 + // jobs'step-id'.hadoopJob.fileUris0 + // jobs'step-id'.pySparkJob.pythonFileUris0 Items in repeated fields can + // be referenced by a zero-based index: jobs'step-id'.sparkJob.args0 + // Other examples: jobs'step-id'.hadoopJob.properties'key' + // jobs'step-id'.hadoopJob.args0 // jobs'step-id'.hiveJob.scriptVariables'key' - // jobs'step-id'.had - // oopJob.mainJarFileUri + // jobs'step-id'.hadoopJob.mainJarFileUri // placement.clusterSelector.zoneIt may not be possible to parameterize // maps and repeated fields in their entirety since only individual map // values and individual items in repeated fields can be referenced. For - // example, the following field paths are - // invalid: - // placement.clusterSelector.clusterLabels - // jobs'step-id'.sparkJo - // b.args + // example, the following field paths are invalid: + // placement.clusterSelector.clusterLabels jobs'step-id'.sparkJob.args Fields []string `json:"fields,omitempty"` // Name: Required. Parameter name. The parameter name is used as the @@ -3728,12 +3757,11 @@ type WorkflowMetadata struct { State string `json:"state,omitempty"` // Template: Output only. The resource name of the workflow template as - // described in https://cloud.google.com/apis/design/resource_names. - // For projects.regions.workflowTemplates, the resource name of the - // template has the following format: + // described in https://cloud.google.com/apis/design/resource_names. 
For + // projects.regions.workflowTemplates, the resource name of the template + // has the following format: // projects/{project_id}/regions/{region}/workflowTemplates/{template_id} - // - // For projects.locations.workflowTemplates, the resource name of the + // For projects.locations.workflowTemplates, the resource name of the // template has the following format: // projects/{project_id}/locations/{location}/workflowTemplates/{template // _id} @@ -3837,20 +3865,19 @@ type WorkflowTemplate struct { Labels map[string]string `json:"labels,omitempty"` // Name: Output only. The resource name of the workflow template, as - // described in https://cloud.google.com/apis/design/resource_names. - // For projects.regions.workflowTemplates, the resource name of the - // template has the following format: + // described in https://cloud.google.com/apis/design/resource_names. For + // projects.regions.workflowTemplates, the resource name of the template + // has the following format: // projects/{project_id}/regions/{region}/workflowTemplates/{template_id} - // - // For projects.locations.workflowTemplates, the resource name of the + // For projects.locations.workflowTemplates, the resource name of the // template has the following format: // projects/{project_id}/locations/{location}/workflowTemplates/{template // _id} Name string `json:"name,omitempty"` - // Parameters: Optional. emplate parameters whose values are substituted - // into the template. Values for parameters must be provided when the - // template is instantiated. + // Parameters: Optional. Template parameters whose values are + // substituted into the template. Values for parameters must be provided + // when the template is instantiated. Parameters []*TemplateParameter `json:"parameters,omitempty"` // Placement: Required. WorkflowTemplate scheduling information. @@ -3934,9 +3961,9 @@ func (s *WorkflowTemplatePlacement) MarshalJSON() ([]byte, error) { // YarnApplication: A YARN application created by a job. Application // information is a subset of -// org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.Beta Feature: This report is available for testing purposes -// only. It may be changed before final release. +// org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.Beta +// Feature: This report is available for testing purposes only. It may +// be changed before final release. type YarnApplication struct { // Name: Required. The application name. Name string `json:"name,omitempty"` @@ -4048,7 +4075,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) Header() http.Header { func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4121,7 +4148,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "parent": { - // "description": "Required. 
The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.create, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.autoscalingPolicies.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -4187,7 +4214,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) Header() http.Header { func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4255,7 +4282,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "name": { - // "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + // "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/autoscalingPolicies/[^/]+$", // "required": true, @@ -4328,7 +4355,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesGetCall) Header() http.Header { func (c *ProjectsLocationsAutoscalingPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4399,7 +4426,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesGetCall) Do(opts ...googleapi.CallO // ], // "parameters": { // "name": { - // "description": "Required. 
The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + // "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/autoscalingPolicies/[^/]+$", // "required": true, @@ -4465,7 +4492,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) Header() http.Hea func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4629,7 +4656,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesListCall) Header() http.Header { func (c *ProjectsLocationsAutoscalingPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4711,7 +4738,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesListCall) Do(opts ...googleapi.Call // "type": "string" // }, // "parent": { - // "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -4762,8 +4789,8 @@ type ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy.Can return Public Errors: -// NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED +// resource. 
Replaces any existing policy.Can return NOT_FOUND, +// INVALID_ARGUMENT, and PERMISSION_DENIED errors. func (r *ProjectsLocationsAutoscalingPoliciesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall { c := &ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4798,7 +4825,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) Header() http.Hea func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4862,7 +4889,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) Do(opts ...google } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/autoscalingPolicies/{autoscalingPoliciesId}:setIamPolicy", // "httpMethod": "POST", // "id": "dataproc.projects.locations.autoscalingPolicies.setIamPolicy", @@ -4943,7 +4970,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall) Header() ht func (c *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5084,7 +5111,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) Header() http.Header { func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5157,7 +5184,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "name": { - // "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + // "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/autoscalingPolicies/[^/]+$", // "required": true, @@ -5224,7 +5251,7 @@ func (c *ProjectsLocationsWorkflowTemplatesCreateCall) Header() http.Header { func (c *ProjectsLocationsWorkflowTemplatesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5297,7 +5324,7 @@ func (c *ProjectsLocationsWorkflowTemplatesCreateCall) Do(opts ...googleapi.Call // ], // "parameters": { // "parent": { - // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,create, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -5371,7 +5398,7 @@ func (c *ProjectsLocationsWorkflowTemplatesDeleteCall) Header() http.Header { func (c *ProjectsLocationsWorkflowTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5439,7 +5466,7 @@ func (c *ProjectsLocationsWorkflowTemplatesDeleteCall) Do(opts ...googleapi.Call // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", // "required": true, @@ -5527,7 +5554,7 @@ func (c *ProjectsLocationsWorkflowTemplatesGetCall) Header() http.Header { func (c *ProjectsLocationsWorkflowTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5598,7 +5625,7 @@ func (c *ProjectsLocationsWorkflowTemplatesGetCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", // "required": true, @@ -5670,7 +5697,7 @@ func (c *ProjectsLocationsWorkflowTemplatesGetIamPolicyCall) Header() http.Heade func (c *ProjectsLocationsWorkflowTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5821,7 +5848,7 @@ func (c *ProjectsLocationsWorkflowTemplatesInstantiateCall) Header() http.Header func (c *ProjectsLocationsWorkflowTemplatesInstantiateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5894,7 +5921,7 @@ func (c *ProjectsLocationsWorkflowTemplatesInstantiateCall) Do(opts ...googleapi // ], // "parameters": { // "name": { - // "description": "Required. 
The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", // "required": true, @@ -5986,7 +6013,7 @@ func (c *ProjectsLocationsWorkflowTemplatesInstantiateInlineCall) Header() http. func (c *ProjectsLocationsWorkflowTemplatesInstantiateInlineCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6059,7 +6086,7 @@ func (c *ProjectsLocationsWorkflowTemplatesInstantiateInlineCall) Do(opts ...goo // ], // "parameters": { // "parent": { - // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -6154,7 +6181,7 @@ func (c *ProjectsLocationsWorkflowTemplatesListCall) Header() http.Header { func (c *ProjectsLocationsWorkflowTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6236,7 +6263,7 @@ func (c *ProjectsLocationsWorkflowTemplatesListCall) Do(opts ...googleapi.CallOp // "type": "string" // }, // "parent": { - // "description": "Required. 
The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -6287,8 +6314,8 @@ type ProjectsLocationsWorkflowTemplatesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy.Can return Public Errors: -// NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED +// resource. Replaces any existing policy.Can return NOT_FOUND, +// INVALID_ARGUMENT, and PERMISSION_DENIED errors. func (r *ProjectsLocationsWorkflowTemplatesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsWorkflowTemplatesSetIamPolicyCall { c := &ProjectsLocationsWorkflowTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6323,7 +6350,7 @@ func (c *ProjectsLocationsWorkflowTemplatesSetIamPolicyCall) Header() http.Heade func (c *ProjectsLocationsWorkflowTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6387,7 +6414,7 @@ func (c *ProjectsLocationsWorkflowTemplatesSetIamPolicyCall) Do(opts ...googleap } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy", // "httpMethod": "POST", // "id": "dataproc.projects.locations.workflowTemplates.setIamPolicy", @@ -6468,7 +6495,7 @@ func (c *ProjectsLocationsWorkflowTemplatesTestIamPermissionsCall) Header() http func (c *ProjectsLocationsWorkflowTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6609,7 +6636,7 @@ func (c *ProjectsLocationsWorkflowTemplatesUpdateCall) Header() http.Header { func (c *ProjectsLocationsWorkflowTemplatesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6682,7 +6709,7 @@ func (c *ProjectsLocationsWorkflowTemplatesUpdateCall) Do(opts ...googleapi.Call // ], // "parameters": { // "name": { - // "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + // "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", // "required": true, @@ -6749,7 +6776,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesCreateCall) Header() http.Header { func (c *ProjectsRegionsAutoscalingPoliciesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6822,7 +6849,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesCreateCall) Do(opts ...googleapi.Call // ], // "parameters": { // "parent": { - // "description": "Required. 
The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.create, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.autoscalingPolicies.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+$", // "required": true, @@ -6888,7 +6915,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesDeleteCall) Header() http.Header { func (c *ProjectsRegionsAutoscalingPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6956,7 +6983,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesDeleteCall) Do(opts ...googleapi.Call // ], // "parameters": { // "name": { - // "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + // "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", // "required": true, @@ -7029,7 +7056,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesGetCall) Header() http.Header { func (c *ProjectsRegionsAutoscalingPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7100,7 +7127,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesGetCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "name": { - // "description": "Required. 
The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + // "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", // "required": true, @@ -7166,7 +7193,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesGetIamPolicyCall) Header() http.Heade func (c *ProjectsRegionsAutoscalingPoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7330,7 +7357,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesListCall) Header() http.Header { func (c *ProjectsRegionsAutoscalingPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7412,7 +7439,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesListCall) Do(opts ...googleapi.CallOp // "type": "string" // }, // "parent": { - // "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+$", // "required": true, @@ -7463,8 +7490,8 @@ type ProjectsRegionsAutoscalingPoliciesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy.Can return Public Errors: -// NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED +// resource. 
Replaces any existing policy.Can return NOT_FOUND, +// INVALID_ARGUMENT, and PERMISSION_DENIED errors. func (r *ProjectsRegionsAutoscalingPoliciesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsRegionsAutoscalingPoliciesSetIamPolicyCall { c := &ProjectsRegionsAutoscalingPoliciesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7499,7 +7526,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesSetIamPolicyCall) Header() http.Heade func (c *ProjectsRegionsAutoscalingPoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7563,7 +7590,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesSetIamPolicyCall) Do(opts ...googleap } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", // "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:setIamPolicy", // "httpMethod": "POST", // "id": "dataproc.projects.regions.autoscalingPolicies.setIamPolicy", @@ -7644,7 +7671,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesTestIamPermissionsCall) Header() http func (c *ProjectsRegionsAutoscalingPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7785,7 +7812,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesUpdateCall) Header() http.Header { func (c *ProjectsRegionsAutoscalingPoliciesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7858,7 +7885,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesUpdateCall) Do(opts ...googleapi.Call // ], // "parameters": { // "name": { - // "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + // "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", // "required": true, @@ -7944,7 +7971,7 @@ func (c *ProjectsRegionsClustersCreateCall) Header() http.Header { func (c *ProjectsRegionsClustersCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8123,7 +8150,7 @@ func (c *ProjectsRegionsClustersDeleteCall) Header() http.Header { func (c *ProjectsRegionsClustersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8290,7 +8317,7 @@ func (c *ProjectsRegionsClustersDiagnoseCall) Header() http.Header { func (c *ProjectsRegionsClustersDiagnoseCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8458,7 +8485,7 @@ func (c *ProjectsRegionsClustersGetCall) Header() http.Header { func (c *ProjectsRegionsClustersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8610,7 +8637,7 @@ func (c *ProjectsRegionsClustersGetIamPolicyCall) Header() http.Header { func (c *ProjectsRegionsClustersGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8794,7 +8821,7 @@ func (c *ProjectsRegionsClustersListCall) Header() http.Header { func (c *ProjectsRegionsClustersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8987,36 +9014,17 @@ func (c *ProjectsRegionsClustersPatchCall) RequestId(requestId string) *Projects // example, to change the number of workers in a cluster to 5, the // update_mask parameter would be specified as // config.worker_config.num_instances, and the PATCH request body would -// specify the new value, as follows: -// { -// "config":{ -// "workerConfig":{ -// "numInstances":"5" -// } -// } -// } -// Similarly, to change the 
number of preemptible workers in a cluster
-// to 5, the update_mask parameter would be
-// config.secondary_worker_config.num_instances, and the PATCH request
-// body would be set as follows:
-// {
-// "config":{
-// "secondaryWorkerConfig":{
-// "numInstances":"5"
-// }
-// }
-// }
-// <strong>Note:</strong> Currently, only the following fields can be
-// updated:<table> <tbody> <tr> <td><strong>Mask</strong></td>
-// <td><strong>Purpose</strong></td> </tr> <tr>
-// <td><strong><em>labels</em></strong></td> <td>Update labels</td>
-// </tr> <tr>
-// <td><strong><em>config.worker_config.num_instances</em></strong></td>
-// <td>Resize primary worker group</td> </tr> <tr>
-// <td><strong><em>config.secondary_worker_config.num_instances</em></strong></td>
-// <td>Resize secondary worker group</td> </tr> <tr>
-// <td>config.autoscaling_config.policy_uri</td><td>Use, stop using, or
-// change autoscaling policies</td> </tr> </tbody> </table>
+// specify the new value, as follows: { "config":{ "workerConfig":{ +// "numInstances":"5" } } } Similarly, to change the number of +// preemptible workers in a cluster to 5, the update_mask parameter +// would be config.secondary_worker_config.num_instances, and the PATCH +// request body would be set as follows: { "config":{ +// "secondaryWorkerConfig":{ "numInstances":"5" } } } *Note:* Currently, +// only the following fields can be updated: *Mask* *Purpose* *labels* +// Update labels *config.worker_config.num_instances* Resize primary +// worker group *config.secondary_worker_config.num_instances* Resize +// secondary worker group config.autoscaling_config.policy_uri Use, stop +// using, or change autoscaling policies func (c *ProjectsRegionsClustersPatchCall) UpdateMask(updateMask string) *ProjectsRegionsClustersPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -9049,7 +9057,7 @@ func (c *ProjectsRegionsClustersPatchCall) Header() http.Header { func (c *ProjectsRegionsClustersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9155,7 +9163,7 @@ func (c *ProjectsRegionsClustersPatchCall) Do(opts ...googleapi.CallOption) (*Op // "type": "string" // }, // "updateMask": { - // "description": "Required. Specifies the path, relative to Cluster, of the field to update. For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as config.worker_config.num_instances, and the PATCH request body would specify the new value, as follows:\n{\n \"config\":{\n \"workerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\nSimilarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the PATCH request body would be set as follows:\n{\n \"config\":{\n \"secondaryWorkerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\n\u003cstrong\u003eNote:\u003c/strong\u003e Currently, only the following fields can be updated:\u003ctable\u003e \u003ctbody\u003e \u003ctr\u003e \u003ctd\u003e\u003cstrong\u003eMask\u003c/strong\u003e\u003c/td\u003e \u003ctd\u003e\u003cstrong\u003ePurpose\u003c/strong\u003e\u003c/td\u003e \u003c/tr\u003e \u003ctr\u003e \u003ctd\u003e\u003cstrong\u003e\u003cem\u003elabels\u003c/em\u003e\u003c/strong\u003e\u003c/td\u003e \u003ctd\u003eUpdate labels\u003c/td\u003e \u003c/tr\u003e \u003ctr\u003e \u003ctd\u003e\u003cstrong\u003e\u003cem\u003econfig.worker_config.num_instances\u003c/em\u003e\u003c/strong\u003e\u003c/td\u003e \u003ctd\u003eResize primary worker group\u003c/td\u003e \u003c/tr\u003e \u003ctr\u003e \u003ctd\u003e\u003cstrong\u003e\u003cem\u003econfig.secondary_worker_config.num_instances\u003c/em\u003e\u003c/strong\u003e\u003c/td\u003e \u003ctd\u003eResize secondary worker group\u003c/td\u003e \u003c/tr\u003e \u003ctr\u003e \u003ctd\u003econfig.autoscaling_config.policy_uri\u003c/td\u003e\u003ctd\u003eUse, stop using, or change autoscaling policies\u003c/td\u003e \u003c/tr\u003e \u003c/tbody\u003e \u003c/table\u003e", + // "description": "Required. Specifies the path, relative to Cluster, of the field to update. 
For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as config.worker_config.num_instances, and the PATCH request body would specify the new value, as follows: { \"config\":{ \"workerConfig\":{ \"numInstances\":\"5\" } } } Similarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the PATCH request body would be set as follows: { \"config\":{ \"secondaryWorkerConfig\":{ \"numInstances\":\"5\" } } } *Note:* Currently, only the following fields can be updated: *Mask* *Purpose* *labels* Update labels *config.worker_config.num_instances* Resize primary worker group *config.secondary_worker_config.num_instances* Resize secondary worker group config.autoscaling_config.policy_uri Use, stop using, or change autoscaling policies ", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -9187,8 +9195,8 @@ type ProjectsRegionsClustersSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy.Can return Public Errors: -// NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED +// resource. Replaces any existing policy.Can return NOT_FOUND, +// INVALID_ARGUMENT, and PERMISSION_DENIED errors. func (r *ProjectsRegionsClustersService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsRegionsClustersSetIamPolicyCall { c := &ProjectsRegionsClustersSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -9223,7 +9231,7 @@ func (c *ProjectsRegionsClustersSetIamPolicyCall) Header() http.Header { func (c *ProjectsRegionsClustersSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9287,7 +9295,7 @@ func (c *ProjectsRegionsClustersSetIamPolicyCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", // "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:setIamPolicy", // "httpMethod": "POST", // "id": "dataproc.projects.regions.clusters.setIamPolicy", @@ -9368,7 +9376,7 @@ func (c *ProjectsRegionsClustersTestIamPermissionsCall) Header() http.Header { func (c *ProjectsRegionsClustersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9517,7 +9525,7 @@ func (c *ProjectsRegionsJobsCancelCall) Header() http.Header { func (c *ProjectsRegionsJobsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9675,7 +9683,7 @@ func (c *ProjectsRegionsJobsDeleteCall) Header() http.Header { func (c *ProjectsRegionsJobsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9835,7 +9843,7 @@ func (c *ProjectsRegionsJobsGetCall) Header() http.Header { func (c *ProjectsRegionsJobsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9987,7 +9995,7 @@ func (c *ProjectsRegionsJobsGetIamPolicyCall) Header() http.Header { func (c *ProjectsRegionsJobsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10127,9 +10135,11 @@ func (c *ProjectsRegionsJobsListCall) Filter(filter string) *ProjectsRegionsJobs // jobs).If filter is provided, jobStateMatcher will be ignored. // // Possible values: -// "ALL" -// "ACTIVE" -// "NON_ACTIVE" +// "ALL" - Match all jobs, regardless of state. +// "ACTIVE" - Only match jobs in non-terminal states: PENDING, +// RUNNING, or CANCEL_PENDING. +// "NON_ACTIVE" - Only match jobs in terminal states: CANCELLED, DONE, +// or ERROR. 
func (c *ProjectsRegionsJobsListCall) JobStateMatcher(jobStateMatcher string) *ProjectsRegionsJobsListCall { c.urlParams_.Set("jobStateMatcher", jobStateMatcher) return c @@ -10186,7 +10196,7 @@ func (c *ProjectsRegionsJobsListCall) Header() http.Header { func (c *ProjectsRegionsJobsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10275,6 +10285,11 @@ func (c *ProjectsRegionsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJob // "ACTIVE", // "NON_ACTIVE" // ], + // "enumDescriptions": [ + // "Match all jobs, regardless of state.", + // "Only match jobs in non-terminal states: PENDING, RUNNING, or CANCEL_PENDING.", + // "Only match jobs in terminal states: CANCELLED, DONE, or ERROR." + // ], // "location": "query", // "type": "string" // }, @@ -10358,12 +10373,11 @@ func (r *ProjectsRegionsJobsService) Patch(projectId string, region string, jobI } // UpdateMask sets the optional parameter "updateMask": Required. -// Specifies the path, relative to Job, of the field to -// update. For example, to update the labels of a Job the -// update_mask parameter would be specified as -// labels, and the PATCH request body would specify the new -// value. Note: Currently, labels is the -// only field that can be updated. +// Specifies the path, relative to Job, of the field to update. For +// example, to update the labels of a Job the update_mask parameter +// would be specified as labels, and the PATCH request body would +// specify the new value. *Note:* Currently, labels is the only field +// that can be updated. func (c *ProjectsRegionsJobsPatchCall) UpdateMask(updateMask string) *ProjectsRegionsJobsPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -10396,7 +10410,7 @@ func (c *ProjectsRegionsJobsPatchCall) Header() http.Header { func (c *ProjectsRegionsJobsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10491,7 +10505,7 @@ func (c *ProjectsRegionsJobsPatchCall) Do(opts ...googleapi.CallOption) (*Job, e // "type": "string" // }, // "updateMask": { - // "description": "Required. Specifies the path, relative to \u003ccode\u003eJob\u003c/code\u003e, of the field to update. For example, to update the labels of a Job the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be specified as \u003ccode\u003elabels\u003c/code\u003e, and the PATCH request body would specify the new value. \u003cstrong\u003eNote:\u003c/strong\u003e Currently, \u003ccode\u003elabels\u003c/code\u003e is the only field that can be updated.", + // "description": "Required. Specifies the path, relative to Job, of the field to update. For example, to update the labels of a Job the update_mask parameter would be specified as labels, and the PATCH request body would specify the new value. 
*Note:* Currently, labels is the only field that can be updated.", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -10523,8 +10537,8 @@ type ProjectsRegionsJobsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy.Can return Public Errors: -// NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED +// resource. Replaces any existing policy.Can return NOT_FOUND, +// INVALID_ARGUMENT, and PERMISSION_DENIED errors. func (r *ProjectsRegionsJobsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsRegionsJobsSetIamPolicyCall { c := &ProjectsRegionsJobsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -10559,7 +10573,7 @@ func (c *ProjectsRegionsJobsSetIamPolicyCall) Header() http.Header { func (c *ProjectsRegionsJobsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10623,7 +10637,7 @@ func (c *ProjectsRegionsJobsSetIamPolicyCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", // "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:setIamPolicy", // "httpMethod": "POST", // "id": "dataproc.projects.regions.jobs.setIamPolicy", @@ -10701,7 +10715,7 @@ func (c *ProjectsRegionsJobsSubmitCall) Header() http.Header { func (c *ProjectsRegionsJobsSubmitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10850,7 +10864,7 @@ func (c *ProjectsRegionsJobsSubmitAsOperationCall) Header() http.Header { func (c *ProjectsRegionsJobsSubmitAsOperationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11002,7 +11016,7 @@ func (c *ProjectsRegionsJobsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsRegionsJobsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11149,7 +11163,7 @@ func (c *ProjectsRegionsOperationsCancelCall) Header() http.Header { func (c *ProjectsRegionsOperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11282,7 +11296,7 @@ func (c *ProjectsRegionsOperationsDeleteCall) Header() http.Header { func (c *ProjectsRegionsOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11425,7 +11439,7 @@ func (c *ProjectsRegionsOperationsGetCall) Header() http.Header { func (c *ProjectsRegionsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11562,7 +11576,7 @@ func (c *ProjectsRegionsOperationsGetIamPolicyCall) Header() http.Header { func (c *ProjectsRegionsOperationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11741,7 +11755,7 @@ func (c *ProjectsRegionsOperationsListCall) Header() http.Header { func (c *ProjectsRegionsOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11879,8 +11893,8 @@ type ProjectsRegionsOperationsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy.Can return Public Errors: -// NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED +// resource. Replaces any existing policy.Can return NOT_FOUND, +// INVALID_ARGUMENT, and PERMISSION_DENIED errors. func (r *ProjectsRegionsOperationsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsRegionsOperationsSetIamPolicyCall { c := &ProjectsRegionsOperationsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -11915,7 +11929,7 @@ func (c *ProjectsRegionsOperationsSetIamPolicyCall) Header() http.Header { func (c *ProjectsRegionsOperationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11979,7 +11993,7 @@ func (c *ProjectsRegionsOperationsSetIamPolicyCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", // "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:setIamPolicy", // "httpMethod": "POST", // "id": "dataproc.projects.regions.operations.setIamPolicy", @@ -12060,7 +12074,7 @@ func (c *ProjectsRegionsOperationsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsRegionsOperationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12200,7 +12214,7 @@ func (c *ProjectsRegionsWorkflowTemplatesCreateCall) Header() http.Header { func (c *ProjectsRegionsWorkflowTemplatesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12273,7 +12287,7 @@ func (c *ProjectsRegionsWorkflowTemplatesCreateCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "parent": { - // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,create, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+$", // "required": true, @@ -12347,7 +12361,7 @@ func (c *ProjectsRegionsWorkflowTemplatesDeleteCall) Header() http.Header { func (c *ProjectsRegionsWorkflowTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12415,7 +12429,7 @@ func (c *ProjectsRegionsWorkflowTemplatesDeleteCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + // "description": "Required. 
The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", // "required": true, @@ -12503,7 +12517,7 @@ func (c *ProjectsRegionsWorkflowTemplatesGetCall) Header() http.Header { func (c *ProjectsRegionsWorkflowTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12574,7 +12588,7 @@ func (c *ProjectsRegionsWorkflowTemplatesGetCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", // "required": true, @@ -12646,7 +12660,7 @@ func (c *ProjectsRegionsWorkflowTemplatesGetIamPolicyCall) Header() http.Header func (c *ProjectsRegionsWorkflowTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12797,7 +12811,7 @@ func (c *ProjectsRegionsWorkflowTemplatesInstantiateCall) Header() http.Header { func (c *ProjectsRegionsWorkflowTemplatesInstantiateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12870,7 +12884,7 @@ func (c *ProjectsRegionsWorkflowTemplatesInstantiateCall) Do(opts ...googleapi.C // ], // "parameters": { // "name": { - // "description": "Required. 
The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", // "required": true, @@ -12962,7 +12976,7 @@ func (c *ProjectsRegionsWorkflowTemplatesInstantiateInlineCall) Header() http.He func (c *ProjectsRegionsWorkflowTemplatesInstantiateInlineCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13035,7 +13049,7 @@ func (c *ProjectsRegionsWorkflowTemplatesInstantiateInlineCall) Do(opts ...googl // ], // "parameters": { // "parent": { - // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+$", // "required": true, @@ -13130,7 +13144,7 @@ func (c *ProjectsRegionsWorkflowTemplatesListCall) Header() http.Header { func (c *ProjectsRegionsWorkflowTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13212,7 +13226,7 @@ func (c *ProjectsRegionsWorkflowTemplatesListCall) Do(opts ...googleapi.CallOpti // "type": "string" // }, // "parent": { - // "description": "Required. 
The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+$", // "required": true, @@ -13263,8 +13277,8 @@ type ProjectsRegionsWorkflowTemplatesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy.Can return Public Errors: -// NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED +// resource. Replaces any existing policy.Can return NOT_FOUND, +// INVALID_ARGUMENT, and PERMISSION_DENIED errors. func (r *ProjectsRegionsWorkflowTemplatesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsRegionsWorkflowTemplatesSetIamPolicyCall { c := &ProjectsRegionsWorkflowTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -13299,7 +13313,7 @@ func (c *ProjectsRegionsWorkflowTemplatesSetIamPolicyCall) Header() http.Header func (c *ProjectsRegionsWorkflowTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13363,7 +13377,7 @@ func (c *ProjectsRegionsWorkflowTemplatesSetIamPolicyCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", // "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy", // "httpMethod": "POST", // "id": "dataproc.projects.regions.workflowTemplates.setIamPolicy", @@ -13444,7 +13458,7 @@ func (c *ProjectsRegionsWorkflowTemplatesTestIamPermissionsCall) Header() http.H func (c *ProjectsRegionsWorkflowTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13585,7 +13599,7 @@ func (c *ProjectsRegionsWorkflowTemplatesUpdateCall) Header() http.Header { func (c *ProjectsRegionsWorkflowTemplatesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13658,7 +13672,7 @@ func (c *ProjectsRegionsWorkflowTemplatesUpdateCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "name": { - // "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + // "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", // "required": true, diff --git a/vendor/google.golang.org/api/dataproc/v1beta2/dataproc-api.json b/vendor/google.golang.org/api/dataproc/v1beta2/dataproc-api.json index b7da15f1bb3..2e6f3153322 100644 --- a/vendor/google.golang.org/api/dataproc/v1beta2/dataproc-api.json +++ b/vendor/google.golang.org/api/dataproc/v1beta2/dataproc-api.json @@ -121,7 +121,7 @@ ], "parameters": { "parent": { - "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.create, the resource name has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.autoscalingPolicies.create, the resource name has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.autoscalingPolicies.create, the resource name has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.create, the resource name has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -149,7 +149,7 @@ ], "parameters": { "name": { - "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/autoscalingPolicies/[^/]+$", "required": true, @@ -174,7 +174,7 @@ ], "parameters": { "name": { - "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/autoscalingPolicies/[^/]+$", "required": true, @@ -199,7 +199,7 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.", + "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. 
Policies without any conditional bindings may specify any valid value or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" @@ -241,7 +241,7 @@ "type": "string" }, "parent": { - "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -257,7 +257,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", "flatPath": "v1beta2/projects/{projectsId}/locations/{locationsId}/autoscalingPolicies/{autoscalingPoliciesId}:setIamPolicy", "httpMethod": "POST", "id": "dataproc.projects.locations.autoscalingPolicies.setIamPolicy", @@ -322,7 +322,7 @@ ], "parameters": { "name": { - "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/autoscalingPolicies/[^/]+$", "required": true, @@ -354,7 +354,7 @@ ], "parameters": { "parent": { - "description": "Required. 
The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,create, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -382,7 +382,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, @@ -413,7 +413,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, @@ -444,7 +444,7 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. 
Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.", + "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" @@ -475,7 +475,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, @@ -508,7 +508,7 @@ "type": "string" }, "parent": { - "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -552,7 +552,7 @@ "type": "string" }, "parent": { - "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. 
The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -568,7 +568,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", "flatPath": "v1beta2/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy", "httpMethod": "POST", "id": "dataproc.projects.locations.workflowTemplates.setIamPolicy", @@ -633,7 +633,7 @@ ], "parameters": { "name": { - "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, @@ -669,7 +669,7 @@ ], "parameters": { "parent": { - "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.create, the resource name has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.autoscalingPolicies.create, the resource name has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.create, the resource name has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.create, the resource name has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+$", "required": true, @@ -697,7 +697,7 @@ ], "parameters": { "name": { - "description": "Required. 
The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, @@ -722,7 +722,7 @@ ], "parameters": { "name": { - "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, @@ -747,7 +747,7 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.", + "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" @@ -789,7 +789,7 @@ "type": "string" }, "parent": { - "description": "Required. 
The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+$", "required": true, @@ -805,7 +805,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", "flatPath": "v1beta2/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:setIamPolicy", "httpMethod": "POST", "id": "dataproc.projects.regions.autoscalingPolicies.setIamPolicy", @@ -870,7 +870,7 @@ ], "parameters": { "name": { - "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, @@ -1068,7 +1068,7 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.", + "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. 
Policies without any conditional bindings may specify any valid value or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" @@ -1177,7 +1177,7 @@ "type": "string" }, "updateMask": { - "description": "Required. Specifies the path, relative to Cluster, of the field to update. For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as config.worker_config.num_instances, and the PATCH request body would specify the new value, as follows:\n{\n \"config\":{\n \"workerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\nSimilarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the PATCH request body would be set as follows:\n{\n \"config\":{\n \"secondaryWorkerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\n\u003cstrong\u003eNote:\u003c/strong\u003e currently only the following fields can be updated:\n\u003ctable\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cstrong\u003eMask\u003c/strong\u003e\u003c/td\u003e\u003ctd\u003e\u003cstrong\u003ePurpose\u003c/strong\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003elabels\u003c/td\u003e\u003ctd\u003eUpdates labels\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003econfig.worker_config.num_instances\u003c/td\u003e\u003ctd\u003eResize primary worker\ngroup\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003econfig.secondary_worker_config.num_instances\u003c/td\u003e\u003ctd\u003eResize secondary\nworker group\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003econfig.lifecycle_config.auto_delete_ttl\u003c/td\u003e\u003ctd\u003eReset MAX TTL\nduration\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003econfig.lifecycle_config.auto_delete_time\u003c/td\u003e\u003ctd\u003eUpdate MAX TTL\ndeletion timestamp\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003econfig.lifecycle_config.idle_delete_ttl\u003c/td\u003e\u003ctd\u003eUpdate Idle TTL\nduration\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003econfig.autoscaling_config.policy_uri\u003c/td\u003e\u003ctd\u003eUse, stop using, or change\nautoscaling policies\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/table\u003e", + "description": "Required. Specifies the path, relative to Cluster, of the field to update. 
For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as config.worker_config.num_instances, and the PATCH request body would specify the new value, as follows: { \"config\":{ \"workerConfig\":{ \"numInstances\":\"5\" } } } Similarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the PATCH request body would be set as follows: { \"config\":{ \"secondaryWorkerConfig\":{ \"numInstances\":\"5\" } } } *Note:* currently only the following fields can be updated: *Mask* *Purpose* labels Updates labels config.worker_config.num_instances Resize primary worker group config.secondary_worker_config.num_instances Resize secondary worker group config.lifecycle_config.auto_delete_ttl Reset MAX TTL duration config.lifecycle_config.auto_delete_time Update MAX TTL deletion timestamp config.lifecycle_config.idle_delete_ttl Update Idle TTL duration config.autoscaling_config.policy_uri Use, stop using, or change autoscaling policies ", "format": "google-fieldmask", "location": "query", "type": "string" @@ -1195,7 +1195,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", "flatPath": "v1beta2/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:setIamPolicy", "httpMethod": "POST", "id": "dataproc.projects.regions.clusters.setIamPolicy", @@ -1463,7 +1463,7 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.", + "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" @@ -1511,6 +1511,11 @@ "ACTIVE", "NON_ACTIVE" ], + "enumDescriptions": [ + "Match all jobs, regardless of state.", + "Only match jobs in non-terminal states: PENDING, RUNNING, or CANCEL_PENDING.", + "Only match jobs in terminal states: CANCELLED, DONE, or ERROR." + ], "location": "query", "type": "string" }, @@ -1576,7 +1581,7 @@ "type": "string" }, "updateMask": { - "description": "Required. Specifies the path, relative to \u003ccode\u003eJob\u003c/code\u003e, of the field to update. For example, to update the labels of a Job the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be specified as \u003ccode\u003elabels\u003c/code\u003e, and the PATCH request body would specify the new value. 
\u003cstrong\u003eNote:\u003c/strong\u003e Currently, \u003ccode\u003elabels\u003c/code\u003e is the only field that can be updated.", + "description": "Required. Specifies the path, relative to Job, of the field to update. For example, to update the labels of a Job the update_mask parameter would be specified as labels, and the PATCH request body would specify the new value. *Note:* Currently, labels is the only field that can be updated.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -1594,7 +1599,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", "flatPath": "v1beta2/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:setIamPolicy", "httpMethod": "POST", "id": "dataproc.projects.regions.jobs.setIamPolicy", @@ -1806,7 +1811,7 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.", + "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" @@ -1869,7 +1874,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", "flatPath": "v1beta2/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:setIamPolicy", "httpMethod": "POST", "id": "dataproc.projects.regions.operations.setIamPolicy", @@ -1938,7 +1943,7 @@ ], "parameters": { "parent": { - "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,create, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates,create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+$", "required": true, @@ -1966,7 +1971,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", "required": true, @@ -1997,7 +2002,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", "required": true, @@ -2028,7 +2033,7 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.", + "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. 
Policies without any conditional bindings may specify any valid value or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" @@ -2059,7 +2064,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", "required": true, @@ -2092,7 +2097,7 @@ "type": "string" }, "parent": { - "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+$", "required": true, @@ -2136,7 +2141,7 @@ "type": "string" }, "parent": { - "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+$", "required": true, @@ -2152,7 +2157,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", "flatPath": "v1beta2/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy", "httpMethod": "POST", "id": "dataproc.projects.regions.workflowTemplates.setIamPolicy", @@ -2217,7 +2222,7 @@ ], "parameters": { "name": { - "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", "required": true, @@ -2242,7 +2247,7 @@ } } }, - "revision": "20200409", + "revision": "20200925", "rootUrl": "https://dataproc.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -2266,7 +2271,7 @@ "id": "AutoscalingConfig", "properties": { "policyUri": { - "description": "Optional. The autoscaling policy used by the cluster.Only resource names including projectid and location (region) are valid. Examples:\nhttps://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]\nprojects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Dataproc region.", + "description": "Optional. The autoscaling policy used by the cluster.Only resource names including projectid and location (region) are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Dataproc region.", "type": "string" } }, @@ -2284,7 +2289,8 @@ "type": "string" }, "name": { - "description": "Output only. 
The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + "readOnly": true, "type": "string" }, "secondaryWorkerConfig": { @@ -2324,7 +2330,7 @@ "type": "string" }, "scaleDownFactor": { - "description": "Required. Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job.Bounds: 0.0, 1.0.", + "description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works for more information.Bounds: 0.0, 1.0.", "format": "double", "type": "number" }, @@ -2334,7 +2340,7 @@ "type": "number" }, "scaleUpFactor": { - "description": "Required. Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.", + "description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works for more information.Bounds: 0.0, 1.0.", "format": "double", "type": "number" }, @@ -2350,12 +2356,16 @@ "description": "Associates members with a role.", "id": "Binding", "properties": { + "bindingId": { + "description": "A client-specified ID for this binding. Expected to be globally unique to support the internal bindings-by-ID API.", + "type": "string" + }, "condition": { "$ref": "Expr", - "description": "The condition that is associated with this binding. NOTE: An unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently." 
+ "description": "The condition that is associated with this binding.If the condition evaluates to true, then this binding applies to the current request.If the condition evaluates to false, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource. members can have the following values:\nallUsers: A special identifier that represents anyone who is on the internet; with or without a Google account.\nallAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account.\nuser:{emailid}: An email address that represents a specific Google account. For example, alice@example.com .\nserviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.\ngroup:{emailid}: An email address that represents a Google group. For example, admins@example.com.\ndeleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding.\ndeleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding.\ndeleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding.\ndomain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.", + "description": "Specifies the identities requesting access for a Cloud Platform resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. 
deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.", "items": { "type": "string" }, @@ -2384,6 +2394,7 @@ }, "clusterUuid": { "description": "Output only. A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster.", + "readOnly": true, "type": "string" }, "config": { @@ -2399,7 +2410,8 @@ }, "metrics": { "$ref": "ClusterMetrics", - "description": "Output only. Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release." + "description": "Output only. Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release.", + "readOnly": true }, "projectId": { "description": "Required. The Google Cloud Platform project ID that the cluster belongs to.", @@ -2407,13 +2419,15 @@ }, "status": { "$ref": "ClusterStatus", - "description": "Output only. Cluster status." + "description": "Output only. Cluster status.", + "readOnly": true }, "statusHistory": { "description": "Output only. The previous cluster status.", "items": { "$ref": "ClusterStatus" }, + "readOnly": true, "type": "array" } }, @@ -2448,7 +2462,7 @@ "description": "Optional. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config." }, "initializationActions": { - "description": "Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's \u003ccode\u003erole\u003c/code\u003e metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget):\nROLE=$(curl -H Metadata-Flavor:Google\nhttp://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)\nif [[ \"${ROLE}\" == 'Master' ]]; then\n ... master specific actions ...\nelse\n ... worker specific actions ...\nfi\n", + "description": "Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... 
fi ", "items": { "$ref": "NodeInitializationAction" }, @@ -2462,6 +2476,10 @@ "$ref": "InstanceGroupConfig", "description": "Optional. The Compute Engine config settings for the master instance in a cluster." }, + "metastoreConfig": { + "$ref": "MetastoreConfig", + "description": "Optional. Metastore configuration." + }, "secondaryWorkerConfig": { "$ref": "InstanceGroupConfig", "description": "Optional. The Compute Engine config settings for additional worker instances in a cluster." @@ -2474,6 +2492,10 @@ "$ref": "SoftwareConfig", "description": "Optional. The config settings for software inside the cluster." }, + "tempBucket": { + "description": "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.", + "type": "string" + }, "workerConfig": { "$ref": "InstanceGroupConfig", "description": "Optional. The Compute Engine config settings for worker instances in a cluster." @@ -2510,14 +2532,17 @@ "properties": { "done": { "description": "Output only. Indicates the operation is done.", + "readOnly": true, "type": "boolean" }, "error": { "description": "Output only. Error, if operation failed.", + "readOnly": true, "type": "string" }, "operationId": { "description": "Output only. The id of the cluster operation.", + "readOnly": true, "type": "string" } }, @@ -2631,6 +2656,7 @@ "properties": { "detail": { "description": "Output only. Optional details of cluster's state.", + "readOnly": true, "type": "string" }, "state": { @@ -2657,11 +2683,13 @@ "The cluster is currently stopped. It is not ready for use.", "The cluster is being started. It is not ready for use." ], + "readOnly": true, "type": "string" }, "stateStartTime": { "description": "Output only. Time when this state was entered (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).", "format": "google-datetime", + "readOnly": true, "type": "string" }, "substate": { @@ -2676,6 +2704,7 @@ "The cluster is known to be in an unhealthy state (for example, critical daemons are not running or HDFS capacity is exhausted).Applies to RUNNING state.", "The agent-reported status is out of date (may occur if Dataproc loses communication with Agent).Applies to RUNNING state." ], + "readOnly": true, "type": "string" } }, @@ -2693,6 +2722,7 @@ "properties": { "outputUri": { "description": "Output only. The Cloud Storage URI of the diagnostic output. The output report is a plain text file with a summary of collected diagnostics.", + "readOnly": true, "type": "string" } }, @@ -2720,7 +2750,7 @@ "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. 
A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}.", "id": "Empty", "properties": {}, "type": "object" @@ -2749,13 +2779,14 @@ "type": "string" }, "description": "Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.", + "readOnly": true, "type": "object" } }, "type": "object" }, "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec.Example (Comparison):\ntitle: \"Summary size limit\"\ndescription: \"Determines if a summary is less than 100 chars\"\nexpression: \"document.summary.size() \u003c 100\"\nExample (Equality):\ntitle: \"Requestor is owner\"\ndescription: \"Determines if requestor is the document owner\"\nexpression: \"document.owner == request.auth.claims.email\"\nExample (Logic):\ntitle: \"Public documents\"\ndescription: \"Determine whether the document should be publicly visible\"\nexpression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\nExample (Data Manipulation):\ntitle: \"Notification string\"\ndescription: \"Create a notification string with a timestamp.\"\nexpression: \"'New message received at ' + string(document.create_time)\"\nThe exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec.Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { @@ -2793,7 +2824,27 @@ "type": "object" }, "networkUri": { - "description": "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. 
Examples:\nhttps://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default\nprojects/[project_id]/regions/global/default\ndefault", + "description": "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default projects/[project_id]/regions/global/default default", + "type": "string" + }, + "nodeGroupAffinity": { + "$ref": "NodeGroupAffinity", + "description": "Optional. Node Group Affinity for sole-tenant clusters." + }, + "privateIpv6GoogleAccess": { + "description": "Optional. The type of IPv6 access for a cluster.", + "enum": [ + "PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED", + "INHERIT_FROM_SUBNETWORK", + "OUTBOUND", + "BIDIRECTIONAL" + ], + "enumDescriptions": [ + "If unspecified, Compute Engine default behavior will apply, which is the same as INHERIT_FROM_SUBNETWORK.", + "Private access to and from Google Services configuration inherited from the subnetwork configuration. This is the default Compute Engine behavior.", + "Enables outbound private IPv6 access to Google Services from the Dataproc cluster.", + "Enables bidirectional private IPv6 access between Google Services and the Dataproc cluster." + ], "type": "string" }, "reservationAffinity": { @@ -2801,18 +2852,18 @@ "description": "Optional. Reservation Affinity for consuming Zonal reservation." }, "serviceAccount": { - "description": "Optional. The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services.If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", + "description": "Optional. The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services.If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", "type": "string" }, "serviceAccountScopes": { - "description": "Optional. The URIs of service account scopes to be included in Compute Engine instances. 
The following base set of scopes is always included:\nhttps://www.googleapis.com/auth/cloud.useraccounts.readonly\nhttps://www.googleapis.com/auth/devstorage.read_write\nhttps://www.googleapis.com/auth/logging.writeIf no scopes are specified, the following defaults are also provided:\nhttps://www.googleapis.com/auth/bigquery\nhttps://www.googleapis.com/auth/bigtable.admin.table\nhttps://www.googleapis.com/auth/bigtable.data\nhttps://www.googleapis.com/auth/devstorage.full_control", + "description": "Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly https://www.googleapis.com/auth/devstorage.read_write https://www.googleapis.com/auth/logging.writeIf no scopes are specified, the following defaults are also provided: https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/bigtable.admin.table https://www.googleapis.com/auth/bigtable.data https://www.googleapis.com/auth/devstorage.full_control", "items": { "type": "string" }, "type": "array" }, "subnetworkUri": { - "description": "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri.A full URL, partial URI, or short name are valid. Examples:\nhttps://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0\nprojects/[project_id]/regions/us-east1/subnetworks/sub0\nsub0", + "description": "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0 projects/[project_id]/regions/us-east1/subnetworks/sub0 sub0", "type": "string" }, "tags": { @@ -2823,7 +2874,7 @@ "type": "array" }, "zoneUri": { - "description": "Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the \"global\" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present.A full URL, partial URI, or short name are valid. Examples:\nhttps://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]\nprojects/[project_id]/zones/[zone]\nus-central1-f", + "description": "Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the \"global\" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] us-central1-f", "type": "string" } }, @@ -2835,7 +2886,7 @@ "properties": { "options": { "$ref": "GetPolicyOptions", - "description": "OPTIONAL: A GetPolicyOptions object for specifying options to GetIamPolicy. This field is only used by Cloud IAM." + "description": "OPTIONAL: A GetPolicyOptions object for specifying options to GetIamPolicy." } }, "type": "object" @@ -2845,7 +2896,7 @@ "id": "GetPolicyOptions", "properties": { "requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. 
Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.", + "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -2904,7 +2955,7 @@ "type": "string" }, "mainJarFileUri": { - "description": "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'", + "description": "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'", "type": "string" }, "properties": { @@ -2995,7 +3046,7 @@ "description": "Optional. Disk option config settings." }, "imageUri": { - "description": "Optional. The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples:\nhttps://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]\nprojects/[project_id]/global/images/[image-id]\nimage-idImage family examples. Dataproc will use the most recent image from the family:\nhttps://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]\nprojects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.", + "description": "Optional. The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.", "type": "string" }, "instanceNames": { @@ -3003,22 +3054,33 @@ "items": { "type": "string" }, + "readOnly": true, + "type": "array" + }, + "instanceReferences": { + "description": "Output only. List of references to Compute Engine instances.", + "items": { + "$ref": "InstanceReference" + }, + "readOnly": true, "type": "array" }, "isPreemptible": { "description": "Output only. Specifies that this instance group contains preemptible instances.", + "readOnly": true, "type": "boolean" }, "machineTypeUri": { - "description": "Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. 
Examples:\nhttps://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2\nprojects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2\nn1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.", + "description": "Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.", "type": "string" }, "managedGroupConfig": { "$ref": "ManagedGroupConfig", - "description": "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups." + "description": "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + "readOnly": true }, "minCpuPlatform": { - "description": "Specifies the minimum cpu platform for the Instance Group. See Dataproc -\u0026gt; Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + "description": "Specifies the minimum cpu platform for the Instance Group. See Dataproc -\u003e Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", "type": "string" }, "numInstances": { @@ -3043,6 +3105,21 @@ }, "type": "object" }, + "InstanceReference": { + "description": "A reference to a Compute Engine instance.", + "id": "InstanceReference", + "properties": { + "instanceId": { + "description": "The unique identifier of the Compute Engine instance.", + "type": "string" + }, + "instanceName": { + "description": "The user-friendly name of the Compute Engine instance.", + "type": "string" + } + }, + "type": "object" + }, "InstantiateWorkflowTemplateRequest": { "description": "A request to instantiate a workflow template.", "id": "InstantiateWorkflowTemplateRequest", @@ -3055,7 +3132,7 @@ "additionalProperties": { "type": "string" }, - "description": "Optional. Map from parameter names to values that should be used for those parameters. Values may not exceed 100 characters.", + "description": "Optional. Map from parameter names to values that should be used for those parameters. Values may not exceed 1000 characters.", "type": "object" }, "requestId": { @@ -3076,14 +3153,17 @@ "properties": { "done": { "description": "Output only. Indicates whether the job is completed. If the value is false, the job is still in progress. If true, the job is completed, and status.state field will indicate if it was successful, failed, or cancelled.", + "readOnly": true, "type": "boolean" }, "driverControlFilesUri": { "description": "Output only. If present, the location of miscellaneous control files which may be used as part of job setup and handling. 
If not present, control files may be placed in the same location as driver_output_uri.", + "readOnly": true, "type": "string" }, "driverOutputResourceUri": { "description": "Output only. A URI pointing to the location of the stdout of the job's driver program.", + "readOnly": true, "type": "string" }, "hadoopJob": { @@ -3096,6 +3176,7 @@ }, "jobUuid": { "description": "Output only. A UUID that uniquely identifies a job within the project over time. This is in contrast to a user-settable reference.job_id that may be reused over time.", + "readOnly": true, "type": "string" }, "labels": { @@ -3123,7 +3204,7 @@ }, "reference": { "$ref": "JobReference", - "description": "Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a \u003ccode\u003ejob_id\u003c/code\u003e." + "description": "Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id." }, "scheduling": { "$ref": "JobScheduling", @@ -3143,17 +3224,20 @@ }, "status": { "$ref": "JobStatus", - "description": "Output only. The job status. Additional application-specific status information may be contained in the \u003ccode\u003etype_job\u003c/code\u003e and \u003ccode\u003eyarn_applications\u003c/code\u003e fields." + "description": "Output only. The job status. Additional application-specific status information may be contained in the type_job and yarn_applications fields.", + "readOnly": true }, "statusHistory": { "description": "Output only. The previous job status.", "items": { "$ref": "JobStatus" }, + "readOnly": true, "type": "array" }, "submittedBy": { - "description": "Output only. The email address of the user submitting the job. For jobs submitted on the cluster, the address is \u003ccode\u003eusername@hostname\u003c/code\u003e.", + "description": "Output only. The email address of the user submitting the job. For jobs submitted on the cluster, the address is username@hostname.", + "readOnly": true, "type": "string" }, "yarnApplications": { @@ -3161,6 +3245,7 @@ "items": { "$ref": "YarnApplication" }, + "readOnly": true, "type": "array" } }, @@ -3172,20 +3257,24 @@ "properties": { "jobId": { "description": "Output only. The job id.", + "readOnly": true, "type": "string" }, "operationType": { "description": "Output only. Operation type.", + "readOnly": true, "type": "string" }, "startTime": { "description": "Output only. Job submission time.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "status": { "$ref": "JobStatus", - "description": "Output only. Most recent job status." + "description": "Output only. Most recent job status.", + "readOnly": true } }, "type": "object" @@ -3200,6 +3289,7 @@ }, "clusterUuid": { "description": "Output only. A cluster UUID generated by the Dataproc service when the job is submitted.", + "readOnly": true, "type": "string" } }, @@ -3214,7 +3304,7 @@ "type": "string" }, "projectId": { - "description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", + "description": "Optional. The ID of the Google Cloud Platform project that the job belongs to. If specified, must match the request project ID.", "type": "string" } }, @@ -3237,7 +3327,8 @@ "id": "JobStatus", "properties": { "details": { - "description": "Output only. 
Optional Job state details, such as an error description if the state is \u003ccode\u003eERROR\u003c/code\u003e.", + "description": "Output only. Optional Job state details, such as an error description if the state is ERROR.", + "readOnly": true, "type": "string" }, "state": { @@ -3266,11 +3357,13 @@ "The job has completed, but encountered an error.", "Job attempt has failed. The detail field contains failure details for this attempt.Applies to restartable jobs only." ], + "readOnly": true, "type": "string" }, "stateStartTime": { "description": "Output only. The time when this state was entered.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "substate": { @@ -3287,6 +3380,7 @@ "The Job has been received and is awaiting execution (it may be waiting for a condition to be met). See the \"details\" field for the reason for the delay.Applies to RUNNING state.", "The agent-reported status is out of date, which may be caused by a loss of communication between the agent and Dataproc. If the agent does not send a timely update, the job will fail.Applies to RUNNING state." ], + "readOnly": true, "type": "string" } }, @@ -3382,6 +3476,7 @@ "idleStartTime": { "description": "Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).", "format": "google-datetime", + "readOnly": true, "type": "string" } }, @@ -3393,6 +3488,7 @@ "properties": { "nextPageToken": { "description": "Output only. This token is included in the response if there are more results to fetch.", + "readOnly": true, "type": "string" }, "policies": { @@ -3400,6 +3496,7 @@ "items": { "$ref": "AutoscalingPolicy" }, + "readOnly": true, "type": "array" } }, @@ -3414,10 +3511,12 @@ "items": { "$ref": "Cluster" }, + "readOnly": true, "type": "array" }, "nextPageToken": { - "description": "Output only. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent \u003ccode\u003eListClustersRequest\u003c/code\u003e.", + "description": "Output only. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListClustersRequest.", + "readOnly": true, "type": "string" } }, @@ -3432,10 +3531,11 @@ "items": { "$ref": "Job" }, + "readOnly": true, "type": "array" }, "nextPageToken": { - "description": "Optional. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent \u003ccode\u003eListJobsRequest\u003c/code\u003e.", + "description": "Optional. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListJobsRequest.", "type": "string" } }, @@ -3464,7 +3564,8 @@ "id": "ListWorkflowTemplatesResponse", "properties": { "nextPageToken": { - "description": "Output only. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent \u003ccode\u003eListWorkflowTemplatesRequest\u003c/code\u003e.", + "description": "Output only. This token is included in the response if there are more results to fetch. 
To fetch additional results, provide this value as the page_token in a subsequent ListWorkflowTemplatesRequest.", + "readOnly": true, "type": "string" }, "templates": { @@ -3472,6 +3573,7 @@ "items": { "$ref": "WorkflowTemplate" }, + "readOnly": true, "type": "array" } }, @@ -3494,9 +3596,20 @@ "FATAL", "OFF" ], + "enumDescriptions": [ + "Level is unspecified. Use default level for log4j.", + "Use ALL level for log4j.", + "Use TRACE level for log4j.", + "Use DEBUG level for log4j.", + "Use INFO level for log4j.", + "Use WARN level for log4j.", + "Use ERROR level for log4j.", + "Use FATAL level for log4j.", + "Turn off log4j." + ], "type": "string" }, - "description": "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + "description": "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", "type": "object" } }, @@ -3530,10 +3643,23 @@ "properties": { "instanceGroupManagerName": { "description": "Output only. The name of the Instance Group Manager for this group.", + "readOnly": true, "type": "string" }, "instanceTemplateName": { "description": "Output only. The name of the Instance Template used for the Managed Instance Group.", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "MetastoreConfig": { + "description": "Specifies a Metastore configuration.", + "id": "MetastoreConfig", + "properties": { + "dataprocMetastoreService": { + "description": "Required. Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[dataproc_region]/services/[service-name]", "type": "string" } }, @@ -3554,6 +3680,17 @@ }, "type": "object" }, + "NodeGroupAffinity": { + "description": "Node Group Affinity for clusters using sole-tenant node groups.", + "id": "NodeGroupAffinity", + "properties": { + "nodeGroupUri": { + "description": "Required. The name of a single node group (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) a cluster will be created on.", + "type": "string" + } + }, + "type": "object" + }, "NodeInitializationAction": { "description": "Specifies an executable to run on a fully configured node and a timeout period for executable completion.", "id": "NodeInitializationAction", @@ -3726,7 +3863,7 @@ "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources.A Policy is a collection of bindings. A binding binds one or more members to a single role. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A role is a named list of permissions; each role can be an IAM predefined role or a user-created custom role.Optionally, a binding can specify a condition, which is a logical expression that allows access to a resource only if the expression evaluates to true. 
A condition can add constraints based on attributes of the request, the resource, or both.JSON example:\n{\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\"user:eve@example.com\"],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n}\nYAML example:\nbindings:\n- members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n- members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n- etag: BwWWja0YfJA=\n- version: 3\nFor a description of IAM and its features, see the IAM documentation (https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources.A Policy is a collection of bindings. A binding binds one or more members to a single role. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A role is a named list of permissions; each role can be an IAM predefined role or a user-created custom role.For some types of Google Cloud resources, a binding can also specify a condition, which is a logical expression that allows access to a resource only if the expression evaluates to true. A condition can add constraints based on attributes of the request, the resource, or both. 
To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).JSON example: { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } YAML example: bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the IAM documentation (https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "bindings": { @@ -3742,7 +3879,7 @@ "type": "string" }, "version": { - "description": "Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations:\nGetting a policy that includes a conditional role binding\nAdding a conditional role binding to a policy\nChanging a conditional role binding in a policy\nRemoving any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.", + "description": "Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -3795,7 +3932,7 @@ "id": "PySparkJob", "properties": { "archiveUris": { - "description": "Optional. 
HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.", + "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", "items": { "type": "string" }, @@ -3809,7 +3946,7 @@ "type": "array" }, "fileUris": { - "description": "Optional. HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.", + "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", "items": { "type": "string" }, @@ -3852,7 +3989,7 @@ "id": "QueryList", "properties": { "queries": { - "description": "Required. The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob:\n\"hiveJob\": {\n \"queryList\": {\n \"queries\": [\n \"query1\",\n \"query2\",\n \"query3;query4\",\n ]\n }\n}\n", + "description": "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } } ", "items": { "type": "string" }, @@ -3936,27 +4073,11 @@ "id": "SoftwareConfig", "properties": { "imageVersion": { - "description": "Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), such as \"1.2\" (including a subminor version, such as \"1.2.29\"), or the \"preview\" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.", + "description": "Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as \"1.2\" (including a subminor version, such as \"1.2.29\"), or the \"preview\" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.", "type": "string" }, "optionalComponents": { "description": "The set of optional components to activate on the cluster.", - "enumDescriptions": [ - "Unspecified component.", - "The Anaconda python distribution.", - "Docker", - "The Druid query engine.", - "Flink", - "HBase.", - "The Hive Web HCatalog (the REST service for accessing HCatalog).", - "The Jupyter Notebook.", - "The Kerberos security feature.", - "The Presto query engine.", - "The Ranger service.", - "The Solr service.", - "The Zeppelin notebook.", - "The Zookeeper service." - ], "items": { "enum": [ "COMPONENT_UNSPECIFIED", @@ -3974,6 +4095,22 @@ "ZEPPELIN", "ZOOKEEPER" ], + "enumDescriptions": [ + "Unspecified component. 
Specifying this will cause Cluster creation to fail.", + "The Anaconda python distribution.", + "Docker", + "The Druid query engine.", + "Flink", + "HBase.", + "The Hive Web HCatalog (the REST service for accessing HCatalog).", + "The Jupyter Notebook.", + "The Kerberos security feature.", + "The Presto query engine.", + "The Ranger service.", + "The Solr service.", + "The Zeppelin notebook.", + "The Zookeeper service." + ], "type": "string" }, "type": "array" @@ -3982,7 +4119,7 @@ "additionalProperties": { "type": "string" }, - "description": "Optional. The properties to set on daemon config files.Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings:\ncapacity-scheduler: capacity-scheduler.xml\ncore: core-site.xml\ndistcp: distcp-default.xml\nhdfs: hdfs-site.xml\nhive: hive-site.xml\nmapred: mapred-site.xml\npig: pig.properties\nspark: spark-defaults.conf\nyarn: yarn-site.xmlFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", + "description": "Optional. The properties to set on daemon config files.Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf yarn: yarn-site.xmlFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", "type": "object" } }, @@ -3993,7 +4130,7 @@ "id": "SparkJob", "properties": { "archiveUris": { - "description": "Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", "items": { "type": "string" }, @@ -4007,7 +4144,7 @@ "type": "array" }, "fileUris": { - "description": "Optional. HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.", + "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", "items": { "type": "string" }, @@ -4047,7 +4184,7 @@ "id": "SparkRJob", "properties": { "archiveUris": { - "description": "Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", "items": { "type": "string" }, @@ -4061,7 +4198,7 @@ "type": "array" }, "fileUris": { - "description": "Optional. HCFS URIs of files to be copied to the working directory of R drivers and distributed tasks. Useful for naively parallel tasks.", + "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", "items": { "type": "string" }, @@ -4206,7 +4343,7 @@ "type": "string" }, "fields": { - "description": "Required. Paths to all fields that the parameter replaces. 
A field is allowed to appear in at most one parameter's list of field paths.A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as placement.clusterSelector.zone.Also, field paths can reference fields using the following syntax:\nValues in maps can be referenced by key:\nlabels'key'\nplacement.clusterSelector.clusterLabels'key'\nplacement.managedCluster.labels'key'\nplacement.clusterSelector.clusterLabels'key'\njobs'step-id'.labels'key'\nJobs in the jobs list can be referenced by step-id:\njobs'step-id'.hadoopJob.mainJarFileUri\njobs'step-id'.hiveJob.queryFileUri\njobs'step-id'.pySparkJob.mainPythonFileUri\njobs'step-id'.hadoopJob.jarFileUris0\njobs'step-id'.hadoopJob.archiveUris0\njobs'step-id'.hadoopJob.fileUris0\njobs'step-id'.pySparkJob.pythonFileUris0\nItems in repeated fields can be referenced by a zero-based index:\njobs'step-id'.sparkJob.args0\nOther examples:\njobs'step-id'.hadoopJob.properties'key'\njobs'step-id'.hadoopJob.args0\njobs'step-id'.hiveJob.scriptVariables'key'\njobs'step-id'.hadoopJob.mainJarFileUri\nplacement.clusterSelector.zoneIt may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid:\nplacement.clusterSelector.clusterLabels\njobs'step-id'.sparkJob.args", + "description": "Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths.A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as placement.clusterSelector.zone.Also, field paths can reference fields using the following syntax: Values in maps can be referenced by key: labels'key' placement.clusterSelector.clusterLabels'key' placement.managedCluster.labels'key' placement.clusterSelector.clusterLabels'key' jobs'step-id'.labels'key' Jobs in the jobs list can be referenced by step-id: jobs'step-id'.hadoopJob.mainJarFileUri jobs'step-id'.hiveJob.queryFileUri jobs'step-id'.pySparkJob.mainPythonFileUri jobs'step-id'.hadoopJob.jarFileUris0 jobs'step-id'.hadoopJob.archiveUris0 jobs'step-id'.hadoopJob.fileUris0 jobs'step-id'.pySparkJob.pythonFileUris0 Items in repeated fields can be referenced by a zero-based index: jobs'step-id'.sparkJob.args0 Other examples: jobs'step-id'.hadoopJob.properties'key' jobs'step-id'.hadoopJob.args0 jobs'step-id'.hiveJob.scriptVariables'key' jobs'step-id'.hadoopJob.mainJarFileUri placement.clusterSelector.zoneIt may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: placement.clusterSelector.clusterLabels jobs'step-id'.sparkJob.args", "items": { "type": "string" }, @@ -4274,6 +4411,7 @@ "items": { "$ref": "WorkflowNode" }, + "readOnly": true, "type": "array" } }, @@ -4285,28 +4423,52 @@ "properties": { "clusterName": { "description": "Output only. The name of the target cluster.", + "readOnly": true, "type": "string" }, "clusterUuid": { "description": "Output only. The UUID of target cluster.", + "readOnly": true, "type": "string" }, "createCluster": { "$ref": "ClusterOperation", - "description": "Output only. 
The create cluster operation metadata." + "description": "Output only. The create cluster operation metadata.", + "readOnly": true + }, + "dagEndTime": { + "description": "Output only. DAG end time, which is only set for workflows with dag_timeout when the DAG ends.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "dagStartTime": { + "description": "Output only. DAG start time, which is only set for workflows with dag_timeout when the DAG begins.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "dagTimeout": { + "description": "Output only. The timeout duration for the DAG of jobs. Minimum timeout duration is 10 minutes and maximum is 24 hours, expressed as a google.protobuf.Duration. For example, \"1800\" = 1800 seconds/30 minutes duration.", + "format": "google-duration", + "readOnly": true, + "type": "string" }, "deleteCluster": { "$ref": "ClusterOperation", - "description": "Output only. The delete cluster operation metadata." + "description": "Output only. The delete cluster operation metadata.", + "readOnly": true }, "endTime": { "description": "Output only. Workflow end time.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "graph": { "$ref": "WorkflowGraph", - "description": "Output only. The workflow graph." + "description": "Output only. The workflow graph.", + "readOnly": true }, "parameters": { "additionalProperties": { @@ -4318,6 +4480,7 @@ "startTime": { "description": "Output only. Workflow start time.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "state": { @@ -4334,15 +4497,18 @@ "The operation is running.", "The operation is done; either cancelled or completed." ], + "readOnly": true, "type": "string" }, "template": { - "description": "Output only. The resource name of the workflow template as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Output only. The resource name of the workflow template as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "readOnly": true, "type": "string" }, "version": { "description": "Output only. The version of template at the time of workflow instantiation.", "format": "int32", + "readOnly": true, "type": "integer" } }, @@ -4354,10 +4520,12 @@ "properties": { "error": { "description": "Output only. The error detail.", + "readOnly": true, "type": "string" }, "jobId": { "description": "Output only. The job id; populated after the node enters RUNNING state.", + "readOnly": true, "type": "string" }, "prerequisiteStepIds": { @@ -4365,6 +4533,7 @@ "items": { "type": "string" }, + "readOnly": true, "type": "array" }, "state": { @@ -4385,10 +4554,12 @@ "The node completed successfully.", "The node failed. A node can be marked FAILED because its ancestor or peer failed." 
], + "readOnly": true, "type": "string" }, "stepId": { "description": "Output only. The name of the node.", + "readOnly": true, "type": "string" } }, @@ -4401,6 +4572,12 @@ "createTime": { "description": "Output only. The time template was created.", "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "dagTimeout": { + "description": "Optional. Timeout duration for the DAG of jobs. You can use \"s\", \"m\", \"h\", and \"d\" suffixes for second, minute, hour, and day duration values, respectively. The timeout duration must be from 10 minutes (\"10m\") to 24 hours (\"24h\" or \"1d\"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.", + "format": "google-duration", "type": "string" }, "id": { @@ -4422,7 +4599,8 @@ "type": "object" }, "name": { - "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + "readOnly": true, "type": "string" }, "parameters": { @@ -4439,6 +4617,7 @@ "updateTime": { "description": "Output only. The time template was last updated.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "version": { @@ -4465,16 +4644,18 @@ "type": "object" }, "YarnApplication": { - "description": "A YARN application created by a job. Application information is a subset of \u003ccode\u003eorg.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto\u003c/code\u003e.Beta Feature: This report is available for testing purposes only. It may be changed before final release.", + "description": "A YARN application created by a job. Application information is a subset of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.Beta Feature: This report is available for testing purposes only. It may be changed before final release.", "id": "YarnApplication", "properties": { "name": { "description": "Output only. The application name.", + "readOnly": true, "type": "string" }, "progress": { "description": "Output only. The numerical progress of the application, from 1 to 100.", "format": "float", + "readOnly": true, "type": "number" }, "state": { @@ -4501,10 +4682,12 @@ "Status is FAILED.", "Status is KILLED." ], + "readOnly": true, "type": "string" }, "trackingUrl": { "description": "Output only. The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. 
The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.", + "readOnly": true, "type": "string" } }, diff --git a/vendor/google.golang.org/api/dataproc/v1beta2/dataproc-gen.go b/vendor/google.golang.org/api/dataproc/v1beta2/dataproc-gen.go index 0edda087662..fb94960c4ea 100644 --- a/vendor/google.golang.org/api/dataproc/v1beta2/dataproc-gen.go +++ b/vendor/google.golang.org/api/dataproc/v1beta2/dataproc-gen.go @@ -77,6 +77,7 @@ const apiId = "dataproc:v1beta2" const apiName = "dataproc" const apiVersion = "v1beta2" const basePath = "https://dataproc.googleapis.com/" +const mtlsBasePath = "https://dataproc.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -92,6 +93,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -299,11 +301,7 @@ type AutoscalingConfig struct { // PolicyUri: Optional. The autoscaling policy used by the cluster.Only // resource names including projectid and location (region) are valid. // Examples: - // https://www.googleapis.com/compute/v1/projects/[project_id]/ - // locations/[dataproc_region]/autoscalingPolicies/[policy_id] - // projects/[ - // project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id - // ]Note that the policy must be in the same project and Dataproc + // https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Dataproc // region. PolicyUri string `json:"policyUri,omitempty"` @@ -342,12 +340,11 @@ type AutoscalingPolicy struct { Id string `json:"id,omitempty"` // Name: Output only. The "resource name" of the autoscaling policy, as - // described in https://cloud.google.com/apis/design/resource_names. - // For projects.regions.autoscalingPolicies, the resource name of the - // policy has the following format: + // described in https://cloud.google.com/apis/design/resource_names. For + // projects.regions.autoscalingPolicies, the resource name of the policy + // has the following format: // projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} - // - // For projects.locations.autoscalingPolicies, the resource name of the + // For projects.locations.autoscalingPolicies, the resource name of the // policy has the following format: // projects/{project_id}/locations/{location}/autoscalingPolicies/{policy // _id} @@ -433,12 +430,13 @@ type BasicYarnAutoscalingConfig struct { // 0s, 1d. GracefulDecommissionTimeout string `json:"gracefulDecommissionTimeout,omitempty"` - // ScaleDownFactor: Required. Fraction of average pending memory in the - // last cooldown period for which to remove workers. A scale-down factor - // of 1 will result in scaling down so that there is no available memory - // remaining after the update (more aggressive scaling). A scale-down - // factor of 0 disables removing workers, which can be beneficial for - // autoscaling a single job.Bounds: 0.0, 1.0. + // ScaleDownFactor: Required. 
Fraction of average YARN pending memory in + // the last cooldown period for which to remove workers. A scale-down + // factor of 1 will result in scaling down so that there is no available + // memory remaining after the update (more aggressive scaling). A + // scale-down factor of 0 disables removing workers, which can be + // beneficial for autoscaling a single job. See How autoscaling works + // for more information.Bounds: 0.0, 1.0. ScaleDownFactor float64 `json:"scaleDownFactor,omitempty"` // ScaleDownMinWorkerFraction: Optional. Minimum scale-down threshold as @@ -449,12 +447,13 @@ type BasicYarnAutoscalingConfig struct { // recommended change.Bounds: 0.0, 1.0. Default: 0.0. ScaleDownMinWorkerFraction float64 `json:"scaleDownMinWorkerFraction,omitempty"` - // ScaleUpFactor: Required. Fraction of average pending memory in the - // last cooldown period for which to add workers. A scale-up factor of - // 1.0 will result in scaling up so that there is no pending memory + // ScaleUpFactor: Required. Fraction of average YARN pending memory in + // the last cooldown period for which to add workers. A scale-up factor + // of 1.0 will result in scaling up so that there is no pending memory // remaining after the update (more aggressive scaling). A scale-up // factor closer to 0 will result in a smaller magnitude of scaling up - // (less aggressive scaling).Bounds: 0.0, 1.0. + // (less aggressive scaling). See How autoscaling works for more + // information.Bounds: 0.0, 1.0. ScaleUpFactor float64 `json:"scaleUpFactor,omitempty"` // ScaleUpMinWorkerFraction: Optional. Minimum scale-up threshold as a @@ -512,55 +511,58 @@ func (s *BasicYarnAutoscalingConfig) UnmarshalJSON(data []byte) error { // Binding: Associates members with a role. type Binding struct { - // Condition: The condition that is associated with this binding. NOTE: - // An unsatisfied condition will not allow user access via current - // binding. Different bindings, including their conditions, are examined - // independently. + // BindingId: A client-specified ID for this binding. Expected to be + // globally unique to support the internal bindings-by-ID API. + BindingId string `json:"bindingId,omitempty"` + + // Condition: The condition that is associated with this binding.If the + // condition evaluates to true, then this binding applies to the current + // request.If the condition evaluates to false, then this binding does + // not apply to the current request. However, a different role binding + // might grant the same role to one or more of the members in this + // binding.To learn which resources support conditions in their IAM + // policies, see the IAM documentation + // (https://cloud.google.com/iam/help/conditions/resource-policies). Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud - // Platform resource. members can have the following values: - // allUsers: A special identifier that represents anyone who is on the - // internet; with or without a Google account. - // allAuthenticatedUsers: A special identifier that represents anyone - // who is authenticated with a Google account or a service - // account. - // user:{emailid}: An email address that represents a specific Google - // account. For example, alice@example.com . - // serviceAccount:{emailid}: An email address that represents a service - // account. For example, - // my-other-app@appspot.gserviceaccount.com. - // group:{emailid}: An email address that represents a Google group. 
- // For example, - // admins@example.com. - // deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique - // identifier) representing a user that has been recently deleted. For - // example, alice@example.com?uid=123456789012345678901. If the user is + // Platform resource. members can have the following values: allUsers: A + // special identifier that represents anyone who is on the internet; + // with or without a Google account. allAuthenticatedUsers: A special + // identifier that represents anyone who is authenticated with a Google + // account or a service account. user:{emailid}: An email address that + // represents a specific Google account. For example, alice@example.com + // . serviceAccount:{emailid}: An email address that represents a + // service account. For example, + // my-other-app@appspot.gserviceaccount.com. group:{emailid}: An email + // address that represents a Google group. For example, + // admins@example.com. deleted:user:{emailid}?uid={uniqueid}: An email + // address (plus unique identifier) representing a user that has been + // recently deleted. For example, + // alice@example.com?uid=123456789012345678901. If the user is // recovered, this value reverts to user:{emailid} and the recovered - // user retains the role in the - // binding. + // user retains the role in the binding. // deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address - // (plus unique identifier) representing a service account that has - // been recently deleted. For example, + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, // my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. // If the service account is undeleted, this value reverts to // serviceAccount:{emailid} and the undeleted service account retains - // the role in the binding. - // deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique - // identifier) representing a Google group that has been recently - // deleted. For example, admins@example.com?uid=123456789012345678901. - // If the group is recovered, this value reverts to group:{emailid} and - // the recovered group retains the role in the - // binding. - // domain:{domain}: The G Suite domain (primary) that represents all the - // users of that domain. For example, google.com or example.com. + // the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An + // email address (plus unique identifier) representing a Google group + // that has been recently deleted. For example, + // admins@example.com?uid=123456789012345678901. If the group is + // recovered, this value reverts to group:{emailid} and the recovered + // group retains the role in the binding. domain:{domain}: The G Suite + // domain (primary) that represents all the users of that domain. For + // example, google.com or example.com. Members []string `json:"members,omitempty"` // Role: Role that is assigned to members. For example, roles/viewer, // roles/editor, or roles/owner. Role string `json:"role,omitempty"` - // ForceSendFields is a list of field names (e.g. "Condition") to + // ForceSendFields is a list of field names (e.g. "BindingId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -568,7 +570,7 @@ type Binding struct { // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Condition") to include in + // NullFields is a list of field names (e.g. "BindingId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -691,19 +693,10 @@ type ClusterConfig struct { // InitializationActions: Optional. Commands to execute on each node // after config is completed. By default, executables are run on master - // and all worker nodes. You can test a node's role - // metadata to run an executable on a master or worker node, as shown - // below using curl (you can also use wget): - // ROLE=$(curl -H - // Metadata-Flavor:Google - // http://metadata/computeMetadata/v1beta2/instanc - // e/attributes/dataproc-role) - // if [[ "${ROLE}" == 'Master' ]]; then - // ... master specific actions ... - // else - // ... worker specific actions ... - // fi - // + // and all worker nodes. You can test a node's role metadata to run an + // executable on a master or worker node, as shown below using curl (you + // can also use wget): ROLE=$(curl -H Metadata-Flavor:Google + // http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi InitializationActions []*NodeInitializationAction `json:"initializationActions,omitempty"` // LifecycleConfig: Optional. The config setting for auto delete cluster @@ -714,6 +707,9 @@ type ClusterConfig struct { // master instance in a cluster. MasterConfig *InstanceGroupConfig `json:"masterConfig,omitempty"` + // MetastoreConfig: Optional. Metastore configuration. + MetastoreConfig *MetastoreConfig `json:"metastoreConfig,omitempty"` + // SecondaryWorkerConfig: Optional. The Compute Engine config settings // for additional worker instances in a cluster. SecondaryWorkerConfig *InstanceGroupConfig `json:"secondaryWorkerConfig,omitempty"` @@ -725,6 +721,16 @@ type ClusterConfig struct { // cluster. SoftwareConfig *SoftwareConfig `json:"softwareConfig,omitempty"` + // TempBucket: Optional. A Cloud Storage bucket used to store ephemeral + // cluster and jobs data, such as Spark and MapReduce history files. If + // you do not specify a temp bucket, Dataproc will determine a Cloud + // Storage location (US, ASIA, or EU) for your cluster's temp bucket + // according to the Compute Engine zone where your cluster is deployed, + // and then create and manage this project-level, per-location bucket. + // The default bucket has a TTL of 90 days, but you can use any TTL (or + // none) if you specify a bucket. + TempBucket string `json:"tempBucket,omitempty"` + // WorkerConfig: Optional. The Compute Engine config settings for worker // instances in a cluster. WorkerConfig *InstanceGroupConfig `json:"workerConfig,omitempty"` @@ -1097,12 +1103,9 @@ func (s *DiskConfig) MarshalJSON() ([]byte, error) { // Empty: A generic empty message that you can re-use to avoid defining // duplicated empty messages in your APIs. A typical example is to use // it as the request or the response type of an API method. For -// instance: -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// The JSON representation for Empty is empty JSON object {}. 
+// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for Empty is empty +// JSON object {}. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -1178,31 +1181,21 @@ func (s *EndpointConfig) MarshalJSON() ([]byte, error) { // Expr: Represents a textual expression in the Common Expression // Language (CEL) syntax. CEL is a C-like expression language. The // syntax and semantics of CEL are documented at -// https://github.com/google/cel-spec.Example (Comparison): -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 -// chars" -// expression: "document.summary.size() < 100" -// Example (Equality): -// title: "Requestor is owner" -// description: "Determines if requestor is the document -// owner" -// expression: "document.owner == request.auth.claims.email" -// Example (Logic): -// title: "Public documents" +// https://github.com/google/cel-spec.Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" // description: "Determine whether the document should be publicly -// visible" -// expression: "document.type != 'private' && document.type != -// 'internal'" -// Example (Data Manipulation): -// title: "Notification string" -// description: "Create a notification string with a -// timestamp." +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." // expression: "'New message received at ' + -// string(document.create_time)" -// The exact variables and functions that may be referenced within an -// expression are determined by the service that evaluates it. See the -// service documentation for additional information. +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. type Expr struct { // Description: Optional. Description of the expression. This is a // longer text which describes the expression, e.g. when hovered over it @@ -1271,21 +1264,38 @@ type GceClusterConfig struct { // (https://cloud.google.com/compute/docs/subnetworks) for more // information).A full URL, partial URI, or short name are valid. // Examples: - // https://www.googleapis.com/compute/v1/projects/[project_id]/ - // regions/global/default - // projects/[project_id]/regions/global/default - // de - // fault + // https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default projects/[project_id]/regions/global/default + // default NetworkUri string `json:"networkUri,omitempty"` + // NodeGroupAffinity: Optional. Node Group Affinity for sole-tenant + // clusters. + NodeGroupAffinity *NodeGroupAffinity `json:"nodeGroupAffinity,omitempty"` + + // PrivateIpv6GoogleAccess: Optional. The type of IPv6 access for a + // cluster. 
+ // + // Possible values: + // "PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED" - If unspecified, Compute + // Engine default behavior will apply, which is the same as + // INHERIT_FROM_SUBNETWORK. + // "INHERIT_FROM_SUBNETWORK" - Private access to and from Google + // Services configuration inherited from the subnetwork configuration. + // This is the default Compute Engine behavior. + // "OUTBOUND" - Enables outbound private IPv6 access to Google + // Services from the Dataproc cluster. + // "BIDIRECTIONAL" - Enables bidirectional private IPv6 access between + // Google Services and the Dataproc cluster. + PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` + // ReservationAffinity: Optional. Reservation Affinity for consuming // Zonal reservation. ReservationAffinity *ReservationAffinity `json:"reservationAffinity,omitempty"` // ServiceAccount: Optional. The Dataproc service account // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/ - // service-accounts#service_accounts_in_cloud_dataproc) (also see VM - // Data Plane identity + // service-accounts#service_accounts_in_dataproc) (also see VM Data + // Plane identity // (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principa // ls#vm_service_account_data_plane_identity)) used by Dataproc cluster // VM instances to access Google Cloud Platform services.If not @@ -1296,31 +1306,21 @@ type GceClusterConfig struct { // ServiceAccountScopes: Optional. The URIs of service account scopes to // be included in Compute Engine instances. The following base set of - // scopes is always - // included: + // scopes is always included: // https://www.googleapis.com/auth/cloud.useraccounts.readonly - // // https://www.googleapis.com/auth/devstorage.read_write - // https://www.goog - // leapis.com/auth/logging.writeIf no scopes are specified, the - // following defaults are also - // provided: + // https://www.googleapis.com/auth/logging.writeIf no scopes are + // specified, the following defaults are also provided: // https://www.googleapis.com/auth/bigquery - // https://www.googlea - // pis.com/auth/bigtable.admin.table - // https://www.googleapis.com/auth/bigt - // able.data + // https://www.googleapis.com/auth/bigtable.admin.table + // https://www.googleapis.com/auth/bigtable.data // https://www.googleapis.com/auth/devstorage.full_control ServiceAccountScopes []string `json:"serviceAccountScopes,omitempty"` // SubnetworkUri: Optional. The Compute Engine subnetwork to be used for // machine communications. Cannot be specified with network_uri.A full - // URL, partial URI, or short name are valid. - // Examples: - // https://www.googleapis.com/compute/v1/projects/[project_id]/ - // regions/us-east1/subnetworks/sub0 - // projects/[project_id]/regions/us-eas - // t1/subnetworks/sub0 + // URL, partial URI, or short name are valid. Examples: + // https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0 projects/[project_id]/regions/us-east1/subnetworks/sub0 // sub0 SubnetworkUri string `json:"subnetworkUri,omitempty"` @@ -1334,11 +1334,8 @@ type GceClusterConfig struct { // If omitted in a non-global Dataproc region, the service will pick a // zone in the corresponding Compute Engine region. On a get request, // zone will always be present.A full URL, partial URI, or short name - // are valid. - // Examples: - // https://www.googleapis.com/compute/v1/projects/[project_id]/ - // zones/[zone] - // projects/[project_id]/zones/[zone] + // are valid. 
Examples: + // https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] // us-central1-f ZoneUri string `json:"zoneUri,omitempty"` @@ -1369,7 +1366,7 @@ func (s *GceClusterConfig) MarshalJSON() ([]byte, error) { // GetIamPolicyRequest: Request message for GetIamPolicy method. type GetIamPolicyRequest struct { // Options: OPTIONAL: A GetPolicyOptions object for specifying options - // to GetIamPolicy. This field is only used by Cloud IAM. + // to GetIamPolicy. Options *GetPolicyOptions `json:"options,omitempty"` // ForceSendFields is a list of field names (e.g. "Options") to @@ -1401,7 +1398,10 @@ type GetPolicyOptions struct { // returned.Valid values are 0, 1, and 3. Requests specifying an invalid // value will be rejected.Requests for policies with any conditional // bindings must specify version 3. Policies without any conditional - // bindings may specify any valid value or leave the field unset. + // bindings may specify any valid value or leave the field unset.To + // learn which resources support conditions in their IAM policies, see + // the IAM documentation + // (https://cloud.google.com/iam/help/conditions/resource-policies). RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1654,21 +1654,9 @@ type InstanceGroupConfig struct { // ImageUri: Optional. The Compute Engine image resource used for // cluster instances.The URI can represent an image or image - // family.Image - // examples: - // https://www.googleapis.com/compute/beta/projects/[project_id - // ]/global/images/[image-id] - // projects/[project_id]/global/images/[image- - // id] - // image-idImage family examples. Dataproc will use the most recent - // image from the - // family: - // https://www.googleapis.com/compute/beta/projects/[project_id]/ - // global/images/family/[custom-image-family-name] - // projects/[project_id]/ - // global/images/family/[custom-image-family-name]If the URI is - // unspecified, it will be inferred from SoftwareConfig.image_version or - // the system default. + // family.Image examples: + // https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system + // default. ImageUri string `json:"imageUri,omitempty"` // InstanceNames: Output only. The list of instance names. Dataproc @@ -1676,6 +1664,10 @@ type InstanceGroupConfig struct { // group. InstanceNames []string `json:"instanceNames,omitempty"` + // InstanceReferences: Output only. List of references to Compute Engine + // instances. + InstanceReferences []*InstanceReference `json:"instanceReferences,omitempty"` + // IsPreemptible: Output only. Specifies that this instance group // contains preemptible instances. IsPreemptible bool `json:"isPreemptible,omitempty"` @@ -1683,15 +1675,8 @@ type InstanceGroupConfig struct { // MachineTypeUri: Optional. The Compute Engine machine type used for // cluster instances.A full URL, partial URI, or short name are valid. 
// Examples: - // https://www.googleapis.com/compute/v1/projects/[project_id]/ - // zones/us-east1-a/machineTypes/n1-standard-2 - // projects/[project_id]/zone - // s/us-east1-a/machineTypes/n1-standard-2 - // n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto - // Zone Placement - // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/ - // auto-zone#using_auto_zone_placement) feature, you must use the short - // name of the machine type resource, for example, n1-standard-2. + // https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, + // n1-standard-2. MachineTypeUri string `json:"machineTypeUri,omitempty"` // ManagedGroupConfig: Output only. The config for Compute Engine @@ -1700,7 +1685,7 @@ type InstanceGroupConfig struct { ManagedGroupConfig *ManagedGroupConfig `json:"managedGroupConfig,omitempty"` // MinCpuPlatform: Specifies the minimum cpu platform for the Instance - // Group. See Dataproc -> Minimum CPU Platform + // Group. See Dataproc -> Minimum CPU Platform // (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min- // cpu). MinCpuPlatform string `json:"minCpuPlatform,omitempty"` @@ -1747,6 +1732,37 @@ func (s *InstanceGroupConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceReference: A reference to a Compute Engine instance. +type InstanceReference struct { + // InstanceId: The unique identifier of the Compute Engine instance. + InstanceId string `json:"instanceId,omitempty"` + + // InstanceName: The user-friendly name of the Compute Engine instance. + InstanceName string `json:"instanceName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InstanceId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceReference) MarshalJSON() ([]byte, error) { + type NoMethod InstanceReference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstantiateWorkflowTemplateRequest: A request to instantiate a // workflow template. type InstantiateWorkflowTemplateRequest struct { @@ -1754,7 +1770,7 @@ type InstantiateWorkflowTemplateRequest struct { InstanceId string `json:"instanceId,omitempty"` // Parameters: Optional. Map from parameter names to values that should - // be used for those parameters. 
Values may not exceed 100 characters. + // be used for those parameters. Values may not exceed 1000 characters. Parameters map[string]string `json:"parameters,omitempty"` // RequestId: Optional. A tag that prevents multiple concurrent workflow @@ -1849,7 +1865,7 @@ type Job struct { // Reference: Optional. The fully qualified reference to the job, which // can be used to obtain the equivalent REST path of the job resource. // If this property is not specified when a job is created, the server - // generates a job_id. + // generates a job_id. Reference *JobReference `json:"reference,omitempty"` // Scheduling: Optional. Job scheduling configuration. @@ -1865,8 +1881,8 @@ type Job struct { SparkSqlJob *SparkSqlJob `json:"sparkSqlJob,omitempty"` // Status: Output only. The job status. Additional application-specific - // status information may be contained in the type_job and - // yarn_applications fields. + // status information may be contained in the type_job and + // yarn_applications fields. Status *JobStatus `json:"status,omitempty"` // StatusHistory: Output only. The previous job status. @@ -1874,7 +1890,7 @@ type Job struct { // SubmittedBy: Output only. The email address of the user submitting // the job. For jobs submitted on the cluster, the address is - // username@hostname. + // username@hostname. SubmittedBy string `json:"submittedBy,omitempty"` // YarnApplications: Output only. The collection of YARN applications @@ -1988,8 +2004,8 @@ type JobReference struct { // provided by the server. JobId string `json:"jobId,omitempty"` - // ProjectId: Required. The ID of the Google Cloud Platform project that - // the job belongs to. + // ProjectId: Optional. The ID of the Google Cloud Platform project that + // the job belongs to. If specified, must match the request project ID. ProjectId string `json:"projectId,omitempty"` // ForceSendFields is a list of field names (e.g. "JobId") to @@ -2051,7 +2067,7 @@ func (s *JobScheduling) MarshalJSON() ([]byte, error) { // JobStatus: Dataproc job status. type JobStatus struct { // Details: Output only. Optional Job state details, such as an error - // description if the state is ERROR. + // description if the state is ERROR. Details string `json:"details,omitempty"` // State: Output only. A state message specifying the overall job state. @@ -2313,8 +2329,7 @@ type ListClustersResponse struct { // NextPageToken: Output only. This token is included in the response if // there are more results to fetch. To fetch additional results, provide - // this value as the page_token in a subsequent - // ListClustersRequest. + // this value as the page_token in a subsequent ListClustersRequest. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2351,8 +2366,7 @@ type ListJobsResponse struct { // NextPageToken: Optional. This token is included in the response if // there are more results to fetch. To fetch additional results, provide - // this value as the page_token in a subsequent - // ListJobsRequest. + // this value as the page_token in a subsequent ListJobsRequest. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2425,7 +2439,7 @@ type ListWorkflowTemplatesResponse struct { // NextPageToken: Output only. This token is included in the response if // there are more results to fetch. To fetch additional results, provide // this value as the page_token in a subsequent - // ListWorkflowTemplatesRequest. 
+ // ListWorkflowTemplatesRequest. NextPageToken string `json:"nextPageToken,omitempty"` // Templates: Output only. WorkflowTemplates list. @@ -2568,6 +2582,39 @@ func (s *ManagedGroupConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// MetastoreConfig: Specifies a Metastore configuration. +type MetastoreConfig struct { + // DataprocMetastoreService: Required. Resource name of an existing + // Dataproc Metastore service.Example: + // projects/[project_id]/locations/[dataproc_region]/services/[service-na + // me] + DataprocMetastoreService string `json:"dataprocMetastoreService,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "DataprocMetastoreService") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DataprocMetastoreService") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *MetastoreConfig) MarshalJSON() ([]byte, error) { + type NoMethod MetastoreConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // NamespacedGkeDeploymentTarget: A full, namespace-isolated deployment // target for an existing GKE cluster. type NamespacedGkeDeploymentTarget struct { @@ -2604,6 +2651,37 @@ func (s *NamespacedGkeDeploymentTarget) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NodeGroupAffinity: Node Group Affinity for clusters using sole-tenant +// node groups. +type NodeGroupAffinity struct { + // NodeGroupUri: Required. The name of a single node group + // (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) + // a cluster will be created on. + NodeGroupUri string `json:"nodeGroupUri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NodeGroupUri") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NodeGroupUri") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *NodeGroupAffinity) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupAffinity + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // NodeInitializationAction: Specifies an executable to run on a fully // configured node and a timeout period for executable completion. type NodeInitializationAction struct { @@ -2872,56 +2950,33 @@ func (s *PigJob) MarshalJSON() ([]byte, error) { // single role. Members can be user accounts, service accounts, Google // groups, and domains (such as G Suite). A role is a named list of // permissions; each role can be an IAM predefined role or a -// user-created custom role.Optionally, a binding can specify a -// condition, which is a logical expression that allows access to a -// resource only if the expression evaluates to true. A condition can -// add constraints based on attributes of the request, the resource, or -// both.JSON example: -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": ["user:eve@example.com"], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// YAML example: -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// - etag: BwWWja0YfJA= -// - version: 3 -// For a description of IAM and its features, see the IAM documentation -// (https://cloud.google.com/iam/docs/). +// user-created custom role.For some types of Google Cloud resources, a +// binding can also specify a condition, which is a logical expression +// that allows access to a resource only if the expression evaluates to +// true. A condition can add constraints based on attributes of the +// request, the resource, or both. 
To learn which resources support +// conditions in their IAM policies, see the IAM documentation +// (https://cloud.google.com/iam/help/conditions/resource-policies).JSON +// example: { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } YAML example: bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: +// 3 For a description of IAM and its features, see the IAM +// documentation (https://cloud.google.com/iam/docs/). type Policy struct { // Bindings: Associates a list of members to a role. Optionally, may // specify a condition that determines how and when the bindings are @@ -2945,18 +3000,19 @@ type Policy struct { // Version: Specifies the format of the policy.Valid values are 0, 1, // and 3. Requests that specify an invalid value are rejected.Any // operation that affects conditional role bindings must specify version - // 3. This requirement applies to the following operations: - // Getting a policy that includes a conditional role binding - // Adding a conditional role binding to a policy - // Changing a conditional role binding in a policy - // Removing any role binding, with or without a condition, from a policy - // that includes conditionsImportant: If you use IAM Conditions, you - // must include the etag field whenever you call setIamPolicy. If you - // omit this field, then IAM allows you to overwrite a version 3 policy - // with a version 1 policy, and all of the conditions in the version 3 - // policy are lost.If a policy does not include any conditions, - // operations on that policy may specify any valid version or leave the - // field unset. + // 3. This requirement applies to the following operations: Getting a + // policy that includes a conditional role binding Adding a conditional + // role binding to a policy Changing a conditional role binding in a + // policy Removing any role binding, with or without a condition, from a + // policy that includes conditionsImportant: If you use IAM Conditions, + // you must include the etag field whenever you call setIamPolicy. If + // you omit this field, then IAM allows you to overwrite a version 3 + // policy with a version 1 policy, and all of the conditions in the + // version 3 policy are lost.If a policy does not include any + // conditions, operations on that policy may specify any valid version + // or leave the field unset.To learn which resources support conditions + // in their IAM policies, see the IAM documentation + // (https://cloud.google.com/iam/help/conditions/resource-policies). 
Version int64 `json:"version,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -3046,8 +3102,9 @@ func (s *PrestoJob) MarshalJSON() ([]byte, error) { // (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) // applications on YARN. type PySparkJob struct { - // ArchiveUris: Optional. HCFS URIs of archives to be extracted in the - // working directory of .jar, .tar, .tar.gz, .tgz, and .zip. + // ArchiveUris: Optional. HCFS URIs of archives to be extracted into the + // working directory of each executor. Supported file types: .jar, .tar, + // .tar.gz, .tgz, and .zip. ArchiveUris []string `json:"archiveUris,omitempty"` // Args: Optional. The arguments to pass to the driver. Do not include @@ -3055,9 +3112,8 @@ type PySparkJob struct { // collision may occur that causes an incorrect job submission. Args []string `json:"args,omitempty"` - // FileUris: Optional. HCFS URIs of files to be copied to the working - // directory of Python drivers and distributed tasks. Useful for naively - // parallel tasks. + // FileUris: Optional. HCFS URIs of files to be placed in the working + // directory of each executor. Useful for naively parallel tasks. FileUris []string `json:"fileUris,omitempty"` // JarFileUris: Optional. HCFS URIs of jar files to add to the @@ -3106,21 +3162,12 @@ func (s *PySparkJob) MarshalJSON() ([]byte, error) { // QueryList: A list of queries to run on a cluster. type QueryList struct { - // Queries: Required. The queries to execute. You do not need to - // terminate a query with a semicolon. Multiple queries can be specified + // Queries: Required. The queries to execute. You do not need to end a + // query expression with a semicolon. Multiple queries can be specified // in one string by separating each with a semicolon. Here is an example - // of an Cloud Dataproc API snippet that uses a QueryList to specify a - // HiveJob: - // "hiveJob": { - // "queryList": { - // "queries": [ - // "query1", - // "query2", - // "query3;query4", - // ] - // } - // } - // + // of a Dataproc API snippet that uses a QueryList to specify a HiveJob: + // "hiveJob": { "queryList": { "queries": [ "query1", "query2", + // "query3;query4", ] } } Queries []string `json:"queries,omitempty"` // ForceSendFields is a list of field names (e.g. "Queries") to @@ -3288,8 +3335,8 @@ type SoftwareConfig struct { // ImageVersion: Optional. The version of software inside the cluster. // It must be one of the supported Dataproc Versions // (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-v - // ersions#supported_cloud_dataproc_versions), such as "1.2" (including - // a subminor version, such as "1.2.29"), or the "preview" version + // ersions#supported_dataproc_versions), such as "1.2" (including a + // subminor version, such as "1.2.29"), or the "preview" version // (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-v // ersions#other_versions). If unspecified, it defaults to the latest // Debian version. @@ -3299,7 +3346,8 @@ type SoftwareConfig struct { // cluster. // // Possible values: - // "COMPONENT_UNSPECIFIED" - Unspecified component. + // "COMPONENT_UNSPECIFIED" - Unspecified component. Specifying this + // will cause Cluster creation to fail. // "ANACONDA" - The Anaconda python distribution. // "DOCKER" - Docker // "DRUID" - The Druid query engine. @@ -3319,16 +3367,11 @@ type SoftwareConfig struct { // Properties: Optional. 
The properties to set on daemon config // files.Property keys are specified in prefix:property format, for // example core:hadoop.tmp.dir. The following are supported prefixes and - // their mappings: - // capacity-scheduler: capacity-scheduler.xml - // core: core-site.xml - // distcp: distcp-default.xml - // hdfs: hdfs-site.xml - // hive: hive-site.xml - // mapred: mapred-site.xml - // pig: pig.properties - // spark: spark-defaults.conf - // yarn: yarn-site.xmlFor more information, see Cluster properties + // their mappings: capacity-scheduler: capacity-scheduler.xml core: + // core-site.xml distcp: distcp-default.xml hdfs: hdfs-site.xml hive: + // hive-site.xml mapred: mapred-site.xml pig: pig.properties spark: + // spark-defaults.conf yarn: yarn-site.xmlFor more information, see + // Cluster properties // (https://cloud.google.com/dataproc/docs/concepts/cluster-properties). Properties map[string]string `json:"properties,omitempty"` @@ -3363,9 +3406,9 @@ func (s *SoftwareConfig) MarshalJSON() ([]byte, error) { // CommonJob.jar_file_uris, and then specify the main class name in // main_class. type SparkJob struct { - // ArchiveUris: Optional. HCFS URIs of archives to be extracted in the - // working directory of Spark drivers and tasks. Supported file types: - // .jar, .tar, .tar.gz, .tgz, and .zip. + // ArchiveUris: Optional. HCFS URIs of archives to be extracted into the + // working directory of each executor. Supported file types: .jar, .tar, + // .tar.gz, .tgz, and .zip. ArchiveUris []string `json:"archiveUris,omitempty"` // Args: Optional. The arguments to pass to the driver. Do not include @@ -3373,9 +3416,8 @@ type SparkJob struct { // collision may occur that causes an incorrect job submission. Args []string `json:"args,omitempty"` - // FileUris: Optional. HCFS URIs of files to be copied to the working - // directory of Spark drivers and distributed tasks. Useful for naively - // parallel tasks. + // FileUris: Optional. HCFS URIs of files to be placed in the working + // directory of each executor. Useful for naively parallel tasks. FileUris []string `json:"fileUris,omitempty"` // JarFileUris: Optional. HCFS URIs of jar files to add to the @@ -3427,9 +3469,9 @@ func (s *SparkJob) MarshalJSON() ([]byte, error) { // (https://spark.apache.org/docs/latest/sparkr.html) applications on // YARN. type SparkRJob struct { - // ArchiveUris: Optional. HCFS URIs of archives to be extracted in the - // working directory of Spark drivers and tasks. Supported file types: - // .jar, .tar, .tar.gz, .tgz, and .zip. + // ArchiveUris: Optional. HCFS URIs of archives to be extracted into the + // working directory of each executor. Supported file types: .jar, .tar, + // .tar.gz, .tgz, and .zip. ArchiveUris []string `json:"archiveUris,omitempty"` // Args: Optional. The arguments to pass to the driver. Do not include @@ -3437,9 +3479,8 @@ type SparkRJob struct { // collision may occur that causes an incorrect job submission. Args []string `json:"args,omitempty"` - // FileUris: Optional. HCFS URIs of files to be copied to the working - // directory of R drivers and distributed tasks. Useful for naively - // parallel tasks. + // FileUris: Optional. HCFS URIs of files to be placed in the working + // directory of each executor. Useful for naively parallel tasks. FileUris []string `json:"fileUris,omitempty"` // LoggingConfig: Optional. The runtime log config for job execution. @@ -3703,48 +3744,29 @@ type TemplateParameter struct { // google.protobuf.FieldMask. 
For example, a field path that references // the zone field of a workflow template's cluster selector would be // specified as placement.clusterSelector.zone.Also, field paths can - // reference fields using the following syntax: - // Values in maps can be referenced by - // key: - // labels'key' + // reference fields using the following syntax: Values in maps can be + // referenced by key: labels'key' // placement.clusterSelector.clusterLabels'key' - // placemen - // t.managedCluster.labels'key' - // placement.clusterSelector.clusterLabels'k - // ey' - // jobs'step-id'.labels'key' - // Jobs in the jobs list can be referenced by - // step-id: - // jobs'step-id'.hadoopJob.mainJarFileUri - // jobs'step-id'.hiveJob. - // queryFileUri + // placement.managedCluster.labels'key' + // placement.clusterSelector.clusterLabels'key' + // jobs'step-id'.labels'key' Jobs in the jobs list can be referenced by + // step-id: jobs'step-id'.hadoopJob.mainJarFileUri + // jobs'step-id'.hiveJob.queryFileUri // jobs'step-id'.pySparkJob.mainPythonFileUri - // jobs'step-id'. - // hadoopJob.jarFileUris0 + // jobs'step-id'.hadoopJob.jarFileUris0 // jobs'step-id'.hadoopJob.archiveUris0 - // jobs'step- - // id'.hadoopJob.fileUris0 - // jobs'step-id'.pySparkJob.pythonFileUris0 - // Items - // in repeated fields can be referenced by a zero-based - // index: - // jobs'step-id'.sparkJob.args0 - // Other - // examples: - // jobs'step-id'.hadoopJob.properties'key' - // jobs'step-id'.hadoop - // Job.args0 + // jobs'step-id'.hadoopJob.fileUris0 + // jobs'step-id'.pySparkJob.pythonFileUris0 Items in repeated fields can + // be referenced by a zero-based index: jobs'step-id'.sparkJob.args0 + // Other examples: jobs'step-id'.hadoopJob.properties'key' + // jobs'step-id'.hadoopJob.args0 // jobs'step-id'.hiveJob.scriptVariables'key' - // jobs'step-id'.had - // oopJob.mainJarFileUri + // jobs'step-id'.hadoopJob.mainJarFileUri // placement.clusterSelector.zoneIt may not be possible to parameterize // maps and repeated fields in their entirety since only individual map // values and individual items in repeated fields can be referenced. For - // example, the following field paths are - // invalid: - // placement.clusterSelector.clusterLabels - // jobs'step-id'.sparkJo - // b.args + // example, the following field paths are invalid: + // placement.clusterSelector.clusterLabels jobs'step-id'.sparkJob.args Fields []string `json:"fields,omitempty"` // Name: Required. Parameter name. The parameter name is used as the @@ -3914,6 +3936,20 @@ type WorkflowMetadata struct { // CreateCluster: Output only. The create cluster operation metadata. CreateCluster *ClusterOperation `json:"createCluster,omitempty"` + // DagEndTime: Output only. DAG end time, which is only set for + // workflows with dag_timeout when the DAG ends. + DagEndTime string `json:"dagEndTime,omitempty"` + + // DagStartTime: Output only. DAG start time, which is only set for + // workflows with dag_timeout when the DAG begins. + DagStartTime string `json:"dagStartTime,omitempty"` + + // DagTimeout: Output only. The timeout duration for the DAG of jobs. + // Minimum timeout duration is 10 minutes and maximum is 24 hours, + // expressed as a google.protobuf.Duration. For example, "1800" = 1800 + // seconds/30 minutes duration. + DagTimeout string `json:"dagTimeout,omitempty"` + // DeleteCluster: Output only. The delete cluster operation metadata. 
DeleteCluster *ClusterOperation `json:"deleteCluster,omitempty"` @@ -3940,12 +3976,11 @@ type WorkflowMetadata struct { State string `json:"state,omitempty"` // Template: Output only. The resource name of the workflow template as - // described in https://cloud.google.com/apis/design/resource_names. - // For projects.regions.workflowTemplates, the resource name of the - // template has the following format: + // described in https://cloud.google.com/apis/design/resource_names. For + // projects.regions.workflowTemplates, the resource name of the template + // has the following format: // projects/{project_id}/regions/{region}/workflowTemplates/{template_id} - // - // For projects.locations.workflowTemplates, the resource name of the + // For projects.locations.workflowTemplates, the resource name of the // template has the following format: // projects/{project_id}/locations/{location}/workflowTemplates/{template // _id} @@ -4033,6 +4068,16 @@ type WorkflowTemplate struct { // CreateTime: Output only. The time template was created. CreateTime string `json:"createTime,omitempty"` + // DagTimeout: Optional. Timeout duration for the DAG of jobs. You can + // use "s", "m", "h", and "d" suffixes for second, minute, hour, and day + // duration values, respectively. The timeout duration must be from 10 + // minutes ("10m") to 24 hours ("24h" or "1d"). The timer begins when + // the first job is submitted. If the workflow is running at the end of + // the timeout period, any remaining jobs are cancelled, the workflow is + // ended, and if the workflow was running on a managed cluster, the + // cluster is deleted. + DagTimeout string `json:"dagTimeout,omitempty"` + // Id: Required. The template id.The id must contain only letters (a-z, // A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin // or end with underscore or hyphen. Must consist of between 3 and 50 @@ -4053,12 +4098,11 @@ type WorkflowTemplate struct { Labels map[string]string `json:"labels,omitempty"` // Name: Output only. The resource name of the workflow template, as - // described in https://cloud.google.com/apis/design/resource_names. - // For projects.regions.workflowTemplates, the resource name of the - // template has the following format: + // described in https://cloud.google.com/apis/design/resource_names. For + // projects.regions.workflowTemplates, the resource name of the template + // has the following format: // projects/{project_id}/regions/{region}/workflowTemplates/{template_id} - // - // For projects.locations.workflowTemplates, the resource name of the + // For projects.locations.workflowTemplates, the resource name of the // template has the following format: // projects/{project_id}/locations/{location}/workflowTemplates/{template // _id} @@ -4150,9 +4194,9 @@ func (s *WorkflowTemplatePlacement) MarshalJSON() ([]byte, error) { // YarnApplication: A YARN application created by a job. Application // information is a subset of -// org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.Beta Feature: This report is available for testing purposes -// only. It may be changed before final release. +// org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.Beta +// Feature: This report is available for testing purposes only. It may +// be changed before final release. type YarnApplication struct { // Name: Output only. The application name. 
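The DagTimeout field added to WorkflowTemplate takes duration strings from "10m" to "24h". A rough sketch of setting it when creating a template through the generated client, assuming Application Default Credentials and placeholder project, region, and template IDs; Placement and Jobs are left out here, so a real template would still need them:

package main

import (
	"context"
	"log"

	dataproc "google.golang.org/api/dataproc/v1beta2"
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials.
	svc, err := dataproc.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	tmpl := &dataproc.WorkflowTemplate{
		Id:     "my-workflow",
		Labels: map[string]string{"env": "dev"},
		// If the DAG is still running after 30 minutes, remaining jobs are
		// cancelled and any managed cluster is deleted.
		DagTimeout: "30m",
		// Placement and Jobs are required for a usable template; omitted here.
	}

	parent := "projects/my-project/regions/us-central1"
	created, err := svc.Projects.Regions.WorkflowTemplates.Create(parent, tmpl).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created template %s", created.Name)
}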
Name string `json:"name,omitempty"` @@ -4264,7 +4308,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) Header() http.Header { func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4337,7 +4381,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesCreateCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "parent": { - // "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.create, the resource name has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.autoscalingPolicies.create, the resource name has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.create, the resource name has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.create, the resource name has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -4403,7 +4447,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) Header() http.Header { func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4471,7 +4515,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesDeleteCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "name": { - // "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + // "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/autoscalingPolicies/[^/]+$", // "required": true, @@ -4544,7 +4588,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesGetCall) Header() http.Header { func (c *ProjectsLocationsAutoscalingPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4615,7 +4659,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesGetCall) Do(opts ...googleapi.CallO // ], // "parameters": { // "name": { - // "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + // "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/autoscalingPolicies/[^/]+$", // "required": true, @@ -4658,7 +4702,10 @@ func (r *ProjectsLocationsAutoscalingPoliciesService) GetIamPolicy(resource stri // returned.Valid values are 0, 1, and 3. Requests specifying an invalid // value will be rejected.Requests for policies with any conditional // bindings must specify version 3. Policies without any conditional -// bindings may specify any valid value or leave the field unset. +// bindings may specify any valid value or leave the field unset.To +// learn which resources support conditions in their IAM policies, see +// the IAM documentation +// (https://cloud.google.com/iam/help/conditions/resource-policies). 
func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -4701,7 +4748,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) Header() http.Hea func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4772,7 +4819,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesGetIamPolicyCall) Do(opts ...google // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.", + // "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" @@ -4866,7 +4913,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesListCall) Header() http.Header { func (c *ProjectsLocationsAutoscalingPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4948,7 +4995,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesListCall) Do(opts ...googleapi.Call // "type": "string" // }, // "parent": { - // "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -4999,8 +5046,8 @@ type ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy.Can return Public Errors: -// NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED +// resource. Replaces any existing policy.Can return NOT_FOUND, +// INVALID_ARGUMENT, and PERMISSION_DENIED errors. func (r *ProjectsLocationsAutoscalingPoliciesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall { c := &ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5035,7 +5082,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) Header() http.Hea func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5099,7 +5146,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesSetIamPolicyCall) Do(opts ...google } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", // "flatPath": "v1beta2/projects/{projectsId}/locations/{locationsId}/autoscalingPolicies/{autoscalingPoliciesId}:setIamPolicy", // "httpMethod": "POST", // "id": "dataproc.projects.locations.autoscalingPolicies.setIamPolicy", @@ -5180,7 +5227,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall) Header() ht func (c *ProjectsLocationsAutoscalingPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5321,7 +5368,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) Header() http.Header { func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5394,7 +5441,7 @@ func (c *ProjectsLocationsAutoscalingPoliciesUpdateCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "name": { - // "description": "Output only. 
The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + // "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/autoscalingPolicies/[^/]+$", // "required": true, @@ -5461,7 +5508,7 @@ func (c *ProjectsLocationsWorkflowTemplatesCreateCall) Header() http.Header { func (c *ProjectsLocationsWorkflowTemplatesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5534,7 +5581,7 @@ func (c *ProjectsLocationsWorkflowTemplatesCreateCall) Do(opts ...googleapi.Call // ], // "parameters": { // "parent": { - // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,create, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -5608,7 +5655,7 @@ func (c *ProjectsLocationsWorkflowTemplatesDeleteCall) Header() http.Header { func (c *ProjectsLocationsWorkflowTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5676,7 +5723,7 @@ func (c *ProjectsLocationsWorkflowTemplatesDeleteCall) Do(opts ...googleapi.Call // ], // "parameters": { // "name": { - // "description": "Required. 
The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", // "required": true, @@ -5764,7 +5811,7 @@ func (c *ProjectsLocationsWorkflowTemplatesGetCall) Header() http.Header { func (c *ProjectsLocationsWorkflowTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5835,7 +5882,7 @@ func (c *ProjectsLocationsWorkflowTemplatesGetCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", // "required": true, @@ -5884,7 +5931,10 @@ func (r *ProjectsLocationsWorkflowTemplatesService) GetIamPolicy(resource string // returned.Valid values are 0, 1, and 3. Requests specifying an invalid // value will be rejected.Requests for policies with any conditional // bindings must specify version 3. Policies without any conditional -// bindings may specify any valid value or leave the field unset. +// bindings may specify any valid value or leave the field unset.To +// learn which resources support conditions in their IAM policies, see +// the IAM documentation +// (https://cloud.google.com/iam/help/conditions/resource-policies). 
func (c *ProjectsLocationsWorkflowTemplatesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsLocationsWorkflowTemplatesGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -5927,7 +5977,7 @@ func (c *ProjectsLocationsWorkflowTemplatesGetIamPolicyCall) Header() http.Heade func (c *ProjectsLocationsWorkflowTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5998,7 +6048,7 @@ func (c *ProjectsLocationsWorkflowTemplatesGetIamPolicyCall) Do(opts ...googleap // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.", + // "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" @@ -6079,7 +6129,7 @@ func (c *ProjectsLocationsWorkflowTemplatesInstantiateCall) Header() http.Header func (c *ProjectsLocationsWorkflowTemplatesInstantiateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6152,7 +6202,7 @@ func (c *ProjectsLocationsWorkflowTemplatesInstantiateCall) Do(opts ...googleapi // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", // "required": true, @@ -6251,7 +6301,7 @@ func (c *ProjectsLocationsWorkflowTemplatesInstantiateInlineCall) Header() http. func (c *ProjectsLocationsWorkflowTemplatesInstantiateInlineCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6329,7 +6379,7 @@ func (c *ProjectsLocationsWorkflowTemplatesInstantiateInlineCall) Do(opts ...goo // "type": "string" // }, // "parent": { - // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -6424,7 +6474,7 @@ func (c *ProjectsLocationsWorkflowTemplatesListCall) Header() http.Header { func (c *ProjectsLocationsWorkflowTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6506,7 +6556,7 @@ func (c *ProjectsLocationsWorkflowTemplatesListCall) Do(opts ...googleapi.CallOp // "type": "string" // }, // "parent": { - // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -6557,8 +6607,8 @@ type ProjectsLocationsWorkflowTemplatesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy.Can return Public Errors: -// NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED +// resource. Replaces any existing policy.Can return NOT_FOUND, +// INVALID_ARGUMENT, and PERMISSION_DENIED errors. func (r *ProjectsLocationsWorkflowTemplatesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsWorkflowTemplatesSetIamPolicyCall { c := &ProjectsLocationsWorkflowTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6593,7 +6643,7 @@ func (c *ProjectsLocationsWorkflowTemplatesSetIamPolicyCall) Header() http.Heade func (c *ProjectsLocationsWorkflowTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6657,7 +6707,7 @@ func (c *ProjectsLocationsWorkflowTemplatesSetIamPolicyCall) Do(opts ...googleap } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", // "flatPath": "v1beta2/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy", // "httpMethod": "POST", // "id": "dataproc.projects.locations.workflowTemplates.setIamPolicy", @@ -6738,7 +6788,7 @@ func (c *ProjectsLocationsWorkflowTemplatesTestIamPermissionsCall) Header() http func (c *ProjectsLocationsWorkflowTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6879,7 +6929,7 @@ func (c *ProjectsLocationsWorkflowTemplatesUpdateCall) Header() http.Header { func (c *ProjectsLocationsWorkflowTemplatesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6952,7 +7002,7 @@ func (c *ProjectsLocationsWorkflowTemplatesUpdateCall) Do(opts ...googleapi.Call // ], // "parameters": { // "name": { - // "description": "Output only. 
The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + // "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", // "required": true, @@ -7019,7 +7069,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesCreateCall) Header() http.Header { func (c *ProjectsRegionsAutoscalingPoliciesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7092,7 +7142,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesCreateCall) Do(opts ...googleapi.Call // ], // "parameters": { // "parent": { - // "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.create, the resource name has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.autoscalingPolicies.create, the resource name has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.create, the resource name has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.create, the resource name has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+$", // "required": true, @@ -7158,7 +7208,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesDeleteCall) Header() http.Header { func (c *ProjectsRegionsAutoscalingPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7226,7 +7276,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesDeleteCall) Do(opts ...googleapi.Call // ], // "parameters": { // "name": { - // "description": "Required. 
The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + // "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", // "required": true, @@ -7299,7 +7349,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesGetCall) Header() http.Header { func (c *ProjectsRegionsAutoscalingPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7370,7 +7420,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesGetCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "name": { - // "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + // "description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", // "required": true, @@ -7413,7 +7463,10 @@ func (r *ProjectsRegionsAutoscalingPoliciesService) GetIamPolicy(resource string // returned.Valid values are 0, 1, and 3. Requests specifying an invalid // value will be rejected.Requests for policies with any conditional // bindings must specify version 3. Policies without any conditional -// bindings may specify any valid value or leave the field unset. +// bindings may specify any valid value or leave the field unset.To +// learn which resources support conditions in their IAM policies, see +// the IAM documentation +// (https://cloud.google.com/iam/help/conditions/resource-policies). 
func (c *ProjectsRegionsAutoscalingPoliciesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsRegionsAutoscalingPoliciesGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -7456,7 +7509,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesGetIamPolicyCall) Header() http.Heade func (c *ProjectsRegionsAutoscalingPoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7527,7 +7580,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesGetIamPolicyCall) Do(opts ...googleap // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.", + // "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" @@ -7621,7 +7674,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesListCall) Header() http.Header { func (c *ProjectsRegionsAutoscalingPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7703,7 +7756,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesListCall) Do(opts ...googleapi.CallOp // "type": "string" // }, // "parent": { - // "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+$", // "required": true, @@ -7754,8 +7807,8 @@ type ProjectsRegionsAutoscalingPoliciesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy.Can return Public Errors: -// NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED +// resource. Replaces any existing policy.Can return NOT_FOUND, +// INVALID_ARGUMENT, and PERMISSION_DENIED errors. func (r *ProjectsRegionsAutoscalingPoliciesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsRegionsAutoscalingPoliciesSetIamPolicyCall { c := &ProjectsRegionsAutoscalingPoliciesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7790,7 +7843,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesSetIamPolicyCall) Header() http.Heade func (c *ProjectsRegionsAutoscalingPoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7854,7 +7907,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesSetIamPolicyCall) Do(opts ...googleap } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", // "flatPath": "v1beta2/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:setIamPolicy", // "httpMethod": "POST", // "id": "dataproc.projects.regions.autoscalingPolicies.setIamPolicy", @@ -7935,7 +7988,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesTestIamPermissionsCall) Header() http func (c *ProjectsRegionsAutoscalingPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8076,7 +8129,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesUpdateCall) Header() http.Header { func (c *ProjectsRegionsAutoscalingPoliciesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8149,7 +8202,7 @@ func (c *ProjectsRegionsAutoscalingPoliciesUpdateCall) Do(opts ...googleapi.Call // ], // "parameters": { // "name": { - // "description": "Output only. 
The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}\nFor projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", + // "description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", // "required": true, @@ -8235,7 +8288,7 @@ func (c *ProjectsRegionsClustersCreateCall) Header() http.Header { func (c *ProjectsRegionsClustersCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8414,7 +8467,7 @@ func (c *ProjectsRegionsClustersDeleteCall) Header() http.Header { func (c *ProjectsRegionsClustersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8579,7 +8632,7 @@ func (c *ProjectsRegionsClustersDiagnoseCall) Header() http.Header { func (c *ProjectsRegionsClustersDiagnoseCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8747,7 +8800,7 @@ func (c *ProjectsRegionsClustersGetCall) Header() http.Header { func (c *ProjectsRegionsClustersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8876,7 +8929,10 @@ func (r *ProjectsRegionsClustersService) GetIamPolicy(resource string) *Projects // returned.Valid values are 0, 1, and 3. Requests specifying an invalid // value will be rejected.Requests for policies with any conditional // bindings must specify version 3. Policies without any conditional -// bindings may specify any valid value or leave the field unset. +// bindings may specify any valid value or leave the field unset.To +// learn which resources support conditions in their IAM policies, see +// the IAM documentation +// (https://cloud.google.com/iam/help/conditions/resource-policies). 
func (c *ProjectsRegionsClustersGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsRegionsClustersGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -8919,7 +8975,7 @@ func (c *ProjectsRegionsClustersGetIamPolicyCall) Header() http.Header { func (c *ProjectsRegionsClustersGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8990,7 +9046,7 @@ func (c *ProjectsRegionsClustersGetIamPolicyCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.", + // "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" @@ -9104,7 +9160,7 @@ func (c *ProjectsRegionsClustersListCall) Header() http.Header { func (c *ProjectsRegionsClustersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9297,72 +9353,20 @@ func (c *ProjectsRegionsClustersPatchCall) RequestId(requestId string) *Projects // example, to change the number of workers in a cluster to 5, the // update_mask parameter would be specified as // config.worker_config.num_instances, and the PATCH request body would -// specify the new value, as follows: -// { -// "config":{ -// "workerConfig":{ -// "numInstances":"5" -// } -// } -// } -// Similarly, to change the number of preemptible workers in a cluster -// to 5, the update_mask parameter would be -// config.secondary_worker_config.num_instances, and the PATCH request -// body would be set as follows: -// { -// "config":{ -// "secondaryWorkerConfig":{ -// "numInstances":"5" -// } -// } -// } -// Note: currently only the following fields can be -// updated: -// -// -// -// -// -// -// -// -// -// -// -// -// -// -// -// -// -// -// -// -// -// -// -// -// -// -//
-// <tr><td><strong>Mask</strong></td><td><strong>Purpose</strong></td></tr>
-// <tr><td>labels</td><td>Updates labels</td></tr>
-// <tr><td>config.worker_config.num_instances</td><td>Resize primary worker group</td></tr>
-// <tr><td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td></tr>
-// <tr><td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td></tr>
-// <tr><td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td></tr>
-// <tr><td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td></tr>
-// <tr><td>config.autoscaling_config.policy_uri</td><td>Use, stop using, or change autoscaling policies</td></tr>
-// </table>
+// specify the new value, as follows: { "config":{ "workerConfig":{ +// "numInstances":"5" } } } Similarly, to change the number of +// preemptible workers in a cluster to 5, the update_mask parameter +// would be config.secondary_worker_config.num_instances, and the PATCH +// request body would be set as follows: { "config":{ +// "secondaryWorkerConfig":{ "numInstances":"5" } } } *Note:* currently +// only the following fields can be updated: *Mask* *Purpose* labels +// Updates labels config.worker_config.num_instances Resize primary +// worker group config.secondary_worker_config.num_instances Resize +// secondary worker group config.lifecycle_config.auto_delete_ttl Reset +// MAX TTL duration config.lifecycle_config.auto_delete_time Update MAX +// TTL deletion timestamp config.lifecycle_config.idle_delete_ttl Update +// Idle TTL duration config.autoscaling_config.policy_uri Use, stop +// using, or change autoscaling policies func (c *ProjectsRegionsClustersPatchCall) UpdateMask(updateMask string) *ProjectsRegionsClustersPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -9395,7 +9399,7 @@ func (c *ProjectsRegionsClustersPatchCall) Header() http.Header { func (c *ProjectsRegionsClustersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9501,7 +9505,7 @@ func (c *ProjectsRegionsClustersPatchCall) Do(opts ...googleapi.CallOption) (*Op // "type": "string" // }, // "updateMask": { - // "description": "Required. Specifies the path, relative to Cluster, of the field to update. 
For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as config.worker_config.num_instances, and the PATCH request body would specify the new value, as follows:\n{\n \"config\":{\n \"workerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\nSimilarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the PATCH request body would be set as follows:\n{\n \"config\":{\n \"secondaryWorkerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\n\u003cstrong\u003eNote:\u003c/strong\u003e currently only the following fields can be updated:\n\u003ctable\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cstrong\u003eMask\u003c/strong\u003e\u003c/td\u003e\u003ctd\u003e\u003cstrong\u003ePurpose\u003c/strong\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003elabels\u003c/td\u003e\u003ctd\u003eUpdates labels\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003econfig.worker_config.num_instances\u003c/td\u003e\u003ctd\u003eResize primary worker\ngroup\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003econfig.secondary_worker_config.num_instances\u003c/td\u003e\u003ctd\u003eResize secondary\nworker group\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003econfig.lifecycle_config.auto_delete_ttl\u003c/td\u003e\u003ctd\u003eReset MAX TTL\nduration\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003econfig.lifecycle_config.auto_delete_time\u003c/td\u003e\u003ctd\u003eUpdate MAX TTL\ndeletion timestamp\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003econfig.lifecycle_config.idle_delete_ttl\u003c/td\u003e\u003ctd\u003eUpdate Idle TTL\nduration\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003econfig.autoscaling_config.policy_uri\u003c/td\u003e\u003ctd\u003eUse, stop using, or change\nautoscaling policies\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/table\u003e", + // "description": "Required. Specifies the path, relative to Cluster, of the field to update. For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as config.worker_config.num_instances, and the PATCH request body would specify the new value, as follows: { \"config\":{ \"workerConfig\":{ \"numInstances\":\"5\" } } } Similarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the PATCH request body would be set as follows: { \"config\":{ \"secondaryWorkerConfig\":{ \"numInstances\":\"5\" } } } *Note:* currently only the following fields can be updated: *Mask* *Purpose* labels Updates labels config.worker_config.num_instances Resize primary worker group config.secondary_worker_config.num_instances Resize secondary worker group config.lifecycle_config.auto_delete_ttl Reset MAX TTL duration config.lifecycle_config.auto_delete_time Update MAX TTL deletion timestamp config.lifecycle_config.idle_delete_ttl Update Idle TTL duration config.autoscaling_config.policy_uri Use, stop using, or change autoscaling policies ", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -9533,8 +9537,8 @@ type ProjectsRegionsClustersSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy.Can return Public Errors: -// NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED +// resource. 
Replaces any existing policy.Can return NOT_FOUND, +// INVALID_ARGUMENT, and PERMISSION_DENIED errors. func (r *ProjectsRegionsClustersService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsRegionsClustersSetIamPolicyCall { c := &ProjectsRegionsClustersSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -9569,7 +9573,7 @@ func (c *ProjectsRegionsClustersSetIamPolicyCall) Header() http.Header { func (c *ProjectsRegionsClustersSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9633,7 +9637,7 @@ func (c *ProjectsRegionsClustersSetIamPolicyCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", // "flatPath": "v1beta2/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:setIamPolicy", // "httpMethod": "POST", // "id": "dataproc.projects.regions.clusters.setIamPolicy", @@ -9713,7 +9717,7 @@ func (c *ProjectsRegionsClustersStartCall) Header() http.Header { func (c *ProjectsRegionsClustersStartCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9872,7 +9876,7 @@ func (c *ProjectsRegionsClustersStopCall) Header() http.Header { func (c *ProjectsRegionsClustersStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10032,7 +10036,7 @@ func (c *ProjectsRegionsClustersTestIamPermissionsCall) Header() http.Header { func (c *ProjectsRegionsClustersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10181,7 +10185,7 @@ func (c *ProjectsRegionsJobsCancelCall) Header() http.Header { func (c *ProjectsRegionsJobsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10339,7 +10343,7 @@ func (c *ProjectsRegionsJobsDeleteCall) Header() http.Header { func (c *ProjectsRegionsJobsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" 
gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10499,7 +10503,7 @@ func (c *ProjectsRegionsJobsGetCall) Header() http.Header { func (c *ProjectsRegionsJobsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10628,7 +10632,10 @@ func (r *ProjectsRegionsJobsService) GetIamPolicy(resource string) *ProjectsRegi // returned.Valid values are 0, 1, and 3. Requests specifying an invalid // value will be rejected.Requests for policies with any conditional // bindings must specify version 3. Policies without any conditional -// bindings may specify any valid value or leave the field unset. +// bindings may specify any valid value or leave the field unset.To +// learn which resources support conditions in their IAM policies, see +// the IAM documentation +// (https://cloud.google.com/iam/help/conditions/resource-policies). func (c *ProjectsRegionsJobsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsRegionsJobsGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -10671,7 +10678,7 @@ func (c *ProjectsRegionsJobsGetIamPolicyCall) Header() http.Header { func (c *ProjectsRegionsJobsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10742,7 +10749,7 @@ func (c *ProjectsRegionsJobsGetIamPolicyCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.", + // "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" @@ -10812,9 +10819,11 @@ func (c *ProjectsRegionsJobsListCall) Filter(filter string) *ProjectsRegionsJobs // jobs).If filter is provided, jobStateMatcher will be ignored. // // Possible values: -// "ALL" -// "ACTIVE" -// "NON_ACTIVE" +// "ALL" - Match all jobs, regardless of state. +// "ACTIVE" - Only match jobs in non-terminal states: PENDING, +// RUNNING, or CANCEL_PENDING. +// "NON_ACTIVE" - Only match jobs in terminal states: CANCELLED, DONE, +// or ERROR. 
func (c *ProjectsRegionsJobsListCall) JobStateMatcher(jobStateMatcher string) *ProjectsRegionsJobsListCall { c.urlParams_.Set("jobStateMatcher", jobStateMatcher) return c @@ -10871,7 +10880,7 @@ func (c *ProjectsRegionsJobsListCall) Header() http.Header { func (c *ProjectsRegionsJobsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10960,6 +10969,11 @@ func (c *ProjectsRegionsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJob // "ACTIVE", // "NON_ACTIVE" // ], + // "enumDescriptions": [ + // "Match all jobs, regardless of state.", + // "Only match jobs in non-terminal states: PENDING, RUNNING, or CANCEL_PENDING.", + // "Only match jobs in terminal states: CANCELLED, DONE, or ERROR." + // ], // "location": "query", // "type": "string" // }, @@ -11043,12 +11057,11 @@ func (r *ProjectsRegionsJobsService) Patch(projectId string, region string, jobI } // UpdateMask sets the optional parameter "updateMask": Required. -// Specifies the path, relative to Job, of the field to -// update. For example, to update the labels of a Job the -// update_mask parameter would be specified as -// labels, and the PATCH request body would specify the new -// value. Note: Currently, labels is the -// only field that can be updated. +// Specifies the path, relative to Job, of the field to update. For +// example, to update the labels of a Job the update_mask parameter +// would be specified as labels, and the PATCH request body would +// specify the new value. *Note:* Currently, labels is the only field +// that can be updated. func (c *ProjectsRegionsJobsPatchCall) UpdateMask(updateMask string) *ProjectsRegionsJobsPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -11081,7 +11094,7 @@ func (c *ProjectsRegionsJobsPatchCall) Header() http.Header { func (c *ProjectsRegionsJobsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11176,7 +11189,7 @@ func (c *ProjectsRegionsJobsPatchCall) Do(opts ...googleapi.CallOption) (*Job, e // "type": "string" // }, // "updateMask": { - // "description": "Required. Specifies the path, relative to \u003ccode\u003eJob\u003c/code\u003e, of the field to update. For example, to update the labels of a Job the \u003ccode\u003eupdate_mask\u003c/code\u003e parameter would be specified as \u003ccode\u003elabels\u003c/code\u003e, and the PATCH request body would specify the new value. \u003cstrong\u003eNote:\u003c/strong\u003e Currently, \u003ccode\u003elabels\u003c/code\u003e is the only field that can be updated.", + // "description": "Required. Specifies the path, relative to Job, of the field to update. For example, to update the labels of a Job the update_mask parameter would be specified as labels, and the PATCH request body would specify the new value. 
*Note:* Currently, labels is the only field that can be updated.", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -11208,8 +11221,8 @@ type ProjectsRegionsJobsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy.Can return Public Errors: -// NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED +// resource. Replaces any existing policy.Can return NOT_FOUND, +// INVALID_ARGUMENT, and PERMISSION_DENIED errors. func (r *ProjectsRegionsJobsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsRegionsJobsSetIamPolicyCall { c := &ProjectsRegionsJobsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -11244,7 +11257,7 @@ func (c *ProjectsRegionsJobsSetIamPolicyCall) Header() http.Header { func (c *ProjectsRegionsJobsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11308,7 +11321,7 @@ func (c *ProjectsRegionsJobsSetIamPolicyCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", // "flatPath": "v1beta2/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:setIamPolicy", // "httpMethod": "POST", // "id": "dataproc.projects.regions.jobs.setIamPolicy", @@ -11386,7 +11399,7 @@ func (c *ProjectsRegionsJobsSubmitCall) Header() http.Header { func (c *ProjectsRegionsJobsSubmitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11535,7 +11548,7 @@ func (c *ProjectsRegionsJobsSubmitAsOperationCall) Header() http.Header { func (c *ProjectsRegionsJobsSubmitAsOperationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11687,7 +11700,7 @@ func (c *ProjectsRegionsJobsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsRegionsJobsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11834,7 +11847,7 @@ func (c *ProjectsRegionsOperationsCancelCall) Header() http.Header { func (c *ProjectsRegionsOperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11967,7 +11980,7 @@ func (c *ProjectsRegionsOperationsDeleteCall) Header() http.Header { func (c *ProjectsRegionsOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12110,7 +12123,7 @@ func (c *ProjectsRegionsOperationsGetCall) Header() http.Header { func (c *ProjectsRegionsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12224,7 +12237,10 @@ func (r *ProjectsRegionsOperationsService) GetIamPolicy(resource string) *Projec // returned.Valid values are 0, 1, and 3. Requests specifying an invalid // value will be rejected.Requests for policies with any conditional // bindings must specify version 3. Policies without any conditional -// bindings may specify any valid value or leave the field unset. +// bindings may specify any valid value or leave the field unset.To +// learn which resources support conditions in their IAM policies, see +// the IAM documentation +// (https://cloud.google.com/iam/help/conditions/resource-policies). func (c *ProjectsRegionsOperationsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsRegionsOperationsGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -12267,7 +12283,7 @@ func (c *ProjectsRegionsOperationsGetIamPolicyCall) Header() http.Header { func (c *ProjectsRegionsOperationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12338,7 +12354,7 @@ func (c *ProjectsRegionsOperationsGetIamPolicyCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.", + // "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. 
Policies without any conditional bindings may specify any valid value or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" @@ -12447,7 +12463,7 @@ func (c *ProjectsRegionsOperationsListCall) Header() http.Header { func (c *ProjectsRegionsOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12585,8 +12601,8 @@ type ProjectsRegionsOperationsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy.Can return Public Errors: -// NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED +// resource. Replaces any existing policy.Can return NOT_FOUND, +// INVALID_ARGUMENT, and PERMISSION_DENIED errors. func (r *ProjectsRegionsOperationsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsRegionsOperationsSetIamPolicyCall { c := &ProjectsRegionsOperationsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -12621,7 +12637,7 @@ func (c *ProjectsRegionsOperationsSetIamPolicyCall) Header() http.Header { func (c *ProjectsRegionsOperationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12685,7 +12701,7 @@ func (c *ProjectsRegionsOperationsSetIamPolicyCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", // "flatPath": "v1beta2/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:setIamPolicy", // "httpMethod": "POST", // "id": "dataproc.projects.regions.operations.setIamPolicy", @@ -12766,7 +12782,7 @@ func (c *ProjectsRegionsOperationsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsRegionsOperationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12906,7 +12922,7 @@ func (c *ProjectsRegionsWorkflowTemplatesCreateCall) Header() http.Header { func (c *ProjectsRegionsWorkflowTemplatesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12979,7 +12995,7 @@ func (c *ProjectsRegionsWorkflowTemplatesCreateCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "parent": { - // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,create, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+$", // "required": true, @@ -13053,7 +13069,7 @@ func (c *ProjectsRegionsWorkflowTemplatesDeleteCall) Header() http.Header { func (c *ProjectsRegionsWorkflowTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13121,7 +13137,7 @@ func (c *ProjectsRegionsWorkflowTemplatesDeleteCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + // "description": "Required. 
The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", // "required": true, @@ -13209,7 +13225,7 @@ func (c *ProjectsRegionsWorkflowTemplatesGetCall) Header() http.Header { func (c *ProjectsRegionsWorkflowTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13280,7 +13296,7 @@ func (c *ProjectsRegionsWorkflowTemplatesGetCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", // "required": true, @@ -13329,7 +13345,10 @@ func (r *ProjectsRegionsWorkflowTemplatesService) GetIamPolicy(resource string) // returned.Valid values are 0, 1, and 3. Requests specifying an invalid // value will be rejected.Requests for policies with any conditional // bindings must specify version 3. Policies without any conditional -// bindings may specify any valid value or leave the field unset. +// bindings may specify any valid value or leave the field unset.To +// learn which resources support conditions in their IAM policies, see +// the IAM documentation +// (https://cloud.google.com/iam/help/conditions/resource-policies). 
func (c *ProjectsRegionsWorkflowTemplatesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsRegionsWorkflowTemplatesGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -13372,7 +13391,7 @@ func (c *ProjectsRegionsWorkflowTemplatesGetIamPolicyCall) Header() http.Header func (c *ProjectsRegionsWorkflowTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13443,7 +13462,7 @@ func (c *ProjectsRegionsWorkflowTemplatesGetIamPolicyCall) Do(opts ...googleapi. // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.", + // "description": "Optional. The policy format version to be returned.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" @@ -13524,7 +13543,7 @@ func (c *ProjectsRegionsWorkflowTemplatesInstantiateCall) Header() http.Header { func (c *ProjectsRegionsWorkflowTemplatesInstantiateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13597,7 +13616,7 @@ func (c *ProjectsRegionsWorkflowTemplatesInstantiateCall) Do(opts ...googleapi.C // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + // "description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", // "required": true, @@ -13696,7 +13715,7 @@ func (c *ProjectsRegionsWorkflowTemplatesInstantiateInlineCall) Header() http.He func (c *ProjectsRegionsWorkflowTemplatesInstantiateInlineCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13774,7 +13793,7 @@ func (c *ProjectsRegionsWorkflowTemplatesInstantiateInlineCall) Do(opts ...googl // "type": "string" // }, // "parent": { - // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+$", // "required": true, @@ -13869,7 +13888,7 @@ func (c *ProjectsRegionsWorkflowTemplatesListCall) Header() http.Header { func (c *ProjectsRegionsWorkflowTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13951,7 +13970,7 @@ func (c *ProjectsRegionsWorkflowTemplatesListCall) Do(opts ...googleapi.CallOpti // "type": "string" // }, // "parent": { - // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region}\nFor projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", + // "description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+$", // "required": true, @@ -14002,8 +14021,8 @@ type ProjectsRegionsWorkflowTemplatesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy.Can return Public Errors: -// NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED +// resource. Replaces any existing policy.Can return NOT_FOUND, +// INVALID_ARGUMENT, and PERMISSION_DENIED errors. func (r *ProjectsRegionsWorkflowTemplatesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsRegionsWorkflowTemplatesSetIamPolicyCall { c := &ProjectsRegionsWorkflowTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -14038,7 +14057,7 @@ func (c *ProjectsRegionsWorkflowTemplatesSetIamPolicyCall) Header() http.Header func (c *ProjectsRegionsWorkflowTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14102,7 +14121,7 @@ func (c *ProjectsRegionsWorkflowTemplatesSetIamPolicyCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", // "flatPath": "v1beta2/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy", // "httpMethod": "POST", // "id": "dataproc.projects.regions.workflowTemplates.setIamPolicy", @@ -14183,7 +14202,7 @@ func (c *ProjectsRegionsWorkflowTemplatesTestIamPermissionsCall) Header() http.H func (c *ProjectsRegionsWorkflowTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14324,7 +14343,7 @@ func (c *ProjectsRegionsWorkflowTemplatesUpdateCall) Header() http.Header { func (c *ProjectsRegionsWorkflowTemplatesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14397,7 +14416,7 @@ func (c *ProjectsRegionsWorkflowTemplatesUpdateCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "name": { - // "description": "Output only. 
The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.\nFor projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}\nFor projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", + // "description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", // "location": "path", // "pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", // "required": true, diff --git a/vendor/google.golang.org/api/dns/v1/dns-api.json b/vendor/google.golang.org/api/dns/v1/dns-api.json index 6beb70dd246..a94a8ee9323 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-api.json +++ b/vendor/google.golang.org/api/dns/v1/dns-api.json @@ -17,35 +17,64 @@ } } }, - "basePath": "/dns/v1/projects/", - "baseUrl": "https://dns.googleapis.com/dns/v1/projects/", - "batchPath": "batch/dns/v1", - "description": "Configures and serves authoritative DNS records.", + "basePath": "", + "baseUrl": "https://dns.googleapis.com/", + "batchPath": "batch", + "canonicalName": "Dns", + "description": "", "discoveryVersion": "v1", - "documentationLink": "https://developers.google.com/cloud-dns", - "etag": "\"LYADMvHWYH2ul9D6m9UT9gT77YM/C0vgwgSkNuwxiJI-C0iMb3JKMEQ\"", + "documentationLink": "http://developers.google.com/cloud-dns", + "fullyEncodeReservedExpansion": true, "icons": { - "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", - "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" }, "id": "dns:v1", "kind": "discovery#restDescription", + "mtlsRootUrl": "https://dns.mtls.googleapis.com/", "name": "dns", "ownerDomain": "google.com", "ownerName": "Google", "parameters": { + "$.xgafv": { + "description": "V1 error format.", + "enum": [ + "1", + "2" + ], + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "type": "string" + }, + "access_token": { + "description": "OAuth access token.", + "location": "query", + "type": "string" + }, "alt": { "default": "json", - "description": "Data format for the response.", + "description": "Data format for response.", "enum": [ - "json" + "json", + "media", + "proto" ], "enumDescriptions": [ - "Responses with Content-Type of application/json" + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" ], "location": "query", "type": "string" }, + "callback": { + "description": "JSONP", + "location": "query", + "type": "string" + }, "fields": { "description": "Selector specifying which fields to include in a partial response.", "location": "query", @@ -68,12 +97,17 @@ "type": "boolean" }, "quotaUser": { - "description": "An opaque string that represents a user 
for quota purposes. Must not exceed 40 characters.", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "location": "query", + "type": "string" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", "location": "query", "type": "string" }, - "userIp": { - "description": "Deprecated. Please use quotaUser instead.", + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", "location": "query", "type": "string" } @@ -84,6 +118,7 @@ "methods": { "create": { "description": "Atomically update the ResourceRecordSet collection.", + "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", "httpMethod": "POST", "id": "dns.changes.create", "parameterOrder": [ @@ -97,7 +132,7 @@ "type": "string" }, "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -109,7 +144,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}/changes", + "path": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", "request": { "$ref": "Change" }, @@ -123,6 +158,7 @@ }, "get": { "description": "Fetch the representation of an existing Change.", + "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes/{changeId}", "httpMethod": "GET", "id": "dns.changes.get", "parameterOrder": [ @@ -143,7 +179,7 @@ "type": "string" }, "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -155,7 +191,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}/changes/{changeId}", + "path": "dns/v1/projects/{project}/managedZones/{managedZone}/changes/{changeId}", "response": { "$ref": "Change" }, @@ -168,6 +204,7 @@ }, "list": { "description": "Enumerate Changes to a ResourceRecordSet collection.", + "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", "httpMethod": "GET", "id": "dns.changes.list", "parameterOrder": [ @@ -176,7 +213,7 @@ ], "parameters": { "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -216,7 +253,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}/changes", + "path": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", "response": { "$ref": "ChangesListResponse" }, @@ -233,6 +270,7 @@ "methods": { "get": { "description": "Fetch the representation of an existing DnsKey.", + "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", "httpMethod": "GET", "id": "dns.dnsKeys.get", "parameterOrder": [ @@ -258,7 +296,7 @@ "type": "string" }, "managedZone": { - "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -270,7 +308,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", + "path": "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", "response": { "$ref": "DnsKey" }, @@ -283,6 +321,7 @@ }, "list": { "description": "Enumerate DnsKeys to a ResourceRecordSet collection.", + "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys", "httpMethod": "GET", "id": "dns.dnsKeys.list", "parameterOrder": [ @@ -296,7 +335,7 @@ "type": "string" }, "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -319,7 +358,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}/dnsKeys", + "path": "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys", "response": { "$ref": "DnsKeysListResponse" }, @@ -336,6 +375,7 @@ "methods": { "get": { "description": "Fetch the representation of an existing Operation.", + "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/operations/{operation}", "httpMethod": "GET", "id": "dns.managedZoneOperations.get", "parameterOrder": [ @@ -368,7 +408,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}/operations/{operation}", + "path": "dns/v1/projects/{project}/managedZones/{managedZone}/operations/{operation}", "response": { "$ref": "Operation" }, @@ -381,6 +421,7 @@ }, "list": { "description": "Enumerate Operations for the given ManagedZone.", + "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/operations", "httpMethod": "GET", "id": "dns.managedZoneOperations.list", "parameterOrder": [ @@ -415,8 +456,8 @@ "default": "startTime", "description": "Sorting criterion. The only supported values are START_TIME and ID.", "enum": [ - "id", - "startTime" + "startTime", + "id" ], "enumDescriptions": [ "", @@ -426,7 +467,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}/operations", + "path": "dns/v1/projects/{project}/managedZones/{managedZone}/operations", "response": { "$ref": "ManagedZoneOperationsListResponse" }, @@ -443,6 +484,7 @@ "methods": { "create": { "description": "Create a new ManagedZone.", + "flatPath": "dns/v1/projects/{project}/managedZones", "httpMethod": "POST", "id": "dns.managedZones.create", "parameterOrder": [ @@ -461,7 +503,7 @@ "type": "string" } }, - "path": "{project}/managedZones", + "path": "dns/v1/projects/{project}/managedZones", "request": { "$ref": "ManagedZone" }, @@ -475,6 +517,7 @@ }, "delete": { "description": "Delete a previously created ManagedZone.", + "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", "httpMethod": "DELETE", "id": "dns.managedZones.delete", "parameterOrder": [ @@ -488,7 +531,7 @@ "type": "string" }, "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -500,7 +543,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}", + "path": "dns/v1/projects/{project}/managedZones/{managedZone}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/ndev.clouddns.readwrite" @@ -508,6 +551,7 @@ }, "get": { "description": "Fetch the representation of an existing ManagedZone.", + "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", "httpMethod": "GET", "id": "dns.managedZones.get", "parameterOrder": [ @@ -521,7 +565,7 @@ "type": "string" }, "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -533,7 +577,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}", + "path": "dns/v1/projects/{project}/managedZones/{managedZone}", "response": { "$ref": "ManagedZone" }, @@ -546,6 +590,7 @@ }, "list": { "description": "Enumerate ManagedZones that have been created but not yet deleted.", + "flatPath": "dns/v1/projects/{project}/managedZones", "httpMethod": "GET", "id": "dns.managedZones.list", "parameterOrder": [ @@ -575,7 +620,7 @@ "type": "string" } }, - "path": "{project}/managedZones", + "path": "dns/v1/projects/{project}/managedZones", "response": { "$ref": "ManagedZonesListResponse" }, @@ -588,6 +633,7 @@ }, "patch": { "description": "Apply a partial update to an existing ManagedZone.", + "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", "httpMethod": "PATCH", "id": "dns.managedZones.patch", "parameterOrder": [ @@ -601,7 +647,7 @@ "type": "string" }, "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -613,7 +659,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}", + "path": "dns/v1/projects/{project}/managedZones/{managedZone}", "request": { "$ref": "ManagedZone" }, @@ -627,6 +673,7 @@ }, "update": { "description": "Update an existing ManagedZone.", + "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", "httpMethod": "PUT", "id": "dns.managedZones.update", "parameterOrder": [ @@ -640,7 +687,7 @@ "type": "string" }, "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -652,7 +699,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}", + "path": "dns/v1/projects/{project}/managedZones/{managedZone}", "request": { "$ref": "ManagedZone" }, @@ -670,6 +717,7 @@ "methods": { "create": { "description": "Create a new Policy", + "flatPath": "dns/v1/projects/{project}/policies", "httpMethod": "POST", "id": "dns.policies.create", "parameterOrder": [ @@ -688,7 +736,7 @@ "type": "string" } }, - "path": "{project}/policies", + "path": "dns/v1/projects/{project}/policies", "request": { "$ref": "Policy" }, @@ -702,6 +750,7 @@ }, "delete": { "description": "Delete a previously created Policy. Will fail if the policy is still being referenced by a network.", + "flatPath": "dns/v1/projects/{project}/policies/{policy}", "httpMethod": "DELETE", "id": "dns.policies.delete", "parameterOrder": [ @@ -727,7 +776,7 @@ "type": "string" } }, - "path": "{project}/policies/{policy}", + "path": "dns/v1/projects/{project}/policies/{policy}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/ndev.clouddns.readwrite" @@ -735,6 +784,7 @@ }, "get": { "description": "Fetch the representation of an existing Policy.", + "flatPath": "dns/v1/projects/{project}/policies/{policy}", "httpMethod": "GET", "id": "dns.policies.get", "parameterOrder": [ @@ -760,7 +810,7 @@ "type": "string" } }, - "path": "{project}/policies/{policy}", + "path": "dns/v1/projects/{project}/policies/{policy}", "response": { "$ref": "Policy" }, @@ -773,6 +823,7 @@ }, "list": { "description": "Enumerate all Policies associated with a project.", + "flatPath": "dns/v1/projects/{project}/policies", "httpMethod": "GET", "id": "dns.policies.list", "parameterOrder": [ @@ -797,7 +848,7 @@ "type": "string" } }, - "path": "{project}/policies", + "path": "dns/v1/projects/{project}/policies", "response": { "$ref": "PoliciesListResponse" }, @@ -810,6 +861,7 @@ }, "patch": { "description": "Apply a partial update to an existing Policy.", + "flatPath": "dns/v1/projects/{project}/policies/{policy}", "httpMethod": "PATCH", "id": "dns.policies.patch", "parameterOrder": [ @@ -835,7 +887,7 @@ "type": "string" } }, - "path": "{project}/policies/{policy}", + "path": "dns/v1/projects/{project}/policies/{policy}", "request": { "$ref": "Policy" }, @@ -849,6 +901,7 @@ }, "update": { "description": "Update an existing Policy.", + "flatPath": "dns/v1/projects/{project}/policies/{policy}", "httpMethod": "PUT", "id": "dns.policies.update", "parameterOrder": [ @@ -874,7 +927,7 @@ "type": "string" } }, - "path": "{project}/policies/{policy}", + "path": "dns/v1/projects/{project}/policies/{policy}", "request": { "$ref": "Policy" }, @@ -892,6 +945,7 @@ "methods": { "get": { "description": "Fetch the representation of an existing Project.", + "flatPath": "dns/v1/projects/{project}", "httpMethod": "GET", "id": "dns.projects.get", "parameterOrder": [ @@ -910,7 +964,7 @@ "type": "string" } }, - "path": "{project}", + "path": "dns/v1/projects/{project}", "response": { "$ref": "Project" }, @@ -927,6 +981,7 @@ "methods": { "list": { "description": "Enumerate ResourceRecordSets that have been created but not yet deleted.", + "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets", "httpMethod": "GET", "id": "dns.resourceRecordSets.list", "parameterOrder": [ @@ -935,7 +990,7 @@ ], "parameters": { "managedZone": { - "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -968,7 +1023,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}/rrsets", + "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets", "response": { "$ref": "ResourceRecordSetsListResponse" }, @@ -982,7 +1037,7 @@ } } }, - "revision": "20191205", + "revision": "20200927", "rootUrl": "https://dns.googleapis.com/", "schemas": { "Change": { @@ -1013,7 +1068,6 @@ }, "kind": { "default": "dns#change", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#change\".", "type": "string" }, "startTime": { @@ -1021,10 +1075,10 @@ "type": "string" }, "status": { - "description": "Status of the operation (output only). A status of \"done\" means that the request to update the authoritative servers has been sent, but the servers might not be updated yet.", + "description": "Status of the operation (output only). A status of \"done\" means that the request to update the authoritative servers has been sent but the servers might not be updated yet.", "enum": [ - "done", - "pending" + "pending", + "done" ], "enumDescriptions": [ "", @@ -1055,7 +1109,7 @@ "type": "string" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a \"snapshot\" of collections larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token. In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a \"snapshot\" of collections larger than the maximum page size.", "type": "string" } }, @@ -1068,11 +1122,11 @@ "algorithm": { "description": "String mnemonic specifying the DNSSEC algorithm of this key. Immutable after creation time.", "enum": [ - "ecdsap256sha256", - "ecdsap384sha384", "rsasha1", "rsasha256", - "rsasha512" + "rsasha512", + "ecdsap256sha256", + "ecdsap384sha384" ], "enumDescriptions": [ "", @@ -1118,7 +1172,6 @@ }, "kind": { "default": "dns#dnsKey", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#dnsKey\".", "type": "string" }, "publicKey": { @@ -1171,11 +1224,11 @@ "algorithm": { "description": "String mnemonic specifying the DNSSEC algorithm of this key.", "enum": [ - "ecdsap256sha256", - "ecdsap384sha384", "rsasha1", "rsasha256", - "rsasha512" + "rsasha512", + "ecdsap256sha256", + "ecdsap384sha384" ], "enumDescriptions": [ "", @@ -1205,7 +1258,6 @@ }, "kind": { "default": "dns#dnsKeySpec", - "description": "Identifies what kind of resource this is. 
Value: the fixed string \"dns#dnsKeySpec\".", "type": "string" } }, @@ -1231,7 +1283,7 @@ "type": "string" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a \"snapshot\" of collections larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token. In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a \"snapshot\" of collections larger than the maximum page size.", "type": "string" } }, @@ -1268,7 +1320,6 @@ }, "kind": { "default": "dns#managedZone", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZone\".", "type": "string" }, "labels": { @@ -1301,11 +1352,15 @@ "$ref": "ManagedZonePrivateVisibilityConfig", "description": "For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from." }, + "reverseLookupConfig": { + "$ref": "ManagedZoneReverseLookupConfig", + "description": "The presence of this field indicates that this is a managed reverse lookup zone and Cloud DNS will resolve reverse lookup queries using automatically configured records for VPC resources. This only applies to networks listed under private_visibility_config." + }, "visibility": { "description": "The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources.", "enum": [ - "private", - "public" + "public", + "private" ], "enumDescriptions": [ "", @@ -1328,7 +1383,6 @@ }, "kind": { "default": "dns#managedZoneDnsSecConfig", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZoneDnsSecConfig\".", "type": "string" }, "nonExistence": { @@ -1351,9 +1405,9 @@ "transfer" ], "enumDescriptions": [ - "", - "", - "" + "DNSSEC is disabled; the zone is not signed.", + "DNSSEC is enabled; the zone is signed and fully managed.", + "DNSSEC is enabled, but in a \"transfer\" mode." ], "type": "string" } @@ -1365,7 +1419,6 @@ "properties": { "kind": { "default": "dns#managedZoneForwardingConfig", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZoneForwardingConfig\".", "type": "string" }, "targetNameServers": { @@ -1381,13 +1434,24 @@ "ManagedZoneForwardingConfigNameServerTarget": { "id": "ManagedZoneForwardingConfigNameServerTarget", "properties": { + "forwardingPath": { + "description": "Forwarding path for this NameServerTarget. If unset or set to DEFAULT, Cloud DNS will make forwarding decision based on address ranges, i.e. RFC1918 addresses go to the VPC, non-RFC1918 addresses go to the Internet. 
When set to PRIVATE, Cloud DNS will always send queries through VPC for this target.", + "enum": [ + "default", + "private" + ], + "enumDescriptions": [ + "Cloud DNS will make forwarding decision based on address ranges, i.e. RFC1918 addresses forward to the target through the VPC and non-RFC1918 addresses will forward to the target through the Internet", + "Cloud DNS will always forward to this target through the VPC." + ], + "type": "string" + }, "ipv4Address": { "description": "IPv4 address of a target name server.", "type": "string" }, "kind": { "default": "dns#managedZoneForwardingConfigNameServerTarget", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZoneForwardingConfigNameServerTarget\".", "type": "string" } }, @@ -1401,11 +1465,10 @@ }, "kind": { "default": "dns#managedZoneOperationsListResponse", - "description": "Type of resource.", "type": "string" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token. In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", "type": "string" }, "operations": { @@ -1423,7 +1486,6 @@ "properties": { "kind": { "default": "dns#managedZonePeeringConfig", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZonePeeringConfig\".", "type": "string" }, "targetNetwork": { @@ -1442,7 +1504,6 @@ }, "kind": { "default": "dns#managedZonePeeringConfigTargetNetwork", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZonePeeringConfigTargetNetwork\".", "type": "string" }, "networkUrl": { @@ -1457,7 +1518,6 @@ "properties": { "kind": { "default": "dns#managedZonePrivateVisibilityConfig", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZonePrivateVisibilityConfig\".", "type": "string" }, "networks": { @@ -1475,7 +1535,6 @@ "properties": { "kind": { "default": "dns#managedZonePrivateVisibilityConfigNetwork", - "description": "Identifies what kind of resource this is. 
Value: the fixed string \"dns#managedZonePrivateVisibilityConfigNetwork\".", "type": "string" }, "networkUrl": { @@ -1485,6 +1544,16 @@ }, "type": "object" }, + "ManagedZoneReverseLookupConfig": { + "id": "ManagedZoneReverseLookupConfig", + "properties": { + "kind": { + "default": "dns#managedZoneReverseLookupConfig", + "type": "string" + } + }, + "type": "object" + }, "ManagedZonesListResponse": { "id": "ManagedZonesListResponse", "properties": { @@ -1504,7 +1573,7 @@ "type": "array" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token. In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", "type": "string" } }, @@ -1524,7 +1593,6 @@ }, "kind": { "default": "dns#operation", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#operation\".", "type": "string" }, "startTime": { @@ -1534,8 +1602,8 @@ "status": { "description": "Status of the operation. Can be one of the following: \"PENDING\" or \"DONE\" (output only). A status of \"DONE\" means that the request to update the authoritative servers has been sent, but the servers might not be updated yet.", "enum": [ - "done", - "pending" + "pending", + "done" ], "enumDescriptions": [ "", @@ -1598,7 +1666,7 @@ "type": "string" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token. In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. 
There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", "type": "string" }, "policies": { @@ -1662,7 +1730,6 @@ }, "kind": { "default": "dns#policy", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#policy\".", "type": "string" }, "name": { @@ -1684,7 +1751,6 @@ "properties": { "kind": { "default": "dns#policyAlternativeNameServerConfig", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#policyAlternativeNameServerConfig\".", "type": "string" }, "targetNameServers": { @@ -1700,13 +1766,24 @@ "PolicyAlternativeNameServerConfigTargetNameServer": { "id": "PolicyAlternativeNameServerConfigTargetNameServer", "properties": { + "forwardingPath": { + "description": "Forwarding path for this TargetNameServer. If unset or set to DEFAULT, Cloud DNS will make forwarding decision based on address ranges, i.e. RFC1918 addresses go to the VPC, non-RFC1918 addresses go to the Internet. When set to PRIVATE, Cloud DNS will always send queries through VPC for this target.", + "enum": [ + "default", + "private" + ], + "enumDescriptions": [ + "Cloud DNS will make forwarding decision based on address ranges, i.e. RFC1918 addresses forward to the target through the VPC and non-RFC1918 addresses will forward to the target through the Internet", + "Cloud DNS will always forward to this target through the VPC." + ], + "type": "string" + }, "ipv4Address": { "description": "IPv4 address to forward to.", "type": "string" }, "kind": { "default": "dns#policyAlternativeNameServerConfigTargetNameServer", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#policyAlternativeNameServerConfigTargetNameServer\".", "type": "string" } }, @@ -1717,7 +1794,6 @@ "properties": { "kind": { "default": "dns#policyNetwork", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#policyNetwork\".", "type": "string" }, "networkUrl": { @@ -1737,7 +1813,6 @@ }, "kind": { "default": "dns#project", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#project\".", "type": "string" }, "number": { @@ -1763,7 +1838,6 @@ }, "kind": { "default": "dns#quota", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#quota\".", "type": "string" }, "managedZones": { @@ -1842,7 +1916,6 @@ "properties": { "kind": { "default": "dns#resourceRecordSet", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#resourceRecordSet\".", "type": "string" }, "name": { @@ -1887,7 +1960,7 @@ "type": "string" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token. 
In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", "type": "string" }, "rrsets": { @@ -1912,7 +1985,7 @@ "type": "object" } }, - "servicePath": "dns/v1/projects/", - "title": "Google Cloud DNS API", + "servicePath": "", + "title": "Cloud DNS API", "version": "v1" } \ No newline at end of file diff --git a/vendor/google.golang.org/api/dns/v1/dns-gen.go b/vendor/google.golang.org/api/dns/v1/dns-gen.go index 7fe82e2ff4c..5d6a154ac8b 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-gen.go +++ b/vendor/google.golang.org/api/dns/v1/dns-gen.go @@ -4,9 +4,9 @@ // Code generated file. DO NOT EDIT. -// Package dns provides access to the Google Cloud DNS API. +// Package dns provides access to the Cloud DNS API. // -// For product documentation, see: https://developers.google.com/cloud-dns +// For product documentation, see: http://developers.google.com/cloud-dns // // Creating a client // @@ -78,7 +78,8 @@ var _ = internaloption.WithDefaultEndpoint const apiId = "dns:v1" const apiName = "dns" const apiVersion = "v1" -const basePath = "https://dns.googleapis.com/dns/v1/projects/" +const basePath = "https://dns.googleapis.com/" +const mtlsBasePath = "https://dns.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -106,6 +107,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -252,8 +254,6 @@ type Change struct { // IsServing: If the DNS queries for the zone will be served. IsServing bool `json:"isServing,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#change". Kind string `json:"kind,omitempty"` // StartTime: The time that this operation was started by the server @@ -262,11 +262,11 @@ type Change struct { // Status: Status of the operation (output only). A status of "done" // means that the request to update the authoritative servers has been - // sent, but the servers might not be updated yet. + // sent but the servers might not be updated yet. // // Possible values: - // "done" // "pending" + // "done" Status string `json:"status,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -310,14 +310,12 @@ type ChangesListResponse struct { // NextPageToken: The presence of this field indicates that there exist // more results following your last page of results in pagination order. // To fetch them, make another list request using this value as your - // pagination token. - // - // In this way you can retrieve the complete contents of even very large - // collections one page at a time. However, if the contents of the - // collection change between the first and last paginated list request, - // the set of all elements returned will be an inconsistent view of the - // collection. There is no way to retrieve a "snapshot" of collections - // larger than the maximum page size. 
+ // pagination token. In this way you can retrieve the complete contents + // of even very large collections one page at a time. However, if the + // contents of the collection change between the first and last + // paginated list request, the set of all elements returned will be an + // inconsistent view of the collection. There is no way to retrieve a + // "snapshot" of collections larger than the maximum page size. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -353,11 +351,11 @@ type DnsKey struct { // key. Immutable after creation time. // // Possible values: - // "ecdsap256sha256" - // "ecdsap384sha384" // "rsasha1" // "rsasha256" // "rsasha512" + // "ecdsap256sha256" + // "ecdsap384sha384" Algorithm string `json:"algorithm,omitempty"` // CreationTime: The time that this resource was created in the control @@ -396,8 +394,6 @@ type DnsKey struct { // is specified in RFC4034 Appendix B. Output only. KeyTag int64 `json:"keyTag,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#dnsKey". Kind string `json:"kind,omitempty"` // PublicKey: Base64 encoded public half of this key. Output only. @@ -486,11 +482,11 @@ type DnsKeySpec struct { // key. // // Possible values: - // "ecdsap256sha256" - // "ecdsap384sha384" // "rsasha1" // "rsasha256" // "rsasha512" + // "ecdsap256sha256" + // "ecdsap384sha384" Algorithm string `json:"algorithm,omitempty"` // KeyLength: Length of the keys in bits. @@ -508,8 +504,6 @@ type DnsKeySpec struct { // "zoneSigning" KeyType string `json:"keyType,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#dnsKeySpec". Kind string `json:"kind,omitempty"` // ForceSendFields is a list of field names (e.g. "Algorithm") to @@ -549,14 +543,12 @@ type DnsKeysListResponse struct { // NextPageToken: The presence of this field indicates that there exist // more results following your last page of results in pagination order. // To fetch them, make another list request using this value as your - // pagination token. - // - // In this way you can retrieve the complete contents of even very large - // collections one page at a time. However, if the contents of the - // collection change between the first and last paginated list request, - // the set of all elements returned will be an inconsistent view of the - // collection. There is no way to retrieve a "snapshot" of collections - // larger than the maximum page size. + // pagination token. In this way you can retrieve the complete contents + // of even very large collections one page at a time. However, if the + // contents of the collection change between the first and last + // paginated list request, the set of all elements returned will be an + // inconsistent view of the collection. There is no way to retrieve a + // "snapshot" of collections larger than the maximum page size. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -615,8 +607,6 @@ type ManagedZone struct { // only) Id uint64 `json:"id,omitempty,string"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#managedZone". Kind string `json:"kind,omitempty"` // Labels: User labels. @@ -646,13 +636,20 @@ type ManagedZone struct { // Virtual Private Cloud resources that the zone is visible from. 
PrivateVisibilityConfig *ManagedZonePrivateVisibilityConfig `json:"privateVisibilityConfig,omitempty"` + // ReverseLookupConfig: The presence of this field indicates that this + // is a managed reverse lookup zone and Cloud DNS will resolve reverse + // lookup queries using automatically configured records for VPC + // resources. This only applies to networks listed under + // private_visibility_config. + ReverseLookupConfig *ManagedZoneReverseLookupConfig `json:"reverseLookupConfig,omitempty"` + // Visibility: The zone's visibility: public zones are exposed to the // Internet, while private zones are visible only to Virtual Private // Cloud resources. // // Possible values: - // "private" // "public" + // "private" Visibility string `json:"visibility,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -687,8 +684,6 @@ type ManagedZoneDnsSecConfig struct { // for this ManagedZone. Can only be changed while the state is OFF. DefaultKeySpecs []*DnsKeySpec `json:"defaultKeySpecs,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#managedZoneDnsSecConfig". Kind string `json:"kind,omitempty"` // NonExistence: Specifies the mechanism for authenticated @@ -703,9 +698,9 @@ type ManagedZoneDnsSecConfig struct { // State: Specifies whether DNSSEC is enabled, and what mode it is in. // // Possible values: - // "off" - // "on" - // "transfer" + // "off" - DNSSEC is disabled; the zone is not signed. + // "on" - DNSSEC is enabled; the zone is signed and fully managed. + // "transfer" - DNSSEC is enabled, but in a "transfer" mode. State string `json:"state,omitempty"` // ForceSendFields is a list of field names (e.g. "DefaultKeySpecs") to @@ -733,8 +728,6 @@ func (s *ManagedZoneDnsSecConfig) MarshalJSON() ([]byte, error) { } type ManagedZoneForwardingConfig struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#managedZoneForwardingConfig". Kind string `json:"kind,omitempty"` // TargetNameServers: List of target name servers to forward to. Cloud @@ -766,14 +759,27 @@ func (s *ManagedZoneForwardingConfig) MarshalJSON() ([]byte, error) { } type ManagedZoneForwardingConfigNameServerTarget struct { + // ForwardingPath: Forwarding path for this NameServerTarget. If unset + // or set to DEFAULT, Cloud DNS will make forwarding decision based on + // address ranges, i.e. RFC1918 addresses go to the VPC, non-RFC1918 + // addresses go to the Internet. When set to PRIVATE, Cloud DNS will + // always send queries through VPC for this target. + // + // Possible values: + // "default" - Cloud DNS will make forwarding decision based on + // address ranges, i.e. RFC1918 addresses forward to the target through + // the VPC and non-RFC1918 addresses will forward to the target through + // the Internet + // "private" - Cloud DNS will always forward to this target through + // the VPC. + ForwardingPath string `json:"forwardingPath,omitempty"` + // Ipv4Address: IPv4 address of a target name server. Ipv4Address string `json:"ipv4Address,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#managedZoneForwardingConfigNameServerTarget". Kind string `json:"kind,omitempty"` - // ForceSendFields is a list of field names (e.g. "Ipv4Address") to + // ForceSendFields is a list of field names (e.g. "ForwardingPath") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -781,12 +787,13 @@ type ManagedZoneForwardingConfigNameServerTarget struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Ipv4Address") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "ForwardingPath") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -799,20 +806,18 @@ func (s *ManagedZoneForwardingConfigNameServerTarget) MarshalJSON() ([]byte, err type ManagedZoneOperationsListResponse struct { Header *ResponseHeader `json:"header,omitempty"` - // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: The presence of this field indicates that there exist // more results following your last page of results in pagination order. // To fetch them, make another list request using this value as your - // page token. - // - // In this way you can retrieve the complete contents of even very large - // collections one page at a time. However, if the contents of the - // collection change between the first and last paginated list request, - // the set of all elements returned will be an inconsistent view of the - // collection. There is no way to retrieve a consistent snapshot of a - // collection larger than the maximum page size. + // page token. In this way you can retrieve the complete contents of + // even very large collections one page at a time. However, if the + // contents of the collection change between the first and last + // paginated list request, the set of all elements returned will be an + // inconsistent view of the collection. There is no way to retrieve a + // consistent snapshot of a collection larger than the maximum page + // size. NextPageToken string `json:"nextPageToken,omitempty"` // Operations: The operation resources. @@ -846,8 +851,6 @@ func (s *ManagedZoneOperationsListResponse) MarshalJSON() ([]byte, error) { } type ManagedZonePeeringConfig struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#managedZonePeeringConfig". Kind string `json:"kind,omitempty"` // TargetNetwork: The network with which to peer. @@ -884,8 +887,6 @@ type ManagedZonePeeringConfigTargetNetwork struct { // zone targeted is deleted. Output only. DeactivateTime string `json:"deactivateTime,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#managedZonePeeringConfigTargetNetwork". Kind string `json:"kind,omitempty"` // NetworkUrl: The fully qualified URL of the VPC network to forward @@ -918,8 +919,6 @@ func (s *ManagedZonePeeringConfigTargetNetwork) MarshalJSON() ([]byte, error) { } type ManagedZonePrivateVisibilityConfig struct { - // Kind: Identifies what kind of resource this is. 
Value: the fixed - // string "dns#managedZonePrivateVisibilityConfig". Kind string `json:"kind,omitempty"` // Networks: The list of VPC networks that can see this zone. @@ -949,8 +948,6 @@ func (s *ManagedZonePrivateVisibilityConfig) MarshalJSON() ([]byte, error) { } type ManagedZonePrivateVisibilityConfigNetwork struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#managedZonePrivateVisibilityConfigNetwork". Kind string `json:"kind,omitempty"` // NetworkUrl: The fully qualified URL of the VPC network to bind to. @@ -981,6 +978,32 @@ func (s *ManagedZonePrivateVisibilityConfigNetwork) MarshalJSON() ([]byte, error return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type ManagedZoneReverseLookupConfig struct { + Kind string `json:"kind,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManagedZoneReverseLookupConfig) MarshalJSON() ([]byte, error) { + type NoMethod ManagedZoneReverseLookupConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ManagedZonesListResponse struct { Header *ResponseHeader `json:"header,omitempty"` @@ -993,14 +1016,13 @@ type ManagedZonesListResponse struct { // NextPageToken: The presence of this field indicates that there exist // more results following your last page of results in pagination order. // To fetch them, make another list request using this value as your - // page token. - // - // In this way you can retrieve the complete contents of even very large - // collections one page at a time. However, if the contents of the - // collection change between the first and last paginated list request, - // the set of all elements returned will be an inconsistent view of the - // collection. There is no way to retrieve a consistent snapshot of a - // collection larger than the maximum page size. + // page token. In this way you can retrieve the complete contents of + // even very large collections one page at a time. However, if the + // contents of the collection change between the first and last + // paginated list request, the set of all elements returned will be an + // inconsistent view of the collection. There is no way to retrieve a + // consistent snapshot of a collection larger than the maximum page + // size. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1047,8 +1069,6 @@ type Operation struct { // (output only) Id string `json:"id,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#operation". 
Kind string `json:"kind,omitempty"` // StartTime: The time that this operation was started by the server. @@ -1061,8 +1081,8 @@ type Operation struct { // servers might not be updated yet. // // Possible values: - // "done" // "pending" + // "done" Status string `json:"status,omitempty"` // Type: Type of the operation. Operations include insert, update, and @@ -1174,14 +1194,13 @@ type PoliciesListResponse struct { // NextPageToken: The presence of this field indicates that there exist // more results following your last page of results in pagination order. // To fetch them, make another list request using this value as your - // page token. - // - // In this way you can retrieve the complete contents of even very large - // collections one page at a time. However, if the contents of the - // collection change between the first and last paginated list request, - // the set of all elements returned will be an inconsistent view of the - // collection. There is no way to retrieve a consistent snapshot of a - // collection larger than the maximum page size. + // page token. In this way you can retrieve the complete contents of + // even very large collections one page at a time. However, if the + // contents of the collection change between the first and last + // paginated list request, the set of all elements returned will be an + // inconsistent view of the collection. There is no way to retrieve a + // consistent snapshot of a collection larger than the maximum page + // size. NextPageToken string `json:"nextPageToken,omitempty"` // Policies: The policy resources. @@ -1306,8 +1325,6 @@ type Policy struct { // only). Id uint64 `json:"id,omitempty,string"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#policy". Kind string `json:"kind,omitempty"` // Name: User assigned name for this policy. @@ -1347,8 +1364,6 @@ func (s *Policy) MarshalJSON() ([]byte, error) { } type PolicyAlternativeNameServerConfig struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#policyAlternativeNameServerConfig". Kind string `json:"kind,omitempty"` // TargetNameServers: Sets an alternative name server for the associated @@ -1381,14 +1396,27 @@ func (s *PolicyAlternativeNameServerConfig) MarshalJSON() ([]byte, error) { } type PolicyAlternativeNameServerConfigTargetNameServer struct { + // ForwardingPath: Forwarding path for this TargetNameServer. If unset + // or set to DEFAULT, Cloud DNS will make forwarding decision based on + // address ranges, i.e. RFC1918 addresses go to the VPC, non-RFC1918 + // addresses go to the Internet. When set to PRIVATE, Cloud DNS will + // always send queries through VPC for this target. + // + // Possible values: + // "default" - Cloud DNS will make forwarding decision based on + // address ranges, i.e. RFC1918 addresses forward to the target through + // the VPC and non-RFC1918 addresses will forward to the target through + // the Internet + // "private" - Cloud DNS will always forward to this target through + // the VPC. + ForwardingPath string `json:"forwardingPath,omitempty"` + // Ipv4Address: IPv4 address to forward to. Ipv4Address string `json:"ipv4Address,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#policyAlternativeNameServerConfigTargetNameServer". Kind string `json:"kind,omitempty"` - // ForceSendFields is a list of field names (e.g. "Ipv4Address") to + // ForceSendFields is a list of field names (e.g. 
"ForwardingPath") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -1396,12 +1424,13 @@ type PolicyAlternativeNameServerConfigTargetNameServer struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Ipv4Address") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "ForwardingPath") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -1412,8 +1441,6 @@ func (s *PolicyAlternativeNameServerConfigTargetNameServer) MarshalJSON() ([]byt } type PolicyNetwork struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#policyNetwork". Kind string `json:"kind,omitempty"` // NetworkUrl: The fully qualified URL of the VPC network to bind to. @@ -1451,8 +1478,6 @@ type Project struct { // Id: User assigned unique identifier for the resource (output only). Id string `json:"id,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#project". Kind string `json:"kind,omitempty"` // Number: Unique numeric identifier for the resource; defined by the @@ -1495,8 +1520,6 @@ type Quota struct { // ManagedZone. DnsKeysPerManagedZone int64 `json:"dnsKeysPerManagedZone,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#quota". Kind string `json:"kind,omitempty"` // ManagedZones: Maximum allowed number of managed zones in the project. @@ -1576,8 +1599,6 @@ func (s *Quota) MarshalJSON() ([]byte, error) { // ResourceRecordSet: A unit of data that will be returned by the DNS // servers. type ResourceRecordSet struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#resourceRecordSet". Kind string `json:"kind,omitempty"` // Name: For example, www.example.com. @@ -1630,14 +1651,13 @@ type ResourceRecordSetsListResponse struct { // NextPageToken: The presence of this field indicates that there exist // more results following your last page of results in pagination order. // To fetch them, make another list request using this value as your - // pagination token. - // - // In this way you can retrieve the complete contents of even very large - // collections one page at a time. However, if the contents of the - // collection change between the first and last paginated list request, - // the set of all elements returned will be an inconsistent view of the - // collection. There is no way to retrieve a consistent snapshot of a - // collection larger than the maximum page size. + // pagination token. In this way you can retrieve the complete contents + // of even very large collections one page at a time. 
However, if the + // contents of the collection change between the first and last + // paginated list request, the set of all elements returned will be an + // inconsistent view of the collection. There is no way to retrieve a + // consistent snapshot of a collection larger than the maximum page + // size. NextPageToken string `json:"nextPageToken,omitempty"` // Rrsets: The resource record set resources. @@ -1757,7 +1777,7 @@ func (c *ChangesCreateCall) Header() http.Header { func (c *ChangesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1770,7 +1790,7 @@ func (c *ChangesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/changes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -1823,6 +1843,7 @@ func (c *ChangesCreateCall) Do(opts ...googleapi.CallOption) (*Change, error) { return ret, nil // { // "description": "Atomically update the ResourceRecordSet collection.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", // "httpMethod": "POST", // "id": "dns.changes.create", // "parameterOrder": [ @@ -1836,7 +1857,7 @@ func (c *ChangesCreateCall) Do(opts ...googleapi.CallOption) (*Change, error) { // "type": "string" // }, // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -1848,7 +1869,7 @@ func (c *ChangesCreateCall) Do(opts ...googleapi.CallOption) (*Change, error) { // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}/changes", + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", // "request": { // "$ref": "Change" // }, @@ -1931,7 +1952,7 @@ func (c *ChangesGetCall) Header() http.Header { func (c *ChangesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1942,7 +1963,7 @@ func (c *ChangesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes/{changeId}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/changes/{changeId}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -1996,6 +2017,7 @@ func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { return ret, nil // { // "description": "Fetch the representation of an existing Change.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes/{changeId}", // "httpMethod": "GET", // "id": "dns.changes.get", // "parameterOrder": [ @@ -2016,7 +2038,7 @@ func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { // "type": "string" // }, // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -2028,7 +2050,7 @@ func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}/changes/{changeId}", + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/changes/{changeId}", // "response": { // "$ref": "Change" // }, @@ -2132,7 +2154,7 @@ func (c *ChangesListCall) Header() http.Header { func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2143,7 +2165,7 @@ func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/changes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -2196,6 +2218,7 @@ func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse return ret, nil // { // "description": "Enumerate Changes to a ResourceRecordSet collection.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", // "httpMethod": "GET", // "id": "dns.changes.list", // "parameterOrder": [ @@ -2204,7 +2227,7 @@ func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse // ], // "parameters": { // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -2244,7 +2267,7 @@ func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}/changes", + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", // "response": { // "$ref": "ChangesListResponse" // }, @@ -2356,7 +2379,7 @@ func (c *DnsKeysGetCall) Header() http.Header { func (c *DnsKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2367,7 +2390,7 @@ func (c *DnsKeysGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -2421,6 +2444,7 @@ func (c *DnsKeysGetCall) Do(opts ...googleapi.CallOption) (*DnsKey, error) { return ret, nil // { // "description": "Fetch the representation of an existing DnsKey.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", // "httpMethod": "GET", // "id": "dns.dnsKeys.get", // "parameterOrder": [ @@ -2446,7 +2470,7 @@ func (c *DnsKeysGetCall) Do(opts ...googleapi.CallOption) (*DnsKey, error) { // "type": "string" // }, // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -2458,7 +2482,7 @@ func (c *DnsKeysGetCall) Do(opts ...googleapi.CallOption) (*DnsKey, error) { // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", // "response": { // "$ref": "DnsKey" // }, @@ -2554,7 +2578,7 @@ func (c *DnsKeysListCall) Header() http.Header { func (c *DnsKeysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2565,7 +2589,7 @@ func (c *DnsKeysListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/dnsKeys") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -2618,6 +2642,7 @@ func (c *DnsKeysListCall) Do(opts ...googleapi.CallOption) (*DnsKeysListResponse return ret, nil // { // "description": "Enumerate DnsKeys to a ResourceRecordSet collection.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys", // "httpMethod": "GET", // "id": "dns.dnsKeys.list", // "parameterOrder": [ @@ -2631,7 +2656,7 @@ func (c *DnsKeysListCall) Do(opts ...googleapi.CallOption) (*DnsKeysListResponse // "type": "string" // }, // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -2654,7 +2679,7 @@ func (c *DnsKeysListCall) Do(opts ...googleapi.CallOption) (*DnsKeysListResponse // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}/dnsKeys", + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys", // "response": { // "$ref": "DnsKeysListResponse" // }, @@ -2757,7 +2782,7 @@ func (c *ManagedZoneOperationsGetCall) Header() http.Header { func (c *ManagedZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2768,7 +2793,7 @@ func (c *ManagedZoneOperationsGetCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/operations/{operation}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -2822,6 +2847,7 @@ func (c *ManagedZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operat return ret, nil // { // "description": "Fetch the representation of an existing Operation.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/operations/{operation}", // "httpMethod": "GET", // "id": "dns.managedZoneOperations.get", // "parameterOrder": [ @@ -2854,7 +2880,7 @@ func (c *ManagedZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}/operations/{operation}", + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/operations/{operation}", // "response": { // "$ref": "Operation" // }, @@ -2908,8 +2934,8 @@ func (c *ManagedZoneOperationsListCall) PageToken(pageToken string) *ManagedZone // only supported values are START_TIME and ID. 
// // Possible values: -// "id" // "startTime" (default) +// "id" func (c *ManagedZoneOperationsListCall) SortBy(sortBy string) *ManagedZoneOperationsListCall { c.urlParams_.Set("sortBy", sortBy) return c @@ -2952,7 +2978,7 @@ func (c *ManagedZoneOperationsListCall) Header() http.Header { func (c *ManagedZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2963,7 +2989,7 @@ func (c *ManagedZoneOperationsListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/operations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -3017,6 +3043,7 @@ func (c *ManagedZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*Manag return ret, nil // { // "description": "Enumerate Operations for the given ManagedZone.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/operations", // "httpMethod": "GET", // "id": "dns.managedZoneOperations.list", // "parameterOrder": [ @@ -3051,8 +3078,8 @@ func (c *ManagedZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*Manag // "default": "startTime", // "description": "Sorting criterion. The only supported values are START_TIME and ID.", // "enum": [ - // "id", - // "startTime" + // "startTime", + // "id" // ], // "enumDescriptions": [ // "", @@ -3062,7 +3089,7 @@ func (c *ManagedZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*Manag // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}/operations", + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/operations", // "response": { // "$ref": "ManagedZoneOperationsListResponse" // }, @@ -3152,7 +3179,7 @@ func (c *ManagedZonesCreateCall) Header() http.Header { func (c *ManagedZonesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3165,7 +3192,7 @@ func (c *ManagedZonesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -3217,6 +3244,7 @@ func (c *ManagedZonesCreateCall) Do(opts ...googleapi.CallOption) (*ManagedZone, return ret, nil // { // "description": "Create a new ManagedZone.", + // "flatPath": "dns/v1/projects/{project}/managedZones", // "httpMethod": "POST", // "id": "dns.managedZones.create", // "parameterOrder": [ @@ -3235,7 +3263,7 @@ func (c *ManagedZonesCreateCall) Do(opts ...googleapi.CallOption) (*ManagedZone, // "type": "string" // } // }, - // "path": "{project}/managedZones", + // "path": "dns/v1/projects/{project}/managedZones", // "request": { // "$ref": "ManagedZone" // }, @@ -3305,7 +3333,7 @@ func (c *ManagedZonesDeleteCall) Header() http.Header { func (c *ManagedZonesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3313,7 +3341,7 @@ func (c *ManagedZonesDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -3341,6 +3369,7 @@ func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error { return nil // { // "description": "Delete a previously created ManagedZone.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", // "httpMethod": "DELETE", // "id": "dns.managedZones.delete", // "parameterOrder": [ @@ -3354,7 +3383,7 @@ func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error { // "type": "string" // }, // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -3366,7 +3395,7 @@ func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error { // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}", + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" @@ -3441,7 +3470,7 @@ func (c *ManagedZonesGetCall) Header() http.Header { func (c *ManagedZonesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3452,7 +3481,7 @@ func (c *ManagedZonesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -3505,6 +3534,7 @@ func (c *ManagedZonesGetCall) Do(opts ...googleapi.CallOption) (*ManagedZone, er return ret, nil // { // "description": "Fetch the representation of an existing ManagedZone.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", // "httpMethod": "GET", // "id": "dns.managedZones.get", // "parameterOrder": [ @@ -3518,7 +3548,7 @@ func (c *ManagedZonesGetCall) Do(opts ...googleapi.CallOption) (*ManagedZone, er // "type": "string" // }, // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -3530,7 +3560,7 @@ func (c *ManagedZonesGetCall) Do(opts ...googleapi.CallOption) (*ManagedZone, er // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}", + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}", // "response": { // "$ref": "ManagedZone" // }, @@ -3623,7 +3653,7 @@ func (c *ManagedZonesListCall) Header() http.Header { func (c *ManagedZonesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3634,7 +3664,7 @@ func (c *ManagedZonesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -3686,6 +3716,7 @@ func (c *ManagedZonesListCall) Do(opts ...googleapi.CallOption) (*ManagedZonesLi return ret, nil // { // "description": "Enumerate ManagedZones that have been created but not yet deleted.", + // "flatPath": "dns/v1/projects/{project}/managedZones", // "httpMethod": "GET", // "id": "dns.managedZones.list", // "parameterOrder": [ @@ -3715,7 +3746,7 @@ func (c *ManagedZonesListCall) Do(opts ...googleapi.CallOption) (*ManagedZonesLi // "type": "string" // } // }, - // "path": "{project}/managedZones", + // "path": "dns/v1/projects/{project}/managedZones", // "response": { // "$ref": "ManagedZonesListResponse" // }, @@ -3807,7 +3838,7 @@ func (c *ManagedZonesPatchCall) Header() http.Header { func (c *ManagedZonesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3820,7 +3851,7 @@ func (c *ManagedZonesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -3873,6 +3904,7 @@ func (c *ManagedZonesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er return ret, nil // { // "description": "Apply a partial update to an existing ManagedZone.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", // "httpMethod": "PATCH", // "id": "dns.managedZones.patch", // "parameterOrder": [ @@ -3886,7 +3918,7 @@ func (c *ManagedZonesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // }, // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -3898,7 +3930,7 @@ func (c *ManagedZonesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}", + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}", // "request": { // "$ref": "ManagedZone" // }, @@ -3970,7 +4002,7 @@ func (c *ManagedZonesUpdateCall) Header() http.Header { func (c *ManagedZonesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3983,7 +4015,7 @@ func (c *ManagedZonesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PUT", urls, body) if err != nil { @@ -4036,6 +4068,7 @@ func (c *ManagedZonesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e return ret, nil // { // "description": "Update an existing ManagedZone.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", // "httpMethod": "PUT", // "id": "dns.managedZones.update", // "parameterOrder": [ @@ -4049,7 +4082,7 @@ func (c *ManagedZonesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // }, // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -4061,7 +4094,7 @@ func (c *ManagedZonesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}", + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}", // "request": { // "$ref": "ManagedZone" // }, @@ -4131,7 +4164,7 @@ func (c *PoliciesCreateCall) Header() http.Header { func (c *PoliciesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4144,7 +4177,7 @@ func (c *PoliciesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/policies") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/policies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -4196,6 +4229,7 @@ func (c *PoliciesCreateCall) Do(opts ...googleapi.CallOption) (*Policy, error) { return ret, nil // { // "description": "Create a new Policy", + // "flatPath": "dns/v1/projects/{project}/policies", // "httpMethod": "POST", // "id": "dns.policies.create", // "parameterOrder": [ @@ -4214,7 +4248,7 @@ func (c *PoliciesCreateCall) Do(opts ...googleapi.CallOption) (*Policy, error) { // "type": "string" // } // }, - // "path": "{project}/policies", + // "path": "dns/v1/projects/{project}/policies", // "request": { // "$ref": "Policy" // }, @@ -4285,7 +4319,7 @@ func (c *PoliciesDeleteCall) Header() http.Header { func (c *PoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4293,7 +4327,7 @@ func (c *PoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/policies/{policy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/policies/{policy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -4321,6 +4355,7 @@ func (c *PoliciesDeleteCall) Do(opts ...googleapi.CallOption) error { return nil // { // "description": "Delete a previously created Policy. 
Will fail if the policy is still being referenced by a network.", + // "flatPath": "dns/v1/projects/{project}/policies/{policy}", // "httpMethod": "DELETE", // "id": "dns.policies.delete", // "parameterOrder": [ @@ -4346,7 +4381,7 @@ func (c *PoliciesDeleteCall) Do(opts ...googleapi.CallOption) error { // "type": "string" // } // }, - // "path": "{project}/policies/{policy}", + // "path": "dns/v1/projects/{project}/policies/{policy}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" @@ -4421,7 +4456,7 @@ func (c *PoliciesGetCall) Header() http.Header { func (c *PoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4432,7 +4467,7 @@ func (c *PoliciesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/policies/{policy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/policies/{policy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -4485,6 +4520,7 @@ func (c *PoliciesGetCall) Do(opts ...googleapi.CallOption) (*Policy, error) { return ret, nil // { // "description": "Fetch the representation of an existing Policy.", + // "flatPath": "dns/v1/projects/{project}/policies/{policy}", // "httpMethod": "GET", // "id": "dns.policies.get", // "parameterOrder": [ @@ -4510,7 +4546,7 @@ func (c *PoliciesGetCall) Do(opts ...googleapi.CallOption) (*Policy, error) { // "type": "string" // } // }, - // "path": "{project}/policies/{policy}", + // "path": "dns/v1/projects/{project}/policies/{policy}", // "response": { // "$ref": "Policy" // }, @@ -4595,7 +4631,7 @@ func (c *PoliciesListCall) Header() http.Header { func (c *PoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4606,7 +4642,7 @@ func (c *PoliciesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/policies") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/policies") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -4658,6 +4694,7 @@ func (c *PoliciesListCall) Do(opts ...googleapi.CallOption) (*PoliciesListRespon return ret, nil // { // "description": "Enumerate all Policies associated with a project.", + // "flatPath": "dns/v1/projects/{project}/policies", // "httpMethod": "GET", // "id": "dns.policies.list", // "parameterOrder": [ @@ -4682,7 +4719,7 @@ func (c *PoliciesListCall) Do(opts ...googleapi.CallOption) (*PoliciesListRespon // "type": "string" // } // }, - // "path": "{project}/policies", + // "path": "dns/v1/projects/{project}/policies", // "response": { // "$ref": "PoliciesListResponse" // }, @@ -4774,7 +4811,7 @@ func (c *PoliciesPatchCall) Header() http.Header { func (c *PoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4787,7 +4824,7 @@ func (c *PoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/policies/{policy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/policies/{policy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -4840,6 +4877,7 @@ func (c *PoliciesPatchCall) Do(opts ...googleapi.CallOption) (*PoliciesPatchResp return ret, nil // { // "description": "Apply a partial update to an existing Policy.", + // "flatPath": "dns/v1/projects/{project}/policies/{policy}", // "httpMethod": "PATCH", // "id": "dns.policies.patch", // "parameterOrder": [ @@ -4865,7 +4903,7 @@ func (c *PoliciesPatchCall) Do(opts ...googleapi.CallOption) (*PoliciesPatchResp // "type": "string" // } // }, - // "path": "{project}/policies/{policy}", + // "path": "dns/v1/projects/{project}/policies/{policy}", // "request": { // "$ref": "Policy" // }, @@ -4937,7 +4975,7 @@ func (c *PoliciesUpdateCall) Header() http.Header { func (c *PoliciesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4950,7 +4988,7 @@ func (c *PoliciesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/policies/{policy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/policies/{policy}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("PUT", urls, body) if err != nil { @@ -5003,6 +5041,7 @@ func (c *PoliciesUpdateCall) Do(opts ...googleapi.CallOption) (*PoliciesUpdateRe return ret, nil // { // "description": "Update an existing Policy.", + // "flatPath": "dns/v1/projects/{project}/policies/{policy}", // "httpMethod": "PUT", // "id": "dns.policies.update", // "parameterOrder": [ @@ -5028,7 +5067,7 @@ func (c *PoliciesUpdateCall) Do(opts ...googleapi.CallOption) (*PoliciesUpdateRe // "type": "string" // } // }, - // "path": "{project}/policies/{policy}", + // "path": "dns/v1/projects/{project}/policies/{policy}", // "request": { // "$ref": "Policy" // }, @@ -5107,7 +5146,7 @@ func (c *ProjectsGetCall) Header() http.Header { func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5118,7 +5157,7 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -5170,6 +5209,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { return ret, nil // { // "description": "Fetch the representation of an existing Project.", + // "flatPath": "dns/v1/projects/{project}", // "httpMethod": "GET", // "id": "dns.projects.get", // "parameterOrder": [ @@ -5188,7 +5228,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { // "type": "string" // } // }, - // "path": "{project}", + // "path": "dns/v1/projects/{project}", // "response": { // "$ref": "Project" // }, @@ -5291,7 +5331,7 @@ func (c *ResourceRecordSetsListCall) Header() http.Header { func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5302,7 +5342,7 @@ func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/rrsets") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -5355,6 +5395,7 @@ func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*Resource return ret, nil // { // "description": "Enumerate ResourceRecordSets that have been created but not yet deleted.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets", // "httpMethod": "GET", // "id": "dns.resourceRecordSets.list", // "parameterOrder": [ @@ -5363,7 +5404,7 @@ func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*Resource // ], // "parameters": { // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -5396,7 +5437,7 @@ func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*Resource // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}/rrsets", + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets", // "response": { // "$ref": "ResourceRecordSetsListResponse" // }, diff --git a/vendor/google.golang.org/api/dns/v1beta2/dns-api.json b/vendor/google.golang.org/api/dns/v1beta2/dns-api.json index 3fa035fc06e..c45ab9f0e50 100644 --- a/vendor/google.golang.org/api/dns/v1beta2/dns-api.json +++ b/vendor/google.golang.org/api/dns/v1beta2/dns-api.json @@ -17,35 +17,64 @@ } } }, - "basePath": "/dns/v1beta2/projects/", - "baseUrl": "https://dns.googleapis.com/dns/v1beta2/projects/", - "batchPath": "batch/dns/v1beta2", - "description": "Configures and serves authoritative DNS records.", + "basePath": "", + "baseUrl": "https://dns.googleapis.com/", + "batchPath": "batch", + "canonicalName": "Dns", + "description": "", "discoveryVersion": "v1", - "documentationLink": "https://developers.google.com/cloud-dns", - "etag": "\"F5McR9eEaw0XRpaO3M9gbIugkbs/keAz3lpqQqjiyJjBvJRqliew5V8\"", + "documentationLink": "http://developers.google.com/cloud-dns", + "fullyEncodeReservedExpansion": true, "icons": { - "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", - "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" }, "id": "dns:v1beta2", "kind": "discovery#restDescription", + "mtlsRootUrl": "https://dns.mtls.googleapis.com/", "name": "dns", "ownerDomain": "google.com", "ownerName": "Google", "parameters": { + "$.xgafv": { + "description": "V1 error format.", + "enum": [ + "1", + "2" + ], + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "type": "string" + }, + "access_token": { + "description": "OAuth access token.", + "location": "query", + "type": "string" + }, "alt": { "default": "json", - "description": "Data format for the response.", + "description": "Data format for response.", "enum": [ - "json" + "json", + "media", + "proto" ], "enumDescriptions": [ - "Responses with Content-Type of application/json" + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" ], "location": "query", "type": "string" }, + "callback": { + "description": "JSONP", + "location": "query", + "type": "string" + }, "fields": { 
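Both the dns/v1 Go bindings above and the dns/v1beta2 discovery document beginning in this hunk drop the "/dns/<version>/projects/" suffix from the service base URL and instead carry the full "dns/<version>/projects/{project}/..." prefix in each method path. A practical consequence, sketched below under the assumption that a caller overrides the endpoint (for example to point at a local fake server in tests): the override should now be the bare host, since the version and projects segments come from the per-method path rather than from BasePath. The endpoint URL and project name are placeholders.

package main

import (
	"context"
	"log"

	dns "google.golang.org/api/dns/v1beta2"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// Placeholder endpoint for a local test double; with the regenerated
	// client the per-method paths already include
	// "dns/v1beta2/projects/{project}/...", so only the host is overridden.
	svc, err := dns.NewService(ctx,
		option.WithEndpoint("http://127.0.0.1:8080/"),
		option.WithoutAuthentication(),
	)
	if err != nil {
		log.Fatal(err)
	}

	// "my-project" is a placeholder.
	if _, err := svc.ManagedZones.List("my-project").Do(); err != nil {
		log.Printf("list failed (expected unless a fake server is running): %v", err)
	}
}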
"description": "Selector specifying which fields to include in a partial response.", "location": "query", @@ -68,12 +97,17 @@ "type": "boolean" }, "quotaUser": { - "description": "An opaque string that represents a user for quota purposes. Must not exceed 40 characters.", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", "location": "query", "type": "string" }, - "userIp": { - "description": "Deprecated. Please use quotaUser instead.", + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "location": "query", + "type": "string" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", "location": "query", "type": "string" } @@ -84,6 +118,7 @@ "methods": { "create": { "description": "Atomically update the ResourceRecordSet collection.", + "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/changes", "httpMethod": "POST", "id": "dns.changes.create", "parameterOrder": [ @@ -97,7 +132,7 @@ "type": "string" }, "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -109,7 +144,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}/changes", + "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/changes", "request": { "$ref": "Change" }, @@ -123,6 +158,7 @@ }, "get": { "description": "Fetch the representation of an existing Change.", + "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/changes/{changeId}", "httpMethod": "GET", "id": "dns.changes.get", "parameterOrder": [ @@ -143,7 +179,7 @@ "type": "string" }, "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -155,7 +191,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}/changes/{changeId}", + "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/changes/{changeId}", "response": { "$ref": "Change" }, @@ -168,6 +204,7 @@ }, "list": { "description": "Enumerate Changes to a ResourceRecordSet collection.", + "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/changes", "httpMethod": "GET", "id": "dns.changes.list", "parameterOrder": [ @@ -176,7 +213,7 @@ ], "parameters": { "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -216,7 +253,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}/changes", + "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/changes", "response": { "$ref": "ChangesListResponse" }, @@ -233,6 +270,7 @@ "methods": { "get": { "description": "Fetch the representation of an existing DnsKey.", + "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", "httpMethod": "GET", "id": "dns.dnsKeys.get", "parameterOrder": [ @@ -258,7 +296,7 @@ "type": "string" }, "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -270,7 +308,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", + "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", "response": { "$ref": "DnsKey" }, @@ -283,6 +321,7 @@ }, "list": { "description": "Enumerate DnsKeys to a ResourceRecordSet collection.", + "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/dnsKeys", "httpMethod": "GET", "id": "dns.dnsKeys.list", "parameterOrder": [ @@ -296,7 +335,7 @@ "type": "string" }, "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -319,7 +358,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}/dnsKeys", + "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/dnsKeys", "response": { "$ref": "DnsKeysListResponse" }, @@ -336,6 +375,7 @@ "methods": { "get": { "description": "Fetch the representation of an existing Operation.", + "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/operations/{operation}", "httpMethod": "GET", "id": "dns.managedZoneOperations.get", "parameterOrder": [ @@ -368,7 +408,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}/operations/{operation}", + "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/operations/{operation}", "response": { "$ref": "Operation" }, @@ -381,6 +421,7 @@ }, "list": { "description": "Enumerate Operations for the given ManagedZone.", + "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/operations", "httpMethod": "GET", "id": "dns.managedZoneOperations.list", "parameterOrder": [ @@ -415,8 +456,8 @@ "default": "startTime", "description": "Sorting criterion. 
The only supported values are START_TIME and ID.", "enum": [ - "id", - "startTime" + "startTime", + "id" ], "enumDescriptions": [ "", @@ -426,7 +467,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}/operations", + "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/operations", "response": { "$ref": "ManagedZoneOperationsListResponse" }, @@ -443,6 +484,7 @@ "methods": { "create": { "description": "Create a new ManagedZone.", + "flatPath": "dns/v1beta2/projects/{project}/managedZones", "httpMethod": "POST", "id": "dns.managedZones.create", "parameterOrder": [ @@ -461,7 +503,7 @@ "type": "string" } }, - "path": "{project}/managedZones", + "path": "dns/v1beta2/projects/{project}/managedZones", "request": { "$ref": "ManagedZone" }, @@ -475,6 +517,7 @@ }, "delete": { "description": "Delete a previously created ManagedZone.", + "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}", "httpMethod": "DELETE", "id": "dns.managedZones.delete", "parameterOrder": [ @@ -488,7 +531,7 @@ "type": "string" }, "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -500,7 +543,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}", + "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/ndev.clouddns.readwrite" @@ -508,6 +551,7 @@ }, "get": { "description": "Fetch the representation of an existing ManagedZone.", + "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}", "httpMethod": "GET", "id": "dns.managedZones.get", "parameterOrder": [ @@ -521,7 +565,7 @@ "type": "string" }, "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -533,7 +577,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}", + "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}", "response": { "$ref": "ManagedZone" }, @@ -546,6 +590,7 @@ }, "list": { "description": "Enumerate ManagedZones that have been created but not yet deleted.", + "flatPath": "dns/v1beta2/projects/{project}/managedZones", "httpMethod": "GET", "id": "dns.managedZones.list", "parameterOrder": [ @@ -575,7 +620,7 @@ "type": "string" } }, - "path": "{project}/managedZones", + "path": "dns/v1beta2/projects/{project}/managedZones", "response": { "$ref": "ManagedZonesListResponse" }, @@ -588,6 +633,7 @@ }, "patch": { "description": "Apply a partial update to an existing ManagedZone.", + "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}", "httpMethod": "PATCH", "id": "dns.managedZones.patch", "parameterOrder": [ @@ -601,7 +647,7 @@ "type": "string" }, "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -613,7 +659,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}", + "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}", "request": { "$ref": "ManagedZone" }, @@ -627,6 +673,7 @@ }, "update": { "description": "Update an existing ManagedZone.", + "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}", "httpMethod": "PUT", "id": "dns.managedZones.update", "parameterOrder": [ @@ -640,7 +687,7 @@ "type": "string" }, "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -652,7 +699,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}", + "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}", "request": { "$ref": "ManagedZone" }, @@ -670,6 +717,7 @@ "methods": { "create": { "description": "Create a new Policy", + "flatPath": "dns/v1beta2/projects/{project}/policies", "httpMethod": "POST", "id": "dns.policies.create", "parameterOrder": [ @@ -688,7 +736,7 @@ "type": "string" } }, - "path": "{project}/policies", + "path": "dns/v1beta2/projects/{project}/policies", "request": { "$ref": "Policy" }, @@ -702,6 +750,7 @@ }, "delete": { "description": "Delete a previously created Policy. Will fail if the policy is still being referenced by a network.", + "flatPath": "dns/v1beta2/projects/{project}/policies/{policy}", "httpMethod": "DELETE", "id": "dns.policies.delete", "parameterOrder": [ @@ -727,7 +776,7 @@ "type": "string" } }, - "path": "{project}/policies/{policy}", + "path": "dns/v1beta2/projects/{project}/policies/{policy}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/ndev.clouddns.readwrite" @@ -735,6 +784,7 @@ }, "get": { "description": "Fetch the representation of an existing Policy.", + "flatPath": "dns/v1beta2/projects/{project}/policies/{policy}", "httpMethod": "GET", "id": "dns.policies.get", "parameterOrder": [ @@ -760,7 +810,7 @@ "type": "string" } }, - "path": "{project}/policies/{policy}", + "path": "dns/v1beta2/projects/{project}/policies/{policy}", "response": { "$ref": "Policy" }, @@ -773,6 +823,7 @@ }, "list": { "description": "Enumerate all Policies associated with a project.", + "flatPath": "dns/v1beta2/projects/{project}/policies", "httpMethod": "GET", "id": "dns.policies.list", "parameterOrder": [ @@ -797,7 +848,7 @@ "type": "string" } }, - "path": "{project}/policies", + "path": "dns/v1beta2/projects/{project}/policies", "response": { "$ref": "PoliciesListResponse" }, @@ -810,6 +861,7 @@ }, "patch": { "description": "Apply a partial update to an existing Policy.", + "flatPath": "dns/v1beta2/projects/{project}/policies/{policy}", "httpMethod": "PATCH", "id": "dns.policies.patch", "parameterOrder": [ @@ -835,7 +887,7 @@ "type": "string" } }, - "path": "{project}/policies/{policy}", + "path": "dns/v1beta2/projects/{project}/policies/{policy}", "request": { "$ref": "Policy" }, @@ -849,6 +901,7 @@ }, "update": { "description": "Update an existing Policy.", + "flatPath": "dns/v1beta2/projects/{project}/policies/{policy}", "httpMethod": "PUT", "id": "dns.policies.update", "parameterOrder": [ @@ -874,7 +927,7 @@ "type": "string" } }, - "path": "{project}/policies/{policy}", + "path": 
"dns/v1beta2/projects/{project}/policies/{policy}", "request": { "$ref": "Policy" }, @@ -892,6 +945,7 @@ "methods": { "get": { "description": "Fetch the representation of an existing Project.", + "flatPath": "dns/v1beta2/projects/{project}", "httpMethod": "GET", "id": "dns.projects.get", "parameterOrder": [ @@ -910,7 +964,7 @@ "type": "string" } }, - "path": "{project}", + "path": "dns/v1beta2/projects/{project}", "response": { "$ref": "Project" }, @@ -927,6 +981,7 @@ "methods": { "list": { "description": "Enumerate ResourceRecordSets that have been created but not yet deleted.", + "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/rrsets", "httpMethod": "GET", "id": "dns.resourceRecordSets.list", "parameterOrder": [ @@ -935,7 +990,7 @@ ], "parameters": { "managedZone": { - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", "location": "path", "required": true, "type": "string" @@ -968,7 +1023,7 @@ "type": "string" } }, - "path": "{project}/managedZones/{managedZone}/rrsets", + "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/rrsets", "response": { "$ref": "ResourceRecordSetsListResponse" }, @@ -982,7 +1037,7 @@ } } }, - "revision": "20191029", + "revision": "20200927", "rootUrl": "https://dns.googleapis.com/", "schemas": { "Change": { @@ -1013,7 +1068,6 @@ }, "kind": { "default": "dns#change", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#change\".", "type": "string" }, "startTime": { @@ -1021,10 +1075,10 @@ "type": "string" }, "status": { - "description": "Status of the operation (output only). A status of \"done\" means that the request to update the authoritative servers has been sent, but the servers might not be updated yet.", + "description": "Status of the operation (output only). A status of \"done\" means that the request to update the authoritative servers has been sent but the servers might not be updated yet.", "enum": [ - "done", - "pending" + "pending", + "done" ], "enumDescriptions": [ "", @@ -1055,7 +1109,7 @@ "type": "string" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a \"snapshot\" of collections larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token. In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. 
There is no way to retrieve a \"snapshot\" of collections larger than the maximum page size.", "type": "string" } }, @@ -1068,11 +1122,11 @@ "algorithm": { "description": "String mnemonic specifying the DNSSEC algorithm of this key. Immutable after creation time.", "enum": [ - "ecdsap256sha256", - "ecdsap384sha384", "rsasha1", "rsasha256", - "rsasha512" + "rsasha512", + "ecdsap256sha256", + "ecdsap384sha384" ], "enumDescriptions": [ "", @@ -1118,7 +1172,6 @@ }, "kind": { "default": "dns#dnsKey", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#dnsKey\".", "type": "string" }, "publicKey": { @@ -1171,11 +1224,11 @@ "algorithm": { "description": "String mnemonic specifying the DNSSEC algorithm of this key.", "enum": [ - "ecdsap256sha256", - "ecdsap384sha384", "rsasha1", "rsasha256", - "rsasha512" + "rsasha512", + "ecdsap256sha256", + "ecdsap384sha384" ], "enumDescriptions": [ "", @@ -1205,7 +1258,6 @@ }, "kind": { "default": "dns#dnsKeySpec", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#dnsKeySpec\".", "type": "string" } }, @@ -1231,7 +1283,7 @@ "type": "string" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a \"snapshot\" of collections larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token. In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a \"snapshot\" of collections larger than the maximum page size.", "type": "string" } }, @@ -1268,7 +1320,6 @@ }, "kind": { "default": "dns#managedZone", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZone\".", "type": "string" }, "labels": { @@ -1305,11 +1356,15 @@ "$ref": "ManagedZoneReverseLookupConfig", "description": "The presence of this field indicates that this is a managed reverse lookup zone and Cloud DNS will resolve reverse lookup queries using automatically configured records for VPC resources. This only applies to networks listed under private_visibility_config." }, + "serviceDirectoryConfig": { + "$ref": "ManagedZoneServiceDirectoryConfig", + "description": "This field links to the associated service directory namespace. This field should not be set for public zones or forwarding zones." 
+ }, "visibility": { "description": "The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources.", "enum": [ - "private", - "public" + "public", + "private" ], "enumDescriptions": [ "", @@ -1332,7 +1387,6 @@ }, "kind": { "default": "dns#managedZoneDnsSecConfig", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZoneDnsSecConfig\".", "type": "string" }, "nonExistence": { @@ -1355,9 +1409,9 @@ "transfer" ], "enumDescriptions": [ - "", - "", - "" + "DNSSEC is disabled; the zone is not signed.", + "DNSSEC is enabled; the zone is signed and fully managed.", + "DNSSEC is enabled, but in a \"transfer\" mode." ], "type": "string" } @@ -1369,7 +1423,6 @@ "properties": { "kind": { "default": "dns#managedZoneForwardingConfig", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZoneForwardingConfig\".", "type": "string" }, "targetNameServers": { @@ -1386,14 +1439,14 @@ "id": "ManagedZoneForwardingConfigNameServerTarget", "properties": { "forwardingPath": { - "description": "Forwarding path for this NameServerTarget, if unset or set to DEFAULT, Cloud DNS will make forwarding decision based on address ranges, i.e. RFC1918 addresses go to the VPC, Non-RFC1918 addresses go to the Internet. When set to PRIVATE, Cloud DNS will always send queries through VPC for this target", + "description": "Forwarding path for this NameServerTarget. If unset or set to DEFAULT, Cloud DNS will make forwarding decision based on address ranges, i.e. RFC1918 addresses go to the VPC, non-RFC1918 addresses go to the Internet. When set to PRIVATE, Cloud DNS will always send queries through VPC for this target.", "enum": [ "default", "private" ], "enumDescriptions": [ - "", - "" + "Cloud DNS will make forwarding decision based on address ranges, i.e. RFC1918 addresses forward to the target through the VPC and non-RFC1918 addresses will forward to the target through the Internet", + "Cloud DNS will always forward to this target through the VPC." ], "type": "string" }, @@ -1403,7 +1456,6 @@ }, "kind": { "default": "dns#managedZoneForwardingConfigNameServerTarget", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZoneForwardingConfigNameServerTarget\".", "type": "string" } }, @@ -1417,11 +1469,10 @@ }, "kind": { "default": "dns#managedZoneOperationsListResponse", - "description": "Type of resource.", "type": "string" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token. In this way you can retrieve the complete contents of even very large collections one page at a time. 
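The ManagedZoneServiceDirectoryConfig and ManagedZoneServiceDirectoryConfigNamespace schemas added in this hunk link a private zone to a Service Directory namespace. The sketch below shows how the corresponding regenerated Go types would be used; the struct and field names are inferred from the schema ids and property names above, and the project, location, and namespace values are placeholders for illustration only.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	dns "google.golang.org/api/dns/v1beta2"
)

func main() {
	// Field names inferred from the JSON schema in this change; values are
	// placeholders, not part of this pull request.
	zone := &dns.ManagedZone{
		Name:       "example-private-zone",
		DnsName:    "example.internal.",
		Visibility: "private",
		ServiceDirectoryConfig: &dns.ManagedZoneServiceDirectoryConfig{
			Namespace: &dns.ManagedZoneServiceDirectoryConfigNamespace{
				NamespaceUrl: "https://servicedirectory.googleapis.com/v1/projects/my-project/locations/us-central1/namespaces/my-namespace",
			},
		},
	}

	out, err := json.MarshalIndent(zone, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}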
However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", "type": "string" }, "operations": { @@ -1439,7 +1490,6 @@ "properties": { "kind": { "default": "dns#managedZonePeeringConfig", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZonePeeringConfig\".", "type": "string" }, "targetNetwork": { @@ -1458,7 +1508,6 @@ }, "kind": { "default": "dns#managedZonePeeringConfigTargetNetwork", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZonePeeringConfigTargetNetwork\".", "type": "string" }, "networkUrl": { @@ -1473,7 +1522,6 @@ "properties": { "kind": { "default": "dns#managedZonePrivateVisibilityConfig", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZonePrivateVisibilityConfig\".", "type": "string" }, "networks": { @@ -1491,7 +1539,6 @@ "properties": { "kind": { "default": "dns#managedZonePrivateVisibilityConfigNetwork", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZonePrivateVisibilityConfigNetwork\".", "type": "string" }, "networkUrl": { @@ -1506,7 +1553,39 @@ "properties": { "kind": { "default": "dns#managedZoneReverseLookupConfig", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZoneReverseLookupConfig\".", + "type": "string" + } + }, + "type": "object" + }, + "ManagedZoneServiceDirectoryConfig": { + "description": "Contains information about Service Directory-backed zones.", + "id": "ManagedZoneServiceDirectoryConfig", + "properties": { + "kind": { + "default": "dns#managedZoneServiceDirectoryConfig", + "type": "string" + }, + "namespace": { + "$ref": "ManagedZoneServiceDirectoryConfigNamespace", + "description": "Contains information about the namespace associated with the zone." + } + }, + "type": "object" + }, + "ManagedZoneServiceDirectoryConfigNamespace": { + "id": "ManagedZoneServiceDirectoryConfigNamespace", + "properties": { + "deletionTime": { + "description": "The time that the namespace backing this zone was deleted, empty string if it still exists. This is in RFC3339 text format. Output only.", + "type": "string" + }, + "kind": { + "default": "dns#managedZoneServiceDirectoryConfigNamespace", + "type": "string" + }, + "namespaceUrl": { + "description": "The fully qualified URL of the namespace associated with the zone. This should be formatted like https://servicedirectory.googleapis.com/v1/projects/{project}/locations/{location}/namespaces/{namespace}", "type": "string" } }, @@ -1531,7 +1610,7 @@ "type": "array" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. 
There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token. In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", "type": "string" } }, @@ -1551,7 +1630,6 @@ }, "kind": { "default": "dns#operation", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#operation\".", "type": "string" }, "startTime": { @@ -1561,8 +1639,8 @@ "status": { "description": "Status of the operation. Can be one of the following: \"PENDING\" or \"DONE\" (output only). A status of \"DONE\" means that the request to update the authoritative servers has been sent, but the servers might not be updated yet.", "enum": [ - "done", - "pending" + "pending", + "done" ], "enumDescriptions": [ "", @@ -1625,7 +1703,7 @@ "type": "string" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token. In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", "type": "string" }, "policies": { @@ -1689,7 +1767,6 @@ }, "kind": { "default": "dns#policy", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#policy\".", "type": "string" }, "name": { @@ -1711,7 +1788,6 @@ "properties": { "kind": { "default": "dns#policyAlternativeNameServerConfig", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#policyAlternativeNameServerConfig\".", "type": "string" }, "targetNameServers": { @@ -1728,14 +1804,14 @@ "id": "PolicyAlternativeNameServerConfigTargetNameServer", "properties": { "forwardingPath": { - "description": "Forwarding path for this TargetNameServer, if unset or set to DEFAULT, Cloud DNS will make forwarding decision based on address ranges, i.e. RFC1918 addresses go to the VPC, Non-RFC1918 addresses go to the Internet. 
When set to PRIVATE, Cloud DNS will always send queries through VPC for this target", + "description": "Forwarding path for this TargetNameServer. If unset or set to DEFAULT, Cloud DNS will make forwarding decision based on address ranges, i.e. RFC1918 addresses go to the VPC, non-RFC1918 addresses go to the Internet. When set to PRIVATE, Cloud DNS will always send queries through VPC for this target.", "enum": [ "default", "private" ], "enumDescriptions": [ - "", - "" + "Cloud DNS will make forwarding decision based on address ranges, i.e. RFC1918 addresses forward to the target through the VPC and non-RFC1918 addresses will forward to the target through the Internet", + "Cloud DNS will always forward to this target through the VPC." ], "type": "string" }, @@ -1745,7 +1821,6 @@ }, "kind": { "default": "dns#policyAlternativeNameServerConfigTargetNameServer", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#policyAlternativeNameServerConfigTargetNameServer\".", "type": "string" } }, @@ -1756,7 +1831,6 @@ "properties": { "kind": { "default": "dns#policyNetwork", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#policyNetwork\".", "type": "string" }, "networkUrl": { @@ -1776,7 +1850,6 @@ }, "kind": { "default": "dns#project", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#project\".", "type": "string" }, "number": { @@ -1802,7 +1875,6 @@ }, "kind": { "default": "dns#quota", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#quota\".", "type": "string" }, "managedZones": { @@ -1881,7 +1953,6 @@ "properties": { "kind": { "default": "dns#resourceRecordSet", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#resourceRecordSet\".", "type": "string" }, "name": { @@ -1926,7 +1997,7 @@ "type": "string" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token. In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. 
There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", "type": "string" }, "rrsets": { @@ -1951,7 +2022,7 @@ "type": "object" } }, - "servicePath": "dns/v1beta2/projects/", - "title": "Google Cloud DNS API", + "servicePath": "", + "title": "Cloud DNS API", "version": "v1beta2" } \ No newline at end of file diff --git a/vendor/google.golang.org/api/dns/v1beta2/dns-gen.go b/vendor/google.golang.org/api/dns/v1beta2/dns-gen.go index 2d2c8c6e95a..1158ca4d798 100644 --- a/vendor/google.golang.org/api/dns/v1beta2/dns-gen.go +++ b/vendor/google.golang.org/api/dns/v1beta2/dns-gen.go @@ -4,9 +4,9 @@ // Code generated file. DO NOT EDIT. -// Package dns provides access to the Google Cloud DNS API. +// Package dns provides access to the Cloud DNS API. // -// For product documentation, see: https://developers.google.com/cloud-dns +// For product documentation, see: http://developers.google.com/cloud-dns // // Creating a client // @@ -78,7 +78,8 @@ var _ = internaloption.WithDefaultEndpoint const apiId = "dns:v1beta2" const apiName = "dns" const apiVersion = "v1beta2" -const basePath = "https://dns.googleapis.com/dns/v1beta2/projects/" +const basePath = "https://dns.googleapis.com/" +const mtlsBasePath = "https://dns.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -106,6 +107,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -252,8 +254,6 @@ type Change struct { // IsServing: If the DNS queries for the zone will be served. IsServing bool `json:"isServing,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#change". Kind string `json:"kind,omitempty"` // StartTime: The time that this operation was started by the server @@ -262,11 +262,11 @@ type Change struct { // Status: Status of the operation (output only). A status of "done" // means that the request to update the authoritative servers has been - // sent, but the servers might not be updated yet. + // sent but the servers might not be updated yet. // // Possible values: - // "done" // "pending" + // "done" Status string `json:"status,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -310,14 +310,12 @@ type ChangesListResponse struct { // NextPageToken: The presence of this field indicates that there exist // more results following your last page of results in pagination order. // To fetch them, make another list request using this value as your - // pagination token. - // - // In this way you can retrieve the complete contents of even very large - // collections one page at a time. However, if the contents of the - // collection change between the first and last paginated list request, - // the set of all elements returned will be an inconsistent view of the - // collection. There is no way to retrieve a "snapshot" of collections - // larger than the maximum page size. + // pagination token. In this way you can retrieve the complete contents + // of even very large collections one page at a time. 
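
The regenerated v1beta2 client above moves the default endpoint to https://dns.googleapis.com/ (adding a separate mTLS endpoint) and folds the old dns/v1beta2/projects/ prefix into each request path instead of the base path, so downstream callers keep constructing the service exactly as before. A minimal sketch, not part of this PR, assuming default application credentials and placeholder project and zone names:

```go
package main

import (
	"context"
	"fmt"
	"log"

	dns "google.golang.org/api/dns/v1beta2"
)

func main() {
	ctx := context.Background()

	// NewService wires up the new https://dns.googleapis.com/ default endpoint
	// and its mTLS counterpart internally; credentials come from the environment.
	svc, err := dns.NewService(ctx)
	if err != nil {
		log.Fatalf("dns.NewService: %v", err)
	}

	// "my-project" and "my-zone" are placeholder names, not values from this PR.
	zone, err := svc.ManagedZones.Get("my-project", "my-zone").Context(ctx).Do()
	if err != nil {
		log.Fatalf("ManagedZones.Get: %v", err)
	}
	fmt.Printf("zone %s serves %s\n", zone.Name, zone.DnsName)
}
```
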
However, if the + // contents of the collection change between the first and last + // paginated list request, the set of all elements returned will be an + // inconsistent view of the collection. There is no way to retrieve a + // "snapshot" of collections larger than the maximum page size. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -353,11 +351,11 @@ type DnsKey struct { // key. Immutable after creation time. // // Possible values: - // "ecdsap256sha256" - // "ecdsap384sha384" // "rsasha1" // "rsasha256" // "rsasha512" + // "ecdsap256sha256" + // "ecdsap384sha384" Algorithm string `json:"algorithm,omitempty"` // CreationTime: The time that this resource was created in the control @@ -396,8 +394,6 @@ type DnsKey struct { // is specified in RFC4034 Appendix B. Output only. KeyTag int64 `json:"keyTag,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#dnsKey". Kind string `json:"kind,omitempty"` // PublicKey: Base64 encoded public half of this key. Output only. @@ -486,11 +482,11 @@ type DnsKeySpec struct { // key. // // Possible values: - // "ecdsap256sha256" - // "ecdsap384sha384" // "rsasha1" // "rsasha256" // "rsasha512" + // "ecdsap256sha256" + // "ecdsap384sha384" Algorithm string `json:"algorithm,omitempty"` // KeyLength: Length of the keys in bits. @@ -508,8 +504,6 @@ type DnsKeySpec struct { // "zoneSigning" KeyType string `json:"keyType,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#dnsKeySpec". Kind string `json:"kind,omitempty"` // ForceSendFields is a list of field names (e.g. "Algorithm") to @@ -549,14 +543,12 @@ type DnsKeysListResponse struct { // NextPageToken: The presence of this field indicates that there exist // more results following your last page of results in pagination order. // To fetch them, make another list request using this value as your - // pagination token. - // - // In this way you can retrieve the complete contents of even very large - // collections one page at a time. However, if the contents of the - // collection change between the first and last paginated list request, - // the set of all elements returned will be an inconsistent view of the - // collection. There is no way to retrieve a "snapshot" of collections - // larger than the maximum page size. + // pagination token. In this way you can retrieve the complete contents + // of even very large collections one page at a time. However, if the + // contents of the collection change between the first and last + // paginated list request, the set of all elements returned will be an + // inconsistent view of the collection. There is no way to retrieve a + // "snapshot" of collections larger than the maximum page size. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -615,8 +607,6 @@ type ManagedZone struct { // only) Id uint64 `json:"id,omitempty,string"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#managedZone". Kind string `json:"kind,omitempty"` // Labels: User labels. @@ -653,13 +643,18 @@ type ManagedZone struct { // private_visibility_config. ReverseLookupConfig *ManagedZoneReverseLookupConfig `json:"reverseLookupConfig,omitempty"` + // ServiceDirectoryConfig: This field links to the associated service + // directory namespace. 
This field should not be set for public zones or + // forwarding zones. + ServiceDirectoryConfig *ManagedZoneServiceDirectoryConfig `json:"serviceDirectoryConfig,omitempty"` + // Visibility: The zone's visibility: public zones are exposed to the // Internet, while private zones are visible only to Virtual Private // Cloud resources. // // Possible values: - // "private" // "public" + // "private" Visibility string `json:"visibility,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -694,8 +689,6 @@ type ManagedZoneDnsSecConfig struct { // for this ManagedZone. Can only be changed while the state is OFF. DefaultKeySpecs []*DnsKeySpec `json:"defaultKeySpecs,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#managedZoneDnsSecConfig". Kind string `json:"kind,omitempty"` // NonExistence: Specifies the mechanism for authenticated @@ -710,9 +703,9 @@ type ManagedZoneDnsSecConfig struct { // State: Specifies whether DNSSEC is enabled, and what mode it is in. // // Possible values: - // "off" - // "on" - // "transfer" + // "off" - DNSSEC is disabled; the zone is not signed. + // "on" - DNSSEC is enabled; the zone is signed and fully managed. + // "transfer" - DNSSEC is enabled, but in a "transfer" mode. State string `json:"state,omitempty"` // ForceSendFields is a list of field names (e.g. "DefaultKeySpecs") to @@ -740,8 +733,6 @@ func (s *ManagedZoneDnsSecConfig) MarshalJSON() ([]byte, error) { } type ManagedZoneForwardingConfig struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#managedZoneForwardingConfig". Kind string `json:"kind,omitempty"` // TargetNameServers: List of target name servers to forward to. Cloud @@ -773,22 +764,24 @@ func (s *ManagedZoneForwardingConfig) MarshalJSON() ([]byte, error) { } type ManagedZoneForwardingConfigNameServerTarget struct { - // ForwardingPath: Forwarding path for this NameServerTarget, if unset + // ForwardingPath: Forwarding path for this NameServerTarget. If unset // or set to DEFAULT, Cloud DNS will make forwarding decision based on - // address ranges, i.e. RFC1918 addresses go to the VPC, Non-RFC1918 + // address ranges, i.e. RFC1918 addresses go to the VPC, non-RFC1918 // addresses go to the Internet. When set to PRIVATE, Cloud DNS will - // always send queries through VPC for this target + // always send queries through VPC for this target. // // Possible values: - // "default" - // "private" + // "default" - Cloud DNS will make forwarding decision based on + // address ranges, i.e. RFC1918 addresses forward to the target through + // the VPC and non-RFC1918 addresses will forward to the target through + // the Internet + // "private" - Cloud DNS will always forward to this target through + // the VPC. ForwardingPath string `json:"forwardingPath,omitempty"` // Ipv4Address: IPv4 address of a target name server. Ipv4Address string `json:"ipv4Address,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#managedZoneForwardingConfigNameServerTarget". Kind string `json:"kind,omitempty"` // ForceSendFields is a list of field names (e.g. "ForwardingPath") to @@ -818,20 +811,18 @@ func (s *ManagedZoneForwardingConfigNameServerTarget) MarshalJSON() ([]byte, err type ManagedZoneOperationsListResponse struct { Header *ResponseHeader `json:"header,omitempty"` - // Kind: Type of resource. 
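
For reference, the forwardingPath semantics spelled out in the regenerated doc comments above can be exercised with the vendored types directly. A small sketch, assuming the same google.golang.org/api/dns/v1beta2 import as the earlier example; the target addresses are placeholders:

```go
// exampleForwardingConfig shows the two forwardingPath modes described above.
func exampleForwardingConfig() *dns.ManagedZoneForwardingConfig {
	return &dns.ManagedZoneForwardingConfig{
		TargetNameServers: []*dns.ManagedZoneForwardingConfigNameServerTarget{
			// "private": queries to this target always go through the VPC.
			{Ipv4Address: "10.0.0.53", ForwardingPath: "private"},
			// Unset (or "default"): RFC1918 targets are reached through the
			// VPC, other targets through the Internet.
			{Ipv4Address: "192.0.2.53"},
		},
	}
}
```
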
Kind string `json:"kind,omitempty"` // NextPageToken: The presence of this field indicates that there exist // more results following your last page of results in pagination order. // To fetch them, make another list request using this value as your - // page token. - // - // In this way you can retrieve the complete contents of even very large - // collections one page at a time. However, if the contents of the - // collection change between the first and last paginated list request, - // the set of all elements returned will be an inconsistent view of the - // collection. There is no way to retrieve a consistent snapshot of a - // collection larger than the maximum page size. + // page token. In this way you can retrieve the complete contents of + // even very large collections one page at a time. However, if the + // contents of the collection change between the first and last + // paginated list request, the set of all elements returned will be an + // inconsistent view of the collection. There is no way to retrieve a + // consistent snapshot of a collection larger than the maximum page + // size. NextPageToken string `json:"nextPageToken,omitempty"` // Operations: The operation resources. @@ -865,8 +856,6 @@ func (s *ManagedZoneOperationsListResponse) MarshalJSON() ([]byte, error) { } type ManagedZonePeeringConfig struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#managedZonePeeringConfig". Kind string `json:"kind,omitempty"` // TargetNetwork: The network with which to peer. @@ -903,8 +892,6 @@ type ManagedZonePeeringConfigTargetNetwork struct { // zone targeted is deleted. Output only. DeactivateTime string `json:"deactivateTime,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#managedZonePeeringConfigTargetNetwork". Kind string `json:"kind,omitempty"` // NetworkUrl: The fully qualified URL of the VPC network to forward @@ -937,8 +924,6 @@ func (s *ManagedZonePeeringConfigTargetNetwork) MarshalJSON() ([]byte, error) { } type ManagedZonePrivateVisibilityConfig struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#managedZonePrivateVisibilityConfig". Kind string `json:"kind,omitempty"` // Networks: The list of VPC networks that can see this zone. @@ -968,8 +953,6 @@ func (s *ManagedZonePrivateVisibilityConfig) MarshalJSON() ([]byte, error) { } type ManagedZonePrivateVisibilityConfigNetwork struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#managedZonePrivateVisibilityConfigNetwork". Kind string `json:"kind,omitempty"` // NetworkUrl: The fully qualified URL of the VPC network to bind to. @@ -1001,8 +984,6 @@ func (s *ManagedZonePrivateVisibilityConfigNetwork) MarshalJSON() ([]byte, error } type ManagedZoneReverseLookupConfig struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#managedZoneReverseLookupConfig". Kind string `json:"kind,omitempty"` // ForceSendFields is a list of field names (e.g. "Kind") to @@ -1028,6 +1009,74 @@ func (s *ManagedZoneReverseLookupConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ManagedZoneServiceDirectoryConfig: Contains information about Service +// Directory-backed zones. +type ManagedZoneServiceDirectoryConfig struct { + Kind string `json:"kind,omitempty"` + + // Namespace: Contains information about the namespace associated with + // the zone. 
+ Namespace *ManagedZoneServiceDirectoryConfigNamespace `json:"namespace,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManagedZoneServiceDirectoryConfig) MarshalJSON() ([]byte, error) { + type NoMethod ManagedZoneServiceDirectoryConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ManagedZoneServiceDirectoryConfigNamespace struct { + // DeletionTime: The time that the namespace backing this zone was + // deleted, empty string if it still exists. This is in RFC3339 text + // format. Output only. + DeletionTime string `json:"deletionTime,omitempty"` + + Kind string `json:"kind,omitempty"` + + // NamespaceUrl: The fully qualified URL of the namespace associated + // with the zone. This should be formatted like + // https://servicedirectory.googleapis.com/v1/projects/{project}/locations/{location}/namespaces/{namespace} + NamespaceUrl string `json:"namespaceUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DeletionTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DeletionTime") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManagedZoneServiceDirectoryConfigNamespace) MarshalJSON() ([]byte, error) { + type NoMethod ManagedZoneServiceDirectoryConfigNamespace + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ManagedZonesListResponse struct { Header *ResponseHeader `json:"header,omitempty"` @@ -1040,14 +1089,13 @@ type ManagedZonesListResponse struct { // NextPageToken: The presence of this field indicates that there exist // more results following your last page of results in pagination order. // To fetch them, make another list request using this value as your - // page token. - // - // In this way you can retrieve the complete contents of even very large - // collections one page at a time. 
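
The new ManagedZoneServiceDirectoryConfig types generated above would be used roughly like this when creating a private zone backed by a Service Directory namespace. This is a hypothetical sketch, not code from this PR: svc and ctx are assumed to be the *dns.Service and context from the first example, and every name and URL is a placeholder.

```go
// createServiceDirectoryZone sketches use of the new ServiceDirectoryConfig field.
func createServiceDirectoryZone(ctx context.Context, svc *dns.Service) (*dns.ManagedZone, error) {
	zone := &dns.ManagedZone{
		Name:        "sd-backed-zone",
		DnsName:     "internal.example.com.",
		Description: "private zone backed by a Service Directory namespace",
		Visibility:  "private",
		ServiceDirectoryConfig: &dns.ManagedZoneServiceDirectoryConfig{
			Namespace: &dns.ManagedZoneServiceDirectoryConfigNamespace{
				// Formatted as documented above; project, location and namespace
				// are placeholders.
				NamespaceUrl: "https://servicedirectory.googleapis.com/v1/projects/my-project/locations/us-central1/namespaces/my-namespace",
			},
		},
	}
	return svc.ManagedZones.Create("my-project", zone).Context(ctx).Do()
}
```
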
However, if the contents of the - // collection change between the first and last paginated list request, - // the set of all elements returned will be an inconsistent view of the - // collection. There is no way to retrieve a consistent snapshot of a - // collection larger than the maximum page size. + // page token. In this way you can retrieve the complete contents of + // even very large collections one page at a time. However, if the + // contents of the collection change between the first and last + // paginated list request, the set of all elements returned will be an + // inconsistent view of the collection. There is no way to retrieve a + // consistent snapshot of a collection larger than the maximum page + // size. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1094,8 +1142,6 @@ type Operation struct { // (output only) Id string `json:"id,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#operation". Kind string `json:"kind,omitempty"` // StartTime: The time that this operation was started by the server. @@ -1108,8 +1154,8 @@ type Operation struct { // servers might not be updated yet. // // Possible values: - // "done" // "pending" + // "done" Status string `json:"status,omitempty"` // Type: Type of the operation. Operations include insert, update, and @@ -1221,14 +1267,13 @@ type PoliciesListResponse struct { // NextPageToken: The presence of this field indicates that there exist // more results following your last page of results in pagination order. // To fetch them, make another list request using this value as your - // page token. - // - // In this way you can retrieve the complete contents of even very large - // collections one page at a time. However, if the contents of the - // collection change between the first and last paginated list request, - // the set of all elements returned will be an inconsistent view of the - // collection. There is no way to retrieve a consistent snapshot of a - // collection larger than the maximum page size. + // page token. In this way you can retrieve the complete contents of + // even very large collections one page at a time. However, if the + // contents of the collection change between the first and last + // paginated list request, the set of all elements returned will be an + // inconsistent view of the collection. There is no way to retrieve a + // consistent snapshot of a collection larger than the maximum page + // size. NextPageToken string `json:"nextPageToken,omitempty"` // Policies: The policy resources. @@ -1353,8 +1398,6 @@ type Policy struct { // only). Id uint64 `json:"id,omitempty,string"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#policy". Kind string `json:"kind,omitempty"` // Name: User assigned name for this policy. @@ -1394,8 +1437,6 @@ func (s *Policy) MarshalJSON() ([]byte, error) { } type PolicyAlternativeNameServerConfig struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#policyAlternativeNameServerConfig". 
Kind string `json:"kind,omitempty"` // TargetNameServers: Sets an alternative name server for the associated @@ -1428,22 +1469,24 @@ func (s *PolicyAlternativeNameServerConfig) MarshalJSON() ([]byte, error) { } type PolicyAlternativeNameServerConfigTargetNameServer struct { - // ForwardingPath: Forwarding path for this TargetNameServer, if unset + // ForwardingPath: Forwarding path for this TargetNameServer. If unset // or set to DEFAULT, Cloud DNS will make forwarding decision based on - // address ranges, i.e. RFC1918 addresses go to the VPC, Non-RFC1918 + // address ranges, i.e. RFC1918 addresses go to the VPC, non-RFC1918 // addresses go to the Internet. When set to PRIVATE, Cloud DNS will - // always send queries through VPC for this target + // always send queries through VPC for this target. // // Possible values: - // "default" - // "private" + // "default" - Cloud DNS will make forwarding decision based on + // address ranges, i.e. RFC1918 addresses forward to the target through + // the VPC and non-RFC1918 addresses will forward to the target through + // the Internet + // "private" - Cloud DNS will always forward to this target through + // the VPC. ForwardingPath string `json:"forwardingPath,omitempty"` // Ipv4Address: IPv4 address to forward to. Ipv4Address string `json:"ipv4Address,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#policyAlternativeNameServerConfigTargetNameServer". Kind string `json:"kind,omitempty"` // ForceSendFields is a list of field names (e.g. "ForwardingPath") to @@ -1471,8 +1514,6 @@ func (s *PolicyAlternativeNameServerConfigTargetNameServer) MarshalJSON() ([]byt } type PolicyNetwork struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#policyNetwork". Kind string `json:"kind,omitempty"` // NetworkUrl: The fully qualified URL of the VPC network to bind to. @@ -1510,8 +1551,6 @@ type Project struct { // Id: User assigned unique identifier for the resource (output only). Id string `json:"id,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#project". Kind string `json:"kind,omitempty"` // Number: Unique numeric identifier for the resource; defined by the @@ -1554,8 +1593,6 @@ type Quota struct { // ManagedZone. DnsKeysPerManagedZone int64 `json:"dnsKeysPerManagedZone,omitempty"` - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#quota". Kind string `json:"kind,omitempty"` // ManagedZones: Maximum allowed number of managed zones in the project. @@ -1635,8 +1672,6 @@ func (s *Quota) MarshalJSON() ([]byte, error) { // ResourceRecordSet: A unit of data that will be returned by the DNS // servers. type ResourceRecordSet struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#resourceRecordSet". Kind string `json:"kind,omitempty"` // Name: For example, www.example.com. @@ -1689,14 +1724,13 @@ type ResourceRecordSetsListResponse struct { // NextPageToken: The presence of this field indicates that there exist // more results following your last page of results in pagination order. // To fetch them, make another list request using this value as your - // pagination token. - // - // In this way you can retrieve the complete contents of even very large - // collections one page at a time. 
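
The pagination contract repeated through these doc comments (request the next page with the returned token until NextPageToken comes back empty) looks roughly like this in caller code. A sketch only, assuming the same *dns.Service as in the first example:

```go
// listAllManagedZones follows NextPageToken until it is empty. Per the
// documented caveat, if the collection changes while paging, the combined
// result can be an inconsistent view of it.
func listAllManagedZones(ctx context.Context, svc *dns.Service, project string) ([]*dns.ManagedZone, error) {
	var zones []*dns.ManagedZone
	pageToken := ""
	for {
		call := svc.ManagedZones.List(project).Context(ctx)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		resp, err := call.Do()
		if err != nil {
			return nil, err
		}
		zones = append(zones, resp.ManagedZones...)
		if resp.NextPageToken == "" {
			return zones, nil
		}
		pageToken = resp.NextPageToken
	}
}
```
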
However, if the contents of the - // collection change between the first and last paginated list request, - // the set of all elements returned will be an inconsistent view of the - // collection. There is no way to retrieve a consistent snapshot of a - // collection larger than the maximum page size. + // pagination token. In this way you can retrieve the complete contents + // of even very large collections one page at a time. However, if the + // contents of the collection change between the first and last + // paginated list request, the set of all elements returned will be an + // inconsistent view of the collection. There is no way to retrieve a + // consistent snapshot of a collection larger than the maximum page + // size. NextPageToken string `json:"nextPageToken,omitempty"` // Rrsets: The resource record set resources. @@ -1816,7 +1850,7 @@ func (c *ChangesCreateCall) Header() http.Header { func (c *ChangesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1829,7 +1863,7 @@ func (c *ChangesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/managedZones/{managedZone}/changes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -1882,6 +1916,7 @@ func (c *ChangesCreateCall) Do(opts ...googleapi.CallOption) (*Change, error) { return ret, nil // { // "description": "Atomically update the ResourceRecordSet collection.", + // "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/changes", // "httpMethod": "POST", // "id": "dns.changes.create", // "parameterOrder": [ @@ -1895,7 +1930,7 @@ func (c *ChangesCreateCall) Do(opts ...googleapi.CallOption) (*Change, error) { // "type": "string" // }, // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -1907,7 +1942,7 @@ func (c *ChangesCreateCall) Do(opts ...googleapi.CallOption) (*Change, error) { // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}/changes", + // "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/changes", // "request": { // "$ref": "Change" // }, @@ -1990,7 +2025,7 @@ func (c *ChangesGetCall) Header() http.Header { func (c *ChangesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2001,7 +2036,7 @@ func (c *ChangesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes/{changeId}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/managedZones/{managedZone}/changes/{changeId}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -2055,6 +2090,7 @@ func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { return ret, nil // { // "description": "Fetch the representation of an existing Change.", + // "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/changes/{changeId}", // "httpMethod": "GET", // "id": "dns.changes.get", // "parameterOrder": [ @@ -2075,7 +2111,7 @@ func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { // "type": "string" // }, // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -2087,7 +2123,7 @@ func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}/changes/{changeId}", + // "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/changes/{changeId}", // "response": { // "$ref": "Change" // }, @@ -2191,7 +2227,7 @@ func (c *ChangesListCall) Header() http.Header { func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2202,7 +2238,7 @@ func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/managedZones/{managedZone}/changes") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -2255,6 +2291,7 @@ func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse return ret, nil // { // "description": "Enumerate Changes to a ResourceRecordSet collection.", + // "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/changes", // "httpMethod": "GET", // "id": "dns.changes.list", // "parameterOrder": [ @@ -2263,7 +2300,7 @@ func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse // ], // "parameters": { // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -2303,7 +2340,7 @@ func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}/changes", + // "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/changes", // "response": { // "$ref": "ChangesListResponse" // }, @@ -2415,7 +2452,7 @@ func (c *DnsKeysGetCall) Header() http.Header { func (c *DnsKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2426,7 +2463,7 @@ func (c *DnsKeysGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -2480,6 +2517,7 @@ func (c *DnsKeysGetCall) Do(opts ...googleapi.CallOption) (*DnsKey, error) { return ret, nil // { // "description": "Fetch the representation of an existing DnsKey.", + // "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", // "httpMethod": "GET", // "id": "dns.dnsKeys.get", // "parameterOrder": [ @@ -2505,7 +2543,7 @@ func (c *DnsKeysGetCall) Do(opts ...googleapi.CallOption) (*DnsKey, error) { // "type": "string" // }, // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -2517,7 +2555,7 @@ func (c *DnsKeysGetCall) Do(opts ...googleapi.CallOption) (*DnsKey, error) { // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", + // "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", // "response": { // "$ref": "DnsKey" // }, @@ -2613,7 +2651,7 @@ func (c *DnsKeysListCall) Header() http.Header { func (c *DnsKeysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2624,7 +2662,7 @@ func (c *DnsKeysListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/dnsKeys") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/managedZones/{managedZone}/dnsKeys") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -2677,6 +2715,7 @@ func (c *DnsKeysListCall) Do(opts ...googleapi.CallOption) (*DnsKeysListResponse return ret, nil // { // "description": "Enumerate DnsKeys to a ResourceRecordSet collection.", + // "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/dnsKeys", // "httpMethod": "GET", // "id": "dns.dnsKeys.list", // "parameterOrder": [ @@ -2690,7 +2729,7 @@ func (c *DnsKeysListCall) Do(opts ...googleapi.CallOption) (*DnsKeysListResponse // "type": "string" // }, // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -2713,7 +2752,7 @@ func (c *DnsKeysListCall) Do(opts ...googleapi.CallOption) (*DnsKeysListResponse // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}/dnsKeys", + // "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/dnsKeys", // "response": { // "$ref": "DnsKeysListResponse" // }, @@ -2816,7 +2855,7 @@ func (c *ManagedZoneOperationsGetCall) Header() http.Header { func (c *ManagedZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2827,7 +2866,7 @@ func (c *ManagedZoneOperationsGetCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/managedZones/{managedZone}/operations/{operation}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -2881,6 +2920,7 @@ func (c *ManagedZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operat return ret, nil // { // "description": "Fetch the representation of an existing Operation.", + // "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/operations/{operation}", // "httpMethod": "GET", // "id": "dns.managedZoneOperations.get", // "parameterOrder": [ @@ -2913,7 +2953,7 @@ func (c *ManagedZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}/operations/{operation}", + // "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/operations/{operation}", // "response": { // "$ref": "Operation" // }, @@ -2967,8 +3007,8 @@ func (c *ManagedZoneOperationsListCall) PageToken(pageToken string) *ManagedZone // only supported values are START_TIME and ID. // // Possible values: -// "id" // "startTime" (default) +// "id" func (c *ManagedZoneOperationsListCall) SortBy(sortBy string) *ManagedZoneOperationsListCall { c.urlParams_.Set("sortBy", sortBy) return c @@ -3011,7 +3051,7 @@ func (c *ManagedZoneOperationsListCall) Header() http.Header { func (c *ManagedZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3022,7 +3062,7 @@ func (c *ManagedZoneOperationsListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/managedZones/{managedZone}/operations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -3076,6 +3116,7 @@ func (c *ManagedZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*Manag return ret, nil // { // "description": "Enumerate Operations for the given ManagedZone.", + // "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/operations", // "httpMethod": "GET", // "id": "dns.managedZoneOperations.list", // "parameterOrder": [ @@ -3110,8 +3151,8 @@ func (c *ManagedZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*Manag // "default": "startTime", // "description": "Sorting criterion. 
The only supported values are START_TIME and ID.", // "enum": [ - // "id", - // "startTime" + // "startTime", + // "id" // ], // "enumDescriptions": [ // "", @@ -3121,7 +3162,7 @@ func (c *ManagedZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*Manag // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}/operations", + // "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/operations", // "response": { // "$ref": "ManagedZoneOperationsListResponse" // }, @@ -3211,7 +3252,7 @@ func (c *ManagedZonesCreateCall) Header() http.Header { func (c *ManagedZonesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3224,7 +3265,7 @@ func (c *ManagedZonesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/managedZones") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -3276,6 +3317,7 @@ func (c *ManagedZonesCreateCall) Do(opts ...googleapi.CallOption) (*ManagedZone, return ret, nil // { // "description": "Create a new ManagedZone.", + // "flatPath": "dns/v1beta2/projects/{project}/managedZones", // "httpMethod": "POST", // "id": "dns.managedZones.create", // "parameterOrder": [ @@ -3294,7 +3336,7 @@ func (c *ManagedZonesCreateCall) Do(opts ...googleapi.CallOption) (*ManagedZone, // "type": "string" // } // }, - // "path": "{project}/managedZones", + // "path": "dns/v1beta2/projects/{project}/managedZones", // "request": { // "$ref": "ManagedZone" // }, @@ -3364,7 +3406,7 @@ func (c *ManagedZonesDeleteCall) Header() http.Header { func (c *ManagedZonesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3372,7 +3414,7 @@ func (c *ManagedZonesDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/managedZones/{managedZone}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -3400,6 +3442,7 @@ func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error { return nil // { // "description": "Delete a previously created ManagedZone.", + // "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}", // "httpMethod": "DELETE", // "id": "dns.managedZones.delete", // "parameterOrder": [ @@ -3413,7 +3456,7 @@ func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error { // "type": "string" // }, // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -3425,7 +3468,7 @@ func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error { // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}", + // "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" @@ -3500,7 +3543,7 @@ func (c *ManagedZonesGetCall) Header() http.Header { func (c *ManagedZonesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3511,7 +3554,7 @@ func (c *ManagedZonesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/managedZones/{managedZone}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -3564,6 +3607,7 @@ func (c *ManagedZonesGetCall) Do(opts ...googleapi.CallOption) (*ManagedZone, er return ret, nil // { // "description": "Fetch the representation of an existing ManagedZone.", + // "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}", // "httpMethod": "GET", // "id": "dns.managedZones.get", // "parameterOrder": [ @@ -3577,7 +3621,7 @@ func (c *ManagedZonesGetCall) Do(opts ...googleapi.CallOption) (*ManagedZone, er // "type": "string" // }, // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -3589,7 +3633,7 @@ func (c *ManagedZonesGetCall) Do(opts ...googleapi.CallOption) (*ManagedZone, er // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}", + // "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}", // "response": { // "$ref": "ManagedZone" // }, @@ -3682,7 +3726,7 @@ func (c *ManagedZonesListCall) Header() http.Header { func (c *ManagedZonesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3693,7 +3737,7 @@ func (c *ManagedZonesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/managedZones") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -3745,6 +3789,7 @@ func (c *ManagedZonesListCall) Do(opts ...googleapi.CallOption) (*ManagedZonesLi return ret, nil // { // "description": "Enumerate ManagedZones that have been created but not yet deleted.", + // "flatPath": "dns/v1beta2/projects/{project}/managedZones", // "httpMethod": "GET", // "id": "dns.managedZones.list", // "parameterOrder": [ @@ -3774,7 +3819,7 @@ func (c *ManagedZonesListCall) Do(opts ...googleapi.CallOption) (*ManagedZonesLi // "type": "string" // } // }, - // "path": "{project}/managedZones", + // "path": "dns/v1beta2/projects/{project}/managedZones", // "response": { // "$ref": "ManagedZonesListResponse" // }, @@ -3866,7 +3911,7 @@ func (c *ManagedZonesPatchCall) Header() http.Header { func (c *ManagedZonesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3879,7 +3924,7 @@ func (c *ManagedZonesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/managedZones/{managedZone}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -3932,6 +3977,7 @@ func (c *ManagedZonesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er return ret, nil // { // "description": "Apply a partial update to an existing ManagedZone.", + // "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}", // "httpMethod": "PATCH", // "id": "dns.managedZones.patch", // "parameterOrder": [ @@ -3945,7 +3991,7 @@ func (c *ManagedZonesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // }, // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -3957,7 +4003,7 @@ func (c *ManagedZonesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}", + // "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}", // "request": { // "$ref": "ManagedZone" // }, @@ -4029,7 +4075,7 @@ func (c *ManagedZonesUpdateCall) Header() http.Header { func (c *ManagedZonesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4042,7 +4088,7 @@ func (c *ManagedZonesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/managedZones/{managedZone}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PUT", urls, body) if err != nil { @@ -4095,6 +4141,7 @@ func (c *ManagedZonesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e return ret, nil // { // "description": "Update an existing ManagedZone.", + // "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}", // "httpMethod": "PUT", // "id": "dns.managedZones.update", // "parameterOrder": [ @@ -4108,7 +4155,7 @@ func (c *ManagedZonesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // }, // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -4120,7 +4167,7 @@ func (c *ManagedZonesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}", + // "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}", // "request": { // "$ref": "ManagedZone" // }, @@ -4190,7 +4237,7 @@ func (c *PoliciesCreateCall) Header() http.Header { func (c *PoliciesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4203,7 +4250,7 @@ func (c *PoliciesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/policies") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/policies") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -4255,6 +4302,7 @@ func (c *PoliciesCreateCall) Do(opts ...googleapi.CallOption) (*Policy, error) { return ret, nil // { // "description": "Create a new Policy", + // "flatPath": "dns/v1beta2/projects/{project}/policies", // "httpMethod": "POST", // "id": "dns.policies.create", // "parameterOrder": [ @@ -4273,7 +4321,7 @@ func (c *PoliciesCreateCall) Do(opts ...googleapi.CallOption) (*Policy, error) { // "type": "string" // } // }, - // "path": "{project}/policies", + // "path": "dns/v1beta2/projects/{project}/policies", // "request": { // "$ref": "Policy" // }, @@ -4344,7 +4392,7 @@ func (c *PoliciesDeleteCall) Header() http.Header { func (c *PoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4352,7 +4400,7 @@ func (c *PoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/policies/{policy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/policies/{policy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -4380,6 +4428,7 @@ func (c *PoliciesDeleteCall) Do(opts ...googleapi.CallOption) error { return nil // { // "description": "Delete a previously created Policy. Will fail if the policy is still being referenced by a network.", + // "flatPath": "dns/v1beta2/projects/{project}/policies/{policy}", // "httpMethod": "DELETE", // "id": "dns.policies.delete", // "parameterOrder": [ @@ -4405,7 +4454,7 @@ func (c *PoliciesDeleteCall) Do(opts ...googleapi.CallOption) error { // "type": "string" // } // }, - // "path": "{project}/policies/{policy}", + // "path": "dns/v1beta2/projects/{project}/policies/{policy}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" @@ -4480,7 +4529,7 @@ func (c *PoliciesGetCall) Header() http.Header { func (c *PoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4491,7 +4540,7 @@ func (c *PoliciesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/policies/{policy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/policies/{policy}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -4544,6 +4593,7 @@ func (c *PoliciesGetCall) Do(opts ...googleapi.CallOption) (*Policy, error) { return ret, nil // { // "description": "Fetch the representation of an existing Policy.", + // "flatPath": "dns/v1beta2/projects/{project}/policies/{policy}", // "httpMethod": "GET", // "id": "dns.policies.get", // "parameterOrder": [ @@ -4569,7 +4619,7 @@ func (c *PoliciesGetCall) Do(opts ...googleapi.CallOption) (*Policy, error) { // "type": "string" // } // }, - // "path": "{project}/policies/{policy}", + // "path": "dns/v1beta2/projects/{project}/policies/{policy}", // "response": { // "$ref": "Policy" // }, @@ -4654,7 +4704,7 @@ func (c *PoliciesListCall) Header() http.Header { func (c *PoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4665,7 +4715,7 @@ func (c *PoliciesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/policies") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/policies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -4717,6 +4767,7 @@ func (c *PoliciesListCall) Do(opts ...googleapi.CallOption) (*PoliciesListRespon return ret, nil // { // "description": "Enumerate all Policies associated with a project.", + // "flatPath": "dns/v1beta2/projects/{project}/policies", // "httpMethod": "GET", // "id": "dns.policies.list", // "parameterOrder": [ @@ -4741,7 +4792,7 @@ func (c *PoliciesListCall) Do(opts ...googleapi.CallOption) (*PoliciesListRespon // "type": "string" // } // }, - // "path": "{project}/policies", + // "path": "dns/v1beta2/projects/{project}/policies", // "response": { // "$ref": "PoliciesListResponse" // }, @@ -4833,7 +4884,7 @@ func (c *PoliciesPatchCall) Header() http.Header { func (c *PoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4846,7 +4897,7 @@ func (c *PoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/policies/{policy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/policies/{policy}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -4899,6 +4950,7 @@ func (c *PoliciesPatchCall) Do(opts ...googleapi.CallOption) (*PoliciesPatchResp return ret, nil // { // "description": "Apply a partial update to an existing Policy.", + // "flatPath": "dns/v1beta2/projects/{project}/policies/{policy}", // "httpMethod": "PATCH", // "id": "dns.policies.patch", // "parameterOrder": [ @@ -4924,7 +4976,7 @@ func (c *PoliciesPatchCall) Do(opts ...googleapi.CallOption) (*PoliciesPatchResp // "type": "string" // } // }, - // "path": "{project}/policies/{policy}", + // "path": "dns/v1beta2/projects/{project}/policies/{policy}", // "request": { // "$ref": "Policy" // }, @@ -4996,7 +5048,7 @@ func (c *PoliciesUpdateCall) Header() http.Header { func (c *PoliciesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5009,7 +5061,7 @@ func (c *PoliciesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/policies/{policy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/policies/{policy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PUT", urls, body) if err != nil { @@ -5062,6 +5114,7 @@ func (c *PoliciesUpdateCall) Do(opts ...googleapi.CallOption) (*PoliciesUpdateRe return ret, nil // { // "description": "Update an existing Policy.", + // "flatPath": "dns/v1beta2/projects/{project}/policies/{policy}", // "httpMethod": "PUT", // "id": "dns.policies.update", // "parameterOrder": [ @@ -5087,7 +5140,7 @@ func (c *PoliciesUpdateCall) Do(opts ...googleapi.CallOption) (*PoliciesUpdateRe // "type": "string" // } // }, - // "path": "{project}/policies/{policy}", + // "path": "dns/v1beta2/projects/{project}/policies/{policy}", // "request": { // "$ref": "Policy" // }, @@ -5166,7 +5219,7 @@ func (c *ProjectsGetCall) Header() http.Header { func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5177,7 +5230,7 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -5229,6 +5282,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { return ret, nil // { // "description": "Fetch the representation of an existing Project.", + // "flatPath": "dns/v1beta2/projects/{project}", // "httpMethod": "GET", // "id": "dns.projects.get", // "parameterOrder": [ @@ -5247,7 +5301,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { // "type": "string" // } // }, - // "path": "{project}", + // "path": "dns/v1beta2/projects/{project}", // "response": { // "$ref": "Project" // }, @@ -5350,7 +5404,7 @@ func (c *ResourceRecordSetsListCall) Header() http.Header { func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5361,7 +5415,7 @@ func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/rrsets") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1beta2/projects/{project}/managedZones/{managedZone}/rrsets") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -5414,6 +5468,7 @@ func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*Resource return ret, nil // { // "description": "Enumerate ResourceRecordSets that have been created but not yet deleted.", + // "flatPath": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/rrsets", // "httpMethod": "GET", // "id": "dns.resourceRecordSets.list", // "parameterOrder": [ @@ -5422,7 +5477,7 @@ func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*Resource // ], // "parameters": { // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" @@ -5455,7 +5510,7 @@ func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*Resource // "type": "string" // } // }, - // "path": "{project}/managedZones/{managedZone}/rrsets", + // "path": "dns/v1beta2/projects/{project}/managedZones/{managedZone}/rrsets", // "response": { // "$ref": "ResourceRecordSetsListResponse" // }, diff --git a/vendor/google.golang.org/api/file/v1beta1/file-api.json b/vendor/google.golang.org/api/file/v1beta1/file-api.json index f43bafeb609..a2bf7cde4f2 100644 --- a/vendor/google.golang.org/api/file/v1beta1/file-api.json +++ b/vendor/google.golang.org/api/file/v1beta1/file-api.json @@ -149,7 +149,7 @@ "type": "string" }, "includeUnrevealedLocations": { - "description": "If true, the returned list will include locations which are not yet\nrevealed.", + "description": "If true, the returned list will include locations which are not yet revealed.", "location": "query", "type": "boolean" }, @@ -182,6 +182,173 @@ } }, "resources": { + "backups": { + "methods": { + "create": { + "description": "Creates a backup.", + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/backups", + "httpMethod": "POST", + "id": "file.projects.locations.backups.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "backupId": { + "description": "Required. The ID to use for the backup. The ID must be unique within the specified project and location. This value must start with a lowercase letter followed by up to 62 lowercase letters, numbers, or hyphens, and cannot end with a hyphen.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The backup's project and location, in the format projects/{project_id}/locations/{location}. In Cloud Filestore, backup locations map to GCP regions, for example **us-west1**.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1beta1/{+parent}/backups", + "request": { + "$ref": "Backup" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a backup.", + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/backups/{backupsId}", + "httpMethod": "DELETE", + "id": "file.projects.locations.backups.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The backup resource name, in the format projects/{project_id}/locations/{location}/backups/{backup_id}", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/backups/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1beta1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets the details of a specific backup.", + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/backups/{backupsId}", + "httpMethod": "GET", + "id": "file.projects.locations.backups.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. 
The backup resource name, in the format projects/{project_id}/locations/{location}/backups/{backup_id}.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/backups/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1beta1/{+name}", + "response": { + "$ref": "Backup" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists all backups in a project for either a specified location or for all locations.", + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/backups", + "httpMethod": "GET", + "id": "file.projects.locations.backups.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "List filter.", + "location": "query", + "type": "string" + }, + "orderBy": { + "description": "Sort results. Supported values are \"name\", \"name desc\" or \"\" (unsorted).", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "The maximum number of items to return.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The next_page_token value to use if there are additional results to retrieve for this list request.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The project and location for which to retrieve backup information, in the format projects/{project_id}/locations/{location}. In Cloud Filestore, backup locations map to GCP regions, for example **us-west1**. To retrieve backup information for all locations, use \"-\" for the {location} value.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1beta1/{+parent}/backups", + "response": { + "$ref": "ListBackupsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates the settings of a specific backup.", + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/backups/{backupsId}", + "httpMethod": "PATCH", + "id": "file.projects.locations.backups.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Output only. The resource name of the backup, in the format projects/{project_id}/locations/{location_id}/backups/{backup_id}.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/backups/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. Mask of fields to update. At least one path must be supplied in this field.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1beta1/{+name}", + "request": { + "$ref": "Backup" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, "instances": { "methods": { "create": { @@ -194,12 +361,12 @@ ], "parameters": { "instanceId": { - "description": "Required. The ID of the instance to create.\nThe ID must be unique within the specified project and location.\n\nThis value must start with a lowercase letter followed by up to 62\nlowercase letters, numbers, or hyphens, and cannot end with a hyphen.", + "description": "Required. The ID of the instance to create. The ID must be unique within the specified project and location. 
This value must start with a lowercase letter followed by up to 62 lowercase letters, numbers, or hyphens, and cannot end with a hyphen.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The instance's project and location, in the format\nprojects/{project_id}/locations/{location}. In Cloud Filestore,\nlocations map to GCP zones, for example **us-west1-b**.", + "description": "Required. The instance's project and location, in the format projects/{project_id}/locations/{location}. In Cloud Filestore, locations map to GCP zones, for example **us-west1-b**.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -227,7 +394,7 @@ ], "parameters": { "name": { - "description": "Required. The instance resource name, in the format\nprojects/{project_id}/locations/{location}/instances/{instance_id}", + "description": "Required. The instance resource name, in the format projects/{project_id}/locations/{location}/instances/{instance_id}", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", "required": true, @@ -252,7 +419,7 @@ ], "parameters": { "name": { - "description": "Required. The instance resource name, in the format\nprojects/{project_id}/locations/{location}/instances/{instance_id}.", + "description": "Required. The instance resource name, in the format projects/{project_id}/locations/{location}/instances/{instance_id}.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", "required": true, @@ -268,7 +435,7 @@ ] }, "list": { - "description": "Lists all instances in a project for either a specified location\nor for all locations.", + "description": "Lists all instances in a project for either a specified location or for all locations.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/instances", "httpMethod": "GET", "id": "file.projects.locations.instances.list", @@ -293,12 +460,12 @@ "type": "integer" }, "pageToken": { - "description": "The next_page_token value to use if there are additional\nresults to retrieve for this list request.", + "description": "The next_page_token value to use if there are additional results to retrieve for this list request.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The project and location for which to retrieve instance information,\nin the format projects/{project_id}/locations/{location}. In Cloud\nFilestore, locations map to GCP zones, for example **us-west1-b**. To\nretrieve instance information for all locations, use \"-\" for the {location}\nvalue.", + "description": "Required. The project and location for which to retrieve instance information, in the format projects/{project_id}/locations/{location}. In Cloud Filestore, locations map to GCP zones, for example **us-west1-b**. To retrieve instance information for all locations, use \"-\" for the {location} value.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -323,14 +490,14 @@ ], "parameters": { "name": { - "description": "Output only. The resource name of the instance, in the format\nprojects/{project_id}/locations/{location_id}/instances/{instance_id}.", + "description": "Output only. The resource name of the instance, in the format projects/{project_id}/locations/{location_id}/instances/{instance_id}.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", "required": true, "type": "string" }, "updateMask": { - "description": "Required. 
Mask of fields to update. At least one path must be supplied in this\nfield. The elements of the repeated paths field may only include these\nfields:\n\n* \"description\"\n* \"file_shares\"\n* \"labels\"", + "description": "Required. Mask of fields to update. At least one path must be supplied in this field. The elements of the repeated paths field may only include these fields: * \"description\" * \"file_shares\" * \"labels\"", "format": "google-fieldmask", "location": "query", "type": "string" @@ -346,13 +513,41 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] + }, + "restore": { + "description": "Restores an existing instance's file share from a snapshot or backup.", + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:restore", + "httpMethod": "POST", + "id": "file.projects.locations.instances.restore", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the instance, in the format projects/{project_id}/locations/{location_id}/instances/{instance_id}.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1beta1/{+name}:restore", + "request": { + "$ref": "RestoreInstanceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] } } }, "operations": { "methods": { "cancel": { - "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel", "httpMethod": "POST", "id": "file.projects.locations.operations.cancel", @@ -380,7 +575,7 @@ ] }, "delete": { - "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. 
If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", "httpMethod": "DELETE", "id": "file.projects.locations.operations.delete", @@ -405,7 +600,7 @@ ] }, "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", "httpMethod": "GET", "id": "file.projects.locations.operations.get", @@ -430,7 +625,7 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/operations", "httpMethod": "GET", "id": "file.projects.locations.operations.list", @@ -477,196 +672,114 @@ } } }, - "revision": "20200513", + "revision": "20201001", "rootUrl": "https://file.googleapis.com/", "schemas": { - "AttributeValue": { - "description": "The allowed types for [VALUE] in a `[KEY]:[VALUE]` attribute.", - "id": "AttributeValue", + "Backup": { + "description": "A Cloud Filestore backup.", + "id": "Backup", "properties": { - "boolValue": { - "description": "A Boolean value represented by `true` or `false`.", - "type": "boolean" + "capacityGb": { + "description": "Output only. Capacity of the backup. This would be the size of the file share when the backup is restored.", + "format": "int64", + "readOnly": true, + "type": "string" }, - "intValue": { - "description": "A 64-bit signed integer.", + "createTime": { + "description": "Output only. The time when the backup was created.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "description": { + "description": "A description of the backup with 2048 characters or less. Requests with longer descriptions will be rejected.", + "type": "string" + }, + "downloadBytes": { + "description": "Output only. 
Amount of bytes that will be downloaded if the backup is restored", "format": "int64", + "readOnly": true, "type": "string" }, - "stringValue": { - "$ref": "TruncatableString", - "description": "A string up to 256 bytes long." - } - }, - "type": "object" - }, - "Attributes": { - "description": "A set of attributes, each in the format `[KEY]:[VALUE]`.", - "id": "Attributes", - "properties": { - "attributeMap": { + "labels": { "additionalProperties": { - "$ref": "AttributeValue" + "type": "string" }, - "description": "The set of attributes. Each attribute's key can be up to 128 bytes\nlong. The value can be a string up to 256 bytes, a signed 64-bit integer,\nor the Boolean values `true` and `false`. For example:\n\n \"/instance_id\": \"my-instance\"\n \"/http/user_agent\": \"\"\n \"/http/request_bytes\": 300\n \"abc.com/myattribute\": true", + "description": "Resource labels to represent user provided metadata.", "type": "object" }, - "droppedAttributesCount": { - "description": "The number of attributes that were discarded. Attributes can be discarded\nbecause their keys are too long or because there are too many attributes.\nIf this value is 0 then all attributes are valid.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "BillingView": { - "description": "Message for reporting billing requests through Eventstream.", - "id": "BillingView", - "properties": { - "reportRequests": { - "description": "Billing requests to be reported for an [Eventstream\nresource](http://google3/cloud/eventstream/v2/resource_event.proto).\n\nEach request contains billing operations to be reported under a service\nname. See go/billing-view-construction for documentation on constructing\nbilling view report requests.", - "items": { - "$ref": "ReportRequest" - }, - "type": "array" - } - }, - "type": "object" - }, - "CancelOperationRequest": { - "description": "The request message for Operations.CancelOperation.", - "id": "CancelOperationRequest", - "properties": {}, - "type": "object" - }, - "Distribution": { - "description": "Distribution represents a frequency distribution of double-valued sample\npoints. It contains the size of the population of sample points plus\nadditional optional information:\n\n - the arithmetic mean of the samples\n - the minimum and maximum of the samples\n - the sum-squared-deviation of the samples, used to compute variance\n - a histogram of the values of the sample points", - "id": "Distribution", - "properties": { - "bucketCounts": { - "description": "The number of samples in each histogram bucket. `bucket_counts` are\noptional. If present, they must sum to the `count` value.\n\nThe buckets are defined below in `bucket_option`. There are N buckets.\n`bucket_counts[0]` is the number of samples in the underflow bucket.\n`bucket_counts[1]` to `bucket_counts[N-1]` are the numbers of samples\nin each of the finite buckets. And `bucket_counts[N] is the number\nof samples in the overflow bucket. See the comments of `bucket_option`\nbelow for more details.\n\nAny suffix of trailing zeros may be omitted.", - "items": { - "format": "int64", - "type": "string" - }, - "type": "array" + "name": { + "description": "Output only. The resource name of the backup, in the format projects/{project_id}/locations/{location_id}/backups/{backup_id}.", + "readOnly": true, + "type": "string" }, - "count": { - "description": "The total number of samples in the distribution. 
Must be \u003e= 0.", - "format": "int64", + "sourceFileShare": { + "description": "Name of the file share in the source Cloud Filestore instance that the backup is created from.", "type": "string" }, - "exemplars": { - "description": "Example points. Must be in increasing order of `value` field.", - "items": { - "$ref": "Exemplar" - }, - "type": "array" + "sourceInstance": { + "description": "The resource name of the source Cloud Filestore instance, in the format projects/{project_id}/locations/{location_id}/instances/{instance_id}, used to create this backup.", + "type": "string" }, - "explicitBuckets": { - "$ref": "ExplicitBuckets", - "description": "Buckets with arbitrary user-provided width." - }, - "exponentialBuckets": { - "$ref": "ExponentialBuckets", - "description": "Buckets with exponentially growing width." - }, - "linearBuckets": { - "$ref": "LinearBuckets", - "description": "Buckets with constant width." - }, - "maximum": { - "description": "The maximum of the population of values. Ignored if `count` is zero.", - "format": "double", - "type": "number" - }, - "mean": { - "description": "The arithmetic mean of the samples in the distribution. If `count` is\nzero then this field must be zero.", - "format": "double", - "type": "number" - }, - "minimum": { - "description": "The minimum of the population of values. Ignored if `count` is zero.", - "format": "double", - "type": "number" - }, - "sumOfSquaredDeviation": { - "description": "The sum of squared deviations from the mean:\n Sum[i=1..count]((x_i - mean)^2)\nwhere each x_i is a sample values. If `count` is zero then this field\nmust be zero, otherwise validation of the request fails.", - "format": "double", - "type": "number" - } - }, - "type": "object" - }, - "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", - "id": "Empty", - "properties": {}, - "type": "object" - }, - "Exemplar": { - "description": "Exemplars are example points that may be used to annotate aggregated\ndistribution values. They are metadata that gives information about a\nparticular value added to a Distribution bucket, such as a trace ID that\nwas active when a value was added. They may contain further information,\nsuch as a example values and timestamps, origin, etc.", - "id": "Exemplar", - "properties": { - "attachments": { - "description": "Contextual information about the example value. Examples are:\n\n Trace: type.googleapis.com/google.monitoring.v3.SpanContext\n\n Literal string: type.googleapis.com/google.protobuf.StringValue\n\n Labels dropped during aggregation:\n type.googleapis.com/google.monitoring.v3.DroppedLabels\n\nThere may be only a single attachment of any given message type in a\nsingle exemplar, and this is enforced by the system.", - "items": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "type": "object" - }, - "type": "array" + "sourceInstanceTier": { + "description": "Output only. 
The service tier of the source Cloud Filestore instance that this backup is created from.", + "enum": [ + "TIER_UNSPECIFIED", + "STANDARD", + "PREMIUM", + "BASIC_HDD", + "BASIC_SSD", + "HIGH_SCALE_SSD" + ], + "enumDescriptions": [ + "Not set.", + "STANDARD tier. BASIC_HDD is the preferred term for this tier.", + "PREMIUM tier. BASIC_SSD is the preferred term for this tier.", + "BASIC instances offer a maximum capacity of 63.9 TB. BASIC_HDD is an alias for STANDARD Tier, offering economical performance backed by HDD.", + "BASIC instances offer a maximum capacity of 63.9 TB. BASIC_SSD is an alias for PREMIUM Tier, and offers improved performance backed by SSD.", + "HIGH_SCALE instances offer expanded capacity and performance scaling capabilities." + ], + "readOnly": true, + "type": "string" }, - "timestamp": { - "description": "The observation (sampling) time of the above value.", - "format": "google-datetime", + "state": { + "description": "Output only. The backup state.", + "enum": [ + "STATE_UNSPECIFIED", + "CREATING", + "FINALIZING", + "READY", + "DELETING" + ], + "enumDescriptions": [ + "State not set.", + "Backup is being created.", + "Backup has been taken and the operation is being finalized. At this point, changes to the file share will not be reflected in the backup.", + "Backup is available for use.", + "Backup is being deleted." + ], + "readOnly": true, "type": "string" }, - "value": { - "description": "Value of the exemplar point. This value determines to which bucket the\nexemplar belongs.", - "format": "double", - "type": "number" + "storageBytes": { + "description": "Output only. The size of the storage used by the backup. As backups share storage, this number is expected to change with backup creation/deletion.", + "format": "int64", + "readOnly": true, + "type": "string" } }, "type": "object" }, - "ExplicitBuckets": { - "description": "Describing buckets with arbitrary user-provided width.", - "id": "ExplicitBuckets", - "properties": { - "bounds": { - "description": "'bound' is a list of strictly increasing boundaries between\nbuckets. Note that a list of length N-1 defines N buckets because\nof fenceposting. See comments on `bucket_options` for details.\n\nThe i'th finite bucket covers the interval\n [bound[i-1], bound[i])\nwhere i ranges from 1 to bound_size() - 1. Note that there are no\nfinite buckets at all if 'bound' only contains a single element; in\nthat special case the single bound defines the boundary between the\nunderflow and overflow buckets.\n\nbucket number lower bound upper bound\n i == 0 (underflow) -inf bound[i]\n 0 \u003c i \u003c bound_size() bound[i-1] bound[i]\n i == bound_size() (overflow) bound[i-1] +inf", - "items": { - "format": "double", - "type": "number" - }, - "type": "array" - } - }, + "CancelOperationRequest": { + "description": "The request message for Operations.CancelOperation.", + "id": "CancelOperationRequest", + "properties": {}, "type": "object" }, - "ExponentialBuckets": { - "description": "Describing buckets with exponentially growing width.", - "id": "ExponentialBuckets", - "properties": { - "growthFactor": { - "description": "The i'th exponential bucket covers the interval\n [scale * growth_factor^(i-1), scale * growth_factor^i)\nwhere i ranges from 1 to num_finite_buckets inclusive.\nMust be larger than 1.0.", - "format": "double", - "type": "number" - }, - "numFiniteBuckets": { - "description": "The number of finite buckets. 
With the underflow and overflow buckets,\nthe total number of buckets is `num_finite_buckets` + 2.\nSee comments on `bucket_options` for details.", - "format": "int32", - "type": "integer" - }, - "scale": { - "description": "The i'th exponential bucket covers the interval\n [scale * growth_factor^(i-1), scale * growth_factor^i)\nwhere i ranges from 1 to num_finite_buckets inclusive.\nMust be \u003e 0.", - "format": "double", - "type": "number" - } - }, + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", + "id": "Empty", + "properties": {}, "type": "object" }, "FileShareConfig": { @@ -674,102 +787,24 @@ "id": "FileShareConfig", "properties": { "capacityGb": { - "description": "File share capacity in gigabytes (GB).\nCloud Filestore defines 1 GB as 1024^3 bytes.", + "description": "File share capacity in gigabytes (GB). Cloud Filestore defines 1 GB as 1024^3 bytes.", "format": "int64", "type": "string" }, "name": { "description": "The name of the file share (must be 16 characters or less).", "type": "string" - } - }, - "type": "object" - }, - "GoogleApiServicecontrolV1Operation": { - "description": "Represents information regarding an operation.", - "id": "GoogleApiServicecontrolV1Operation", - "properties": { - "consumerId": { - "description": "Identity of the consumer who is using the service.\nThis field should be filled in for the operations initiated by a\nconsumer, but not for service-initiated operations that are\nnot related to a specific consumer.\n\n- This can be in one of the following formats:\n - project:PROJECT_ID,\n - project`_`number:PROJECT_NUMBER,\n - projects/PROJECT_ID or PROJECT_NUMBER,\n - folders/FOLDER_NUMBER,\n - organizations/ORGANIZATION_NUMBER,\n - api`_`key:API_KEY.", - "type": "string" - }, - "endTime": { - "description": "End time of the operation.\nRequired when the operation is used in ServiceController.Report,\nbut optional when the operation is used in ServiceController.Check.", - "format": "google-datetime", - "type": "string" - }, - "importance": { - "description": "DO NOT USE. This is an experimental field.", - "enum": [ - "LOW", - "HIGH", - "DEBUG" - ], - "enumDescriptions": [ - "The API implementation may cache and aggregate the data.\nThe data may be lost when rare and unexpected system failures occur.", - "The API implementation doesn't cache and aggregate the data.\nIf the method returns successfully, it's guaranteed that the data has\nbeen persisted in durable storage.", - "In addition to the behavior described in HIGH, DEBUG enables\nadditional validation logic that is only useful during the onboarding\nprocess. This is only available to Google internal services and\nthe service must be whitelisted by chemist-dev@google.com in order\nto use this level." - ], - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels describing the operation. Only the following labels are allowed:\n\n- Labels describing monitored resources as defined in\n the service configuration.\n- Default labels of metric values. 
When specified, labels defined in the\n metric value override these default.\n- The following labels defined by Google Cloud Platform:\n - `cloud.googleapis.com/location` describing the location where the\n operation happened,\n - `servicecontrol.googleapis.com/user_agent` describing the user agent\n of the API request,\n - `servicecontrol.googleapis.com/service_agent` describing the service\n used to handle the API request (e.g. ESP),\n - `servicecontrol.googleapis.com/platform` describing the platform\n where the API is served, such as App Engine, Compute Engine, or\n Kubernetes Engine.", - "type": "object" - }, - "logEntries": { - "description": "Represents information to be logged.", - "items": { - "$ref": "LogEntry" - }, - "type": "array" - }, - "metricValueSets": { - "description": "Represents information about this operation. Each MetricValueSet\ncorresponds to a metric defined in the service configuration.\nThe data type used in the MetricValueSet must agree with\nthe data type specified in the metric definition.\n\nWithin a single operation, it is not allowed to have more than one\nMetricValue instances that have the same metric names and identical\nlabel value combinations. If a request has such duplicated MetricValue\ninstances, the entire request is rejected with\nan invalid argument error.", - "items": { - "$ref": "MetricValueSet" - }, - "type": "array" - }, - "operationId": { - "description": "Identity of the operation. This must be unique within the scope of the\nservice that generated the operation. If the service calls\nCheck() and Report() on the same operation, the two calls should carry\nthe same id.\n\nUUID version 4 is recommended, though not required.\nIn scenarios where an operation is computed from existing information\nand an idempotent id is desirable for deduplication purpose, UUID version 5\nis recommended. See RFC 4122 for details.", - "type": "string" }, - "operationName": { - "description": "Fully qualified name of the operation. Reserved for future use.", - "type": "string" - }, - "quotaProperties": { - "$ref": "QuotaProperties", - "description": "Represents the properties needed for quota check. Applicable only if this\noperation is for a quota check request. If this is not specified, no quota\ncheck will be performed." - }, - "resources": { - "description": "The resources that are involved in the operation.\nThe maximum supported number of entries in this field is 100.", + "nfsExportOptions": { + "description": "Nfs Export Options. There is a limit of 10 export options per file share.", "items": { - "$ref": "ResourceInfo" + "$ref": "NfsExportOptions" }, "type": "array" }, - "startTime": { - "description": "Required. Start time of the operation.", - "format": "google-datetime", + "sourceBackup": { + "description": "The resource name of the backup, in the format projects/{project_id}/locations/{location_id}/backups/{backup_id}, that this file share has been restored from.", "type": "string" - }, - "traceSpans": { - "description": "Unimplemented. A list of Cloud Trace spans. The span names shall contain\nthe id of the destination project which can be either the produce or the\nconsumer project.", - "items": { - "$ref": "TraceSpan" - }, - "type": "array" - }, - "userLabels": { - "additionalProperties": { - "type": "string" - }, - "description": "User defined labels for the resource that this operation is associated\nwith. 
Only a combination of 1000 user labels per consumer project are\nallowed.", - "type": "object" } }, "type": "object" @@ -778,70 +813,78 @@ "id": "GoogleCloudSaasacceleratorManagementProvidersV1Instance", "properties": { "consumerDefinedName": { - "description": "consumer_defined_name is the name that is set by the consumer. On the other\nhand Name field represents system-assigned id of an instance so consumers\nare not necessarily aware of it.\nconsumer_defined_name is used for notification/UI purposes for consumer to\nrecognize their instances.", + "description": "consumer_defined_name is the name that is set by the consumer. On the other hand Name field represents system-assigned id of an instance so consumers are not necessarily aware of it. consumer_defined_name is used for notification/UI purposes for consumer to recognize their instances.", "type": "string" }, "createTime": { "description": "Output only. Timestamp when the resource was created.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "Optional. Resource labels to represent user provided metadata. Each label\nis a key-value pair, where both the key and the value are arbitrary strings\nprovided by the user.", + "description": "Optional. Resource labels to represent user provided metadata. Each label is a key-value pair, where both the key and the value are arbitrary strings provided by the user.", "type": "object" }, "maintenancePolicyNames": { "additionalProperties": { "type": "string" }, - "description": "The MaintenancePolicies that have been attached to the instance.\nThe key must be of the type name of the oneof policy name defined in\nMaintenancePolicy, and the referenced policy must define the same policy\ntype. For complete details of MaintenancePolicy, please refer to\ngo/cloud-saas-mw-ug.", + "description": "The MaintenancePolicies that have been attached to the instance. The key must be of the type name of the oneof policy name defined in MaintenancePolicy, and the referenced policy must define the same policy type. For complete details of MaintenancePolicy, please refer to go/cloud-saas-mw-ug.", "type": "object" }, "maintenanceSchedules": { "additionalProperties": { "$ref": "GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSchedule" }, - "description": "The MaintenanceSchedule contains the scheduling information of published\nmaintenance schedule.", + "description": "The MaintenanceSchedule contains the scheduling information of published maintenance schedule with same key as software_versions.", "type": "object" }, + "maintenanceSettings": { + "$ref": "GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSettings", + "description": "Optional. The MaintenanceSettings associated with instance." + }, "name": { - "description": "Unique name of the resource. It uses the form:\n `projects/{project_id}/locations/{location_id}/instances/{instance_id}`", + "description": "Unique name of the resource. It uses the form: `projects/{project_id}/locations/{location_id}/instances/{instance_id}`", "type": "string" }, "producerMetadata": { "additionalProperties": { "type": "string" }, - "description": "Output only. Custom string attributes used primarily to expose\nproducer-specific information in monitoring dashboards.\nSee go/get-instance-metadata.", + "description": "Output only. Custom string attributes used primarily to expose producer-specific information in monitoring dashboards. 
See go/get-instance-metadata.", + "readOnly": true, "type": "object" }, "provisionedResources": { - "description": "Output only. The list of data plane resources provisioned for this\ninstance, e.g. compute VMs. See go/get-instance-metadata.", + "description": "Output only. The list of data plane resources provisioned for this instance, e.g. compute VMs. See go/get-instance-metadata.", "items": { "$ref": "GoogleCloudSaasacceleratorManagementProvidersV1ProvisionedResource" }, + "readOnly": true, "type": "array" }, "slmInstanceTemplate": { - "description": "Link to the SLM instance template. Only populated when updating SLM\ninstances via SSA's Actuation service adaptor.\nService producers with custom control plane (e.g. Cloud SQL) doesn't\nneed to populate this field. Instead they should use software_versions.", + "description": "Link to the SLM instance template. Only populated when updating SLM instances via SSA's Actuation service adaptor. Service producers with custom control plane (e.g. Cloud SQL) doesn't need to populate this field. Instead they should use software_versions.", "type": "string" }, "sloMetadata": { "$ref": "GoogleCloudSaasacceleratorManagementProvidersV1SloMetadata", - "description": "Output only. SLO metadata for instance classification in the\nStandardized dataplane SLO platform.\nSee go/cloud-ssa-standard-slo for feature description." + "description": "Output only. SLO metadata for instance classification in the Standardized dataplane SLO platform. See go/cloud-ssa-standard-slo for feature description.", + "readOnly": true }, "softwareVersions": { "additionalProperties": { "type": "string" }, - "description": "Software versions that are used to deploy this instance. This can be\nmutated by rollout services.", + "description": "Software versions that are used to deploy this instance. This can be mutated by rollout services.", "type": "object" }, "state": { - "description": "Output only. Current lifecycle state of the resource (e.g. if it's being\ncreated or ready to use).", + "description": "Output only. Current lifecycle state of the resource (e.g. if it's being created or ready to use).", "enum": [ "STATE_UNSPECIFIED", "CREATING", @@ -860,26 +903,29 @@ "Instance is being deleted.", "Instance encountered an error and is in indeterministic state." ], + "readOnly": true, "type": "string" }, "tenantProjectId": { - "description": "Output only. ID of the associated GCP tenant project.\nSee go/get-instance-metadata.", + "description": "Output only. ID of the associated GCP tenant project. See go/get-instance-metadata.", + "readOnly": true, "type": "string" }, "updateTime": { "description": "Output only. Timestamp when the resource was last modified.", "format": "google-datetime", + "readOnly": true, "type": "string" } }, "type": "object" }, "GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSchedule": { - "description": "Maintenance schedule which is exposed to customer and potentially end user,\nindicating published upcoming future maintenance schedule", + "description": "Maintenance schedule which is exposed to customer and potentially end user, indicating published upcoming future maintenance schedule", "id": "GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSchedule", "properties": { "canReschedule": { - "description": "Can this scheduled update be rescheduled?\nBy default, it's true and API needs to do explicitly check whether it's\nset, if it's set as false explicitly, it's false", + "description": "Can this scheduled update be rescheduled? 
By default, it's true and API needs to do explicitly check whether it's set, if it's set as false explicitly, it's false", "type": "boolean" }, "endTime": { @@ -888,7 +934,7 @@ "type": "string" }, "rolloutManagementPolicy": { - "description": "The rollout management policy this maintenance schedule is associated\nwith. When doing reschedule update request, the reschedule should be\nagainst this given policy.", + "description": "The rollout management policy this maintenance schedule is associated with. When doing reschedule update request, the reschedule should be against this given policy.", "type": "string" }, "startTime": { @@ -899,12 +945,23 @@ }, "type": "object" }, + "GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSettings": { + "description": "Maintenance settings associated with instance. Allows service producers and end users to assign settings that controls maintenance on this instance.", + "id": "GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSettings", + "properties": { + "exclude": { + "description": "Optional. Exclude instance from maintenance. When true, rollout service will not attempt maintenance on the instance. Rollout service will include the instance in reported rollout progress as not attempted.", + "type": "boolean" + } + }, + "type": "object" + }, "GoogleCloudSaasacceleratorManagementProvidersV1NodeSloMetadata": { - "description": "Node information for custom per-node SLO implementations.\nSSA does not support per-node SLO, but producers can populate per-node\ninformation in SloMetadata for custom precomputations.\nSSA Eligibility Exporter will emit per-node metric based on this information.", + "description": "Node information for custom per-node SLO implementations. SSA does not support per-node SLO, but producers can populate per-node information in SloMetadata for custom precomputations. SSA Eligibility Exporter will emit per-node metric based on this information.", "id": "GoogleCloudSaasacceleratorManagementProvidersV1NodeSloMetadata", "properties": { "exclusions": { - "description": "By default node is eligible if instance is eligible.\nBut individual node might be excluded from SLO by adding entry here.\nFor semantic see SloMetadata.exclusions.\nIf both instance and node level exclusions are present for time period,\nthe node level's reason will be reported by Eligibility Exporter.", + "description": "By default node is eligible if instance is eligible. But individual node might be excluded from SLO by adding entry here. For semantic see SloMetadata.exclusions. If both instance and node level exclusions are present for time period, the node level's reason will be reported by Eligibility Exporter.", "items": { "$ref": "GoogleCloudSaasacceleratorManagementProvidersV1SloExclusion" }, @@ -915,7 +972,7 @@ "type": "string" }, "nodeId": { - "description": "The id of the node.\nThis should be equal to SaasInstanceNode.node_id.", + "description": "The id of the node. This should be equal to SaasInstanceNode.node_id.", "type": "string" } }, @@ -926,18 +983,18 @@ "id": "GoogleCloudSaasacceleratorManagementProvidersV1ProvisionedResource", "properties": { "resourceType": { - "description": "Type of the resource. This can be either a GCP resource or a custom one\n(e.g. another cloud provider's VM). 
For GCP compute resources use singular\nform of the names listed in GCP compute API documentation\n(https://cloud.google.com/compute/docs/reference/rest/v1/), prefixed with\n'compute-', for example: 'compute-instance', 'compute-disk',\n'compute-autoscaler'.", + "description": "Type of the resource. This can be either a GCP resource or a custom one (e.g. another cloud provider's VM). For GCP compute resources use singular form of the names listed in GCP compute API documentation (https://cloud.google.com/compute/docs/reference/rest/v1/), prefixed with 'compute-', for example: 'compute-instance', 'compute-disk', 'compute-autoscaler'.", "type": "string" }, "resourceUrl": { - "description": "URL identifying the resource, e.g.\n\"https://www.googleapis.com/compute/v1/projects/...)\".", + "description": "URL identifying the resource, e.g. \"https://www.googleapis.com/compute/v1/projects/...)\".", "type": "string" } }, "type": "object" }, "GoogleCloudSaasacceleratorManagementProvidersV1SloEligibility": { - "description": "SloEligibility is a tuple containing eligibility value: true if an instance\nis eligible for SLO calculation or false if it should be excluded from all\nSLO-related calculations along with a user-defined reason.", + "description": "SloEligibility is a tuple containing eligibility value: true if an instance is eligible for SLO calculation or false if it should be excluded from all SLO-related calculations along with a user-defined reason.", "id": "GoogleCloudSaasacceleratorManagementProvidersV1SloEligibility", "properties": { "eligible": { @@ -945,7 +1002,7 @@ "type": "boolean" }, "reason": { - "description": "User-defined reason for the current value of instance eligibility. Usually,\nthis can be directly mapped to the internal state. An empty reason is\nallowed.", + "description": "User-defined reason for the current value of instance eligibility. Usually, this can be directly mapped to the internal state. An empty reason is allowed.", "type": "string" } }, @@ -956,16 +1013,16 @@ "id": "GoogleCloudSaasacceleratorManagementProvidersV1SloExclusion", "properties": { "duration": { - "description": "Exclusion duration. No restrictions on the possible values.\n\nWhen an ongoing operation is taking longer than initially expected,\nan existing entry in the exclusion list can be updated by extending the\nduration. This is supported by the subsystem exporting eligibility data\nas long as such extension is committed at least 10 minutes before the\noriginal exclusion expiration - otherwise it is possible that there will\nbe \"gaps\" in the exclusion application in the exported timeseries.", + "description": "Exclusion duration. No restrictions on the possible values. When an ongoing operation is taking longer than initially expected, an existing entry in the exclusion list can be updated by extending the duration. This is supported by the subsystem exporting eligibility data as long as such extension is committed at least 10 minutes before the original exclusion expiration - otherwise it is possible that there will be \"gaps\" in the exclusion application in the exported timeseries.", "format": "google-duration", "type": "string" }, "reason": { - "description": "Human-readable reason for the exclusion.\nThis should be a static string (e.g. \"Disruptive update in progress\")\nand should not contain dynamically generated data (e.g. instance name).\nCan be left empty.", + "description": "Human-readable reason for the exclusion. This should be a static string (e.g. 
\"Disruptive update in progress\") and should not contain dynamically generated data (e.g. instance name). Can be left empty.", "type": "string" }, "sliName": { - "description": "Name of an SLI that this exclusion applies to. Can be left empty,\nsignaling that the instance should be excluded from all SLIs defined\nin the service SLO configuration.", + "description": "Name of an SLI that this exclusion applies to. Can be left empty, signaling that the instance should be excluded from all SLIs defined in the service SLO configuration.", "type": "string" }, "startTime": { @@ -977,7 +1034,7 @@ "type": "object" }, "GoogleCloudSaasacceleratorManagementProvidersV1SloMetadata": { - "description": "SloMetadata contains resources required for proper SLO classification of the\ninstance.", + "description": "SloMetadata contains resources required for proper SLO classification of the instance.", "id": "GoogleCloudSaasacceleratorManagementProvidersV1SloMetadata", "properties": { "eligibility": { @@ -985,93 +1042,21 @@ "description": "Optional. User-defined instance eligibility." }, "exclusions": { - "description": "List of SLO exclusion windows. When multiple entries in the list match\n(matching the exclusion time-window against current time point)\nthe exclusion reason used in the first matching entry will be published.\n\nIt is not needed to include expired exclusion in this list, as only the\ncurrently applicable exclusions are taken into account by the eligibility\nexporting subsystem (the historical state of exclusions will be reflected\nin the historically produced timeseries regardless of the current state).\n\nThis field can be used to mark the instance as temporary ineligible\nfor the purpose of SLO calculation. For permanent instance SLO exclusion,\nuse of custom instance eligibility is recommended. See 'eligibility' field\nbelow.", + "description": "List of SLO exclusion windows. When multiple entries in the list match (matching the exclusion time-window against current time point) the exclusion reason used in the first matching entry will be published. It is not needed to include expired exclusion in this list, as only the currently applicable exclusions are taken into account by the eligibility exporting subsystem (the historical state of exclusions will be reflected in the historically produced timeseries regardless of the current state). This field can be used to mark the instance as temporary ineligible for the purpose of SLO calculation. For permanent instance SLO exclusion, use of custom instance eligibility is recommended. See 'eligibility' field below.", "items": { "$ref": "GoogleCloudSaasacceleratorManagementProvidersV1SloExclusion" }, "type": "array" }, "nodes": { - "description": "Optional. List of nodes.\nSome producers need to use per-node metadata to calculate SLO.\nThis field allows such producers to publish per-node SLO meta data,\nwhich will be consumed by SSA Eligibility Exporter and published in the\nform of per node metric to Monarch.", + "description": "Optional. List of nodes. Some producers need to use per-node metadata to calculate SLO. This field allows such producers to publish per-node SLO meta data, which will be consumed by SSA Eligibility Exporter and published in the form of per node metric to Monarch.", "items": { "$ref": "GoogleCloudSaasacceleratorManagementProvidersV1NodeSloMetadata" }, "type": "array" }, "tier": { - "description": "Name of the SLO tier the Instance belongs to. 
This name will be expected to\nmatch the tiers specified in the service SLO configuration.\n\nField is mandatory and must not be empty.", - "type": "string" - } - }, - "type": "object" - }, - "HttpRequest": { - "description": "A common proto for logging HTTP requests. Only contains semantics\ndefined by the HTTP specification. Product-specific logging\ninformation MUST be defined in a separate message.", - "id": "HttpRequest", - "properties": { - "cacheFillBytes": { - "description": "The number of HTTP response bytes inserted into cache. Set only when a\ncache fill was attempted.", - "format": "int64", - "type": "string" - }, - "cacheHit": { - "description": "Whether or not an entity was served from cache\n(with or without validation).", - "type": "boolean" - }, - "cacheLookup": { - "description": "Whether or not a cache lookup was attempted.", - "type": "boolean" - }, - "cacheValidatedWithOriginServer": { - "description": "Whether or not the response was validated with the origin server before\nbeing served from cache. This field is only meaningful if `cache_hit` is\nTrue.", - "type": "boolean" - }, - "latency": { - "description": "The request processing latency on the server, from the time the request was\nreceived until the response was sent.", - "format": "google-duration", - "type": "string" - }, - "protocol": { - "description": "Protocol used for the request. Examples: \"HTTP/1.1\", \"HTTP/2\", \"websocket\"", - "type": "string" - }, - "referer": { - "description": "The referer URL of the request, as defined in\n[HTTP/1.1 Header Field\nDefinitions](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).", - "type": "string" - }, - "remoteIp": { - "description": "The IP address (IPv4 or IPv6) of the client that issued the HTTP\nrequest. Examples: `\"192.168.1.1\"`, `\"FE80::0202:B3FF:FE1E:8329\"`.", - "type": "string" - }, - "requestMethod": { - "description": "The request method. Examples: `\"GET\"`, `\"HEAD\"`, `\"PUT\"`, `\"POST\"`.", - "type": "string" - }, - "requestSize": { - "description": "The size of the HTTP request message in bytes, including the request\nheaders and the request body.", - "format": "int64", - "type": "string" - }, - "requestUrl": { - "description": "The scheme (http, https), the host name, the path, and the query\nportion of the URL that was requested.\nExample: `\"http://example.com/some/info?color=red\"`.", - "type": "string" - }, - "responseSize": { - "description": "The size of the HTTP response message sent back to the client, in bytes,\nincluding the response headers and the response body.", - "format": "int64", - "type": "string" - }, - "serverIp": { - "description": "The IP address (IPv4 or IPv6) of the origin server that the request was\nsent to.", - "type": "string" - }, - "status": { - "description": "The response code indicating the status of the response.\nExamples: 200, 404.", - "format": "int32", - "type": "integer" - }, - "userAgent": { - "description": "The user agent sent by the client. Example:\n`\"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET\nCLR 1.0.3705)\"`.", + "description": "Name of the SLO tier the Instance belongs to. This name will be expected to match the tiers specified in the service SLO configuration. Field is mandatory and must not be empty.", "type": "string" } }, @@ -1084,6 +1069,7 @@ "createTime": { "description": "Output only. 
The time when the instance was created.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "description": { @@ -1091,11 +1077,11 @@ "type": "string" }, "etag": { - "description": "Server-specified ETag for the instance resource to prevent simultaneous\nupdates from overwriting each other.", + "description": "Server-specified ETag for the instance resource to prevent simultaneous updates from overwriting each other.", "type": "string" }, "fileShares": { - "description": "File system shares on the instance.\nFor this version, only a single file share is supported.", + "description": "File system shares on the instance. For this version, only a single file share is supported.", "items": { "$ref": "FileShareConfig" }, @@ -1109,11 +1095,12 @@ "type": "object" }, "name": { - "description": "Output only. The resource name of the instance, in the format\nprojects/{project_id}/locations/{location_id}/instances/{instance_id}.", + "description": "Output only. The resource name of the instance, in the format projects/{project_id}/locations/{location_id}/instances/{instance_id}.", + "readOnly": true, "type": "string" }, "networks": { - "description": "VPC networks to which the instance is connected.\nFor this version, only a single network is supported.", + "description": "VPC networks to which the instance is connected. For this version, only a single network is supported.", "items": { "$ref": "NetworkConfig" }, @@ -1134,15 +1121,17 @@ "State not set.", "The instance is being created.", "The instance is available for use.", - "Work is being done on the instance. You can get further details from the\n`statusMessage` field of the `Instance` resource.", + "Work is being done on the instance. You can get further details from the `statusMessage` field of the `Instance` resource.", "The instance is shutting down.", - "The instance is experiencing an issue and might be unusable. You can get\nfurther details from the `statusMessage` field of the `Instance`\nresource.", - "The instance is restoring a snapshot or backup to an existing file share\nand may be unusable during this time." + "The instance is experiencing an issue and might be unusable. You can get further details from the `statusMessage` field of the `Instance` resource.", + "The instance is restoring a snapshot or backup to an existing file share and may be unusable during this time." ], + "readOnly": true, "type": "string" }, "statusMessage": { "description": "Output only. Additional information about the instance state, if available.", + "readOnly": true, "type": "string" }, "tier": { @@ -1159,33 +1148,36 @@ "Not set.", "STANDARD tier. BASIC_HDD is the preferred term for this tier.", "PREMIUM tier. BASIC_SSD is the preferred term for this tier.", - "BASIC instances offer a maximum capacity of 63.9 TB.\nBASIC_HDD is an alias for STANDARD Tier, offering economical\nperformance backed by HDD.", - "BASIC instances offer a maximum capacity of 63.9 TB.\nBASIC_SSD is an alias for PREMIUM Tier, and offers improved\nperformance backed by SSD.", - "HIGH_SCALE instances offer expanded capacity and performance scaling\ncapabilities." + "BASIC instances offer a maximum capacity of 63.9 TB. BASIC_HDD is an alias for STANDARD Tier, offering economical performance backed by HDD.", + "BASIC instances offer a maximum capacity of 63.9 TB. BASIC_SSD is an alias for PREMIUM Tier, and offers improved performance backed by SSD.", + "HIGH_SCALE instances offer expanded capacity and performance scaling capabilities." 
], "type": "string" } }, "type": "object" }, - "LinearBuckets": { - "description": "Describing buckets with constant width.", - "id": "LinearBuckets", + "ListBackupsResponse": { + "description": "ListBackupsResponse is the result of ListBackupsRequest.", + "id": "ListBackupsResponse", "properties": { - "numFiniteBuckets": { - "description": "The number of finite buckets. With the underflow and overflow buckets,\nthe total number of buckets is `num_finite_buckets` + 2.\nSee comments on `bucket_options` for details.", - "format": "int32", - "type": "integer" + "backups": { + "description": "A list of backups in the project for the specified location. If the {location} value in the request is \"-\", the response contains a list of backups from all locations. If any location is unreachable, the response will only return backups in reachable locations and the \"unreachable\" field will be populated with a list of unreachable locations.", + "items": { + "$ref": "Backup" + }, + "type": "array" }, - "offset": { - "description": "The i'th linear bucket covers the interval\n [offset + (i-1) * width, offset + i * width)\nwhere i ranges from 1 to num_finite_buckets, inclusive.", - "format": "double", - "type": "number" + "nextPageToken": { + "description": "The token you can use to retrieve the next page of results. Not returned if there are no more results in the list.", + "type": "string" }, - "width": { - "description": "The i'th linear bucket covers the interval\n [offset + (i-1) * width, offset + i * width)\nwhere i ranges from 1 to num_finite_buckets, inclusive.\nMust be strictly positive.", - "format": "double", - "type": "number" + "unreachable": { + "description": "Locations that could not be reached.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" @@ -1195,14 +1187,14 @@ "id": "ListInstancesResponse", "properties": { "instances": { - "description": "A list of instances in the project for the specified location.\n\nIf the {location} value in the request is \"-\", the response contains a list\nof instances from all locations. If any location is unreachable, the\nresponse will only return instances in reachable locations and the\n\"unreachable\" field will be populated with a list of unreachable locations.", + "description": "A list of instances in the project for the specified location. If the {location} value in the request is \"-\", the response contains a list of instances from all locations. If any location is unreachable, the response will only return instances in reachable locations and the \"unreachable\" field will be populated with a list of unreachable locations.", "items": { "$ref": "Instance" }, "type": "array" }, "nextPageToken": { - "description": "The token you can use to retrieve the next page of results. Not returned\nif there are no more results in the list.", + "description": "The token you can use to retrieve the next page of results. Not returned if there are no more results in the list.", "type": "string" }, "unreachable": { @@ -1256,14 +1248,14 @@ "id": "Location", "properties": { "displayName": { - "description": "The friendly name for this location, typically a nearby city name.\nFor example, \"Tokyo\".", + "description": "The friendly name for this location, typically a nearby city name. For example, \"Tokyo\".", "type": "string" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "Cross-service attributes for the location. 
For example\n\n {\"cloud.googleapis.com/region\": \"us-east1\"}", + "description": "Cross-service attributes for the location. For example {\"cloud.googleapis.com/region\": \"us-east1\"}", "type": "object" }, "locationId": { @@ -1275,280 +1267,112 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata. For example the available capacity at the given\nlocation.", + "description": "Service-specific metadata. For example the available capacity at the given location.", "type": "object" }, "name": { - "description": "Resource name for the location, which may vary between implementations.\nFor example: `\"projects/example-project/locations/us-east1\"`", + "description": "Resource name for the location, which may vary between implementations. For example: `\"projects/example-project/locations/us-east1\"`", "type": "string" } }, "type": "object" }, - "LogEntry": { - "description": "An individual log entry.", - "id": "LogEntry", + "NetworkConfig": { + "description": "Network configuration for the instance.", + "id": "NetworkConfig", "properties": { - "httpRequest": { - "$ref": "HttpRequest", - "description": "Optional. Information about the HTTP request associated with this\nlog entry, if applicable." - }, - "insertId": { - "description": "A unique ID for the log entry used for deduplication. If omitted,\nthe implementation will generate one based on operation_id.", - "type": "string" - }, - "labels": { - "additionalProperties": { + "ipAddresses": { + "description": "Output only. IPv4 addresses in the format {octet 1}.{octet 2}.{octet 3}.{octet 4} or IPv6 addresses in the format {block 1}:{block 2}:{block 3}:{block 4}:{block 5}:{block 6}:{block 7}:{block 8}.", + "items": { "type": "string" }, - "description": "A set of user-defined (key, value) data that provides additional\ninformation about the log entry.", - "type": "object" - }, - "name": { - "description": "Required. The log to which this log entry belongs. Examples: `\"syslog\"`,\n`\"book_log\"`.", - "type": "string" - }, - "operation": { - "$ref": "LogEntryOperation", - "description": "Optional. Information about an operation associated with the log entry, if\napplicable." - }, - "protoPayload": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "The log entry payload, represented as a protocol buffer that is\nexpressed as a JSON object. The only accepted type currently is\nAuditLog.", - "type": "object" - }, - "severity": { - "description": "The severity of the log entry. The default value is\n`LogSeverity.DEFAULT`.", - "enum": [ - "DEFAULT", - "DEBUG", - "INFO", - "NOTICE", - "WARNING", - "ERROR", - "CRITICAL", - "ALERT", - "EMERGENCY" - ], - "enumDescriptions": [ - "(0) The log entry has no assigned severity level.", - "(100) Debug or trace information.", - "(200) Routine information, such as ongoing status or performance.", - "(300) Normal but significant events, such as start up, shut down, or\na configuration change.", - "(400) Warning events might cause problems.", - "(500) Error events are likely to cause problems.", - "(600) Critical events cause more severe problems or outages.", - "(700) A person must take an action immediately.", - "(800) One or more systems are unusable." - ], - "type": "string" - }, - "sourceLocation": { - "$ref": "LogEntrySourceLocation", - "description": "Optional. 
Source code location information associated with the log entry,\nif any." + "readOnly": true, + "type": "array" }, - "structPayload": { - "additionalProperties": { - "description": "Properties of the object.", - "type": "any" + "modes": { + "description": "Internet protocol versions for which the instance has IP addresses assigned. For this version, only MODE_IPV4 is supported.", + "items": { + "enum": [ + "ADDRESS_MODE_UNSPECIFIED", + "MODE_IPV4" + ], + "enumDescriptions": [ + "Internet protocol not set.", + "Use the IPv4 internet protocol." + ], + "type": "string" }, - "description": "The log entry payload, represented as a structure that\nis expressed as a JSON object.", - "type": "object" - }, - "textPayload": { - "description": "The log entry payload, represented as a Unicode string (UTF-8).", - "type": "string" - }, - "timestamp": { - "description": "The time the event described by the log entry occurred. If\nomitted, defaults to operation start time.", - "format": "google-datetime", - "type": "string" - }, - "trace": { - "description": "Optional. Resource name of the trace associated with the log entry, if any.\nIf this field contains a relative resource name, you can assume the name is\nrelative to `//tracing.googleapis.com`. Example:\n`projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824`", - "type": "string" - } - }, - "type": "object" - }, - "LogEntryOperation": { - "description": "Additional information about a potentially long-running operation with which\na log entry is associated.", - "id": "LogEntryOperation", - "properties": { - "first": { - "description": "Optional. Set this to True if this is the first log entry in the operation.", - "type": "boolean" + "type": "array" }, - "id": { - "description": "Optional. An arbitrary operation identifier. Log entries with the\nsame identifier are assumed to be part of the same operation.", + "network": { + "description": "The name of the Google Compute Engine [VPC network](/compute/docs/networks-and-firewalls#networks) to which the instance is connected.", "type": "string" }, - "last": { - "description": "Optional. Set this to True if this is the last log entry in the operation.", - "type": "boolean" - }, - "producer": { - "description": "Optional. An arbitrary producer identifier. The combination of\n`id` and `producer` must be globally unique. Examples for `producer`:\n`\"MyDivision.MyBigCompany.com\"`, `\"github.com/MyProject/MyApplication\"`.", + "reservedIpRange": { + "description": "A /29 CIDR block for Basic or a /23 CIDR block for High Scale in one of the [internal IP address ranges](https://www.arin.net/knowledge/address_filters.html) that identifies the range of IP addresses reserved for this instance. For example, 10.0.0.0/29 or 192.168.0.0/23. The range you specify can't overlap with either existing subnets or assigned IP address ranges for other Cloud Filestore instances in the selected VPC network.", "type": "string" } }, "type": "object" }, - "LogEntrySourceLocation": { - "description": "Additional information about the source code location that produced the log\nentry.", - "id": "LogEntrySourceLocation", + "NfsExportOptions": { + "description": "NFS export options specifications.", + "id": "NfsExportOptions", "properties": { - "file": { - "description": "Optional. Source file name. Depending on the runtime environment, this\nmight be a simple name or a fully-qualified name.", - "type": "string" - }, - "function": { - "description": "Optional. 
Human-readable name of the function or method being invoked, with\noptional context such as the class or package name. This information may be\nused in contexts such as the logs viewer, where a file and line number are\nless meaningful. The format can vary by language. For example:\n`qual.if.ied.Class.method` (Java), `dir/package.func` (Go), `function`\n(Python).", + "accessMode": { + "description": "Either READ_ONLY, for allowing only read requests on the exported directory, or READ_WRITE, for allowing both read and write requests. The default is READ_WRITE.", + "enum": [ + "ACCESS_MODE_UNSPECIFIED", + "READ_ONLY", + "READ_WRITE" + ], + "enumDescriptions": [ + "AccessMode not set.", + "The client can only read the file share.", + "The client can read and write the file share (default)." + ], "type": "string" }, - "line": { - "description": "Optional. Line within the source file. 1-based; 0 indicates no line number\navailable.", + "anonGid": { + "description": "An integer representing the anonymous group id with a default value of 65534. Anon_gid may only be set with squash_mode of ROOT_SQUASH. An error will be returned if this field is specified for other squash_mode settings.", "format": "int64", "type": "string" - } - }, - "type": "object" - }, - "MetricValue": { - "description": "Represents a single metric value.", - "id": "MetricValue", - "properties": { - "boolValue": { - "description": "A boolean value.", - "type": "boolean" - }, - "distributionValue": { - "$ref": "Distribution", - "description": "A distribution value." - }, - "doubleValue": { - "description": "A double precision floating point value.", - "format": "double", - "type": "number" - }, - "endTime": { - "description": "The end of the time period over which this metric value's measurement\napplies.", - "format": "google-datetime", - "type": "string" }, - "int64Value": { - "description": "A signed 64-bit integer value.", + "anonUid": { + "description": "An integer representing the anonymous user id with a default value of 65534. Anon_uid may only be set with squash_mode of ROOT_SQUASH. An error will be returned if this field is specified for other squash_mode settings.", "format": "int64", "type": "string" }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "The labels describing the metric value.\nSee comments on google.api.servicecontrol.v1.Operation.labels for\nthe overriding relationship.\nNote that this map must not contain monitored resource labels.", - "type": "object" - }, - "moneyValue": { - "$ref": "Money", - "description": "A money value." - }, - "startTime": { - "description": "The start of the time period over which this metric value's measurement\napplies. The time period has different semantics for different metric\ntypes (cumulative, delta, and gauge). 
See the metric definition\ndocumentation in the service configuration for details.", - "format": "google-datetime", - "type": "string" - }, - "stringValue": { - "description": "A text string value.", - "type": "string" - } - }, - "type": "object" - }, - "MetricValueSet": { - "description": "Represents a set of metric values in the same metric.\nEach metric value in the set should have a unique combination of start time,\nend time, and label values.", - "id": "MetricValueSet", - "properties": { - "metricName": { - "description": "The metric name defined in the service configuration.", - "type": "string" - }, - "metricValues": { - "description": "The values in this metric.", - "items": { - "$ref": "MetricValue" - }, - "type": "array" - } - }, - "type": "object" - }, - "Money": { - "description": "Represents an amount of money with its currency type.", - "id": "Money", - "properties": { - "currencyCode": { - "description": "The 3-letter currency code defined in ISO 4217.", - "type": "string" - }, - "nanos": { - "description": "Number of nano (10^-9) units of the amount.\nThe value must be between -999,999,999 and +999,999,999 inclusive.\nIf `units` is positive, `nanos` must be positive or zero.\nIf `units` is zero, `nanos` can be positive, zero, or negative.\nIf `units` is negative, `nanos` must be negative or zero.\nFor example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.", - "format": "int32", - "type": "integer" - }, - "units": { - "description": "The whole units of the amount.\nFor example if `currencyCode` is `\"USD\"`, then 1 unit is one US dollar.", - "format": "int64", - "type": "string" - } - }, - "type": "object" - }, - "NetworkConfig": { - "description": "Network configuration for the instance.", - "id": "NetworkConfig", - "properties": { - "ipAddresses": { - "description": "Output only. IPv4 addresses in the format\n{octet 1}.{octet 2}.{octet 3}.{octet 4} or IPv6 addresses in the format\n{block 1}:{block 2}:{block 3}:{block 4}:{block 5}:{block 6}:{block\n7}:{block 8}.", + "ipRanges": { + "description": "List of either an IPv4 addresses in the format {octet 1}.{octet 2}.{octet 3}.{octet 4} or CIDR ranges in the format {octet 1}.{octet 2}.{octet 3}.{octet 4}/{mask size} which may mount the file share. Overlapping IP ranges are not allowed, both within and across NfsExportOptions. An error will be returned. The limit is 64 IP ranges/addresses for each FileShareConfig among all NfsExportOptions.", "items": { "type": "string" }, "type": "array" }, - "modes": { - "description": "Internet protocol versions for which the instance has IP addresses\nassigned. For this version, only MODE_IPV4 is supported.", + "squashMode": { + "description": "Either NO_ROOT_SQUASH, for allowing root access on the exported directory, or ROOT_SQUASH, for not allowing root access. The default is NO_ROOT_SQUASH.", + "enum": [ + "SQUASH_MODE_UNSPECIFIED", + "NO_ROOT_SQUASH", + "ROOT_SQUASH" + ], "enumDescriptions": [ - "Internet protocol not set.", - "Use the IPv4 internet protocol." + "SquashMode not set.", + "The Root user has root access to the file share (default).", + "The Root user has squashed access to the anonymous uid/gid." 
], - "items": { - "enum": [ - "ADDRESS_MODE_UNSPECIFIED", - "MODE_IPV4" - ], - "type": "string" - }, - "type": "array" - }, - "network": { - "description": "The name of the Google Compute Engine\n[VPC network](/compute/docs/networks-and-firewalls#networks) to which the\ninstance is connected.", - "type": "string" - }, - "reservedIpRange": { - "description": "A /29 CIDR block for Basic or a /23 CIDR block for High Scale in one of the\n[internal IP address\nranges](https://www.arin.net/knowledge/address_filters.html) that\nidentifies the range of IP addresses reserved for this instance. For\nexample, 10.0.0.0/29 or 192.168.0.0/23. The range you specify can't overlap\nwith either existing subnets or assigned IP address ranges for other Cloud\nFilestore instances in the selected VPC network.", "type": "string" } }, "type": "object" }, "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", "properties": { "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", "type": "boolean" }, "error": { @@ -1560,11 +1384,11 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", "type": "object" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, "response": { @@ -1572,7 +1396,7 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "description": "The normal response of the operation in case of success. 
If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", "type": "object" } }, @@ -1587,7 +1411,7 @@ "type": "string" }, "cancelRequested": { - "description": "[Output only] Identifies whether the user has requested cancellation\nof the operation. Operations that have successfully been cancelled\nhave Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + "description": "[Output only] Identifies whether the user has requested cancellation of the operation. Operations that have successfully been cancelled have Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", "type": "boolean" }, "createTime": { @@ -1615,72 +1439,27 @@ }, "type": "object" }, - "QuotaProperties": { - "description": "Represents the properties needed for quota operations.", - "id": "QuotaProperties", - "properties": { - "quotaMode": { - "description": "Quota mode for this operation.", - "enum": [ - "ACQUIRE", - "ACQUIRE_BEST_EFFORT", - "CHECK", - "RELEASE" - ], - "enumDescriptions": [ - "Decreases available quota by the cost specified for the operation.\nIf cost is higher than available quota, operation fails and returns\nerror.", - "Decreases available quota by the cost specified for the operation.\nIf cost is higher than available quota, operation does not fail and\navailable quota goes down to zero but it returns error.", - "Does not change any available quota. Only checks if there is enough\nquota.\nNo lock is placed on the checked tokens neither.", - "Increases available quota by the operation cost specified for the\noperation." - ], - "type": "string" - } - }, - "type": "object" - }, - "ReportRequest": { - "description": "Request message for the Report method.", - "id": "ReportRequest", - "properties": { - "operations": { - "description": "Operations to be reported.\n\nTypically the service should report one operation per request.\nPutting multiple operations into a single request is allowed, but should\nbe used only when multiple operations are natually available at the time\nof the report.\n\nThere is no limit on the number of operations in the same ReportRequest,\nhowever the ReportRequest size should be no larger than 1MB. See\nReportResponse.report_errors for partial failure behavior.", - "items": { - "$ref": "GoogleApiServicecontrolV1Operation" - }, - "type": "array" - }, - "serviceConfigId": { - "description": "Specifies which version of service config should be used to process the\nrequest.\n\nIf unspecified or no matching version can be found, the\nlatest one will be used.", - "type": "string" - }, - "serviceName": { - "description": "The service name as specified in its service configuration. 
For example,\n`\"pubsub.googleapis.com\"`.\n\nSee\n[google.api.Service](https://cloud.google.com/service-management/reference/rpc/google.api#google.api.Service)\nfor the definition of a service name.", - "type": "string" - } - }, - "type": "object" - }, - "ResourceInfo": { - "description": "Describes a resource associated with this operation.", - "id": "ResourceInfo", + "RestoreInstanceRequest": { + "description": "RestoreInstanceRequest restores an existing instances's file share from a snapshot or backup.", + "id": "RestoreInstanceRequest", "properties": { - "resourceContainer": { - "description": "The identifier of the parent of this resource instance.\nMust be in one of the following formats:\n - “projects/\u003cproject-id or project-number\u003e”\n - “folders/\u003cfolder-id\u003e”\n - “organizations/\u003corganization-id\u003e”", + "fileShare": { + "description": "Required. Name of the file share in the Cloud Filestore instance that the snapshot is being restored to.", "type": "string" }, - "resourceLocation": { - "description": "The location of the resource. If not empty, the resource will be checked\nagainst location policy. The value must be a valid zone, region or\nmultiregion. For example: \"europe-west4\" or \"northamerica-northeast1-a\"", + "sourceBackup": { + "description": "The resource name of the backup, in the format projects/{project_id}/locations/{location_id}/backups/{backup_id}.", "type": "string" }, - "resourceName": { - "description": "Name of the resource. This is used for auditing purposes.", + "sourceSnapshot": { + "description": "The resource name of the snapshot, in the format projects/{project_id}/locations/{location_id}/snapshots/{snapshot_id}.", "type": "string" } }, "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -1689,7 +1468,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -1700,93 +1479,7 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", - "type": "string" - } - }, - "type": "object" - }, - "TraceSpan": { - "description": "A span represents a single operation within a trace. 
Spans can be\nnested to form a trace tree. Often, a trace contains a root span\nthat describes the end-to-end latency, and one or more subspans for\nits sub-operations. A trace can also contain multiple root spans,\nor none at all. Spans do not need to be contiguous\u0026mdash;there may be\ngaps or overlaps between spans in a trace.", - "id": "TraceSpan", - "properties": { - "attributes": { - "$ref": "Attributes", - "description": "A set of attributes on the span. You can have up to 32 attributes per\nspan." - }, - "childSpanCount": { - "description": "An optional number of child spans that were generated while this span\nwas active. If set, allows implementation to detect missing child spans.", - "format": "int32", - "type": "integer" - }, - "displayName": { - "$ref": "TruncatableString", - "description": "A description of the span's operation (up to 128 bytes).\nStackdriver Trace displays the description in the\nGoogle Cloud Platform Console.\nFor example, the display name can be a qualified method name or a file name\nand a line number where the operation is called. A best practice is to use\nthe same display name within an application and at the same call point.\nThis makes it easier to correlate spans in different traces." - }, - "endTime": { - "description": "The end time of the span. On the client side, this is the time kept by\nthe local machine where the span execution ends. On the server side, this\nis the time when the server application handler stops running.", - "format": "google-datetime", - "type": "string" - }, - "name": { - "description": "The resource name of the span in the following format:\n\n projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/SPAN_ID is a unique identifier for a trace within a project;\nit is a 32-character hexadecimal encoding of a 16-byte array.\n\n[SPAN_ID] is a unique identifier for a span within a trace; it\nis a 16-character hexadecimal encoding of an 8-byte array.", - "type": "string" - }, - "parentSpanId": { - "description": "The [SPAN_ID] of this span's parent span. If this is a root span,\nthen this field must be empty.", - "type": "string" - }, - "sameProcessAsParentSpan": { - "description": "(Optional) Set this parameter to indicate whether this span is in\nthe same process as its parent. If you do not set this parameter,\nStackdriver Trace is unable to take advantage of this helpful\ninformation.", - "type": "boolean" - }, - "spanId": { - "description": "The [SPAN_ID] portion of the span's resource name.", - "type": "string" - }, - "spanKind": { - "description": "Distinguishes between spans generated in a particular context. For example,\ntwo spans with the same name may be distinguished using `CLIENT` (caller)\nand `SERVER` (callee) to identify an RPC call.", - "enum": [ - "SPAN_KIND_UNSPECIFIED", - "INTERNAL", - "SERVER", - "CLIENT", - "PRODUCER", - "CONSUMER" - ], - "enumDescriptions": [ - "Unspecified. Do NOT use as default.\nImplementations MAY assume SpanKind.INTERNAL to be default.", - "Indicates that the span is used internally. Default value.", - "Indicates that the span covers server-side handling of an RPC or other\nremote network request.", - "Indicates that the span covers the client-side wrapper around an RPC or\nother remote request.", - "Indicates that the span describes producer sending a message to a broker.\nUnlike client and server, there is no direct critical path latency\nrelationship between producer and consumer spans (e.g. 
publishing a\nmessage to a pubsub service).", - "Indicates that the span describes consumer receiving a message from a\nbroker. Unlike client and server, there is no direct critical path\nlatency relationship between producer and consumer spans (e.g. receiving\na message from a pubsub service subscription)." - ], - "type": "string" - }, - "startTime": { - "description": "The start time of the span. On the client side, this is the time kept by\nthe local machine where the span execution starts. On the server side, this\nis the time when the server's application handler starts running.", - "format": "google-datetime", - "type": "string" - }, - "status": { - "$ref": "Status", - "description": "An optional final status for this span." - } - }, - "type": "object" - }, - "TruncatableString": { - "description": "Represents a string that might be shortened to a specified length.", - "id": "TruncatableString", - "properties": { - "truncatedByteCount": { - "description": "The number of bytes removed from the original string. If this\nvalue is 0, then the string was not shortened.", - "format": "int32", - "type": "integer" - }, - "value": { - "description": "The shortened string. For example, if the original string is 500\nbytes long and the limit of the string is 128 bytes, then\n`value` contains the first 128 bytes of the 500-byte string.\n\nTruncation always happens on a UTF8 character boundary. If there\nare multi-byte characters in the string, then the length of the\nshortened string might be less than the size limit.", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, diff --git a/vendor/google.golang.org/api/file/v1beta1/file-gen.go b/vendor/google.golang.org/api/file/v1beta1/file-gen.go index d77dadca3bf..62e6bf06b30 100644 --- a/vendor/google.golang.org/api/file/v1beta1/file-gen.go +++ b/vendor/google.golang.org/api/file/v1beta1/file-gen.go @@ -75,6 +75,7 @@ const apiId = "file:v1beta1" const apiName = "file" const apiVersion = "v1beta1" const basePath = "https://file.googleapis.com/" +const mtlsBasePath = "https://file.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -90,6 +91,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) 
if err != nil { return nil, err @@ -147,6 +149,7 @@ type ProjectsService struct { func NewProjectsLocationsService(s *Service) *ProjectsLocationsService { rs := &ProjectsLocationsService{s: s} + rs.Backups = NewProjectsLocationsBackupsService(s) rs.Instances = NewProjectsLocationsInstancesService(s) rs.Operations = NewProjectsLocationsOperationsService(s) return rs @@ -155,11 +158,22 @@ func NewProjectsLocationsService(s *Service) *ProjectsLocationsService { type ProjectsLocationsService struct { s *Service + Backups *ProjectsLocationsBackupsService + Instances *ProjectsLocationsInstancesService Operations *ProjectsLocationsOperationsService } +func NewProjectsLocationsBackupsService(s *Service) *ProjectsLocationsBackupsService { + rs := &ProjectsLocationsBackupsService{s: s} + return rs +} + +type ProjectsLocationsBackupsService struct { + s *Service +} + func NewProjectsLocationsInstancesService(s *Service) *ProjectsLocationsInstancesService { rs := &ProjectsLocationsInstancesService{s: s} return rs @@ -178,351 +192,81 @@ type ProjectsLocationsOperationsService struct { s *Service } -// AttributeValue: The allowed types for [VALUE] in a `[KEY]:[VALUE]` -// attribute. -type AttributeValue struct { - // BoolValue: A Boolean value represented by `true` or `false`. - BoolValue bool `json:"boolValue,omitempty"` - - // IntValue: A 64-bit signed integer. - IntValue int64 `json:"intValue,omitempty,string"` +// Backup: A Cloud Filestore backup. +type Backup struct { + // CapacityGb: Output only. Capacity of the backup. This would be the + // size of the file share when the backup is restored. + CapacityGb int64 `json:"capacityGb,omitempty,string"` - // StringValue: A string up to 256 bytes long. - StringValue *TruncatableString `json:"stringValue,omitempty"` + // CreateTime: Output only. The time when the backup was created. + CreateTime string `json:"createTime,omitempty"` - // ForceSendFields is a list of field names (e.g. "BoolValue") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Description: A description of the backup with 2048 characters or + // less. Requests with longer descriptions will be rejected. + Description string `json:"description,omitempty"` - // NullFields is a list of field names (e.g. "BoolValue") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // DownloadBytes: Output only. Amount of bytes that will be downloaded + // if the backup is restored + DownloadBytes int64 `json:"downloadBytes,omitempty,string"` -func (s *AttributeValue) MarshalJSON() ([]byte, error) { - type NoMethod AttributeValue - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Labels: Resource labels to represent user provided metadata. + Labels map[string]string `json:"labels,omitempty"` -// Attributes: A set of attributes, each in the format `[KEY]:[VALUE]`. 
-type Attributes struct { - // AttributeMap: The set of attributes. Each attribute's key can be up - // to 128 bytes - // long. The value can be a string up to 256 bytes, a signed 64-bit - // integer, - // or the Boolean values `true` and `false`. For example: - // - // "/instance_id": "my-instance" - // "/http/user_agent": "" - // "/http/request_bytes": 300 - // "abc.com/myattribute": true - AttributeMap map[string]AttributeValue `json:"attributeMap,omitempty"` - - // DroppedAttributesCount: The number of attributes that were discarded. - // Attributes can be discarded - // because their keys are too long or because there are too many - // attributes. - // If this value is 0 then all attributes are valid. - DroppedAttributesCount int64 `json:"droppedAttributesCount,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AttributeMap") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Name: Output only. The resource name of the backup, in the format + // projects/{project_id}/locations/{location_id}/backups/{backup_id}. + Name string `json:"name,omitempty"` - // NullFields is a list of field names (e.g. "AttributeMap") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // SourceFileShare: Name of the file share in the source Cloud Filestore + // instance that the backup is created from. + SourceFileShare string `json:"sourceFileShare,omitempty"` -func (s *Attributes) MarshalJSON() ([]byte, error) { - type NoMethod Attributes - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // SourceInstance: The resource name of the source Cloud Filestore + // instance, in the format + // projects/{project_id}/locations/{location_id}/instances/{instance_id}, + // used to create this backup. + SourceInstance string `json:"sourceInstance,omitempty"` -// BillingView: Message for reporting billing requests through -// Eventstream. -type BillingView struct { - // ReportRequests: Billing requests to be reported for an - // [Eventstream - // resource](http://google3/cloud/eventstream/v2/resource_ev - // ent.proto). + // SourceInstanceTier: Output only. The service tier of the source Cloud + // Filestore instance that this backup is created from. // - // Each request contains billing operations to be reported under a - // service - // name. See go/billing-view-construction for documentation on - // constructing - // billing view report requests. - ReportRequests []*ReportRequest `json:"reportRequests,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ReportRequests") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ReportRequests") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *BillingView) MarshalJSON() ([]byte, error) { - type NoMethod BillingView - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// CancelOperationRequest: The request message for -// Operations.CancelOperation. -type CancelOperationRequest struct { -} + // Possible values: + // "TIER_UNSPECIFIED" - Not set. + // "STANDARD" - STANDARD tier. BASIC_HDD is the preferred term for + // this tier. + // "PREMIUM" - PREMIUM tier. BASIC_SSD is the preferred term for this + // tier. + // "BASIC_HDD" - BASIC instances offer a maximum capacity of 63.9 TB. + // BASIC_HDD is an alias for STANDARD Tier, offering economical + // performance backed by HDD. + // "BASIC_SSD" - BASIC instances offer a maximum capacity of 63.9 TB. + // BASIC_SSD is an alias for PREMIUM Tier, and offers improved + // performance backed by SSD. + // "HIGH_SCALE_SSD" - HIGH_SCALE instances offer expanded capacity and + // performance scaling capabilities. + SourceInstanceTier string `json:"sourceInstanceTier,omitempty"` -// Distribution: Distribution represents a frequency distribution of -// double-valued sample -// points. It contains the size of the population of sample points -// plus -// additional optional information: -// -// - the arithmetic mean of the samples -// - the minimum and maximum of the samples -// - the sum-squared-deviation of the samples, used to compute -// variance -// - a histogram of the values of the sample points -type Distribution struct { - // BucketCounts: The number of samples in each histogram bucket. - // `bucket_counts` are - // optional. If present, they must sum to the `count` value. - // - // The buckets are defined below in `bucket_option`. There are N - // buckets. - // `bucket_counts[0]` is the number of samples in the underflow - // bucket. - // `bucket_counts[1]` to `bucket_counts[N-1]` are the numbers of - // samples - // in each of the finite buckets. And `bucket_counts[N] is the number - // of samples in the overflow bucket. See the comments of - // `bucket_option` - // below for more details. + // State: Output only. The backup state. // - // Any suffix of trailing zeros may be omitted. - BucketCounts googleapi.Int64s `json:"bucketCounts,omitempty"` - - // Count: The total number of samples in the distribution. Must be >= 0. - Count int64 `json:"count,omitempty,string"` - - // Exemplars: Example points. Must be in increasing order of `value` - // field. - Exemplars []*Exemplar `json:"exemplars,omitempty"` - - // ExplicitBuckets: Buckets with arbitrary user-provided width. - ExplicitBuckets *ExplicitBuckets `json:"explicitBuckets,omitempty"` - - // ExponentialBuckets: Buckets with exponentially growing width. - ExponentialBuckets *ExponentialBuckets `json:"exponentialBuckets,omitempty"` - - // LinearBuckets: Buckets with constant width. - LinearBuckets *LinearBuckets `json:"linearBuckets,omitempty"` - - // Maximum: The maximum of the population of values. 
Ignored if `count` - // is zero. - Maximum float64 `json:"maximum,omitempty"` - - // Mean: The arithmetic mean of the samples in the distribution. If - // `count` is - // zero then this field must be zero. - Mean float64 `json:"mean,omitempty"` - - // Minimum: The minimum of the population of values. Ignored if `count` - // is zero. - Minimum float64 `json:"minimum,omitempty"` - - // SumOfSquaredDeviation: The sum of squared deviations from the mean: - // Sum[i=1..count]((x_i - mean)^2) - // where each x_i is a sample values. If `count` is zero then this - // field - // must be zero, otherwise validation of the request fails. - SumOfSquaredDeviation float64 `json:"sumOfSquaredDeviation,omitempty"` - - // ForceSendFields is a list of field names (e.g. "BucketCounts") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "BucketCounts") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Distribution) MarshalJSON() ([]byte, error) { - type NoMethod Distribution - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Possible values: + // "STATE_UNSPECIFIED" - State not set. + // "CREATING" - Backup is being created. + // "FINALIZING" - Backup has been taken and the operation is being + // finalized. At this point, changes to the file share will not be + // reflected in the backup. + // "READY" - Backup is available for use. + // "DELETING" - Backup is being deleted. + State string `json:"state,omitempty"` -func (s *Distribution) UnmarshalJSON(data []byte) error { - type NoMethod Distribution - var s1 struct { - Maximum gensupport.JSONFloat64 `json:"maximum"` - Mean gensupport.JSONFloat64 `json:"mean"` - Minimum gensupport.JSONFloat64 `json:"minimum"` - SumOfSquaredDeviation gensupport.JSONFloat64 `json:"sumOfSquaredDeviation"` - *NoMethod - } - s1.NoMethod = (*NoMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.Maximum = float64(s1.Maximum) - s.Mean = float64(s1.Mean) - s.Minimum = float64(s1.Minimum) - s.SumOfSquaredDeviation = float64(s1.SumOfSquaredDeviation) - return nil -} + // StorageBytes: Output only. The size of the storage used by the + // backup. As backups share storage, this number is expected to change + // with backup creation/deletion. + StorageBytes int64 `json:"storageBytes,omitempty,string"` -// Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. 
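
The new `Backup` type above exposes a `State` field whose documented values are `CREATING`, `FINALIZING`, `READY`, and `DELETING`. A minimal sketch of how a caller might wait for a backup to become usable; `getBackup` is a hypothetical stand-in for whatever generated call fetches the resource and is not part of this diff.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// backupView is a simplified stand-in for the generated Backup type.
type backupView struct {
	Name  string
	State string // "CREATING", "FINALIZING", "READY", "DELETING"
}

// getBackup is a placeholder for a real Get call against the API.
func getBackup(name string) (backupView, error) {
	return backupView{Name: name, State: "READY"}, nil
}

// waitForBackup polls until the backup reaches READY, which the field docs
// describe as "available for use"; CREATING and FINALIZING are still in progress.
func waitForBackup(name string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		b, err := getBackup(name)
		if err != nil {
			return err
		}
		if b.State == "READY" {
			return nil
		}
		time.Sleep(10 * time.Second)
	}
	return errors.New("timed out waiting for backup to become READY")
}

func main() {
	if err := waitForBackup("projects/p/locations/l/backups/b", time.Minute); err != nil {
		fmt.Println(err)
	}
}
```
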
-type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` -} - -// Exemplar: Exemplars are example points that may be used to annotate -// aggregated -// distribution values. They are metadata that gives information about -// a -// particular value added to a Distribution bucket, such as a trace ID -// that -// was active when a value was added. They may contain further -// information, -// such as a example values and timestamps, origin, etc. -type Exemplar struct { - // Attachments: Contextual information about the example value. Examples - // are: - // - // Trace: type.googleapis.com/google.monitoring.v3.SpanContext - // - // Literal string: type.googleapis.com/google.protobuf.StringValue - // - // Labels dropped during aggregation: - // type.googleapis.com/google.monitoring.v3.DroppedLabels - // - // There may be only a single attachment of any given message type in - // a - // single exemplar, and this is enforced by the system. - Attachments []googleapi.RawMessage `json:"attachments,omitempty"` - - // Timestamp: The observation (sampling) time of the above value. - Timestamp string `json:"timestamp,omitempty"` - - // Value: Value of the exemplar point. This value determines to which - // bucket the - // exemplar belongs. - Value float64 `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Attachments") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Attachments") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Exemplar) MarshalJSON() ([]byte, error) { - type NoMethod Exemplar - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *Exemplar) UnmarshalJSON(data []byte) error { - type NoMethod Exemplar - var s1 struct { - Value gensupport.JSONFloat64 `json:"value"` - *NoMethod - } - s1.NoMethod = (*NoMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.Value = float64(s1.Value) - return nil -} - -// ExplicitBuckets: Describing buckets with arbitrary user-provided -// width. -type ExplicitBuckets struct { - // Bounds: 'bound' is a list of strictly increasing boundaries - // between - // buckets. Note that a list of length N-1 defines N buckets because - // of fenceposting. See comments on `bucket_options` for details. - // - // The i'th finite bucket covers the interval - // [bound[i-1], bound[i]) - // where i ranges from 1 to bound_size() - 1. Note that there are - // no - // finite buckets at all if 'bound' only contains a single element; - // in - // that special case the single bound defines the boundary between - // the - // underflow and overflow buckets. 
- // - // bucket number lower bound upper bound - // i == 0 (underflow) -inf bound[i] - // 0 < i < bound_size() bound[i-1] bound[i] - // i == bound_size() (overflow) bound[i-1] +inf - Bounds []float64 `json:"bounds,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Bounds") to + // ForceSendFields is a list of field names (e.g. "CapacityGb") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -530,8 +274,8 @@ type ExplicitBuckets struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Bounds") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "CapacityGb") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -539,81 +283,47 @@ type ExplicitBuckets struct { NullFields []string `json:"-"` } -func (s *ExplicitBuckets) MarshalJSON() ([]byte, error) { - type NoMethod ExplicitBuckets +func (s *Backup) MarshalJSON() ([]byte, error) { + type NoMethod Backup raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ExponentialBuckets: Describing buckets with exponentially growing -// width. -type ExponentialBuckets struct { - // GrowthFactor: The i'th exponential bucket covers the interval - // [scale * growth_factor^(i-1), scale * growth_factor^i) - // where i ranges from 1 to num_finite_buckets inclusive. - // Must be larger than 1.0. - GrowthFactor float64 `json:"growthFactor,omitempty"` - - // NumFiniteBuckets: The number of finite buckets. With the underflow - // and overflow buckets, - // the total number of buckets is `num_finite_buckets` + 2. - // See comments on `bucket_options` for details. - NumFiniteBuckets int64 `json:"numFiniteBuckets,omitempty"` - - // Scale: The i'th exponential bucket covers the interval - // [scale * growth_factor^(i-1), scale * growth_factor^i) - // where i ranges from 1 to num_finite_buckets inclusive. - // Must be > 0. - Scale float64 `json:"scale,omitempty"` - - // ForceSendFields is a list of field names (e.g. "GrowthFactor") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "GrowthFactor") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
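
Every generated type in this vendored file, including the new `Backup`, routes `MarshalJSON` through `gensupport.MarshalJSON` so that `ForceSendFields` and `NullFields` can override the `omitempty` struct tags. A stand-alone sketch, using a simplified stand-in struct rather than the vendored type, of why that override exists:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// backupPatch is a simplified stand-in for a generated type such as Backup.
// The real types marshal via gensupport.MarshalJSON, which consults
// ForceSendFields/NullFields; plain encoding/json with omitempty cannot
// express "send this field even though it is empty".
type backupPatch struct {
	Description string            `json:"description,omitempty"`
	Labels      map[string]string `json:"labels,omitempty"`
}

func main() {
	// With omitempty alone, clearing Description in a Patch request is
	// impossible: the zero value is silently dropped from the payload.
	out, _ := json.Marshal(backupPatch{Labels: map[string]string{"env": "test"}})
	fmt.Println(string(out)) // {"labels":{"env":"test"}} – no "description" key at all
}
```
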
- NullFields []string `json:"-"` -} - -func (s *ExponentialBuckets) MarshalJSON() ([]byte, error) { - type NoMethod ExponentialBuckets - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +// CancelOperationRequest: The request message for +// Operations.CancelOperation. +type CancelOperationRequest struct { } -func (s *ExponentialBuckets) UnmarshalJSON(data []byte) error { - type NoMethod ExponentialBuckets - var s1 struct { - GrowthFactor gensupport.JSONFloat64 `json:"growthFactor"` - Scale gensupport.JSONFloat64 `json:"scale"` - *NoMethod - } - s1.NoMethod = (*NoMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.GrowthFactor = float64(s1.GrowthFactor) - s.Scale = float64(s1.Scale) - return nil +// Empty: A generic empty message that you can re-use to avoid defining +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. +type Empty struct { + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` } // FileShareConfig: File share configuration for the instance. type FileShareConfig struct { - // CapacityGb: File share capacity in gigabytes (GB). - // Cloud Filestore defines 1 GB as 1024^3 bytes. + // CapacityGb: File share capacity in gigabytes (GB). Cloud Filestore + // defines 1 GB as 1024^3 bytes. CapacityGb int64 `json:"capacityGb,omitempty,string"` // Name: The name of the file share (must be 16 characters or less). Name string `json:"name,omitempty"` + // NfsExportOptions: Nfs Export Options. There is a limit of 10 export + // options per file share. + NfsExportOptions []*NfsExportOptions `json:"nfsExportOptions,omitempty"` + + // SourceBackup: The resource name of the backup, in the format + // projects/{project_id}/locations/{location_id}/backups/{backup_id}, + // that this file share has been restored from. + SourceBackup string `json:"sourceBackup,omitempty"` + // ForceSendFields is a list of field names (e.g. "CapacityGb") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -637,240 +347,71 @@ func (s *FileShareConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// GoogleApiServicecontrolV1Operation: Represents information regarding -// an operation. -type GoogleApiServicecontrolV1Operation struct { - // ConsumerId: Identity of the consumer who is using the service. - // This field should be filled in for the operations initiated by - // a - // consumer, but not for service-initiated operations that are - // not related to a specific consumer. - // - // - This can be in one of the following formats: - // - project:PROJECT_ID, - // - project`_`number:PROJECT_NUMBER, - // - projects/PROJECT_ID or PROJECT_NUMBER, - // - folders/FOLDER_NUMBER, - // - organizations/ORGANIZATION_NUMBER, - // - api`_`key:API_KEY. - ConsumerId string `json:"consumerId,omitempty"` - - // EndTime: End time of the operation. - // Required when the operation is used in ServiceController.Report, - // but optional when the operation is used in ServiceController.Check. - EndTime string `json:"endTime,omitempty"` - - // Importance: DO NOT USE. This is an experimental field. 
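
The updated `FileShareConfig` gains `NfsExportOptions` (limited to 10 per file share, per the field comment) and `SourceBackup` for creating a share from an existing backup. A hedged sketch of the payload a caller would produce, using local stand-in structs; the `IpRanges` field on the export options is an assumption, since the `NfsExportOptions` definition is not shown in this hunk.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-ins mirroring the shape of the FileShareConfig fields added in
// this diff; tags follow the struct definitions above.
type nfsExportOptions struct {
	IpRanges []string `json:"ipRanges,omitempty"` // assumed field name
}

type fileShareConfig struct {
	Name             string              `json:"name,omitempty"`
	CapacityGb       int64               `json:"capacityGb,omitempty,string"`
	SourceBackup     string              `json:"sourceBackup,omitempty"`
	NfsExportOptions []*nfsExportOptions `json:"nfsExportOptions,omitempty"`
}

func main() {
	share := fileShareConfig{
		Name:         "share1",
		CapacityGb:   1024,
		SourceBackup: "projects/my-project/locations/us-central1/backups/nightly",
		NfsExportOptions: []*nfsExportOptions{
			{IpRanges: []string{"10.0.0.0/24"}},
		},
	}
	out, _ := json.MarshalIndent(share, "", "  ")
	fmt.Println(string(out))
}
```
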
- // - // Possible values: - // "LOW" - The API implementation may cache and aggregate the - // data. - // The data may be lost when rare and unexpected system failures occur. - // "HIGH" - The API implementation doesn't cache and aggregate the - // data. - // If the method returns successfully, it's guaranteed that the data - // has - // been persisted in durable storage. - // "DEBUG" - In addition to the behavior described in HIGH, DEBUG - // enables - // additional validation logic that is only useful during the - // onboarding - // process. This is only available to Google internal services and - // the service must be whitelisted by chemist-dev@google.com in order - // to use this level. - Importance string `json:"importance,omitempty"` - - // Labels: Labels describing the operation. Only the following labels - // are allowed: - // - // - Labels describing monitored resources as defined in - // the service configuration. - // - Default labels of metric values. When specified, labels defined in - // the - // metric value override these default. - // - The following labels defined by Google Cloud Platform: - // - `cloud.googleapis.com/location` describing the location where - // the - // operation happened, - // - `servicecontrol.googleapis.com/user_agent` describing the user - // agent - // of the API request, - // - `servicecontrol.googleapis.com/service_agent` describing the - // service - // used to handle the API request (e.g. ESP), - // - `servicecontrol.googleapis.com/platform` describing the - // platform - // where the API is served, such as App Engine, Compute Engine, - // or - // Kubernetes Engine. - Labels map[string]string `json:"labels,omitempty"` - - // LogEntries: Represents information to be logged. - LogEntries []*LogEntry `json:"logEntries,omitempty"` - - // MetricValueSets: Represents information about this operation. Each - // MetricValueSet - // corresponds to a metric defined in the service configuration. - // The data type used in the MetricValueSet must agree with - // the data type specified in the metric definition. - // - // Within a single operation, it is not allowed to have more than - // one - // MetricValue instances that have the same metric names and - // identical - // label value combinations. If a request has such duplicated - // MetricValue - // instances, the entire request is rejected with - // an invalid argument error. - MetricValueSets []*MetricValueSet `json:"metricValueSets,omitempty"` - - // OperationId: Identity of the operation. This must be unique within - // the scope of the - // service that generated the operation. If the service calls - // Check() and Report() on the same operation, the two calls should - // carry - // the same id. - // - // UUID version 4 is recommended, though not required. - // In scenarios where an operation is computed from existing - // information - // and an idempotent id is desirable for deduplication purpose, UUID - // version 5 - // is recommended. See RFC 4122 for details. - OperationId string `json:"operationId,omitempty"` - - // OperationName: Fully qualified name of the operation. Reserved for - // future use. - OperationName string `json:"operationName,omitempty"` - - // QuotaProperties: Represents the properties needed for quota check. - // Applicable only if this - // operation is for a quota check request. If this is not specified, no - // quota - // check will be performed. 
- QuotaProperties *QuotaProperties `json:"quotaProperties,omitempty"` - - // Resources: The resources that are involved in the operation. - // The maximum supported number of entries in this field is 100. - Resources []*ResourceInfo `json:"resources,omitempty"` - - // StartTime: Required. Start time of the operation. - StartTime string `json:"startTime,omitempty"` - - // TraceSpans: Unimplemented. A list of Cloud Trace spans. The span - // names shall contain - // the id of the destination project which can be either the produce or - // the - // consumer project. - TraceSpans []*TraceSpan `json:"traceSpans,omitempty"` - - // UserLabels: User defined labels for the resource that this operation - // is associated - // with. Only a combination of 1000 user labels per consumer project - // are - // allowed. - UserLabels map[string]string `json:"userLabels,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ConsumerId") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ConsumerId") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *GoogleApiServicecontrolV1Operation) MarshalJSON() ([]byte, error) { - type NoMethod GoogleApiServicecontrolV1Operation - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - type GoogleCloudSaasacceleratorManagementProvidersV1Instance struct { // ConsumerDefinedName: consumer_defined_name is the name that is set by - // the consumer. On the other - // hand Name field represents system-assigned id of an instance so - // consumers - // are not necessarily aware of it. + // the consumer. On the other hand Name field represents system-assigned + // id of an instance so consumers are not necessarily aware of it. // consumer_defined_name is used for notification/UI purposes for - // consumer to - // recognize their instances. + // consumer to recognize their instances. ConsumerDefinedName string `json:"consumerDefinedName,omitempty"` // CreateTime: Output only. Timestamp when the resource was created. CreateTime string `json:"createTime,omitempty"` // Labels: Optional. Resource labels to represent user provided - // metadata. Each label - // is a key-value pair, where both the key and the value are arbitrary - // strings - // provided by the user. + // metadata. Each label is a key-value pair, where both the key and the + // value are arbitrary strings provided by the user. Labels map[string]string `json:"labels,omitempty"` // MaintenancePolicyNames: The MaintenancePolicies that have been - // attached to the instance. - // The key must be of the type name of the oneof policy name defined - // in - // MaintenancePolicy, and the referenced policy must define the same - // policy - // type. For complete details of MaintenancePolicy, please refer - // to - // go/cloud-saas-mw-ug. 
+ // attached to the instance. The key must be of the type name of the + // oneof policy name defined in MaintenancePolicy, and the referenced + // policy must define the same policy type. For complete details of + // MaintenancePolicy, please refer to go/cloud-saas-mw-ug. MaintenancePolicyNames map[string]string `json:"maintenancePolicyNames,omitempty"` // MaintenanceSchedules: The MaintenanceSchedule contains the scheduling - // information of published - // maintenance schedule. + // information of published maintenance schedule with same key as + // software_versions. MaintenanceSchedules map[string]GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSchedule `json:"maintenanceSchedules,omitempty"` + // MaintenanceSettings: Optional. The MaintenanceSettings associated + // with instance. + MaintenanceSettings *GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSettings `json:"maintenanceSettings,omitempty"` + // Name: Unique name of the resource. It uses the form: - // // `projects/{project_id}/locations/{location_id}/instances/{instance_id} // ` Name string `json:"name,omitempty"` // ProducerMetadata: Output only. Custom string attributes used - // primarily to expose - // producer-specific information in monitoring dashboards. - // See go/get-instance-metadata. + // primarily to expose producer-specific information in monitoring + // dashboards. See go/get-instance-metadata. ProducerMetadata map[string]string `json:"producerMetadata,omitempty"` // ProvisionedResources: Output only. The list of data plane resources - // provisioned for this - // instance, e.g. compute VMs. See go/get-instance-metadata. + // provisioned for this instance, e.g. compute VMs. See + // go/get-instance-metadata. ProvisionedResources []*GoogleCloudSaasacceleratorManagementProvidersV1ProvisionedResource `json:"provisionedResources,omitempty"` // SlmInstanceTemplate: Link to the SLM instance template. Only - // populated when updating SLM - // instances via SSA's Actuation service adaptor. - // Service producers with custom control plane (e.g. Cloud SQL) - // doesn't - // need to populate this field. Instead they should use + // populated when updating SLM instances via SSA's Actuation service + // adaptor. Service producers with custom control plane (e.g. Cloud SQL) + // doesn't need to populate this field. Instead they should use // software_versions. SlmInstanceTemplate string `json:"slmInstanceTemplate,omitempty"` // SloMetadata: Output only. SLO metadata for instance classification in - // the - // Standardized dataplane SLO platform. - // See go/cloud-ssa-standard-slo for feature description. + // the Standardized dataplane SLO platform. See + // go/cloud-ssa-standard-slo for feature description. SloMetadata *GoogleCloudSaasacceleratorManagementProvidersV1SloMetadata `json:"sloMetadata,omitempty"` // SoftwareVersions: Software versions that are used to deploy this - // instance. This can be - // mutated by rollout services. + // instance. This can be mutated by rollout services. SoftwareVersions map[string]string `json:"softwareVersions,omitempty"` // State: Output only. Current lifecycle state of the resource (e.g. if - // it's being - // created or ready to use). + // it's being created or ready to use). // // Possible values: // "STATE_UNSPECIFIED" - Unspecified state. @@ -884,8 +425,7 @@ type GoogleCloudSaasacceleratorManagementProvidersV1Instance struct { State string `json:"state,omitempty"` // TenantProjectId: Output only. ID of the associated GCP tenant - // project. 
- // See go/get-instance-metadata. + // project. See go/get-instance-metadata. TenantProjectId string `json:"tenantProjectId,omitempty"` // UpdateTime: Output only. Timestamp when the resource was last @@ -918,23 +458,19 @@ func (s *GoogleCloudSaasacceleratorManagementProvidersV1Instance) MarshalJSON() // GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSchedule: // Maintenance schedule which is exposed to customer and potentially end -// user, -// indicating published upcoming future maintenance schedule +// user, indicating published upcoming future maintenance schedule type GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSchedule struct { - // CanReschedule: Can this scheduled update be rescheduled? - // By default, it's true and API needs to do explicitly check whether - // it's - // set, if it's set as false explicitly, it's false + // CanReschedule: Can this scheduled update be rescheduled? By default, + // it's true and API needs to do explicitly check whether it's set, if + // it's set as false explicitly, it's false CanReschedule bool `json:"canReschedule,omitempty"` // EndTime: The scheduled end time for the maintenance. EndTime string `json:"endTime,omitempty"` // RolloutManagementPolicy: The rollout management policy this - // maintenance schedule is associated - // with. When doing reschedule update request, the reschedule should - // be - // against this given policy. + // maintenance schedule is associated with. When doing reschedule update + // request, the reschedule should be against this given policy. RolloutManagementPolicy string `json:"rolloutManagementPolicy,omitempty"` // StartTime: The scheduled start time for the maintenance. @@ -963,29 +499,59 @@ func (s *GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSchedule) Mar return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSettings: +// Maintenance settings associated with instance. Allows service +// producers and end users to assign settings that controls maintenance +// on this instance. +type GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSettings struct { + // Exclude: Optional. Exclude instance from maintenance. When true, + // rollout service will not attempt maintenance on the instance. Rollout + // service will include the instance in reported rollout progress as not + // attempted. + Exclude bool `json:"exclude,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Exclude") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Exclude") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSettings) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // GoogleCloudSaasacceleratorManagementProvidersV1NodeSloMetadata: Node -// information for custom per-node SLO implementations. -// SSA does not support per-node SLO, but producers can populate -// per-node -// information in SloMetadata for custom precomputations. -// SSA Eligibility Exporter will emit per-node metric based on this -// information. +// information for custom per-node SLO implementations. SSA does not +// support per-node SLO, but producers can populate per-node information +// in SloMetadata for custom precomputations. SSA Eligibility Exporter +// will emit per-node metric based on this information. type GoogleCloudSaasacceleratorManagementProvidersV1NodeSloMetadata struct { - // Exclusions: By default node is eligible if instance is eligible. - // But individual node might be excluded from SLO by adding entry - // here. - // For semantic see SloMetadata.exclusions. - // If both instance and node level exclusions are present for time - // period, - // the node level's reason will be reported by Eligibility Exporter. + // Exclusions: By default node is eligible if instance is eligible. But + // individual node might be excluded from SLO by adding entry here. For + // semantic see SloMetadata.exclusions. If both instance and node level + // exclusions are present for time period, the node level's reason will + // be reported by Eligibility Exporter. Exclusions []*GoogleCloudSaasacceleratorManagementProvidersV1SloExclusion `json:"exclusions,omitempty"` // Location: The location of the node, if different from instance // location. Location string `json:"location,omitempty"` - // NodeId: The id of the node. - // This should be equal to SaasInstanceNode.node_id. + // NodeId: The id of the node. This should be equal to + // SaasInstanceNode.node_id. NodeId string `json:"nodeId,omitempty"` // ForceSendFields is a list of field names (e.g. "Exclusions") to @@ -1015,20 +581,15 @@ func (s *GoogleCloudSaasacceleratorManagementProvidersV1NodeSloMetadata) Marshal // Describes provisioned dataplane resources. type GoogleCloudSaasacceleratorManagementProvidersV1ProvisionedResource struct { // ResourceType: Type of the resource. This can be either a GCP resource - // or a custom one - // (e.g. another cloud provider's VM). For GCP compute resources use - // singular - // form of the names listed in GCP compute API + // or a custom one (e.g. another cloud provider's VM). For GCP compute + // resources use singular form of the names listed in GCP compute API // documentation - // (https://cloud.google.com/compute/docs/reference/rest/v1 - // /), prefixed with - // 'compute-', for example: 'compute-instance', - // 'compute-disk', + // (https://cloud.google.com/compute/docs/reference/rest/v1/), prefixed + // with 'compute-', for example: 'compute-instance', 'compute-disk', // 'compute-autoscaler'. ResourceType string `json:"resourceType,omitempty"` - // ResourceUrl: URL identifying the resource, - // e.g. + // ResourceUrl: URL identifying the resource, e.g. // "https://www.googleapis.com/compute/v1/projects/...)". 
ResourceUrl string `json:"resourceUrl,omitempty"` @@ -1057,19 +618,16 @@ func (s *GoogleCloudSaasacceleratorManagementProvidersV1ProvisionedResource) Mar // GoogleCloudSaasacceleratorManagementProvidersV1SloEligibility: // SloEligibility is a tuple containing eligibility value: true if an -// instance -// is eligible for SLO calculation or false if it should be excluded -// from all -// SLO-related calculations along with a user-defined reason. +// instance is eligible for SLO calculation or false if it should be +// excluded from all SLO-related calculations along with a user-defined +// reason. type GoogleCloudSaasacceleratorManagementProvidersV1SloEligibility struct { // Eligible: Whether an instance is eligible or ineligible. Eligible bool `json:"eligible,omitempty"` // Reason: User-defined reason for the current value of instance - // eligibility. Usually, - // this can be directly mapped to the internal state. An empty reason - // is - // allowed. + // eligibility. Usually, this can be directly mapped to the internal + // state. An empty reason is allowed. Reason string `json:"reason,omitempty"` // ForceSendFields is a list of field names (e.g. "Eligible") to @@ -1099,35 +657,25 @@ func (s *GoogleCloudSaasacceleratorManagementProvidersV1SloEligibility) MarshalJ // SloExclusion represents an exclusion in SLI calculation applies to // all SLOs. type GoogleCloudSaasacceleratorManagementProvidersV1SloExclusion struct { - // Duration: Exclusion duration. No restrictions on the possible - // values. - // - // When an ongoing operation is taking longer than initially - // expected, + // Duration: Exclusion duration. No restrictions on the possible values. + // When an ongoing operation is taking longer than initially expected, // an existing entry in the exclusion list can be updated by extending - // the - // duration. This is supported by the subsystem exporting eligibility - // data - // as long as such extension is committed at least 10 minutes before - // the - // original exclusion expiration - otherwise it is possible that there - // will - // be "gaps" in the exclusion application in the exported timeseries. + // the duration. This is supported by the subsystem exporting + // eligibility data as long as such extension is committed at least 10 + // minutes before the original exclusion expiration - otherwise it is + // possible that there will be "gaps" in the exclusion application in + // the exported timeseries. Duration string `json:"duration,omitempty"` - // Reason: Human-readable reason for the exclusion. - // This should be a static string (e.g. "Disruptive update in - // progress") - // and should not contain dynamically generated data (e.g. instance - // name). - // Can be left empty. + // Reason: Human-readable reason for the exclusion. This should be a + // static string (e.g. "Disruptive update in progress") and should not + // contain dynamically generated data (e.g. instance name). Can be left + // empty. Reason string `json:"reason,omitempty"` // SliName: Name of an SLI that this exclusion applies to. Can be left - // empty, - // signaling that the instance should be excluded from all SLIs - // defined - // in the service SLO configuration. + // empty, signaling that the instance should be excluded from all SLIs + // defined in the service SLO configuration. SliName string `json:"sliName,omitempty"` // StartTime: Start time of the exclusion. No alignment (e.g. 
to a full @@ -1159,50 +707,34 @@ func (s *GoogleCloudSaasacceleratorManagementProvidersV1SloExclusion) MarshalJSO // GoogleCloudSaasacceleratorManagementProvidersV1SloMetadata: // SloMetadata contains resources required for proper SLO classification -// of the -// instance. +// of the instance. type GoogleCloudSaasacceleratorManagementProvidersV1SloMetadata struct { // Eligibility: Optional. User-defined instance eligibility. Eligibility *GoogleCloudSaasacceleratorManagementProvidersV1SloEligibility `json:"eligibility,omitempty"` // Exclusions: List of SLO exclusion windows. When multiple entries in - // the list match - // (matching the exclusion time-window against current time point) - // the exclusion reason used in the first matching entry will be - // published. - // - // It is not needed to include expired exclusion in this list, as only - // the - // currently applicable exclusions are taken into account by the - // eligibility - // exporting subsystem (the historical state of exclusions will be - // reflected - // in the historically produced timeseries regardless of the current - // state). - // - // This field can be used to mark the instance as temporary - // ineligible - // for the purpose of SLO calculation. For permanent instance SLO - // exclusion, - // use of custom instance eligibility is recommended. See 'eligibility' - // field - // below. + // the list match (matching the exclusion time-window against current + // time point) the exclusion reason used in the first matching entry + // will be published. It is not needed to include expired exclusion in + // this list, as only the currently applicable exclusions are taken into + // account by the eligibility exporting subsystem (the historical state + // of exclusions will be reflected in the historically produced + // timeseries regardless of the current state). This field can be used + // to mark the instance as temporary ineligible for the purpose of SLO + // calculation. For permanent instance SLO exclusion, use of custom + // instance eligibility is recommended. See 'eligibility' field below. Exclusions []*GoogleCloudSaasacceleratorManagementProvidersV1SloExclusion `json:"exclusions,omitempty"` - // Nodes: Optional. List of nodes. - // Some producers need to use per-node metadata to calculate SLO. - // This field allows such producers to publish per-node SLO meta - // data, - // which will be consumed by SSA Eligibility Exporter and published in - // the - // form of per node metric to Monarch. + // Nodes: Optional. List of nodes. Some producers need to use per-node + // metadata to calculate SLO. This field allows such producers to + // publish per-node SLO meta data, which will be consumed by SSA + // Eligibility Exporter and published in the form of per node metric to + // Monarch. Nodes []*GoogleCloudSaasacceleratorManagementProvidersV1NodeSloMetadata `json:"nodes,omitempty"` // Tier: Name of the SLO tier the Instance belongs to. This name will be - // expected to - // match the tiers specified in the service SLO configuration. - // - // Field is mandatory and must not be empty. + // expected to match the tiers specified in the service SLO + // configuration. Field is mandatory and must not be empty. Tier string `json:"tier,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Eligibility") to @@ -1228,140 +760,32 @@ func (s *GoogleCloudSaasacceleratorManagementProvidersV1SloMetadata) MarshalJSON return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// HttpRequest: A common proto for logging HTTP requests. Only contains -// semantics -// defined by the HTTP specification. Product-specific -// logging -// information MUST be defined in a separate message. -type HttpRequest struct { - // CacheFillBytes: The number of HTTP response bytes inserted into - // cache. Set only when a - // cache fill was attempted. - CacheFillBytes int64 `json:"cacheFillBytes,omitempty,string"` - - // CacheHit: Whether or not an entity was served from cache - // (with or without validation). - CacheHit bool `json:"cacheHit,omitempty"` - - // CacheLookup: Whether or not a cache lookup was attempted. - CacheLookup bool `json:"cacheLookup,omitempty"` - - // CacheValidatedWithOriginServer: Whether or not the response was - // validated with the origin server before - // being served from cache. This field is only meaningful if `cache_hit` - // is - // True. - CacheValidatedWithOriginServer bool `json:"cacheValidatedWithOriginServer,omitempty"` - - // Latency: The request processing latency on the server, from the time - // the request was - // received until the response was sent. - Latency string `json:"latency,omitempty"` - - // Protocol: Protocol used for the request. Examples: "HTTP/1.1", - // "HTTP/2", "websocket" - Protocol string `json:"protocol,omitempty"` - - // Referer: The referer URL of the request, as defined in - // [HTTP/1.1 Header - // Field - // Definitions](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.h - // tml). - Referer string `json:"referer,omitempty"` - - // RemoteIp: The IP address (IPv4 or IPv6) of the client that issued the - // HTTP - // request. Examples: "192.168.1.1", "FE80::0202:B3FF:FE1E:8329". - RemoteIp string `json:"remoteIp,omitempty"` - - // RequestMethod: The request method. Examples: "GET", "HEAD", - // "PUT", "POST". - RequestMethod string `json:"requestMethod,omitempty"` - - // RequestSize: The size of the HTTP request message in bytes, including - // the request - // headers and the request body. - RequestSize int64 `json:"requestSize,omitempty,string"` - - // RequestUrl: The scheme (http, https), the host name, the path, and - // the query - // portion of the URL that was requested. - // Example: "http://example.com/some/info?color=red". - RequestUrl string `json:"requestUrl,omitempty"` - - // ResponseSize: The size of the HTTP response message sent back to the - // client, in bytes, - // including the response headers and the response body. - ResponseSize int64 `json:"responseSize,omitempty,string"` - - // ServerIp: The IP address (IPv4 or IPv6) of the origin server that the - // request was - // sent to. - ServerIp string `json:"serverIp,omitempty"` - - // Status: The response code indicating the status of the - // response. - // Examples: 200, 404. - Status int64 `json:"status,omitempty"` - - // UserAgent: The user agent sent by the client. Example: - // "Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET - // CLR 1.0.3705)". - UserAgent string `json:"userAgent,omitempty"` - - // ForceSendFields is a list of field names (e.g. "CacheFillBytes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CacheFillBytes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *HttpRequest) MarshalJSON() ([]byte, error) { - type NoMethod HttpRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Instance: A Cloud Filestore instance. -type Instance struct { - // CreateTime: Output only. The time when the instance was created. - CreateTime string `json:"createTime,omitempty"` +// Instance: A Cloud Filestore instance. +type Instance struct { + // CreateTime: Output only. The time when the instance was created. + CreateTime string `json:"createTime,omitempty"` // Description: The description of the instance (2048 characters or // less). Description string `json:"description,omitempty"` // Etag: Server-specified ETag for the instance resource to prevent - // simultaneous - // updates from overwriting each other. + // simultaneous updates from overwriting each other. Etag string `json:"etag,omitempty"` - // FileShares: File system shares on the instance. - // For this version, only a single file share is supported. + // FileShares: File system shares on the instance. For this version, + // only a single file share is supported. FileShares []*FileShareConfig `json:"fileShares,omitempty"` // Labels: Resource labels to represent user provided metadata. Labels map[string]string `json:"labels,omitempty"` - // Name: Output only. The resource name of the instance, in the - // format - // projects/{project_id}/locations/{location_id}/instances/{instan - // ce_id}. + // Name: Output only. The resource name of the instance, in the format + // projects/{project_id}/locations/{location_id}/instances/{instance_id}. Name string `json:"name,omitempty"` - // Networks: VPC networks to which the instance is connected. - // For this version, only a single network is supported. + // Networks: VPC networks to which the instance is connected. For this + // version, only a single network is supported. Networks []*NetworkConfig `json:"networks,omitempty"` // State: Output only. The instance state. @@ -1371,17 +795,14 @@ type Instance struct { // "CREATING" - The instance is being created. // "READY" - The instance is available for use. // "REPAIRING" - Work is being done on the instance. You can get - // further details from the - // `statusMessage` field of the `Instance` resource. + // further details from the `statusMessage` field of the `Instance` + // resource. // "DELETING" - The instance is shutting down. // "ERROR" - The instance is experiencing an issue and might be - // unusable. You can get - // further details from the `statusMessage` field of the - // `Instance` - // resource. + // unusable. You can get further details from the `statusMessage` field + // of the `Instance` resource. 
// "RESTORING" - The instance is restoring a snapshot or backup to an - // existing file share - // and may be unusable during this time. + // existing file share and may be unusable during this time. State string `json:"state,omitempty"` // StatusMessage: Output only. Additional information about the instance @@ -1396,19 +817,14 @@ type Instance struct { // this tier. // "PREMIUM" - PREMIUM tier. BASIC_SSD is the preferred term for this // tier. - // "BASIC_HDD" - BASIC instances offer a maximum capacity of 63.9 - // TB. - // BASIC_HDD is an alias for STANDARD Tier, offering - // economical + // "BASIC_HDD" - BASIC instances offer a maximum capacity of 63.9 TB. + // BASIC_HDD is an alias for STANDARD Tier, offering economical // performance backed by HDD. - // "BASIC_SSD" - BASIC instances offer a maximum capacity of 63.9 - // TB. - // BASIC_SSD is an alias for PREMIUM Tier, and offers - // improved + // "BASIC_SSD" - BASIC instances offer a maximum capacity of 63.9 TB. + // BASIC_SSD is an alias for PREMIUM Tier, and offers improved // performance backed by SSD. // "HIGH_SCALE_SSD" - HIGH_SCALE instances offer expanded capacity and - // performance scaling - // capabilities. + // performance scaling capabilities. Tier string `json:"tier,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1438,26 +854,29 @@ func (s *Instance) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// LinearBuckets: Describing buckets with constant width. -type LinearBuckets struct { - // NumFiniteBuckets: The number of finite buckets. With the underflow - // and overflow buckets, - // the total number of buckets is `num_finite_buckets` + 2. - // See comments on `bucket_options` for details. - NumFiniteBuckets int64 `json:"numFiniteBuckets,omitempty"` +// ListBackupsResponse: ListBackupsResponse is the result of +// ListBackupsRequest. +type ListBackupsResponse struct { + // Backups: A list of backups in the project for the specified location. + // If the {location} value in the request is "-", the response contains + // a list of backups from all locations. If any location is unreachable, + // the response will only return backups in reachable locations and the + // "unreachable" field will be populated with a list of unreachable + // locations. + Backups []*Backup `json:"backups,omitempty"` + + // NextPageToken: The token you can use to retrieve the next page of + // results. Not returned if there are no more results in the list. + NextPageToken string `json:"nextPageToken,omitempty"` - // Offset: The i'th linear bucket covers the interval - // [offset + (i-1) * width, offset + i * width) - // where i ranges from 1 to num_finite_buckets, inclusive. - Offset float64 `json:"offset,omitempty"` + // Unreachable: Locations that could not be reached. + Unreachable []string `json:"unreachable,omitempty"` - // Width: The i'th linear bucket covers the interval - // [offset + (i-1) * width, offset + i * width) - // where i ranges from 1 to num_finite_buckets, inclusive. - // Must be strictly positive. - Width float64 `json:"width,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "NumFiniteBuckets") to + // ForceSendFields is a list of field names (e.g. "Backups") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -1465,57 +884,34 @@ type LinearBuckets struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NumFiniteBuckets") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Backups") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *LinearBuckets) MarshalJSON() ([]byte, error) { - type NoMethod LinearBuckets +func (s *ListBackupsResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListBackupsResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -func (s *LinearBuckets) UnmarshalJSON(data []byte) error { - type NoMethod LinearBuckets - var s1 struct { - Offset gensupport.JSONFloat64 `json:"offset"` - Width gensupport.JSONFloat64 `json:"width"` - *NoMethod - } - s1.NoMethod = (*NoMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.Offset = float64(s1.Offset) - s.Width = float64(s1.Width) - return nil -} - // ListInstancesResponse: ListInstancesResponse is the result of // ListInstancesRequest. type ListInstancesResponse struct { // Instances: A list of instances in the project for the specified - // location. - // - // If the {location} value in the request is "-", the response contains - // a list - // of instances from all locations. If any location is unreachable, - // the - // response will only return instances in reachable locations and - // the - // "unreachable" field will be populated with a list of unreachable - // locations. + // location. If the {location} value in the request is "-", the response + // contains a list of instances from all locations. If any location is + // unreachable, the response will only return instances in reachable + // locations and the "unreachable" field will be populated with a list + // of unreachable locations. Instances []*Instance `json:"instances,omitempty"` // NextPageToken: The token you can use to retrieve the next page of - // results. Not returned - // if there are no more results in the list. + // results. Not returned if there are no more results in the list. NextPageToken string `json:"nextPageToken,omitempty"` // Unreachable: Locations that could not be reached. @@ -1625,13 +1021,11 @@ func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { // Location: A resource that represents Google Cloud Platform location. type Location struct { // DisplayName: The friendly name for this location, typically a nearby - // city name. - // For example, "Tokyo". + // city name. For example, "Tokyo". DisplayName string `json:"displayName,omitempty"` // Labels: Cross-service attributes for the location. 
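
`ListBackupsResponse` (and the reflowed `ListInstancesResponse`) follow the usual list-method contract documented above: `NextPageToken` is empty on the last page, and `Unreachable` lists locations that could not be queried when `{location}` is `-`. A sketch of the pagination loop a caller would drive; `listBackups` is a hypothetical stand-in for the generated List call.

```go
package main

import "fmt"

type listBackupsResponse struct {
	Backups       []string // backup names only, for brevity
	NextPageToken string
	Unreachable   []string
}

// listBackups stands in for the generated List call; this sketch returns two
// pages so the loop below has something to iterate.
func listBackups(parent, pageToken string) (listBackupsResponse, error) {
	if pageToken == "" {
		return listBackupsResponse{Backups: []string{"b1", "b2"}, NextPageToken: "page2"}, nil
	}
	return listBackupsResponse{Backups: []string{"b3"}}, nil
}

func main() {
	parent := "projects/my-project/locations/-"
	var all []string
	token := ""
	for {
		resp, err := listBackups(parent, token)
		if err != nil {
			break
		}
		all = append(all, resp.Backups...)
		if len(resp.Unreachable) > 0 {
			fmt.Println("skipped locations:", resp.Unreachable)
		}
		// An empty NextPageToken means there are no more results.
		if resp.NextPageToken == "" {
			break
		}
		token = resp.NextPageToken
	}
	fmt.Println(all)
}
```
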
For example - // - // {"cloud.googleapis.com/region": "us-east1"} + // {"cloud.googleapis.com/region": "us-east1"} Labels map[string]string `json:"labels,omitempty"` // LocationId: The canonical id for this location. For example: @@ -1639,13 +1033,12 @@ type Location struct { LocationId string `json:"locationId,omitempty"` // Metadata: Service-specific metadata. For example the available - // capacity at the given - // location. + // capacity at the given location. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: Resource name for the location, which may vary between - // implementations. - // For example: "projects/example-project/locations/us-east1" + // implementations. For example: + // "projects/example-project/locations/us-east1" Name string `json:"name,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1675,342 +1068,37 @@ func (s *Location) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// LogEntry: An individual log entry. -type LogEntry struct { - // HttpRequest: Optional. Information about the HTTP request associated - // with this - // log entry, if applicable. - HttpRequest *HttpRequest `json:"httpRequest,omitempty"` - - // InsertId: A unique ID for the log entry used for deduplication. If - // omitted, - // the implementation will generate one based on operation_id. - InsertId string `json:"insertId,omitempty"` - - // Labels: A set of user-defined (key, value) data that provides - // additional - // information about the log entry. - Labels map[string]string `json:"labels,omitempty"` - - // Name: Required. The log to which this log entry belongs. Examples: - // "syslog", - // "book_log". - Name string `json:"name,omitempty"` +// NetworkConfig: Network configuration for the instance. +type NetworkConfig struct { + // IpAddresses: Output only. IPv4 addresses in the format {octet + // 1}.{octet 2}.{octet 3}.{octet 4} or IPv6 addresses in the format + // {block 1}:{block 2}:{block 3}:{block 4}:{block 5}:{block 6}:{block + // 7}:{block 8}. + IpAddresses []string `json:"ipAddresses,omitempty"` - // Operation: Optional. Information about an operation associated with - // the log entry, if - // applicable. - Operation *LogEntryOperation `json:"operation,omitempty"` - - // ProtoPayload: The log entry payload, represented as a protocol buffer - // that is - // expressed as a JSON object. The only accepted type currently - // is - // AuditLog. - ProtoPayload googleapi.RawMessage `json:"protoPayload,omitempty"` - - // Severity: The severity of the log entry. The default value - // is - // `LogSeverity.DEFAULT`. + // Modes: Internet protocol versions for which the instance has IP + // addresses assigned. For this version, only MODE_IPV4 is supported. // // Possible values: - // "DEFAULT" - (0) The log entry has no assigned severity level. - // "DEBUG" - (100) Debug or trace information. - // "INFO" - (200) Routine information, such as ongoing status or - // performance. - // "NOTICE" - (300) Normal but significant events, such as start up, - // shut down, or - // a configuration change. - // "WARNING" - (400) Warning events might cause problems. - // "ERROR" - (500) Error events are likely to cause problems. - // "CRITICAL" - (600) Critical events cause more severe problems or - // outages. - // "ALERT" - (700) A person must take an action immediately. - // "EMERGENCY" - (800) One or more systems are unusable. 
- Severity string `json:"severity,omitempty"` - - // SourceLocation: Optional. Source code location information associated - // with the log entry, - // if any. - SourceLocation *LogEntrySourceLocation `json:"sourceLocation,omitempty"` - - // StructPayload: The log entry payload, represented as a structure - // that - // is expressed as a JSON object. - StructPayload googleapi.RawMessage `json:"structPayload,omitempty"` - - // TextPayload: The log entry payload, represented as a Unicode string - // (UTF-8). - TextPayload string `json:"textPayload,omitempty"` - - // Timestamp: The time the event described by the log entry occurred. - // If - // omitted, defaults to operation start time. - Timestamp string `json:"timestamp,omitempty"` - - // Trace: Optional. Resource name of the trace associated with the log - // entry, if any. - // If this field contains a relative resource name, you can assume the - // name is - // relative to `//tracing.googleapis.com`. - // Example: - // `projects/my-projectid/traces/06796866738c859f2f19b7cfb321482 - // 4` - Trace string `json:"trace,omitempty"` - - // ForceSendFields is a list of field names (e.g. "HttpRequest") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "HttpRequest") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *LogEntry) MarshalJSON() ([]byte, error) { - type NoMethod LogEntry - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// LogEntryOperation: Additional information about a potentially -// long-running operation with which -// a log entry is associated. -type LogEntryOperation struct { - // First: Optional. Set this to True if this is the first log entry in - // the operation. - First bool `json:"first,omitempty"` - - // Id: Optional. An arbitrary operation identifier. Log entries with - // the - // same identifier are assumed to be part of the same operation. - Id string `json:"id,omitempty"` - - // Last: Optional. Set this to True if this is the last log entry in the - // operation. - Last bool `json:"last,omitempty"` - - // Producer: Optional. An arbitrary producer identifier. The combination - // of - // `id` and `producer` must be globally unique. Examples for - // `producer`: - // "MyDivision.MyBigCompany.com", - // "github.com/MyProject/MyApplication". - Producer string `json:"producer,omitempty"` - - // ForceSendFields is a list of field names (e.g. "First") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "First") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *LogEntryOperation) MarshalJSON() ([]byte, error) { - type NoMethod LogEntryOperation - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// LogEntrySourceLocation: Additional information about the source code -// location that produced the log -// entry. -type LogEntrySourceLocation struct { - // File: Optional. Source file name. Depending on the runtime - // environment, this - // might be a simple name or a fully-qualified name. - File string `json:"file,omitempty"` - - // Function: Optional. Human-readable name of the function or method - // being invoked, with - // optional context such as the class or package name. This information - // may be - // used in contexts such as the logs viewer, where a file and line - // number are - // less meaningful. The format can vary by language. For - // example: - // `qual.if.ied.Class.method` (Java), `dir/package.func` (Go), - // `function` - // (Python). - Function string `json:"function,omitempty"` - - // Line: Optional. Line within the source file. 1-based; 0 indicates no - // line number - // available. - Line int64 `json:"line,omitempty,string"` - - // ForceSendFields is a list of field names (e.g. "File") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "File") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *LogEntrySourceLocation) MarshalJSON() ([]byte, error) { - type NoMethod LogEntrySourceLocation - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// MetricValue: Represents a single metric value. -type MetricValue struct { - // BoolValue: A boolean value. - BoolValue bool `json:"boolValue,omitempty"` - - // DistributionValue: A distribution value. - DistributionValue *Distribution `json:"distributionValue,omitempty"` - - // DoubleValue: A double precision floating point value. - DoubleValue float64 `json:"doubleValue,omitempty"` - - // EndTime: The end of the time period over which this metric value's - // measurement - // applies. - EndTime string `json:"endTime,omitempty"` - - // Int64Value: A signed 64-bit integer value. - Int64Value int64 `json:"int64Value,omitempty,string"` - - // Labels: The labels describing the metric value. 
- // See comments on google.api.servicecontrol.v1.Operation.labels for - // the overriding relationship. - // Note that this map must not contain monitored resource labels. - Labels map[string]string `json:"labels,omitempty"` - - // MoneyValue: A money value. - MoneyValue *Money `json:"moneyValue,omitempty"` - - // StartTime: The start of the time period over which this metric - // value's measurement - // applies. The time period has different semantics for different - // metric - // types (cumulative, delta, and gauge). See the metric - // definition - // documentation in the service configuration for details. - StartTime string `json:"startTime,omitempty"` - - // StringValue: A text string value. - StringValue string `json:"stringValue,omitempty"` - - // ForceSendFields is a list of field names (e.g. "BoolValue") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "BoolValue") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *MetricValue) MarshalJSON() ([]byte, error) { - type NoMethod MetricValue - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *MetricValue) UnmarshalJSON(data []byte) error { - type NoMethod MetricValue - var s1 struct { - DoubleValue gensupport.JSONFloat64 `json:"doubleValue"` - *NoMethod - } - s1.NoMethod = (*NoMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.DoubleValue = float64(s1.DoubleValue) - return nil -} - -// MetricValueSet: Represents a set of metric values in the same -// metric. -// Each metric value in the set should have a unique combination of -// start time, -// end time, and label values. -type MetricValueSet struct { - // MetricName: The metric name defined in the service configuration. - MetricName string `json:"metricName,omitempty"` - - // MetricValues: The values in this metric. - MetricValues []*MetricValue `json:"metricValues,omitempty"` - - // ForceSendFields is a list of field names (e.g. "MetricName") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "MetricName") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *MetricValueSet) MarshalJSON() ([]byte, error) { - type NoMethod MetricValueSet - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Money: Represents an amount of money with its currency type. -type Money struct { - // CurrencyCode: The 3-letter currency code defined in ISO 4217. - CurrencyCode string `json:"currencyCode,omitempty"` + // "ADDRESS_MODE_UNSPECIFIED" - Internet protocol not set. + // "MODE_IPV4" - Use the IPv4 internet protocol. + Modes []string `json:"modes,omitempty"` - // Nanos: Number of nano (10^-9) units of the amount. - // The value must be between -999,999,999 and +999,999,999 inclusive. - // If `units` is positive, `nanos` must be positive or zero. - // If `units` is zero, `nanos` can be positive, zero, or negative. - // If `units` is negative, `nanos` must be negative or zero. - // For example $-1.75 is represented as `units`=-1 and - // `nanos`=-750,000,000. - Nanos int64 `json:"nanos,omitempty"` + // Network: The name of the Google Compute Engine [VPC + // network](/compute/docs/networks-and-firewalls#networks) to which the + // instance is connected. + Network string `json:"network,omitempty"` - // Units: The whole units of the amount. - // For example if `currencyCode` is "USD", then 1 unit is one US - // dollar. - Units int64 `json:"units,omitempty,string"` + // ReservedIpRange: A /29 CIDR block for Basic or a /23 CIDR block for + // High Scale in one of the [internal IP address + // ranges](https://www.arin.net/knowledge/address_filters.html) that + // identifies the range of IP addresses reserved for this instance. For + // example, 10.0.0.0/29 or 192.168.0.0/23. The range you specify can't + // overlap with either existing subnets or assigned IP address ranges + // for other Cloud Filestore instances in the selected VPC network. + ReservedIpRange string `json:"reservedIpRange,omitempty"` - // ForceSendFields is a list of field names (e.g. "CurrencyCode") to + // ForceSendFields is a list of field names (e.g. "IpAddresses") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2018,7 +1106,7 @@ type Money struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CurrencyCode") to include + // NullFields is a list of field names (e.g. "IpAddresses") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -2027,53 +1115,58 @@ type Money struct { NullFields []string `json:"-"` } -func (s *Money) MarshalJSON() ([]byte, error) { - type NoMethod Money +func (s *NetworkConfig) MarshalJSON() ([]byte, error) { + type NoMethod NetworkConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkConfig: Network configuration for the instance. -type NetworkConfig struct { - // IpAddresses: Output only. IPv4 addresses in the format - // {octet 1}.{octet 2}.{octet 3}.{octet 4} or IPv6 addresses in the - // format - // {block 1}:{block 2}:{block 3}:{block 4}:{block 5}:{block - // 6}:{block - // 7}:{block 8}. 
- IpAddresses []string `json:"ipAddresses,omitempty"` - - // Modes: Internet protocol versions for which the instance has IP - // addresses - // assigned. For this version, only MODE_IPV4 is supported. +// NfsExportOptions: NFS export options specifications. +type NfsExportOptions struct { + // AccessMode: Either READ_ONLY, for allowing only read requests on the + // exported directory, or READ_WRITE, for allowing both read and write + // requests. The default is READ_WRITE. // // Possible values: - // "ADDRESS_MODE_UNSPECIFIED" - Internet protocol not set. - // "MODE_IPV4" - Use the IPv4 internet protocol. - Modes []string `json:"modes,omitempty"` - - // Network: The name of the Google Compute Engine - // [VPC network](/compute/docs/networks-and-firewalls#networks) to which - // the - // instance is connected. - Network string `json:"network,omitempty"` - - // ReservedIpRange: A /29 CIDR block for Basic or a /23 CIDR block for - // High Scale in one of the - // [internal IP - // address - // ranges](https://www.arin.net/knowledge/address_filters.html) - // that - // identifies the range of IP addresses reserved for this instance. - // For - // example, 10.0.0.0/29 or 192.168.0.0/23. The range you specify can't - // overlap - // with either existing subnets or assigned IP address ranges for other - // Cloud - // Filestore instances in the selected VPC network. - ReservedIpRange string `json:"reservedIpRange,omitempty"` - - // ForceSendFields is a list of field names (e.g. "IpAddresses") to + // "ACCESS_MODE_UNSPECIFIED" - AccessMode not set. + // "READ_ONLY" - The client can only read the file share. + // "READ_WRITE" - The client can read and write the file share + // (default). + AccessMode string `json:"accessMode,omitempty"` + + // AnonGid: An integer representing the anonymous group id with a + // default value of 65534. Anon_gid may only be set with squash_mode of + // ROOT_SQUASH. An error will be returned if this field is specified for + // other squash_mode settings. + AnonGid int64 `json:"anonGid,omitempty,string"` + + // AnonUid: An integer representing the anonymous user id with a default + // value of 65534. Anon_uid may only be set with squash_mode of + // ROOT_SQUASH. An error will be returned if this field is specified for + // other squash_mode settings. + AnonUid int64 `json:"anonUid,omitempty,string"` + + // IpRanges: List of either an IPv4 addresses in the format {octet + // 1}.{octet 2}.{octet 3}.{octet 4} or CIDR ranges in the format {octet + // 1}.{octet 2}.{octet 3}.{octet 4}/{mask size} which may mount the file + // share. Overlapping IP ranges are not allowed, both within and across + // NfsExportOptions. An error will be returned. The limit is 64 IP + // ranges/addresses for each FileShareConfig among all NfsExportOptions. + IpRanges []string `json:"ipRanges,omitempty"` + + // SquashMode: Either NO_ROOT_SQUASH, for allowing root access on the + // exported directory, or ROOT_SQUASH, for not allowing root access. The + // default is NO_ROOT_SQUASH. + // + // Possible values: + // "SQUASH_MODE_UNSPECIFIED" - SquashMode not set. + // "NO_ROOT_SQUASH" - The Root user has root access to the file share + // (default). + // "ROOT_SQUASH" - The Root user has squashed access to the anonymous + // uid/gid. + SquashMode string `json:"squashMode,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AccessMode") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2081,68 +1174,54 @@ type NetworkConfig struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IpAddresses") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "AccessMode") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NetworkConfig) MarshalJSON() ([]byte, error) { - type NoMethod NetworkConfig +func (s *NfsExportOptions) MarshalJSON() ([]byte, error) { + type NoMethod NfsExportOptions raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is -// the result of a -// network API call. +// the result of a network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in - // progress. - // If `true`, the operation is completed, and either `error` or - // `response` is - // available. + // progress. If `true`, the operation is completed, and either `error` + // or `response` is available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. Error *Status `json:"error,omitempty"` - // Metadata: Service-specific metadata associated with the operation. - // It typically - // contains progress information and common metadata such as create - // time. - // Some services might not provide such metadata. Any method that - // returns a - // long-running operation should document the metadata type, if any. + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same - // service that - // originally returns it. If you use the default HTTP mapping, - // the - // `name` should be a resource name ending with + // service that originally returns it. If you use the default HTTP + // mapping, the `name` should be a resource name ending with // `operations/{unique_id}`. Name string `json:"name,omitempty"` - // Response: The normal response of the operation in case of success. - // If the original - // method returns no data on success, such as `Delete`, the response - // is - // `google.protobuf.Empty`. If the original method is - // standard - // `Get`/`Create`/`Update`, the response should be the resource. For - // other - // methods, the response should have the type `XxxResponse`, where - // `Xxx` - // is the original method name. 
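The NfsExportOptions type added above encodes its constraints in the field docs: anon_uid/anon_gid are only valid together with SquashMode ROOT_SQUASH, and at most 64 IP ranges/addresses are allowed per FileShareConfig. A minimal, illustrative sketch of constructing such options (not part of the vendored diff), assuming the package is imported as file from google.golang.org/api/file/v1beta1:

package main

import (
	"fmt"

	file "google.golang.org/api/file/v1beta1" // assumed import path for this vendored package
)

func main() {
	// Root-squashed export: AnonUid/AnonGid may only be set together with
	// SquashMode ROOT_SQUASH, per the field documentation above.
	opts := &file.NfsExportOptions{
		AccessMode: "READ_WRITE",
		SquashMode: "ROOT_SQUASH",
		AnonUid:    65534,
		AnonGid:    65534,
		// At most 64 IP ranges/addresses per FileShareConfig across all NfsExportOptions.
		IpRanges: []string{"10.0.0.0/29", "192.168.0.10"},
	}
	fmt.Printf("%+v\n", opts)
}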
For example, if the original method - // name - // is `TakeSnapshot()`, the inferred response type - // is - // `TakeSnapshotResponse`. + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as `Delete`, the + // response is `google.protobuf.Empty`. If the original method is + // standard `Get`/`Create`/`Update`, the response should be the + // resource. For other methods, the response should have the type + // `XxxResponse`, where `Xxx` is the original method name. For example, + // if the original method name is `TakeSnapshot()`, the inferred + // response type is `TakeSnapshotResponse`. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2179,12 +1258,9 @@ type OperationMetadata struct { ApiVersion string `json:"apiVersion,omitempty"` // CancelRequested: [Output only] Identifies whether the user has - // requested cancellation - // of the operation. Operations that have successfully been - // cancelled - // have Operation.error value with a google.rpc.Status.code of - // 1, - // corresponding to `Code.CANCELLED`. + // requested cancellation of the operation. Operations that have + // successfully been cancelled have Operation.error value with a + // google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`. CancelRequested bool `json:"cancelRequested,omitempty"` // CreateTime: [Output only] The time the operation was created. @@ -2227,32 +1303,22 @@ func (s *OperationMetadata) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// QuotaProperties: Represents the properties needed for quota -// operations. -type QuotaProperties struct { - // QuotaMode: Quota mode for this operation. - // - // Possible values: - // "ACQUIRE" - Decreases available quota by the cost specified for the - // operation. - // If cost is higher than available quota, operation fails and - // returns - // error. - // "ACQUIRE_BEST_EFFORT" - Decreases available quota by the cost - // specified for the operation. - // If cost is higher than available quota, operation does not fail - // and - // available quota goes down to zero but it returns error. - // "CHECK" - Does not change any available quota. Only checks if there - // is enough - // quota. - // No lock is placed on the checked tokens neither. - // "RELEASE" - Increases available quota by the operation cost - // specified for the - // operation. - QuotaMode string `json:"quotaMode,omitempty"` - - // ForceSendFields is a list of field names (e.g. "QuotaMode") to +// RestoreInstanceRequest: RestoreInstanceRequest restores an existing +// instances's file share from a snapshot or backup. +type RestoreInstanceRequest struct { + // FileShare: Required. Name of the file share in the Cloud Filestore + // instance that the snapshot is being restored to. + FileShare string `json:"fileShare,omitempty"` + + // SourceBackup: The resource name of the backup, in the format + // projects/{project_id}/locations/{location_id}/backups/{backup_id}. + SourceBackup string `json:"sourceBackup,omitempty"` + + // SourceSnapshot: The resource name of the snapshot, in the format + // projects/{project_id}/locations/{location_id}/snapshots/{snapshot_id}. + SourceSnapshot string `json:"sourceSnapshot,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FileShare") to // unconditionally include in API requests. 
By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2260,7 +1326,7 @@ type QuotaProperties struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "QuotaMode") to include in + // NullFields is a list of field names (e.g. "FileShare") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -2269,52 +1335,34 @@ type QuotaProperties struct { NullFields []string `json:"-"` } -func (s *QuotaProperties) MarshalJSON() ([]byte, error) { - type NoMethod QuotaProperties +func (s *RestoreInstanceRequest) MarshalJSON() ([]byte, error) { + type NoMethod RestoreInstanceRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ReportRequest: Request message for the Report method. -type ReportRequest struct { - // Operations: Operations to be reported. - // - // Typically the service should report one operation per - // request. - // Putting multiple operations into a single request is allowed, but - // should - // be used only when multiple operations are natually available at the - // time - // of the report. - // - // There is no limit on the number of operations in the same - // ReportRequest, - // however the ReportRequest size should be no larger than 1MB. - // See - // ReportResponse.report_errors for partial failure behavior. - Operations []*GoogleApiServicecontrolV1Operation `json:"operations,omitempty"` - - // ServiceConfigId: Specifies which version of service config should be - // used to process the - // request. - // - // If unspecified or no matching version can be found, the - // latest one will be used. - ServiceConfigId string `json:"serviceConfigId,omitempty"` - - // ServiceName: The service name as specified in its service - // configuration. For - // example, - // "pubsub.googleapis.com". - // - // See - // [google.api.Service](https:// - // cloud.google.com/service-management/reference/rpc/google.api#google.ap - // i.Service) - // for the definition of a service name. - ServiceName string `json:"serviceName,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Operations") to +// Status: The `Status` type defines a logical error model that is +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). +type Status struct { + // Code: The status code, which should be an enum value of + // google.rpc.Code. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2322,8 +1370,8 @@ type ReportRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Operations") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -2331,291 +1379,13 @@ type ReportRequest struct { NullFields []string `json:"-"` } -func (s *ReportRequest) MarshalJSON() ([]byte, error) { - type NoMethod ReportRequest +func (s *Status) MarshalJSON() ([]byte, error) { + type NoMethod Status raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ResourceInfo: Describes a resource associated with this operation. -type ResourceInfo struct { - // ResourceContainer: The identifier of the parent of this resource - // instance. - // Must be in one of the following formats: - // - “projects/” - // - “folders/” - // - “organizations/” - ResourceContainer string `json:"resourceContainer,omitempty"` - - // ResourceLocation: The location of the resource. If not empty, the - // resource will be checked - // against location policy. The value must be a valid zone, region - // or - // multiregion. For example: "europe-west4" or - // "northamerica-northeast1-a" - ResourceLocation string `json:"resourceLocation,omitempty"` - - // ResourceName: Name of the resource. This is used for auditing - // purposes. - ResourceName string `json:"resourceName,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ResourceContainer") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ResourceContainer") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ResourceInfo) MarshalJSON() ([]byte, error) { - type NoMethod ResourceInfo - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. -// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. 
-// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). -type Status struct { - // Code: The status code, which should be an enum value of - // google.rpc.Code. - Code int64 `json:"code,omitempty"` - - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. - Details []googleapi.RawMessage `json:"details,omitempty"` - - // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Status) MarshalJSON() ([]byte, error) { - type NoMethod Status - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TraceSpan: A span represents a single operation within a trace. Spans -// can be -// nested to form a trace tree. Often, a trace contains a root span -// that describes the end-to-end latency, and one or more subspans -// for -// its sub-operations. A trace can also contain multiple root spans, -// or none at all. Spans do not need to be contiguous—there may -// be -// gaps or overlaps between spans in a trace. -type TraceSpan struct { - // Attributes: A set of attributes on the span. You can have up to 32 - // attributes per - // span. - Attributes *Attributes `json:"attributes,omitempty"` - - // ChildSpanCount: An optional number of child spans that were generated - // while this span - // was active. If set, allows implementation to detect missing child - // spans. - ChildSpanCount int64 `json:"childSpanCount,omitempty"` - - // DisplayName: A description of the span's operation (up to 128 - // bytes). - // Stackdriver Trace displays the description in the - // Google Cloud Platform Console. - // For example, the display name can be a qualified method name or a - // file name - // and a line number where the operation is called. A best practice is - // to use - // the same display name within an application and at the same call - // point. - // This makes it easier to correlate spans in different traces. - DisplayName *TruncatableString `json:"displayName,omitempty"` - - // EndTime: The end time of the span. On the client side, this is the - // time kept by - // the local machine where the span execution ends. On the server side, - // this - // is the time when the server application handler stops running. 
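The Operation and Status types above together define the long-running-operation error model used by these calls. A hedged sketch of how a caller might inspect a finished Operation; the checkOperation helper name and the assumption that the successful result decodes into a Backup are illustrative, not part of this package:

package example

import (
	"encoding/json"
	"fmt"

	file "google.golang.org/api/file/v1beta1" // assumed import path for this vendored package
)

// checkOperation inspects a long-running Operation using the Done, Error and
// Response fields defined above. Error carries the *Status error model.
func checkOperation(op *file.Operation) (*file.Backup, error) {
	if !op.Done {
		return nil, fmt.Errorf("operation %s still in progress", op.Name)
	}
	if op.Error != nil {
		return nil, fmt.Errorf("operation %s failed: code=%d: %s", op.Name, op.Error.Code, op.Error.Message)
	}
	// On success, Response holds the resulting resource as raw JSON; here we
	// assume this particular operation produced a Backup.
	var b file.Backup
	if err := json.Unmarshal(op.Response, &b); err != nil {
		return nil, err
	}
	return &b, nil
}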
- EndTime string `json:"endTime,omitempty"` - - // Name: The resource name of the span in the following format: - // - // projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/SPAN_ID is a unique - // identifier for a trace within a project; - // it is a 32-character hexadecimal encoding of a 16-byte - // array. - // - // [SPAN_ID] is a unique identifier for a span within a trace; it - // is a 16-character hexadecimal encoding of an 8-byte array. - Name string `json:"name,omitempty"` - - // ParentSpanId: The [SPAN_ID] of this span's parent span. If this is a - // root span, - // then this field must be empty. - ParentSpanId string `json:"parentSpanId,omitempty"` - - // SameProcessAsParentSpan: (Optional) Set this parameter to indicate - // whether this span is in - // the same process as its parent. If you do not set this - // parameter, - // Stackdriver Trace is unable to take advantage of this - // helpful - // information. - SameProcessAsParentSpan bool `json:"sameProcessAsParentSpan,omitempty"` - - // SpanId: The [SPAN_ID] portion of the span's resource name. - SpanId string `json:"spanId,omitempty"` - - // SpanKind: Distinguishes between spans generated in a particular - // context. For example, - // two spans with the same name may be distinguished using `CLIENT` - // (caller) - // and `SERVER` (callee) to identify an RPC call. - // - // Possible values: - // "SPAN_KIND_UNSPECIFIED" - Unspecified. Do NOT use as - // default. - // Implementations MAY assume SpanKind.INTERNAL to be default. - // "INTERNAL" - Indicates that the span is used internally. Default - // value. - // "SERVER" - Indicates that the span covers server-side handling of - // an RPC or other - // remote network request. - // "CLIENT" - Indicates that the span covers the client-side wrapper - // around an RPC or - // other remote request. - // "PRODUCER" - Indicates that the span describes producer sending a - // message to a broker. - // Unlike client and server, there is no direct critical path - // latency - // relationship between producer and consumer spans (e.g. publishing - // a - // message to a pubsub service). - // "CONSUMER" - Indicates that the span describes consumer receiving a - // message from a - // broker. Unlike client and server, there is no direct critical - // path - // latency relationship between producer and consumer spans (e.g. - // receiving - // a message from a pubsub service subscription). - SpanKind string `json:"spanKind,omitempty"` - - // StartTime: The start time of the span. On the client side, this is - // the time kept by - // the local machine where the span execution starts. On the server - // side, this - // is the time when the server's application handler starts running. - StartTime string `json:"startTime,omitempty"` - - // Status: An optional final status for this span. - Status *Status `json:"status,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Attributes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Attributes") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TraceSpan) MarshalJSON() ([]byte, error) { - type NoMethod TraceSpan - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TruncatableString: Represents a string that might be shortened to a -// specified length. -type TruncatableString struct { - // TruncatedByteCount: The number of bytes removed from the original - // string. If this - // value is 0, then the string was not shortened. - TruncatedByteCount int64 `json:"truncatedByteCount,omitempty"` - - // Value: The shortened string. For example, if the original string is - // 500 - // bytes long and the limit of the string is 128 bytes, then - // `value` contains the first 128 bytes of the 500-byte - // string. - // - // Truncation always happens on a UTF8 character boundary. If there - // are multi-byte characters in the string, then the length of - // the - // shortened string might be less than the size limit. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "TruncatedByteCount") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "TruncatedByteCount") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TruncatableString) MarshalJSON() ([]byte, error) { - type NoMethod TruncatableString - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// method id "file.projects.locations.get": +// method id "file.projects.locations.get": type ProjectsLocationsGetCall struct { s *Service @@ -2670,7 +1440,7 @@ func (c *ProjectsLocationsGetCall) Header() http.Header { func (c *ProjectsLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2778,87 +1548,917 @@ func (r *ProjectsLocationsService) List(name string) *ProjectsLocationsListCall return c } -// Filter sets the optional parameter "filter": The standard list -// filter. -func (c *ProjectsLocationsListCall) Filter(filter string) *ProjectsLocationsListCall { - c.urlParams_.Set("filter", filter) - return c -} +// Filter sets the optional parameter "filter": The standard list +// filter. 
+func (c *ProjectsLocationsListCall) Filter(filter string) *ProjectsLocationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeUnrevealedLocations sets the optional parameter +// "includeUnrevealedLocations": If true, the returned list will include +// locations which are not yet revealed. +func (c *ProjectsLocationsListCall) IncludeUnrevealedLocations(includeUnrevealedLocations bool) *ProjectsLocationsListCall { + c.urlParams_.Set("includeUnrevealedLocations", fmt.Sprint(includeUnrevealedLocations)) + return c +} + +// PageSize sets the optional parameter "pageSize": The standard list +// page size. +func (c *ProjectsLocationsListCall) PageSize(pageSize int64) *ProjectsLocationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The standard list +// page token. +func (c *ProjectsLocationsListCall) PageToken(pageToken string) *ProjectsLocationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsListCall) Context(ctx context.Context) *ProjectsLocationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}/locations") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "file.projects.locations.list" call. +// Exactly one of *ListLocationsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListLocationsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsListCall) Do(opts ...googleapi.CallOption) (*ListLocationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListLocationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists information about the supported locations for this service.", + // "flatPath": "v1beta1/projects/{projectsId}/locations", + // "httpMethod": "GET", + // "id": "file.projects.locations.list", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "filter": { + // "description": "The standard list filter.", + // "location": "query", + // "type": "string" + // }, + // "includeUnrevealedLocations": { + // "description": "If true, the returned list will include locations which are not yet revealed.", + // "location": "query", + // "type": "boolean" + // }, + // "name": { + // "description": "The resource that owns the locations collection, if applicable.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "The standard list page size.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The standard list page token.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1beta1/{+name}/locations", + // "response": { + // "$ref": "ListLocationsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsListCall) Pages(ctx context.Context, f func(*ListLocationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "file.projects.locations.backups.create": + +type ProjectsLocationsBackupsCreateCall struct { + s *Service + parent string + backup *Backup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a backup. +func (r *ProjectsLocationsBackupsService) Create(parent string, backup *Backup) *ProjectsLocationsBackupsCreateCall { + c := &ProjectsLocationsBackupsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.backup = backup + return c +} + +// BackupId sets the optional parameter "backupId": Required. The ID to +// use for the backup. The ID must be unique within the specified +// project and location. 
This value must start with a lowercase letter +// followed by up to 62 lowercase letters, numbers, or hyphens, and +// cannot end with a hyphen. +func (c *ProjectsLocationsBackupsCreateCall) BackupId(backupId string) *ProjectsLocationsBackupsCreateCall { + c.urlParams_.Set("backupId", backupId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsBackupsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsBackupsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsBackupsCreateCall) Context(ctx context.Context) *ProjectsLocationsBackupsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsBackupsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBackupsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backup) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+parent}/backups") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "file.projects.locations.backups.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBackupsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a backup.", + // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/backups", + // "httpMethod": "POST", + // "id": "file.projects.locations.backups.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "backupId": { + // "description": "Required. The ID to use for the backup. The ID must be unique within the specified project and location. This value must start with a lowercase letter followed by up to 62 lowercase letters, numbers, or hyphens, and cannot end with a hyphen.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The backup's project and location, in the format projects/{project_id}/locations/{location}. In Cloud Filestore, backup locations map to GCP regions, for example **us-west1**.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta1/{+parent}/backups", + // "request": { + // "$ref": "Backup" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "file.projects.locations.backups.delete": + +type ProjectsLocationsBackupsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a backup. +func (r *ProjectsLocationsBackupsService) Delete(name string) *ProjectsLocationsBackupsDeleteCall { + c := &ProjectsLocationsBackupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsBackupsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsBackupsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsBackupsDeleteCall) Context(ctx context.Context) *ProjectsLocationsBackupsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
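The backups.create call builder above chains BackupId, Context and Do. A possible usage sketch, illustrative only: file.NewService and the svc.Projects.Locations.Backups accessor are not shown in these hunks and are assumed here, and the project, location and backup ID values are placeholders:

package main

import (
	"context"
	"log"

	file "google.golang.org/api/file/v1beta1" // assumed import path for this vendored package
)

func main() {
	ctx := context.Background()
	// NewService and the Projects.Locations.Backups accessor are assumed; only
	// the call builder methods below appear in the hunks above.
	svc, err := file.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	parent := "projects/my-project/locations/us-west1" // placeholder
	op, err := svc.Projects.Locations.Backups.
		Create(parent, &file.Backup{}). // Backup fields omitted; see the Backup type in this package
		BackupId("nightly-backup-01").  // must be unique within the project and location
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("started long-running operation %s", op.Name)
}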
+func (c *ProjectsLocationsBackupsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBackupsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "file.projects.locations.backups.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBackupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a backup.", + // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/backups/{backupsId}", + // "httpMethod": "DELETE", + // "id": "file.projects.locations.backups.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The backup resource name, in the format projects/{project_id}/locations/{location}/backups/{backup_id}", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/backups/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta1/{+name}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "file.projects.locations.backups.get": + +type ProjectsLocationsBackupsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the details of a specific backup. +func (r *ProjectsLocationsBackupsService) Get(name string) *ProjectsLocationsBackupsGetCall { + c := &ProjectsLocationsBackupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsBackupsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsBackupsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsBackupsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsBackupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsBackupsGetCall) Context(ctx context.Context) *ProjectsLocationsBackupsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsBackupsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBackupsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "file.projects.locations.backups.get" call. +// Exactly one of *Backup or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Backup.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsLocationsBackupsGetCall) Do(opts ...googleapi.CallOption) (*Backup, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Backup{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the details of a specific backup.", + // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/backups/{backupsId}", + // "httpMethod": "GET", + // "id": "file.projects.locations.backups.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The backup resource name, in the format projects/{project_id}/locations/{location}/backups/{backup_id}.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/backups/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta1/{+name}", + // "response": { + // "$ref": "Backup" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "file.projects.locations.backups.list": + +type ProjectsLocationsBackupsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists all backups in a project for either a specified location +// or for all locations. +func (r *ProjectsLocationsBackupsService) List(parent string) *ProjectsLocationsBackupsListCall { + c := &ProjectsLocationsBackupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Filter sets the optional parameter "filter": List filter. +func (c *ProjectsLocationsBackupsListCall) Filter(filter string) *ProjectsLocationsBackupsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sort results. +// Supported values are "name", "name desc" or "" (unsorted). +func (c *ProjectsLocationsBackupsListCall) OrderBy(orderBy string) *ProjectsLocationsBackupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of items to return. +func (c *ProjectsLocationsBackupsListCall) PageSize(pageSize int64) *ProjectsLocationsBackupsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The +// next_page_token value to use if there are additional results to +// retrieve for this list request. +func (c *ProjectsLocationsBackupsListCall) PageToken(pageToken string) *ProjectsLocationsBackupsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsBackupsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsBackupsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
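Editorial aside, not part of the vendored change above: the IfNoneMatch setter and the StatusNotModified handling in Do support conditional reads. A minimal sketch of that pattern follows, assuming the standard google.golang.org/api import path and NewService constructor, a placeholder backup name, and an ETag captured from some earlier response (where that value comes from is application specific).

package main

import (
	"context"
	"fmt"
	"log"

	file "google.golang.org/api/file/v1beta1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := file.NewService(ctx) // uses Application Default Credentials by default
	if err != nil {
		log.Fatal(err)
	}

	name := "projects/my-project/locations/us-west1/backups/my-backup" // placeholder
	lastETag := "..."                                                  // ETag saved from an earlier response; placeholder

	// With IfNoneMatch set, an unchanged resource comes back as a 304, which Do
	// surfaces as a *googleapi.Error that IsNotModified recognizes.
	b, err := svc.Projects.Locations.Backups.Get(name).IfNoneMatch(lastETag).Do()
	switch {
	case googleapi.IsNotModified(err):
		fmt.Println("backup unchanged since last fetch; reuse the cached copy")
	case err != nil:
		log.Fatal(err)
	default:
		fmt.Println("fetched backup:", b.Name)
	}
}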
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsBackupsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsBackupsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsBackupsListCall) Context(ctx context.Context) *ProjectsLocationsBackupsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsBackupsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBackupsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+parent}/backups") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "file.projects.locations.backups.list" call. +// Exactly one of *ListBackupsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListBackupsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsBackupsListCall) Do(opts ...googleapi.CallOption) (*ListBackupsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListBackupsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all backups in a project for either a specified location or for all locations.", + // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/backups", + // "httpMethod": "GET", + // "id": "file.projects.locations.backups.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "filter": { + // "description": "List filter.", + // "location": "query", + // "type": "string" + // }, + // "orderBy": { + // "description": "Sort results. 
Supported values are \"name\", \"name desc\" or \"\" (unsorted).", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "The maximum number of items to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The next_page_token value to use if there are additional results to retrieve for this list request.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The project and location for which to retrieve backup information, in the format projects/{project_id}/locations/{location}. In Cloud Filestore, backup locations map to GCP regions, for example **us-west1**. To retrieve backup information for all locations, use \"-\" for the {location} value.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta1/{+parent}/backups", + // "response": { + // "$ref": "ListBackupsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsBackupsListCall) Pages(ctx context.Context, f func(*ListBackupsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "file.projects.locations.backups.patch": -// IncludeUnrevealedLocations sets the optional parameter -// "includeUnrevealedLocations": If true, the returned list will include -// locations which are not yet -// revealed. -func (c *ProjectsLocationsListCall) IncludeUnrevealedLocations(includeUnrevealedLocations bool) *ProjectsLocationsListCall { - c.urlParams_.Set("includeUnrevealedLocations", fmt.Sprint(includeUnrevealedLocations)) - return c +type ProjectsLocationsBackupsPatchCall struct { + s *Service + name string + backup *Backup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// PageSize sets the optional parameter "pageSize": The standard list -// page size. -func (c *ProjectsLocationsListCall) PageSize(pageSize int64) *ProjectsLocationsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) +// Patch: Updates the settings of a specific backup. +func (r *ProjectsLocationsBackupsService) Patch(name string, backup *Backup) *ProjectsLocationsBackupsPatchCall { + c := &ProjectsLocationsBackupsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.backup = backup return c } -// PageToken sets the optional parameter "pageToken": The standard list -// page token. -func (c *ProjectsLocationsListCall) PageToken(pageToken string) *ProjectsLocationsListCall { - c.urlParams_.Set("pageToken", pageToken) +// UpdateMask sets the optional parameter "updateMask": Required. Mask +// of fields to update. At least one path must be supplied in this +// field. +func (c *ProjectsLocationsBackupsPatchCall) UpdateMask(updateMask string) *ProjectsLocationsBackupsPatchCall { + c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. 
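Editorial aside, not part of the vendored change: a minimal sketch of driving the generated backups List call and its Pages helper shown above. The import path, the NewService constructor, the Projects.Locations.Backups accessor chain, and the Backups/Name fields of ListBackupsResponse are assumptions based on the usual google.golang.org/api client conventions; the project value is a placeholder.

package main

import (
	"context"
	"fmt"
	"log"

	file "google.golang.org/api/file/v1beta1"
)

func main() {
	ctx := context.Background()
	svc, err := file.NewService(ctx) // uses Application Default Credentials by default
	if err != nil {
		log.Fatal(err)
	}

	// Per the "parent" parameter description above, "-" as the location lists
	// backups across all locations of the project.
	parent := "projects/my-project/locations/-"

	// Pages invokes the callback once per page and follows NextPageToken until
	// it is empty; returning a non-nil error from the callback stops early.
	call := svc.Projects.Locations.Backups.List(parent).PageSize(50)
	err = call.Pages(ctx, func(page *file.ListBackupsResponse) error {
		for _, b := range page.Backups {
			fmt.Println(b.Name)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}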
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsLocationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsListCall { +func (c *ProjectsLocationsBackupsPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsBackupsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsLocationsListCall) Context(ctx context.Context) *ProjectsLocationsListCall { +func (c *ProjectsLocationsBackupsPatchCall) Context(ctx context.Context) *ProjectsLocationsBackupsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsLocationsListCall) Header() http.Header { +func (c *ProjectsLocationsBackupsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsBackupsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backup) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}/locations") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -2869,14 +2469,14 @@ func (c *ProjectsLocationsListCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "file.projects.locations.list" call. -// Exactly one of *ListLocationsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListLocationsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsListCall) Do(opts ...googleapi.CallOption) (*ListLocationsResponse, error) { +// Do executes the "file.projects.locations.backups.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBackupsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -2895,7 +2495,7 @@ func (c *ProjectsLocationsListCall) Do(opts ...googleapi.CallOption) (*ListLocat if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListLocationsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -2907,46 +2507,34 @@ func (c *ProjectsLocationsListCall) Do(opts ...googleapi.CallOption) (*ListLocat } return ret, nil // { - // "description": "Lists information about the supported locations for this service.", - // "flatPath": "v1beta1/projects/{projectsId}/locations", - // "httpMethod": "GET", - // "id": "file.projects.locations.list", + // "description": "Updates the settings of a specific backup.", + // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/backups/{backupsId}", + // "httpMethod": "PATCH", + // "id": "file.projects.locations.backups.patch", // "parameterOrder": [ // "name" // ], // "parameters": { - // "filter": { - // "description": "The standard list filter.", - // "location": "query", - // "type": "string" - // }, - // "includeUnrevealedLocations": { - // "description": "If true, the returned list will include locations which are not yet\nrevealed.", - // "location": "query", - // "type": "boolean" - // }, // "name": { - // "description": "The resource that owns the locations collection, if applicable.", + // "description": "Output only. The resource name of the backup, in the format projects/{project_id}/locations/{location_id}/backups/{backup_id}.", // "location": "path", - // "pattern": "^projects/[^/]+$", + // "pattern": "^projects/[^/]+/locations/[^/]+/backups/[^/]+$", // "required": true, // "type": "string" // }, - // "pageSize": { - // "description": "The standard list page size.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "The standard list page token.", + // "updateMask": { + // "description": "Required. Mask of fields to update. At least one path must be supplied in this field.", + // "format": "google-fieldmask", // "location": "query", // "type": "string" // } // }, - // "path": "v1beta1/{+name}/locations", + // "path": "v1beta1/{+name}", + // "request": { + // "$ref": "Backup" + // }, // "response": { - // "$ref": "ListLocationsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -2955,27 +2543,6 @@ func (c *ProjectsLocationsListCall) Do(opts ...googleapi.CallOption) (*ListLocat } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *ProjectsLocationsListCall) Pages(ctx context.Context, f func(*ListLocationsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - // method id "file.projects.locations.instances.create": type ProjectsLocationsInstancesCreateCall struct { @@ -2996,13 +2563,10 @@ func (r *ProjectsLocationsInstancesService) Create(parent string, instance *Inst } // InstanceId sets the optional parameter "instanceId": Required. The ID -// of the instance to create. -// The ID must be unique within the specified project and -// location. -// -// This value must start with a lowercase letter followed by up to -// 62 -// lowercase letters, numbers, or hyphens, and cannot end with a hyphen. +// of the instance to create. The ID must be unique within the specified +// project and location. This value must start with a lowercase letter +// followed by up to 62 lowercase letters, numbers, or hyphens, and +// cannot end with a hyphen. func (c *ProjectsLocationsInstancesCreateCall) InstanceId(instanceId string) *ProjectsLocationsInstancesCreateCall { c.urlParams_.Set("instanceId", instanceId) return c @@ -3035,7 +2599,7 @@ func (c *ProjectsLocationsInstancesCreateCall) Header() http.Header { func (c *ProjectsLocationsInstancesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3108,12 +2672,12 @@ func (c *ProjectsLocationsInstancesCreateCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "instanceId": { - // "description": "Required. The ID of the instance to create.\nThe ID must be unique within the specified project and location.\n\nThis value must start with a lowercase letter followed by up to 62\nlowercase letters, numbers, or hyphens, and cannot end with a hyphen.", + // "description": "Required. The ID of the instance to create. The ID must be unique within the specified project and location. This value must start with a lowercase letter followed by up to 62 lowercase letters, numbers, or hyphens, and cannot end with a hyphen.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The instance's project and location, in the format\nprojects/{project_id}/locations/{location}. In Cloud Filestore,\nlocations map to GCP zones, for example **us-west1-b**.", + // "description": "Required. The instance's project and location, in the format projects/{project_id}/locations/{location}. 
In Cloud Filestore, locations map to GCP zones, for example **us-west1-b**.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -3178,7 +2742,7 @@ func (c *ProjectsLocationsInstancesDeleteCall) Header() http.Header { func (c *ProjectsLocationsInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3246,7 +2810,7 @@ func (c *ProjectsLocationsInstancesDeleteCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "Required. The instance resource name, in the format\nprojects/{project_id}/locations/{location}/instances/{instance_id}", + // "description": "Required. The instance resource name, in the format projects/{project_id}/locations/{location}/instances/{instance_id}", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", // "required": true, @@ -3319,7 +2883,7 @@ func (c *ProjectsLocationsInstancesGetCall) Header() http.Header { func (c *ProjectsLocationsInstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3390,7 +2954,7 @@ func (c *ProjectsLocationsInstancesGetCall) Do(opts ...googleapi.CallOption) (*I // ], // "parameters": { // "name": { - // "description": "Required. The instance resource name, in the format\nprojects/{project_id}/locations/{location}/instances/{instance_id}.", + // "description": "Required. The instance resource name, in the format projects/{project_id}/locations/{location}/instances/{instance_id}.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", // "required": true, @@ -3420,8 +2984,7 @@ type ProjectsLocationsInstancesListCall struct { } // List: Lists all instances in a project for either a specified -// location -// or for all locations. +// location or for all locations. func (r *ProjectsLocationsInstancesService) List(parent string) *ProjectsLocationsInstancesListCall { c := &ProjectsLocationsInstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -3449,8 +3012,8 @@ func (c *ProjectsLocationsInstancesListCall) PageSize(pageSize int64) *ProjectsL } // PageToken sets the optional parameter "pageToken": The -// next_page_token value to use if there are additional -// results to retrieve for this list request. +// next_page_token value to use if there are additional results to +// retrieve for this list request. 
func (c *ProjectsLocationsInstancesListCall) PageToken(pageToken string) *ProjectsLocationsInstancesListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -3493,7 +3056,7 @@ func (c *ProjectsLocationsInstancesListCall) Header() http.Header { func (c *ProjectsLocationsInstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3555,7 +3118,7 @@ func (c *ProjectsLocationsInstancesListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Lists all instances in a project for either a specified location\nor for all locations.", + // "description": "Lists all instances in a project for either a specified location or for all locations.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/instances", // "httpMethod": "GET", // "id": "file.projects.locations.instances.list", @@ -3580,12 +3143,12 @@ func (c *ProjectsLocationsInstancesListCall) Do(opts ...googleapi.CallOption) (* // "type": "integer" // }, // "pageToken": { - // "description": "The next_page_token value to use if there are additional\nresults to retrieve for this list request.", + // "description": "The next_page_token value to use if there are additional results to retrieve for this list request.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The project and location for which to retrieve instance information,\nin the format projects/{project_id}/locations/{location}. In Cloud\nFilestore, locations map to GCP zones, for example **us-west1-b**. To\nretrieve instance information for all locations, use \"-\" for the {location}\nvalue.", + // "description": "Required. The project and location for which to retrieve instance information, in the format projects/{project_id}/locations/{location}. In Cloud Filestore, locations map to GCP zones, for example **us-west1-b**. To retrieve instance information for all locations, use \"-\" for the {location} value.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -3644,15 +3207,9 @@ func (r *ProjectsLocationsInstancesService) Patch(name string, instance *Instanc } // UpdateMask sets the optional parameter "updateMask": Required. Mask -// of fields to update. At least one path must be supplied in -// this -// field. The elements of the repeated paths field may only include -// these -// fields: -// -// * "description" -// * "file_shares" -// * "labels" +// of fields to update. At least one path must be supplied in this +// field. 
The elements of the repeated paths field may only include +// these fields: * "description" * "file_shares" * "labels" func (c *ProjectsLocationsInstancesPatchCall) UpdateMask(updateMask string) *ProjectsLocationsInstancesPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -3685,7 +3242,7 @@ func (c *ProjectsLocationsInstancesPatchCall) Header() http.Header { func (c *ProjectsLocationsInstancesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3758,14 +3315,14 @@ func (c *ProjectsLocationsInstancesPatchCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "name": { - // "description": "Output only. The resource name of the instance, in the format\nprojects/{project_id}/locations/{location_id}/instances/{instance_id}.", + // "description": "Output only. The resource name of the instance, in the format projects/{project_id}/locations/{location_id}/instances/{instance_id}.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "Required. Mask of fields to update. At least one path must be supplied in this\nfield. The elements of the repeated paths field may only include these\nfields:\n\n* \"description\"\n* \"file_shares\"\n* \"labels\"", + // "description": "Required. Mask of fields to update. At least one path must be supplied in this field. The elements of the repeated paths field may only include these fields: * \"description\" * \"file_shares\" * \"labels\"", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -3785,6 +3342,147 @@ func (c *ProjectsLocationsInstancesPatchCall) Do(opts ...googleapi.CallOption) ( } +// method id "file.projects.locations.instances.restore": + +type ProjectsLocationsInstancesRestoreCall struct { + s *Service + name string + restoreinstancerequest *RestoreInstanceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Restore: Restores an existing instance's file share from a snapshot +// or backup. +func (r *ProjectsLocationsInstancesService) Restore(name string, restoreinstancerequest *RestoreInstanceRequest) *ProjectsLocationsInstancesRestoreCall { + c := &ProjectsLocationsInstancesRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.restoreinstancerequest = restoreinstancerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsInstancesRestoreCall) Fields(s ...googleapi.Field) *ProjectsLocationsInstancesRestoreCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsInstancesRestoreCall) Context(ctx context.Context) *ProjectsLocationsInstancesRestoreCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsInstancesRestoreCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsInstancesRestoreCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.restoreinstancerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}:restore") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "file.projects.locations.instances.restore" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsInstancesRestoreCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Restores an existing instance's file share from a snapshot or backup.", + // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:restore", + // "httpMethod": "POST", + // "id": "file.projects.locations.instances.restore", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The resource name of the instance, in the format projects/{project_id}/locations/{location_id}/instances/{instance_id}.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta1/{+name}:restore", + // "request": { + // "$ref": "RestoreInstanceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + // method id "file.projects.locations.operations.cancel": type ProjectsLocationsOperationsCancelCall struct { @@ -3797,23 +3495,15 @@ type ProjectsLocationsOperationsCancelCall struct { } // Cancel: Starts asynchronous cancellation on a long-running operation. 
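Editorial aside, not part of the vendored change: a sketch of invoking the new Restore call defined above and polling the returned long-running Operation through the operations Get call. The RestoreInstanceRequest field names (FileShare, SourceBackup) and the Operation's Done and Name fields are assumptions taken from the published Filestore v1beta1 surface rather than from this hunk; resource names are placeholders.

package main

import (
	"context"
	"log"
	"time"

	file "google.golang.org/api/file/v1beta1"
)

func main() {
	ctx := context.Background()
	svc, err := file.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	name := "projects/my-project/locations/us-west1-b/instances/my-instance" // placeholder
	req := &file.RestoreInstanceRequest{
		FileShare:    "my_share",                                                 // share on the instance to restore into (assumed field name)
		SourceBackup: "projects/my-project/locations/us-west1/backups/my-backup", // backup to restore from (assumed field name)
	}

	// Restore returns a long-running Operation; poll it with the operations
	// Get call until it reports completion. Production code would typically
	// also inspect the operation's error field before treating it as success.
	op, err := svc.Projects.Locations.Instances.Restore(name, req).Do()
	if err != nil {
		log.Fatal(err)
	}
	for !op.Done {
		time.Sleep(10 * time.Second)
		op, err = svc.Projects.Locations.Operations.Get(op.Name).Do()
		if err != nil {
			log.Fatal(err)
		}
	}
	log.Printf("restore operation %s finished", op.Name)
}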
-// The server -// makes a best effort to cancel the operation, but success is -// not -// guaranteed. If the server doesn't support this method, it -// returns -// `google.rpc.Code.UNIMPLEMENTED`. Clients can -// use -// Operations.GetOperation or -// other methods to check whether the cancellation succeeded or whether -// the -// operation completed despite cancellation. On successful -// cancellation, -// the operation is not deleted; instead, it becomes an operation -// with -// an Operation.error value with a google.rpc.Status.code of -// 1, -// corresponding to `Code.CANCELLED`. +// The server makes a best effort to cancel the operation, but success +// is not guaranteed. If the server doesn't support this method, it +// returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use +// Operations.GetOperation or other methods to check whether the +// cancellation succeeded or whether the operation completed despite +// cancellation. On successful cancellation, the operation is not +// deleted; instead, it becomes an operation with an Operation.error +// value with a google.rpc.Status.code of 1, corresponding to +// `Code.CANCELLED`. func (r *ProjectsLocationsOperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *ProjectsLocationsOperationsCancelCall { c := &ProjectsLocationsOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3848,7 +3538,7 @@ func (c *ProjectsLocationsOperationsCancelCall) Header() http.Header { func (c *ProjectsLocationsOperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3912,7 +3602,7 @@ func (c *ProjectsLocationsOperationsCancelCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel", // "httpMethod": "POST", // "id": "file.projects.locations.operations.cancel", @@ -3953,12 +3643,9 @@ type ProjectsLocationsOperationsDeleteCall struct { } // Delete: Deletes a long-running operation. 
This method indicates that -// the client is -// no longer interested in the operation result. It does not cancel -// the -// operation. If the server doesn't support this method, it -// returns -// `google.rpc.Code.UNIMPLEMENTED`. +// the client is no longer interested in the operation result. It does +// not cancel the operation. If the server doesn't support this method, +// it returns `google.rpc.Code.UNIMPLEMENTED`. func (r *ProjectsLocationsOperationsService) Delete(name string) *ProjectsLocationsOperationsDeleteCall { c := &ProjectsLocationsOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3992,7 +3679,7 @@ func (c *ProjectsLocationsOperationsDeleteCall) Header() http.Header { func (c *ProjectsLocationsOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4051,7 +3738,7 @@ func (c *ProjectsLocationsOperationsDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + // "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", // "httpMethod": "DELETE", // "id": "file.projects.locations.operations.delete", @@ -4089,11 +3776,9 @@ type ProjectsLocationsOperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this -// method to poll the operation result at intervals as recommended by -// the API -// service. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. func (r *ProjectsLocationsOperationsService) Get(name string) *ProjectsLocationsOperationsGetCall { c := &ProjectsLocationsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4137,7 +3822,7 @@ func (c *ProjectsLocationsOperationsGetCall) Header() http.Header { func (c *ProjectsLocationsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4199,7 +3884,7 @@ func (c *ProjectsLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", // "httpMethod": "GET", // "id": "file.projects.locations.operations.get", @@ -4238,22 +3923,15 @@ type ProjectsLocationsOperationsListCall struct { } // List: Lists operations that match the specified filter in the -// request. If the -// server doesn't support this method, it returns -// `UNIMPLEMENTED`. -// -// NOTE: the `name` binding allows API services to override the -// binding -// to use different resource name schemes, such as `users/*/operations`. -// To -// override the binding, API services can add a binding such -// as -// "/v1/{name=users/*}/operations" to their service configuration. -// For backwards compatibility, the default name includes the -// operations -// collection id, however overriding users must ensure the name -// binding -// is the parent resource, without the operations collection id. +// request. If the server doesn't support this method, it returns +// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to +// override the binding to use different resource name schemes, such as +// `users/*/operations`. To override the binding, API services can add a +// binding such as "/v1/{name=users/*}/operations" to their service +// configuration. For backwards compatibility, the default name includes +// the operations collection id, however overriding users must ensure +// the name binding is the parent resource, without the operations +// collection id. func (r *ProjectsLocationsOperationsService) List(name string) *ProjectsLocationsOperationsListCall { c := &ProjectsLocationsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4318,7 +3996,7 @@ func (c *ProjectsLocationsOperationsListCall) Header() http.Header { func (c *ProjectsLocationsOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4380,7 +4058,7 @@ func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. 
For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/operations", // "httpMethod": "GET", // "id": "file.projects.locations.operations.list", diff --git a/vendor/google.golang.org/api/healthcare/v1/healthcare-api.json b/vendor/google.golang.org/api/healthcare/v1/healthcare-api.json index 26f898e3303..b12af86fcab 100644 --- a/vendor/google.golang.org/api/healthcare/v1/healthcare-api.json +++ b/vendor/google.golang.org/api/healthcare/v1/healthcare-api.json @@ -108,11 +108,79 @@ "projects": { "resources": { "locations": { + "methods": { + "get": { + "description": "Gets information about a location.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}", + "httpMethod": "GET", + "id": "healthcare.projects.locations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name for the location.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Location" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists information about the supported locations for this service.", + "flatPath": "v1/projects/{projectsId}/locations", + "httpMethod": "GET", + "id": "healthcare.projects.locations.list", + "parameterOrder": [ + "name" + ], + "parameters": { + "filter": { + "description": "The standard list filter.", + "location": "query", + "type": "string" + }, + "name": { + "description": "The resource that owns the locations collection, if applicable.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "The standard list page size.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The standard list page token.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}/locations", + "response": { + "$ref": "ListLocationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, "resources": { "datasets": { "methods": { "create": { - "description": "Creates a new health dataset. Results are returned through the\nOperation interface which returns either an\n`Operation.response` which contains a Dataset or\n`Operation.error`. The metadata\nfield type is OperationMetadata.\nA Google Cloud Platform project can contain up to 500 datasets across all\nregions.", + "description": "Creates a new health dataset. Results are returned through the Operation interface which returns either an `Operation.response` which contains a Dataset or `Operation.error`. The metadata field type is OperationMetadata.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.create", @@ -121,12 +189,12 @@ ], "parameters": { "datasetId": { - "description": "The ID of the dataset that is being created.\nThe string must match the following regex: `[\\p{L}\\p{N}_\\-\\.]{1,256}`.", + "description": "The ID of the dataset that is being created. 
The string must match the following regex: `[\\p{L}\\p{N}_\\-\\.]{1,256}`.", "location": "query", "type": "string" }, "parent": { - "description": "The name of the project where the server creates the dataset. For\nexample, `projects/{project_id}/locations/{location_id}`.", + "description": "The name of the project where the server creates the dataset. For example, `projects/{project_id}/locations/{location_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -145,7 +213,7 @@ ] }, "deidentify": { - "description": "Creates a new dataset containing de-identified data from the source\ndataset. The metadata field type\nis OperationMetadata.\nIf the request is successful, the\nresponse field type is\nDeidentifySummary.\nIf errors occur, error is set.\nThe LRO result may still be successful if de-identification fails for some\nDICOM instances. The new de-identified dataset will not contain these\nfailed resources. Failed resource totals are tracked in\nOperation.metadata.\nError details are also logged to Cloud Logging. For more information,\nsee [Viewing logs](/healthcare/docs/how-tos/logging).", + "description": "Creates a new dataset containing de-identified data from the source dataset. The metadata field type is OperationMetadata. If the request is successful, the response field type is DeidentifySummary. If errors occur, error is set. The LRO result may still be successful if de-identification fails for some DICOM instances. The new de-identified dataset will not contain these failed resources. Failed resource totals are tracked in Operation.metadata. Error details are also logged to Cloud Logging. For more information, see [Viewing logs](/healthcare/docs/how-tos/logging).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}:deidentify", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.deidentify", @@ -154,7 +222,7 @@ ], "parameters": { "sourceDataset": { - "description": "Source dataset resource name. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", + "description": "Source dataset resource name. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+$", "required": true, @@ -173,7 +241,7 @@ ] }, "delete": { - "description": "Deletes the specified health dataset and all data contained in the dataset.\nDeleting a dataset does not affect the sources from which the dataset was\nimported (if any).", + "description": "Deletes the specified health dataset and all data contained in the dataset. Deleting a dataset does not affect the sources from which the dataset was imported (if any).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}", "httpMethod": "DELETE", "id": "healthcare.projects.locations.datasets.delete", @@ -182,7 +250,7 @@ ], "parameters": { "name": { - "description": "The name of the dataset to delete. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", + "description": "The name of the dataset to delete. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+$", "required": true, @@ -207,7 +275,7 @@ ], "parameters": { "name": { - "description": "The name of the dataset to read. 
For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", + "description": "The name of the dataset to read. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+$", "required": true, @@ -223,7 +291,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}:getIamPolicy", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.getIamPolicy", @@ -232,13 +300,13 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" }, "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+$", "required": true, @@ -263,7 +331,7 @@ ], "parameters": { "pageSize": { - "description": "The maximum number of items to return. Capped to 100 if not specified.\nMay not be larger than 1000.", + "description": "The maximum number of items to return. Capped to 100 if not specified. May not be larger than 1000.", "format": "int32", "location": "query", "type": "integer" @@ -274,7 +342,7 @@ "type": "string" }, "parent": { - "description": "The name of the project whose datasets should be listed.\nFor example, `projects/{project_id}/locations/{location_id}`.", + "description": "The name of the project whose datasets should be listed. For example, `projects/{project_id}/locations/{location_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -299,14 +367,14 @@ ], "parameters": { "name": { - "description": "Output only. 
Resource name of the dataset, of the form\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", + "description": "Resource name of the dataset, of the form `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+$", "required": true, "type": "string" }, "updateMask": { - "description": "The update mask applies to the resource. For the `FieldMask` definition,\nsee\nhttps://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", + "description": "The update mask applies to the resource. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", "format": "google-fieldmask", "location": "query", "type": "string" @@ -324,7 +392,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}:setIamPolicy", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.setIamPolicy", @@ -333,7 +401,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+$", "required": true, @@ -352,7 +420,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}:testIamPermissions", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.testIamPermissions", @@ -361,7 +429,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. 
See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+$", "required": true, @@ -393,7 +461,7 @@ ], "parameters": { "dicomStoreId": { - "description": "The ID of the DICOM store that is being created.\nAny string value up to 256 characters in length.", + "description": "The ID of the DICOM store that is being created. Any string value up to 256 characters in length.", "location": "query", "type": "string" }, @@ -417,7 +485,7 @@ ] }, "deidentify": { - "description": "De-identifies data from the source store and writes it to the destination\nstore. The metadata field type\nis OperationMetadata.\nIf the request is successful, the\nresponse field type is\nDeidentifyDicomStoreSummary. If errors occur,\nerror is set.\nThe LRO result may still be successful if de-identification fails for some\nDICOM instances. The output DICOM store will not contain\nthese failed resources. Failed resource totals are tracked in\nOperation.metadata.\nError details are also logged to Cloud Logging\n(see [Viewing logs](/healthcare/docs/how-tos/logging)).", + "description": "De-identifies data from the source store and writes it to the destination store. The metadata field type is OperationMetadata. If the request is successful, the response field type is DeidentifyDicomStoreSummary. If errors occur, error is set. The LRO result may still be successful if de-identification fails for some DICOM instances. The output DICOM store will not contain these failed resources. Failed resource totals are tracked in Operation.metadata. Error details are also logged to Cloud Logging (see [Viewing logs](/healthcare/docs/how-tos/logging)).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}:deidentify", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.dicomStores.deidentify", @@ -426,7 +494,7 @@ ], "parameters": { "sourceStore": { - "description": "Source DICOM store resource name. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "Source DICOM store resource name. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -445,7 +513,7 @@ ] }, "delete": { - "description": "Deletes the specified DICOM store and removes all images that are contained\nwithin it.", + "description": "Deletes the specified DICOM store and removes all images that are contained within it.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}", "httpMethod": "DELETE", "id": "healthcare.projects.locations.datasets.dicomStores.delete", @@ -470,7 +538,7 @@ ] }, "export": { - "description": "Exports data to the specified destination by copying it from the DICOM\nstore.\nErrors are also logged to Cloud Logging. For more information,\nsee [Viewing logs](/healthcare/docs/how-tos/logging).\nThe metadata field type is\nOperationMetadata.", + "description": "Exports data to the specified destination by copying it from the DICOM store. Errors are also logged to Cloud Logging. For more information, see [Viewing logs](/healthcare/docs/how-tos/logging). 
The metadata field type is OperationMetadata.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}:export", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.dicomStores.export", @@ -479,7 +547,7 @@ ], "parameters": { "name": { - "description": "The DICOM store resource name from which to export the data. For\nexample,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The DICOM store resource name from which to export the data. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -523,7 +591,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}:getIamPolicy", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.dicomStores.getIamPolicy", @@ -532,13 +600,13 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" }, "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -554,7 +622,7 @@ ] }, "import": { - "description": "Imports data into the DICOM store by copying it from the specified source.\nErrors are logged to Cloud Logging. For more information, see\n[Viewing logs](/healthcare/docs/how-tos/logging). The\nmetadata field type is\nOperationMetadata.", + "description": "Imports data into the DICOM store by copying it from the specified source. Errors are logged to Cloud Logging. For more information, see [Viewing logs](/healthcare/docs/how-tos/logging). 
The metadata field type is OperationMetadata.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}:import", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.dicomStores.import", @@ -563,7 +631,7 @@ ], "parameters": { "name": { - "description": "The name of the DICOM store resource into which the data is imported.\nFor example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store resource into which the data is imported. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -591,12 +659,12 @@ ], "parameters": { "filter": { - "description": "Restricts stores returned to those matching a filter. Syntax:\nhttps://cloud.google.com/appengine/docs/standard/python/search/query_strings\nOnly filtering on labels is supported. For example, `labels.key=value`.", + "description": "Restricts stores returned to those matching a filter. Syntax: https://cloud.google.com/appengine/docs/standard/python/search/query_strings Only filtering on labels is supported. For example, `labels.key=value`.", "location": "query", "type": "string" }, "pageSize": { - "description": "Limit on the number of DICOM stores to return in a single response.\nIf zero the default page size of 100 is used.", + "description": "Limit on the number of DICOM stores to return in a single response. If zero the default page size of 100 is used.", "format": "int32", "location": "query", "type": "integer" @@ -632,14 +700,14 @@ ], "parameters": { "name": { - "description": "Output only. Resource name of the DICOM store, of the form\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "Resource name of the DICOM store, of the form `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, "type": "string" }, "updateMask": { - "description": "The update mask applies to the resource. For the `FieldMask` definition,\nsee\nhttps://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", + "description": "The update mask applies to the resource. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", "format": "google-fieldmask", "location": "query", "type": "string" @@ -657,7 +725,7 @@ ] }, "searchForInstances": { - "description": "SearchForInstances returns a list of matching instances. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6.", + "description": "SearchForInstances returns a list of matching instances. See [Search Transaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6). For details on the implementation of SearchForInstances, see [Search transaction](https://cloud.google.com/healthcare/docs/dicom#search_transaction) in the Cloud Healthcare API conformance statement. 
For samples that show how to call SearchForInstances, see [Searching for studies, series, instances, and frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#searching_for_studies_series_instances_and_frames).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/instances", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.dicomStores.searchForInstances", @@ -667,14 +735,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the SearchForInstancesRequest DICOMweb request. For example,\n`instances`, `series/{series_uid}/instances`, or\n`studies/{study_uid}/instances`.", + "description": "The path of the SearchForInstancesRequest DICOMweb request. For example, `instances`, `series/{series_uid}/instances`, or `studies/{study_uid}/instances`.", "location": "path", "pattern": "^instances$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -690,7 +758,7 @@ ] }, "searchForSeries": { - "description": "SearchForSeries returns a list of matching series. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6.", + "description": "SearchForSeries returns a list of matching series. See [Search Transaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6). For details on the implementation of SearchForSeries, see [Search transaction](https://cloud.google.com/healthcare/docs/dicom#search_transaction) in the Cloud Healthcare API conformance statement. For samples that show how to call SearchForSeries, see [Searching for studies, series, instances, and frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#searching_for_studies_series_instances_and_frames).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/series", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.dicomStores.searchForSeries", @@ -700,14 +768,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the SearchForSeries DICOMweb request. For example, `series` or\n`studies/{study_uid}/series`.", + "description": "The path of the SearchForSeries DICOMweb request. For example, `series` or `studies/{study_uid}/series`.", "location": "path", "pattern": "^series$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -723,7 +791,7 @@ ] }, "searchForStudies": { - "description": "SearchForStudies returns a list of matching studies. 
See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6.", + "description": "SearchForStudies returns a list of matching studies. See [Search Transaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6). For details on the implementation of SearchForStudies, see [Search transaction](https://cloud.google.com/healthcare/docs/dicom#search_transaction) in the Cloud Healthcare API conformance statement. For samples that show how to call SearchForStudies, see [Searching for studies, series, instances, and frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#searching_for_studies_series_instances_and_frames).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.dicomStores.searchForStudies", @@ -740,7 +808,7 @@ "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -756,7 +824,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}:setIamPolicy", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.dicomStores.setIamPolicy", @@ -765,7 +833,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -784,7 +852,7 @@ ] }, "storeInstances": { - "description": "StoreInstances stores DICOM instances associated with study instance unique\nidentifiers (SUID). See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.5.", + "description": "StoreInstances stores DICOM instances associated with study instance unique identifiers (SUID). See [Store Transaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.5). For details on the implementation of StoreInstances, see [Store transaction](https://cloud.google.com/healthcare/docs/dicom#store_transaction) in the Cloud Healthcare API conformance statement. 
For samples that show how to call StoreInstances, see [Storing DICOM data](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#storing_dicom_data).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.dicomStores.storeInstances", @@ -794,14 +862,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the StoreInstances DICOMweb request. For example,\n`studies/[{study_uid}]`. Note that the `study_uid` is optional.", + "description": "The path of the StoreInstances DICOMweb request. For example, `studies/[{study_uid}]`. Note that the `study_uid` is optional.", "location": "path", "pattern": "^studies$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -820,7 +888,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}:testIamPermissions", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.dicomStores.testIamPermissions", @@ -829,7 +897,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -852,7 +920,7 @@ "studies": { "methods": { "delete": { - "description": "DeleteStudy deletes all instances within the given study. Delete requests\nare equivalent to the GET requests specified in the Retrieve transaction.\nThe method returns an Operation which\nwill be marked successful when the deletion is complete.", + "description": "DeleteStudy deletes all instances within the given study. Delete requests are equivalent to the GET requests specified in the Retrieve transaction. The method returns an Operation which will be marked successful when the deletion is complete. 
Warning: Inserting instances into a study while a delete operation is running for that study could result in the new instances not appearing in search results until the deletion operation finishes. For samples that show how to call DeleteStudy, see [Deleting a study, series, or instance](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#deleting_a_study_series_or_instance).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}", "httpMethod": "DELETE", "id": "healthcare.projects.locations.datasets.dicomStores.studies.delete", @@ -884,7 +952,7 @@ ] }, "retrieveMetadata": { - "description": "RetrieveStudyMetadata returns instance associated with the given study\npresented as metadata with the bulk data removed. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + "description": "RetrieveStudyMetadata returns instance associated with the given study presented as metadata with the bulk data removed. See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4). For details on the implementation of RetrieveStudyMetadata, see [Metadata resources](https://cloud.google.com/healthcare/docs/dicom#metadata_resources) in the Cloud Healthcare API conformance statement. For samples that show how to call RetrieveStudyMetadata, see [Retrieving metadata](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_metadata).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/metadata", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.dicomStores.studies.retrieveMetadata", @@ -894,14 +962,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the RetrieveStudyMetadata DICOMweb request. For example,\n`studies/{study_uid}/metadata`.", + "description": "The path of the RetrieveStudyMetadata DICOMweb request. For example, `studies/{study_uid}/metadata`.", "location": "path", "pattern": "^studies/[^/]+/metadata$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -917,7 +985,7 @@ ] }, "retrieveStudy": { - "description": "RetrieveStudy returns all instances within the given study. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + "description": "RetrieveStudy returns all instances within the given study. See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4). For details on the implementation of RetrieveStudy, see [DICOM study/series/instances](https://cloud.google.com/healthcare/docs/dicom#dicom_studyseriesinstances) in the Cloud Healthcare API conformance statement. 
For samples that show how to call RetrieveStudy, see [Retrieving DICOM data](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_dicom_data).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.dicomStores.studies.retrieveStudy", @@ -927,14 +995,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the RetrieveStudy DICOMweb request. For example,\n`studies/{study_uid}`.", + "description": "The path of the RetrieveStudy DICOMweb request. For example, `studies/{study_uid}`.", "location": "path", "pattern": "^studies/[^/]+$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -950,7 +1018,7 @@ ] }, "searchForInstances": { - "description": "SearchForInstances returns a list of matching instances. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6.", + "description": "SearchForInstances returns a list of matching instances. See [Search Transaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6). For details on the implementation of SearchForInstances, see [Search transaction](https://cloud.google.com/healthcare/docs/dicom#search_transaction) in the Cloud Healthcare API conformance statement. For samples that show how to call SearchForInstances, see [Searching for studies, series, instances, and frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#searching_for_studies_series_instances_and_frames).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/instances", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.dicomStores.studies.searchForInstances", @@ -960,14 +1028,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the SearchForInstancesRequest DICOMweb request. For example,\n`instances`, `series/{series_uid}/instances`, or\n`studies/{study_uid}/instances`.", + "description": "The path of the SearchForInstancesRequest DICOMweb request. For example, `instances`, `series/{series_uid}/instances`, or `studies/{study_uid}/instances`.", "location": "path", "pattern": "^studies/[^/]+/instances$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -983,7 +1051,7 @@ ] }, "searchForSeries": { - "description": "SearchForSeries returns a list of matching series. 
See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6.", + "description": "SearchForSeries returns a list of matching series. See [Search Transaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6). For details on the implementation of SearchForSeries, see [Search transaction](https://cloud.google.com/healthcare/docs/dicom#search_transaction) in the Cloud Healthcare API conformance statement. For samples that show how to call SearchForSeries, see [Searching for studies, series, instances, and frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#searching_for_studies_series_instances_and_frames).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.dicomStores.studies.searchForSeries", @@ -993,14 +1061,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the SearchForSeries DICOMweb request. For example, `series` or\n`studies/{study_uid}/series`.", + "description": "The path of the SearchForSeries DICOMweb request. For example, `series` or `studies/{study_uid}/series`.", "location": "path", "pattern": "^studies/[^/]+/series$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -1016,7 +1084,7 @@ ] }, "storeInstances": { - "description": "StoreInstances stores DICOM instances associated with study instance unique\nidentifiers (SUID). See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.5.", + "description": "StoreInstances stores DICOM instances associated with study instance unique identifiers (SUID). See [Store Transaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.5). For details on the implementation of StoreInstances, see [Store transaction](https://cloud.google.com/healthcare/docs/dicom#store_transaction) in the Cloud Healthcare API conformance statement. For samples that show how to call StoreInstances, see [Storing DICOM data](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#storing_dicom_data).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.dicomStores.studies.storeInstances", @@ -1026,14 +1094,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the StoreInstances DICOMweb request. For example,\n`studies/[{study_uid}]`. Note that the `study_uid` is optional.", + "description": "The path of the StoreInstances DICOMweb request. For example, `studies/[{study_uid}]`. Note that the `study_uid` is optional.", "location": "path", "pattern": "^studies/[^/]+$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. 
For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -1056,7 +1124,7 @@ "series": { "methods": { "delete": { - "description": "DeleteSeries deletes all instances within the given study and series.\nDelete requests are equivalent to the GET requests specified in the\nRetrieve transaction.\nThe method returns an Operation which\nwill be marked successful when the deletion is complete.", + "description": "DeleteSeries deletes all instances within the given study and series. Delete requests are equivalent to the GET requests specified in the Retrieve transaction. The method returns an Operation which will be marked successful when the deletion is complete. Warning: Inserting instances into a series while a delete operation is running for that series could result in the new instances not appearing in search results until the deletion operation finishes. For samples that show how to call DeleteSeries, see [Deleting a study, series, or instance](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#deleting_a_study_series_or_instance).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}", "httpMethod": "DELETE", "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.delete", @@ -1066,14 +1134,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the DeleteSeries request. For example,\n`studies/{study_uid}/series/{series_uid}`.", + "description": "The path of the DeleteSeries request. For example, `studies/{study_uid}/series/{series_uid}`.", "location": "path", "pattern": "^studies/[^/]+/series/[^/]+$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -1089,7 +1157,7 @@ ] }, "retrieveMetadata": { - "description": "RetrieveSeriesMetadata returns instance associated with the given study and\nseries, presented as metadata with the bulk data removed. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + "description": "RetrieveSeriesMetadata returns instance associated with the given study and series, presented as metadata with the bulk data removed. See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4). For details on the implementation of RetrieveSeriesMetadata, see [Metadata resources](https://cloud.google.com/healthcare/docs/dicom#metadata_resources) in the Cloud Healthcare API conformance statement. 
For samples that show how to call RetrieveSeriesMetadata, see [Retrieving metadata](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_metadata).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}/metadata", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.retrieveMetadata", @@ -1099,14 +1167,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the RetrieveSeriesMetadata DICOMweb request. For example,\n`studies/{study_uid}/series/{series_uid}/metadata`.", + "description": "The path of the RetrieveSeriesMetadata DICOMweb request. For example, `studies/{study_uid}/series/{series_uid}/metadata`.", "location": "path", "pattern": "^studies/[^/]+/series/[^/]+/metadata$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -1122,7 +1190,7 @@ ] }, "retrieveSeries": { - "description": "RetrieveSeries returns all instances within the given study and series. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + "description": "RetrieveSeries returns all instances within the given study and series. See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4). For details on the implementation of RetrieveSeries, see [DICOM study/series/instances](https://cloud.google.com/healthcare/docs/dicom#dicom_studyseriesinstances) in the Cloud Healthcare API conformance statement. For samples that show how to call RetrieveSeries, see [Retrieving DICOM data](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_dicom_data).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.retrieveSeries", @@ -1132,14 +1200,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the RetrieveSeries DICOMweb request. For example,\n`studies/{study_uid}/series/{series_uid}`.", + "description": "The path of the RetrieveSeries DICOMweb request. For example, `studies/{study_uid}/series/{series_uid}`.", "location": "path", "pattern": "^studies/[^/]+/series/[^/]+$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -1155,7 +1223,7 @@ ] }, "searchForInstances": { - "description": "SearchForInstances returns a list of matching instances. 
See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6.", + "description": "SearchForInstances returns a list of matching instances. See [Search Transaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6). For details on the implementation of SearchForInstances, see [Search transaction](https://cloud.google.com/healthcare/docs/dicom#search_transaction) in the Cloud Healthcare API conformance statement. For samples that show how to call SearchForInstances, see [Searching for studies, series, instances, and frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#searching_for_studies_series_instances_and_frames).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}/instances", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.searchForInstances", @@ -1165,14 +1233,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the SearchForInstancesRequest DICOMweb request. For example,\n`instances`, `series/{series_uid}/instances`, or\n`studies/{study_uid}/instances`.", + "description": "The path of the SearchForInstancesRequest DICOMweb request. For example, `instances`, `series/{series_uid}/instances`, or `studies/{study_uid}/instances`.", "location": "path", "pattern": "^studies/[^/]+/series/[^/]+/instances$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -1192,7 +1260,7 @@ "instances": { "methods": { "delete": { - "description": "DeleteInstance deletes an instance associated with the given study, series,\nand SOP Instance UID. Delete requests are equivalent to the GET requests\nspecified in the Retrieve transaction.", + "description": "DeleteInstance deletes an instance associated with the given study, series, and SOP Instance UID. Delete requests are equivalent to the GET requests specified in the Retrieve transaction. Study and series search results can take a few seconds to be updated after an instance is deleted using DeleteInstance. For samples that show how to call DeleteInstance, see [Deleting a study, series, or instance](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#deleting_a_study_series_or_instance).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}/instances/{instancesId}", "httpMethod": "DELETE", "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.instances.delete", @@ -1202,14 +1270,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the DeleteInstance request. For example,\n`studies/{study_uid}/series/{series_uid}/instances/{instance_uid}`.", + "description": "The path of the DeleteInstance request. 
For example, `studies/{study_uid}/series/{series_uid}/instances/{instance_uid}`.", "location": "path", "pattern": "^studies/[^/]+/series/[^/]+/instances/[^/]+$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -1225,7 +1293,7 @@ ] }, "retrieveInstance": { - "description": "RetrieveInstance returns instance associated with the given study, series,\nand SOP Instance UID. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + "description": "RetrieveInstance returns instance associated with the given study, series, and SOP Instance UID. See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4). For details on the implementation of RetrieveInstance, see [DICOM study/series/instances](https://cloud.google.com/healthcare/docs/dicom#dicom_studyseriesinstances) and [DICOM instances](https://cloud.google.com/healthcare/docs/dicom#dicom_instances) in the Cloud Healthcare API conformance statement. For samples that show how to call RetrieveInstance, see [Retrieving an instance](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_an_instance).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}/instances/{instancesId}", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.instances.retrieveInstance", @@ -1235,14 +1303,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the RetrieveInstance DICOMweb request. For example,\n`studies/{study_uid}/series/{series_uid}/instances/{instance_uid}`.", + "description": "The path of the RetrieveInstance DICOMweb request. For example, `studies/{study_uid}/series/{series_uid}/instances/{instance_uid}`.", "location": "path", "pattern": "^studies/[^/]+/series/[^/]+/instances/[^/]+$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -1258,7 +1326,7 @@ ] }, "retrieveMetadata": { - "description": "RetrieveInstanceMetadata returns instance associated with the given study,\nseries, and SOP Instance UID presented as metadata with the bulk data\nremoved. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + "description": "RetrieveInstanceMetadata returns instance associated with the given study, series, and SOP Instance UID presented as metadata with the bulk data removed. See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4). 
For details on the implementation of RetrieveInstanceMetadata, see [Metadata resources](https://cloud.google.com/healthcare/docs/dicom#metadata_resources) in the Cloud Healthcare API conformance statement. For samples that show how to call RetrieveInstanceMetadata, see [Retrieving metadata](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_metadata).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}/instances/{instancesId}/metadata", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.instances.retrieveMetadata", @@ -1268,14 +1336,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the RetrieveInstanceMetadata DICOMweb request. For example,\n`studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/metadata`.", + "description": "The path of the RetrieveInstanceMetadata DICOMweb request. For example, `studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/metadata`.", "location": "path", "pattern": "^studies/[^/]+/series/[^/]+/instances/[^/]+/metadata$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -1291,7 +1359,7 @@ ] }, "retrieveRendered": { - "description": "RetrieveRenderedInstance returns instance associated with the given study,\nseries, and SOP Instance UID in an acceptable Rendered Media Type. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + "description": "RetrieveRenderedInstance returns instance associated with the given study, series, and SOP Instance UID in an acceptable Rendered Media Type. See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4). For details on the implementation of RetrieveRenderedInstance, see [Rendered resources](https://cloud.google.com/healthcare/docs/dicom#rendered_resources) in the Cloud Healthcare API conformance statement. For samples that show how to call RetrieveRenderedInstance, see [Retrieving consumer image formats](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_consumer_image_formats).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}/instances/{instancesId}/rendered", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.instances.retrieveRendered", @@ -1301,14 +1369,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the RetrieveRenderedInstance DICOMweb request. For example,\n`studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/rendered`.", + "description": "The path of the RetrieveRenderedInstance DICOMweb request. 
For example, `studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/rendered`.", "location": "path", "pattern": "^studies/[^/]+/series/[^/]+/instances/[^/]+/rendered$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -1328,7 +1396,7 @@ "frames": { "methods": { "retrieveFrames": { - "description": "RetrieveFrames returns instances associated with the given study, series,\nSOP Instance UID and frame numbers. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + "description": "RetrieveFrames returns instances associated with the given study, series, SOP Instance UID and frame numbers. See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4}. For details on the implementation of RetrieveFrames, see [DICOM frames](https://cloud.google.com/healthcare/docs/dicom#dicom_frames) in the Cloud Healthcare API conformance statement. For samples that show how to call RetrieveFrames, see [Retrieving DICOM data](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_dicom_data).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}/instances/{instancesId}/frames/{framesId}", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.instances.frames.retrieveFrames", @@ -1338,14 +1406,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the RetrieveFrames DICOMweb request. For example,\n`studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/frames/{frame_list}`.", + "description": "The path of the RetrieveFrames DICOMweb request. For example, `studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/frames/{frame_list}`.", "location": "path", "pattern": "^studies/[^/]+/series/[^/]+/instances/[^/]+/frames/[^/]+$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -1361,7 +1429,7 @@ ] }, "retrieveRendered": { - "description": "RetrieveRenderedFrames returns instances associated with the given study,\nseries, SOP Instance UID and frame numbers in an acceptable Rendered Media\nType. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + "description": "RetrieveRenderedFrames returns instances associated with the given study, series, SOP Instance UID and frame numbers in an acceptable Rendered Media Type. See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4). 
For details on the implementation of RetrieveRenderedFrames, see [Rendered resources](https://cloud.google.com/healthcare/docs/dicom#rendered_resources) in the Cloud Healthcare API conformance statement. For samples that show how to call RetrieveRenderedFrames, see [Retrieving consumer image formats](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_consumer_image_formats).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}/instances/{instancesId}/frames/{framesId}/rendered", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.instances.frames.retrieveRendered", @@ -1371,14 +1439,14 @@ ], "parameters": { "dicomWebPath": { - "description": "The path of the RetrieveRenderedFrames DICOMweb request. For example,\n`studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/frames/{frame_list}/rendered`.", + "description": "The path of the RetrieveRenderedFrames DICOMweb request. For example, `studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/frames/{frame_list}/rendered`.", "location": "path", "pattern": "^studies/[^/]+/series/[^/]+/instances/[^/]+/frames/[^/]+/rendered$", "required": true, "type": "string" }, "parent": { - "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", "required": true, @@ -1415,7 +1483,7 @@ ], "parameters": { "fhirStoreId": { - "description": "The ID of the FHIR store that is being created.\nThe string must match the following regex: `[\\p{L}\\p{N}_\\-\\.]{1,256}`.", + "description": "The ID of the FHIR store that is being created. The string must match the following regex: `[\\p{L}\\p{N}_\\-\\.]{1,256}`.", "location": "query", "type": "string" }, @@ -1439,7 +1507,7 @@ ] }, "deidentify": { - "description": "De-identifies data from the source store and writes it to the destination\nstore. The metadata field type\nis OperationMetadata.\nIf the request is successful, the\nresponse field type is\nDeidentifyFhirStoreSummary. If errors occur,\nerror is set.\nError details are also logged to Cloud Logging\n(see [Viewing logs](/healthcare/docs/how-tos/logging)).", + "description": "De-identifies data from the source store and writes it to the destination store. The metadata field type is OperationMetadata. If the request is successful, the response field type is DeidentifyFhirStoreSummary. If errors occur, error is set. Error details are also logged to Cloud Logging (see [Viewing logs](/healthcare/docs/how-tos/logging)).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}:deidentify", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.fhirStores.deidentify", @@ -1448,7 +1516,7 @@ ], "parameters": { "sourceStore": { - "description": "Source FHIR store resource name. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", + "description": "Source FHIR store resource name. 
For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+$", "required": true, @@ -1492,7 +1560,7 @@ ] }, "export": { - "description": "Export resources from the FHIR store to the specified destination.\n\nThis method returns an Operation that can\nbe used to track the status of the export by calling\nGetOperation.\n\nImmediate fatal errors appear in the\nerror field, errors are also logged\nto Cloud Logging (see [Viewing\nlogs](/healthcare/docs/how-tos/logging)).\nOtherwise, when the operation finishes, a detailed response of type\nExportResourcesResponse is returned in the\nresponse field.\nThe metadata field type for this\noperation is OperationMetadata.", + "description": "Export resources from the FHIR store to the specified destination. This method returns an Operation that can be used to track the status of the export by calling GetOperation. Immediate fatal errors appear in the error field, errors are also logged to Cloud Logging (see [Viewing logs](/healthcare/docs/how-tos/logging)). Otherwise, when the operation finishes, a detailed response of type ExportResourcesResponse is returned in the response field. The metadata field type for this operation is OperationMetadata.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}:export", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.fhirStores.export", @@ -1501,7 +1569,7 @@ ], "parameters": { "name": { - "description": "The name of the FHIR store to export resource from. The name should be in\nthe format of\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", + "description": "The name of the FHIR store to export resource from, in the format of `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+$", "required": true, @@ -1545,7 +1613,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}:getIamPolicy", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.fhirStores.getIamPolicy", @@ -1554,13 +1622,13 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. 
Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" }, "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+$", "required": true, @@ -1576,7 +1644,7 @@ ] }, "import": { - "description": "Import resources to the FHIR store by loading data from the specified\nsources. This method is optimized to load large quantities of data using\nimport semantics that ignore some FHIR store configuration options and are\nnot suitable for all use cases. It is primarily intended to load data into\nan empty FHIR store that is not being used by other clients. In cases\nwhere this method is not appropriate, consider using ExecuteBundle to\nload data.\n\nEvery resource in the input must contain a client-supplied ID, and will be\nstored using that ID regardless of the\nenable_update_create setting on the FHIR\nstore.\n\nThe import process does not enforce referential integrity, regardless of\nthe\ndisable_referential_integrity\nsetting on the FHIR store. This allows the import of resources with\narbitrary interdependencies without considering grouping or ordering, but\nif the input data contains invalid references or if some resources fail to\nbe imported, the FHIR store might be left in a state that violates\nreferential integrity.\n\nThe import process does not trigger Pub/Sub notification or BigQuery\nstreaming update, regardless of how those are configured on the FHIR store.\n\nIf a resource with the specified ID already exists, the most recent\nversion of the resource is overwritten without creating a new historical\nversion, regardless of the\ndisable_resource_versioning\nsetting on the FHIR store. If transient failures occur during the import,\nit is possible that successfully imported resources will be overwritten\nmore than once.\n\nThe import operation is idempotent unless the input data contains multiple\nvalid resources with the same ID but different contents. In that case,\nafter the import completes, the store will contain exactly one resource\nwith that ID but there is no ordering guarantee on which version of the\ncontents it will have. The operation result counters do not count\nduplicate IDs as an error and will count one success for each resource in\nthe input, which might result in a success count larger than the number\nof resources in the FHIR store. This often occurs when importing data\norganized in bundles produced by Patient-everything\nwhere each bundle contains its own copy of a resource such as Practitioner\nthat might be referred to by many patients.\n\nIf some resources fail to import, for example due to parsing errors,\nsuccessfully imported resources are not rolled back.\n\nThe location and format of the input data is specified by the parameters\nbelow. Note that if no format is specified, this method assumes the\n`BUNDLE` format. 
When using the `BUNDLE` format this method ignores the\n`Bundle.type` field, except that `history` bundles are rejected, and does\nnot apply any of the bundle processing semantics for batch or transaction\nbundles. Unlike in ExecuteBundle, transaction bundles are not executed\nas a single transaction and bundle-internal references are not rewritten.\nThe bundle is treated as a collection of resources to be written as\nprovided in `Bundle.entry.resource`, ignoring `Bundle.entry.request`. As\nan example, this allows the import of `searchset` bundles produced by a\nFHIR search or\nPatient-everything operation.\n\nThis method returns an Operation that can\nbe used to track the status of the import by calling\nGetOperation.\n\nImmediate fatal errors appear in the\nerror field, errors are also logged\nto Cloud Logging (see [Viewing\nlogs](/healthcare/docs/how-tos/logging)). Otherwise, when the\noperation finishes, a detailed response of type ImportResourcesResponse\nis returned in the response field.\nThe metadata field type for this\noperation is OperationMetadata.", + "description": "Imports resources to the FHIR store by loading data from the specified sources. This method is optimized to load large quantities of data using import semantics that ignore some FHIR store configuration options and are not suitable for all use cases. It is primarily intended to load data into an empty FHIR store that is not being used by other clients. In cases where this method is not appropriate, consider using ExecuteBundle to load data. Every resource in the input must contain a client-supplied ID. Each resource is stored using the supplied ID regardless of the enable_update_create setting on the FHIR store. The import process does not enforce referential integrity, regardless of the disable_referential_integrity setting on the FHIR store. This allows the import of resources with arbitrary interdependencies without considering grouping or ordering, but if the input data contains invalid references or if some resources fail to be imported, the FHIR store might be left in a state that violates referential integrity. The import process does not trigger Pub/Sub notification or BigQuery streaming update, regardless of how those are configured on the FHIR store. If a resource with the specified ID already exists, the most recent version of the resource is overwritten without creating a new historical version, regardless of the disable_resource_versioning setting on the FHIR store. If transient failures occur during the import, it's possible that successfully imported resources will be overwritten more than once. The import operation is idempotent unless the input data contains multiple valid resources with the same ID but different contents. In that case, after the import completes, the store contains exactly one resource with that ID but there is no ordering guarantee on which version of the contents it will have. The operation result counters do not count duplicate IDs as an error and count one success for each resource in the input, which might result in a success count larger than the number of resources in the FHIR store. This often occurs when importing data organized in bundles produced by Patient-everything where each bundle contains its own copy of a resource such as Practitioner that might be referred to by many patients. If some resources fail to import, for example due to parsing errors, successfully imported resources are not rolled back. 
The location and format of the input data is specified by the parameters in ImportResourcesRequest. Note that if no format is specified, this method assumes the `BUNDLE` format. When using the `BUNDLE` format this method ignores the `Bundle.type` field, except that `history` bundles are rejected, and does not apply any of the bundle processing semantics for batch or transaction bundles. Unlike in ExecuteBundle, transaction bundles are not executed as a single transaction and bundle-internal references are not rewritten. The bundle is treated as a collection of resources to be written as provided in `Bundle.entry.resource`, ignoring `Bundle.entry.request`. As an example, this allows the import of `searchset` bundles produced by a FHIR search or Patient-everything operation. This method returns an Operation that can be used to track the status of the import by calling GetOperation. Immediate fatal errors appear in the error field, errors are also logged to Cloud Logging (see [Viewing logs](/healthcare/docs/how-tos/logging)). Otherwise, when the operation finishes, a detailed response of type ImportResourcesResponse is returned in the response field. The metadata field type for this operation is OperationMetadata.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}:import", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.fhirStores.import", @@ -1585,7 +1653,7 @@ ], "parameters": { "name": { - "description": "The name of the FHIR store to import FHIR resources to. The name should be\nin the format of\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", + "description": "The name of the FHIR store to import FHIR resources to, in the format of `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+$", "required": true, @@ -1613,12 +1681,12 @@ ], "parameters": { "filter": { - "description": "Restricts stores returned to those matching a filter. Syntax:\nhttps://cloud.google.com/appengine/docs/standard/python/search/query_strings\nOnly filtering on labels is supported, for example `labels.key=value`.", + "description": "Restricts stores returned to those matching a filter. Syntax: https://cloud.google.com/appengine/docs/standard/python/search/query_strings Only filtering on labels is supported, for example `labels.key=value`.", "location": "query", "type": "string" }, "pageSize": { - "description": "Limit on the number of FHIR stores to return in a single response. If zero\nthe default page size of 100 is used.", + "description": "Limit on the number of FHIR stores to return in a single response. If zero the default page size of 100 is used.", "format": "int32", "location": "query", "type": "integer" @@ -1654,14 +1722,14 @@ ], "parameters": { "name": { - "description": "Output only. Resource name of the FHIR store, of the form\n`projects/{project_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", + "description": "Output only. Resource name of the FHIR store, of the form `projects/{project_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+$", "required": true, "type": "string" }, "updateMask": { - "description": "The update mask applies to the resource. 
For the `FieldMask` definition,\nsee\nhttps://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", + "description": "The update mask applies to the resource. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", "format": "google-fieldmask", "location": "query", "type": "string" @@ -1679,7 +1747,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}:setIamPolicy", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.fhirStores.setIamPolicy", @@ -1688,7 +1756,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+$", "required": true, @@ -1707,7 +1775,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}:testIamPermissions", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.fhirStores.testIamPermissions", @@ -1716,7 +1784,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. 
See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+$", "required": true, @@ -1739,7 +1807,7 @@ "fhir": { "methods": { "Patient-everything": { - "description": "Retrieves a Patient resource and resources related to that patient.\n\nImplements the FHIR extended operation Patient-everything\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/patient-operations.html#everything),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/patient-operations.html#everything),\n[R4](http://hl7.org/implement/standards/fhir/R4/patient-operations.html#everything)).\n\nOn success, the response body will contain a JSON-encoded representation\nof a `Bundle` resource of type `searchset`, containing the results of the\noperation.\nErrors generated by the FHIR store will contain a JSON-encoded\n`OperationOutcome` resource describing the reason for the error. If the\nrequest cannot be mapped to a valid API method on a FHIR store, a generic\nGCP error might be returned instead.\n\nThe resources in scope for the response are:\n\n* The patient resource itself.\n* All the resources directly referenced by the patient resource.\n* Resources directly referencing the patient resource that meet the\n inclusion criteria. The inclusion criteria are based on the membership\n rules in the patient compartment definition\n ([DSTU2](http://hl7.org/fhir/DSTU2/compartment-patient.html),\n [STU3](http://www.hl7.org/fhir/stu3/compartmentdefinition-patient.html),\n [R4](http://hl7.org/fhir/R4/compartmentdefinition-patient.html)), which\n details the eligible resource types and referencing search parameters.", + "description": "Retrieves a Patient resource and resources related to that patient. Implements the FHIR extended operation Patient-everything ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/patient-operations.html#everything), [STU3](http://hl7.org/implement/standards/fhir/STU3/patient-operations.html#everything), [R4](http://hl7.org/implement/standards/fhir/R4/patient-operations.html#everything)). On success, the response body will contain a JSON-encoded representation of a `Bundle` resource of type `searchset`, containing the results of the operation. Errors generated by the FHIR store will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. The resources in scope for the response are: * The patient resource itself. * All the resources directly referenced by the patient resource. * Resources directly referencing the patient resource that meet the inclusion criteria. The inclusion criteria are based on the membership rules in the patient compartment definition ([DSTU2](http://hl7.org/fhir/DSTU2/compartment-patient.html), [STU3](http://www.hl7.org/fhir/stu3/compartmentdefinition-patient.html), [R4](http://hl7.org/fhir/R4/compartmentdefinition-patient.html)), which details the eligible resource types and referencing search parameters. 
For samples that show how to call `Patient-everything`, see [Getting all patient compartment resources](/healthcare/docs/how-tos/fhir-resources#getting_all_patient_compartment_resources).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/Patient/{PatientId}/$everything", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.fhirStores.fhir.Patient-everything", @@ -1754,12 +1822,22 @@ "type": "integer" }, "_page_token": { - "description": "Used to retrieve the next or previous page of results\nwhen using pagination. Value should be set to the value of page_token set\nin next or previous page links' urls. Next and previous page are returned\nin the response bundle's links field, where `link.relation` is \"previous\"\nor \"next\".\n\nOmit `page_token` if no previous request has been made.", + "description": "Used to retrieve the next or previous page of results when using pagination. Set `_page_token` to the value of _page_token set in next or previous page links' url. Next and previous page are returned in the response bundle's links field, where `link.relation` is \"previous\" or \"next\". Omit `_page_token` if no previous request has been made.", + "location": "query", + "type": "string" + }, + "_since": { + "description": "If provided, only resources updated after this time are returned. The time uses the format YYYY-MM-DDThh:mm:ss.sss+zz:zz. For example, `2015-02-07T13:28:17.239+02:00` or `2017-01-01T00:00:00Z`. The time must be specified to the second and include a time zone.", + "location": "query", + "type": "string" + }, + "_type": { + "description": "String of comma-delimited FHIR resource types. If provided, only resources of the specified resource type(s) are returned.", "location": "query", "type": "string" }, "end": { - "description": "The response includes records prior to the end date. If no end date is\nprovided, all records subsequent to the start date are in scope.", + "description": "The response includes records prior to the end date. If no end date is provided, all records subsequent to the start date are in scope.", "location": "query", "type": "string" }, @@ -1771,7 +1849,7 @@ "type": "string" }, "start": { - "description": "The response includes records subsequent to the start date. If no start\ndate is provided, all records prior to the end date are in scope.", + "description": "The response includes records subsequent to the start date. If no start date is provided, all records prior to the end date are in scope.", "location": "query", "type": "string" } @@ -1785,7 +1863,7 @@ ] }, "Resource-purge": { - "description": "Deletes all the historical versions of a resource (excluding the current\nversion) from the FHIR store. To remove all versions of a resource, first\ndelete the current version and then call this method.\n\nThis is not a FHIR standard operation.", + "description": "Deletes all the historical versions of a resource (excluding the current version) from the FHIR store. To remove all versions of a resource, first delete the current version and then call this method. This is not a FHIR standard operation. 
For samples that show how to call `Resource-purge`, see [Deleting historical versions of a FHIR resource](/healthcare/docs/how-tos/fhir-resources#deleting_historical_versions_of_a_fhir_resource).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{fhirId}/{fhirId1}/$purge", "httpMethod": "DELETE", "id": "healthcare.projects.locations.datasets.fhirStores.fhir.Resource-purge", @@ -1810,7 +1888,7 @@ ] }, "capabilities": { - "description": "Gets the FHIR capability statement\n([STU3](http://hl7.org/implement/standards/fhir/STU3/capabilitystatement.html),\n[R4](http://hl7.org/implement/standards/fhir/R4/capabilitystatement.html)),\nor the [conformance\nstatement](http://hl7.org/implement/standards/fhir/DSTU2/conformance.html)\nin the DSTU2 case for the store, which contains a description of\nfunctionality supported by the server.\n\nImplements the FHIR standard capabilities interaction\n([STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#capabilities),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#capabilities)),\nor the [conformance\ninteraction](http://hl7.org/implement/standards/fhir/DSTU2/http.html#conformance)\nin the DSTU2 case.\n\nOn success, the response body will contain a JSON-encoded representation\nof a `CapabilityStatement` resource.", + "description": "Gets the FHIR capability statement ([STU3](http://hl7.org/implement/standards/fhir/STU3/capabilitystatement.html), [R4](http://hl7.org/implement/standards/fhir/R4/capabilitystatement.html)), or the [conformance statement](http://hl7.org/implement/standards/fhir/DSTU2/conformance.html) in the DSTU2 case for the store, which contains a description of functionality supported by the server. Implements the FHIR standard capabilities interaction ([STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#capabilities), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#capabilities)), or the [conformance interaction](http://hl7.org/implement/standards/fhir/DSTU2/http.html#conformance) in the DSTU2 case. On success, the response body will contain a JSON-encoded representation of a `CapabilityStatement` resource.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/metadata", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.fhirStores.fhir.capabilities", @@ -1835,7 +1913,7 @@ ] }, "create": { - "description": "Creates a FHIR resource.\n\nImplements the FHIR standard create interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#create),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#create),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#create)),\nwhich creates a new resource with a server-assigned resource ID.\n\nThe request body must contain a JSON-encoded FHIR resource, and the request\nheaders must contain `Content-Type: application/fhir+json`.\n\nOn success, the response body will contain a JSON-encoded representation\nof the resource as it was created on the server, including the\nserver-assigned resource ID and version ID.\nErrors generated by the FHIR store will contain a JSON-encoded\n`OperationOutcome` resource describing the reason for the error. If the\nrequest cannot be mapped to a valid API method on a FHIR store, a generic\nGCP error might be returned instead.", + "description": "Creates a FHIR resource. 
Implements the FHIR standard create interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#create), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#create), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#create)), which creates a new resource with a server-assigned resource ID. The request body must contain a JSON-encoded FHIR resource, and the request headers must contain `Content-Type: application/fhir+json`. On success, the response body will contain a JSON-encoded representation of the resource as it was created on the server, including the server-assigned resource ID and version ID. Errors generated by the FHIR store will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. For samples that show how to call `create`, see [Creating a FHIR resource](/healthcare/docs/how-tos/fhir-resources#creating_a_fhir_resource).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{fhirId}", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.fhirStores.fhir.create", @@ -1852,7 +1930,7 @@ "type": "string" }, "type": { - "description": "The FHIR resource type to create, such as Patient or Observation. For a\ncomplete list, see the FHIR Resource Index\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/resourcelist.html),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/resourcelist.html),\n[R4](http://hl7.org/implement/standards/fhir/R4/resourcelist.html)).\nMust match the resource type in the provided content.", + "description": "The FHIR resource type to create, such as Patient or Observation. For a complete list, see the FHIR Resource Index ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/resourcelist.html), [STU3](http://hl7.org/implement/standards/fhir/STU3/resourcelist.html), [R4](http://hl7.org/implement/standards/fhir/R4/resourcelist.html)). Must match the resource type in the provided content.", "location": "path", "pattern": "^[^/]+$", "required": true, @@ -1871,7 +1949,7 @@ ] }, "delete": { - "description": "Deletes a FHIR resource.\n\nImplements the FHIR standard delete interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#delete),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#delete),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#delete)).\n\nNote: Unless resource versioning is disabled by setting the\ndisable_resource_versioning flag\non the FHIR store, the deleted resources will be moved to a history\nrepository that can still be retrieved through vread\nand related methods, unless they are removed by the\npurge method.", + "description": "Deletes a FHIR resource. Implements the FHIR standard delete interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#delete), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#delete), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#delete)). Note: Unless resource versioning is disabled by setting the disable_resource_versioning flag on the FHIR store, the deleted resources will be moved to a history repository that can still be retrieved through vread and related methods, unless they are removed by the purge method. 
For samples that show how to call `delete`, see [Deleting a FHIR resource](/healthcare/docs/how-tos/fhir-resources#deleting_a_fhir_resource).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{fhirId}/{fhirId1}", "httpMethod": "DELETE", "id": "healthcare.projects.locations.datasets.fhirStores.fhir.delete", @@ -1896,7 +1974,7 @@ ] }, "executeBundle": { - "description": "Executes all the requests in the given Bundle.\n\nImplements the FHIR standard batch/transaction interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#transaction),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#transaction),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#transaction)).\n\nSupports all interactions within a bundle, except search. This method\naccepts Bundles of type `batch` and `transaction`, processing them\naccording to the batch processing rules\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#2.1.0.16.1),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#2.21.0.17.1),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#brules))\nand transaction processing rules\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#2.1.0.16.2),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#2.21.0.17.2),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#trules)).\n\nThe request body must contain a JSON-encoded FHIR `Bundle` resource, and\nthe request headers must contain `Content-Type: application/fhir+json`.\n\nFor a batch bundle or a successful transaction the response body will\ncontain a JSON-encoded representation of a `Bundle` resource of type\n`batch-response` or `transaction-response` containing one entry for each\nentry in the request, with the outcome of processing the entry. In the\ncase of an error for a transaction bundle, the response body will contain\na JSON-encoded `OperationOutcome` resource describing the reason for the\nerror. If the request cannot be mapped to a valid API method on a FHIR\nstore, a generic GCP error might be returned instead.", + "description": "Executes all the requests in the given Bundle. Implements the FHIR standard batch/transaction interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#transaction), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#transaction), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#transaction)). Supports all interactions within a bundle, except search. This method accepts Bundles of type `batch` and `transaction`, processing them according to the batch processing rules ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#2.1.0.16.1), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#2.21.0.17.1), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#brules)) and transaction processing rules ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#2.1.0.16.2), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#2.21.0.17.2), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#trules)). The request body must contain a JSON-encoded FHIR `Bundle` resource, and the request headers must contain `Content-Type: application/fhir+json`. 
For a batch bundle or a successful transaction the response body will contain a JSON-encoded representation of a `Bundle` resource of type `batch-response` or `transaction-response` containing one entry for each entry in the request, with the outcome of processing the entry. In the case of an error for a transaction bundle, the response body will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. For samples that show how to call `executeBundle`, see [Managing FHIR resources using FHIR bundles](/healthcare/docs/how-tos/fhir-bundles).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.fhirStores.fhir.executeBundle", @@ -1924,7 +2002,7 @@ ] }, "history": { - "description": "Lists all the versions of a resource (including the current version and\ndeleted versions) from the FHIR store.\n\nImplements the per-resource form of the FHIR standard history interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#history),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#history),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#history)).\n\nOn success, the response body will contain a JSON-encoded representation\nof a `Bundle` resource of type `history`, containing the version history\nsorted from most recent to oldest versions.\nErrors generated by the FHIR store will contain a JSON-encoded\n`OperationOutcome` resource describing the reason for the error. If the\nrequest cannot be mapped to a valid API method on a FHIR store, a generic\nGCP error might be returned instead.", + "description": "Lists all the versions of a resource (including the current version and deleted versions) from the FHIR store. Implements the per-resource form of the FHIR standard history interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#history), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#history), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#history)). On success, the response body will contain a JSON-encoded representation of a `Bundle` resource of type `history`, containing the version history sorted from most recent to oldest versions. Errors generated by the FHIR store will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. For samples that show how to call `history`, see [Listing FHIR resource versions](/healthcare/docs/how-tos/fhir-resources#listing_fhir_resource_versions).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{fhirId}/{fhirId1}/_history", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.fhirStores.fhir.history", @@ -1933,7 +2011,7 @@ ], "parameters": { "_at": { - "description": "Only include resource versions that were current at some point during the\ntime period specified in the date time value. 
The date parameter format is\nyyyy-mm-ddThh:mm:ss[Z|(+|-)hh:mm]\n\nClients may specify any of the following:\n\n* An entire year: `_at=2019`\n* An entire month: `_at=2019-01`\n* A specific day: `_at=2019-01-20`\n* A specific second: `_at=2018-12-31T23:59:58Z`", + "description": "Only include resource versions that were current at some point during the time period specified in the date time value. The date parameter format is yyyy-mm-ddThh:mm:ss[Z|(+|-)hh:mm] Clients may specify any of the following: * An entire year: `_at=2019` * An entire month: `_at=2019-01` * A specific day: `_at=2019-01-20` * A specific second: `_at=2018-12-31T23:59:58Z`", "location": "query", "type": "string" }, @@ -1944,12 +2022,12 @@ "type": "integer" }, "_page_token": { - "description": "Used to retrieve the first, previous, next, or last page of resource\nversions when using pagination. Value should be set to the value of\n`_page_token` set in next or previous page links' URLs. Next and previous\npage are returned in the response bundle's links field, where\n`link.relation` is \"previous\" or \"next\".\n\nOmit `_page_token` if no previous request has been made.", + "description": "Used to retrieve the first, previous, next, or last page of resource versions when using pagination. Value should be set to the value of `_page_token` set in next or previous page links' URLs. Next and previous page are returned in the response bundle's links field, where `link.relation` is \"previous\" or \"next\". Omit `_page_token` if no previous request has been made.", "location": "query", "type": "string" }, "_since": { - "description": "Only include resource versions that were created at or after the given\ninstant in time. The instant in time uses the format\nYYYY-MM-DDThh:mm:ss.sss+zz:zz (for example 2015-02-07T13:28:17.239+02:00 or\n2017-01-01T00:00:00Z). The time must be specified to the second and\ninclude a time zone.", + "description": "Only include resource versions that were created at or after the given instant in time. The instant in time uses the format YYYY-MM-DDThh:mm:ss.sss+zz:zz (for example 2015-02-07T13:28:17.239+02:00 or 2017-01-01T00:00:00Z). The time must be specified to the second and include a time zone.", "location": "query", "type": "string" }, @@ -1970,7 +2048,7 @@ ] }, "patch": { - "description": "Updates part of an existing resource by applying the operations specified\nin a [JSON Patch](http://jsonpatch.com/) document.\n\nImplements the FHIR standard patch interaction\n([STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#patch),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#patch)).\n\nDSTU2 doesn't define a patch method, but the server supports it in the same\nway it supports STU3.\n\nThe request body must contain a JSON Patch document, and the request\nheaders must contain `Content-Type: application/json-patch+json`.\n\nOn success, the response body will contain a JSON-encoded representation\nof the updated resource, including the server-assigned version ID.\nErrors generated by the FHIR store will contain a JSON-encoded\n`OperationOutcome` resource describing the reason for the error. If the\nrequest cannot be mapped to a valid API method on a FHIR store, a generic\nGCP error might be returned instead.", + "description": "Updates part of an existing resource by applying the operations specified in a [JSON Patch](http://jsonpatch.com/) document. 
Implements the FHIR standard patch interaction ([STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#patch), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#patch)). DSTU2 doesn't define a patch method, but the server supports it in the same way it supports STU3. The request body must contain a JSON Patch document, and the request headers must contain `Content-Type: application/json-patch+json`. On success, the response body will contain a JSON-encoded representation of the updated resource, including the server-assigned version ID. Errors generated by the FHIR store will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. For samples that show how to call `patch`, see [Patching a FHIR resource](/healthcare/docs/how-tos/fhir-resources#patching_a_fhir_resource).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{fhirId}/{fhirId1}", "httpMethod": "PATCH", "id": "healthcare.projects.locations.datasets.fhirStores.fhir.patch", @@ -1998,7 +2076,7 @@ ] }, "read": { - "description": "Gets the contents of a FHIR resource.\n\nImplements the FHIR standard read interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#read),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#read),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#read)).\n\nAlso supports the FHIR standard conditional read interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#cread),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#cread),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#cread))\nspecified by supplying an `If-Modified-Since` header with a date/time value\nor an `If-None-Match` header with an ETag value.\n\nOn success, the response body will contain a JSON-encoded representation\nof the resource.\nErrors generated by the FHIR store will contain a JSON-encoded\n`OperationOutcome` resource describing the reason for the error. If the\nrequest cannot be mapped to a valid API method on a FHIR store, a generic\nGCP error might be returned instead.", + "description": "Gets the contents of a FHIR resource. Implements the FHIR standard read interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#read), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#read), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#read)). Also supports the FHIR standard conditional read interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#cread), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#cread), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#cread)) specified by supplying an `If-Modified-Since` header with a date/time value or an `If-None-Match` header with an ETag value. On success, the response body will contain a JSON-encoded representation of the resource. Errors generated by the FHIR store will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. 
For samples that show how to call `read`, see [Getting a FHIR resource](/healthcare/docs/how-tos/fhir-resources#getting_a_fhir_resource).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{fhirId}/{fhirId1}", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.fhirStores.fhir.read", @@ -2023,7 +2101,7 @@ ] }, "search": { - "description": "Searches for resources in the given FHIR store according to criteria\nspecified as query parameters.\n\nImplements the FHIR standard search interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#search),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#search),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#search))\nusing the search semantics described in the FHIR Search specification\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/search.html),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/search.html),\n[R4](http://hl7.org/implement/standards/fhir/R4/search.html)).\n\nSupports three methods of search defined by the specification:\n\n* `GET [base]?[parameters]` to search across all resources.\n* `GET [base]/[type]?[parameters]` to search resources of a specified\ntype.\n* `POST [base]/[type]/_search?[parameters]` as an alternate form having\nthe same semantics as the `GET` method.\n\nThe `GET` methods do not support compartment searches. The `POST` method\ndoes not support `application/x-www-form-urlencoded` search parameters.\n\nOn success, the response body will contain a JSON-encoded representation\nof a `Bundle` resource of type `searchset`, containing the results of the\nsearch.\nErrors generated by the FHIR store will contain a JSON-encoded\n`OperationOutcome` resource describing the reason for the error. If the\nrequest cannot be mapped to a valid API method on a FHIR store, a generic\nGCP error might be returned instead.\n\nThe server's capability statement, retrieved through\ncapabilities, indicates what search parameters\nare supported on each FHIR resource. A list of all search parameters\ndefined by the specification can be found in the FHIR Search Parameter\nRegistry\n([STU3](http://hl7.org/implement/standards/fhir/STU3/searchparameter-registry.html),\n[R4](http://hl7.org/implement/standards/fhir/R4/searchparameter-registry.html)).\nFHIR search parameters for DSTU2 can be found on each resource's definition\npage.\n\nSupported search modifiers: `:missing`, `:exact`, `:contains`, `:text`,\n`:in`, `:not-in`, `:above`, `:below`, `:[type]`, `:not`, and `:recurse`.\n\nSupported search result parameters: `_sort`, `_count`, `_include`,\n`_revinclude`, `_summary=text`, `_summary=data`, and `_elements`.\n\nThe maximum number of search results returned defaults to 100, which can\nbe overridden by the `_count` parameter up to a maximum limit of 1000. If\nthere are additional results, the returned `Bundle` will contain\npagination links.\n\nResources with a total size larger than 5MB or a field count larger than\n50,000 might not be fully searchable as the server might trim its generated\nsearch index in those cases.\n\nNote: FHIR resources are indexed asynchronously, so there might be a slight\ndelay between the time a resource is created or changes and when the change\nis reflected in search results.", + "description": "Searches for resources in the given FHIR store according to criteria specified as query parameters. 
Implements the FHIR standard search interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#search), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#search), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#search)) using the search semantics described in the FHIR Search specification ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/search.html), [STU3](http://hl7.org/implement/standards/fhir/STU3/search.html), [R4](http://hl7.org/implement/standards/fhir/R4/search.html)). Supports three methods of search defined by the specification: * `GET [base]?[parameters]` to search across all resources. * `GET [base]/[type]?[parameters]` to search resources of a specified type. * `POST [base]/[type]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method. The `GET` methods do not support compartment searches. The `POST` method does not support `application/x-www-form-urlencoded` search parameters. On success, the response body will contain a JSON-encoded representation of a `Bundle` resource of type `searchset`, containing the results of the search. Errors generated by the FHIR store will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. The server's capability statement, retrieved through capabilities, indicates what search parameters are supported on each FHIR resource. A list of all search parameters defined by the specification can be found in the FHIR Search Parameter Registry ([STU3](http://hl7.org/implement/standards/fhir/STU3/searchparameter-registry.html), [R4](http://hl7.org/implement/standards/fhir/R4/searchparameter-registry.html)). FHIR search parameters for DSTU2 can be found on each resource's definition page. Supported search modifiers: `:missing`, `:exact`, `:contains`, `:text`, `:in`, `:not-in`, `:above`, `:below`, `:[type]`, `:not`, and `:recurse`. Supported search result parameters: `_sort`, `_count`, `_include`, `_revinclude`, `_summary=text`, `_summary=data`, and `_elements`. The maximum number of search results returned defaults to 100, which can be overridden by the `_count` parameter up to a maximum limit of 1000. If there are additional results, the returned `Bundle` will contain pagination links. Resources with a total size larger than 5MB or a field count larger than 50,000 might not be fully searchable as the server might trim its generated search index in those cases. Note: FHIR resources are indexed asynchronously, so there might be a slight delay between the time a resource is created or changes and when the change is reflected in search results. 
For samples and detailed information, see [Searching for FHIR resources](/healthcare/docs/how-tos/fhir-search) and [Advanced FHIR search features](/healthcare/docs/how-tos/fhir-advanced-search).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/_search", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.fhirStores.fhir.search", @@ -2051,7 +2129,7 @@ ] }, "update": { - "description": "Updates the entire contents of a resource.\n\nImplements the FHIR standard update interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#update),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#update),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#update)).\n\nIf the specified resource does\nnot exist and the FHIR store has\nenable_update_create set, creates the\nresource with the client-specified ID.\n\nThe request body must contain a JSON-encoded FHIR resource, and the request\nheaders must contain `Content-Type: application/fhir+json`. The resource\nmust contain an `id` element having an identical value to the ID in the\nREST path of the request.\n\nOn success, the response body will contain a JSON-encoded representation\nof the updated resource, including the server-assigned version ID.\nErrors generated by the FHIR store will contain a JSON-encoded\n`OperationOutcome` resource describing the reason for the error. If the\nrequest cannot be mapped to a valid API method on a FHIR store, a generic\nGCP error might be returned instead.", + "description": "Updates the entire contents of a resource. Implements the FHIR standard update interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#update), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#update), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#update)). If the specified resource does not exist and the FHIR store has enable_update_create set, creates the resource with the client-specified ID. The request body must contain a JSON-encoded FHIR resource, and the request headers must contain `Content-Type: application/fhir+json`. The resource must contain an `id` element having an identical value to the ID in the REST path of the request. On success, the response body will contain a JSON-encoded representation of the updated resource, including the server-assigned version ID. Errors generated by the FHIR store will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. 
For samples that show how to call `update`, see [Updating a FHIR resource](/healthcare/docs/how-tos/fhir-resources#updating_a_fhir_resource).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{fhirId}/{fhirId1}", "httpMethod": "PUT", "id": "healthcare.projects.locations.datasets.fhirStores.fhir.update", @@ -2079,7 +2157,7 @@ ] }, "vread": { - "description": "Gets the contents of a version (current or historical) of a FHIR resource\nby version ID.\n\nImplements the FHIR standard vread interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#vread),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#vread),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#vread)).\n\nOn success, the response body will contain a JSON-encoded representation\nof the resource.\nErrors generated by the FHIR store will contain a JSON-encoded\n`OperationOutcome` resource describing the reason for the error. If the\nrequest cannot be mapped to a valid API method on a FHIR store, a generic\nGCP error might be returned instead.", + "description": "Gets the contents of a version (current or historical) of a FHIR resource by version ID. Implements the FHIR standard vread interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#vread), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#vread), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#vread)). On success, the response body will contain a JSON-encoded representation of the resource. Errors generated by the FHIR store will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. For samples that show how to call `vread`, see [Retrieving a FHIR resource version](/healthcare/docs/how-tos/fhir-resources#retrieving_a_fhir_resource_version).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{fhirId}/{fhirId1}/_history/{_historyId}", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.fhirStores.fhir.vread", @@ -2119,7 +2197,7 @@ ], "parameters": { "hl7V2StoreId": { - "description": "The ID of the HL7v2 store that is being created.\nThe string must match the following regex: `[\\p{L}\\p{N}_\\-\\.]{1,256}`.", + "description": "The ID of the HL7v2 store that is being created. The string must match the following regex: `[\\p{L}\\p{N}_\\-\\.]{1,256}`.", "location": "query", "type": "string" }, @@ -2143,7 +2221,7 @@ ] }, "delete": { - "description": "Deletes the specified HL7v2 store and removes all messages that are\ncontained within it.", + "description": "Deletes the specified HL7v2 store and removes all messages that it contains.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}", "httpMethod": "DELETE", "id": "healthcare.projects.locations.datasets.hl7V2Stores.delete", @@ -2193,7 +2271,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "description": "Gets the access control policy for a resource. 
Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}:getIamPolicy", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.hl7V2Stores.getIamPolicy", @@ -2202,13 +2280,13 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" }, "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/hl7V2Stores/[^/]+$", "required": true, @@ -2233,12 +2311,12 @@ ], "parameters": { "filter": { - "description": "Restricts stores returned to those matching a filter. Syntax:\nhttps://cloud.google.com/appengine/docs/standard/python/search/query_strings\nOnly filtering on labels is supported. For example, `labels.key=value`.", + "description": "Restricts stores returned to those matching a filter. Syntax: https://cloud.google.com/appengine/docs/standard/python/search/query_strings Only filtering on labels is supported. For example, `labels.key=value`.", "location": "query", "type": "string" }, "pageSize": { - "description": "Limit on the number of HL7v2 stores to return in a single response.\nIf zero the default page size of 100 is used.", + "description": "Limit on the number of HL7v2 stores to return in a single response. If zero the default page size of 100 is used.", "format": "int32", "location": "query", "type": "integer" @@ -2274,14 +2352,14 @@ ], "parameters": { "name": { - "description": "Output only. Resource name of the HL7v2 store, of the form\n`projects/{project_id}/datasets/{dataset_id}/hl7V2Stores/{hl7v2_store_id}`.", + "description": "Resource name of the HL7v2 store, of the form `projects/{project_id}/datasets/{dataset_id}/hl7V2Stores/{hl7v2_store_id}`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/hl7V2Stores/[^/]+$", "required": true, "type": "string" }, "updateMask": { - "description": "The update mask applies to the resource. For the `FieldMask` definition,\nsee\nhttps://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", + "description": "The update mask applies to the resource. 
For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", "format": "google-fieldmask", "location": "query", "type": "string" @@ -2299,7 +2377,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}:setIamPolicy", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.hl7V2Stores.setIamPolicy", @@ -2308,7 +2386,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/hl7V2Stores/[^/]+$", "required": true, @@ -2327,7 +2405,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}:testIamPermissions", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.hl7V2Stores.testIamPermissions", @@ -2336,7 +2414,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/hl7V2Stores/[^/]+$", "required": true, @@ -2359,7 +2437,7 @@ "messages": { "methods": { "create": { - "description": "Creates a message and sends a notification to the Cloud Pub/Sub topic. If\nconfigured, the MLLP adapter listens to messages created by this method and\nsends those back to the hospital. A successful response indicates the\nmessage has been persisted to storage and a Cloud Pub/Sub notification has\nbeen sent. Sending to the hospital by the MLLP adapter happens\nasynchronously.", + "description": "Parses and stores an HL7v2 message. 
This method triggers an asynchronous notification to any Cloud Pub/Sub topic configured in projects.locations.datasets.hl7V2Stores.Hl7V2NotificationConfig, if the filtering matches the message. If an MLLP adapter is configured to listen to a Cloud Pub/Sub topic, the adapter transmits the message when a notification is received.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}/messages", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.hl7V2Stores.messages.create", @@ -2428,7 +2506,7 @@ "type": "string" }, "view": { - "description": "Specifies which parts of the Message resource to return in the response.\nWhen unspecified, equivalent to FULL.", + "description": "Specifies which parts of the Message resource to return in the response. When unspecified, equivalent to FULL.", "enum": [ "MESSAGE_VIEW_UNSPECIFIED", "RAW_ONLY", @@ -2436,6 +2514,13 @@ "FULL", "BASIC" ], + "enumDescriptions": [ + "Not specified, equivalent to FULL.", + "Server responses include all the message fields except parsed_data field.", + "Server responses include all the message fields except data field.", + "Server responses include all the message fields.", + "Server responses include only the name field." + ], "location": "query", "type": "string" } @@ -2449,7 +2534,7 @@ ] }, "ingest": { - "description": "Ingests a new HL7v2 message from the hospital and sends a notification to\nthe Cloud Pub/Sub topic. Return is an HL7v2 ACK message if the message was\nsuccessfully stored. Otherwise an error is returned.", + "description": "Parses and stores an HL7v2 message. This method triggers an asynchronous notification to any Cloud Pub/Sub topic configured in projects.locations.datasets.hl7V2Stores.Hl7V2NotificationConfig, if the filtering matches the message. If an MLLP adapter is configured to listen to a Cloud Pub/Sub topic, the adapter transmits the message when a notification is received. This method also generates a response containing an HL7v2 acknowledgement (`ACK`) message when successful or a negative acknowledgement (`NACK`) message in case of error, suitable for replying to HL7v2 interface systems that expect these acknowledgements.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}/messages:ingest", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.hl7V2Stores.messages.ingest", @@ -2477,7 +2562,7 @@ ] }, "list": { - "description": "Lists all the messages in the given HL7v2 store with support for filtering.\n\nNote: HL7v2 messages are indexed asynchronously, so there might be a slight\ndelay between the time a message is created and when it can be found\nthrough a filter.", + "description": "Lists all the messages in the given HL7v2 store with support for filtering. Note: HL7v2 messages are indexed asynchronously, so there might be a slight delay between the time a message is created and when it can be found through a filter.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}/messages", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.hl7V2Stores.messages.list", @@ -2486,17 +2571,17 @@ ], "parameters": { "filter": { - "description": "Restricts messages returned to those matching a filter. Syntax:\nhttps://cloud.google.com/appengine/docs/standard/python/search/query_strings\n\nFields/functions available for filtering are:\n\n* `message_type`, from the MSH-9.1 field. 
For example,\n`NOT message_type = \"ADT\"`.\n* `send_date` or `sendDate`, the YYYY-MM-DD date the message was sent in\nthe dataset's time_zone, from the MSH-7 segment. For example,\n`send_date \u003c \"2017-01-02\"`.\n* `send_time`, the timestamp when the message was sent, using the\nRFC3339 time format for comparisons, from the MSH-7 segment. For example,\n`send_time \u003c \"2017-01-02T00:00:00-05:00\"`.\n* `send_facility`, the care center that the message came from, from the\nMSH-4 segment. For example, `send_facility = \"ABC\"`.\n* `PatientId(value, type)`, which matches if the message lists a patient\nhaving an ID of the given value and type in the PID-2, PID-3, or PID-4\nsegments. For example, `PatientId(\"123456\", \"MRN\")`.\n* `labels.x`, a string value of the label with key `x` as set using the\nMessage.labels\nmap. For example, `labels.\"priority\"=\"high\"`. The operator `:*` can be used\nto assert the existence of a label. For example, `labels.\"priority\":*`.", + "description": "Restricts messages returned to those matching a filter. Syntax: https://cloud.google.com/appengine/docs/standard/python/search/query_strings Fields/functions available for filtering are: * `message_type`, from the MSH-9.1 field. For example, `NOT message_type = \"ADT\"`. * `send_date` or `sendDate`, the YYYY-MM-DD date the message was sent in the dataset's time_zone, from the MSH-7 segment. For example, `send_date \u003c \"2017-01-02\"`. * `send_time`, the timestamp when the message was sent, using the RFC3339 time format for comparisons, from the MSH-7 segment. For example, `send_time \u003c \"2017-01-02T00:00:00-05:00\"`. * `send_facility`, the care center that the message came from, from the MSH-4 segment. For example, `send_facility = \"ABC\"`. * `PatientId(value, type)`, which matches if the message lists a patient having an ID of the given value and type in the PID-2, PID-3, or PID-4 segments. For example, `PatientId(\"123456\", \"MRN\")`. * `labels.x`, a string value of the label with key `x` as set using the Message.labels map. For example, `labels.\"priority\"=\"high\"`. The operator `:*` can be used to assert the existence of a label. For example, `labels.\"priority\":*`.", "location": "query", "type": "string" }, "orderBy": { - "description": "Orders messages returned by the specified order_by clause.\nSyntax: https://cloud.google.com/apis/design/design_patterns#sorting_order\n\nFields available for ordering are:\n\n* `send_time`", + "description": "Orders messages returned by the specified order_by clause. Syntax: https://cloud.google.com/apis/design/design_patterns#sorting_order Fields available for ordering are: * `send_time`", "location": "query", "type": "string" }, "pageSize": { - "description": "Limit on the number of messages to return in a single response.\nIf zero the default page size of 100 is used.", + "description": "Limit on the number of messages to return in a single response. If zero the default page size of 100 is used.", "format": "int32", "location": "query", "type": "integer" @@ -2514,7 +2599,7 @@ "type": "string" }, "view": { - "description": "Specifies the parts of the Message to return in the response.\nWhen unspecified, equivalent to BASIC. Setting this to anything other than\nBASIC with a `page_size` larger than the default can generate a large\nresponse, which impacts the performance of this method.", + "description": "Specifies the parts of the Message to return in the response. When unspecified, equivalent to BASIC. 
Setting this to anything other than BASIC with a `page_size` larger than the default can generate a large response, which impacts the performance of this method.", "enum": [ "MESSAGE_VIEW_UNSPECIFIED", "RAW_ONLY", @@ -2522,6 +2607,13 @@ "FULL", "BASIC" ], + "enumDescriptions": [ + "Not specified, equivalent to FULL.", + "Server responses include all the message fields except parsed_data field.", + "Server responses include all the message fields except data field.", + "Server responses include all the message fields.", + "Server responses include only the name field." + ], "location": "query", "type": "string" } @@ -2535,7 +2627,7 @@ ] }, "patch": { - "description": "Update the message.\n\nThe contents of the message in Message.data and data extracted from\nthe contents such as Message.create_time cannot be altered. Only the\nMessage.labels field is allowed to be updated. The labels in the\nrequest are merged with the existing set of labels. Existing labels with\nthe same keys are updated.", + "description": "Update the message. The contents of the message in Message.data and data extracted from the contents such as Message.create_time cannot be altered. Only the Message.labels field is allowed to be updated. The labels in the request are merged with the existing set of labels. Existing labels with the same keys are updated.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}/messages/{messagesId}", "httpMethod": "PATCH", "id": "healthcare.projects.locations.datasets.hl7V2Stores.messages.patch", @@ -2544,14 +2636,14 @@ ], "parameters": { "name": { - "description": "Resource name of the Message, of the form\n`projects/{project_id}/datasets/{dataset_id}/hl7V2Stores/{hl7_v2_store_id}/messages/{message_id}`.\nAssigned by the server.", + "description": "Resource name of the Message, of the form `projects/{project_id}/datasets/{dataset_id}/hl7V2Stores/{hl7_v2_store_id}/messages/{message_id}`. Assigned by the server.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/hl7V2Stores/[^/]+/messages/[^/]+$", "required": true, "type": "string" }, "updateMask": { - "description": "The update mask applies to the resource. For the `FieldMask` definition,\nsee\nhttps://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", + "description": "The update mask applies to the resource. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", "format": "google-fieldmask", "location": "query", "type": "string" @@ -2575,7 +2667,7 @@ "operations": { "methods": { "cancel": { - "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. 
Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/operations/{operationsId}:cancel", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.operations.cancel", @@ -2603,7 +2695,7 @@ ] }, "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/operations/{operationsId}", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.operations.get", @@ -2628,7 +2720,7 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. 
For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/operations", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.operations.list", @@ -2677,11 +2769,11 @@ } } }, - "revision": "20200501", + "revision": "20200917", "rootUrl": "https://healthcare.googleapis.com/", "schemas": { "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"sampleservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:aliya@example.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts jose@example.com from DATA_READ logging, and\naliya@example.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -2692,18 +2784,18 @@ "type": "array" }, "service": { - "description": "Specifies a service that will be enabled for audit logging.\nFor example, `storage.googleapis.com`, `cloudsql.googleapis.com`.\n`allServices` is a special value that covers all services.", + "description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. 
`allServices` is a special value that covers all services.", "type": "string" } }, "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\njose@example.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions. Example: { \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { - "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", + "description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.", "items": { "type": "string" }, @@ -2734,17 +2826,17 @@ "properties": { "condition": { "$ref": "Expr", - "description": "The condition that is associated with this binding.\n\nIf the condition evaluates to `true`, then this binding applies to the\ncurrent request.\n\nIf the condition evaluates to `false`, then this binding does not apply to\nthe current request. However, a different role binding might grant the same\nrole to one or more of the members in this binding.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies)." + "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a user that has been recently deleted. For\n example, `alice@example.com?uid=123456789012345678901`. 
If the user is\n recovered, this value reverts to `user:{emailid}` and the recovered user\n retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus\n unique identifier) representing a service account that has been recently\n deleted. For example,\n `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.\n If the service account is undeleted, this value reverts to\n `serviceAccount:{emailid}` and the undeleted service account retains the\n role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a Google group that has been recently\n deleted. For example, `admins@example.com?uid=123456789012345678901`. If\n the group is recovered, this value reverts to `group:{emailid}` and the\n recovered group retains the role in the binding.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", "items": { "type": "string" }, "type": "array" }, "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "description": "Role that is assigned to `members`. 
For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", "type": "string" } }, @@ -2779,11 +2871,11 @@ "type": "object" }, "CryptoHashConfig": { - "description": "Pseudonymization method that generates surrogates via cryptographic hashing.\nUses SHA-256.\nOutputs a base64-encoded representation of the hashed output\n(for example, `L7k0BHmF1ha5U3NfGykjro4xWi1MPVQPjhMAZbSV9mM=`).", + "description": "Pseudonymization method that generates surrogates via cryptographic hashing. Uses SHA-256. Outputs a base64-encoded representation of the hashed output (for example, `L7k0BHmF1ha5U3NfGykjro4xWi1MPVQPjhMAZbSV9mM=`).", "id": "CryptoHashConfig", "properties": { "cryptoKey": { - "description": "An AES 128/192/256 bit key. Causes the hash to be computed based on this\nkey. A default key is generated for each Deidentify operation and is used\nwherever crypto_key is not specified.", + "description": "An AES 128/192/256 bit key. Causes the hash to be computed based on this key. A default key is generated for each Deidentify operation and is used wherever crypto_key is not specified.", "format": "byte", "type": "string" } @@ -2791,26 +2883,26 @@ "type": "object" }, "Dataset": { - "description": "A message representing a health dataset.\n\nA health dataset represents a collection of healthcare data pertaining to one\nor more patients. This may include multiple modalities of healthcare data,\nsuch as electronic medical records or medical imaging data.", + "description": "A message representing a health dataset. A health dataset represents a collection of healthcare data pertaining to one or more patients. This may include multiple modalities of healthcare data, such as electronic medical records or medical imaging data.", "id": "Dataset", "properties": { "name": { - "description": "Output only. Resource name of the dataset, of the form\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", + "description": "Resource name of the dataset, of the form `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", "type": "string" }, "timeZone": { - "description": "The default timezone used by this dataset. Must be a either a valid IANA\ntime zone name such as \"America/New_York\" or empty, which defaults to UTC.\nThis is used for parsing times in resources, such as HL7 messages, where no\nexplicit timezone is specified.", + "description": "The default timezone used by this dataset. Must be a either a valid IANA time zone name such as \"America/New_York\" or empty, which defaults to UTC. This is used for parsing times in resources, such as HL7 messages, where no explicit timezone is specified.", "type": "string" } }, "type": "object" }, "DateShiftConfig": { - "description": "Shift a date forward or backward in time by a random amount which is\nconsistent for a given patient and crypto key combination.", + "description": "Shift a date forward or backward in time by a random amount which is consistent for a given patient and crypto key combination.", "id": "DateShiftConfig", "properties": { "cryptoKey": { - "description": "An AES 128/192/256 bit key. Causes the shift to be computed based on this\nkey and the patient ID. A default key is generated for each\nDeidentify operation and is used wherever crypto_key is not specified.", + "description": "An AES 128/192/256 bit key. Causes the shift to be computed based on this key and the patient ID. 
A default key is generated for each Deidentify operation and is used wherever crypto_key is not specified.", "format": "byte", "type": "string" } @@ -2818,7 +2910,7 @@ "type": "object" }, "DeidentifyConfig": { - "description": "Configures de-id options specific to different types of content.\nEach submessage customizes the handling of an\nhttps://tools.ietf.org/html/rfc6838 media type or subtype. Configs are\napplied in a nested manner at runtime.", + "description": "Configures de-id options specific to different types of content. Each submessage customizes the handling of an https://tools.ietf.org/html/rfc6838 media type or subtype. Configs are applied in a nested manner at runtime.", "id": "DeidentifyConfig", "properties": { "dicom": { @@ -2831,11 +2923,11 @@ }, "image": { "$ref": "ImageConfig", - "description": "Configures de-identification of image pixels wherever they are found in the\nsource_dataset." + "description": "Configures de-identification of image pixels wherever they are found in the source_dataset." }, "text": { "$ref": "TextConfig", - "description": "Configures de-identification of text wherever it is found in the\nsource_dataset." + "description": "Configures de-identification of text wherever it is found in the source_dataset." } }, "type": "object" @@ -2849,7 +2941,7 @@ "description": "Deidentify configuration." }, "destinationDataset": { - "description": "The name of the dataset resource to create and write the redacted data to.\n\n * The destination dataset must not exist.\n * The destination dataset must be in the same project and location as the\n source dataset. De-identifying data across multiple projects or locations\n is not supported.", + "description": "The name of the dataset resource to create and write the redacted data to. * The destination dataset must not exist. * The destination dataset must be in the same project and location as the source dataset. De-identifying data across multiple projects or locations is not supported.", "type": "string" } }, @@ -2864,7 +2956,7 @@ "description": "De-identify configuration." }, "destinationStore": { - "description": "The name of the DICOM store to create and write the redacted data to.\nFor example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.\n\n * The destination dataset must exist.\n * The source dataset and destination dataset must both reside in the same\n project. De-identifying data across multiple projects is not supported.\n * The destination DICOM store must not exist.\n * The caller must have the necessary permissions to create the destination\n DICOM store.", + "description": "The name of the DICOM store to create and write the redacted data to. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`. * The destination dataset must exist. * The source dataset and destination dataset must both reside in the same project. De-identifying data across multiple projects is not supported. * The destination DICOM store must not exist. * The caller must have the necessary permissions to create the destination DICOM store.", "type": "string" }, "filterConfig": { @@ -2883,12 +2975,12 @@ "description": "Deidentify configuration." 
}, "destinationStore": { - "description": "The name of the FHIR store to create and write the redacted data to.\nFor example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.\n\n * The destination dataset must exist.\n * The source dataset and destination dataset must both reside in the same\n project. De-identifying data across multiple projects is not supported.\n * The destination FHIR store must exist.\n * The caller must have the healthcare.fhirResources.update permission to\n write to the destination FHIR store.", + "description": "The name of the FHIR store to create and write the redacted data to. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`. * The destination dataset must exist. * The source dataset and destination dataset must both reside in the same project. De-identifying data across multiple projects is not supported. * The destination FHIR store must exist. * The caller must have the healthcare.fhirResources.update permission to write to the destination FHIR store.", "type": "string" }, "resourceFilter": { "$ref": "FhirFilter", - "description": "A filter specifying the resources to include in the output. If not\nspecified, all resources are included in the output." + "description": "A filter specifying the resources to include in the output. If not specified, all resources are included in the output." } }, "type": "object" @@ -2915,9 +3007,9 @@ "enumDescriptions": [ "No tag filtration profile provided. Same as KEEP_ALL_PROFILE.", "Keep only tags required to produce valid DICOM.", - "Remove tags based on DICOM Standard's Attribute Confidentiality Basic\nProfile (DICOM Standard Edition 2018e)\nhttp://dicom.nema.org/medical/dicom/2018e/output/chtml/part15/chapter_E.html.", + "Remove tags based on DICOM Standard's Attribute Confidentiality Basic Profile (DICOM Standard Edition 2018e) http://dicom.nema.org/medical/dicom/2018e/output/chtml/part15/chapter_E.html.", "Keep all tags.", - "Inspects within tag contents and replaces sensitive text. The process\ncan be configured using the TextConfig.\nApplies to all tags with the following Value Representation names:\nAE, LO, LT, PN, SH, ST, UC, UT, DA, DT, AS" + "Inspects within tag contents and replaces sensitive text. The process can be configured using the TextConfig. Applies to all tags with the following Value Representation names: AE, LO, LT, PN, SH, ST, UC, UT, DA, DT, AS" ], "type": "string" }, @@ -2930,7 +3022,7 @@ "description": "List of tags to remove. Keep all other tags." }, "skipIdRedaction": { - "description": "If true, skip replacing StudyInstanceUID, SeriesInstanceUID,\nSOPInstanceUID, and MediaStorageSOPInstanceUID and leave them untouched.\nThe Cloud Healthcare API regenerates these UIDs by default based on the\nDICOM Standard's reasoning: \"Whilst these UIDs cannot be mapped directly\nto an individual out of context, given access to the original images, or\nto a database of the original images containing the UIDs, it would be\npossible to recover the individual's identity.\"\nhttp://dicom.nema.org/medical/dicom/current/output/chtml/part15/sect_E.3.9.html", + "description": "If true, skip replacing StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID, and MediaStorageSOPInstanceUID and leave them untouched. 
The Cloud Healthcare API regenerates these UIDs by default based on the DICOM Standard's reasoning: \"Whilst these UIDs cannot be mapped directly to an individual out of context, given access to the original images, or to a database of the original images containing the UIDs, it would be possible to recover the individual's identity.\" http://dicom.nema.org/medical/dicom/current/output/chtml/part15/sect_E.3.9.html", "type": "boolean" } }, @@ -2941,7 +3033,7 @@ "id": "DicomFilterConfig", "properties": { "resourcePathsGcsUri": { - "description": "The Cloud Storage location of the filter configuration file.\nThe `gcs_uri` must be in the format `gs://bucket/path/to/object`.\nThe filter configuration file must contain a list of resource paths\nseparated by newline characters (\\n or \\r\\n). Each resource path\nmust be in the format\n\"/studies/{studyUID}[/series/{seriesUID}[/instances/{instanceUID}]]\"\n\nThe Cloud Healthcare API service account must have the\n`roles/storage.objectViewer` Cloud IAM role for this Cloud Storage\nlocation.", + "description": "The Cloud Storage location of the filter configuration file. The `gcs_uri` must be in the format `gs://bucket/path/to/object`. The filter configuration file must contain a list of resource paths separated by newline characters (\\n or \\r\\n). Each resource path must be in the format \"/studies/{studyUID}[/series/{seriesUID}[/instances/{instanceUID}]]\" The Cloud Healthcare API service account must have the `roles/storage.objectViewer` Cloud IAM role for this Cloud Storage location.", "type": "string" } }, @@ -2955,37 +3047,37 @@ "additionalProperties": { "type": "string" }, - "description": "User-supplied key-value pairs used to organize DICOM stores.\n\nLabel keys must be between 1 and 63 characters long, have a UTF-8 encoding\nof maximum 128 bytes, and must conform to the\nfollowing PCRE regular expression:\n\\p{Ll}\\p{Lo}{0,62}\n\nLabel values are optional, must be between 1 and 63 characters long, have\na UTF-8 encoding of maximum 128 bytes, and must conform to the\nfollowing PCRE regular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63}\n\nNo more than 64 labels can be associated with a given store.", + "description": "User-supplied key-value pairs used to organize DICOM stores. Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}{0,62} Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63} No more than 64 labels can be associated with a given store.", "type": "object" }, "name": { - "description": "Output only. Resource name of the DICOM store, of the form\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + "description": "Resource name of the DICOM store, of the form `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", "type": "string" }, "notificationConfig": { "$ref": "NotificationConfig", - "description": "Notification destination for new DICOM instances.\nSupplied by the client." + "description": "Notification destination for new DICOM instances. Supplied by the client." } }, "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. 
A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" }, "ExportDicomDataRequest": { - "description": "Exports data from the specified DICOM store.\nIf a given resource, such as a DICOM object with the same SOPInstance UID,\nalready exists in the output, it is overwritten with the version\nin the source dataset.\nExported DICOM data persists when the DICOM store from which it was\nexported is deleted.", + "description": "Exports data from the specified DICOM store. If a given resource, such as a DICOM object with the same SOPInstance UID, already exists in the output, it is overwritten with the version in the source dataset. Exported DICOM data persists when the DICOM store from which it was exported is deleted.", "id": "ExportDicomDataRequest", "properties": { "bigqueryDestination": { "$ref": "GoogleCloudHealthcareV1DicomBigQueryDestination", - "description": "The BigQuery output destination.\n\nYou can only export to a BigQuery dataset that's in the same project as\nthe DICOM store you're exporting from.\n\nThe BigQuery location requires two IAM roles:\n`roles/bigquery.dataEditor` and `roles/bigquery.jobUser`." + "description": "The BigQuery output destination. You can only export to a BigQuery dataset that's in the same project as the DICOM store you're exporting from. The BigQuery location requires two IAM roles: `roles/bigquery.dataEditor` and `roles/bigquery.jobUser`." }, "gcsDestination": { "$ref": "GoogleCloudHealthcareV1DicomGcsDestination", - "description": "The Cloud Storage output destination.\n\nThe Cloud Storage location requires the `roles/storage.objectAdmin` Cloud\nIAM role." + "description": "The Cloud Storage output destination. The Cloud Storage location requires the `roles/storage.objectAdmin` Cloud IAM role." } }, "type": "object" @@ -3002,39 +3094,39 @@ "properties": { "bigqueryDestination": { "$ref": "GoogleCloudHealthcareV1FhirBigQueryDestination", - "description": "The BigQuery output destination.\n\nThe BigQuery location requires two IAM roles:\n`roles/bigquery.dataEditor` and `roles/bigquery.jobUser`.\n\nThe output will be one BigQuery table per resource type." + "description": "The BigQuery output destination. The BigQuery location requires two IAM roles: `roles/bigquery.dataEditor` and `roles/bigquery.jobUser`. The output is one BigQuery table per resource type." }, "gcsDestination": { "$ref": "GoogleCloudHealthcareV1FhirGcsDestination", - "description": "The Cloud Storage output destination.\n\nThe Cloud Storage location requires the `roles/storage.objectAdmin` Cloud\nIAM role.\n\nThe exported outputs are\norganized by FHIR resource types. The server will create one object per\nresource type. Each object contains newline delimited JSON, and each line\nis a FHIR resource." + "description": "The Cloud Storage output destination. The Healthcare Service Agent account requires the `roles/storage.objectAdmin` role on the Cloud Storage location. 
The exported outputs are organized by FHIR resource types. The server creates one object per resource type. Each object contains newline delimited JSON, and each line is a FHIR resource." } }, "type": "object" }, "ExportResourcesResponse": { - "description": "Response when all resources export successfully.\nThis structure will be included in the\nresponse to describe the detailed\noutcome. It will only be included when the operation finishes successfully.", + "description": "Response when all resources export successfully. This structure is included in the response to describe the detailed outcome after the operation finishes successfully.", "id": "ExportResourcesResponse", "properties": {}, "type": "object" }, "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL)\nsyntax. CEL is a C-like expression language. The syntax and semantics of CEL\nare documented at https://github.com/google/cel-spec.\n\nExample (Comparison):\n\n title: \"Summary size limit\"\n description: \"Determines if a summary is less than 100 chars\"\n expression: \"document.summary.size() \u003c 100\"\n\nExample (Equality):\n\n title: \"Requestor is owner\"\n description: \"Determines if requestor is the document owner\"\n expression: \"document.owner == request.auth.claims.email\"\n\nExample (Logic):\n\n title: \"Public documents\"\n description: \"Determine whether the document should be publicly visible\"\n expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\n\nExample (Data Manipulation):\n\n title: \"Notification string\"\n description: \"Create a notification string with a timestamp.\"\n expression: \"'New message received at ' + string(document.create_time)\"\n\nThe exact variables and functions that may be referenced within an expression\nare determined by the service that evaluates it. See the service\ndocumentation for additional information.", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { - "description": "Optional. Description of the expression. This is a longer text which\ndescribes the expression, e.g. when hovered over it in a UI.", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. 
when hovered over it in a UI.", "type": "string" }, "expression": { - "description": "Textual representation of an expression in Common Expression Language\nsyntax.", + "description": "Textual representation of an expression in Common Expression Language syntax.", "type": "string" }, "location": { - "description": "Optional. String indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", "type": "string" }, "title": { - "description": "Optional. Title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", "type": "string" } }, @@ -3045,7 +3137,7 @@ "id": "FhirConfig", "properties": { "fieldMetadataList": { - "description": "Specifies FHIR paths to match and how to transform them. Any field that\nis not matched by a FieldMetadata is passed through to the output\ndataset unmodified. All extensions are removed in the output.", + "description": "Specifies FHIR paths to match and how to transform them. Any field that is not matched by a FieldMetadata is passed through to the output dataset unmodified. All extensions are removed in the output.", "items": { "$ref": "FieldMetadata" }, @@ -3060,7 +3152,7 @@ "properties": { "resources": { "$ref": "Resources", - "description": "List of resources to include in the output. If this list is empty or\nnot specified, all resources are included in the output." + "description": "List of resources to include in the output. If this list is empty or not specified, all resources are included in the output." } }, "type": "object" @@ -3070,41 +3162,41 @@ "id": "FhirStore", "properties": { "disableReferentialIntegrity": { - "description": "Whether to disable referential integrity in this FHIR store. This field is\nimmutable after FHIR store creation.\nThe default value is false, meaning that the API enforces referential\nintegrity and fails the requests that result in inconsistent state in\nthe FHIR store.\nWhen this field is set to true, the API skips referential integrity\nchecks. Consequently, operations that rely on references, such as\nGetPatientEverything, do not return all the results if broken references\nexist.", + "description": "Whether to disable referential integrity in this FHIR store. This field is immutable after FHIR store creation. The default value is false, meaning that the API enforces referential integrity and fails the requests that result in inconsistent state in the FHIR store. When this field is set to true, the API skips referential integrity checks. Consequently, operations that rely on references, such as GetPatientEverything, do not return all the results if broken references exist.", "type": "boolean" }, "disableResourceVersioning": { - "description": "Whether to disable resource versioning for this FHIR store. This field can\nnot be changed after the creation of FHIR store.\nIf set to false, which is the default behavior, all write operations\ncause historical versions to be recorded automatically. The historical\nversions can be fetched through the history APIs, but cannot be updated.\nIf set to true, no historical versions are kept. 
The server sends\nerrors for attempts to read the historical versions.", + "description": "Whether to disable resource versioning for this FHIR store. This field can not be changed after the creation of FHIR store. If set to false, which is the default behavior, all write operations cause historical versions to be recorded automatically. The historical versions can be fetched through the history APIs, but cannot be updated. If set to true, no historical versions are kept. The server sends errors for attempts to read the historical versions.", "type": "boolean" }, "enableUpdateCreate": { - "description": "Whether this FHIR store has the [updateCreate\ncapability](https://www.hl7.org/fhir/capabilitystatement-definitions.html#CapabilityStatement.rest.resource.updateCreate).\nThis determines if the client can use an Update operation to create a new\nresource with a client-specified ID. If false, all IDs are server-assigned\nthrough the Create operation and attempts to update a non-existent resource\nreturn errors. Please treat the audit logs with appropriate levels of\ncare if client-specified resource IDs contain sensitive data such as\npatient identifiers, those IDs are part of the FHIR resource path\nrecorded in Cloud audit logs and Cloud Pub/Sub notifications.", + "description": "Whether this FHIR store has the [updateCreate capability](https://www.hl7.org/fhir/capabilitystatement-definitions.html#CapabilityStatement.rest.resource.updateCreate). This determines if the client can use an Update operation to create a new resource with a client-specified ID. If false, all IDs are server-assigned through the Create operation and attempts to update a non-existent resource return errors. Please treat the audit logs with appropriate levels of care if client-specified resource IDs contain sensitive data such as patient identifiers, those IDs are part of the FHIR resource path recorded in Cloud audit logs and Cloud Pub/Sub notifications.", "type": "boolean" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "User-supplied key-value pairs used to organize FHIR stores.\n\nLabel keys must be between 1 and 63 characters long, have a UTF-8 encoding\nof maximum 128 bytes, and must conform to the\nfollowing PCRE regular expression:\n\\p{Ll}\\p{Lo}{0,62}\n\nLabel values are optional, must be between 1 and 63 characters long, have\na UTF-8 encoding of maximum 128 bytes, and must conform to the\nfollowing PCRE regular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63}\n\nNo more than 64 labels can be associated with a given store.", + "description": "User-supplied key-value pairs used to organize FHIR stores. Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}{0,62} Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63} No more than 64 labels can be associated with a given store.", "type": "object" }, "name": { - "description": "Output only. Resource name of the FHIR store, of the form\n`projects/{project_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", + "description": "Output only. 
Resource name of the FHIR store, of the form `projects/{project_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", "type": "string" }, "notificationConfig": { "$ref": "NotificationConfig", - "description": "If non-empty, publish all resource modifications of this FHIR store to\nthis destination. The Cloud Pub/Sub message attributes contain a map\nwith a string describing the action that has triggered the notification.\nFor example, \"action\":\"CreateResource\"." + "description": "If non-empty, publish all resource modifications of this FHIR store to this destination. The Cloud Pub/Sub message attributes contain a map with a string describing the action that has triggered the notification. For example, \"action\":\"CreateResource\"." }, "streamConfigs": { - "description": "A list of streaming configs that configure the destinations of streaming\nexport for every resource mutation in this FHIR store. Each store is\nallowed to have up to 10 streaming configs.\nAfter a new config is added, the next resource mutation is streamed to\nthe new location in addition to the existing ones.\nWhen a location is removed from the list, the server stops\nstreaming to that location. Before adding a new config, you must add the\nrequired\n[`bigquery.dataEditor`](https://cloud.google.com/bigquery/docs/access-control#bigquery.dataEditor)\nrole to your project's **Cloud Healthcare Service Agent**\n[service account](https://cloud.google.com/iam/docs/service-accounts).\nSome lag (typically on the order of dozens of seconds) is expected before\nthe results show up in the streaming destination.", + "description": "A list of streaming configs that configure the destinations of streaming export for every resource mutation in this FHIR store. Each store is allowed to have up to 10 streaming configs. After a new config is added, the next resource mutation is streamed to the new location in addition to the existing ones. When a location is removed from the list, the server stops streaming to that location. Before adding a new config, you must add the required [`bigquery.dataEditor`](https://cloud.google.com/bigquery/docs/access-control#bigquery.dataEditor) role to your project's **Cloud Healthcare Service Agent** [service account](https://cloud.google.com/iam/docs/service-accounts). Some lag (typically on the order of dozens of seconds) is expected before the results show up in the streaming destination.", "items": { "$ref": "StreamConfig" }, "type": "array" }, "version": { - "description": "The FHIR specification version that this FHIR store supports natively. This\nfield is immutable after store creation. Requests are rejected if they\ncontain FHIR resources of a different version.", + "description": "The FHIR specification version that this FHIR store supports natively. This field is immutable after store creation. Requests are rejected if they contain FHIR resources of a different version. 
Version is required for every FHIR store.", "enum": [ "VERSION_UNSPECIFIED", "DSTU2", @@ -3112,7 +3204,7 @@ "R4" ], "enumDescriptions": [ - "Users must specify a version on store creation or an error will be\nreturned.", + "Users must specify a version on store creation or an error is returned.", "Draft Standard for Trial Use, [Release 2](https://www.hl7.org/fhir/DSTU2)", "Standard for Trial Use, [Release 3](https://www.hl7.org/fhir/STU3)", "[Release 4](https://www.hl7.org/fhir/R4)" @@ -3123,7 +3215,7 @@ "type": "object" }, "FieldMetadata": { - "description": "Specifies FHIR paths to match, and how to handle de-identification of\nmatching fields.", + "description": "Specifies FHIR paths to match, and how to handle de-identification of matching fields.", "id": "FieldMetadata", "properties": { "action": { @@ -3143,7 +3235,7 @@ "type": "string" }, "paths": { - "description": "List of paths to FHIR fields to be redacted. Each path is a\nperiod-separated list where each component is either a field name or\nFHIR type name, for example: Patient, HumanName.\nFor \"choice\" types (those defined in the FHIR spec with the form:\nfield[x]) we use two separate components. For example,\n\"deceasedAge.unit\" is matched by \"Deceased.Age.unit\".\nSupported types are: AdministrativeGenderCode, Code, Date, DateTime,\nDecimal, HumanName, Id, LanguageCode, Markdown, Oid, String, Uri, Uuid,\nXhtml.", + "description": "List of paths to FHIR fields to be redacted. Each path is a period-separated list where each component is either a field name or FHIR type name, for example: Patient, HumanName. For \"choice\" types (those defined in the FHIR spec with the form: field[x]) we use two separate components. For example, \"deceasedAge.unit\" is matched by \"Deceased.Age.unit\". Supported types are: AdministrativeGenderCode, Code, Date, DateTime, Decimal, HumanName, Id, LanguageCode, Markdown, Oid, String, Uri, Uuid, Xhtml. Base64Binary is also supported, but may only be kept as-is or have all the content removed.", "items": { "type": "string" }, @@ -3169,26 +3261,26 @@ "id": "GoogleCloudHealthcareV1DicomBigQueryDestination", "properties": { "force": { - "description": "If the destination table already exists and this flag is `TRUE`, the table\nis overwritten by the contents of the DICOM store. If the flag is not\nset and the destination table already exists, the export call returns an\nerror.", + "description": "If the destination table already exists and this flag is `TRUE`, the table is overwritten by the contents of the DICOM store. 
If the flag is not set and the destination table already exists, the export call returns an error.", "type": "boolean" }, "tableUri": { - "description": "BigQuery URI to a table, up to 2000 characters long, in the format\n`bq://projectId.bqDatasetId.tableId`", + "description": "BigQuery URI to a table, up to 2000 characters long, in the format `bq://projectId.bqDatasetId.tableId`", "type": "string" } }, "type": "object" }, "GoogleCloudHealthcareV1DicomGcsDestination": { - "description": "The Cloud Storage location where the server writes the output and the export\nconfiguration.", + "description": "The Cloud Storage location where the server writes the output and the export configuration.", "id": "GoogleCloudHealthcareV1DicomGcsDestination", "properties": { "mimeType": { - "description": "MIME types supported by DICOM spec.\nEach file is written in the following format:\n`.../{study_id}/{series_id}/{instance_id}[/{frame_number}].{extension}`\nThe frame_number component exists only for multi-frame instances.\n\nSupported MIME types are consistent with supported formats in DICOMweb:\nhttps://cloud.google.com/healthcare/docs/dicom#retrieve_transaction.\nSpecifically, the following are supported:\n\n - application/dicom; transfer-syntax=1.2.840.10008.1.2.1\n (uncompressed DICOM)\n - application/dicom; transfer-syntax=1.2.840.10008.1.2.4.50\n (DICOM with embedded JPEG Baseline)\n - application/dicom; transfer-syntax=1.2.840.10008.1.2.4.90\n (DICOM with embedded JPEG 2000 Lossless Only)\n - application/dicom; transfer-syntax=1.2.840.10008.1.2.4.91\n (DICOM with embedded JPEG 2000)\n - application/dicom; transfer-syntax=*\n (DICOM with no transcoding)\n - application/octet-stream; transfer-syntax=1.2.840.10008.1.2.1\n (raw uncompressed PixelData)\n - application/octet-stream; transfer-syntax=*\n (raw PixelData in whatever format it was uploaded in)\n - image/jpeg; transfer-syntax=1.2.840.10008.1.2.4.50\n (Consumer JPEG)\n - image/png\n\nThe following extensions are used for output files:\n\n - application/dicom -\u003e .dcm\n - image/jpeg -\u003e .jpg\n - image/png -\u003e .png\n - application/octet-stream -\u003e no extension\n\nIf unspecified, the instances are exported in the original\nDICOM format they were uploaded in.", + "description": "MIME types supported by DICOM spec. Each file is written in the following format: `.../{study_id}/{series_id}/{instance_id}[/{frame_number}].{extension}` The frame_number component exists only for multi-frame instances. Supported MIME types are consistent with supported formats in DICOMweb: https://cloud.google.com/healthcare/docs/dicom#retrieve_transaction. 
Specifically, the following are supported: - application/dicom; transfer-syntax=1.2.840.10008.1.2.1 (uncompressed DICOM) - application/dicom; transfer-syntax=1.2.840.10008.1.2.4.50 (DICOM with embedded JPEG Baseline) - application/dicom; transfer-syntax=1.2.840.10008.1.2.4.90 (DICOM with embedded JPEG 2000 Lossless Only) - application/dicom; transfer-syntax=1.2.840.10008.1.2.4.91 (DICOM with embedded JPEG 2000) - application/dicom; transfer-syntax=* (DICOM with no transcoding) - application/octet-stream; transfer-syntax=1.2.840.10008.1.2.1 (raw uncompressed PixelData) - application/octet-stream; transfer-syntax=* (raw PixelData in whatever format it was uploaded in) - image/jpeg; transfer-syntax=1.2.840.10008.1.2.4.50 (Consumer JPEG) - image/png The following extensions are used for output files: - application/dicom -\u003e .dcm - image/jpeg -\u003e .jpg - image/png -\u003e .png - application/octet-stream -\u003e no extension If unspecified, the instances are exported in the original DICOM format they were uploaded in.", "type": "string" }, "uriPrefix": { - "description": "The Cloud Storage destination to export to.\n\nURI for a Cloud Storage directory where the server writes the result files,\nin the format `gs://{bucket-id}/{path/to/destination/dir}`). If there is no\ntrailing slash, the service appends one when composing the object path.\nThe user is responsible for creating the Cloud Storage bucket referenced in\n`uri_prefix`.", + "description": "The Cloud Storage destination to export to. URI for a Cloud Storage directory where the server writes the result files, in the format `gs://{bucket-id}/{path/to/destination/dir}`). If there is no trailing slash, the service appends one when composing the object path. The user is responsible for creating the Cloud Storage bucket referenced in `uri_prefix`.", "type": "string" } }, @@ -3199,7 +3291,7 @@ "id": "GoogleCloudHealthcareV1DicomGcsSource", "properties": { "uri": { - "description": "Points to a Cloud Storage URI containing file(s) with\ncontent only. The URI must be in the following format:\n`gs://{bucket_id}/{object_id}`. The URI can include wildcards in\n`object_id` and thus identify multiple files. Supported wildcards:\n '*' to match 0 or more non-separator characters\n '**' to match 0 or more characters (including separators). Must be used at\n the end of a path and with no other wildcards in the\n path. Can also be used with a file extension (such as .dcm), which\n imports all files with the extension in the specified directory and\n its sub-directories. For example,\n `gs://my-bucket/my-directory/**.dcm` imports all files with .dcm\n extensions in `my-directory/` and its sub-directories.\n '?' to match 1 character\nAll other URI formats are invalid.\nFiles matching the wildcard are expected to contain content only, no\nmetadata.", + "description": "Points to a Cloud Storage URI containing file(s) with content only. The URI must be in the following format: `gs://{bucket_id}/{object_id}`. The URI can include wildcards in `object_id` and thus identify multiple files. Supported wildcards: '*' to match 0 or more non-separator characters '**' to match 0 or more characters (including separators). Must be used at the end of a path and with no other wildcards in the path. Can also be used with a file extension (such as .dcm), which imports all files with the extension in the specified directory and its sub-directories. 
For example, `gs://my-bucket/my-directory/**.dcm` imports all files with .dcm extensions in `my-directory/` and its sub-directories. '?' to match 1 character All other URI formats are invalid. Files matching the wildcard are expected to contain content only, no metadata.", "type": "string" } }, @@ -3210,11 +3302,11 @@ "id": "GoogleCloudHealthcareV1FhirBigQueryDestination", "properties": { "datasetUri": { - "description": "BigQuery URI to a dataset, up to 2000 characters long, in the format\n`bq://projectId.bqDatasetId`", + "description": "BigQuery URI to an existing dataset, up to 2000 characters long, in the format `bq://projectId.bqDatasetId`.", "type": "string" }, "force": { - "description": "If this flag is `TRUE`, all tables will be deleted from the dataset before\nthe new exported tables are written. If the flag is not set and the\ndestination dataset contains tables, the export call returns an error.", + "description": "If this flag is `TRUE`, all tables are deleted from the dataset before the new exported tables are written. If the flag is not set and the destination dataset contains tables, the export call returns an error.", "type": "boolean" }, "schemaConfig": { @@ -3229,7 +3321,7 @@ "id": "GoogleCloudHealthcareV1FhirGcsDestination", "properties": { "uriPrefix": { - "description": "URI for a Cloud Storage directory where result files should be written (in\nthe format `gs://{bucket-id}/{path/to/destination/dir}`). If there is no\ntrailing slash, the service will append one when composing the object path.\nThe user is responsible for creating the Cloud Storage bucket referenced in\n`uri_prefix`.", + "description": "URI for a Cloud Storage directory where result files should be written, in the format of `gs://{bucket-id}/{path/to/destination/dir}`. If there is no trailing slash, the service appends one when composing the object path. The user is responsible for creating the Cloud Storage bucket referenced in `uri_prefix`.", "type": "string" } }, @@ -3240,22 +3332,22 @@ "id": "GoogleCloudHealthcareV1FhirGcsSource", "properties": { "uri": { - "description": "Points to a Cloud Storage URI containing file(s) to import.\n\nThe URI must be in the following format: `gs://{bucket_id}/{object_id}`.\nThe URI can include wildcards in `object_id` and thus identify multiple\nfiles. Supported wildcards:\n\n* `*` to match 0 or more non-separator characters\n* `**` to match 0 or more characters (including separators). Must be used\nat the end of a path and with no other wildcards in the\npath. Can also be used with a file extension (such as .ndjson), which\nimports all files with the extension in the specified directory and\nits sub-directories. For example, `gs://my-bucket/my-directory/**.ndjson`\nimports all files with `.ndjson` extensions in `my-directory/` and its\nsub-directories.\n* `?` to match 1 character\n\nFiles matching the wildcard are expected to contain content only, no\nmetadata.", + "description": "Points to a Cloud Storage URI containing file(s) to import. The URI must be in the following format: `gs://{bucket_id}/{object_id}`. The URI can include wildcards in `object_id` and thus identify multiple files. Supported wildcards: * `*` to match 0 or more non-separator characters * `**` to match 0 or more characters (including separators). Must be used at the end of a path and with no other wildcards in the path. Can also be used with a file extension (such as .ndjson), which imports all files with the extension in the specified directory and its sub-directories. 
For example, `gs://my-bucket/my-directory/**.ndjson` imports all files with `.ndjson` extensions in `my-directory/` and its sub-directories. * `?` to match 1 character Files matching the wildcard are expected to contain content only, no metadata.", "type": "string" } }, "type": "object" }, "Hl7V2NotificationConfig": { - "description": "Specifies where and whether to send notifications upon changes to a\ndata store.", + "description": "Specifies where and whether to send notifications upon changes to a data store.", "id": "Hl7V2NotificationConfig", "properties": { "filter": { - "description": "Restricts notifications sent for messages matching a filter. If this is\nempty, all messages are matched. Syntax:\nhttps://cloud.google.com/appengine/docs/standard/python/search/query_strings\n\nFields/functions available for filtering are:\n\n* `message_type`, from the MSH-9.1 field. For example,\n`NOT message_type = \"ADT\"`.\n* `send_date` or `sendDate`, the YYYY-MM-DD date the message was sent in\nthe dataset's time_zone, from the MSH-7 segment. For example,\n`send_date \u003c \"2017-01-02\"`.\n* `send_time`, the timestamp when the message was sent, using the\nRFC3339 time format for comparisons, from the MSH-7 segment. For example,\n`send_time \u003c \"2017-01-02T00:00:00-05:00\"`.\n* `send_facility`, the care center that the message came from, from the\nMSH-4 segment. For example, `send_facility = \"ABC\"`.\n* `PatientId(value, type)`, which matches if the message lists a patient\nhaving an ID of the given value and type in the PID-2, PID-3, or PID-4\nsegments. For example, `PatientId(\"123456\", \"MRN\")`.\n* `labels.x`, a string value of the label with key `x` as set using the\nMessage.labels\nmap. For example, `labels.\"priority\"=\"high\"`. The operator `:*` can be\nused to assert the existence of a label. For example,\n`labels.\"priority\":*`.", + "description": "Restricts notifications sent for messages matching a filter. If this is empty, all messages are matched. Syntax: https://cloud.google.com/appengine/docs/standard/python/search/query_strings The following fields and functions are available for filtering: * `message_type`, from the MSH-9.1 field. For example, `NOT message_type = \"ADT\"`. * `send_date` or `sendDate`, the YYYY-MM-DD date the message was sent in the dataset's time_zone, from the MSH-7 segment. For example, `send_date \u003c \"2017-01-02\"`. * `send_time`, the timestamp when the message was sent, using the RFC3339 time format for comparisons, from the MSH-7 segment. For example, `send_time \u003c \"2017-01-02T00:00:00-05:00\"`. * `send_facility`, the care center that the message came from, from the MSH-4 segment. For example, `send_facility = \"ABC\"`. * `PatientId(value, type)`, which matches if the message lists a patient having an ID of the given value and type in the PID-2, PID-3, or PID-4 segments. For example, `PatientId(\"123456\", \"MRN\")`. * `labels.x`, a string value of the label with key `x` as set using the Message.labels map. For example, `labels.\"priority\"=\"high\"`. The operator `:*` can be used to assert the existence of a label. For example, `labels.\"priority\":*`.", "type": "string" }, "pubsubTopic": { - "description": "The [Cloud Pub/Sub](https://cloud.google.com/pubsub/docs/) topic that\nnotifications of changes are published on. Supplied by the client. The\nnotification is a `PubsubMessage` with the following fields:\n\n* `PubsubMessage.Data` contains the resource name.\n* `PubsubMessage.MessageId` is the ID of this notification. 
It is\nguaranteed to be unique within the topic.\n* `PubsubMessage.PublishTime` is the time at which the message was\npublished.\n\nNote that notifications are only sent if the topic is non-empty. [Topic\nnames](https://cloud.google.com/pubsub/docs/overview#names) must be\nscoped to a project. Cloud Healthcare API service account must have\npublisher permissions on the given Pub/Sub topic. Not having adequate\npermissions causes the calls that send notifications to fail.\n\nIf a notification cannot be published to Cloud Pub/Sub, errors will be\nlogged to Cloud Logging (see [Viewing logs](/healthcare/docs/how-\ntos/logging)).", + "description": "The [Cloud Pub/Sub](https://cloud.google.com/pubsub/docs/) topic that notifications of changes are published on. Supplied by the client. The notification is a `PubsubMessage` with the following fields: * `PubsubMessage.Data` contains the resource name. * `PubsubMessage.MessageId` is the ID of this notification. It's guaranteed to be unique within the topic. * `PubsubMessage.PublishTime` is the time when the message was published. Note that notifications are only sent if the topic is non-empty. [Topic names](https://cloud.google.com/pubsub/docs/overview#names) must be scoped to a project. The Cloud Healthcare API service account, service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com, must have publisher permissions on the given Pub/Sub topic. Not having adequate permissions causes the calls that send notifications to fail. If a notification cannot be published to Cloud Pub/Sub, errors are logged to Cloud Logging. For more information, see [Viewing error logs in Cloud Logging](/healthcare/docs/how-tos/logging)).", "type": "string" } }, @@ -3269,15 +3361,15 @@ "additionalProperties": { "type": "string" }, - "description": "User-supplied key-value pairs used to organize HL7v2 stores.\n\nLabel keys must be between 1 and 63 characters long, have a UTF-8 encoding\nof maximum 128 bytes, and must conform to the\nfollowing PCRE regular expression:\n\\p{Ll}\\p{Lo}{0,62}\n\nLabel values are optional, must be between 1 and 63 characters long, have\na UTF-8 encoding of maximum 128 bytes, and must conform to the\nfollowing PCRE regular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63}\n\nNo more than 64 labels can be associated with a given store.", + "description": "User-supplied key-value pairs used to organize HL7v2 stores. Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}{0,62} Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63} No more than 64 labels can be associated with a given store.", "type": "object" }, "name": { - "description": "Output only. Resource name of the HL7v2 store, of the form\n`projects/{project_id}/datasets/{dataset_id}/hl7V2Stores/{hl7v2_store_id}`.", + "description": "Resource name of the HL7v2 store, of the form `projects/{project_id}/datasets/{dataset_id}/hl7V2Stores/{hl7v2_store_id}`.", "type": "string" }, "notificationConfigs": { - "description": "A list of notification configs. Each configuration uses a filter to\ndetermine whether to publish a message (both Ingest \u0026 Create) on\nthe corresponding notification destination. Only the message name is sent\nas part of the notification. Supplied by the client.", + "description": "A list of notification configs. 
Each configuration uses a filter to determine whether to publish a message (both Ingest \u0026 Create) on the corresponding notification destination. Only the message name is sent as part of the notification. Supplied by the client.", "items": { "$ref": "Hl7V2NotificationConfig" }, @@ -3285,17 +3377,17 @@ }, "parserConfig": { "$ref": "ParserConfig", - "description": "The configuration for the parser. It determines how the server parses the\nmessages." + "description": "The configuration for the parser. It determines how the server parses the messages." }, "rejectDuplicateMessage": { - "description": "Determines whether duplicate messages should be rejected. A duplicate\nmessage is a message with the same raw bytes as a message that has already\nbeen ingested/created in this HL7v2 store.\nThe default value is false, meaning that the store accepts the duplicate\nmessages and it also returns the same ACK message in the\nIngestMessageResponse as has been returned previously. Note that only\none resource is created in the store.\nWhen this field is set to true,\nCreateMessage/IngestMessage\nrequests with a duplicate message will be rejected by the store, and\nIngestMessageErrorDetail returns a NACK message upon rejection.", + "description": "Determines whether to reject duplicate messages. A duplicate message is a message with the same raw bytes as a message that has already been ingested/created in this HL7v2 store. The default value is false, meaning that the store accepts the duplicate messages and it also returns the same ACK message in the IngestMessageResponse as has been returned previously. Note that only one resource is created in the store. When this field is set to true, CreateMessage/IngestMessage requests with a duplicate message will be rejected by the store, and IngestMessageErrorDetail returns a NACK message upon rejection.", "type": "boolean" } }, "type": "object" }, "HttpBody": { - "description": "Message that represents an arbitrary HTTP body. It should only be used for\npayload formats that can't be represented as JSON, such as raw binary or\nan HTML page.\n\n\nThis message can be used both in streaming and non-streaming API methods in\nthe request as well as the response.\n\nIt can be used as a top-level request field, which is convenient if one\nwants to extract parameters from either the URL or HTTP template into the\nrequest fields and also want access to the raw HTTP body.\n\nExample:\n\n message GetResourceRequest {\n // A unique request id.\n string request_id = 1;\n\n // The raw HTTP body is bound to this field.\n google.api.HttpBody http_body = 2;\n }\n\n service ResourceService {\n rpc GetResource(GetResourceRequest) returns (google.api.HttpBody);\n rpc UpdateResource(google.api.HttpBody) returns\n (google.protobuf.Empty);\n }\n\nExample with streaming methods:\n\n service CaldavService {\n rpc GetCalendar(stream google.api.HttpBody)\n returns (stream google.api.HttpBody);\n rpc UpdateCalendar(stream google.api.HttpBody)\n returns (stream google.api.HttpBody);\n }\n\nUse of this type only changes how the request and response bodies are\nhandled, all other features will continue to work unchanged.", + "description": "Message that represents an arbitrary HTTP body. It should only be used for payload formats that can't be represented as JSON, such as raw binary or an HTML page. This message can be used both in streaming and non-streaming API methods in the request as well as the response. 
It can be used as a top-level request field, which is convenient if one wants to extract parameters from either the URL or HTTP template into the request fields and also want access to the raw HTTP body. Example: message GetResourceRequest { // A unique request id. string request_id = 1; // The raw HTTP body is bound to this field. google.api.HttpBody http_body = 2; } service ResourceService { rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); } Example with streaming methods: service CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); rpc UpdateCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); } Use of this type only changes how the request and response bodies are handled, all other features will continue to work unchanged.", "id": "HttpBody", "properties": { "contentType": { @@ -3308,7 +3400,7 @@ "type": "string" }, "extensions": { - "description": "Application specific response metadata. Must be set in the first response\nfor streaming APIs.", + "description": "Application specific response metadata. Must be set in the first response for streaming APIs.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -3345,12 +3437,12 @@ "type": "object" }, "ImportDicomDataRequest": { - "description": "Imports data into the specified DICOM store.\nReturns an error if any of the files to import are not DICOM files. This\nAPI accepts duplicate DICOM instances by ignoring the newly-pushed instance.\nIt does not overwrite.", + "description": "Imports data into the specified DICOM store. Returns an error if any of the files to import are not DICOM files. This API accepts duplicate DICOM instances by ignoring the newly-pushed instance. It does not overwrite.", "id": "ImportDicomDataRequest", "properties": { "gcsSource": { "$ref": "GoogleCloudHealthcareV1DicomGcsSource", - "description": "Cloud Storage source data location and import configuration.\n\nThe Cloud Storage location requires the `roles/storage.objectViewer`\nCloud IAM role." + "description": "Cloud Storage source data location and import configuration. The Cloud Storage location requires the `roles/storage.objectViewer` Cloud IAM role." } }, "type": "object" @@ -3366,7 +3458,7 @@ "id": "ImportResourcesRequest", "properties": { "contentStructure": { - "description": "The content structure in the source location. If not specified, the server\ntreats the input source files as BUNDLE.", + "description": "The content structure in the source location. If not specified, the server treats the input source files as BUNDLE.", "enum": [ "CONTENT_STRUCTURE_UNSPECIFIED", "BUNDLE", @@ -3375,9 +3467,9 @@ "RESOURCE_PRETTY" ], "enumDescriptions": [ - "If the content structure is not specified, the default value `BUNDLE`\nwill be used.", - "The source file contains one or more lines of newline-delimited JSON\n(ndjson). Each line is a bundle, which contains one or more resources.\nSet the bundle type to `history` to import resource versions.", - "The source file contains one or more lines of newline-delimited JSON\n(ndjson). Each line is a single resource.", + "If the content structure is not specified, the default value `BUNDLE` is used.", + "The source file contains one or more lines of newline-delimited JSON (ndjson). Each line is a bundle that contains one or more resources. 
Set the bundle type to `history` to import resource versions.", + "The source file contains one or more lines of newline-delimited JSON (ndjson). Each line is a single resource.", "The entire file is one JSON bundle. The JSON can span multiple lines.", "The entire file is one JSON resource. The JSON can span multiple lines." ], @@ -3385,19 +3477,19 @@ }, "gcsSource": { "$ref": "GoogleCloudHealthcareV1FhirGcsSource", - "description": "Cloud Storage source data location and import configuration.\n\nThe Cloud Storage location requires the `roles/storage.objectViewer`\nCloud IAM role.\n\nEach Cloud Storage object should be a text file that contains the format\nspecified in ContentStructure." + "description": "Cloud Storage source data location and import configuration. The Healthcare Service Agent account requires the `roles/storage.objectAdmin` role on the Cloud Storage location. Each Cloud Storage object should be a text file that contains the format specified in ContentStructure." } }, "type": "object" }, "ImportResourcesResponse": { - "description": "Final response of importing resources.\nThis structure will be included in the\nresponse to describe the detailed\noutcome. It will only be included when the operation finishes successfully.", + "description": "Final response of importing resources. This structure is included in the response to describe the detailed outcome after the operation finishes successfully.", "id": "ImportResourcesResponse", "properties": {}, "type": "object" }, "InfoTypeTransformation": { - "description": "A transformation to apply to text that is identified as a specific\ninfo_type.", + "description": "A transformation to apply to text that is identified as a specific info_type.", "id": "InfoTypeTransformation", "properties": { "characterMaskConfig": { @@ -3413,7 +3505,7 @@ "description": "Config for date shift." }, "infoTypes": { - "description": "InfoTypes to apply this transformation to. If this is not specified, the\ntransformation applies to any info_type.", + "description": "InfoTypes to apply this transformation to. If this is not specified, the transformation applies to any info_type.", "items": { "type": "string" }, @@ -3442,7 +3534,7 @@ "type": "object" }, "IngestMessageResponse": { - "description": "Acknowledges that a message has been ingested into the specified\nHL7v2 store.", + "description": "Acknowledges that a message has been ingested into the specified HL7v2 store.", "id": "IngestMessageResponse", "properties": { "hl7Ack": { @@ -3469,7 +3561,7 @@ "type": "array" }, "nextPageToken": { - "description": "Token to retrieve the next page of results, or empty if there are no\nmore results in the list.", + "description": "Token to retrieve the next page of results, or empty if there are no more results in the list.", "type": "string" } }, @@ -3480,14 +3572,14 @@ "id": "ListDicomStoresResponse", "properties": { "dicomStores": { - "description": "The returned DICOM stores. Won't be more DICOM stores than the value of\npage_size in the request.", + "description": "The returned DICOM stores. 
Won't be more DICOM stores than the value of page_size in the request.", "items": { "$ref": "DicomStore" }, "type": "array" }, "nextPageToken": { - "description": "Token to retrieve the next page of results or empty if there are no more\nresults in the list.", + "description": "Token to retrieve the next page of results or empty if there are no more results in the list.", "type": "string" } }, @@ -3498,14 +3590,14 @@ "id": "ListFhirStoresResponse", "properties": { "fhirStores": { - "description": "The returned FHIR stores. Won't be more FHIR stores than the value of\npage_size in the request.", + "description": "The returned FHIR stores. Won't be more FHIR stores than the value of page_size in the request.", "items": { "$ref": "FhirStore" }, "type": "array" }, "nextPageToken": { - "description": "Token to retrieve the next page of results or empty if there are no more\nresults in the list.", + "description": "Token to retrieve the next page of results or empty if there are no more results in the list.", "type": "string" } }, @@ -3516,14 +3608,32 @@ "id": "ListHl7V2StoresResponse", "properties": { "hl7V2Stores": { - "description": "The returned HL7v2 stores. Won't be more HL7v2 stores than the value of\npage_size in the request.", + "description": "The returned HL7v2 stores. Won't be more HL7v2 stores than the value of page_size in the request.", "items": { "$ref": "Hl7V2Store" }, "type": "array" }, "nextPageToken": { - "description": "Token to retrieve the next page of results or empty if there are no more\nresults in the list.", + "description": "Token to retrieve the next page of results or empty if there are no more results in the list.", + "type": "string" + } + }, + "type": "object" + }, + "ListLocationsResponse": { + "description": "The response message for Locations.ListLocations.", + "id": "ListLocationsResponse", + "properties": { + "locations": { + "description": "A list of locations that matches the specified filter in the request.", + "items": { + "$ref": "Location" + }, + "type": "array" + }, + "nextPageToken": { + "description": "The standard List next-page token.", "type": "string" } }, @@ -3534,14 +3644,14 @@ "id": "ListMessagesResponse", "properties": { "hl7V2Messages": { - "description": "The returned Messages. Won't be more Messages than the value of\npage_size in the request. See\nview for\npopulated fields.", + "description": "The returned Messages. Won't be more Messages than the value of page_size in the request. See view for populated fields.", "items": { "$ref": "Message" }, "type": "array" }, "nextPageToken": { - "description": "Token to retrieve the next page of results or empty if there are no more\nresults in the list.", + "description": "Token to retrieve the next page of results or empty if there are no more results in the list.", "type": "string" } }, @@ -3565,13 +3675,48 @@ }, "type": "object" }, + "Location": { + "description": "A resource that represents Google Cloud Platform location.", + "id": "Location", + "properties": { + "displayName": { + "description": "The friendly name for this location, typically a nearby city name. For example, \"Tokyo\".", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Cross-service attributes for the location. For example {\"cloud.googleapis.com/region\": \"us-east1\"}", + "type": "object" + }, + "locationId": { + "description": "The canonical id for this location. 
For example: `\"us-east1\"`.", + "type": "string" + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata. For example the available capacity at the given location.", + "type": "object" + }, + "name": { + "description": "Resource name for the location, which may vary between implementations. For example: `\"projects/example-project/locations/us-east1\"`", + "type": "string" + } + }, + "type": "object" + }, "Message": { - "description": "A complete HL7v2 message.\nSee http://www.hl7.org/implement/standards/index.cfm?ref=common for details\non the standard.", + "description": "A complete HL7v2 message. See [Introduction to HL7 Standards] (https://www.hl7.org/implement/standards/index.cfm?ref=common) for details on the standard.", "id": "Message", "properties": { "createTime": { "description": "Output only. The datetime when the message was created. Set by the server.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "data": { @@ -3583,7 +3728,7 @@ "additionalProperties": { "type": "string" }, - "description": "User-supplied key-value pairs used to organize HL7v2 stores.\n\nLabel keys must be between 1 and 63 characters long, have a UTF-8 encoding\nof maximum 128 bytes, and must conform to the\nfollowing PCRE regular expression:\n\\p{Ll}\\p{Lo}{0,62}\n\nLabel values are optional, must be between 1 and 63 characters long, have\na UTF-8 encoding of maximum 128 bytes, and must conform to the\nfollowing PCRE regular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63}\n\nNo more than 64 labels can be associated with a given store.", + "description": "User-supplied key-value pairs used to organize HL7v2 stores. Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}{0,62} Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63} No more than 64 labels can be associated with a given store.", "type": "object" }, "messageType": { @@ -3591,15 +3736,16 @@ "type": "string" }, "name": { - "description": "Resource name of the Message, of the form\n`projects/{project_id}/datasets/{dataset_id}/hl7V2Stores/{hl7_v2_store_id}/messages/{message_id}`.\nAssigned by the server.", + "description": "Resource name of the Message, of the form `projects/{project_id}/datasets/{dataset_id}/hl7V2Stores/{hl7_v2_store_id}/messages/{message_id}`. Assigned by the server.", "type": "string" }, "parsedData": { "$ref": "ParsedData", - "description": "Output only. The parsed version of the raw message data." + "description": "Output only. The parsed version of the raw message data.", + "readOnly": true }, "patientIds": { - "description": "All patient IDs listed in the PID-2, PID-3, and PID-4 segments of this\nmessage.", + "description": "All patient IDs listed in the PID-2, PID-3, and PID-4 segments of this message.", "items": { "$ref": "PatientId" }, @@ -3622,18 +3768,18 @@ "id": "NotificationConfig", "properties": { "pubsubTopic": { - "description": "The [Cloud Pub/Sub](https://cloud.google.com/pubsub/docs/) topic that\nnotifications of changes are published on. Supplied by the client.\nPubsubMessage.Data contains the resource name.\nPubsubMessage.MessageId is the ID of this message. 
It is guaranteed to be\nunique within the topic.\nPubsubMessage.PublishTime is the time at which the message was published.\nNotifications are only sent if the topic is\nnon-empty. [Topic\nnames](https://cloud.google.com/pubsub/docs/overview#names) must be scoped\nto a project. Cloud Healthcare API service account must have publisher\npermissions on the given Cloud Pub/Sub topic. Not having adequate\npermissions causes the calls that send notifications to fail.\n\nIf a notification can't be published to Cloud Pub/Sub, errors are logged to\nCloud Logging (see [Viewing\nlogs](/healthcare/docs/how-tos/logging)). If the number of\nerrors exceeds a certain rate, some aren't submitted.", + "description": "The [Cloud Pub/Sub](https://cloud.google.com/pubsub/docs/) topic that notifications of changes are published on. Supplied by the client. PubsubMessage.Data contains the resource name. PubsubMessage.MessageId is the ID of this message. It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message was published. Notifications are only sent if the topic is non-empty. [Topic names](https://cloud.google.com/pubsub/docs/overview#names) must be scoped to a project. Cloud Healthcare API service account must have publisher permissions on the given Cloud Pub/Sub topic. Not having adequate permissions causes the calls that send notifications to fail. If a notification can't be published to Cloud Pub/Sub, errors are logged to Cloud Logging (see [Viewing logs](/healthcare/docs/how-tos/logging)). If the number of errors exceeds a certain rate, some aren't submitted. Note that not all operations trigger notifications, see [Configuring Pub/Sub notifications](https://cloud.google.com/healthcare/docs/how-tos/pubsub) for specific details.", "type": "string" } }, "type": "object" }, "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", "properties": { "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", "type": "boolean" }, "error": { @@ -3645,11 +3791,11 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", "type": "object" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. 
If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, "response": { @@ -3657,14 +3803,14 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", "type": "object" } }, "type": "object" }, "OperationMetadata": { - "description": "OperationMetadata provides information about the operation execution.\nReturned in the long-running operation's metadata field.", + "description": "OperationMetadata provides information about the operation execution. Returned in the long-running operation's metadata field.", "id": "OperationMetadata", "properties": { "apiMethodName": { @@ -3689,7 +3835,7 @@ "type": "string" }, "logsUrl": { - "description": "A link to audit and error logs in the log viewer. Error logs are generated\nonly by some operations, listed at\n[Viewing logs](/healthcare/docs/how-tos/logging).", + "description": "A link to audit and error logs in the log viewer. Error logs are generated only by some operations, listed at [Viewing logs](/healthcare/docs/how-tos/logging).", "type": "string" } }, @@ -3709,7 +3855,7 @@ "type": "object" }, "ParserConfig": { - "description": "The configuration for the parser. It determines how the server parses the\nmessages.", + "description": "The configuration for the parser. It determines how the server parses the messages.", "id": "ParserConfig", "properties": { "allowNullHeader": { @@ -3717,7 +3863,7 @@ "type": "boolean" }, "segmentTerminator": { - "description": "Byte(s) to use as the segment terminator. If this is unset, '\\r' is\nused as segment terminator.", + "description": "Byte(s) to use as the segment terminator. If this is unset, '\\r' is used as segment terminator.", "format": "byte", "type": "string" } @@ -3740,7 +3886,7 @@ "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access\ncontrols for Google Cloud resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). 
A `role` is a named list of\npermissions; each `role` can be an IAM predefined role or a user-created\ncustom role.\n\nFor some types of Google Cloud resources, a `binding` can also specify a\n`condition`, which is a logical expression that allows access to a resource\nonly if the expression evaluates to `true`. A condition can add constraints\nbased on attributes of the request, the resource, or both. To learn which\nresources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).\n\n**JSON example:**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\n \"user:eve@example.com\"\n ],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n }\n\n**YAML example:**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n - etag: BwWWja0YfJA=\n - version: 3\n\nFor a description of IAM and its features, see the\n[IAM documentation](https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "auditConfigs": { @@ -3751,19 +3897,19 @@ "type": "array" }, "bindings": { - "description": "Associates a list of `members` to a `role`. Optionally, may specify a\n`condition` that determines how and when the `bindings` are applied. Each\nof the `bindings` must contain at least one member.", + "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", "format": "byte", "type": "string" }, "version": { - "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. 
Requests that specify an invalid value\nare rejected.\n\nAny operation that affects conditional role bindings must specify version\n`3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding\n* Adding a conditional role binding to a policy\n* Changing a conditional role binding in a policy\n* Removing any role binding, with or without a condition, from a policy\n that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may\nspecify any valid version or leave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -3793,13 +3939,13 @@ "type": "object" }, "RedactConfig": { - "description": "Define how to redact sensitive values. Default behaviour is erase.\nFor example, \"My name is Jane.\" becomes \"My name is .\"", + "description": "Define how to redact sensitive values. Default behaviour is erase. For example, \"My name is Jane.\" becomes \"My name is .\"", "id": "RedactConfig", "properties": {}, "type": "object" }, "ReplaceWithInfoTypeConfig": { - "description": "When using the\nINSPECT_AND_TRANSFORM\naction, each match is replaced with the name of the info_type. For example,\n\"My name is Jane\" becomes \"My name is [PERSON_NAME].\" The\nTRANSFORM\naction is equivalent to redacting.", + "description": "When using the INSPECT_AND_TRANSFORM action, each match is replaced with the name of the info_type. For example, \"My name is Jane\" becomes \"My name is [PERSON_NAME].\" The TRANSFORM action is equivalent to redacting.", "id": "ReplaceWithInfoTypeConfig", "properties": {}, "type": "object" @@ -3819,23 +3965,23 @@ "type": "object" }, "SchemaConfig": { - "description": "Configuration for the FHIR BigQuery schema. Determines how the server\ngenerates the schema.", + "description": "Configuration for the FHIR BigQuery schema. 
Determines how the server generates the schema.", "id": "SchemaConfig", "properties": { "recursiveStructureDepth": { - "description": "The depth for all recursive structures in the output analytics\nschema. For example, `concept` in the CodeSystem resource is a recursive\nstructure; when the depth is 2, the CodeSystem table will have a column\ncalled `concept.concept` but not `concept.concept.concept`. If not\nspecified or set to 0, the server will use the default value 2. The\nmaximum depth allowed is 5.", + "description": "The depth for all recursive structures in the output analytics schema. For example, `concept` in the CodeSystem resource is a recursive structure; when the depth is 2, the CodeSystem table will have a column called `concept.concept` but not `concept.concept.concept`. If not specified or set to 0, the server will use the default value 2. The maximum depth allowed is 5.", "format": "int64", "type": "string" }, "schemaType": { - "description": "Specifies the output schema type.", + "description": "Specifies the output schema type. Schema type is required.", "enum": [ "SCHEMA_TYPE_UNSPECIFIED", "ANALYTICS" ], "enumDescriptions": [ - "No schema type specified.", - "Analytics schema defined by the FHIR community.\nSee https://github.com/FHIR/sql-on-fhir/blob/master/sql-on-fhir.md.\n\nBigQuery only allows a maximum of 10,000 columns per table. Due to this\nlimitation, the server will not generate schemas for fields of type\n`Resource`, which can hold any resource type. The affected fields are\n`Parameters.parameter.resource`, `Bundle.entry.resource`, and\n`Bundle.entry.response.outcome`." + "No schema type specified. This type is unsupported.", + "Analytics schema defined by the FHIR community. See https://github.com/FHIR/sql-on-fhir/blob/master/sql-on-fhir.md. BigQuery only allows a maximum of 10,000 columns per table. Due to this limitation, the server will not generate schemas for fields of type `Resource`, which can hold any resource type. The affected fields are `Parameters.parameter.resource`, `Bundle.entry.resource`, and `Bundle.entry.response.outcome`." ], "type": "string" } @@ -3847,7 +3993,7 @@ "id": "SearchResourcesRequest", "properties": { "resourceType": { - "description": "The FHIR resource type to search, such as Patient or Observation. For a\ncomplete list, see the FHIR Resource Index\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/resourcelist.html),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/resourcelist.html),\n[R4](http://hl7.org/implement/standards/fhir/R4/resourcelist.html)).", + "description": "The FHIR resource type to search, such as Patient or Observation. For a complete list, see the FHIR Resource Index ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/resourcelist.html), [STU3](http://hl7.org/implement/standards/fhir/STU3/resourcelist.html), [R4](http://hl7.org/implement/standards/fhir/R4/resourcelist.html)).", "type": "string" } }, @@ -3861,7 +4007,7 @@ "additionalProperties": { "type": "string" }, - "description": "A mapping from the positional location to the value.\nThe key string uses zero-based indexes separated by dots to identify\nFields, components and sub-components. 
A bracket notation is also used to\nidentify different instances of a repeated field.\nRegex for key: (\\d+)(\\[\\d+\\])?(.\\d+)?(.\\d+)?\n\nExamples of (key, value) pairs:\n\n* (0.1, \"hemoglobin\") denotes that the first component of Field 0 has the\n value \"hemoglobin\".\n\n* (1.1.2, \"CBC\") denotes that the second sub-component of the first\n component of Field 1 has the value \"CBC\".\n\n* (1[0].1, \"HbA1c\") denotes that the first component of the\n first Instance of Field 1, which is repeated, has the value \"HbA1c\".", + "description": "A mapping from the positional location to the value. The key string uses zero-based indexes separated by dots to identify Fields, components and sub-components. A bracket notation is also used to identify different instances of a repeated field. Regex for key: (\\d+)(\\[\\d+\\])?(.\\d+)?(.\\d+)? Examples of (key, value) pairs: * (0.1, \"hemoglobin\") denotes that the first component of Field 0 has the value \"hemoglobin\". * (1.1.2, \"CBC\") denotes that the second sub-component of the first component of Field 1 has the value \"CBC\". * (1[0].1, \"HbA1c\") denotes that the first component of the first Instance of Field 1, which is repeated, has the value \"HbA1c\".", "type": "object" }, "segmentId": { @@ -3869,7 +4015,7 @@ "type": "string" }, "setId": { - "description": "Set ID for segments that can be in a set. This can be empty if it's\nmissing or isn't applicable.", + "description": "Set ID for segments that can be in a set. This can be empty if it's missing or isn't applicable.", "type": "string" } }, @@ -3881,10 +4027,10 @@ "properties": { "policy": { "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." }, "updateMask": { - "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. If no mask is provided, the\nfollowing default mask is used:\n\n`paths: \"bindings, etag\"`", + "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: \"bindings, etag\"`", "format": "google-fieldmask", "type": "string" } @@ -3892,7 +4038,7 @@ "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. 
You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -3901,7 +4047,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -3912,22 +4058,22 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, "type": "object" }, "StreamConfig": { - "description": "This structure contains configuration for streaming FHIR export.", + "description": "Contains configuration for streaming FHIR export.", "id": "StreamConfig", "properties": { "bigqueryDestination": { "$ref": "GoogleCloudHealthcareV1FhirBigQueryDestination", - "description": "The destination BigQuery structure that contains both the dataset\nlocation and corresponding schema config.\n\nThe output is organized in one table per resource type. The server\nreuses the existing tables (if any) that are named after the resource\ntypes, e.g. \"Patient\", \"Observation\". When there is no existing table\nfor a given resource type, the server attempts to create one.\n\nWhen a table schema doesn't align with the schema config, either\nbecause of existing incompatible schema or out of band incompatible\nmodification, the server does not stream in new data.\n\nBigQuery imposes a 1 MB limit on streaming insert row size, therefore\nany resource mutation that generates more than 1 MB of BigQuery data\nwill not be streamed.\n\nOne resolution in this case is to delete the incompatible\ntable and let the server recreate one, though the newly created table\nonly contains data after the table recreation.\n\nResults are appended to the corresponding BigQuery tables. Different\nversions of the same resource are distinguishable by the meta.versionId\nand meta.lastUpdated columns. The operation (CREATE/UPDATE/DELETE) that\nresults in the new version is recorded in the meta.tag.\n\nThe tables contain all historical resource versions since streaming was\nenabled. For query convenience, the server also creates one view per\ntable of the same name containing only the current resource version.\n\nThe streamed data in the BigQuery dataset is not guaranteed to be\ncompletely unique. The combination of the id and meta.versionId columns\nshould ideally identify a single unique row. But in rare cases,\nduplicates may exist. At query time, users may use the SQL select\nstatement to keep only one of the duplicate rows given an id and\nmeta.versionId pair. Alternatively, the server created view mentioned\nabove also filters out duplicates.\n\nIf a resource mutation cannot be streamed to BigQuery, errors will be\nlogged to Cloud Logging (see\n[Viewing logs](/healthcare/docs/how-tos/logging))." 
+ "description": "The destination BigQuery structure that contains both the dataset location and corresponding schema config. The output is organized in one table per resource type. The server reuses the existing tables (if any) that are named after the resource types. For example, \"Patient\", \"Observation\". When there is no existing table for a given resource type, the server attempts to create one. When a table schema doesn't align with the schema config, either because of existing incompatible schema or out of band incompatible modification, the server does not stream in new data. BigQuery imposes a 1 MB limit on streaming insert row size, therefore any resource mutation that generates more than 1 MB of BigQuery data is not streamed. One resolution in this case is to delete the incompatible table and let the server recreate one, though the newly created table only contains data after the table recreation. Results are appended to the corresponding BigQuery tables. Different versions of the same resource are distinguishable by the meta.versionId and meta.lastUpdated columns. The operation (CREATE/UPDATE/DELETE) that results in the new version is recorded in the meta.tag. The tables contain all historical resource versions since streaming was enabled. For query convenience, the server also creates one view per table of the same name containing only the current resource version. The streamed data in the BigQuery dataset is not guaranteed to be completely unique. The combination of the id and meta.versionId columns should ideally identify a single unique row. But in rare cases, duplicates may exist. At query time, users may use the SQL select statement to keep only one of the duplicate rows given an id and meta.versionId pair. Alternatively, the server created view mentioned above also filters out duplicates. If a resource mutation cannot be streamed to BigQuery, errors are logged to Cloud Logging. For more information, see [Viewing error logs in Cloud Logging](/healthcare/docs/how-tos/logging))." }, "resourceTypes": { - "description": "Supply a FHIR resource type (such as \"Patient\" or \"Observation\").\nSee https://www.hl7.org/fhir/valueset-resource-types.html for a list of\nall FHIR resource types.\nThe server treats an empty list as an intent to stream all the\nsupported resource types in this FHIR store.", + "description": "Supply a FHIR resource type (such as \"Patient\" or \"Observation\"). See https://www.hl7.org/fhir/valueset-resource-types.html for a list of all FHIR resource types. The server treats an empty list as an intent to stream all the supported resource types in this FHIR store.", "items": { "type": "string" }, @@ -3941,7 +4087,7 @@ "id": "TagFilterList", "properties": { "tags": { - "description": "Tags to be filtered. Tags must be DICOM Data Elements, File Meta\nElements, or Directory Structuring Elements, as defined at:\nhttp://dicom.nema.org/medical/dicom/current/output/html/part06.html#table_6-1,.\nThey may be provided by \"Keyword\" or \"Tag\". For example \"PatientID\",\n\"00100010\".", + "description": "Tags to be filtered. Tags must be DICOM Data Elements, File Meta Elements, or Directory Structuring Elements, as defined at: http://dicom.nema.org/medical/dicom/current/output/html/part06.html#table_6-1,. They may be provided by \"Keyword\" or \"Tag\". 
For example \"PatientID\", \"00100010\".", "items": { "type": "string" }, @@ -3955,7 +4101,7 @@ "id": "TestIamPermissionsRequest", "properties": { "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "items": { "type": "string" }, @@ -3969,7 +4115,7 @@ "id": "TestIamPermissionsResponse", "properties": { "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { "type": "string" }, diff --git a/vendor/google.golang.org/api/healthcare/v1/healthcare-gen.go b/vendor/google.golang.org/api/healthcare/v1/healthcare-gen.go index 95621d3828b..c420e15f3f1 100644 --- a/vendor/google.golang.org/api/healthcare/v1/healthcare-gen.go +++ b/vendor/google.golang.org/api/healthcare/v1/healthcare-gen.go @@ -75,6 +75,7 @@ const apiId = "healthcare:v1" const apiName = "healthcare" const apiVersion = "v1" const basePath = "https://healthcare.googleapis.com/" +const mtlsBasePath = "https://healthcare.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -90,6 +91,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -286,72 +288,31 @@ type ProjectsLocationsDatasetsOperationsService struct { s *Service } -// AuditConfig: Specifies the audit configuration for a service. -// The configuration determines which permission types are logged, and -// what -// identities, if any, are exempted from logging. -// An AuditConfig must have one or more AuditLogConfigs. -// -// If there are AuditConfigs for both `allServices` and a specific -// service, -// the union of the two AuditConfigs is used for that service: the -// log_types -// specified in each AuditConfig are enabled, and the exempted_members -// in each -// AuditLogConfig are exempted. -// -// Example Policy with multiple AuditConfigs: -// -// { -// "audit_configs": [ -// { -// "service": "allServices" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// }, -// { -// "log_type": "ADMIN_READ", -// } -// ] -// }, -// { -// "service": "sampleservice.googleapis.com" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// }, -// { -// "log_type": "DATA_WRITE", -// "exempted_members": [ -// "user:aliya@example.com" -// ] -// } -// ] -// } -// ] -// } -// -// For sampleservice, this policy enables DATA_READ, DATA_WRITE and -// ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, -// and -// aliya@example.com from DATA_WRITE logging. +// AuditConfig: Specifies the audit configuration for a service. 
The +// configuration determines which permission types are logged, and what +// identities, if any, are exempted from logging. An AuditConfig must +// have one or more AuditLogConfigs. If there are AuditConfigs for both +// `allServices` and a specific service, the union of the two +// AuditConfigs is used for that service: the log_types specified in +// each AuditConfig are enabled, and the exempted_members in each +// AuditLogConfig are exempted. Example Policy with multiple +// AuditConfigs: { "audit_configs": [ { "service": "allServices", +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": +// [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { +// "log_type": "ADMIN_READ" } ] }, { "service": +// "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": +// "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ +// "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy +// enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts +// jose@example.com from DATA_READ logging, and aliya@example.com from +// DATA_WRITE logging. type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of // permission. AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` - // Service: Specifies a service that will be enabled for audit - // logging. - // For example, `storage.googleapis.com`, - // `cloudsql.googleapis.com`. + // Service: Specifies a service that will be enabled for audit logging. + // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. // `allServices` is a special value that covers all services. Service string `json:"service,omitempty"` @@ -380,31 +341,15 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { } // AuditLogConfig: Provides the configuration for logging a type of -// permissions. -// Example: -// -// { -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// } -// ] -// } -// -// This enables 'DATA_READ' and 'DATA_WRITE' logging, while -// exempting -// jose@example.com from DATA_READ logging. +// permissions. Example: { "audit_log_configs": [ { "log_type": +// "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { +// "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and +// 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ +// logging. type AuditLogConfig struct { // ExemptedMembers: Specifies the identities that do not cause logging - // for this type of - // permission. - // Follows the same format of Binding.members. + // for this type of permission. Follows the same format of + // Binding.members. ExemptedMembers []string `json:"exemptedMembers,omitempty"` // LogType: The log type that this config enables. @@ -442,95 +387,53 @@ func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: The condition that is associated with this binding. - // - // If the condition evaluates to `true`, then this binding applies to - // the - // current request. - // - // If the condition evaluates to `false`, then this binding does not - // apply to - // the current request. However, a different role binding might grant - // the same - // role to one or more of the members in this binding. 
- // - // To learn which resources support conditions in their IAM policies, - // see - // the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/r - // esource-policies). + // Condition: The condition that is associated with this binding. If the + // condition evaluates to `true`, then this binding applies to the + // current request. If the condition evaluates to `false`, then this + // binding does not apply to the current request. However, a different + // role binding might grant the same role to one or more of the members + // in this binding. To learn which resources support conditions in their + // IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud - // Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@example.com` . - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. - // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a user that has been recently deleted. - // For - // example, `alice@example.com?uid=123456789012345678901`. If the - // user is - // recovered, this value reverts to `user:{emailid}` and the - // recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address - // (plus - // unique identifier) representing a service account that has been - // recently - // deleted. For example, - // + // Platform resource. `members` can have the following values: * + // `allUsers`: A special identifier that represents anyone who is on the + // internet; with or without a Google account. * + // `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. * + // `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . * + // `serviceAccount:{emailid}`: An email address that represents a + // service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An + // email address that represents a Google group. For example, + // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An + // email address (plus unique identifier) representing a user that has + // been recently deleted. For example, + // `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered + // user retains the role in the binding. * + // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus unique identifier) representing a service account that has been + // recently deleted. 
For example, // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account - // retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a Google group that has been recently - // deleted. For example, - // `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` - // and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. - // - // + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains + // the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: + // An email address (plus unique identifier) representing a Google group + // that has been recently deleted. For example, + // `admins@example.com?uid=123456789012345678901`. If the group is + // recovered, this value reverts to `group:{emailid}` and the recovered + // group retains the role in the binding. * `domain:{domain}`: The G + // Suite domain (primary) that represents all the users of that domain. + // For example, `google.com` or `example.com`. Members []string `json:"members,omitempty"` - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Role: Role that is assigned to `members`. For example, + // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` // ForceSendFields is a list of field names (e.g. "Condition") to @@ -621,16 +524,13 @@ func (s *CreateMessageRequest) MarshalJSON() ([]byte, error) { } // CryptoHashConfig: Pseudonymization method that generates surrogates -// via cryptographic hashing. -// Uses SHA-256. -// Outputs a base64-encoded representation of the hashed output -// (for example, `L7k0BHmF1ha5U3NfGykjro4xWi1MPVQPjhMAZbSV9mM=`). +// via cryptographic hashing. Uses SHA-256. Outputs a base64-encoded +// representation of the hashed output (for example, +// `L7k0BHmF1ha5U3NfGykjro4xWi1MPVQPjhMAZbSV9mM=`). type CryptoHashConfig struct { // CryptoKey: An AES 128/192/256 bit key. Causes the hash to be computed - // based on this - // key. A default key is generated for each Deidentify operation and is - // used - // wherever crypto_key is not specified. + // based on this key. A default key is generated for each Deidentify + // operation and is used wherever crypto_key is not specified. CryptoKey string `json:"cryptoKey,omitempty"` // ForceSendFields is a list of field names (e.g. "CryptoKey") to @@ -656,27 +556,19 @@ func (s *CryptoHashConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Dataset: A message representing a health dataset. -// -// A health dataset represents a collection of healthcare data -// pertaining to one -// or more patients. This may include multiple modalities of healthcare -// data, +// Dataset: A message representing a health dataset. A health dataset +// represents a collection of healthcare data pertaining to one or more +// patients. This may include multiple modalities of healthcare data, // such as electronic medical records or medical imaging data. 
type Dataset struct { - // Name: Output only. Resource name of the dataset, of the - // form - // `projects/{project_id}/locations/{location_id}/datasets/{dataset_ - // id}`. + // Name: Resource name of the dataset, of the form + // `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`. Name string `json:"name,omitempty"` // TimeZone: The default timezone used by this dataset. Must be a either - // a valid IANA - // time zone name such as "America/New_York" or empty, which defaults to - // UTC. - // This is used for parsing times in resources, such as HL7 messages, - // where no - // explicit timezone is specified. + // a valid IANA time zone name such as "America/New_York" or empty, + // which defaults to UTC. This is used for parsing times in resources, + // such as HL7 messages, where no explicit timezone is specified. TimeZone string `json:"timeZone,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -707,15 +599,13 @@ func (s *Dataset) MarshalJSON() ([]byte, error) { } // DateShiftConfig: Shift a date forward or backward in time by a random -// amount which is -// consistent for a given patient and crypto key combination. +// amount which is consistent for a given patient and crypto key +// combination. type DateShiftConfig struct { // CryptoKey: An AES 128/192/256 bit key. Causes the shift to be - // computed based on this - // key and the patient ID. A default key is generated for - // each - // Deidentify operation and is used wherever crypto_key is not - // specified. + // computed based on this key and the patient ID. A default key is + // generated for each Deidentify operation and is used wherever + // crypto_key is not specified. CryptoKey string `json:"cryptoKey,omitempty"` // ForceSendFields is a list of field names (e.g. "CryptoKey") to @@ -742,12 +632,9 @@ func (s *DateShiftConfig) MarshalJSON() ([]byte, error) { } // DeidentifyConfig: Configures de-id options specific to different -// types of content. -// Each submessage customizes the handling of -// an +// types of content. Each submessage customizes the handling of an // https://tools.ietf.org/html/rfc6838 media type or subtype. Configs -// are -// applied in a nested manner at runtime. +// are applied in a nested manner at runtime. type DeidentifyConfig struct { // Dicom: Configures de-id of application/DICOM content. Dicom *DicomConfig `json:"dicom,omitempty"` @@ -756,13 +643,11 @@ type DeidentifyConfig struct { Fhir *FhirConfig `json:"fhir,omitempty"` // Image: Configures de-identification of image pixels wherever they are - // found in the - // source_dataset. + // found in the source_dataset. Image *ImageConfig `json:"image,omitempty"` // Text: Configures de-identification of text wherever it is found in - // the - // source_dataset. + // the source_dataset. Text *TextConfig `json:"text,omitempty"` // ForceSendFields is a list of field names (e.g. "Dicom") to @@ -795,14 +680,10 @@ type DeidentifyDatasetRequest struct { Config *DeidentifyConfig `json:"config,omitempty"` // DestinationDataset: The name of the dataset resource to create and - // write the redacted data to. - // - // * The destination dataset must not exist. - // * The destination dataset must be in the same project and location - // as the - // source dataset. De-identifying data across multiple projects or - // locations - // is not supported. + // write the redacted data to. * The destination dataset must not exist. 
+ // * The destination dataset must be in the same project and location as + // the source dataset. De-identifying data across multiple projects or + // locations is not supported. DestinationDataset string `json:"destinationDataset,omitempty"` // ForceSendFields is a list of field names (e.g. "Config") to @@ -835,21 +716,14 @@ type DeidentifyDicomStoreRequest struct { Config *DeidentifyConfig `json:"config,omitempty"` // DestinationStore: The name of the DICOM store to create and write the - // redacted data to. - // For - // example, - // `projects/{project_id}/locations/{location_id}/datasets/{data - // set_id}/dicomStores/{dicom_store_id}`. - // - // * The destination dataset must exist. - // * The source dataset and destination dataset must both reside in the - // same - // project. De-identifying data across multiple projects is not - // supported. - // * The destination DICOM store must not exist. - // * The caller must have the necessary permissions to create the - // destination - // DICOM store. + // redacted data to. For example, + // `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/d + // icomStores/{dicom_store_id}`. * The destination dataset must exist. * + // The source dataset and destination dataset must both reside in the + // same project. De-identifying data across multiple projects is not + // supported. * The destination DICOM store must not exist. * The caller + // must have the necessary permissions to create the destination DICOM + // store. DestinationStore string `json:"destinationStore,omitempty"` // FilterConfig: Filter configuration. @@ -885,26 +759,18 @@ type DeidentifyFhirStoreRequest struct { Config *DeidentifyConfig `json:"config,omitempty"` // DestinationStore: The name of the FHIR store to create and write the - // redacted data to. - // For - // example, - // `projects/{project_id}/locations/{location_id}/datasets/{data - // set_id}/fhirStores/{fhir_store_id}`. - // - // * The destination dataset must exist. - // * The source dataset and destination dataset must both reside in the - // same - // project. De-identifying data across multiple projects is not - // supported. - // * The destination FHIR store must exist. - // * The caller must have the healthcare.fhirResources.update - // permission to - // write to the destination FHIR store. + // redacted data to. For example, + // `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/f + // hirStores/{fhir_store_id}`. * The destination dataset must exist. * + // The source dataset and destination dataset must both reside in the + // same project. De-identifying data across multiple projects is not + // supported. * The destination FHIR store must exist. * The caller must + // have the healthcare.fhirResources.update permission to write to the + // destination FHIR store. DestinationStore string `json:"destinationStore,omitempty"` // ResourceFilter: A filter specifying the resources to include in the - // output. If not - // specified, all resources are included in the output. + // output. If not specified, all resources are included in the output. ResourceFilter *FhirFilter `json:"resourceFilter,omitempty"` // ForceSendFields is a list of field names (e.g. "Config") to @@ -946,18 +812,14 @@ type DicomConfig struct { // "MINIMAL_KEEP_LIST_PROFILE" - Keep only tags required to produce // valid DICOM. 
// "ATTRIBUTE_CONFIDENTIALITY_BASIC_PROFILE" - Remove tags based on - // DICOM Standard's Attribute Confidentiality Basic - // Profile (DICOM Standard Edition - // 2018e) - // http://dicom.nema.org/medical/dicom/2018e/output/chtml/part15/c - // hapter_E.html. + // DICOM Standard's Attribute Confidentiality Basic Profile (DICOM + // Standard Edition 2018e) + // http://dicom.nema.org/medical/dicom/2018e/output/chtml/part15/chapter_E.html. // "KEEP_ALL_PROFILE" - Keep all tags. // "DEIDENTIFY_TAG_CONTENTS" - Inspects within tag contents and - // replaces sensitive text. The process - // can be configured using the TextConfig. - // Applies to all tags with the following Value Representation - // names: - // AE, LO, LT, PN, SH, ST, UC, UT, DA, DT, AS + // replaces sensitive text. The process can be configured using the + // TextConfig. Applies to all tags with the following Value + // Representation names: AE, LO, LT, PN, SH, ST, UC, UT, DA, DT, AS FilterProfile string `json:"filterProfile,omitempty"` // KeepList: List of tags to keep. Remove all other tags. @@ -967,21 +829,14 @@ type DicomConfig struct { RemoveList *TagFilterList `json:"removeList,omitempty"` // SkipIdRedaction: If true, skip replacing StudyInstanceUID, - // SeriesInstanceUID, - // SOPInstanceUID, and MediaStorageSOPInstanceUID and leave them - // untouched. - // The Cloud Healthcare API regenerates these UIDs by default based on - // the - // DICOM Standard's reasoning: "Whilst these UIDs cannot be mapped - // directly - // to an individual out of context, given access to the original images, - // or - // to a database of the original images containing the UIDs, it would - // be - // possible to recover the individual's - // identity." - // http://dicom.nema.org/medical/dicom/current/output/chtml/pa - // rt15/sect_E.3.9.html + // SeriesInstanceUID, SOPInstanceUID, and MediaStorageSOPInstanceUID and + // leave them untouched. The Cloud Healthcare API regenerates these UIDs + // by default based on the DICOM Standard's reasoning: "Whilst these + // UIDs cannot be mapped directly to an individual out of context, given + // access to the original images, or to a database of the original + // images containing the UIDs, it would be possible to recover the + // individual's identity." + // http://dicom.nema.org/medical/dicom/current/output/chtml/part15/sect_E.3.9.html SkipIdRedaction bool `json:"skipIdRedaction,omitempty"` // ForceSendFields is a list of field names (e.g. "FilterProfile") to @@ -1011,20 +866,13 @@ func (s *DicomConfig) MarshalJSON() ([]byte, error) { // resources. type DicomFilterConfig struct { // ResourcePathsGcsUri: The Cloud Storage location of the filter - // configuration file. - // The `gcs_uri` must be in the format `gs://bucket/path/to/object`. - // The filter configuration file must contain a list of resource - // paths - // separated by newline characters (\n or \r\n). Each resource path - // must be in the - // format - // "/studies/{studyUID}[/series/{seriesUID}[/instances/{instanceUI - // D}]]" - // - // The Cloud Healthcare API service account must have - // the - // `roles/storage.objectViewer` Cloud IAM role for this Cloud - // Storage + // configuration file. The `gcs_uri` must be in the format + // `gs://bucket/path/to/object`. The filter configuration file must + // contain a list of resource paths separated by newline characters (\n + // or \r\n). 
Each resource path must be in the format + // "/studies/{studyUID}[/series/{seriesUID}[/instances/{instanceUID}]]" + // The Cloud Healthcare API service account must have the + // `roles/storage.objectViewer` Cloud IAM role for this Cloud Storage // location. ResourcePathsGcsUri string `json:"resourcePathsGcsUri,omitempty"` @@ -1054,32 +902,22 @@ func (s *DicomFilterConfig) MarshalJSON() ([]byte, error) { // DicomStore: Represents a DICOM store. type DicomStore struct { - // Labels: User-supplied key-value pairs used to organize DICOM - // stores. - // + // Labels: User-supplied key-value pairs used to organize DICOM stores. // Label keys must be between 1 and 63 characters long, have a UTF-8 - // encoding - // of maximum 128 bytes, and must conform to the - // following PCRE regular expression: - // \p{Ll}\p{Lo}{0,62} - // - // Label values are optional, must be between 1 and 63 characters long, - // have - // a UTF-8 encoding of maximum 128 bytes, and must conform to - // the - // following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} - // - // No more than 64 labels can be associated with a given store. + // encoding of maximum 128 bytes, and must conform to the following PCRE + // regular expression: \p{Ll}\p{Lo}{0,62} Label values are optional, + // must be between 1 and 63 characters long, have a UTF-8 encoding of + // maximum 128 bytes, and must conform to the following PCRE regular + // expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} No more than 64 labels can be + // associated with a given store. Labels map[string]string `json:"labels,omitempty"` - // Name: Output only. Resource name of the DICOM store, of the - // form - // `projects/{project_id}/locations/{location_id}/datasets/{dataset_ - // id}/dicomStores/{dicom_store_id}`. + // Name: Resource name of the DICOM store, of the form + // `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/d + // icomStores/{dicom_store_id}`. Name string `json:"name,omitempty"` - // NotificationConfig: Notification destination for new DICOM - // instances. + // NotificationConfig: Notification destination for new DICOM instances. // Supplied by the client. NotificationConfig *NotificationConfig `json:"notificationConfig,omitempty"` @@ -1111,49 +949,32 @@ func (s *DicomStore) MarshalJSON() ([]byte, error) { } // Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` } -// ExportDicomDataRequest: Exports data from the specified DICOM -// store. +// ExportDicomDataRequest: Exports data from the specified DICOM store. // If a given resource, such as a DICOM object with the same SOPInstance -// UID, -// already exists in the output, it is overwritten with the version -// in the source dataset. 
-// Exported DICOM data persists when the DICOM store from which it -// was -// exported is deleted. +// UID, already exists in the output, it is overwritten with the version +// in the source dataset. Exported DICOM data persists when the DICOM +// store from which it was exported is deleted. type ExportDicomDataRequest struct { - // BigqueryDestination: The BigQuery output destination. - // - // You can only export to a BigQuery dataset that's in the same project - // as - // the DICOM store you're exporting from. - // - // The BigQuery location requires two IAM - // roles: - // `roles/bigquery.dataEditor` and `roles/bigquery.jobUser`. + // BigqueryDestination: The BigQuery output destination. You can only + // export to a BigQuery dataset that's in the same project as the DICOM + // store you're exporting from. The BigQuery location requires two IAM + // roles: `roles/bigquery.dataEditor` and `roles/bigquery.jobUser`. BigqueryDestination *GoogleCloudHealthcareV1DicomBigQueryDestination `json:"bigqueryDestination,omitempty"` - // GcsDestination: The Cloud Storage output destination. - // - // The Cloud Storage location requires the `roles/storage.objectAdmin` - // Cloud - // IAM role. + // GcsDestination: The Cloud Storage output destination. The Cloud + // Storage location requires the `roles/storage.objectAdmin` Cloud IAM + // role. GcsDestination *GoogleCloudHealthcareV1DicomGcsDestination `json:"gcsDestination,omitempty"` // ForceSendFields is a list of field names (e.g. "BigqueryDestination") @@ -1187,27 +1008,18 @@ type ExportDicomDataResponse struct { // ExportResourcesRequest: Request to export resources. type ExportResourcesRequest struct { - // BigqueryDestination: The BigQuery output destination. - // - // The BigQuery location requires two IAM - // roles: - // `roles/bigquery.dataEditor` and `roles/bigquery.jobUser`. - // - // The output will be one BigQuery table per resource type. + // BigqueryDestination: The BigQuery output destination. The BigQuery + // location requires two IAM roles: `roles/bigquery.dataEditor` and + // `roles/bigquery.jobUser`. The output is one BigQuery table per + // resource type. BigqueryDestination *GoogleCloudHealthcareV1FhirBigQueryDestination `json:"bigqueryDestination,omitempty"` - // GcsDestination: The Cloud Storage output destination. - // - // The Cloud Storage location requires the `roles/storage.objectAdmin` - // Cloud - // IAM role. - // - // The exported outputs are - // organized by FHIR resource types. The server will create one object - // per - // resource type. Each object contains newline delimited JSON, and each - // line - // is a FHIR resource. + // GcsDestination: The Cloud Storage output destination. The Healthcare + // Service Agent account requires the `roles/storage.objectAdmin` role + // on the Cloud Storage location. The exported outputs are organized by + // FHIR resource types. The server creates one object per resource type. + // Each object contains newline delimited JSON, and each line is a FHIR + // resource. GcsDestination *GoogleCloudHealthcareV1FhirGcsDestination `json:"gcsDestination,omitempty"` // ForceSendFields is a list of field names (e.g. "BigqueryDestination") @@ -1235,74 +1047,46 @@ func (s *ExportResourcesRequest) MarshalJSON() ([]byte, error) { } // ExportResourcesResponse: Response when all resources export -// successfully. -// This structure will be included in the -// response to describe the detailed -// outcome. It will only be included when the operation finishes -// successfully. 
+// successfully. This structure is included in the response to describe +// the detailed outcome after the operation finishes successfully. type ExportResourcesResponse struct { } // Expr: Represents a textual expression in the Common Expression -// Language (CEL) -// syntax. CEL is a C-like expression language. The syntax and semantics -// of CEL -// are documented at https://github.com/google/cel-spec. -// -// Example (Comparison): -// -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" -// -// Example (Equality): -// -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == -// request.auth.claims.email" -// -// Example (Logic): -// -// title: "Public documents" -// description: "Determine whether the document should be publicly -// visible" -// expression: "document.type != 'private' && document.type != -// 'internal'" -// -// Example (Data Manipulation): -// -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + -// string(document.create_time)" -// -// The exact variables and functions that may be referenced within an -// expression -// are determined by the service that evaluates it. See the -// service -// documentation for additional information. +// Language (CEL) syntax. CEL is a C-like expression language. The +// syntax and semantics of CEL are documented at +// https://github.com/google/cel-spec. Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" +// description: "Determine whether the document should be publicly +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. type Expr struct { // Description: Optional. Description of the expression. This is a - // longer text which - // describes the expression, e.g. when hovered over it in a UI. + // longer text which describes the expression, e.g. when hovered over it + // in a UI. Description string `json:"description,omitempty"` // Expression: Textual representation of an expression in Common - // Expression Language - // syntax. + // Expression Language syntax. Expression string `json:"expression,omitempty"` // Location: Optional. String indicating the location of the expression - // for error - // reporting, e.g. a file name and a position in the file. + // for error reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` // Title: Optional. Title for the expression, i.e. a short string - // describing - // its purpose. This can be used e.g. in UIs which allow to enter - // the - // expression. + // describing its purpose. This can be used e.g. 
in UIs which allow to + // enter the expression. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -1332,10 +1116,9 @@ func (s *Expr) MarshalJSON() ([]byte, error) { // store. type FhirConfig struct { // FieldMetadataList: Specifies FHIR paths to match and how to transform - // them. Any field that - // is not matched by a FieldMetadata is passed through to the - // output - // dataset unmodified. All extensions are removed in the output. + // them. Any field that is not matched by a FieldMetadata is passed + // through to the output dataset unmodified. All extensions are removed + // in the output. FieldMetadataList []*FieldMetadata `json:"fieldMetadataList,omitempty"` // ForceSendFields is a list of field names (e.g. "FieldMetadataList") @@ -1365,8 +1148,7 @@ func (s *FhirConfig) MarshalJSON() ([]byte, error) { // FhirFilter: Filter configuration. type FhirFilter struct { // Resources: List of resources to include in the output. If this list - // is empty or - // not specified, all resources are included in the output. + // is empty or not specified, all resources are included in the output. Resources *Resources `json:"resources,omitempty"` // ForceSendFields is a list of field names (e.g. "Resources") to @@ -1395,121 +1177,81 @@ func (s *FhirFilter) MarshalJSON() ([]byte, error) { // FhirStore: Represents a FHIR store. type FhirStore struct { // DisableReferentialIntegrity: Whether to disable referential integrity - // in this FHIR store. This field is - // immutable after FHIR store creation. - // The default value is false, meaning that the API enforces - // referential - // integrity and fails the requests that result in inconsistent state - // in - // the FHIR store. - // When this field is set to true, the API skips referential - // integrity - // checks. Consequently, operations that rely on references, such - // as - // GetPatientEverything, do not return all the results if broken - // references - // exist. + // in this FHIR store. This field is immutable after FHIR store + // creation. The default value is false, meaning that the API enforces + // referential integrity and fails the requests that result in + // inconsistent state in the FHIR store. When this field is set to true, + // the API skips referential integrity checks. Consequently, operations + // that rely on references, such as GetPatientEverything, do not return + // all the results if broken references exist. DisableReferentialIntegrity bool `json:"disableReferentialIntegrity,omitempty"` // DisableResourceVersioning: Whether to disable resource versioning for - // this FHIR store. This field can - // not be changed after the creation of FHIR store. - // If set to false, which is the default behavior, all write - // operations - // cause historical versions to be recorded automatically. The - // historical - // versions can be fetched through the history APIs, but cannot be - // updated. - // If set to true, no historical versions are kept. The server - // sends - // errors for attempts to read the historical versions. + // this FHIR store. This field can not be changed after the creation of + // FHIR store. If set to false, which is the default behavior, all write + // operations cause historical versions to be recorded automatically. + // The historical versions can be fetched through the history APIs, but + // cannot be updated. If set to true, no historical versions are kept. + // The server sends errors for attempts to read the historical versions. 
DisableResourceVersioning bool `json:"disableResourceVersioning,omitempty"` - // EnableUpdateCreate: Whether this FHIR store has the - // [updateCreate - // capability](https://www.hl7.org/fhir/capabilitystatement - // -definitions.html#CapabilityStatement.rest.resource.updateCreate). - // Thi - // s determines if the client can use an Update operation to create a - // new - // resource with a client-specified ID. If false, all IDs are - // server-assigned + // EnableUpdateCreate: Whether this FHIR store has the [updateCreate + // capability](https://www.hl7.org/fhir/capabilitystatement-definitions.h + // tml#CapabilityStatement.rest.resource.updateCreate). This determines + // if the client can use an Update operation to create a new resource + // with a client-specified ID. If false, all IDs are server-assigned // through the Create operation and attempts to update a non-existent - // resource - // return errors. Please treat the audit logs with appropriate levels - // of - // care if client-specified resource IDs contain sensitive data such - // as - // patient identifiers, those IDs are part of the FHIR resource - // path - // recorded in Cloud audit logs and Cloud Pub/Sub notifications. + // resource return errors. Please treat the audit logs with appropriate + // levels of care if client-specified resource IDs contain sensitive + // data such as patient identifiers, those IDs are part of the FHIR + // resource path recorded in Cloud audit logs and Cloud Pub/Sub + // notifications. EnableUpdateCreate bool `json:"enableUpdateCreate,omitempty"` - // Labels: User-supplied key-value pairs used to organize FHIR - // stores. - // + // Labels: User-supplied key-value pairs used to organize FHIR stores. // Label keys must be between 1 and 63 characters long, have a UTF-8 - // encoding - // of maximum 128 bytes, and must conform to the - // following PCRE regular expression: - // \p{Ll}\p{Lo}{0,62} - // - // Label values are optional, must be between 1 and 63 characters long, - // have - // a UTF-8 encoding of maximum 128 bytes, and must conform to - // the - // following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} - // - // No more than 64 labels can be associated with a given store. + // encoding of maximum 128 bytes, and must conform to the following PCRE + // regular expression: \p{Ll}\p{Lo}{0,62} Label values are optional, + // must be between 1 and 63 characters long, have a UTF-8 encoding of + // maximum 128 bytes, and must conform to the following PCRE regular + // expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} No more than 64 labels can be + // associated with a given store. Labels map[string]string `json:"labels,omitempty"` - // Name: Output only. Resource name of the FHIR store, of the - // form - // `projects/{project_id}/datasets/{dataset_id}/fhirStores/{fhir_sto - // re_id}`. + // Name: Output only. Resource name of the FHIR store, of the form + // `projects/{project_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id + // }`. Name string `json:"name,omitempty"` // NotificationConfig: If non-empty, publish all resource modifications - // of this FHIR store to - // this destination. The Cloud Pub/Sub message attributes contain a - // map - // with a string describing the action that has triggered the - // notification. - // For example, "action":"CreateResource". + // of this FHIR store to this destination. The Cloud Pub/Sub message + // attributes contain a map with a string describing the action that has + // triggered the notification. For example, "action":"CreateResource". 
NotificationConfig *NotificationConfig `json:"notificationConfig,omitempty"` // StreamConfigs: A list of streaming configs that configure the - // destinations of streaming - // export for every resource mutation in this FHIR store. Each store - // is - // allowed to have up to 10 streaming configs. + // destinations of streaming export for every resource mutation in this + // FHIR store. Each store is allowed to have up to 10 streaming configs. // After a new config is added, the next resource mutation is streamed - // to - // the new location in addition to the existing ones. - // When a location is removed from the list, the server stops - // streaming to that location. Before adding a new config, you must add - // the - // required - // [`bigquery.dataEditor`](https://cloud.google.com/bigquery - // /docs/access-control#bigquery.dataEditor) - // role to your project's **Cloud Healthcare Service Agent** - // [service - // account](https://cloud.google.com/iam/docs/service-accounts). - // Some lag (typically on the order of dozens of seconds) is expected - // before + // to the new location in addition to the existing ones. When a location + // is removed from the list, the server stops streaming to that + // location. Before adding a new config, you must add the required + // [`bigquery.dataEditor`](https://cloud.google.com/bigquery/docs/access- + // control#bigquery.dataEditor) role to your project's **Cloud + // Healthcare Service Agent** [service + // account](https://cloud.google.com/iam/docs/service-accounts). Some + // lag (typically on the order of dozens of seconds) is expected before // the results show up in the streaming destination. StreamConfigs []*StreamConfig `json:"streamConfigs,omitempty"` // Version: The FHIR specification version that this FHIR store supports - // natively. This - // field is immutable after store creation. Requests are rejected if - // they - // contain FHIR resources of a different version. + // natively. This field is immutable after store creation. Requests are + // rejected if they contain FHIR resources of a different version. + // Version is required for every FHIR store. // // Possible values: // "VERSION_UNSPECIFIED" - Users must specify a version on store - // creation or an error will be - // returned. + // creation or an error is returned. // "DSTU2" - Draft Standard for Trial Use, [Release // 2](https://www.hl7.org/fhir/DSTU2) // "STU3" - Standard for Trial Use, [Release @@ -1547,8 +1289,7 @@ func (s *FhirStore) MarshalJSON() ([]byte, error) { } // FieldMetadata: Specifies FHIR paths to match, and how to handle -// de-identification of -// matching fields. +// de-identification of matching fields. type FieldMetadata struct { // Action: Deidentify action for one field. // @@ -1559,21 +1300,15 @@ type FieldMetadata struct { // "DO_NOT_TRANSFORM" - Do not transform. Action string `json:"action,omitempty"` - // Paths: List of paths to FHIR fields to be redacted. Each path is - // a - // period-separated list where each component is either a field name - // or - // FHIR type name, for example: Patient, HumanName. - // For "choice" types (those defined in the FHIR spec with the - // form: - // field[x]) we use two separate components. For - // example, - // "deceasedAge.unit" is matched by "Deceased.Age.unit". - // Supported types are: AdministrativeGenderCode, Code, Date, - // DateTime, - // Decimal, HumanName, Id, LanguageCode, Markdown, Oid, String, Uri, - // Uuid, - // Xhtml. + // Paths: List of paths to FHIR fields to be redacted. 
Each path is a + // period-separated list where each component is either a field name or + // FHIR type name, for example: Patient, HumanName. For "choice" types + // (those defined in the FHIR spec with the form: field[x]) we use two + // separate components. For example, "deceasedAge.unit" is matched by + // "Deceased.Age.unit". Supported types are: AdministrativeGenderCode, + // Code, Date, DateTime, Decimal, HumanName, Id, LanguageCode, Markdown, + // Oid, String, Uri, Uuid, Xhtml. Base64Binary is also supported, but + // may only be kept as-is or have all the content removed. Paths []string `json:"paths,omitempty"` // ForceSendFields is a list of field names (e.g. "Action") to @@ -1613,17 +1348,13 @@ type GoogleCloudHealthcareV1DeidentifyDeidentifyFhirStoreSummary struct { // where the server writes the output. type GoogleCloudHealthcareV1DicomBigQueryDestination struct { // Force: If the destination table already exists and this flag is - // `TRUE`, the table - // is overwritten by the contents of the DICOM store. If the flag is - // not - // set and the destination table already exists, the export call returns - // an - // error. + // `TRUE`, the table is overwritten by the contents of the DICOM store. + // If the flag is not set and the destination table already exists, the + // export call returns an error. Force bool `json:"force,omitempty"` // TableUri: BigQuery URI to a table, up to 2000 characters long, in the - // format - // `bq://projectId.bqDatasetId.tableId` + // format `bq://projectId.bqDatasetId.tableId` TableUri string `json:"tableUri,omitempty"` // ForceSendFields is a list of field names (e.g. "Force") to @@ -1650,64 +1381,42 @@ func (s *GoogleCloudHealthcareV1DicomBigQueryDestination) MarshalJSON() ([]byte, } // GoogleCloudHealthcareV1DicomGcsDestination: The Cloud Storage -// location where the server writes the output and the -// export +// location where the server writes the output and the export // configuration. type GoogleCloudHealthcareV1DicomGcsDestination struct { - // MimeType: MIME types supported by DICOM spec. - // Each file is written in the following - // format: - // `.../{study_id}/{series_id}/{instance_id}[/{frame_number}].{ex - // tension}` - // The frame_number component exists only for multi-frame - // instances. - // + // MimeType: MIME types supported by DICOM spec. Each file is written in + // the following format: + // `.../{study_id}/{series_id}/{instance_id}[/{frame_number}].{extension} + // ` The frame_number component exists only for multi-frame instances. // Supported MIME types are consistent with supported formats in // DICOMweb: - // https://cloud.google.com/healthcare/docs/dicom#retrieve_tran - // saction. 
- // Specifically, the following are supported: - // - // - application/dicom; transfer-syntax=1.2.840.10008.1.2.1 - // (uncompressed DICOM) - // - application/dicom; transfer-syntax=1.2.840.10008.1.2.4.50 - // (DICOM with embedded JPEG Baseline) - // - application/dicom; transfer-syntax=1.2.840.10008.1.2.4.90 - // (DICOM with embedded JPEG 2000 Lossless Only) - // - application/dicom; transfer-syntax=1.2.840.10008.1.2.4.91 - // (DICOM with embedded JPEG 2000) - // - application/dicom; transfer-syntax=* - // (DICOM with no transcoding) - // - application/octet-stream; transfer-syntax=1.2.840.10008.1.2.1 - // (raw uncompressed PixelData) - // - application/octet-stream; transfer-syntax=* - // (raw PixelData in whatever format it was uploaded in) - // - image/jpeg; transfer-syntax=1.2.840.10008.1.2.4.50 - // (Consumer JPEG) - // - image/png - // - // The following extensions are used for output files: - // - // - application/dicom -> .dcm - // - image/jpeg -> .jpg - // - image/png -> .png - // - application/octet-stream -> no extension - // - // If unspecified, the instances are exported in the original - // DICOM format they were uploaded in. + // https://cloud.google.com/healthcare/docs/dicom#retrieve_transaction. + // Specifically, the following are supported: - application/dicom; + // transfer-syntax=1.2.840.10008.1.2.1 (uncompressed DICOM) - + // application/dicom; transfer-syntax=1.2.840.10008.1.2.4.50 (DICOM with + // embedded JPEG Baseline) - application/dicom; + // transfer-syntax=1.2.840.10008.1.2.4.90 (DICOM with embedded JPEG 2000 + // Lossless Only) - application/dicom; + // transfer-syntax=1.2.840.10008.1.2.4.91 (DICOM with embedded JPEG + // 2000) - application/dicom; transfer-syntax=* (DICOM with no + // transcoding) - application/octet-stream; + // transfer-syntax=1.2.840.10008.1.2.1 (raw uncompressed PixelData) - + // application/octet-stream; transfer-syntax=* (raw PixelData in + // whatever format it was uploaded in) - image/jpeg; + // transfer-syntax=1.2.840.10008.1.2.4.50 (Consumer JPEG) - image/png + // The following extensions are used for output files: - + // application/dicom -> .dcm - image/jpeg -> .jpg - image/png -> .png - + // application/octet-stream -> no extension If unspecified, the + // instances are exported in the original DICOM format they were + // uploaded in. MimeType string `json:"mimeType,omitempty"` - // UriPrefix: The Cloud Storage destination to export to. - // - // URI for a Cloud Storage directory where the server writes the result - // files, - // in the format `gs://{bucket-id}/{path/to/destination/dir}`). If there - // is no - // trailing slash, the service appends one when composing the object - // path. - // The user is responsible for creating the Cloud Storage bucket - // referenced in - // `uri_prefix`. + // UriPrefix: The Cloud Storage destination to export to. URI for a + // Cloud Storage directory where the server writes the result files, in + // the format `gs://{bucket-id}/{path/to/destination/dir}`). If there is + // no trailing slash, the service appends one when composing the object + // path. The user is responsible for creating the Cloud Storage bucket + // referenced in `uri_prefix`. UriPrefix string `json:"uriPrefix,omitempty"` // ForceSendFields is a list of field names (e.g. "MimeType") to @@ -1736,29 +1445,19 @@ func (s *GoogleCloudHealthcareV1DicomGcsDestination) MarshalJSON() ([]byte, erro // GoogleCloudHealthcareV1DicomGcsSource: Specifies the configuration // for importing data from Cloud Storage. 
type GoogleCloudHealthcareV1DicomGcsSource struct { - // Uri: Points to a Cloud Storage URI containing file(s) with - // content only. The URI must be in the following - // format: - // `gs://{bucket_id}/{object_id}`. The URI can include wildcards - // in + // Uri: Points to a Cloud Storage URI containing file(s) with content + // only. The URI must be in the following format: + // `gs://{bucket_id}/{object_id}`. The URI can include wildcards in // `object_id` and thus identify multiple files. Supported wildcards: - // '*' to match 0 or more non-separator characters - // '**' to match 0 or more characters (including separators). Must be - // used at - // the end of a path and with no other wildcards in the - // path. Can also be used with a file extension (such as .dcm), - // which - // imports all files with the extension in the specified directory - // and - // its sub-directories. For example, - // `gs://my-bucket/my-directory/**.dcm` imports all files with - // .dcm - // extensions in `my-directory/` and its sub-directories. - // '?' to match 1 character - // All other URI formats are invalid. - // Files matching the wildcard are expected to contain content only, - // no - // metadata. + // '*' to match 0 or more non-separator characters '**' to match 0 or + // more characters (including separators). Must be used at the end of a + // path and with no other wildcards in the path. Can also be used with a + // file extension (such as .dcm), which imports all files with the + // extension in the specified directory and its sub-directories. For + // example, `gs://my-bucket/my-directory/**.dcm` imports all files with + // .dcm extensions in `my-directory/` and its sub-directories. '?' to + // match 1 character All other URI formats are invalid. Files matching + // the wildcard are expected to contain content only, no metadata. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. "Uri") to @@ -1787,17 +1486,14 @@ func (s *GoogleCloudHealthcareV1DicomGcsSource) MarshalJSON() ([]byte, error) { // GoogleCloudHealthcareV1FhirBigQueryDestination: The configuration for // exporting to BigQuery. type GoogleCloudHealthcareV1FhirBigQueryDestination struct { - // DatasetUri: BigQuery URI to a dataset, up to 2000 characters long, in - // the format - // `bq://projectId.bqDatasetId` + // DatasetUri: BigQuery URI to an existing dataset, up to 2000 + // characters long, in the format `bq://projectId.bqDatasetId`. DatasetUri string `json:"datasetUri,omitempty"` - // Force: If this flag is `TRUE`, all tables will be deleted from the - // dataset before - // the new exported tables are written. If the flag is not set and - // the - // destination dataset contains tables, the export call returns an - // error. + // Force: If this flag is `TRUE`, all tables are deleted from the + // dataset before the new exported tables are written. If the flag is + // not set and the destination dataset contains tables, the export call + // returns an error. Force bool `json:"force,omitempty"` // SchemaConfig: The configuration for the exported BigQuery schema. @@ -1830,14 +1526,11 @@ func (s *GoogleCloudHealthcareV1FhirBigQueryDestination) MarshalJSON() ([]byte, // exporting to Cloud Storage. type GoogleCloudHealthcareV1FhirGcsDestination struct { // UriPrefix: URI for a Cloud Storage directory where result files - // should be written (in - // the format `gs://{bucket-id}/{path/to/destination/dir}`). 
If there is - // no - // trailing slash, the service will append one when composing the object - // path. - // The user is responsible for creating the Cloud Storage bucket - // referenced in - // `uri_prefix`. + // should be written, in the format of + // `gs://{bucket-id}/{path/to/destination/dir}`. If there is no trailing + // slash, the service appends one when composing the object path. The + // user is responsible for creating the Cloud Storage bucket referenced + // in `uri_prefix`. UriPrefix string `json:"uriPrefix,omitempty"` // ForceSendFields is a list of field names (e.g. "UriPrefix") to @@ -1866,32 +1559,19 @@ func (s *GoogleCloudHealthcareV1FhirGcsDestination) MarshalJSON() ([]byte, error // GoogleCloudHealthcareV1FhirGcsSource: Specifies the configuration for // importing data from Cloud Storage. type GoogleCloudHealthcareV1FhirGcsSource struct { - // Uri: Points to a Cloud Storage URI containing file(s) to import. - // - // The URI must be in the following format: - // `gs://{bucket_id}/{object_id}`. + // Uri: Points to a Cloud Storage URI containing file(s) to import. The + // URI must be in the following format: `gs://{bucket_id}/{object_id}`. // The URI can include wildcards in `object_id` and thus identify - // multiple - // files. Supported wildcards: - // - // * `*` to match 0 or more non-separator characters - // * `**` to match 0 or more characters (including separators). Must be - // used - // at the end of a path and with no other wildcards in the - // path. Can also be used with a file extension (such as .ndjson), - // which - // imports all files with the extension in the specified directory - // and - // its sub-directories. For example, - // `gs://my-bucket/my-directory/**.ndjson` - // imports all files with `.ndjson` extensions in `my-directory/` and - // its - // sub-directories. - // * `?` to match 1 character - // - // Files matching the wildcard are expected to contain content only, - // no - // metadata. + // multiple files. Supported wildcards: * `*` to match 0 or more + // non-separator characters * `**` to match 0 or more characters + // (including separators). Must be used at the end of a path and with no + // other wildcards in the path. Can also be used with a file extension + // (such as .ndjson), which imports all files with the extension in the + // specified directory and its sub-directories. For example, + // `gs://my-bucket/my-directory/**.ndjson` imports all files with + // `.ndjson` extensions in `my-directory/` and its sub-directories. * + // `?` to match 1 character Files matching the wildcard are expected to + // contain content only, no metadata. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. "Uri") to @@ -1918,78 +1598,33 @@ func (s *GoogleCloudHealthcareV1FhirGcsSource) MarshalJSON() ([]byte, error) { } // Hl7V2NotificationConfig: Specifies where and whether to send -// notifications upon changes to a -// data store. +// notifications upon changes to a data store. type Hl7V2NotificationConfig struct { // Filter: Restricts notifications sent for messages matching a filter. - // If this is - // empty, all messages are matched. - // Syntax: - // https://cloud.google.com/appengine/docs/standard/python/search - // /query_strings - // - // Fields/functions available for filtering are: - // - // * `message_type`, from the MSH-9.1 field. For example, - // `NOT message_type = "ADT". 
- // * `send_date` or `sendDate`, the YYYY-MM-DD date the message was - // sent in - // the dataset's time_zone, from the MSH-7 segment. For - // example, - // `send_date < "2017-01-02". - // * `send_time`, the timestamp when the message was sent, using - // the - // RFC3339 time format for comparisons, from the MSH-7 segment. For - // example, - // `send_time < "2017-01-02T00:00:00-05:00". - // * `send_facility`, the care center that the message came from, from - // the - // MSH-4 segment. For example, `send_facility = "ABC". - // * `PatientId(value, type)`, which matches if the message lists a - // patient - // having an ID of the given value and type in the PID-2, PID-3, or - // PID-4 - // segments. For example, `PatientId("123456", "MRN")`. - // * `labels.x`, a string value of the label with key `x` as set using - // the - // Message.labels - // map. For example, `labels."priority"="high". The operator `:*` can - // be - // used to assert the existence of a label. For - // example, + // If this is empty, all messages are matched. Syntax: + // https://cloud.google.com/appengine/docs/standard/python/search/query_strings The following fields and functions are available for filtering: * `message_type`, from the MSH-9.1 field. For example, `NOT message_type = "ADT". * `send_date` or `sendDate`, the YYYY-MM-DD date the message was sent in the dataset's time_zone, from the MSH-7 segment. For example, `send_date < "2017-01-02". * `send_time`, the timestamp when the message was sent, using the RFC3339 time format for comparisons, from the MSH-7 segment. For example, `send_time < "2017-01-02T00:00:00-05:00". * `send_facility`, the care center that the message came from, from the MSH-4 segment. For example, `send_facility = "ABC". * `PatientId(value, type)`, which matches if the message lists a patient having an ID of the given value and type in the PID-2, PID-3, or PID-4 segments. For example, `PatientId("123456", "MRN")`. * `labels.x`, a string value of the label with key `x` as set using the Message.labels map. For example, `labels."priority"="high". The operator `:*` can be used to assert the existence of a label. For example, // `labels."priority":*`. Filter string `json:"filter,omitempty"` // PubsubTopic: The [Cloud - // Pub/Sub](https://cloud.google.com/pubsub/docs/) topic - // that + // Pub/Sub](https://cloud.google.com/pubsub/docs/) topic that // notifications of changes are published on. Supplied by the client. - // The - // notification is a `PubsubMessage` with the following fields: - // - // * `PubsubMessage.Data` contains the resource name. - // * `PubsubMessage.MessageId` is the ID of this notification. It - // is - // guaranteed to be unique within the topic. - // * `PubsubMessage.PublishTime` is the time at which the message - // was - // published. - // - // Note that notifications are only sent if the topic is non-empty. - // [Topic - // names](https://cloud.google.com/pubsub/docs/overview#names) must - // be - // scoped to a project. Cloud Healthcare API service account must - // have - // publisher permissions on the given Pub/Sub topic. Not having - // adequate - // permissions causes the calls that send notifications to fail. - // - // If a notification cannot be published to Cloud Pub/Sub, errors will - // be - // logged to Cloud Logging (see [Viewing - // logs](/healthcare/docs/how- - // tos/logging)). + // The notification is a `PubsubMessage` with the following fields: * + // `PubsubMessage.Data` contains the resource name. 
* + // `PubsubMessage.MessageId` is the ID of this notification. It's + // guaranteed to be unique within the topic. * + // `PubsubMessage.PublishTime` is the time when the message was + // published. Note that notifications are only sent if the topic is + // non-empty. [Topic + // names](https://cloud.google.com/pubsub/docs/overview#names) must be + // scoped to a project. The Cloud Healthcare API service account, + // service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com, + // must have publisher permissions on the given Pub/Sub topic. Not + // having adequate permissions causes the calls that send notifications + // to fail. If a notification cannot be published to Cloud Pub/Sub, + // errors are logged to Cloud Logging. For more information, see + // [Viewing error logs in Cloud + // Logging](/healthcare/docs/how-tos/logging)). PubsubTopic string `json:"pubsubTopic,omitempty"` // ForceSendFields is a list of field names (e.g. "Filter") to @@ -2017,60 +1652,42 @@ func (s *Hl7V2NotificationConfig) MarshalJSON() ([]byte, error) { // Hl7V2Store: Represents an HL7v2 store. type Hl7V2Store struct { - // Labels: User-supplied key-value pairs used to organize HL7v2 - // stores. - // + // Labels: User-supplied key-value pairs used to organize HL7v2 stores. // Label keys must be between 1 and 63 characters long, have a UTF-8 - // encoding - // of maximum 128 bytes, and must conform to the - // following PCRE regular expression: - // \p{Ll}\p{Lo}{0,62} - // - // Label values are optional, must be between 1 and 63 characters long, - // have - // a UTF-8 encoding of maximum 128 bytes, and must conform to - // the - // following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} - // - // No more than 64 labels can be associated with a given store. + // encoding of maximum 128 bytes, and must conform to the following PCRE + // regular expression: \p{Ll}\p{Lo}{0,62} Label values are optional, + // must be between 1 and 63 characters long, have a UTF-8 encoding of + // maximum 128 bytes, and must conform to the following PCRE regular + // expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} No more than 64 labels can be + // associated with a given store. Labels map[string]string `json:"labels,omitempty"` - // Name: Output only. Resource name of the HL7v2 store, of the - // form - // `projects/{project_id}/datasets/{dataset_id}/hl7V2Stores/{hl7v2_s - // tore_id}`. + // Name: Resource name of the HL7v2 store, of the form + // `projects/{project_id}/datasets/{dataset_id}/hl7V2Stores/{hl7v2_store_ + // id}`. Name string `json:"name,omitempty"` // NotificationConfigs: A list of notification configs. Each - // configuration uses a filter to - // determine whether to publish a message (both Ingest & Create) on - // the corresponding notification destination. Only the message name is - // sent - // as part of the notification. Supplied by the client. + // configuration uses a filter to determine whether to publish a message + // (both Ingest & Create) on the corresponding notification destination. + // Only the message name is sent as part of the notification. Supplied + // by the client. NotificationConfigs []*Hl7V2NotificationConfig `json:"notificationConfigs,omitempty"` // ParserConfig: The configuration for the parser. It determines how the - // server parses the - // messages. + // server parses the messages. ParserConfig *ParserConfig `json:"parserConfig,omitempty"` - // RejectDuplicateMessage: Determines whether duplicate messages should - // be rejected. 
A duplicate - // message is a message with the same raw bytes as a message that has - // already - // been ingested/created in this HL7v2 store. + // RejectDuplicateMessage: Determines whether to reject duplicate + // messages. A duplicate message is a message with the same raw bytes as + // a message that has already been ingested/created in this HL7v2 store. // The default value is false, meaning that the store accepts the - // duplicate - // messages and it also returns the same ACK message in - // the - // IngestMessageResponse as has been returned previously. Note that - // only - // one resource is created in the store. - // When this field is set to true, - // CreateMessage/IngestMessage - // requests with a duplicate message will be rejected by the store, - // and - // IngestMessageErrorDetail returns a NACK message upon rejection. + // duplicate messages and it also returns the same ACK message in the + // IngestMessageResponse as has been returned previously. Note that only + // one resource is created in the store. When this field is set to true, + // CreateMessage/IngestMessage requests with a duplicate message will be + // rejected by the store, and IngestMessageErrorDetail returns a NACK + // message upon rejection. RejectDuplicateMessage bool `json:"rejectDuplicateMessage,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2101,50 +1718,22 @@ func (s *Hl7V2Store) MarshalJSON() ([]byte, error) { } // HttpBody: Message that represents an arbitrary HTTP body. It should -// only be used for -// payload formats that can't be represented as JSON, such as raw binary -// or -// an HTML page. -// -// -// This message can be used both in streaming and non-streaming API -// methods in -// the request as well as the response. -// -// It can be used as a top-level request field, which is convenient if -// one -// wants to extract parameters from either the URL or HTTP template into -// the -// request fields and also want access to the raw HTTP body. -// -// Example: -// -// message GetResourceRequest { -// // A unique request id. -// string request_id = 1; -// -// // The raw HTTP body is bound to this field. -// google.api.HttpBody http_body = 2; -// } -// -// service ResourceService { -// rpc GetResource(GetResourceRequest) returns -// (google.api.HttpBody); -// rpc UpdateResource(google.api.HttpBody) returns -// (google.protobuf.Empty); -// } -// -// Example with streaming methods: -// -// service CaldavService { -// rpc GetCalendar(stream google.api.HttpBody) -// returns (stream google.api.HttpBody); -// rpc UpdateCalendar(stream google.api.HttpBody) -// returns (stream google.api.HttpBody); -// } -// -// Use of this type only changes how the request and response bodies -// are +// only be used for payload formats that can't be represented as JSON, +// such as raw binary or an HTML page. This message can be used both in +// streaming and non-streaming API methods in the request as well as the +// response. It can be used as a top-level request field, which is +// convenient if one wants to extract parameters from either the URL or +// HTTP template into the request fields and also want access to the raw +// HTTP body. Example: message GetResourceRequest { // A unique request +// id. string request_id = 1; // The raw HTTP body is bound to this +// field. 
google.api.HttpBody http_body = 2; } service ResourceService { +// rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); +// rpc UpdateResource(google.api.HttpBody) returns +// (google.protobuf.Empty); } Example with streaming methods: service +// CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns +// (stream google.api.HttpBody); rpc UpdateCalendar(stream +// google.api.HttpBody) returns (stream google.api.HttpBody); } Use of +// this type only changes how the request and response bodies are // handled, all other features will continue to work unchanged. type HttpBody struct { // ContentType: The HTTP Content-Type header value specifying the @@ -2155,8 +1744,7 @@ type HttpBody struct { Data string `json:"data,omitempty"` // Extensions: Application specific response metadata. Must be set in - // the first response - // for streaming APIs. + // the first response for streaming APIs. Extensions []googleapi.RawMessage `json:"extensions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2223,20 +1811,14 @@ func (s *ImageConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ImportDicomDataRequest: Imports data into the specified DICOM -// store. +// ImportDicomDataRequest: Imports data into the specified DICOM store. // Returns an error if any of the files to import are not DICOM files. -// This -// API accepts duplicate DICOM instances by ignoring the newly-pushed -// instance. -// It does not overwrite. +// This API accepts duplicate DICOM instances by ignoring the +// newly-pushed instance. It does not overwrite. type ImportDicomDataRequest struct { // GcsSource: Cloud Storage source data location and import - // configuration. - // - // The Cloud Storage location requires the - // `roles/storage.objectViewer` - // Cloud IAM role. + // configuration. The Cloud Storage location requires the + // `roles/storage.objectViewer` Cloud IAM role. GcsSource *GoogleCloudHealthcareV1DicomGcsSource `json:"gcsSource,omitempty"` // ForceSendFields is a list of field names (e.g. "GcsSource") to @@ -2270,21 +1852,17 @@ type ImportDicomDataResponse struct { // ImportResourcesRequest: Request to import resources. type ImportResourcesRequest struct { // ContentStructure: The content structure in the source location. If - // not specified, the server - // treats the input source files as BUNDLE. + // not specified, the server treats the input source files as BUNDLE. // // Possible values: // "CONTENT_STRUCTURE_UNSPECIFIED" - If the content structure is not - // specified, the default value `BUNDLE` - // will be used. + // specified, the default value `BUNDLE` is used. // "BUNDLE" - The source file contains one or more lines of - // newline-delimited JSON - // (ndjson). Each line is a bundle, which contains one or more - // resources. - // Set the bundle type to `history` to import resource versions. + // newline-delimited JSON (ndjson). Each line is a bundle that contains + // one or more resources. Set the bundle type to `history` to import + // resource versions. // "RESOURCE" - The source file contains one or more lines of - // newline-delimited JSON - // (ndjson). Each line is a single resource. + // newline-delimited JSON (ndjson). Each line is a single resource. // "BUNDLE_PRETTY" - The entire file is one JSON bundle. The JSON can // span multiple lines. // "RESOURCE_PRETTY" - The entire file is one JSON resource. 
The JSON @@ -2292,14 +1870,9 @@ type ImportResourcesRequest struct { ContentStructure string `json:"contentStructure,omitempty"` // GcsSource: Cloud Storage source data location and import - // configuration. - // - // The Cloud Storage location requires the - // `roles/storage.objectViewer` - // Cloud IAM role. - // - // Each Cloud Storage object should be a text file that contains the - // format + // configuration. The Healthcare Service Agent account requires the + // `roles/storage.objectAdmin` role on the Cloud Storage location. Each + // Cloud Storage object should be a text file that contains the format // specified in ContentStructure. GcsSource *GoogleCloudHealthcareV1FhirGcsSource `json:"gcsSource,omitempty"` @@ -2327,17 +1900,14 @@ func (s *ImportResourcesRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ImportResourcesResponse: Final response of importing resources. -// This structure will be included in the -// response to describe the detailed -// outcome. It will only be included when the operation finishes -// successfully. +// ImportResourcesResponse: Final response of importing resources. This +// structure is included in the response to describe the detailed +// outcome after the operation finishes successfully. type ImportResourcesResponse struct { } // InfoTypeTransformation: A transformation to apply to text that is -// identified as a specific -// info_type. +// identified as a specific info_type. type InfoTypeTransformation struct { // CharacterMaskConfig: Config for character mask. CharacterMaskConfig *CharacterMaskConfig `json:"characterMaskConfig,omitempty"` @@ -2349,8 +1919,7 @@ type InfoTypeTransformation struct { DateShiftConfig *DateShiftConfig `json:"dateShiftConfig,omitempty"` // InfoTypes: InfoTypes to apply this transformation to. If this is not - // specified, the - // transformation applies to any info_type. + // specified, the transformation applies to any info_type. InfoTypes []string `json:"infoTypes,omitempty"` // RedactConfig: Config for text redaction. @@ -2413,8 +1982,7 @@ func (s *IngestMessageRequest) MarshalJSON() ([]byte, error) { } // IngestMessageResponse: Acknowledges that a message has been ingested -// into the specified -// HL7v2 store. +// into the specified HL7v2 store. type IngestMessageResponse struct { // Hl7Ack: HL7v2 ACK message. Hl7Ack string `json:"hl7Ack,omitempty"` @@ -2455,8 +2023,7 @@ type ListDatasetsResponse struct { Datasets []*Dataset `json:"datasets,omitempty"` // NextPageToken: Token to retrieve the next page of results, or empty - // if there are no - // more results in the list. + // if there are no more results in the list. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2489,13 +2056,11 @@ func (s *ListDatasetsResponse) MarshalJSON() ([]byte, error) { // ListDicomStoresResponse: Lists the DICOM stores in the given dataset. type ListDicomStoresResponse struct { // DicomStores: The returned DICOM stores. Won't be more DICOM stores - // than the value of - // page_size in the request. + // than the value of page_size in the request. DicomStores []*DicomStore `json:"dicomStores,omitempty"` // NextPageToken: Token to retrieve the next page of results or empty if - // there are no more - // results in the list. + // there are no more results in the list. 
NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2528,13 +2093,11 @@ func (s *ListDicomStoresResponse) MarshalJSON() ([]byte, error) { // ListFhirStoresResponse: Lists the FHIR stores in the given dataset. type ListFhirStoresResponse struct { // FhirStores: The returned FHIR stores. Won't be more FHIR stores than - // the value of - // page_size in the request. + // the value of page_size in the request. FhirStores []*FhirStore `json:"fhirStores,omitempty"` // NextPageToken: Token to retrieve the next page of results or empty if - // there are no more - // results in the list. + // there are no more results in the list. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2567,13 +2130,11 @@ func (s *ListFhirStoresResponse) MarshalJSON() ([]byte, error) { // ListHl7V2StoresResponse: Lists the HL7v2 stores in the given dataset. type ListHl7V2StoresResponse struct { // Hl7V2Stores: The returned HL7v2 stores. Won't be more HL7v2 stores - // than the value of - // page_size in the request. + // than the value of page_size in the request. Hl7V2Stores []*Hl7V2Store `json:"hl7V2Stores,omitempty"` // NextPageToken: Token to retrieve the next page of results or empty if - // there are no more - // results in the list. + // there are no more results in the list. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2603,19 +2164,52 @@ func (s *ListHl7V2StoresResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ListLocationsResponse: The response message for +// Locations.ListLocations. +type ListLocationsResponse struct { + // Locations: A list of locations that matches the specified filter in + // the request. + Locations []*Location `json:"locations,omitempty"` + + // NextPageToken: The standard List next-page token. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Locations") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Locations") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListLocationsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ListMessagesResponse: Lists the messages in the specified HL7v2 // store. type ListMessagesResponse struct { // Hl7V2Messages: The returned Messages. 
Won't be more Messages than the - // value of - // page_size in the request. See - // view for - // populated fields. + // value of page_size in the request. See view for populated fields. Hl7V2Messages []*Message `json:"hl7V2Messages,omitempty"` // NextPageToken: Token to retrieve the next page of results or empty if - // there are no more - // results in the list. + // there are no more results in the list. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2682,10 +2276,60 @@ func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Message: A complete HL7v2 message. -// See http://www.hl7.org/implement/standards/index.cfm?ref=common for -// details -// on the standard. +// Location: A resource that represents Google Cloud Platform location. +type Location struct { + // DisplayName: The friendly name for this location, typically a nearby + // city name. For example, "Tokyo". + DisplayName string `json:"displayName,omitempty"` + + // Labels: Cross-service attributes for the location. For example + // {"cloud.googleapis.com/region": "us-east1"} + Labels map[string]string `json:"labels,omitempty"` + + // LocationId: The canonical id for this location. For example: + // "us-east1". + LocationId string `json:"locationId,omitempty"` + + // Metadata: Service-specific metadata. For example the available + // capacity at the given location. + Metadata googleapi.RawMessage `json:"metadata,omitempty"` + + // Name: Resource name for the location, which may vary between + // implementations. For example: + // "projects/example-project/locations/us-east1" + Name string `json:"name,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "DisplayName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Location) MarshalJSON() ([]byte, error) { + type NoMethod Location + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Message: A complete HL7v2 message. See [Introduction to HL7 +// Standards] +// (https://www.hl7.org/implement/standards/index.cfm?ref=common) for +// details on the standard. type Message struct { // CreateTime: Output only. The datetime when the message was created. // Set by the server. @@ -2694,40 +2338,29 @@ type Message struct { // Data: Raw message bytes. Data string `json:"data,omitempty"` - // Labels: User-supplied key-value pairs used to organize HL7v2 - // stores. - // + // Labels: User-supplied key-value pairs used to organize HL7v2 stores. 
// Label keys must be between 1 and 63 characters long, have a UTF-8 - // encoding - // of maximum 128 bytes, and must conform to the - // following PCRE regular expression: - // \p{Ll}\p{Lo}{0,62} - // - // Label values are optional, must be between 1 and 63 characters long, - // have - // a UTF-8 encoding of maximum 128 bytes, and must conform to - // the - // following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} - // - // No more than 64 labels can be associated with a given store. + // encoding of maximum 128 bytes, and must conform to the following PCRE + // regular expression: \p{Ll}\p{Lo}{0,62} Label values are optional, + // must be between 1 and 63 characters long, have a UTF-8 encoding of + // maximum 128 bytes, and must conform to the following PCRE regular + // expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} No more than 64 labels can be + // associated with a given store. Labels map[string]string `json:"labels,omitempty"` // MessageType: The message type for this message. MSH-9.1. MessageType string `json:"messageType,omitempty"` - // Name: Resource name of the Message, of the - // form - // `projects/{project_id}/datasets/{dataset_id}/hl7V2Stores/{hl7_v2_ - // store_id}/messages/{message_id}`. - // Assigned by the server. + // Name: Resource name of the Message, of the form + // `projects/{project_id}/datasets/{dataset_id}/hl7V2Stores/{hl7_v2_store + // _id}/messages/{message_id}`. Assigned by the server. Name string `json:"name,omitempty"` // ParsedData: Output only. The parsed version of the raw message data. ParsedData *ParsedData `json:"parsedData,omitempty"` // PatientIds: All patient IDs listed in the PID-2, PID-3, and PID-4 - // segments of this - // message. + // segments of this message. PatientIds []*PatientId `json:"patientIds,omitempty"` // SendFacility: The hospital that this message came from. MSH-4. @@ -2768,33 +2401,24 @@ func (s *Message) MarshalJSON() ([]byte, error) { // changes to a data store. type NotificationConfig struct { // PubsubTopic: The [Cloud - // Pub/Sub](https://cloud.google.com/pubsub/docs/) topic - // that - // notifications of changes are published on. Supplied by the - // client. - // PubsubMessage.Data contains the resource - // name. + // Pub/Sub](https://cloud.google.com/pubsub/docs/) topic that + // notifications of changes are published on. Supplied by the client. + // PubsubMessage.Data contains the resource name. // PubsubMessage.MessageId is the ID of this message. It is guaranteed - // to be - // unique within the topic. - // PubsubMessage.PublishTime is the time at which the message was - // published. - // Notifications are only sent if the topic is - // non-empty. - // [Topic + // to be unique within the topic. PubsubMessage.PublishTime is the time + // at which the message was published. Notifications are only sent if + // the topic is non-empty. [Topic // names](https://cloud.google.com/pubsub/docs/overview#names) must be - // scoped - // to a project. Cloud Healthcare API service account must have - // publisher - // permissions on the given Cloud Pub/Sub topic. Not having - // adequate - // permissions causes the calls that send notifications to fail. - // - // If a notification can't be published to Cloud Pub/Sub, errors are - // logged to - // Cloud Logging (see [Viewing - // logs](/healthcare/docs/how-tos/logging)). If the number of - // errors exceeds a certain rate, some aren't submitted. + // scoped to a project. 
Cloud Healthcare API service account must have + // publisher permissions on the given Cloud Pub/Sub topic. Not having + // adequate permissions causes the calls that send notifications to + // fail. If a notification can't be published to Cloud Pub/Sub, errors + // are logged to Cloud Logging (see [Viewing + // logs](/healthcare/docs/how-tos/logging)). If the number of errors + // exceeds a certain rate, some aren't submitted. Note that not all + // operations trigger notifications, see [Configuring Pub/Sub + // notifications](https://cloud.google.com/healthcare/docs/how-tos/pubsub + // ) for specific details. PubsubTopic string `json:"pubsubTopic,omitempty"` // ForceSendFields is a list of field names (e.g. "PubsubTopic") to @@ -2821,52 +2445,38 @@ func (s *NotificationConfig) MarshalJSON() ([]byte, error) { } // Operation: This resource represents a long-running operation that is -// the result of a -// network API call. +// the result of a network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in - // progress. - // If `true`, the operation is completed, and either `error` or - // `response` is - // available. + // progress. If `true`, the operation is completed, and either `error` + // or `response` is available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. Error *Status `json:"error,omitempty"` - // Metadata: Service-specific metadata associated with the operation. - // It typically - // contains progress information and common metadata such as create - // time. - // Some services might not provide such metadata. Any method that - // returns a - // long-running operation should document the metadata type, if any. + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same - // service that - // originally returns it. If you use the default HTTP mapping, - // the - // `name` should be a resource name ending with + // service that originally returns it. If you use the default HTTP + // mapping, the `name` should be a resource name ending with // `operations/{unique_id}`. Name string `json:"name,omitempty"` - // Response: The normal response of the operation in case of success. - // If the original - // method returns no data on success, such as `Delete`, the response - // is - // `google.protobuf.Empty`. If the original method is - // standard - // `Get`/`Create`/`Update`, the response should be the resource. For - // other - // methods, the response should have the type `XxxResponse`, where - // `Xxx` - // is the original method name. For example, if the original method - // name - // is `TakeSnapshot()`, the inferred response type - // is - // `TakeSnapshotResponse`. + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as `Delete`, the + // response is `google.protobuf.Empty`. If the original method is + // standard `Get`/`Create`/`Update`, the response should be the + // resource. For other methods, the response should have the type + // `XxxResponse`, where `Xxx` is the original method name. 
For example, + // if the original method name is `TakeSnapshot()`, the inferred + // response type is `TakeSnapshotResponse`. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2897,8 +2507,8 @@ func (s *Operation) MarshalJSON() ([]byte, error) { } // OperationMetadata: OperationMetadata provides information about the -// operation execution. -// Returned in the long-running operation's metadata field. +// operation execution. Returned in the long-running operation's +// metadata field. type OperationMetadata struct { // ApiMethodName: The name of the API method that initiated the // operation. @@ -2917,9 +2527,8 @@ type OperationMetadata struct { EndTime string `json:"endTime,omitempty"` // LogsUrl: A link to audit and error logs in the log viewer. Error logs - // are generated - // only by some operations, listed at - // [Viewing logs](/healthcare/docs/how-tos/logging). + // are generated only by some operations, listed at [Viewing + // logs](/healthcare/docs/how-tos/logging). LogsUrl string `json:"logsUrl,omitempty"` // ForceSendFields is a list of field names (e.g. "ApiMethodName") to @@ -2973,16 +2582,14 @@ func (s *ParsedData) MarshalJSON() ([]byte, error) { } // ParserConfig: The configuration for the parser. It determines how the -// server parses the -// messages. +// server parses the messages. type ParserConfig struct { // AllowNullHeader: Determines whether messages with no header are // allowed. AllowNullHeader bool `json:"allowNullHeader,omitempty"` // SegmentTerminator: Byte(s) to use as the segment terminator. If this - // is unset, '\r' is - // used as segment terminator. + // is unset, '\r' is used as segment terminator. SegmentTerminator string `json:"segmentTerminator,omitempty"` // ForceSendFields is a list of field names (e.g. "AllowNullHeader") to @@ -3041,154 +2648,77 @@ func (s *PatientId) MarshalJSON() ([]byte, error) { } // Policy: An Identity and Access Management (IAM) policy, which -// specifies access -// controls for Google Cloud resources. -// -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or -// more -// `members` to a single `role`. Members can be user accounts, service -// accounts, +// specifies access controls for Google Cloud resources. A `Policy` is a +// collection of `bindings`. A `binding` binds one or more `members` to +// a single `role`. Members can be user accounts, service accounts, // Google groups, and domains (such as G Suite). A `role` is a named -// list of -// permissions; each `role` can be an IAM predefined role or a -// user-created -// custom role. -// -// For some types of Google Cloud resources, a `binding` can also -// specify a -// `condition`, which is a logical expression that allows access to a -// resource -// only if the expression evaluates to `true`. A condition can add -// constraints -// based on attributes of the request, the resource, or both. To learn -// which -// resources support conditions in their IAM policies, see the -// [IAM +// list of permissions; each `role` can be an IAM predefined role or a +// user-created custom role. For some types of Google Cloud resources, a +// `binding` can also specify a `condition`, which is a logical +// expression that allows access to a resource only if the expression +// evaluates to `true`. A condition can add constraints based on +// attributes of the request, the resource, or both. 
To learn which +// resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p -// olicies). -// -// **JSON example:** -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": [ -// "user:eve@example.com" -// ], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// -// **YAML example:** -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// - etag: BwWWja0YfJA= -// - version: 3 -// -// For a description of IAM and its features, see the -// [IAM documentation](https://cloud.google.com/iam/docs/). +// olicies). **JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: +// 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). type Policy struct { // AuditConfigs: Specifies cloud audit logging configuration for this // policy. AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` // Bindings: Associates a list of `members` to a `role`. Optionally, may - // specify a - // `condition` that determines how and when the `bindings` are applied. - // Each - // of the `bindings` must contain at least one member. + // specify a `condition` that determines how and when the `bindings` are + // applied. Each of the `bindings` must contain at least one member. Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. 
- // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. **Important:** If you use IAM + // Conditions, you must include the `etag` field whenever you call + // `setIamPolicy`. If you omit this field, then IAM allows you to + // overwrite a version `3` policy with a version `1` policy, and all of // the conditions in the version `3` policy are lost. Etag string `json:"etag,omitempty"` - // Version: Specifies the format of the policy. - // - // Valid values are `0`, `1`, and `3`. Requests that specify an invalid - // value - // are rejected. - // + // Version: Specifies the format of the policy. Valid values are `0`, + // `1`, and `3`. Requests that specify an invalid value are rejected. // Any operation that affects conditional role bindings must specify - // version - // `3`. This requirement applies to the following operations: - // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a - // policy - // that includes conditions - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of - // the conditions in the version `3` policy are lost. - // - // If a policy does not include any conditions, operations on that - // policy may - // specify any valid version or leave the field unset. - // - // To learn which resources support conditions in their IAM policies, - // see the - // [IAM + // version `3`. This requirement applies to the following operations: * + // Getting a policy that includes a conditional role binding * Adding a + // conditional role binding to a policy * Changing a conditional role + // binding in a policy * Removing any role binding, with or without a + // condition, from a policy that includes conditions **Important:** If + // you use IAM Conditions, you must include the `etag` field whenever + // you call `setIamPolicy`. If you omit this field, then IAM allows you + // to overwrite a version `3` policy with a version `1` policy, and all + // of the conditions in the version `3` policy are lost. 
If a policy + // does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. To learn which + // resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p // olicies). Version int64 `json:"version,omitempty"` @@ -3256,20 +2786,15 @@ func (s *ProgressCounter) MarshalJSON() ([]byte, error) { } // RedactConfig: Define how to redact sensitive values. Default -// behaviour is erase. -// For example, "My name is Jane." becomes "My name is ." +// behaviour is erase. For example, "My name is Jane." becomes "My name +// is ." type RedactConfig struct { } -// ReplaceWithInfoTypeConfig: When using -// the -// INSPECT_AND_TRANSFORM +// ReplaceWithInfoTypeConfig: When using the INSPECT_AND_TRANSFORM // action, each match is replaced with the name of the info_type. For -// example, -// "My name is Jane" becomes "My name is [PERSON_NAME]." -// The -// TRANSFORM -// action is equivalent to redacting. +// example, "My name is Jane" becomes "My name is [PERSON_NAME]." The +// TRANSFORM action is equivalent to redacting. type ReplaceWithInfoTypeConfig struct { } @@ -3302,40 +2827,29 @@ func (s *Resources) MarshalJSON() ([]byte, error) { } // SchemaConfig: Configuration for the FHIR BigQuery schema. Determines -// how the server -// generates the schema. +// how the server generates the schema. type SchemaConfig struct { // RecursiveStructureDepth: The depth for all recursive structures in - // the output analytics - // schema. For example, `concept` in the CodeSystem resource is a - // recursive - // structure; when the depth is 2, the CodeSystem table will have a - // column - // called `concept.concept` but not `concept.concept.concept`. If - // not - // specified or set to 0, the server will use the default value 2. - // The - // maximum depth allowed is 5. + // the output analytics schema. For example, `concept` in the CodeSystem + // resource is a recursive structure; when the depth is 2, the + // CodeSystem table will have a column called `concept.concept` but not + // `concept.concept.concept`. If not specified or set to 0, the server + // will use the default value 2. The maximum depth allowed is 5. RecursiveStructureDepth int64 `json:"recursiveStructureDepth,omitempty,string"` - // SchemaType: Specifies the output schema type. + // SchemaType: Specifies the output schema type. Schema type is + // required. // // Possible values: - // "SCHEMA_TYPE_UNSPECIFIED" - No schema type specified. - // "ANALYTICS" - Analytics schema defined by the FHIR community. - // See + // "SCHEMA_TYPE_UNSPECIFIED" - No schema type specified. This type is + // unsupported. + // "ANALYTICS" - Analytics schema defined by the FHIR community. See // https://github.com/FHIR/sql-on-fhir/blob/master/sql-on-fhir.md. - // - // BigQu - // ery only allows a maximum of 10,000 columns per table. Due to - // this - // limitation, the server will not generate schemas for fields of - // type - // `Resource`, which can hold any resource type. The affected fields - // are - // `Parameters.parameter.resource`, `Bundle.entry.resource`, - // and - // `Bundle.entry.response.outcome`. + // BigQuery only allows a maximum of 10,000 columns per table. Due to + // this limitation, the server will not generate schemas for fields of + // type `Resource`, which can hold any resource type. 
The affected + // fields are `Parameters.parameter.resource`, `Bundle.entry.resource`, + // and `Bundle.entry.response.outcome`. SchemaType string `json:"schemaType,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -3367,15 +2881,12 @@ func (s *SchemaConfig) MarshalJSON() ([]byte, error) { // specified FHIR store. type SearchResourcesRequest struct { // ResourceType: The FHIR resource type to search, such as Patient or - // Observation. For a - // complete list, see the FHIR Resource - // Index - // ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/resourcel - // ist.html), - // [STU3](http://hl7.org/implement/standards/fhir/STU3/resourc - // elist.html), - // [R4](http://hl7.org/implement/standards/fhir/R4/resourcel - // ist.html)). + // Observation. For a complete list, see the FHIR Resource Index + // ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/resourcelist.ht + // ml), + // [STU3](http://hl7.org/implement/standards/fhir/STU3/resourcelist.html) + // , + // [R4](http://hl7.org/implement/standards/fhir/R4/resourcelist.html)). ResourceType string `json:"resourceType,omitempty"` // ForceSendFields is a list of field names (e.g. "ResourceType") to @@ -3403,26 +2914,16 @@ func (s *SearchResourcesRequest) MarshalJSON() ([]byte, error) { // Segment: A segment in a structured format. type Segment struct { - // Fields: A mapping from the positional location to the value. - // The key string uses zero-based indexes separated by dots to - // identify - // Fields, components and sub-components. A bracket notation is also - // used to - // identify different instances of a repeated field. - // Regex for key: (\d+)(\[\d+\])?(.\d+)?(.\d+)? - // - // Examples of (key, value) pairs: - // - // * (0.1, "hemoglobin") denotes that the first component of Field 0 has - // the - // value "hemoglobin". - // - // * (1.1.2, "CBC") denotes that the second sub-component of the first - // component of Field 1 has the value "CBC". - // - // * (1[0].1, "HbA1c") denotes that the first component of the - // first Instance of Field 1, which is repeated, has the value - // "HbA1c". + // Fields: A mapping from the positional location to the value. The key + // string uses zero-based indexes separated by dots to identify Fields, + // components and sub-components. A bracket notation is also used to + // identify different instances of a repeated field. Regex for key: + // (\d+)(\[\d+\])?(.\d+)?(.\d+)? Examples of (key, value) pairs: * (0.1, + // "hemoglobin") denotes that the first component of Field 0 has the + // value "hemoglobin". * (1.1.2, "CBC") denotes that the second + // sub-component of the first component of Field 1 has the value "CBC". + // * (1[0].1, "HbA1c") denotes that the first component of the first + // Instance of Field 1, which is repeated, has the value "HbA1c". Fields map[string]string `json:"fields,omitempty"` // SegmentId: A string that indicates the type of segment. For example, @@ -3430,8 +2931,7 @@ type Segment struct { SegmentId string `json:"segmentId,omitempty"` // SetId: Set ID for segments that can be in a set. This can be empty if - // it's - // missing or isn't applicable. + // it's missing or isn't applicable. SetId string `json:"setId,omitempty"` // ForceSendFields is a list of field names (e.g. "Fields") to @@ -3460,20 +2960,15 @@ func (s *Segment) MarshalJSON() ([]byte, error) { // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. 
The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. + // `resource`. The size of the policy is limited to a few 10s of KB. An + // empty policy is a valid policy but certain Cloud Platform services + // (such as Projects) might reject them. Policy *Policy `json:"policy,omitempty"` // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the - // policy to modify. Only - // the fields in the mask will be modified. If no mask is provided, - // the - // following default mask is used: - // - // `paths: "bindings, etag" + // policy to modify. Only the fields in the mask will be modified. If no + // mask is provided, the following default mask is used: `paths: + // "bindings, etag" UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. "Policy") to @@ -3500,32 +2995,24 @@ func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. -// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. -// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -3551,81 +3038,46 @@ func (s *Status) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// StreamConfig: This structure contains configuration for streaming -// FHIR export. +// StreamConfig: Contains configuration for streaming FHIR export. type StreamConfig struct { // BigqueryDestination: The destination BigQuery structure that contains - // both the dataset - // location and corresponding schema config. - // - // The output is organized in one table per resource type. The - // server - // reuses the existing tables (if any) that are named after the - // resource - // types, e.g. "Patient", "Observation". 
When there is no existing - // table - // for a given resource type, the server attempts to create one. - // - // When a table schema doesn't align with the schema config, - // either - // because of existing incompatible schema or out of band - // incompatible - // modification, the server does not stream in new data. - // - // BigQuery imposes a 1 MB limit on streaming insert row size, - // therefore - // any resource mutation that generates more than 1 MB of BigQuery - // data - // will not be streamed. - // - // One resolution in this case is to delete the incompatible - // table and let the server recreate one, though the newly created - // table - // only contains data after the table recreation. - // - // Results are appended to the corresponding BigQuery tables. - // Different - // versions of the same resource are distinguishable by the - // meta.versionId - // and meta.lastUpdated columns. The operation (CREATE/UPDATE/DELETE) - // that - // results in the new version is recorded in the meta.tag. - // - // The tables contain all historical resource versions since streaming - // was - // enabled. For query convenience, the server also creates one view - // per - // table of the same name containing only the current resource - // version. - // - // The streamed data in the BigQuery dataset is not guaranteed to - // be + // both the dataset location and corresponding schema config. The output + // is organized in one table per resource type. The server reuses the + // existing tables (if any) that are named after the resource types. For + // example, "Patient", "Observation". When there is no existing table + // for a given resource type, the server attempts to create one. When a + // table schema doesn't align with the schema config, either because of + // existing incompatible schema or out of band incompatible + // modification, the server does not stream in new data. BigQuery + // imposes a 1 MB limit on streaming insert row size, therefore any + // resource mutation that generates more than 1 MB of BigQuery data is + // not streamed. One resolution in this case is to delete the + // incompatible table and let the server recreate one, though the newly + // created table only contains data after the table recreation. Results + // are appended to the corresponding BigQuery tables. Different versions + // of the same resource are distinguishable by the meta.versionId and + // meta.lastUpdated columns. The operation (CREATE/UPDATE/DELETE) that + // results in the new version is recorded in the meta.tag. The tables + // contain all historical resource versions since streaming was enabled. + // For query convenience, the server also creates one view per table of + // the same name containing only the current resource version. The + // streamed data in the BigQuery dataset is not guaranteed to be // completely unique. The combination of the id and meta.versionId - // columns - // should ideally identify a single unique row. But in rare - // cases, - // duplicates may exist. At query time, users may use the SQL - // select - // statement to keep only one of the duplicate rows given an id - // and - // meta.versionId pair. Alternatively, the server created view - // mentioned - // above also filters out duplicates. - // - // If a resource mutation cannot be streamed to BigQuery, errors will - // be - // logged to Cloud Logging (see - // [Viewing logs](/healthcare/docs/how-tos/logging)). + // columns should ideally identify a single unique row. But in rare + // cases, duplicates may exist. 
At query time, users may use the SQL + // select statement to keep only one of the duplicate rows given an id + // and meta.versionId pair. Alternatively, the server created view + // mentioned above also filters out duplicates. If a resource mutation + // cannot be streamed to BigQuery, errors are logged to Cloud Logging. + // For more information, see [Viewing error logs in Cloud + // Logging](/healthcare/docs/how-tos/logging)). BigqueryDestination *GoogleCloudHealthcareV1FhirBigQueryDestination `json:"bigqueryDestination,omitempty"` // ResourceTypes: Supply a FHIR resource type (such as "Patient" or - // "Observation"). - // See https://www.hl7.org/fhir/valueset-resource-types.html for a list - // of - // all FHIR resource types. - // The server treats an empty list as an intent to stream all - // the - // supported resource types in this FHIR store. + // "Observation"). See + // https://www.hl7.org/fhir/valueset-resource-types.html for a list of + // all FHIR resource types. The server treats an empty list as an intent + // to stream all the supported resource types in this FHIR store. ResourceTypes []string `json:"resourceTypes,omitempty"` // ForceSendFields is a list of field names (e.g. "BigqueryDestination") @@ -3655,13 +3107,8 @@ func (s *StreamConfig) MarshalJSON() ([]byte, error) { // TagFilterList: List of tags to be filtered. type TagFilterList struct { // Tags: Tags to be filtered. Tags must be DICOM Data Elements, File - // Meta - // Elements, or Directory Structuring Elements, as defined - // at: - // http://dicom.nema.org/medical/dicom/current/output/html/part06.htm - // l#table_6-1,. - // They may be provided by "Keyword" or "Tag". For example - // "PatientID", + // Meta Elements, or Directory Structuring Elements, as defined at: + // http://dicom.nema.org/medical/dicom/current/output/html/part06.html#table_6-1,. They may be provided by "Keyword" or "Tag". For example "PatientID", // "00100010". Tags []string `json:"tags,omitempty"` @@ -3692,11 +3139,8 @@ func (s *TagFilterList) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. - // Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For - // more - // information see - // [IAM + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. For more information see [IAM // Overview](https://cloud.google.com/iam/docs/overview#permissions). Permissions []string `json:"permissions,omitempty"` @@ -3727,8 +3171,7 @@ func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. + // the caller is allowed. Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -3758,32 +3201,379 @@ func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TextConfig struct { - // Transformations: The transformations to apply to the detected data. - Transformations []*InfoTypeTransformation `json:"transformations,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Transformations") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` +type TextConfig struct { + // Transformations: The transformations to apply to the detected data. + Transformations []*InfoTypeTransformation `json:"transformations,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Transformations") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Transformations") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TextConfig) MarshalJSON() ([]byte, error) { + type NoMethod TextConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "healthcare.projects.locations.get": + +type ProjectsLocationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets information about a location. +func (r *ProjectsLocationsService) Get(name string) *ProjectsLocationsGetCall { + c := &ProjectsLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsGetCall) Context(ctx context.Context) *ProjectsLocationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
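The rewrapped comments above describe how a FHIR store's StreamConfig streams resource changes into BigQuery: one table per resource type, versions distinguished by meta.versionId and meta.lastUpdated, and duplicates filtered either in SQL or via the server-created views. As a rough sketch of the struct being documented, a StreamConfig might be populated as below. This assumes the google.golang.org/api/healthcare/v1 package imported as healthcare; the dataset URI and schema settings are placeholder assumptions, not values from this change.

// Illustrative only: stream Patient and Observation changes to BigQuery.
// The dataset URI and schema settings are assumed placeholders.
streamCfg := &healthcare.StreamConfig{
	BigqueryDestination: &healthcare.GoogleCloudHealthcareV1FhirBigQueryDestination{
		DatasetUri: "bq://my-project.my_fhir_dataset", // assumed example URI
		SchemaConfig: &healthcare.SchemaConfig{
			SchemaType:              "ANALYTICS",
			RecursiveStructureDepth: 2,
		},
	},
	// An empty list would stream all supported resource types.
	ResourceTypes: []string{"Patient", "Observation"},
}
_ = streamCfg // typically attached to a FHIR store's streaming configuration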
+func (c *ProjectsLocationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "healthcare.projects.locations.get" call. +// Exactly one of *Location or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Location.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsGetCall) Do(opts ...googleapi.CallOption) (*Location, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Location{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets information about a location.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}", + // "httpMethod": "GET", + // "id": "healthcare.projects.locations.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name for the location.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Location" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "healthcare.projects.locations.list": + +type ProjectsLocationsListCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists information about the supported locations for this +// service. +func (r *ProjectsLocationsService) List(name string) *ProjectsLocationsListCall { + c := &ProjectsLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Filter sets the optional parameter "filter": The standard list +// filter. 
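The ProjectsLocationsService.Get call added above fetches a single location. A minimal sketch of invoking it through the generated client follows; the project and location IDs are placeholders and Application Default Credentials are assumed.

package main

import (
	"context"
	"fmt"
	"log"

	healthcare "google.golang.org/api/healthcare/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials by default.
	svc, err := healthcare.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder resource name; substitute a real project and location.
	name := "projects/my-project/locations/us-central1"
	loc, err := svc.Projects.Locations.Get(name).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(loc.LocationId, loc.Name)
}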
+func (c *ProjectsLocationsListCall) Filter(filter string) *ProjectsLocationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": The standard list +// page size. +func (c *ProjectsLocationsListCall) PageSize(pageSize int64) *ProjectsLocationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The standard list +// page token. +func (c *ProjectsLocationsListCall) PageToken(pageToken string) *ProjectsLocationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsListCall) Context(ctx context.Context) *ProjectsLocationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/locations") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "healthcare.projects.locations.list" call. +// Exactly one of *ListLocationsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListLocationsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsListCall) Do(opts ...googleapi.CallOption) (*ListLocationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListLocationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists information about the supported locations for this service.", + // "flatPath": "v1/projects/{projectsId}/locations", + // "httpMethod": "GET", + // "id": "healthcare.projects.locations.list", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "filter": { + // "description": "The standard list filter.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "The resource that owns the locations collection, if applicable.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "The standard list page size.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The standard list page token.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+name}/locations", + // "response": { + // "$ref": "ListLocationsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } - // NullFields is a list of field names (e.g. "Transformations") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` } -func (s *TextConfig) MarshalJSON() ([]byte, error) { - type NoMethod TextConfig - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsListCall) Pages(ctx context.Context, f func(*ListLocationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } // method id "healthcare.projects.locations.datasets.create": @@ -3798,14 +3588,9 @@ type ProjectsLocationsDatasetsCreateCall struct { } // Create: Creates a new health dataset. Results are returned through -// the -// Operation interface which returns either an -// `Operation.response` which contains a Dataset or -// `Operation.error`. The metadata -// field type is OperationMetadata. -// A Google Cloud Platform project can contain up to 500 datasets across -// all -// regions. 
+// the Operation interface which returns either an `Operation.response` +// which contains a Dataset or `Operation.error`. The metadata field +// type is OperationMetadata. func (r *ProjectsLocationsDatasetsService) Create(parent string, dataset *Dataset) *ProjectsLocationsDatasetsCreateCall { c := &ProjectsLocationsDatasetsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -3814,9 +3599,8 @@ func (r *ProjectsLocationsDatasetsService) Create(parent string, dataset *Datase } // DatasetId sets the optional parameter "datasetId": The ID of the -// dataset that is being created. -// The string must match the following regex: -// `[\p{L}\p{N}_\-\.]{1,256}`. +// dataset that is being created. The string must match the following +// regex: `[\p{L}\p{N}_\-\.]{1,256}`. func (c *ProjectsLocationsDatasetsCreateCall) DatasetId(datasetId string) *ProjectsLocationsDatasetsCreateCall { c.urlParams_.Set("datasetId", datasetId) return c @@ -3849,7 +3633,7 @@ func (c *ProjectsLocationsDatasetsCreateCall) Header() http.Header { func (c *ProjectsLocationsDatasetsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3913,7 +3697,7 @@ func (c *ProjectsLocationsDatasetsCreateCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Creates a new health dataset. Results are returned through the\nOperation interface which returns either an\n`Operation.response` which contains a Dataset or\n`Operation.error`. The metadata\nfield type is OperationMetadata.\nA Google Cloud Platform project can contain up to 500 datasets across all\nregions.", + // "description": "Creates a new health dataset. Results are returned through the Operation interface which returns either an `Operation.response` which contains a Dataset or `Operation.error`. The metadata field type is OperationMetadata.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.create", @@ -3922,12 +3706,12 @@ func (c *ProjectsLocationsDatasetsCreateCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "datasetId": { - // "description": "The ID of the dataset that is being created.\nThe string must match the following regex: `[\\p{L}\\p{N}_\\-\\.]{1,256}`.", + // "description": "The ID of the dataset that is being created. The string must match the following regex: `[\\p{L}\\p{N}_\\-\\.]{1,256}`.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "The name of the project where the server creates the dataset. For\nexample, `projects/{project_id}/locations/{location_id}`.", + // "description": "The name of the project where the server creates the dataset. For example, `projects/{project_id}/locations/{location_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -3960,23 +3744,15 @@ type ProjectsLocationsDatasetsDeidentifyCall struct { } // Deidentify: Creates a new dataset containing de-identified data from -// the source -// dataset. The metadata field type -// is OperationMetadata. -// If the request is successful, the -// response field type is -// DeidentifySummary. -// If errors occur, error is set. 
-// The LRO result may still be successful if de-identification fails for -// some -// DICOM instances. The new de-identified dataset will not contain -// these -// failed resources. Failed resource totals are tracked -// in -// Operation.metadata. -// Error details are also logged to Cloud Logging. For more -// information, -// see [Viewing logs](/healthcare/docs/how-tos/logging). +// the source dataset. The metadata field type is OperationMetadata. If +// the request is successful, the response field type is +// DeidentifySummary. If errors occur, error is set. The LRO result may +// still be successful if de-identification fails for some DICOM +// instances. The new de-identified dataset will not contain these +// failed resources. Failed resource totals are tracked in +// Operation.metadata. Error details are also logged to Cloud Logging. +// For more information, see [Viewing +// logs](/healthcare/docs/how-tos/logging). func (r *ProjectsLocationsDatasetsService) Deidentify(sourceDataset string, deidentifydatasetrequest *DeidentifyDatasetRequest) *ProjectsLocationsDatasetsDeidentifyCall { c := &ProjectsLocationsDatasetsDeidentifyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceDataset = sourceDataset @@ -4011,7 +3787,7 @@ func (c *ProjectsLocationsDatasetsDeidentifyCall) Header() http.Header { func (c *ProjectsLocationsDatasetsDeidentifyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4075,7 +3851,7 @@ func (c *ProjectsLocationsDatasetsDeidentifyCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Creates a new dataset containing de-identified data from the source\ndataset. The metadata field type\nis OperationMetadata.\nIf the request is successful, the\nresponse field type is\nDeidentifySummary.\nIf errors occur, error is set.\nThe LRO result may still be successful if de-identification fails for some\nDICOM instances. The new de-identified dataset will not contain these\nfailed resources. Failed resource totals are tracked in\nOperation.metadata.\nError details are also logged to Cloud Logging. For more information,\nsee [Viewing logs](/healthcare/docs/how-tos/logging).", + // "description": "Creates a new dataset containing de-identified data from the source dataset. The metadata field type is OperationMetadata. If the request is successful, the response field type is DeidentifySummary. If errors occur, error is set. The LRO result may still be successful if de-identification fails for some DICOM instances. The new de-identified dataset will not contain these failed resources. Failed resource totals are tracked in Operation.metadata. Error details are also logged to Cloud Logging. For more information, see [Viewing logs](/healthcare/docs/how-tos/logging).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}:deidentify", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.deidentify", @@ -4084,7 +3860,7 @@ func (c *ProjectsLocationsDatasetsDeidentifyCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "sourceDataset": { - // "description": "Source dataset resource name. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", + // "description": "Source dataset resource name. 
For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+$", // "required": true, @@ -4116,10 +3892,8 @@ type ProjectsLocationsDatasetsDeleteCall struct { } // Delete: Deletes the specified health dataset and all data contained -// in the dataset. -// Deleting a dataset does not affect the sources from which the dataset -// was -// imported (if any). +// in the dataset. Deleting a dataset does not affect the sources from +// which the dataset was imported (if any). func (r *ProjectsLocationsDatasetsService) Delete(name string) *ProjectsLocationsDatasetsDeleteCall { c := &ProjectsLocationsDatasetsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4153,7 +3927,7 @@ func (c *ProjectsLocationsDatasetsDeleteCall) Header() http.Header { func (c *ProjectsLocationsDatasetsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4212,7 +3986,7 @@ func (c *ProjectsLocationsDatasetsDeleteCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Deletes the specified health dataset and all data contained in the dataset.\nDeleting a dataset does not affect the sources from which the dataset was\nimported (if any).", + // "description": "Deletes the specified health dataset and all data contained in the dataset. Deleting a dataset does not affect the sources from which the dataset was imported (if any).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}", // "httpMethod": "DELETE", // "id": "healthcare.projects.locations.datasets.delete", @@ -4221,7 +3995,7 @@ func (c *ProjectsLocationsDatasetsDeleteCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "name": { - // "description": "The name of the dataset to delete. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", + // "description": "The name of the dataset to delete. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+$", // "required": true, @@ -4294,7 +4068,7 @@ func (c *ProjectsLocationsDatasetsGetCall) Header() http.Header { func (c *ProjectsLocationsDatasetsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4365,7 +4139,7 @@ func (c *ProjectsLocationsDatasetsGetCall) Do(opts ...googleapi.CallOption) (*Da // ], // "parameters": { // "name": { - // "description": "The name of the dataset to read. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", + // "description": "The name of the dataset to read. 
For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+$", // "required": true, @@ -4394,9 +4168,8 @@ type ProjectsLocationsDatasetsGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy // set. func (r *ProjectsLocationsDatasetsService) GetIamPolicy(resource string) *ProjectsLocationsDatasetsGetIamPolicyCall { c := &ProjectsLocationsDatasetsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -4406,24 +4179,14 @@ func (r *ProjectsLocationsDatasetsService) GetIamPolicy(resource string) *Projec // OptionsRequestedPolicyVersion sets the optional parameter // "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. -// -// Requests for policies with any conditional bindings must specify -// version 3. -// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. -// -// To learn which resources support conditions in their IAM policies, -// see -// the -// [IAM -// documentation](https://cloud.google.com/iam/help/conditions/r -// esource-policies). +// returned. Valid values are 0, 1, and 3. Requests specifying an +// invalid value will be rejected. Requests for policies with any +// conditional bindings must specify version 3. Policies without any +// conditional bindings may specify any valid value or leave the field +// unset. To learn which resources support conditions in their IAM +// policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). func (c *ProjectsLocationsDatasetsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsLocationsDatasetsGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -4466,7 +4229,7 @@ func (c *ProjectsLocationsDatasetsGetIamPolicyCall) Header() http.Header { func (c *ProjectsLocationsDatasetsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4528,7 +4291,7 @@ func (c *ProjectsLocationsDatasetsGetIamPolicyCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}:getIamPolicy", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.getIamPolicy", @@ -4537,13 +4300,13 @@ func (c *ProjectsLocationsDatasetsGetIamPolicyCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. 
The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + // "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" // }, // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+$", // "required": true, @@ -4580,8 +4343,8 @@ func (r *ProjectsLocationsDatasetsService) List(parent string) *ProjectsLocation } // PageSize sets the optional parameter "pageSize": The maximum number -// of items to return. Capped to 100 if not specified. -// May not be larger than 1000. +// of items to return. Capped to 100 if not specified. May not be larger +// than 1000. func (c *ProjectsLocationsDatasetsListCall) PageSize(pageSize int64) *ProjectsLocationsDatasetsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c @@ -4631,7 +4394,7 @@ func (c *ProjectsLocationsDatasetsListCall) Header() http.Header { func (c *ProjectsLocationsDatasetsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4702,7 +4465,7 @@ func (c *ProjectsLocationsDatasetsListCall) Do(opts ...googleapi.CallOption) (*L // ], // "parameters": { // "pageSize": { - // "description": "The maximum number of items to return. Capped to 100 if not specified.\nMay not be larger than 1000.", + // "description": "The maximum number of items to return. Capped to 100 if not specified. May not be larger than 1000.", // "format": "int32", // "location": "query", // "type": "integer" @@ -4713,7 +4476,7 @@ func (c *ProjectsLocationsDatasetsListCall) Do(opts ...googleapi.CallOption) (*L // "type": "string" // }, // "parent": { - // "description": "The name of the project whose datasets should be listed.\nFor example, `projects/{project_id}/locations/{location_id}`.", + // "description": "The name of the project whose datasets should be listed. 
For example, `projects/{project_id}/locations/{location_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -4772,11 +4535,8 @@ func (r *ProjectsLocationsDatasetsService) Patch(name string, dataset *Dataset) } // UpdateMask sets the optional parameter "updateMask": The update mask -// applies to the resource. For the `FieldMask` -// definition, -// see -// https://developers.google.com/protocol-buffers/docs/re -// ference/google.protobuf#fieldmask +// applies to the resource. For the `FieldMask` definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask func (c *ProjectsLocationsDatasetsPatchCall) UpdateMask(updateMask string) *ProjectsLocationsDatasetsPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -4809,7 +4569,7 @@ func (c *ProjectsLocationsDatasetsPatchCall) Header() http.Header { func (c *ProjectsLocationsDatasetsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4882,14 +4642,14 @@ func (c *ProjectsLocationsDatasetsPatchCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "name": { - // "description": "Output only. Resource name of the dataset, of the form\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", + // "description": "Resource name of the dataset, of the form `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "The update mask applies to the resource. For the `FieldMask` definition,\nsee\nhttps://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", + // "description": "The update mask applies to the resource. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -4921,11 +4681,8 @@ type ProjectsLocationsDatasetsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -// -// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` -// errors. +// resource. Replaces any existing policy. Can return `NOT_FOUND`, +// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. 
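The hunks above rewrap the doc comments for the dataset Create, Deidentify, Delete, Get, GetIamPolicy, List, Patch, and SetIamPolicy calls. A hedged sketch of the first two operations a caller usually performs, assuming svc is a *healthcare.Service built with healthcare.NewService, that fmt and log are imported, and that all IDs are placeholders:

// Sketch: create a dataset, then read back its IAM policy.
parent := "projects/my-project/locations/us-central1" // placeholder
op, err := svc.Projects.Locations.Datasets.Create(parent, &healthcare.Dataset{}).
	DatasetId("my-dataset"). // must match [\p{L}\p{N}_\-\.]{1,256}
	Do()
if err != nil {
	log.Fatal(err)
}
// Create is long-running; op.Name can be polled through the Operations service.
fmt.Println("operation:", op.Name)

resource := parent + "/datasets/my-dataset"
policy, err := svc.Projects.Locations.Datasets.GetIamPolicy(resource).
	OptionsRequestedPolicyVersion(3). // version 3 is required for conditional bindings
	Do()
if err != nil {
	log.Fatal(err)
}
fmt.Println("bindings:", len(policy.Bindings))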
func (r *ProjectsLocationsDatasetsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsDatasetsSetIamPolicyCall { c := &ProjectsLocationsDatasetsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4960,7 +4717,7 @@ func (c *ProjectsLocationsDatasetsSetIamPolicyCall) Header() http.Header { func (c *ProjectsLocationsDatasetsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5024,7 +4781,7 @@ func (c *ProjectsLocationsDatasetsSetIamPolicyCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}:setIamPolicy", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.setIamPolicy", @@ -5033,7 +4790,7 @@ func (c *ProjectsLocationsDatasetsSetIamPolicyCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+$", // "required": true, @@ -5066,16 +4823,11 @@ type ProjectsLocationsDatasetsTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a `NOT_FOUND` error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. This +// operation may "fail open" without warning. 
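TestIamPermissions, documented above, is intended for building permission-aware UIs rather than for authorization checks, and it returns only the subset of requested permissions the caller holds. A small sketch, with svc, resource, and the permission names carried over as assumptions from the previous example:

req := &healthcare.TestIamPermissionsRequest{
	// Permission names are examples; wildcards such as "healthcare.*" are not allowed.
	Permissions: []string{"healthcare.datasets.get", "healthcare.datasets.update"},
}
resp, err := svc.Projects.Locations.Datasets.TestIamPermissions(resource, req).Do()
if err != nil {
	log.Fatal(err)
}
// resp.Permissions holds only the permissions the caller is allowed.
fmt.Println(resp.Permissions)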
func (r *ProjectsLocationsDatasetsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsDatasetsTestIamPermissionsCall { c := &ProjectsLocationsDatasetsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5110,7 +4862,7 @@ func (c *ProjectsLocationsDatasetsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsLocationsDatasetsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5174,7 +4926,7 @@ func (c *ProjectsLocationsDatasetsTestIamPermissionsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}:testIamPermissions", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.testIamPermissions", @@ -5183,7 +4935,7 @@ func (c *ProjectsLocationsDatasetsTestIamPermissionsCall) Do(opts ...googleapi.C // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+$", // "required": true, @@ -5224,8 +4976,8 @@ func (r *ProjectsLocationsDatasetsDicomStoresService) Create(parent string, dico } // DicomStoreId sets the optional parameter "dicomStoreId": The ID of -// the DICOM store that is being created. -// Any string value up to 256 characters in length. +// the DICOM store that is being created. Any string value up to 256 +// characters in length. 
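The DicomStoreId parameter described above names the DICOM store being created inside a dataset. A sketch of the create call, again assuming svc plus fmt and log, with placeholder resource names:

datasetName := "projects/my-project/locations/us-central1/datasets/my-dataset" // placeholder
store, err := svc.Projects.Locations.Datasets.DicomStores.
	Create(datasetName, &healthcare.DicomStore{}).
	DicomStoreId("my-dicom-store"). // any string value up to 256 characters
	Do()
if err != nil {
	log.Fatal(err)
}
fmt.Println("created:", store.Name)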
func (c *ProjectsLocationsDatasetsDicomStoresCreateCall) DicomStoreId(dicomStoreId string) *ProjectsLocationsDatasetsDicomStoresCreateCall { c.urlParams_.Set("dicomStoreId", dicomStoreId) return c @@ -5258,7 +5010,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresCreateCall) Header() http.Header { func (c *ProjectsLocationsDatasetsDicomStoresCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5331,7 +5083,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresCreateCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "dicomStoreId": { - // "description": "The ID of the DICOM store that is being created.\nAny string value up to 256 characters in length.", + // "description": "The ID of the DICOM store that is being created. Any string value up to 256 characters in length.", // "location": "query", // "type": "string" // }, @@ -5369,21 +5121,14 @@ type ProjectsLocationsDatasetsDicomStoresDeidentifyCall struct { } // Deidentify: De-identifies data from the source store and writes it to -// the destination -// store. The metadata field type -// is OperationMetadata. -// If the request is successful, the -// response field type is -// DeidentifyDicomStoreSummary. If errors occur, -// error is set. -// The LRO result may still be successful if de-identification fails for -// some -// DICOM instances. The output DICOM store will not contain -// these failed resources. Failed resource totals are tracked -// in -// Operation.metadata. -// Error details are also logged to Cloud Logging -// (see [Viewing logs](/healthcare/docs/how-tos/logging)). +// the destination store. The metadata field type is OperationMetadata. +// If the request is successful, the response field type is +// DeidentifyDicomStoreSummary. If errors occur, error is set. The LRO +// result may still be successful if de-identification fails for some +// DICOM instances. The output DICOM store will not contain these failed +// resources. Failed resource totals are tracked in Operation.metadata. +// Error details are also logged to Cloud Logging (see [Viewing +// logs](/healthcare/docs/how-tos/logging)). func (r *ProjectsLocationsDatasetsDicomStoresService) Deidentify(sourceStore string, deidentifydicomstorerequest *DeidentifyDicomStoreRequest) *ProjectsLocationsDatasetsDicomStoresDeidentifyCall { c := &ProjectsLocationsDatasetsDicomStoresDeidentifyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceStore = sourceStore @@ -5418,7 +5163,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresDeidentifyCall) Header() http.Heade func (c *ProjectsLocationsDatasetsDicomStoresDeidentifyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5482,7 +5227,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresDeidentifyCall) Do(opts ...googleap } return ret, nil // { - // "description": "De-identifies data from the source store and writes it to the destination\nstore. The metadata field type\nis OperationMetadata.\nIf the request is successful, the\nresponse field type is\nDeidentifyDicomStoreSummary. 
If errors occur,\nerror is set.\nThe LRO result may still be successful if de-identification fails for some\nDICOM instances. The output DICOM store will not contain\nthese failed resources. Failed resource totals are tracked in\nOperation.metadata.\nError details are also logged to Cloud Logging\n(see [Viewing logs](/healthcare/docs/how-tos/logging)).", + // "description": "De-identifies data from the source store and writes it to the destination store. The metadata field type is OperationMetadata. If the request is successful, the response field type is DeidentifyDicomStoreSummary. If errors occur, error is set. The LRO result may still be successful if de-identification fails for some DICOM instances. The output DICOM store will not contain these failed resources. Failed resource totals are tracked in Operation.metadata. Error details are also logged to Cloud Logging (see [Viewing logs](/healthcare/docs/how-tos/logging)).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}:deidentify", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.dicomStores.deidentify", @@ -5491,7 +5236,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresDeidentifyCall) Do(opts ...googleap // ], // "parameters": { // "sourceStore": { - // "description": "Source DICOM store resource name. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "Source DICOM store resource name. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -5523,8 +5268,7 @@ type ProjectsLocationsDatasetsDicomStoresDeleteCall struct { } // Delete: Deletes the specified DICOM store and removes all images that -// are contained -// within it. +// are contained within it. func (r *ProjectsLocationsDatasetsDicomStoresService) Delete(name string) *ProjectsLocationsDatasetsDicomStoresDeleteCall { c := &ProjectsLocationsDatasetsDicomStoresDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5558,7 +5302,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresDeleteCall) Header() http.Header { func (c *ProjectsLocationsDatasetsDicomStoresDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5617,7 +5361,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresDeleteCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Deletes the specified DICOM store and removes all images that are contained\nwithin it.", + // "description": "Deletes the specified DICOM store and removes all images that are contained within it.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}", // "httpMethod": "DELETE", // "id": "healthcare.projects.locations.datasets.dicomStores.delete", @@ -5656,12 +5400,9 @@ type ProjectsLocationsDatasetsDicomStoresExportCall struct { } // Export: Exports data to the specified destination by copying it from -// the DICOM -// store. -// Errors are also logged to Cloud Logging. 
For more information, -// see [Viewing logs](/healthcare/docs/how-tos/logging). -// The metadata field type is -// OperationMetadata. +// the DICOM store. Errors are also logged to Cloud Logging. For more +// information, see [Viewing logs](/healthcare/docs/how-tos/logging). +// The metadata field type is OperationMetadata. func (r *ProjectsLocationsDatasetsDicomStoresService) Export(name string, exportdicomdatarequest *ExportDicomDataRequest) *ProjectsLocationsDatasetsDicomStoresExportCall { c := &ProjectsLocationsDatasetsDicomStoresExportCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5696,7 +5437,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresExportCall) Header() http.Header { func (c *ProjectsLocationsDatasetsDicomStoresExportCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5760,7 +5501,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresExportCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Exports data to the specified destination by copying it from the DICOM\nstore.\nErrors are also logged to Cloud Logging. For more information,\nsee [Viewing logs](/healthcare/docs/how-tos/logging).\nThe metadata field type is\nOperationMetadata.", + // "description": "Exports data to the specified destination by copying it from the DICOM store. Errors are also logged to Cloud Logging. For more information, see [Viewing logs](/healthcare/docs/how-tos/logging). The metadata field type is OperationMetadata.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}:export", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.dicomStores.export", @@ -5769,7 +5510,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresExportCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "name": { - // "description": "The DICOM store resource name from which to export the data. For\nexample,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The DICOM store resource name from which to export the data. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -5845,7 +5586,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresGetCall) Header() http.Header { func (c *ProjectsLocationsDatasetsDicomStoresGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5945,9 +5686,8 @@ type ProjectsLocationsDatasetsDicomStoresGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy // set. 
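Export, whose comment is rewrapped above, copies a DICOM store's contents to a destination and reports progress through a long-running Operation, with failures also logged to Cloud Logging. A hedged sketch of a Cloud Storage export; the request field names and the bucket are assumptions based on the v1 surface, not part of this change, and svc, datasetName, fmt, and log are carried over from the earlier examples.

exportReq := &healthcare.ExportDicomDataRequest{
	GcsDestination: &healthcare.GoogleCloudHealthcareV1DicomGcsDestination{
		UriPrefix: "gs://my-bucket/dicom-export/", // placeholder bucket path
	},
}
storeName := datasetName + "/dicomStores/my-dicom-store"
op, err := svc.Projects.Locations.Datasets.DicomStores.Export(storeName, exportReq).Do()
if err != nil {
	log.Fatal(err)
}
// Per-instance export errors surface in the operation metadata and in Cloud Logging.
fmt.Println("export operation:", op.Name)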
func (r *ProjectsLocationsDatasetsDicomStoresService) GetIamPolicy(resource string) *ProjectsLocationsDatasetsDicomStoresGetIamPolicyCall { c := &ProjectsLocationsDatasetsDicomStoresGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -5957,24 +5697,14 @@ func (r *ProjectsLocationsDatasetsDicomStoresService) GetIamPolicy(resource stri // OptionsRequestedPolicyVersion sets the optional parameter // "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. -// -// Requests for policies with any conditional bindings must specify -// version 3. -// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. -// -// To learn which resources support conditions in their IAM policies, -// see -// the -// [IAM -// documentation](https://cloud.google.com/iam/help/conditions/r -// esource-policies). +// returned. Valid values are 0, 1, and 3. Requests specifying an +// invalid value will be rejected. Requests for policies with any +// conditional bindings must specify version 3. Policies without any +// conditional bindings may specify any valid value or leave the field +// unset. To learn which resources support conditions in their IAM +// policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). func (c *ProjectsLocationsDatasetsDicomStoresGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsLocationsDatasetsDicomStoresGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -6017,7 +5747,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresGetIamPolicyCall) Header() http.Hea func (c *ProjectsLocationsDatasetsDicomStoresGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6079,7 +5809,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresGetIamPolicyCall) Do(opts ...google } return ret, nil // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}:getIamPolicy", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.dicomStores.getIamPolicy", @@ -6088,13 +5818,13 @@ func (c *ProjectsLocationsDatasetsDicomStoresGetIamPolicyCall) Do(opts ...google // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. 
Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + // "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" // }, // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -6124,12 +5854,9 @@ type ProjectsLocationsDatasetsDicomStoresImportCall struct { } // Import: Imports data into the DICOM store by copying it from the -// specified source. -// Errors are logged to Cloud Logging. For more information, -// see -// [Viewing logs](/healthcare/docs/how-tos/logging). The -// metadata field type is -// OperationMetadata. +// specified source. Errors are logged to Cloud Logging. For more +// information, see [Viewing logs](/healthcare/docs/how-tos/logging). +// The metadata field type is OperationMetadata. func (r *ProjectsLocationsDatasetsDicomStoresService) Import(name string, importdicomdatarequest *ImportDicomDataRequest) *ProjectsLocationsDatasetsDicomStoresImportCall { c := &ProjectsLocationsDatasetsDicomStoresImportCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -6164,7 +5891,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresImportCall) Header() http.Header { func (c *ProjectsLocationsDatasetsDicomStoresImportCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6228,7 +5955,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresImportCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Imports data into the DICOM store by copying it from the specified source.\nErrors are logged to Cloud Logging. For more information, see\n[Viewing logs](/healthcare/docs/how-tos/logging). The\nmetadata field type is\nOperationMetadata.", + // "description": "Imports data into the DICOM store by copying it from the specified source. Errors are logged to Cloud Logging. For more information, see [Viewing logs](/healthcare/docs/how-tos/logging). 
The metadata field type is OperationMetadata.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}:import", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.dicomStores.import", @@ -6237,7 +5964,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresImportCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "name": { - // "description": "The name of the DICOM store resource into which the data is imported.\nFor example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store resource into which the data is imported. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -6277,11 +6004,8 @@ func (r *ProjectsLocationsDatasetsDicomStoresService) List(parent string) *Proje } // Filter sets the optional parameter "filter": Restricts stores -// returned to those matching a filter. -// Syntax: -// https://cloud.google.com/appengine/docs/standard/python/search -// /query_strings -// Only filtering on labels is supported. For example, +// returned to those matching a filter. Syntax: +// https://cloud.google.com/appengine/docs/standard/python/search/query_strings Only filtering on labels is supported. For example, // `labels.key=value`. func (c *ProjectsLocationsDatasetsDicomStoresListCall) Filter(filter string) *ProjectsLocationsDatasetsDicomStoresListCall { c.urlParams_.Set("filter", filter) @@ -6289,8 +6013,8 @@ func (c *ProjectsLocationsDatasetsDicomStoresListCall) Filter(filter string) *Pr } // PageSize sets the optional parameter "pageSize": Limit on the number -// of DICOM stores to return in a single response. -// If zero the default page size of 100 is used. +// of DICOM stores to return in a single response. If zero the default +// page size of 100 is used. func (c *ProjectsLocationsDatasetsDicomStoresListCall) PageSize(pageSize int64) *ProjectsLocationsDatasetsDicomStoresListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c @@ -6341,7 +6065,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresListCall) Header() http.Header { func (c *ProjectsLocationsDatasetsDicomStoresListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6412,12 +6136,12 @@ func (c *ProjectsLocationsDatasetsDicomStoresListCall) Do(opts ...googleapi.Call // ], // "parameters": { // "filter": { - // "description": "Restricts stores returned to those matching a filter. Syntax:\nhttps://cloud.google.com/appengine/docs/standard/python/search/query_strings\nOnly filtering on labels is supported. For example, `labels.key=value`.", + // "description": "Restricts stores returned to those matching a filter. Syntax: https://cloud.google.com/appengine/docs/standard/python/search/query_strings Only filtering on labels is supported. 
For example, `labels.key=value`.", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Limit on the number of DICOM stores to return in a single response.\nIf zero the default page size of 100 is used.", + // "description": "Limit on the number of DICOM stores to return in a single response. If zero the default page size of 100 is used.", // "format": "int32", // "location": "query", // "type": "integer" @@ -6487,11 +6211,8 @@ func (r *ProjectsLocationsDatasetsDicomStoresService) Patch(name string, dicomst } // UpdateMask sets the optional parameter "updateMask": The update mask -// applies to the resource. For the `FieldMask` -// definition, -// see -// https://developers.google.com/protocol-buffers/docs/re -// ference/google.protobuf#fieldmask +// applies to the resource. For the `FieldMask` definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask func (c *ProjectsLocationsDatasetsDicomStoresPatchCall) UpdateMask(updateMask string) *ProjectsLocationsDatasetsDicomStoresPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -6524,7 +6245,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresPatchCall) Header() http.Header { func (c *ProjectsLocationsDatasetsDicomStoresPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6597,14 +6318,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresPatchCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "name": { - // "description": "Output only. Resource name of the DICOM store, of the form\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "Resource name of the DICOM store, of the form `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "The update mask applies to the resource. For the `FieldMask` definition,\nsee\nhttps://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", + // "description": "The update mask applies to the resource. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -6637,10 +6358,16 @@ type ProjectsLocationsDatasetsDicomStoresSearchForInstancesCall struct { } // SearchForInstances: SearchForInstances returns a list of matching -// instances. -// See -// http://dicom.nema.org/medical/dicom/current/output/html/part18.htm -// l#sect_10.6. +// instances. See [Search Transaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#s +// ect_10.6). For details on the implementation of SearchForInstances, +// see [Search +// transaction](https://cloud.google.com/healthcare/docs/dicom#search_tra +// nsaction) in the Cloud Healthcare API conformance statement. 
For +// samples that show how to call SearchForInstances, see [Searching for +// studies, series, instances, and +// frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#sear +// ching_for_studies_series_instances_and_frames). func (r *ProjectsLocationsDatasetsDicomStoresService) SearchForInstances(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresSearchForInstancesCall { c := &ProjectsLocationsDatasetsDicomStoresSearchForInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6685,7 +6412,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresSearchForInstancesCall) Header() ht func (c *ProjectsLocationsDatasetsDicomStoresSearchForInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6715,7 +6442,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresSearchForInstancesCall) Do(opts ... gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "SearchForInstances returns a list of matching instances. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6.", + // "description": "SearchForInstances returns a list of matching instances. See [Search Transaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6). For details on the implementation of SearchForInstances, see [Search transaction](https://cloud.google.com/healthcare/docs/dicom#search_transaction) in the Cloud Healthcare API conformance statement. For samples that show how to call SearchForInstances, see [Searching for studies, series, instances, and frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#searching_for_studies_series_instances_and_frames).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/instances", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.dicomStores.searchForInstances", @@ -6725,14 +6452,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresSearchForInstancesCall) Do(opts ... // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the SearchForInstancesRequest DICOMweb request. For example,\n`instances`, `series/{series_uid}/instances`, or\n`studies/{study_uid}/instances`.", + // "description": "The path of the SearchForInstancesRequest DICOMweb request. For example, `instances`, `series/{series_uid}/instances`, or `studies/{study_uid}/instances`.", // "location": "path", // "pattern": "^instances$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -6763,9 +6490,16 @@ type ProjectsLocationsDatasetsDicomStoresSearchForSeriesCall struct { } // SearchForSeries: SearchForSeries returns a list of matching series. 
-// See -// http://dicom.nema.org/medical/dicom/current/output/html/part18.htm -// l#sect_10.6. +// See [Search Transaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#s +// ect_10.6). For details on the implementation of SearchForSeries, see +// [Search +// transaction](https://cloud.google.com/healthcare/docs/dicom#search_tra +// nsaction) in the Cloud Healthcare API conformance statement. For +// samples that show how to call SearchForSeries, see [Searching for +// studies, series, instances, and +// frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#sear +// ching_for_studies_series_instances_and_frames). func (r *ProjectsLocationsDatasetsDicomStoresService) SearchForSeries(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresSearchForSeriesCall { c := &ProjectsLocationsDatasetsDicomStoresSearchForSeriesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6810,7 +6544,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresSearchForSeriesCall) Header() http. func (c *ProjectsLocationsDatasetsDicomStoresSearchForSeriesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6840,7 +6574,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresSearchForSeriesCall) Do(opts ...goo gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "SearchForSeries returns a list of matching series. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6.", + // "description": "SearchForSeries returns a list of matching series. See [Search Transaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6). For details on the implementation of SearchForSeries, see [Search transaction](https://cloud.google.com/healthcare/docs/dicom#search_transaction) in the Cloud Healthcare API conformance statement. For samples that show how to call SearchForSeries, see [Searching for studies, series, instances, and frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#searching_for_studies_series_instances_and_frames).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/series", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.dicomStores.searchForSeries", @@ -6850,14 +6584,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresSearchForSeriesCall) Do(opts ...goo // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the SearchForSeries DICOMweb request. For example, `series` or\n`studies/{study_uid}/series`.", + // "description": "The path of the SearchForSeries DICOMweb request. For example, `series` or `studies/{study_uid}/series`.", // "location": "path", // "pattern": "^series$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. 
For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -6888,10 +6622,16 @@ type ProjectsLocationsDatasetsDicomStoresSearchForStudiesCall struct { } // SearchForStudies: SearchForStudies returns a list of matching -// studies. -// See -// http://dicom.nema.org/medical/dicom/current/output/html/part18.htm -// l#sect_10.6. +// studies. See [Search Transaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#s +// ect_10.6). For details on the implementation of SearchForStudies, see +// [Search +// transaction](https://cloud.google.com/healthcare/docs/dicom#search_tra +// nsaction) in the Cloud Healthcare API conformance statement. For +// samples that show how to call SearchForStudies, see [Searching for +// studies, series, instances, and +// frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#sear +// ching_for_studies_series_instances_and_frames). func (r *ProjectsLocationsDatasetsDicomStoresService) SearchForStudies(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresSearchForStudiesCall { c := &ProjectsLocationsDatasetsDicomStoresSearchForStudiesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6936,7 +6676,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresSearchForStudiesCall) Header() http func (c *ProjectsLocationsDatasetsDicomStoresSearchForStudiesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6966,7 +6706,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresSearchForStudiesCall) Do(opts ...go gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "SearchForStudies returns a list of matching studies. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6.", + // "description": "SearchForStudies returns a list of matching studies. See [Search Transaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6). For details on the implementation of SearchForStudies, see [Search transaction](https://cloud.google.com/healthcare/docs/dicom#search_transaction) in the Cloud Healthcare API conformance statement. For samples that show how to call SearchForStudies, see [Searching for studies, series, instances, and frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#searching_for_studies_series_instances_and_frames).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.dicomStores.searchForStudies", @@ -6983,7 +6723,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresSearchForStudiesCall) Do(opts ...go // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. 
For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -7013,11 +6753,8 @@ type ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -// -// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` -// errors. +// resource. Replaces any existing policy. Can return `NOT_FOUND`, +// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. func (r *ProjectsLocationsDatasetsDicomStoresService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall { c := &ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7052,7 +6789,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall) Header() http.Hea func (c *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7116,7 +6853,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall) Do(opts ...google } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}:setIamPolicy", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.dicomStores.setIamPolicy", @@ -7125,7 +6862,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresSetIamPolicyCall) Do(opts ...google // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -7159,11 +6896,15 @@ type ProjectsLocationsDatasetsDicomStoresStoreInstancesCall struct { } // StoreInstances: StoreInstances stores DICOM instances associated with -// study instance unique -// identifiers (SUID). -// See -// http://dicom.nema.org/medical/dicom/current/output/html/part18.htm -// l#sect_10.5. +// study instance unique identifiers (SUID). See [Store Transaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#s +// ect_10.5). For details on the implementation of StoreInstances, see +// [Store +// transaction](https://cloud.google.com/healthcare/docs/dicom#store_tran +// saction) in the Cloud Healthcare API conformance statement. 
For +// samples that show how to call StoreInstances, see [Storing DICOM +// data](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#storin +// g_dicom_data). func (r *ProjectsLocationsDatasetsDicomStoresService) StoreInstances(parent string, dicomWebPath string, body_ io.Reader) *ProjectsLocationsDatasetsDicomStoresStoreInstancesCall { c := &ProjectsLocationsDatasetsDicomStoresStoreInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -7199,7 +6940,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStoreInstancesCall) Header() http.H func (c *ProjectsLocationsDatasetsDicomStoresStoreInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7225,7 +6966,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStoreInstancesCall) Do(opts ...goog gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "StoreInstances stores DICOM instances associated with study instance unique\nidentifiers (SUID). See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.5.", + // "description": "StoreInstances stores DICOM instances associated with study instance unique identifiers (SUID). See [Store Transaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.5). For details on the implementation of StoreInstances, see [Store transaction](https://cloud.google.com/healthcare/docs/dicom#store_transaction) in the Cloud Healthcare API conformance statement. For samples that show how to call StoreInstances, see [Storing DICOM data](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#storing_dicom_data).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.dicomStores.storeInstances", @@ -7235,14 +6976,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresStoreInstancesCall) Do(opts ...goog // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the StoreInstances DICOMweb request. For example,\n`studies/[{study_uid}]`. Note that the `study_uid` is optional.", + // "description": "The path of the StoreInstances DICOMweb request. For example, `studies/[{study_uid}]`. Note that the `study_uid` is optional.", // "location": "path", // "pattern": "^studies$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -7275,16 +7016,11 @@ type ProjectsLocationsDatasetsDicomStoresTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a `NOT_FOUND` error. 
-// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. This +// operation may "fail open" without warning. func (r *ProjectsLocationsDatasetsDicomStoresService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsDatasetsDicomStoresTestIamPermissionsCall { c := &ProjectsLocationsDatasetsDicomStoresTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7319,7 +7055,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresTestIamPermissionsCall) Header() ht func (c *ProjectsLocationsDatasetsDicomStoresTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7383,7 +7119,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresTestIamPermissionsCall) Do(opts ... } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}:testIamPermissions", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.dicomStores.testIamPermissions", @@ -7392,7 +7128,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresTestIamPermissionsCall) Do(opts ... // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -7425,11 +7161,15 @@ type ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall struct { } // Delete: DeleteStudy deletes all instances within the given study. -// Delete requests -// are equivalent to the GET requests specified in the Retrieve -// transaction. -// The method returns an Operation which -// will be marked successful when the deletion is complete. +// Delete requests are equivalent to the GET requests specified in the +// Retrieve transaction. 
The method returns an Operation which will be +// marked successful when the deletion is complete. Warning: Inserting +// instances into a study while a delete operation is running for that +// study could result in the new instances not appearing in search +// results until the deletion operation finishes. For samples that show +// how to call DeleteStudy, see [Deleting a study, series, or +// instance](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#de +// leting_a_study_series_or_instance). func (r *ProjectsLocationsDatasetsDicomStoresStudiesService) Delete(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall { c := &ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -7464,7 +7204,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall) Header() http.He func (c *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7524,7 +7264,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesDeleteCall) Do(opts ...googl } return ret, nil // { - // "description": "DeleteStudy deletes all instances within the given study. Delete requests\nare equivalent to the GET requests specified in the Retrieve transaction.\nThe method returns an Operation which\nwill be marked successful when the deletion is complete.", + // "description": "DeleteStudy deletes all instances within the given study. Delete requests are equivalent to the GET requests specified in the Retrieve transaction. The method returns an Operation which will be marked successful when the deletion is complete. Warning: Inserting instances into a study while a delete operation is running for that study could result in the new instances not appearing in search results until the deletion operation finishes. For samples that show how to call DeleteStudy, see [Deleting a study, series, or instance](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#deleting_a_study_series_or_instance).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}", // "httpMethod": "DELETE", // "id": "healthcare.projects.locations.datasets.dicomStores.studies.delete", @@ -7571,11 +7311,16 @@ type ProjectsLocationsDatasetsDicomStoresStudiesRetrieveMetadataCall struct { } // RetrieveMetadata: RetrieveStudyMetadata returns instance associated -// with the given study -// presented as metadata with the bulk data removed. -// See -// http://dicom.nema.org/medical/dicom/current/output/html/part18.htm -// l#sect_10.4. +// with the given study presented as metadata with the bulk data +// removed. See [RetrieveTransaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#s +// ect_10.4). For details on the implementation of +// RetrieveStudyMetadata, see [Metadata +// resources](https://cloud.google.com/healthcare/docs/dicom#metadata_res +// ources) in the Cloud Healthcare API conformance statement. For +// samples that show how to call RetrieveStudyMetadata, see [Retrieving +// metadata](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#re +// trieving_metadata). 
func (r *ProjectsLocationsDatasetsDicomStoresStudiesService) RetrieveMetadata(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresStudiesRetrieveMetadataCall { c := &ProjectsLocationsDatasetsDicomStoresStudiesRetrieveMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -7620,7 +7365,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesRetrieveMetadataCall) Header func (c *ProjectsLocationsDatasetsDicomStoresStudiesRetrieveMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7650,7 +7395,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesRetrieveMetadataCall) Do(opt gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "RetrieveStudyMetadata returns instance associated with the given study\npresented as metadata with the bulk data removed. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + // "description": "RetrieveStudyMetadata returns instance associated with the given study presented as metadata with the bulk data removed. See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4). For details on the implementation of RetrieveStudyMetadata, see [Metadata resources](https://cloud.google.com/healthcare/docs/dicom#metadata_resources) in the Cloud Healthcare API conformance statement. For samples that show how to call RetrieveStudyMetadata, see [Retrieving metadata](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_metadata).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/metadata", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.dicomStores.studies.retrieveMetadata", @@ -7660,14 +7405,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesRetrieveMetadataCall) Do(opt // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the RetrieveStudyMetadata DICOMweb request. For example,\n`studies/{study_uid}/metadata`.", + // "description": "The path of the RetrieveStudyMetadata DICOMweb request. For example, `studies/{study_uid}/metadata`.", // "location": "path", // "pattern": "^studies/[^/]+/metadata$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -7698,10 +7443,16 @@ type ProjectsLocationsDatasetsDicomStoresStudiesRetrieveStudyCall struct { } // RetrieveStudy: RetrieveStudy returns all instances within the given -// study. -// See -// http://dicom.nema.org/medical/dicom/current/output/html/part18.htm -// l#sect_10.4. +// study. See [RetrieveTransaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#s +// ect_10.4). 
For details on the implementation of RetrieveStudy, see +// [DICOM +// study/series/instances](https://cloud.google.com/healthcare/docs/dicom +// #dicom_studyseriesinstances) in the Cloud Healthcare API conformance +// statement. For samples that show how to call RetrieveStudy, see +// [Retrieving DICOM +// data](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrie +// ving_dicom_data). func (r *ProjectsLocationsDatasetsDicomStoresStudiesService) RetrieveStudy(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresStudiesRetrieveStudyCall { c := &ProjectsLocationsDatasetsDicomStoresStudiesRetrieveStudyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -7746,7 +7497,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesRetrieveStudyCall) Header() func (c *ProjectsLocationsDatasetsDicomStoresStudiesRetrieveStudyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7776,7 +7527,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesRetrieveStudyCall) Do(opts . gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "RetrieveStudy returns all instances within the given study. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + // "description": "RetrieveStudy returns all instances within the given study. See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4). For details on the implementation of RetrieveStudy, see [DICOM study/series/instances](https://cloud.google.com/healthcare/docs/dicom#dicom_studyseriesinstances) in the Cloud Healthcare API conformance statement. For samples that show how to call RetrieveStudy, see [Retrieving DICOM data](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_dicom_data).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.dicomStores.studies.retrieveStudy", @@ -7786,14 +7537,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesRetrieveStudyCall) Do(opts . // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the RetrieveStudy DICOMweb request. For example,\n`studies/{study_uid}`.", + // "description": "The path of the RetrieveStudy DICOMweb request. For example, `studies/{study_uid}`.", // "location": "path", // "pattern": "^studies/[^/]+$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -7824,10 +7575,16 @@ type ProjectsLocationsDatasetsDicomStoresStudiesSearchForInstancesCall struct { } // SearchForInstances: SearchForInstances returns a list of matching -// instances. 
-// See -// http://dicom.nema.org/medical/dicom/current/output/html/part18.htm -// l#sect_10.6. +// instances. See [Search Transaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#s +// ect_10.6). For details on the implementation of SearchForInstances, +// see [Search +// transaction](https://cloud.google.com/healthcare/docs/dicom#search_tra +// nsaction) in the Cloud Healthcare API conformance statement. For +// samples that show how to call SearchForInstances, see [Searching for +// studies, series, instances, and +// frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#sear +// ching_for_studies_series_instances_and_frames). func (r *ProjectsLocationsDatasetsDicomStoresStudiesService) SearchForInstances(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresStudiesSearchForInstancesCall { c := &ProjectsLocationsDatasetsDicomStoresStudiesSearchForInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -7872,7 +7629,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSearchForInstancesCall) Head func (c *ProjectsLocationsDatasetsDicomStoresStudiesSearchForInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7902,7 +7659,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSearchForInstancesCall) Do(o gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "SearchForInstances returns a list of matching instances. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6.", + // "description": "SearchForInstances returns a list of matching instances. See [Search Transaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6). For details on the implementation of SearchForInstances, see [Search transaction](https://cloud.google.com/healthcare/docs/dicom#search_transaction) in the Cloud Healthcare API conformance statement. For samples that show how to call SearchForInstances, see [Searching for studies, series, instances, and frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#searching_for_studies_series_instances_and_frames).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/instances", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.dicomStores.studies.searchForInstances", @@ -7912,14 +7669,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSearchForInstancesCall) Do(o // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the SearchForInstancesRequest DICOMweb request. For example,\n`instances`, `series/{series_uid}/instances`, or\n`studies/{study_uid}/instances`.", + // "description": "The path of the SearchForInstancesRequest DICOMweb request. For example, `instances`, `series/{series_uid}/instances`, or `studies/{study_uid}/instances`.", // "location": "path", // "pattern": "^studies/[^/]+/instances$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. 
For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -7950,9 +7707,16 @@ type ProjectsLocationsDatasetsDicomStoresStudiesSearchForSeriesCall struct { } // SearchForSeries: SearchForSeries returns a list of matching series. -// See -// http://dicom.nema.org/medical/dicom/current/output/html/part18.htm -// l#sect_10.6. +// See [Search Transaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#s +// ect_10.6). For details on the implementation of SearchForSeries, see +// [Search +// transaction](https://cloud.google.com/healthcare/docs/dicom#search_tra +// nsaction) in the Cloud Healthcare API conformance statement. For +// samples that show how to call SearchForSeries, see [Searching for +// studies, series, instances, and +// frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#sear +// ching_for_studies_series_instances_and_frames). func (r *ProjectsLocationsDatasetsDicomStoresStudiesService) SearchForSeries(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresStudiesSearchForSeriesCall { c := &ProjectsLocationsDatasetsDicomStoresStudiesSearchForSeriesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -7997,7 +7761,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSearchForSeriesCall) Header( func (c *ProjectsLocationsDatasetsDicomStoresStudiesSearchForSeriesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8027,7 +7791,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSearchForSeriesCall) Do(opts gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "SearchForSeries returns a list of matching series. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6.", + // "description": "SearchForSeries returns a list of matching series. See [Search Transaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6). For details on the implementation of SearchForSeries, see [Search transaction](https://cloud.google.com/healthcare/docs/dicom#search_transaction) in the Cloud Healthcare API conformance statement. For samples that show how to call SearchForSeries, see [Searching for studies, series, instances, and frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#searching_for_studies_series_instances_and_frames).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.dicomStores.studies.searchForSeries", @@ -8037,14 +7801,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSearchForSeriesCall) Do(opts // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the SearchForSeries DICOMweb request. 
For example, `series` or\n`studies/{study_uid}/series`.", + // "description": "The path of the SearchForSeries DICOMweb request. For example, `series` or `studies/{study_uid}/series`.", // "location": "path", // "pattern": "^studies/[^/]+/series$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -8075,11 +7839,15 @@ type ProjectsLocationsDatasetsDicomStoresStudiesStoreInstancesCall struct { } // StoreInstances: StoreInstances stores DICOM instances associated with -// study instance unique -// identifiers (SUID). -// See -// http://dicom.nema.org/medical/dicom/current/output/html/part18.htm -// l#sect_10.5. +// study instance unique identifiers (SUID). See [Store Transaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#s +// ect_10.5). For details on the implementation of StoreInstances, see +// [Store +// transaction](https://cloud.google.com/healthcare/docs/dicom#store_tran +// saction) in the Cloud Healthcare API conformance statement. For +// samples that show how to call StoreInstances, see [Storing DICOM +// data](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#storin +// g_dicom_data). func (r *ProjectsLocationsDatasetsDicomStoresStudiesService) StoreInstances(parent string, dicomWebPath string, body_ io.Reader) *ProjectsLocationsDatasetsDicomStoresStudiesStoreInstancesCall { c := &ProjectsLocationsDatasetsDicomStoresStudiesStoreInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -8115,7 +7883,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesStoreInstancesCall) Header() func (c *ProjectsLocationsDatasetsDicomStoresStudiesStoreInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8141,7 +7909,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesStoreInstancesCall) Do(opts gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "StoreInstances stores DICOM instances associated with study instance unique\nidentifiers (SUID). See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.5.", + // "description": "StoreInstances stores DICOM instances associated with study instance unique identifiers (SUID). See [Store Transaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.5). For details on the implementation of StoreInstances, see [Store transaction](https://cloud.google.com/healthcare/docs/dicom#store_transaction) in the Cloud Healthcare API conformance statement. 
For samples that show how to call StoreInstances, see [Storing DICOM data](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#storing_dicom_data).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.dicomStores.studies.storeInstances", @@ -8151,14 +7919,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesStoreInstancesCall) Do(opts // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the StoreInstances DICOMweb request. For example,\n`studies/[{study_uid}]`. Note that the `study_uid` is optional.", + // "description": "The path of the StoreInstances DICOMweb request. For example, `studies/[{study_uid}]`. Note that the `study_uid` is optional.", // "location": "path", // "pattern": "^studies/[^/]+$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -8191,12 +7959,15 @@ type ProjectsLocationsDatasetsDicomStoresStudiesSeriesDeleteCall struct { } // Delete: DeleteSeries deletes all instances within the given study and -// series. -// Delete requests are equivalent to the GET requests specified in -// the -// Retrieve transaction. -// The method returns an Operation which -// will be marked successful when the deletion is complete. +// series. Delete requests are equivalent to the GET requests specified +// in the Retrieve transaction. The method returns an Operation which +// will be marked successful when the deletion is complete. Warning: +// Inserting instances into a series while a delete operation is running +// for that series could result in the new instances not appearing in +// search results until the deletion operation finishes. For samples +// that show how to call DeleteSeries, see [Deleting a study, series, or +// instance](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#de +// leting_a_study_series_or_instance). func (r *ProjectsLocationsDatasetsDicomStoresStudiesSeriesService) Delete(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresStudiesSeriesDeleteCall { c := &ProjectsLocationsDatasetsDicomStoresStudiesSeriesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -8231,7 +8002,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesDeleteCall) Header() h func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8291,7 +8062,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesDeleteCall) Do(opts .. 
} return ret, nil // { - // "description": "DeleteSeries deletes all instances within the given study and series.\nDelete requests are equivalent to the GET requests specified in the\nRetrieve transaction.\nThe method returns an Operation which\nwill be marked successful when the deletion is complete.", + // "description": "DeleteSeries deletes all instances within the given study and series. Delete requests are equivalent to the GET requests specified in the Retrieve transaction. The method returns an Operation which will be marked successful when the deletion is complete. Warning: Inserting instances into a series while a delete operation is running for that series could result in the new instances not appearing in search results until the deletion operation finishes. For samples that show how to call DeleteSeries, see [Deleting a study, series, or instance](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#deleting_a_study_series_or_instance).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}", // "httpMethod": "DELETE", // "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.delete", @@ -8301,14 +8072,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesDeleteCall) Do(opts .. // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the DeleteSeries request. For example,\n`studies/{study_uid}/series/{series_uid}`.", + // "description": "The path of the DeleteSeries request. For example, `studies/{study_uid}/series/{series_uid}`.", // "location": "path", // "pattern": "^studies/[^/]+/series/[^/]+$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -8339,11 +8110,16 @@ type ProjectsLocationsDatasetsDicomStoresStudiesSeriesRetrieveMetadataCall struc } // RetrieveMetadata: RetrieveSeriesMetadata returns instance associated -// with the given study and -// series, presented as metadata with the bulk data removed. -// See -// http://dicom.nema.org/medical/dicom/current/output/html/part18.htm -// l#sect_10.4. +// with the given study and series, presented as metadata with the bulk +// data removed. See [RetrieveTransaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#s +// ect_10.4). For details on the implementation of +// RetrieveSeriesMetadata, see [Metadata +// resources](https://cloud.google.com/healthcare/docs/dicom#metadata_res +// ources) in the Cloud Healthcare API conformance statement. For +// samples that show how to call RetrieveSeriesMetadata, see [Retrieving +// metadata](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#re +// trieving_metadata). 
func (r *ProjectsLocationsDatasetsDicomStoresStudiesSeriesService) RetrieveMetadata(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresStudiesSeriesRetrieveMetadataCall { c := &ProjectsLocationsDatasetsDicomStoresStudiesSeriesRetrieveMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -8388,7 +8164,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesRetrieveMetadataCall) func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesRetrieveMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8418,7 +8194,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesRetrieveMetadataCall) gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "RetrieveSeriesMetadata returns instance associated with the given study and\nseries, presented as metadata with the bulk data removed. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + // "description": "RetrieveSeriesMetadata returns instance associated with the given study and series, presented as metadata with the bulk data removed. See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4). For details on the implementation of RetrieveSeriesMetadata, see [Metadata resources](https://cloud.google.com/healthcare/docs/dicom#metadata_resources) in the Cloud Healthcare API conformance statement. For samples that show how to call RetrieveSeriesMetadata, see [Retrieving metadata](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_metadata).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}/metadata", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.retrieveMetadata", @@ -8428,14 +8204,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesRetrieveMetadataCall) // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the RetrieveSeriesMetadata DICOMweb request. For example,\n`studies/{study_uid}/series/{series_uid}/metadata`.", + // "description": "The path of the RetrieveSeriesMetadata DICOMweb request. For example, `studies/{study_uid}/series/{series_uid}/metadata`.", // "location": "path", // "pattern": "^studies/[^/]+/series/[^/]+/metadata$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -8466,10 +8242,16 @@ type ProjectsLocationsDatasetsDicomStoresStudiesSeriesRetrieveSeriesCall struct } // RetrieveSeries: RetrieveSeries returns all instances within the given -// study and series. -// See -// http://dicom.nema.org/medical/dicom/current/output/html/part18.htm -// l#sect_10.4. 
+// study and series. See [RetrieveTransaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#s +// ect_10.4). For details on the implementation of RetrieveSeries, see +// [DICOM +// study/series/instances](https://cloud.google.com/healthcare/docs/dicom +// #dicom_studyseriesinstances) in the Cloud Healthcare API conformance +// statement. For samples that show how to call RetrieveSeries, see +// [Retrieving DICOM +// data](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrie +// ving_dicom_data). func (r *ProjectsLocationsDatasetsDicomStoresStudiesSeriesService) RetrieveSeries(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresStudiesSeriesRetrieveSeriesCall { c := &ProjectsLocationsDatasetsDicomStoresStudiesSeriesRetrieveSeriesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -8514,7 +8296,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesRetrieveSeriesCall) He func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesRetrieveSeriesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8544,7 +8326,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesRetrieveSeriesCall) Do gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "RetrieveSeries returns all instances within the given study and series. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + // "description": "RetrieveSeries returns all instances within the given study and series. See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4). For details on the implementation of RetrieveSeries, see [DICOM study/series/instances](https://cloud.google.com/healthcare/docs/dicom#dicom_studyseriesinstances) in the Cloud Healthcare API conformance statement. For samples that show how to call RetrieveSeries, see [Retrieving DICOM data](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_dicom_data).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.retrieveSeries", @@ -8554,14 +8336,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesRetrieveSeriesCall) Do // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the RetrieveSeries DICOMweb request. For example,\n`studies/{study_uid}/series/{series_uid}`.", + // "description": "The path of the RetrieveSeries DICOMweb request. For example, `studies/{study_uid}/series/{series_uid}`.", // "location": "path", // "pattern": "^studies/[^/]+/series/[^/]+$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. 
For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -8592,10 +8374,16 @@ type ProjectsLocationsDatasetsDicomStoresStudiesSeriesSearchForInstancesCall str } // SearchForInstances: SearchForInstances returns a list of matching -// instances. -// See -// http://dicom.nema.org/medical/dicom/current/output/html/part18.htm -// l#sect_10.6. +// instances. See [Search Transaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#s +// ect_10.6). For details on the implementation of SearchForInstances, +// see [Search +// transaction](https://cloud.google.com/healthcare/docs/dicom#search_tra +// nsaction) in the Cloud Healthcare API conformance statement. For +// samples that show how to call SearchForInstances, see [Searching for +// studies, series, instances, and +// frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#sear +// ching_for_studies_series_instances_and_frames). func (r *ProjectsLocationsDatasetsDicomStoresStudiesSeriesService) SearchForInstances(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresStudiesSeriesSearchForInstancesCall { c := &ProjectsLocationsDatasetsDicomStoresStudiesSeriesSearchForInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -8640,7 +8428,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesSearchForInstancesCall func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesSearchForInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8670,7 +8458,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesSearchForInstancesCall gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "SearchForInstances returns a list of matching instances. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6.", + // "description": "SearchForInstances returns a list of matching instances. See [Search Transaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.6). For details on the implementation of SearchForInstances, see [Search transaction](https://cloud.google.com/healthcare/docs/dicom#search_transaction) in the Cloud Healthcare API conformance statement. For samples that show how to call SearchForInstances, see [Searching for studies, series, instances, and frames](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#searching_for_studies_series_instances_and_frames).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}/instances", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.searchForInstances", @@ -8680,14 +8468,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesSearchForInstancesCall // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the SearchForInstancesRequest DICOMweb request. 
For example,\n`instances`, `series/{series_uid}/instances`, or\n`studies/{study_uid}/instances`.", + // "description": "The path of the SearchForInstancesRequest DICOMweb request. For example, `instances`, `series/{series_uid}/instances`, or `studies/{study_uid}/instances`.", // "location": "path", // "pattern": "^studies/[^/]+/series/[^/]+/instances$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -8717,10 +8505,13 @@ type ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesDeleteCall struct } // Delete: DeleteInstance deletes an instance associated with the given -// study, series, -// and SOP Instance UID. Delete requests are equivalent to the GET -// requests -// specified in the Retrieve transaction. +// study, series, and SOP Instance UID. Delete requests are equivalent +// to the GET requests specified in the Retrieve transaction. Study and +// series search results can take a few seconds to be updated after an +// instance is deleted using DeleteInstance. For samples that show how +// to call DeleteInstance, see [Deleting a study, series, or +// instance](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#de +// leting_a_study_series_or_instance). func (r *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesService) Delete(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesDeleteCall { c := &ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -8755,7 +8546,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesDeleteCall) H func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8815,7 +8606,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesDeleteCall) D } return ret, nil // { - // "description": "DeleteInstance deletes an instance associated with the given study, series,\nand SOP Instance UID. Delete requests are equivalent to the GET requests\nspecified in the Retrieve transaction.", + // "description": "DeleteInstance deletes an instance associated with the given study, series, and SOP Instance UID. Delete requests are equivalent to the GET requests specified in the Retrieve transaction. Study and series search results can take a few seconds to be updated after an instance is deleted using DeleteInstance. 
For samples that show how to call DeleteInstance, see [Deleting a study, series, or instance](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#deleting_a_study_series_or_instance).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}/instances/{instancesId}", // "httpMethod": "DELETE", // "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.instances.delete", @@ -8825,14 +8616,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesDeleteCall) D // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the DeleteInstance request. For example,\n`studies/{study_uid}/series/{series_uid}/instances/{instance_uid}`.", + // "description": "The path of the DeleteInstance request. For example, `studies/{study_uid}/series/{series_uid}/instances/{instance_uid}`.", // "location": "path", // "pattern": "^studies/[^/]+/series/[^/]+/instances/[^/]+$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -8863,11 +8654,18 @@ type ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveInstanceC } // RetrieveInstance: RetrieveInstance returns instance associated with -// the given study, series, -// and SOP Instance UID. -// See -// http://dicom.nema.org/medical/dicom/current/output/html/part18.htm -// l#sect_10.4. +// the given study, series, and SOP Instance UID. See +// [RetrieveTransaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#s +// ect_10.4). For details on the implementation of RetrieveInstance, see +// [DICOM +// study/series/instances](https://cloud.google.com/healthcare/docs/dicom +// #dicom_studyseriesinstances) and [DICOM +// instances](https://cloud.google.com/healthcare/docs/dicom#dicom_instan +// ces) in the Cloud Healthcare API conformance statement. For samples +// that show how to call RetrieveInstance, see [Retrieving an +// instance](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#re +// trieving_an_instance). 
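A minimal sketch of driving the RetrieveInstance surface regenerated above through this client; the project, dataset, and store IDs are placeholders, and Application Default Credentials are assumed:

package snippets

import (
	"context"
	"fmt"
	"io"
	"os"

	healthcare "google.golang.org/api/healthcare/v1"
)

// retrieveDICOMInstance downloads one SOP instance from a DICOM store and
// writes the raw DICOM bytes to outPath. parent is the store resource name,
// for example
// "projects/my-proj/locations/us-central1/datasets/my-ds/dicomStores/my-store"
// (placeholder), and dicomWebPath follows
// "studies/{study_uid}/series/{series_uid}/instances/{instance_uid}".
func retrieveDICOMInstance(ctx context.Context, parent, dicomWebPath, outPath string) error {
	svc, err := healthcare.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		return fmt.Errorf("healthcare.NewService: %v", err)
	}

	call := svc.Projects.Locations.Datasets.DicomStores.Studies.Series.Instances.RetrieveInstance(parent, dicomWebPath)
	// DICOMweb retrieval calls hand back the raw *http.Response; request the
	// instance in whatever transfer syntax it is stored in.
	call.Header().Set("Accept", "application/dicom; transfer-syntax=*")

	resp, err := call.Context(ctx).Do()
	if err != nil {
		return fmt.Errorf("RetrieveInstance: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return fmt.Errorf("RetrieveInstance: unexpected status %d", resp.StatusCode)
	}

	out, err := os.Create(outPath)
	if err != nil {
		return err
	}
	defer out.Close()
	_, err = io.Copy(out, resp.Body)
	return err
}

The status check is the caller's job here because, as the generated Do methods above show, the DICOMweb calls return the raw HTTP response rather than a decoded struct.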
func (r *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesService) RetrieveInstance(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveInstanceCall { c := &ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -8912,7 +8710,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveInsta func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8942,7 +8740,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveInsta gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "RetrieveInstance returns instance associated with the given study, series,\nand SOP Instance UID. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + // "description": "RetrieveInstance returns instance associated with the given study, series, and SOP Instance UID. See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4). For details on the implementation of RetrieveInstance, see [DICOM study/series/instances](https://cloud.google.com/healthcare/docs/dicom#dicom_studyseriesinstances) and [DICOM instances](https://cloud.google.com/healthcare/docs/dicom#dicom_instances) in the Cloud Healthcare API conformance statement. For samples that show how to call RetrieveInstance, see [Retrieving an instance](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_an_instance).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}/instances/{instancesId}", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.instances.retrieveInstance", @@ -8952,14 +8750,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveInsta // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the RetrieveInstance DICOMweb request. For example,\n`studies/{study_uid}/series/{series_uid}/instances/{instance_uid}`.", + // "description": "The path of the RetrieveInstance DICOMweb request. For example, `studies/{study_uid}/series/{series_uid}/instances/{instance_uid}`.", // "location": "path", // "pattern": "^studies/[^/]+/series/[^/]+/instances/[^/]+$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. 
For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -8990,13 +8788,18 @@ type ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveMetadataC } // RetrieveMetadata: RetrieveInstanceMetadata returns instance -// associated with the given study, -// series, and SOP Instance UID presented as metadata with the bulk -// data -// removed. -// See -// http://dicom.nema.org/medical/dicom/current/output/html/part18.htm -// l#sect_10.4. +// associated with the given study, series, and SOP Instance UID +// presented as metadata with the bulk data removed. See +// [RetrieveTransaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#s +// ect_10.4). For details on the implementation of +// RetrieveInstanceMetadata, see [Metadata +// resources](https://cloud.google.com/healthcare/docs/dicom#metadata_res +// ources) in the Cloud Healthcare API conformance statement. For +// samples that show how to call RetrieveInstanceMetadata, see +// [Retrieving +// metadata](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#re +// trieving_metadata). func (r *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesService) RetrieveMetadata(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveMetadataCall { c := &ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -9041,7 +8844,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveMetad func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9071,7 +8874,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveMetad gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "RetrieveInstanceMetadata returns instance associated with the given study,\nseries, and SOP Instance UID presented as metadata with the bulk data\nremoved. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + // "description": "RetrieveInstanceMetadata returns instance associated with the given study, series, and SOP Instance UID presented as metadata with the bulk data removed. See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4). For details on the implementation of RetrieveInstanceMetadata, see [Metadata resources](https://cloud.google.com/healthcare/docs/dicom#metadata_resources) in the Cloud Healthcare API conformance statement. 
For samples that show how to call RetrieveInstanceMetadata, see [Retrieving metadata](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_metadata).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}/instances/{instancesId}/metadata", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.instances.retrieveMetadata", @@ -9081,14 +8884,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveMetad // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the RetrieveInstanceMetadata DICOMweb request. For example,\n`studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/metadata`.", + // "description": "The path of the RetrieveInstanceMetadata DICOMweb request. For example, `studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/metadata`.", // "location": "path", // "pattern": "^studies/[^/]+/series/[^/]+/instances/[^/]+/metadata$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -9119,11 +8922,17 @@ type ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveRenderedC } // RetrieveRendered: RetrieveRenderedInstance returns instance -// associated with the given study, -// series, and SOP Instance UID in an acceptable Rendered Media Type. -// See -// http://dicom.nema.org/medical/dicom/current/output/html/part18.htm -// l#sect_10.4. +// associated with the given study, series, and SOP Instance UID in an +// acceptable Rendered Media Type. See [RetrieveTransaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#s +// ect_10.4). For details on the implementation of +// RetrieveRenderedInstance, see [Rendered +// resources](https://cloud.google.com/healthcare/docs/dicom#rendered_res +// ources) in the Cloud Healthcare API conformance statement. For +// samples that show how to call RetrieveRenderedInstance, see +// [Retrieving consumer image +// formats](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#ret +// rieving_consumer_image_formats). 
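A companion sketch for the rendered variant documented above; it differs only in the path suffix and the Accept header (a consumer media type such as image/jpeg), and the resource names are again placeholders:

package snippets

import (
	"context"
	"fmt"
	"io"

	healthcare "google.golang.org/api/healthcare/v1"
)

// retrieveRenderedInstance fetches a consumer-format rendering (here JPEG) of
// a single instance and streams it to out. dicomWebPath follows
// "studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/rendered".
func retrieveRenderedInstance(ctx context.Context, parent, dicomWebPath string, out io.Writer) error {
	svc, err := healthcare.NewService(ctx)
	if err != nil {
		return fmt.Errorf("healthcare.NewService: %v", err)
	}

	call := svc.Projects.Locations.Datasets.DicomStores.Studies.Series.Instances.RetrieveRendered(parent, dicomWebPath)
	call.Header().Set("Accept", "image/jpeg") // or "image/png"

	resp, err := call.Context(ctx).Do()
	if err != nil {
		return fmt.Errorf("RetrieveRendered: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return fmt.Errorf("RetrieveRendered: unexpected status %d", resp.StatusCode)
	}
	_, err = io.Copy(out, resp.Body)
	return err
}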
func (r *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesService) RetrieveRendered(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveRenderedCall { c := &ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveRenderedCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -9168,7 +8977,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveRende func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveRenderedCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9198,7 +9007,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveRende gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "RetrieveRenderedInstance returns instance associated with the given study,\nseries, and SOP Instance UID in an acceptable Rendered Media Type. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + // "description": "RetrieveRenderedInstance returns instance associated with the given study, series, and SOP Instance UID in an acceptable Rendered Media Type. See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4). For details on the implementation of RetrieveRenderedInstance, see [Rendered resources](https://cloud.google.com/healthcare/docs/dicom#rendered_resources) in the Cloud Healthcare API conformance statement. For samples that show how to call RetrieveRenderedInstance, see [Retrieving consumer image formats](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_consumer_image_formats).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}/instances/{instancesId}/rendered", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.instances.retrieveRendered", @@ -9208,14 +9017,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesRetrieveRende // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the RetrieveRenderedInstance DICOMweb request. For example,\n`studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/rendered`.", + // "description": "The path of the RetrieveRenderedInstance DICOMweb request. For example, `studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/rendered`.", // "location": "path", // "pattern": "^studies/[^/]+/series/[^/]+/instances/[^/]+/rendered$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. 
For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -9246,11 +9055,16 @@ type ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesFramesRetrieveFra } // RetrieveFrames: RetrieveFrames returns instances associated with the -// given study, series, -// SOP Instance UID and frame numbers. -// See -// http://dicom.nema.org/medical/dicom/current/output/html/part18.htm -// l#sect_10.4. +// given study, series, SOP Instance UID and frame numbers. See +// [RetrieveTransaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#s +// ect_10.4}. For details on the implementation of RetrieveFrames, see +// [DICOM +// frames](https://cloud.google.com/healthcare/docs/dicom#dicom_frames) +// in the Cloud Healthcare API conformance statement. For samples that +// show how to call RetrieveFrames, see [Retrieving DICOM +// data](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrie +// ving_dicom_data). func (r *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesFramesService) RetrieveFrames(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesFramesRetrieveFramesCall { c := &ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesFramesRetrieveFramesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -9295,7 +9109,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesFramesRetriev func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesFramesRetrieveFramesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9325,7 +9139,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesFramesRetriev gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "RetrieveFrames returns instances associated with the given study, series,\nSOP Instance UID and frame numbers. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + // "description": "RetrieveFrames returns instances associated with the given study, series, SOP Instance UID and frame numbers. See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4}. For details on the implementation of RetrieveFrames, see [DICOM frames](https://cloud.google.com/healthcare/docs/dicom#dicom_frames) in the Cloud Healthcare API conformance statement. For samples that show how to call RetrieveFrames, see [Retrieving DICOM data](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_dicom_data).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}/instances/{instancesId}/frames/{framesId}", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.instances.frames.retrieveFrames", @@ -9335,14 +9149,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesFramesRetriev // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the RetrieveFrames DICOMweb request. 
For example,\n`studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/frames/{frame_list}`.", + // "description": "The path of the RetrieveFrames DICOMweb request. For example, `studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/frames/{frame_list}`.", // "location": "path", // "pattern": "^studies/[^/]+/series/[^/]+/instances/[^/]+/frames/[^/]+$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -9373,13 +9187,17 @@ type ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesFramesRetrieveRen } // RetrieveRendered: RetrieveRenderedFrames returns instances associated -// with the given study, -// series, SOP Instance UID and frame numbers in an acceptable Rendered -// Media -// Type. -// See -// http://dicom.nema.org/medical/dicom/current/output/html/part18.htm -// l#sect_10.4. +// with the given study, series, SOP Instance UID and frame numbers in +// an acceptable Rendered Media Type. See [RetrieveTransaction] +// (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#s +// ect_10.4). For details on the implementation of +// RetrieveRenderedFrames, see [Rendered +// resources](https://cloud.google.com/healthcare/docs/dicom#rendered_res +// ources) in the Cloud Healthcare API conformance statement. For +// samples that show how to call RetrieveRenderedFrames, see [Retrieving +// consumer image +// formats](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#ret +// rieving_consumer_image_formats). func (r *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesFramesService) RetrieveRendered(parent string, dicomWebPath string) *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesFramesRetrieveRenderedCall { c := &ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesFramesRetrieveRenderedCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -9424,7 +9242,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesFramesRetriev func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesFramesRetrieveRenderedCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9454,7 +9272,7 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesFramesRetriev gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "RetrieveRenderedFrames returns instances associated with the given study,\nseries, SOP Instance UID and frame numbers in an acceptable Rendered Media\nType. See\nhttp://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4.", + // "description": "RetrieveRenderedFrames returns instances associated with the given study, series, SOP Instance UID and frame numbers in an acceptable Rendered Media Type. 
See [RetrieveTransaction] (http://dicom.nema.org/medical/dicom/current/output/html/part18.html#sect_10.4). For details on the implementation of RetrieveRenderedFrames, see [Rendered resources](https://cloud.google.com/healthcare/docs/dicom#rendered_resources) in the Cloud Healthcare API conformance statement. For samples that show how to call RetrieveRenderedFrames, see [Retrieving consumer image formats](https://cloud.google.com/healthcare/docs/how-tos/dicomweb#retrieving_consumer_image_formats).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/dicomStores/{dicomStoresId}/dicomWeb/studies/{studiesId}/series/{seriesId}/instances/{instancesId}/frames/{framesId}/rendered", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.dicomStores.studies.series.instances.frames.retrieveRendered", @@ -9464,14 +9282,14 @@ func (c *ProjectsLocationsDatasetsDicomStoresStudiesSeriesInstancesFramesRetriev // ], // "parameters": { // "dicomWebPath": { - // "description": "The path of the RetrieveRenderedFrames DICOMweb request. For example,\n`studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/frames/{frame_list}/rendered`.", + // "description": "The path of the RetrieveRenderedFrames DICOMweb request. For example, `studies/{study_uid}/series/{series_uid}/instances/{instance_uid}/frames/{frame_list}/rendered`.", // "location": "path", // "pattern": "^studies/[^/]+/series/[^/]+/instances/[^/]+/frames/[^/]+/rendered$", // "required": true, // "type": "string" // }, // "parent": { - // "description": "The name of the DICOM store that is being accessed. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", + // "description": "The name of the DICOM store that is being accessed. For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/dicomStores/{dicom_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/dicomStores/[^/]+$", // "required": true, @@ -9509,9 +9327,8 @@ func (r *ProjectsLocationsDatasetsFhirStoresService) Create(parent string, fhirs } // FhirStoreId sets the optional parameter "fhirStoreId": The ID of the -// FHIR store that is being created. -// The string must match the following regex: -// `[\p{L}\p{N}_\-\.]{1,256}`. +// FHIR store that is being created. The string must match the following +// regex: `[\p{L}\p{N}_\-\.]{1,256}`. func (c *ProjectsLocationsDatasetsFhirStoresCreateCall) FhirStoreId(fhirStoreId string) *ProjectsLocationsDatasetsFhirStoresCreateCall { c.urlParams_.Set("fhirStoreId", fhirStoreId) return c @@ -9544,7 +9361,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresCreateCall) Header() http.Header { func (c *ProjectsLocationsDatasetsFhirStoresCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9617,7 +9434,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresCreateCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "fhirStoreId": { - // "description": "The ID of the FHIR store that is being created.\nThe string must match the following regex: `[\\p{L}\\p{N}_\\-\\.]{1,256}`.", + // "description": "The ID of the FHIR store that is being created. 
The string must match the following regex: `[\\p{L}\\p{N}_\\-\\.]{1,256}`.", // "location": "query", // "type": "string" // }, @@ -9655,15 +9472,11 @@ type ProjectsLocationsDatasetsFhirStoresDeidentifyCall struct { } // Deidentify: De-identifies data from the source store and writes it to -// the destination -// store. The metadata field type -// is OperationMetadata. -// If the request is successful, the -// response field type is -// DeidentifyFhirStoreSummary. If errors occur, -// error is set. -// Error details are also logged to Cloud Logging -// (see [Viewing logs](/healthcare/docs/how-tos/logging)). +// the destination store. The metadata field type is OperationMetadata. +// If the request is successful, the response field type is +// DeidentifyFhirStoreSummary. If errors occur, error is set. Error +// details are also logged to Cloud Logging (see [Viewing +// logs](/healthcare/docs/how-tos/logging)). func (r *ProjectsLocationsDatasetsFhirStoresService) Deidentify(sourceStore string, deidentifyfhirstorerequest *DeidentifyFhirStoreRequest) *ProjectsLocationsDatasetsFhirStoresDeidentifyCall { c := &ProjectsLocationsDatasetsFhirStoresDeidentifyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceStore = sourceStore @@ -9698,7 +9511,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresDeidentifyCall) Header() http.Header func (c *ProjectsLocationsDatasetsFhirStoresDeidentifyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9762,7 +9575,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresDeidentifyCall) Do(opts ...googleapi } return ret, nil // { - // "description": "De-identifies data from the source store and writes it to the destination\nstore. The metadata field type\nis OperationMetadata.\nIf the request is successful, the\nresponse field type is\nDeidentifyFhirStoreSummary. If errors occur,\nerror is set.\nError details are also logged to Cloud Logging\n(see [Viewing logs](/healthcare/docs/how-tos/logging)).", + // "description": "De-identifies data from the source store and writes it to the destination store. The metadata field type is OperationMetadata. If the request is successful, the response field type is DeidentifyFhirStoreSummary. If errors occur, error is set. Error details are also logged to Cloud Logging (see [Viewing logs](/healthcare/docs/how-tos/logging)).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}:deidentify", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.fhirStores.deidentify", @@ -9771,7 +9584,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresDeidentifyCall) Do(opts ...googleapi // ], // "parameters": { // "sourceStore": { - // "description": "Source FHIR store resource name. For example,\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", + // "description": "Source FHIR store resource name. 
For example, `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+$", // "required": true, @@ -9837,7 +9650,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresDeleteCall) Header() http.Header { func (c *ProjectsLocationsDatasetsFhirStoresDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9935,24 +9748,13 @@ type ProjectsLocationsDatasetsFhirStoresExportCall struct { } // Export: Export resources from the FHIR store to the specified -// destination. -// -// This method returns an Operation that can -// be used to track the status of the export by -// calling -// GetOperation. -// -// Immediate fatal errors appear in the -// error field, errors are also logged -// to Cloud Logging (see -// [Viewing -// logs](/healthcare/docs/how-tos/logging)). -// Otherwise, when the operation finishes, a detailed response of -// type -// ExportResourcesResponse is returned in the -// response field. -// The metadata field type for this -// operation is OperationMetadata. +// destination. This method returns an Operation that can be used to +// track the status of the export by calling GetOperation. Immediate +// fatal errors appear in the error field, errors are also logged to +// Cloud Logging (see [Viewing logs](/healthcare/docs/how-tos/logging)). +// Otherwise, when the operation finishes, a detailed response of type +// ExportResourcesResponse is returned in the response field. The +// metadata field type for this operation is OperationMetadata. func (r *ProjectsLocationsDatasetsFhirStoresService) Export(name string, exportresourcesrequest *ExportResourcesRequest) *ProjectsLocationsDatasetsFhirStoresExportCall { c := &ProjectsLocationsDatasetsFhirStoresExportCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -9987,7 +9789,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresExportCall) Header() http.Header { func (c *ProjectsLocationsDatasetsFhirStoresExportCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10051,7 +9853,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresExportCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Export resources from the FHIR store to the specified destination.\n\nThis method returns an Operation that can\nbe used to track the status of the export by calling\nGetOperation.\n\nImmediate fatal errors appear in the\nerror field, errors are also logged\nto Cloud Logging (see [Viewing\nlogs](/healthcare/docs/how-tos/logging)).\nOtherwise, when the operation finishes, a detailed response of type\nExportResourcesResponse is returned in the\nresponse field.\nThe metadata field type for this\noperation is OperationMetadata.", + // "description": "Export resources from the FHIR store to the specified destination. This method returns an Operation that can be used to track the status of the export by calling GetOperation. 
Immediate fatal errors appear in the error field, errors are also logged to Cloud Logging (see [Viewing logs](/healthcare/docs/how-tos/logging)). Otherwise, when the operation finishes, a detailed response of type ExportResourcesResponse is returned in the response field. The metadata field type for this operation is OperationMetadata.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}:export", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.fhirStores.export", @@ -10060,7 +9862,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresExportCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "name": { - // "description": "The name of the FHIR store to export resource from. The name should be in\nthe format of\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", + // "description": "The name of the FHIR store to export resource from, in the format of `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+$", // "required": true, @@ -10136,7 +9938,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresGetCall) Header() http.Header { func (c *ProjectsLocationsDatasetsFhirStoresGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10236,9 +10038,8 @@ type ProjectsLocationsDatasetsFhirStoresGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy // set. func (r *ProjectsLocationsDatasetsFhirStoresService) GetIamPolicy(resource string) *ProjectsLocationsDatasetsFhirStoresGetIamPolicyCall { c := &ProjectsLocationsDatasetsFhirStoresGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -10248,24 +10049,14 @@ func (r *ProjectsLocationsDatasetsFhirStoresService) GetIamPolicy(resource strin // OptionsRequestedPolicyVersion sets the optional parameter // "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. -// -// Requests for policies with any conditional bindings must specify -// version 3. -// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. -// -// To learn which resources support conditions in their IAM policies, -// see -// the -// [IAM -// documentation](https://cloud.google.com/iam/help/conditions/r -// esource-policies). +// returned. Valid values are 0, 1, and 3. Requests specifying an +// invalid value will be rejected. Requests for policies with any +// conditional bindings must specify version 3. Policies without any +// conditional bindings may specify any valid value or leave the field +// unset. To learn which resources support conditions in their IAM +// policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). 
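A small sketch exercising the OptionsRequestedPolicyVersion setter whose reflowed comment ends above: requesting policy format version 3 so that any conditional bindings are returned intact. The FHIR store resource name is a placeholder.

package snippets

import (
	"context"
	"fmt"
	"io"

	healthcare "google.golang.org/api/healthcare/v1"
)

// printFhirStorePolicy fetches the IAM policy of a FHIR store and lists its
// bindings. resource looks like
// "projects/my-proj/locations/us-central1/datasets/my-ds/fhirStores/my-store"
// (placeholder).
func printFhirStorePolicy(ctx context.Context, w io.Writer, resource string) error {
	svc, err := healthcare.NewService(ctx)
	if err != nil {
		return fmt.Errorf("healthcare.NewService: %v", err)
	}

	policy, err := svc.Projects.Locations.Datasets.FhirStores.
		GetIamPolicy(resource).
		OptionsRequestedPolicyVersion(3). // required when bindings may carry IAM conditions
		Context(ctx).
		Do()
	if err != nil {
		return fmt.Errorf("GetIamPolicy: %v", err)
	}

	for _, b := range policy.Bindings {
		fmt.Fprintf(w, "%s -> %v\n", b.Role, b.Members)
	}
	return nil
}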
func (c *ProjectsLocationsDatasetsFhirStoresGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsLocationsDatasetsFhirStoresGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -10308,7 +10099,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresGetIamPolicyCall) Header() http.Head func (c *ProjectsLocationsDatasetsFhirStoresGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10370,7 +10161,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresGetIamPolicyCall) Do(opts ...googlea } return ret, nil // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}:getIamPolicy", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.fhirStores.getIamPolicy", @@ -10379,13 +10170,13 @@ func (c *ProjectsLocationsDatasetsFhirStoresGetIamPolicyCall) Do(opts ...googlea // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + // "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" // }, // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+$", // "required": true, @@ -10414,121 +10205,61 @@ type ProjectsLocationsDatasetsFhirStoresImportCall struct { header_ http.Header } -// Import: Import resources to the FHIR store by loading data from the -// specified -// sources. This method is optimized to load large quantities of data -// using -// import semantics that ignore some FHIR store configuration options -// and are -// not suitable for all use cases. 
It is primarily intended to load data -// into -// an empty FHIR store that is not being used by other clients. In -// cases -// where this method is not appropriate, consider using ExecuteBundle -// to -// load data. -// -// Every resource in the input must contain a client-supplied ID, and -// will be -// stored using that ID regardless of the -// enable_update_create setting on the FHIR -// store. -// -// The import process does not enforce referential integrity, regardless -// of -// the -// disable_referential_integrity -// setting on the FHIR store. This allows the import of resources -// with -// arbitrary interdependencies without considering grouping or ordering, -// but -// if the input data contains invalid references or if some resources -// fail to -// be imported, the FHIR store might be left in a state that -// violates -// referential integrity. -// -// The import process does not trigger Pub/Sub notification or -// BigQuery +// Import: Imports resources to the FHIR store by loading data from the +// specified sources. This method is optimized to load large quantities +// of data using import semantics that ignore some FHIR store +// configuration options and are not suitable for all use cases. It is +// primarily intended to load data into an empty FHIR store that is not +// being used by other clients. In cases where this method is not +// appropriate, consider using ExecuteBundle to load data. Every +// resource in the input must contain a client-supplied ID. Each +// resource is stored using the supplied ID regardless of the +// enable_update_create setting on the FHIR store. The import process +// does not enforce referential integrity, regardless of the +// disable_referential_integrity setting on the FHIR store. This allows +// the import of resources with arbitrary interdependencies without +// considering grouping or ordering, but if the input data contains +// invalid references or if some resources fail to be imported, the FHIR +// store might be left in a state that violates referential integrity. +// The import process does not trigger Pub/Sub notification or BigQuery // streaming update, regardless of how those are configured on the FHIR -// store. -// -// If a resource with the specified ID already exists, the most -// recent -// version of the resource is overwritten without creating a new -// historical -// version, regardless of the -// disable_resource_versioning +// store. If a resource with the specified ID already exists, the most +// recent version of the resource is overwritten without creating a new +// historical version, regardless of the disable_resource_versioning // setting on the FHIR store. If transient failures occur during the -// import, -// it is possible that successfully imported resources will be -// overwritten -// more than once. -// -// The import operation is idempotent unless the input data contains -// multiple -// valid resources with the same ID but different contents. In that -// case, -// after the import completes, the store will contain exactly one -// resource -// with that ID but there is no ordering guarantee on which version of -// the -// contents it will have. The operation result counters do not -// count -// duplicate IDs as an error and will count one success for each -// resource in -// the input, which might result in a success count larger than the -// number -// of resources in the FHIR store. 
This often occurs when importing -// data -// organized in bundles produced by Patient-everything -// where each bundle contains its own copy of a resource such as -// Practitioner -// that might be referred to by many patients. -// -// If some resources fail to import, for example due to parsing -// errors, -// successfully imported resources are not rolled back. -// +// import, it's possible that successfully imported resources will be +// overwritten more than once. The import operation is idempotent unless +// the input data contains multiple valid resources with the same ID but +// different contents. In that case, after the import completes, the +// store contains exactly one resource with that ID but there is no +// ordering guarantee on which version of the contents it will have. The +// operation result counters do not count duplicate IDs as an error and +// count one success for each resource in the input, which might result +// in a success count larger than the number of resources in the FHIR +// store. This often occurs when importing data organized in bundles +// produced by Patient-everything where each bundle contains its own +// copy of a resource such as Practitioner that might be referred to by +// many patients. If some resources fail to import, for example due to +// parsing errors, successfully imported resources are not rolled back. // The location and format of the input data is specified by the -// parameters -// below. Note that if no format is specified, this method assumes -// the -// `BUNDLE` format. When using the `BUNDLE` format this method ignores -// the -// `Bundle.type` field, except that `history` bundles are rejected, and -// does -// not apply any of the bundle processing semantics for batch or -// transaction -// bundles. Unlike in ExecuteBundle, transaction bundles are not -// executed -// as a single transaction and bundle-internal references are not -// rewritten. -// The bundle is treated as a collection of resources to be written -// as +// parameters in ImportResourcesRequest. Note that if no format is +// specified, this method assumes the `BUNDLE` format. When using the +// `BUNDLE` format this method ignores the `Bundle.type` field, except +// that `history` bundles are rejected, and does not apply any of the +// bundle processing semantics for batch or transaction bundles. Unlike +// in ExecuteBundle, transaction bundles are not executed as a single +// transaction and bundle-internal references are not rewritten. The +// bundle is treated as a collection of resources to be written as // provided in `Bundle.entry.resource`, ignoring `Bundle.entry.request`. -// As -// an example, this allows the import of `searchset` bundles produced by -// a -// FHIR search or -// Patient-everything operation. -// -// This method returns an Operation that can -// be used to track the status of the import by -// calling -// GetOperation. -// -// Immediate fatal errors appear in the -// error field, errors are also logged -// to Cloud Logging (see -// [Viewing -// logs](/healthcare/docs/how-tos/logging)). Otherwise, when -// the +// As an example, this allows the import of `searchset` bundles produced +// by a FHIR search or Patient-everything operation. This method returns +// an Operation that can be used to track the status of the import by +// calling GetOperation. Immediate fatal errors appear in the error +// field, errors are also logged to Cloud Logging (see [Viewing +// logs](/healthcare/docs/how-tos/logging)). 
Otherwise, when the // operation finishes, a detailed response of type -// ImportResourcesResponse -// is returned in the response field. -// The metadata field type for this -// operation is OperationMetadata. +// ImportResourcesResponse is returned in the response field. The +// metadata field type for this operation is OperationMetadata. func (r *ProjectsLocationsDatasetsFhirStoresService) Import(name string, importresourcesrequest *ImportResourcesRequest) *ProjectsLocationsDatasetsFhirStoresImportCall { c := &ProjectsLocationsDatasetsFhirStoresImportCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -10563,7 +10294,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresImportCall) Header() http.Header { func (c *ProjectsLocationsDatasetsFhirStoresImportCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10627,7 +10358,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresImportCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Import resources to the FHIR store by loading data from the specified\nsources. This method is optimized to load large quantities of data using\nimport semantics that ignore some FHIR store configuration options and are\nnot suitable for all use cases. It is primarily intended to load data into\nan empty FHIR store that is not being used by other clients. In cases\nwhere this method is not appropriate, consider using ExecuteBundle to\nload data.\n\nEvery resource in the input must contain a client-supplied ID, and will be\nstored using that ID regardless of the\nenable_update_create setting on the FHIR\nstore.\n\nThe import process does not enforce referential integrity, regardless of\nthe\ndisable_referential_integrity\nsetting on the FHIR store. This allows the import of resources with\narbitrary interdependencies without considering grouping or ordering, but\nif the input data contains invalid references or if some resources fail to\nbe imported, the FHIR store might be left in a state that violates\nreferential integrity.\n\nThe import process does not trigger Pub/Sub notification or BigQuery\nstreaming update, regardless of how those are configured on the FHIR store.\n\nIf a resource with the specified ID already exists, the most recent\nversion of the resource is overwritten without creating a new historical\nversion, regardless of the\ndisable_resource_versioning\nsetting on the FHIR store. If transient failures occur during the import,\nit is possible that successfully imported resources will be overwritten\nmore than once.\n\nThe import operation is idempotent unless the input data contains multiple\nvalid resources with the same ID but different contents. In that case,\nafter the import completes, the store will contain exactly one resource\nwith that ID but there is no ordering guarantee on which version of the\ncontents it will have. The operation result counters do not count\nduplicate IDs as an error and will count one success for each resource in\nthe input, which might result in a success count larger than the number\nof resources in the FHIR store. 
This often occurs when importing data\norganized in bundles produced by Patient-everything\nwhere each bundle contains its own copy of a resource such as Practitioner\nthat might be referred to by many patients.\n\nIf some resources fail to import, for example due to parsing errors,\nsuccessfully imported resources are not rolled back.\n\nThe location and format of the input data is specified by the parameters\nbelow. Note that if no format is specified, this method assumes the\n`BUNDLE` format. When using the `BUNDLE` format this method ignores the\n`Bundle.type` field, except that `history` bundles are rejected, and does\nnot apply any of the bundle processing semantics for batch or transaction\nbundles. Unlike in ExecuteBundle, transaction bundles are not executed\nas a single transaction and bundle-internal references are not rewritten.\nThe bundle is treated as a collection of resources to be written as\nprovided in `Bundle.entry.resource`, ignoring `Bundle.entry.request`. As\nan example, this allows the import of `searchset` bundles produced by a\nFHIR search or\nPatient-everything operation.\n\nThis method returns an Operation that can\nbe used to track the status of the import by calling\nGetOperation.\n\nImmediate fatal errors appear in the\nerror field, errors are also logged\nto Cloud Logging (see [Viewing\nlogs](/healthcare/docs/how-tos/logging)). Otherwise, when the\noperation finishes, a detailed response of type ImportResourcesResponse\nis returned in the response field.\nThe metadata field type for this\noperation is OperationMetadata.", + // "description": "Imports resources to the FHIR store by loading data from the specified sources. This method is optimized to load large quantities of data using import semantics that ignore some FHIR store configuration options and are not suitable for all use cases. It is primarily intended to load data into an empty FHIR store that is not being used by other clients. In cases where this method is not appropriate, consider using ExecuteBundle to load data. Every resource in the input must contain a client-supplied ID. Each resource is stored using the supplied ID regardless of the enable_update_create setting on the FHIR store. The import process does not enforce referential integrity, regardless of the disable_referential_integrity setting on the FHIR store. This allows the import of resources with arbitrary interdependencies without considering grouping or ordering, but if the input data contains invalid references or if some resources fail to be imported, the FHIR store might be left in a state that violates referential integrity. The import process does not trigger Pub/Sub notification or BigQuery streaming update, regardless of how those are configured on the FHIR store. If a resource with the specified ID already exists, the most recent version of the resource is overwritten without creating a new historical version, regardless of the disable_resource_versioning setting on the FHIR store. If transient failures occur during the import, it's possible that successfully imported resources will be overwritten more than once. The import operation is idempotent unless the input data contains multiple valid resources with the same ID but different contents. In that case, after the import completes, the store contains exactly one resource with that ID but there is no ordering guarantee on which version of the contents it will have. 
The operation result counters do not count duplicate IDs as an error and count one success for each resource in the input, which might result in a success count larger than the number of resources in the FHIR store. This often occurs when importing data organized in bundles produced by Patient-everything where each bundle contains its own copy of a resource such as Practitioner that might be referred to by many patients. If some resources fail to import, for example due to parsing errors, successfully imported resources are not rolled back. The location and format of the input data is specified by the parameters in ImportResourcesRequest. Note that if no format is specified, this method assumes the `BUNDLE` format. When using the `BUNDLE` format this method ignores the `Bundle.type` field, except that `history` bundles are rejected, and does not apply any of the bundle processing semantics for batch or transaction bundles. Unlike in ExecuteBundle, transaction bundles are not executed as a single transaction and bundle-internal references are not rewritten. The bundle is treated as a collection of resources to be written as provided in `Bundle.entry.resource`, ignoring `Bundle.entry.request`. As an example, this allows the import of `searchset` bundles produced by a FHIR search or Patient-everything operation. This method returns an Operation that can be used to track the status of the import by calling GetOperation. Immediate fatal errors appear in the error field, errors are also logged to Cloud Logging (see [Viewing logs](/healthcare/docs/how-tos/logging)). Otherwise, when the operation finishes, a detailed response of type ImportResourcesResponse is returned in the response field. The metadata field type for this operation is OperationMetadata.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}:import", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.fhirStores.import", @@ -10636,7 +10367,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresImportCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "name": { - // "description": "The name of the FHIR store to import FHIR resources to. The name should be\nin the format of\n`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", + // "description": "The name of the FHIR store to import FHIR resources to, in the format of `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+$", // "required": true, @@ -10676,11 +10407,8 @@ func (r *ProjectsLocationsDatasetsFhirStoresService) List(parent string) *Projec } // Filter sets the optional parameter "filter": Restricts stores -// returned to those matching a filter. -// Syntax: -// https://cloud.google.com/appengine/docs/standard/python/search -// /query_strings -// Only filtering on labels is supported, for example +// returned to those matching a filter. Syntax: +// https://cloud.google.com/appengine/docs/standard/python/search/query_strings Only filtering on labels is supported, for example // `labels.key=value`. 
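The Filter and PageSize setters documented above map to the `filter` and `pageSize` query parameters on fhirStores.list (label-only filtering; a zero page size falls back to the default of 100). A short sketch of paging through stores that match a label follows, assuming the usual generated-client wiring (`healthcare.NewService`, a `ListFhirStoresResponse` page type with a `FhirStores` slice) and placeholder resource names.

package main

import (
    "context"
    "fmt"
    "log"

    healthcare "google.golang.org/api/healthcare/v1"
)

func main() {
    ctx := context.Background()

    svc, err := healthcare.NewService(ctx)
    if err != nil {
        log.Fatalf("healthcare.NewService: %v", err)
    }

    // Dataset that owns the FHIR stores (placeholder values).
    parent := "projects/my-project/locations/us-central1/datasets/my-dataset"

    // Only label filtering is supported; page size defaults to 100 when zero.
    call := svc.Projects.Locations.Datasets.FhirStores.List(parent).
        Filter("labels.env=prod").
        PageSize(50)

    // Pages walks NextPageToken for us, invoking the callback once per page.
    err = call.Pages(ctx, func(page *healthcare.ListFhirStoresResponse) error {
        for _, store := range page.FhirStores {
            fmt.Println(store.Name)
        }
        return nil
    })
    if err != nil {
        log.Fatalf("fhirStores.list: %v", err)
    }
}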
func (c *ProjectsLocationsDatasetsFhirStoresListCall) Filter(filter string) *ProjectsLocationsDatasetsFhirStoresListCall { c.urlParams_.Set("filter", filter) @@ -10688,8 +10416,8 @@ func (c *ProjectsLocationsDatasetsFhirStoresListCall) Filter(filter string) *Pro } // PageSize sets the optional parameter "pageSize": Limit on the number -// of FHIR stores to return in a single response. If zero -// the default page size of 100 is used. +// of FHIR stores to return in a single response. If zero the default +// page size of 100 is used. func (c *ProjectsLocationsDatasetsFhirStoresListCall) PageSize(pageSize int64) *ProjectsLocationsDatasetsFhirStoresListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c @@ -10740,7 +10468,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresListCall) Header() http.Header { func (c *ProjectsLocationsDatasetsFhirStoresListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10811,12 +10539,12 @@ func (c *ProjectsLocationsDatasetsFhirStoresListCall) Do(opts ...googleapi.CallO // ], // "parameters": { // "filter": { - // "description": "Restricts stores returned to those matching a filter. Syntax:\nhttps://cloud.google.com/appengine/docs/standard/python/search/query_strings\nOnly filtering on labels is supported, for example `labels.key=value`.", + // "description": "Restricts stores returned to those matching a filter. Syntax: https://cloud.google.com/appengine/docs/standard/python/search/query_strings Only filtering on labels is supported, for example `labels.key=value`.", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Limit on the number of FHIR stores to return in a single response. If zero\nthe default page size of 100 is used.", + // "description": "Limit on the number of FHIR stores to return in a single response. If zero the default page size of 100 is used.", // "format": "int32", // "location": "query", // "type": "integer" @@ -10886,11 +10614,8 @@ func (r *ProjectsLocationsDatasetsFhirStoresService) Patch(name string, fhirstor } // UpdateMask sets the optional parameter "updateMask": The update mask -// applies to the resource. For the `FieldMask` -// definition, -// see -// https://developers.google.com/protocol-buffers/docs/re -// ference/google.protobuf#fieldmask +// applies to the resource. For the `FieldMask` definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask func (c *ProjectsLocationsDatasetsFhirStoresPatchCall) UpdateMask(updateMask string) *ProjectsLocationsDatasetsFhirStoresPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -10923,7 +10648,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresPatchCall) Header() http.Header { func (c *ProjectsLocationsDatasetsFhirStoresPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10996,14 +10721,14 @@ func (c *ProjectsLocationsDatasetsFhirStoresPatchCall) Do(opts ...googleapi.Call // ], // "parameters": { // "name": { - // "description": "Output only. 
Resource name of the FHIR store, of the form\n`projects/{project_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", + // "description": "Output only. Resource name of the FHIR store, of the form `projects/{project_id}/datasets/{dataset_id}/fhirStores/{fhir_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "The update mask applies to the resource. For the `FieldMask` definition,\nsee\nhttps://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", + // "description": "The update mask applies to the resource. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -11035,11 +10760,8 @@ type ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -// -// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` -// errors. +// resource. Replaces any existing policy. Can return `NOT_FOUND`, +// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. func (r *ProjectsLocationsDatasetsFhirStoresService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall { c := &ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -11074,7 +10796,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall) Header() http.Head func (c *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11138,7 +10860,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall) Do(opts ...googlea } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}:setIamPolicy", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.fhirStores.setIamPolicy", @@ -11147,7 +10869,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresSetIamPolicyCall) Do(opts ...googlea // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. 
See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+$", // "required": true, @@ -11180,16 +10902,11 @@ type ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a `NOT_FOUND` error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. This +// operation may "fail open" without warning. func (r *ProjectsLocationsDatasetsFhirStoresService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall { c := &ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -11224,7 +10941,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall) Header() htt func (c *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11288,7 +11005,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall) Do(opts ...g } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}:testIamPermissions", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.fhirStores.testIamPermissions", @@ -11297,7 +11014,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresTestIamPermissionsCall) Do(opts ...g // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. 
See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/fhirStores/[^/]+$", // "required": true, @@ -11330,47 +11047,33 @@ type ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall struct { } // PatientEverything: Retrieves a Patient resource and resources related -// to that patient. -// -// Implements the FHIR extended operation +// to that patient. Implements the FHIR extended operation // Patient-everything -// ([DSTU2](http://hl7.org/implement/standards/fhir/DS -// TU2/patient-operations.html#everything), -// [STU3](http://hl7.org/impleme -// nt/standards/fhir/STU3/patient-operations.html#everything), -// [R4](http: -// //hl7.org/implement/standards/fhir/R4/patient-operations.html#everythi -// ng)). -// -// On success, the response body will contain a JSON-encoded -// representation -// of a `Bundle` resource of type `searchset`, containing the results of -// the -// operation. -// Errors generated by the FHIR store will contain a -// JSON-encoded +// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/patient-operati +// ons.html#everything), +// [STU3](http://hl7.org/implement/standards/fhir/STU3/patient-operations +// .html#everything), +// [R4](http://hl7.org/implement/standards/fhir/R4/patient-operations.htm +// l#everything)). On success, the response body will contain a +// JSON-encoded representation of a `Bundle` resource of type +// `searchset`, containing the results of the operation. Errors +// generated by the FHIR store will contain a JSON-encoded // `OperationOutcome` resource describing the reason for the error. If -// the -// request cannot be mapped to a valid API method on a FHIR store, a -// generic -// GCP error might be returned instead. -// -// The resources in scope for the response are: -// -// * The patient resource itself. -// * All the resources directly referenced by the patient resource. -// * Resources directly referencing the patient resource that meet the -// inclusion criteria. The inclusion criteria are based on the -// membership -// rules in the patient compartment definition -// ([DSTU2](http://hl7.org/fhir/DSTU2/compartment-patient.html), -// +// the request cannot be mapped to a valid API method on a FHIR store, a +// generic GCP error might be returned instead. The resources in scope +// for the response are: * The patient resource itself. * All the +// resources directly referenced by the patient resource. * Resources +// directly referencing the patient resource that meet the inclusion +// criteria. The inclusion criteria are based on the membership rules in +// the patient compartment definition +// ([DSTU2](http://hl7.org/fhir/DSTU2/compartment-patient.html), // [STU3](http://www.hl7.org/fhir/stu3/compartmentdefinition-patient.html -// ), -// [R4](http://hl7.org/fhir/R4/compartmentdefinition-patient.html)), -// which -// details the eligible resource types and referencing search -// parameters. +// ), [R4](http://hl7.org/fhir/R4/compartmentdefinition-patient.html)), +// which details the eligible resource types and referencing search +// parameters. For samples that show how to call `Patient-everything`, +// see [Getting all patient compartment +// resources](/healthcare/docs/how-tos/fhir-resources#getting_all_patient +// _compartment_resources). 
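Before the Patient-everything hunk begins, this chunk also rewraps the setIamPolicy and testIamPermissions docs for FHIR stores. As a rough illustration of the permission check, the sketch below asks which of a few permissions the caller holds on a store; the permission strings and resource name are placeholders, and the `TestIamPermissionsRequest`/`TestIamPermissionsResponse` shapes are the standard IAM ones assumed to match this package.

package main

import (
    "context"
    "fmt"
    "log"

    healthcare "google.golang.org/api/healthcare/v1"
)

func main() {
    ctx := context.Background()

    svc, err := healthcare.NewService(ctx)
    if err != nil {
        log.Fatalf("healthcare.NewService: %v", err)
    }

    // FHIR store whose IAM policy is being probed (placeholder values).
    resource := "projects/my-project/locations/us-central1/datasets/my-dataset/fhirStores/my-store"

    // Ask which of these permissions the caller actually holds. The
    // permission names are illustrative; consult the IAM documentation for
    // the authoritative list.
    req := &healthcare.TestIamPermissionsRequest{
        Permissions: []string{
            "healthcare.fhirStores.get",
            "healthcare.fhirResources.create",
        },
    }

    resp, err := svc.Projects.Locations.Datasets.FhirStores.TestIamPermissions(resource, req).Do()
    if err != nil {
        log.Fatalf("fhirStores.testIamPermissions: %v", err)
    }
    fmt.Println("granted:", resp.Permissions)
}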
func (r *ProjectsLocationsDatasetsFhirStoresFhirService) PatientEverything(name string) *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall { c := &ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -11385,32 +11088,45 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall) Count(Cou } // PageToken sets the optional parameter "_page_token": Used to retrieve -// the next or previous page of results -// when using pagination. Value should be set to the value of page_token -// set -// in next or previous page links' urls. Next and previous page are -// returned -// in the response bundle's links field, where `link.relation` is -// "previous" -// or "next". -// -// Omit `page_token` if no previous request has been made. +// the next or previous page of results when using pagination. Set +// `_page_token` to the value of _page_token set in next or previous +// page links' url. Next and previous page are returned in the response +// bundle's links field, where `link.relation` is "previous" or "next". +// Omit `_page_token` if no previous request has been made. func (c *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall) PageToken(PageToken string) *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall { c.urlParams_.Set("_page_token", PageToken) return c } +// Since sets the optional parameter "_since": If provided, only +// resources updated after this time are returned. The time uses the +// format YYYY-MM-DDThh:mm:ss.sss+zz:zz. For example, +// `2015-02-07T13:28:17.239+02:00` or `2017-01-01T00:00:00Z`. The time +// must be specified to the second and include a time zone. +func (c *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall) Since(Since string) *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall { + c.urlParams_.Set("_since", Since) + return c +} + +// Type sets the optional parameter "_type": String of comma-delimited +// FHIR resource types. If provided, only resources of the specified +// resource type(s) are returned. +func (c *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall) Type(Type string) *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall { + c.urlParams_.Set("_type", Type) + return c +} + // End sets the optional parameter "end": The response includes records -// prior to the end date. If no end date is -// provided, all records subsequent to the start date are in scope. +// prior to the end date. If no end date is provided, all records +// subsequent to the start date are in scope. func (c *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall) End(end string) *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall { c.urlParams_.Set("end", end) return c } // Start sets the optional parameter "start": The response includes -// records subsequent to the start date. If no start -// date is provided, all records prior to the end date are in scope. +// records subsequent to the start date. If no start date is provided, +// all records prior to the end date are in scope. 
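The hunk above adds `Since` and `Type` setters for the new `_since` and `_type` query parameters on Patient-everything, alongside the existing paging and date-range setters. A sketch of a call that uses them, with placeholder resource names; note that the FHIR passthrough calls return the raw *http.Response, so the searchset Bundle is read straight from the body.

package main

import (
    "context"
    "fmt"
    "io"
    "log"

    healthcare "google.golang.org/api/healthcare/v1"
)

func main() {
    ctx := context.Background()

    svc, err := healthcare.NewService(ctx)
    if err != nil {
        log.Fatalf("healthcare.NewService: %v", err)
    }

    // Fully qualified Patient resource name (placeholder values).
    name := "projects/my-project/locations/us-central1/datasets/my-dataset/fhirStores/my-store/fhir/Patient/patient-id"

    // _since restricts results to resources updated after the given instant;
    // _type narrows the returned resource types.
    call := svc.Projects.Locations.Datasets.FhirStores.Fhir.PatientEverything(name).
        Since("2017-01-01T00:00:00Z").
        Type("Observation,Encounter")

    // The FHIR passthrough methods return the raw HTTP response; on success
    // the body is a JSON-encoded Bundle of type searchset.
    resp, err := call.Do()
    if err != nil {
        log.Fatalf("Patient-everything: %v", err)
    }
    defer resp.Body.Close()

    bundle, err := io.ReadAll(resp.Body)
    if err != nil {
        log.Fatalf("reading response: %v", err)
    }
    fmt.Println(string(bundle))
}

Continuation pages, if any, would be fetched by feeding the bundle's next-link token back through PageToken, per the `_page_token` description above.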
func (c *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall) Start(start string) *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall { c.urlParams_.Set("start", start) return c @@ -11453,7 +11169,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall) Header() func (c *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11482,7 +11198,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall) Do(opts . gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "Retrieves a Patient resource and resources related to that patient.\n\nImplements the FHIR extended operation Patient-everything\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/patient-operations.html#everything),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/patient-operations.html#everything),\n[R4](http://hl7.org/implement/standards/fhir/R4/patient-operations.html#everything)).\n\nOn success, the response body will contain a JSON-encoded representation\nof a `Bundle` resource of type `searchset`, containing the results of the\noperation.\nErrors generated by the FHIR store will contain a JSON-encoded\n`OperationOutcome` resource describing the reason for the error. If the\nrequest cannot be mapped to a valid API method on a FHIR store, a generic\nGCP error might be returned instead.\n\nThe resources in scope for the response are:\n\n* The patient resource itself.\n* All the resources directly referenced by the patient resource.\n* Resources directly referencing the patient resource that meet the\n inclusion criteria. The inclusion criteria are based on the membership\n rules in the patient compartment definition\n ([DSTU2](http://hl7.org/fhir/DSTU2/compartment-patient.html),\n [STU3](http://www.hl7.org/fhir/stu3/compartmentdefinition-patient.html),\n [R4](http://hl7.org/fhir/R4/compartmentdefinition-patient.html)), which\n details the eligible resource types and referencing search parameters.", + // "description": "Retrieves a Patient resource and resources related to that patient. Implements the FHIR extended operation Patient-everything ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/patient-operations.html#everything), [STU3](http://hl7.org/implement/standards/fhir/STU3/patient-operations.html#everything), [R4](http://hl7.org/implement/standards/fhir/R4/patient-operations.html#everything)). On success, the response body will contain a JSON-encoded representation of a `Bundle` resource of type `searchset`, containing the results of the operation. Errors generated by the FHIR store will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. The resources in scope for the response are: * The patient resource itself. * All the resources directly referenced by the patient resource. * Resources directly referencing the patient resource that meet the inclusion criteria. 
The inclusion criteria are based on the membership rules in the patient compartment definition ([DSTU2](http://hl7.org/fhir/DSTU2/compartment-patient.html), [STU3](http://www.hl7.org/fhir/stu3/compartmentdefinition-patient.html), [R4](http://hl7.org/fhir/R4/compartmentdefinition-patient.html)), which details the eligible resource types and referencing search parameters. For samples that show how to call `Patient-everything`, see [Getting all patient compartment resources](/healthcare/docs/how-tos/fhir-resources#getting_all_patient_compartment_resources).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/Patient/{PatientId}/$everything", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.fhirStores.fhir.Patient-everything", @@ -11497,12 +11213,22 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall) Do(opts . // "type": "integer" // }, // "_page_token": { - // "description": "Used to retrieve the next or previous page of results\nwhen using pagination. Value should be set to the value of page_token set\nin next or previous page links' urls. Next and previous page are returned\nin the response bundle's links field, where `link.relation` is \"previous\"\nor \"next\".\n\nOmit `page_token` if no previous request has been made.", + // "description": "Used to retrieve the next or previous page of results when using pagination. Set `_page_token` to the value of _page_token set in next or previous page links' url. Next and previous page are returned in the response bundle's links field, where `link.relation` is \"previous\" or \"next\". Omit `_page_token` if no previous request has been made.", + // "location": "query", + // "type": "string" + // }, + // "_since": { + // "description": "If provided, only resources updated after this time are returned. The time uses the format YYYY-MM-DDThh:mm:ss.sss+zz:zz. For example, `2015-02-07T13:28:17.239+02:00` or `2017-01-01T00:00:00Z`. The time must be specified to the second and include a time zone.", + // "location": "query", + // "type": "string" + // }, + // "_type": { + // "description": "String of comma-delimited FHIR resource types. If provided, only resources of the specified resource type(s) are returned.", // "location": "query", // "type": "string" // }, // "end": { - // "description": "The response includes records prior to the end date. If no end date is\nprovided, all records subsequent to the start date are in scope.", + // "description": "The response includes records prior to the end date. If no end date is provided, all records subsequent to the start date are in scope.", // "location": "query", // "type": "string" // }, @@ -11514,7 +11240,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall) Do(opts . // "type": "string" // }, // "start": { - // "description": "The response includes records subsequent to the start date. If no start\ndate is provided, all records prior to the end date are in scope.", + // "description": "The response includes records subsequent to the start date. If no start date is provided, all records prior to the end date are in scope.", // "location": "query", // "type": "string" // } @@ -11541,12 +11267,13 @@ type ProjectsLocationsDatasetsFhirStoresFhirResourcePurgeCall struct { } // ResourcePurge: Deletes all the historical versions of a resource -// (excluding the current -// version) from the FHIR store. 
To remove all versions of a resource, -// first -// delete the current version and then call this method. -// -// This is not a FHIR standard operation. +// (excluding the current version) from the FHIR store. To remove all +// versions of a resource, first delete the current version and then +// call this method. This is not a FHIR standard operation. For samples +// that show how to call `Resource-purge`, see [Deleting historical +// versions of a FHIR +// resource](/healthcare/docs/how-tos/fhir-resources#deleting_historical_ +// versions_of_a_fhir_resource). func (r *ProjectsLocationsDatasetsFhirStoresFhirService) ResourcePurge(name string) *ProjectsLocationsDatasetsFhirStoresFhirResourcePurgeCall { c := &ProjectsLocationsDatasetsFhirStoresFhirResourcePurgeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -11580,7 +11307,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirResourcePurgeCall) Header() http func (c *ProjectsLocationsDatasetsFhirStoresFhirResourcePurgeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11639,7 +11366,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirResourcePurgeCall) Do(opts ...go } return ret, nil // { - // "description": "Deletes all the historical versions of a resource (excluding the current\nversion) from the FHIR store. To remove all versions of a resource, first\ndelete the current version and then call this method.\n\nThis is not a FHIR standard operation.", + // "description": "Deletes all the historical versions of a resource (excluding the current version) from the FHIR store. To remove all versions of a resource, first delete the current version and then call this method. This is not a FHIR standard operation. For samples that show how to call `Resource-purge`, see [Deleting historical versions of a FHIR resource](/healthcare/docs/how-tos/fhir-resources#deleting_historical_versions_of_a_fhir_resource).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{fhirId}/{fhirId1}/$purge", // "httpMethod": "DELETE", // "id": "healthcare.projects.locations.datasets.fhirStores.fhir.Resource-purge", @@ -11677,35 +11404,23 @@ type ProjectsLocationsDatasetsFhirStoresFhirCapabilitiesCall struct { header_ http.Header } -// Capabilities: Gets the FHIR capability -// statement -// ([STU3](http://hl7.org/implement/standards/fhir/STU3/capabil -// itystatement.html), -// [R4](http://hl7.org/implement/standards/fhir/R4/ca -// pabilitystatement.html)), -// or the -// [conformance -// statement](http://hl7.org/implement/standards/fhir/DSTU2/ -// conformance.html) -// in the DSTU2 case for the store, which contains a description -// of -// functionality supported by the server. -// -// Implements the FHIR standard capabilities -// interaction -// ([STU3](http://hl7.org/implement/standards/fhir/STU3/http. -// html#capabilities), -// [R4](http://hl7.org/implement/standards/fhir/R4/ht -// tp.html#capabilities)), -// or the -// [conformance -// interaction](http://hl7.org/implement/standards/fhir/DSTU -// 2/http.html#conformance) -// in the DSTU2 case. -// -// On success, the response body will contain a JSON-encoded -// representation -// of a `CapabilityStatement` resource. 
+// Capabilities: Gets the FHIR capability statement +// ([STU3](http://hl7.org/implement/standards/fhir/STU3/capabilitystateme +// nt.html), +// [R4](http://hl7.org/implement/standards/fhir/R4/capabilitystatement.ht +// ml)), or the [conformance +// statement](http://hl7.org/implement/standards/fhir/DSTU2/conformance.h +// tml) in the DSTU2 case for the store, which contains a description of +// functionality supported by the server. Implements the FHIR standard +// capabilities interaction +// ([STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#capabil +// ities), +// [R4](http://hl7.org/implement/standards/fhir/R4/http.html#capabilities +// )), or the [conformance +// interaction](http://hl7.org/implement/standards/fhir/DSTU2/http.html#c +// onformance) in the DSTU2 case. On success, the response body will +// contain a JSON-encoded representation of a `CapabilityStatement` +// resource. func (r *ProjectsLocationsDatasetsFhirStoresFhirService) Capabilities(name string) *ProjectsLocationsDatasetsFhirStoresFhirCapabilitiesCall { c := &ProjectsLocationsDatasetsFhirStoresFhirCapabilitiesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -11749,7 +11464,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirCapabilitiesCall) Header() http. func (c *ProjectsLocationsDatasetsFhirStoresFhirCapabilitiesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11778,7 +11493,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirCapabilitiesCall) Do(opts ...goo gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "Gets the FHIR capability statement\n([STU3](http://hl7.org/implement/standards/fhir/STU3/capabilitystatement.html),\n[R4](http://hl7.org/implement/standards/fhir/R4/capabilitystatement.html)),\nor the [conformance\nstatement](http://hl7.org/implement/standards/fhir/DSTU2/conformance.html)\nin the DSTU2 case for the store, which contains a description of\nfunctionality supported by the server.\n\nImplements the FHIR standard capabilities interaction\n([STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#capabilities),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#capabilities)),\nor the [conformance\ninteraction](http://hl7.org/implement/standards/fhir/DSTU2/http.html#conformance)\nin the DSTU2 case.\n\nOn success, the response body will contain a JSON-encoded representation\nof a `CapabilityStatement` resource.", + // "description": "Gets the FHIR capability statement ([STU3](http://hl7.org/implement/standards/fhir/STU3/capabilitystatement.html), [R4](http://hl7.org/implement/standards/fhir/R4/capabilitystatement.html)), or the [conformance statement](http://hl7.org/implement/standards/fhir/DSTU2/conformance.html) in the DSTU2 case for the store, which contains a description of functionality supported by the server. Implements the FHIR standard capabilities interaction ([STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#capabilities), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#capabilities)), or the [conformance interaction](http://hl7.org/implement/standards/fhir/DSTU2/http.html#conformance) in the DSTU2 case. 
On success, the response body will contain a JSON-encoded representation of a `CapabilityStatement` resource.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/metadata", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.fhirStores.fhir.capabilities", @@ -11817,34 +11532,25 @@ type ProjectsLocationsDatasetsFhirStoresFhirCreateCall struct { header_ http.Header } -// Create: Creates a FHIR resource. -// -// Implements the FHIR standard create +// Create: Creates a FHIR resource. Implements the FHIR standard create // interaction -// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/htt -// p.html#create), -// [STU3](http://hl7.org/implement/standards/fhir/STU3/ht -// tp.html#create), -// [R4](http://hl7.org/implement/standards/fhir/R4/http. -// html#create)), -// which creates a new resource with a server-assigned resource ID. -// -// The request body must contain a JSON-encoded FHIR resource, and the -// request -// headers must contain `Content-Type: application/fhir+json`. -// +// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#creat +// e), +// [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#create), +// [R4](http://hl7.org/implement/standards/fhir/R4/http.html#create)), +// which creates a new resource with a server-assigned resource ID. The +// request body must contain a JSON-encoded FHIR resource, and the +// request headers must contain `Content-Type: application/fhir+json`. // On success, the response body will contain a JSON-encoded -// representation -// of the resource as it was created on the server, including -// the -// server-assigned resource ID and version ID. -// Errors generated by the FHIR store will contain a -// JSON-encoded +// representation of the resource as it was created on the server, +// including the server-assigned resource ID and version ID. Errors +// generated by the FHIR store will contain a JSON-encoded // `OperationOutcome` resource describing the reason for the error. If -// the -// request cannot be mapped to a valid API method on a FHIR store, a -// generic -// GCP error might be returned instead. +// the request cannot be mapped to a valid API method on a FHIR store, a +// generic GCP error might be returned instead. For samples that show +// how to call `create`, see [Creating a FHIR +// resource](/healthcare/docs/how-tos/fhir-resources#creating_a_fhir_reso +// urce). func (r *ProjectsLocationsDatasetsFhirStoresFhirService) Create(parent string, type_ string, body_ io.Reader) *ProjectsLocationsDatasetsFhirStoresFhirCreateCall { c := &ProjectsLocationsDatasetsFhirStoresFhirCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -11880,7 +11586,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirCreateCall) Header() http.Header func (c *ProjectsLocationsDatasetsFhirStoresFhirCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11906,7 +11612,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirCreateCall) Do(opts ...googleapi gensupport.SetOptions(c.urlParams_, opts...) 
return c.doRequest("") // { - // "description": "Creates a FHIR resource.\n\nImplements the FHIR standard create interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#create),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#create),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#create)),\nwhich creates a new resource with a server-assigned resource ID.\n\nThe request body must contain a JSON-encoded FHIR resource, and the request\nheaders must contain `Content-Type: application/fhir+json`.\n\nOn success, the response body will contain a JSON-encoded representation\nof the resource as it was created on the server, including the\nserver-assigned resource ID and version ID.\nErrors generated by the FHIR store will contain a JSON-encoded\n`OperationOutcome` resource describing the reason for the error. If the\nrequest cannot be mapped to a valid API method on a FHIR store, a generic\nGCP error might be returned instead.", + // "description": "Creates a FHIR resource. Implements the FHIR standard create interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#create), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#create), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#create)), which creates a new resource with a server-assigned resource ID. The request body must contain a JSON-encoded FHIR resource, and the request headers must contain `Content-Type: application/fhir+json`. On success, the response body will contain a JSON-encoded representation of the resource as it was created on the server, including the server-assigned resource ID and version ID. Errors generated by the FHIR store will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. For samples that show how to call `create`, see [Creating a FHIR resource](/healthcare/docs/how-tos/fhir-resources#creating_a_fhir_resource).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{fhirId}", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.fhirStores.fhir.create", @@ -11923,7 +11629,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirCreateCall) Do(opts ...googleapi // "type": "string" // }, // "type": { - // "description": "The FHIR resource type to create, such as Patient or Observation. For a\ncomplete list, see the FHIR Resource Index\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/resourcelist.html),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/resourcelist.html),\n[R4](http://hl7.org/implement/standards/fhir/R4/resourcelist.html)).\nMust match the resource type in the provided content.", + // "description": "The FHIR resource type to create, such as Patient or Observation. For a complete list, see the FHIR Resource Index ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/resourcelist.html), [STU3](http://hl7.org/implement/standards/fhir/STU3/resourcelist.html), [R4](http://hl7.org/implement/standards/fhir/R4/resourcelist.html)). Must match the resource type in the provided content.", // "location": "path", // "pattern": "^[^/]+$", // "required": true, @@ -11954,25 +11660,20 @@ type ProjectsLocationsDatasetsFhirStoresFhirDeleteCall struct { header_ http.Header } -// Delete: Deletes a FHIR resource. -// -// Implements the FHIR standard delete +// Delete: Deletes a FHIR resource. 
Implements the FHIR standard delete // interaction -// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/htt -// p.html#delete), -// [STU3](http://hl7.org/implement/standards/fhir/STU3/ht -// tp.html#delete), -// [R4](http://hl7.org/implement/standards/fhir/R4/http. -// html#delete)). -// -// Note: Unless resource versioning is disabled by setting -// the -// disable_resource_versioning flag -// on the FHIR store, the deleted resources will be moved to a -// history -// repository that can still be retrieved through vread -// and related methods, unless they are removed by the -// purge method. +// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#delet +// e), +// [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#delete), +// [R4](http://hl7.org/implement/standards/fhir/R4/http.html#delete)). +// Note: Unless resource versioning is disabled by setting the +// disable_resource_versioning flag on the FHIR store, the deleted +// resources will be moved to a history repository that can still be +// retrieved through vread and related methods, unless they are removed +// by the purge method. For samples that show how to call `delete`, see +// [Deleting a FHIR +// resource](/healthcare/docs/how-tos/fhir-resources#deleting_a_fhir_reso +// urce). func (r *ProjectsLocationsDatasetsFhirStoresFhirService) Delete(name string) *ProjectsLocationsDatasetsFhirStoresFhirDeleteCall { c := &ProjectsLocationsDatasetsFhirStoresFhirDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -12006,7 +11707,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirDeleteCall) Header() http.Header func (c *ProjectsLocationsDatasetsFhirStoresFhirDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12032,7 +11733,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirDeleteCall) Do(opts ...googleapi gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "Deletes a FHIR resource.\n\nImplements the FHIR standard delete interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#delete),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#delete),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#delete)).\n\nNote: Unless resource versioning is disabled by setting the\ndisable_resource_versioning flag\non the FHIR store, the deleted resources will be moved to a history\nrepository that can still be retrieved through vread\nand related methods, unless they are removed by the\npurge method.", + // "description": "Deletes a FHIR resource. Implements the FHIR standard delete interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#delete), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#delete), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#delete)). Note: Unless resource versioning is disabled by setting the disable_resource_versioning flag on the FHIR store, the deleted resources will be moved to a history repository that can still be retrieved through vread and related methods, unless they are removed by the purge method. 
For samples that show how to call `delete`, see [Deleting a FHIR resource](/healthcare/docs/how-tos/fhir-resources#deleting_a_fhir_resource).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{fhirId}/{fhirId1}", // "httpMethod": "DELETE", // "id": "healthcare.projects.locations.datasets.fhirStores.fhir.delete", @@ -12070,59 +11771,40 @@ type ProjectsLocationsDatasetsFhirStoresFhirExecuteBundleCall struct { header_ http.Header } -// ExecuteBundle: Executes all the requests in the given -// Bundle. -// -// Implements the FHIR standard batch/transaction -// interaction -// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/htt -// p.html#transaction), -// [STU3](http://hl7.org/implement/standards/fhir/ST -// U3/http.html#transaction), -// [R4](http://hl7.org/implement/standards/fhi -// r/R4/http.html#transaction)). -// -// Supports all interactions within a bundle, except search. This -// method -// accepts Bundles of type `batch` and `transaction`, processing -// them -// according to the batch processing -// rules -// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html -// #2.1.0.16.1), -// [STU3](http://hl7.org/implement/standards/fhir/STU3/http -// .html#2.21.0.17.1), -// [R4](http://hl7.org/implement/standards/fhir/R4/ht -// tp.html#brules)) -// and transaction processing -// rules -// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html -// #2.1.0.16.2), -// [STU3](http://hl7.org/implement/standards/fhir/STU3/http -// .html#2.21.0.17.2), -// [R4](http://hl7.org/implement/standards/fhir/R4/ht -// tp.html#trules)). -// +// ExecuteBundle: Executes all the requests in the given Bundle. +// Implements the FHIR standard batch/transaction interaction +// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#trans +// action), +// [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#transact +// ion), +// [R4](http://hl7.org/implement/standards/fhir/R4/http.html#transaction) +// ). Supports all interactions within a bundle, except search. This +// method accepts Bundles of type `batch` and `transaction`, processing +// them according to the batch processing rules +// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#2.1.0 +// .16.1), +// [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#2.21.0.1 +// 7.1), +// [R4](http://hl7.org/implement/standards/fhir/R4/http.html#brules)) +// and transaction processing rules +// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#2.1.0 +// .16.2), +// [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#2.21.0.1 +// 7.2), +// [R4](http://hl7.org/implement/standards/fhir/R4/http.html#trules)). // The request body must contain a JSON-encoded FHIR `Bundle` resource, -// and -// the request headers must contain `Content-Type: -// application/fhir+json`. -// -// For a batch bundle or a successful transaction the response body -// will -// contain a JSON-encoded representation of a `Bundle` resource of -// type -// `batch-response` or `transaction-response` containing one entry for -// each -// entry in the request, with the outcome of processing the entry. In -// the -// case of an error for a transaction bundle, the response body will -// contain -// a JSON-encoded `OperationOutcome` resource describing the reason for -// the -// error. If the request cannot be mapped to a valid API method on a -// FHIR -// store, a generic GCP error might be returned instead. 
+// and the request headers must contain `Content-Type: +// application/fhir+json`. For a batch bundle or a successful +// transaction the response body will contain a JSON-encoded +// representation of a `Bundle` resource of type `batch-response` or +// `transaction-response` containing one entry for each entry in the +// request, with the outcome of processing the entry. In the case of an +// error for a transaction bundle, the response body will contain a +// JSON-encoded `OperationOutcome` resource describing the reason for +// the error. If the request cannot be mapped to a valid API method on a +// FHIR store, a generic GCP error might be returned instead. For +// samples that show how to call `executeBundle`, see [Managing FHIR +// resources using FHIR bundles](/healthcare/docs/how-tos/fhir-bundles). func (r *ProjectsLocationsDatasetsFhirStoresFhirService) ExecuteBundle(parent string, body_ io.Reader) *ProjectsLocationsDatasetsFhirStoresFhirExecuteBundleCall { c := &ProjectsLocationsDatasetsFhirStoresFhirExecuteBundleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -12157,7 +11839,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirExecuteBundleCall) Header() http func (c *ProjectsLocationsDatasetsFhirStoresFhirExecuteBundleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12182,7 +11864,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirExecuteBundleCall) Do(opts ...go gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "Executes all the requests in the given Bundle.\n\nImplements the FHIR standard batch/transaction interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#transaction),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#transaction),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#transaction)).\n\nSupports all interactions within a bundle, except search. This method\naccepts Bundles of type `batch` and `transaction`, processing them\naccording to the batch processing rules\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#2.1.0.16.1),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#2.21.0.17.1),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#brules))\nand transaction processing rules\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#2.1.0.16.2),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#2.21.0.17.2),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#trules)).\n\nThe request body must contain a JSON-encoded FHIR `Bundle` resource, and\nthe request headers must contain `Content-Type: application/fhir+json`.\n\nFor a batch bundle or a successful transaction the response body will\ncontain a JSON-encoded representation of a `Bundle` resource of type\n`batch-response` or `transaction-response` containing one entry for each\nentry in the request, with the outcome of processing the entry. In the\ncase of an error for a transaction bundle, the response body will contain\na JSON-encoded `OperationOutcome` resource describing the reason for the\nerror. 
If the request cannot be mapped to a valid API method on a FHIR\nstore, a generic GCP error might be returned instead.", + // "description": "Executes all the requests in the given Bundle. Implements the FHIR standard batch/transaction interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#transaction), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#transaction), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#transaction)). Supports all interactions within a bundle, except search. This method accepts Bundles of type `batch` and `transaction`, processing them according to the batch processing rules ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#2.1.0.16.1), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#2.21.0.17.1), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#brules)) and transaction processing rules ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#2.1.0.16.2), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#2.21.0.17.2), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#trules)). The request body must contain a JSON-encoded FHIR `Bundle` resource, and the request headers must contain `Content-Type: application/fhir+json`. For a batch bundle or a successful transaction the response body will contain a JSON-encoded representation of a `Bundle` resource of type `batch-response` or `transaction-response` containing one entry for each entry in the request, with the outcome of processing the entry. In the case of an error for a transaction bundle, the response body will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. For samples that show how to call `executeBundle`, see [Managing FHIR resources using FHIR bundles](/healthcare/docs/how-tos/fhir-bundles).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.fhirStores.fhir.executeBundle", @@ -12224,30 +11906,23 @@ type ProjectsLocationsDatasetsFhirStoresFhirHistoryCall struct { } // History: Lists all the versions of a resource (including the current -// version and -// deleted versions) from the FHIR store. -// -// Implements the per-resource form of the FHIR standard history -// interaction -// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/htt -// p.html#history), -// [STU3](http://hl7.org/implement/standards/fhir/STU3/h -// ttp.html#history), -// [R4](http://hl7.org/implement/standards/fhir/R4/htt -// p.html#history)). -// +// version and deleted versions) from the FHIR store. Implements the +// per-resource form of the FHIR standard history interaction +// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#histo +// ry), +// [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#history) +// , +// [R4](http://hl7.org/implement/standards/fhir/R4/http.html#history)). // On success, the response body will contain a JSON-encoded -// representation -// of a `Bundle` resource of type `history`, containing the version -// history -// sorted from most recent to oldest versions. -// Errors generated by the FHIR store will contain a -// JSON-encoded +// representation of a `Bundle` resource of type `history`, containing +// the version history sorted from most recent to oldest versions. 
+// Errors generated by the FHIR store will contain a JSON-encoded // `OperationOutcome` resource describing the reason for the error. If -// the -// request cannot be mapped to a valid API method on a FHIR store, a -// generic -// GCP error might be returned instead. +// the request cannot be mapped to a valid API method on a FHIR store, a +// generic GCP error might be returned instead. For samples that show +// how to call `history`, see [Listing FHIR resource +// versions](/healthcare/docs/how-tos/fhir-resources#listing_fhir_resourc +// e_versions). func (r *ProjectsLocationsDatasetsFhirStoresFhirService) History(name string) *ProjectsLocationsDatasetsFhirStoresFhirHistoryCall { c := &ProjectsLocationsDatasetsFhirStoresFhirHistoryCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -12255,17 +11930,12 @@ func (r *ProjectsLocationsDatasetsFhirStoresFhirService) History(name string) *P } // At sets the optional parameter "_at": Only include resource versions -// that were current at some point during the -// time period specified in the date time value. The date parameter -// format is -// yyyy-mm-ddThh:mm:ss[Z|(+|-)hh:mm] -// -// Clients may specify any of the following: -// -// * An entire year: `_at=2019` -// * An entire month: `_at=2019-01` -// * A specific day: `_at=2019-01-20` -// * A specific second: `_at=2018-12-31T23:59:58Z` +// that were current at some point during the time period specified in +// the date time value. The date parameter format is +// yyyy-mm-ddThh:mm:ss[Z|(+|-)hh:mm] Clients may specify any of the +// following: * An entire year: `_at=2019` * An entire month: +// `_at=2019-01` * A specific day: `_at=2019-01-20` * A specific second: +// `_at=2018-12-31T23:59:58Z` func (c *ProjectsLocationsDatasetsFhirStoresFhirHistoryCall) At(At string) *ProjectsLocationsDatasetsFhirStoresFhirHistoryCall { c.urlParams_.Set("_at", At) return c @@ -12279,30 +11949,22 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirHistoryCall) Count(Count int64) } // PageToken sets the optional parameter "_page_token": Used to retrieve -// the first, previous, next, or last page of resource -// versions when using pagination. Value should be set to the value -// of -// `_page_token` set in next or previous page links' URLs. Next and -// previous -// page are returned in the response bundle's links field, -// where -// `link.relation` is "previous" or "next". -// -// Omit `_page_token` if no previous request has been made. +// the first, previous, next, or last page of resource versions when +// using pagination. Value should be set to the value of `_page_token` +// set in next or previous page links' URLs. Next and previous page are +// returned in the response bundle's links field, where `link.relation` +// is "previous" or "next". Omit `_page_token` if no previous request +// has been made. func (c *ProjectsLocationsDatasetsFhirStoresFhirHistoryCall) PageToken(PageToken string) *ProjectsLocationsDatasetsFhirStoresFhirHistoryCall { c.urlParams_.Set("_page_token", PageToken) return c } // Since sets the optional parameter "_since": Only include resource -// versions that were created at or after the given -// instant in time. The instant in time uses the -// format -// YYYY-MM-DDThh:mm:ss.sss+zz:zz (for example -// 2015-02-07T13:28:17.239+02:00 or -// 2017-01-01T00:00:00Z). The time must be specified to the second -// and -// include a time zone. +// versions that were created at or after the given instant in time. 
The +// instant in time uses the format YYYY-MM-DDThh:mm:ss.sss+zz:zz (for +// example 2015-02-07T13:28:17.239+02:00 or 2017-01-01T00:00:00Z). The +// time must be specified to the second and include a time zone. func (c *ProjectsLocationsDatasetsFhirStoresFhirHistoryCall) Since(Since string) *ProjectsLocationsDatasetsFhirStoresFhirHistoryCall { c.urlParams_.Set("_since", Since) return c @@ -12345,7 +12007,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirHistoryCall) Header() http.Heade func (c *ProjectsLocationsDatasetsFhirStoresFhirHistoryCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12374,7 +12036,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirHistoryCall) Do(opts ...googleap gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "Lists all the versions of a resource (including the current version and\ndeleted versions) from the FHIR store.\n\nImplements the per-resource form of the FHIR standard history interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#history),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#history),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#history)).\n\nOn success, the response body will contain a JSON-encoded representation\nof a `Bundle` resource of type `history`, containing the version history\nsorted from most recent to oldest versions.\nErrors generated by the FHIR store will contain a JSON-encoded\n`OperationOutcome` resource describing the reason for the error. If the\nrequest cannot be mapped to a valid API method on a FHIR store, a generic\nGCP error might be returned instead.", + // "description": "Lists all the versions of a resource (including the current version and deleted versions) from the FHIR store. Implements the per-resource form of the FHIR standard history interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#history), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#history), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#history)). On success, the response body will contain a JSON-encoded representation of a `Bundle` resource of type `history`, containing the version history sorted from most recent to oldest versions. Errors generated by the FHIR store will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. For samples that show how to call `history`, see [Listing FHIR resource versions](/healthcare/docs/how-tos/fhir-resources#listing_fhir_resource_versions).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{fhirId}/{fhirId1}/_history", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.fhirStores.fhir.history", @@ -12383,7 +12045,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirHistoryCall) Do(opts ...googleap // ], // "parameters": { // "_at": { - // "description": "Only include resource versions that were current at some point during the\ntime period specified in the date time value. 
The date parameter format is\nyyyy-mm-ddThh:mm:ss[Z|(+|-)hh:mm]\n\nClients may specify any of the following:\n\n* An entire year: `_at=2019`\n* An entire month: `_at=2019-01`\n* A specific day: `_at=2019-01-20`\n* A specific second: `_at=2018-12-31T23:59:58Z`", + // "description": "Only include resource versions that were current at some point during the time period specified in the date time value. The date parameter format is yyyy-mm-ddThh:mm:ss[Z|(+|-)hh:mm] Clients may specify any of the following: * An entire year: `_at=2019` * An entire month: `_at=2019-01` * A specific day: `_at=2019-01-20` * A specific second: `_at=2018-12-31T23:59:58Z`", // "location": "query", // "type": "string" // }, @@ -12394,12 +12056,12 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirHistoryCall) Do(opts ...googleap // "type": "integer" // }, // "_page_token": { - // "description": "Used to retrieve the first, previous, next, or last page of resource\nversions when using pagination. Value should be set to the value of\n`_page_token` set in next or previous page links' URLs. Next and previous\npage are returned in the response bundle's links field, where\n`link.relation` is \"previous\" or \"next\".\n\nOmit `_page_token` if no previous request has been made.", + // "description": "Used to retrieve the first, previous, next, or last page of resource versions when using pagination. Value should be set to the value of `_page_token` set in next or previous page links' URLs. Next and previous page are returned in the response bundle's links field, where `link.relation` is \"previous\" or \"next\". Omit `_page_token` if no previous request has been made.", // "location": "query", // "type": "string" // }, // "_since": { - // "description": "Only include resource versions that were created at or after the given\ninstant in time. The instant in time uses the format\nYYYY-MM-DDThh:mm:ss.sss+zz:zz (for example 2015-02-07T13:28:17.239+02:00 or\n2017-01-01T00:00:00Z). The time must be specified to the second and\ninclude a time zone.", + // "description": "Only include resource versions that were created at or after the given instant in time. The instant in time uses the format YYYY-MM-DDThh:mm:ss.sss+zz:zz (for example 2015-02-07T13:28:17.239+02:00 or 2017-01-01T00:00:00Z). The time must be specified to the second and include a time zone.", // "location": "query", // "type": "string" // }, @@ -12434,35 +12096,23 @@ type ProjectsLocationsDatasetsFhirStoresFhirPatchCall struct { } // Patch: Updates part of an existing resource by applying the -// operations specified -// in a [JSON Patch](http://jsonpatch.com/) document. -// -// Implements the FHIR standard patch -// interaction -// ([STU3](http://hl7.org/implement/standards/fhir/STU3/http. -// html#patch), -// [R4](http://hl7.org/implement/standards/fhir/R4/http.html -// #patch)). -// +// operations specified in a [JSON Patch](http://jsonpatch.com/) +// document. Implements the FHIR standard patch interaction +// ([STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#patch), +// [R4](http://hl7.org/implement/standards/fhir/R4/http.html#patch)). // DSTU2 doesn't define a patch method, but the server supports it in -// the same -// way it supports STU3. -// -// The request body must contain a JSON Patch document, and the -// request -// headers must contain `Content-Type: application/json-patch+json`. -// -// On success, the response body will contain a JSON-encoded -// representation -// of the updated resource, including the server-assigned version -// ID. 
-// Errors generated by the FHIR store will contain a -// JSON-encoded -// `OperationOutcome` resource describing the reason for the error. If -// the -// request cannot be mapped to a valid API method on a FHIR store, a -// generic -// GCP error might be returned instead. +// the same way it supports STU3. The request body must contain a JSON +// Patch document, and the request headers must contain `Content-Type: +// application/json-patch+json`. On success, the response body will +// contain a JSON-encoded representation of the updated resource, +// including the server-assigned version ID. Errors generated by the +// FHIR store will contain a JSON-encoded `OperationOutcome` resource +// describing the reason for the error. If the request cannot be mapped +// to a valid API method on a FHIR store, a generic GCP error might be +// returned instead. For samples that show how to call `patch`, see +// [Patching a FHIR +// resource](/healthcare/docs/how-tos/fhir-resources#patching_a_fhir_reso +// urce). func (r *ProjectsLocationsDatasetsFhirStoresFhirService) Patch(name string, body_ io.Reader) *ProjectsLocationsDatasetsFhirStoresFhirPatchCall { c := &ProjectsLocationsDatasetsFhirStoresFhirPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -12497,7 +12147,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirPatchCall) Header() http.Header func (c *ProjectsLocationsDatasetsFhirStoresFhirPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12522,7 +12172,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirPatchCall) Do(opts ...googleapi. gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "Updates part of an existing resource by applying the operations specified\nin a [JSON Patch](http://jsonpatch.com/) document.\n\nImplements the FHIR standard patch interaction\n([STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#patch),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#patch)).\n\nDSTU2 doesn't define a patch method, but the server supports it in the same\nway it supports STU3.\n\nThe request body must contain a JSON Patch document, and the request\nheaders must contain `Content-Type: application/json-patch+json`.\n\nOn success, the response body will contain a JSON-encoded representation\nof the updated resource, including the server-assigned version ID.\nErrors generated by the FHIR store will contain a JSON-encoded\n`OperationOutcome` resource describing the reason for the error. If the\nrequest cannot be mapped to a valid API method on a FHIR store, a generic\nGCP error might be returned instead.", + // "description": "Updates part of an existing resource by applying the operations specified in a [JSON Patch](http://jsonpatch.com/) document. Implements the FHIR standard patch interaction ([STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#patch), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#patch)). DSTU2 doesn't define a patch method, but the server supports it in the same way it supports STU3. The request body must contain a JSON Patch document, and the request headers must contain `Content-Type: application/json-patch+json`. 
On success, the response body will contain a JSON-encoded representation of the updated resource, including the server-assigned version ID. Errors generated by the FHIR store will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. For samples that show how to call `patch`, see [Patching a FHIR resource](/healthcare/docs/how-tos/fhir-resources#patching_a_fhir_resource).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{fhirId}/{fhirId1}", // "httpMethod": "PATCH", // "id": "healthcare.projects.locations.datasets.fhirStores.fhir.patch", @@ -12563,39 +12213,27 @@ type ProjectsLocationsDatasetsFhirStoresFhirReadCall struct { header_ http.Header } -// Read: Gets the contents of a FHIR resource. -// -// Implements the FHIR standard read -// interaction -// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/htt -// p.html#read), -// [STU3](http://hl7.org/implement/standards/fhir/STU3/http -// .html#read), -// [R4](http://hl7.org/implement/standards/fhir/R4/http.html -// #read)). -// -// Also supports the FHIR standard conditional read -// interaction -// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/htt -// p.html#cread), -// [STU3](http://hl7.org/implement/standards/fhir/STU3/htt -// p.html#cread), -// [R4](http://hl7.org/implement/standards/fhir/R4/http.ht -// ml#cread)) +// Read: Gets the contents of a FHIR resource. Implements the FHIR +// standard read interaction +// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#read) +// , +// [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#read), +// [R4](http://hl7.org/implement/standards/fhir/R4/http.html#read)). +// Also supports the FHIR standard conditional read interaction +// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#cread +// ), +// [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#cread), +// [R4](http://hl7.org/implement/standards/fhir/R4/http.html#cread)) // specified by supplying an `If-Modified-Since` header with a date/time -// value -// or an `If-None-Match` header with an ETag value. -// -// On success, the response body will contain a JSON-encoded -// representation -// of the resource. -// Errors generated by the FHIR store will contain a -// JSON-encoded -// `OperationOutcome` resource describing the reason for the error. If -// the -// request cannot be mapped to a valid API method on a FHIR store, a -// generic -// GCP error might be returned instead. +// value or an `If-None-Match` header with an ETag value. On success, +// the response body will contain a JSON-encoded representation of the +// resource. Errors generated by the FHIR store will contain a +// JSON-encoded `OperationOutcome` resource describing the reason for +// the error. If the request cannot be mapped to a valid API method on a +// FHIR store, a generic GCP error might be returned instead. For +// samples that show how to call `read`, see [Getting a FHIR +// resource](/healthcare/docs/how-tos/fhir-resources#getting_a_fhir_resou +// rce). 
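// Illustrative usage sketch (not part of the generated file above): one way a
// caller might perform the `read` interaction that the preceding doc comment
// describes. The project/dataset/store/resource IDs are placeholders, the
// nested Projects.Locations.Datasets.FhirStores.Fhir accessor chain is assumed
// to follow this generated client's usual layout, and Do() is assumed to
// return (*http.Response, error) carrying the raw FHIR JSON, as the FHIR
// passthrough methods in this package do.

package main

import (
	"context"
	"fmt"
	"io"
	"log"

	healthcare "google.golang.org/api/healthcare/v1"
)

func main() {
	ctx := context.Background()
	svc, err := healthcare.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatalf("healthcare.NewService: %v", err)
	}
	// Placeholder resource name of the form
	// projects/{project}/locations/{location}/datasets/{dataset}/fhirStores/{store}/fhir/{type}/{id}.
	name := "projects/my-project/locations/us-central1/datasets/my-dataset/fhirStores/my-store/fhir/Patient/p1"
	resp, err := svc.Projects.Locations.Datasets.FhirStores.Fhir.Read(name).Do()
	if err != nil {
		log.Fatalf("Read: %v", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("reading response body: %v", err)
	}
	fmt.Printf("read %d bytes of FHIR JSON\n", len(body))
}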
func (r *ProjectsLocationsDatasetsFhirStoresFhirService) Read(name string) *ProjectsLocationsDatasetsFhirStoresFhirReadCall { c := &ProjectsLocationsDatasetsFhirStoresFhirReadCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -12639,7 +12277,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirReadCall) Header() http.Header { func (c *ProjectsLocationsDatasetsFhirStoresFhirReadCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12668,7 +12306,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirReadCall) Do(opts ...googleapi.C gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "Gets the contents of a FHIR resource.\n\nImplements the FHIR standard read interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#read),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#read),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#read)).\n\nAlso supports the FHIR standard conditional read interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#cread),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#cread),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#cread))\nspecified by supplying an `If-Modified-Since` header with a date/time value\nor an `If-None-Match` header with an ETag value.\n\nOn success, the response body will contain a JSON-encoded representation\nof the resource.\nErrors generated by the FHIR store will contain a JSON-encoded\n`OperationOutcome` resource describing the reason for the error. If the\nrequest cannot be mapped to a valid API method on a FHIR store, a generic\nGCP error might be returned instead.", + // "description": "Gets the contents of a FHIR resource. Implements the FHIR standard read interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#read), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#read), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#read)). Also supports the FHIR standard conditional read interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#cread), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#cread), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#cread)) specified by supplying an `If-Modified-Since` header with a date/time value or an `If-None-Match` header with an ETag value. On success, the response body will contain a JSON-encoded representation of the resource. Errors generated by the FHIR store will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. 
For samples that show how to call `read`, see [Getting a FHIR resource](/healthcare/docs/how-tos/fhir-resources#getting_a_fhir_resource).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{fhirId}/{fhirId1}", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.fhirStores.fhir.read", @@ -12707,98 +12345,54 @@ type ProjectsLocationsDatasetsFhirStoresFhirSearchCall struct { } // Search: Searches for resources in the given FHIR store according to -// criteria -// specified as query parameters. -// -// Implements the FHIR standard search -// interaction -// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/htt -// p.html#search), -// [STU3](http://hl7.org/implement/standards/fhir/STU3/ht -// tp.html#search), -// [R4](http://hl7.org/implement/standards/fhir/R4/http. -// html#search)) -// using the search semantics described in the FHIR Search -// specification -// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/s -// earch.html), -// [STU3](http://hl7.org/implement/standards/fhir/STU3/searc -// h.html), -// [R4](http://hl7.org/implement/standards/fhir/R4/search.html)) -// . -// -// Supports three methods of search defined by the specification: -// -// * `GET [base]?[parameters]` to search across all resources. -// * `GET [base]/[type]?[parameters]` to search resources of a -// specified -// type. -// * `POST [base]/[type]/_search?[parameters]` as an alternate form -// having -// the same semantics as the `GET` method. -// -// The `GET` methods do not support compartment searches. The `POST` -// method -// does not support `application/x-www-form-urlencoded` search -// parameters. -// -// On success, the response body will contain a JSON-encoded -// representation -// of a `Bundle` resource of type `searchset`, containing the results of -// the -// search. -// Errors generated by the FHIR store will contain a -// JSON-encoded -// `OperationOutcome` resource describing the reason for the error. If -// the -// request cannot be mapped to a valid API method on a FHIR store, a -// generic -// GCP error might be returned instead. -// -// The server's capability statement, retrieved through -// capabilities, indicates what search parameters -// are supported on each FHIR resource. A list of all search -// parameters -// defined by the specification can be found in the FHIR Search -// Parameter -// Registry -// ([STU3](http://hl7.org/implement/standards/fhir/STU -// 3/searchparameter-registry.html), -// [R4](http://hl7.org/implement/standa -// rds/fhir/R4/searchparameter-registry.html)). -// FHIR search parameters for DSTU2 can be found on each resource's -// definition -// page. -// -// Supported search modifiers: `:missing`, `:exact`, `:contains`, -// `:text`, -// `:in`, `:not-in`, `:above`, `:below`, `:[type]`, `:not`, and -// `:recurse`. -// -// Supported search result parameters: `_sort`, `_count`, -// `_include`, -// `_revinclude`, `_summary=text`, `_summary=data`, and -// `_elements`. -// -// The maximum number of search results returned defaults to 100, which -// can -// be overridden by the `_count` parameter up to a maximum limit of -// 1000. If -// there are additional results, the returned `Bundle` will -// contain -// pagination links. -// -// Resources with a total size larger than 5MB or a field count larger -// than -// 50,000 might not be fully searchable as the server might trim its -// generated -// search index in those cases. 
-// -// Note: FHIR resources are indexed asynchronously, so there might be a -// slight -// delay between the time a resource is created or changes and when the -// change -// is reflected in search results. +// criteria specified as query parameters. Implements the FHIR standard +// search interaction +// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#searc +// h), +// [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#search), +// [R4](http://hl7.org/implement/standards/fhir/R4/http.html#search)) +// using the search semantics described in the FHIR Search specification +// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/search.html), +// [STU3](http://hl7.org/implement/standards/fhir/STU3/search.html), +// [R4](http://hl7.org/implement/standards/fhir/R4/search.html)). +// Supports three methods of search defined by the specification: * `GET +// [base]?[parameters]` to search across all resources. * `GET +// [base]/[type]?[parameters]` to search resources of a specified type. +// * `POST [base]/[type]/_search?[parameters]` as an alternate form +// having the same semantics as the `GET` method. The `GET` methods do +// not support compartment searches. The `POST` method does not support +// `application/x-www-form-urlencoded` search parameters. On success, +// the response body will contain a JSON-encoded representation of a +// `Bundle` resource of type `searchset`, containing the results of the +// search. Errors generated by the FHIR store will contain a +// JSON-encoded `OperationOutcome` resource describing the reason for +// the error. If the request cannot be mapped to a valid API method on a +// FHIR store, a generic GCP error might be returned instead. The +// server's capability statement, retrieved through capabilities, +// indicates what search parameters are supported on each FHIR resource. +// A list of all search parameters defined by the specification can be +// found in the FHIR Search Parameter Registry +// ([STU3](http://hl7.org/implement/standards/fhir/STU3/searchparameter-r +// egistry.html), +// [R4](http://hl7.org/implement/standards/fhir/R4/searchparameter-regist +// ry.html)). FHIR search parameters for DSTU2 can be found on each +// resource's definition page. Supported search modifiers: `:missing`, +// `:exact`, `:contains`, `:text`, `:in`, `:not-in`, `:above`, `:below`, +// `:[type]`, `:not`, and `:recurse`. Supported search result +// parameters: `_sort`, `_count`, `_include`, `_revinclude`, +// `_summary=text`, `_summary=data`, and `_elements`. The maximum number +// of search results returned defaults to 100, which can be overridden +// by the `_count` parameter up to a maximum limit of 1000. If there are +// additional results, the returned `Bundle` will contain pagination +// links. Resources with a total size larger than 5MB or a field count +// larger than 50,000 might not be fully searchable as the server might +// trim its generated search index in those cases. Note: FHIR resources +// are indexed asynchronously, so there might be a slight delay between +// the time a resource is created or changes and when the change is +// reflected in search results. For samples and detailed information, +// see [Searching for FHIR +// resources](/healthcare/docs/how-tos/fhir-search) and [Advanced FHIR +// search features](/healthcare/docs/how-tos/fhir-advanced-search). 
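// Illustrative usage sketch (not part of the generated file above), assuming
// the same `healthcare` import and an authenticated Service as in the earlier
// read example: it issues the `search` interaction described in the preceding
// doc comment across the whole store, with an empty SearchResourcesRequest
// (no resource type or search parameters), then counts the entries in the
// returned `searchset` Bundle. The parent path is a placeholder and Do() is
// assumed to return (*http.Response, error) with the Bundle JSON in the body.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"

	healthcare "google.golang.org/api/healthcare/v1"
)

func main() {
	ctx := context.Background()
	svc, err := healthcare.NewService(ctx)
	if err != nil {
		log.Fatalf("healthcare.NewService: %v", err)
	}
	// Placeholder FHIR store path:
	// projects/{project}/locations/{location}/datasets/{dataset}/fhirStores/{store}
	parent := "projects/my-project/locations/us-central1/datasets/my-dataset/fhirStores/my-store"
	resp, err := svc.Projects.Locations.Datasets.FhirStores.Fhir.Search(parent, &healthcare.SearchResourcesRequest{}).Do()
	if err != nil {
		log.Fatalf("Search: %v", err)
	}
	defer resp.Body.Close()
	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("reading response body: %v", err)
	}
	var bundle struct {
		Entry []json.RawMessage `json:"entry"`
	}
	if err := json.Unmarshal(raw, &bundle); err != nil {
		log.Fatalf("decoding Bundle: %v", err)
	}
	fmt.Printf("search returned %d entries on this page\n", len(bundle.Entry))
}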
func (r *ProjectsLocationsDatasetsFhirStoresFhirService) Search(parent string, searchresourcesrequest *SearchResourcesRequest) *ProjectsLocationsDatasetsFhirStoresFhirSearchCall { c := &ProjectsLocationsDatasetsFhirStoresFhirSearchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -12833,7 +12427,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirSearchCall) Header() http.Header func (c *ProjectsLocationsDatasetsFhirStoresFhirSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12864,7 +12458,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirSearchCall) Do(opts ...googleapi gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "Searches for resources in the given FHIR store according to criteria\nspecified as query parameters.\n\nImplements the FHIR standard search interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#search),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#search),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#search))\nusing the search semantics described in the FHIR Search specification\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/search.html),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/search.html),\n[R4](http://hl7.org/implement/standards/fhir/R4/search.html)).\n\nSupports three methods of search defined by the specification:\n\n* `GET [base]?[parameters]` to search across all resources.\n* `GET [base]/[type]?[parameters]` to search resources of a specified\ntype.\n* `POST [base]/[type]/_search?[parameters]` as an alternate form having\nthe same semantics as the `GET` method.\n\nThe `GET` methods do not support compartment searches. The `POST` method\ndoes not support `application/x-www-form-urlencoded` search parameters.\n\nOn success, the response body will contain a JSON-encoded representation\nof a `Bundle` resource of type `searchset`, containing the results of the\nsearch.\nErrors generated by the FHIR store will contain a JSON-encoded\n`OperationOutcome` resource describing the reason for the error. If the\nrequest cannot be mapped to a valid API method on a FHIR store, a generic\nGCP error might be returned instead.\n\nThe server's capability statement, retrieved through\ncapabilities, indicates what search parameters\nare supported on each FHIR resource. A list of all search parameters\ndefined by the specification can be found in the FHIR Search Parameter\nRegistry\n([STU3](http://hl7.org/implement/standards/fhir/STU3/searchparameter-registry.html),\n[R4](http://hl7.org/implement/standards/fhir/R4/searchparameter-registry.html)).\nFHIR search parameters for DSTU2 can be found on each resource's definition\npage.\n\nSupported search modifiers: `:missing`, `:exact`, `:contains`, `:text`,\n`:in`, `:not-in`, `:above`, `:below`, `:[type]`, `:not`, and `:recurse`.\n\nSupported search result parameters: `_sort`, `_count`, `_include`,\n`_revinclude`, `_summary=text`, `_summary=data`, and `_elements`.\n\nThe maximum number of search results returned defaults to 100, which can\nbe overridden by the `_count` parameter up to a maximum limit of 1000. 
If\nthere are additional results, the returned `Bundle` will contain\npagination links.\n\nResources with a total size larger than 5MB or a field count larger than\n50,000 might not be fully searchable as the server might trim its generated\nsearch index in those cases.\n\nNote: FHIR resources are indexed asynchronously, so there might be a slight\ndelay between the time a resource is created or changes and when the change\nis reflected in search results.", + // "description": "Searches for resources in the given FHIR store according to criteria specified as query parameters. Implements the FHIR standard search interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#search), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#search), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#search)) using the search semantics described in the FHIR Search specification ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/search.html), [STU3](http://hl7.org/implement/standards/fhir/STU3/search.html), [R4](http://hl7.org/implement/standards/fhir/R4/search.html)). Supports three methods of search defined by the specification: * `GET [base]?[parameters]` to search across all resources. * `GET [base]/[type]?[parameters]` to search resources of a specified type. * `POST [base]/[type]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method. The `GET` methods do not support compartment searches. The `POST` method does not support `application/x-www-form-urlencoded` search parameters. On success, the response body will contain a JSON-encoded representation of a `Bundle` resource of type `searchset`, containing the results of the search. Errors generated by the FHIR store will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. The server's capability statement, retrieved through capabilities, indicates what search parameters are supported on each FHIR resource. A list of all search parameters defined by the specification can be found in the FHIR Search Parameter Registry ([STU3](http://hl7.org/implement/standards/fhir/STU3/searchparameter-registry.html), [R4](http://hl7.org/implement/standards/fhir/R4/searchparameter-registry.html)). FHIR search parameters for DSTU2 can be found on each resource's definition page. Supported search modifiers: `:missing`, `:exact`, `:contains`, `:text`, `:in`, `:not-in`, `:above`, `:below`, `:[type]`, `:not`, and `:recurse`. Supported search result parameters: `_sort`, `_count`, `_include`, `_revinclude`, `_summary=text`, `_summary=data`, and `_elements`. The maximum number of search results returned defaults to 100, which can be overridden by the `_count` parameter up to a maximum limit of 1000. If there are additional results, the returned `Bundle` will contain pagination links. Resources with a total size larger than 5MB or a field count larger than 50,000 might not be fully searchable as the server might trim its generated search index in those cases. Note: FHIR resources are indexed asynchronously, so there might be a slight delay between the time a resource is created or changes and when the change is reflected in search results. 
For samples and detailed information, see [Searching for FHIR resources](/healthcare/docs/how-tos/fhir-search) and [Advanced FHIR search features](/healthcare/docs/how-tos/fhir-advanced-search).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/_search", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.fhirStores.fhir.search", @@ -12905,41 +12499,27 @@ type ProjectsLocationsDatasetsFhirStoresFhirUpdateCall struct { header_ http.Header } -// Update: Updates the entire contents of a resource. -// -// Implements the FHIR standard update -// interaction -// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/htt -// p.html#update), -// [STU3](http://hl7.org/implement/standards/fhir/STU3/ht -// tp.html#update), -// [R4](http://hl7.org/implement/standards/fhir/R4/http. -// html#update)). -// -// If the specified resource does -// not exist and the FHIR store has -// enable_update_create set, creates the -// resource with the client-specified ID. -// -// The request body must contain a JSON-encoded FHIR resource, and the -// request -// headers must contain `Content-Type: application/fhir+json`. The -// resource -// must contain an `id` element having an identical value to the ID in -// the -// REST path of the request. -// +// Update: Updates the entire contents of a resource. Implements the +// FHIR standard update interaction +// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#updat +// e), +// [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#update), +// [R4](http://hl7.org/implement/standards/fhir/R4/http.html#update)). +// If the specified resource does not exist and the FHIR store has +// enable_update_create set, creates the resource with the +// client-specified ID. The request body must contain a JSON-encoded +// FHIR resource, and the request headers must contain `Content-Type: +// application/fhir+json`. The resource must contain an `id` element +// having an identical value to the ID in the REST path of the request. // On success, the response body will contain a JSON-encoded -// representation -// of the updated resource, including the server-assigned version -// ID. -// Errors generated by the FHIR store will contain a -// JSON-encoded -// `OperationOutcome` resource describing the reason for the error. If -// the -// request cannot be mapped to a valid API method on a FHIR store, a -// generic -// GCP error might be returned instead. +// representation of the updated resource, including the server-assigned +// version ID. Errors generated by the FHIR store will contain a +// JSON-encoded `OperationOutcome` resource describing the reason for +// the error. If the request cannot be mapped to a valid API method on a +// FHIR store, a generic GCP error might be returned instead. For +// samples that show how to call `update`, see [Updating a FHIR +// resource](/healthcare/docs/how-tos/fhir-resources#updating_a_fhir_reso +// urce). 
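// Illustrative usage sketch (not part of the generated file above), assuming
// the same `healthcare` import and an authenticated Service as in the earlier
// examples: it performs the `update` interaction described in the preceding
// doc comment. The body is a JSON-encoded FHIR resource whose `id` matches the
// ID in the request path, and the required Content-Type header is set through
// the call's Header() accessor shown in this file. All identifiers are
// placeholders, and Do() is assumed to return (*http.Response, error).

package main

import (
	"bytes"
	"context"
	"fmt"
	"log"

	healthcare "google.golang.org/api/healthcare/v1"
)

func main() {
	ctx := context.Background()
	svc, err := healthcare.NewService(ctx)
	if err != nil {
		log.Fatalf("healthcare.NewService: %v", err)
	}
	// Placeholder resource name; the trailing ID must match the "id" element in the body.
	name := "projects/my-project/locations/us-central1/datasets/my-dataset/fhirStores/my-store/fhir/Patient/p1"
	resource := []byte(`{"resourceType":"Patient","id":"p1","active":true}`)

	call := svc.Projects.Locations.Datasets.FhirStores.Fhir.Update(name, bytes.NewReader(resource))
	call.Header().Set("Content-Type", "application/fhir+json")
	resp, err := call.Do()
	if err != nil {
		log.Fatalf("Update: %v", err)
	}
	defer resp.Body.Close()
	fmt.Println("update status:", resp.Status)
}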
func (r *ProjectsLocationsDatasetsFhirStoresFhirService) Update(name string, body_ io.Reader) *ProjectsLocationsDatasetsFhirStoresFhirUpdateCall { c := &ProjectsLocationsDatasetsFhirStoresFhirUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -12974,7 +12554,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirUpdateCall) Header() http.Header func (c *ProjectsLocationsDatasetsFhirStoresFhirUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12999,7 +12579,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirUpdateCall) Do(opts ...googleapi gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "Updates the entire contents of a resource.\n\nImplements the FHIR standard update interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#update),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#update),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#update)).\n\nIf the specified resource does\nnot exist and the FHIR store has\nenable_update_create set, creates the\nresource with the client-specified ID.\n\nThe request body must contain a JSON-encoded FHIR resource, and the request\nheaders must contain `Content-Type: application/fhir+json`. The resource\nmust contain an `id` element having an identical value to the ID in the\nREST path of the request.\n\nOn success, the response body will contain a JSON-encoded representation\nof the updated resource, including the server-assigned version ID.\nErrors generated by the FHIR store will contain a JSON-encoded\n`OperationOutcome` resource describing the reason for the error. If the\nrequest cannot be mapped to a valid API method on a FHIR store, a generic\nGCP error might be returned instead.", + // "description": "Updates the entire contents of a resource. Implements the FHIR standard update interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#update), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#update), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#update)). If the specified resource does not exist and the FHIR store has enable_update_create set, creates the resource with the client-specified ID. The request body must contain a JSON-encoded FHIR resource, and the request headers must contain `Content-Type: application/fhir+json`. The resource must contain an `id` element having an identical value to the ID in the REST path of the request. On success, the response body will contain a JSON-encoded representation of the updated resource, including the server-assigned version ID. Errors generated by the FHIR store will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. 
For samples that show how to call `update`, see [Updating a FHIR resource](/healthcare/docs/how-tos/fhir-resources#updating_a_fhir_resource).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{fhirId}/{fhirId1}", // "httpMethod": "PUT", // "id": "healthcare.projects.locations.datasets.fhirStores.fhir.update", @@ -13041,28 +12621,21 @@ type ProjectsLocationsDatasetsFhirStoresFhirVreadCall struct { } // Vread: Gets the contents of a version (current or historical) of a -// FHIR resource -// by version ID. -// -// Implements the FHIR standard vread +// FHIR resource by version ID. Implements the FHIR standard vread // interaction -// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/htt -// p.html#vread), -// [STU3](http://hl7.org/implement/standards/fhir/STU3/htt -// p.html#vread), -// [R4](http://hl7.org/implement/standards/fhir/R4/http.ht -// ml#vread)). -// -// On success, the response body will contain a JSON-encoded -// representation -// of the resource. -// Errors generated by the FHIR store will contain a -// JSON-encoded -// `OperationOutcome` resource describing the reason for the error. If -// the -// request cannot be mapped to a valid API method on a FHIR store, a -// generic -// GCP error might be returned instead. +// ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#vread +// ), +// [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#vread), +// [R4](http://hl7.org/implement/standards/fhir/R4/http.html#vread)). On +// success, the response body will contain a JSON-encoded representation +// of the resource. Errors generated by the FHIR store will contain a +// JSON-encoded `OperationOutcome` resource describing the reason for +// the error. If the request cannot be mapped to a valid API method on a +// FHIR store, a generic GCP error might be returned instead. For +// samples that show how to call `vread`, see [Retrieving a FHIR +// resource +// version](/healthcare/docs/how-tos/fhir-resources#retrieving_a_fhir_res +// ource_version). func (r *ProjectsLocationsDatasetsFhirStoresFhirService) Vread(name string) *ProjectsLocationsDatasetsFhirStoresFhirVreadCall { c := &ProjectsLocationsDatasetsFhirStoresFhirVreadCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -13106,7 +12679,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirVreadCall) Header() http.Header func (c *ProjectsLocationsDatasetsFhirStoresFhirVreadCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13135,7 +12708,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirVreadCall) Do(opts ...googleapi. gensupport.SetOptions(c.urlParams_, opts...) 
return c.doRequest("") // { - // "description": "Gets the contents of a version (current or historical) of a FHIR resource\nby version ID.\n\nImplements the FHIR standard vread interaction\n([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#vread),\n[STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#vread),\n[R4](http://hl7.org/implement/standards/fhir/R4/http.html#vread)).\n\nOn success, the response body will contain a JSON-encoded representation\nof the resource.\nErrors generated by the FHIR store will contain a JSON-encoded\n`OperationOutcome` resource describing the reason for the error. If the\nrequest cannot be mapped to a valid API method on a FHIR store, a generic\nGCP error might be returned instead.", + // "description": "Gets the contents of a version (current or historical) of a FHIR resource by version ID. Implements the FHIR standard vread interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#vread), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#vread), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#vread)). On success, the response body will contain a JSON-encoded representation of the resource. Errors generated by the FHIR store will contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. For samples that show how to call `vread`, see [Retrieving a FHIR resource version](/healthcare/docs/how-tos/fhir-resources#retrieving_a_fhir_resource_version).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{fhirId}/{fhirId1}/_history/{_historyId}", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.fhirStores.fhir.vread", @@ -13182,9 +12755,8 @@ func (r *ProjectsLocationsDatasetsHl7V2StoresService) Create(parent string, hl7v } // Hl7V2StoreId sets the optional parameter "hl7V2StoreId": The ID of -// the HL7v2 store that is being created. -// The string must match the following regex: -// `[\p{L}\p{N}_\-\.]{1,256}`. +// the HL7v2 store that is being created. The string must match the +// following regex: `[\p{L}\p{N}_\-\.]{1,256}`. func (c *ProjectsLocationsDatasetsHl7V2StoresCreateCall) Hl7V2StoreId(hl7V2StoreId string) *ProjectsLocationsDatasetsHl7V2StoresCreateCall { c.urlParams_.Set("hl7V2StoreId", hl7V2StoreId) return c @@ -13217,7 +12789,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresCreateCall) Header() http.Header { func (c *ProjectsLocationsDatasetsHl7V2StoresCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13290,7 +12862,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresCreateCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "hl7V2StoreId": { - // "description": "The ID of the HL7v2 store that is being created.\nThe string must match the following regex: `[\\p{L}\\p{N}_\\-\\.]{1,256}`.", + // "description": "The ID of the HL7v2 store that is being created. 
The string must match the following regex: `[\\p{L}\\p{N}_\\-\\.]{1,256}`.", // "location": "query", // "type": "string" // }, @@ -13327,8 +12899,7 @@ type ProjectsLocationsDatasetsHl7V2StoresDeleteCall struct { } // Delete: Deletes the specified HL7v2 store and removes all messages -// that are -// contained within it. +// that it contains. func (r *ProjectsLocationsDatasetsHl7V2StoresService) Delete(name string) *ProjectsLocationsDatasetsHl7V2StoresDeleteCall { c := &ProjectsLocationsDatasetsHl7V2StoresDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -13362,7 +12933,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresDeleteCall) Header() http.Header { func (c *ProjectsLocationsDatasetsHl7V2StoresDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13421,7 +12992,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresDeleteCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Deletes the specified HL7v2 store and removes all messages that are\ncontained within it.", + // "description": "Deletes the specified HL7v2 store and removes all messages that it contains.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}", // "httpMethod": "DELETE", // "id": "healthcare.projects.locations.datasets.hl7V2Stores.delete", @@ -13503,7 +13074,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresGetCall) Header() http.Header { func (c *ProjectsLocationsDatasetsHl7V2StoresGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13603,9 +13174,8 @@ type ProjectsLocationsDatasetsHl7V2StoresGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy // set. func (r *ProjectsLocationsDatasetsHl7V2StoresService) GetIamPolicy(resource string) *ProjectsLocationsDatasetsHl7V2StoresGetIamPolicyCall { c := &ProjectsLocationsDatasetsHl7V2StoresGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -13615,24 +13185,14 @@ func (r *ProjectsLocationsDatasetsHl7V2StoresService) GetIamPolicy(resource stri // OptionsRequestedPolicyVersion sets the optional parameter // "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. -// -// Requests for policies with any conditional bindings must specify -// version 3. -// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. -// -// To learn which resources support conditions in their IAM policies, -// see -// the -// [IAM -// documentation](https://cloud.google.com/iam/help/conditions/r -// esource-policies). +// returned. Valid values are 0, 1, and 3. 
Requests specifying an +// invalid value will be rejected. Requests for policies with any +// conditional bindings must specify version 3. Policies without any +// conditional bindings may specify any valid value or leave the field +// unset. To learn which resources support conditions in their IAM +// policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). func (c *ProjectsLocationsDatasetsHl7V2StoresGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsLocationsDatasetsHl7V2StoresGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -13675,7 +13235,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresGetIamPolicyCall) Header() http.Hea func (c *ProjectsLocationsDatasetsHl7V2StoresGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13737,7 +13297,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresGetIamPolicyCall) Do(opts ...google } return ret, nil // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}:getIamPolicy", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.hl7V2Stores.getIamPolicy", @@ -13746,13 +13306,13 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresGetIamPolicyCall) Do(opts ...google // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + // "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" // }, // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. 
See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/hl7V2Stores/[^/]+$", // "required": true, @@ -13789,11 +13349,8 @@ func (r *ProjectsLocationsDatasetsHl7V2StoresService) List(parent string) *Proje } // Filter sets the optional parameter "filter": Restricts stores -// returned to those matching a filter. -// Syntax: -// https://cloud.google.com/appengine/docs/standard/python/search -// /query_strings -// Only filtering on labels is supported. For example, +// returned to those matching a filter. Syntax: +// https://cloud.google.com/appengine/docs/standard/python/search/query_strings Only filtering on labels is supported. For example, // `labels.key=value`. func (c *ProjectsLocationsDatasetsHl7V2StoresListCall) Filter(filter string) *ProjectsLocationsDatasetsHl7V2StoresListCall { c.urlParams_.Set("filter", filter) @@ -13801,8 +13358,8 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresListCall) Filter(filter string) *Pr } // PageSize sets the optional parameter "pageSize": Limit on the number -// of HL7v2 stores to return in a single response. -// If zero the default page size of 100 is used. +// of HL7v2 stores to return in a single response. If zero the default +// page size of 100 is used. func (c *ProjectsLocationsDatasetsHl7V2StoresListCall) PageSize(pageSize int64) *ProjectsLocationsDatasetsHl7V2StoresListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c @@ -13853,7 +13410,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresListCall) Header() http.Header { func (c *ProjectsLocationsDatasetsHl7V2StoresListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13924,12 +13481,12 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresListCall) Do(opts ...googleapi.Call // ], // "parameters": { // "filter": { - // "description": "Restricts stores returned to those matching a filter. Syntax:\nhttps://cloud.google.com/appengine/docs/standard/python/search/query_strings\nOnly filtering on labels is supported. For example, `labels.key=value`.", + // "description": "Restricts stores returned to those matching a filter. Syntax: https://cloud.google.com/appengine/docs/standard/python/search/query_strings Only filtering on labels is supported. For example, `labels.key=value`.", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Limit on the number of HL7v2 stores to return in a single response.\nIf zero the default page size of 100 is used.", + // "description": "Limit on the number of HL7v2 stores to return in a single response. If zero the default page size of 100 is used.", // "format": "int32", // "location": "query", // "type": "integer" @@ -13999,11 +13556,8 @@ func (r *ProjectsLocationsDatasetsHl7V2StoresService) Patch(name string, hl7v2st } // UpdateMask sets the optional parameter "updateMask": The update mask -// applies to the resource. For the `FieldMask` -// definition, -// see -// https://developers.google.com/protocol-buffers/docs/re -// ference/google.protobuf#fieldmask +// applies to the resource. 
For the `FieldMask` definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask func (c *ProjectsLocationsDatasetsHl7V2StoresPatchCall) UpdateMask(updateMask string) *ProjectsLocationsDatasetsHl7V2StoresPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -14036,7 +13590,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresPatchCall) Header() http.Header { func (c *ProjectsLocationsDatasetsHl7V2StoresPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14109,14 +13663,14 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresPatchCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "name": { - // "description": "Output only. Resource name of the HL7v2 store, of the form\n`projects/{project_id}/datasets/{dataset_id}/hl7V2Stores/{hl7v2_store_id}`.", + // "description": "Resource name of the HL7v2 store, of the form `projects/{project_id}/datasets/{dataset_id}/hl7V2Stores/{hl7v2_store_id}`.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/hl7V2Stores/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "The update mask applies to the resource. For the `FieldMask` definition,\nsee\nhttps://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", + // "description": "The update mask applies to the resource. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -14148,11 +13702,8 @@ type ProjectsLocationsDatasetsHl7V2StoresSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -// -// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` -// errors. +// resource. Replaces any existing policy. Can return `NOT_FOUND`, +// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. func (r *ProjectsLocationsDatasetsHl7V2StoresService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsDatasetsHl7V2StoresSetIamPolicyCall { c := &ProjectsLocationsDatasetsHl7V2StoresSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -14187,7 +13738,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresSetIamPolicyCall) Header() http.Hea func (c *ProjectsLocationsDatasetsHl7V2StoresSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14251,7 +13802,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresSetIamPolicyCall) Do(opts ...google } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. 
Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}:setIamPolicy", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.hl7V2Stores.setIamPolicy", @@ -14260,7 +13811,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresSetIamPolicyCall) Do(opts ...google // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/hl7V2Stores/[^/]+$", // "required": true, @@ -14293,16 +13844,11 @@ type ProjectsLocationsDatasetsHl7V2StoresTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a `NOT_FOUND` error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. This +// operation may "fail open" without warning. func (r *ProjectsLocationsDatasetsHl7V2StoresService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsDatasetsHl7V2StoresTestIamPermissionsCall { c := &ProjectsLocationsDatasetsHl7V2StoresTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -14337,7 +13883,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresTestIamPermissionsCall) Header() ht func (c *ProjectsLocationsDatasetsHl7V2StoresTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14401,7 +13947,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresTestIamPermissionsCall) Do(opts ... } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. 
This operation may \"fail open\" without warning.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}:testIamPermissions", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.hl7V2Stores.testIamPermissions", @@ -14410,7 +13956,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresTestIamPermissionsCall) Do(opts ... // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/hl7V2Stores/[^/]+$", // "required": true, @@ -14442,17 +13988,12 @@ type ProjectsLocationsDatasetsHl7V2StoresMessagesCreateCall struct { header_ http.Header } -// Create: Creates a message and sends a notification to the Cloud -// Pub/Sub topic. If -// configured, the MLLP adapter listens to messages created by this -// method and -// sends those back to the hospital. A successful response indicates -// the -// message has been persisted to storage and a Cloud Pub/Sub -// notification has -// been sent. Sending to the hospital by the MLLP adapter -// happens -// asynchronously. +// Create: Parses and stores an HL7v2 message. This method triggers an +// asynchronous notification to any Cloud Pub/Sub topic configured in +// projects.locations.datasets.hl7V2Stores.Hl7V2NotificationConfig, if +// the filtering matches the message. If an MLLP adapter is configured +// to listen to a Cloud Pub/Sub topic, the adapter transmits the message +// when a notification is received. func (r *ProjectsLocationsDatasetsHl7V2StoresMessagesService) Create(parent string, createmessagerequest *CreateMessageRequest) *ProjectsLocationsDatasetsHl7V2StoresMessagesCreateCall { c := &ProjectsLocationsDatasetsHl7V2StoresMessagesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -14487,7 +14028,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesCreateCall) Header() http.H func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14551,7 +14092,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesCreateCall) Do(opts ...goog } return ret, nil // { - // "description": "Creates a message and sends a notification to the Cloud Pub/Sub topic. If\nconfigured, the MLLP adapter listens to messages created by this method and\nsends those back to the hospital. A successful response indicates the\nmessage has been persisted to storage and a Cloud Pub/Sub notification has\nbeen sent. Sending to the hospital by the MLLP adapter happens\nasynchronously.", + // "description": "Parses and stores an HL7v2 message. This method triggers an asynchronous notification to any Cloud Pub/Sub topic configured in projects.locations.datasets.hl7V2Stores.Hl7V2NotificationConfig, if the filtering matches the message. 
If an MLLP adapter is configured to listen to a Cloud Pub/Sub topic, the adapter transmits the message when a notification is received.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}/messages", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.hl7V2Stores.messages.create", @@ -14625,7 +14166,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesDeleteCall) Header() http.H func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14730,15 +14271,17 @@ func (r *ProjectsLocationsDatasetsHl7V2StoresMessagesService) Get(name string) * } // View sets the optional parameter "view": Specifies which parts of the -// Message resource to return in the response. -// When unspecified, equivalent to FULL. +// Message resource to return in the response. When unspecified, +// equivalent to FULL. // // Possible values: -// "MESSAGE_VIEW_UNSPECIFIED" -// "RAW_ONLY" -// "PARSED_ONLY" -// "FULL" -// "BASIC" +// "MESSAGE_VIEW_UNSPECIFIED" - Not specified, equivalent to FULL. +// "RAW_ONLY" - Server responses include all the message fields except +// parsed_data field. +// "PARSED_ONLY" - Server responses include all the message fields +// except data field. +// "FULL" - Server responses include all the message fields. +// "BASIC" - Server responses include only the name field. func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesGetCall) View(view string) *ProjectsLocationsDatasetsHl7V2StoresMessagesGetCall { c.urlParams_.Set("view", view) return c @@ -14781,7 +14324,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesGetCall) Header() http.Head func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14859,7 +14402,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesGetCall) Do(opts ...googlea // "type": "string" // }, // "view": { - // "description": "Specifies which parts of the Message resource to return in the response.\nWhen unspecified, equivalent to FULL.", + // "description": "Specifies which parts of the Message resource to return in the response. When unspecified, equivalent to FULL.", // "enum": [ // "MESSAGE_VIEW_UNSPECIFIED", // "RAW_ONLY", @@ -14867,6 +14410,13 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesGetCall) Do(opts ...googlea // "FULL", // "BASIC" // ], + // "enumDescriptions": [ + // "Not specified, equivalent to FULL.", + // "Server responses include all the message fields except parsed_data field.", + // "Server responses include all the message fields except data field.", + // "Server responses include all the message fields.", + // "Server responses include only the name field." 
+ // ], // "location": "query", // "type": "string" // } @@ -14893,11 +14443,16 @@ type ProjectsLocationsDatasetsHl7V2StoresMessagesIngestCall struct { header_ http.Header } -// Ingest: Ingests a new HL7v2 message from the hospital and sends a -// notification to -// the Cloud Pub/Sub topic. Return is an HL7v2 ACK message if the -// message was -// successfully stored. Otherwise an error is returned. +// Ingest: Parses and stores an HL7v2 message. This method triggers an +// asynchronous notification to any Cloud Pub/Sub topic configured in +// projects.locations.datasets.hl7V2Stores.Hl7V2NotificationConfig, if +// the filtering matches the message. If an MLLP adapter is configured +// to listen to a Cloud Pub/Sub topic, the adapter transmits the message +// when a notification is received. This method also generates a +// response containing an HL7v2 acknowledgement (`ACK`) message when +// successful or a negative acknowledgement (`NACK`) message in case of +// error, suitable for replying to HL7v2 interface systems that expect +// these acknowledgements. func (r *ProjectsLocationsDatasetsHl7V2StoresMessagesService) Ingest(parent string, ingestmessagerequest *IngestMessageRequest) *ProjectsLocationsDatasetsHl7V2StoresMessagesIngestCall { c := &ProjectsLocationsDatasetsHl7V2StoresMessagesIngestCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -14932,7 +14487,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesIngestCall) Header() http.H func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesIngestCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14996,7 +14551,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesIngestCall) Do(opts ...goog } return ret, nil // { - // "description": "Ingests a new HL7v2 message from the hospital and sends a notification to\nthe Cloud Pub/Sub topic. Return is an HL7v2 ACK message if the message was\nsuccessfully stored. Otherwise an error is returned.", + // "description": "Parses and stores an HL7v2 message. This method triggers an asynchronous notification to any Cloud Pub/Sub topic configured in projects.locations.datasets.hl7V2Stores.Hl7V2NotificationConfig, if the filtering matches the message. If an MLLP adapter is configured to listen to a Cloud Pub/Sub topic, the adapter transmits the message when a notification is received. This method also generates a response containing an HL7v2 acknowledgement (`ACK`) message when successful or a negative acknowledgement (`NACK`) message in case of error, suitable for replying to HL7v2 interface systems that expect these acknowledgements.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}/messages:ingest", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.hl7V2Stores.messages.ingest", @@ -15038,13 +14593,9 @@ type ProjectsLocationsDatasetsHl7V2StoresMessagesListCall struct { } // List: Lists all the messages in the given HL7v2 store with support -// for filtering. -// -// Note: HL7v2 messages are indexed asynchronously, so there might be a -// slight -// delay between the time a message is created and when it can be -// found -// through a filter. +// for filtering. 
Note: HL7v2 messages are indexed asynchronously, so +// there might be a slight delay between the time a message is created +// and when it can be found through a filter. func (r *ProjectsLocationsDatasetsHl7V2StoresMessagesService) List(parent string) *ProjectsLocationsDatasetsHl7V2StoresMessagesListCall { c := &ProjectsLocationsDatasetsHl7V2StoresMessagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -15052,39 +14603,8 @@ func (r *ProjectsLocationsDatasetsHl7V2StoresMessagesService) List(parent string } // Filter sets the optional parameter "filter": Restricts messages -// returned to those matching a filter. -// Syntax: -// https://cloud.google.com/appengine/docs/standard/python/search -// /query_strings -// -// Fields/functions available for filtering are: -// -// * `message_type`, from the MSH-9.1 field. For example, -// `NOT message_type = "ADT". -// * `send_date` or `sendDate`, the YYYY-MM-DD date the message was -// sent in -// the dataset's time_zone, from the MSH-7 segment. For -// example, -// `send_date < "2017-01-02". -// * `send_time`, the timestamp when the message was sent, using -// the -// RFC3339 time format for comparisons, from the MSH-7 segment. For -// example, -// `send_time < "2017-01-02T00:00:00-05:00". -// * `send_facility`, the care center that the message came from, from -// the -// MSH-4 segment. For example, `send_facility = "ABC". -// * `PatientId(value, type)`, which matches if the message lists a -// patient -// having an ID of the given value and type in the PID-2, PID-3, or -// PID-4 -// segments. For example, `PatientId("123456", "MRN")`. -// * `labels.x`, a string value of the label with key `x` as set using -// the -// Message.labels -// map. For example, `labels."priority"="high". The operator `:*` can -// be used -// to assert the existence of a label. For example, +// returned to those matching a filter. Syntax: +// https://cloud.google.com/appengine/docs/standard/python/search/query_strings Fields/functions available for filtering are: * `message_type`, from the MSH-9.1 field. For example, `NOT message_type = "ADT". * `send_date` or `sendDate`, the YYYY-MM-DD date the message was sent in the dataset's time_zone, from the MSH-7 segment. For example, `send_date < "2017-01-02". * `send_time`, the timestamp when the message was sent, using the RFC3339 time format for comparisons, from the MSH-7 segment. For example, `send_time < "2017-01-02T00:00:00-05:00". * `send_facility`, the care center that the message came from, from the MSH-4 segment. For example, `send_facility = "ABC". * `PatientId(value, type)`, which matches if the message lists a patient having an ID of the given value and type in the PID-2, PID-3, or PID-4 segments. For example, `PatientId("123456", "MRN")`. * `labels.x`, a string value of the label with key `x` as set using the Message.labels map. For example, `labels."priority"="high". The operator `:*` can be used to assert the existence of a label. For example, // `labels."priority":*`. func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesListCall) Filter(filter string) *ProjectsLocationsDatasetsHl7V2StoresMessagesListCall { c.urlParams_.Set("filter", filter) @@ -15092,22 +14612,17 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesListCall) Filter(filter str } // OrderBy sets the optional parameter "orderBy": Orders messages -// returned by the specified order_by clause. -// Syntax: +// returned by the specified order_by clause. 
Syntax: // https://cloud.google.com/apis/design/design_patterns#sorting_order -// -// Fi -// elds available for ordering are: -// -// * `send_time` +// Fields available for ordering are: * `send_time` func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesListCall) OrderBy(orderBy string) *ProjectsLocationsDatasetsHl7V2StoresMessagesListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageSize sets the optional parameter "pageSize": Limit on the number -// of messages to return in a single response. -// If zero the default page size of 100 is used. +// of messages to return in a single response. If zero the default page +// size of 100 is used. func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesListCall) PageSize(pageSize int64) *ProjectsLocationsDatasetsHl7V2StoresMessagesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c @@ -15122,19 +14637,19 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesListCall) PageToken(pageTok } // View sets the optional parameter "view": Specifies the parts of the -// Message to return in the response. -// When unspecified, equivalent to BASIC. Setting this to anything other -// than -// BASIC with a `page_size` larger than the default can generate a -// large -// response, which impacts the performance of this method. +// Message to return in the response. When unspecified, equivalent to +// BASIC. Setting this to anything other than BASIC with a `page_size` +// larger than the default can generate a large response, which impacts +// the performance of this method. // // Possible values: -// "MESSAGE_VIEW_UNSPECIFIED" -// "RAW_ONLY" -// "PARSED_ONLY" -// "FULL" -// "BASIC" +// "MESSAGE_VIEW_UNSPECIFIED" - Not specified, equivalent to FULL. +// "RAW_ONLY" - Server responses include all the message fields except +// parsed_data field. +// "PARSED_ONLY" - Server responses include all the message fields +// except data field. +// "FULL" - Server responses include all the message fields. +// "BASIC" - Server responses include only the name field. func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesListCall) View(view string) *ProjectsLocationsDatasetsHl7V2StoresMessagesListCall { c.urlParams_.Set("view", view) return c @@ -15177,7 +14692,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesListCall) Header() http.Hea func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15239,7 +14754,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesListCall) Do(opts ...google } return ret, nil // { - // "description": "Lists all the messages in the given HL7v2 store with support for filtering.\n\nNote: HL7v2 messages are indexed asynchronously, so there might be a slight\ndelay between the time a message is created and when it can be found\nthrough a filter.", + // "description": "Lists all the messages in the given HL7v2 store with support for filtering. 
Note: HL7v2 messages are indexed asynchronously, so there might be a slight delay between the time a message is created and when it can be found through a filter.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}/messages", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.hl7V2Stores.messages.list", @@ -15248,17 +14763,17 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesListCall) Do(opts ...google // ], // "parameters": { // "filter": { - // "description": "Restricts messages returned to those matching a filter. Syntax:\nhttps://cloud.google.com/appengine/docs/standard/python/search/query_strings\n\nFields/functions available for filtering are:\n\n* `message_type`, from the MSH-9.1 field. For example,\n`NOT message_type = \"ADT\"`.\n* `send_date` or `sendDate`, the YYYY-MM-DD date the message was sent in\nthe dataset's time_zone, from the MSH-7 segment. For example,\n`send_date \u003c \"2017-01-02\"`.\n* `send_time`, the timestamp when the message was sent, using the\nRFC3339 time format for comparisons, from the MSH-7 segment. For example,\n`send_time \u003c \"2017-01-02T00:00:00-05:00\"`.\n* `send_facility`, the care center that the message came from, from the\nMSH-4 segment. For example, `send_facility = \"ABC\"`.\n* `PatientId(value, type)`, which matches if the message lists a patient\nhaving an ID of the given value and type in the PID-2, PID-3, or PID-4\nsegments. For example, `PatientId(\"123456\", \"MRN\")`.\n* `labels.x`, a string value of the label with key `x` as set using the\nMessage.labels\nmap. For example, `labels.\"priority\"=\"high\"`. The operator `:*` can be used\nto assert the existence of a label. For example, `labels.\"priority\":*`.", + // "description": "Restricts messages returned to those matching a filter. Syntax: https://cloud.google.com/appengine/docs/standard/python/search/query_strings Fields/functions available for filtering are: * `message_type`, from the MSH-9.1 field. For example, `NOT message_type = \"ADT\"`. * `send_date` or `sendDate`, the YYYY-MM-DD date the message was sent in the dataset's time_zone, from the MSH-7 segment. For example, `send_date \u003c \"2017-01-02\"`. * `send_time`, the timestamp when the message was sent, using the RFC3339 time format for comparisons, from the MSH-7 segment. For example, `send_time \u003c \"2017-01-02T00:00:00-05:00\"`. * `send_facility`, the care center that the message came from, from the MSH-4 segment. For example, `send_facility = \"ABC\"`. * `PatientId(value, type)`, which matches if the message lists a patient having an ID of the given value and type in the PID-2, PID-3, or PID-4 segments. For example, `PatientId(\"123456\", \"MRN\")`. * `labels.x`, a string value of the label with key `x` as set using the Message.labels map. For example, `labels.\"priority\"=\"high\"`. The operator `:*` can be used to assert the existence of a label. For example, `labels.\"priority\":*`.", // "location": "query", // "type": "string" // }, // "orderBy": { - // "description": "Orders messages returned by the specified order_by clause.\nSyntax: https://cloud.google.com/apis/design/design_patterns#sorting_order\n\nFields available for ordering are:\n\n* `send_time`", + // "description": "Orders messages returned by the specified order_by clause. 
Syntax: https://cloud.google.com/apis/design/design_patterns#sorting_order Fields available for ordering are: * `send_time`", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Limit on the number of messages to return in a single response.\nIf zero the default page size of 100 is used.", + // "description": "Limit on the number of messages to return in a single response. If zero the default page size of 100 is used.", // "format": "int32", // "location": "query", // "type": "integer" @@ -15276,7 +14791,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesListCall) Do(opts ...google // "type": "string" // }, // "view": { - // "description": "Specifies the parts of the Message to return in the response.\nWhen unspecified, equivalent to BASIC. Setting this to anything other than\nBASIC with a `page_size` larger than the default can generate a large\nresponse, which impacts the performance of this method.", + // "description": "Specifies the parts of the Message to return in the response. When unspecified, equivalent to BASIC. Setting this to anything other than BASIC with a `page_size` larger than the default can generate a large response, which impacts the performance of this method.", // "enum": [ // "MESSAGE_VIEW_UNSPECIFIED", // "RAW_ONLY", @@ -15284,6 +14799,13 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesListCall) Do(opts ...google // "FULL", // "BASIC" // ], + // "enumDescriptions": [ + // "Not specified, equivalent to FULL.", + // "Server responses include all the message fields except parsed_data field.", + // "Server responses include all the message fields except data field.", + // "Server responses include all the message fields.", + // "Server responses include only the name field." + // ], // "location": "query", // "type": "string" // } @@ -15331,17 +14853,12 @@ type ProjectsLocationsDatasetsHl7V2StoresMessagesPatchCall struct { header_ http.Header } -// Patch: Update the message. -// -// The contents of the message in Message.data and data extracted -// from -// the contents such as Message.create_time cannot be altered. Only -// the -// Message.labels field is allowed to be updated. The labels in -// the -// request are merged with the existing set of labels. Existing labels -// with -// the same keys are updated. +// Patch: Update the message. The contents of the message in +// Message.data and data extracted from the contents such as +// Message.create_time cannot be altered. Only the Message.labels field +// is allowed to be updated. The labels in the request are merged with +// the existing set of labels. Existing labels with the same keys are +// updated. func (r *ProjectsLocationsDatasetsHl7V2StoresMessagesService) Patch(name string, message *Message) *ProjectsLocationsDatasetsHl7V2StoresMessagesPatchCall { c := &ProjectsLocationsDatasetsHl7V2StoresMessagesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -15350,11 +14867,8 @@ func (r *ProjectsLocationsDatasetsHl7V2StoresMessagesService) Patch(name string, } // UpdateMask sets the optional parameter "updateMask": The update mask -// applies to the resource. For the `FieldMask` -// definition, -// see -// https://developers.google.com/protocol-buffers/docs/re -// ference/google.protobuf#fieldmask +// applies to the resource. 
For the `FieldMask` definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesPatchCall) UpdateMask(updateMask string) *ProjectsLocationsDatasetsHl7V2StoresMessagesPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -15387,7 +14901,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesPatchCall) Header() http.He func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15451,7 +14965,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesPatchCall) Do(opts ...googl } return ret, nil // { - // "description": "Update the message.\n\nThe contents of the message in Message.data and data extracted from\nthe contents such as Message.create_time cannot be altered. Only the\nMessage.labels field is allowed to be updated. The labels in the\nrequest are merged with the existing set of labels. Existing labels with\nthe same keys are updated.", + // "description": "Update the message. The contents of the message in Message.data and data extracted from the contents such as Message.create_time cannot be altered. Only the Message.labels field is allowed to be updated. The labels in the request are merged with the existing set of labels. Existing labels with the same keys are updated.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/hl7V2Stores/{hl7V2StoresId}/messages/{messagesId}", // "httpMethod": "PATCH", // "id": "healthcare.projects.locations.datasets.hl7V2Stores.messages.patch", @@ -15460,14 +14974,14 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesPatchCall) Do(opts ...googl // ], // "parameters": { // "name": { - // "description": "Resource name of the Message, of the form\n`projects/{project_id}/datasets/{dataset_id}/hl7V2Stores/{hl7_v2_store_id}/messages/{message_id}`.\nAssigned by the server.", + // "description": "Resource name of the Message, of the form `projects/{project_id}/datasets/{dataset_id}/hl7V2Stores/{hl7_v2_store_id}/messages/{message_id}`. Assigned by the server.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/hl7V2Stores/[^/]+/messages/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "The update mask applies to the resource. For the `FieldMask` definition,\nsee\nhttps://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", + // "description": "The update mask applies to the resource. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -15499,23 +15013,15 @@ type ProjectsLocationsDatasetsOperationsCancelCall struct { } // Cancel: Starts asynchronous cancellation on a long-running operation. -// The server -// makes a best effort to cancel the operation, but success is -// not -// guaranteed. If the server doesn't support this method, it -// returns -// `google.rpc.Code.UNIMPLEMENTED`. 
Clients can -// use -// Operations.GetOperation or -// other methods to check whether the cancellation succeeded or whether -// the -// operation completed despite cancellation. On successful -// cancellation, -// the operation is not deleted; instead, it becomes an operation -// with -// an Operation.error value with a google.rpc.Status.code of -// 1, -// corresponding to `Code.CANCELLED`. +// The server makes a best effort to cancel the operation, but success +// is not guaranteed. If the server doesn't support this method, it +// returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use +// Operations.GetOperation or other methods to check whether the +// cancellation succeeded or whether the operation completed despite +// cancellation. On successful cancellation, the operation is not +// deleted; instead, it becomes an operation with an Operation.error +// value with a google.rpc.Status.code of 1, corresponding to +// `Code.CANCELLED`. func (r *ProjectsLocationsDatasetsOperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *ProjectsLocationsDatasetsOperationsCancelCall { c := &ProjectsLocationsDatasetsOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -15550,7 +15056,7 @@ func (c *ProjectsLocationsDatasetsOperationsCancelCall) Header() http.Header { func (c *ProjectsLocationsDatasetsOperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15614,7 +15120,7 @@ func (c *ProjectsLocationsDatasetsOperationsCancelCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/operations/{operationsId}:cancel", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.operations.cancel", @@ -15655,11 +15161,9 @@ type ProjectsLocationsDatasetsOperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. 
Clients can -// use this -// method to poll the operation result at intervals as recommended by -// the API -// service. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. func (r *ProjectsLocationsDatasetsOperationsService) Get(name string) *ProjectsLocationsDatasetsOperationsGetCall { c := &ProjectsLocationsDatasetsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -15703,7 +15207,7 @@ func (c *ProjectsLocationsDatasetsOperationsGetCall) Header() http.Header { func (c *ProjectsLocationsDatasetsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15765,7 +15269,7 @@ func (c *ProjectsLocationsDatasetsOperationsGetCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/operations/{operationsId}", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.operations.get", @@ -15804,22 +15308,15 @@ type ProjectsLocationsDatasetsOperationsListCall struct { } // List: Lists operations that match the specified filter in the -// request. If the -// server doesn't support this method, it returns -// `UNIMPLEMENTED`. -// -// NOTE: the `name` binding allows API services to override the -// binding -// to use different resource name schemes, such as `users/*/operations`. -// To -// override the binding, API services can add a binding such -// as -// "/v1/{name=users/*}/operations" to their service configuration. -// For backwards compatibility, the default name includes the -// operations -// collection id, however overriding users must ensure the name -// binding -// is the parent resource, without the operations collection id. +// request. If the server doesn't support this method, it returns +// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to +// override the binding to use different resource name schemes, such as +// `users/*/operations`. To override the binding, API services can add a +// binding such as "/v1/{name=users/*}/operations" to their service +// configuration. For backwards compatibility, the default name includes +// the operations collection id, however overriding users must ensure +// the name binding is the parent resource, without the operations +// collection id. 
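// A minimal sketch of the poll-until-done flow that the regenerated
// Operations.Get comment above recommends. This is illustrative only and
// assumes the usual generated service layout
// (svc.Projects.Locations.Datasets.Operations), a *healthcare.Service from
// healthcare.NewService(ctx), and imports of "context", "time", and
// healthcare "google.golang.org/api/healthcare/v1"; the helper name is
// hypothetical.
func waitForDatasetOperation(ctx context.Context, svc *healthcare.Service, name string) (*healthcare.Operation, error) {
	for {
		// Fetch the latest state of the long-running operation.
		op, err := svc.Projects.Locations.Datasets.Operations.Get(name).Context(ctx).Do()
		if err != nil {
			return nil, err
		}
		if op.Done {
			// op.Error is populated on failure; op.Response on success.
			return op, nil
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(5 * time.Second): // simple fixed poll interval
		}
	}
}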
func (r *ProjectsLocationsDatasetsOperationsService) List(name string) *ProjectsLocationsDatasetsOperationsListCall { c := &ProjectsLocationsDatasetsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -15884,7 +15381,7 @@ func (c *ProjectsLocationsDatasetsOperationsListCall) Header() http.Header { func (c *ProjectsLocationsDatasetsOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15946,7 +15443,7 @@ func (c *ProjectsLocationsDatasetsOperationsListCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/operations", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.operations.list", diff --git a/vendor/google.golang.org/api/iam/v1/iam-api.json b/vendor/google.golang.org/api/iam/v1/iam-api.json index f26a190fa8d..d8557db0c75 100644 --- a/vendor/google.golang.org/api/iam/v1/iam-api.json +++ b/vendor/google.golang.org/api/iam/v1/iam-api.json @@ -12,7 +12,7 @@ "baseUrl": "https://iam.googleapis.com/", "batchPath": "batch", "canonicalName": "iam", - "description": "Manages identity and access control for Google Cloud Platform resources, including the creation of service accounts, which you can use to authenticate to Google and make API calls.", + "description": " Manages identity and access control for Google Cloud Platform resources, including the creation of service accounts, which you can use to authenticate to Google and make API calls. *Note:* This API is tied to the IAM service account credentials API ( iamcredentials.googleapis.com). Enabling or disabling this API will also enable or disable the IAM service account credentials API. ", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/iam/", "fullyEncodeReservedExpansion": true, @@ -108,7 +108,7 @@ "iamPolicies": { "methods": { "lintPolicy": { - "description": "Lints a Cloud IAM policy object or its sub fields. 
Currently supports\ngoogle.iam.v1.Binding.condition.\n\nEach lint operation consists of multiple lint validation units.\nEach unit inspects the input object in regard to a particular linting\naspect and issues a google.iam.admin.v1.LintResult disclosing the\nresult.\n\nThe set of applicable validation units is determined by the Cloud IAM\nserver and is not configurable.\n\nRegardless of any lint issues or their severities, successful calls to\n`lintPolicy` return an HTTP 200 OK status code.", + "description": "Lints, or validates, an IAM policy. Currently checks the google.iam.v1.Binding.condition field, which contains a condition expression for a role binding. Successful calls to this method always return an HTTP `200 OK` status code, even if the linter detects an issue in the IAM policy.", "flatPath": "v1/iamPolicies:lintPolicy", "httpMethod": "POST", "id": "iam.iamPolicies.lintPolicy", @@ -126,7 +126,7 @@ ] }, "queryAuditableServices": { - "description": "Returns a list of services that support service level audit logging\nconfiguration for the given resource.", + "description": "Returns a list of services that allow you to opt into audit logs that are not generated by default. To learn more about audit logs, see the [Logging documentation](https://cloud.google.com/logging/docs/audit).", "flatPath": "v1/iamPolicies:queryAuditableServices", "httpMethod": "POST", "id": "iam.iamPolicies.queryAuditableServices", @@ -150,7 +150,7 @@ "roles": { "methods": { "create": { - "description": "Creates a new Role.", + "description": "Creates a new custom Role.", "flatPath": "v1/organizations/{organizationsId}/roles", "httpMethod": "POST", "id": "iam.organizations.roles.create", @@ -159,7 +159,7 @@ ], "parameters": { "parent": { - "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`projects.roles.create()`](/iam/reference/rest/v1/projects.roles/create):\n `projects/{PROJECT_ID}`. This method creates project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.create()`](/iam/reference/rest/v1/organizations.roles/create):\n `organizations/{ORGANIZATION_ID}`. This method creates organization-level\n [custom roles](/iam/docs/understanding-custom-roles). Example request\n URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + "description": "The `parent` parameter's value depends on the target resource for the request, namely [`projects`](/iam/reference/rest/v1/projects.roles) or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `parent` value format is described below: * [`projects.roles.create()`](/iam/reference/rest/v1/projects.roles/create): `projects/{PROJECT_ID}`. This method creates project-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * [`organizations.roles.create()`](/iam/reference/rest/v1/organizations.roles/create): `organizations/{ORGANIZATION_ID}`. This method creates organization-level [custom roles](/iam/docs/understanding-custom-roles). 
Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -178,7 +178,7 @@ ] }, "delete": { - "description": "Soft deletes a role. The role is suspended and cannot be used to create new\nIAM Policy Bindings.\nThe Role will not be included in `ListRoles()` unless `show_deleted` is set\nin the `ListRolesRequest`. The Role contains the deleted boolean set.\nExisting Bindings remains, but are inactive. The Role can be undeleted\nwithin 7 days. After 7 days the Role is deleted and all Bindings associated\nwith the role are removed.", + "description": "Deletes a custom Role. When you delete a custom role, the following changes occur immediately: * You cannot bind a member to the custom role in an IAM Policy. * Existing bindings to the custom role are not changed, but they have no effect. * By default, the response from ListRoles does not include the custom role. You have 7 days to undelete the custom role. After 7 days, the following changes occur: * The custom role is permanently deleted and cannot be recovered. * If an IAM policy contains a binding to the custom role, the binding is permanently removed.", "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}", "httpMethod": "DELETE", "id": "iam.organizations.roles.delete", @@ -193,7 +193,7 @@ "type": "string" }, "name": { - "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.delete()`](/iam/reference/rest/v1/projects.roles/delete):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.delete()`](/iam/reference/rest/v1/organizations.roles/delete):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n deletes only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](/iam/reference/rest/v1/projects.roles) or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`projects.roles.delete()`](/iam/reference/rest/v1/projects.roles/delete): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.delete()`](/iam/reference/rest/v1/organizations.roles/delete): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. 
Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", "location": "path", "pattern": "^organizations/[^/]+/roles/[^/]+$", "required": true, @@ -209,7 +209,7 @@ ] }, "get": { - "description": "Gets a Role definition.", + "description": "Gets the definition of a Role.", "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}", "httpMethod": "GET", "id": "iam.organizations.roles.get", @@ -218,7 +218,7 @@ ], "parameters": { "name": { - "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`.\n This method returns results from all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles/{ROLE_NAME}`\n\n* [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n returns only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + "description": "The `name` parameter's value depends on the target resource for the request, namely [`roles`](/iam/reference/rest/v1/roles), [`projects`](/iam/reference/rest/v1/projects.roles), or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`. This method returns results from all [predefined roles](/iam/docs/understanding-roles#predefined_roles) in Cloud IAM. Example request URL: `https://iam.googleapis.com/v1/roles/{ROLE_NAME}` * [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. 
Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", "location": "path", "pattern": "^organizations/[^/]+/roles/[^/]+$", "required": true, @@ -234,7 +234,7 @@ ] }, "list": { - "description": "Lists the Roles defined on a resource.", + "description": "Lists every predefined Role that IAM supports, or every custom role that is defined for an organization or project.", "flatPath": "v1/organizations/{organizationsId}/roles", "httpMethod": "GET", "id": "iam.organizations.roles.list", @@ -243,7 +243,7 @@ ], "parameters": { "pageSize": { - "description": "Optional limit on the number of roles to include in the response.", + "description": "Optional limit on the number of roles to include in the response. The default is 300, and the maximum is 1,000.", "format": "int32", "location": "query", "type": "integer" @@ -254,7 +254,7 @@ "type": "string" }, "parent": { - "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string.\n This method doesn't require a resource; it simply returns all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles`\n\n* [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list):\n `projects/{PROJECT_ID}`. This method lists all project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list):\n `organizations/{ORGANIZATION_ID}`. This method lists all\n organization-level [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + "description": "The `parent` parameter's value depends on the target resource for the request, namely [`roles`](/iam/reference/rest/v1/roles), [`projects`](/iam/reference/rest/v1/projects.roles), or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `parent` value format is described below: * [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string. This method doesn't require a resource; it simply returns all [predefined roles](/iam/docs/understanding-roles#predefined_roles) in Cloud IAM. Example request URL: `https://iam.googleapis.com/v1/roles` * [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list): `projects/{PROJECT_ID}`. This method lists all project-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list): `organizations/{ORGANIZATION_ID}`. This method lists all organization-level [custom roles](/iam/docs/understanding-custom-roles). 
Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -266,11 +266,15 @@ "type": "boolean" }, "view": { - "description": "Optional view for the returned Role objects. When `FULL` is specified,\nthe `includedPermissions` field is returned, which includes a list of all\npermissions in the role. The default value is `BASIC`, which does not\nreturn the `includedPermissions` field.", + "description": "Optional view for the returned Role objects. When `FULL` is specified, the `includedPermissions` field is returned, which includes a list of all permissions in the role. The default value is `BASIC`, which does not return the `includedPermissions` field.", "enum": [ "BASIC", "FULL" ], + "enumDescriptions": [ + "Omits the `included_permissions` field. This is the default value.", + "Returns all fields." + ], "location": "query", "type": "string" } @@ -284,7 +288,7 @@ ] }, "patch": { - "description": "Updates a Role definition.", + "description": "Updates the definition of a custom Role.", "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}", "httpMethod": "PATCH", "id": "iam.organizations.roles.patch", @@ -293,7 +297,7 @@ ], "parameters": { "name": { - "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.patch()`](/iam/reference/rest/v1/projects.roles/patch):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.patch()`](/iam/reference/rest/v1/organizations.roles/patch):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n updates only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](/iam/reference/rest/v1/projects.roles) or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`projects.roles.patch()`](/iam/reference/rest/v1/projects.roles/patch): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.patch()`](/iam/reference/rest/v1/organizations.roles/patch): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. 
Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", "location": "path", "pattern": "^organizations/[^/]+/roles/[^/]+$", "required": true, @@ -318,7 +322,7 @@ ] }, "undelete": { - "description": "Undelete a Role, bringing it back in its previous state.", + "description": "Undeletes a custom Role.", "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}:undelete", "httpMethod": "POST", "id": "iam.organizations.roles.undelete", @@ -327,7 +331,7 @@ ], "parameters": { "name": { - "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.undelete()`](/iam/reference/rest/v1/projects.roles/undelete):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes\n only [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.undelete()`](/iam/reference/rest/v1/organizations.roles/undelete):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n undeletes only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](/iam/reference/rest/v1/projects.roles) or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`projects.roles.undelete()`](/iam/reference/rest/v1/projects.roles/undelete): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.undelete()`](/iam/reference/rest/v1/organizations.roles/undelete): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", "location": "path", "pattern": "^organizations/[^/]+/roles/[^/]+$", "required": true, @@ -352,7 +356,7 @@ "permissions": { "methods": { "queryTestablePermissions": { - "description": "Lists the permissions testable on a resource.\nA permission is testable if it can be tested for an identity on a resource.", + "description": "Lists every permission that you can test on a resource. 
A permission is testable if you can check whether a member has that permission on the resource.", "flatPath": "v1/permissions:queryTestablePermissions", "httpMethod": "POST", "id": "iam.permissions.queryTestablePermissions", @@ -376,7 +380,7 @@ "roles": { "methods": { "create": { - "description": "Creates a new Role.", + "description": "Creates a new custom Role.", "flatPath": "v1/projects/{projectsId}/roles", "httpMethod": "POST", "id": "iam.projects.roles.create", @@ -385,7 +389,7 @@ ], "parameters": { "parent": { - "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`projects.roles.create()`](/iam/reference/rest/v1/projects.roles/create):\n `projects/{PROJECT_ID}`. This method creates project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.create()`](/iam/reference/rest/v1/organizations.roles/create):\n `organizations/{ORGANIZATION_ID}`. This method creates organization-level\n [custom roles](/iam/docs/understanding-custom-roles). Example request\n URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + "description": "The `parent` parameter's value depends on the target resource for the request, namely [`projects`](/iam/reference/rest/v1/projects.roles) or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `parent` value format is described below: * [`projects.roles.create()`](/iam/reference/rest/v1/projects.roles/create): `projects/{PROJECT_ID}`. This method creates project-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * [`organizations.roles.create()`](/iam/reference/rest/v1/organizations.roles/create): `organizations/{ORGANIZATION_ID}`. This method creates organization-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -404,7 +408,7 @@ ] }, "delete": { - "description": "Soft deletes a role. The role is suspended and cannot be used to create new\nIAM Policy Bindings.\nThe Role will not be included in `ListRoles()` unless `show_deleted` is set\nin the `ListRolesRequest`. The Role contains the deleted boolean set.\nExisting Bindings remains, but are inactive. The Role can be undeleted\nwithin 7 days. After 7 days the Role is deleted and all Bindings associated\nwith the role are removed.", + "description": "Deletes a custom Role. When you delete a custom role, the following changes occur immediately: * You cannot bind a member to the custom role in an IAM Policy. * Existing bindings to the custom role are not changed, but they have no effect. * By default, the response from ListRoles does not include the custom role. You have 7 days to undelete the custom role. After 7 days, the following changes occur: * The custom role is permanently deleted and cannot be recovered. 
* If an IAM policy contains a binding to the custom role, the binding is permanently removed.", "flatPath": "v1/projects/{projectsId}/roles/{rolesId}", "httpMethod": "DELETE", "id": "iam.projects.roles.delete", @@ -419,7 +423,7 @@ "type": "string" }, "name": { - "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.delete()`](/iam/reference/rest/v1/projects.roles/delete):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.delete()`](/iam/reference/rest/v1/organizations.roles/delete):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n deletes only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](/iam/reference/rest/v1/projects.roles) or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`projects.roles.delete()`](/iam/reference/rest/v1/projects.roles/delete): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.delete()`](/iam/reference/rest/v1/organizations.roles/delete): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", "location": "path", "pattern": "^projects/[^/]+/roles/[^/]+$", "required": true, @@ -435,7 +439,7 @@ ] }, "get": { - "description": "Gets a Role definition.", + "description": "Gets the definition of a Role.", "flatPath": "v1/projects/{projectsId}/roles/{rolesId}", "httpMethod": "GET", "id": "iam.projects.roles.get", @@ -444,7 +448,7 @@ ], "parameters": { "name": { - "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`.\n This method returns results from all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. 
Example request URL:\n `https://iam.googleapis.com/v1/roles/{ROLE_NAME}`\n\n* [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n returns only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + "description": "The `name` parameter's value depends on the target resource for the request, namely [`roles`](/iam/reference/rest/v1/roles), [`projects`](/iam/reference/rest/v1/projects.roles), or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`. This method returns results from all [predefined roles](/iam/docs/understanding-roles#predefined_roles) in Cloud IAM. Example request URL: `https://iam.googleapis.com/v1/roles/{ROLE_NAME}` * [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", "location": "path", "pattern": "^projects/[^/]+/roles/[^/]+$", "required": true, @@ -460,7 +464,7 @@ ] }, "list": { - "description": "Lists the Roles defined on a resource.", + "description": "Lists every predefined Role that IAM supports, or every custom role that is defined for an organization or project.", "flatPath": "v1/projects/{projectsId}/roles", "httpMethod": "GET", "id": "iam.projects.roles.list", @@ -469,7 +473,7 @@ ], "parameters": { "pageSize": { - "description": "Optional limit on the number of roles to include in the response.", + "description": "Optional limit on the number of roles to include in the response. The default is 300, and the maximum is 1,000.", "format": "int32", "location": "query", "type": "integer" @@ -480,7 +484,7 @@ "type": "string" }, "parent": { - "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). 
Each\nresource type's `parent` value format is described below:\n\n* [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string.\n This method doesn't require a resource; it simply returns all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles`\n\n* [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list):\n `projects/{PROJECT_ID}`. This method lists all project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list):\n `organizations/{ORGANIZATION_ID}`. This method lists all\n organization-level [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + "description": "The `parent` parameter's value depends on the target resource for the request, namely [`roles`](/iam/reference/rest/v1/roles), [`projects`](/iam/reference/rest/v1/projects.roles), or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `parent` value format is described below: * [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string. This method doesn't require a resource; it simply returns all [predefined roles](/iam/docs/understanding-roles#predefined_roles) in Cloud IAM. Example request URL: `https://iam.googleapis.com/v1/roles` * [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list): `projects/{PROJECT_ID}`. This method lists all project-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list): `organizations/{ORGANIZATION_ID}`. This method lists all organization-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -492,11 +496,15 @@ "type": "boolean" }, "view": { - "description": "Optional view for the returned Role objects. When `FULL` is specified,\nthe `includedPermissions` field is returned, which includes a list of all\npermissions in the role. The default value is `BASIC`, which does not\nreturn the `includedPermissions` field.", + "description": "Optional view for the returned Role objects. When `FULL` is specified, the `includedPermissions` field is returned, which includes a list of all permissions in the role. The default value is `BASIC`, which does not return the `includedPermissions` field.", "enum": [ "BASIC", "FULL" ], + "enumDescriptions": [ + "Omits the `included_permissions` field. This is the default value.", + "Returns all fields." 
+ ], "location": "query", "type": "string" } @@ -510,7 +518,7 @@ ] }, "patch": { - "description": "Updates a Role definition.", + "description": "Updates the definition of a custom Role.", "flatPath": "v1/projects/{projectsId}/roles/{rolesId}", "httpMethod": "PATCH", "id": "iam.projects.roles.patch", @@ -519,7 +527,7 @@ ], "parameters": { "name": { - "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.patch()`](/iam/reference/rest/v1/projects.roles/patch):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.patch()`](/iam/reference/rest/v1/organizations.roles/patch):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n updates only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](/iam/reference/rest/v1/projects.roles) or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`projects.roles.patch()`](/iam/reference/rest/v1/projects.roles/patch): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.patch()`](/iam/reference/rest/v1/organizations.roles/patch): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", "location": "path", "pattern": "^projects/[^/]+/roles/[^/]+$", "required": true, @@ -544,7 +552,7 @@ ] }, "undelete": { - "description": "Undelete a Role, bringing it back in its previous state.", + "description": "Undeletes a custom Role.", "flatPath": "v1/projects/{projectsId}/roles/{rolesId}:undelete", "httpMethod": "POST", "id": "iam.projects.roles.undelete", @@ -553,7 +561,7 @@ ], "parameters": { "name": { - "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.undelete()`](/iam/reference/rest/v1/projects.roles/undelete):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes\n only [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. 
Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.undelete()`](/iam/reference/rest/v1/organizations.roles/undelete):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n undeletes only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](/iam/reference/rest/v1/projects.roles) or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`projects.roles.undelete()`](/iam/reference/rest/v1/projects.roles/undelete): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.undelete()`](/iam/reference/rest/v1/organizations.roles/undelete): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", "location": "path", "pattern": "^projects/[^/]+/roles/[^/]+$", "required": true, @@ -576,7 +584,7 @@ "serviceAccounts": { "methods": { "create": { - "description": "Creates a ServiceAccount\nand returns it.", + "description": "Creates a ServiceAccount.", "flatPath": "v1/projects/{projectsId}/serviceAccounts", "httpMethod": "POST", "id": "iam.projects.serviceAccounts.create", @@ -585,7 +593,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the project associated with the service\naccounts, such as `projects/my-project-123`.", + "description": "Required. The resource name of the project associated with the service accounts, such as `projects/my-project-123`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -604,7 +612,7 @@ ] }, "delete": { - "description": "Deletes a ServiceAccount.", + "description": "Deletes a ServiceAccount. **Warning:** After you delete a service account, you might not be able to undelete it. If you know that you need to re-enable the service account in the future, use DisableServiceAccount instead. If you delete a service account, IAM permanently removes the service account 30 days later. Google Cloud cannot recover the service account after it is permanently removed, even if you file a support request. To help avoid unplanned outages, we recommend that you disable the service account before you delete it. Use DisableServiceAccount to disable the service account, then wait at least 24 hours and watch for unintended consequences. If there are no unintended consequences, you can delete the service account.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", "httpMethod": "DELETE", "id": "iam.projects.serviceAccounts.delete", @@ -613,7 +621,7 @@ ], "parameters": { "name": { - "description": "Required. 
The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "description": "Required. The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -629,7 +637,7 @@ ] }, "disable": { - "description": "DisableServiceAccount is currently in the alpha launch stage.\n\nDisables a ServiceAccount,\nwhich immediately prevents the service account from authenticating and\ngaining access to APIs.\n\nDisabled service accounts can be safely restored by using\nEnableServiceAccount at any point. Deleted service accounts cannot be\nrestored using this method.\n\nDisabling a service account that is bound to VMs, Apps, Functions, or\nother jobs will cause those jobs to lose access to resources if they are\nusing the disabled service account.\n\nTo improve reliability of your services and avoid unexpected outages, it\nis recommended to first disable a service account rather than delete it.\nAfter disabling the service account, wait at least 24 hours to verify there\nare no unintended consequences, and then delete the service account.", + "description": "Disables a ServiceAccount immediately. If an application uses the service account to authenticate, that application can no longer call Google APIs or access Google Cloud resources. Existing access tokens for the service account are rejected, and requests for new access tokens will fail. To re-enable the service account, use EnableServiceAccount. After you re-enable the service account, its existing access tokens will be accepted, and you can request new access tokens. To help avoid unplanned outages, we recommend that you disable the service account before you delete it. Use this method to disable the service account, then wait at least 24 hours and watch for unintended consequences. If there are no unintended consequences, you can delete the service account with DeleteServiceAccount.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:disable", "httpMethod": "POST", "id": "iam.projects.serviceAccounts.disable", @@ -638,7 +646,7 @@ ], "parameters": { "name": { - "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "description": "The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. 
The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -657,7 +665,7 @@ ] }, "enable": { - "description": "EnableServiceAccount is currently in the alpha launch stage.\n\n Restores a disabled ServiceAccount\n that has been manually disabled by using DisableServiceAccount. Service\n accounts that have been disabled by other means or for other reasons,\n such as abuse, cannot be restored using this method.\n\n EnableServiceAccount will have no effect on a service account that is\n not disabled. Enabling an already enabled service account will have no\n effect.", + "description": "Enables a ServiceAccount that was disabled by DisableServiceAccount. If the service account is already enabled, then this method has no effect. If the service account was disabled by other means—for example, if Google disabled the service account because it was compromised—you cannot use this method to enable the service account.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:enable", "httpMethod": "POST", "id": "iam.projects.serviceAccounts.enable", @@ -666,7 +674,7 @@ ], "parameters": { "name": { - "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "description": "The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -694,7 +702,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "description": "Required. The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -710,7 +718,7 @@ ] }, "getIamPolicy": { - "description": "Returns the Cloud IAM access control policy for a\nServiceAccount.\n\nNote: Service accounts are both\n[resources and\nidentities](/iam/docs/service-accounts#service_account_permissions). This\nmethod treats the service account as a resource. It returns the Cloud IAM\npolicy that reflects what members have access to the service account.\n\nThis method does not return what resources the service account has access\nto. To see if a service account has access to a resource, call the\n`getIamPolicy` method on the target resource. 
For example, to view grants\nfor a project, call the\n[projects.getIamPolicy](/resource-manager/reference/rest/v1/projects/getIamPolicy)\nmethod.", + "description": "Gets the IAM policy that is attached to a ServiceAccount. This IAM policy specifies which members have access to the service account. This method does not tell you whether the service account has been granted any roles on other resources. To check whether a service account has role grants on a resource, use the `getIamPolicy` method for that resource. For example, to view the role grants for a project, call the Resource Manager API's [`projects.getIamPolicy`](https://cloud.google.com/resource-manager/reference/rest/v1/projects/getIamPolicy) method.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:getIamPolicy", "httpMethod": "POST", "id": "iam.projects.serviceAccounts.getIamPolicy", @@ -719,13 +727,13 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" }, "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -741,7 +749,7 @@ ] }, "list": { - "description": "Lists ServiceAccounts for a project.", + "description": "Lists every ServiceAccount that belongs to a specific project.", "flatPath": "v1/projects/{projectsId}/serviceAccounts", "httpMethod": "GET", "id": "iam.projects.serviceAccounts.list", @@ -750,20 +758,20 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the project associated with the service\naccounts, such as `projects/my-project-123`.", + "description": "Required. The resource name of the project associated with the service accounts, such as `projects/my-project-123`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, "type": "string" }, "pageSize": { - "description": "Optional limit on the number of service accounts to include in the\nresponse. Further accounts can subsequently be obtained by including the\nListServiceAccountsResponse.next_page_token\nin a subsequent request.", + "description": "Optional limit on the number of service accounts to include in the response. Further accounts can subsequently be obtained by including the ListServiceAccountsResponse.next_page_token in a subsequent request. 
The default is 20, and the maximum is 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Optional pagination token returned in an earlier\nListServiceAccountsResponse.next_page_token.", + "description": "Optional pagination token returned in an earlier ListServiceAccountsResponse.next_page_token.", "location": "query", "type": "string" } @@ -777,7 +785,7 @@ ] }, "patch": { - "description": "Patches a ServiceAccount.\n\nCurrently, only the following fields are updatable:\n`display_name` and `description`.\n\nOnly fields specified in the request are guaranteed to be returned in\nthe response. Other fields in the response may be empty.\n\nNote: The field mask is required.", + "description": "Patches a ServiceAccount.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", "httpMethod": "PATCH", "id": "iam.projects.serviceAccounts.patch", @@ -786,7 +794,7 @@ ], "parameters": { "name": { - "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\n\nRequests using `-` as a wildcard for the `PROJECT_ID` will infer the\nproject from the `account` and the `ACCOUNT` value can be the `email`\naddress or the `unique_id` of the service account.\n\nIn responses the resource name will always be in the format\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.", + "description": "The resource name of the service account. Use one of the following formats: * `projects/{PROJECT_ID}/serviceAccounts/{EMAIL_ADDRESS}` * `projects/{PROJECT_ID}/serviceAccounts/{UNIQUE_ID}` As an alternative, you can use the `-` wildcard character instead of the project ID: * `projects/-/serviceAccounts/{EMAIL_ADDRESS}` * `projects/-/serviceAccounts/{UNIQUE_ID}` When possible, avoid using the `-` wildcard character, because it can cause response messages to contain misleading error codes. For example, if you try to get the service account `projects/-/serviceAccounts/fake@example.com`, which does not exist, the response contains an HTTP `403 Forbidden` error instead of a `404 Not Found` error.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -805,7 +813,7 @@ ] }, "setIamPolicy": { - "description": "Sets the Cloud IAM access control policy for a\nServiceAccount.\n\nNote: Service accounts are both\n[resources and\nidentities](/iam/docs/service-accounts#service_account_permissions). This\nmethod treats the service account as a resource. Use it to grant members\naccess to the service account, such as when they need to impersonate it.\n\nThis method does not grant the service account access to other resources,\nsuch as projects. To grant a service account access to resources, include\nthe service account in the Cloud IAM policy for the desired resource, then\ncall the appropriate `setIamPolicy` method on the target resource. For\nexample, to grant a service account access to a project, call the\n[projects.setIamPolicy](/resource-manager/reference/rest/v1/projects/setIamPolicy)\nmethod.", + "description": "Sets the IAM policy that is attached to a ServiceAccount. Use this method to grant or revoke access to the service account. For example, you could grant a member the ability to impersonate the service account. This method does not enable the service account to access other resources. To grant roles to a service account on a resource, follow these steps: 1. Call the resource's `getIamPolicy` method to get its current IAM policy. 2. 
Edit the policy so that it binds the service account to an IAM role for the resource. 3. Call the resource's `setIamPolicy` method to update its IAM policy. For detailed instructions, see [Granting roles to a service account for specific resources](https://cloud.google.com/iam/help/service-accounts/granting-access-to-service-accounts).", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:setIamPolicy", "httpMethod": "POST", "id": "iam.projects.serviceAccounts.setIamPolicy", @@ -814,7 +822,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -833,7 +841,7 @@ ] }, "signBlob": { - "description": "**Note**: This method is in the process of being deprecated. Call the\n[`signBlob()`](/iam/credentials/reference/rest/v1/projects.serviceAccounts/signBlob)\nmethod of the Cloud IAM Service Account Credentials API instead.\n\nSigns a blob using a service account's system-managed private key.", + "description": "**Note:** This method is deprecated and will stop working on July 1, 2021. Use the [`signBlob`](https://cloud.google.com/iam/help/rest-credentials/v1/projects.serviceAccounts/signBlob) method in the IAM Service Account Credentials API instead. If you currently use this method, see the [migration guide](https://cloud.google.com/iam/help/credentials/migrate-api) for instructions. Signs a blob using the system-managed private key for a ServiceAccount.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", "httpMethod": "POST", "id": "iam.projects.serviceAccounts.signBlob", @@ -842,7 +850,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "description": "Required. Deprecated. [Migrate to Service Account Credentials API](https://cloud.google.com/iam/help/credentials/migrate-api). The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -861,7 +869,7 @@ ] }, "signJwt": { - "description": "**Note**: This method is in the process of being deprecated. Call the\n[`signJwt()`](/iam/credentials/reference/rest/v1/projects.serviceAccounts/signJwt)\nmethod of the Cloud IAM Service Account Credentials API instead.\n\nSigns a JWT using a service account's system-managed private key.\n\nIf no expiry time (`exp`) is provided in the `SignJwtRequest`, IAM sets an\nan expiry time of one hour by default. If you request an expiry time of\nmore than one hour, the request will fail.", + "description": "**Note:** This method is deprecated and will stop working on July 1, 2021. 
Use the [`signJwt`](https://cloud.google.com/iam/help/rest-credentials/v1/projects.serviceAccounts/signJwt) method in the IAM Service Account Credentials API instead. If you currently use this method, see the [migration guide](https://cloud.google.com/iam/help/credentials/migrate-api) for instructions. Signs a JSON Web Token (JWT) using the system-managed private key for a ServiceAccount.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", "httpMethod": "POST", "id": "iam.projects.serviceAccounts.signJwt", @@ -870,7 +878,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "description": "Required. Deprecated. [Migrate to Service Account Credentials API](https://cloud.google.com/iam/help/credentials/migrate-api). The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -889,7 +897,7 @@ ] }, "testIamPermissions": { - "description": "Tests the specified permissions against the IAM access control policy\nfor a ServiceAccount.", + "description": "Tests whether the caller has the specified permissions on a ServiceAccount.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:testIamPermissions", "httpMethod": "POST", "id": "iam.projects.serviceAccounts.testIamPermissions", @@ -898,7 +906,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -917,7 +925,7 @@ ] }, "undelete": { - "description": "Restores a deleted ServiceAccount.\nThis is to be used as an action of last resort. A service account may\nnot always be restorable.", + "description": "Restores a deleted ServiceAccount. **Important:** It is not always possible to restore a deleted service account. Use this method only as a last resort. After you delete a service account, IAM permanently removes the service account 30 days later. There is no way to restore a deleted service account that has been permanently removed.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:undelete", "httpMethod": "POST", "id": "iam.projects.serviceAccounts.undelete", @@ -926,7 +934,7 @@ ], "parameters": { "name": { - "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_UNIQUE_ID}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account.", + "description": "The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_UNIQUE_ID}`. 
Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -945,7 +953,7 @@ ] }, "update": { - "description": "Note: This method is in the process of being deprecated. Use\nPatchServiceAccount instead.\n\nUpdates a ServiceAccount.\n\nCurrently, only the following fields are updatable:\n`display_name` and `description`.", + "description": "**Note:** We are in the process of deprecating this method. Use PatchServiceAccount instead. Updates a ServiceAccount. You can update only the `display_name` and `description` fields.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", "httpMethod": "PUT", "id": "iam.projects.serviceAccounts.update", @@ -954,7 +962,7 @@ ], "parameters": { "name": { - "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\n\nRequests using `-` as a wildcard for the `PROJECT_ID` will infer the\nproject from the `account` and the `ACCOUNT` value can be the `email`\naddress or the `unique_id` of the service account.\n\nIn responses the resource name will always be in the format\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.", + "description": "The resource name of the service account. Use one of the following formats: * `projects/{PROJECT_ID}/serviceAccounts/{EMAIL_ADDRESS}` * `projects/{PROJECT_ID}/serviceAccounts/{UNIQUE_ID}` As an alternative, you can use the `-` wildcard character instead of the project ID: * `projects/-/serviceAccounts/{EMAIL_ADDRESS}` * `projects/-/serviceAccounts/{UNIQUE_ID}` When possible, avoid using the `-` wildcard character, because it can cause response messages to contain misleading error codes. For example, if you try to get the service account `projects/-/serviceAccounts/fake@example.com`, which does not exist, the response contains an HTTP `403 Forbidden` error instead of a `404 Not Found` error.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -977,7 +985,7 @@ "keys": { "methods": { "create": { - "description": "Creates a ServiceAccountKey\nand returns it.", + "description": "Creates a ServiceAccountKey.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys", "httpMethod": "POST", "id": "iam.projects.serviceAccounts.keys.create", @@ -986,7 +994,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "description": "Required. The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -1005,7 +1013,7 @@ ] }, "delete": { - "description": "Deletes a ServiceAccountKey.", + "description": "Deletes a ServiceAccountKey. 
Deleting a service account key does not revoke short-lived credentials that have been issued based on the service account key.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys/{keysId}", "httpMethod": "DELETE", "id": "iam.projects.serviceAccounts.keys.delete", @@ -1014,7 +1022,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the service account key in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "description": "Required. The resource name of the service account key in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+/keys/[^/]+$", "required": true, @@ -1030,7 +1038,7 @@ ] }, "get": { - "description": "Gets the ServiceAccountKey\nby key id.", + "description": "Gets a ServiceAccountKey.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys/{keysId}", "httpMethod": "GET", "id": "iam.projects.serviceAccounts.keys.get", @@ -1039,19 +1047,24 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the service account key in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`.\n\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "description": "Required. The resource name of the service account key in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+/keys/[^/]+$", "required": true, "type": "string" }, "publicKeyType": { - "description": "The output format of the public key requested.\nX509_PEM is the default output format.", + "description": "The output format of the public key requested. X509_PEM is the default output format.", "enum": [ "TYPE_NONE", "TYPE_X509_PEM_FILE", "TYPE_RAW_PUBLIC_KEY" ], + "enumDescriptions": [ + "Unspecified. Returns nothing here.", + "X509 PEM format.", + "Raw public key." + ], "location": "query", "type": "string" } @@ -1065,7 +1078,7 @@ ] }, "list": { - "description": "Lists ServiceAccountKeys.", + "description": "Lists every ServiceAccountKey for a service account.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys", "httpMethod": "GET", "id": "iam.projects.serviceAccounts.keys.list", @@ -1074,18 +1087,23 @@ ], "parameters": { "keyTypes": { - "description": "Filters the types of keys the user wants to include in the list\nresponse. Duplicate key types are not allowed. If no key type\nis provided, all keys are returned.", + "description": "Filters the types of keys the user wants to include in the list response. Duplicate key types are not allowed. 
If no key type is provided, all keys are returned.", "enum": [ "KEY_TYPE_UNSPECIFIED", "USER_MANAGED", "SYSTEM_MANAGED" ], + "enumDescriptions": [ + "Unspecified key type. The presence of this in the message will immediately result in an error.", + "User-managed keys (managed and rotated by the user).", + "System-managed keys (managed and rotated by Google)." + ], "location": "query", "repeated": true, "type": "string" }, "name": { - "description": "Required. The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\n\nUsing `-` as a wildcard for the `PROJECT_ID`, will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "description": "Required. The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID`, will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -1101,7 +1119,7 @@ ] }, "upload": { - "description": "Upload public key for a given service account.\nThis rpc will create a\nServiceAccountKey that has the\nprovided public key and returns it.", + "description": "Creates a ServiceAccountKey, using a public key that you provide.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys:upload", "httpMethod": "POST", "id": "iam.projects.serviceAccounts.keys.upload", @@ -1110,7 +1128,7 @@ ], "parameters": { "name": { - "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "description": "The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -1137,7 +1155,7 @@ "roles": { "methods": { "get": { - "description": "Gets a Role definition.", + "description": "Gets the definition of a Role.", "flatPath": "v1/roles/{rolesId}", "httpMethod": "GET", "id": "iam.roles.get", @@ -1146,7 +1164,7 @@ ], "parameters": { "name": { - "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`.\n This method returns results from all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles/{ROLE_NAME}`\n\n* [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. 
Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n returns only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + "description": "The `name` parameter's value depends on the target resource for the request, namely [`roles`](/iam/reference/rest/v1/roles), [`projects`](/iam/reference/rest/v1/projects.roles), or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`. This method returns results from all [predefined roles](/iam/docs/understanding-roles#predefined_roles) in Cloud IAM. Example request URL: `https://iam.googleapis.com/v1/roles/{ROLE_NAME}` * [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", "location": "path", "pattern": "^roles/[^/]+$", "required": true, @@ -1162,14 +1180,14 @@ ] }, "list": { - "description": "Lists the Roles defined on a resource.", + "description": "Lists every predefined Role that IAM supports, or every custom role that is defined for an organization or project.", "flatPath": "v1/roles", "httpMethod": "GET", "id": "iam.roles.list", "parameterOrder": [], "parameters": { "pageSize": { - "description": "Optional limit on the number of roles to include in the response.", + "description": "Optional limit on the number of roles to include in the response. The default is 300, and the maximum is 1,000.", "format": "int32", "location": "query", "type": "integer" @@ -1180,7 +1198,7 @@ "type": "string" }, "parent": { - "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string.\n This method doesn't require a resource; it simply returns all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles`\n\n* [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list):\n `projects/{PROJECT_ID}`. 
This method lists all project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list):\n `organizations/{ORGANIZATION_ID}`. This method lists all\n organization-level [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + "description": "The `parent` parameter's value depends on the target resource for the request, namely [`roles`](/iam/reference/rest/v1/roles), [`projects`](/iam/reference/rest/v1/projects.roles), or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `parent` value format is described below: * [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string. This method doesn't require a resource; it simply returns all [predefined roles](/iam/docs/understanding-roles#predefined_roles) in Cloud IAM. Example request URL: `https://iam.googleapis.com/v1/roles` * [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list): `projects/{PROJECT_ID}`. This method lists all project-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list): `organizations/{ORGANIZATION_ID}`. This method lists all organization-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", "location": "query", "type": "string" }, @@ -1190,11 +1208,15 @@ "type": "boolean" }, "view": { - "description": "Optional view for the returned Role objects. When `FULL` is specified,\nthe `includedPermissions` field is returned, which includes a list of all\npermissions in the role. The default value is `BASIC`, which does not\nreturn the `includedPermissions` field.", + "description": "Optional view for the returned Role objects. When `FULL` is specified, the `includedPermissions` field is returned, which includes a list of all permissions in the role. The default value is `BASIC`, which does not return the `includedPermissions` field.", "enum": [ "BASIC", "FULL" ], + "enumDescriptions": [ + "Omits the `included_permissions` field. This is the default value.", + "Returns all fields." + ], "location": "query", "type": "string" } @@ -1208,7 +1230,7 @@ ] }, "queryGrantableRoles": { - "description": "Queries roles that can be granted on a particular resource.\nA role is grantable if it can be used as the role in a binding for a policy\nfor that resource.", + "description": "Lists roles that can be granted on a Google Cloud resource. A role is grantable if the IAM policy for the resource can contain bindings to the role.", "flatPath": "v1/roles:queryGrantableRoles", "httpMethod": "POST", "id": "iam.roles.queryGrantableRoles", @@ -1228,11 +1250,11 @@ } } }, - "revision": "20200221", + "revision": "20200910", "rootUrl": "https://iam.googleapis.com/", "schemas": { "AdminAuditData": { - "description": "Audit log information specific to Cloud IAM admin APIs. 
This message is\nserialized as an `Any` type in the `ServiceData` message of an\n`AuditLog` message.", + "description": "Audit log information specific to Cloud IAM admin APIs. This message is serialized as an `Any` type in the `ServiceData` message of an `AuditLog` message.", "id": "AdminAuditData", "properties": { "permissionDelta": { @@ -1243,7 +1265,7 @@ "type": "object" }, "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"sampleservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:aliya@example.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts jose@example.com from DATA_READ logging, and\naliya@example.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -1254,14 +1276,14 @@ "type": "array" }, "service": { - "description": "Specifies a service that will be enabled for audit logging.\nFor example, `storage.googleapis.com`, `cloudsql.googleapis.com`.\n`allServices` is a special value that covers all services.", + "description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.", "type": "string" } }, "type": "object" }, "AuditData": { - "description": "Audit log information specific to Cloud IAM. 
This message is serialized\nas an `Any` type in the `ServiceData` message of an\n`AuditLog` message.", + "description": "Audit log information specific to Cloud IAM. This message is serialized as an `Any` type in the `ServiceData` message of an `AuditLog` message.", "id": "AuditData", "properties": { "policyDelta": { @@ -1272,11 +1294,11 @@ "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\njose@example.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions. Example: { \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { - "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", + "description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.", "items": { "type": "string" }, @@ -1306,7 +1328,7 @@ "id": "AuditableService", "properties": { "name": { - "description": "Public name of the service.\nFor example, the service name for Cloud IAM is 'iam.googleapis.com'.", + "description": "Public name of the service. For example, the service name for Cloud IAM is 'iam.googleapis.com'.", "type": "string" } }, @@ -1316,30 +1338,34 @@ "description": "Associates `members` with a `role`.", "id": "Binding", "properties": { + "bindingId": { + "description": "A client-specified ID for this binding. Expected to be globally unique to support the internal bindings-by-ID API.", + "type": "string" + }, "condition": { "$ref": "Expr", - "description": "The condition that is associated with this binding.\nNOTE: An unsatisfied condition will not allow user access via current\nbinding. Different bindings, including their conditions, are examined\nindependently." + "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. 
For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a user that has been recently deleted. For\n example, `alice@example.com?uid=123456789012345678901`. If the user is\n recovered, this value reverts to `user:{emailid}` and the recovered user\n retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus\n unique identifier) representing a service account that has been recently\n deleted. For example,\n `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.\n If the service account is undeleted, this value reverts to\n `serviceAccount:{emailid}` and the undeleted service account retains the\n role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a Google group that has been recently\n deleted. For example, `admins@example.com?uid=123456789012345678901`. If\n the group is recovered, this value reverts to `group:{emailid}` and the\n recovered group retains the role in the binding.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. 
", "items": { "type": "string" }, "type": "array" }, "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", "type": "string" } }, "type": "object" }, "BindingDelta": { - "description": "One delta entry for Binding. Each individual change (only one member in each\nentry) to a binding will be a separate entry.", + "description": "One delta entry for Binding. Each individual change (only one member in each entry) to a binding will be a separate entry.", "id": "BindingDelta", "properties": { "action": { - "description": "The action that was performed on a Binding.\nRequired", + "description": "The action that was performed on a Binding. Required", "enum": [ "ACTION_UNSPECIFIED", "ADD", @@ -1357,11 +1383,11 @@ "description": "The condition that is associated with this binding." }, "member": { - "description": "A single identity requesting access for a Cloud Platform resource.\nFollows the same format of Binding.members.\nRequired", + "description": "A single identity requesting access for a Cloud Platform resource. Follows the same format of Binding.members. Required", "type": "string" }, "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", + "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. Required", "type": "string" } }, @@ -1376,7 +1402,7 @@ "description": "The Role resource to create." }, "roleId": { - "description": "The role ID to use for this role.", + "description": "The role ID to use for this role. A role ID may contain alphanumeric characters, underscores (`_`), and periods (`.`). It must contain a minimum of 3 characters and a maximum of 64 characters.", "type": "string" } }, @@ -1387,7 +1413,7 @@ "id": "CreateServiceAccountKeyRequest", "properties": { "keyAlgorithm": { - "description": "Which type of key and algorithm to use for the key.\nThe default is currently a 2K RSA key. However this may change in the\nfuture.", + "description": "Which type of key and algorithm to use for the key. The default is currently a 2K RSA key. However this may change in the future.", "enum": [ "KEY_ALG_UNSPECIFIED", "KEY_ALG_RSA_1024", @@ -1401,7 +1427,7 @@ "type": "string" }, "privateKeyType": { - "description": "The output format of the private key. The default value is\n`TYPE_GOOGLE_CREDENTIALS_FILE`, which is the Google Credentials File\nformat.", + "description": "The output format of the private key. The default value is `TYPE_GOOGLE_CREDENTIALS_FILE`, which is the Google Credentials File format.", "enum": [ "TYPE_UNSPECIFIED", "TYPE_PKCS12_FILE", @@ -1409,7 +1435,7 @@ ], "enumDescriptions": [ "Unspecified. Equivalent to `TYPE_GOOGLE_CREDENTIALS_FILE`.", - "PKCS12 format.\nThe password for the PKCS12 file is `notasecret`.\nFor more information, see https://tools.ietf.org/html/rfc7292.", + "PKCS12 format. The password for the PKCS12 file is `notasecret`. For more information, see https://tools.ietf.org/html/rfc7292.", "Google Credentials File format." ], "type": "string" @@ -1422,12 +1448,12 @@ "id": "CreateServiceAccountRequest", "properties": { "accountId": { - "description": "Required. The account id that is used to generate the service account\nemail address and a stable unique id. 
It is unique within a project,\nmust be 6-30 characters long, and match the regular expression\n`[a-z]([-a-z0-9]*[a-z0-9])` to comply with RFC1035.", + "description": "Required. The account id that is used to generate the service account email address and a stable unique id. It is unique within a project, must be 6-30 characters long, and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])` to comply with RFC1035.", "type": "string" }, "serviceAccount": { "$ref": "ServiceAccount", - "description": "The ServiceAccount resource to\ncreate. Currently, only the following values are user assignable:\n`display_name` and `description`." + "description": "The ServiceAccount resource to create. Currently, only the following values are user assignable: `display_name` and `description`." } }, "type": "object" @@ -1439,7 +1465,7 @@ "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" @@ -1451,23 +1477,23 @@ "type": "object" }, "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL)\nsyntax. CEL is a C-like expression language. The syntax and semantics of CEL\nare documented at https://github.com/google/cel-spec.\n\nExample (Comparison):\n\n title: \"Summary size limit\"\n description: \"Determines if a summary is less than 100 chars\"\n expression: \"document.summary.size() \u003c 100\"\n\nExample (Equality):\n\n title: \"Requestor is owner\"\n description: \"Determines if requestor is the document owner\"\n expression: \"document.owner == request.auth.claims.email\"\n\nExample (Logic):\n\n title: \"Public documents\"\n description: \"Determine whether the document should be publicly visible\"\n expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\n\nExample (Data Manipulation):\n\n title: \"Notification string\"\n description: \"Create a notification string with a timestamp.\"\n expression: \"'New message received at ' + string(document.create_time)\"\n\nThe exact variables and functions that may be referenced within an expression\nare determined by the service that evaluates it. See the service\ndocumentation for additional information.", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. 
Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { - "description": "Optional. Description of the expression. This is a longer text which\ndescribes the expression, e.g. when hovered over it in a UI.", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, "expression": { - "description": "Textual representation of an expression in Common Expression Language\nsyntax.", + "description": "Textual representation of an expression in Common Expression Language syntax.", "type": "string" }, "location": { - "description": "Optional. String indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", "type": "string" }, "title": { - "description": "Optional. Title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", "type": "string" } }, @@ -1482,14 +1508,14 @@ "description": "google.iam.v1.Binding.condition object to be linted." }, "fullResourceName": { - "description": "The full resource name of the policy this lint request is about.\n\nThe name follows the Google Cloud Platform (GCP) resource format.\nFor example, a GCP project with ID `my-project` will be named\n`//cloudresourcemanager.googleapis.com/projects/my-project`.\n\nThe resource name is not used to read the policy instance from the Cloud\nIAM database. The candidate policy for lint has to be provided in the same\nrequest object.", + "description": "The full resource name of the policy this lint request is about. The name follows the Google Cloud Platform (GCP) resource format. For example, a GCP project with ID `my-project` will be named `//cloudresourcemanager.googleapis.com/projects/my-project`. The resource name is not used to read the policy instance from the Cloud IAM database. The candidate policy for lint has to be provided in the same request object.", "type": "string" } }, "type": "object" }, "LintPolicyResponse": { - "description": "The response of a lint operation. An empty response indicates\nthe operation was able to fully execute and no lint issue was found.", + "description": "The response of a lint operation. 
An empty response indicates the operation was able to fully execute and no lint issue was found.", "id": "LintPolicyResponse", "properties": { "lintResults": { @@ -1511,7 +1537,7 @@ "type": "string" }, "fieldName": { - "description": "The name of the field for which this lint result is about.\n\nFor nested messages `field_name` consists of names of the embedded fields\nseparated by period character. The top-level qualifier is the input object\nto lint in the request. For example, the `field_name` value\n`condition.expression` identifies a lint result for the `expression` field\nof the provided condition.", + "description": "The name of the field for which this lint result is about. For nested messages `field_name` consists of names of the embedded fields separated by period character. The top-level qualifier is the input object to lint in the request. For example, the `field_name` value `condition.expression` identifies a lint result for the `expression` field of the provided condition.", "type": "string" }, "level": { @@ -1522,12 +1548,12 @@ ], "enumDescriptions": [ "Level is unspecified.", - "A validation unit which operates on an individual condition within a\nbinding." + "A validation unit which operates on an individual condition within a binding." ], "type": "string" }, "locationOffset": { - "description": "0-based character position of problematic construct within the object\nidentified by `field_name`. Currently, this is populated only for condition\nexpression.", + "description": "0-based character position of problematic construct within the object identified by `field_name`. Currently, this is populated only for condition expression.", "format": "int32", "type": "integer" }, @@ -1543,16 +1569,16 @@ ], "enumDescriptions": [ "Severity is unspecified.", - "A validation unit returns an error only for critical issues. If an\nattempt is made to set the problematic policy without rectifying the\ncritical issue, it causes the `setPolicy` operation to fail.", - "Any issue which is severe enough but does not cause an error.\nFor example, suspicious constructs in the input object will not\nnecessarily fail `setPolicy`, but there is a high likelihood that they\nwon't behave as expected during policy evaluation in `checkPolicy`.\nThis includes the following common scenarios:\n\n- Unsatisfiable condition: Expired timestamp in date/time condition.\n- Ineffective condition: Condition on a \u003cmember, role\u003e pair which is\n granted unconditionally in another binding of the same policy.", - "Reserved for the issues that are not severe as `ERROR`/`WARNING`, but\nneed special handling. For instance, messages about skipped validation\nunits are issued as `NOTICE`.", - "Any informative statement which is not severe enough to raise\n`ERROR`/`WARNING`/`NOTICE`, like auto-correction recommendations on the\ninput content. Note that current version of the linter does not utilize\n`INFO`.", + "A validation unit returns an error only for critical issues. If an attempt is made to set the problematic policy without rectifying the critical issue, it causes the `setPolicy` operation to fail.", + "Any issue which is severe enough but does not cause an error. For example, suspicious constructs in the input object will not necessarily fail `setPolicy`, but there is a high likelihood that they won't behave as expected during policy evaluation in `checkPolicy`. This includes the following common scenarios: - Unsatisfiable condition: Expired timestamp in date/time condition. 
- Ineffective condition: Condition on a pair which is granted unconditionally in another binding of the same policy.", + "Reserved for the issues that are not severe as `ERROR`/`WARNING`, but need special handling. For instance, messages about skipped validation units are issued as `NOTICE`.", + "Any informative statement which is not severe enough to raise `ERROR`/`WARNING`/`NOTICE`, like auto-correction recommendations on the input content. Note that current version of the linter does not utilize `INFO`.", "Deprecated severity level." ], "type": "string" }, "validationUnitName": { - "description": "The validation unit name, for instance\n\"lintValidationUnits/ConditionComplexityCheck\".", + "description": "The validation unit name, for instance \"lintValidationUnits/ConditionComplexityCheck\".", "type": "string" } }, @@ -1563,7 +1589,7 @@ "id": "ListRolesResponse", "properties": { "nextPageToken": { - "description": "To retrieve the next page of results, set\n`ListRolesRequest.page_token` to this value.", + "description": "To retrieve the next page of results, set `ListRolesRequest.page_token` to this value.", "type": "string" }, "roles": { @@ -1602,14 +1628,14 @@ "type": "array" }, "nextPageToken": { - "description": "To retrieve the next page of results, set\nListServiceAccountsRequest.page_token\nto this value.", + "description": "To retrieve the next page of results, set ListServiceAccountsRequest.page_token to this value.", "type": "string" } }, "type": "object" }, "PatchServiceAccountRequest": { - "description": "The patch service account request.", + "description": "The request for PatchServiceAccount. You can patch only the `display_name` and `description` fields. You must use the `update_mask` field to specify which of these fields you want to patch. Only the fields specified in the request are guaranteed to be returned in the response. Other fields may be empty in the response.", "id": "PatchServiceAccountRequest", "properties": { "serviceAccount": { @@ -1645,7 +1671,7 @@ "type": "string" }, "description": { - "description": "A brief description of what this Permission is used for.\nThis permission can ONLY be used in predefined roles.", + "description": "A brief description of what this Permission is used for. This permission can ONLY be used in predefined roles.", "type": "string" }, "name": { @@ -1656,7 +1682,7 @@ "type": "boolean" }, "primaryPermission": { - "description": "The preferred name for this permission. If present, then this permission is\nan alias of, and equivalent to, the listed primary_permission.", + "description": "The preferred name for this permission. If present, then this permission is an alias of, and equivalent to, the listed primary_permission.", "type": "string" }, "stage": { @@ -1683,7 +1709,7 @@ "type": "object" }, "PermissionDelta": { - "description": "A PermissionDelta message to record the added_permissions and\nremoved_permissions inside a role.", + "description": "A PermissionDelta message to record the added_permissions and removed_permissions inside a role.", "id": "PermissionDelta", "properties": { "addedPermissions": { @@ -1704,7 +1730,7 @@ "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access\ncontrols for Google Cloud resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). 
A `role` is a named list of\npermissions; each `role` can be an IAM predefined role or a user-created\ncustom role.\n\nOptionally, a `binding` can specify a `condition`, which is a logical\nexpression that allows access to a resource only if the expression evaluates\nto `true`. A condition can add constraints based on attributes of the\nrequest, the resource, or both.\n\n**JSON example:**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\"user:eve@example.com\"],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n }\n\n**YAML example:**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n - etag: BwWWja0YfJA=\n - version: 3\n\nFor a description of IAM and its features, see the\n[IAM documentation](https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "auditConfigs": { @@ -1715,19 +1741,19 @@ "type": "array" }, "bindings": { - "description": "Associates a list of `members` to a `role`. Optionally, may specify a\n`condition` that determines how and when the `bindings` are applied. Each\nof the `bindings` must contain at least one member.", + "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", "format": "byte", "type": "string" }, "version": { - "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. 
Requests that specify an invalid value\nare rejected.\n\nAny operation that affects conditional role bindings must specify version\n`3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding\n* Adding a conditional role binding to a policy\n* Changing a conditional role binding in a policy\n* Removing any role binding, with or without a condition, from a policy\n that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may\nspecify any valid version or leave the field unset.", + "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -1753,7 +1779,7 @@ "id": "QueryAuditableServicesRequest", "properties": { "fullResourceName": { - "description": "Required. The full resource name to query from the list of auditable\nservices.\n\nThe name follows the Google Cloud Platform resource format.\nFor example, a Cloud Platform project with id `my-project` will be named\n`//cloudresourcemanager.googleapis.com/projects/my-project`.", + "description": "Required. The full resource name to query from the list of auditable services. The name follows the Google Cloud Platform resource format. For example, a Cloud Platform project with id `my-project` will be named `//cloudresourcemanager.googleapis.com/projects/my-project`.", "type": "string" } }, @@ -1778,16 +1804,16 @@ "id": "QueryGrantableRolesRequest", "properties": { "fullResourceName": { - "description": "Required. The full resource name to query from the list of grantable roles.\n\nThe name follows the Google Cloud Platform resource format.\nFor example, a Cloud Platform project with id `my-project` will be named\n`//cloudresourcemanager.googleapis.com/projects/my-project`.", + "description": "Required. The full resource name to query from the list of grantable roles. The name follows the Google Cloud Platform resource format. 
For example, a Cloud Platform project with id `my-project` will be named `//cloudresourcemanager.googleapis.com/projects/my-project`.", "type": "string" }, "pageSize": { - "description": "Optional limit on the number of roles to include in the response.", + "description": "Optional limit on the number of roles to include in the response. The default is 300, and the maximum is 1,000.", "format": "int32", "type": "integer" }, "pageToken": { - "description": "Optional pagination token returned in an earlier\nQueryGrantableRolesResponse.", + "description": "Optional pagination token returned in an earlier QueryGrantableRolesResponse.", "type": "string" }, "view": { @@ -1796,7 +1822,7 @@ "FULL" ], "enumDescriptions": [ - "Omits the `included_permissions` field.\nThis is the default value.", + "Omits the `included_permissions` field. This is the default value.", "Returns all fields." ], "type": "string" @@ -1809,7 +1835,7 @@ "id": "QueryGrantableRolesResponse", "properties": { "nextPageToken": { - "description": "To retrieve the next page of results, set\n`QueryGrantableRolesRequest.page_token` to this value.", + "description": "To retrieve the next page of results, set `QueryGrantableRolesRequest.page_token` to this value.", "type": "string" }, "roles": { @@ -1827,16 +1853,16 @@ "id": "QueryTestablePermissionsRequest", "properties": { "fullResourceName": { - "description": "Required. The full resource name to query from the list of testable\npermissions.\n\nThe name follows the Google Cloud Platform resource format.\nFor example, a Cloud Platform project with id `my-project` will be named\n`//cloudresourcemanager.googleapis.com/projects/my-project`.", + "description": "Required. The full resource name to query from the list of testable permissions. The name follows the Google Cloud Platform resource format. For example, a Cloud Platform project with id `my-project` will be named `//cloudresourcemanager.googleapis.com/projects/my-project`.", "type": "string" }, "pageSize": { - "description": "Optional limit on the number of permissions to include in the response.", + "description": "Optional limit on the number of permissions to include in the response. The default is 100, and the maximum is 1,000.", "format": "int32", "type": "integer" }, "pageToken": { - "description": "Optional pagination token returned in an earlier\nQueryTestablePermissionsRequest.", + "description": "Optional pagination token returned in an earlier QueryTestablePermissionsRequest.", "type": "string" } }, @@ -1847,7 +1873,7 @@ "id": "QueryTestablePermissionsResponse", "properties": { "nextPageToken": { - "description": "To retrieve the next page of results, set\n`QueryTestableRolesRequest.page_token` to this value.", + "description": "To retrieve the next page of results, set `QueryTestableRolesRequest.page_token` to this value.", "type": "string" }, "permissions": { @@ -1865,7 +1891,7 @@ "id": "Role", "properties": { "deleted": { - "description": "The current deleted state of the role. This field is read only.\nIt will be ignored in calls to CreateRole and UpdateRole.", + "description": "The current deleted state of the role. This field is read only. 
It will be ignored in calls to CreateRole and UpdateRole.", "type": "boolean" }, "description": { @@ -1885,11 +1911,11 @@ "type": "array" }, "name": { - "description": "The name of the role.\n\nWhen Role is used in CreateRole, the role name must not be set.\n\nWhen Role is used in output and other input such as UpdateRole, the role\nname is the complete path, e.g., roles/logging.viewer for predefined roles\nand organizations/{ORGANIZATION_ID}/roles/logging.viewer for custom roles.", + "description": "The name of the role. When Role is used in CreateRole, the role name must not be set. When Role is used in output and other input such as UpdateRole, the role name is the complete path, e.g., roles/logging.viewer for predefined roles and organizations/{ORGANIZATION_ID}/roles/logging.viewer for custom roles.", "type": "string" }, "stage": { - "description": "The current launch stage of the role. If the `ALPHA` launch stage has been\nselected for a role, the `stage` field will not be included in the\nreturned definition for the role.", + "description": "The current launch stage of the role. If the `ALPHA` launch stage has been selected for a role, the `stage` field will not be included in the returned definition for the role.", "enum": [ "ALPHA", "BETA", @@ -1899,68 +1925,73 @@ "EAP" ], "enumDescriptions": [ - "The user has indicated this role is currently in an Alpha phase. If this\nlaunch stage is selected, the `stage` field will not be included when\nrequesting the definition for a given role.", + "The user has indicated this role is currently in an Alpha phase. If this launch stage is selected, the `stage` field will not be included when requesting the definition for a given role.", "The user has indicated this role is currently in a Beta phase.", "The user has indicated this role is generally available.", "The user has indicated this role is being deprecated.", - "This role is disabled and will not contribute permissions to any members\nit is granted to in policies.", + "This role is disabled and will not contribute permissions to any members it is granted to in policies.", "The user has indicated this role is currently in an EAP phase." ], "type": "string" }, "title": { - "description": "Optional. A human-readable title for the role. Typically this\nis limited to 100 UTF-8 bytes.", + "description": "Optional. A human-readable title for the role. Typically this is limited to 100 UTF-8 bytes.", "type": "string" } }, "type": "object" }, "ServiceAccount": { - "description": "A service account in the Identity and Access Management API.\n\nTo create a service account, specify the `project_id` and the `account_id`\nfor the account. The `account_id` is unique within the project, and is used\nto generate the service account email address and a stable\n`unique_id`.\n\nIf the account already exists, the account's resource name is returned\nin the format of projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}. The caller\ncan use the name in other methods to access the account.\n\nAll other methods can identify the service account using the format\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "description": "An IAM service account. A service account is an account for an application or a virtual machine (VM) instance, not a person. You can use a service account to call Google APIs. 
To learn more, read the [overview of service accounts](https://cloud.google.com/iam/help/service-accounts/overview). When you create a service account, you specify the project ID that owns the service account, as well as a name that must be unique within the project. IAM uses these values to create an email address that identifies the service account.", "id": "ServiceAccount", "properties": { "description": { - "description": "Optional. A user-specified opaque description of the service account.\nMust be less than or equal to 256 UTF-8 bytes.", + "description": "Optional. A user-specified, human-readable description of the service account. The maximum length is 256 UTF-8 bytes.", "type": "string" }, "disabled": { - "description": "@OutputOnly A bool indicate if the service account is disabled.\nThe field is currently in alpha phase.", + "description": "Output only. Whether the service account is disabled.", + "readOnly": true, "type": "boolean" }, "displayName": { - "description": "Optional. A user-specified name for the service account.\nMust be less than or equal to 100 UTF-8 bytes.", + "description": "Optional. A user-specified, human-readable name for the service account. The maximum length is 100 UTF-8 bytes.", "type": "string" }, "email": { - "description": "@OutputOnly The email address of the service account.", + "description": "Output only. The email address of the service account.", + "readOnly": true, "type": "string" }, "etag": { - "description": "Optional. Note: `etag` is an inoperable legacy field that is only returned\nfor backwards compatibility.", + "description": "Deprecated. Do not use.", "format": "byte", "type": "string" }, "name": { - "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\n\nRequests using `-` as a wildcard for the `PROJECT_ID` will infer the\nproject from the `account` and the `ACCOUNT` value can be the `email`\naddress or the `unique_id` of the service account.\n\nIn responses the resource name will always be in the format\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.", + "description": "The resource name of the service account. Use one of the following formats: * `projects/{PROJECT_ID}/serviceAccounts/{EMAIL_ADDRESS}` * `projects/{PROJECT_ID}/serviceAccounts/{UNIQUE_ID}` As an alternative, you can use the `-` wildcard character instead of the project ID: * `projects/-/serviceAccounts/{EMAIL_ADDRESS}` * `projects/-/serviceAccounts/{UNIQUE_ID}` When possible, avoid using the `-` wildcard character, because it can cause response messages to contain misleading error codes. For example, if you try to get the service account `projects/-/serviceAccounts/fake@example.com`, which does not exist, the response contains an HTTP `403 Forbidden` error instead of a `404 Not Found` error.", "type": "string" }, "oauth2ClientId": { - "description": "@OutputOnly The OAuth2 client id for the service account.\nThis is used in conjunction with the OAuth2 clientconfig API to make\nthree legged OAuth2 (3LO) flows to access the data of Google users.", + "description": "Output only. The OAuth 2.0 client ID for the service account.", + "readOnly": true, "type": "string" }, "projectId": { - "description": "@OutputOnly The id of the project that owns the service account.", + "description": "Output only. 
The ID of the project that owns the service account.", + "readOnly": true, "type": "string" }, "uniqueId": { - "description": "@OutputOnly The unique and stable id of the service account.", + "description": "Output only. The unique, stable numeric ID for the service account. Each service account retains its unique ID even if you delete the service account. For example, if you delete a service account, then create a new service account with the same name, the new service account has a different unique ID than the deleted service account.", + "readOnly": true, "type": "string" } }, "type": "object" }, "ServiceAccountKey": { - "description": "Represents a service account key.\n\nA service account has two sets of key-pairs: user-managed, and\nsystem-managed.\n\nUser-managed key-pairs can be created and deleted by users. Users are\nresponsible for rotating these keys periodically to ensure security of\ntheir service accounts. Users retain the private key of these key-pairs,\nand Google retains ONLY the public key.\n\nSystem-managed keys are automatically rotated by Google, and are used for\nsigning for a maximum of two weeks. The rotation process is probabilistic,\nand usage of the new key will gradually ramp up and down over the key's\nlifetime. We recommend caching the public key set for a service account for\nno more than 24 hours to ensure you have access to the latest keys.\n\nPublic keys for all service accounts are also published at the OAuth2\nService Account API.", + "description": "Represents a service account key. A service account has two sets of key-pairs: user-managed, and system-managed. User-managed key-pairs can be created and deleted by users. Users are responsible for rotating these keys periodically to ensure security of their service accounts. Users retain the private key of these key-pairs, and Google retains ONLY the public key. System-managed keys are automatically rotated by Google, and are used for signing for a maximum of two weeks. The rotation process is probabilistic, and usage of the new key will gradually ramp up and down over the key's lifetime. We recommend caching the public key set for a service account for no more than 24 hours to ensure you have access to the latest keys. Public keys for all service accounts are also published at the OAuth2 Service Account API.", "id": "ServiceAccountKey", "properties": { "keyAlgorithm": { @@ -1999,23 +2030,23 @@ "SYSTEM_MANAGED" ], "enumDescriptions": [ - "Unspecified key type. The presence of this in the\nmessage will immediately result in an error.", + "Unspecified key type. The presence of this in the message will immediately result in an error.", "User-managed keys (managed and rotated by the user).", "System-managed keys (managed and rotated by Google)." ], "type": "string" }, "name": { - "description": "The resource name of the service account key in the following format\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`.", + "description": "The resource name of the service account key in the following format `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`.", "type": "string" }, "privateKeyData": { - "description": "The private key data. Only provided in `CreateServiceAccountKey`\nresponses. 
Make sure to keep the private key data secure because it\nallows for the assertion of the service account identity.\nWhen base64 decoded, the private key data can be used to authenticate with\nGoogle API client libraries and with\n\u003ca href=\"/sdk/gcloud/reference/auth/activate-service-account\"\u003egcloud\nauth activate-service-account\u003c/a\u003e.", + "description": "The private key data. Only provided in `CreateServiceAccountKey` responses. Make sure to keep the private key data secure because it allows for the assertion of the service account identity. When base64 decoded, the private key data can be used to authenticate with Google API client libraries and with gcloud auth activate-service-account.", "format": "byte", "type": "string" }, "privateKeyType": { - "description": "The output format for the private key.\nOnly provided in `CreateServiceAccountKey` responses, not\nin `GetServiceAccountKey` or `ListServiceAccountKey` responses.\n\nGoogle never exposes system-managed private keys, and never retains\nuser-managed private keys.", + "description": "The output format for the private key. Only provided in `CreateServiceAccountKey` responses, not in `GetServiceAccountKey` or `ListServiceAccountKey` responses. Google never exposes system-managed private keys, and never retains user-managed private keys.", "enum": [ "TYPE_UNSPECIFIED", "TYPE_PKCS12_FILE", @@ -2023,7 +2054,7 @@ ], "enumDescriptions": [ "Unspecified. Equivalent to `TYPE_GOOGLE_CREDENTIALS_FILE`.", - "PKCS12 format.\nThe password for the PKCS12 file is `notasecret`.\nFor more information, see https://tools.ietf.org/html/rfc7292.", + "PKCS12 format. The password for the PKCS12 file is `notasecret`. For more information, see https://tools.ietf.org/html/rfc7292.", "Google Credentials File format." ], "type": "string" @@ -2039,7 +2070,7 @@ "type": "string" }, "validBeforeTime": { - "description": "The key can be used before this timestamp.\nFor system-managed key pairs, this timestamp is the end time for the\nprivate key signing operation. The public key could still be used\nfor verification for a few hours after this time.", + "description": "The key can be used before this timestamp. For system-managed key pairs, this timestamp is the end time for the private key signing operation. The public key could still be used for verification for a few hours after this time.", "format": "google-datetime", "type": "string" } @@ -2052,10 +2083,10 @@ "properties": { "policy": { "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." }, "updateMask": { - "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. If no mask is provided, the\nfollowing default mask is used:\npaths: \"bindings, etag\"\nThis field is only used by Cloud IAM.", + "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. 
If no mask is provided, the following default mask is used: `paths: \"bindings, etag\"`", "format": "google-fieldmask", "type": "string" } @@ -2063,11 +2094,11 @@ "type": "object" }, "SignBlobRequest": { - "description": "The service account sign blob request.", + "description": "Deprecated. [Migrate to Service Account Credentials API](https://cloud.google.com/iam/help/credentials/migrate-api). The service account sign blob request.", "id": "SignBlobRequest", "properties": { "bytesToSign": { - "description": "Required. The bytes to sign.", + "description": "Required. Deprecated. [Migrate to Service Account Credentials API](https://cloud.google.com/iam/help/credentials/migrate-api). The bytes to sign.", "format": "byte", "type": "string" } @@ -2075,15 +2106,15 @@ "type": "object" }, "SignBlobResponse": { - "description": "The service account sign blob response.", + "description": "Deprecated. [Migrate to Service Account Credentials API](https://cloud.google.com/iam/help/credentials/migrate-api). The service account sign blob response.", "id": "SignBlobResponse", "properties": { "keyId": { - "description": "The id of the key used to sign the blob.", + "description": "Deprecated. [Migrate to Service Account Credentials API](https://cloud.google.com/iam/help/credentials/migrate-api). The id of the key used to sign the blob.", "type": "string" }, "signature": { - "description": "The signed blob.", + "description": "Deprecated. [Migrate to Service Account Credentials API](https://cloud.google.com/iam/help/credentials/migrate-api). The signed blob.", "format": "byte", "type": "string" } @@ -2091,26 +2122,26 @@ "type": "object" }, "SignJwtRequest": { - "description": "The service account sign JWT request.", + "description": "Deprecated. [Migrate to Service Account Credentials API](https://cloud.google.com/iam/help/credentials/migrate-api). The service account sign JWT request.", "id": "SignJwtRequest", "properties": { "payload": { - "description": "Required. The JWT payload to sign, a JSON JWT Claim set.", + "description": "Required. Deprecated. [Migrate to Service Account Credentials API](https://cloud.google.com/iam/help/credentials/migrate-api). The JWT payload to sign. Must be a serialized JSON object that contains a JWT Claims Set. For example: `{\"sub\": \"user@example.com\", \"iat\": 313435}` If the JWT Claims Set contains an expiration time (`exp`) claim, it must be an integer timestamp that is not in the past and no more than 1 hour in the future. If the JWT Claims Set does not contain an expiration time (`exp`) claim, this claim is added automatically, with a timestamp that is 1 hour in the future.", "type": "string" } }, "type": "object" }, "SignJwtResponse": { - "description": "The service account sign JWT response.", + "description": "Deprecated. [Migrate to Service Account Credentials API](https://cloud.google.com/iam/help/credentials/migrate-api). The service account sign JWT response.", "id": "SignJwtResponse", "properties": { "keyId": { - "description": "The id of the key used to sign the JWT.", + "description": "Deprecated. [Migrate to Service Account Credentials API](https://cloud.google.com/iam/help/credentials/migrate-api). The id of the key used to sign the JWT.", "type": "string" }, "signedJwt": { - "description": "The signed JWT.", + "description": "Deprecated. [Migrate to Service Account Credentials API](https://cloud.google.com/iam/help/credentials/migrate-api). 
The signed JWT.", "type": "string" } }, @@ -2121,7 +2152,7 @@ "id": "TestIamPermissionsRequest", "properties": { "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "items": { "type": "string" }, @@ -2135,7 +2166,7 @@ "id": "TestIamPermissionsResponse", "properties": { "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { "type": "string" }, @@ -2177,7 +2208,7 @@ "id": "UploadServiceAccountKeyRequest", "properties": { "publicKeyData": { - "description": "A field that allows clients to upload their own public key. If set,\nuse this public key data to create a service account key for given\nservice account.\nPlease note, the expected format for this field is X509_PEM.", + "description": "A field that allows clients to upload their own public key. If set, use this public key data to create a service account key for given service account. Please note, the expected format for this field is X509_PEM.", "format": "byte", "type": "string" } diff --git a/vendor/google.golang.org/api/iam/v1/iam-gen.go b/vendor/google.golang.org/api/iam/v1/iam-gen.go index a7be201129f..994cdb8aaa3 100644 --- a/vendor/google.golang.org/api/iam/v1/iam-gen.go +++ b/vendor/google.golang.org/api/iam/v1/iam-gen.go @@ -75,6 +75,7 @@ const apiId = "iam:v1" const apiName = "iam" const apiVersion = "v1" const basePath = "https://iam.googleapis.com/" +const mtlsBasePath = "https://iam.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -90,6 +91,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -239,10 +241,8 @@ type RolesService struct { } // AdminAuditData: Audit log information specific to Cloud IAM admin -// APIs. This message is -// serialized as an `Any` type in the `ServiceData` message of -// an -// `AuditLog` message. +// APIs. This message is serialized as an `Any` type in the +// `ServiceData` message of an `AuditLog` message. type AdminAuditData struct { // PermissionDelta: The permission_delta when when creating or updating // a Role. @@ -272,72 +272,31 @@ func (s *AdminAuditData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AuditConfig: Specifies the audit configuration for a service. -// The configuration determines which permission types are logged, and -// what -// identities, if any, are exempted from logging. -// An AuditConfig must have one or more AuditLogConfigs. 
-// -// If there are AuditConfigs for both `allServices` and a specific -// service, -// the union of the two AuditConfigs is used for that service: the -// log_types -// specified in each AuditConfig are enabled, and the exempted_members -// in each -// AuditLogConfig are exempted. -// -// Example Policy with multiple AuditConfigs: -// -// { -// "audit_configs": [ -// { -// "service": "allServices" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// }, -// { -// "log_type": "ADMIN_READ", -// } -// ] -// }, -// { -// "service": "sampleservice.googleapis.com" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// }, -// { -// "log_type": "DATA_WRITE", -// "exempted_members": [ -// "user:aliya@example.com" -// ] -// } -// ] -// } -// ] -// } -// -// For sampleservice, this policy enables DATA_READ, DATA_WRITE and -// ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, -// and -// aliya@example.com from DATA_WRITE logging. +// AuditConfig: Specifies the audit configuration for a service. The +// configuration determines which permission types are logged, and what +// identities, if any, are exempted from logging. An AuditConfig must +// have one or more AuditLogConfigs. If there are AuditConfigs for both +// `allServices` and a specific service, the union of the two +// AuditConfigs is used for that service: the log_types specified in +// each AuditConfig are enabled, and the exempted_members in each +// AuditLogConfig are exempted. Example Policy with multiple +// AuditConfigs: { "audit_configs": [ { "service": "allServices", +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": +// [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { +// "log_type": "ADMIN_READ" } ] }, { "service": +// "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": +// "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ +// "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy +// enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts +// jose@example.com from DATA_READ logging, and aliya@example.com from +// DATA_WRITE logging. type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of // permission. AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` - // Service: Specifies a service that will be enabled for audit - // logging. - // For example, `storage.googleapis.com`, - // `cloudsql.googleapis.com`. + // Service: Specifies a service that will be enabled for audit logging. + // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. // `allServices` is a special value that covers all services. Service string `json:"service,omitempty"` @@ -366,8 +325,7 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { } // AuditData: Audit log information specific to Cloud IAM. This message -// is serialized -// as an `Any` type in the `ServiceData` message of an +// is serialized as an `Any` type in the `ServiceData` message of an // `AuditLog` message. type AuditData struct { // PolicyDelta: Policy delta between the original policy and the newly @@ -398,31 +356,15 @@ func (s *AuditData) MarshalJSON() ([]byte, error) { } // AuditLogConfig: Provides the configuration for logging a type of -// permissions. 
-// Example: -// -// { -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// } -// ] -// } -// -// This enables 'DATA_READ' and 'DATA_WRITE' logging, while -// exempting -// jose@example.com from DATA_READ logging. +// permissions. Example: { "audit_log_configs": [ { "log_type": +// "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { +// "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and +// 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ +// logging. type AuditLogConfig struct { // ExemptedMembers: Specifies the identities that do not cause logging - // for this type of - // permission. - // Follows the same format of Binding.members. + // for this type of permission. Follows the same format of + // Binding.members. ExemptedMembers []string `json:"exemptedMembers,omitempty"` // LogType: The log type that this config enables. @@ -460,8 +402,8 @@ func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { // AuditableService: Contains information about an auditable service. type AuditableService struct { - // Name: Public name of the service. - // For example, the service name for Cloud IAM is 'iam.googleapis.com'. + // Name: Public name of the service. For example, the service name for + // Cloud IAM is 'iam.googleapis.com'. Name string `json:"name,omitempty"` // ForceSendFields is a list of field names (e.g. "Name") to @@ -489,86 +431,60 @@ func (s *AuditableService) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: The condition that is associated with this binding. - // NOTE: An unsatisfied condition will not allow user access via - // current - // binding. Different bindings, including their conditions, are - // examined - // independently. + // BindingId: A client-specified ID for this binding. Expected to be + // globally unique to support the internal bindings-by-ID API. + BindingId string `json:"bindingId,omitempty"` + + // Condition: The condition that is associated with this binding. If the + // condition evaluates to `true`, then this binding applies to the + // current request. If the condition evaluates to `false`, then this + // binding does not apply to the current request. However, a different + // role binding might grant the same role to one or more of the members + // in this binding. To learn which resources support conditions in their + // IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud - // Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@example.com` . - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. 
- // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a user that has been recently deleted. - // For - // example, `alice@example.com?uid=123456789012345678901`. If the - // user is - // recovered, this value reverts to `user:{emailid}` and the - // recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address - // (plus - // unique identifier) representing a service account that has been - // recently - // deleted. For example, - // + // Platform resource. `members` can have the following values: * + // `allUsers`: A special identifier that represents anyone who is on the + // internet; with or without a Google account. * + // `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. * + // `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . * + // `serviceAccount:{emailid}`: An email address that represents a + // service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An + // email address that represents a Google group. For example, + // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An + // email address (plus unique identifier) representing a user that has + // been recently deleted. For example, + // `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered + // user retains the role in the binding. * + // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account - // retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a Google group that has been recently - // deleted. For example, - // `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` - // and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. - // - // + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains + // the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: + // An email address (plus unique identifier) representing a Google group + // that has been recently deleted. For example, + // `admins@example.com?uid=123456789012345678901`. If the group is + // recovered, this value reverts to `group:{emailid}` and the recovered + // group retains the role in the binding. * `domain:{domain}`: The G + // Suite domain (primary) that represents all the users of that domain. + // For example, `google.com` or `example.com`. Members []string `json:"members,omitempty"` - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Role: Role that is assigned to `members`. 
For example, + // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` - // ForceSendFields is a list of field names (e.g. "Condition") to + // ForceSendFields is a list of field names (e.g. "BindingId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -576,7 +492,7 @@ type Binding struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Condition") to include in + // NullFields is a list of field names (e.g. "BindingId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -592,11 +508,10 @@ func (s *Binding) MarshalJSON() ([]byte, error) { } // BindingDelta: One delta entry for Binding. Each individual change -// (only one member in each -// entry) to a binding will be a separate entry. +// (only one member in each entry) to a binding will be a separate +// entry. type BindingDelta struct { - // Action: The action that was performed on a Binding. - // Required + // Action: The action that was performed on a Binding. Required // // Possible values: // "ACTION_UNSPECIFIED" - Unspecified. @@ -608,15 +523,11 @@ type BindingDelta struct { Condition *Expr `json:"condition,omitempty"` // Member: A single identity requesting access for a Cloud Platform - // resource. - // Follows the same format of Binding.members. - // Required + // resource. Follows the same format of Binding.members. Required Member string `json:"member,omitempty"` - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or - // `roles/owner`. - // Required + // Role: Role that is assigned to `members`. For example, + // `roles/viewer`, `roles/editor`, or `roles/owner`. Required Role string `json:"role,omitempty"` // ForceSendFields is a list of field names (e.g. "Action") to @@ -647,7 +558,10 @@ type CreateRoleRequest struct { // Role: The Role resource to create. Role *Role `json:"role,omitempty"` - // RoleId: The role ID to use for this role. + // RoleId: The role ID to use for this role. A role ID may contain + // alphanumeric characters, underscores (`_`), and periods (`.`). It + // must contain a minimum of 3 characters and a maximum of 64 + // characters. RoleId string `json:"roleId,omitempty"` // ForceSendFields is a list of field names (e.g. "Role") to @@ -676,9 +590,8 @@ func (s *CreateRoleRequest) MarshalJSON() ([]byte, error) { // CreateServiceAccountKeyRequest: The service account key create // request. type CreateServiceAccountKeyRequest struct { - // KeyAlgorithm: Which type of key and algorithm to use for the key. - // The default is currently a 2K RSA key. However this may change in - // the + // KeyAlgorithm: Which type of key and algorithm to use for the key. The + // default is currently a 2K RSA key. However this may change in the // future. // // Possible values: @@ -688,17 +601,15 @@ type CreateServiceAccountKeyRequest struct { KeyAlgorithm string `json:"keyAlgorithm,omitempty"` // PrivateKeyType: The output format of the private key. The default - // value is - // `TYPE_GOOGLE_CREDENTIALS_FILE`, which is the Google Credentials - // File - // format. 
+ // value is `TYPE_GOOGLE_CREDENTIALS_FILE`, which is the Google + // Credentials File format. // // Possible values: // "TYPE_UNSPECIFIED" - Unspecified. Equivalent to // `TYPE_GOOGLE_CREDENTIALS_FILE`. - // "TYPE_PKCS12_FILE" - PKCS12 format. - // The password for the PKCS12 file is `notasecret`. - // For more information, see https://tools.ietf.org/html/rfc7292. + // "TYPE_PKCS12_FILE" - PKCS12 format. The password for the PKCS12 + // file is `notasecret`. For more information, see + // https://tools.ietf.org/html/rfc7292. // "TYPE_GOOGLE_CREDENTIALS_FILE" - Google Credentials File format. PrivateKeyType string `json:"privateKeyType,omitempty"` @@ -728,18 +639,14 @@ func (s *CreateServiceAccountKeyRequest) MarshalJSON() ([]byte, error) { // CreateServiceAccountRequest: The service account create request. type CreateServiceAccountRequest struct { // AccountId: Required. The account id that is used to generate the - // service account - // email address and a stable unique id. It is unique within a - // project, - // must be 6-30 characters long, and match the regular - // expression - // `[a-z]([-a-z0-9]*[a-z0-9])` to comply with RFC1035. + // service account email address and a stable unique id. It is unique + // within a project, must be 6-30 characters long, and match the regular + // expression `[a-z]([-a-z0-9]*[a-z0-9])` to comply with RFC1035. AccountId string `json:"accountId,omitempty"` - // ServiceAccount: The ServiceAccount resource to - // create. Currently, only the following values are user - // assignable: - // `display_name` and `description`. + // ServiceAccount: The ServiceAccount resource to create. Currently, + // only the following values are user assignable: `display_name` and + // `description`. ServiceAccount *ServiceAccount `json:"serviceAccount,omitempty"` // ForceSendFields is a list of field names (e.g. "AccountId") to @@ -770,17 +677,11 @@ type DisableServiceAccountRequest struct { } // Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -792,65 +693,40 @@ type EnableServiceAccountRequest struct { } // Expr: Represents a textual expression in the Common Expression -// Language (CEL) -// syntax. CEL is a C-like expression language. The syntax and semantics -// of CEL -// are documented at https://github.com/google/cel-spec. 
-// -// Example (Comparison): -// -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" -// -// Example (Equality): -// -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == -// request.auth.claims.email" -// -// Example (Logic): -// -// title: "Public documents" -// description: "Determine whether the document should be publicly -// visible" -// expression: "document.type != 'private' && document.type != -// 'internal'" -// -// Example (Data Manipulation): -// -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + -// string(document.create_time)" -// -// The exact variables and functions that may be referenced within an -// expression -// are determined by the service that evaluates it. See the -// service -// documentation for additional information. +// Language (CEL) syntax. CEL is a C-like expression language. The +// syntax and semantics of CEL are documented at +// https://github.com/google/cel-spec. Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" +// description: "Determine whether the document should be publicly +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. type Expr struct { // Description: Optional. Description of the expression. This is a - // longer text which - // describes the expression, e.g. when hovered over it in a UI. + // longer text which describes the expression, e.g. when hovered over it + // in a UI. Description string `json:"description,omitempty"` // Expression: Textual representation of an expression in Common - // Expression Language - // syntax. + // Expression Language syntax. Expression string `json:"expression,omitempty"` // Location: Optional. String indicating the location of the expression - // for error - // reporting, e.g. a file name and a position in the file. + // for error reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` // Title: Optional. Title for the expression, i.e. a short string - // describing - // its purpose. This can be used e.g. in UIs which allow to enter - // the - // expression. + // describing its purpose. This can be used e.g. in UIs which allow to + // enter the expression. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -882,19 +758,12 @@ type LintPolicyRequest struct { Condition *Expr `json:"condition,omitempty"` // FullResourceName: The full resource name of the policy this lint - // request is about. - // - // The name follows the Google Cloud Platform (GCP) resource format. 
- // For example, a GCP project with ID `my-project` will be - // named - // `//cloudresourcemanager.googleapis.com/projects/my-project`. - // - // Th - // e resource name is not used to read the policy instance from the - // Cloud - // IAM database. The candidate policy for lint has to be provided in the - // same - // request object. + // request is about. The name follows the Google Cloud Platform (GCP) + // resource format. For example, a GCP project with ID `my-project` will + // be named `//cloudresourcemanager.googleapis.com/projects/my-project`. + // The resource name is not used to read the policy instance from the + // Cloud IAM database. The candidate policy for lint has to be provided + // in the same request object. FullResourceName string `json:"fullResourceName,omitempty"` // ForceSendFields is a list of field names (e.g. "Condition") to @@ -921,8 +790,8 @@ func (s *LintPolicyRequest) MarshalJSON() ([]byte, error) { } // LintPolicyResponse: The response of a lint operation. An empty -// response indicates -// the operation was able to fully execute and no lint issue was found. +// response indicates the operation was able to fully execute and no +// lint issue was found. type LintPolicyResponse struct { // LintResults: List of lint results sorted by `severity` in descending // order. @@ -960,18 +829,12 @@ type LintResult struct { // DebugMessage: Human readable debug message associated with the issue. DebugMessage string `json:"debugMessage,omitempty"` - // FieldName: The name of the field for which this lint result is - // about. - // + // FieldName: The name of the field for which this lint result is about. // For nested messages `field_name` consists of names of the embedded - // fields - // separated by period character. The top-level qualifier is the input - // object - // to lint in the request. For example, the `field_name` - // value - // `condition.expression` identifies a lint result for the `expression` - // field - // of the provided condition. + // fields separated by period character. The top-level qualifier is the + // input object to lint in the request. For example, the `field_name` + // value `condition.expression` identifies a lint result for the + // `expression` field of the provided condition. FieldName string `json:"fieldName,omitempty"` // Level: The validation unit level. @@ -979,15 +842,12 @@ type LintResult struct { // Possible values: // "LEVEL_UNSPECIFIED" - Level is unspecified. // "CONDITION" - A validation unit which operates on an individual - // condition within a - // binding. + // condition within a binding. Level string `json:"level,omitempty"` // LocationOffset: 0-based character position of problematic construct - // within the object - // identified by `field_name`. Currently, this is populated only for - // condition - // expression. + // within the object identified by `field_name`. Currently, this is + // populated only for condition expression. LocationOffset int64 `json:"locationOffset,omitempty"` // Severity: The validation unit severity. @@ -995,41 +855,28 @@ type LintResult struct { // Possible values: // "SEVERITY_UNSPECIFIED" - Severity is unspecified. // "ERROR" - A validation unit returns an error only for critical - // issues. If an - // attempt is made to set the problematic policy without rectifying - // the - // critical issue, it causes the `setPolicy` operation to fail. + // issues. 
If an attempt is made to set the problematic policy without + // rectifying the critical issue, it causes the `setPolicy` operation to + // fail. // "WARNING" - Any issue which is severe enough but does not cause an - // error. - // For example, suspicious constructs in the input object will - // not - // necessarily fail `setPolicy`, but there is a high likelihood that - // they - // won't behave as expected during policy evaluation in - // `checkPolicy`. - // This includes the following common scenarios: - // - // - Unsatisfiable condition: Expired timestamp in date/time - // condition. - // - Ineffective condition: Condition on a pair which is - // granted unconditionally in another binding of the same policy. + // error. For example, suspicious constructs in the input object will + // not necessarily fail `setPolicy`, but there is a high likelihood that + // they won't behave as expected during policy evaluation in + // `checkPolicy`. This includes the following common scenarios: - + // Unsatisfiable condition: Expired timestamp in date/time condition. - + // Ineffective condition: Condition on a pair which is granted + // unconditionally in another binding of the same policy. // "NOTICE" - Reserved for the issues that are not severe as - // `ERROR`/`WARNING`, but - // need special handling. For instance, messages about skipped - // validation - // units are issued as `NOTICE`. + // `ERROR`/`WARNING`, but need special handling. For instance, messages + // about skipped validation units are issued as `NOTICE`. // "INFO" - Any informative statement which is not severe enough to - // raise - // `ERROR`/`WARNING`/`NOTICE`, like auto-correction recommendations on - // the - // input content. Note that current version of the linter does not - // utilize - // `INFO`. + // raise `ERROR`/`WARNING`/`NOTICE`, like auto-correction + // recommendations on the input content. Note that current version of + // the linter does not utilize `INFO`. // "DEPRECATED" - Deprecated severity level. Severity string `json:"severity,omitempty"` - // ValidationUnitName: The validation unit name, for - // instance + // ValidationUnitName: The validation unit name, for instance // "lintValidationUnits/ConditionComplexityCheck". ValidationUnitName string `json:"validationUnitName,omitempty"` @@ -1059,8 +906,7 @@ func (s *LintResult) MarshalJSON() ([]byte, error) { // ListRolesResponse: The response containing the roles defined under a // resource. type ListRolesResponse struct { - // NextPageToken: To retrieve the next page of results, - // set + // NextPageToken: To retrieve the next page of results, set // `ListRolesRequest.page_token` to this value. NextPageToken string `json:"nextPageToken,omitempty"` @@ -1132,10 +978,8 @@ type ListServiceAccountsResponse struct { // Accounts: The list of matching service accounts. Accounts []*ServiceAccount `json:"accounts,omitempty"` - // NextPageToken: To retrieve the next page of results, - // set - // ListServiceAccountsRequest.page_token - // to this value. + // NextPageToken: To retrieve the next page of results, set + // ListServiceAccountsRequest.page_token to this value. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1165,7 +1009,12 @@ func (s *ListServiceAccountsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PatchServiceAccountRequest: The patch service account request. 
+// PatchServiceAccountRequest: The request for PatchServiceAccount. You +// can patch only the `display_name` and `description` fields. You must +// use the `update_mask` field to specify which of these fields you want +// to patch. Only the fields specified in the request are guaranteed to +// be returned in the response. Other fields may be empty in the +// response. type PatchServiceAccountRequest struct { ServiceAccount *ServiceAccount `json:"serviceAccount,omitempty"` @@ -1210,8 +1059,7 @@ type Permission struct { // "NOT_SUPPORTED" - Permission is not supported for custom role use. CustomRolesSupportLevel string `json:"customRolesSupportLevel,omitempty"` - // Description: A brief description of what this Permission is used - // for. + // Description: A brief description of what this Permission is used for. // This permission can ONLY be used in predefined roles. Description string `json:"description,omitempty"` @@ -1221,8 +1069,8 @@ type Permission struct { OnlyInPredefinedRoles bool `json:"onlyInPredefinedRoles,omitempty"` // PrimaryPermission: The preferred name for this permission. If - // present, then this permission is - // an alias of, and equivalent to, the listed primary_permission. + // present, then this permission is an alias of, and equivalent to, the + // listed primary_permission. PrimaryPermission string `json:"primaryPermission,omitempty"` // Stage: The current launch stage of the permission. @@ -1261,8 +1109,7 @@ func (s *Permission) MarshalJSON() ([]byte, error) { } // PermissionDelta: A PermissionDelta message to record the -// added_permissions and -// removed_permissions inside a role. +// added_permissions and removed_permissions inside a role. type PermissionDelta struct { // AddedPermissions: Added permissions. AddedPermissions []string `json:"addedPermissions,omitempty"` @@ -1295,143 +1142,79 @@ func (s *PermissionDelta) MarshalJSON() ([]byte, error) { } // Policy: An Identity and Access Management (IAM) policy, which -// specifies access -// controls for Google Cloud resources. -// -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or -// more -// `members` to a single `role`. Members can be user accounts, service -// accounts, +// specifies access controls for Google Cloud resources. A `Policy` is a +// collection of `bindings`. A `binding` binds one or more `members` to +// a single `role`. Members can be user accounts, service accounts, // Google groups, and domains (such as G Suite). A `role` is a named -// list of -// permissions; each `role` can be an IAM predefined role or a -// user-created -// custom role. -// -// Optionally, a `binding` can specify a `condition`, which is a -// logical +// list of permissions; each `role` can be an IAM predefined role or a +// user-created custom role. For some types of Google Cloud resources, a +// `binding` can also specify a `condition`, which is a logical // expression that allows access to a resource only if the expression -// evaluates -// to `true`. A condition can add constraints based on attributes of -// the -// request, the resource, or both. 
-// -// **JSON example:** -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": ["user:eve@example.com"], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// -// **YAML example:** -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// - etag: BwWWja0YfJA= -// - version: 3 -// -// For a description of IAM and its features, see the -// [IAM documentation](https://cloud.google.com/iam/docs/). +// evaluates to `true`. A condition can add constraints based on +// attributes of the request, the resource, or both. To learn which +// resources support conditions in their IAM policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). **JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: +// 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). type Policy struct { // AuditConfigs: Specifies cloud audit logging configuration for this // policy. AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` // Bindings: Associates a list of `members` to a `role`. Optionally, may - // specify a - // `condition` that determines how and when the `bindings` are applied. - // Each - // of the `bindings` must contain at least one member. + // specify a `condition` that determines how and when the `bindings` are + // applied. Each of the `bindings` must contain at least one member. 
Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. - // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. **Important:** If you use IAM + // Conditions, you must include the `etag` field whenever you call + // `setIamPolicy`. If you omit this field, then IAM allows you to + // overwrite a version `3` policy with a version `1` policy, and all of // the conditions in the version `3` policy are lost. Etag string `json:"etag,omitempty"` - // Version: Specifies the format of the policy. - // - // Valid values are `0`, `1`, and `3`. Requests that specify an invalid - // value - // are rejected. - // + // Version: Specifies the format of the policy. Valid values are `0`, + // `1`, and `3`. Requests that specify an invalid value are rejected. // Any operation that affects conditional role bindings must specify - // version - // `3`. This requirement applies to the following operations: - // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a - // policy - // that includes conditions - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of - // the conditions in the version `3` policy are lost. - // - // If a policy does not include any conditions, operations on that - // policy may - // specify any valid version or leave the field unset. + // version `3`. This requirement applies to the following operations: * + // Getting a policy that includes a conditional role binding * Adding a + // conditional role binding to a policy * Changing a conditional role + // binding in a policy * Removing any role binding, with or without a + // condition, from a policy that includes conditions **Important:** If + // you use IAM Conditions, you must include the `etag` field whenever + // you call `setIamPolicy`. If you omit this field, then IAM allows you + // to overwrite a version `3` policy with a version `1` policy, and all + // of the conditions in the version `3` policy are lost. 
If a policy + // does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. To learn which + // resources support conditions in their IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Version int64 `json:"version,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1493,12 +1276,9 @@ func (s *PolicyDelta) MarshalJSON() ([]byte, error) { // services for a resource. type QueryAuditableServicesRequest struct { // FullResourceName: Required. The full resource name to query from the - // list of auditable - // services. - // - // The name follows the Google Cloud Platform resource format. - // For example, a Cloud Platform project with id `my-project` will be - // named + // list of auditable services. The name follows the Google Cloud + // Platform resource format. For example, a Cloud Platform project with + // id `my-project` will be named // `//cloudresourcemanager.googleapis.com/projects/my-project`. FullResourceName string `json:"fullResourceName,omitempty"` @@ -1562,26 +1342,23 @@ func (s *QueryAuditableServicesResponse) MarshalJSON() ([]byte, error) { // QueryGrantableRolesRequest: The grantable role query request. type QueryGrantableRolesRequest struct { // FullResourceName: Required. The full resource name to query from the - // list of grantable roles. - // - // The name follows the Google Cloud Platform resource format. - // For example, a Cloud Platform project with id `my-project` will be - // named + // list of grantable roles. The name follows the Google Cloud Platform + // resource format. For example, a Cloud Platform project with id + // `my-project` will be named // `//cloudresourcemanager.googleapis.com/projects/my-project`. FullResourceName string `json:"fullResourceName,omitempty"` // PageSize: Optional limit on the number of roles to include in the - // response. + // response. The default is 300, and the maximum is 1,000. PageSize int64 `json:"pageSize,omitempty"` - // PageToken: Optional pagination token returned in an - // earlier + // PageToken: Optional pagination token returned in an earlier // QueryGrantableRolesResponse. PageToken string `json:"pageToken,omitempty"` // Possible values: - // "BASIC" - Omits the `included_permissions` field. - // This is the default value. + // "BASIC" - Omits the `included_permissions` field. This is the + // default value. // "FULL" - Returns all fields. View string `json:"view,omitempty"` @@ -1611,8 +1388,7 @@ func (s *QueryGrantableRolesRequest) MarshalJSON() ([]byte, error) { // QueryGrantableRolesResponse: The grantable role query response. type QueryGrantableRolesResponse struct { - // NextPageToken: To retrieve the next page of results, - // set + // NextPageToken: To retrieve the next page of results, set // `QueryGrantableRolesRequest.page_token` to this value. NextPageToken string `json:"nextPageToken,omitempty"` @@ -1650,21 +1426,17 @@ func (s *QueryGrantableRolesResponse) MarshalJSON() ([]byte, error) { // can be tested on a resource. type QueryTestablePermissionsRequest struct { // FullResourceName: Required. The full resource name to query from the - // list of testable - // permissions. - // - // The name follows the Google Cloud Platform resource format. - // For example, a Cloud Platform project with id `my-project` will be - // named + // list of testable permissions. The name follows the Google Cloud + // Platform resource format. 
For example, a Cloud Platform project with + // id `my-project` will be named // `//cloudresourcemanager.googleapis.com/projects/my-project`. FullResourceName string `json:"fullResourceName,omitempty"` // PageSize: Optional limit on the number of permissions to include in - // the response. + // the response. The default is 100, and the maximum is 1,000. PageSize int64 `json:"pageSize,omitempty"` - // PageToken: Optional pagination token returned in an - // earlier + // PageToken: Optional pagination token returned in an earlier // QueryTestablePermissionsRequest. PageToken string `json:"pageToken,omitempty"` @@ -1695,8 +1467,7 @@ func (s *QueryTestablePermissionsRequest) MarshalJSON() ([]byte, error) { // QueryTestablePermissionsResponse: The response containing permissions // which can be tested on a resource. type QueryTestablePermissionsResponse struct { - // NextPageToken: To retrieve the next page of results, - // set + // NextPageToken: To retrieve the next page of results, set // `QueryTestableRolesRequest.page_token` to this value. NextPageToken string `json:"nextPageToken,omitempty"` @@ -1733,8 +1504,7 @@ func (s *QueryTestablePermissionsResponse) MarshalJSON() ([]byte, error) { // Role: A role in the Identity and Access Management API. type Role struct { // Deleted: The current deleted state of the role. This field is read - // only. - // It will be ignored in calls to CreateRole and UpdateRole. + // only. It will be ignored in calls to CreateRole and UpdateRole. Deleted bool `json:"deleted,omitempty"` // Description: Optional. A human-readable description for the role. @@ -1747,44 +1517,34 @@ type Role struct { // when bound in an IAM policy. IncludedPermissions []string `json:"includedPermissions,omitempty"` - // Name: The name of the role. - // - // When Role is used in CreateRole, the role name must not be set. - // - // When Role is used in output and other input such as UpdateRole, the - // role - // name is the complete path, e.g., roles/logging.viewer for predefined - // roles - // and organizations/{ORGANIZATION_ID}/roles/logging.viewer for custom + // Name: The name of the role. When Role is used in CreateRole, the role + // name must not be set. When Role is used in output and other input + // such as UpdateRole, the role name is the complete path, e.g., + // roles/logging.viewer for predefined roles and + // organizations/{ORGANIZATION_ID}/roles/logging.viewer for custom // roles. Name string `json:"name,omitempty"` // Stage: The current launch stage of the role. If the `ALPHA` launch - // stage has been - // selected for a role, the `stage` field will not be included in - // the - // returned definition for the role. + // stage has been selected for a role, the `stage` field will not be + // included in the returned definition for the role. // // Possible values: // "ALPHA" - The user has indicated this role is currently in an Alpha - // phase. If this - // launch stage is selected, the `stage` field will not be included - // when - // requesting the definition for a given role. + // phase. If this launch stage is selected, the `stage` field will not + // be included when requesting the definition for a given role. // "BETA" - The user has indicated this role is currently in a Beta // phase. // "GA" - The user has indicated this role is generally available. // "DEPRECATED" - The user has indicated this role is being // deprecated. // "DISABLED" - This role is disabled and will not contribute - // permissions to any members - // it is granted to in policies. 
+ // permissions to any members it is granted to in policies. // "EAP" - The user has indicated this role is currently in an EAP // phase. Stage string `json:"stage,omitempty"` - // Title: Optional. A human-readable title for the role. Typically - // this + // Title: Optional. A human-readable title for the role. Typically this // is limited to 100 UTF-8 bytes. Title string `json:"title,omitempty"` @@ -1815,83 +1575,61 @@ func (s *Role) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ServiceAccount: A service account in the Identity and Access -// Management API. -// -// To create a service account, specify the `project_id` and the -// `account_id` -// for the account. The `account_id` is unique within the project, and -// is used -// to generate the service account email address and a -// stable -// `unique_id`. -// -// If the account already exists, the account's resource name is -// returned -// in the format of projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}. The -// caller -// can use the name in other methods to access the account. -// -// All other methods can identify the service account using the -// format -// `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. -// Using `-` as a wildcard for the `PROJECT_ID` will infer the project -// from -// the account. The `ACCOUNT` value can be the `email` address or -// the -// `unique_id` of the service account. +// ServiceAccount: An IAM service account. A service account is an +// account for an application or a virtual machine (VM) instance, not a +// person. You can use a service account to call Google APIs. To learn +// more, read the [overview of service +// accounts](https://cloud.google.com/iam/help/service-accounts/overview) +// . When you create a service account, you specify the project ID that +// owns the service account, as well as a name that must be unique +// within the project. IAM uses these values to create an email address +// that identifies the service account. type ServiceAccount struct { - // Description: Optional. A user-specified opaque description of the - // service account. - // Must be less than or equal to 256 UTF-8 bytes. + // Description: Optional. A user-specified, human-readable description + // of the service account. The maximum length is 256 UTF-8 bytes. Description string `json:"description,omitempty"` - // Disabled: @OutputOnly A bool indicate if the service account is - // disabled. - // The field is currently in alpha phase. + // Disabled: Output only. Whether the service account is disabled. Disabled bool `json:"disabled,omitempty"` - // DisplayName: Optional. A user-specified name for the service - // account. - // Must be less than or equal to 100 UTF-8 bytes. + // DisplayName: Optional. A user-specified, human-readable name for the + // service account. The maximum length is 100 UTF-8 bytes. DisplayName string `json:"displayName,omitempty"` - // Email: @OutputOnly The email address of the service account. + // Email: Output only. The email address of the service account. Email string `json:"email,omitempty"` - // Etag: Optional. Note: `etag` is an inoperable legacy field that is - // only returned - // for backwards compatibility. + // Etag: Deprecated. Do not use. Etag string `json:"etag,omitempty"` - // Name: The resource name of the service account in the following - // format: - // `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. 
- // - // Requests using `-` as a wildcard for the `PROJECT_ID` will infer - // the - // project from the `account` and the `ACCOUNT` value can be the - // `email` - // address or the `unique_id` of the service account. - // - // In responses the resource name will always be in the - // format - // `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. + // Name: The resource name of the service account. Use one of the + // following formats: * + // `projects/{PROJECT_ID}/serviceAccounts/{EMAIL_ADDRESS}` * + // `projects/{PROJECT_ID}/serviceAccounts/{UNIQUE_ID}` As an + // alternative, you can use the `-` wildcard character instead of the + // project ID: * `projects/-/serviceAccounts/{EMAIL_ADDRESS}` * + // `projects/-/serviceAccounts/{UNIQUE_ID}` When possible, avoid using + // the `-` wildcard character, because it can cause response messages to + // contain misleading error codes. For example, if you try to get the + // service account `projects/-/serviceAccounts/fake@example.com`, which + // does not exist, the response contains an HTTP `403 Forbidden` error + // instead of a `404 Not Found` error. Name string `json:"name,omitempty"` - // Oauth2ClientId: @OutputOnly The OAuth2 client id for the service + // Oauth2ClientId: Output only. The OAuth 2.0 client ID for the service // account. - // This is used in conjunction with the OAuth2 clientconfig API to - // make - // three legged OAuth2 (3LO) flows to access the data of Google users. Oauth2ClientId string `json:"oauth2ClientId,omitempty"` - // ProjectId: @OutputOnly The id of the project that owns the service + // ProjectId: Output only. The ID of the project that owns the service // account. ProjectId string `json:"projectId,omitempty"` - // UniqueId: @OutputOnly The unique and stable id of the service - // account. + // UniqueId: Output only. The unique, stable numeric ID for the service + // account. Each service account retains its unique ID even if you + // delete the service account. For example, if you delete a service + // account, then create a new service account with the same name, the + // new service account has a different unique ID than the deleted + // service account. UniqueId string `json:"uniqueId,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1921,34 +1659,19 @@ func (s *ServiceAccount) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ServiceAccountKey: Represents a service account key. -// -// A service account has two sets of key-pairs: user-managed, -// and -// system-managed. -// -// User-managed key-pairs can be created and deleted by users. Users -// are +// ServiceAccountKey: Represents a service account key. A service +// account has two sets of key-pairs: user-managed, and system-managed. +// User-managed key-pairs can be created and deleted by users. Users are // responsible for rotating these keys periodically to ensure security -// of -// their service accounts. Users retain the private key of these -// key-pairs, -// and Google retains ONLY the public key. -// -// System-managed keys are automatically rotated by Google, and are used -// for -// signing for a maximum of two weeks. The rotation process is -// probabilistic, +// of their service accounts. Users retain the private key of these +// key-pairs, and Google retains ONLY the public key. System-managed +// keys are automatically rotated by Google, and are used for signing +// for a maximum of two weeks. 
The rotation process is probabilistic, // and usage of the new key will gradually ramp up and down over the -// key's -// lifetime. We recommend caching the public key set for a service -// account for -// no more than 24 hours to ensure you have access to the latest -// keys. -// -// Public keys for all service accounts are also published at the -// OAuth2 -// Service Account API. +// key's lifetime. We recommend caching the public key set for a service +// account for no more than 24 hours to ensure you have access to the +// latest keys. Public keys for all service accounts are also published +// at the OAuth2 Service Account API. type ServiceAccountKey struct { // KeyAlgorithm: Specifies the algorithm (and possibly key size) for the // key. @@ -1971,8 +1694,7 @@ type ServiceAccountKey struct { // // Possible values: // "KEY_TYPE_UNSPECIFIED" - Unspecified key type. The presence of this - // in the - // message will immediately result in an error. + // in the message will immediately result in an error. // "USER_MANAGED" - User-managed keys (managed and rotated by the // user). // "SYSTEM_MANAGED" - System-managed keys (managed and rotated by @@ -1980,39 +1702,29 @@ type ServiceAccountKey struct { KeyType string `json:"keyType,omitempty"` // Name: The resource name of the service account key in the following - // format - // `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`. + // format `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`. Name string `json:"name,omitempty"` // PrivateKeyData: The private key data. Only provided in - // `CreateServiceAccountKey` - // responses. Make sure to keep the private key data secure because - // it - // allows for the assertion of the service account identity. - // When base64 decoded, the private key data can be used to authenticate - // with - // Google API client libraries and with - // gcloud - // auth - // activate-service-account. + // `CreateServiceAccountKey` responses. Make sure to keep the private + // key data secure because it allows for the assertion of the service + // account identity. When base64 decoded, the private key data can be + // used to authenticate with Google API client libraries and with gcloud + // auth activate-service-account. PrivateKeyData string `json:"privateKeyData,omitempty"` - // PrivateKeyType: The output format for the private key. - // Only provided in `CreateServiceAccountKey` responses, not - // in `GetServiceAccountKey` or `ListServiceAccountKey` - // responses. - // - // Google never exposes system-managed private keys, and never - // retains - // user-managed private keys. + // PrivateKeyType: The output format for the private key. Only provided + // in `CreateServiceAccountKey` responses, not in `GetServiceAccountKey` + // or `ListServiceAccountKey` responses. Google never exposes + // system-managed private keys, and never retains user-managed private + // keys. // // Possible values: // "TYPE_UNSPECIFIED" - Unspecified. Equivalent to // `TYPE_GOOGLE_CREDENTIALS_FILE`. - // "TYPE_PKCS12_FILE" - PKCS12 format. - // The password for the PKCS12 file is `notasecret`. - // For more information, see https://tools.ietf.org/html/rfc7292. + // "TYPE_PKCS12_FILE" - PKCS12 format. The password for the PKCS12 + // file is `notasecret`. For more information, see + // https://tools.ietf.org/html/rfc7292. // "TYPE_GOOGLE_CREDENTIALS_FILE" - Google Credentials File format. 
PrivateKeyType string `json:"privateKeyType,omitempty"` @@ -2023,11 +1735,10 @@ type ServiceAccountKey struct { // ValidAfterTime: The key can be used after this timestamp. ValidAfterTime string `json:"validAfterTime,omitempty"` - // ValidBeforeTime: The key can be used before this timestamp. - // For system-managed key pairs, this timestamp is the end time for - // the - // private key signing operation. The public key could still be used - // for verification for a few hours after this time. + // ValidBeforeTime: The key can be used before this timestamp. For + // system-managed key pairs, this timestamp is the end time for the + // private key signing operation. The public key could still be used for + // verification for a few hours after this time. ValidBeforeTime string `json:"validBeforeTime,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2060,20 +1771,15 @@ func (s *ServiceAccountKey) MarshalJSON() ([]byte, error) { // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. + // `resource`. The size of the policy is limited to a few 10s of KB. An + // empty policy is a valid policy but certain Cloud Platform services + // (such as Projects) might reject them. Policy *Policy `json:"policy,omitempty"` // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the - // policy to modify. Only - // the fields in the mask will be modified. If no mask is provided, - // the - // following default mask is used: - // paths: "bindings, etag" - // This field is only used by Cloud IAM. + // policy to modify. Only the fields in the mask will be modified. If no + // mask is provided, the following default mask is used: `paths: + // "bindings, etag" UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. "Policy") to @@ -2099,9 +1805,14 @@ func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SignBlobRequest: The service account sign blob request. +// SignBlobRequest: Deprecated. [Migrate to Service Account Credentials +// API](https://cloud.google.com/iam/help/credentials/migrate-api). The +// service account sign blob request. type SignBlobRequest struct { - // BytesToSign: Required. The bytes to sign. + // BytesToSign: Required. Deprecated. [Migrate to Service Account + // Credentials + // API](https://cloud.google.com/iam/help/credentials/migrate-api). The + // bytes to sign. BytesToSign string `json:"bytesToSign,omitempty"` // ForceSendFields is a list of field names (e.g. "BytesToSign") to @@ -2127,12 +1838,18 @@ func (s *SignBlobRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SignBlobResponse: The service account sign blob response. +// SignBlobResponse: Deprecated. [Migrate to Service Account Credentials +// API](https://cloud.google.com/iam/help/credentials/migrate-api). The +// service account sign blob response. type SignBlobResponse struct { - // KeyId: The id of the key used to sign the blob. + // KeyId: Deprecated. [Migrate to Service Account Credentials + // API](https://cloud.google.com/iam/help/credentials/migrate-api). 
The + // id of the key used to sign the blob. KeyId string `json:"keyId,omitempty"` - // Signature: The signed blob. + // Signature: Deprecated. [Migrate to Service Account Credentials + // API](https://cloud.google.com/iam/help/credentials/migrate-api). The + // signed blob. Signature string `json:"signature,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2162,9 +1879,20 @@ func (s *SignBlobResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SignJwtRequest: The service account sign JWT request. +// SignJwtRequest: Deprecated. [Migrate to Service Account Credentials +// API](https://cloud.google.com/iam/help/credentials/migrate-api). The +// service account sign JWT request. type SignJwtRequest struct { - // Payload: Required. The JWT payload to sign, a JSON JWT Claim set. + // Payload: Required. Deprecated. [Migrate to Service Account + // Credentials + // API](https://cloud.google.com/iam/help/credentials/migrate-api). The + // JWT payload to sign. Must be a serialized JSON object that contains a + // JWT Claims Set. For example: `{"sub": "user@example.com", "iat": + // 313435}` If the JWT Claims Set contains an expiration time (`exp`) + // claim, it must be an integer timestamp that is not in the past and no + // more than 1 hour in the future. If the JWT Claims Set does not + // contain an expiration time (`exp`) claim, this claim is added + // automatically, with a timestamp that is 1 hour in the future. Payload string `json:"payload,omitempty"` // ForceSendFields is a list of field names (e.g. "Payload") to @@ -2190,12 +1918,18 @@ func (s *SignJwtRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SignJwtResponse: The service account sign JWT response. +// SignJwtResponse: Deprecated. [Migrate to Service Account Credentials +// API](https://cloud.google.com/iam/help/credentials/migrate-api). The +// service account sign JWT response. type SignJwtResponse struct { - // KeyId: The id of the key used to sign the JWT. + // KeyId: Deprecated. [Migrate to Service Account Credentials + // API](https://cloud.google.com/iam/help/credentials/migrate-api). The + // id of the key used to sign the JWT. KeyId string `json:"keyId,omitempty"` - // SignedJwt: The signed JWT. + // SignedJwt: Deprecated. [Migrate to Service Account Credentials + // API](https://cloud.google.com/iam/help/credentials/migrate-api). The + // signed JWT. SignedJwt string `json:"signedJwt,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2229,11 +1963,8 @@ func (s *SignJwtResponse) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. - // Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For - // more - // information see - // [IAM + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. For more information see [IAM // Overview](https://cloud.google.com/iam/docs/overview#permissions). Permissions []string `json:"permissions,omitempty"` @@ -2264,8 +1995,7 @@ func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. + // the caller is allowed. 
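Because SignBlob and SignJwt on this surface are marked deprecated in favor of the Service Account Credentials API, a rough migration sketch using google.golang.org/api/iamcredentials/v1 might look like the following. The service account email and JWT claims are placeholders, and the exact request fields should be checked against that package.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"time"

	iamcredentials "google.golang.org/api/iamcredentials/v1"
)

func main() {
	ctx := context.Background()
	svc, err := iamcredentials.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder service account; the `-` wildcard for the project ID is accepted here.
	name := "projects/-/serviceAccounts/signer@my-project.iam.gserviceaccount.com"
	claims, _ := json.Marshal(map[string]interface{}{
		"sub": "signer@my-project.iam.gserviceaccount.com",
		"aud": "https://example.com/",
		"exp": time.Now().Add(30 * time.Minute).Unix(),
	})
	resp, err := svc.Projects.ServiceAccounts.SignJwt(name, &iamcredentials.SignJwtRequest{
		Payload: string(claims), // serialized JWT Claims Set, as with the deprecated method
	}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signed with key", resp.KeyId)
	_ = resp.SignedJwt
}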
Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2363,11 +2093,9 @@ func (s *UndeleteServiceAccountResponse) MarshalJSON() ([]byte, error) { // request. type UploadServiceAccountKeyRequest struct { // PublicKeyData: A field that allows clients to upload their own public - // key. If set, - // use this public key data to create a service account key for - // given - // service account. - // Please note, the expected format for this field is X509_PEM. + // key. If set, use this public key data to create a service account key + // for given service account. Please note, the expected format for this + // field is X509_PEM. PublicKeyData string `json:"publicKeyData,omitempty"` // ForceSendFields is a list of field names (e.g. "PublicKeyData") to @@ -2403,24 +2131,11 @@ type IamPoliciesLintPolicyCall struct { header_ http.Header } -// LintPolicy: Lints a Cloud IAM policy object or its sub fields. -// Currently supports -// google.iam.v1.Binding.condition. -// -// Each lint operation consists of multiple lint validation units. -// Each unit inspects the input object in regard to a particular -// linting -// aspect and issues a google.iam.admin.v1.LintResult disclosing -// the -// result. -// -// The set of applicable validation units is determined by the Cloud -// IAM -// server and is not configurable. -// -// Regardless of any lint issues or their severities, successful calls -// to -// `lintPolicy` return an HTTP 200 OK status code. +// LintPolicy: Lints, or validates, an IAM policy. Currently checks the +// google.iam.v1.Binding.condition field, which contains a condition +// expression for a role binding. Successful calls to this method always +// return an HTTP `200 OK` status code, even if the linter detects an +// issue in the IAM policy. func (r *IamPoliciesService) LintPolicy(lintpolicyrequest *LintPolicyRequest) *IamPoliciesLintPolicyCall { c := &IamPoliciesLintPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.lintpolicyrequest = lintpolicyrequest @@ -2454,7 +2169,7 @@ func (c *IamPoliciesLintPolicyCall) Header() http.Header { func (c *IamPoliciesLintPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2515,7 +2230,7 @@ func (c *IamPoliciesLintPolicyCall) Do(opts ...googleapi.CallOption) (*LintPolic } return ret, nil // { - // "description": "Lints a Cloud IAM policy object or its sub fields. Currently supports\ngoogle.iam.v1.Binding.condition.\n\nEach lint operation consists of multiple lint validation units.\nEach unit inspects the input object in regard to a particular linting\naspect and issues a google.iam.admin.v1.LintResult disclosing the\nresult.\n\nThe set of applicable validation units is determined by the Cloud IAM\nserver and is not configurable.\n\nRegardless of any lint issues or their severities, successful calls to\n`lintPolicy` return an HTTP 200 OK status code.", + // "description": "Lints, or validates, an IAM policy. Currently checks the google.iam.v1.Binding.condition field, which contains a condition expression for a role binding. 
Successful calls to this method always return an HTTP `200 OK` status code, even if the linter detects an issue in the IAM policy.", // "flatPath": "v1/iamPolicies:lintPolicy", // "httpMethod": "POST", // "id": "iam.iamPolicies.lintPolicy", @@ -2545,9 +2260,10 @@ type IamPoliciesQueryAuditableServicesCall struct { header_ http.Header } -// QueryAuditableServices: Returns a list of services that support -// service level audit logging -// configuration for the given resource. +// QueryAuditableServices: Returns a list of services that allow you to +// opt into audit logs that are not generated by default. To learn more +// about audit logs, see the [Logging +// documentation](https://cloud.google.com/logging/docs/audit). func (r *IamPoliciesService) QueryAuditableServices(queryauditableservicesrequest *QueryAuditableServicesRequest) *IamPoliciesQueryAuditableServicesCall { c := &IamPoliciesQueryAuditableServicesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.queryauditableservicesrequest = queryauditableservicesrequest @@ -2581,7 +2297,7 @@ func (c *IamPoliciesQueryAuditableServicesCall) Header() http.Header { func (c *IamPoliciesQueryAuditableServicesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2642,7 +2358,7 @@ func (c *IamPoliciesQueryAuditableServicesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Returns a list of services that support service level audit logging\nconfiguration for the given resource.", + // "description": "Returns a list of services that allow you to opt into audit logs that are not generated by default. To learn more about audit logs, see the [Logging documentation](https://cloud.google.com/logging/docs/audit).", // "flatPath": "v1/iamPolicies:queryAuditableServices", // "httpMethod": "POST", // "id": "iam.iamPolicies.queryAuditableServices", @@ -2673,7 +2389,7 @@ type OrganizationsRolesCreateCall struct { header_ http.Header } -// Create: Creates a new Role. +// Create: Creates a new custom Role. 
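As the updated lintPolicy description notes, the linter reports findings in the response body rather than through the HTTP status, so a caller has to inspect the lint results even on a 200 OK. A small sketch under that assumption; the condition expression is illustrative and the LintPolicyRequest/LintResult field names are assumed from this generated surface.

package main

import (
	"context"
	"fmt"
	"log"

	iam "google.golang.org/api/iam/v1"
)

func main() {
	ctx := context.Background()
	svc, err := iam.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := svc.IamPolicies.LintPolicy(&iam.LintPolicyRequest{
		// Only google.iam.v1.Binding.condition is currently checked.
		Condition: &iam.Expr{
			Title:      "expiry",
			Expression: `request.time < timestamp("2021-01-01T00:00:00Z")`,
		},
	}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err) // transport or auth errors only; lint findings are not errors
	}
	for _, r := range resp.LintResults {
		fmt.Printf("%s: %s\n", r.Severity, r.DebugMessage)
	}
}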
func (r *OrganizationsRolesService) Create(parent string, createrolerequest *CreateRoleRequest) *OrganizationsRolesCreateCall { c := &OrganizationsRolesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -2708,7 +2424,7 @@ func (c *OrganizationsRolesCreateCall) Header() http.Header { func (c *OrganizationsRolesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2772,7 +2488,7 @@ func (c *OrganizationsRolesCreateCall) Do(opts ...googleapi.CallOption) (*Role, } return ret, nil // { - // "description": "Creates a new Role.", + // "description": "Creates a new custom Role.", // "flatPath": "v1/organizations/{organizationsId}/roles", // "httpMethod": "POST", // "id": "iam.organizations.roles.create", @@ -2781,7 +2497,7 @@ func (c *OrganizationsRolesCreateCall) Do(opts ...googleapi.CallOption) (*Role, // ], // "parameters": { // "parent": { - // "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`projects.roles.create()`](/iam/reference/rest/v1/projects.roles/create):\n `projects/{PROJECT_ID}`. This method creates project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.create()`](/iam/reference/rest/v1/organizations.roles/create):\n `organizations/{ORGANIZATION_ID}`. This method creates organization-level\n [custom roles](/iam/docs/understanding-custom-roles). Example request\n URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + // "description": "The `parent` parameter's value depends on the target resource for the request, namely [`projects`](/iam/reference/rest/v1/projects.roles) or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `parent` value format is described below: * [`projects.roles.create()`](/iam/reference/rest/v1/projects.roles/create): `projects/{PROJECT_ID}`. This method creates project-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * [`organizations.roles.create()`](/iam/reference/rest/v1/organizations.roles/create): `organizations/{ORGANIZATION_ID}`. This method creates organization-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, @@ -2812,18 +2528,15 @@ type OrganizationsRolesDeleteCall struct { header_ http.Header } -// Delete: Soft deletes a role. The role is suspended and cannot be used -// to create new -// IAM Policy Bindings. -// The Role will not be included in `ListRoles()` unless `show_deleted` -// is set -// in the `ListRolesRequest`. 
The Role contains the deleted boolean -// set. -// Existing Bindings remains, but are inactive. The Role can be -// undeleted -// within 7 days. After 7 days the Role is deleted and all Bindings -// associated -// with the role are removed. +// Delete: Deletes a custom Role. When you delete a custom role, the +// following changes occur immediately: * You cannot bind a member to +// the custom role in an IAM Policy. * Existing bindings to the custom +// role are not changed, but they have no effect. * By default, the +// response from ListRoles does not include the custom role. You have 7 +// days to undelete the custom role. After 7 days, the following changes +// occur: * The custom role is permanently deleted and cannot be +// recovered. * If an IAM policy contains a binding to the custom role, +// the binding is permanently removed. func (r *OrganizationsRolesService) Delete(name string) *OrganizationsRolesDeleteCall { c := &OrganizationsRolesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2864,7 +2577,7 @@ func (c *OrganizationsRolesDeleteCall) Header() http.Header { func (c *OrganizationsRolesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2923,7 +2636,7 @@ func (c *OrganizationsRolesDeleteCall) Do(opts ...googleapi.CallOption) (*Role, } return ret, nil // { - // "description": "Soft deletes a role. The role is suspended and cannot be used to create new\nIAM Policy Bindings.\nThe Role will not be included in `ListRoles()` unless `show_deleted` is set\nin the `ListRolesRequest`. The Role contains the deleted boolean set.\nExisting Bindings remains, but are inactive. The Role can be undeleted\nwithin 7 days. After 7 days the Role is deleted and all Bindings associated\nwith the role are removed.", + // "description": "Deletes a custom Role. When you delete a custom role, the following changes occur immediately: * You cannot bind a member to the custom role in an IAM Policy. * Existing bindings to the custom role are not changed, but they have no effect. * By default, the response from ListRoles does not include the custom role. You have 7 days to undelete the custom role. After 7 days, the following changes occur: * The custom role is permanently deleted and cannot be recovered. * If an IAM policy contains a binding to the custom role, the binding is permanently removed.", // "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}", // "httpMethod": "DELETE", // "id": "iam.organizations.roles.delete", @@ -2938,7 +2651,7 @@ func (c *OrganizationsRolesDeleteCall) Do(opts ...googleapi.CallOption) (*Role, // "type": "string" // }, // "name": { - // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.delete()`](/iam/reference/rest/v1/projects.roles/delete):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. 
Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.delete()`](/iam/reference/rest/v1/organizations.roles/delete):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n deletes only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + // "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](/iam/reference/rest/v1/projects.roles) or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`projects.roles.delete()`](/iam/reference/rest/v1/projects.roles/delete): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.delete()`](/iam/reference/rest/v1/organizations.roles/delete): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", // "location": "path", // "pattern": "^organizations/[^/]+/roles/[^/]+$", // "required": true, @@ -2967,7 +2680,7 @@ type OrganizationsRolesGetCall struct { header_ http.Header } -// Get: Gets a Role definition. +// Get: Gets the definition of a Role. func (r *OrganizationsRolesService) Get(name string) *OrganizationsRolesGetCall { c := &OrganizationsRolesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3011,7 +2724,7 @@ func (c *OrganizationsRolesGetCall) Header() http.Header { func (c *OrganizationsRolesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3073,7 +2786,7 @@ func (c *OrganizationsRolesGetCall) Do(opts ...googleapi.CallOption) (*Role, err } return ret, nil // { - // "description": "Gets a Role definition.", + // "description": "Gets the definition of a Role.", // "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}", // "httpMethod": "GET", // "id": "iam.organizations.roles.get", @@ -3082,7 +2795,7 @@ func (c *OrganizationsRolesGetCall) Do(opts ...googleapi.CallOption) (*Role, err // ], // "parameters": { // "name": { - // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). 
Each\nresource type's `name` value format is described below:\n\n* [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`.\n This method returns results from all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles/{ROLE_NAME}`\n\n* [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n returns only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + // "description": "The `name` parameter's value depends on the target resource for the request, namely [`roles`](/iam/reference/rest/v1/roles), [`projects`](/iam/reference/rest/v1/projects.roles), or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`. This method returns results from all [predefined roles](/iam/docs/understanding-roles#predefined_roles) in Cloud IAM. Example request URL: `https://iam.googleapis.com/v1/roles/{ROLE_NAME}` * [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", // "location": "path", // "pattern": "^organizations/[^/]+/roles/[^/]+$", // "required": true, @@ -3111,7 +2824,8 @@ type OrganizationsRolesListCall struct { header_ http.Header } -// List: Lists the Roles defined on a resource. +// List: Lists every predefined Role that IAM supports, or every custom +// role that is defined for an organization or project. func (r *OrganizationsRolesService) List(parent string) *OrganizationsRolesListCall { c := &OrganizationsRolesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -3119,7 +2833,8 @@ func (r *OrganizationsRolesService) List(parent string) *OrganizationsRolesListC } // PageSize sets the optional parameter "pageSize": Optional limit on -// the number of roles to include in the response. +// the number of roles to include in the response. The default is 300, +// and the maximum is 1,000. 
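Given the documented page-size default of 300 (capped at 1,000) and the BASIC/FULL views, listing an organization's custom roles together with their permissions could look roughly like this; the organization ID is a placeholder.

package main

import (
	"context"
	"fmt"
	"log"

	iam "google.golang.org/api/iam/v1"
)

func main() {
	ctx := context.Background()
	svc, err := iam.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	call := svc.Organizations.Roles.List("organizations/123456789012").
		PageSize(1000). // the documented maximum
		View("FULL")    // include the includedPermissions field
	err = call.Pages(ctx, func(page *iam.ListRolesResponse) error {
		for _, role := range page.Roles {
			fmt.Printf("%s: %d permissions\n", role.Name, len(role.IncludedPermissions))
		}
		return nil // Pages follows nextPageToken until it is empty
	})
	if err != nil {
		log.Fatal(err)
	}
}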
func (c *OrganizationsRolesListCall) PageSize(pageSize int64) *OrganizationsRolesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c @@ -3140,16 +2855,15 @@ func (c *OrganizationsRolesListCall) ShowDeleted(showDeleted bool) *Organization } // View sets the optional parameter "view": Optional view for the -// returned Role objects. When `FULL` is specified, -// the `includedPermissions` field is returned, which includes a list of -// all -// permissions in the role. The default value is `BASIC`, which does -// not +// returned Role objects. When `FULL` is specified, the +// `includedPermissions` field is returned, which includes a list of all +// permissions in the role. The default value is `BASIC`, which does not // return the `includedPermissions` field. // // Possible values: -// "BASIC" -// "FULL" +// "BASIC" - Omits the `included_permissions` field. This is the +// default value. +// "FULL" - Returns all fields. func (c *OrganizationsRolesListCall) View(view string) *OrganizationsRolesListCall { c.urlParams_.Set("view", view) return c @@ -3192,7 +2906,7 @@ func (c *OrganizationsRolesListCall) Header() http.Header { func (c *OrganizationsRolesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3254,7 +2968,7 @@ func (c *OrganizationsRolesListCall) Do(opts ...googleapi.CallOption) (*ListRole } return ret, nil // { - // "description": "Lists the Roles defined on a resource.", + // "description": "Lists every predefined Role that IAM supports, or every custom role that is defined for an organization or project.", // "flatPath": "v1/organizations/{organizationsId}/roles", // "httpMethod": "GET", // "id": "iam.organizations.roles.list", @@ -3263,7 +2977,7 @@ func (c *OrganizationsRolesListCall) Do(opts ...googleapi.CallOption) (*ListRole // ], // "parameters": { // "pageSize": { - // "description": "Optional limit on the number of roles to include in the response.", + // "description": "Optional limit on the number of roles to include in the response. The default is 300, and the maximum is 1,000.", // "format": "int32", // "location": "query", // "type": "integer" @@ -3274,7 +2988,7 @@ func (c *OrganizationsRolesListCall) Do(opts ...googleapi.CallOption) (*ListRole // "type": "string" // }, // "parent": { - // "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string.\n This method doesn't require a resource; it simply returns all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles`\n\n* [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list):\n `projects/{PROJECT_ID}`. This method lists all project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list):\n `organizations/{ORGANIZATION_ID}`. 
This method lists all\n organization-level [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + // "description": "The `parent` parameter's value depends on the target resource for the request, namely [`roles`](/iam/reference/rest/v1/roles), [`projects`](/iam/reference/rest/v1/projects.roles), or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `parent` value format is described below: * [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string. This method doesn't require a resource; it simply returns all [predefined roles](/iam/docs/understanding-roles#predefined_roles) in Cloud IAM. Example request URL: `https://iam.googleapis.com/v1/roles` * [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list): `projects/{PROJECT_ID}`. This method lists all project-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list): `organizations/{ORGANIZATION_ID}`. This method lists all organization-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, @@ -3286,11 +3000,15 @@ func (c *OrganizationsRolesListCall) Do(opts ...googleapi.CallOption) (*ListRole // "type": "boolean" // }, // "view": { - // "description": "Optional view for the returned Role objects. When `FULL` is specified,\nthe `includedPermissions` field is returned, which includes a list of all\npermissions in the role. The default value is `BASIC`, which does not\nreturn the `includedPermissions` field.", + // "description": "Optional view for the returned Role objects. When `FULL` is specified, the `includedPermissions` field is returned, which includes a list of all permissions in the role. The default value is `BASIC`, which does not return the `includedPermissions` field.", // "enum": [ // "BASIC", // "FULL" // ], + // "enumDescriptions": [ + // "Omits the `included_permissions` field. This is the default value.", + // "Returns all fields." + // ], // "location": "query", // "type": "string" // } @@ -3338,7 +3056,7 @@ type OrganizationsRolesPatchCall struct { header_ http.Header } -// Patch: Updates a Role definition. +// Patch: Updates the definition of a custom Role. 
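For the patch call described here, only the fields named in the update mask are changed. A sketch that retitles a custom role; the resource name and title are placeholders, and the UpdateMask setter is assumed to be present on the generated call.

package main

import (
	"context"
	"fmt"
	"log"

	iam "google.golang.org/api/iam/v1"
)

func main() {
	ctx := context.Background()
	svc, err := iam.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	name := "organizations/123456789012/roles/ciMirrorPusher" // placeholder custom role
	updated, err := svc.Organizations.Roles.Patch(name, &iam.Role{
		Title: "CI mirror pusher",
	}).UpdateMask("title").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("updated", updated.Name)
}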
func (r *OrganizationsRolesService) Patch(name string, role *Role) *OrganizationsRolesPatchCall { c := &OrganizationsRolesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3380,7 +3098,7 @@ func (c *OrganizationsRolesPatchCall) Header() http.Header { func (c *OrganizationsRolesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3444,7 +3162,7 @@ func (c *OrganizationsRolesPatchCall) Do(opts ...googleapi.CallOption) (*Role, e } return ret, nil // { - // "description": "Updates a Role definition.", + // "description": "Updates the definition of a custom Role.", // "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}", // "httpMethod": "PATCH", // "id": "iam.organizations.roles.patch", @@ -3453,7 +3171,7 @@ func (c *OrganizationsRolesPatchCall) Do(opts ...googleapi.CallOption) (*Role, e // ], // "parameters": { // "name": { - // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.patch()`](/iam/reference/rest/v1/projects.roles/patch):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.patch()`](/iam/reference/rest/v1/organizations.roles/patch):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n updates only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + // "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](/iam/reference/rest/v1/projects.roles) or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`projects.roles.patch()`](/iam/reference/rest/v1/projects.roles/patch): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.patch()`](/iam/reference/rest/v1/organizations.roles/patch): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. 
Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", // "location": "path", // "pattern": "^organizations/[^/]+/roles/[^/]+$", // "required": true, @@ -3491,7 +3209,7 @@ type OrganizationsRolesUndeleteCall struct { header_ http.Header } -// Undelete: Undelete a Role, bringing it back in its previous state. +// Undelete: Undeletes a custom Role. func (r *OrganizationsRolesService) Undelete(name string, undeleterolerequest *UndeleteRoleRequest) *OrganizationsRolesUndeleteCall { c := &OrganizationsRolesUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3526,7 +3244,7 @@ func (c *OrganizationsRolesUndeleteCall) Header() http.Header { func (c *OrganizationsRolesUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3590,7 +3308,7 @@ func (c *OrganizationsRolesUndeleteCall) Do(opts ...googleapi.CallOption) (*Role } return ret, nil // { - // "description": "Undelete a Role, bringing it back in its previous state.", + // "description": "Undeletes a custom Role.", // "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}:undelete", // "httpMethod": "POST", // "id": "iam.organizations.roles.undelete", @@ -3599,7 +3317,7 @@ func (c *OrganizationsRolesUndeleteCall) Do(opts ...googleapi.CallOption) (*Role // ], // "parameters": { // "name": { - // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.undelete()`](/iam/reference/rest/v1/projects.roles/undelete):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes\n only [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.undelete()`](/iam/reference/rest/v1/organizations.roles/undelete):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n undeletes only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + // "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](/iam/reference/rest/v1/projects.roles) or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`projects.roles.undelete()`](/iam/reference/rest/v1/projects.roles/undelete): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. 
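The delete/undelete pair behaves as described above: a deleted custom role can be restored for 7 days, after which it is permanently removed. A rough sketch, with a placeholder role name and the deleted role's etag passed back on undelete.

package main

import (
	"context"
	"fmt"
	"log"

	iam "google.golang.org/api/iam/v1"
)

func main() {
	ctx := context.Background()
	svc, err := iam.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	name := "organizations/123456789012/roles/ciMirrorPusher" // placeholder custom role

	// Soft-delete: existing bindings stop having effect but are not removed yet.
	deleted, err := svc.Organizations.Roles.Delete(name).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted at etag", deleted.Etag)

	// Within the 7-day window the role can be restored; the etag guards
	// against undeleting a role that changed in the meantime.
	restored, err := svc.Organizations.Roles.Undelete(name, &iam.UndeleteRoleRequest{
		Etag: deleted.Etag,
	}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("restored, deleted =", restored.Deleted)
}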
Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.undelete()`](/iam/reference/rest/v1/organizations.roles/undelete): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", // "location": "path", // "pattern": "^organizations/[^/]+/roles/[^/]+$", // "required": true, @@ -3630,10 +3348,9 @@ type PermissionsQueryTestablePermissionsCall struct { header_ http.Header } -// QueryTestablePermissions: Lists the permissions testable on a -// resource. -// A permission is testable if it can be tested for an identity on a -// resource. +// QueryTestablePermissions: Lists every permission that you can test on +// a resource. A permission is testable if you can check whether a +// member has that permission on the resource. func (r *PermissionsService) QueryTestablePermissions(querytestablepermissionsrequest *QueryTestablePermissionsRequest) *PermissionsQueryTestablePermissionsCall { c := &PermissionsQueryTestablePermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.querytestablepermissionsrequest = querytestablepermissionsrequest @@ -3667,7 +3384,7 @@ func (c *PermissionsQueryTestablePermissionsCall) Header() http.Header { func (c *PermissionsQueryTestablePermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3728,7 +3445,7 @@ func (c *PermissionsQueryTestablePermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Lists the permissions testable on a resource.\nA permission is testable if it can be tested for an identity on a resource.", + // "description": "Lists every permission that you can test on a resource. A permission is testable if you can check whether a member has that permission on the resource.", // "flatPath": "v1/permissions:queryTestablePermissions", // "httpMethod": "POST", // "id": "iam.permissions.queryTestablePermissions", @@ -3780,7 +3497,7 @@ type ProjectsRolesCreateCall struct { header_ http.Header } -// Create: Creates a new Role. +// Create: Creates a new custom Role. 
func (r *ProjectsRolesService) Create(parent string, createrolerequest *CreateRoleRequest) *ProjectsRolesCreateCall { c := &ProjectsRolesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -3815,7 +3532,7 @@ func (c *ProjectsRolesCreateCall) Header() http.Header { func (c *ProjectsRolesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3879,7 +3596,7 @@ func (c *ProjectsRolesCreateCall) Do(opts ...googleapi.CallOption) (*Role, error } return ret, nil // { - // "description": "Creates a new Role.", + // "description": "Creates a new custom Role.", // "flatPath": "v1/projects/{projectsId}/roles", // "httpMethod": "POST", // "id": "iam.projects.roles.create", @@ -3888,7 +3605,7 @@ func (c *ProjectsRolesCreateCall) Do(opts ...googleapi.CallOption) (*Role, error // ], // "parameters": { // "parent": { - // "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`projects.roles.create()`](/iam/reference/rest/v1/projects.roles/create):\n `projects/{PROJECT_ID}`. This method creates project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.create()`](/iam/reference/rest/v1/organizations.roles/create):\n `organizations/{ORGANIZATION_ID}`. This method creates organization-level\n [custom roles](/iam/docs/understanding-custom-roles). Example request\n URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + // "description": "The `parent` parameter's value depends on the target resource for the request, namely [`projects`](/iam/reference/rest/v1/projects.roles) or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `parent` value format is described below: * [`projects.roles.create()`](/iam/reference/rest/v1/projects.roles/create): `projects/{PROJECT_ID}`. This method creates project-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * [`organizations.roles.create()`](/iam/reference/rest/v1/organizations.roles/create): `organizations/{ORGANIZATION_ID}`. This method creates organization-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -3919,18 +3636,15 @@ type ProjectsRolesDeleteCall struct { header_ http.Header } -// Delete: Soft deletes a role. The role is suspended and cannot be used -// to create new -// IAM Policy Bindings. -// The Role will not be included in `ListRoles()` unless `show_deleted` -// is set -// in the `ListRolesRequest`. The Role contains the deleted boolean -// set. 
-// Existing Bindings remains, but are inactive. The Role can be -// undeleted -// within 7 days. After 7 days the Role is deleted and all Bindings -// associated -// with the role are removed. +// Delete: Deletes a custom Role. When you delete a custom role, the +// following changes occur immediately: * You cannot bind a member to +// the custom role in an IAM Policy. * Existing bindings to the custom +// role are not changed, but they have no effect. * By default, the +// response from ListRoles does not include the custom role. You have 7 +// days to undelete the custom role. After 7 days, the following changes +// occur: * The custom role is permanently deleted and cannot be +// recovered. * If an IAM policy contains a binding to the custom role, +// the binding is permanently removed. func (r *ProjectsRolesService) Delete(name string) *ProjectsRolesDeleteCall { c := &ProjectsRolesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3971,7 +3685,7 @@ func (c *ProjectsRolesDeleteCall) Header() http.Header { func (c *ProjectsRolesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4030,7 +3744,7 @@ func (c *ProjectsRolesDeleteCall) Do(opts ...googleapi.CallOption) (*Role, error } return ret, nil // { - // "description": "Soft deletes a role. The role is suspended and cannot be used to create new\nIAM Policy Bindings.\nThe Role will not be included in `ListRoles()` unless `show_deleted` is set\nin the `ListRolesRequest`. The Role contains the deleted boolean set.\nExisting Bindings remains, but are inactive. The Role can be undeleted\nwithin 7 days. After 7 days the Role is deleted and all Bindings associated\nwith the role are removed.", + // "description": "Deletes a custom Role. When you delete a custom role, the following changes occur immediately: * You cannot bind a member to the custom role in an IAM Policy. * Existing bindings to the custom role are not changed, but they have no effect. * By default, the response from ListRoles does not include the custom role. You have 7 days to undelete the custom role. After 7 days, the following changes occur: * The custom role is permanently deleted and cannot be recovered. * If an IAM policy contains a binding to the custom role, the binding is permanently removed.", // "flatPath": "v1/projects/{projectsId}/roles/{rolesId}", // "httpMethod": "DELETE", // "id": "iam.projects.roles.delete", @@ -4045,7 +3759,7 @@ func (c *ProjectsRolesDeleteCall) Do(opts ...googleapi.CallOption) (*Role, error // "type": "string" // }, // "name": { - // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.delete()`](/iam/reference/rest/v1/projects.roles/delete):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. 
Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.delete()`](/iam/reference/rest/v1/organizations.roles/delete):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n deletes only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + // "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](/iam/reference/rest/v1/projects.roles) or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`projects.roles.delete()`](/iam/reference/rest/v1/projects.roles/delete): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.delete()`](/iam/reference/rest/v1/organizations.roles/delete): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", // "location": "path", // "pattern": "^projects/[^/]+/roles/[^/]+$", // "required": true, @@ -4074,7 +3788,7 @@ type ProjectsRolesGetCall struct { header_ http.Header } -// Get: Gets a Role definition. +// Get: Gets the definition of a Role. func (r *ProjectsRolesService) Get(name string) *ProjectsRolesGetCall { c := &ProjectsRolesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4118,7 +3832,7 @@ func (c *ProjectsRolesGetCall) Header() http.Header { func (c *ProjectsRolesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4180,7 +3894,7 @@ func (c *ProjectsRolesGetCall) Do(opts ...googleapi.CallOption) (*Role, error) { } return ret, nil // { - // "description": "Gets a Role definition.", + // "description": "Gets the definition of a Role.", // "flatPath": "v1/projects/{projectsId}/roles/{rolesId}", // "httpMethod": "GET", // "id": "iam.projects.roles.get", @@ -4189,7 +3903,7 @@ func (c *ProjectsRolesGetCall) Do(opts ...googleapi.CallOption) (*Role, error) { // ], // "parameters": { // "name": { - // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`.\n This method returns results from all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. 
Example request URL:\n `https://iam.googleapis.com/v1/roles/{ROLE_NAME}`\n\n* [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n returns only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + // "description": "The `name` parameter's value depends on the target resource for the request, namely [`roles`](/iam/reference/rest/v1/roles), [`projects`](/iam/reference/rest/v1/projects.roles), or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`. This method returns results from all [predefined roles](/iam/docs/understanding-roles#predefined_roles) in Cloud IAM. Example request URL: `https://iam.googleapis.com/v1/roles/{ROLE_NAME}` * [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", // "location": "path", // "pattern": "^projects/[^/]+/roles/[^/]+$", // "required": true, @@ -4218,7 +3932,8 @@ type ProjectsRolesListCall struct { header_ http.Header } -// List: Lists the Roles defined on a resource. +// List: Lists every predefined Role that IAM supports, or every custom +// role that is defined for an organization or project. func (r *ProjectsRolesService) List(parent string) *ProjectsRolesListCall { c := &ProjectsRolesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -4226,7 +3941,8 @@ func (r *ProjectsRolesService) List(parent string) *ProjectsRolesListCall { } // PageSize sets the optional parameter "pageSize": Optional limit on -// the number of roles to include in the response. +// the number of roles to include in the response. The default is 300, +// and the maximum is 1,000. func (c *ProjectsRolesListCall) PageSize(pageSize int64) *ProjectsRolesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c @@ -4247,16 +3963,15 @@ func (c *ProjectsRolesListCall) ShowDeleted(showDeleted bool) *ProjectsRolesList } // View sets the optional parameter "view": Optional view for the -// returned Role objects. 
When `FULL` is specified, -// the `includedPermissions` field is returned, which includes a list of -// all -// permissions in the role. The default value is `BASIC`, which does -// not +// returned Role objects. When `FULL` is specified, the +// `includedPermissions` field is returned, which includes a list of all +// permissions in the role. The default value is `BASIC`, which does not // return the `includedPermissions` field. // // Possible values: -// "BASIC" -// "FULL" +// "BASIC" - Omits the `included_permissions` field. This is the +// default value. +// "FULL" - Returns all fields. func (c *ProjectsRolesListCall) View(view string) *ProjectsRolesListCall { c.urlParams_.Set("view", view) return c @@ -4299,7 +4014,7 @@ func (c *ProjectsRolesListCall) Header() http.Header { func (c *ProjectsRolesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4361,7 +4076,7 @@ func (c *ProjectsRolesListCall) Do(opts ...googleapi.CallOption) (*ListRolesResp } return ret, nil // { - // "description": "Lists the Roles defined on a resource.", + // "description": "Lists every predefined Role that IAM supports, or every custom role that is defined for an organization or project.", // "flatPath": "v1/projects/{projectsId}/roles", // "httpMethod": "GET", // "id": "iam.projects.roles.list", @@ -4370,7 +4085,7 @@ func (c *ProjectsRolesListCall) Do(opts ...googleapi.CallOption) (*ListRolesResp // ], // "parameters": { // "pageSize": { - // "description": "Optional limit on the number of roles to include in the response.", + // "description": "Optional limit on the number of roles to include in the response. The default is 300, and the maximum is 1,000.", // "format": "int32", // "location": "query", // "type": "integer" @@ -4381,7 +4096,7 @@ func (c *ProjectsRolesListCall) Do(opts ...googleapi.CallOption) (*ListRolesResp // "type": "string" // }, // "parent": { - // "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string.\n This method doesn't require a resource; it simply returns all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles`\n\n* [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list):\n `projects/{PROJECT_ID}`. This method lists all project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list):\n `organizations/{ORGANIZATION_ID}`. 
This method lists all\n organization-level [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + // "description": "The `parent` parameter's value depends on the target resource for the request, namely [`roles`](/iam/reference/rest/v1/roles), [`projects`](/iam/reference/rest/v1/projects.roles), or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `parent` value format is described below: * [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string. This method doesn't require a resource; it simply returns all [predefined roles](/iam/docs/understanding-roles#predefined_roles) in Cloud IAM. Example request URL: `https://iam.googleapis.com/v1/roles` * [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list): `projects/{PROJECT_ID}`. This method lists all project-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list): `organizations/{ORGANIZATION_ID}`. This method lists all organization-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -4393,11 +4108,15 @@ func (c *ProjectsRolesListCall) Do(opts ...googleapi.CallOption) (*ListRolesResp // "type": "boolean" // }, // "view": { - // "description": "Optional view for the returned Role objects. When `FULL` is specified,\nthe `includedPermissions` field is returned, which includes a list of all\npermissions in the role. The default value is `BASIC`, which does not\nreturn the `includedPermissions` field.", + // "description": "Optional view for the returned Role objects. When `FULL` is specified, the `includedPermissions` field is returned, which includes a list of all permissions in the role. The default value is `BASIC`, which does not return the `includedPermissions` field.", // "enum": [ // "BASIC", // "FULL" // ], + // "enumDescriptions": [ + // "Omits the `included_permissions` field. This is the default value.", + // "Returns all fields." + // ], // "location": "query", // "type": "string" // } @@ -4445,7 +4164,7 @@ type ProjectsRolesPatchCall struct { header_ http.Header } -// Patch: Updates a Role definition. +// Patch: Updates the definition of a custom Role. 
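// Usage sketch (editorial aside, not part of the generated client): listing a
// project's custom roles with the FULL view so includedPermissions is
// populated, and paging through results. Assumes imports of context, log, and
// iam "google.golang.org/api/iam/v1", the Pages helper on the list call, and a
// placeholder project ID.
//
//	ctx := context.Background()
//	svc, err := iam.NewService(ctx) // uses Application Default Credentials
//	if err != nil {
//		log.Fatal(err)
//	}
//	call := svc.Projects.Roles.List("projects/my-project").
//		View("FULL").  // include the includedPermissions field in each Role
//		PageSize(300)  // default is 300, maximum is 1,000
//	err = call.Pages(ctx, func(page *iam.ListRolesResponse) error {
//		for _, r := range page.Roles {
//			log.Printf("%s: %d permissions", r.Name, len(r.IncludedPermissions))
//		}
//		return nil
//	})
//	if err != nil {
//		log.Fatal(err)
//	}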
func (r *ProjectsRolesService) Patch(name string, role *Role) *ProjectsRolesPatchCall { c := &ProjectsRolesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4487,7 +4206,7 @@ func (c *ProjectsRolesPatchCall) Header() http.Header { func (c *ProjectsRolesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4551,7 +4270,7 @@ func (c *ProjectsRolesPatchCall) Do(opts ...googleapi.CallOption) (*Role, error) } return ret, nil // { - // "description": "Updates a Role definition.", + // "description": "Updates the definition of a custom Role.", // "flatPath": "v1/projects/{projectsId}/roles/{rolesId}", // "httpMethod": "PATCH", // "id": "iam.projects.roles.patch", @@ -4560,7 +4279,7 @@ func (c *ProjectsRolesPatchCall) Do(opts ...googleapi.CallOption) (*Role, error) // ], // "parameters": { // "name": { - // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.patch()`](/iam/reference/rest/v1/projects.roles/patch):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.patch()`](/iam/reference/rest/v1/organizations.roles/patch):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n updates only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + // "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](/iam/reference/rest/v1/projects.roles) or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`projects.roles.patch()`](/iam/reference/rest/v1/projects.roles/patch): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.patch()`](/iam/reference/rest/v1/organizations.roles/patch): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. 
Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", // "location": "path", // "pattern": "^projects/[^/]+/roles/[^/]+$", // "required": true, @@ -4598,7 +4317,7 @@ type ProjectsRolesUndeleteCall struct { header_ http.Header } -// Undelete: Undelete a Role, bringing it back in its previous state. +// Undelete: Undeletes a custom Role. func (r *ProjectsRolesService) Undelete(name string, undeleterolerequest *UndeleteRoleRequest) *ProjectsRolesUndeleteCall { c := &ProjectsRolesUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4633,7 +4352,7 @@ func (c *ProjectsRolesUndeleteCall) Header() http.Header { func (c *ProjectsRolesUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4697,7 +4416,7 @@ func (c *ProjectsRolesUndeleteCall) Do(opts ...googleapi.CallOption) (*Role, err } return ret, nil // { - // "description": "Undelete a Role, bringing it back in its previous state.", + // "description": "Undeletes a custom Role.", // "flatPath": "v1/projects/{projectsId}/roles/{rolesId}:undelete", // "httpMethod": "POST", // "id": "iam.projects.roles.undelete", @@ -4706,7 +4425,7 @@ func (c *ProjectsRolesUndeleteCall) Do(opts ...googleapi.CallOption) (*Role, err // ], // "parameters": { // "name": { - // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.undelete()`](/iam/reference/rest/v1/projects.roles/undelete):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes\n only [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.undelete()`](/iam/reference/rest/v1/organizations.roles/undelete):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n undeletes only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + // "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](/iam/reference/rest/v1/projects.roles) or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`projects.roles.undelete()`](/iam/reference/rest/v1/projects.roles/undelete): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. 
Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.undelete()`](/iam/reference/rest/v1/organizations.roles/undelete): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", // "location": "path", // "pattern": "^projects/[^/]+/roles/[^/]+$", // "required": true, @@ -4738,8 +4457,7 @@ type ProjectsServiceAccountsCreateCall struct { header_ http.Header } -// Create: Creates a ServiceAccount -// and returns it. +// Create: Creates a ServiceAccount. func (r *ProjectsServiceAccountsService) Create(name string, createserviceaccountrequest *CreateServiceAccountRequest) *ProjectsServiceAccountsCreateCall { c := &ProjectsServiceAccountsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4774,7 +4492,7 @@ func (c *ProjectsServiceAccountsCreateCall) Header() http.Header { func (c *ProjectsServiceAccountsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4838,7 +4556,7 @@ func (c *ProjectsServiceAccountsCreateCall) Do(opts ...googleapi.CallOption) (*S } return ret, nil // { - // "description": "Creates a ServiceAccount\nand returns it.", + // "description": "Creates a ServiceAccount.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.create", @@ -4847,7 +4565,7 @@ func (c *ProjectsServiceAccountsCreateCall) Do(opts ...googleapi.CallOption) (*S // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the project associated with the service\naccounts, such as `projects/my-project-123`.", + // "description": "Required. The resource name of the project associated with the service accounts, such as `projects/my-project-123`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -4878,7 +4596,18 @@ type ProjectsServiceAccountsDeleteCall struct { header_ http.Header } -// Delete: Deletes a ServiceAccount. +// Delete: Deletes a ServiceAccount. **Warning:** After you delete a +// service account, you might not be able to undelete it. If you know +// that you need to re-enable the service account in the future, use +// DisableServiceAccount instead. If you delete a service account, IAM +// permanently removes the service account 30 days later. Google Cloud +// cannot recover the service account after it is permanently removed, +// even if you file a support request. To help avoid unplanned outages, +// we recommend that you disable the service account before you delete +// it. Use DisableServiceAccount to disable the service account, then +// wait at least 24 hours and watch for unintended consequences. If +// there are no unintended consequences, you can delete the service +// account. 
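// Usage sketch (editorial aside, not part of the generated client): the
// disable-before-delete flow described above. Assumes imports of context, log,
// and iam "google.golang.org/api/iam/v1"; the project ID and account email are
// placeholders, and the recommended 24-hour observation period between the two
// calls is left to the caller.
//
//	ctx := context.Background()
//	svc, err := iam.NewService(ctx)
//	if err != nil {
//		log.Fatal(err)
//	}
//	name := "projects/my-project/serviceAccounts/build-bot@my-project.iam.gserviceaccount.com"
//	// Step 1: disable the account and watch for unintended consequences.
//	if _, err := svc.Projects.ServiceAccounts.Disable(name, &iam.DisableServiceAccountRequest{}).Do(); err != nil {
//		log.Fatal(err)
//	}
//	// Step 2, at least 24 hours later: delete; IAM removes it permanently after 30 days.
//	if _, err := svc.Projects.ServiceAccounts.Delete(name).Do(); err != nil {
//		log.Fatal(err)
//	}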
func (r *ProjectsServiceAccountsService) Delete(name string) *ProjectsServiceAccountsDeleteCall { c := &ProjectsServiceAccountsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4912,7 +4641,7 @@ func (c *ProjectsServiceAccountsDeleteCall) Header() http.Header { func (c *ProjectsServiceAccountsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4971,7 +4700,7 @@ func (c *ProjectsServiceAccountsDeleteCall) Do(opts ...googleapi.CallOption) (*E } return ret, nil // { - // "description": "Deletes a ServiceAccount.", + // "description": "Deletes a ServiceAccount. **Warning:** After you delete a service account, you might not be able to undelete it. If you know that you need to re-enable the service account in the future, use DisableServiceAccount instead. If you delete a service account, IAM permanently removes the service account 30 days later. Google Cloud cannot recover the service account after it is permanently removed, even if you file a support request. To help avoid unplanned outages, we recommend that you disable the service account before you delete it. Use DisableServiceAccount to disable the service account, then wait at least 24 hours and watch for unintended consequences. If there are no unintended consequences, you can delete the service account.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", // "httpMethod": "DELETE", // "id": "iam.projects.serviceAccounts.delete", @@ -4980,7 +4709,7 @@ func (c *ProjectsServiceAccountsDeleteCall) Do(opts ...googleapi.CallOption) (*E // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "Required. The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -5009,33 +4738,18 @@ type ProjectsServiceAccountsDisableCall struct { header_ http.Header } -// Disable: DisableServiceAccount is currently in the alpha launch -// stage. -// -// Disables a ServiceAccount, -// which immediately prevents the service account from authenticating -// and -// gaining access to APIs. -// -// Disabled service accounts can be safely restored by -// using -// EnableServiceAccount at any point. Deleted service accounts cannot -// be -// restored using this method. -// -// Disabling a service account that is bound to VMs, Apps, Functions, -// or -// other jobs will cause those jobs to lose access to resources if they -// are -// using the disabled service account. -// -// To improve reliability of your services and avoid unexpected outages, -// it -// is recommended to first disable a service account rather than delete -// it. 
-// After disabling the service account, wait at least 24 hours to verify -// there -// are no unintended consequences, and then delete the service account. +// Disable: Disables a ServiceAccount immediately. If an application +// uses the service account to authenticate, that application can no +// longer call Google APIs or access Google Cloud resources. Existing +// access tokens for the service account are rejected, and requests for +// new access tokens will fail. To re-enable the service account, use +// EnableServiceAccount. After you re-enable the service account, its +// existing access tokens will be accepted, and you can request new +// access tokens. To help avoid unplanned outages, we recommend that you +// disable the service account before you delete it. Use this method to +// disable the service account, then wait at least 24 hours and watch +// for unintended consequences. If there are no unintended consequences, +// you can delete the service account with DeleteServiceAccount. func (r *ProjectsServiceAccountsService) Disable(name string, disableserviceaccountrequest *DisableServiceAccountRequest) *ProjectsServiceAccountsDisableCall { c := &ProjectsServiceAccountsDisableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5070,7 +4784,7 @@ func (c *ProjectsServiceAccountsDisableCall) Header() http.Header { func (c *ProjectsServiceAccountsDisableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5134,7 +4848,7 @@ func (c *ProjectsServiceAccountsDisableCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "DisableServiceAccount is currently in the alpha launch stage.\n\nDisables a ServiceAccount,\nwhich immediately prevents the service account from authenticating and\ngaining access to APIs.\n\nDisabled service accounts can be safely restored by using\nEnableServiceAccount at any point. Deleted service accounts cannot be\nrestored using this method.\n\nDisabling a service account that is bound to VMs, Apps, Functions, or\nother jobs will cause those jobs to lose access to resources if they are\nusing the disabled service account.\n\nTo improve reliability of your services and avoid unexpected outages, it\nis recommended to first disable a service account rather than delete it.\nAfter disabling the service account, wait at least 24 hours to verify there\nare no unintended consequences, and then delete the service account.", + // "description": "Disables a ServiceAccount immediately. If an application uses the service account to authenticate, that application can no longer call Google APIs or access Google Cloud resources. Existing access tokens for the service account are rejected, and requests for new access tokens will fail. To re-enable the service account, use EnableServiceAccount. After you re-enable the service account, its existing access tokens will be accepted, and you can request new access tokens. To help avoid unplanned outages, we recommend that you disable the service account before you delete it. Use this method to disable the service account, then wait at least 24 hours and watch for unintended consequences. 
If there are no unintended consequences, you can delete the service account with DeleteServiceAccount.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:disable", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.disable", @@ -5143,7 +4857,7 @@ func (c *ProjectsServiceAccountsDisableCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -5175,21 +4889,12 @@ type ProjectsServiceAccountsEnableCall struct { header_ http.Header } -// Enable: EnableServiceAccount is currently in the alpha launch -// stage. -// -// Restores a disabled ServiceAccount -// that has been manually disabled by using DisableServiceAccount. -// Service -// accounts that have been disabled by other means or for other -// reasons, -// such as abuse, cannot be restored using this method. -// -// EnableServiceAccount will have no effect on a service account that -// is -// not disabled. Enabling an already enabled service account will have -// no -// effect. +// Enable: Enables a ServiceAccount that was disabled by +// DisableServiceAccount. If the service account is already enabled, +// then this method has no effect. If the service account was disabled +// by other means—for example, if Google disabled the service account +// because it was compromised—you cannot use this method to enable the +// service account. func (r *ProjectsServiceAccountsService) Enable(name string, enableserviceaccountrequest *EnableServiceAccountRequest) *ProjectsServiceAccountsEnableCall { c := &ProjectsServiceAccountsEnableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5224,7 +4929,7 @@ func (c *ProjectsServiceAccountsEnableCall) Header() http.Header { func (c *ProjectsServiceAccountsEnableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5288,7 +4993,7 @@ func (c *ProjectsServiceAccountsEnableCall) Do(opts ...googleapi.CallOption) (*E } return ret, nil // { - // "description": "EnableServiceAccount is currently in the alpha launch stage.\n\n Restores a disabled ServiceAccount\n that has been manually disabled by using DisableServiceAccount. Service\n accounts that have been disabled by other means or for other reasons,\n such as abuse, cannot be restored using this method.\n\n EnableServiceAccount will have no effect on a service account that is\n not disabled. Enabling an already enabled service account will have no\n effect.", + // "description": "Enables a ServiceAccount that was disabled by DisableServiceAccount. 
If the service account is already enabled, then this method has no effect. If the service account was disabled by other means—for example, if Google disabled the service account because it was compromised—you cannot use this method to enable the service account.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:enable", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.enable", @@ -5297,7 +5002,7 @@ func (c *ProjectsServiceAccountsEnableCall) Do(opts ...googleapi.CallOption) (*E // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -5373,7 +5078,7 @@ func (c *ProjectsServiceAccountsGetCall) Header() http.Header { func (c *ProjectsServiceAccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5444,7 +5149,7 @@ func (c *ProjectsServiceAccountsGetCall) Do(opts ...googleapi.CallOption) (*Serv // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "Required. The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -5472,31 +5177,15 @@ type ProjectsServiceAccountsGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Returns the Cloud IAM access control policy for -// a -// ServiceAccount. -// -// Note: Service accounts are both -// [resources -// and -// identities](/iam/docs/service-accounts#service_account_permissions -// ). This -// method treats the service account as a resource. It returns the Cloud -// IAM -// policy that reflects what members have access to the service -// account. -// -// This method does not return what resources the service account has -// access -// to. To see if a service account has access to a resource, call -// the -// `getIamPolicy` method on the target resource. For example, to view -// grants -// for a project, call -// the -// [projects.getIamPolicy](/resource-manager/reference/rest/v1/projec -// ts/getIamPolicy) -// method. 
+// GetIamPolicy: Gets the IAM policy that is attached to a +// ServiceAccount. This IAM policy specifies which members have access +// to the service account. This method does not tell you whether the +// service account has been granted any roles on other resources. To +// check whether a service account has role grants on a resource, use +// the `getIamPolicy` method for that resource. For example, to view the +// role grants for a project, call the Resource Manager API's +// [`projects.getIamPolicy`](https://cloud.google.com/resource-manager/re +// ference/rest/v1/projects/getIamPolicy) method. func (r *ProjectsServiceAccountsService) GetIamPolicy(resource string) *ProjectsServiceAccountsGetIamPolicyCall { c := &ProjectsServiceAccountsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5505,17 +5194,14 @@ func (r *ProjectsServiceAccountsService) GetIamPolicy(resource string) *Projects // OptionsRequestedPolicyVersion sets the optional parameter // "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. -// -// Requests for policies with any conditional bindings must specify -// version 3. -// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. +// returned. Valid values are 0, 1, and 3. Requests specifying an +// invalid value will be rejected. Requests for policies with any +// conditional bindings must specify version 3. Policies without any +// conditional bindings may specify any valid value or leave the field +// unset. To learn which resources support conditions in their IAM +// policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). func (c *ProjectsServiceAccountsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsServiceAccountsGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -5548,7 +5234,7 @@ func (c *ProjectsServiceAccountsGetIamPolicyCall) Header() http.Header { func (c *ProjectsServiceAccountsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5607,7 +5293,7 @@ func (c *ProjectsServiceAccountsGetIamPolicyCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns the Cloud IAM access control policy for a\nServiceAccount.\n\nNote: Service accounts are both\n[resources and\nidentities](/iam/docs/service-accounts#service_account_permissions). This\nmethod treats the service account as a resource. It returns the Cloud IAM\npolicy that reflects what members have access to the service account.\n\nThis method does not return what resources the service account has access\nto. To see if a service account has access to a resource, call the\n`getIamPolicy` method on the target resource. For example, to view grants\nfor a project, call the\n[projects.getIamPolicy](/resource-manager/reference/rest/v1/projects/getIamPolicy)\nmethod.", + // "description": "Gets the IAM policy that is attached to a ServiceAccount. This IAM policy specifies which members have access to the service account. 
This method does not tell you whether the service account has been granted any roles on other resources. To check whether a service account has role grants on a resource, use the `getIamPolicy` method for that resource. For example, to view the role grants for a project, call the Resource Manager API's [`projects.getIamPolicy`](https://cloud.google.com/resource-manager/reference/rest/v1/projects/getIamPolicy) method.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:getIamPolicy", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.getIamPolicy", @@ -5616,13 +5302,13 @@ func (c *ProjectsServiceAccountsGetIamPolicyCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.", + // "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" // }, // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -5651,7 +5337,7 @@ type ProjectsServiceAccountsListCall struct { header_ http.Header } -// List: Lists ServiceAccounts for a project. +// List: Lists every ServiceAccount that belongs to a specific project. func (r *ProjectsServiceAccountsService) List(name string) *ProjectsServiceAccountsListCall { c := &ProjectsServiceAccountsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5659,19 +5345,17 @@ func (r *ProjectsServiceAccountsService) List(name string) *ProjectsServiceAccou } // PageSize sets the optional parameter "pageSize": Optional limit on -// the number of service accounts to include in the -// response. Further accounts can subsequently be obtained by including -// the -// ListServiceAccountsResponse.next_page_token -// in a subsequent request. +// the number of service accounts to include in the response. Further +// accounts can subsequently be obtained by including the +// ListServiceAccountsResponse.next_page_token in a subsequent request. +// The default is 20, and the maximum is 100. func (c *ProjectsServiceAccountsListCall) PageSize(pageSize int64) *ProjectsServiceAccountsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Optional -// pagination token returned in an -// earlier +// pagination token returned in an earlier // ListServiceAccountsResponse.next_page_token. 
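// Usage sketch (editorial aside, not part of the generated client):
// enumerating every service account in a project, letting the Pages helper
// follow ListServiceAccountsResponse.next_page_token. Assumes imports of
// context, log, and iam "google.golang.org/api/iam/v1"; "my-project" is a
// placeholder.
//
//	ctx := context.Background()
//	svc, err := iam.NewService(ctx)
//	if err != nil {
//		log.Fatal(err)
//	}
//	req := svc.Projects.ServiceAccounts.List("projects/my-project").PageSize(100) // default 20, maximum 100
//	err = req.Pages(ctx, func(page *iam.ListServiceAccountsResponse) error {
//		for _, sa := range page.Accounts {
//			log.Printf("%s (%s)", sa.Email, sa.UniqueId)
//		}
//		return nil
//	})
//	if err != nil {
//		log.Fatal(err)
//	}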
func (c *ProjectsServiceAccountsListCall) PageToken(pageToken string) *ProjectsServiceAccountsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -5715,7 +5399,7 @@ func (c *ProjectsServiceAccountsListCall) Header() http.Header { func (c *ProjectsServiceAccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5777,7 +5461,7 @@ func (c *ProjectsServiceAccountsListCall) Do(opts ...googleapi.CallOption) (*Lis } return ret, nil // { - // "description": "Lists ServiceAccounts for a project.", + // "description": "Lists every ServiceAccount that belongs to a specific project.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts", // "httpMethod": "GET", // "id": "iam.projects.serviceAccounts.list", @@ -5786,20 +5470,20 @@ func (c *ProjectsServiceAccountsListCall) Do(opts ...googleapi.CallOption) (*Lis // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the project associated with the service\naccounts, such as `projects/my-project-123`.", + // "description": "Required. The resource name of the project associated with the service accounts, such as `projects/my-project-123`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" // }, // "pageSize": { - // "description": "Optional limit on the number of service accounts to include in the\nresponse. Further accounts can subsequently be obtained by including the\nListServiceAccountsResponse.next_page_token\nin a subsequent request.", + // "description": "Optional limit on the number of service accounts to include in the response. Further accounts can subsequently be obtained by including the ListServiceAccountsResponse.next_page_token in a subsequent request. The default is 20, and the maximum is 100.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Optional pagination token returned in an earlier\nListServiceAccountsResponse.next_page_token.", + // "description": "Optional pagination token returned in an earlier ListServiceAccountsResponse.next_page_token.", // "location": "query", // "type": "string" // } @@ -5848,15 +5532,6 @@ type ProjectsServiceAccountsPatchCall struct { } // Patch: Patches a ServiceAccount. -// -// Currently, only the following fields are updatable: -// `display_name` and `description`. -// -// Only fields specified in the request are guaranteed to be returned -// in -// the response. Other fields in the response may be empty. -// -// Note: The field mask is required. 
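// Usage sketch (editorial aside, not part of the generated client): updating a
// service account's display name via Patch, which per the removed comment text
// above accepts changes only to display_name and description and requires a
// field mask. The PatchServiceAccountRequest field names (ServiceAccount,
// UpdateMask) follow this package's generated types; the resource name is a
// placeholder, and svc is an *iam.Service from iam.NewService(ctx).
//
//	name := "projects/my-project/serviceAccounts/build-bot@my-project.iam.gserviceaccount.com"
//	req := &iam.PatchServiceAccountRequest{
//		ServiceAccount: &iam.ServiceAccount{
//			Name:        name,
//			DisplayName: "Build bot (CI)",
//		},
//		UpdateMask: "display_name",
//	}
//	sa, err := svc.Projects.ServiceAccounts.Patch(name, req).Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("updated %s", sa.Email)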
func (r *ProjectsServiceAccountsService) Patch(name string, patchserviceaccountrequest *PatchServiceAccountRequest) *ProjectsServiceAccountsPatchCall { c := &ProjectsServiceAccountsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5891,7 +5566,7 @@ func (c *ProjectsServiceAccountsPatchCall) Header() http.Header { func (c *ProjectsServiceAccountsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5955,7 +5630,7 @@ func (c *ProjectsServiceAccountsPatchCall) Do(opts ...googleapi.CallOption) (*Se } return ret, nil // { - // "description": "Patches a ServiceAccount.\n\nCurrently, only the following fields are updatable:\n`display_name` and `description`.\n\nOnly fields specified in the request are guaranteed to be returned in\nthe response. Other fields in the response may be empty.\n\nNote: The field mask is required.", + // "description": "Patches a ServiceAccount.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", // "httpMethod": "PATCH", // "id": "iam.projects.serviceAccounts.patch", @@ -5964,7 +5639,7 @@ func (c *ProjectsServiceAccountsPatchCall) Do(opts ...googleapi.CallOption) (*Se // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\n\nRequests using `-` as a wildcard for the `PROJECT_ID` will infer the\nproject from the `account` and the `ACCOUNT` value can be the `email`\naddress or the `unique_id` of the service account.\n\nIn responses the resource name will always be in the format\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.", + // "description": "The resource name of the service account. Use one of the following formats: * `projects/{PROJECT_ID}/serviceAccounts/{EMAIL_ADDRESS}` * `projects/{PROJECT_ID}/serviceAccounts/{UNIQUE_ID}` As an alternative, you can use the `-` wildcard character instead of the project ID: * `projects/-/serviceAccounts/{EMAIL_ADDRESS}` * `projects/-/serviceAccounts/{UNIQUE_ID}` When possible, avoid using the `-` wildcard character, because it can cause response messages to contain misleading error codes. For example, if you try to get the service account `projects/-/serviceAccounts/fake@example.com`, which does not exist, the response contains an HTTP `403 Forbidden` error instead of a `404 Not Found` error.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -5996,33 +5671,19 @@ type ProjectsServiceAccountsSetIamPolicyCall struct { header_ http.Header } -// SetIamPolicy: Sets the Cloud IAM access control policy for -// a -// ServiceAccount. -// -// Note: Service accounts are both -// [resources -// and -// identities](/iam/docs/service-accounts#service_account_permissions -// ). This -// method treats the service account as a resource. Use it to grant -// members -// access to the service account, such as when they need to impersonate -// it. -// -// This method does not grant the service account access to other -// resources, -// such as projects. 
To grant a service account access to resources, -// include -// the service account in the Cloud IAM policy for the desired resource, -// then -// call the appropriate `setIamPolicy` method on the target resource. -// For -// example, to grant a service account access to a project, call -// the -// [projects.setIamPolicy](/resource-manager/reference/rest/v1/projec -// ts/setIamPolicy) -// method. +// SetIamPolicy: Sets the IAM policy that is attached to a +// ServiceAccount. Use this method to grant or revoke access to the +// service account. For example, you could grant a member the ability to +// impersonate the service account. This method does not enable the +// service account to access other resources. To grant roles to a +// service account on a resource, follow these steps: 1. Call the +// resource's `getIamPolicy` method to get its current IAM policy. 2. +// Edit the policy so that it binds the service account to an IAM role +// for the resource. 3. Call the resource's `setIamPolicy` method to +// update its IAM policy. For detailed instructions, see [Granting roles +// to a service account for specific +// resources](https://cloud.google.com/iam/help/service-accounts/granting +// -access-to-service-accounts). func (r *ProjectsServiceAccountsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsServiceAccountsSetIamPolicyCall { c := &ProjectsServiceAccountsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6057,7 +5718,7 @@ func (c *ProjectsServiceAccountsSetIamPolicyCall) Header() http.Header { func (c *ProjectsServiceAccountsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6121,7 +5782,7 @@ func (c *ProjectsServiceAccountsSetIamPolicyCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Sets the Cloud IAM access control policy for a\nServiceAccount.\n\nNote: Service accounts are both\n[resources and\nidentities](/iam/docs/service-accounts#service_account_permissions). This\nmethod treats the service account as a resource. Use it to grant members\naccess to the service account, such as when they need to impersonate it.\n\nThis method does not grant the service account access to other resources,\nsuch as projects. To grant a service account access to resources, include\nthe service account in the Cloud IAM policy for the desired resource, then\ncall the appropriate `setIamPolicy` method on the target resource. For\nexample, to grant a service account access to a project, call the\n[projects.setIamPolicy](/resource-manager/reference/rest/v1/projects/setIamPolicy)\nmethod.", + // "description": "Sets the IAM policy that is attached to a ServiceAccount. Use this method to grant or revoke access to the service account. For example, you could grant a member the ability to impersonate the service account. This method does not enable the service account to access other resources. To grant roles to a service account on a resource, follow these steps: 1. Call the resource's `getIamPolicy` method to get its current IAM policy. 2. Edit the policy so that it binds the service account to an IAM role for the resource. 3. Call the resource's `setIamPolicy` method to update its IAM policy. 
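// Usage sketch (editorial aside, not part of the generated client): a
// read-modify-write update of the policy attached to a service account,
// granting a member permission to impersonate it. Assumes imports of context,
// log, and iam "google.golang.org/api/iam/v1"; the member, project ID, and
// account email are placeholders.
//
//	ctx := context.Background()
//	svc, err := iam.NewService(ctx)
//	if err != nil {
//		log.Fatal(err)
//	}
//	resource := "projects/my-project/serviceAccounts/build-bot@my-project.iam.gserviceaccount.com"
//	policy, err := svc.Projects.ServiceAccounts.GetIamPolicy(resource).
//		OptionsRequestedPolicyVersion(3). // version 3 is required when conditional bindings exist
//		Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	policy.Bindings = append(policy.Bindings, &iam.Binding{
//		Role:    "roles/iam.serviceAccountUser",
//		Members: []string{"user:alice@example.com"},
//	})
//	if _, err := svc.Projects.ServiceAccounts.SetIamPolicy(resource,
//		&iam.SetIamPolicyRequest{Policy: policy}).Do(); err != nil {
//		log.Fatal(err)
//	}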
For detailed instructions, see [Granting roles to a service account for specific resources](https://cloud.google.com/iam/help/service-accounts/granting-access-to-service-accounts).", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:setIamPolicy", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.setIamPolicy", @@ -6130,7 +5791,7 @@ func (c *ProjectsServiceAccountsSetIamPolicyCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -6162,15 +5823,15 @@ type ProjectsServiceAccountsSignBlobCall struct { header_ http.Header } -// SignBlob: **Note**: This method is in the process of being -// deprecated. Call -// the -// [`signBlob()`](/iam/credentials/reference/rest/v1/projects.service -// Accounts/signBlob) -// method of the Cloud IAM Service Account Credentials API -// instead. -// -// Signs a blob using a service account's system-managed private key. +// SignBlob: **Note:** This method is deprecated and will stop working +// on July 1, 2021. Use the +// [`signBlob`](https://cloud.google.com/iam/help/rest-credentials/v1/pro +// jects.serviceAccounts/signBlob) method in the IAM Service Account +// Credentials API instead. If you currently use this method, see the +// [migration +// guide](https://cloud.google.com/iam/help/credentials/migrate-api) for +// instructions. Signs a blob using the system-managed private key for a +// ServiceAccount. func (r *ProjectsServiceAccountsService) SignBlob(name string, signblobrequest *SignBlobRequest) *ProjectsServiceAccountsSignBlobCall { c := &ProjectsServiceAccountsSignBlobCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -6205,7 +5866,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) Header() http.Header { func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6269,7 +5930,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "**Note**: This method is in the process of being deprecated. Call the\n[`signBlob()`](/iam/credentials/reference/rest/v1/projects.serviceAccounts/signBlob)\nmethod of the Cloud IAM Service Account Credentials API instead.\n\nSigns a blob using a service account's system-managed private key.", + // "description": "**Note:** This method is deprecated and will stop working on July 1, 2021. Use the [`signBlob`](https://cloud.google.com/iam/help/rest-credentials/v1/projects.serviceAccounts/signBlob) method in the IAM Service Account Credentials API instead. If you currently use this method, see the [migration guide](https://cloud.google.com/iam/help/credentials/migrate-api) for instructions. 
Signs a blob using the system-managed private key for a ServiceAccount.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.signBlob", @@ -6278,7 +5939,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "Required. Deprecated. [Migrate to Service Account Credentials API](https://cloud.google.com/iam/help/credentials/migrate-api). The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -6310,21 +5971,15 @@ type ProjectsServiceAccountsSignJwtCall struct { header_ http.Header } -// SignJwt: **Note**: This method is in the process of being deprecated. -// Call -// the -// [`signJwt()`](/iam/credentials/reference/rest/v1/projects.serviceA -// ccounts/signJwt) -// method of the Cloud IAM Service Account Credentials API -// instead. -// -// Signs a JWT using a service account's system-managed private key. -// -// If no expiry time (`exp`) is provided in the `SignJwtRequest`, IAM -// sets an -// an expiry time of one hour by default. If you request an expiry time -// of -// more than one hour, the request will fail. +// SignJwt: **Note:** This method is deprecated and will stop working on +// July 1, 2021. Use the +// [`signJwt`](https://cloud.google.com/iam/help/rest-credentials/v1/proj +// ects.serviceAccounts/signJwt) method in the IAM Service Account +// Credentials API instead. If you currently use this method, see the +// [migration +// guide](https://cloud.google.com/iam/help/credentials/migrate-api) for +// instructions. Signs a JSON Web Token (JWT) using the system-managed +// private key for a ServiceAccount. func (r *ProjectsServiceAccountsService) SignJwt(name string, signjwtrequest *SignJwtRequest) *ProjectsServiceAccountsSignJwtCall { c := &ProjectsServiceAccountsSignJwtCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -6359,7 +6014,7 @@ func (c *ProjectsServiceAccountsSignJwtCall) Header() http.Header { func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6423,7 +6078,7 @@ func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "**Note**: This method is in the process of being deprecated. 
Call the\n[`signJwt()`](/iam/credentials/reference/rest/v1/projects.serviceAccounts/signJwt)\nmethod of the Cloud IAM Service Account Credentials API instead.\n\nSigns a JWT using a service account's system-managed private key.\n\nIf no expiry time (`exp`) is provided in the `SignJwtRequest`, IAM sets an\nan expiry time of one hour by default. If you request an expiry time of\nmore than one hour, the request will fail.", + // "description": "**Note:** This method is deprecated and will stop working on July 1, 2021. Use the [`signJwt`](https://cloud.google.com/iam/help/rest-credentials/v1/projects.serviceAccounts/signJwt) method in the IAM Service Account Credentials API instead. If you currently use this method, see the [migration guide](https://cloud.google.com/iam/help/credentials/migrate-api) for instructions. Signs a JSON Web Token (JWT) using the system-managed private key for a ServiceAccount.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.signJwt", @@ -6432,7 +6087,7 @@ func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "Required. Deprecated. [Migrate to Service Account Credentials API](https://cloud.google.com/iam/help/credentials/migrate-api). The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -6464,9 +6119,8 @@ type ProjectsServiceAccountsTestIamPermissionsCall struct { header_ http.Header } -// TestIamPermissions: Tests the specified permissions against the IAM -// access control policy -// for a ServiceAccount. +// TestIamPermissions: Tests whether the caller has the specified +// permissions on a ServiceAccount. 
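// Usage sketch (editorial aside, not part of the generated client): checking
// which of a set of permissions the caller holds on a service account. Assumes
// imports of context, log, and iam "google.golang.org/api/iam/v1"; the
// resource name and permission list are placeholders for whatever the caller
// needs to verify.
//
//	ctx := context.Background()
//	svc, err := iam.NewService(ctx)
//	if err != nil {
//		log.Fatal(err)
//	}
//	resource := "projects/my-project/serviceAccounts/build-bot@my-project.iam.gserviceaccount.com"
//	resp, err := svc.Projects.ServiceAccounts.TestIamPermissions(resource,
//		&iam.TestIamPermissionsRequest{
//			Permissions: []string{"iam.serviceAccounts.actAs", "iam.serviceAccounts.getIamPolicy"},
//		}).Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("granted: %v", resp.Permissions) // the subset of requested permissions the caller holds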
func (r *ProjectsServiceAccountsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsServiceAccountsTestIamPermissionsCall { c := &ProjectsServiceAccountsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6501,7 +6155,7 @@ func (c *ProjectsServiceAccountsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsServiceAccountsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6565,7 +6219,7 @@ func (c *ProjectsServiceAccountsTestIamPermissionsCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Tests the specified permissions against the IAM access control policy\nfor a ServiceAccount.", + // "description": "Tests whether the caller has the specified permissions on a ServiceAccount.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:testIamPermissions", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.testIamPermissions", @@ -6574,7 +6228,7 @@ func (c *ProjectsServiceAccountsTestIamPermissionsCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -6606,10 +6260,12 @@ type ProjectsServiceAccountsUndeleteCall struct { header_ http.Header } -// Undelete: Restores a deleted ServiceAccount. -// This is to be used as an action of last resort. A service account -// may -// not always be restorable. +// Undelete: Restores a deleted ServiceAccount. **Important:** It is not +// always possible to restore a deleted service account. Use this method +// only as a last resort. After you delete a service account, IAM +// permanently removes the service account 30 days later. There is no +// way to restore a deleted service account that has been permanently +// removed. func (r *ProjectsServiceAccountsService) Undelete(name string, undeleteserviceaccountrequest *UndeleteServiceAccountRequest) *ProjectsServiceAccountsUndeleteCall { c := &ProjectsServiceAccountsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -6644,7 +6300,7 @@ func (c *ProjectsServiceAccountsUndeleteCall) Header() http.Header { func (c *ProjectsServiceAccountsUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6708,7 +6364,7 @@ func (c *ProjectsServiceAccountsUndeleteCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Restores a deleted ServiceAccount.\nThis is to be used as an action of last resort. A service account may\nnot always be restorable.", + // "description": "Restores a deleted ServiceAccount. 
**Important:** It is not always possible to restore a deleted service account. Use this method only as a last resort. After you delete a service account, IAM permanently removes the service account 30 days later. There is no way to restore a deleted service account that has been permanently removed.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:undelete", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.undelete", @@ -6717,7 +6373,7 @@ func (c *ProjectsServiceAccountsUndeleteCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_UNIQUE_ID}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account.", + // "description": "The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_UNIQUE_ID}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -6749,14 +6405,9 @@ type ProjectsServiceAccountsUpdateCall struct { header_ http.Header } -// Update: Note: This method is in the process of being deprecated. -// Use -// PatchServiceAccount instead. -// -// Updates a ServiceAccount. -// -// Currently, only the following fields are updatable: -// `display_name` and `description`. +// Update: **Note:** We are in the process of deprecating this method. +// Use PatchServiceAccount instead. Updates a ServiceAccount. You can +// update only the `display_name` and `description` fields. func (r *ProjectsServiceAccountsService) Update(name string, serviceaccount *ServiceAccount) *ProjectsServiceAccountsUpdateCall { c := &ProjectsServiceAccountsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -6791,7 +6442,7 @@ func (c *ProjectsServiceAccountsUpdateCall) Header() http.Header { func (c *ProjectsServiceAccountsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6855,7 +6506,7 @@ func (c *ProjectsServiceAccountsUpdateCall) Do(opts ...googleapi.CallOption) (*S } return ret, nil // { - // "description": "Note: This method is in the process of being deprecated. Use\nPatchServiceAccount instead.\n\nUpdates a ServiceAccount.\n\nCurrently, only the following fields are updatable:\n`display_name` and `description`.", + // "description": "**Note:** We are in the process of deprecating this method. Use PatchServiceAccount instead. Updates a ServiceAccount. 
You can update only the `display_name` and `description` fields.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", // "httpMethod": "PUT", // "id": "iam.projects.serviceAccounts.update", @@ -6864,7 +6515,7 @@ func (c *ProjectsServiceAccountsUpdateCall) Do(opts ...googleapi.CallOption) (*S // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\n\nRequests using `-` as a wildcard for the `PROJECT_ID` will infer the\nproject from the `account` and the `ACCOUNT` value can be the `email`\naddress or the `unique_id` of the service account.\n\nIn responses the resource name will always be in the format\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.", + // "description": "The resource name of the service account. Use one of the following formats: * `projects/{PROJECT_ID}/serviceAccounts/{EMAIL_ADDRESS}` * `projects/{PROJECT_ID}/serviceAccounts/{UNIQUE_ID}` As an alternative, you can use the `-` wildcard character instead of the project ID: * `projects/-/serviceAccounts/{EMAIL_ADDRESS}` * `projects/-/serviceAccounts/{UNIQUE_ID}` When possible, avoid using the `-` wildcard character, because it can cause response messages to contain misleading error codes. For example, if you try to get the service account `projects/-/serviceAccounts/fake@example.com`, which does not exist, the response contains an HTTP `403 Forbidden` error instead of a `404 Not Found` error.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -6896,8 +6547,7 @@ type ProjectsServiceAccountsKeysCreateCall struct { header_ http.Header } -// Create: Creates a ServiceAccountKey -// and returns it. +// Create: Creates a ServiceAccountKey. func (r *ProjectsServiceAccountsKeysService) Create(name string, createserviceaccountkeyrequest *CreateServiceAccountKeyRequest) *ProjectsServiceAccountsKeysCreateCall { c := &ProjectsServiceAccountsKeysCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -6932,7 +6582,7 @@ func (c *ProjectsServiceAccountsKeysCreateCall) Header() http.Header { func (c *ProjectsServiceAccountsKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6996,7 +6646,7 @@ func (c *ProjectsServiceAccountsKeysCreateCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a ServiceAccountKey\nand returns it.", + // "description": "Creates a ServiceAccountKey.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.keys.create", @@ -7005,7 +6655,7 @@ func (c *ProjectsServiceAccountsKeysCreateCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "Required. 
The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -7036,7 +6686,9 @@ type ProjectsServiceAccountsKeysDeleteCall struct { header_ http.Header } -// Delete: Deletes a ServiceAccountKey. +// Delete: Deletes a ServiceAccountKey. Deleting a service account key +// does not revoke short-lived credentials that have been issued based +// on the service account key. func (r *ProjectsServiceAccountsKeysService) Delete(name string) *ProjectsServiceAccountsKeysDeleteCall { c := &ProjectsServiceAccountsKeysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -7070,7 +6722,7 @@ func (c *ProjectsServiceAccountsKeysDeleteCall) Header() http.Header { func (c *ProjectsServiceAccountsKeysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7129,7 +6781,7 @@ func (c *ProjectsServiceAccountsKeysDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes a ServiceAccountKey.", + // "description": "Deletes a ServiceAccountKey. Deleting a service account key does not revoke short-lived credentials that have been issued based on the service account key.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys/{keysId}", // "httpMethod": "DELETE", // "id": "iam.projects.serviceAccounts.keys.delete", @@ -7138,7 +6790,7 @@ func (c *ProjectsServiceAccountsKeysDeleteCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the service account key in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "Required. The resource name of the service account key in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+/keys/[^/]+$", // "required": true, @@ -7167,8 +6819,7 @@ type ProjectsServiceAccountsKeysGetCall struct { header_ http.Header } -// Get: Gets the ServiceAccountKey -// by key id. +// Get: Gets a ServiceAccountKey. func (r *ProjectsServiceAccountsKeysService) Get(name string) *ProjectsServiceAccountsKeysGetCall { c := &ProjectsServiceAccountsKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -7176,13 +6827,13 @@ func (r *ProjectsServiceAccountsKeysService) Get(name string) *ProjectsServiceAc } // PublicKeyType sets the optional parameter "publicKeyType": The output -// format of the public key requested. -// X509_PEM is the default output format. +// format of the public key requested. 
X509_PEM is the default output +// format. // // Possible values: -// "TYPE_NONE" -// "TYPE_X509_PEM_FILE" -// "TYPE_RAW_PUBLIC_KEY" +// "TYPE_NONE" - Unspecified. Returns nothing here. +// "TYPE_X509_PEM_FILE" - X509 PEM format. +// "TYPE_RAW_PUBLIC_KEY" - Raw public key. func (c *ProjectsServiceAccountsKeysGetCall) PublicKeyType(publicKeyType string) *ProjectsServiceAccountsKeysGetCall { c.urlParams_.Set("publicKeyType", publicKeyType) return c @@ -7225,7 +6876,7 @@ func (c *ProjectsServiceAccountsKeysGetCall) Header() http.Header { func (c *ProjectsServiceAccountsKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7287,7 +6938,7 @@ func (c *ProjectsServiceAccountsKeysGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Gets the ServiceAccountKey\nby key id.", + // "description": "Gets a ServiceAccountKey.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys/{keysId}", // "httpMethod": "GET", // "id": "iam.projects.serviceAccounts.keys.get", @@ -7296,19 +6947,24 @@ func (c *ProjectsServiceAccountsKeysGetCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the service account key in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`.\n\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "Required. The resource name of the service account key in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+/keys/[^/]+$", // "required": true, // "type": "string" // }, // "publicKeyType": { - // "description": "The output format of the public key requested.\nX509_PEM is the default output format.", + // "description": "The output format of the public key requested. X509_PEM is the default output format.", // "enum": [ // "TYPE_NONE", // "TYPE_X509_PEM_FILE", // "TYPE_RAW_PUBLIC_KEY" // ], + // "enumDescriptions": [ + // "Unspecified. Returns nothing here.", + // "X509 PEM format.", + // "Raw public key." + // ], // "location": "query", // "type": "string" // } @@ -7335,7 +6991,7 @@ type ProjectsServiceAccountsKeysListCall struct { header_ http.Header } -// List: Lists ServiceAccountKeys. +// List: Lists every ServiceAccountKey for a service account. func (r *ProjectsServiceAccountsKeysService) List(name string) *ProjectsServiceAccountsKeysListCall { c := &ProjectsServiceAccountsKeysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -7343,14 +6999,17 @@ func (r *ProjectsServiceAccountsKeysService) List(name string) *ProjectsServiceA } // KeyTypes sets the optional parameter "keyTypes": Filters the types of -// keys the user wants to include in the list -// response. Duplicate key types are not allowed. If no key type -// is provided, all keys are returned. +// keys the user wants to include in the list response. 
Duplicate key +// types are not allowed. If no key type is provided, all keys are +// returned. // // Possible values: -// "KEY_TYPE_UNSPECIFIED" -// "USER_MANAGED" -// "SYSTEM_MANAGED" +// "KEY_TYPE_UNSPECIFIED" - Unspecified key type. The presence of this +// in the message will immediately result in an error. +// "USER_MANAGED" - User-managed keys (managed and rotated by the +// user). +// "SYSTEM_MANAGED" - System-managed keys (managed and rotated by +// Google). func (c *ProjectsServiceAccountsKeysListCall) KeyTypes(keyTypes ...string) *ProjectsServiceAccountsKeysListCall { c.urlParams_.SetMulti("keyTypes", append([]string{}, keyTypes...)) return c @@ -7393,7 +7052,7 @@ func (c *ProjectsServiceAccountsKeysListCall) Header() http.Header { func (c *ProjectsServiceAccountsKeysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7455,7 +7114,7 @@ func (c *ProjectsServiceAccountsKeysListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Lists ServiceAccountKeys.", + // "description": "Lists every ServiceAccountKey for a service account.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys", // "httpMethod": "GET", // "id": "iam.projects.serviceAccounts.keys.list", @@ -7464,18 +7123,23 @@ func (c *ProjectsServiceAccountsKeysListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "keyTypes": { - // "description": "Filters the types of keys the user wants to include in the list\nresponse. Duplicate key types are not allowed. If no key type\nis provided, all keys are returned.", + // "description": "Filters the types of keys the user wants to include in the list response. Duplicate key types are not allowed. If no key type is provided, all keys are returned.", // "enum": [ // "KEY_TYPE_UNSPECIFIED", // "USER_MANAGED", // "SYSTEM_MANAGED" // ], + // "enumDescriptions": [ + // "Unspecified key type. The presence of this in the message will immediately result in an error.", + // "User-managed keys (managed and rotated by the user).", + // "System-managed keys (managed and rotated by Google)." + // ], // "location": "query", // "repeated": true, // "type": "string" // }, // "name": { - // "description": "Required. The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\n\nUsing `-` as a wildcard for the `PROJECT_ID`, will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "Required. The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID`, will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -7504,10 +7168,8 @@ type ProjectsServiceAccountsKeysUploadCall struct { header_ http.Header } -// Upload: Upload public key for a given service account. -// This rpc will create a -// ServiceAccountKey that has the -// provided public key and returns it. 
+// Upload: Creates a ServiceAccountKey, using a public key that you +// provide. func (r *ProjectsServiceAccountsKeysService) Upload(name string, uploadserviceaccountkeyrequest *UploadServiceAccountKeyRequest) *ProjectsServiceAccountsKeysUploadCall { c := &ProjectsServiceAccountsKeysUploadCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -7542,7 +7204,7 @@ func (c *ProjectsServiceAccountsKeysUploadCall) Header() http.Header { func (c *ProjectsServiceAccountsKeysUploadCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7606,7 +7268,7 @@ func (c *ProjectsServiceAccountsKeysUploadCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Upload public key for a given service account.\nThis rpc will create a\nServiceAccountKey that has the\nprovided public key and returns it.", + // "description": "Creates a ServiceAccountKey, using a public key that you provide.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys:upload", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.keys.upload", @@ -7615,7 +7277,7 @@ func (c *ProjectsServiceAccountsKeysUploadCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "The resource name of the service account in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -7647,7 +7309,7 @@ type RolesGetCall struct { header_ http.Header } -// Get: Gets a Role definition. +// Get: Gets the definition of a Role. 
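// Hedged usage sketch: how a caller might exercise the iam/v1 service-account key
// methods regenerated in this vendor bump (Keys.List with the KeyTypes filter, and
// the `-` project wildcard described in the updated descriptions). The project and
// service-account email are placeholders, not values from this repository.
package main

import (
	"context"
	"fmt"
	"log"

	iam "google.golang.org/api/iam/v1"
)

func main() {
	ctx := context.Background()

	// NewService uses Application Default Credentials unless options override them.
	svc, err := iam.NewService(ctx)
	if err != nil {
		log.Fatalf("iam.NewService: %v", err)
	}

	// Using `-` as the project ID infers the project from the account.
	name := "projects/-/serviceAccounts/example-sa@example-project.iam.gserviceaccount.com"

	// List only user-managed keys; system-managed keys are rotated by Google.
	resp, err := svc.Projects.ServiceAccounts.Keys.List(name).
		KeyTypes("USER_MANAGED").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("Keys.List: %v", err)
	}
	for _, key := range resp.Keys {
		fmt.Printf("%s valid until %s\n", key.Name, key.ValidBeforeTime)
	}
}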
func (r *RolesService) Get(name string) *RolesGetCall { c := &RolesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -7691,7 +7353,7 @@ func (c *RolesGetCall) Header() http.Header { func (c *RolesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7753,7 +7415,7 @@ func (c *RolesGetCall) Do(opts ...googleapi.CallOption) (*Role, error) { } return ret, nil // { - // "description": "Gets a Role definition.", + // "description": "Gets the definition of a Role.", // "flatPath": "v1/roles/{rolesId}", // "httpMethod": "GET", // "id": "iam.roles.get", @@ -7762,7 +7424,7 @@ func (c *RolesGetCall) Do(opts ...googleapi.CallOption) (*Role, error) { // ], // "parameters": { // "name": { - // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`.\n This method returns results from all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles/{ROLE_NAME}`\n\n* [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n returns only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + // "description": "The `name` parameter's value depends on the target resource for the request, namely [`roles`](/iam/reference/rest/v1/roles), [`projects`](/iam/reference/rest/v1/projects.roles), or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`. This method returns results from all [predefined roles](/iam/docs/understanding-roles#predefined_roles) in Cloud IAM. Example request URL: `https://iam.googleapis.com/v1/roles/{ROLE_NAME}` * [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. 
This method returns only [custom roles](/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", // "location": "path", // "pattern": "^roles/[^/]+$", // "required": true, @@ -7790,14 +7452,16 @@ type RolesListCall struct { header_ http.Header } -// List: Lists the Roles defined on a resource. +// List: Lists every predefined Role that IAM supports, or every custom +// role that is defined for an organization or project. func (r *RolesService) List() *RolesListCall { c := &RolesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c } // PageSize sets the optional parameter "pageSize": Optional limit on -// the number of roles to include in the response. +// the number of roles to include in the response. The default is 300, +// and the maximum is 1,000. func (c *RolesListCall) PageSize(pageSize int64) *RolesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c @@ -7811,47 +7475,27 @@ func (c *RolesListCall) PageToken(pageToken string) *RolesListCall { } // Parent sets the optional parameter "parent": The `parent` parameter's -// value depends on the target resource for the -// request, -// namely +// value depends on the target resource for the request, namely // [`roles`](/iam/reference/rest/v1/roles), -// [`projects`](/iam/refe -// rence/rest/v1/projects.roles), -// or -// [`organizations`](/iam/reference/rest/v1/organizations.roles). -// Each -// resource type's `parent` value format is described below: -// -// * [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty -// string. -// This method doesn't require a resource; it simply returns all -// [predefined roles](/iam/docs/understanding-roles#predefined_roles) -// in -// Cloud IAM. Example request URL: -// `https://iam.googleapis.com/v1/roles` -// +// [`projects`](/iam/reference/rest/v1/projects.roles), or +// [`organizations`](/iam/reference/rest/v1/organizations.roles). Each +// resource type's `parent` value format is described below: * +// [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string. +// This method doesn't require a resource; it simply returns all +// [predefined roles](/iam/docs/understanding-roles#predefined_roles) in +// Cloud IAM. Example request URL: `https://iam.googleapis.com/v1/roles` // * // [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list): -// -// `projects/{PROJECT_ID}`. This method lists all project-level -// [custom roles](/iam/docs/understanding-custom-roles). -// Example request URL: -// `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` -// -// * +// `projects/{PROJECT_ID}`. This method lists all project-level [custom +// roles](/iam/docs/understanding-custom-roles). Example request URL: +// `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * // [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.ro -// les/list): -// `organizations/{ORGANIZATION_ID}`. This method lists all -// organization-level [custom -// roles](/iam/docs/understanding-custom-roles). -// Example request URL: -// +// les/list): `organizations/{ORGANIZATION_ID}`. This method lists all +// organization-level [custom +// roles](/iam/docs/understanding-custom-roles). 
Example request URL: // `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles` -// -// // Note: Wildcard (*) values are invalid; you must specify a complete -// project -// ID or organization ID. +// project ID or organization ID. func (c *RolesListCall) Parent(parent string) *RolesListCall { c.urlParams_.Set("parent", parent) return c @@ -7865,16 +7509,15 @@ func (c *RolesListCall) ShowDeleted(showDeleted bool) *RolesListCall { } // View sets the optional parameter "view": Optional view for the -// returned Role objects. When `FULL` is specified, -// the `includedPermissions` field is returned, which includes a list of -// all -// permissions in the role. The default value is `BASIC`, which does -// not +// returned Role objects. When `FULL` is specified, the +// `includedPermissions` field is returned, which includes a list of all +// permissions in the role. The default value is `BASIC`, which does not // return the `includedPermissions` field. // // Possible values: -// "BASIC" -// "FULL" +// "BASIC" - Omits the `included_permissions` field. This is the +// default value. +// "FULL" - Returns all fields. func (c *RolesListCall) View(view string) *RolesListCall { c.urlParams_.Set("view", view) return c @@ -7917,7 +7560,7 @@ func (c *RolesListCall) Header() http.Header { func (c *RolesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7976,14 +7619,14 @@ func (c *RolesListCall) Do(opts ...googleapi.CallOption) (*ListRolesResponse, er } return ret, nil // { - // "description": "Lists the Roles defined on a resource.", + // "description": "Lists every predefined Role that IAM supports, or every custom role that is defined for an organization or project.", // "flatPath": "v1/roles", // "httpMethod": "GET", // "id": "iam.roles.list", // "parameterOrder": [], // "parameters": { // "pageSize": { - // "description": "Optional limit on the number of roles to include in the response.", + // "description": "Optional limit on the number of roles to include in the response. The default is 300, and the maximum is 1,000.", // "format": "int32", // "location": "query", // "type": "integer" @@ -7994,7 +7637,7 @@ func (c *RolesListCall) Do(opts ...googleapi.CallOption) (*ListRolesResponse, er // "type": "string" // }, // "parent": { - // "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string.\n This method doesn't require a resource; it simply returns all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles`\n\n* [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list):\n `projects/{PROJECT_ID}`. 
This method lists all project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list):\n `organizations/{ORGANIZATION_ID}`. This method lists all\n organization-level [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", + // "description": "The `parent` parameter's value depends on the target resource for the request, namely [`roles`](/iam/reference/rest/v1/roles), [`projects`](/iam/reference/rest/v1/projects.roles), or [`organizations`](/iam/reference/rest/v1/organizations.roles). Each resource type's `parent` value format is described below: * [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string. This method doesn't require a resource; it simply returns all [predefined roles](/iam/docs/understanding-roles#predefined_roles) in Cloud IAM. Example request URL: `https://iam.googleapis.com/v1/roles` * [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list): `projects/{PROJECT_ID}`. This method lists all project-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list): `organizations/{ORGANIZATION_ID}`. This method lists all organization-level [custom roles](/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", // "location": "query", // "type": "string" // }, @@ -8004,11 +7647,15 @@ func (c *RolesListCall) Do(opts ...googleapi.CallOption) (*ListRolesResponse, er // "type": "boolean" // }, // "view": { - // "description": "Optional view for the returned Role objects. When `FULL` is specified,\nthe `includedPermissions` field is returned, which includes a list of all\npermissions in the role. The default value is `BASIC`, which does not\nreturn the `includedPermissions` field.", + // "description": "Optional view for the returned Role objects. When `FULL` is specified, the `includedPermissions` field is returned, which includes a list of all permissions in the role. The default value is `BASIC`, which does not return the `includedPermissions` field.", // "enum": [ // "BASIC", // "FULL" // ], + // "enumDescriptions": [ + // "Omits the `included_permissions` field. This is the default value.", + // "Returns all fields." + // ], // "location": "query", // "type": "string" // } @@ -8055,11 +7702,9 @@ type RolesQueryGrantableRolesCall struct { header_ http.Header } -// QueryGrantableRoles: Queries roles that can be granted on a -// particular resource. -// A role is grantable if it can be used as the role in a binding for a -// policy -// for that resource. +// QueryGrantableRoles: Lists roles that can be granted on a Google +// Cloud resource. A role is grantable if the IAM policy for the +// resource can contain bindings to the role. 
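// Hedged usage sketch: listing roles and querying grantable roles with the
// regenerated iam/v1 client, matching the updated Roles.List and
// QueryGrantableRoles descriptions above. The full resource name is a placeholder.
package main

import (
	"context"
	"fmt"
	"log"

	iam "google.golang.org/api/iam/v1"
)

func main() {
	ctx := context.Background()
	svc, err := iam.NewService(ctx)
	if err != nil {
		log.Fatalf("iam.NewService: %v", err)
	}

	// With no parent set, Roles.List returns predefined roles; per the new docs the
	// page size defaults to 300 and is capped at 1,000.
	roles, err := svc.Roles.List().PageSize(10).Context(ctx).Do()
	if err != nil {
		log.Fatalf("Roles.List: %v", err)
	}
	for _, r := range roles.Roles {
		fmt.Println(r.Name, "-", r.Title)
	}

	// QueryGrantableRoles takes the full resource name of the target resource.
	grantable, err := svc.Roles.QueryGrantableRoles(&iam.QueryGrantableRolesRequest{
		FullResourceName: "//cloudresourcemanager.googleapis.com/projects/example-project",
	}).Context(ctx).Do()
	if err != nil {
		log.Fatalf("QueryGrantableRoles: %v", err)
	}
	fmt.Println("grantable roles:", len(grantable.Roles))
}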
func (r *RolesService) QueryGrantableRoles(querygrantablerolesrequest *QueryGrantableRolesRequest) *RolesQueryGrantableRolesCall { c := &RolesQueryGrantableRolesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.querygrantablerolesrequest = querygrantablerolesrequest @@ -8093,7 +7738,7 @@ func (c *RolesQueryGrantableRolesCall) Header() http.Header { func (c *RolesQueryGrantableRolesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8154,7 +7799,7 @@ func (c *RolesQueryGrantableRolesCall) Do(opts ...googleapi.CallOption) (*QueryG } return ret, nil // { - // "description": "Queries roles that can be granted on a particular resource.\nA role is grantable if it can be used as the role in a binding for a policy\nfor that resource.", + // "description": "Lists roles that can be granted on a Google Cloud resource. A role is grantable if the IAM policy for the resource can contain bindings to the role.", // "flatPath": "v1/roles:queryGrantableRoles", // "httpMethod": "POST", // "id": "iam.roles.queryGrantableRoles", diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json index 24c7377d080..e9b421cf2a1 100644 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json @@ -12,7 +12,7 @@ "baseUrl": "https://iamcredentials.googleapis.com/", "batchPath": "batch", "canonicalName": "IAM Credentials", - "description": "Creates short-lived, limited-privilege credentials for IAM service accounts.", + "description": " Creates short-lived credentials for impersonating IAM service accounts. *Note:* This API is tied to the IAM API (iam.googleapis.com). Enabling or disabling this API will also enable or disable the IAM API. ", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials", "fullyEncodeReservedExpansion": true, @@ -119,7 +119,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the service account for which the credentials\nare requested, in the following format:\n`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard\ncharacter is required; replacing it with a project ID is invalid.", + "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -147,7 +147,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the service account for which the credentials\nare requested, in the following format:\n`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard\ncharacter is required; replacing it with a project ID is invalid.", + "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. 
The `-` wildcard character is required; replacing it with a project ID is invalid.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -175,7 +175,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the service account for which the credentials\nare requested, in the following format:\n`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard\ncharacter is required; replacing it with a project ID is invalid.", + "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -203,7 +203,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the service account for which the credentials\nare requested, in the following format:\n`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard\ncharacter is required; replacing it with a project ID is invalid.", + "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -226,26 +226,26 @@ } } }, - "revision": "20200417", + "revision": "20200821", "rootUrl": "https://iamcredentials.googleapis.com/", "schemas": { "GenerateAccessTokenRequest": { "id": "GenerateAccessTokenRequest", "properties": { "delegates": { - "description": "The sequence of service accounts in a delegation chain. Each service\naccount must be granted the `roles/iam.serviceAccountTokenCreator` role\non its next service account in the chain. The last service account in the\nchain must be granted the `roles/iam.serviceAccountTokenCreator` role\non the service account that is specified in the `name` field of the\nrequest.\n\nThe delegates must have the following format:\n`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard\ncharacter is required; replacing it with a project ID is invalid.", + "description": "The sequence of service accounts in a delegation chain. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", "items": { "type": "string" }, "type": "array" }, "lifetime": { - "description": "The desired lifetime duration of the access token in seconds.\nMust be set to a value less than or equal to 3600 (1 hour). If a value is\nnot specified, the token's lifetime will be set to a default value of one\nhour.", + "description": "The desired lifetime duration of the access token in seconds. By default, the maximum allowed value is 1 hour. 
To set a lifetime of up to 12 hours, you can add the service account as an allowed value in an Organization Policy that enforces the `constraints/iam.allowServiceAccountCredentialLifetimeExtension` constraint. See detailed instructions at https://cloud.google.com/iam/help/credentials/lifetime If a value is not specified, the token's lifetime will be set to a default value of 1 hour.", "format": "google-duration", "type": "string" }, "scope": { - "description": "Required. Code to identify the scopes to be included in the OAuth 2.0 access token.\nSee https://developers.google.com/identity/protocols/googlescopes for more\ninformation.\nAt least one value required.", + "description": "Required. Code to identify the scopes to be included in the OAuth 2.0 access token. See https://developers.google.com/identity/protocols/googlescopes for more information. At least one value required.", "items": { "type": "string" }, @@ -262,7 +262,7 @@ "type": "string" }, "expireTime": { - "description": "Token expiration time.\nThe expiration time is always set.", + "description": "Token expiration time. The expiration time is always set.", "format": "google-datetime", "type": "string" } @@ -273,18 +273,18 @@ "id": "GenerateIdTokenRequest", "properties": { "audience": { - "description": "Required. The audience for the token, such as the API or account that this token\ngrants access to.", + "description": "Required. The audience for the token, such as the API or account that this token grants access to.", "type": "string" }, "delegates": { - "description": "The sequence of service accounts in a delegation chain. Each service\naccount must be granted the `roles/iam.serviceAccountTokenCreator` role\non its next service account in the chain. The last service account in the\nchain must be granted the `roles/iam.serviceAccountTokenCreator` role\non the service account that is specified in the `name` field of the\nrequest.\n\nThe delegates must have the following format:\n`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard\ncharacter is required; replacing it with a project ID is invalid.", + "description": "The sequence of service accounts in a delegation chain. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", "items": { "type": "string" }, "type": "array" }, "includeEmail": { - "description": "Include the service account email in the token. If set to `true`, the\ntoken will contain `email` and `email_verified` claims.", + "description": "Include the service account email in the token. If set to `true`, the token will contain `email` and `email_verified` claims.", "type": "boolean" } }, @@ -304,7 +304,7 @@ "id": "SignBlobRequest", "properties": { "delegates": { - "description": "The sequence of service accounts in a delegation chain. Each service\naccount must be granted the `roles/iam.serviceAccountTokenCreator` role\non its next service account in the chain. 
The last service account in the\nchain must be granted the `roles/iam.serviceAccountTokenCreator` role\non the service account that is specified in the `name` field of the\nrequest.\n\nThe delegates must have the following format:\n`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard\ncharacter is required; replacing it with a project ID is invalid.", + "description": "The sequence of service accounts in a delegation chain. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", "items": { "type": "string" }, @@ -322,11 +322,11 @@ "id": "SignBlobResponse", "properties": { "keyId": { - "description": "The ID of the key used to sign the blob.", + "description": "The ID of the key used to sign the blob. The key used for signing will remain valid for at least 12 hours after the blob is signed. To verify the signature, you can retrieve the public key in several formats from the following endpoints: - RSA public key wrapped in an X.509 v3 certificate: `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT_EMAIL}` - Raw key in JSON format: `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_EMAIL}` - JSON Web Key (JWK): `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_EMAIL}`", "type": "string" }, "signedBlob": { - "description": "The signature for the blob. Does not include the original blob.", + "description": "The signature for the blob. Does not include the original blob. After the key pair referenced by the `key_id` response field expires, Google no longer exposes the public key that can be used to verify the blob. As a result, the receiver can no longer verify the signature.", "format": "byte", "type": "string" } @@ -337,14 +337,14 @@ "id": "SignJwtRequest", "properties": { "delegates": { - "description": "The sequence of service accounts in a delegation chain. Each service\naccount must be granted the `roles/iam.serviceAccountTokenCreator` role\non its next service account in the chain. The last service account in the\nchain must be granted the `roles/iam.serviceAccountTokenCreator` role\non the service account that is specified in the `name` field of the\nrequest.\n\nThe delegates must have the following format:\n`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard\ncharacter is required; replacing it with a project ID is invalid.", + "description": "The sequence of service accounts in a delegation chain. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", "items": { "type": "string" }, "type": "array" }, "payload": { - "description": "Required. The JWT payload to sign. 
Must be a serialized JSON object that contains a\nJWT Claim Set. For example: `{\"sub\": \"user@example.com\", \"iat\": 313435}`\n\nIf the claim set contains an `exp` claim, it must be an integer timestamp\nthat is not in the past and at most 12 hours in the future.", + "description": "Required. The JWT payload to sign. Must be a serialized JSON object that contains a JWT Claims Set. For example: `{\"sub\": \"user@example.com\", \"iat\": 313435}` If the JWT Claims Set contains an expiration time (`exp`) claim, it must be an integer timestamp that is not in the past and no more than 12 hours in the future.", "type": "string" } }, @@ -354,11 +354,11 @@ "id": "SignJwtResponse", "properties": { "keyId": { - "description": "The ID of the key used to sign the JWT.", + "description": "The ID of the key used to sign the JWT. The key used for signing will remain valid for at least 12 hours after the JWT is signed. To verify the signature, you can retrieve the public key in several formats from the following endpoints: - RSA public key wrapped in an X.509 v3 certificate: `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT_EMAIL}` - Raw key in JSON format: `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_EMAIL}` - JSON Web Key (JWK): `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_EMAIL}`", "type": "string" }, "signedJwt": { - "description": "The signed JWT. Contains the automatically generated header; the\nclient-supplied payload; and the signature, which is generated using the\nkey referenced by the `kid` field in the header.", + "description": "The signed JWT. Contains the automatically generated header; the client-supplied payload; and the signature, which is generated using the key referenced by the `kid` field in the header. After the key pair referenced by the `key_id` response field expires, Google no longer exposes the public key that can be used to verify the JWT. As a result, the receiver can no longer verify the signature.", "type": "string" } }, diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go index 6e50979e083..95c1e9a28d5 100644 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -75,6 +75,7 @@ const apiId = "iamcredentials:v1" const apiName = "iamcredentials" const apiVersion = "v1" const basePath = "https://iamcredentials.googleapis.com/" +const mtlsBasePath = "https://iamcredentials.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -90,6 +91,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -156,39 +158,32 @@ type ProjectsServiceAccountsService struct { type GenerateAccessTokenRequest struct { // Delegates: The sequence of service accounts in a delegation chain. - // Each service - // account must be granted the `roles/iam.serviceAccountTokenCreator` - // role - // on its next service account in the chain. 
The last service account in - // the - // chain must be granted the `roles/iam.serviceAccountTokenCreator` - // role - // on the service account that is specified in the `name` field of - // the - // request. - // - // The delegates must have the following - // format: + // Each service account must be granted the + // `roles/iam.serviceAccountTokenCreator` role on its next service + // account in the chain. The last service account in the chain must be + // granted the `roles/iam.serviceAccountTokenCreator` role on the + // service account that is specified in the `name` field of the request. + // The delegates must have the following format: // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` - // wildcard - // character is required; replacing it with a project ID is invalid. + // wildcard character is required; replacing it with a project ID is + // invalid. Delegates []string `json:"delegates,omitempty"` // Lifetime: The desired lifetime duration of the access token in - // seconds. - // Must be set to a value less than or equal to 3600 (1 hour). If a - // value is + // seconds. By default, the maximum allowed value is 1 hour. To set a + // lifetime of up to 12 hours, you can add the service account as an + // allowed value in an Organization Policy that enforces the + // `constraints/iam.allowServiceAccountCredentialLifetimeExtension` + // constraint. See detailed instructions at + // https://cloud.google.com/iam/help/credentials/lifetime If a value is // not specified, the token's lifetime will be set to a default value of - // one - // hour. + // 1 hour. Lifetime string `json:"lifetime,omitempty"` // Scope: Required. Code to identify the scopes to be included in the - // OAuth 2.0 access token. - // See https://developers.google.com/identity/protocols/googlescopes for - // more - // information. - // At least one value required. + // OAuth 2.0 access token. See + // https://developers.google.com/identity/protocols/googlescopes for + // more information. At least one value required. Scope []string `json:"scope,omitempty"` // ForceSendFields is a list of field names (e.g. "Delegates") to @@ -218,8 +213,7 @@ type GenerateAccessTokenResponse struct { // AccessToken: The OAuth 2.0 access token. AccessToken string `json:"accessToken,omitempty"` - // ExpireTime: Token expiration time. - // The expiration time is always set. + // ExpireTime: Token expiration time. The expiration time is always set. ExpireTime string `json:"expireTime,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -251,32 +245,24 @@ func (s *GenerateAccessTokenResponse) MarshalJSON() ([]byte, error) { type GenerateIdTokenRequest struct { // Audience: Required. The audience for the token, such as the API or - // account that this token - // grants access to. + // account that this token grants access to. Audience string `json:"audience,omitempty"` // Delegates: The sequence of service accounts in a delegation chain. - // Each service - // account must be granted the `roles/iam.serviceAccountTokenCreator` - // role - // on its next service account in the chain. The last service account in - // the - // chain must be granted the `roles/iam.serviceAccountTokenCreator` - // role - // on the service account that is specified in the `name` field of - // the - // request. - // - // The delegates must have the following - // format: + // Each service account must be granted the + // `roles/iam.serviceAccountTokenCreator` role on its next service + // account in the chain. 
The last service account in the chain must be + // granted the `roles/iam.serviceAccountTokenCreator` role on the + // service account that is specified in the `name` field of the request. + // The delegates must have the following format: // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` - // wildcard - // character is required; replacing it with a project ID is invalid. + // wildcard character is required; replacing it with a project ID is + // invalid. Delegates []string `json:"delegates,omitempty"` // IncludeEmail: Include the service account email in the token. If set - // to `true`, the - // token will contain `email` and `email_verified` claims. + // to `true`, the token will contain `email` and `email_verified` + // claims. IncludeEmail bool `json:"includeEmail,omitempty"` // ForceSendFields is a list of field names (e.g. "Audience") to @@ -335,22 +321,15 @@ func (s *GenerateIdTokenResponse) MarshalJSON() ([]byte, error) { type SignBlobRequest struct { // Delegates: The sequence of service accounts in a delegation chain. - // Each service - // account must be granted the `roles/iam.serviceAccountTokenCreator` - // role - // on its next service account in the chain. The last service account in - // the - // chain must be granted the `roles/iam.serviceAccountTokenCreator` - // role - // on the service account that is specified in the `name` field of - // the - // request. - // - // The delegates must have the following - // format: + // Each service account must be granted the + // `roles/iam.serviceAccountTokenCreator` role on its next service + // account in the chain. The last service account in the chain must be + // granted the `roles/iam.serviceAccountTokenCreator` role on the + // service account that is specified in the `name` field of the request. + // The delegates must have the following format: // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` - // wildcard - // character is required; replacing it with a project ID is invalid. + // wildcard character is required; replacing it with a project ID is + // invalid. Delegates []string `json:"delegates,omitempty"` // Payload: Required. The bytes to sign. @@ -380,11 +359,24 @@ func (s *SignBlobRequest) MarshalJSON() ([]byte, error) { } type SignBlobResponse struct { - // KeyId: The ID of the key used to sign the blob. + // KeyId: The ID of the key used to sign the blob. The key used for + // signing will remain valid for at least 12 hours after the blob is + // signed. To verify the signature, you can retrieve the public key in + // several formats from the following endpoints: - RSA public key + // wrapped in an X.509 v3 certificate: + // `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT + // _EMAIL}` - Raw key in JSON format: + // `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_ + // EMAIL}` - JSON Web Key (JWK): + // `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_ + // EMAIL}` KeyId string `json:"keyId,omitempty"` // SignedBlob: The signature for the blob. Does not include the original - // blob. + // blob. After the key pair referenced by the `key_id` response field + // expires, Google no longer exposes the public key that can be used to + // verify the blob. As a result, the receiver can no longer verify the + // signature. 
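// Hedged usage sketch: minting a short-lived access token with the regenerated
// iamcredentials/v1 client, using the GenerateAccessTokenRequest fields documented
// above. The service-account email is a placeholder; the caller needs
// roles/iam.serviceAccountTokenCreator on that account.
package main

import (
	"context"
	"fmt"
	"log"

	iamcredentials "google.golang.org/api/iamcredentials/v1"
)

func main() {
	ctx := context.Background()
	svc, err := iamcredentials.NewService(ctx)
	if err != nil {
		log.Fatalf("iamcredentials.NewService: %v", err)
	}

	// The `-` wildcard is required here; replacing it with a project ID is invalid.
	name := "projects/-/serviceAccounts/example-sa@example-project.iam.gserviceaccount.com"

	resp, err := svc.Projects.ServiceAccounts.GenerateAccessToken(name, &iamcredentials.GenerateAccessTokenRequest{
		Scope:    []string{"https://www.googleapis.com/auth/cloud-platform"},
		Lifetime: "3600s", // default and, without the org-policy lifetime extension, the maximum is 1 hour
	}).Context(ctx).Do()
	if err != nil {
		log.Fatalf("GenerateAccessToken: %v", err)
	}
	fmt.Println("token expires at", resp.ExpireTime)
}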
SignedBlob string `json:"signedBlob,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -416,32 +408,22 @@ func (s *SignBlobResponse) MarshalJSON() ([]byte, error) { type SignJwtRequest struct { // Delegates: The sequence of service accounts in a delegation chain. - // Each service - // account must be granted the `roles/iam.serviceAccountTokenCreator` - // role - // on its next service account in the chain. The last service account in - // the - // chain must be granted the `roles/iam.serviceAccountTokenCreator` - // role - // on the service account that is specified in the `name` field of - // the - // request. - // - // The delegates must have the following - // format: + // Each service account must be granted the + // `roles/iam.serviceAccountTokenCreator` role on its next service + // account in the chain. The last service account in the chain must be + // granted the `roles/iam.serviceAccountTokenCreator` role on the + // service account that is specified in the `name` field of the request. + // The delegates must have the following format: // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` - // wildcard - // character is required; replacing it with a project ID is invalid. + // wildcard character is required; replacing it with a project ID is + // invalid. Delegates []string `json:"delegates,omitempty"` // Payload: Required. The JWT payload to sign. Must be a serialized JSON - // object that contains a - // JWT Claim Set. For example: `{"sub": "user@example.com", "iat": - // 313435}` - // - // If the claim set contains an `exp` claim, it must be an integer - // timestamp - // that is not in the past and at most 12 hours in the future. + // object that contains a JWT Claims Set. For example: `{"sub": + // "user@example.com", "iat": 313435}` If the JWT Claims Set contains an + // expiration time (`exp`) claim, it must be an integer timestamp that + // is not in the past and no more than 12 hours in the future. Payload string `json:"payload,omitempty"` // ForceSendFields is a list of field names (e.g. "Delegates") to @@ -468,14 +450,26 @@ func (s *SignJwtRequest) MarshalJSON() ([]byte, error) { } type SignJwtResponse struct { - // KeyId: The ID of the key used to sign the JWT. + // KeyId: The ID of the key used to sign the JWT. The key used for + // signing will remain valid for at least 12 hours after the JWT is + // signed. To verify the signature, you can retrieve the public key in + // several formats from the following endpoints: - RSA public key + // wrapped in an X.509 v3 certificate: + // `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT + // _EMAIL}` - Raw key in JSON format: + // `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_ + // EMAIL}` - JSON Web Key (JWK): + // `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_ + // EMAIL}` KeyId string `json:"keyId,omitempty"` // SignedJwt: The signed JWT. Contains the automatically generated - // header; the - // client-supplied payload; and the signature, which is generated using - // the - // key referenced by the `kid` field in the header. + // header; the client-supplied payload; and the signature, which is + // generated using the key referenced by the `kid` field in the header. + // After the key pair referenced by the `key_id` response field expires, + // Google no longer exposes the public key that can be used to verify + // the JWT. As a result, the receiver can no longer verify the + // signature. 
SignedJwt string `json:"signedJwt,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -552,7 +546,7 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Header() http.Header { func (c *ProjectsServiceAccountsGenerateAccessTokenCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -625,7 +619,7 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the service account for which the credentials\nare requested, in the following format:\n`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard\ncharacter is required; replacing it with a project ID is invalid.", + // "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -693,7 +687,7 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) Header() http.Header { func (c *ProjectsServiceAccountsGenerateIdTokenCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -766,7 +760,7 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the service account for which the credentials\nare requested, in the following format:\n`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard\ncharacter is required; replacing it with a project ID is invalid.", + // "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -834,7 +828,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) Header() http.Header { func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -907,7 +901,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the service account for which the credentials\nare requested, in the following format:\n`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard\ncharacter is required; replacing it with a project ID is invalid.", + // "description": "Required. 
The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -975,7 +969,7 @@ func (c *ProjectsServiceAccountsSignJwtCall) Header() http.Header { func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1048,7 +1042,7 @@ func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the service account for which the credentials\nare requested, in the following format:\n`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard\ncharacter is required; replacing it with a project ID is invalid.", + // "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 75e9445e1b3..dc6d50e96aa 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -11,6 +11,7 @@ import ( "io/ioutil" "golang.org/x/oauth2" + "google.golang.org/api/internal/impersonate" "golang.org/x/oauth2/google" ) @@ -18,6 +19,17 @@ import ( // Creds returns credential information obtained from DialSettings, or if none, then // it returns default credential information. 
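The creds.go hunk that follows splits credential resolution into a baseCreds step plus an optional impersonation step. A minimal sketch of the resulting flow, assuming the impersonate.Config and DialSettings.ImpersonationConfig additions made elsewhere in this patch; impersonate lives under internal/, so this only illustrates the wiring rather than a public entry point, and the target account is a hypothetical placeholder:

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	"google.golang.org/api/internal/impersonate"
)

func main() {
	ctx := context.Background()

	// Resolve base (application default) credentials, as baseCreds does.
	base, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}

	// Wrap the base TokenSource so every token is exchanged, via
	// iamcredentials generateAccessToken, for one acting as the target account.
	ts, err := impersonate.TokenSource(ctx, base.TokenSource, &impersonate.Config{
		Target: "target-sa@example-project.iam.gserviceaccount.com", // hypothetical
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// This mirrors what impersonateCredentials below hands back to the client stack.
	_ = &google.Credentials{TokenSource: ts, ProjectID: base.ProjectID}
}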
func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { + creds, err := baseCreds(ctx, ds) + if err != nil { + return nil, err + } + if ds.ImpersonationConfig != nil { + return impersonateCredentials(ctx, creds, ds) + } + return creds, nil +} + +func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { if ds.Credentials != nil { return ds.Credentials, nil } @@ -103,3 +115,17 @@ func QuotaProjectFromCreds(cred *google.Credentials) string { } return v.QuotaProject } + +func impersonateCredentials(ctx context.Context, creds *google.Credentials, ds *DialSettings) (*google.Credentials, error) { + if len(ds.ImpersonationConfig.Scopes) == 0 { + ds.ImpersonationConfig.Scopes = ds.Scopes + } + ts, err := impersonate.TokenSource(ctx, creds.TokenSource, ds.ImpersonationConfig) + if err != nil { + return nil, err + } + return &google.Credentials{ + TokenSource: ts, + ProjectID: creds.ProjectID, + }, nil +} diff --git a/vendor/google.golang.org/api/internal/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go index 0288cc30427..0460ab59406 100644 --- a/vendor/google.golang.org/api/internal/gensupport/media.go +++ b/vendor/google.golang.org/api/internal/gensupport/media.go @@ -55,7 +55,7 @@ func (cs *contentSniffer) Read(p []byte) (n int, err error) { return cs.r.Read(p) } -// ContentType returns the sniffed content type, and whether the content type was succesfully sniffed. +// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. func (cs *contentSniffer) ContentType() (string, bool) { if cs.sniffed { return cs.ctype, cs.ctype != "" @@ -88,7 +88,7 @@ func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) { return media, ctype } - // For backwards compatability, allow clients to set content + // For backwards compatibility, allow clients to set content // type by providing a ContentTyper for media. if typer, ok := media.(googleapi.ContentTyper); ok { return media, typer.ContentType() diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go index e67ccd9a614..edc87ec24f6 100644 --- a/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -28,6 +28,8 @@ var ( backoff = func() Backoff { return &gax.Backoff{Initial: 100 * time.Millisecond} } + // isRetryable is a platform-specific hook, specified in retryable_linux.go + syscallRetryable func(error) bool = func(err error) bool { return false } ) const ( @@ -160,21 +162,6 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e // rx is private to the auto-generated API code. // Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { - var shouldRetry = func(status int, err error) bool { - if 500 <= status && status <= 599 { - return true - } - if status == statusTooManyRequests { - return true - } - if err == io.ErrUnexpectedEOF { - return true - } - if err, ok := err.(interface{ Temporary() bool }); ok { - return err.Temporary() - } - return false - } // There are a couple of cases where it's possible for err and resp to both // be non-nil. 
However, we expose a simpler contract to our callers: exactly @@ -239,3 +226,33 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err return prepareReturn(resp, err) } } + +// shouldRetry indicates whether an error is retryable for the purposes of this +// package, following guidance from +// https://cloud.google.com/storage/docs/exponential-backoff . +func shouldRetry(status int, err error) bool { + if 500 <= status && status <= 599 { + return true + } + if status == statusTooManyRequests { + return true + } + if err == io.ErrUnexpectedEOF { + return true + } + // Transient network errors should be retried. + if syscallRetryable(err) { + return true + } + if err, ok := err.(interface{ Temporary() bool }); ok { + if err.Temporary() { + return true + } + } + // If Go 1.13 error unwrapping is available, use this to examine wrapped + // errors. + if err, ok := err.(interface{ Unwrap() error }); ok { + return shouldRetry(status, err.Unwrap()) + } + return false +} diff --git a/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go b/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go new file mode 100644 index 00000000000..fed998b5d07 --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go @@ -0,0 +1,15 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package gensupport + +import "syscall" + +func init() { + // Initialize syscallRetryable to return true on transient socket-level + // errors. These errors are specific to Linux. + syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } +} diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index 5799393093d..3338c8d193a 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -9,6 +9,7 @@ import ( "encoding/json" "errors" "net/http" + "time" ) // Hook is the type of a function that is called once before each HTTP request @@ -77,6 +78,90 @@ func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Re return resp, err } +// SendRequestWithRetry sends a single HTTP request using the given client, +// with retries if a retryable error is returned. +// If ctx is non-nil, it calls all hooks, then sends the request with +// req.WithContext, then calls any functions returned by the hooks in +// reverse order. +func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + // Disallow Accept-Encoding because it interferes with the automatic gzip handling + // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. + if _, ok := req.Header["Accept-Encoding"]; ok { + return nil, errors.New("google api: custom Accept-Encoding headers not allowed") + } + if ctx == nil { + return client.Do(req) + } + // Call hooks in order of registration, store returned funcs. + post := make([]func(resp *http.Response), len(hooks)) + for i, h := range hooks { + fn := h(ctx, req) + post[i] = fn + } + + // Send request with retry. + resp, err := sendAndRetry(ctx, client, req) + + // Call returned funcs in reverse order. 
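The shouldRetry helper and the Linux-only syscallRetryable hook added in the resumable.go and retryable_linux.go hunks above decide which failures get retried. A small in-package test sketch of that classification as it would behave on Linux, assuming exactly the definitions shown in this patch (shouldRetry and statusTooManyRequests are unexported, so this could only live inside package gensupport):

package gensupport

import (
	"fmt"
	"io"
	"syscall"
	"testing"
)

func TestShouldRetrySketch(t *testing.T) {
	cases := []struct {
		status int
		err    error
		want   bool
	}{
		{503, nil, true},                   // any 5xx response
		{statusTooManyRequests, nil, true}, // HTTP 429
		{200, io.ErrUnexpectedEOF, true},   // truncated body
		{200, syscall.ECONNRESET, true},    // transient socket error, via syscallRetryable
		{200, fmt.Errorf("send: %w", syscall.ECONNREFUSED), true}, // found by unwrapping
		{400, nil, false},                  // client errors are not retried
	}
	for _, c := range cases {
		if got := shouldRetry(c.status, c.err); got != c.want {
			t.Errorf("shouldRetry(%d, %v) = %v, want %v", c.status, c.err, got, c.want)
		}
	}
}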
+ for i := len(post) - 1; i >= 0; i-- { + if fn := post[i]; fn != nil { + fn(resp) + } + } + return resp, err +} + +func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + var resp *http.Response + var err error + + // Loop to retry the request, up to the context deadline. + var pause time.Duration + bo := backoff() + + for { + select { + case <-ctx.Done(): + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + if err == nil { + err = ctx.Err() + } + return resp, err + case <-time.After(pause): + } + + resp, err = client.Do(req.WithContext(ctx)) + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Check if we can retry the request. A retry can only be done if the error + // is retryable and the request body can be re-created using GetBody (this + // will not be possible if the body was unbuffered). + if req.GetBody == nil || !shouldRetry(status, err) { + break + } + var errBody error + req.Body, errBody = req.GetBody() + if errBody != nil { + break + } + + pause = bo.Pause() + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + } + return resp, err +} + // DecodeResponse decodes the body of res into target. If there is no body, // target is unchanged. func DecodeResponse(target interface{}, res *http.Response) error { diff --git a/vendor/google.golang.org/api/internal/impersonate/impersonate.go b/vendor/google.golang.org/api/internal/impersonate/impersonate.go new file mode 100644 index 00000000000..b465bbcd12e --- /dev/null +++ b/vendor/google.golang.org/api/internal/impersonate/impersonate.go @@ -0,0 +1,128 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package impersonate is used to impersonate Google Credentials. +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "golang.org/x/oauth2" +) + +// Config for generating impersonated credentials. +type Config struct { + // Target is the service account to impersonate. Required. + Target string + // Scopes the impersonated credential should have. Required. + Scopes []string + // Delegates are the service accounts in a delegation chain. Each service + // account must be granted roles/iam.serviceAccountTokenCreator on the next + // service account in the chain. Optional. + Delegates []string +} + +// TokenSource returns an impersonated TokenSource configured with the provided +// config using ts as the base credential provider for making requests. +func TokenSource(ctx context.Context, ts oauth2.TokenSource, config *Config) (oauth2.TokenSource, error) { + if len(config.Scopes) == 0 { + return nil, fmt.Errorf("impersonate: scopes must be provided") + } + its := impersonatedTokenSource{ + ctx: ctx, + ts: ts, + name: formatIAMServiceAccountName(config.Target), + // Default to the longest acceptable value of one hour as the token will + // be refreshed automatically. 
+ lifetime: "3600s", + } + + its.delegates = make([]string, len(config.Delegates)) + for i, v := range config.Delegates { + its.delegates[i] = formatIAMServiceAccountName(v) + } + its.scopes = make([]string, len(config.Scopes)) + copy(its.scopes, config.Scopes) + + return oauth2.ReuseTokenSource(nil, its), nil +} + +func formatIAMServiceAccountName(name string) string { + return fmt.Sprintf("projects/-/serviceAccounts/%s", name) +} + +type generateAccessTokenReq struct { + Delegates []string `json:"delegates,omitempty"` + Lifetime string `json:"lifetime,omitempty"` + Scope []string `json:"scope,omitempty"` +} + +type generateAccessTokenResp struct { + AccessToken string `json:"accessToken"` + ExpireTime string `json:"expireTime"` +} + +type impersonatedTokenSource struct { + ctx context.Context + ts oauth2.TokenSource + + name string + lifetime string + scopes []string + delegates []string +} + +// Token returns an impersonated Token. +func (i impersonatedTokenSource) Token() (*oauth2.Token, error) { + hc := oauth2.NewClient(i.ctx, i.ts) + reqBody := generateAccessTokenReq{ + Delegates: i.delegates, + Lifetime: i.lifetime, + Scope: i.scopes, + } + b, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to marshal request: %v", err) + } + url := fmt.Sprintf("https://iamcredentials.googleapis.com/v1/%s:generateAccessToken", i.name) + req, err := http.NewRequest("POST", url, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to create request: %v", err) + } + req = req.WithContext(i.ctx) + req.Header.Set("Content-Type", "application/json") + + resp, err := hc.Do(req) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to generate access token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to read body: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var accessTokenResp generateAccessTokenResp + if err := json.Unmarshal(body, &accessTokenResp); err != nil { + return nil, fmt.Errorf("impersonate: unable to parse response: %v", err) + } + expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to parse expiry: %v", err) + } + return &oauth2.Token{ + AccessToken: accessTokenResp.AccessToken, + Expiry: expiry, + }, nil +} diff --git a/vendor/google.golang.org/api/internal/service-account.json b/vendor/google.golang.org/api/internal/service-account.json deleted file mode 100644 index 6b36a92961e..00000000000 --- a/vendor/google.golang.org/api/internal/service-account.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "type": "service_account", - "project_id": "project_id", - "private_key_id": "private_key_id", - "private_key": "-----BEGIN PRIVATE 
KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCzd9ZdbPLAR4/g\nj+Rodu15kEasMpxf/Mz+gKRb2fmgR2Y18Y/iRBYZ4SkmF2pBSfzvwE/aTCzSPBGl\njHhPzohXnSN029eWoItmxVONlqCbR29pD07aLzv08LGeIGdHIEdhVjhvRwTkYZIF\ndXmlHNDRUU/EbJN9D+3ahw22BNnC4PaDgfIWTs3xIlTCSf2rL39I4DSNLTS/LzxK\n/XrQfBMtfwMWwyQaemXbc7gRgzOy8L56wa1W1zyXx99th97j1bLnoAXBGplhB4Co\n25ohyDAuhxRm+XGMEaO0Mzo7u97kvhj48a569RH1QRhOf7EBf60jO4h5eOmfi5P5\nPV3l7041AgMBAAECggEAEZ0RTNoEeRqM5F067YW+iM/AH+ZXspP9Cn1VpC4gcbqQ\nLXsnw+0qvh97CmIB66Z3TJBzRdl0DK4YjUbcB/kdKHwjnrR01DOtesijCqJd4N+B\n762w73jzSXbV9872U+S3HLZ5k3JE6KUqz55X8fyCAgkY6w4862lEzs2yasrPFHEV\nRoQp3PM0Miif8R3hGDhOWcHxcobullthG6JHAQFfc1ctwEjZI4TK0iWqlzfWGyKN\nT9UgvjUDud5cGvS9el0AiLN6keAf77tcPn1zetUVhxN1KN4bVAm1Q+6O8esl63Rj\n7JXpHzxaRnit9S6/aH/twHsGGtLg5Puw6jey6xs4AQKBgQD2JNy1wzewCRkD+jug\n8CHbJ+LIJVRNIaWa/RK1QD8/UjmFPkIzRQSF3AKC5mRAWSa2FL3yVK3N/DD7hazW\n85XSBB7IDcnoJnA9SkUeWwqQGkDx3EntlU3gX8Kn/+ofF8O9jLXxAa901MAVXVuf\n5YDzrl4PNE3bFnPCdiNmSdRfhQKBgQC6p4DsCpwqbeTu9f5ak9VW/fQP47Fgt+Mf\nwGjBnKP5PbbNJpHCfamF7jqSRH83Xy0KNssH7jD/NZ2oT594sMmiQPUC5ni9VYY6\nsuYB0JbD5Mq+EjKIVhYtxaQJ76LzHreEI+G4z6k3H7/hRpr3/C48n9G/uVkT9DbJ\noplxxEx68QKBgQCdJ23vcwO0Firtmi/GEmtbVHz70rGfSXNFoHz4UlvPXv0wsE5u\nE4vOt2i3EMhDOWh46odYGG6bzH+tp2xyFTW70Dui+QLHgPs6dpfoyLHWzZxXj5F3\n6lK9hgZvYvqk/XRRKmzjwnK2wjsdqOyeC1covlR5mqh20D/6kZkKbur0TQKBgAwy\nCZBimRWEnKKoW/gbFKNccGfhXqONID/g2Hdd/rC4QYth68AjacIgcJ9B7nX1uAGk\n1tsryvPB0w0+NpMyKdp6GAgaeuUUA3MuYSzZLiCagEyu77JMvaI7+Z3UlHcCGMd/\neK4Uk1/QqT7U2Cc/yN2ZK6E1QQa2vCWshA4U31JhAoGAbtbSSSsul1c+PsJ13Cfk\n6qVnqYzPqt23QTyOZmGAvUHH/M4xRiQpOE0cDF4t/r5PwenAQPQzTvMmWRzj6uAY\n3eaU0eAK7ZfoweCoOIAPnpFbbRLrXfoY46H7MYh7euWGXOKEpxz5yzuEkd9ByNUE\n86vSEidqbMIiXVgEgnu/k08=\n-----END PRIVATE KEY-----\n", - "client_email": "xyz@developer.gserviceaccount.com", - "client_id": "123", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://accounts.google.com/o/oauth2/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/xyz%40developer.gserviceaccount.com" -} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 0d8210baa7b..26259b82abb 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -12,31 +12,35 @@ import ( "golang.org/x/oauth2" "golang.org/x/oauth2/google" + "google.golang.org/api/internal/impersonate" "google.golang.org/grpc" ) // DialSettings holds information needed to establish a connection with a // Google API service. type DialSettings struct { - Endpoint string - DefaultEndpoint string - Scopes []string - TokenSource oauth2.TokenSource - Credentials *google.Credentials - CredentialsFile string // if set, Token Source is ignored. - CredentialsJSON []byte - UserAgent string - APIKey string - Audiences []string - HTTPClient *http.Client - GRPCDialOpts []grpc.DialOption - GRPCConn *grpc.ClientConn - GRPCConnPool ConnPool - GRPCConnPoolSize int - NoAuth bool - TelemetryDisabled bool - ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error) - CustomClaims map[string]interface{} + Endpoint string + DefaultEndpoint string + DefaultMTLSEndpoint string + Scopes []string + TokenSource oauth2.TokenSource + Credentials *google.Credentials + CredentialsFile string // if set, Token Source is ignored. 
+ CredentialsJSON []byte + UserAgent string + APIKey string + Audiences []string + HTTPClient *http.Client + GRPCDialOpts []grpc.DialOption + GRPCConn *grpc.ClientConn + GRPCConnPool ConnPool + GRPCConnPoolSize int + NoAuth bool + TelemetryDisabled bool + ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + CustomClaims map[string]interface{} + SkipValidation bool + ImpersonationConfig *impersonate.Config // Google API system parameters. For more information please read: // https://cloud.google.com/apis/docs/system-parameters @@ -46,6 +50,9 @@ type DialSettings struct { // Validate reports an error if ds is invalid. func (ds *DialSettings) Validate() error { + if ds.SkipValidation { + return nil + } hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil if ds.NoAuth && hasCreds { return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials") @@ -100,6 +107,8 @@ func (ds *DialSettings) Validate() error { if ds.ClientCertSource != nil && (ds.GRPCConn != nil || ds.GRPCConnPool != nil || ds.GRPCConnPoolSize != 0 || ds.GRPCDialOpts != nil) { return errors.New("WithClientCertSource is currently only supported for HTTP. gRPC settings are incompatible") } - + if ds.ImpersonationConfig != nil && len(ds.ImpersonationConfig.Scopes) == 0 && len(ds.Scopes) == 0 { + return errors.New("WithImpersonatedCredentials requires scopes being provided") + } return nil } diff --git a/vendor/google.golang.org/api/logging/v2/logging-api.json b/vendor/google.golang.org/api/logging/v2/logging-api.json index cf9452bea21..1240dfb3805 100644 --- a/vendor/google.golang.org/api/logging/v2/logging-api.json +++ b/vendor/google.golang.org/api/logging/v2/logging-api.json @@ -131,7 +131,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the bucket:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + "description": "Required. The resource name of the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", "location": "path", "pattern": "^billingAccounts/[^/]+/buckets/[^/]+$", "required": true, @@ -163,7 +163,7 @@ ], "parameters": { "parent": { - "description": "Required. The parent resource in which to create the exclusion:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + "description": "Required. 
The parent resource in which to create the exclusion: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", "location": "path", "pattern": "^billingAccounts/[^/]+$", "required": true, @@ -192,7 +192,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of an existing exclusion to delete:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + "description": "Required. The resource name of an existing exclusion to delete: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", "location": "path", "pattern": "^billingAccounts/[^/]+/exclusions/[^/]+$", "required": true, @@ -218,7 +218,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of an existing exclusion:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + "description": "Required. The resource name of an existing exclusion: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", "location": "path", "pattern": "^billingAccounts/[^/]+/exclusions/[^/]+$", "required": true, @@ -257,7 +257,7 @@ "type": "string" }, "parent": { - "description": "Required. The parent resource whose exclusions are to be listed.\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "description": "Required. The parent resource whose exclusions are to be listed. \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", "location": "path", "pattern": "^billingAccounts/[^/]+$", "required": true, @@ -285,7 +285,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the exclusion to update:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + "description": "Required. 
The resource name of the exclusion to update: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", "location": "path", "pattern": "^billingAccounts/[^/]+/exclusions/[^/]+$", "required": true, @@ -316,6 +316,66 @@ "resources": { "buckets": { "methods": { + "create": { + "description": "Creates a bucket that can be used to store log entries. Once a bucket has been created, the region cannot be changed.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets", + "httpMethod": "POST", + "id": "logging.billingAccounts.locations.buckets.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "bucketId": { + "description": "Required. A client-assigned identifier such as \"my-bucket\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource in which to create the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" Example: \"projects/my-logging-project/locations/global\"", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/buckets", + "request": { + "$ref": "LogBucket" + }, + "response": { + "$ref": "LogBucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "delete": { + "description": "Deletes a bucket. Moves the bucket to the DELETE_REQUESTED state. After 7 days, the bucket will be purged and all logs in the bucket will be permanently deleted.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}", + "httpMethod": "DELETE", + "id": "logging.billingAccounts.locations.buckets.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the bucket to delete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, "list": { "description": "Lists buckets (Beta).", "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets", @@ -337,7 +397,7 @@ "type": "string" }, "parent": { - "description": "Required. 
The parent resource whose buckets are to be listed:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]\"\nNote: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", + "description": "Required. The parent resource whose buckets are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" Note: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", "location": "path", "pattern": "^billingAccounts/[^/]+/locations/[^/]+$", "required": true, @@ -365,7 +425,7 @@ ], "parameters": { "name": { - "description": "Required. The full resource name of the bucket to update.\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", + "description": "Required. The full resource name of the bucket to update. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", "location": "path", "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+$", "required": true, @@ -389,6 +449,35 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ] + }, + "undelete": { + "description": "Undeletes a bucket. A bucket that has been deleted may be undeleted within the grace period of 7 days.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}:undelete", + "httpMethod": "POST", + "id": "logging.billingAccounts.locations.buckets.undelete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the bucket to undelete. 
\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}:undelete", + "request": { + "$ref": "UndeleteBucketRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] } } } @@ -406,7 +495,7 @@ ], "parameters": { "logName": { - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + "description": "Required. The resource name of the log to delete: \"projects/[PROJECT_ID]/logs/[LOG_ID]\" \"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\" \"folders/[FOLDER_ID]/logs/[LOG_ID]\" [LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", "location": "path", "pattern": "^billingAccounts/[^/]+/logs/[^/]+$", "required": true, @@ -443,7 +532,7 @@ "type": "string" }, "parent": { - "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "description": "Required. The resource name that owns the logs: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", "location": "path", "pattern": "^billingAccounts/[^/]+$", "required": true, @@ -475,7 +564,7 @@ ], "parameters": { "parent": { - "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", "location": "path", "pattern": "^billingAccounts/[^/]+$", "required": true, @@ -509,7 +598,7 @@ ], "parameters": { "sinkName": { - "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. 
The full resource name of the sink to delete, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", "required": true, @@ -535,7 +624,7 @@ ], "parameters": { "sinkName": { - "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. The resource name of the sink: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", "required": true, @@ -574,7 +663,7 @@ "type": "string" }, "parent": { - "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "description": "Required. The parent resource whose sinks are to be listed: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", "location": "path", "pattern": "^billingAccounts/[^/]+$", "required": true, @@ -602,19 +691,19 @@ ], "parameters": { "sinkName": { - "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", "required": true, "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. 
When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, "updateMask": { - "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -642,19 +731,19 @@ ], "parameters": { "sinkName": { - "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", "required": true, "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. 
When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, "updateMask": { - "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -733,7 +822,7 @@ ], "parameters": { "parent": { - "description": "Required. The parent resource in which to create the exclusion:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + "description": "Required. The parent resource in which to create the exclusion: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -762,7 +851,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of an existing exclusion to delete:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + "description": "Required. 
The resource name of an existing exclusion to delete: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", "location": "path", "pattern": "^[^/]+/[^/]+/exclusions/[^/]+$", "required": true, @@ -788,7 +877,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of an existing exclusion:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + "description": "Required. The resource name of an existing exclusion: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", "location": "path", "pattern": "^[^/]+/[^/]+/exclusions/[^/]+$", "required": true, @@ -827,7 +916,7 @@ "type": "string" }, "parent": { - "description": "Required. The parent resource whose exclusions are to be listed.\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "description": "Required. The parent resource whose exclusions are to be listed. \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -855,7 +944,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the exclusion to update:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + "description": "Required. The resource name of the exclusion to update: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", "location": "path", "pattern": "^[^/]+/[^/]+/exclusions/[^/]+$", "required": true, @@ -896,7 +985,7 @@ ], "parameters": { "parent": { - "description": "Required. The parent resource in which to create the exclusion:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + "description": "Required. The parent resource in which to create the exclusion: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", "location": "path", "pattern": "^folders/[^/]+$", "required": true, @@ -925,7 +1014,7 @@ ], "parameters": { "name": { - "description": "Required. 
The resource name of an existing exclusion to delete:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + "description": "Required. The resource name of an existing exclusion to delete: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", "location": "path", "pattern": "^folders/[^/]+/exclusions/[^/]+$", "required": true, @@ -951,7 +1040,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of an existing exclusion:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + "description": "Required. The resource name of an existing exclusion: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", "location": "path", "pattern": "^folders/[^/]+/exclusions/[^/]+$", "required": true, @@ -990,7 +1079,7 @@ "type": "string" }, "parent": { - "description": "Required. The parent resource whose exclusions are to be listed.\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "description": "Required. The parent resource whose exclusions are to be listed. \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", "location": "path", "pattern": "^folders/[^/]+$", "required": true, @@ -1018,7 +1107,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the exclusion to update:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + "description": "Required. The resource name of the exclusion to update: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", "location": "path", "pattern": "^folders/[^/]+/exclusions/[^/]+$", "required": true, @@ -1049,6 +1138,66 @@ "resources": { "buckets": { "methods": { + "create": { + "description": "Creates a bucket that can be used to store log entries. 
Once a bucket has been created, the region cannot be changed.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets", + "httpMethod": "POST", + "id": "logging.folders.locations.buckets.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "bucketId": { + "description": "Required. A client-assigned identifier such as \"my-bucket\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource in which to create the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" Example: \"projects/my-logging-project/locations/global\"", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/buckets", + "request": { + "$ref": "LogBucket" + }, + "response": { + "$ref": "LogBucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "delete": { + "description": "Deletes a bucket. Moves the bucket to the DELETE_REQUESTED state. After 7 days, the bucket will be purged and all logs in the bucket will be permanently deleted.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}", + "httpMethod": "DELETE", + "id": "logging.folders.locations.buckets.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the bucket to delete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, "get": { "description": "Gets a bucket (Beta).", "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}", @@ -1059,7 +1208,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the bucket:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + "description": "Required. The resource name of the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", "location": "path", "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+$", "required": true, @@ -1098,7 +1247,7 @@ "type": "string" }, "parent": { - "description": "Required. 
The parent resource whose buckets are to be listed:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]\"\nNote: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", + "description": "Required. The parent resource whose buckets are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" Note: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", "location": "path", "pattern": "^folders/[^/]+/locations/[^/]+$", "required": true, @@ -1126,7 +1275,7 @@ ], "parameters": { "name": { - "description": "Required. The full resource name of the bucket to update.\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", + "description": "Required. The full resource name of the bucket to update. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", "location": "path", "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+$", "required": true, @@ -1150,6 +1299,35 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ] + }, + "undelete": { + "description": "Undeletes a bucket. A bucket that has been deleted may be undeleted within the grace period of 7 days.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}:undelete", + "httpMethod": "POST", + "id": "logging.folders.locations.buckets.undelete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the bucket to undelete. 
\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}:undelete", + "request": { + "$ref": "UndeleteBucketRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] } } } @@ -1167,7 +1345,7 @@ ], "parameters": { "logName": { - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + "description": "Required. The resource name of the log to delete: \"projects/[PROJECT_ID]/logs/[LOG_ID]\" \"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\" \"folders/[FOLDER_ID]/logs/[LOG_ID]\" [LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", "location": "path", "pattern": "^folders/[^/]+/logs/[^/]+$", "required": true, @@ -1204,7 +1382,7 @@ "type": "string" }, "parent": { - "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "description": "Required. The resource name that owns the logs: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", "location": "path", "pattern": "^folders/[^/]+$", "required": true, @@ -1236,7 +1414,7 @@ ], "parameters": { "parent": { - "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", "location": "path", "pattern": "^folders/[^/]+$", "required": true, @@ -1270,7 +1448,7 @@ ], "parameters": { "sinkName": { - "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. 
The full resource name of the sink to delete, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^folders/[^/]+/sinks/[^/]+$", "required": true, @@ -1296,7 +1474,7 @@ ], "parameters": { "sinkName": { - "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. The resource name of the sink: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^folders/[^/]+/sinks/[^/]+$", "required": true, @@ -1335,7 +1513,7 @@ "type": "string" }, "parent": { - "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "description": "Required. The parent resource whose sinks are to be listed: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", "location": "path", "pattern": "^folders/[^/]+$", "required": true, @@ -1363,19 +1541,19 @@ ], "parameters": { "sinkName": { - "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^folders/[^/]+/sinks/[^/]+$", "required": true, "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. 
When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, "updateMask": { - "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -1403,19 +1581,19 @@ ], "parameters": { "sinkName": { - "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^folders/[^/]+/sinks/[^/]+$", "required": true, "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. 
When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, "updateMask": { - "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -1441,6 +1619,66 @@ "resources": { "buckets": { "methods": { + "create": { + "description": "Creates a bucket that can be used to store log entries. Once a bucket has been created, the region cannot be changed.", + "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets", + "httpMethod": "POST", + "id": "logging.locations.buckets.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "bucketId": { + "description": "Required. A client-assigned identifier such as \"my-bucket\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource in which to create the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" Example: \"projects/my-logging-project/locations/global\"", + "location": "path", + "pattern": "^[^/]+/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/buckets", + "request": { + "$ref": "LogBucket" + }, + "response": { + "$ref": "LogBucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "delete": { + "description": "Deletes a bucket. Moves the bucket to the DELETE_REQUESTED state. 
After 7 days, the bucket will be purged and all logs in the bucket will be permanently deleted.", + "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}", + "httpMethod": "DELETE", + "id": "logging.locations.buckets.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the bucket to delete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + "location": "path", + "pattern": "^[^/]+/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, "get": { "description": "Gets a bucket (Beta).", "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}", @@ -1451,7 +1689,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the bucket:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + "description": "Required. The resource name of the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", "location": "path", "pattern": "^[^/]+/[^/]+/locations/[^/]+/buckets/[^/]+$", "required": true, @@ -1490,7 +1728,7 @@ "type": "string" }, "parent": { - "description": "Required. The parent resource whose buckets are to be listed:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]\"\nNote: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", + "description": "Required. The parent resource whose buckets are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" Note: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", "location": "path", "pattern": "^[^/]+/[^/]+/locations/[^/]+$", "required": true, @@ -1518,7 +1756,7 @@ ], "parameters": { "name": { - "description": "Required. 
The full resource name of the bucket to update.\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", + "description": "Required. The full resource name of the bucket to update. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", "location": "path", "pattern": "^[^/]+/[^/]+/locations/[^/]+/buckets/[^/]+$", "required": true, @@ -1542,6 +1780,35 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ] + }, + "undelete": { + "description": "Undeletes a bucket. A bucket that has been deleted may be undeleted within the grace period of 7 days.", + "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}:undelete", + "httpMethod": "POST", + "id": "logging.locations.buckets.undelete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the bucket to undelete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + "location": "path", + "pattern": "^[^/]+/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}:undelete", + "request": { + "$ref": "UndeleteBucketRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] } } } @@ -1559,7 +1826,7 @@ ], "parameters": { "logName": { - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + "description": "Required. The resource name of the log to delete: \"projects/[PROJECT_ID]/logs/[LOG_ID]\" \"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\" \"folders/[FOLDER_ID]/logs/[LOG_ID]\" [LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". 
For more information about log names, see LogEntry.", "location": "path", "pattern": "^[^/]+/[^/]+/logs/[^/]+$", "required": true, @@ -1596,7 +1863,7 @@ "type": "string" }, "parent": { - "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "description": "Required. The resource name that owns the logs: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -1662,7 +1929,7 @@ ], "parameters": { "name": { - "description": "Required. The resource for which to retrieve CMEK settings.\n\"projects/[PROJECT_ID]/cmekSettings\"\n\"organizations/[ORGANIZATION_ID]/cmekSettings\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\"\n\"folders/[FOLDER_ID]/cmekSettings\"\nExample: \"organizations/12345/cmekSettings\".Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once configured, it applies to all projects and folders in the GCP organization.", + "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" Example: \"organizations/12345/cmekSettings\".Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once configured, it applies to all projects and folders in the GCP organization.", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -1690,7 +1957,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name for the CMEK settings to update.\n\"projects/[PROJECT_ID]/cmekSettings\"\n\"organizations/[ORGANIZATION_ID]/cmekSettings\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\"\n\"folders/[FOLDER_ID]/cmekSettings\"\nExample: \"organizations/12345/cmekSettings\".Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once configured, it applies to all projects and folders in the GCP organization.", + "description": "Required. The resource name for the CMEK settings to update. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" Example: \"organizations/12345/cmekSettings\".Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once configured, it applies to all projects and folders in the GCP organization.", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -1729,7 +1996,7 @@ ], "parameters": { "parent": { - "description": "Required. The parent resource in which to create the exclusion:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + "description": "Required. 
The parent resource in which to create the exclusion: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -1758,7 +2025,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of an existing exclusion to delete:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + "description": "Required. The resource name of an existing exclusion to delete: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", "location": "path", "pattern": "^organizations/[^/]+/exclusions/[^/]+$", "required": true, @@ -1784,7 +2051,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of an existing exclusion:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + "description": "Required. The resource name of an existing exclusion: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", "location": "path", "pattern": "^organizations/[^/]+/exclusions/[^/]+$", "required": true, @@ -1823,7 +2090,7 @@ "type": "string" }, "parent": { - "description": "Required. The parent resource whose exclusions are to be listed.\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "description": "Required. The parent resource whose exclusions are to be listed. \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -1851,7 +2118,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the exclusion to update:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + "description": "Required. 
The resource name of the exclusion to update: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", "location": "path", "pattern": "^organizations/[^/]+/exclusions/[^/]+$", "required": true, @@ -1882,6 +2149,66 @@ "resources": { "buckets": { "methods": { + "create": { + "description": "Creates a bucket that can be used to store log entries. Once a bucket has been created, the region cannot be changed.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets", + "httpMethod": "POST", + "id": "logging.organizations.locations.buckets.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "bucketId": { + "description": "Required. A client-assigned identifier such as \"my-bucket\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource in which to create the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" Example: \"projects/my-logging-project/locations/global\"", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/buckets", + "request": { + "$ref": "LogBucket" + }, + "response": { + "$ref": "LogBucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "delete": { + "description": "Deletes a bucket. Moves the bucket to the DELETE_REQUESTED state. After 7 days, the bucket will be purged and all logs in the bucket will be permanently deleted.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}", + "httpMethod": "DELETE", + "id": "logging.organizations.locations.buckets.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the bucket to delete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, "get": { "description": "Gets a bucket (Beta).", "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}", @@ -1892,7 +2219,7 @@ ], "parameters": { "name": { - "description": "Required. 
The resource name of the bucket:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + "description": "Required. The resource name of the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", "location": "path", "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+$", "required": true, @@ -1931,7 +2258,7 @@ "type": "string" }, "parent": { - "description": "Required. The parent resource whose buckets are to be listed:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]\"\nNote: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", + "description": "Required. The parent resource whose buckets are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" Note: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", "location": "path", "pattern": "^organizations/[^/]+/locations/[^/]+$", "required": true, @@ -1959,7 +2286,7 @@ ], "parameters": { "name": { - "description": "Required. The full resource name of the bucket to update.\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", + "description": "Required. The full resource name of the bucket to update. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", "location": "path", "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+$", "required": true, @@ -1983,6 +2310,35 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ] + }, + "undelete": { + "description": "Undeletes a bucket. 
A bucket that has been deleted may be undeleted within the grace period of 7 days.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}:undelete", + "httpMethod": "POST", + "id": "logging.organizations.locations.buckets.undelete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the bucket to undelete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}:undelete", + "request": { + "$ref": "UndeleteBucketRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] } } } @@ -2000,7 +2356,7 @@ ], "parameters": { "logName": { - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + "description": "Required. The resource name of the log to delete: \"projects/[PROJECT_ID]/logs/[LOG_ID]\" \"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\" \"folders/[FOLDER_ID]/logs/[LOG_ID]\" [LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", "location": "path", "pattern": "^organizations/[^/]+/logs/[^/]+$", "required": true, @@ -2037,7 +2393,7 @@ "type": "string" }, "parent": { - "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "description": "Required. The resource name that owns the logs: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -2069,7 +2425,7 @@ ], "parameters": { "parent": { - "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + "description": "Required. 
The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -2103,7 +2459,7 @@ ], "parameters": { "sinkName": { - "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^organizations/[^/]+/sinks/[^/]+$", "required": true, @@ -2129,7 +2485,7 @@ ], "parameters": { "sinkName": { - "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. The resource name of the sink: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^organizations/[^/]+/sinks/[^/]+$", "required": true, @@ -2168,7 +2524,7 @@ "type": "string" }, "parent": { - "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "description": "Required. The parent resource whose sinks are to be listed: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -2196,19 +2552,19 @@ ], "parameters": { "sinkName": { - "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^organizations/[^/]+/sinks/[^/]+$", "required": true, "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. 
When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, "updateMask": { - "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -2236,19 +2592,19 @@ ], "parameters": { "sinkName": { - "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^organizations/[^/]+/sinks/[^/]+$", "required": true, "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. 
When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, "updateMask": { - "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -2284,7 +2640,7 @@ ], "parameters": { "parent": { - "description": "Required. The parent resource in which to create the exclusion:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + "description": "Required. The parent resource in which to create the exclusion: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -2313,7 +2669,7 @@ ], "parameters": { "name": { - "description": "Required. 
The resource name of an existing exclusion to delete:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + "description": "Required. The resource name of an existing exclusion to delete: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", "location": "path", "pattern": "^projects/[^/]+/exclusions/[^/]+$", "required": true, @@ -2339,7 +2695,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of an existing exclusion:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + "description": "Required. The resource name of an existing exclusion: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", "location": "path", "pattern": "^projects/[^/]+/exclusions/[^/]+$", "required": true, @@ -2378,7 +2734,7 @@ "type": "string" }, "parent": { - "description": "Required. The parent resource whose exclusions are to be listed.\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "description": "Required. The parent resource whose exclusions are to be listed. \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -2406,7 +2762,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the exclusion to update:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + "description": "Required. The resource name of the exclusion to update: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", "location": "path", "pattern": "^projects/[^/]+/exclusions/[^/]+$", "required": true, @@ -2437,6 +2793,66 @@ "resources": { "buckets": { "methods": { + "create": { + "description": "Creates a bucket that can be used to store log entries. 
Once a bucket has been created, the region cannot be changed.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets", + "httpMethod": "POST", + "id": "logging.projects.locations.buckets.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "bucketId": { + "description": "Required. A client-assigned identifier such as \"my-bucket\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource in which to create the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" Example: \"projects/my-logging-project/locations/global\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/buckets", + "request": { + "$ref": "LogBucket" + }, + "response": { + "$ref": "LogBucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "delete": { + "description": "Deletes a bucket. Moves the bucket to the DELETE_REQUESTED state. After 7 days, the bucket will be purged and all logs in the bucket will be permanently deleted.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}", + "httpMethod": "DELETE", + "id": "logging.projects.locations.buckets.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the bucket to delete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, "get": { "description": "Gets a bucket (Beta).", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}", @@ -2447,7 +2863,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name of the bucket:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + "description": "Required. 
The resource name of the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+$", "required": true, @@ -2486,7 +2902,7 @@ "type": "string" }, "parent": { - "description": "Required. The parent resource whose buckets are to be listed:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]\"\nNote: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", + "description": "Required. The parent resource whose buckets are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" Note: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -2514,7 +2930,7 @@ ], "parameters": { "name": { - "description": "Required. The full resource name of the bucket to update.\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", + "description": "Required. The full resource name of the bucket to update. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+$", "required": true, @@ -2538,6 +2954,35 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ] + }, + "undelete": { + "description": "Undeletes a bucket. A bucket that has been deleted may be undeleted within the grace period of 7 days.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}:undelete", + "httpMethod": "POST", + "id": "logging.projects.locations.buckets.undelete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the bucket to undelete. 
\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}:undelete", + "request": { + "$ref": "UndeleteBucketRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] } } } @@ -2555,7 +3000,7 @@ ], "parameters": { "logName": { - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + "description": "Required. The resource name of the log to delete: \"projects/[PROJECT_ID]/logs/[LOG_ID]\" \"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\" \"folders/[FOLDER_ID]/logs/[LOG_ID]\" [LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", "location": "path", "pattern": "^projects/[^/]+/logs/[^/]+$", "required": true, @@ -2592,7 +3037,7 @@ "type": "string" }, "parent": { - "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "description": "Required. The resource name that owns the logs: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -2624,7 +3069,7 @@ ], "parameters": { "parent": { - "description": "Required. The resource name of the project in which to create the metric:\n\"projects/[PROJECT_ID]\"\nThe new metric must be provided in the request.", + "description": "Required. The resource name of the project in which to create the metric: \"projects/[PROJECT_ID]\" The new metric must be provided in the request.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -2654,7 +3099,7 @@ ], "parameters": { "metricName": { - "description": "Required. The resource name of the metric to delete:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", + "description": "Required. The resource name of the metric to delete: \"projects/[PROJECT_ID]/metrics/[METRIC_ID]\" ", "location": "path", "pattern": "^projects/[^/]+/metrics/[^/]+$", "required": true, @@ -2681,7 +3126,7 @@ ], "parameters": { "metricName": { - "description": "Required. The resource name of the desired metric:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", + "description": "Required. 
The resource name of the desired metric: \"projects/[PROJECT_ID]/metrics/[METRIC_ID]\" ", "location": "path", "pattern": "^projects/[^/]+/metrics/[^/]+$", "required": true, @@ -2720,7 +3165,7 @@ "type": "string" }, "parent": { - "description": "Required. The name of the project containing the metrics:\n\"projects/[PROJECT_ID]\"\n", + "description": "Required. The name of the project containing the metrics: \"projects/[PROJECT_ID]\" ", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -2748,7 +3193,7 @@ ], "parameters": { "metricName": { - "description": "Required. The resource name of the metric to update:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\nThe updated metric must be provided in the request and it's name field must be the same as [METRIC_ID] If the metric does not exist in [PROJECT_ID], then a new metric is created.", + "description": "Required. The resource name of the metric to update: \"projects/[PROJECT_ID]/metrics/[METRIC_ID]\" The updated metric must be provided in the request and it's name field must be the same as [METRIC_ID] If the metric does not exist in [PROJECT_ID], then a new metric is created.", "location": "path", "pattern": "^projects/[^/]+/metrics/[^/]+$", "required": true, @@ -2782,7 +3227,7 @@ ], "parameters": { "parent": { - "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -2816,7 +3261,7 @@ ], "parameters": { "sinkName": { - "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^projects/[^/]+/sinks/[^/]+$", "required": true, @@ -2842,7 +3287,7 @@ ], "parameters": { "sinkName": { - "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. 
The resource name of the sink: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^projects/[^/]+/sinks/[^/]+$", "required": true, @@ -2881,7 +3326,7 @@ "type": "string" }, "parent": { - "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "description": "Required. The parent resource whose sinks are to be listed: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -2909,19 +3354,19 @@ ], "parameters": { "sinkName": { - "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^projects/[^/]+/sinks/[^/]+$", "required": true, "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, "updateMask": { - "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. 
name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -2949,19 +3394,19 @@ ], "parameters": { "sinkName": { - "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^projects/[^/]+/sinks/[^/]+$", "required": true, "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, "updateMask": { - "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. 
name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -2995,7 +3440,7 @@ ], "parameters": { "parent": { - "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -3029,7 +3474,7 @@ ], "parameters": { "sinkName": { - "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^[^/]+/[^/]+/sinks/[^/]+$", "required": true, @@ -3055,7 +3500,7 @@ ], "parameters": { "sinkName": { - "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. 
The resource name of the sink: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^[^/]+/[^/]+/sinks/[^/]+$", "required": true, @@ -3094,7 +3539,7 @@ "type": "string" }, "parent": { - "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "description": "Required. The parent resource whose sinks are to be listed: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -3122,19 +3567,19 @@ ], "parameters": { "sinkName": { - "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", "location": "path", "pattern": "^[^/]+/[^/]+/sinks/[^/]+$", "required": true, "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, "updateMask": { - "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. 
name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -3166,7 +3611,7 @@ ], "parameters": { "name": { - "description": "Required. The resource for which to retrieve CMEK settings.\n\"projects/[PROJECT_ID]/cmekSettings\"\n\"organizations/[ORGANIZATION_ID]/cmekSettings\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\"\n\"folders/[FOLDER_ID]/cmekSettings\"\nExample: \"organizations/12345/cmekSettings\".Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once configured, it applies to all projects and folders in the GCP organization.", + "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" Example: \"organizations/12345/cmekSettings\".Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once configured, it applies to all projects and folders in the GCP organization.", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -3194,7 +3639,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name for the CMEK settings to update.\n\"projects/[PROJECT_ID]/cmekSettings\"\n\"organizations/[ORGANIZATION_ID]/cmekSettings\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\"\n\"folders/[FOLDER_ID]/cmekSettings\"\nExample: \"organizations/12345/cmekSettings\".Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once configured, it applies to all projects and folders in the GCP organization.", + "description": "Required. The resource name for the CMEK settings to update. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" Example: \"organizations/12345/cmekSettings\".Note: CMEK for the Logs Router can currently only be configured for GCP organizations. 
Once configured, it applies to all projects and folders in the GCP organization.", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -3222,7 +3667,7 @@ } } }, - "revision": "20200501", + "revision": "20200801", "rootUrl": "https://logging.googleapis.com/", "schemas": { "BigQueryOptions": { @@ -3235,6 +3680,7 @@ }, "usesTimestampColumnPartitioning": { "description": "Output only. True if new timestamp column based partitioning is in use, false if legacy ingestion-time partitioning is in use. All new sinks will have this field set true and will use timestamp column based partitioning. If use_partitioned_tables is false, this value has no meaning and will be false. Legacy sinks using partitioned tables will have this field set to false.", + "readOnly": true, "type": "boolean" } }, @@ -3264,28 +3710,30 @@ "id": "CmekSettings", "properties": { "kmsKeyName": { - "description": "The resource name for the configured Cloud KMS key.KMS key name format: \"projects/PROJECT_ID/locations/LOCATION/keyRings/KEYRING/cryptoKeys/KEY\"For example: \"projects/my-project-id/locations/my-region/keyRings/key-ring-name/cryptoKeys/key-name\"To enable CMEK for the Logs Router, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked.To disable CMEK for the Logs Router, set this field to an empty string.See Enabling CMEK for Logs Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "The resource name for the configured Cloud KMS key.KMS key name format: \"projects/PROJECT_ID/locations/LOCATION/keyRings/KEYRING/cryptoKeys/KEY\"For example: \"projects/my-project-id/locations/my-region/keyRings/key-ring-name/cryptoKeys/key-name\"To enable CMEK for the Logs Router, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked.To disable CMEK for the Logs Router, set this field to an empty string.See Enabling CMEK for Logs Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", "type": "string" }, "name": { "description": "Output only. The resource name of the CMEK settings.", + "readOnly": true, "type": "string" }, "serviceAccountId": { "description": "Output only. The service account that will be used by the Logs Router to access your Cloud KMS key.Before enabling CMEK for Logs Router, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that the Logs Router will use to access your Cloud KMS key. 
Use GetCmekSettings to obtain the service account ID.See Enabling CMEK for Logs Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "readOnly": true, "type": "string" } }, "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}.", "id": "Empty", "properties": {}, "type": "object" }, "Explicit": { - "description": "Specifies a set of buckets with arbitrary widths.There are size(bounds) + 1 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): boundsi Lower bound (1 \u003c= i \u003c N); boundsi - 1The bounds field must contain at least one element. If bounds has only one element, then there are no finite buckets, and that single element is the common boundary of the overflow and underflow buckets.", + "description": "Specifies a set of buckets with arbitrary widths.There are size(bounds) + 1 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): boundsi Lower bound (1 \u003c= i \u003c N); boundsi - 1The bounds field must contain at least one element. If bounds has only one element, then there are no finite buckets, and that single element is the common boundary of the overflow and underflow buckets.", "id": "Explicit", "properties": { "bounds": { @@ -3300,7 +3748,7 @@ "type": "object" }, "Exponential": { - "description": "Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): scale * (growth_factor ^ i). Lower bound (1 \u003c= i \u003c N): scale * (growth_factor ^ (i - 1)).", + "description": "Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): scale * (growth_factor ^ i). Lower bound (1 \u003c= i \u003c N): scale * (growth_factor ^ (i - 1)).", "id": "Exponential", "properties": { "growthFactor": { @@ -3387,7 +3835,7 @@ "type": "integer" }, "userAgent": { - "description": "The user agent sent by the client. Example: \"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET\nCLR 1.0.3705)\".", + "description": "The user agent sent by the client. Example: \"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET CLR 1.0.3705)\".", "type": "string" } }, @@ -3423,7 +3871,7 @@ "type": "object" }, "Linear": { - "description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). 
Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i). Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", + "description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i). Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", "id": "Linear", "properties": { "numFiniteBuckets": { @@ -3493,7 +3941,7 @@ "type": "string" }, "pageSize": { - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of next_page_token in the response indicates that more results might be available.", + "description": "Optional. The maximum number of results to return from this request. Default is 50. If the value is negative or exceeds 1000, the request is rejected. The presence of next_page_token in the response indicates that more results might be available.", "format": "int32", "type": "integer" }, @@ -3509,7 +3957,7 @@ "type": "array" }, "resourceNames": { - "description": "Required. Names of one or more parent resources from which to retrieve log entries:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nProjects listed in the project_ids field are added to this list.", + "description": "Required. Names of one or more parent resources from which to retrieve log entries: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Projects listed in the project_ids field are added to this list.", "items": { "type": "string" }, @@ -3615,6 +4063,7 @@ "createTime": { "description": "Output only. The creation timestamp of the bucket. This is not set for any of the default buckets.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "description": { @@ -3633,10 +4082,15 @@ "The normal and active state.", "The bucket has been marked for deletion by the user." ], + "readOnly": true, "type": "string" }, + "locked": { + "description": "Whether the bucket has been locked. The retention period on a locked bucket may not be changed. Locked buckets may only be deleted if they are empty.", + "type": "boolean" + }, "name": { - "description": "The resource name of the bucket. For example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id The supported locations are: \"global\" \"us-central1\"For the location of global it is unspecified where logs are actually stored. Once a bucket has been created, the location can not be changed.", + "description": "The resource name of the bucket. For example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id The supported locations are: \"global\"For the location of global it is unspecified where logs are actually stored. Once a bucket has been created, the location can not be changed.", "type": "string" }, "retentionDays": { @@ -3647,6 +4101,7 @@ "updateTime": { "description": "Output only. 
The last update timestamp of the bucket.", "format": "google-datetime", + "readOnly": true, "type": "string" } }, @@ -3661,7 +4116,7 @@ "description": "Optional. Information about the HTTP request associated with this log entry, if applicable." }, "insertId": { - "description": "Optional. A unique identifier for the log entry. If you provide a value, then Logging considers other log entries in the same project, with the same timestamp, and with the same insert_id to be duplicates which are removed in a single query result. However, there are no guarantees of de-duplication in the export of logs.If the insert_id is omitted when writing a log entry, the Logging API assigns its own unique identifier in this field.In queries, the insert_id is also used to order log entries that have the same log_name and timestamp values.", + "description": "Optional. A unique identifier for the log entry. If you provide a value, then Logging considers other log entries in the same project, with the same timestamp, and with the same insert_id to be duplicates which are removed in a single query result. However, there are no guarantees of de-duplication in the export of logs.If the insert_id is omitted when writing a log entry, the Logging API assigns its own unique identifier in this field.In queries, the insert_id is also used to order log entries that have the same log_name and timestamp values.", "type": "string" }, "jsonPayload": { @@ -3680,12 +4135,13 @@ "type": "object" }, "logName": { - "description": "Required. The resource name of the log to which this log entry belongs:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\nA project number may be used in place of PROJECT_ID. The project number is translated to its corresponding PROJECT_ID internally and the log_name field will contain PROJECT_ID in queries and exports.[LOG_ID] must be URL-encoded within log_name. Example: \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". [LOG_ID] must be less than 512 characters long and can only include the following characters: upper and lower case alphanumeric characters, forward-slash, underscore, hyphen, and period.For backward compatibility, if log_name begins with a forward-slash, such as /projects/..., then the log entry is ingested as usual but the forward-slash is removed. Listing the log entry will not show the leading slash and filtering for a log name with a leading slash will never return any results.", + "description": "Required. The resource name of the log to which this log entry belongs: \"projects/[PROJECT_ID]/logs/[LOG_ID]\" \"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\" \"folders/[FOLDER_ID]/logs/[LOG_ID]\" A project number may be used in place of PROJECT_ID. The project number is translated to its corresponding PROJECT_ID internally and the log_name field will contain PROJECT_ID in queries and exports.[LOG_ID] must be URL-encoded within log_name. Example: \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". 
[LOG_ID] must be less than 512 characters long and can only include the following characters: upper and lower case alphanumeric characters, forward-slash, underscore, hyphen, and period.For backward compatibility, if log_name begins with a forward-slash, such as /projects/..., then the log entry is ingested as usual but the forward-slash is removed. Listing the log entry will not show the leading slash and filtering for a log name with a leading slash will never return any results.", "type": "string" }, "metadata": { "$ref": "MonitoredResourceMetadata", - "description": "Output only. Deprecated. Additional metadata about the monitored resource.Only k8s_container, k8s_pod, and k8s_node MonitoredResources have this field populated for GKE versions older than 1.12.6. For GKE versions 1.12.6 and above, the metadata field has been deprecated. The Kubernetes pod labels that used to be in metadata.userLabels will now be present in the labels field with a key prefix of k8s-pod/. The system labels that were present in the metadata.systemLabels field will no longer be available in the LogEntry." + "description": "Output only. Deprecated. Additional metadata about the monitored resource.Only k8s_container, k8s_pod, and k8s_node MonitoredResources have this field populated for GKE versions older than 1.12.6. For GKE versions 1.12.6 and above, the metadata field has been deprecated. The Kubernetes pod labels that used to be in metadata.userLabels will now be present in the labels field with a key prefix of k8s-pod/. The system labels that were present in the metadata.systemLabels field will no longer be available in the LogEntry.", + "readOnly": true }, "operation": { "$ref": "LogEntryOperation", @@ -3696,12 +4152,13 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The log entry payload, represented as a protocol buffer. Some Google Cloud Platform services use this field for their log entry payloads.The following protocol buffer types are supported; user-defined types are not supported:\"type.googleapis.com/google.cloud.audit.AuditLog\" \"type.googleapis.com/google.appengine.logging.v1.RequestLog\"", + "description": "The log entry payload, represented as a protocol buffer. Some Google Cloud Platform services use this field for their log entry payloads.The following protocol buffer types are supported; user-defined types are not supported:\"type.googleapis.com/google.cloud.audit.AuditLog\" \"type.googleapis.com/google.appengine.logging.v1.RequestLog\"", "type": "object" }, "receiveTimestamp": { "description": "Output only. The time the log entry was received by Logging.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "resource": { @@ -3812,6 +4269,7 @@ "createTime": { "description": "Output only. The creation timestamp of the exclusion.This field may not be present for older exclusions.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "description": { @@ -3833,6 +4291,7 @@ "updateTime": { "description": "Output only. The last update timestamp of the exclusion.This field may not be present for older exclusions.", "format": "google-datetime", + "readOnly": true, "type": "string" } }, @@ -3895,6 +4354,7 @@ "createTime": { "description": "Output only. 
The creation timestamp of the metric.This field may not be present for older metrics.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "description": { @@ -3902,7 +4362,7 @@ "type": "string" }, "filter": { - "description": "Required. An advanced logs filter (https://cloud.google.com/logging/docs/view/advanced_filters) which is used to match log entries. Example:\n\"resource.type=gae_app AND severity\u003e=ERROR\"\nThe maximum length of the filter is 20000 characters.", + "description": "Required. An advanced logs filter (https://cloud.google.com/logging/docs/view/advanced_filters) which is used to match log entries. Example: \"resource.type=gae_app AND severity\u003e=ERROR\" The maximum length of the filter is 20000 characters.", "type": "string" }, "labelExtractors": { @@ -3923,10 +4383,11 @@ "updateTime": { "description": "Output only. The last update timestamp of the metric.This field may not be present for older metrics.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "valueExtractor": { - "description": "Optional. A value_extractor is required when using a distribution logs-based metric to extract the values to record from a log entry. Two functions are supported for value extraction: EXTRACT(field) or REGEXP_EXTRACT(field, regex). The argument are: 1. field: The name of the log entry field from which the value is to be extracted. 2. regex: A regular expression using the Google RE2 syntax (https://github.com/google/re2/wiki/Syntax) with a single capture group to extract data from the specified log entry field. The value of the field is converted to a string before applying the regex. It is an error to specify a regex that does not include exactly one capture group.The result of the extraction must be convertible to a double type, as the distribution always records double values. If either the extraction or the conversion to double fails, then those values are not recorded in the distribution.Example: REGEXP_EXTRACT(jsonPayload.request, \".*quantity=(\\d+).*\")", + "description": "Optional. A value_extractor is required when using a distribution logs-based metric to extract the values to record from a log entry. Two functions are supported for value extraction: EXTRACT(field) or REGEXP_EXTRACT(field, regex). The argument are: 1. field: The name of the log entry field from which the value is to be extracted. 2. regex: A regular expression using the Google RE2 syntax (https://github.com/google/re2/wiki/Syntax) with a single capture group to extract data from the specified log entry field. The value of the field is converted to a string before applying the regex. It is an error to specify a regex that does not include exactly one capture group.The result of the extraction must be convertible to a double type, as the distribution always records double values. If either the extraction or the conversion to double fails, then those values are not recorded in the distribution.Example: REGEXP_EXTRACT(jsonPayload.request, \".*quantity=(\\d+).*\")", "type": "string" }, "version": { @@ -3955,6 +4416,7 @@ "createTime": { "description": "Output only. The creation timestamp of the sink.This field may not be present for older sinks.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "description": { @@ -3962,19 +4424,26 @@ "type": "string" }, "destination": { - "description": "Required. 
The export destination:\n\"storage.googleapis.com/[GCS_BUCKET]\"\n\"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]\"\n\"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]\"\nThe sink's writer_identity, set when the sink is created, must have permission to write to the destination or else the log entries are not exported. For more information, see Exporting Logs with Sinks (https://cloud.google.com/logging/docs/api/tasks/exporting-logs).", + "description": "Required. The export destination: \"storage.googleapis.com/[GCS_BUCKET]\" \"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]\" \"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]\" The sink's writer_identity, set when the sink is created, must have permission to write to the destination or else the log entries are not exported. For more information, see Exporting Logs with Sinks (https://cloud.google.com/logging/docs/api/tasks/exporting-logs).", "type": "string" }, "disabled": { "description": "Optional. If set to True, then this sink is disabled and it does not export any log entries.", "type": "boolean" }, + "exclusions": { + "description": "Optional. Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both filter and one of exclusion_filters it will not be exported.", + "items": { + "$ref": "LogExclusion" + }, + "type": "array" + }, "filter": { - "description": "Optional. An advanced logs filter (https://cloud.google.com/logging/docs/view/advanced-queries). The only exported log entries are those that are in the resource owning the sink and that match the filter. For example:\nlogName=\"projects/[PROJECT_ID]/logs/[LOG_ID]\" AND severity\u003e=ERROR\n", + "description": "Optional. An advanced logs filter (https://cloud.google.com/logging/docs/view/advanced-queries). The only exported log entries are those that are in the resource owning the sink and that match the filter. For example: logName=\"projects/[PROJECT_ID]/logs/[LOG_ID]\" AND severity\u003e=ERROR ", "type": "string" }, "includeChildren": { - "description": "Optional. This field applies only to sinks owned by organizations and folders. If the field is false, the default, only the logs owned by the sink's parent resource are available for export. If the field is true, then logs from all the projects, folders, and billing accounts contained in the sink's parent resource are also available for export. Whether a particular log entry from the children is exported depends on the sink's filter expression. For example, if this field is true, then the filter resource.type=gce_instance would export all Compute Engine VM instance log entries from all projects in the sink's parent. To only export entries from certain child projects, filter on the project part of the log name:\nlogName:(\"projects/test-project1/\" OR \"projects/test-project2/\") AND\nresource.type=gce_instance\n", + "description": "Optional. This field applies only to sinks owned by organizations and folders. If the field is false, the default, only the logs owned by the sink's parent resource are available for export. If the field is true, then logs from all the projects, folders, and billing accounts contained in the sink's parent resource are also available for export. Whether a particular log entry from the children is exported depends on the sink's filter expression. 
For example, if this field is true, then the filter resource.type=gce_instance would export all Compute Engine VM instance log entries from all projects in the sink's parent. To only export entries from certain child projects, filter on the project part of the log name: logName:(\"projects/test-project1/\" OR \"projects/test-project2/\") AND resource.type=gce_instance ", "type": "boolean" }, "name": { @@ -3998,17 +4467,19 @@ "updateTime": { "description": "Output only. The last update timestamp of the sink.This field may not be present for older sinks.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "writerIdentity": { - "description": "Output only. An IAM identity\u0026mdash;a service account or group\u0026mdash;under which Logging writes the exported log entries to the sink's destination. This field is set by sinks.create and sinks.update based on the value of unique_writer_identity in those methods.Until you grant this identity write-access to the destination, log entry exports from this sink will fail. For more information, see Granting Access for a Resource (https://cloud.google.com/iam/docs/granting-roles-to-service-accounts#granting_access_to_a_service_account_for_a_resource). Consult the destination service's documentation to determine the appropriate IAM roles to assign to the identity.", + "description": "Output only. An IAM identity—a service account or group—under which Logging writes the exported log entries to the sink's destination. This field is set by sinks.create and sinks.update based on the value of unique_writer_identity in those methods.Until you grant this identity write-access to the destination, log entry exports from this sink will fail. For more information, see Granting Access for a Resource (https://cloud.google.com/iam/docs/granting-roles-to-service-accounts#granting_access_to_a_service_account_for_a_resource). Consult the destination service's documentation to determine the appropriate IAM roles to assign to the identity.", + "readOnly": true, "type": "string" } }, "type": "object" }, "MetricDescriptor": { - "description": "Defines a metric type and its schema. Once a metric descriptor is created, deleting or altering it stops data collection and makes the metric type's existing data unusable.", + "description": "Defines a metric type and its schema. Once a metric descriptor is created, deleting or altering it stops data collection and makes the metric type's existing data unusable.The following are specific rules for service defined Monitoring metric descriptors: type, metric_kind, value_type and description fields are all required. The unit field must be specified if the value_type is any of DOUBLE, INT64, DISTRIBUTION. Maximum of default 500 metric descriptors per service is allowed. Maximum of default 10 labels per metric descriptor is allowed.The default maximum limit can be overridden. Please follow https://cloud.google.com/monitoring/quotas", "id": "MetricDescriptor", "properties": { "description": { @@ -4020,7 +4491,7 @@ "type": "string" }, "labels": { - "description": "The set of labels that can be used to describe a specific instance of this metric type. 
For example, the appengine.googleapis.com/http/server/response_latencies metric type has a label for the HTTP response code, response_code, so you can look at latencies for successful responses or just for responses that failed.", + "description": "The set of labels that can be used to describe a specific instance of this metric type.The label key name must follow: Only upper and lower-case letters, digits and underscores (_) are allowed. Label name must start with a letter or digit. The maximum length of a label name is 100 characters.For example, the appengine.googleapis.com/http/server/response_latencies metric type has a label for the HTTP response code, response_code, so you can look at latencies for successful responses or just for responses that failed.", "items": { "$ref": "LabelDescriptor" }, @@ -4082,11 +4553,11 @@ "type": "string" }, "type": { - "description": "The metric type, including its DNS name prefix. The type is not URL-encoded. All user-defined metric types have the DNS name custom.googleapis.com or external.googleapis.com. Metric types should use a natural hierarchical grouping. For example:\n\"custom.googleapis.com/invoice/paid/amount\"\n\"external.googleapis.com/prometheus/up\"\n\"appengine.googleapis.com/http/server/response_latencies\"\n", + "description": "The metric type, including its DNS name prefix. The type is not URL-encoded.All service defined metrics must be prefixed with the service name, in the format of {service name}/{relative metric name}, such as cloudsql.googleapis.com/database/cpu/utilization. The relative metric name must follow: Only upper and lower-case letters, digits, '/' and underscores '_' are allowed. The maximum number of characters allowed for the relative_metric_name is 100.All user-defined metric types have the DNS name custom.googleapis.com, external.googleapis.com, or logging.googleapis.com/user/.Metric types should use a natural hierarchical grouping. For example: \"custom.googleapis.com/invoice/paid/amount\" \"external.googleapis.com/prometheus/up\" \"appengine.googleapis.com/http/server/response_latencies\" ", "type": "string" }, "unit": { - "description": "The units in which the metric value is reported. It is only applicable if the value_type is INT64, DOUBLE, or DISTRIBUTION. The unit defines the representation of the stored metric values.Different systems may scale the values to be more easily displayed (so a value of 0.02KBy might be displayed as 20By, and a value of 3523KBy might be displayed as 3.5MBy). However, if the unit is KBy, then the value of the metric is always in thousands of bytes, no matter how it may be displayed..If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an INT64 CUMULATIVE metric whose unit is s{CPU} (or equivalently 1s{CPU} or just s). 
If the job uses 12,005 CPU-seconds, then the value is written as 12005.Alternatively, if you want a custom metric to record data in a more granular way, you can create a DOUBLE CUMULATIVE metric whose unit is ks{CPU}, and then write the value 12.005 (which is 12005/1000), or use Kis{CPU} and write 11.723 (which is 12005/1024).The supported units are a subset of The Unified Code for Units of Measure (http://unitsofmeasure.org/ucum.html) standard:Basic units (UNIT)\nbit bit\nBy byte\ns second\nmin minute\nh hour\nd dayPrefixes (PREFIX)\nk kilo (10^3)\nM mega (10^6)\nG giga (10^9)\nT tera (10^12)\nP peta (10^15)\nE exa (10^18)\nZ zetta (10^21)\nY yotta (10^24)\nm milli (10^-3)\nu micro (10^-6)\nn nano (10^-9)\np pico (10^-12)\nf femto (10^-15)\na atto (10^-18)\nz zepto (10^-21)\ny yocto (10^-24)\nKi kibi (2^10)\nMi mebi (2^20)\nGi gibi (2^30)\nTi tebi (2^40)\nPi pebi (2^50)GrammarThe grammar also includes these connectors:\n/ division or ratio (as an infix operator). For examples, kBy/{email} or MiBy/10ms (although you should almost never have /s in a metric unit; rates should always be computed at query time from the underlying cumulative or delta value).\n. multiplication or composition (as an infix operator). For examples, GBy.d or k{watt}.h.The grammar for a unit is as follows:\nExpression = Component { \".\" Component } { \"/\" Component } ;\n\nComponent = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ]\n | Annotation\n | \"1\"\n ;\n\nAnnotation = \"{\" NAME \"}\" ;\nNotes:\nAnnotation is just a comment if it follows a UNIT. If the annotation is used alone, then the unit is equivalent to 1. For examples, {request}/s == 1/s, By{transmitted}/s == By/s.\nNAME is a sequence of non-blank printable ASCII characters not containing { or }.\n1 represents a unitary dimensionless unit (https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in 1/s. It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as 1/d or {new-users}/d (and a metric value 5 would mean \"5 new users). Alternatively, \"thousands of page views per day\" would be represented as 1000/d or k1/d or k{page_views}/d (and a metric value of 5.3 would mean \"5300 page views per day\").\n% represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value 3 means \"3 percent\").\n10^2.% indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value 0.03 means \"3 percent\").", + "description": "The units in which the metric value is reported. It is only applicable if the value_type is INT64, DOUBLE, or DISTRIBUTION. The unit defines the representation of the stored metric values.Different systems may scale the values to be more easily displayed (so a value of 0.02KBy might be displayed as 20By, and a value of 3523KBy might be displayed as 3.5MBy). However, if the unit is KBy, then the value of the metric is always in thousands of bytes, no matter how it may be displayed..If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an INT64 CUMULATIVE metric whose unit is s{CPU} (or equivalently 1s{CPU} or just s). 
If the job uses 12,005 CPU-seconds, then the value is written as 12005.Alternatively, if you want a custom metric to record data in a more granular way, you can create a DOUBLE CUMULATIVE metric whose unit is ks{CPU}, and then write the value 12.005 (which is 12005/1000), or use Kis{CPU} and write 11.723 (which is 12005/1024).The supported units are a subset of The Unified Code for Units of Measure (http://unitsofmeasure.org/ucum.html) standard:Basic units (UNIT) bit bit By byte s second min minute h hour d day 1 dimensionlessPrefixes (PREFIX) k kilo (10^3) M mega (10^6) G giga (10^9) T tera (10^12) P peta (10^15) E exa (10^18) Z zetta (10^21) Y yotta (10^24) m milli (10^-3) u micro (10^-6) n nano (10^-9) p pico (10^-12) f femto (10^-15) a atto (10^-18) z zepto (10^-21) y yocto (10^-24) Ki kibi (2^10) Mi mebi (2^20) Gi gibi (2^30) Ti tebi (2^40) Pi pebi (2^50)GrammarThe grammar also includes these connectors: / division or ratio (as an infix operator). For examples, kBy/{email} or MiBy/10ms (although you should almost never have /s in a metric unit; rates should always be computed at query time from the underlying cumulative or delta value). . multiplication or composition (as an infix operator). For examples, GBy.d or k{watt}.h.The grammar for a unit is as follows: Expression = Component { \".\" Component } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: Annotation is just a comment if it follows a UNIT. If the annotation is used alone, then the unit is equivalent to 1. For examples, {request}/s == 1/s, By{transmitted}/s == By/s. NAME is a sequence of non-blank printable ASCII characters not containing { or }. 1 represents a unitary dimensionless unit (https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in 1/s. It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as 1/d or {new-users}/d (and a metric value 5 would mean \"5 new users). Alternatively, \"thousands of page views per day\" would be represented as 1000/d or k1/d or k{page_views}/d (and a metric value of 5.3 would mean \"5300 page views per day\"). % represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value 3 means \"3 percent\"). 10^2.% indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value 0.03 means \"3 percent\").", "type": "string" }, "valueType": { @@ -4156,7 +4627,7 @@ "type": "object" }, "MonitoredResource": { - "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The type field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the labels field identifies the actual resource and its attributes according to the schema. For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for \"gce_instance\" has labels \"instance_id\" and \"zone\":\n{ \"type\": \"gce_instance\",\n \"labels\": { \"instance_id\": \"12345678901234\",\n \"zone\": \"us-central1-a\" }}\n", + "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. 
Examples include virtual machine instances, databases, and storage devices such as disks. The type field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the labels field identifies the actual resource and its attributes according to the schema. For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for \"gce_instance\" has labels \"instance_id\" and \"zone\": { \"type\": \"gce_instance\", \"labels\": { \"instance_id\": \"12345678901234\", \"zone\": \"us-central1-a\" }} ", "id": "MonitoredResource", "properties": { "labels": { @@ -4174,7 +4645,7 @@ "type": "object" }, "MonitoredResourceDescriptor": { - "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of \"gce_instance\" and specifies the use of the labels \"instance_id\" and \"zone\" to identify particular VM instances.Different APIs can support different monitored resource types. APIs generally provide a list method that returns the monitored resource descriptors used by the API.", + "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of \"gce_instance\" and specifies the use of the labels \"instance_id\" and \"zone\" to identify particular VM instances.Different services can support different monitored resource types.The following are specific rules to service defined monitored resources for Monitoring and Logging: The type, display_name, description, labels and launch_stage fields are all required. The first label of the monitored resource descriptor must be resource_container. There are legacy monitored resource descritptors start with project_id. It must include a location label. Maximum of default 5 service defined monitored resource descriptors is allowed per service. Maximum of default 10 labels per monitored resource is allowed.The default maximum limit can be overridden. Please follow https://cloud.google.com/monitoring/quotas", "id": "MonitoredResourceDescriptor", "properties": { "description": { @@ -4186,7 +4657,7 @@ "type": "string" }, "labels": { - "description": "Required. A set of labels used to describe instances of this monitored resource type. For example, an individual Google Cloud SQL database is identified by values for the labels \"database_id\" and \"zone\".", + "description": "Required. A set of labels used to describe instances of this monitored resource type. The label key name must follow: Only upper and lower-case letters, digits and underscores (_) are allowed. Label name must start with a letter or digit. The maximum length of a label name is 100 characters.For example, an individual Google Cloud SQL database is identified by values for the labels database_id and location.", "items": { "$ref": "LabelDescriptor" }, @@ -4221,7 +4692,7 @@ "type": "string" }, "type": { - "description": "Required. The monitored resource type. For example, the type \"cloudsql_database\" represents databases in Google Cloud SQL. The maximum length of this value is 256 characters.", + "description": "Required. The monitored resource type. 
For example, the type cloudsql_database represents databases in Google Cloud SQL.All service defined monitored resource types must be prefixed with the service name, in the format of {service name}/{relative resource name}. The relative resource name must follow: Only upper and lower-case letters and digits are allowed. It must start with upper case character and is recommended to use Upper Camel Case style. The maximum number of characters allowed for the relative_resource_name is 100.Note there are legacy service monitored resources not following this rule.", "type": "string" } }, @@ -4236,7 +4707,7 @@ "description": "Properties of the object.", "type": "any" }, - "description": "Output only. Values for predefined system metadata labels. System labels are a kind of metadata extracted by Google, including \"machine_image\", \"vpc\", \"subnet_id\", \"security_group\", \"name\", etc. System label values can be only strings, Boolean values, or a list of strings. For example:\n{ \"name\": \"my-test-instance\",\n \"security_group\": [\"a\", \"b\", \"c\"],\n \"spot_instance\": false }\n", + "description": "Output only. Values for predefined system metadata labels. System labels are a kind of metadata extracted by Google, including \"machine_image\", \"vpc\", \"subnet_id\", \"security_group\", \"name\", etc. System label values can be only strings, Boolean values, or a list of strings. For example: { \"name\": \"my-test-instance\", \"security_group\": [\"a\", \"b\", \"c\"], \"spot_instance\": false } ", "type": "object" }, "userLabels": { @@ -4438,6 +4909,12 @@ }, "type": "object" }, + "UndeleteBucketRequest": { + "description": "The parameters to UndeleteBucket.", + "id": "UndeleteBucketRequest", + "properties": {}, + "type": "object" + }, "WriteLogEntriesRequest": { "description": "The parameters to WriteLogEntries.", "id": "WriteLogEntriesRequest", @@ -4461,7 +4938,7 @@ "type": "object" }, "logName": { - "description": "Optional. A default log resource name that is assigned to all log entries in entries that do not specify a value for log_name:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example:\n\"projects/my-project-id/logs/syslog\"\n\"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\"\nThe permission logging.logEntries.create is needed on each project, organization, billing account, or folder that is receiving new log entries, whether the resource is specified in logName or in an individual log entry.", + "description": "Optional. A default log resource name that is assigned to all log entries in entries that do not specify a value for log_name: \"projects/[PROJECT_ID]/logs/[LOG_ID]\" \"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\" \"folders/[FOLDER_ID]/logs/[LOG_ID]\" [LOG_ID] must be URL-encoded. For example: \"projects/my-project-id/logs/syslog\" \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\" The permission logging.logEntries.create is needed on each project, organization, billing account, or folder that is receiving new log entries, whether the resource is specified in logName or in an individual log entry.", "type": "string" }, "partialSuccess": { @@ -4470,7 +4947,7 @@ }, "resource": { "$ref": "MonitoredResource", - "description": "Optional. 
A default monitored resource object that is assigned to all log entries in entries that do not specify a value for resource. Example:\n{ \"type\": \"gce_instance\",\n \"labels\": {\n \"zone\": \"us-central1-a\", \"instance_id\": \"00000000000000000000\" }}\nSee LogEntry." + "description": "Optional. A default monitored resource object that is assigned to all log entries in entries that do not specify a value for resource. Example: { \"type\": \"gce_instance\", \"labels\": { \"zone\": \"us-central1-a\", \"instance_id\": \"00000000000000000000\" }} See LogEntry." } }, "type": "object" diff --git a/vendor/google.golang.org/api/logging/v2/logging-gen.go b/vendor/google.golang.org/api/logging/v2/logging-gen.go index 14526190237..a1dae642bb8 100644 --- a/vendor/google.golang.org/api/logging/v2/logging-gen.go +++ b/vendor/google.golang.org/api/logging/v2/logging-gen.go @@ -81,6 +81,7 @@ const apiId = "logging:v2" const apiName = "logging" const apiVersion = "v2" const basePath = "https://logging.googleapis.com/" +const mtlsBasePath = "https://logging.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -112,6 +113,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -728,12 +730,9 @@ func (s *CmekSettings) MarshalJSON() ([]byte, error) { // Empty: A generic empty message that you can re-use to avoid defining // duplicated empty messages in your APIs. A typical example is to use // it as the request or the response type of an API method. For -// instance: -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// The JSON representation for Empty is empty JSON object {}. +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for Empty is empty +// JSON object {}. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -742,7 +741,7 @@ type Empty struct { // Explicit: Specifies a set of buckets with arbitrary widths.There are // size(bounds) + 1 (= N) buckets. Bucket i has the following -// boundaries:Upper bound (0 <= i < N-1): boundsi Lower bound (1 <= i < +// boundaries:Upper bound (0 <= i < N-1): boundsi Lower bound (1 <= i < // N); boundsi - 1The bounds field must contain at least one element. If // bounds has only one element, then there are no finite buckets, and // that single element is the common boundary of the overflow and @@ -779,8 +778,8 @@ func (s *Explicit) MarshalJSON() ([]byte, error) { // bucket represents a constant relative uncertainty on a specific value // in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket // i has the following boundaries:Upper bound (0 <= i < N-1): scale * -// (growth_factor ^ i). Lower bound (1 <= i < N): scale * -// (growth_factor ^ (i - 1)). +// (growth_factor ^ i). Lower bound (1 <= i < N): scale * (growth_factor +// ^ (i - 1)). type Exponential struct { // GrowthFactor: Must be greater than 1. GrowthFactor float64 `json:"growthFactor,omitempty"` @@ -894,8 +893,7 @@ type HttpRequest struct { Status int64 `json:"status,omitempty"` // UserAgent: The user agent sent by the client. 
Example: "Mozilla/4.0 - // (compatible; MSIE 6.0; Windows 98; Q312461; .NET - // CLR 1.0.3705)". + // (compatible; MSIE 6.0; Windows 98; Q312461; .NET CLR 1.0.3705)". UserAgent string `json:"userAgent,omitempty"` // ForceSendFields is a list of field names (e.g. "CacheFillBytes") to @@ -966,7 +964,7 @@ func (s *LabelDescriptor) MarshalJSON() ([]byte, error) { // constant absolute uncertainty on the specific value in the // bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has // the following boundaries:Upper bound (0 <= i < N-1): offset + (width -// * i). Lower bound (1 <= i < N): offset + (width * (i - 1)). +// * i). Lower bound (1 <= i < N): offset + (width * (i - 1)). type Linear struct { // NumFiniteBuckets: Must be greater than 0. NumFiniteBuckets int64 `json:"numFiniteBuckets,omitempty"` @@ -1115,9 +1113,9 @@ type ListLogEntriesRequest struct { OrderBy string `json:"orderBy,omitempty"` // PageSize: Optional. The maximum number of results to return from this - // request. Non-positive values are ignored. The presence of - // next_page_token in the response indicates that more results might be - // available. + // request. Default is 50. If the value is negative or exceeds 1000, the + // request is rejected. The presence of next_page_token in the response + // indicates that more results might be available. PageSize int64 `json:"pageSize,omitempty"` // PageToken: Optional. If present, then retrieve the next batch of @@ -1133,14 +1131,10 @@ type ListLogEntriesRequest struct { ProjectIds []string `json:"projectIds,omitempty"` // ResourceNames: Required. Names of one or more parent resources from - // which to retrieve log - // entries: - // "projects/[PROJECT_ID]" + // which to retrieve log entries: "projects/[PROJECT_ID]" // "organizations/[ORGANIZATION_ID]" - // "bi - // llingAccounts/[BILLING_ACCOUNT_ID]" - // "folders/[FOLDER_ID]" - // Projects listed in the project_ids field are added to this list. + // "billingAccounts/[BILLING_ACCOUNT_ID]" "folders/[FOLDER_ID]" Projects + // listed in the project_ids field are added to this list. ResourceNames []string `json:"resourceNames,omitempty"` // ForceSendFields is a list of field names (e.g. "Filter") to @@ -1387,11 +1381,16 @@ type LogBucket struct { // user. LifecycleState string `json:"lifecycleState,omitempty"` + // Locked: Whether the bucket has been locked. The retention period on a + // locked bucket may not be changed. Locked buckets may only be deleted + // if they are empty. + Locked bool `json:"locked,omitempty"` + // Name: The resource name of the bucket. For example: // "projects/my-project-id/locations/my-location/buckets/my-bucket-id - // The supported locations are: "global" "us-central1"For the location - // of global it is unspecified where logs are actually stored. Once a - // bucket has been created, the location can not be changed. + // The supported locations are: "global"For the location of global it is + // unspecified where logs are actually stored. Once a bucket has been + // created, the location can not be changed. Name string `json:"name,omitempty"` // RetentionDays: Logs will be retained by default for this amount of @@ -1456,18 +1455,14 @@ type LogEntry struct { Labels map[string]string `json:"labels,omitempty"` // LogName: Required. 
The resource name of the log to which this log - // entry - // belongs: - // "projects/[PROJECT_ID]/logs/[LOG_ID]" - // "organizations/[ORGANIZ - // ATION_ID]/logs/[LOG_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[L - // OG_ID]" - // "folders/[FOLDER_ID]/logs/[LOG_ID]" - // A project number may be used in place of PROJECT_ID. The project - // number is translated to its corresponding PROJECT_ID internally and - // the log_name field will contain PROJECT_ID in queries and - // exports.[LOG_ID] must be URL-encoded within log_name. Example: + // entry belongs: "projects/[PROJECT_ID]/logs/[LOG_ID]" + // "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + // "folders/[FOLDER_ID]/logs/[LOG_ID]" A project number may be used in + // place of PROJECT_ID. The project number is translated to its + // corresponding PROJECT_ID internally and the log_name field will + // contain PROJECT_ID in queries and exports.[LOG_ID] must be + // URL-encoded within log_name. Example: // "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Fa // ctivity". [LOG_ID] must be less than 512 characters long and can only // include the following characters: upper and lower case alphanumeric @@ -1822,9 +1817,9 @@ type LogMetric struct { // Filter: Required. An advanced logs filter // (https://cloud.google.com/logging/docs/view/advanced_filters) which - // is used to match log entries. Example: - // "resource.type=gae_app AND severity>=ERROR" - // The maximum length of the filter is 20000 characters. + // is used to match log entries. Example: "resource.type=gae_app AND + // severity>=ERROR" The maximum length of the filter is 20000 + // characters. Filter string `json:"filter,omitempty"` // LabelExtractors: Optional. A map from a label key string to an @@ -1879,19 +1874,18 @@ type LogMetric struct { // ValueExtractor: Optional. A value_extractor is required when using a // distribution logs-based metric to extract the values to record from a // log entry. Two functions are supported for value extraction: - // EXTRACT(field) or REGEXP_EXTRACT(field, regex). The argument are: 1. + // EXTRACT(field) or REGEXP_EXTRACT(field, regex). The argument are: 1. // field: The name of the log entry field from which the value is to be - // extracted. 2. regex: A regular expression using the Google RE2 - // syntax (https://github.com/google/re2/wiki/Syntax) with a single - // capture group to extract data from the specified log entry field. - // The value of the field is converted to a string before applying the - // regex. It is an error to specify a regex that does not include - // exactly one capture group.The result of the extraction must be - // convertible to a double type, as the distribution always records - // double values. If either the extraction or the conversion to double - // fails, then those values are not recorded in the - // distribution.Example: REGEXP_EXTRACT(jsonPayload.request, - // ".*quantity=(\d+).*") + // extracted. 2. regex: A regular expression using the Google RE2 syntax + // (https://github.com/google/re2/wiki/Syntax) with a single capture + // group to extract data from the specified log entry field. The value + // of the field is converted to a string before applying the regex. It + // is an error to specify a regex that does not include exactly one + // capture group.The result of the extraction must be convertible to a + // double type, as the distribution always records double values. 
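The extractor syntax is continued and illustrated with REGEXP_EXTRACT just below; as a rough sketch of where it plugs in, a distribution-style logs-based metric built from the vendored types might look like this (the filter, log field, and regex are illustrative only, and a complete distribution metric would also set its MetricDescriptor and bucket options):

package main

import (
	"fmt"

	logging "google.golang.org/api/logging/v2"
)

func main() {
	// The recorded value is pulled out of jsonPayload.request with a single
	// capture group and converted to a double, as described above.
	m := &logging.LogMetric{
		Name:           "order_quantity",
		Description:    "Quantity parsed from request logs.",
		Filter:         `resource.type=gae_app AND severity>=ERROR`,
		ValueExtractor: `REGEXP_EXTRACT(jsonPayload.request, ".*quantity=(\d+).*")`,
	}
	fmt.Println(m.Name, "uses", m.ValueExtractor)
}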
If + // either the extraction or the conversion to double fails, then those + // values are not recorded in the distribution.Example: + // REGEXP_EXTRACT(jsonPayload.request, ".*quantity=(\d+).*") ValueExtractor string `json:"valueExtractor,omitempty"` // Version: Deprecated. The API version that created or updated this @@ -1947,14 +1941,11 @@ type LogSink struct { // of the description is 8000 characters. Description string `json:"description,omitempty"` - // Destination: Required. The export - // destination: + // Destination: Required. The export destination: // "storage.googleapis.com/[GCS_BUCKET]" - // "bigquery.googleapi - // s.com/projects/[PROJECT_ID]/datasets/[DATASET]" - // "pubsub.googleapis.com - // /projects/[PROJECT_ID]/topics/[TOPIC_ID]" - // The sink's writer_identity, set when the sink is created, must have + // "bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]" + // "pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]" The + // sink's writer_identity, set when the sink is created, must have // permission to write to the destination or else the log entries are // not exported. For more information, see Exporting Logs with Sinks // (https://cloud.google.com/logging/docs/api/tasks/exporting-logs). @@ -1964,13 +1955,16 @@ type LogSink struct { // does not export any log entries. Disabled bool `json:"disabled,omitempty"` + // Exclusions: Optional. Log entries that match any of the exclusion + // filters will not be exported. If a log entry is matched by both + // filter and one of exclusion_filters it will not be exported. + Exclusions []*LogExclusion `json:"exclusions,omitempty"` + // Filter: Optional. An advanced logs filter // (https://cloud.google.com/logging/docs/view/advanced-queries). The // only exported log entries are those that are in the resource owning - // the sink and that match the filter. For - // example: + // the sink and that match the filter. For example: // logName="projects/[PROJECT_ID]/logs/[LOG_ID]" AND severity>=ERROR - // Filter string `json:"filter,omitempty"` // IncludeChildren: Optional. This field applies only to sinks owned by @@ -1984,11 +1978,8 @@ type LogSink struct { // resource.type=gce_instance would export all Compute Engine VM // instance log entries from all projects in the sink's parent. To only // export entries from certain child projects, filter on the project - // part of the log name: - // logName:("projects/test-project1/" OR "projects/test-project2/") - // AND - // resource.type=gce_instance - // + // part of the log name: logName:("projects/test-project1/" OR + // "projects/test-project2/") AND resource.type=gce_instance IncludeChildren bool `json:"includeChildren,omitempty"` // Name: Required. The client-assigned sink identifier, unique within @@ -2012,9 +2003,9 @@ type LogSink struct { // field may not be present for older sinks. UpdateTime string `json:"updateTime,omitempty"` - // WriterIdentity: Output only. An IAM identity—a service account - // or group—under which Logging writes the exported log entries to - // the sink's destination. This field is set by sinks.create and + // WriterIdentity: Output only. An IAM identity—a service account or + // group—under which Logging writes the exported log entries to the + // sink's destination. This field is set by sinks.create and // sinks.update based on the value of unique_writer_identity in those // methods.Until you grant this identity write-access to the // destination, log entry exports from this sink will fail. 
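The Exclusions field added to LogSink above composes with the sink filter: an entry must match Filter and must not match any exclusion to be exported. A minimal sketch with placeholder names, destination, and filters:

package main

import (
	"fmt"

	logging "google.golang.org/api/logging/v2"
)

func main() {
	// Export only errors to a storage bucket, but drop health-check noise
	// even when it matches the sink filter.
	sink := &logging.LogSink{
		Name:        "error-export",
		Destination: "storage.googleapis.com/my-export-bucket",
		Filter:      `severity>=ERROR`,
		Exclusions: []*logging.LogExclusion{{
			Name:   "drop-health-checks",
			Filter: `httpRequest.requestUrl:"/healthz"`,
		}},
	}
	fmt.Println(sink.Name, "->", sink.Destination)
}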
For more @@ -2055,7 +2046,14 @@ func (s *LogSink) MarshalJSON() ([]byte, error) { // MetricDescriptor: Defines a metric type and its schema. Once a metric // descriptor is created, deleting or altering it stops data collection -// and makes the metric type's existing data unusable. +// and makes the metric type's existing data unusable.The following are +// specific rules for service defined Monitoring metric descriptors: +// type, metric_kind, value_type and description fields are all +// required. The unit field must be specified if the value_type is any +// of DOUBLE, INT64, DISTRIBUTION. Maximum of default 500 metric +// descriptors per service is allowed. Maximum of default 10 labels per +// metric descriptor is allowed.The default maximum limit can be +// overridden. Please follow https://cloud.google.com/monitoring/quotas type MetricDescriptor struct { // Description: A detailed description of the metric, which can be used // in documentation. @@ -2069,7 +2067,10 @@ type MetricDescriptor struct { DisplayName string `json:"displayName,omitempty"` // Labels: The set of labels that can be used to describe a specific - // instance of this metric type. For example, the + // instance of this metric type.The label key name must follow: Only + // upper and lower-case letters, digits and underscores (_) are allowed. + // Label name must start with a letter or digit. The maximum length of a + // label name is 100 characters.For example, the // appengine.googleapis.com/http/server/response_latencies metric type // has a label for the HTTP response code, response_code, so you can // look at latencies for successful responses or just for responses that @@ -2142,16 +2143,18 @@ type MetricDescriptor struct { Name string `json:"name,omitempty"` // Type: The metric type, including its DNS name prefix. The type is not - // URL-encoded. All user-defined metric types have the DNS name - // custom.googleapis.com or external.googleapis.com. Metric types should - // use a natural hierarchical grouping. For - // example: + // URL-encoded.All service defined metrics must be prefixed with the + // service name, in the format of {service name}/{relative metric name}, + // such as cloudsql.googleapis.com/database/cpu/utilization. The + // relative metric name must follow: Only upper and lower-case letters, + // digits, '/' and underscores '_' are allowed. The maximum number of + // characters allowed for the relative_metric_name is 100.All + // user-defined metric types have the DNS name custom.googleapis.com, + // external.googleapis.com, or logging.googleapis.com/user/.Metric types + // should use a natural hierarchical grouping. For example: // "custom.googleapis.com/invoice/paid/amount" - // "external.googlea - // pis.com/prometheus/up" - // "appengine.googleapis.com/http/server/response_ - // latencies" - // + // "external.googleapis.com/prometheus/up" + // "appengine.googleapis.com/http/server/response_latencies" Type string `json:"type,omitempty"` // Unit: The units in which the metric value is reported. 
It is only @@ -2171,70 +2174,38 @@ type MetricDescriptor struct { // 12005/1000), or use Kis{CPU} and write 11.723 (which is // 12005/1024).The supported units are a subset of The Unified Code for // Units of Measure (http://unitsofmeasure.org/ucum.html) standard:Basic - // units (UNIT) - // bit bit - // By byte - // s second - // min minute - // h hour - // d dayPrefixes (PREFIX) - // k kilo (10^3) - // M mega (10^6) - // G giga (10^9) - // T tera (10^12) - // P peta (10^15) - // E exa (10^18) - // Z zetta (10^21) - // Y yotta (10^24) - // m milli (10^-3) - // u micro (10^-6) - // n nano (10^-9) - // p pico (10^-12) - // f femto (10^-15) - // a atto (10^-18) - // z zepto (10^-21) - // y yocto (10^-24) - // Ki kibi (2^10) - // Mi mebi (2^20) - // Gi gibi (2^30) - // Ti tebi (2^40) - // Pi pebi (2^50)GrammarThe grammar also includes these connectors: - // / division or ratio (as an infix operator). For examples, - // kBy/{email} or MiBy/10ms (although you should almost never have /s - // in a metric unit; rates should always be computed at query time from - // the underlying cumulative or delta value). - // . multiplication or composition (as an infix operator). For - // examples, GBy.d or k{watt}.h.The grammar for a unit is as - // follows: - // Expression = Component { "." Component } { "/" Component } - // ; - // - // Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] - // | Annotation - // | "1" - // ; - // - // Annotation = "{" NAME "}" ; - // Notes: - // Annotation is just a comment if it follows a UNIT. If the annotation - // is used alone, then the unit is equivalent to 1. For examples, - // {request}/s == 1/s, By{transmitted}/s == By/s. - // NAME is a sequence of non-blank printable ASCII characters not - // containing { or }. - // 1 represents a unitary dimensionless unit - // (https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as + // units (UNIT) bit bit By byte s second min minute h hour d day 1 + // dimensionlessPrefixes (PREFIX) k kilo (10^3) M mega (10^6) G giga + // (10^9) T tera (10^12) P peta (10^15) E exa (10^18) Z zetta (10^21) Y + // yotta (10^24) m milli (10^-3) u micro (10^-6) n nano (10^-9) p pico + // (10^-12) f femto (10^-15) a atto (10^-18) z zepto (10^-21) y yocto + // (10^-24) Ki kibi (2^10) Mi mebi (2^20) Gi gibi (2^30) Ti tebi (2^40) + // Pi pebi (2^50)GrammarThe grammar also includes these connectors: / + // division or ratio (as an infix operator). For examples, kBy/{email} + // or MiBy/10ms (although you should almost never have /s in a metric + // unit; rates should always be computed at query time from the + // underlying cumulative or delta value). . multiplication or + // composition (as an infix operator). For examples, GBy.d or + // k{watt}.h.The grammar for a unit is as follows: Expression = + // Component { "." Component } { "/" Component } ; Component = ( [ + // PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation + // = "{" NAME "}" ; Notes: Annotation is just a comment if it follows a + // UNIT. If the annotation is used alone, then the unit is equivalent to + // 1. For examples, {request}/s == 1/s, By{transmitted}/s == By/s. NAME + // is a sequence of non-blank printable ASCII characters not containing + // { or }. 1 represents a unitary dimensionless unit + // (https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as // in 1/s. It is typically used when none of the basic units are // appropriate. 
For example, "new users per day" can be represented as - // 1/d or {new-users}/d (and a metric value 5 would mean "5 new users). - // Alternatively, "thousands of page views per day" would be - // represented as 1000/d or k1/d or k{page_views}/d (and a metric value - // of 5.3 would mean "5300 page views per day"). - // % represents dimensionless value of 1/100, and annotates values - // giving a percentage (so the metric values are typically in the range - // of 0..100, and a metric value 3 means "3 percent"). - // 10^2.% indicates a metric contains a ratio, typically in the range - // 0..1, that will be multiplied by 100 and displayed as a percentage - // (so a metric value 0.03 means "3 percent"). + // 1/d or {new-users}/d (and a metric value 5 would mean "5 new users). + // Alternatively, "thousands of page views per day" would be represented + // as 1000/d or k1/d or k{page_views}/d (and a metric value of 5.3 would + // mean "5300 page views per day"). % represents dimensionless value of + // 1/100, and annotates values giving a percentage (so the metric values + // are typically in the range of 0..100, and a metric value 3 means "3 + // percent"). 10^2.% indicates a metric contains a ratio, typically in + // the range 0..1, that will be multiplied by 100 and displayed as a + // percentage (so a metric value 0.03 means "3 percent"). Unit string `json:"unit,omitempty"` // ValueType: Whether the measurement is an integer, a floating-point @@ -2360,11 +2331,8 @@ func (s *MetricDescriptorMetadata) MarshalJSON() ([]byte, error) { // schema. For example, a particular Compute Engine VM instance could be // represented by the following object, because the // MonitoredResourceDescriptor for "gce_instance" has labels -// "instance_id" and "zone": -// { "type": "gce_instance", -// "labels": { "instance_id": "12345678901234", -// "zone": "us-central1-a" }} -// +// "instance_id" and "zone": { "type": "gce_instance", "labels": { +// "instance_id": "12345678901234", "zone": "us-central1-a" }} type MonitoredResource struct { // Labels: Required. Values for all of the labels listed in the // associated monitored resource descriptor. For example, Compute Engine @@ -2404,9 +2372,17 @@ func (s *MonitoredResource) MarshalJSON() ([]byte, error) { // example, the monitored resource descriptor for Google Compute Engine // VM instances has a type of "gce_instance" and specifies the use of // the labels "instance_id" and "zone" to identify particular VM -// instances.Different APIs can support different monitored resource -// types. APIs generally provide a list method that returns the -// monitored resource descriptors used by the API. +// instances.Different services can support different monitored resource +// types.The following are specific rules to service defined monitored +// resources for Monitoring and Logging: The type, display_name, +// description, labels and launch_stage fields are all required. The +// first label of the monitored resource descriptor must be +// resource_container. There are legacy monitored resource descritptors +// start with project_id. It must include a location label. Maximum of +// default 5 service defined monitored resource descriptors is allowed +// per service. Maximum of default 10 labels per monitored resource is +// allowed.The default maximum limit can be overridden. Please follow +// https://cloud.google.com/monitoring/quotas type MonitoredResourceDescriptor struct { // Description: Optional. 
A detailed description of the monitored // resource type that might be used in documentation. @@ -2419,9 +2395,12 @@ type MonitoredResourceDescriptor struct { DisplayName string `json:"displayName,omitempty"` // Labels: Required. A set of labels used to describe instances of this - // monitored resource type. For example, an individual Google Cloud SQL - // database is identified by values for the labels "database_id" and - // "zone". + // monitored resource type. The label key name must follow: Only upper + // and lower-case letters, digits and underscores (_) are allowed. Label + // name must start with a letter or digit. The maximum length of a label + // name is 100 characters.For example, an individual Google Cloud SQL + // database is identified by values for the labels database_id and + // location. Labels []*LabelDescriptor `json:"labels,omitempty"` // LaunchStage: Optional. The launch stage of the monitored resource @@ -2472,8 +2451,15 @@ type MonitoredResourceDescriptor struct { Name string `json:"name,omitempty"` // Type: Required. The monitored resource type. For example, the type - // "cloudsql_database" represents databases in Google Cloud SQL. The - // maximum length of this value is 256 characters. + // cloudsql_database represents databases in Google Cloud SQL.All + // service defined monitored resource types must be prefixed with the + // service name, in the format of {service name}/{relative resource + // name}. The relative resource name must follow: Only upper and + // lower-case letters and digits are allowed. It must start with upper + // case character and is recommended to use Upper Camel Case style. The + // maximum number of characters allowed for the relative_resource_name + // is 100.Note there are legacy service monitored resources not + // following this rule. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -2510,11 +2496,8 @@ type MonitoredResourceMetadata struct { // labels. System labels are a kind of metadata extracted by Google, // including "machine_image", "vpc", "subnet_id", "security_group", // "name", etc. System label values can be only strings, Boolean values, - // or a list of strings. For example: - // { "name": "my-test-instance", - // "security_group": ["a", "b", "c"], - // "spot_instance": false } - // + // or a list of strings. For example: { "name": "my-test-instance", + // "security_group": ["a", "b", "c"], "spot_instance": false } SystemLabels googleapi.RawMessage `json:"systemLabels,omitempty"` // UserLabels: Output only. A map of user-defined metadata labels. @@ -2782,6 +2765,10 @@ func (s *SourceReference) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// UndeleteBucketRequest: The parameters to UndeleteBucket. +type UndeleteBucketRequest struct { +} + // WriteLogEntriesRequest: The parameters to WriteLogEntries. type WriteLogEntriesRequest struct { // DryRun: Optional. If true, the request should expect normal response, @@ -2820,23 +2807,17 @@ type WriteLogEntriesRequest struct { Labels map[string]string `json:"labels,omitempty"` // LogName: Optional. 
A default log resource name that is assigned to - // all log entries in entries that do not specify a value for - // log_name: + // all log entries in entries that do not specify a value for log_name: // "projects/[PROJECT_ID]/logs/[LOG_ID]" - // "organizations/[ORGANI - // ZATION_ID]/logs/[LOG_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[ - // LOG_ID]" - // "folders/[FOLDER_ID]/logs/[LOG_ID]" - // [LOG_ID] must be URL-encoded. For - // example: - // "projects/my-project-id/logs/syslog" - // "organizations/123456789 - // 0/logs/cloudresourcemanager.googleapis.com%2Factivity" - // The permission logging.logEntries.create is needed on each project, - // organization, billing account, or folder that is receiving new log - // entries, whether the resource is specified in logName or in an - // individual log entry. + // "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + // "folders/[FOLDER_ID]/logs/[LOG_ID]" [LOG_ID] must be URL-encoded. For + // example: "projects/my-project-id/logs/syslog" + // "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Fa + // ctivity" The permission logging.logEntries.create is needed on each + // project, organization, billing account, or folder that is receiving + // new log entries, whether the resource is specified in logName or in + // an individual log entry. LogName string `json:"logName,omitempty"` // PartialSuccess: Optional. Whether valid entries should be written @@ -2849,12 +2830,9 @@ type WriteLogEntriesRequest struct { // Resource: Optional. A default monitored resource object that is // assigned to all log entries in entries that do not specify a value - // for resource. Example: - // { "type": "gce_instance", - // "labels": { - // "zone": "us-central1-a", "instance_id": "00000000000000000000" - // }} - // See LogEntry. + // for resource. Example: { "type": "gce_instance", "labels": { "zone": + // "us-central1-a", "instance_id": "00000000000000000000" }} See + // LogEntry. Resource *MonitoredResource `json:"resource,omitempty"` // ForceSendFields is a list of field names (e.g. "DryRun") to @@ -2942,7 +2920,7 @@ func (c *BillingAccountsBucketsGetCall) Header() http.Header { func (c *BillingAccountsBucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3013,7 +2991,7 @@ func (c *BillingAccountsBucketsGetCall) Do(opts ...googleapi.CallOption) (*LogBu // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the bucket:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + // "description": "Required. 
The resource name of the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", // "location": "path", // "pattern": "^billingAccounts/[^/]+/buckets/[^/]+$", // "required": true, @@ -3082,7 +3060,7 @@ func (c *BillingAccountsExclusionsCreateCall) Header() http.Header { func (c *BillingAccountsExclusionsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3155,7 +3133,7 @@ func (c *BillingAccountsExclusionsCreateCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "parent": { - // "description": "Required. The parent resource in which to create the exclusion:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "description": "Required. The parent resource in which to create the exclusion: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", // "location": "path", // "pattern": "^billingAccounts/[^/]+$", // "required": true, @@ -3221,7 +3199,7 @@ func (c *BillingAccountsExclusionsDeleteCall) Header() http.Header { func (c *BillingAccountsExclusionsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3289,7 +3267,7 @@ func (c *BillingAccountsExclusionsDeleteCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "name": { - // "description": "Required. The resource name of an existing exclusion to delete:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + // "description": "Required. 
The resource name of an existing exclusion to delete: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", // "location": "path", // "pattern": "^billingAccounts/[^/]+/exclusions/[^/]+$", // "required": true, @@ -3363,7 +3341,7 @@ func (c *BillingAccountsExclusionsGetCall) Header() http.Header { func (c *BillingAccountsExclusionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3434,7 +3412,7 @@ func (c *BillingAccountsExclusionsGetCall) Do(opts ...googleapi.CallOption) (*Lo // ], // "parameters": { // "name": { - // "description": "Required. The resource name of an existing exclusion:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + // "description": "Required. The resource name of an existing exclusion: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", // "location": "path", // "pattern": "^billingAccounts/[^/]+/exclusions/[^/]+$", // "required": true, @@ -3529,7 +3507,7 @@ func (c *BillingAccountsExclusionsListCall) Header() http.Header { func (c *BillingAccountsExclusionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3611,7 +3589,7 @@ func (c *BillingAccountsExclusionsListCall) Do(opts ...googleapi.CallOption) (*L // "type": "string" // }, // "parent": { - // "description": "Required. The parent resource whose exclusions are to be listed.\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + // "description": "Required. The parent resource whose exclusions are to be listed. 
\"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", // "location": "path", // "pattern": "^billingAccounts/[^/]+$", // "required": true, @@ -3711,7 +3689,7 @@ func (c *BillingAccountsExclusionsPatchCall) Header() http.Header { func (c *BillingAccountsExclusionsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3784,7 +3762,7 @@ func (c *BillingAccountsExclusionsPatchCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the exclusion to update:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + // "description": "Required. The resource name of the exclusion to update: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", // "location": "path", // "pattern": "^billingAccounts/[^/]+/exclusions/[^/]+$", // "required": true, @@ -3812,94 +3790,78 @@ func (c *BillingAccountsExclusionsPatchCall) Do(opts ...googleapi.CallOption) (* } -// method id "logging.billingAccounts.locations.buckets.list": +// method id "logging.billingAccounts.locations.buckets.create": -type BillingAccountsLocationsBucketsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BillingAccountsLocationsBucketsCreateCall struct { + s *Service + parent string + logbucket *LogBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Lists buckets (Beta). -func (r *BillingAccountsLocationsBucketsService) List(parent string) *BillingAccountsLocationsBucketsListCall { - c := &BillingAccountsLocationsBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Create: Creates a bucket that can be used to store log entries. Once +// a bucket has been created, the region cannot be changed. +func (r *BillingAccountsLocationsBucketsService) Create(parent string, logbucket *LogBucket) *BillingAccountsLocationsBucketsCreateCall { + c := &BillingAccountsLocationsBucketsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent + c.logbucket = logbucket return c } -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return from this request. Non-positive values are -// ignored. The presence of nextPageToken in the response indicates that -// more results might be available. -func (c *BillingAccountsLocationsBucketsListCall) PageSize(pageSize int64) *BillingAccountsLocationsBucketsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If present, then -// retrieve the next batch of results from the preceding call to this -// method. 
pageToken must be the value of nextPageToken from the -// previous response. The values of other method parameters should be -// identical to those in the previous call. -func (c *BillingAccountsLocationsBucketsListCall) PageToken(pageToken string) *BillingAccountsLocationsBucketsListCall { - c.urlParams_.Set("pageToken", pageToken) +// BucketId sets the optional parameter "bucketId": Required. A +// client-assigned identifier such as "my-bucket". Identifiers are +// limited to 100 characters and can include only letters, digits, +// underscores, hyphens, and periods. +func (c *BillingAccountsLocationsBucketsCreateCall) BucketId(bucketId string) *BillingAccountsLocationsBucketsCreateCall { + c.urlParams_.Set("bucketId", bucketId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BillingAccountsLocationsBucketsListCall) Fields(s ...googleapi.Field) *BillingAccountsLocationsBucketsListCall { +func (c *BillingAccountsLocationsBucketsCreateCall) Fields(s ...googleapi.Field) *BillingAccountsLocationsBucketsCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BillingAccountsLocationsBucketsListCall) IfNoneMatch(entityTag string) *BillingAccountsLocationsBucketsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BillingAccountsLocationsBucketsListCall) Context(ctx context.Context) *BillingAccountsLocationsBucketsListCall { +func (c *BillingAccountsLocationsBucketsCreateCall) Context(ctx context.Context) *BillingAccountsLocationsBucketsCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BillingAccountsLocationsBucketsListCall) Header() http.Header { +func (c *BillingAccountsLocationsBucketsCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BillingAccountsLocationsBucketsListCall) doRequest(alt string) (*http.Response, error) { +func (c *BillingAccountsLocationsBucketsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/buckets") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -3910,14 +3872,14 @@ func (c *BillingAccountsLocationsBucketsListCall) doRequest(alt string) (*http.R return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.billingAccounts.locations.buckets.list" call. -// Exactly one of *ListBucketsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListBucketsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BillingAccountsLocationsBucketsListCall) Do(opts ...googleapi.CallOption) (*ListBucketsResponse, error) { +// Do executes the "logging.billingAccounts.locations.buckets.create" call. +// Exactly one of *LogBucket or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LogBucket.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BillingAccountsLocationsBucketsCreateCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -3936,7 +3898,7 @@ func (c *BillingAccountsLocationsBucketsListCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListBucketsResponse{ + ret := &LogBucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -3948,27 +3910,21 @@ func (c *BillingAccountsLocationsBucketsListCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Lists buckets (Beta).", + // "description": "Creates a bucket that can be used to store log entries. Once a bucket has been created, the region cannot be changed.", // "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets", - // "httpMethod": "GET", - // "id": "logging.billingAccounts.locations.buckets.list", + // "httpMethod": "POST", + // "id": "logging.billingAccounts.locations.buckets.create", // "parameterOrder": [ // "parent" // ], // "parameters": { - // "pageSize": { - // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + // "bucketId": { + // "description": "Required. A client-assigned identifier such as \"my-bucket\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. 
The parent resource whose buckets are to be listed:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]\"\nNote: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", + // "description": "Required. The resource in which to create the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" Example: \"projects/my-logging-project/locations/global\"", // "location": "path", // "pattern": "^billingAccounts/[^/]+/locations/[^/]+$", // "required": true, @@ -3976,81 +3932,43 @@ func (c *BillingAccountsLocationsBucketsListCall) Do(opts ...googleapi.CallOptio // } // }, // "path": "v2/{+parent}/buckets", + // "request": { + // "$ref": "LogBucket" + // }, // "response": { - // "$ref": "ListBucketsResponse" + // "$ref": "LogBucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" + // "https://www.googleapis.com/auth/logging.admin" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *BillingAccountsLocationsBucketsListCall) Pages(ctx context.Context, f func(*ListBucketsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "logging.billingAccounts.locations.buckets.patch": +// method id "logging.billingAccounts.locations.buckets.delete": -type BillingAccountsLocationsBucketsPatchCall struct { +type BillingAccountsLocationsBucketsDeleteCall struct { s *Service name string - logbucket *LogBucket urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Updates a bucket. This method replaces the following fields in -// the existing bucket with values from the new bucket: -// retention_periodIf the retention period is decreased and the bucket -// is locked, FAILED_PRECONDITION will be returned.If the bucket has a -// LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be -// returned.A buckets region may not be modified after it is created. -// This method is in Beta. -func (r *BillingAccountsLocationsBucketsService) Patch(name string, logbucket *LogBucket) *BillingAccountsLocationsBucketsPatchCall { - c := &BillingAccountsLocationsBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes a bucket. Moves the bucket to the DELETE_REQUESTED +// state. After 7 days, the bucket will be purged and all logs in the +// bucket will be permanently deleted. +func (r *BillingAccountsLocationsBucketsService) Delete(name string) *BillingAccountsLocationsBucketsDeleteCall { + c := &BillingAccountsLocationsBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.logbucket = logbucket - return c -} - -// UpdateMask sets the optional parameter "updateMask": Required. Field -// mask that specifies the fields in bucket that need an update. 
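The bucket Create call introduced above can be exercised the same way; a minimal sketch with a placeholder parent, bucket ID, and retention (per the description, the region cannot be changed once the bucket exists):

package main

import (
	"context"
	"fmt"
	"log"

	logging "google.golang.org/api/logging/v2"
)

func main() {
	ctx := context.Background()
	svc, err := logging.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	bucket := &logging.LogBucket{
		Description:   "Example log bucket.",
		RetentionDays: 30,
	}
	created, err := svc.BillingAccounts.Locations.Buckets.
		Create("billingAccounts/012345-ABCDEF-67890A/locations/global", bucket).
		BucketId("example-bucket").
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created", created.Name)
}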
A -// bucket field will be overwritten if, and only if, it is in the update -// mask. name and output only fields cannot be updated.For a detailed -// FieldMask definition, see -// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: -// updateMask=retention_days. -func (c *BillingAccountsLocationsBucketsPatchCall) UpdateMask(updateMask string) *BillingAccountsLocationsBucketsPatchCall { - c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BillingAccountsLocationsBucketsPatchCall) Fields(s ...googleapi.Field) *BillingAccountsLocationsBucketsPatchCall { +func (c *BillingAccountsLocationsBucketsDeleteCall) Fields(s ...googleapi.Field) *BillingAccountsLocationsBucketsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4058,38 +3976,33 @@ func (c *BillingAccountsLocationsBucketsPatchCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BillingAccountsLocationsBucketsPatchCall) Context(ctx context.Context) *BillingAccountsLocationsBucketsPatchCall { +func (c *BillingAccountsLocationsBucketsDeleteCall) Context(ctx context.Context) *BillingAccountsLocationsBucketsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BillingAccountsLocationsBucketsPatchCall) Header() http.Header { +func (c *BillingAccountsLocationsBucketsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BillingAccountsLocationsBucketsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *BillingAccountsLocationsBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logbucket) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -4100,14 +4013,14 @@ func (c *BillingAccountsLocationsBucketsPatchCall) doRequest(alt string) (*http. return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.billingAccounts.locations.buckets.patch" call. -// Exactly one of *LogBucket or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *LogBucket.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *BillingAccountsLocationsBucketsPatchCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { +// Do executes the "logging.billingAccounts.locations.buckets.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BillingAccountsLocationsBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -4126,7 +4039,7 @@ func (c *BillingAccountsLocationsBucketsPatchCall) Do(opts ...googleapi.CallOpti if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogBucket{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4138,34 +4051,25 @@ func (c *BillingAccountsLocationsBucketsPatchCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Updates a bucket. This method replaces the following fields in the existing bucket with values from the new bucket: retention_periodIf the retention period is decreased and the bucket is locked, FAILED_PRECONDITION will be returned.If the bucket has a LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be returned.A buckets region may not be modified after it is created. This method is in Beta.", + // "description": "Deletes a bucket. Moves the bucket to the DELETE_REQUESTED state. After 7 days, the bucket will be purged and all logs in the bucket will be permanently deleted.", // "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}", - // "httpMethod": "PATCH", - // "id": "logging.billingAccounts.locations.buckets.patch", + // "httpMethod": "DELETE", + // "id": "logging.billingAccounts.locations.buckets.delete", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The full resource name of the bucket to update.\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", + // "description": "Required. The full resource name of the bucket to delete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", // "location": "path", // "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+$", // "required": true, // "type": "string" - // }, - // "updateMask": { - // "description": "Required. Field mask that specifies the fields in bucket that need an update. A bucket field will be overwritten if, and only if, it is in the update mask. 
name and output only fields cannot be updated.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=retention_days.", - // "format": "google-fieldmask", - // "location": "query", - // "type": "string" // } // }, // "path": "v2/{+name}", - // "request": { - // "$ref": "LogBucket" - // }, // "response": { - // "$ref": "LogBucket" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -4175,216 +4079,81 @@ func (c *BillingAccountsLocationsBucketsPatchCall) Do(opts ...googleapi.CallOpti } -// method id "logging.billingAccounts.logs.delete": +// method id "logging.billingAccounts.locations.buckets.list": -type BillingAccountsLogsDeleteCall struct { - s *Service - logName string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BillingAccountsLocationsBucketsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes all the log entries in a log. The log reappears if it -// receives new entries. Log entries written shortly before the delete -// operation might not be deleted. Entries received after the delete -// operation with a timestamp before the operation will be deleted. -func (r *BillingAccountsLogsService) Delete(logName string) *BillingAccountsLogsDeleteCall { - c := &BillingAccountsLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.logName = logName +// List: Lists buckets (Beta). +func (r *BillingAccountsLocationsBucketsService) List(parent string) *BillingAccountsLocationsBucketsListCall { + c := &BillingAccountsLocationsBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of results to return from this request. Non-positive values are +// ignored. The presence of nextPageToken in the response indicates that +// more results might be available. +func (c *BillingAccountsLocationsBucketsListCall) PageSize(pageSize int64) *BillingAccountsLocationsBucketsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If present, then +// retrieve the next batch of results from the preceding call to this +// method. pageToken must be the value of nextPageToken from the +// previous response. The values of other method parameters should be +// identical to those in the previous call. +func (c *BillingAccountsLocationsBucketsListCall) PageToken(pageToken string) *BillingAccountsLocationsBucketsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BillingAccountsLogsDeleteCall) Fields(s ...googleapi.Field) *BillingAccountsLogsDeleteCall { +func (c *BillingAccountsLocationsBucketsListCall) Fields(s ...googleapi.Field) *BillingAccountsLocationsBucketsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BillingAccountsLocationsBucketsListCall) IfNoneMatch(entityTag string) *BillingAccountsLocationsBucketsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BillingAccountsLogsDeleteCall) Context(ctx context.Context) *BillingAccountsLogsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BillingAccountsLogsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BillingAccountsLogsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+logName}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "logName": c.logName, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.billingAccounts.logs.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BillingAccountsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.", - // "flatPath": "v2/billingAccounts/{billingAccountsId}/logs/{logsId}", - // "httpMethod": "DELETE", - // "id": "logging.billingAccounts.logs.delete", - // "parameterOrder": [ - // "logName" - // ], - // "parameters": { - // "logName": { - // "description": "Required. 
The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", - // "location": "path", - // "pattern": "^billingAccounts/[^/]+/logs/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v2/{+logName}", - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin" - // ] - // } - -} - -// method id "logging.billingAccounts.logs.list": - -type BillingAccountsLogsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists the logs in projects, organizations, folders, or billing -// accounts. Only logs that have entries are listed. -func (r *BillingAccountsLogsService) List(parent string) *BillingAccountsLogsListCall { - c := &BillingAccountsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return from this request. Non-positive values are -// ignored. The presence of nextPageToken in the response indicates that -// more results might be available. -func (c *BillingAccountsLogsListCall) PageSize(pageSize int64) *BillingAccountsLogsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If present, then -// retrieve the next batch of results from the preceding call to this -// method. pageToken must be the value of nextPageToken from the -// previous response. The values of other method parameters should be -// identical to those in the previous call. -func (c *BillingAccountsLogsListCall) PageToken(pageToken string) *BillingAccountsLogsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BillingAccountsLogsListCall) Fields(s ...googleapi.Field) *BillingAccountsLogsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BillingAccountsLogsListCall) IfNoneMatch(entityTag string) *BillingAccountsLogsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *BillingAccountsLogsListCall) Context(ctx context.Context) *BillingAccountsLogsListCall { +func (c *BillingAccountsLocationsBucketsListCall) Context(ctx context.Context) *BillingAccountsLocationsBucketsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BillingAccountsLogsListCall) Header() http.Header { +func (c *BillingAccountsLocationsBucketsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BillingAccountsLogsListCall) doRequest(alt string) (*http.Response, error) { +func (c *BillingAccountsLocationsBucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4395,7 +4164,7 @@ func (c *BillingAccountsLogsListCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logs") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/buckets") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -4408,14 +4177,14 @@ func (c *BillingAccountsLogsListCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.billingAccounts.logs.list" call. -// Exactly one of *ListLogsResponse or error will be non-nil. Any +// Do executes the "logging.billingAccounts.locations.buckets.list" call. +// Exactly one of *ListBucketsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *ListLogsResponse.ServerResponse.Header or (if a response was +// *ListBucketsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BillingAccountsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) { +func (c *BillingAccountsLocationsBucketsListCall) Do(opts ...googleapi.CallOption) (*ListBucketsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -4434,7 +4203,7 @@ func (c *BillingAccountsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLog if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListLogsResponse{ + ret := &ListBucketsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4446,10 +4215,10 @@ func (c *BillingAccountsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLog } return ret, nil // { - // "description": "Lists the logs in projects, organizations, folders, or billing accounts. 
Only logs that have entries are listed.", - // "flatPath": "v2/billingAccounts/{billingAccountsId}/logs", + // "description": "Lists buckets (Beta).", + // "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets", // "httpMethod": "GET", - // "id": "logging.billingAccounts.logs.list", + // "id": "logging.billingAccounts.locations.buckets.list", // "parameterOrder": [ // "parent" // ], @@ -4466,16 +4235,16 @@ func (c *BillingAccountsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLog // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + // "description": "Required. The parent resource whose buckets are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" Note: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", // "location": "path", - // "pattern": "^billingAccounts/[^/]+$", + // "pattern": "^billingAccounts/[^/]+/locations/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+parent}/logs", + // "path": "v2/{+parent}/buckets", // "response": { - // "$ref": "ListLogsResponse" + // "$ref": "ListBucketsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -4490,7 +4259,7 @@ func (c *BillingAccountsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLog // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *BillingAccountsLogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error { +func (c *BillingAccountsLocationsBucketsListCall) Pages(ctx context.Context, f func(*ListBucketsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -4508,49 +4277,47 @@ func (c *BillingAccountsLogsListCall) Pages(ctx context.Context, f func(*ListLog } } -// method id "logging.billingAccounts.sinks.create": +// method id "logging.billingAccounts.locations.buckets.patch": -type BillingAccountsSinksCreateCall struct { +type BillingAccountsLocationsBucketsPatchCall struct { s *Service - parent string - logsink *LogSink + name string + logbucket *LogBucket urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Create: Creates a sink that exports specified log entries to a -// destination. The export of newly-ingested log entries begins -// immediately, unless the sink's writer_identity is not permitted to -// write to the destination. A sink can export log entries only from the -// resource owning the sink. -func (r *BillingAccountsSinksService) Create(parent string, logsink *LogSink) *BillingAccountsSinksCreateCall { - c := &BillingAccountsSinksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.logsink = logsink +// Patch: Updates a bucket. 
This method replaces the following fields in +// the existing bucket with values from the new bucket: +// retention_periodIf the retention period is decreased and the bucket +// is locked, FAILED_PRECONDITION will be returned.If the bucket has a +// LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be +// returned.A buckets region may not be modified after it is created. +// This method is in Beta. +func (r *BillingAccountsLocationsBucketsService) Patch(name string, logbucket *LogBucket) *BillingAccountsLocationsBucketsPatchCall { + c := &BillingAccountsLocationsBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.logbucket = logbucket return c } -// UniqueWriterIdentity sets the optional parameter -// "uniqueWriterIdentity": Determines the kind of IAM identity returned -// as writer_identity in the new sink. If this value is omitted or set -// to false, and if the sink's parent is a project, then the value -// returned as writer_identity is the same group or service account used -// by Logging before the addition of writer identities to this API. The -// sink's destination must be in the same project as the sink itself.If -// this field is set to true, or if the sink is owned by a non-project -// resource such as an organization, then the value of writer_identity -// will be a unique service account used only for exports from the new -// sink. For more information, see writer_identity in LogSink. -func (c *BillingAccountsSinksCreateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *BillingAccountsSinksCreateCall { - c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) +// UpdateMask sets the optional parameter "updateMask": Required. Field +// mask that specifies the fields in bucket that need an update. A +// bucket field will be overwritten if, and only if, it is in the update +// mask. name and output only fields cannot be updated.For a detailed +// FieldMask definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: +// updateMask=retention_days. +func (c *BillingAccountsLocationsBucketsPatchCall) UpdateMask(updateMask string) *BillingAccountsLocationsBucketsPatchCall { + c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BillingAccountsSinksCreateCall) Fields(s ...googleapi.Field) *BillingAccountsSinksCreateCall { +func (c *BillingAccountsLocationsBucketsPatchCall) Fields(s ...googleapi.Field) *BillingAccountsLocationsBucketsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4558,56 +4325,56 @@ func (c *BillingAccountsSinksCreateCall) Fields(s ...googleapi.Field) *BillingAc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BillingAccountsSinksCreateCall) Context(ctx context.Context) *BillingAccountsSinksCreateCall { +func (c *BillingAccountsLocationsBucketsPatchCall) Context(ctx context.Context) *BillingAccountsLocationsBucketsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BillingAccountsSinksCreateCall) Header() http.Header { +func (c *BillingAccountsLocationsBucketsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BillingAccountsSinksCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *BillingAccountsLocationsBucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logbucket) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.billingAccounts.sinks.create" call. -// Exactly one of *LogSink or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *LogSink.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BillingAccountsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { +// Do executes the "logging.billingAccounts.locations.buckets.patch" call. +// Exactly one of *LogBucket or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LogBucket.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BillingAccountsLocationsBucketsPatchCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -4626,7 +4393,7 @@ func (c *BillingAccountsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogS if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogSink{ + ret := &LogBucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4638,33 +4405,34 @@ func (c *BillingAccountsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogS } return ret, nil // { - // "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the sink's writer_identity is not permitted to write to the destination. 
A sink can export log entries only from the resource owning the sink.", - // "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks", - // "httpMethod": "POST", - // "id": "logging.billingAccounts.sinks.create", + // "description": "Updates a bucket. This method replaces the following fields in the existing bucket with values from the new bucket: retention_periodIf the retention period is decreased and the bucket is locked, FAILED_PRECONDITION will be returned.If the bucket has a LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be returned.A buckets region may not be modified after it is created. This method is in Beta.", + // "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}", + // "httpMethod": "PATCH", + // "id": "logging.billingAccounts.locations.buckets.patch", // "parameterOrder": [ - // "parent" + // "name" // ], // "parameters": { - // "parent": { - // "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "name": { + // "description": "Required. The full resource name of the bucket to update. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", // "location": "path", - // "pattern": "^billingAccounts/[^/]+$", + // "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+$", // "required": true, // "type": "string" // }, - // "uniqueWriterIdentity": { - // "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", + // "updateMask": { + // "description": "Required. Field mask that specifies the fields in bucket that need an update. A bucket field will be overwritten if, and only if, it is in the update mask. 
name and output only fields cannot be updated.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=retention_days.", + // "format": "google-fieldmask", // "location": "query", - // "type": "boolean" + // "type": "string" // } // }, - // "path": "v2/{+parent}/sinks", + // "path": "v2/{+name}", // "request": { - // "$ref": "LogSink" + // "$ref": "LogBucket" // }, // "response": { - // "$ref": "LogSink" + // "$ref": "LogBucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -4674,28 +4442,30 @@ func (c *BillingAccountsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogS } -// method id "logging.billingAccounts.sinks.delete": +// method id "logging.billingAccounts.locations.buckets.undelete": -type BillingAccountsSinksDeleteCall struct { - s *Service - sinkNameid string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BillingAccountsLocationsBucketsUndeleteCall struct { + s *Service + name string + undeletebucketrequest *UndeleteBucketRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes a sink. If the sink has a unique writer_identity, -// then that service account is also deleted. -func (r *BillingAccountsSinksService) Delete(sinkNameid string) *BillingAccountsSinksDeleteCall { - c := &BillingAccountsSinksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sinkNameid = sinkNameid +// Undelete: Undeletes a bucket. A bucket that has been deleted may be +// undeleted within the grace period of 7 days. +func (r *BillingAccountsLocationsBucketsService) Undelete(name string, undeletebucketrequest *UndeleteBucketRequest) *BillingAccountsLocationsBucketsUndeleteCall { + c := &BillingAccountsLocationsBucketsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.undeletebucketrequest = undeletebucketrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BillingAccountsSinksDeleteCall) Fields(s ...googleapi.Field) *BillingAccountsSinksDeleteCall { +func (c *BillingAccountsLocationsBucketsUndeleteCall) Fields(s ...googleapi.Field) *BillingAccountsLocationsBucketsUndeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4703,51 +4473,56 @@ func (c *BillingAccountsSinksDeleteCall) Fields(s ...googleapi.Field) *BillingAc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BillingAccountsSinksDeleteCall) Context(ctx context.Context) *BillingAccountsSinksDeleteCall { +func (c *BillingAccountsLocationsBucketsUndeleteCall) Context(ctx context.Context) *BillingAccountsLocationsBucketsUndeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BillingAccountsSinksDeleteCall) Header() http.Header { +func (c *BillingAccountsLocationsBucketsUndeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BillingAccountsSinksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *BillingAccountsLocationsBucketsUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeletebucketrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:undelete") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "sinkName": c.sinkNameid, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.billingAccounts.sinks.delete" call. +// Do executes the "logging.billingAccounts.locations.buckets.undelete" call. // Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *BillingAccountsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +func (c *BillingAccountsLocationsBucketsUndeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -4778,23 +4553,26 @@ func (c *BillingAccountsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empt } return ret, nil // { - // "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.", - // "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}", - // "httpMethod": "DELETE", - // "id": "logging.billingAccounts.sinks.delete", + // "description": "Undeletes a bucket. A bucket that has been deleted may be undeleted within the grace period of 7 days.", + // "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}:undelete", + // "httpMethod": "POST", + // "id": "logging.billingAccounts.locations.buckets.undelete", // "parameterOrder": [ - // "sinkName" + // "name" // ], // "parameters": { - // "sinkName": { - // "description": "Required. 
The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "name": { + // "description": "Required. The full resource name of the bucket to undelete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", // "location": "path", - // "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", + // "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+sinkName}", + // "path": "v2/{+name}:undelete", + // "request": { + // "$ref": "UndeleteBucketRequest" + // }, // "response": { // "$ref": "Empty" // }, @@ -4806,93 +4584,82 @@ func (c *BillingAccountsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empt } -// method id "logging.billingAccounts.sinks.get": +// method id "logging.billingAccounts.logs.delete": -type BillingAccountsSinksGetCall struct { - s *Service - sinkName string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BillingAccountsLogsDeleteCall struct { + s *Service + logName string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets a sink. -func (r *BillingAccountsSinksService) Get(sinkName string) *BillingAccountsSinksGetCall { - c := &BillingAccountsSinksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sinkName = sinkName +// Delete: Deletes all the log entries in a log. The log reappears if it +// receives new entries. Log entries written shortly before the delete +// operation might not be deleted. Entries received after the delete +// operation with a timestamp before the operation will be deleted. +func (r *BillingAccountsLogsService) Delete(logName string) *BillingAccountsLogsDeleteCall { + c := &BillingAccountsLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.logName = logName return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BillingAccountsSinksGetCall) Fields(s ...googleapi.Field) *BillingAccountsSinksGetCall { +func (c *BillingAccountsLogsDeleteCall) Fields(s ...googleapi.Field) *BillingAccountsLogsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BillingAccountsSinksGetCall) IfNoneMatch(entityTag string) *BillingAccountsSinksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
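Reviewer note, not part of the vendored change: the regenerated hunks above cover the billingAccounts.locations.buckets methods (patch, delete, list, undelete). Below is a minimal sketch of how a consumer of this vendored package might drive that bucket lifecycle: shrink retention with Patch plus updateMask, Delete (which parks the bucket in DELETE_REQUESTED for 7 days), then Undelete inside the grace period. The resource name is the placeholder from the discovery descriptions above, and the LogBucket RetentionDays field is assumed from the rest of this generated file rather than shown in this hunk.

package main

import (
	"context"
	"log"

	logging "google.golang.org/api/logging/v2"
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials.
	svc, err := logging.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder name, following the examples in the discovery descriptions above.
	name := "billingAccounts/[BILLING_ACCOUNT_ID]/locations/my-location/buckets/my-bucket-id"

	// Patch: only fields named in updateMask are replaced (here, retention).
	// RetentionDays is assumed to be the generated field for retention_days.
	bucket := &logging.LogBucket{RetentionDays: 30}
	if _, err := svc.BillingAccounts.Locations.Buckets.Patch(name, bucket).
		UpdateMask("retention_days").Context(ctx).Do(); err != nil {
		log.Fatalf("patch bucket: %v", err)
	}

	// Delete moves the bucket to DELETE_REQUESTED; it is purged after 7 days.
	if _, err := svc.BillingAccounts.Locations.Buckets.Delete(name).Context(ctx).Do(); err != nil {
		log.Fatalf("delete bucket: %v", err)
	}

	// Undelete restores the bucket while it is still within the 7-day grace period.
	undo := &logging.UndeleteBucketRequest{}
	if _, err := svc.BillingAccounts.Locations.Buckets.Undelete(name, undo).Context(ctx).Do(); err != nil {
		log.Fatalf("undelete bucket: %v", err)
	}
}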
-func (c *BillingAccountsSinksGetCall) Context(ctx context.Context) *BillingAccountsSinksGetCall { +func (c *BillingAccountsLogsDeleteCall) Context(ctx context.Context) *BillingAccountsLogsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BillingAccountsSinksGetCall) Header() http.Header { +func (c *BillingAccountsLogsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BillingAccountsSinksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *BillingAccountsLogsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+logName}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "sinkName": c.sinkName, + "logName": c.logName, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.billingAccounts.sinks.get" call. -// Exactly one of *LogSink or error will be non-nil. Any non-2xx status +// Do executes the "logging.billingAccounts.logs.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *LogSink.ServerResponse.Header or (if a response was returned at all) +// *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *BillingAccountsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { +func (c *BillingAccountsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -4911,7 +4678,7 @@ func (c *BillingAccountsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogSink{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4923,39 +4690,37 @@ func (c *BillingAccountsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink } return ret, nil // { - // "description": "Gets a sink.", - // "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}", - // "httpMethod": "GET", - // "id": "logging.billingAccounts.sinks.get", + // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. 
Entries received after the delete operation with a timestamp before the operation will be deleted.", + // "flatPath": "v2/billingAccounts/{billingAccountsId}/logs/{logsId}", + // "httpMethod": "DELETE", + // "id": "logging.billingAccounts.logs.delete", // "parameterOrder": [ - // "sinkName" + // "logName" // ], // "parameters": { - // "sinkName": { - // "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "logName": { + // "description": "Required. The resource name of the log to delete: \"projects/[PROJECT_ID]/logs/[LOG_ID]\" \"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\" \"folders/[FOLDER_ID]/logs/[LOG_ID]\" [LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", // "location": "path", - // "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", + // "pattern": "^billingAccounts/[^/]+/logs/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+sinkName}", + // "path": "v2/{+logName}", // "response": { - // "$ref": "LogSink" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" + // "https://www.googleapis.com/auth/logging.admin" // ] // } } -// method id "logging.billingAccounts.sinks.list": +// method id "logging.billingAccounts.logs.list": -type BillingAccountsSinksListCall struct { +type BillingAccountsLogsListCall struct { s *Service parent string urlParams_ gensupport.URLParams @@ -4964,9 +4729,10 @@ type BillingAccountsSinksListCall struct { header_ http.Header } -// List: Lists sinks. -func (r *BillingAccountsSinksService) List(parent string) *BillingAccountsSinksListCall { - c := &BillingAccountsSinksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Lists the logs in projects, organizations, folders, or billing +// accounts. Only logs that have entries are listed. +func (r *BillingAccountsLogsService) List(parent string) *BillingAccountsLogsListCall { + c := &BillingAccountsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent return c } @@ -4975,7 +4741,7 @@ func (r *BillingAccountsSinksService) List(parent string) *BillingAccountsSinksL // of results to return from this request. Non-positive values are // ignored. The presence of nextPageToken in the response indicates that // more results might be available. -func (c *BillingAccountsSinksListCall) PageSize(pageSize int64) *BillingAccountsSinksListCall { +func (c *BillingAccountsLogsListCall) PageSize(pageSize int64) *BillingAccountsLogsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } @@ -4985,7 +4751,7 @@ func (c *BillingAccountsSinksListCall) PageSize(pageSize int64) *BillingAccounts // method. pageToken must be the value of nextPageToken from the // previous response. The values of other method parameters should be // identical to those in the previous call. 
-func (c *BillingAccountsSinksListCall) PageToken(pageToken string) *BillingAccountsSinksListCall { +func (c *BillingAccountsLogsListCall) PageToken(pageToken string) *BillingAccountsLogsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -4993,7 +4759,7 @@ func (c *BillingAccountsSinksListCall) PageToken(pageToken string) *BillingAccou // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BillingAccountsSinksListCall) Fields(s ...googleapi.Field) *BillingAccountsSinksListCall { +func (c *BillingAccountsLogsListCall) Fields(s ...googleapi.Field) *BillingAccountsLogsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5003,7 +4769,7 @@ func (c *BillingAccountsSinksListCall) Fields(s ...googleapi.Field) *BillingAcco // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BillingAccountsSinksListCall) IfNoneMatch(entityTag string) *BillingAccountsSinksListCall { +func (c *BillingAccountsLogsListCall) IfNoneMatch(entityTag string) *BillingAccountsLogsListCall { c.ifNoneMatch_ = entityTag return c } @@ -5011,23 +4777,23 @@ func (c *BillingAccountsSinksListCall) IfNoneMatch(entityTag string) *BillingAcc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BillingAccountsSinksListCall) Context(ctx context.Context) *BillingAccountsSinksListCall { +func (c *BillingAccountsLogsListCall) Context(ctx context.Context) *BillingAccountsLogsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BillingAccountsSinksListCall) Header() http.Header { +func (c *BillingAccountsLogsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BillingAccountsSinksListCall) doRequest(alt string) (*http.Response, error) { +func (c *BillingAccountsLogsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5038,7 +4804,7 @@ func (c *BillingAccountsSinksListCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logs") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -5051,14 +4817,14 @@ func (c *BillingAccountsSinksListCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.billingAccounts.sinks.list" call. -// Exactly one of *ListSinksResponse or error will be non-nil. Any +// Do executes the "logging.billingAccounts.logs.list" call. +// Exactly one of *ListLogsResponse or error will be non-nil. Any // non-2xx status code is an error. 
Response headers are in either -// *ListSinksResponse.ServerResponse.Header or (if a response was +// *ListLogsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BillingAccountsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksResponse, error) { +func (c *BillingAccountsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5077,7 +4843,7 @@ func (c *BillingAccountsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSi if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListSinksResponse{ + ret := &ListLogsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5089,10 +4855,10 @@ func (c *BillingAccountsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSi } return ret, nil // { - // "description": "Lists sinks.", - // "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks", + // "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", + // "flatPath": "v2/billingAccounts/{billingAccountsId}/logs", // "httpMethod": "GET", - // "id": "logging.billingAccounts.sinks.list", + // "id": "logging.billingAccounts.logs.list", // "parameterOrder": [ // "parent" // ], @@ -5109,16 +4875,16 @@ func (c *BillingAccountsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSi // "type": "string" // }, // "parent": { - // "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + // "description": "Required. The resource name that owns the logs: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", // "location": "path", // "pattern": "^billingAccounts/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+parent}/sinks", + // "path": "v2/{+parent}/logs", // "response": { - // "$ref": "ListSinksResponse" + // "$ref": "ListLogsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -5133,7 +4899,7 @@ func (c *BillingAccountsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSi // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *BillingAccountsSinksListCall) Pages(ctx context.Context, f func(*ListSinksResponse) error) error { +func (c *BillingAccountsLogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -5151,63 +4917,49 @@ func (c *BillingAccountsSinksListCall) Pages(ctx context.Context, f func(*ListSi } } -// method id "logging.billingAccounts.sinks.patch": +// method id "logging.billingAccounts.sinks.create": -type BillingAccountsSinksPatchCall struct { +type BillingAccountsSinksCreateCall struct { s *Service - sinkNameid string + parent string logsink *LogSink urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Updates a sink. This method replaces the following fields in -// the existing sink with values from the new sink: destination, and -// filter.The updated sink might also have a new writer_identity; see -// the unique_writer_identity field. -func (r *BillingAccountsSinksService) Patch(sinkNameid string, logsink *LogSink) *BillingAccountsSinksPatchCall { - c := &BillingAccountsSinksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sinkNameid = sinkNameid +// Create: Creates a sink that exports specified log entries to a +// destination. The export of newly-ingested log entries begins +// immediately, unless the sink's writer_identity is not permitted to +// write to the destination. A sink can export log entries only from the +// resource owning the sink. +func (r *BillingAccountsSinksService) Create(parent string, logsink *LogSink) *BillingAccountsSinksCreateCall { + c := &BillingAccountsSinksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent c.logsink = logsink return c } // UniqueWriterIdentity sets the optional parameter -// "uniqueWriterIdentity": See sinks.create for a description of this -// field. When updating a sink, the effect of this field on the value of -// writer_identity in the updated sink depends on both the old and new -// values of this field: -// If the old and new values of this field are both false or both true, -// then there is no change to the sink's writer_identity. -// If the old value is false and the new value is true, then -// writer_identity is changed to a unique service account. -// It is an error if the old value is true and the new value is set to -// false or defaulted to false. -func (c *BillingAccountsSinksPatchCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *BillingAccountsSinksPatchCall { - c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) - return c -} - -// UpdateMask sets the optional parameter "updateMask": Field mask that -// specifies the fields in sink that need an update. A sink field will -// be overwritten if, and only if, it is in the update mask. name and -// output only fields cannot be updated.An empty updateMask is -// temporarily treated as using the following mask for backwards -// compatibility purposes: destination,filter,includeChildren At some -// point in the future, behavior will be removed and specifying an empty -// updateMask will be an error.For a detailed FieldMask definition, see -// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: -// updateMask=filter. 
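Reviewer note, not part of the vendored change: the regenerated logs.list call above keeps the same paging contract (pageSize, pageToken, and the Pages helper that resets the token after iteration). A minimal sketch of consuming it through Pages follows; the ListLogsResponse LogNames field is assumed from elsewhere in this generated file, and the parent is the placeholder form used in the discovery description above.

package main

import (
	"context"
	"fmt"
	"log"

	logging "google.golang.org/api/logging/v2"
)

func main() {
	ctx := context.Background()

	svc, err := logging.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Parent form taken from the method's discovery description.
	parent := "billingAccounts/[BILLING_ACCOUNT_ID]"

	// Pages drives pageToken/nextPageToken internally and invokes f once per page.
	err = svc.BillingAccounts.Logs.List(parent).PageSize(100).Pages(ctx,
		func(page *logging.ListLogsResponse) error {
			// LogNames is assumed to be the generated field carrying the log names.
			for _, name := range page.LogNames {
				fmt.Println(name)
			}
			return nil
		})
	if err != nil {
		log.Fatalf("list logs: %v", err)
	}
}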
-func (c *BillingAccountsSinksPatchCall) UpdateMask(updateMask string) *BillingAccountsSinksPatchCall { - c.urlParams_.Set("updateMask", updateMask) +// "uniqueWriterIdentity": Determines the kind of IAM identity returned +// as writer_identity in the new sink. If this value is omitted or set +// to false, and if the sink's parent is a project, then the value +// returned as writer_identity is the same group or service account used +// by Logging before the addition of writer identities to this API. The +// sink's destination must be in the same project as the sink itself.If +// this field is set to true, or if the sink is owned by a non-project +// resource such as an organization, then the value of writer_identity +// will be a unique service account used only for exports from the new +// sink. For more information, see writer_identity in LogSink. +func (c *BillingAccountsSinksCreateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *BillingAccountsSinksCreateCall { + c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BillingAccountsSinksPatchCall) Fields(s ...googleapi.Field) *BillingAccountsSinksPatchCall { +func (c *BillingAccountsSinksCreateCall) Fields(s ...googleapi.Field) *BillingAccountsSinksCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5215,23 +4967,23 @@ func (c *BillingAccountsSinksPatchCall) Fields(s ...googleapi.Field) *BillingAcc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BillingAccountsSinksPatchCall) Context(ctx context.Context) *BillingAccountsSinksPatchCall { +func (c *BillingAccountsSinksCreateCall) Context(ctx context.Context) *BillingAccountsSinksCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BillingAccountsSinksPatchCall) Header() http.Header { +func (c *BillingAccountsSinksCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BillingAccountsSinksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *BillingAccountsSinksCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5244,27 +4996,27 @@ func (c *BillingAccountsSinksPatchCall) doRequest(alt string) (*http.Response, e reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "sinkName": c.sinkNameid, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.billingAccounts.sinks.patch" call. 
+// Do executes the "logging.billingAccounts.sinks.create" call. // Exactly one of *LogSink or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *LogSink.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *BillingAccountsSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { +func (c *BillingAccountsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5295,34 +5047,28 @@ func (c *BillingAccountsSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSi } return ret, nil // { - // "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", - // "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}", - // "httpMethod": "PATCH", - // "id": "logging.billingAccounts.sinks.patch", + // "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", + // "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks", + // "httpMethod": "POST", + // "id": "logging.billingAccounts.sinks.create", // "parameterOrder": [ - // "sinkName" + // "parent" // ], // "parameters": { - // "sinkName": { - // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "parent": { + // "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", // "location": "path", - // "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", + // "pattern": "^billingAccounts/[^/]+$", // "required": true, // "type": "string" // }, // "uniqueWriterIdentity": { - // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", + // "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. 
If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", // "location": "query", // "type": "boolean" - // }, - // "updateMask": { - // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", - // "format": "google-fieldmask", - // "location": "query", - // "type": "string" // } // }, - // "path": "v2/{+sinkName}", + // "path": "v2/{+parent}/sinks", // "request": { // "$ref": "LogSink" // }, @@ -5337,63 +5083,28 @@ func (c *BillingAccountsSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSi } -// method id "logging.billingAccounts.sinks.update": +// method id "logging.billingAccounts.sinks.delete": -type BillingAccountsSinksUpdateCall struct { +type BillingAccountsSinksDeleteCall struct { s *Service sinkNameid string - logsink *LogSink urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Update: Updates a sink. This method replaces the following fields in -// the existing sink with values from the new sink: destination, and -// filter.The updated sink might also have a new writer_identity; see -// the unique_writer_identity field. -func (r *BillingAccountsSinksService) Update(sinkNameid string, logsink *LogSink) *BillingAccountsSinksUpdateCall { - c := &BillingAccountsSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes a sink. If the sink has a unique writer_identity, +// then that service account is also deleted. +func (r *BillingAccountsSinksService) Delete(sinkNameid string) *BillingAccountsSinksDeleteCall { + c := &BillingAccountsSinksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sinkNameid = sinkNameid - c.logsink = logsink - return c -} - -// UniqueWriterIdentity sets the optional parameter -// "uniqueWriterIdentity": See sinks.create for a description of this -// field. When updating a sink, the effect of this field on the value of -// writer_identity in the updated sink depends on both the old and new -// values of this field: -// If the old and new values of this field are both false or both true, -// then there is no change to the sink's writer_identity. -// If the old value is false and the new value is true, then -// writer_identity is changed to a unique service account. -// It is an error if the old value is true and the new value is set to -// false or defaulted to false. 
-func (c *BillingAccountsSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *BillingAccountsSinksUpdateCall { - c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) - return c -} - -// UpdateMask sets the optional parameter "updateMask": Field mask that -// specifies the fields in sink that need an update. A sink field will -// be overwritten if, and only if, it is in the update mask. name and -// output only fields cannot be updated.An empty updateMask is -// temporarily treated as using the following mask for backwards -// compatibility purposes: destination,filter,includeChildren At some -// point in the future, behavior will be removed and specifying an empty -// updateMask will be an error.For a detailed FieldMask definition, see -// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: -// updateMask=filter. -func (c *BillingAccountsSinksUpdateCall) UpdateMask(updateMask string) *BillingAccountsSinksUpdateCall { - c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BillingAccountsSinksUpdateCall) Fields(s ...googleapi.Field) *BillingAccountsSinksUpdateCall { +func (c *BillingAccountsSinksDeleteCall) Fields(s ...googleapi.Field) *BillingAccountsSinksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5401,38 +5112,33 @@ func (c *BillingAccountsSinksUpdateCall) Fields(s ...googleapi.Field) *BillingAc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BillingAccountsSinksUpdateCall) Context(ctx context.Context) *BillingAccountsSinksUpdateCall { +func (c *BillingAccountsSinksDeleteCall) Context(ctx context.Context) *BillingAccountsSinksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BillingAccountsSinksUpdateCall) Header() http.Header { +func (c *BillingAccountsSinksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BillingAccountsSinksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *BillingAccountsSinksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -5443,14 +5149,14 @@ func (c *BillingAccountsSinksUpdateCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.billingAccounts.sinks.update" call. 
-// Exactly one of *LogSink or error will be non-nil. Any non-2xx status +// Do executes the "logging.billingAccounts.sinks.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *LogSink.ServerResponse.Header or (if a response was returned at all) +// *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *BillingAccountsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { +func (c *BillingAccountsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5469,7 +5175,7 @@ func (c *BillingAccountsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogS if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogSink{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5481,39 +5187,25 @@ func (c *BillingAccountsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogS } return ret, nil // { - // "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", + // "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.", // "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}", - // "httpMethod": "PUT", - // "id": "logging.billingAccounts.sinks.update", + // "httpMethod": "DELETE", + // "id": "logging.billingAccounts.sinks.delete", // "parameterOrder": [ // "sinkName" // ], // "parameters": { // "sinkName": { - // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", // "required": true, // "type": "string" - // }, - // "uniqueWriterIdentity": { - // "description": "Optional. See sinks.create for a description of this field. 
When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", - // "location": "query", - // "type": "boolean" - // }, - // "updateMask": { - // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", - // "format": "google-fieldmask", - // "location": "query", - // "type": "string" // } // }, // "path": "v2/{+sinkName}", - // "request": { - // "$ref": "LogSink" - // }, // "response": { - // "$ref": "LogSink" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -5523,84 +5215,93 @@ func (c *BillingAccountsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogS } -// method id "logging.entries.list": +// method id "logging.billingAccounts.sinks.get": -type EntriesListCall struct { - s *Service - listlogentriesrequest *ListLogEntriesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BillingAccountsSinksGetCall struct { + s *Service + sinkName string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// List: Lists log entries. Use this method to retrieve log entries that -// originated from a project/folder/organization/billing account. For -// ways to export log entries, see Exporting Logs -// (https://cloud.google.com/logging/docs/export). -func (r *EntriesService) List(listlogentriesrequest *ListLogEntriesRequest) *EntriesListCall { - c := &EntriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.listlogentriesrequest = listlogentriesrequest +// Get: Gets a sink. +func (r *BillingAccountsSinksService) Get(sinkName string) *BillingAccountsSinksGetCall { + c := &BillingAccountsSinksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sinkName = sinkName return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *EntriesListCall) Fields(s ...googleapi.Field) *EntriesListCall { +func (c *BillingAccountsSinksGetCall) Fields(s ...googleapi.Field) *BillingAccountsSinksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *BillingAccountsSinksGetCall) IfNoneMatch(entityTag string) *BillingAccountsSinksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *EntriesListCall) Context(ctx context.Context) *EntriesListCall { +func (c *BillingAccountsSinksGetCall) Context(ctx context.Context) *BillingAccountsSinksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *EntriesListCall) Header() http.Header { +func (c *BillingAccountsSinksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *EntriesListCall) doRequest(alt string) (*http.Response, error) { +func (c *BillingAccountsSinksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.listlogentriesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/entries:list") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "sinkName": c.sinkName, + }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.entries.list" call. -// Exactly one of *ListLogEntriesResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListLogEntriesResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *EntriesListCall) Do(opts ...googleapi.CallOption) (*ListLogEntriesResponse, error) { +// Do executes the "logging.billingAccounts.sinks.get" call. +// Exactly one of *LogSink or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *LogSink.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BillingAccountsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5619,7 +5320,7 @@ func (c *EntriesListCall) Do(opts ...googleapi.CallOption) (*ListLogEntriesRespo if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListLogEntriesResponse{ + ret := &LogSink{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5631,18 +5332,25 @@ func (c *EntriesListCall) Do(opts ...googleapi.CallOption) (*ListLogEntriesRespo } return ret, nil // { - // "description": "Lists log entries. Use this method to retrieve log entries that originated from a project/folder/organization/billing account. For ways to export log entries, see Exporting Logs (https://cloud.google.com/logging/docs/export).", - // "flatPath": "v2/entries:list", - // "httpMethod": "POST", - // "id": "logging.entries.list", - // "parameterOrder": [], - // "parameters": {}, - // "path": "v2/entries:list", - // "request": { - // "$ref": "ListLogEntriesRequest" + // "description": "Gets a sink.", + // "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}", + // "httpMethod": "GET", + // "id": "logging.billingAccounts.sinks.get", + // "parameterOrder": [ + // "sinkName" + // ], + // "parameters": { + // "sinkName": { + // "description": "Required. The resource name of the sink: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", + // "location": "path", + // "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", + // "required": true, + // "type": "string" + // } // }, + // "path": "v2/{+sinkName}", // "response": { - // "$ref": "ListLogEntriesResponse" + // "$ref": "LogSink" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -5654,107 +5362,112 @@ func (c *EntriesListCall) Do(opts ...googleapi.CallOption) (*ListLogEntriesRespo } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *EntriesListCall) Pages(ctx context.Context, f func(*ListLogEntriesResponse) error) error { - c.ctx_ = ctx - defer func(pt string) { c.listlogentriesrequest.PageToken = pt }(c.listlogentriesrequest.PageToken) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.listlogentriesrequest.PageToken = x.NextPageToken - } +// method id "logging.billingAccounts.sinks.list": + +type BillingAccountsSinksListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// method id "logging.entries.write": +// List: Lists sinks. +func (r *BillingAccountsSinksService) List(parent string) *BillingAccountsSinksListCall { + c := &BillingAccountsSinksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} -type EntriesWriteCall struct { - s *Service - writelogentriesrequest *WriteLogEntriesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// PageSize sets the optional parameter "pageSize": The maximum number +// of results to return from this request. Non-positive values are +// ignored. 
The presence of nextPageToken in the response indicates that +// more results might be available. +func (c *BillingAccountsSinksListCall) PageSize(pageSize int64) *BillingAccountsSinksListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c } -// Write: Writes log entries to Logging. This API method is the only way -// to send log entries to Logging. This method is used, directly or -// indirectly, by the Logging agent (fluentd) and all logging libraries -// configured to use Logging. A single request may contain log entries -// for a maximum of 1000 different resources (projects, organizations, -// billing accounts or folders) -func (r *EntriesService) Write(writelogentriesrequest *WriteLogEntriesRequest) *EntriesWriteCall { - c := &EntriesWriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.writelogentriesrequest = writelogentriesrequest +// PageToken sets the optional parameter "pageToken": If present, then +// retrieve the next batch of results from the preceding call to this +// method. pageToken must be the value of nextPageToken from the +// previous response. The values of other method parameters should be +// identical to those in the previous call. +func (c *BillingAccountsSinksListCall) PageToken(pageToken string) *BillingAccountsSinksListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *EntriesWriteCall) Fields(s ...googleapi.Field) *EntriesWriteCall { +func (c *BillingAccountsSinksListCall) Fields(s ...googleapi.Field) *BillingAccountsSinksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BillingAccountsSinksListCall) IfNoneMatch(entityTag string) *BillingAccountsSinksListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *EntriesWriteCall) Context(ctx context.Context) *EntriesWriteCall { +func (c *BillingAccountsSinksListCall) Context(ctx context.Context) *BillingAccountsSinksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *EntriesWriteCall) Header() http.Header { +func (c *BillingAccountsSinksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *EntriesWriteCall) doRequest(alt string) (*http.Response, error) { +func (c *BillingAccountsSinksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.writelogentriesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/entries:write") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.entries.write" call. -// Exactly one of *WriteLogEntriesResponse or error will be non-nil. Any +// Do executes the "logging.billingAccounts.sinks.list" call. +// Exactly one of *ListSinksResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *WriteLogEntriesResponse.ServerResponse.Header or (if a response was +// *ListSinksResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *EntriesWriteCall) Do(opts ...googleapi.CallOption) (*WriteLogEntriesResponse, error) { +func (c *BillingAccountsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5773,7 +5486,7 @@ func (c *EntriesWriteCall) Do(opts ...googleapi.CallOption) (*WriteLogEntriesRes if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &WriteLogEntriesResponse{ + ret := &ListSinksResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5785,53 +5498,124 @@ func (c *EntriesWriteCall) Do(opts ...googleapi.CallOption) (*WriteLogEntriesRes } return ret, nil // { - // "description": "Writes log entries to Logging. This API method is the only way to send log entries to Logging. This method is used, directly or indirectly, by the Logging agent (fluentd) and all logging libraries configured to use Logging. 
A single request may contain log entries for a maximum of 1000 different resources (projects, organizations, billing accounts or folders)", - // "flatPath": "v2/entries:write", - // "httpMethod": "POST", - // "id": "logging.entries.write", - // "parameterOrder": [], - // "parameters": {}, - // "path": "v2/entries:write", - // "request": { - // "$ref": "WriteLogEntriesRequest" + // "description": "Lists sinks.", + // "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks", + // "httpMethod": "GET", + // "id": "logging.billingAccounts.sinks.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The parent resource whose sinks are to be listed: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", + // "location": "path", + // "pattern": "^billingAccounts/[^/]+$", + // "required": true, + // "type": "string" + // } // }, + // "path": "v2/{+parent}/sinks", // "response": { - // "$ref": "WriteLogEntriesResponse" + // "$ref": "ListSinksResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.write" + // "https://www.googleapis.com/auth/logging.read" // ] // } } -// method id "logging.exclusions.create": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *BillingAccountsSinksListCall) Pages(ctx context.Context, f func(*ListSinksResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ExclusionsCreateCall struct { - s *Service - parent string - logexclusion *LogExclusion - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "logging.billingAccounts.sinks.patch": + +type BillingAccountsSinksPatchCall struct { + s *Service + sinkNameid string + logsink *LogSink + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Create: Creates a new exclusion in a specified parent resource. Only -// log entries belonging to that resource can be excluded. You can have -// up to 10 exclusions in a resource. 
-func (r *ExclusionsService) Create(parent string, logexclusion *LogExclusion) *ExclusionsCreateCall { - c := &ExclusionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.logexclusion = logexclusion +// Patch: Updates a sink. This method replaces the following fields in +// the existing sink with values from the new sink: destination, and +// filter.The updated sink might also have a new writer_identity; see +// the unique_writer_identity field. +func (r *BillingAccountsSinksService) Patch(sinkNameid string, logsink *LogSink) *BillingAccountsSinksPatchCall { + c := &BillingAccountsSinksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sinkNameid = sinkNameid + c.logsink = logsink + return c +} + +// UniqueWriterIdentity sets the optional parameter +// "uniqueWriterIdentity": See sinks.create for a description of this +// field. When updating a sink, the effect of this field on the value of +// writer_identity in the updated sink depends on both the old and new +// values of this field: If the old and new values of this field are +// both false or both true, then there is no change to the sink's +// writer_identity. If the old value is false and the new value is true, +// then writer_identity is changed to a unique service account. It is an +// error if the old value is true and the new value is set to false or +// defaulted to false. +func (c *BillingAccountsSinksPatchCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *BillingAccountsSinksPatchCall { + c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) + return c +} + +// UpdateMask sets the optional parameter "updateMask": Field mask that +// specifies the fields in sink that need an update. A sink field will +// be overwritten if, and only if, it is in the update mask. name and +// output only fields cannot be updated.An empty updateMask is +// temporarily treated as using the following mask for backwards +// compatibility purposes: destination,filter,includeChildren At some +// point in the future, behavior will be removed and specifying an empty +// updateMask will be an error.For a detailed FieldMask definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: +// updateMask=filter. +func (c *BillingAccountsSinksPatchCall) UpdateMask(updateMask string) *BillingAccountsSinksPatchCall { + c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ExclusionsCreateCall) Fields(s ...googleapi.Field) *ExclusionsCreateCall { +func (c *BillingAccountsSinksPatchCall) Fields(s ...googleapi.Field) *BillingAccountsSinksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5839,56 +5623,56 @@ func (c *ExclusionsCreateCall) Fields(s ...googleapi.Field) *ExclusionsCreateCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ExclusionsCreateCall) Context(ctx context.Context) *ExclusionsCreateCall { +func (c *BillingAccountsSinksPatchCall) Context(ctx context.Context) *BillingAccountsSinksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ExclusionsCreateCall) Header() http.Header { +func (c *BillingAccountsSinksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ExclusionsCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *BillingAccountsSinksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logexclusion) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/exclusions") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "sinkName": c.sinkNameid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.exclusions.create" call. -// Exactly one of *LogExclusion or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *LogExclusion.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ExclusionsCreateCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { +// Do executes the "logging.billingAccounts.sinks.patch" call. +// Exactly one of *LogSink or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *LogSink.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BillingAccountsSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5907,7 +5691,7 @@ func (c *ExclusionsCreateCall) Do(opts ...googleapi.CallOption) (*LogExclusion, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogExclusion{ + ret := &LogSink{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5919,28 +5703,39 @@ func (c *ExclusionsCreateCall) Do(opts ...googleapi.CallOption) (*LogExclusion, } return ret, nil // { - // "description": "Creates a new exclusion in a specified parent resource. Only log entries belonging to that resource can be excluded. You can have up to 10 exclusions in a resource.", - // "flatPath": "v2/{v2Id}/{v2Id1}/exclusions", - // "httpMethod": "POST", - // "id": "logging.exclusions.create", - // "parameterOrder": [ - // "parent" + // "description": "Updates a sink. 
This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", + // "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}", + // "httpMethod": "PATCH", + // "id": "logging.billingAccounts.sinks.patch", + // "parameterOrder": [ + // "sinkName" // ], // "parameters": { - // "parent": { - // "description": "Required. The parent resource in which to create the exclusion:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "sinkName": { + // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", - // "pattern": "^[^/]+/[^/]+$", + // "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", // "required": true, // "type": "string" + // }, + // "uniqueWriterIdentity": { + // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", + // "location": "query", + // "type": "boolean" + // }, + // "updateMask": { + // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, - // "path": "v2/{+parent}/exclusions", + // "path": "v2/{+sinkName}", // "request": { - // "$ref": "LogExclusion" + // "$ref": "LogSink" // }, // "response": { - // "$ref": "LogExclusion" + // "$ref": "LogSink" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -5950,27 +5745,62 @@ func (c *ExclusionsCreateCall) Do(opts ...googleapi.CallOption) (*LogExclusion, } -// method id "logging.exclusions.delete": +// method id "logging.billingAccounts.sinks.update": -type ExclusionsDeleteCall struct { +type BillingAccountsSinksUpdateCall struct { s *Service - name string + sinkNameid string + logsink *LogSink urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes an exclusion. 
-func (r *ExclusionsService) Delete(name string) *ExclusionsDeleteCall { - c := &ExclusionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name +// Update: Updates a sink. This method replaces the following fields in +// the existing sink with values from the new sink: destination, and +// filter.The updated sink might also have a new writer_identity; see +// the unique_writer_identity field. +func (r *BillingAccountsSinksService) Update(sinkNameid string, logsink *LogSink) *BillingAccountsSinksUpdateCall { + c := &BillingAccountsSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sinkNameid = sinkNameid + c.logsink = logsink + return c +} + +// UniqueWriterIdentity sets the optional parameter +// "uniqueWriterIdentity": See sinks.create for a description of this +// field. When updating a sink, the effect of this field on the value of +// writer_identity in the updated sink depends on both the old and new +// values of this field: If the old and new values of this field are +// both false or both true, then there is no change to the sink's +// writer_identity. If the old value is false and the new value is true, +// then writer_identity is changed to a unique service account. It is an +// error if the old value is true and the new value is set to false or +// defaulted to false. +func (c *BillingAccountsSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *BillingAccountsSinksUpdateCall { + c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) + return c +} + +// UpdateMask sets the optional parameter "updateMask": Field mask that +// specifies the fields in sink that need an update. A sink field will +// be overwritten if, and only if, it is in the update mask. name and +// output only fields cannot be updated.An empty updateMask is +// temporarily treated as using the following mask for backwards +// compatibility purposes: destination,filter,includeChildren At some +// point in the future, behavior will be removed and specifying an empty +// updateMask will be an error.For a detailed FieldMask definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: +// updateMask=filter. +func (c *BillingAccountsSinksUpdateCall) UpdateMask(updateMask string) *BillingAccountsSinksUpdateCall { + c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ExclusionsDeleteCall) Fields(s ...googleapi.Field) *ExclusionsDeleteCall { +func (c *BillingAccountsSinksUpdateCall) Fields(s ...googleapi.Field) *BillingAccountsSinksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5978,51 +5808,56 @@ func (c *ExclusionsDeleteCall) Fields(s ...googleapi.Field) *ExclusionsDeleteCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ExclusionsDeleteCall) Context(ctx context.Context) *ExclusionsDeleteCall { +func (c *BillingAccountsSinksUpdateCall) Context(ctx context.Context) *BillingAccountsSinksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ExclusionsDeleteCall) Header() http.Header { +func (c *BillingAccountsSinksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ExclusionsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *BillingAccountsSinksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "sinkName": c.sinkNameid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.exclusions.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// Do executes the "logging.billingAccounts.sinks.update" call. +// Exactly one of *LogSink or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) +// *LogSink.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ExclusionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +func (c *BillingAccountsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6041,7 +5876,7 @@ func (c *ExclusionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Empty{ + ret := &LogSink{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6053,25 +5888,39 @@ func (c *ExclusionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) } return ret, nil // { - // "description": "Deletes an exclusion.", - // "flatPath": "v2/{v2Id}/{v2Id1}/exclusions/{exclusionsId}", - // "httpMethod": "DELETE", - // "id": "logging.exclusions.delete", + // "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", + // "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}", + // "httpMethod": "PUT", + // "id": "logging.billingAccounts.sinks.update", // "parameterOrder": [ - // "name" + // "sinkName" // ], // "parameters": { - // "name": { - // "description": "Required. 
The resource name of an existing exclusion to delete:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + // "sinkName": { + // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", - // "pattern": "^[^/]+/[^/]+/exclusions/[^/]+$", + // "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$", // "required": true, // "type": "string" + // }, + // "uniqueWriterIdentity": { + // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", + // "location": "query", + // "type": "boolean" + // }, + // "updateMask": { + // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, - // "path": "v2/{+name}", + // "path": "v2/{+sinkName}", + // "request": { + // "$ref": "LogSink" + // }, // "response": { - // "$ref": "Empty" + // "$ref": "LogSink" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -6081,93 +5930,84 @@ func (c *ExclusionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) } -// method id "logging.exclusions.get": +// method id "logging.entries.list": -type ExclusionsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type EntriesListCall struct { + s *Service + listlogentriesrequest *ListLogEntriesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets the description of an exclusion. -func (r *ExclusionsService) Get(name string) *ExclusionsGetCall { - c := &ExclusionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name +// List: Lists log entries. Use this method to retrieve log entries that +// originated from a project/folder/organization/billing account. 
For +// ways to export log entries, see Exporting Logs +// (https://cloud.google.com/logging/docs/export). +func (r *EntriesService) List(listlogentriesrequest *ListLogEntriesRequest) *EntriesListCall { + c := &EntriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.listlogentriesrequest = listlogentriesrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ExclusionsGetCall) Fields(s ...googleapi.Field) *ExclusionsGetCall { +func (c *EntriesListCall) Fields(s ...googleapi.Field) *EntriesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ExclusionsGetCall) IfNoneMatch(entityTag string) *ExclusionsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ExclusionsGetCall) Context(ctx context.Context) *ExclusionsGetCall { +func (c *EntriesListCall) Context(ctx context.Context) *EntriesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ExclusionsGetCall) Header() http.Header { +func (c *EntriesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ExclusionsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *EntriesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.listlogentriesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/entries:list") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.exclusions.get" call. -// Exactly one of *LogExclusion or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *LogExclusion.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *ExclusionsGetCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { +// Do executes the "logging.entries.list" call. +// Exactly one of *ListLogEntriesResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListLogEntriesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *EntriesListCall) Do(opts ...googleapi.CallOption) (*ListLogEntriesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6186,7 +6026,7 @@ func (c *ExclusionsGetCall) Do(opts ...googleapi.CallOption) (*LogExclusion, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogExclusion{ + ret := &ListLogEntriesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6198,25 +6038,18 @@ func (c *ExclusionsGetCall) Do(opts ...googleapi.CallOption) (*LogExclusion, err } return ret, nil // { - // "description": "Gets the description of an exclusion.", - // "flatPath": "v2/{v2Id}/{v2Id1}/exclusions/{exclusionsId}", - // "httpMethod": "GET", - // "id": "logging.exclusions.get", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. The resource name of an existing exclusion:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", - // "location": "path", - // "pattern": "^[^/]+/[^/]+/exclusions/[^/]+$", - // "required": true, - // "type": "string" - // } + // "description": "Lists log entries. Use this method to retrieve log entries that originated from a project/folder/organization/billing account. For ways to export log entries, see Exporting Logs (https://cloud.google.com/logging/docs/export).", + // "flatPath": "v2/entries:list", + // "httpMethod": "POST", + // "id": "logging.entries.list", + // "parameterOrder": [], + // "parameters": {}, + // "path": "v2/entries:list", + // "request": { + // "$ref": "ListLogEntriesRequest" // }, - // "path": "v2/{+name}", // "response": { - // "$ref": "LogExclusion" + // "$ref": "ListLogEntriesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -6228,112 +6061,107 @@ func (c *ExclusionsGetCall) Do(opts ...googleapi.CallOption) (*LogExclusion, err } -// method id "logging.exclusions.list": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *EntriesListCall) Pages(ctx context.Context, f func(*ListLogEntriesResponse) error) error { + c.ctx_ = ctx + defer func(pt string) { c.listlogentriesrequest.PageToken = pt }(c.listlogentriesrequest.PageToken) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.listlogentriesrequest.PageToken = x.NextPageToken + } +} -type ExclusionsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists all the exclusions in a parent resource. -func (r *ExclusionsService) List(parent string) *ExclusionsListCall { - c := &ExclusionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} +// method id "logging.entries.write": -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return from this request. Non-positive values are -// ignored. The presence of nextPageToken in the response indicates that -// more results might be available. -func (c *ExclusionsListCall) PageSize(pageSize int64) *ExclusionsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c +type EntriesWriteCall struct { + s *Service + writelogentriesrequest *WriteLogEntriesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": If present, then -// retrieve the next batch of results from the preceding call to this -// method. pageToken must be the value of nextPageToken from the -// previous response. The values of other method parameters should be -// identical to those in the previous call. -func (c *ExclusionsListCall) PageToken(pageToken string) *ExclusionsListCall { - c.urlParams_.Set("pageToken", pageToken) +// Write: Writes log entries to Logging. This API method is the only way +// to send log entries to Logging. This method is used, directly or +// indirectly, by the Logging agent (fluentd) and all logging libraries +// configured to use Logging. A single request may contain log entries +// for a maximum of 1000 different resources (projects, organizations, +// billing accounts or folders) +func (r *EntriesService) Write(writelogentriesrequest *WriteLogEntriesRequest) *EntriesWriteCall { + c := &EntriesWriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.writelogentriesrequest = writelogentriesrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ExclusionsListCall) Fields(s ...googleapi.Field) *ExclusionsListCall { +func (c *EntriesWriteCall) Fields(s ...googleapi.Field) *EntriesWriteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ExclusionsListCall) IfNoneMatch(entityTag string) *ExclusionsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
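A minimal sketch of how the regenerated Entries.List call and its Pages helper shown above are typically driven; the project ID and filter are placeholders, and logging.NewService is assumed to pick up Application Default Credentials:

package main

import (
	"context"
	"fmt"
	"log"

	logging "google.golang.org/api/logging/v2"
)

func main() {
	ctx := context.Background()

	// Assumes Application Default Credentials are available.
	svc, err := logging.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// entries:list is a POST carrying the request in the body, so the
	// query lives in ListLogEntriesRequest rather than URL parameters.
	req := &logging.ListLogEntriesRequest{
		ResourceNames: []string{"projects/my-project-id"}, // placeholder
		Filter:        "severity>=ERROR",                  // placeholder
		PageSize:      100,
	}

	// Pages keeps calling Do until NextPageToken is empty, then restores
	// the request's original PageToken.
	err = svc.Entries.List(req).Pages(ctx, func(page *logging.ListLogEntriesResponse) error {
		for _, e := range page.Entries {
			fmt.Println(e.Timestamp, e.Severity, e.TextPayload)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}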
-func (c *ExclusionsListCall) Context(ctx context.Context) *ExclusionsListCall { +func (c *EntriesWriteCall) Context(ctx context.Context) *EntriesWriteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ExclusionsListCall) Header() http.Header { +func (c *EntriesWriteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ExclusionsListCall) doRequest(alt string) (*http.Response, error) { +func (c *EntriesWriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.writelogentriesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/exclusions") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/entries:write") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.exclusions.list" call. -// Exactly one of *ListExclusionsResponse or error will be non-nil. Any +// Do executes the "logging.entries.write" call. +// Exactly one of *WriteLogEntriesResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *ListExclusionsResponse.ServerResponse.Header or (if a response was +// *WriteLogEntriesResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ExclusionsListCall) Do(opts ...googleapi.CallOption) (*ListExclusionsResponse, error) { +func (c *EntriesWriteCall) Do(opts ...googleapi.CallOption) (*WriteLogEntriesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6352,7 +6180,7 @@ func (c *ExclusionsListCall) Do(opts ...googleapi.CallOption) (*ListExclusionsRe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListExclusionsResponse{ + ret := &WriteLogEntriesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6364,103 +6192,53 @@ func (c *ExclusionsListCall) Do(opts ...googleapi.CallOption) (*ListExclusionsRe } return ret, nil // { - // "description": "Lists all the exclusions in a parent resource.", - // "flatPath": "v2/{v2Id}/{v2Id1}/exclusions", - // "httpMethod": "GET", - // "id": "logging.exclusions.list", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "pageSize": { - // "description": "Optional. The maximum number of results to return from this request. 
Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The parent resource whose exclusions are to be listed.\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", - // "location": "path", - // "pattern": "^[^/]+/[^/]+$", - // "required": true, - // "type": "string" - // } + // "description": "Writes log entries to Logging. This API method is the only way to send log entries to Logging. This method is used, directly or indirectly, by the Logging agent (fluentd) and all logging libraries configured to use Logging. A single request may contain log entries for a maximum of 1000 different resources (projects, organizations, billing accounts or folders)", + // "flatPath": "v2/entries:write", + // "httpMethod": "POST", + // "id": "logging.entries.write", + // "parameterOrder": [], + // "parameters": {}, + // "path": "v2/entries:write", + // "request": { + // "$ref": "WriteLogEntriesRequest" // }, - // "path": "v2/{+parent}/exclusions", // "response": { - // "$ref": "ListExclusionsResponse" + // "$ref": "WriteLogEntriesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" + // "https://www.googleapis.com/auth/logging.write" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ExclusionsListCall) Pages(ctx context.Context, f func(*ListExclusionsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "logging.exclusions.patch": +// method id "logging.exclusions.create": -type ExclusionsPatchCall struct { +type ExclusionsCreateCall struct { s *Service - name string + parent string logexclusion *LogExclusion urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Changes one or more properties of an existing exclusion. -func (r *ExclusionsService) Patch(name string, logexclusion *LogExclusion) *ExclusionsPatchCall { - c := &ExclusionsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name +// Create: Creates a new exclusion in a specified parent resource. Only +// log entries belonging to that resource can be excluded. You can have +// up to 10 exclusions in a resource. 
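In the same spirit, a small sketch of the write side described above: one entry sent through Entries.Write, reusing the imports, svc and ctx from the listing sketch. The log name, project ID and monitored-resource labels are placeholders.

// writeSample sends a single entry via entries:write. A real request may
// batch entries for up to 1000 distinct resources, per the method docs.
func writeSample(ctx context.Context, svc *logging.Service) error {
	req := &logging.WriteLogEntriesRequest{
		Entries: []*logging.LogEntry{{
			LogName:     "projects/my-project-id/logs/my-app", // placeholder
			Severity:    "INFO",
			TextPayload: "hello from the v2 client",
			Resource: &logging.MonitoredResource{
				Type:   "global",
				Labels: map[string]string{"project_id": "my-project-id"}, // placeholder
			},
		}},
	}
	// WriteLogEntriesResponse carries no fields on success, so only the
	// error is interesting here.
	_, err := svc.Entries.Write(req).Context(ctx).Do()
	return err
}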
+func (r *ExclusionsService) Create(parent string, logexclusion *LogExclusion) *ExclusionsCreateCall { + c := &ExclusionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent c.logexclusion = logexclusion return c } -// UpdateMask sets the optional parameter "updateMask": Required. A -// non-empty list of fields to change in the existing exclusion. New -// values for the fields are taken from the corresponding fields in the -// LogExclusion included in this request. Fields not mentioned in -// update_mask are not changed and are ignored in the request.For -// example, to change the filter and description of an exclusion, -// specify an update_mask of "filter,description". -func (c *ExclusionsPatchCall) UpdateMask(updateMask string) *ExclusionsPatchCall { - c.urlParams_.Set("updateMask", updateMask) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ExclusionsPatchCall) Fields(s ...googleapi.Field) *ExclusionsPatchCall { +func (c *ExclusionsCreateCall) Fields(s ...googleapi.Field) *ExclusionsCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6468,23 +6246,23 @@ func (c *ExclusionsPatchCall) Fields(s ...googleapi.Field) *ExclusionsPatchCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ExclusionsPatchCall) Context(ctx context.Context) *ExclusionsPatchCall { +func (c *ExclusionsCreateCall) Context(ctx context.Context) *ExclusionsCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ExclusionsPatchCall) Header() http.Header { +func (c *ExclusionsCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ExclusionsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ExclusionsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6497,27 +6275,27 @@ func (c *ExclusionsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/exclusions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.exclusions.patch" call. +// Do executes the "logging.exclusions.create" call. // Exactly one of *LogExclusion or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *LogExclusion.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { +func (c *ExclusionsCreateCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6548,29 +6326,23 @@ func (c *ExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*LogExclusion, e } return ret, nil // { - // "description": "Changes one or more properties of an existing exclusion.", - // "flatPath": "v2/{v2Id}/{v2Id1}/exclusions/{exclusionsId}", - // "httpMethod": "PATCH", - // "id": "logging.exclusions.patch", + // "description": "Creates a new exclusion in a specified parent resource. Only log entries belonging to that resource can be excluded. You can have up to 10 exclusions in a resource.", + // "flatPath": "v2/{v2Id}/{v2Id1}/exclusions", + // "httpMethod": "POST", + // "id": "logging.exclusions.create", // "parameterOrder": [ - // "name" + // "parent" // ], // "parameters": { - // "name": { - // "description": "Required. The resource name of the exclusion to update:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + // "parent": { + // "description": "Required. The parent resource in which to create the exclusion: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", // "location": "path", - // "pattern": "^[^/]+/[^/]+/exclusions/[^/]+$", + // "pattern": "^[^/]+/[^/]+$", // "required": true, // "type": "string" - // }, - // "updateMask": { - // "description": "Required. A non-empty list of fields to change in the existing exclusion. New values for the fields are taken from the corresponding fields in the LogExclusion included in this request. Fields not mentioned in update_mask are not changed and are ignored in the request.For example, to change the filter and description of an exclusion, specify an update_mask of \"filter,description\".", - // "format": "google-fieldmask", - // "location": "query", - // "type": "string" // } // }, - // "path": "v2/{+name}", + // "path": "v2/{+parent}/exclusions", // "request": { // "$ref": "LogExclusion" // }, @@ -6585,31 +6357,27 @@ func (c *ExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*LogExclusion, e } -// method id "logging.folders.exclusions.create": +// method id "logging.exclusions.delete": -type FoldersExclusionsCreateCall struct { - s *Service - parent string - logexclusion *LogExclusion - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ExclusionsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Create: Creates a new exclusion in a specified parent resource. Only -// log entries belonging to that resource can be excluded. You can have -// up to 10 exclusions in a resource. 
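A corresponding sketch for the top-level exclusions collection introduced above: creating an exclusion under a parent resource, again reusing svc and ctx from the listing sketch. Parent, name and filter are placeholders; per the method docs a resource can hold at most 10 exclusions.

// createExclusion adds an exclusion that drops low-severity entries.
func createExclusion(ctx context.Context, svc *logging.Service) (*logging.LogExclusion, error) {
	parent := "projects/my-project-id" // placeholder
	return svc.Exclusions.Create(parent, &logging.LogExclusion{
		Name:        "drop-debug",
		Description: "drop debug-level entries",
		Filter:      "severity<=DEBUG",
	}).Context(ctx).Do()
}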
-func (r *FoldersExclusionsService) Create(parent string, logexclusion *LogExclusion) *FoldersExclusionsCreateCall { - c := &FoldersExclusionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.logexclusion = logexclusion +// Delete: Deletes an exclusion. +func (r *ExclusionsService) Delete(name string) *ExclusionsDeleteCall { + c := &ExclusionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FoldersExclusionsCreateCall) Fields(s ...googleapi.Field) *FoldersExclusionsCreateCall { +func (c *ExclusionsDeleteCall) Fields(s ...googleapi.Field) *ExclusionsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6617,190 +6385,51 @@ func (c *FoldersExclusionsCreateCall) Fields(s ...googleapi.Field) *FoldersExclu // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FoldersExclusionsCreateCall) Context(ctx context.Context) *FoldersExclusionsCreateCall { +func (c *ExclusionsDeleteCall) Context(ctx context.Context) *ExclusionsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FoldersExclusionsCreateCall) Header() http.Header { +func (c *ExclusionsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersExclusionsCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *ExclusionsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logexclusion) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/exclusions") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.exclusions.create" call. -// Exactly one of *LogExclusion or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *LogExclusion.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FoldersExclusionsCreateCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &LogExclusion{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a new exclusion in a specified parent resource. Only log entries belonging to that resource can be excluded. You can have up to 10 exclusions in a resource.", - // "flatPath": "v2/folders/{foldersId}/exclusions", - // "httpMethod": "POST", - // "id": "logging.folders.exclusions.create", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "parent": { - // "description": "Required. The parent resource in which to create the exclusion:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", - // "location": "path", - // "pattern": "^folders/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v2/{+parent}/exclusions", - // "request": { - // "$ref": "LogExclusion" - // }, - // "response": { - // "$ref": "LogExclusion" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin" - // ] - // } - -} - -// method id "logging.folders.exclusions.delete": - -type FoldersExclusionsDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes an exclusion. -func (r *FoldersExclusionsService) Delete(name string) *FoldersExclusionsDeleteCall { - c := &FoldersExclusionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *FoldersExclusionsDeleteCall) Fields(s ...googleapi.Field) *FoldersExclusionsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *FoldersExclusionsDeleteCall) Context(ctx context.Context) *FoldersExclusionsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *FoldersExclusionsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *FoldersExclusionsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.folders.exclusions.delete" call. +// Do executes the "logging.exclusions.delete" call. // Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *FoldersExclusionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +func (c *ExclusionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6832,17 +6461,17 @@ func (c *FoldersExclusionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, return ret, nil // { // "description": "Deletes an exclusion.", - // "flatPath": "v2/folders/{foldersId}/exclusions/{exclusionsId}", + // "flatPath": "v2/{v2Id}/{v2Id1}/exclusions/{exclusionsId}", // "httpMethod": "DELETE", - // "id": "logging.folders.exclusions.delete", + // "id": "logging.exclusions.delete", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The resource name of an existing exclusion to delete:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + // "description": "Required. The resource name of an existing exclusion to delete: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", // "location": "path", - // "pattern": "^folders/[^/]+/exclusions/[^/]+$", + // "pattern": "^[^/]+/[^/]+/exclusions/[^/]+$", // "required": true, // "type": "string" // } @@ -6859,9 +6488,9 @@ func (c *FoldersExclusionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, } -// method id "logging.folders.exclusions.get": +// method id "logging.exclusions.get": -type FoldersExclusionsGetCall struct { +type ExclusionsGetCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -6871,8 +6500,8 @@ type FoldersExclusionsGetCall struct { } // Get: Gets the description of an exclusion. -func (r *FoldersExclusionsService) Get(name string) *FoldersExclusionsGetCall { - c := &FoldersExclusionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ExclusionsService) Get(name string) *ExclusionsGetCall { + c := &ExclusionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } @@ -6880,7 +6509,7 @@ func (r *FoldersExclusionsService) Get(name string) *FoldersExclusionsGetCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *FoldersExclusionsGetCall) Fields(s ...googleapi.Field) *FoldersExclusionsGetCall { +func (c *ExclusionsGetCall) Fields(s ...googleapi.Field) *ExclusionsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6890,7 +6519,7 @@ func (c *FoldersExclusionsGetCall) Fields(s ...googleapi.Field) *FoldersExclusio // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *FoldersExclusionsGetCall) IfNoneMatch(entityTag string) *FoldersExclusionsGetCall { +func (c *ExclusionsGetCall) IfNoneMatch(entityTag string) *ExclusionsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -6898,23 +6527,23 @@ func (c *FoldersExclusionsGetCall) IfNoneMatch(entityTag string) *FoldersExclusi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FoldersExclusionsGetCall) Context(ctx context.Context) *FoldersExclusionsGetCall { +func (c *ExclusionsGetCall) Context(ctx context.Context) *ExclusionsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FoldersExclusionsGetCall) Header() http.Header { +func (c *ExclusionsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersExclusionsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ExclusionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6938,14 +6567,14 @@ func (c *FoldersExclusionsGetCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.exclusions.get" call. +// Do executes the "logging.exclusions.get" call. // Exactly one of *LogExclusion or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *LogExclusion.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FoldersExclusionsGetCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { +func (c *ExclusionsGetCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6977,17 +6606,17 @@ func (c *FoldersExclusionsGetCall) Do(opts ...googleapi.CallOption) (*LogExclusi return ret, nil // { // "description": "Gets the description of an exclusion.", - // "flatPath": "v2/folders/{foldersId}/exclusions/{exclusionsId}", + // "flatPath": "v2/{v2Id}/{v2Id1}/exclusions/{exclusionsId}", // "httpMethod": "GET", - // "id": "logging.folders.exclusions.get", + // "id": "logging.exclusions.get", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. 
The resource name of an existing exclusion:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + // "description": "Required. The resource name of an existing exclusion: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", // "location": "path", - // "pattern": "^folders/[^/]+/exclusions/[^/]+$", + // "pattern": "^[^/]+/[^/]+/exclusions/[^/]+$", // "required": true, // "type": "string" // } @@ -7006,9 +6635,9 @@ func (c *FoldersExclusionsGetCall) Do(opts ...googleapi.CallOption) (*LogExclusi } -// method id "logging.folders.exclusions.list": +// method id "logging.exclusions.list": -type FoldersExclusionsListCall struct { +type ExclusionsListCall struct { s *Service parent string urlParams_ gensupport.URLParams @@ -7018,8 +6647,8 @@ type FoldersExclusionsListCall struct { } // List: Lists all the exclusions in a parent resource. -func (r *FoldersExclusionsService) List(parent string) *FoldersExclusionsListCall { - c := &FoldersExclusionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ExclusionsService) List(parent string) *ExclusionsListCall { + c := &ExclusionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent return c } @@ -7028,7 +6657,7 @@ func (r *FoldersExclusionsService) List(parent string) *FoldersExclusionsListCal // of results to return from this request. Non-positive values are // ignored. The presence of nextPageToken in the response indicates that // more results might be available. -func (c *FoldersExclusionsListCall) PageSize(pageSize int64) *FoldersExclusionsListCall { +func (c *ExclusionsListCall) PageSize(pageSize int64) *ExclusionsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } @@ -7038,7 +6667,7 @@ func (c *FoldersExclusionsListCall) PageSize(pageSize int64) *FoldersExclusionsL // method. pageToken must be the value of nextPageToken from the // previous response. The values of other method parameters should be // identical to those in the previous call. -func (c *FoldersExclusionsListCall) PageToken(pageToken string) *FoldersExclusionsListCall { +func (c *ExclusionsListCall) PageToken(pageToken string) *ExclusionsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -7046,7 +6675,7 @@ func (c *FoldersExclusionsListCall) PageToken(pageToken string) *FoldersExclusio // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FoldersExclusionsListCall) Fields(s ...googleapi.Field) *FoldersExclusionsListCall { +func (c *ExclusionsListCall) Fields(s ...googleapi.Field) *ExclusionsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7056,7 +6685,7 @@ func (c *FoldersExclusionsListCall) Fields(s ...googleapi.Field) *FoldersExclusi // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *FoldersExclusionsListCall) IfNoneMatch(entityTag string) *FoldersExclusionsListCall { +func (c *ExclusionsListCall) IfNoneMatch(entityTag string) *ExclusionsListCall { c.ifNoneMatch_ = entityTag return c } @@ -7064,23 +6693,23 @@ func (c *FoldersExclusionsListCall) IfNoneMatch(entityTag string) *FoldersExclus // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FoldersExclusionsListCall) Context(ctx context.Context) *FoldersExclusionsListCall { +func (c *ExclusionsListCall) Context(ctx context.Context) *ExclusionsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FoldersExclusionsListCall) Header() http.Header { +func (c *ExclusionsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersExclusionsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ExclusionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7104,14 +6733,14 @@ func (c *FoldersExclusionsListCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.exclusions.list" call. +// Do executes the "logging.exclusions.list" call. // Exactly one of *ListExclusionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *ListExclusionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *FoldersExclusionsListCall) Do(opts ...googleapi.CallOption) (*ListExclusionsResponse, error) { +func (c *ExclusionsListCall) Do(opts ...googleapi.CallOption) (*ListExclusionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7143,9 +6772,9 @@ func (c *FoldersExclusionsListCall) Do(opts ...googleapi.CallOption) (*ListExclu return ret, nil // { // "description": "Lists all the exclusions in a parent resource.", - // "flatPath": "v2/folders/{foldersId}/exclusions", + // "flatPath": "v2/{v2Id}/{v2Id1}/exclusions", // "httpMethod": "GET", - // "id": "logging.folders.exclusions.list", + // "id": "logging.exclusions.list", // "parameterOrder": [ // "parent" // ], @@ -7162,9 +6791,9 @@ func (c *FoldersExclusionsListCall) Do(opts ...googleapi.CallOption) (*ListExclu // "type": "string" // }, // "parent": { - // "description": "Required. The parent resource whose exclusions are to be listed.\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + // "description": "Required. The parent resource whose exclusions are to be listed. 
\"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", // "location": "path", - // "pattern": "^folders/[^/]+$", + // "pattern": "^[^/]+/[^/]+$", // "required": true, // "type": "string" // } @@ -7186,7 +6815,7 @@ func (c *FoldersExclusionsListCall) Do(opts ...googleapi.CallOption) (*ListExclu // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *FoldersExclusionsListCall) Pages(ctx context.Context, f func(*ListExclusionsResponse) error) error { +func (c *ExclusionsListCall) Pages(ctx context.Context, f func(*ListExclusionsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -7204,9 +6833,9 @@ func (c *FoldersExclusionsListCall) Pages(ctx context.Context, f func(*ListExclu } } -// method id "logging.folders.exclusions.patch": +// method id "logging.exclusions.patch": -type FoldersExclusionsPatchCall struct { +type ExclusionsPatchCall struct { s *Service name string logexclusion *LogExclusion @@ -7216,8 +6845,8 @@ type FoldersExclusionsPatchCall struct { } // Patch: Changes one or more properties of an existing exclusion. -func (r *FoldersExclusionsService) Patch(name string, logexclusion *LogExclusion) *FoldersExclusionsPatchCall { - c := &FoldersExclusionsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ExclusionsService) Patch(name string, logexclusion *LogExclusion) *ExclusionsPatchCall { + c := &ExclusionsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name c.logexclusion = logexclusion return c @@ -7230,7 +6859,7 @@ func (r *FoldersExclusionsService) Patch(name string, logexclusion *LogExclusion // update_mask are not changed and are ignored in the request.For // example, to change the filter and description of an exclusion, // specify an update_mask of "filter,description". -func (c *FoldersExclusionsPatchCall) UpdateMask(updateMask string) *FoldersExclusionsPatchCall { +func (c *ExclusionsPatchCall) UpdateMask(updateMask string) *ExclusionsPatchCall { c.urlParams_.Set("updateMask", updateMask) return c } @@ -7238,7 +6867,7 @@ func (c *FoldersExclusionsPatchCall) UpdateMask(updateMask string) *FoldersExclu // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FoldersExclusionsPatchCall) Fields(s ...googleapi.Field) *FoldersExclusionsPatchCall { +func (c *ExclusionsPatchCall) Fields(s ...googleapi.Field) *ExclusionsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7246,23 +6875,23 @@ func (c *FoldersExclusionsPatchCall) Fields(s ...googleapi.Field) *FoldersExclus // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FoldersExclusionsPatchCall) Context(ctx context.Context) *FoldersExclusionsPatchCall { +func (c *ExclusionsPatchCall) Context(ctx context.Context) *ExclusionsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *FoldersExclusionsPatchCall) Header() http.Header { +func (c *ExclusionsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersExclusionsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ExclusionsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7288,14 +6917,14 @@ func (c *FoldersExclusionsPatchCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.exclusions.patch" call. +// Do executes the "logging.exclusions.patch" call. // Exactly one of *LogExclusion or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *LogExclusion.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FoldersExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { +func (c *ExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7327,17 +6956,17 @@ func (c *FoldersExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*LogExclu return ret, nil // { // "description": "Changes one or more properties of an existing exclusion.", - // "flatPath": "v2/folders/{foldersId}/exclusions/{exclusionsId}", + // "flatPath": "v2/{v2Id}/{v2Id1}/exclusions/{exclusionsId}", // "httpMethod": "PATCH", - // "id": "logging.folders.exclusions.patch", + // "id": "logging.exclusions.patch", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the exclusion to update:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + // "description": "Required. The resource name of the exclusion to update: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", // "location": "path", - // "pattern": "^folders/[^/]+/exclusions/[^/]+$", + // "pattern": "^[^/]+/[^/]+/exclusions/[^/]+$", // "required": true, // "type": "string" // }, @@ -7363,93 +6992,88 @@ func (c *FoldersExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*LogExclu } -// method id "logging.folders.locations.buckets.get": +// method id "logging.folders.exclusions.create": -type FoldersLocationsBucketsGetCall struct { +type FoldersExclusionsCreateCall struct { s *Service - name string + parent string + logexclusion *LogExclusion urlParams_ gensupport.URLParams - ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Gets a bucket (Beta). 
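And a sketch of the Patch call with updateMask shown above, which changes only the named fields of an existing exclusion; the resource name is a placeholder, and svc and ctx are as in the earlier sketches.

// updateExclusionDescription changes only the description of an exclusion.
func updateExclusionDescription(ctx context.Context, svc *logging.Service) error {
	name := "projects/my-project-id/exclusions/drop-debug" // placeholder
	_, err := svc.Exclusions.Patch(name, &logging.LogExclusion{
		Description: "drop all debug noise",
	}).UpdateMask("description").Context(ctx).Do()
	return err
}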
-func (r *FoldersLocationsBucketsService) Get(name string) *FoldersLocationsBucketsGetCall { - c := &FoldersLocationsBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name +// Create: Creates a new exclusion in a specified parent resource. Only +// log entries belonging to that resource can be excluded. You can have +// up to 10 exclusions in a resource. +func (r *FoldersExclusionsService) Create(parent string, logexclusion *LogExclusion) *FoldersExclusionsCreateCall { + c := &FoldersExclusionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.logexclusion = logexclusion return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FoldersLocationsBucketsGetCall) Fields(s ...googleapi.Field) *FoldersLocationsBucketsGetCall { +func (c *FoldersExclusionsCreateCall) Fields(s ...googleapi.Field) *FoldersExclusionsCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *FoldersLocationsBucketsGetCall) IfNoneMatch(entityTag string) *FoldersLocationsBucketsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FoldersLocationsBucketsGetCall) Context(ctx context.Context) *FoldersLocationsBucketsGetCall { +func (c *FoldersExclusionsCreateCall) Context(ctx context.Context) *FoldersExclusionsCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FoldersLocationsBucketsGetCall) Header() http.Header { +func (c *FoldersExclusionsCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersLocationsBucketsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersExclusionsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logexclusion) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/exclusions") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.locations.buckets.get" call. -// Exactly one of *LogBucket or error will be non-nil. Any non-2xx +// Do executes the "logging.folders.exclusions.create" call. +// Exactly one of *LogExclusion or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *LogBucket.ServerResponse.Header or (if a response was returned at +// *LogExclusion.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FoldersLocationsBucketsGetCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { +func (c *FoldersExclusionsCreateCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7468,7 +7092,7 @@ func (c *FoldersLocationsBucketsGetCall) Do(opts ...googleapi.CallOption) (*LogB if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogBucket{ + ret := &LogExclusion{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7480,142 +7104,110 @@ func (c *FoldersLocationsBucketsGetCall) Do(opts ...googleapi.CallOption) (*LogB } return ret, nil // { - // "description": "Gets a bucket (Beta).", - // "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}", - // "httpMethod": "GET", - // "id": "logging.folders.locations.buckets.get", + // "description": "Creates a new exclusion in a specified parent resource. Only log entries belonging to that resource can be excluded. You can have up to 10 exclusions in a resource.", + // "flatPath": "v2/folders/{foldersId}/exclusions", + // "httpMethod": "POST", + // "id": "logging.folders.exclusions.create", // "parameterOrder": [ - // "name" + // "parent" // ], // "parameters": { - // "name": { - // "description": "Required. The resource name of the bucket:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + // "parent": { + // "description": "Required. 
The parent resource in which to create the exclusion: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", // "location": "path", - // "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+$", + // "pattern": "^folders/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+name}", + // "path": "v2/{+parent}/exclusions", + // "request": { + // "$ref": "LogExclusion" + // }, // "response": { - // "$ref": "LogBucket" + // "$ref": "LogExclusion" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" + // "https://www.googleapis.com/auth/logging.admin" // ] // } } -// method id "logging.folders.locations.buckets.list": - -type FoldersLocationsBucketsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists buckets (Beta). -func (r *FoldersLocationsBucketsService) List(parent string) *FoldersLocationsBucketsListCall { - c := &FoldersLocationsBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} +// method id "logging.folders.exclusions.delete": -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return from this request. Non-positive values are -// ignored. The presence of nextPageToken in the response indicates that -// more results might be available. -func (c *FoldersLocationsBucketsListCall) PageSize(pageSize int64) *FoldersLocationsBucketsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c +type FoldersExclusionsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": If present, then -// retrieve the next batch of results from the preceding call to this -// method. pageToken must be the value of nextPageToken from the -// previous response. The values of other method parameters should be -// identical to those in the previous call. -func (c *FoldersLocationsBucketsListCall) PageToken(pageToken string) *FoldersLocationsBucketsListCall { - c.urlParams_.Set("pageToken", pageToken) +// Delete: Deletes an exclusion. +func (r *FoldersExclusionsService) Delete(name string) *FoldersExclusionsDeleteCall { + c := &FoldersExclusionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FoldersLocationsBucketsListCall) Fields(s ...googleapi.Field) *FoldersLocationsBucketsListCall { +func (c *FoldersExclusionsDeleteCall) Fields(s ...googleapi.Field) *FoldersExclusionsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *FoldersLocationsBucketsListCall) IfNoneMatch(entityTag string) *FoldersLocationsBucketsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FoldersLocationsBucketsListCall) Context(ctx context.Context) *FoldersLocationsBucketsListCall { +func (c *FoldersExclusionsDeleteCall) Context(ctx context.Context) *FoldersExclusionsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FoldersLocationsBucketsListCall) Header() http.Header { +func (c *FoldersExclusionsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersLocationsBucketsListCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersExclusionsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/buckets") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.locations.buckets.list" call. -// Exactly one of *ListBucketsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListBucketsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *FoldersLocationsBucketsListCall) Do(opts ...googleapi.CallOption) (*ListBucketsResponse, error) { +// Do executes the "logging.folders.exclusions.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *FoldersExclusionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7634,7 +7226,7 @@ func (c *FoldersLocationsBucketsListCall) Do(opts ...googleapi.CallOption) (*Lis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListBucketsResponse{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7646,148 +7238,103 @@ func (c *FoldersLocationsBucketsListCall) Do(opts ...googleapi.CallOption) (*Lis } return ret, nil // { - // "description": "Lists buckets (Beta).", - // "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets", - // "httpMethod": "GET", - // "id": "logging.folders.locations.buckets.list", + // "description": "Deletes an exclusion.", + // "flatPath": "v2/folders/{foldersId}/exclusions/{exclusionsId}", + // "httpMethod": "DELETE", + // "id": "logging.folders.exclusions.delete", // "parameterOrder": [ - // "parent" + // "name" // ], // "parameters": { - // "pageSize": { - // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The parent resource whose buckets are to be listed:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]\"\nNote: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", + // "name": { + // "description": "Required. The resource name of an existing exclusion to delete: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", // "location": "path", - // "pattern": "^folders/[^/]+/locations/[^/]+$", + // "pattern": "^folders/[^/]+/exclusions/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+parent}/buckets", + // "path": "v2/{+name}", // "response": { - // "$ref": "ListBucketsResponse" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" + // "https://www.googleapis.com/auth/logging.admin" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
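Editor's note: for reviewers tracking how the regenerated surface is consumed, here is a minimal sketch of calling the new logging.folders.exclusions.delete method. It assumes the package-level NewService constructor and the generated Folders.Exclusions service wiring; the folder and exclusion IDs are placeholders.

package main

import (
	"context"
	"log"

	logging "google.golang.org/api/logging/v2"
)

func main() {
	ctx := context.Background()
	// Assumption: standard generated constructor; uses Application Default Credentials.
	svc, err := logging.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// "folders/123456789" and "my-exclusion-id" are placeholder identifiers.
	name := "folders/123456789/exclusions/my-exclusion-id"
	if _, err := svc.Folders.Exclusions.Delete(name).Context(ctx).Do(); err != nil {
		log.Fatalf("delete exclusion: %v", err)
	}
}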
-func (c *FoldersLocationsBucketsListCall) Pages(ctx context.Context, f func(*ListBucketsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "logging.folders.locations.buckets.patch": +// method id "logging.folders.exclusions.get": -type FoldersLocationsBucketsPatchCall struct { - s *Service - name string - logbucket *LogBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FoldersExclusionsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Patch: Updates a bucket. This method replaces the following fields in -// the existing bucket with values from the new bucket: -// retention_periodIf the retention period is decreased and the bucket -// is locked, FAILED_PRECONDITION will be returned.If the bucket has a -// LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be -// returned.A buckets region may not be modified after it is created. -// This method is in Beta. -func (r *FoldersLocationsBucketsService) Patch(name string, logbucket *LogBucket) *FoldersLocationsBucketsPatchCall { - c := &FoldersLocationsBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Gets the description of an exclusion. +func (r *FoldersExclusionsService) Get(name string) *FoldersExclusionsGetCall { + c := &FoldersExclusionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.logbucket = logbucket - return c -} - -// UpdateMask sets the optional parameter "updateMask": Required. Field -// mask that specifies the fields in bucket that need an update. A -// bucket field will be overwritten if, and only if, it is in the update -// mask. name and output only fields cannot be updated.For a detailed -// FieldMask definition, see -// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: -// updateMask=retention_days. -func (c *FoldersLocationsBucketsPatchCall) UpdateMask(updateMask string) *FoldersLocationsBucketsPatchCall { - c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FoldersLocationsBucketsPatchCall) Fields(s ...googleapi.Field) *FoldersLocationsBucketsPatchCall { +func (c *FoldersExclusionsGetCall) Fields(s ...googleapi.Field) *FoldersExclusionsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *FoldersExclusionsGetCall) IfNoneMatch(entityTag string) *FoldersExclusionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *FoldersLocationsBucketsPatchCall) Context(ctx context.Context) *FoldersLocationsBucketsPatchCall { +func (c *FoldersExclusionsGetCall) Context(ctx context.Context) *FoldersExclusionsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FoldersLocationsBucketsPatchCall) Header() http.Header { +func (c *FoldersExclusionsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersLocationsBucketsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersExclusionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logbucket) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -7798,14 +7345,14 @@ func (c *FoldersLocationsBucketsPatchCall) doRequest(alt string) (*http.Response return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.locations.buckets.patch" call. -// Exactly one of *LogBucket or error will be non-nil. Any non-2xx +// Do executes the "logging.folders.exclusions.get" call. +// Exactly one of *LogExclusion or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *LogBucket.ServerResponse.Header or (if a response was returned at +// *LogExclusion.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FoldersLocationsBucketsPatchCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { +func (c *FoldersExclusionsGetCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7824,7 +7371,7 @@ func (c *FoldersLocationsBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Lo if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogBucket{ + ret := &LogExclusion{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7836,180 +7383,39 @@ func (c *FoldersLocationsBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Lo } return ret, nil // { - // "description": "Updates a bucket. 
This method replaces the following fields in the existing bucket with values from the new bucket: retention_periodIf the retention period is decreased and the bucket is locked, FAILED_PRECONDITION will be returned.If the bucket has a LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be returned.A buckets region may not be modified after it is created. This method is in Beta.", - // "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}", - // "httpMethod": "PATCH", - // "id": "logging.folders.locations.buckets.patch", + // "description": "Gets the description of an exclusion.", + // "flatPath": "v2/folders/{foldersId}/exclusions/{exclusionsId}", + // "httpMethod": "GET", + // "id": "logging.folders.exclusions.get", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The full resource name of the bucket to update.\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", + // "description": "Required. The resource name of an existing exclusion: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", // "location": "path", - // "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+$", + // "pattern": "^folders/[^/]+/exclusions/[^/]+$", // "required": true, // "type": "string" - // }, - // "updateMask": { - // "description": "Required. Field mask that specifies the fields in bucket that need an update. A bucket field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=retention_days.", - // "format": "google-fieldmask", - // "location": "query", - // "type": "string" // } // }, // "path": "v2/{+name}", - // "request": { - // "$ref": "LogBucket" - // }, - // "response": { - // "$ref": "LogBucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin" - // ] - // } - -} - -// method id "logging.folders.logs.delete": - -type FoldersLogsDeleteCall struct { - s *Service - logName string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes all the log entries in a log. The log reappears if it -// receives new entries. Log entries written shortly before the delete -// operation might not be deleted. Entries received after the delete -// operation with a timestamp before the operation will be deleted. -func (r *FoldersLogsService) Delete(logName string) *FoldersLogsDeleteCall { - c := &FoldersLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.logName = logName - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *FoldersLogsDeleteCall) Fields(s ...googleapi.Field) *FoldersLogsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *FoldersLogsDeleteCall) Context(ctx context.Context) *FoldersLogsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *FoldersLogsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *FoldersLogsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+logName}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "logName": c.logName, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.folders.logs.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *FoldersLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.", - // "flatPath": "v2/folders/{foldersId}/logs/{logsId}", - // "httpMethod": "DELETE", - // "id": "logging.folders.logs.delete", - // "parameterOrder": [ - // "logName" - // ], - // "parameters": { - // "logName": { - // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. 
For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", - // "location": "path", - // "pattern": "^folders/[^/]+/logs/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v2/{+logName}", // "response": { - // "$ref": "Empty" + // "$ref": "LogExclusion" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin" + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/logging.admin", + // "https://www.googleapis.com/auth/logging.read" // ] // } } -// method id "logging.folders.logs.list": +// method id "logging.folders.exclusions.list": -type FoldersLogsListCall struct { +type FoldersExclusionsListCall struct { s *Service parent string urlParams_ gensupport.URLParams @@ -8018,10 +7424,9 @@ type FoldersLogsListCall struct { header_ http.Header } -// List: Lists the logs in projects, organizations, folders, or billing -// accounts. Only logs that have entries are listed. -func (r *FoldersLogsService) List(parent string) *FoldersLogsListCall { - c := &FoldersLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Lists all the exclusions in a parent resource. +func (r *FoldersExclusionsService) List(parent string) *FoldersExclusionsListCall { + c := &FoldersExclusionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent return c } @@ -8030,7 +7435,7 @@ func (r *FoldersLogsService) List(parent string) *FoldersLogsListCall { // of results to return from this request. Non-positive values are // ignored. The presence of nextPageToken in the response indicates that // more results might be available. -func (c *FoldersLogsListCall) PageSize(pageSize int64) *FoldersLogsListCall { +func (c *FoldersExclusionsListCall) PageSize(pageSize int64) *FoldersExclusionsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } @@ -8040,7 +7445,7 @@ func (c *FoldersLogsListCall) PageSize(pageSize int64) *FoldersLogsListCall { // method. pageToken must be the value of nextPageToken from the // previous response. The values of other method parameters should be // identical to those in the previous call. -func (c *FoldersLogsListCall) PageToken(pageToken string) *FoldersLogsListCall { +func (c *FoldersExclusionsListCall) PageToken(pageToken string) *FoldersExclusionsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -8048,7 +7453,7 @@ func (c *FoldersLogsListCall) PageToken(pageToken string) *FoldersLogsListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FoldersLogsListCall) Fields(s ...googleapi.Field) *FoldersLogsListCall { +func (c *FoldersExclusionsListCall) Fields(s ...googleapi.Field) *FoldersExclusionsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -8058,7 +7463,7 @@ func (c *FoldersLogsListCall) Fields(s ...googleapi.Field) *FoldersLogsListCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *FoldersLogsListCall) IfNoneMatch(entityTag string) *FoldersLogsListCall { +func (c *FoldersExclusionsListCall) IfNoneMatch(entityTag string) *FoldersExclusionsListCall { c.ifNoneMatch_ = entityTag return c } @@ -8066,23 +7471,23 @@ func (c *FoldersLogsListCall) IfNoneMatch(entityTag string) *FoldersLogsListCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FoldersLogsListCall) Context(ctx context.Context) *FoldersLogsListCall { +func (c *FoldersExclusionsListCall) Context(ctx context.Context) *FoldersExclusionsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FoldersLogsListCall) Header() http.Header { +func (c *FoldersExclusionsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersLogsListCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersExclusionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8093,7 +7498,7 @@ func (c *FoldersLogsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logs") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/exclusions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -8106,14 +7511,14 @@ func (c *FoldersLogsListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.logs.list" call. -// Exactly one of *ListLogsResponse or error will be non-nil. Any +// Do executes the "logging.folders.exclusions.list" call. +// Exactly one of *ListExclusionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *ListLogsResponse.ServerResponse.Header or (if a response was +// *ListExclusionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *FoldersLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) { +func (c *FoldersExclusionsListCall) Do(opts ...googleapi.CallOption) (*ListExclusionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8132,7 +7537,7 @@ func (c *FoldersLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsRespons if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListLogsResponse{ + ret := &ListExclusionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8144,10 +7549,10 @@ func (c *FoldersLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsRespons } return ret, nil // { - // "description": "Lists the logs in projects, organizations, folders, or billing accounts. 
Only logs that have entries are listed.", - // "flatPath": "v2/folders/{foldersId}/logs", + // "description": "Lists all the exclusions in a parent resource.", + // "flatPath": "v2/folders/{foldersId}/exclusions", // "httpMethod": "GET", - // "id": "logging.folders.logs.list", + // "id": "logging.folders.exclusions.list", // "parameterOrder": [ // "parent" // ], @@ -8164,16 +7569,16 @@ func (c *FoldersLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsRespons // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + // "description": "Required. The parent resource whose exclusions are to be listed. \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", // "location": "path", // "pattern": "^folders/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+parent}/logs", + // "path": "v2/{+parent}/exclusions", // "response": { - // "$ref": "ListLogsResponse" + // "$ref": "ListExclusionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -8188,7 +7593,7 @@ func (c *FoldersLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsRespons // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *FoldersLogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error { +func (c *FoldersExclusionsListCall) Pages(ctx context.Context, f func(*ListExclusionsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -8206,49 +7611,41 @@ func (c *FoldersLogsListCall) Pages(ctx context.Context, f func(*ListLogsRespons } } -// method id "logging.folders.sinks.create": +// method id "logging.folders.exclusions.patch": -type FoldersSinksCreateCall struct { - s *Service - parent string - logsink *LogSink - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FoldersExclusionsPatchCall struct { + s *Service + name string + logexclusion *LogExclusion + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Create: Creates a sink that exports specified log entries to a -// destination. The export of newly-ingested log entries begins -// immediately, unless the sink's writer_identity is not permitted to -// write to the destination. A sink can export log entries only from the -// resource owning the sink. -func (r *FoldersSinksService) Create(parent string, logsink *LogSink) *FoldersSinksCreateCall { - c := &FoldersSinksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.logsink = logsink +// Patch: Changes one or more properties of an existing exclusion. +func (r *FoldersExclusionsService) Patch(name string, logexclusion *LogExclusion) *FoldersExclusionsPatchCall { + c := &FoldersExclusionsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.logexclusion = logexclusion return c } -// UniqueWriterIdentity sets the optional parameter -// "uniqueWriterIdentity": Determines the kind of IAM identity returned -// as writer_identity in the new sink. 
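Editor's note: listing exclusions goes through the regenerated Pages helper shown above, which drives pageToken/nextPageToken automatically. The sketch below reuses the service and "log" import from the first sketch and assumes ListExclusionsResponse carries an Exclusions slice, as in the published API.

func listExclusions(ctx context.Context, svc *logging.Service) error {
	// The callback runs once per page until it returns an error or pages run out.
	return svc.Folders.Exclusions.List("folders/123456789").
		PageSize(50).
		Pages(ctx, func(page *logging.ListExclusionsResponse) error {
			for _, ex := range page.Exclusions {
				log.Printf("exclusion %s: %s", ex.Name, ex.Filter)
			}
			return nil
		})
}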
If this value is omitted or set -// to false, and if the sink's parent is a project, then the value -// returned as writer_identity is the same group or service account used -// by Logging before the addition of writer identities to this API. The -// sink's destination must be in the same project as the sink itself.If -// this field is set to true, or if the sink is owned by a non-project -// resource such as an organization, then the value of writer_identity -// will be a unique service account used only for exports from the new -// sink. For more information, see writer_identity in LogSink. -func (c *FoldersSinksCreateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *FoldersSinksCreateCall { - c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) +// UpdateMask sets the optional parameter "updateMask": Required. A +// non-empty list of fields to change in the existing exclusion. New +// values for the fields are taken from the corresponding fields in the +// LogExclusion included in this request. Fields not mentioned in +// update_mask are not changed and are ignored in the request.For +// example, to change the filter and description of an exclusion, +// specify an update_mask of "filter,description". +func (c *FoldersExclusionsPatchCall) UpdateMask(updateMask string) *FoldersExclusionsPatchCall { + c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FoldersSinksCreateCall) Fields(s ...googleapi.Field) *FoldersSinksCreateCall { +func (c *FoldersExclusionsPatchCall) Fields(s ...googleapi.Field) *FoldersExclusionsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -8256,56 +7653,56 @@ func (c *FoldersSinksCreateCall) Fields(s ...googleapi.Field) *FoldersSinksCreat // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FoldersSinksCreateCall) Context(ctx context.Context) *FoldersSinksCreateCall { +func (c *FoldersExclusionsPatchCall) Context(ctx context.Context) *FoldersExclusionsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FoldersSinksCreateCall) Header() http.Header { +func (c *FoldersExclusionsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersSinksCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersExclusionsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logexclusion) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.sinks.create" call. -// Exactly one of *LogSink or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *LogSink.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *FoldersSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { +// Do executes the "logging.folders.exclusions.patch" call. +// Exactly one of *LogExclusion or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LogExclusion.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FoldersExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8324,7 +7721,7 @@ func (c *FoldersSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogSink{ + ret := &LogExclusion{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8336,33 +7733,34 @@ func (c *FoldersSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, err } return ret, nil // { - // "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", - // "flatPath": "v2/folders/{foldersId}/sinks", - // "httpMethod": "POST", - // "id": "logging.folders.sinks.create", + // "description": "Changes one or more properties of an existing exclusion.", + // "flatPath": "v2/folders/{foldersId}/exclusions/{exclusionsId}", + // "httpMethod": "PATCH", + // "id": "logging.folders.exclusions.patch", // "parameterOrder": [ - // "parent" + // "name" // ], // "parameters": { - // "parent": { - // "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "name": { + // "description": "Required. 
The resource name of the exclusion to update: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", // "location": "path", - // "pattern": "^folders/[^/]+$", + // "pattern": "^folders/[^/]+/exclusions/[^/]+$", // "required": true, // "type": "string" // }, - // "uniqueWriterIdentity": { - // "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", + // "updateMask": { + // "description": "Required. A non-empty list of fields to change in the existing exclusion. New values for the fields are taken from the corresponding fields in the LogExclusion included in this request. Fields not mentioned in update_mask are not changed and are ignored in the request.For example, to change the filter and description of an exclusion, specify an update_mask of \"filter,description\".", + // "format": "google-fieldmask", // "location": "query", - // "type": "boolean" + // "type": "string" // } // }, - // "path": "v2/{+parent}/sinks", + // "path": "v2/{+name}", // "request": { - // "$ref": "LogSink" + // "$ref": "LogExclusion" // }, // "response": { - // "$ref": "LogSink" + // "$ref": "LogExclusion" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -8372,28 +7770,39 @@ func (c *FoldersSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, err } -// method id "logging.folders.sinks.delete": +// method id "logging.folders.locations.buckets.create": -type FoldersSinksDeleteCall struct { +type FoldersLocationsBucketsCreateCall struct { s *Service - sinkNameid string + parent string + logbucket *LogBucket urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes a sink. If the sink has a unique writer_identity, -// then that service account is also deleted. -func (r *FoldersSinksService) Delete(sinkNameid string) *FoldersSinksDeleteCall { - c := &FoldersSinksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sinkNameid = sinkNameid +// Create: Creates a bucket that can be used to store log entries. Once +// a bucket has been created, the region cannot be changed. +func (r *FoldersLocationsBucketsService) Create(parent string, logbucket *LogBucket) *FoldersLocationsBucketsCreateCall { + c := &FoldersLocationsBucketsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.logbucket = logbucket + return c +} + +// BucketId sets the optional parameter "bucketId": Required. A +// client-assigned identifier such as "my-bucket". Identifiers are +// limited to 100 characters and can include only letters, digits, +// underscores, hyphens, and periods. 
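Editor's note: logging.folders.exclusions.patch takes a LogExclusion body plus an updateMask naming the fields to change. A sketch follows; the Filter and Description fields are assumed from the public LogExclusion schema rather than shown in this hunk.

func updateExclusionFilter(ctx context.Context, svc *logging.Service) (*logging.LogExclusion, error) {
	patch := &logging.LogExclusion{
		Filter:      `resource.type="gce_instance" AND severity<ERROR`,
		Description: "drop low-severity GCE entries",
	}
	// Only the fields named in UpdateMask are applied; others are ignored.
	return svc.Folders.Exclusions.
		Patch("folders/123456789/exclusions/my-exclusion-id", patch).
		UpdateMask("filter,description").
		Context(ctx).
		Do()
}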
+func (c *FoldersLocationsBucketsCreateCall) BucketId(bucketId string) *FoldersLocationsBucketsCreateCall { + c.urlParams_.Set("bucketId", bucketId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FoldersSinksDeleteCall) Fields(s ...googleapi.Field) *FoldersSinksDeleteCall { +func (c *FoldersLocationsBucketsCreateCall) Fields(s ...googleapi.Field) *FoldersLocationsBucketsCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -8401,51 +7810,56 @@ func (c *FoldersSinksDeleteCall) Fields(s ...googleapi.Field) *FoldersSinksDelet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FoldersSinksDeleteCall) Context(ctx context.Context) *FoldersSinksDeleteCall { +func (c *FoldersLocationsBucketsCreateCall) Context(ctx context.Context) *FoldersLocationsBucketsCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FoldersSinksDeleteCall) Header() http.Header { +func (c *FoldersLocationsBucketsCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersSinksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersLocationsBucketsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/buckets") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "sinkName": c.sinkNameid, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.sinks.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *FoldersSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +// Do executes the "logging.folders.locations.buckets.create" call. +// Exactly one of *LogBucket or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LogBucket.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *FoldersLocationsBucketsCreateCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8464,7 +7878,7 @@ func (c *FoldersSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Empty{ + ret := &LogBucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8476,25 +7890,33 @@ func (c *FoldersSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error } return ret, nil // { - // "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.", - // "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}", - // "httpMethod": "DELETE", - // "id": "logging.folders.sinks.delete", + // "description": "Creates a bucket that can be used to store log entries. Once a bucket has been created, the region cannot be changed.", + // "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets", + // "httpMethod": "POST", + // "id": "logging.folders.locations.buckets.create", // "parameterOrder": [ - // "sinkName" + // "parent" // ], // "parameters": { - // "sinkName": { - // "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "bucketId": { + // "description": "Required. A client-assigned identifier such as \"my-bucket\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The resource in which to create the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" Example: \"projects/my-logging-project/locations/global\"", // "location": "path", - // "pattern": "^folders/[^/]+/sinks/[^/]+$", + // "pattern": "^folders/[^/]+/locations/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+sinkName}", + // "path": "v2/{+parent}/buckets", + // "request": { + // "$ref": "LogBucket" + // }, // "response": { - // "$ref": "Empty" + // "$ref": "LogBucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -8504,93 +7926,81 @@ func (c *FoldersSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error } -// method id "logging.folders.sinks.get": +// method id "logging.folders.locations.buckets.delete": -type FoldersSinksGetCall struct { - s *Service - sinkName string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type FoldersLocationsBucketsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets a sink. -func (r *FoldersSinksService) Get(sinkName string) *FoldersSinksGetCall { - c := &FoldersSinksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sinkName = sinkName +// Delete: Deletes a bucket. Moves the bucket to the DELETE_REQUESTED +// state. After 7 days, the bucket will be purged and all logs in the +// bucket will be permanently deleted. 
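Editor's note: the new bucket surface mirrors the exclusion calls. logging.folders.locations.buckets.create takes the parent location plus a client-assigned bucketId; RetentionDays is assumed from the public LogBucket schema and is not shown in this hunk.

func createBucket(ctx context.Context, svc *logging.Service) (*logging.LogBucket, error) {
	bucket := &logging.LogBucket{
		Description:   "audit logs for the platform team",
		RetentionDays: 90, // assumed LogBucket field
	}
	// Per the method description, the region is fixed once the bucket exists.
	return svc.Folders.Locations.Buckets.
		Create("folders/123456789/locations/global", bucket).
		BucketId("platform-audit").
		Context(ctx).
		Do()
}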
+func (r *FoldersLocationsBucketsService) Delete(name string) *FoldersLocationsBucketsDeleteCall { + c := &FoldersLocationsBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FoldersSinksGetCall) Fields(s ...googleapi.Field) *FoldersSinksGetCall { +func (c *FoldersLocationsBucketsDeleteCall) Fields(s ...googleapi.Field) *FoldersLocationsBucketsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *FoldersSinksGetCall) IfNoneMatch(entityTag string) *FoldersSinksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FoldersSinksGetCall) Context(ctx context.Context) *FoldersSinksGetCall { +func (c *FoldersLocationsBucketsDeleteCall) Context(ctx context.Context) *FoldersLocationsBucketsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FoldersSinksGetCall) Header() http.Header { +func (c *FoldersLocationsBucketsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersSinksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersLocationsBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "sinkName": c.sinkName, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.sinks.get" call. -// Exactly one of *LogSink or error will be non-nil. Any non-2xx status +// Do executes the "logging.folders.locations.buckets.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *LogSink.ServerResponse.Header or (if a response was returned at all) +// *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. 
-func (c *FoldersSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { +func (c *FoldersLocationsBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8609,7 +8019,7 @@ func (c *FoldersSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogSink{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8621,69 +8031,214 @@ func (c *FoldersSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error) } return ret, nil // { - // "description": "Gets a sink.", - // "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}", - // "httpMethod": "GET", - // "id": "logging.folders.sinks.get", + // "description": "Deletes a bucket. Moves the bucket to the DELETE_REQUESTED state. After 7 days, the bucket will be purged and all logs in the bucket will be permanently deleted.", + // "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}", + // "httpMethod": "DELETE", + // "id": "logging.folders.locations.buckets.delete", // "parameterOrder": [ - // "sinkName" + // "name" // ], // "parameters": { - // "sinkName": { - // "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "name": { + // "description": "Required. The full resource name of the bucket to delete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", // "location": "path", - // "pattern": "^folders/[^/]+/sinks/[^/]+$", + // "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+sinkName}", + // "path": "v2/{+name}", // "response": { - // "$ref": "LogSink" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" + // "https://www.googleapis.com/auth/logging.admin" // ] // } } -// method id "logging.folders.sinks.list": +// method id "logging.folders.locations.buckets.get": -type FoldersSinksListCall struct { +type FoldersLocationsBucketsGetCall struct { s *Service - parent string + name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Lists sinks. -func (r *FoldersSinksService) List(parent string) *FoldersSinksListCall { - c := &FoldersSinksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent +// Get: Gets a bucket (Beta). 
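Editor's note: bucket deletion is soft per the description above (the bucket moves to DELETE_REQUESTED and is purged after 7 days); the call shape is the same name-based pattern, sketched here with a placeholder name.

func deleteBucket(ctx context.Context, svc *logging.Service) error {
	name := "folders/123456789/locations/global/buckets/platform-audit"
	_, err := svc.Folders.Locations.Buckets.Delete(name).Context(ctx).Do()
	return err
}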
+func (r *FoldersLocationsBucketsService) Get(name string) *FoldersLocationsBucketsGetCall { + c := &FoldersLocationsBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return from this request. Non-positive values are -// ignored. The presence of nextPageToken in the response indicates that -// more results might be available. -func (c *FoldersSinksListCall) PageSize(pageSize int64) *FoldersSinksListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FoldersLocationsBucketsGetCall) Fields(s ...googleapi.Field) *FoldersLocationsBucketsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// PageToken sets the optional parameter "pageToken": If present, then -// retrieve the next batch of results from the preceding call to this -// method. pageToken must be the value of nextPageToken from the -// previous response. The values of other method parameters should be -// identical to those in the previous call. -func (c *FoldersSinksListCall) PageToken(pageToken string) *FoldersSinksListCall { +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *FoldersLocationsBucketsGetCall) IfNoneMatch(entityTag string) *FoldersLocationsBucketsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FoldersLocationsBucketsGetCall) Context(ctx context.Context) *FoldersLocationsBucketsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FoldersLocationsBucketsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersLocationsBucketsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.folders.locations.buckets.get" call. +// Exactly one of *LogBucket or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LogBucket.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FoldersLocationsBucketsGetCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LogBucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a bucket (Beta).", + // "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}", + // "httpMethod": "GET", + // "id": "logging.folders.locations.buckets.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The resource name of the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + // "location": "path", + // "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "LogBucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/logging.admin", + // "https://www.googleapis.com/auth/logging.read" + // ] + // } + +} + +// method id "logging.folders.locations.buckets.list": + +type FoldersLocationsBucketsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists buckets (Beta). +func (r *FoldersLocationsBucketsService) List(parent string) *FoldersLocationsBucketsListCall { + c := &FoldersLocationsBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of results to return from this request. Non-positive values are +// ignored. The presence of nextPageToken in the response indicates that +// more results might be available. +func (c *FoldersLocationsBucketsListCall) PageSize(pageSize int64) *FoldersLocationsBucketsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If present, then +// retrieve the next batch of results from the preceding call to this +// method. pageToken must be the value of nextPageToken from the +// previous response. The values of other method parameters should be +// identical to those in the previous call. 
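Editor's note: logging.folders.locations.buckets.get supports conditional reads via IfNoneMatch, and googleapi.IsNotModified distinguishes a 304 from a real failure. The sketch additionally imports google.golang.org/api/googleapi; the ETag value is a placeholder supplied by the caller.

func getBucketIfChanged(ctx context.Context, svc *logging.Service, etag string) (*logging.LogBucket, error) {
	b, err := svc.Folders.Locations.Buckets.
		Get("folders/123456789/locations/global/buckets/platform-audit").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// Unchanged since the supplied ETag; the caller can keep its cached copy.
		return nil, nil
	}
	return b, err
}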
+func (c *FoldersLocationsBucketsListCall) PageToken(pageToken string) *FoldersLocationsBucketsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -8691,7 +8246,7 @@ func (c *FoldersSinksListCall) PageToken(pageToken string) *FoldersSinksListCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FoldersSinksListCall) Fields(s ...googleapi.Field) *FoldersSinksListCall { +func (c *FoldersLocationsBucketsListCall) Fields(s ...googleapi.Field) *FoldersLocationsBucketsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -8701,7 +8256,7 @@ func (c *FoldersSinksListCall) Fields(s ...googleapi.Field) *FoldersSinksListCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *FoldersSinksListCall) IfNoneMatch(entityTag string) *FoldersSinksListCall { +func (c *FoldersLocationsBucketsListCall) IfNoneMatch(entityTag string) *FoldersLocationsBucketsListCall { c.ifNoneMatch_ = entityTag return c } @@ -8709,23 +8264,23 @@ func (c *FoldersSinksListCall) IfNoneMatch(entityTag string) *FoldersSinksListCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FoldersSinksListCall) Context(ctx context.Context) *FoldersSinksListCall { +func (c *FoldersLocationsBucketsListCall) Context(ctx context.Context) *FoldersLocationsBucketsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FoldersSinksListCall) Header() http.Header { +func (c *FoldersLocationsBucketsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersSinksListCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersLocationsBucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8736,7 +8291,7 @@ func (c *FoldersSinksListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/buckets") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -8749,14 +8304,14 @@ func (c *FoldersSinksListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.sinks.list" call. -// Exactly one of *ListSinksResponse or error will be non-nil. Any +// Do executes the "logging.folders.locations.buckets.list" call. +// Exactly one of *ListBucketsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *ListSinksResponse.ServerResponse.Header or (if a response was +// *ListBucketsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. 
Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *FoldersSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksResponse, error) { +func (c *FoldersLocationsBucketsListCall) Do(opts ...googleapi.CallOption) (*ListBucketsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8775,7 +8330,7 @@ func (c *FoldersSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksRespo if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListSinksResponse{ + ret := &ListBucketsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8787,10 +8342,10 @@ func (c *FoldersSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksRespo } return ret, nil // { - // "description": "Lists sinks.", - // "flatPath": "v2/folders/{foldersId}/sinks", + // "description": "Lists buckets (Beta).", + // "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets", // "httpMethod": "GET", - // "id": "logging.folders.sinks.list", + // "id": "logging.folders.locations.buckets.list", // "parameterOrder": [ // "parent" // ], @@ -8807,16 +8362,16 @@ func (c *FoldersSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksRespo // "type": "string" // }, // "parent": { - // "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + // "description": "Required. The parent resource whose buckets are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" Note: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", // "location": "path", - // "pattern": "^folders/[^/]+$", + // "pattern": "^folders/[^/]+/locations/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+parent}/sinks", + // "path": "v2/{+parent}/buckets", // "response": { - // "$ref": "ListSinksResponse" + // "$ref": "ListBucketsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -8831,7 +8386,7 @@ func (c *FoldersSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksRespo // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *FoldersSinksListCall) Pages(ctx context.Context, f func(*ListSinksResponse) error) error { +func (c *FoldersLocationsBucketsListCall) Pages(ctx context.Context, f func(*ListBucketsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -8849,55 +8404,39 @@ func (c *FoldersSinksListCall) Pages(ctx context.Context, f func(*ListSinksRespo } } -// method id "logging.folders.sinks.patch": +// method id "logging.folders.locations.buckets.patch": -type FoldersSinksPatchCall struct { +type FoldersLocationsBucketsPatchCall struct { s *Service - sinkNameid string - logsink *LogSink + name string + logbucket *LogBucket urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Updates a sink. This method replaces the following fields in -// the existing sink with values from the new sink: destination, and -// filter.The updated sink might also have a new writer_identity; see -// the unique_writer_identity field. -func (r *FoldersSinksService) Patch(sinkNameid string, logsink *LogSink) *FoldersSinksPatchCall { - c := &FoldersSinksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sinkNameid = sinkNameid - c.logsink = logsink - return c -} - -// UniqueWriterIdentity sets the optional parameter -// "uniqueWriterIdentity": See sinks.create for a description of this -// field. When updating a sink, the effect of this field on the value of -// writer_identity in the updated sink depends on both the old and new -// values of this field: -// If the old and new values of this field are both false or both true, -// then there is no change to the sink's writer_identity. -// If the old value is false and the new value is true, then -// writer_identity is changed to a unique service account. -// It is an error if the old value is true and the new value is set to -// false or defaulted to false. -func (c *FoldersSinksPatchCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *FoldersSinksPatchCall { - c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) +// Patch: Updates a bucket. This method replaces the following fields in +// the existing bucket with values from the new bucket: +// retention_periodIf the retention period is decreased and the bucket +// is locked, FAILED_PRECONDITION will be returned.If the bucket has a +// LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be +// returned.A buckets region may not be modified after it is created. +// This method is in Beta. +func (r *FoldersLocationsBucketsService) Patch(name string, logbucket *LogBucket) *FoldersLocationsBucketsPatchCall { + c := &FoldersLocationsBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.logbucket = logbucket return c } -// UpdateMask sets the optional parameter "updateMask": Field mask that -// specifies the fields in sink that need an update. A sink field will -// be overwritten if, and only if, it is in the update mask. name and -// output only fields cannot be updated.An empty updateMask is -// temporarily treated as using the following mask for backwards -// compatibility purposes: destination,filter,includeChildren At some -// point in the future, behavior will be removed and specifying an empty -// updateMask will be an error.For a detailed FieldMask definition, see +// UpdateMask sets the optional parameter "updateMask": Required. Field +// mask that specifies the fields in bucket that need an update. 
A +// bucket field will be overwritten if, and only if, it is in the update +// mask. name and output only fields cannot be updated.For a detailed +// FieldMask definition, see // https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: -// updateMask=filter. -func (c *FoldersSinksPatchCall) UpdateMask(updateMask string) *FoldersSinksPatchCall { +// updateMask=retention_days. +func (c *FoldersLocationsBucketsPatchCall) UpdateMask(updateMask string) *FoldersLocationsBucketsPatchCall { c.urlParams_.Set("updateMask", updateMask) return c } @@ -8905,7 +8444,7 @@ func (c *FoldersSinksPatchCall) UpdateMask(updateMask string) *FoldersSinksPatch // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FoldersSinksPatchCall) Fields(s ...googleapi.Field) *FoldersSinksPatchCall { +func (c *FoldersLocationsBucketsPatchCall) Fields(s ...googleapi.Field) *FoldersLocationsBucketsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -8913,36 +8452,36 @@ func (c *FoldersSinksPatchCall) Fields(s ...googleapi.Field) *FoldersSinksPatchC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FoldersSinksPatchCall) Context(ctx context.Context) *FoldersSinksPatchCall { +func (c *FoldersLocationsBucketsPatchCall) Context(ctx context.Context) *FoldersLocationsBucketsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FoldersSinksPatchCall) Header() http.Header { +func (c *FoldersLocationsBucketsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersSinksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersLocationsBucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logbucket) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -8950,19 +8489,19 @@ func (c *FoldersSinksPatchCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "sinkName": c.sinkNameid, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.sinks.patch" call. -// Exactly one of *LogSink or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *LogSink.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *FoldersSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { +// Do executes the "logging.folders.locations.buckets.patch" call. +// Exactly one of *LogBucket or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LogBucket.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FoldersLocationsBucketsPatchCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8981,7 +8520,7 @@ func (c *FoldersSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSink, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogSink{ + ret := &LogBucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8993,39 +8532,34 @@ func (c *FoldersSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSink, erro } return ret, nil // { - // "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", - // "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}", + // "description": "Updates a bucket. This method replaces the following fields in the existing bucket with values from the new bucket: retention_periodIf the retention period is decreased and the bucket is locked, FAILED_PRECONDITION will be returned.If the bucket has a LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be returned.A buckets region may not be modified after it is created. This method is in Beta.", + // "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}", // "httpMethod": "PATCH", - // "id": "logging.folders.sinks.patch", + // "id": "logging.folders.locations.buckets.patch", // "parameterOrder": [ - // "sinkName" + // "name" // ], // "parameters": { - // "sinkName": { - // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "name": { + // "description": "Required. The full resource name of the bucket to update. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". 
Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", // "location": "path", - // "pattern": "^folders/[^/]+/sinks/[^/]+$", + // "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+$", // "required": true, // "type": "string" // }, - // "uniqueWriterIdentity": { - // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", - // "location": "query", - // "type": "boolean" - // }, // "updateMask": { - // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + // "description": "Required. Field mask that specifies the fields in bucket that need an update. A bucket field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=retention_days.", // "format": "google-fieldmask", // "location": "query", // "type": "string" // } // }, - // "path": "v2/{+sinkName}", + // "path": "v2/{+name}", // "request": { - // "$ref": "LogSink" + // "$ref": "LogBucket" // }, // "response": { - // "$ref": "LogSink" + // "$ref": "LogBucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -9035,63 +8569,30 @@ func (c *FoldersSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSink, erro } -// method id "logging.folders.sinks.update": - -type FoldersSinksUpdateCall struct { - s *Service - sinkNameid string - logsink *LogSink - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates a sink. This method replaces the following fields in -// the existing sink with values from the new sink: destination, and -// filter.The updated sink might also have a new writer_identity; see -// the unique_writer_identity field. -func (r *FoldersSinksService) Update(sinkNameid string, logsink *LogSink) *FoldersSinksUpdateCall { - c := &FoldersSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sinkNameid = sinkNameid - c.logsink = logsink - return c -} +// method id "logging.folders.locations.buckets.undelete": -// UniqueWriterIdentity sets the optional parameter -// "uniqueWriterIdentity": See sinks.create for a description of this -// field. 
When updating a sink, the effect of this field on the value of -// writer_identity in the updated sink depends on both the old and new -// values of this field: -// If the old and new values of this field are both false or both true, -// then there is no change to the sink's writer_identity. -// If the old value is false and the new value is true, then -// writer_identity is changed to a unique service account. -// It is an error if the old value is true and the new value is set to -// false or defaulted to false. -func (c *FoldersSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *FoldersSinksUpdateCall { - c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) - return c +type FoldersLocationsBucketsUndeleteCall struct { + s *Service + name string + undeletebucketrequest *UndeleteBucketRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdateMask sets the optional parameter "updateMask": Field mask that -// specifies the fields in sink that need an update. A sink field will -// be overwritten if, and only if, it is in the update mask. name and -// output only fields cannot be updated.An empty updateMask is -// temporarily treated as using the following mask for backwards -// compatibility purposes: destination,filter,includeChildren At some -// point in the future, behavior will be removed and specifying an empty -// updateMask will be an error.For a detailed FieldMask definition, see -// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: -// updateMask=filter. -func (c *FoldersSinksUpdateCall) UpdateMask(updateMask string) *FoldersSinksUpdateCall { - c.urlParams_.Set("updateMask", updateMask) +// Undelete: Undeletes a bucket. A bucket that has been deleted may be +// undeleted within the grace period of 7 days. +func (r *FoldersLocationsBucketsService) Undelete(name string, undeletebucketrequest *UndeleteBucketRequest) *FoldersLocationsBucketsUndeleteCall { + c := &FoldersLocationsBucketsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.undeletebucketrequest = undeletebucketrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FoldersSinksUpdateCall) Fields(s ...googleapi.Field) *FoldersSinksUpdateCall { +func (c *FoldersLocationsBucketsUndeleteCall) Fields(s ...googleapi.Field) *FoldersLocationsBucketsUndeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9099,56 +8600,56 @@ func (c *FoldersSinksUpdateCall) Fields(s ...googleapi.Field) *FoldersSinksUpdat // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FoldersSinksUpdateCall) Context(ctx context.Context) *FoldersSinksUpdateCall { +func (c *FoldersLocationsBucketsUndeleteCall) Context(ctx context.Context) *FoldersLocationsBucketsUndeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *FoldersSinksUpdateCall) Header() http.Header { +func (c *FoldersLocationsBucketsUndeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FoldersSinksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersLocationsBucketsUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeletebucketrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:undelete") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "sinkName": c.sinkNameid, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.folders.sinks.update" call. -// Exactly one of *LogSink or error will be non-nil. Any non-2xx status +// Do executes the "logging.folders.locations.buckets.undelete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *LogSink.ServerResponse.Header or (if a response was returned at all) +// *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *FoldersSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { +func (c *FoldersLocationsBucketsUndeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -9167,7 +8668,7 @@ func (c *FoldersSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogSink{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -9179,39 +8680,28 @@ func (c *FoldersSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, err } return ret, nil // { - // "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", - // "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}", - // "httpMethod": "PUT", - // "id": "logging.folders.sinks.update", + // "description": "Undeletes a bucket. 
A bucket that has been deleted may be undeleted within the grace period of 7 days.", + // "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}:undelete", + // "httpMethod": "POST", + // "id": "logging.folders.locations.buckets.undelete", // "parameterOrder": [ - // "sinkName" + // "name" // ], // "parameters": { - // "sinkName": { - // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "name": { + // "description": "Required. The full resource name of the bucket to undelete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", // "location": "path", - // "pattern": "^folders/[^/]+/sinks/[^/]+$", + // "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+$", // "required": true, // "type": "string" - // }, - // "uniqueWriterIdentity": { - // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", - // "location": "query", - // "type": "boolean" - // }, - // "updateMask": { - // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. 
name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", - // "format": "google-fieldmask", - // "location": "query", - // "type": "string" // } // }, - // "path": "v2/{+sinkName}", + // "path": "v2/{+name}:undelete", // "request": { - // "$ref": "LogSink" + // "$ref": "UndeleteBucketRequest" // }, // "response": { - // "$ref": "LogSink" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -9221,93 +8711,82 @@ func (c *FoldersSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, err } -// method id "logging.locations.buckets.get": +// method id "logging.folders.logs.delete": -type LocationsBucketsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type FoldersLogsDeleteCall struct { + s *Service + logName string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets a bucket (Beta). -func (r *LocationsBucketsService) Get(name string) *LocationsBucketsGetCall { - c := &LocationsBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name +// Delete: Deletes all the log entries in a log. The log reappears if it +// receives new entries. Log entries written shortly before the delete +// operation might not be deleted. Entries received after the delete +// operation with a timestamp before the operation will be deleted. +func (r *FoldersLogsService) Delete(logName string) *FoldersLogsDeleteCall { + c := &FoldersLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.logName = logName return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LocationsBucketsGetCall) Fields(s ...googleapi.Field) *LocationsBucketsGetCall { +func (c *FoldersLogsDeleteCall) Fields(s ...googleapi.Field) *FoldersLogsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *LocationsBucketsGetCall) IfNoneMatch(entityTag string) *LocationsBucketsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LocationsBucketsGetCall) Context(ctx context.Context) *LocationsBucketsGetCall { +func (c *FoldersLogsDeleteCall) Context(ctx context.Context) *FoldersLogsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *LocationsBucketsGetCall) Header() http.Header { +func (c *FoldersLogsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LocationsBucketsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersLogsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+logName}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "logName": c.logName, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.locations.buckets.get" call. -// Exactly one of *LogBucket or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *LogBucket.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *LocationsBucketsGetCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { +// Do executes the "logging.folders.logs.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *FoldersLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -9326,7 +8805,7 @@ func (c *LocationsBucketsGetCall) Do(opts ...googleapi.CallOption) (*LogBucket, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogBucket{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -9338,39 +8817,37 @@ func (c *LocationsBucketsGetCall) Do(opts ...googleapi.CallOption) (*LogBucket, } return ret, nil // { - // "description": "Gets a bucket (Beta).", - // "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}", - // "httpMethod": "GET", - // "id": "logging.locations.buckets.get", + // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. 
Entries received after the delete operation with a timestamp before the operation will be deleted.", + // "flatPath": "v2/folders/{foldersId}/logs/{logsId}", + // "httpMethod": "DELETE", + // "id": "logging.folders.logs.delete", // "parameterOrder": [ - // "name" + // "logName" // ], // "parameters": { - // "name": { - // "description": "Required. The resource name of the bucket:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + // "logName": { + // "description": "Required. The resource name of the log to delete: \"projects/[PROJECT_ID]/logs/[LOG_ID]\" \"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\" \"folders/[FOLDER_ID]/logs/[LOG_ID]\" [LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", // "location": "path", - // "pattern": "^[^/]+/[^/]+/locations/[^/]+/buckets/[^/]+$", + // "pattern": "^folders/[^/]+/logs/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+name}", + // "path": "v2/{+logName}", // "response": { - // "$ref": "LogBucket" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" + // "https://www.googleapis.com/auth/logging.admin" // ] // } } -// method id "logging.locations.buckets.list": +// method id "logging.folders.logs.list": -type LocationsBucketsListCall struct { +type FoldersLogsListCall struct { s *Service parent string urlParams_ gensupport.URLParams @@ -9379,9 +8856,10 @@ type LocationsBucketsListCall struct { header_ http.Header } -// List: Lists buckets (Beta). -func (r *LocationsBucketsService) List(parent string) *LocationsBucketsListCall { - c := &LocationsBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Lists the logs in projects, organizations, folders, or billing +// accounts. Only logs that have entries are listed. +func (r *FoldersLogsService) List(parent string) *FoldersLogsListCall { + c := &FoldersLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent return c } @@ -9390,7 +8868,7 @@ func (r *LocationsBucketsService) List(parent string) *LocationsBucketsListCall // of results to return from this request. Non-positive values are // ignored. The presence of nextPageToken in the response indicates that // more results might be available. -func (c *LocationsBucketsListCall) PageSize(pageSize int64) *LocationsBucketsListCall { +func (c *FoldersLogsListCall) PageSize(pageSize int64) *FoldersLogsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } @@ -9400,7 +8878,7 @@ func (c *LocationsBucketsListCall) PageSize(pageSize int64) *LocationsBucketsLis // method. pageToken must be the value of nextPageToken from the // previous response. The values of other method parameters should be // identical to those in the previous call. 
-func (c *LocationsBucketsListCall) PageToken(pageToken string) *LocationsBucketsListCall { +func (c *FoldersLogsListCall) PageToken(pageToken string) *FoldersLogsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -9408,7 +8886,7 @@ func (c *LocationsBucketsListCall) PageToken(pageToken string) *LocationsBuckets // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LocationsBucketsListCall) Fields(s ...googleapi.Field) *LocationsBucketsListCall { +func (c *FoldersLogsListCall) Fields(s ...googleapi.Field) *FoldersLogsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9418,7 +8896,7 @@ func (c *LocationsBucketsListCall) Fields(s ...googleapi.Field) *LocationsBucket // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *LocationsBucketsListCall) IfNoneMatch(entityTag string) *LocationsBucketsListCall { +func (c *FoldersLogsListCall) IfNoneMatch(entityTag string) *FoldersLogsListCall { c.ifNoneMatch_ = entityTag return c } @@ -9426,23 +8904,23 @@ func (c *LocationsBucketsListCall) IfNoneMatch(entityTag string) *LocationsBucke // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LocationsBucketsListCall) Context(ctx context.Context) *LocationsBucketsListCall { +func (c *FoldersLogsListCall) Context(ctx context.Context) *FoldersLogsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LocationsBucketsListCall) Header() http.Header { +func (c *FoldersLogsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LocationsBucketsListCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersLogsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9453,7 +8931,7 @@ func (c *LocationsBucketsListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/buckets") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logs") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -9466,14 +8944,14 @@ func (c *LocationsBucketsListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.locations.buckets.list" call. -// Exactly one of *ListBucketsResponse or error will be non-nil. Any +// Do executes the "logging.folders.logs.list" call. +// Exactly one of *ListLogsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *ListBucketsResponse.ServerResponse.Header or (if a response was +// *ListLogsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. 
Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *LocationsBucketsListCall) Do(opts ...googleapi.CallOption) (*ListBucketsResponse, error) { +func (c *FoldersLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -9492,7 +8970,7 @@ func (c *LocationsBucketsListCall) Do(opts ...googleapi.CallOption) (*ListBucket if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListBucketsResponse{ + ret := &ListLogsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -9504,10 +8982,10 @@ func (c *LocationsBucketsListCall) Do(opts ...googleapi.CallOption) (*ListBucket } return ret, nil // { - // "description": "Lists buckets (Beta).", - // "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets", + // "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", + // "flatPath": "v2/folders/{foldersId}/logs", // "httpMethod": "GET", - // "id": "logging.locations.buckets.list", + // "id": "logging.folders.logs.list", // "parameterOrder": [ // "parent" // ], @@ -9524,16 +9002,16 @@ func (c *LocationsBucketsListCall) Do(opts ...googleapi.CallOption) (*ListBucket // "type": "string" // }, // "parent": { - // "description": "Required. The parent resource whose buckets are to be listed:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]\"\nNote: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", + // "description": "Required. The resource name that owns the logs: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", // "location": "path", - // "pattern": "^[^/]+/[^/]+/locations/[^/]+$", + // "pattern": "^folders/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+parent}/buckets", + // "path": "v2/{+parent}/logs", // "response": { - // "$ref": "ListBucketsResponse" + // "$ref": "ListLogsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -9548,7 +9026,7 @@ func (c *LocationsBucketsListCall) Do(opts ...googleapi.CallOption) (*ListBucket // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *LocationsBucketsListCall) Pages(ctx context.Context, f func(*ListBucketsResponse) error) error { +func (c *FoldersLogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -9566,47 +9044,49 @@ func (c *LocationsBucketsListCall) Pages(ctx context.Context, f func(*ListBucket } } -// method id "logging.locations.buckets.patch": +// method id "logging.folders.sinks.create": -type LocationsBucketsPatchCall struct { +type FoldersSinksCreateCall struct { s *Service - name string - logbucket *LogBucket + parent string + logsink *LogSink urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Updates a bucket. This method replaces the following fields in -// the existing bucket with values from the new bucket: -// retention_periodIf the retention period is decreased and the bucket -// is locked, FAILED_PRECONDITION will be returned.If the bucket has a -// LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be -// returned.A buckets region may not be modified after it is created. -// This method is in Beta. -func (r *LocationsBucketsService) Patch(name string, logbucket *LogBucket) *LocationsBucketsPatchCall { - c := &LocationsBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.logbucket = logbucket +// Create: Creates a sink that exports specified log entries to a +// destination. The export of newly-ingested log entries begins +// immediately, unless the sink's writer_identity is not permitted to +// write to the destination. A sink can export log entries only from the +// resource owning the sink. +func (r *FoldersSinksService) Create(parent string, logsink *LogSink) *FoldersSinksCreateCall { + c := &FoldersSinksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.logsink = logsink return c } -// UpdateMask sets the optional parameter "updateMask": Required. Field -// mask that specifies the fields in bucket that need an update. A -// bucket field will be overwritten if, and only if, it is in the update -// mask. name and output only fields cannot be updated.For a detailed -// FieldMask definition, see -// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: -// updateMask=retention_days. -func (c *LocationsBucketsPatchCall) UpdateMask(updateMask string) *LocationsBucketsPatchCall { - c.urlParams_.Set("updateMask", updateMask) +// UniqueWriterIdentity sets the optional parameter +// "uniqueWriterIdentity": Determines the kind of IAM identity returned +// as writer_identity in the new sink. If this value is omitted or set +// to false, and if the sink's parent is a project, then the value +// returned as writer_identity is the same group or service account used +// by Logging before the addition of writer identities to this API. The +// sink's destination must be in the same project as the sink itself.If +// this field is set to true, or if the sink is owned by a non-project +// resource such as an organization, then the value of writer_identity +// will be a unique service account used only for exports from the new +// sink. For more information, see writer_identity in LogSink. 
+func (c *FoldersSinksCreateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *FoldersSinksCreateCall { + c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LocationsBucketsPatchCall) Fields(s ...googleapi.Field) *LocationsBucketsPatchCall { +func (c *FoldersSinksCreateCall) Fields(s ...googleapi.Field) *FoldersSinksCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9614,56 +9094,56 @@ func (c *LocationsBucketsPatchCall) Fields(s ...googleapi.Field) *LocationsBucke // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LocationsBucketsPatchCall) Context(ctx context.Context) *LocationsBucketsPatchCall { +func (c *FoldersSinksCreateCall) Context(ctx context.Context) *FoldersSinksCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LocationsBucketsPatchCall) Header() http.Header { +func (c *FoldersSinksCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LocationsBucketsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersSinksCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logbucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.locations.buckets.patch" call. -// Exactly one of *LogBucket or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *LogBucket.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *LocationsBucketsPatchCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { +// Do executes the "logging.folders.sinks.create" call. +// Exactly one of *LogSink or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *LogSink.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *FoldersSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -9682,7 +9162,7 @@ func (c *LocationsBucketsPatchCall) Do(opts ...googleapi.CallOption) (*LogBucket if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogBucket{ + ret := &LogSink{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -9694,34 +9174,33 @@ func (c *LocationsBucketsPatchCall) Do(opts ...googleapi.CallOption) (*LogBucket } return ret, nil // { - // "description": "Updates a bucket. This method replaces the following fields in the existing bucket with values from the new bucket: retention_periodIf the retention period is decreased and the bucket is locked, FAILED_PRECONDITION will be returned.If the bucket has a LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be returned.A buckets region may not be modified after it is created. This method is in Beta.", - // "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}", - // "httpMethod": "PATCH", - // "id": "logging.locations.buckets.patch", + // "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", + // "flatPath": "v2/folders/{foldersId}/sinks", + // "httpMethod": "POST", + // "id": "logging.folders.sinks.create", // "parameterOrder": [ - // "name" + // "parent" // ], // "parameters": { - // "name": { - // "description": "Required. The full resource name of the bucket to update.\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", + // "parent": { + // "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", // "location": "path", - // "pattern": "^[^/]+/[^/]+/locations/[^/]+/buckets/[^/]+$", + // "pattern": "^folders/[^/]+$", // "required": true, // "type": "string" // }, - // "updateMask": { - // "description": "Required. Field mask that specifies the fields in bucket that need an update. A bucket field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=retention_days.", - // "format": "google-fieldmask", + // "uniqueWriterIdentity": { + // "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. 
If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", // "location": "query", - // "type": "string" + // "type": "boolean" // } // }, - // "path": "v2/{+name}", + // "path": "v2/{+parent}/sinks", // "request": { - // "$ref": "LogBucket" + // "$ref": "LogSink" // }, // "response": { - // "$ref": "LogBucket" + // "$ref": "LogSink" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -9731,30 +9210,28 @@ func (c *LocationsBucketsPatchCall) Do(opts ...googleapi.CallOption) (*LogBucket } -// method id "logging.logs.delete": +// method id "logging.folders.sinks.delete": -type LogsDeleteCall struct { +type FoldersSinksDeleteCall struct { s *Service - logName string + sinkNameid string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes all the log entries in a log. The log reappears if it -// receives new entries. Log entries written shortly before the delete -// operation might not be deleted. Entries received after the delete -// operation with a timestamp before the operation will be deleted. -func (r *LogsService) Delete(logName string) *LogsDeleteCall { - c := &LogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.logName = logName +// Delete: Deletes a sink. If the sink has a unique writer_identity, +// then that service account is also deleted. +func (r *FoldersSinksService) Delete(sinkNameid string) *FoldersSinksDeleteCall { + c := &FoldersSinksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sinkNameid = sinkNameid return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LogsDeleteCall) Fields(s ...googleapi.Field) *LogsDeleteCall { +func (c *FoldersSinksDeleteCall) Fields(s ...googleapi.Field) *FoldersSinksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9762,23 +9239,23 @@ func (c *LogsDeleteCall) Fields(s ...googleapi.Field) *LogsDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LogsDeleteCall) Context(ctx context.Context) *LogsDeleteCall { +func (c *FoldersSinksDeleteCall) Context(ctx context.Context) *FoldersSinksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *LogsDeleteCall) Header() http.Header { +func (c *FoldersSinksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LogsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersSinksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9786,7 +9263,7 @@ func (c *LogsDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+logName}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -9794,19 +9271,19 @@ func (c *LogsDeleteCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "logName": c.logName, + "sinkName": c.sinkNameid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.logs.delete" call. +// Do executes the "logging.folders.sinks.delete" call. // Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *LogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +func (c *FoldersSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -9837,23 +9314,23 @@ func (c *LogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { } return ret, nil // { - // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.", - // "flatPath": "v2/{v2Id}/{v2Id1}/logs/{logsId}", + // "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.", + // "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}", // "httpMethod": "DELETE", - // "id": "logging.logs.delete", + // "id": "logging.folders.sinks.delete", // "parameterOrder": [ - // "logName" + // "sinkName" // ], // "parameters": { - // "logName": { - // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + // "sinkName": { + // "description": "Required. 
The full resource name of the sink to delete, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", - // "pattern": "^[^/]+/[^/]+/logs/[^/]+$", + // "pattern": "^folders/[^/]+/sinks/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+logName}", + // "path": "v2/{+sinkName}", // "response": { // "$ref": "Empty" // }, @@ -9865,48 +9342,28 @@ func (c *LogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { } -// method id "logging.logs.list": +// method id "logging.folders.sinks.get": -type LogsListCall struct { +type FoldersSinksGetCall struct { s *Service - parent string + sinkName string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Lists the logs in projects, organizations, folders, or billing -// accounts. Only logs that have entries are listed. -func (r *LogsService) List(parent string) *LogsListCall { - c := &LogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return from this request. Non-positive values are -// ignored. The presence of nextPageToken in the response indicates that -// more results might be available. -func (c *LogsListCall) PageSize(pageSize int64) *LogsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If present, then -// retrieve the next batch of results from the preceding call to this -// method. pageToken must be the value of nextPageToken from the -// previous response. The values of other method parameters should be -// identical to those in the previous call. -func (c *LogsListCall) PageToken(pageToken string) *LogsListCall { - c.urlParams_.Set("pageToken", pageToken) +// Get: Gets a sink. +func (r *FoldersSinksService) Get(sinkName string) *FoldersSinksGetCall { + c := &FoldersSinksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sinkName = sinkName return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LogsListCall) Fields(s ...googleapi.Field) *LogsListCall { +func (c *FoldersSinksGetCall) Fields(s ...googleapi.Field) *FoldersSinksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9916,7 +9373,7 @@ func (c *LogsListCall) Fields(s ...googleapi.Field) *LogsListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *LogsListCall) IfNoneMatch(entityTag string) *LogsListCall { +func (c *FoldersSinksGetCall) IfNoneMatch(entityTag string) *FoldersSinksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -9924,23 +9381,23 @@ func (c *LogsListCall) IfNoneMatch(entityTag string) *LogsListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *LogsListCall) Context(ctx context.Context) *LogsListCall { +func (c *FoldersSinksGetCall) Context(ctx context.Context) *FoldersSinksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LogsListCall) Header() http.Header { +func (c *FoldersSinksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LogsListCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersSinksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9951,7 +9408,7 @@ func (c *LogsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logs") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -9959,19 +9416,19 @@ func (c *LogsListCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "sinkName": c.sinkName, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.logs.list" call. -// Exactly one of *ListLogsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListLogsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *LogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) { +// Do executes the "logging.folders.sinks.get" call. +// Exactly one of *LogSink or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *LogSink.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *FoldersSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -9990,7 +9447,7 @@ func (c *LogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListLogsResponse{ + ret := &LogSink{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -10002,36 +9459,25 @@ func (c *LogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, erro } return ret, nil // { - // "description": "Lists the logs in projects, organizations, folders, or billing accounts. 
Only logs that have entries are listed.", - // "flatPath": "v2/{v2Id}/{v2Id1}/logs", + // "description": "Gets a sink.", + // "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}", // "httpMethod": "GET", - // "id": "logging.logs.list", + // "id": "logging.folders.sinks.get", // "parameterOrder": [ - // "parent" + // "sinkName" // ], // "parameters": { - // "pageSize": { - // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + // "sinkName": { + // "description": "Required. The resource name of the sink: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", - // "pattern": "^[^/]+/[^/]+$", + // "pattern": "^folders/[^/]+/sinks/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+parent}/logs", + // "path": "v2/{+sinkName}", // "response": { - // "$ref": "ListLogsResponse" + // "$ref": "LogSink" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -10043,41 +9489,21 @@ func (c *LogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, erro } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *LogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "logging.monitoredResourceDescriptors.list": +// method id "logging.folders.sinks.list": -type MonitoredResourceDescriptorsListCall struct { +type FoldersSinksListCall struct { s *Service + parent string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Lists the descriptors for monitored resource types used by -// Logging. -func (r *MonitoredResourceDescriptorsService) List() *MonitoredResourceDescriptorsListCall { - c := &MonitoredResourceDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Lists sinks. 
+func (r *FoldersSinksService) List(parent string) *FoldersSinksListCall { + c := &FoldersSinksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent return c } @@ -10085,7 +9511,7 @@ func (r *MonitoredResourceDescriptorsService) List() *MonitoredResourceDescripto // of results to return from this request. Non-positive values are // ignored. The presence of nextPageToken in the response indicates that // more results might be available. -func (c *MonitoredResourceDescriptorsListCall) PageSize(pageSize int64) *MonitoredResourceDescriptorsListCall { +func (c *FoldersSinksListCall) PageSize(pageSize int64) *FoldersSinksListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } @@ -10095,7 +9521,7 @@ func (c *MonitoredResourceDescriptorsListCall) PageSize(pageSize int64) *Monitor // method. pageToken must be the value of nextPageToken from the // previous response. The values of other method parameters should be // identical to those in the previous call. -func (c *MonitoredResourceDescriptorsListCall) PageToken(pageToken string) *MonitoredResourceDescriptorsListCall { +func (c *FoldersSinksListCall) PageToken(pageToken string) *FoldersSinksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -10103,7 +9529,7 @@ func (c *MonitoredResourceDescriptorsListCall) PageToken(pageToken string) *Moni // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MonitoredResourceDescriptorsListCall) Fields(s ...googleapi.Field) *MonitoredResourceDescriptorsListCall { +func (c *FoldersSinksListCall) Fields(s ...googleapi.Field) *FoldersSinksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -10113,7 +9539,7 @@ func (c *MonitoredResourceDescriptorsListCall) Fields(s ...googleapi.Field) *Mon // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *MonitoredResourceDescriptorsListCall) IfNoneMatch(entityTag string) *MonitoredResourceDescriptorsListCall { +func (c *FoldersSinksListCall) IfNoneMatch(entityTag string) *FoldersSinksListCall { c.ifNoneMatch_ = entityTag return c } @@ -10121,23 +9547,23 @@ func (c *MonitoredResourceDescriptorsListCall) IfNoneMatch(entityTag string) *Mo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MonitoredResourceDescriptorsListCall) Context(ctx context.Context) *MonitoredResourceDescriptorsListCall { +func (c *FoldersSinksListCall) Context(ctx context.Context) *FoldersSinksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *MonitoredResourceDescriptorsListCall) Header() http.Header { +func (c *FoldersSinksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MonitoredResourceDescriptorsListCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersSinksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10148,26 +9574,27 @@ func (c *MonitoredResourceDescriptorsListCall) doRequest(alt string) (*http.Resp var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/monitoredResourceDescriptors") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.monitoredResourceDescriptors.list" call. -// Exactly one of *ListMonitoredResourceDescriptorsResponse or error -// will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *ListMonitoredResourceDescriptorsResponse.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *MonitoredResourceDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) { +// Do executes the "logging.folders.sinks.list" call. +// Exactly one of *ListSinksResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListSinksResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *FoldersSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -10186,7 +9613,7 @@ func (c *MonitoredResourceDescriptorsListCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListMonitoredResourceDescriptorsResponse{ + ret := &ListSinksResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -10198,11 +9625,13 @@ func (c *MonitoredResourceDescriptorsListCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists the descriptors for monitored resource types used by Logging.", - // "flatPath": "v2/monitoredResourceDescriptors", + // "description": "Lists sinks.", + // "flatPath": "v2/folders/{foldersId}/sinks", // "httpMethod": "GET", - // "id": "logging.monitoredResourceDescriptors.list", - // "parameterOrder": [], + // "id": "logging.folders.sinks.list", + // "parameterOrder": [ + // "parent" + // ], // "parameters": { // "pageSize": { // "description": "Optional. 
The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", @@ -10214,11 +9643,18 @@ func (c *MonitoredResourceDescriptorsListCall) Do(opts ...googleapi.CallOption) // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", // "location": "query", // "type": "string" + // }, + // "parent": { + // "description": "Required. The parent resource whose sinks are to be listed: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", + // "location": "path", + // "pattern": "^folders/[^/]+$", + // "required": true, + // "type": "string" // } // }, - // "path": "v2/monitoredResourceDescriptors", + // "path": "v2/{+parent}/sinks", // "response": { - // "$ref": "ListMonitoredResourceDescriptorsResponse" + // "$ref": "ListSinksResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -10233,7 +9669,7 @@ func (c *MonitoredResourceDescriptorsListCall) Do(opts ...googleapi.CallOption) // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *MonitoredResourceDescriptorsListCall) Pages(ctx context.Context, f func(*ListMonitoredResourceDescriptorsResponse) error) error { +func (c *FoldersSinksListCall) Pages(ctx context.Context, f func(*ListSinksResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -10251,99 +9687,119 @@ func (c *MonitoredResourceDescriptorsListCall) Pages(ctx context.Context, f func } } -// method id "logging.organizations.getCmekSettings": +// method id "logging.folders.sinks.patch": -type OrganizationsGetCmekSettingsCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type FoldersSinksPatchCall struct { + s *Service + sinkNameid string + logsink *LogSink + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetCmekSettings: Gets the Logs Router CMEK settings for the given -// resource.Note: CMEK for the Logs Router can currently only be -// configured for GCP organizations. Once configured, it applies to all -// projects and folders in the GCP organization.See Enabling CMEK for -// Logs Router -// (https://cloud.google.com/logging/docs/routing/managed-encryption) -// for more information. -func (r *OrganizationsService) GetCmekSettings(name string) *OrganizationsGetCmekSettingsCall { - c := &OrganizationsGetCmekSettingsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name +// Patch: Updates a sink. This method replaces the following fields in +// the existing sink with values from the new sink: destination, and +// filter.The updated sink might also have a new writer_identity; see +// the unique_writer_identity field. 
+func (r *FoldersSinksService) Patch(sinkNameid string, logsink *LogSink) *FoldersSinksPatchCall { + c := &FoldersSinksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sinkNameid = sinkNameid + c.logsink = logsink + return c +} + +// UniqueWriterIdentity sets the optional parameter +// "uniqueWriterIdentity": See sinks.create for a description of this +// field. When updating a sink, the effect of this field on the value of +// writer_identity in the updated sink depends on both the old and new +// values of this field: If the old and new values of this field are +// both false or both true, then there is no change to the sink's +// writer_identity. If the old value is false and the new value is true, +// then writer_identity is changed to a unique service account. It is an +// error if the old value is true and the new value is set to false or +// defaulted to false. +func (c *FoldersSinksPatchCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *FoldersSinksPatchCall { + c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) + return c +} + +// UpdateMask sets the optional parameter "updateMask": Field mask that +// specifies the fields in sink that need an update. A sink field will +// be overwritten if, and only if, it is in the update mask. name and +// output only fields cannot be updated.An empty updateMask is +// temporarily treated as using the following mask for backwards +// compatibility purposes: destination,filter,includeChildren At some +// point in the future, behavior will be removed and specifying an empty +// updateMask will be an error.For a detailed FieldMask definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: +// updateMask=filter. +func (c *FoldersSinksPatchCall) UpdateMask(updateMask string) *FoldersSinksPatchCall { + c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsGetCmekSettingsCall) Fields(s ...googleapi.Field) *OrganizationsGetCmekSettingsCall { +func (c *FoldersSinksPatchCall) Fields(s ...googleapi.Field) *FoldersSinksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *OrganizationsGetCmekSettingsCall) IfNoneMatch(entityTag string) *OrganizationsGetCmekSettingsCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationsGetCmekSettingsCall) Context(ctx context.Context) *OrganizationsGetCmekSettingsCall { +func (c *FoldersSinksPatchCall) Context(ctx context.Context) *FoldersSinksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *OrganizationsGetCmekSettingsCall) Header() http.Header { +func (c *FoldersSinksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsGetCmekSettingsCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersSinksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}/cmekSettings") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "sinkName": c.sinkNameid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.getCmekSettings" call. -// Exactly one of *CmekSettings or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *CmekSettings.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *OrganizationsGetCmekSettingsCall) Do(opts ...googleapi.CallOption) (*CmekSettings, error) { +// Do executes the "logging.folders.sinks.patch" call. +// Exactly one of *LogSink or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *LogSink.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *FoldersSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -10362,7 +9818,7 @@ func (c *OrganizationsGetCmekSettingsCall) Do(opts ...googleapi.CallOption) (*Cm if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &CmekSettings{ + ret := &LogSink{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -10374,70 +9830,96 @@ func (c *OrganizationsGetCmekSettingsCall) Do(opts ...googleapi.CallOption) (*Cm } return ret, nil // { - // "description": "Gets the Logs Router CMEK settings for the given resource.Note: CMEK for the Logs Router can currently only be configured for GCP organizations. 
Once configured, it applies to all projects and folders in the GCP organization.See Enabling CMEK for Logs Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", - // "flatPath": "v2/organizations/{organizationsId}/cmekSettings", - // "httpMethod": "GET", - // "id": "logging.organizations.getCmekSettings", + // "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", + // "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}", + // "httpMethod": "PATCH", + // "id": "logging.folders.sinks.patch", // "parameterOrder": [ - // "name" + // "sinkName" // ], // "parameters": { - // "name": { - // "description": "Required. The resource for which to retrieve CMEK settings.\n\"projects/[PROJECT_ID]/cmekSettings\"\n\"organizations/[ORGANIZATION_ID]/cmekSettings\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\"\n\"folders/[FOLDER_ID]/cmekSettings\"\nExample: \"organizations/12345/cmekSettings\".Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once configured, it applies to all projects and folders in the GCP organization.", + // "sinkName": { + // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", - // "pattern": "^organizations/[^/]+$", + // "pattern": "^folders/[^/]+/sinks/[^/]+$", // "required": true, // "type": "string" + // }, + // "uniqueWriterIdentity": { + // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", + // "location": "query", + // "type": "boolean" + // }, + // "updateMask": { + // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. 
name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, - // "path": "v2/{+name}/cmekSettings", + // "path": "v2/{+sinkName}", + // "request": { + // "$ref": "LogSink" + // }, // "response": { - // "$ref": "CmekSettings" + // "$ref": "LogSink" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" + // "https://www.googleapis.com/auth/logging.admin" // ] // } } -// method id "logging.organizations.updateCmekSettings": +// method id "logging.folders.sinks.update": -type OrganizationsUpdateCmekSettingsCall struct { - s *Service - name string - cmeksettings *CmekSettings - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} +type FoldersSinksUpdateCall struct { + s *Service + sinkNameid string + logsink *LogSink + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} -// UpdateCmekSettings: Updates the Logs Router CMEK settings for the -// given resource.Note: CMEK for the Logs Router can currently only be -// configured for GCP organizations. Once configured, it applies to all -// projects and folders in the GCP organization.UpdateCmekSettings will -// fail if 1) kms_key_name is invalid, or 2) the associated service -// account does not have the required -// roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, -// or 3) access to the key is disabled.See Enabling CMEK for Logs Router -// (https://cloud.google.com/logging/docs/routing/managed-encryption) -// for more information. -func (r *OrganizationsService) UpdateCmekSettings(name string, cmeksettings *CmekSettings) *OrganizationsUpdateCmekSettingsCall { - c := &OrganizationsUpdateCmekSettingsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.cmeksettings = cmeksettings +// Update: Updates a sink. This method replaces the following fields in +// the existing sink with values from the new sink: destination, and +// filter.The updated sink might also have a new writer_identity; see +// the unique_writer_identity field. +func (r *FoldersSinksService) Update(sinkNameid string, logsink *LogSink) *FoldersSinksUpdateCall { + c := &FoldersSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sinkNameid = sinkNameid + c.logsink = logsink return c } -// UpdateMask sets the optional parameter "updateMask": Field mask -// identifying which fields from cmek_settings should be updated. A -// field will be overwritten if and only if it is in the update mask. -// Output only fields cannot be updated.See FieldMask for more -// information.Example: "updateMask=kmsKeyName" -func (c *OrganizationsUpdateCmekSettingsCall) UpdateMask(updateMask string) *OrganizationsUpdateCmekSettingsCall { +// UniqueWriterIdentity sets the optional parameter +// "uniqueWriterIdentity": See sinks.create for a description of this +// field. 
When updating a sink, the effect of this field on the value of +// writer_identity in the updated sink depends on both the old and new +// values of this field: If the old and new values of this field are +// both false or both true, then there is no change to the sink's +// writer_identity. If the old value is false and the new value is true, +// then writer_identity is changed to a unique service account. It is an +// error if the old value is true and the new value is set to false or +// defaulted to false. +func (c *FoldersSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *FoldersSinksUpdateCall { + c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) + return c +} + +// UpdateMask sets the optional parameter "updateMask": Field mask that +// specifies the fields in sink that need an update. A sink field will +// be overwritten if, and only if, it is in the update mask. name and +// output only fields cannot be updated.An empty updateMask is +// temporarily treated as using the following mask for backwards +// compatibility purposes: destination,filter,includeChildren At some +// point in the future, behavior will be removed and specifying an empty +// updateMask will be an error.For a detailed FieldMask definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: +// updateMask=filter. +func (c *FoldersSinksUpdateCall) UpdateMask(updateMask string) *FoldersSinksUpdateCall { c.urlParams_.Set("updateMask", updateMask) return c } @@ -10445,7 +9927,7 @@ func (c *OrganizationsUpdateCmekSettingsCall) UpdateMask(updateMask string) *Org // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsUpdateCmekSettingsCall) Fields(s ...googleapi.Field) *OrganizationsUpdateCmekSettingsCall { +func (c *FoldersSinksUpdateCall) Fields(s ...googleapi.Field) *FoldersSinksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -10453,56 +9935,56 @@ func (c *OrganizationsUpdateCmekSettingsCall) Fields(s ...googleapi.Field) *Orga // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationsUpdateCmekSettingsCall) Context(ctx context.Context) *OrganizationsUpdateCmekSettingsCall { +func (c *FoldersSinksUpdateCall) Context(ctx context.Context) *FoldersSinksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *OrganizationsUpdateCmekSettingsCall) Header() http.Header { +func (c *FoldersSinksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsUpdateCmekSettingsCall) doRequest(alt string) (*http.Response, error) { +func (c *FoldersSinksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.cmeksettings) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}/cmekSettings") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "sinkName": c.sinkNameid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.updateCmekSettings" call. -// Exactly one of *CmekSettings or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *CmekSettings.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *OrganizationsUpdateCmekSettingsCall) Do(opts ...googleapi.CallOption) (*CmekSettings, error) { +// Do executes the "logging.folders.sinks.update" call. +// Exactly one of *LogSink or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *LogSink.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *FoldersSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -10521,7 +10003,7 @@ func (c *OrganizationsUpdateCmekSettingsCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &CmekSettings{ + ret := &LogSink{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -10533,34 +10015,39 @@ func (c *OrganizationsUpdateCmekSettingsCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Updates the Logs Router CMEK settings for the given resource.Note: CMEK for the Logs Router can currently only be configured for GCP organizations. 
Once configured, it applies to all projects and folders in the GCP organization.UpdateCmekSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled.See Enabling CMEK for Logs Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", - // "flatPath": "v2/organizations/{organizationsId}/cmekSettings", - // "httpMethod": "PATCH", - // "id": "logging.organizations.updateCmekSettings", + // "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", + // "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}", + // "httpMethod": "PUT", + // "id": "logging.folders.sinks.update", // "parameterOrder": [ - // "name" + // "sinkName" // ], // "parameters": { - // "name": { - // "description": "Required. The resource name for the CMEK settings to update.\n\"projects/[PROJECT_ID]/cmekSettings\"\n\"organizations/[ORGANIZATION_ID]/cmekSettings\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\"\n\"folders/[FOLDER_ID]/cmekSettings\"\nExample: \"organizations/12345/cmekSettings\".Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once configured, it applies to all projects and folders in the GCP organization.", + // "sinkName": { + // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", - // "pattern": "^organizations/[^/]+$", + // "pattern": "^folders/[^/]+/sinks/[^/]+$", // "required": true, // "type": "string" // }, + // "uniqueWriterIdentity": { + // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", + // "location": "query", + // "type": "boolean" + // }, // "updateMask": { - // "description": "Optional. Field mask identifying which fields from cmek_settings should be updated. A field will be overwritten if and only if it is in the update mask. Output only fields cannot be updated.See FieldMask for more information.Example: \"updateMask=kmsKeyName\"", + // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. 
name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", // "format": "google-fieldmask", // "location": "query", // "type": "string" // } // }, - // "path": "v2/{+name}/cmekSettings", + // "path": "v2/{+sinkName}", // "request": { - // "$ref": "CmekSettings" + // "$ref": "LogSink" // }, // "response": { - // "$ref": "CmekSettings" + // "$ref": "LogSink" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -10570,31 +10057,39 @@ func (c *OrganizationsUpdateCmekSettingsCall) Do(opts ...googleapi.CallOption) ( } -// method id "logging.organizations.exclusions.create": +// method id "logging.locations.buckets.create": -type OrganizationsExclusionsCreateCall struct { - s *Service - parent string - logexclusion *LogExclusion - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LocationsBucketsCreateCall struct { + s *Service + parent string + logbucket *LogBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Create: Creates a new exclusion in a specified parent resource. Only -// log entries belonging to that resource can be excluded. You can have -// up to 10 exclusions in a resource. -func (r *OrganizationsExclusionsService) Create(parent string, logexclusion *LogExclusion) *OrganizationsExclusionsCreateCall { - c := &OrganizationsExclusionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Create: Creates a bucket that can be used to store log entries. Once +// a bucket has been created, the region cannot be changed. +func (r *LocationsBucketsService) Create(parent string, logbucket *LogBucket) *LocationsBucketsCreateCall { + c := &LocationsBucketsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent - c.logexclusion = logexclusion + c.logbucket = logbucket + return c +} + +// BucketId sets the optional parameter "bucketId": Required. A +// client-assigned identifier such as "my-bucket". Identifiers are +// limited to 100 characters and can include only letters, digits, +// underscores, hyphens, and periods. +func (c *LocationsBucketsCreateCall) BucketId(bucketId string) *LocationsBucketsCreateCall { + c.urlParams_.Set("bucketId", bucketId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsExclusionsCreateCall) Fields(s ...googleapi.Field) *OrganizationsExclusionsCreateCall { +func (c *LocationsBucketsCreateCall) Fields(s ...googleapi.Field) *LocationsBucketsCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -10602,36 +10097,36 @@ func (c *OrganizationsExclusionsCreateCall) Fields(s ...googleapi.Field) *Organi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *OrganizationsExclusionsCreateCall) Context(ctx context.Context) *OrganizationsExclusionsCreateCall { +func (c *LocationsBucketsCreateCall) Context(ctx context.Context) *LocationsBucketsCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationsExclusionsCreateCall) Header() http.Header { +func (c *LocationsBucketsCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsExclusionsCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *LocationsBucketsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logexclusion) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logbucket) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/exclusions") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/buckets") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -10644,14 +10139,14 @@ func (c *OrganizationsExclusionsCreateCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.exclusions.create" call. -// Exactly one of *LogExclusion or error will be non-nil. Any non-2xx +// Do executes the "logging.locations.buckets.create" call. +// Exactly one of *LogBucket or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *LogExclusion.ServerResponse.Header or (if a response was returned at +// *LogBucket.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *OrganizationsExclusionsCreateCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { +func (c *LocationsBucketsCreateCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -10670,7 +10165,7 @@ func (c *OrganizationsExclusionsCreateCall) Do(opts ...googleapi.CallOption) (*L if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogExclusion{ + ret := &LogBucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -10682,28 +10177,33 @@ func (c *OrganizationsExclusionsCreateCall) Do(opts ...googleapi.CallOption) (*L } return ret, nil // { - // "description": "Creates a new exclusion in a specified parent resource. Only log entries belonging to that resource can be excluded. You can have up to 10 exclusions in a resource.", - // "flatPath": "v2/organizations/{organizationsId}/exclusions", + // "description": "Creates a bucket that can be used to store log entries. 
Once a bucket has been created, the region cannot be changed.", + // "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets", // "httpMethod": "POST", - // "id": "logging.organizations.exclusions.create", + // "id": "logging.locations.buckets.create", // "parameterOrder": [ // "parent" // ], // "parameters": { + // "bucketId": { + // "description": "Required. A client-assigned identifier such as \"my-bucket\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", + // "location": "query", + // "type": "string" + // }, // "parent": { - // "description": "Required. The parent resource in which to create the exclusion:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "description": "Required. The resource in which to create the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" Example: \"projects/my-logging-project/locations/global\"", // "location": "path", - // "pattern": "^organizations/[^/]+$", + // "pattern": "^[^/]+/[^/]+/locations/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+parent}/exclusions", + // "path": "v2/{+parent}/buckets", // "request": { - // "$ref": "LogExclusion" + // "$ref": "LogBucket" // }, // "response": { - // "$ref": "LogExclusion" + // "$ref": "LogBucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -10713,9 +10213,9 @@ func (c *OrganizationsExclusionsCreateCall) Do(opts ...googleapi.CallOption) (*L } -// method id "logging.organizations.exclusions.delete": +// method id "logging.locations.buckets.delete": -type OrganizationsExclusionsDeleteCall struct { +type LocationsBucketsDeleteCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -10723,9 +10223,11 @@ type OrganizationsExclusionsDeleteCall struct { header_ http.Header } -// Delete: Deletes an exclusion. -func (r *OrganizationsExclusionsService) Delete(name string) *OrganizationsExclusionsDeleteCall { - c := &OrganizationsExclusionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes a bucket. Moves the bucket to the DELETE_REQUESTED +// state. After 7 days, the bucket will be purged and all logs in the +// bucket will be permanently deleted. +func (r *LocationsBucketsService) Delete(name string) *LocationsBucketsDeleteCall { + c := &LocationsBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } @@ -10733,7 +10235,7 @@ func (r *OrganizationsExclusionsService) Delete(name string) *OrganizationsExclu // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsExclusionsDeleteCall) Fields(s ...googleapi.Field) *OrganizationsExclusionsDeleteCall { +func (c *LocationsBucketsDeleteCall) Fields(s ...googleapi.Field) *LocationsBucketsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -10741,23 +10243,23 @@ func (c *OrganizationsExclusionsDeleteCall) Fields(s ...googleapi.Field) *Organi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *OrganizationsExclusionsDeleteCall) Context(ctx context.Context) *OrganizationsExclusionsDeleteCall { +func (c *LocationsBucketsDeleteCall) Context(ctx context.Context) *LocationsBucketsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationsExclusionsDeleteCall) Header() http.Header { +func (c *LocationsBucketsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsExclusionsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *LocationsBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10778,14 +10280,14 @@ func (c *OrganizationsExclusionsDeleteCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.exclusions.delete" call. +// Do executes the "logging.locations.buckets.delete" call. // Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *OrganizationsExclusionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +func (c *LocationsBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -10816,18 +10318,18 @@ func (c *OrganizationsExclusionsDeleteCall) Do(opts ...googleapi.CallOption) (*E } return ret, nil // { - // "description": "Deletes an exclusion.", - // "flatPath": "v2/organizations/{organizationsId}/exclusions/{exclusionsId}", + // "description": "Deletes a bucket. Moves the bucket to the DELETE_REQUESTED state. After 7 days, the bucket will be purged and all logs in the bucket will be permanently deleted.", + // "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}", // "httpMethod": "DELETE", - // "id": "logging.organizations.exclusions.delete", + // "id": "logging.locations.buckets.delete", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The resource name of an existing exclusion to delete:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + // "description": "Required. The full resource name of the bucket to delete. 
\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", // "location": "path", - // "pattern": "^organizations/[^/]+/exclusions/[^/]+$", + // "pattern": "^[^/]+/[^/]+/locations/[^/]+/buckets/[^/]+$", // "required": true, // "type": "string" // } @@ -10844,9 +10346,9 @@ func (c *OrganizationsExclusionsDeleteCall) Do(opts ...googleapi.CallOption) (*E } -// method id "logging.organizations.exclusions.get": +// method id "logging.locations.buckets.get": -type OrganizationsExclusionsGetCall struct { +type LocationsBucketsGetCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -10855,9 +10357,9 @@ type OrganizationsExclusionsGetCall struct { header_ http.Header } -// Get: Gets the description of an exclusion. -func (r *OrganizationsExclusionsService) Get(name string) *OrganizationsExclusionsGetCall { - c := &OrganizationsExclusionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Gets a bucket (Beta). +func (r *LocationsBucketsService) Get(name string) *LocationsBucketsGetCall { + c := &LocationsBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } @@ -10865,7 +10367,7 @@ func (r *OrganizationsExclusionsService) Get(name string) *OrganizationsExclusio // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsExclusionsGetCall) Fields(s ...googleapi.Field) *OrganizationsExclusionsGetCall { +func (c *LocationsBucketsGetCall) Fields(s ...googleapi.Field) *LocationsBucketsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -10875,7 +10377,7 @@ func (c *OrganizationsExclusionsGetCall) Fields(s ...googleapi.Field) *Organizat // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *OrganizationsExclusionsGetCall) IfNoneMatch(entityTag string) *OrganizationsExclusionsGetCall { +func (c *LocationsBucketsGetCall) IfNoneMatch(entityTag string) *LocationsBucketsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -10883,23 +10385,23 @@ func (c *OrganizationsExclusionsGetCall) IfNoneMatch(entityTag string) *Organiza // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationsExclusionsGetCall) Context(ctx context.Context) *OrganizationsExclusionsGetCall { +func (c *LocationsBucketsGetCall) Context(ctx context.Context) *LocationsBucketsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *OrganizationsExclusionsGetCall) Header() http.Header { +func (c *LocationsBucketsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsExclusionsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *LocationsBucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10923,14 +10425,14 @@ func (c *OrganizationsExclusionsGetCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.exclusions.get" call. -// Exactly one of *LogExclusion or error will be non-nil. Any non-2xx +// Do executes the "logging.locations.buckets.get" call. +// Exactly one of *LogBucket or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *LogExclusion.ServerResponse.Header or (if a response was returned at +// *LogBucket.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *OrganizationsExclusionsGetCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { +func (c *LocationsBucketsGetCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -10949,7 +10451,7 @@ func (c *OrganizationsExclusionsGetCall) Do(opts ...googleapi.CallOption) (*LogE if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogExclusion{ + ret := &LogBucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -10961,25 +10463,25 @@ func (c *OrganizationsExclusionsGetCall) Do(opts ...googleapi.CallOption) (*LogE } return ret, nil // { - // "description": "Gets the description of an exclusion.", - // "flatPath": "v2/organizations/{organizationsId}/exclusions/{exclusionsId}", + // "description": "Gets a bucket (Beta).", + // "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}", // "httpMethod": "GET", - // "id": "logging.organizations.exclusions.get", + // "id": "logging.locations.buckets.get", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The resource name of an existing exclusion:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + // "description": "Required. 
The resource name of the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", // "location": "path", - // "pattern": "^organizations/[^/]+/exclusions/[^/]+$", + // "pattern": "^[^/]+/[^/]+/locations/[^/]+/buckets/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v2/{+name}", // "response": { - // "$ref": "LogExclusion" + // "$ref": "LogBucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -10991,9 +10493,9 @@ func (c *OrganizationsExclusionsGetCall) Do(opts ...googleapi.CallOption) (*LogE } -// method id "logging.organizations.exclusions.list": +// method id "logging.locations.buckets.list": -type OrganizationsExclusionsListCall struct { +type LocationsBucketsListCall struct { s *Service parent string urlParams_ gensupport.URLParams @@ -11002,9 +10504,9 @@ type OrganizationsExclusionsListCall struct { header_ http.Header } -// List: Lists all the exclusions in a parent resource. -func (r *OrganizationsExclusionsService) List(parent string) *OrganizationsExclusionsListCall { - c := &OrganizationsExclusionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Lists buckets (Beta). +func (r *LocationsBucketsService) List(parent string) *LocationsBucketsListCall { + c := &LocationsBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent return c } @@ -11013,7 +10515,7 @@ func (r *OrganizationsExclusionsService) List(parent string) *OrganizationsExclu // of results to return from this request. Non-positive values are // ignored. The presence of nextPageToken in the response indicates that // more results might be available. -func (c *OrganizationsExclusionsListCall) PageSize(pageSize int64) *OrganizationsExclusionsListCall { +func (c *LocationsBucketsListCall) PageSize(pageSize int64) *LocationsBucketsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } @@ -11023,7 +10525,7 @@ func (c *OrganizationsExclusionsListCall) PageSize(pageSize int64) *Organization // method. pageToken must be the value of nextPageToken from the // previous response. The values of other method parameters should be // identical to those in the previous call. -func (c *OrganizationsExclusionsListCall) PageToken(pageToken string) *OrganizationsExclusionsListCall { +func (c *LocationsBucketsListCall) PageToken(pageToken string) *LocationsBucketsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -11031,7 +10533,7 @@ func (c *OrganizationsExclusionsListCall) PageToken(pageToken string) *Organizat // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsExclusionsListCall) Fields(s ...googleapi.Field) *OrganizationsExclusionsListCall { +func (c *LocationsBucketsListCall) Fields(s ...googleapi.Field) *LocationsBucketsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -11041,7 +10543,7 @@ func (c *OrganizationsExclusionsListCall) Fields(s ...googleapi.Field) *Organiza // getting updates only after the object has changed since the last // request. 
Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *OrganizationsExclusionsListCall) IfNoneMatch(entityTag string) *OrganizationsExclusionsListCall { +func (c *LocationsBucketsListCall) IfNoneMatch(entityTag string) *LocationsBucketsListCall { c.ifNoneMatch_ = entityTag return c } @@ -11049,23 +10551,23 @@ func (c *OrganizationsExclusionsListCall) IfNoneMatch(entityTag string) *Organiz // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationsExclusionsListCall) Context(ctx context.Context) *OrganizationsExclusionsListCall { +func (c *LocationsBucketsListCall) Context(ctx context.Context) *LocationsBucketsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationsExclusionsListCall) Header() http.Header { +func (c *LocationsBucketsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsExclusionsListCall) doRequest(alt string) (*http.Response, error) { +func (c *LocationsBucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11076,7 +10578,7 @@ func (c *OrganizationsExclusionsListCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/exclusions") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/buckets") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -11089,14 +10591,14 @@ func (c *OrganizationsExclusionsListCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.exclusions.list" call. -// Exactly one of *ListExclusionsResponse or error will be non-nil. Any +// Do executes the "logging.locations.buckets.list" call. +// Exactly one of *ListBucketsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *ListExclusionsResponse.ServerResponse.Header or (if a response was +// *ListBucketsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *OrganizationsExclusionsListCall) Do(opts ...googleapi.CallOption) (*ListExclusionsResponse, error) { +func (c *LocationsBucketsListCall) Do(opts ...googleapi.CallOption) (*ListBucketsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -11115,7 +10617,7 @@ func (c *OrganizationsExclusionsListCall) Do(opts ...googleapi.CallOption) (*Lis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListExclusionsResponse{ + ret := &ListBucketsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -11127,10 +10629,10 @@ func (c *OrganizationsExclusionsListCall) Do(opts ...googleapi.CallOption) (*Lis } return ret, nil // { - // "description": "Lists all the exclusions in a parent resource.", - // "flatPath": "v2/organizations/{organizationsId}/exclusions", + // "description": "Lists buckets (Beta).", + // "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets", // "httpMethod": "GET", - // "id": "logging.organizations.exclusions.list", + // "id": "logging.locations.buckets.list", // "parameterOrder": [ // "parent" // ], @@ -11147,16 +10649,16 @@ func (c *OrganizationsExclusionsListCall) Do(opts ...googleapi.CallOption) (*Lis // "type": "string" // }, // "parent": { - // "description": "Required. The parent resource whose exclusions are to be listed.\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + // "description": "Required. The parent resource whose buckets are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" Note: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", // "location": "path", - // "pattern": "^organizations/[^/]+$", + // "pattern": "^[^/]+/[^/]+/locations/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+parent}/exclusions", + // "path": "v2/{+parent}/buckets", // "response": { - // "$ref": "ListExclusionsResponse" + // "$ref": "ListBucketsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -11171,7 +10673,7 @@ func (c *OrganizationsExclusionsListCall) Do(opts ...googleapi.CallOption) (*Lis // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *OrganizationsExclusionsListCall) Pages(ctx context.Context, f func(*ListExclusionsResponse) error) error { +func (c *LocationsBucketsListCall) Pages(ctx context.Context, f func(*ListBucketsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -11189,33 +10691,39 @@ func (c *OrganizationsExclusionsListCall) Pages(ctx context.Context, f func(*Lis } } -// method id "logging.organizations.exclusions.patch": +// method id "logging.locations.buckets.patch": -type OrganizationsExclusionsPatchCall struct { - s *Service - name string - logexclusion *LogExclusion - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LocationsBucketsPatchCall struct { + s *Service + name string + logbucket *LogBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Changes one or more properties of an existing exclusion. 
-func (r *OrganizationsExclusionsService) Patch(name string, logexclusion *LogExclusion) *OrganizationsExclusionsPatchCall { - c := &OrganizationsExclusionsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a bucket. This method replaces the following fields in +// the existing bucket with values from the new bucket: +// retention_periodIf the retention period is decreased and the bucket +// is locked, FAILED_PRECONDITION will be returned.If the bucket has a +// LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be +// returned.A buckets region may not be modified after it is created. +// This method is in Beta. +func (r *LocationsBucketsService) Patch(name string, logbucket *LogBucket) *LocationsBucketsPatchCall { + c := &LocationsBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.logexclusion = logexclusion + c.logbucket = logbucket return c } -// UpdateMask sets the optional parameter "updateMask": Required. A -// non-empty list of fields to change in the existing exclusion. New -// values for the fields are taken from the corresponding fields in the -// LogExclusion included in this request. Fields not mentioned in -// update_mask are not changed and are ignored in the request.For -// example, to change the filter and description of an exclusion, -// specify an update_mask of "filter,description". -func (c *OrganizationsExclusionsPatchCall) UpdateMask(updateMask string) *OrganizationsExclusionsPatchCall { +// UpdateMask sets the optional parameter "updateMask": Required. Field +// mask that specifies the fields in bucket that need an update. A +// bucket field will be overwritten if, and only if, it is in the update +// mask. name and output only fields cannot be updated.For a detailed +// FieldMask definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: +// updateMask=retention_days. +func (c *LocationsBucketsPatchCall) UpdateMask(updateMask string) *LocationsBucketsPatchCall { c.urlParams_.Set("updateMask", updateMask) return c } @@ -11223,7 +10731,7 @@ func (c *OrganizationsExclusionsPatchCall) UpdateMask(updateMask string) *Organi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsExclusionsPatchCall) Fields(s ...googleapi.Field) *OrganizationsExclusionsPatchCall { +func (c *LocationsBucketsPatchCall) Fields(s ...googleapi.Field) *LocationsBucketsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -11231,29 +10739,29 @@ func (c *OrganizationsExclusionsPatchCall) Fields(s ...googleapi.Field) *Organiz // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationsExclusionsPatchCall) Context(ctx context.Context) *OrganizationsExclusionsPatchCall { +func (c *LocationsBucketsPatchCall) Context(ctx context.Context) *LocationsBucketsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *OrganizationsExclusionsPatchCall) Header() http.Header { +func (c *LocationsBucketsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsExclusionsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *LocationsBucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logexclusion) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logbucket) if err != nil { return nil, err } @@ -11273,14 +10781,14 @@ func (c *OrganizationsExclusionsPatchCall) doRequest(alt string) (*http.Response return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.exclusions.patch" call. -// Exactly one of *LogExclusion or error will be non-nil. Any non-2xx +// Do executes the "logging.locations.buckets.patch" call. +// Exactly one of *LogBucket or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *LogExclusion.ServerResponse.Header or (if a response was returned at +// *LogBucket.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *OrganizationsExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { +func (c *LocationsBucketsPatchCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -11299,7 +10807,7 @@ func (c *OrganizationsExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*Lo if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogExclusion{ + ret := &LogBucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -11311,23 +10819,23 @@ func (c *OrganizationsExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*Lo } return ret, nil // { - // "description": "Changes one or more properties of an existing exclusion.", - // "flatPath": "v2/organizations/{organizationsId}/exclusions/{exclusionsId}", + // "description": "Updates a bucket. This method replaces the following fields in the existing bucket with values from the new bucket: retention_periodIf the retention period is decreased and the bucket is locked, FAILED_PRECONDITION will be returned.If the bucket has a LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be returned.A buckets region may not be modified after it is created. This method is in Beta.", + // "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}", // "httpMethod": "PATCH", - // "id": "logging.organizations.exclusions.patch", + // "id": "logging.locations.buckets.patch", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. 
The resource name of the exclusion to update:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + // "description": "Required. The full resource name of the bucket to update. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", // "location": "path", - // "pattern": "^organizations/[^/]+/exclusions/[^/]+$", + // "pattern": "^[^/]+/[^/]+/locations/[^/]+/buckets/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "Required. A non-empty list of fields to change in the existing exclusion. New values for the fields are taken from the corresponding fields in the LogExclusion included in this request. Fields not mentioned in update_mask are not changed and are ignored in the request.For example, to change the filter and description of an exclusion, specify an update_mask of \"filter,description\".", + // "description": "Required. Field mask that specifies the fields in bucket that need an update. A bucket field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=retention_days.", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -11335,10 +10843,10 @@ func (c *OrganizationsExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*Lo // }, // "path": "v2/{+name}", // "request": { - // "$ref": "LogExclusion" + // "$ref": "LogBucket" // }, // "response": { - // "$ref": "LogExclusion" + // "$ref": "LogBucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -11348,75 +10856,69 @@ func (c *OrganizationsExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*Lo } -// method id "logging.organizations.locations.buckets.get": +// method id "logging.locations.buckets.undelete": -type OrganizationsLocationsBucketsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type LocationsBucketsUndeleteCall struct { + s *Service + name string + undeletebucketrequest *UndeleteBucketRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets a bucket (Beta). -func (r *OrganizationsLocationsBucketsService) Get(name string) *OrganizationsLocationsBucketsGetCall { - c := &OrganizationsLocationsBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Undelete: Undeletes a bucket. A bucket that has been deleted may be +// undeleted within the grace period of 7 days. 
+func (r *LocationsBucketsService) Undelete(name string, undeletebucketrequest *UndeleteBucketRequest) *LocationsBucketsUndeleteCall { + c := &LocationsBucketsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name + c.undeletebucketrequest = undeletebucketrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsLocationsBucketsGetCall) Fields(s ...googleapi.Field) *OrganizationsLocationsBucketsGetCall { +func (c *LocationsBucketsUndeleteCall) Fields(s ...googleapi.Field) *LocationsBucketsUndeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *OrganizationsLocationsBucketsGetCall) IfNoneMatch(entityTag string) *OrganizationsLocationsBucketsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationsLocationsBucketsGetCall) Context(ctx context.Context) *OrganizationsLocationsBucketsGetCall { +func (c *LocationsBucketsUndeleteCall) Context(ctx context.Context) *LocationsBucketsUndeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationsLocationsBucketsGetCall) Header() http.Header { +func (c *LocationsBucketsUndeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsLocationsBucketsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *LocationsBucketsUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeletebucketrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:undelete") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -11427,14 +10929,14 @@ func (c *OrganizationsLocationsBucketsGetCall) doRequest(alt string) (*http.Resp return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.locations.buckets.get" call. -// Exactly one of *LogBucket or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *LogBucket.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *OrganizationsLocationsBucketsGetCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { +// Do executes the "logging.locations.buckets.undelete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LocationsBucketsUndeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -11453,7 +10955,7 @@ func (c *OrganizationsLocationsBucketsGetCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogBucket{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -11465,147 +10967,118 @@ func (c *OrganizationsLocationsBucketsGetCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Gets a bucket (Beta).", - // "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}", - // "httpMethod": "GET", - // "id": "logging.organizations.locations.buckets.get", + // "description": "Undeletes a bucket. A bucket that has been deleted may be undeleted within the grace period of 7 days.", + // "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}:undelete", + // "httpMethod": "POST", + // "id": "logging.locations.buckets.undelete", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the bucket:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + // "description": "Required. The full resource name of the bucket to undelete. 
\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", // "location": "path", - // "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+$", + // "pattern": "^[^/]+/[^/]+/locations/[^/]+/buckets/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+name}", + // "path": "v2/{+name}:undelete", + // "request": { + // "$ref": "UndeleteBucketRequest" + // }, // "response": { - // "$ref": "LogBucket" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" + // "https://www.googleapis.com/auth/logging.admin" // ] // } } -// method id "logging.organizations.locations.buckets.list": - -type OrganizationsLocationsBucketsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists buckets (Beta). -func (r *OrganizationsLocationsBucketsService) List(parent string) *OrganizationsLocationsBucketsListCall { - c := &OrganizationsLocationsBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} +// method id "logging.logs.delete": -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return from this request. Non-positive values are -// ignored. The presence of nextPageToken in the response indicates that -// more results might be available. -func (c *OrganizationsLocationsBucketsListCall) PageSize(pageSize int64) *OrganizationsLocationsBucketsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c +type LogsDeleteCall struct { + s *Service + logName string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": If present, then -// retrieve the next batch of results from the preceding call to this -// method. pageToken must be the value of nextPageToken from the -// previous response. The values of other method parameters should be -// identical to those in the previous call. -func (c *OrganizationsLocationsBucketsListCall) PageToken(pageToken string) *OrganizationsLocationsBucketsListCall { - c.urlParams_.Set("pageToken", pageToken) +// Delete: Deletes all the log entries in a log. The log reappears if it +// receives new entries. Log entries written shortly before the delete +// operation might not be deleted. Entries received after the delete +// operation with a timestamp before the operation will be deleted. +func (r *LogsService) Delete(logName string) *LogsDeleteCall { + c := &LogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.logName = logName return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *OrganizationsLocationsBucketsListCall) Fields(s ...googleapi.Field) *OrganizationsLocationsBucketsListCall { +func (c *LogsDeleteCall) Fields(s ...googleapi.Field) *LogsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *OrganizationsLocationsBucketsListCall) IfNoneMatch(entityTag string) *OrganizationsLocationsBucketsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationsLocationsBucketsListCall) Context(ctx context.Context) *OrganizationsLocationsBucketsListCall { +func (c *LogsDeleteCall) Context(ctx context.Context) *LogsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationsLocationsBucketsListCall) Header() http.Header { +func (c *LogsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsLocationsBucketsListCall) doRequest(alt string) (*http.Response, error) { +func (c *LogsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/buckets") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+logName}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "logName": c.logName, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.locations.buckets.list" call. -// Exactly one of *ListBucketsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListBucketsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *OrganizationsLocationsBucketsListCall) Do(opts ...googleapi.CallOption) (*ListBucketsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() +// Do executes the "logging.logs.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, @@ -11619,7 +11092,7 @@ func (c *OrganizationsLocationsBucketsListCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListBucketsResponse{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -11631,10 +11104,175 @@ func (c *OrganizationsLocationsBucketsListCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists buckets (Beta).", - // "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets", + // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.", + // "flatPath": "v2/{v2Id}/{v2Id1}/logs/{logsId}", + // "httpMethod": "DELETE", + // "id": "logging.logs.delete", + // "parameterOrder": [ + // "logName" + // ], + // "parameters": { + // "logName": { + // "description": "Required. The resource name of the log to delete: \"projects/[PROJECT_ID]/logs/[LOG_ID]\" \"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\" \"folders/[FOLDER_ID]/logs/[LOG_ID]\" [LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + // "location": "path", + // "pattern": "^[^/]+/[^/]+/logs/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+logName}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/logging.admin" + // ] + // } + +} + +// method id "logging.logs.list": + +type LogsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the logs in projects, organizations, folders, or billing +// accounts. Only logs that have entries are listed. +func (r *LogsService) List(parent string) *LogsListCall { + c := &LogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of results to return from this request. Non-positive values are +// ignored. The presence of nextPageToken in the response indicates that +// more results might be available. +func (c *LogsListCall) PageSize(pageSize int64) *LogsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If present, then +// retrieve the next batch of results from the preceding call to this +// method. 
pageToken must be the value of nextPageToken from the +// previous response. The values of other method parameters should be +// identical to those in the previous call. +func (c *LogsListCall) PageToken(pageToken string) *LogsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LogsListCall) Fields(s ...googleapi.Field) *LogsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LogsListCall) IfNoneMatch(entityTag string) *LogsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LogsListCall) Context(ctx context.Context) *LogsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LogsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LogsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.logs.list" call. +// Exactly one of *ListLogsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListLogsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *LogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListLogsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", + // "flatPath": "v2/{v2Id}/{v2Id1}/logs", // "httpMethod": "GET", - // "id": "logging.organizations.locations.buckets.list", + // "id": "logging.logs.list", // "parameterOrder": [ // "parent" // ], @@ -11651,16 +11289,16 @@ func (c *OrganizationsLocationsBucketsListCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "parent": { - // "description": "Required. The parent resource whose buckets are to be listed:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]\"\nNote: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", + // "description": "Required. The resource name that owns the logs: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", // "location": "path", - // "pattern": "^organizations/[^/]+/locations/[^/]+$", + // "pattern": "^[^/]+/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+parent}/buckets", + // "path": "v2/{+parent}/logs", // "response": { - // "$ref": "ListBucketsResponse" + // "$ref": "ListLogsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -11675,7 +11313,7 @@ func (c *OrganizationsLocationsBucketsListCall) Do(opts ...googleapi.CallOption) // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *OrganizationsLocationsBucketsListCall) Pages(ctx context.Context, f func(*ListBucketsResponse) error) error { +func (c *LogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -11693,104 +11331,2472 @@ func (c *OrganizationsLocationsBucketsListCall) Pages(ctx context.Context, f fun } } -// method id "logging.organizations.locations.buckets.patch": +// method id "logging.monitoredResourceDescriptors.list": + +type MonitoredResourceDescriptorsListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the descriptors for monitored resource types used by +// Logging. 
+func (r *MonitoredResourceDescriptorsService) List() *MonitoredResourceDescriptorsListCall { + c := &MonitoredResourceDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of results to return from this request. Non-positive values are +// ignored. The presence of nextPageToken in the response indicates that +// more results might be available. +func (c *MonitoredResourceDescriptorsListCall) PageSize(pageSize int64) *MonitoredResourceDescriptorsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If present, then +// retrieve the next batch of results from the preceding call to this +// method. pageToken must be the value of nextPageToken from the +// previous response. The values of other method parameters should be +// identical to those in the previous call. +func (c *MonitoredResourceDescriptorsListCall) PageToken(pageToken string) *MonitoredResourceDescriptorsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MonitoredResourceDescriptorsListCall) Fields(s ...googleapi.Field) *MonitoredResourceDescriptorsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MonitoredResourceDescriptorsListCall) IfNoneMatch(entityTag string) *MonitoredResourceDescriptorsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MonitoredResourceDescriptorsListCall) Context(ctx context.Context) *MonitoredResourceDescriptorsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MonitoredResourceDescriptorsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MonitoredResourceDescriptorsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/monitoredResourceDescriptors") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.monitoredResourceDescriptors.list" call. +// Exactly one of *ListMonitoredResourceDescriptorsResponse or error +// will be non-nil. Any non-2xx status code is an error. 
Response +// headers are in either +// *ListMonitoredResourceDescriptorsResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *MonitoredResourceDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListMonitoredResourceDescriptorsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the descriptors for monitored resource types used by Logging.", + // "flatPath": "v2/monitoredResourceDescriptors", + // "httpMethod": "GET", + // "id": "logging.monitoredResourceDescriptors.list", + // "parameterOrder": [], + // "parameters": { + // "pageSize": { + // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v2/monitoredResourceDescriptors", + // "response": { + // "$ref": "ListMonitoredResourceDescriptorsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/logging.admin", + // "https://www.googleapis.com/auth/logging.read" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MonitoredResourceDescriptorsListCall) Pages(ctx context.Context, f func(*ListMonitoredResourceDescriptorsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "logging.organizations.getCmekSettings": + +type OrganizationsGetCmekSettingsCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetCmekSettings: Gets the Logs Router CMEK settings for the given +// resource.Note: CMEK for the Logs Router can currently only be +// configured for GCP organizations. 
Once configured, it applies to all +// projects and folders in the GCP organization.See Enabling CMEK for +// Logs Router +// (https://cloud.google.com/logging/docs/routing/managed-encryption) +// for more information. +func (r *OrganizationsService) GetCmekSettings(name string) *OrganizationsGetCmekSettingsCall { + c := &OrganizationsGetCmekSettingsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsGetCmekSettingsCall) Fields(s ...googleapi.Field) *OrganizationsGetCmekSettingsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OrganizationsGetCmekSettingsCall) IfNoneMatch(entityTag string) *OrganizationsGetCmekSettingsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsGetCmekSettingsCall) Context(ctx context.Context) *OrganizationsGetCmekSettingsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsGetCmekSettingsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsGetCmekSettingsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}/cmekSettings") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.getCmekSettings" call. +// Exactly one of *CmekSettings or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *CmekSettings.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationsGetCmekSettingsCall) Do(opts ...googleapi.CallOption) (*CmekSettings, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &CmekSettings{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the Logs Router CMEK settings for the given resource.Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once configured, it applies to all projects and folders in the GCP organization.See Enabling CMEK for Logs Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + // "flatPath": "v2/organizations/{organizationsId}/cmekSettings", + // "httpMethod": "GET", + // "id": "logging.organizations.getCmekSettings", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" Example: \"organizations/12345/cmekSettings\".Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once configured, it applies to all projects and folders in the GCP organization.", + // "location": "path", + // "pattern": "^organizations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}/cmekSettings", + // "response": { + // "$ref": "CmekSettings" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/logging.admin", + // "https://www.googleapis.com/auth/logging.read" + // ] + // } + +} + +// method id "logging.organizations.updateCmekSettings": + +type OrganizationsUpdateCmekSettingsCall struct { + s *Service + name string + cmeksettings *CmekSettings + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// UpdateCmekSettings: Updates the Logs Router CMEK settings for the +// given resource.Note: CMEK for the Logs Router can currently only be +// configured for GCP organizations. Once configured, it applies to all +// projects and folders in the GCP organization.UpdateCmekSettings will +// fail if 1) kms_key_name is invalid, or 2) the associated service +// account does not have the required +// roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, +// or 3) access to the key is disabled.See Enabling CMEK for Logs Router +// (https://cloud.google.com/logging/docs/routing/managed-encryption) +// for more information. +func (r *OrganizationsService) UpdateCmekSettings(name string, cmeksettings *CmekSettings) *OrganizationsUpdateCmekSettingsCall { + c := &OrganizationsUpdateCmekSettingsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.cmeksettings = cmeksettings + return c +} + +// UpdateMask sets the optional parameter "updateMask": Field mask +// identifying which fields from cmek_settings should be updated. 
A +// field will be overwritten if and only if it is in the update mask. +// Output only fields cannot be updated.See FieldMask for more +// information.Example: "updateMask=kmsKeyName" +func (c *OrganizationsUpdateCmekSettingsCall) UpdateMask(updateMask string) *OrganizationsUpdateCmekSettingsCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsUpdateCmekSettingsCall) Fields(s ...googleapi.Field) *OrganizationsUpdateCmekSettingsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsUpdateCmekSettingsCall) Context(ctx context.Context) *OrganizationsUpdateCmekSettingsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsUpdateCmekSettingsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsUpdateCmekSettingsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.cmeksettings) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}/cmekSettings") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.updateCmekSettings" call. +// Exactly one of *CmekSettings or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *CmekSettings.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationsUpdateCmekSettingsCall) Do(opts ...googleapi.CallOption) (*CmekSettings, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &CmekSettings{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the Logs Router CMEK settings for the given resource.Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once configured, it applies to all projects and folders in the GCP organization.UpdateCmekSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled.See Enabling CMEK for Logs Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + // "flatPath": "v2/organizations/{organizationsId}/cmekSettings", + // "httpMethod": "PATCH", + // "id": "logging.organizations.updateCmekSettings", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The resource name for the CMEK settings to update. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" Example: \"organizations/12345/cmekSettings\".Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once configured, it applies to all projects and folders in the GCP organization.", + // "location": "path", + // "pattern": "^organizations/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Optional. Field mask identifying which fields from cmek_settings should be updated. A field will be overwritten if and only if it is in the update mask. Output only fields cannot be updated.See FieldMask for more information.Example: \"updateMask=kmsKeyName\"", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v2/{+name}/cmekSettings", + // "request": { + // "$ref": "CmekSettings" + // }, + // "response": { + // "$ref": "CmekSettings" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/logging.admin" + // ] + // } + +} + +// method id "logging.organizations.exclusions.create": + +type OrganizationsExclusionsCreateCall struct { + s *Service + parent string + logexclusion *LogExclusion + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new exclusion in a specified parent resource. Only +// log entries belonging to that resource can be excluded. You can have +// up to 10 exclusions in a resource. +func (r *OrganizationsExclusionsService) Create(parent string, logexclusion *LogExclusion) *OrganizationsExclusionsCreateCall { + c := &OrganizationsExclusionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.logexclusion = logexclusion + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsExclusionsCreateCall) Fields(s ...googleapi.Field) *OrganizationsExclusionsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsExclusionsCreateCall) Context(ctx context.Context) *OrganizationsExclusionsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsExclusionsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsExclusionsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logexclusion) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/exclusions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.exclusions.create" call. +// Exactly one of *LogExclusion or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LogExclusion.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationsExclusionsCreateCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LogExclusion{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new exclusion in a specified parent resource. Only log entries belonging to that resource can be excluded. You can have up to 10 exclusions in a resource.", + // "flatPath": "v2/organizations/{organizationsId}/exclusions", + // "httpMethod": "POST", + // "id": "logging.organizations.exclusions.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. 
The parent resource in which to create the exclusion: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "location": "path", + // "pattern": "^organizations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/exclusions", + // "request": { + // "$ref": "LogExclusion" + // }, + // "response": { + // "$ref": "LogExclusion" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/logging.admin" + // ] + // } + +} + +// method id "logging.organizations.exclusions.delete": + +type OrganizationsExclusionsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an exclusion. +func (r *OrganizationsExclusionsService) Delete(name string) *OrganizationsExclusionsDeleteCall { + c := &OrganizationsExclusionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsExclusionsDeleteCall) Fields(s ...googleapi.Field) *OrganizationsExclusionsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsExclusionsDeleteCall) Context(ctx context.Context) *OrganizationsExclusionsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsExclusionsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsExclusionsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.exclusions.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *OrganizationsExclusionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes an exclusion.", + // "flatPath": "v2/organizations/{organizationsId}/exclusions/{exclusionsId}", + // "httpMethod": "DELETE", + // "id": "logging.organizations.exclusions.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The resource name of an existing exclusion to delete: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", + // "location": "path", + // "pattern": "^organizations/[^/]+/exclusions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/logging.admin" + // ] + // } + +} + +// method id "logging.organizations.exclusions.get": + +type OrganizationsExclusionsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the description of an exclusion. +func (r *OrganizationsExclusionsService) Get(name string) *OrganizationsExclusionsGetCall { + c := &OrganizationsExclusionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsExclusionsGetCall) Fields(s ...googleapi.Field) *OrganizationsExclusionsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OrganizationsExclusionsGetCall) IfNoneMatch(entityTag string) *OrganizationsExclusionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsExclusionsGetCall) Context(ctx context.Context) *OrganizationsExclusionsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *OrganizationsExclusionsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsExclusionsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.exclusions.get" call. +// Exactly one of *LogExclusion or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LogExclusion.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationsExclusionsGetCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LogExclusion{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the description of an exclusion.", + // "flatPath": "v2/organizations/{organizationsId}/exclusions/{exclusionsId}", + // "httpMethod": "GET", + // "id": "logging.organizations.exclusions.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. 
The resource name of an existing exclusion: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", + // "location": "path", + // "pattern": "^organizations/[^/]+/exclusions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "LogExclusion" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/logging.admin", + // "https://www.googleapis.com/auth/logging.read" + // ] + // } + +} + +// method id "logging.organizations.exclusions.list": + +type OrganizationsExclusionsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists all the exclusions in a parent resource. +func (r *OrganizationsExclusionsService) List(parent string) *OrganizationsExclusionsListCall { + c := &OrganizationsExclusionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of results to return from this request. Non-positive values are +// ignored. The presence of nextPageToken in the response indicates that +// more results might be available. +func (c *OrganizationsExclusionsListCall) PageSize(pageSize int64) *OrganizationsExclusionsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If present, then +// retrieve the next batch of results from the preceding call to this +// method. pageToken must be the value of nextPageToken from the +// previous response. The values of other method parameters should be +// identical to those in the previous call. +func (c *OrganizationsExclusionsListCall) PageToken(pageToken string) *OrganizationsExclusionsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsExclusionsListCall) Fields(s ...googleapi.Field) *OrganizationsExclusionsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OrganizationsExclusionsListCall) IfNoneMatch(entityTag string) *OrganizationsExclusionsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsExclusionsListCall) Context(ctx context.Context) *OrganizationsExclusionsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *OrganizationsExclusionsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsExclusionsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/exclusions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.exclusions.list" call. +// Exactly one of *ListExclusionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListExclusionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OrganizationsExclusionsListCall) Do(opts ...googleapi.CallOption) (*ListExclusionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListExclusionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all the exclusions in a parent resource.", + // "flatPath": "v2/organizations/{organizationsId}/exclusions", + // "httpMethod": "GET", + // "id": "logging.organizations.exclusions.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The parent resource whose exclusions are to be listed. 
\"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", + // "location": "path", + // "pattern": "^organizations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/exclusions", + // "response": { + // "$ref": "ListExclusionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/logging.admin", + // "https://www.googleapis.com/auth/logging.read" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *OrganizationsExclusionsListCall) Pages(ctx context.Context, f func(*ListExclusionsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "logging.organizations.exclusions.patch": + +type OrganizationsExclusionsPatchCall struct { + s *Service + name string + logexclusion *LogExclusion + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Changes one or more properties of an existing exclusion. +func (r *OrganizationsExclusionsService) Patch(name string, logexclusion *LogExclusion) *OrganizationsExclusionsPatchCall { + c := &OrganizationsExclusionsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.logexclusion = logexclusion + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. A +// non-empty list of fields to change in the existing exclusion. New +// values for the fields are taken from the corresponding fields in the +// LogExclusion included in this request. Fields not mentioned in +// update_mask are not changed and are ignored in the request.For +// example, to change the filter and description of an exclusion, +// specify an update_mask of "filter,description". +func (c *OrganizationsExclusionsPatchCall) UpdateMask(updateMask string) *OrganizationsExclusionsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsExclusionsPatchCall) Fields(s ...googleapi.Field) *OrganizationsExclusionsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsExclusionsPatchCall) Context(ctx context.Context) *OrganizationsExclusionsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *OrganizationsExclusionsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsExclusionsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logexclusion) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.exclusions.patch" call. +// Exactly one of *LogExclusion or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LogExclusion.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationsExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LogExclusion{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes one or more properties of an existing exclusion.", + // "flatPath": "v2/organizations/{organizationsId}/exclusions/{exclusionsId}", + // "httpMethod": "PATCH", + // "id": "logging.organizations.exclusions.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The resource name of the exclusion to update: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", + // "location": "path", + // "pattern": "^organizations/[^/]+/exclusions/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Required. A non-empty list of fields to change in the existing exclusion. New values for the fields are taken from the corresponding fields in the LogExclusion included in this request. 
Fields not mentioned in update_mask are not changed and are ignored in the request.For example, to change the filter and description of an exclusion, specify an update_mask of \"filter,description\".", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "request": { + // "$ref": "LogExclusion" + // }, + // "response": { + // "$ref": "LogExclusion" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/logging.admin" + // ] + // } + +} + +// method id "logging.organizations.locations.buckets.create": + +type OrganizationsLocationsBucketsCreateCall struct { + s *Service + parent string + logbucket *LogBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a bucket that can be used to store log entries. Once +// a bucket has been created, the region cannot be changed. +func (r *OrganizationsLocationsBucketsService) Create(parent string, logbucket *LogBucket) *OrganizationsLocationsBucketsCreateCall { + c := &OrganizationsLocationsBucketsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.logbucket = logbucket + return c +} + +// BucketId sets the optional parameter "bucketId": Required. A +// client-assigned identifier such as "my-bucket". Identifiers are +// limited to 100 characters and can include only letters, digits, +// underscores, hyphens, and periods. +func (c *OrganizationsLocationsBucketsCreateCall) BucketId(bucketId string) *OrganizationsLocationsBucketsCreateCall { + c.urlParams_.Set("bucketId", bucketId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsLocationsBucketsCreateCall) Fields(s ...googleapi.Field) *OrganizationsLocationsBucketsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsLocationsBucketsCreateCall) Context(ctx context.Context) *OrganizationsLocationsBucketsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsLocationsBucketsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsLocationsBucketsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/buckets") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.locations.buckets.create" call. +// Exactly one of *LogBucket or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LogBucket.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationsLocationsBucketsCreateCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LogBucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a bucket that can be used to store log entries. Once a bucket has been created, the region cannot be changed.", + // "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets", + // "httpMethod": "POST", + // "id": "logging.organizations.locations.buckets.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "bucketId": { + // "description": "Required. A client-assigned identifier such as \"my-bucket\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The resource in which to create the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" Example: \"projects/my-logging-project/locations/global\"", + // "location": "path", + // "pattern": "^organizations/[^/]+/locations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/buckets", + // "request": { + // "$ref": "LogBucket" + // }, + // "response": { + // "$ref": "LogBucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/logging.admin" + // ] + // } + +} + +// method id "logging.organizations.locations.buckets.delete": + +type OrganizationsLocationsBucketsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a bucket. Moves the bucket to the DELETE_REQUESTED +// state. After 7 days, the bucket will be purged and all logs in the +// bucket will be permanently deleted. +func (r *OrganizationsLocationsBucketsService) Delete(name string) *OrganizationsLocationsBucketsDeleteCall { + c := &OrganizationsLocationsBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsLocationsBucketsDeleteCall) Fields(s ...googleapi.Field) *OrganizationsLocationsBucketsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsLocationsBucketsDeleteCall) Context(ctx context.Context) *OrganizationsLocationsBucketsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsLocationsBucketsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsLocationsBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.locations.buckets.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *OrganizationsLocationsBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a bucket. Moves the bucket to the DELETE_REQUESTED state. After 7 days, the bucket will be purged and all logs in the bucket will be permanently deleted.", + // "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}", + // "httpMethod": "DELETE", + // "id": "logging.organizations.locations.buckets.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The full resource name of the bucket to delete. 
\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + // "location": "path", + // "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/logging.admin" + // ] + // } + +} + +// method id "logging.organizations.locations.buckets.get": + +type OrganizationsLocationsBucketsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets a bucket (Beta). +func (r *OrganizationsLocationsBucketsService) Get(name string) *OrganizationsLocationsBucketsGetCall { + c := &OrganizationsLocationsBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsLocationsBucketsGetCall) Fields(s ...googleapi.Field) *OrganizationsLocationsBucketsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OrganizationsLocationsBucketsGetCall) IfNoneMatch(entityTag string) *OrganizationsLocationsBucketsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsLocationsBucketsGetCall) Context(ctx context.Context) *OrganizationsLocationsBucketsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsLocationsBucketsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsLocationsBucketsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.locations.buckets.get" call. +// Exactly one of *LogBucket or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LogBucket.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationsLocationsBucketsGetCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LogBucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a bucket (Beta).", + // "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}", + // "httpMethod": "GET", + // "id": "logging.organizations.locations.buckets.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The resource name of the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + // "location": "path", + // "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "LogBucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/logging.admin", + // "https://www.googleapis.com/auth/logging.read" + // ] + // } + +} + +// method id "logging.organizations.locations.buckets.list": + +type OrganizationsLocationsBucketsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists buckets (Beta). +func (r *OrganizationsLocationsBucketsService) List(parent string) *OrganizationsLocationsBucketsListCall { + c := &OrganizationsLocationsBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of results to return from this request. Non-positive values are +// ignored. The presence of nextPageToken in the response indicates that +// more results might be available. 
+func (c *OrganizationsLocationsBucketsListCall) PageSize(pageSize int64) *OrganizationsLocationsBucketsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If present, then +// retrieve the next batch of results from the preceding call to this +// method. pageToken must be the value of nextPageToken from the +// previous response. The values of other method parameters should be +// identical to those in the previous call. +func (c *OrganizationsLocationsBucketsListCall) PageToken(pageToken string) *OrganizationsLocationsBucketsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsLocationsBucketsListCall) Fields(s ...googleapi.Field) *OrganizationsLocationsBucketsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OrganizationsLocationsBucketsListCall) IfNoneMatch(entityTag string) *OrganizationsLocationsBucketsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsLocationsBucketsListCall) Context(ctx context.Context) *OrganizationsLocationsBucketsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsLocationsBucketsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsLocationsBucketsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/buckets") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.locations.buckets.list" call. +// Exactly one of *ListBucketsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListBucketsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *OrganizationsLocationsBucketsListCall) Do(opts ...googleapi.CallOption) (*ListBucketsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListBucketsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists buckets (Beta).", + // "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets", + // "httpMethod": "GET", + // "id": "logging.organizations.locations.buckets.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The parent resource whose buckets are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" Note: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", + // "location": "path", + // "pattern": "^organizations/[^/]+/locations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/buckets", + // "response": { + // "$ref": "ListBucketsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/logging.admin", + // "https://www.googleapis.com/auth/logging.read" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *OrganizationsLocationsBucketsListCall) Pages(ctx context.Context, f func(*ListBucketsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "logging.organizations.locations.buckets.patch": + +type OrganizationsLocationsBucketsPatchCall struct { + s *Service + name string + logbucket *LogBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a bucket. This method replaces the following fields in +// the existing bucket with values from the new bucket: +// retention_periodIf the retention period is decreased and the bucket +// is locked, FAILED_PRECONDITION will be returned.If the bucket has a +// LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be +// returned.A buckets region may not be modified after it is created. +// This method is in Beta. +func (r *OrganizationsLocationsBucketsService) Patch(name string, logbucket *LogBucket) *OrganizationsLocationsBucketsPatchCall { + c := &OrganizationsLocationsBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.logbucket = logbucket + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. Field +// mask that specifies the fields in bucket that need an update. A +// bucket field will be overwritten if, and only if, it is in the update +// mask. name and output only fields cannot be updated.For a detailed +// FieldMask definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: +// updateMask=retention_days. +func (c *OrganizationsLocationsBucketsPatchCall) UpdateMask(updateMask string) *OrganizationsLocationsBucketsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsLocationsBucketsPatchCall) Fields(s ...googleapi.Field) *OrganizationsLocationsBucketsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsLocationsBucketsPatchCall) Context(ctx context.Context) *OrganizationsLocationsBucketsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *OrganizationsLocationsBucketsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsLocationsBucketsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.locations.buckets.patch" call. +// Exactly one of *LogBucket or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LogBucket.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationsLocationsBucketsPatchCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LogBucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a bucket. This method replaces the following fields in the existing bucket with values from the new bucket: retention_periodIf the retention period is decreased and the bucket is locked, FAILED_PRECONDITION will be returned.If the bucket has a LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be returned.A buckets region may not be modified after it is created. This method is in Beta.", + // "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}", + // "httpMethod": "PATCH", + // "id": "logging.organizations.locations.buckets.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The full resource name of the bucket to update. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". 
Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", + // "location": "path", + // "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Required. Field mask that specifies the fields in bucket that need an update. A bucket field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=retention_days.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "request": { + // "$ref": "LogBucket" + // }, + // "response": { + // "$ref": "LogBucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/logging.admin" + // ] + // } + +} + +// method id "logging.organizations.locations.buckets.undelete": + +type OrganizationsLocationsBucketsUndeleteCall struct { + s *Service + name string + undeletebucketrequest *UndeleteBucketRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Undelete: Undeletes a bucket. A bucket that has been deleted may be +// undeleted within the grace period of 7 days. +func (r *OrganizationsLocationsBucketsService) Undelete(name string, undeletebucketrequest *UndeleteBucketRequest) *OrganizationsLocationsBucketsUndeleteCall { + c := &OrganizationsLocationsBucketsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.undeletebucketrequest = undeletebucketrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsLocationsBucketsUndeleteCall) Fields(s ...googleapi.Field) *OrganizationsLocationsBucketsUndeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsLocationsBucketsUndeleteCall) Context(ctx context.Context) *OrganizationsLocationsBucketsUndeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsLocationsBucketsUndeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsLocationsBucketsUndeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeletebucketrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:undelete") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.locations.buckets.undelete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *OrganizationsLocationsBucketsUndeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Undeletes a bucket. A bucket that has been deleted may be undeleted within the grace period of 7 days.", + // "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}:undelete", + // "httpMethod": "POST", + // "id": "logging.organizations.locations.buckets.undelete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The full resource name of the bucket to undelete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + // "location": "path", + // "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}:undelete", + // "request": { + // "$ref": "UndeleteBucketRequest" + // }, + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/logging.admin" + // ] + // } + +} + +// method id "logging.organizations.logs.delete": + +type OrganizationsLogsDeleteCall struct { + s *Service + logName string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes all the log entries in a log. The log reappears if it +// receives new entries. Log entries written shortly before the delete +// operation might not be deleted. Entries received after the delete +// operation with a timestamp before the operation will be deleted. 
+func (r *OrganizationsLogsService) Delete(logName string) *OrganizationsLogsDeleteCall { + c := &OrganizationsLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.logName = logName + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsLogsDeleteCall) Fields(s ...googleapi.Field) *OrganizationsLogsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsLogsDeleteCall) Context(ctx context.Context) *OrganizationsLogsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsLogsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsLogsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+logName}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "logName": c.logName, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.organizations.logs.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *OrganizationsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. 
Entries received after the delete operation with a timestamp before the operation will be deleted.", + // "flatPath": "v2/organizations/{organizationsId}/logs/{logsId}", + // "httpMethod": "DELETE", + // "id": "logging.organizations.logs.delete", + // "parameterOrder": [ + // "logName" + // ], + // "parameters": { + // "logName": { + // "description": "Required. The resource name of the log to delete: \"projects/[PROJECT_ID]/logs/[LOG_ID]\" \"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\" \"folders/[FOLDER_ID]/logs/[LOG_ID]\" [LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + // "location": "path", + // "pattern": "^organizations/[^/]+/logs/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+logName}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/logging.admin" + // ] + // } + +} + +// method id "logging.organizations.logs.list": -type OrganizationsLocationsBucketsPatchCall struct { - s *Service - name string - logbucket *LogBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type OrganizationsLogsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Patch: Updates a bucket. This method replaces the following fields in -// the existing bucket with values from the new bucket: -// retention_periodIf the retention period is decreased and the bucket -// is locked, FAILED_PRECONDITION will be returned.If the bucket has a -// LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be -// returned.A buckets region may not be modified after it is created. -// This method is in Beta. -func (r *OrganizationsLocationsBucketsService) Patch(name string, logbucket *LogBucket) *OrganizationsLocationsBucketsPatchCall { - c := &OrganizationsLocationsBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.logbucket = logbucket +// List: Lists the logs in projects, organizations, folders, or billing +// accounts. Only logs that have entries are listed. +func (r *OrganizationsLogsService) List(parent string) *OrganizationsLogsListCall { + c := &OrganizationsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent return c } -// UpdateMask sets the optional parameter "updateMask": Required. Field -// mask that specifies the fields in bucket that need an update. A -// bucket field will be overwritten if, and only if, it is in the update -// mask. name and output only fields cannot be updated.For a detailed -// FieldMask definition, see -// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: -// updateMask=retention_days. -func (c *OrganizationsLocationsBucketsPatchCall) UpdateMask(updateMask string) *OrganizationsLocationsBucketsPatchCall { - c.urlParams_.Set("updateMask", updateMask) +// PageSize sets the optional parameter "pageSize": The maximum number +// of results to return from this request. Non-positive values are +// ignored. The presence of nextPageToken in the response indicates that +// more results might be available. 
+func (c *OrganizationsLogsListCall) PageSize(pageSize int64) *OrganizationsLogsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If present, then +// retrieve the next batch of results from the preceding call to this +// method. pageToken must be the value of nextPageToken from the +// previous response. The values of other method parameters should be +// identical to those in the previous call. +func (c *OrganizationsLogsListCall) PageToken(pageToken string) *OrganizationsLogsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsLocationsBucketsPatchCall) Fields(s ...googleapi.Field) *OrganizationsLocationsBucketsPatchCall { +func (c *OrganizationsLogsListCall) Fields(s ...googleapi.Field) *OrganizationsLogsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OrganizationsLogsListCall) IfNoneMatch(entityTag string) *OrganizationsLogsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationsLocationsBucketsPatchCall) Context(ctx context.Context) *OrganizationsLocationsBucketsPatchCall { +func (c *OrganizationsLogsListCall) Context(ctx context.Context) *OrganizationsLogsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationsLocationsBucketsPatchCall) Header() http.Header { +func (c *OrganizationsLogsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsLocationsBucketsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *OrganizationsLogsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logbucket) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logs") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.locations.buckets.patch" call. -// Exactly one of *LogBucket or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *LogBucket.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *OrganizationsLocationsBucketsPatchCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { +// Do executes the "logging.organizations.logs.list" call. +// Exactly one of *ListLogsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListLogsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OrganizationsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -11809,7 +13815,7 @@ func (c *OrganizationsLocationsBucketsPatchCall) Do(opts ...googleapi.CallOption if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogBucket{ + ret := &ListLogsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -11821,67 +13827,111 @@ func (c *OrganizationsLocationsBucketsPatchCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Updates a bucket. This method replaces the following fields in the existing bucket with values from the new bucket: retention_periodIf the retention period is decreased and the bucket is locked, FAILED_PRECONDITION will be returned.If the bucket has a LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be returned.A buckets region may not be modified after it is created. This method is in Beta.", - // "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}", - // "httpMethod": "PATCH", - // "id": "logging.organizations.locations.buckets.patch", + // "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", + // "flatPath": "v2/organizations/{organizationsId}/logs", + // "httpMethod": "GET", + // "id": "logging.organizations.logs.list", // "parameterOrder": [ - // "name" + // "parent" // ], // "parameters": { - // "name": { - // "description": "Required. The full resource name of the bucket to update.\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". 
Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", - // "location": "path", - // "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+$", - // "required": true, - // "type": "string" + // "pageSize": { + // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + // "format": "int32", + // "location": "query", + // "type": "integer" // }, - // "updateMask": { - // "description": "Required. Field mask that specifies the fields in bucket that need an update. A bucket field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=retention_days.", - // "format": "google-fieldmask", + // "pageToken": { + // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", // "location": "query", // "type": "string" + // }, + // "parent": { + // "description": "Required. The resource name that owns the logs: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", + // "location": "path", + // "pattern": "^organizations/[^/]+$", + // "required": true, + // "type": "string" // } // }, - // "path": "v2/{+name}", - // "request": { - // "$ref": "LogBucket" - // }, + // "path": "v2/{+parent}/logs", // "response": { - // "$ref": "LogBucket" + // "$ref": "ListLogsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin" + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/logging.admin", + // "https://www.googleapis.com/auth/logging.read" // ] // } } -// method id "logging.organizations.logs.delete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *OrganizationsLogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type OrganizationsLogsDeleteCall struct { +// method id "logging.organizations.sinks.create": + +type OrganizationsSinksCreateCall struct { s *Service - logName string + parent string + logsink *LogSink urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes all the log entries in a log. The log reappears if it -// receives new entries. Log entries written shortly before the delete -// operation might not be deleted. Entries received after the delete -// operation with a timestamp before the operation will be deleted. 
-func (r *OrganizationsLogsService) Delete(logName string) *OrganizationsLogsDeleteCall { - c := &OrganizationsLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.logName = logName +// Create: Creates a sink that exports specified log entries to a +// destination. The export of newly-ingested log entries begins +// immediately, unless the sink's writer_identity is not permitted to +// write to the destination. A sink can export log entries only from the +// resource owning the sink. +func (r *OrganizationsSinksService) Create(parent string, logsink *LogSink) *OrganizationsSinksCreateCall { + c := &OrganizationsSinksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.logsink = logsink + return c +} + +// UniqueWriterIdentity sets the optional parameter +// "uniqueWriterIdentity": Determines the kind of IAM identity returned +// as writer_identity in the new sink. If this value is omitted or set +// to false, and if the sink's parent is a project, then the value +// returned as writer_identity is the same group or service account used +// by Logging before the addition of writer identities to this API. The +// sink's destination must be in the same project as the sink itself.If +// this field is set to true, or if the sink is owned by a non-project +// resource such as an organization, then the value of writer_identity +// will be a unique service account used only for exports from the new +// sink. For more information, see writer_identity in LogSink. +func (c *OrganizationsSinksCreateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *OrganizationsSinksCreateCall { + c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsLogsDeleteCall) Fields(s ...googleapi.Field) *OrganizationsLogsDeleteCall { +func (c *OrganizationsSinksCreateCall) Fields(s ...googleapi.Field) *OrganizationsSinksCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -11889,51 +13939,56 @@ func (c *OrganizationsLogsDeleteCall) Fields(s ...googleapi.Field) *Organization // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationsLogsDeleteCall) Context(ctx context.Context) *OrganizationsLogsDeleteCall { +func (c *OrganizationsSinksCreateCall) Context(ctx context.Context) *OrganizationsSinksCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *OrganizationsLogsDeleteCall) Header() http.Header { +func (c *OrganizationsSinksCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsLogsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *OrganizationsSinksCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+logName}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "logName": c.logName, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.logs.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// Do executes the "logging.organizations.sinks.create" call. +// Exactly one of *LogSink or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) +// *LogSink.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *OrganizationsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +func (c *OrganizationsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -11952,7 +14007,7 @@ func (c *OrganizationsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Empty{ + ret := &LogSink{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -11964,25 +14019,33 @@ func (c *OrganizationsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, } return ret, nil // { - // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted. Entries received after the delete operation with a timestamp before the operation will be deleted.", - // "flatPath": "v2/organizations/{organizationsId}/logs/{logsId}", - // "httpMethod": "DELETE", - // "id": "logging.organizations.logs.delete", + // "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the sink's writer_identity is not permitted to write to the destination. 
A sink can export log entries only from the resource owning the sink.", + // "flatPath": "v2/organizations/{organizationsId}/sinks", + // "httpMethod": "POST", + // "id": "logging.organizations.sinks.create", // "parameterOrder": [ - // "logName" + // "parent" // ], // "parameters": { - // "logName": { - // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + // "parent": { + // "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", // "location": "path", - // "pattern": "^organizations/[^/]+/logs/[^/]+$", + // "pattern": "^organizations/[^/]+$", // "required": true, // "type": "string" + // }, + // "uniqueWriterIdentity": { + // "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "v2/{+logName}", + // "path": "v2/{+parent}/sinks", + // "request": { + // "$ref": "LogSink" + // }, // "response": { - // "$ref": "Empty" + // "$ref": "LogSink" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -11992,113 +14055,80 @@ func (c *OrganizationsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, } -// method id "logging.organizations.logs.list": - -type OrganizationsLogsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists the logs in projects, organizations, folders, or billing -// accounts. Only logs that have entries are listed. -func (r *OrganizationsLogsService) List(parent string) *OrganizationsLogsListCall { - c := &OrganizationsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} +// method id "logging.organizations.sinks.delete": -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return from this request. Non-positive values are -// ignored. The presence of nextPageToken in the response indicates that -// more results might be available. 
-func (c *OrganizationsLogsListCall) PageSize(pageSize int64) *OrganizationsLogsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c +type OrganizationsSinksDeleteCall struct { + s *Service + sinkNameid string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": If present, then -// retrieve the next batch of results from the preceding call to this -// method. pageToken must be the value of nextPageToken from the -// previous response. The values of other method parameters should be -// identical to those in the previous call. -func (c *OrganizationsLogsListCall) PageToken(pageToken string) *OrganizationsLogsListCall { - c.urlParams_.Set("pageToken", pageToken) +// Delete: Deletes a sink. If the sink has a unique writer_identity, +// then that service account is also deleted. +func (r *OrganizationsSinksService) Delete(sinkNameid string) *OrganizationsSinksDeleteCall { + c := &OrganizationsSinksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sinkNameid = sinkNameid return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsLogsListCall) Fields(s ...googleapi.Field) *OrganizationsLogsListCall { +func (c *OrganizationsSinksDeleteCall) Fields(s ...googleapi.Field) *OrganizationsSinksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *OrganizationsLogsListCall) IfNoneMatch(entityTag string) *OrganizationsLogsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationsLogsListCall) Context(ctx context.Context) *OrganizationsLogsListCall { +func (c *OrganizationsSinksDeleteCall) Context(ctx context.Context) *OrganizationsSinksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationsLogsListCall) Header() http.Header { +func (c *OrganizationsSinksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsLogsListCall) doRequest(alt string) (*http.Response, error) { +func (c *OrganizationsSinksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logs") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "sinkName": c.sinkNameid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.logs.list" call. -// Exactly one of *ListLogsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListLogsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *OrganizationsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) { +// Do executes the "logging.organizations.sinks.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *OrganizationsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -12117,7 +14147,7 @@ func (c *OrganizationsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsR if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListLogsResponse{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -12129,168 +14159,121 @@ func (c *OrganizationsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsR } return ret, nil // { - // "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", - // "flatPath": "v2/organizations/{organizationsId}/logs", - // "httpMethod": "GET", - // "id": "logging.organizations.logs.list", + // "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.", + // "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}", + // "httpMethod": "DELETE", + // "id": "logging.organizations.sinks.delete", // "parameterOrder": [ - // "parent" + // "sinkName" // ], // "parameters": { - // "pageSize": { - // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + // "sinkName": { + // "description": "Required. 
The full resource name of the sink to delete, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", - // "pattern": "^organizations/[^/]+$", + // "pattern": "^organizations/[^/]+/sinks/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+parent}/logs", + // "path": "v2/{+sinkName}", // "response": { - // "$ref": "ListLogsResponse" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" + // "https://www.googleapis.com/auth/logging.admin" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *OrganizationsLogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "logging.organizations.sinks.create": - -type OrganizationsSinksCreateCall struct { - s *Service - parent string - logsink *LogSink - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} +// method id "logging.organizations.sinks.get": -// Create: Creates a sink that exports specified log entries to a -// destination. The export of newly-ingested log entries begins -// immediately, unless the sink's writer_identity is not permitted to -// write to the destination. A sink can export log entries only from the -// resource owning the sink. -func (r *OrganizationsSinksService) Create(parent string, logsink *LogSink) *OrganizationsSinksCreateCall { - c := &OrganizationsSinksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.logsink = logsink - return c +type OrganizationsSinksGetCall struct { + s *Service + sinkName string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// UniqueWriterIdentity sets the optional parameter -// "uniqueWriterIdentity": Determines the kind of IAM identity returned -// as writer_identity in the new sink. If this value is omitted or set -// to false, and if the sink's parent is a project, then the value -// returned as writer_identity is the same group or service account used -// by Logging before the addition of writer identities to this API. The -// sink's destination must be in the same project as the sink itself.If -// this field is set to true, or if the sink is owned by a non-project -// resource such as an organization, then the value of writer_identity -// will be a unique service account used only for exports from the new -// sink. For more information, see writer_identity in LogSink. -func (c *OrganizationsSinksCreateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *OrganizationsSinksCreateCall { - c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) +// Get: Gets a sink. 
+func (r *OrganizationsSinksService) Get(sinkName string) *OrganizationsSinksGetCall { + c := &OrganizationsSinksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sinkName = sinkName return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsSinksCreateCall) Fields(s ...googleapi.Field) *OrganizationsSinksCreateCall { +func (c *OrganizationsSinksGetCall) Fields(s ...googleapi.Field) *OrganizationsSinksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OrganizationsSinksGetCall) IfNoneMatch(entityTag string) *OrganizationsSinksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationsSinksCreateCall) Context(ctx context.Context) *OrganizationsSinksCreateCall { +func (c *OrganizationsSinksGetCall) Context(ctx context.Context) *OrganizationsSinksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationsSinksCreateCall) Header() http.Header { +func (c *OrganizationsSinksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsSinksCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *OrganizationsSinksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "sinkName": c.sinkName, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.sinks.create" call. +// Do executes the "logging.organizations.sinks.get" call. // Exactly one of *LogSink or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *LogSink.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *OrganizationsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { +func (c *OrganizationsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -12321,116 +14304,142 @@ func (c *OrganizationsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSin } return ret, nil // { - // "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", - // "flatPath": "v2/organizations/{organizationsId}/sinks", - // "httpMethod": "POST", - // "id": "logging.organizations.sinks.create", + // "description": "Gets a sink.", + // "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}", + // "httpMethod": "GET", + // "id": "logging.organizations.sinks.get", // "parameterOrder": [ - // "parent" + // "sinkName" // ], // "parameters": { - // "parent": { - // "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "sinkName": { + // "description": "Required. The resource name of the sink: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", - // "pattern": "^organizations/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "uniqueWriterIdentity": { - // "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. 
For more information, see writer_identity in LogSink.", - // "location": "query", - // "type": "boolean" + // "pattern": "^organizations/[^/]+/sinks/[^/]+$", + // "required": true, + // "type": "string" // } // }, - // "path": "v2/{+parent}/sinks", - // "request": { - // "$ref": "LogSink" - // }, + // "path": "v2/{+sinkName}", // "response": { // "$ref": "LogSink" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin" + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/logging.admin", + // "https://www.googleapis.com/auth/logging.read" // ] // } } -// method id "logging.organizations.sinks.delete": +// method id "logging.organizations.sinks.list": -type OrganizationsSinksDeleteCall struct { - s *Service - sinkNameid string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type OrganizationsSinksListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes a sink. If the sink has a unique writer_identity, -// then that service account is also deleted. -func (r *OrganizationsSinksService) Delete(sinkNameid string) *OrganizationsSinksDeleteCall { - c := &OrganizationsSinksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sinkNameid = sinkNameid +// List: Lists sinks. +func (r *OrganizationsSinksService) List(parent string) *OrganizationsSinksListCall { + c := &OrganizationsSinksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of results to return from this request. Non-positive values are +// ignored. The presence of nextPageToken in the response indicates that +// more results might be available. +func (c *OrganizationsSinksListCall) PageSize(pageSize int64) *OrganizationsSinksListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If present, then +// retrieve the next batch of results from the preceding call to this +// method. pageToken must be the value of nextPageToken from the +// previous response. The values of other method parameters should be +// identical to those in the previous call. +func (c *OrganizationsSinksListCall) PageToken(pageToken string) *OrganizationsSinksListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsSinksDeleteCall) Fields(s ...googleapi.Field) *OrganizationsSinksDeleteCall { +func (c *OrganizationsSinksListCall) Fields(s ...googleapi.Field) *OrganizationsSinksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OrganizationsSinksListCall) IfNoneMatch(entityTag string) *OrganizationsSinksListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationsSinksDeleteCall) Context(ctx context.Context) *OrganizationsSinksDeleteCall { +func (c *OrganizationsSinksListCall) Context(ctx context.Context) *OrganizationsSinksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationsSinksDeleteCall) Header() http.Header { +func (c *OrganizationsSinksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsSinksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *OrganizationsSinksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "sinkName": c.sinkNameid, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.sinks.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *OrganizationsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +// Do executes the "logging.organizations.sinks.list" call. +// Exactly one of *ListSinksResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListSinksResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OrganizationsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -12449,7 +14458,7 @@ func (c *OrganizationsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Empty{ + ret := &ListSinksResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -12461,121 +14470,181 @@ func (c *OrganizationsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, } return ret, nil // { - // "description": "Deletes a sink. 
If the sink has a unique writer_identity, then that service account is also deleted.", - // "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}", - // "httpMethod": "DELETE", - // "id": "logging.organizations.sinks.delete", + // "description": "Lists sinks.", + // "flatPath": "v2/organizations/{organizationsId}/sinks", + // "httpMethod": "GET", + // "id": "logging.organizations.sinks.list", // "parameterOrder": [ - // "sinkName" + // "parent" // ], // "parameters": { - // "sinkName": { - // "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "pageSize": { + // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The parent resource whose sinks are to be listed: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", // "location": "path", - // "pattern": "^organizations/[^/]+/sinks/[^/]+$", + // "pattern": "^organizations/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+sinkName}", + // "path": "v2/{+parent}/sinks", // "response": { - // "$ref": "Empty" + // "$ref": "ListSinksResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin" + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/logging.admin", + // "https://www.googleapis.com/auth/logging.read" // ] // } } -// method id "logging.organizations.sinks.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *OrganizationsSinksListCall) Pages(ctx context.Context, f func(*ListSinksResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type OrganizationsSinksGetCall struct { - s *Service - sinkName string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "logging.organizations.sinks.patch": + +type OrganizationsSinksPatchCall struct { + s *Service + sinkNameid string + logsink *LogSink + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets a sink. 
-func (r *OrganizationsSinksService) Get(sinkName string) *OrganizationsSinksGetCall { - c := &OrganizationsSinksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sinkName = sinkName +// Patch: Updates a sink. This method replaces the following fields in +// the existing sink with values from the new sink: destination, and +// filter.The updated sink might also have a new writer_identity; see +// the unique_writer_identity field. +func (r *OrganizationsSinksService) Patch(sinkNameid string, logsink *LogSink) *OrganizationsSinksPatchCall { + c := &OrganizationsSinksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sinkNameid = sinkNameid + c.logsink = logsink + return c +} + +// UniqueWriterIdentity sets the optional parameter +// "uniqueWriterIdentity": See sinks.create for a description of this +// field. When updating a sink, the effect of this field on the value of +// writer_identity in the updated sink depends on both the old and new +// values of this field: If the old and new values of this field are +// both false or both true, then there is no change to the sink's +// writer_identity. If the old value is false and the new value is true, +// then writer_identity is changed to a unique service account. It is an +// error if the old value is true and the new value is set to false or +// defaulted to false. +func (c *OrganizationsSinksPatchCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *OrganizationsSinksPatchCall { + c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) + return c +} + +// UpdateMask sets the optional parameter "updateMask": Field mask that +// specifies the fields in sink that need an update. A sink field will +// be overwritten if, and only if, it is in the update mask. name and +// output only fields cannot be updated.An empty updateMask is +// temporarily treated as using the following mask for backwards +// compatibility purposes: destination,filter,includeChildren At some +// point in the future, behavior will be removed and specifying an empty +// updateMask will be an error.For a detailed FieldMask definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: +// updateMask=filter. +func (c *OrganizationsSinksPatchCall) UpdateMask(updateMask string) *OrganizationsSinksPatchCall { + c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsSinksGetCall) Fields(s ...googleapi.Field) *OrganizationsSinksGetCall { +func (c *OrganizationsSinksPatchCall) Fields(s ...googleapi.Field) *OrganizationsSinksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *OrganizationsSinksGetCall) IfNoneMatch(entityTag string) *OrganizationsSinksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *OrganizationsSinksGetCall) Context(ctx context.Context) *OrganizationsSinksGetCall { +func (c *OrganizationsSinksPatchCall) Context(ctx context.Context) *OrganizationsSinksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationsSinksGetCall) Header() http.Header { +func (c *OrganizationsSinksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsSinksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *OrganizationsSinksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "sinkName": c.sinkName, + "sinkName": c.sinkNameid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.sinks.get" call. +// Do executes the "logging.organizations.sinks.patch" call. // Exactly one of *LogSink or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *LogSink.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *OrganizationsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { +func (c *OrganizationsSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -12606,142 +14675,161 @@ func (c *OrganizationsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, } return ret, nil // { - // "description": "Gets a sink.", + // "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", // "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}", - // "httpMethod": "GET", - // "id": "logging.organizations.sinks.get", + // "httpMethod": "PATCH", + // "id": "logging.organizations.sinks.patch", // "parameterOrder": [ // "sinkName" // ], // "parameters": { // "sinkName": { - // "description": "Required. 
The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^organizations/[^/]+/sinks/[^/]+$", // "required": true, // "type": "string" + // }, + // "uniqueWriterIdentity": { + // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", + // "location": "query", + // "type": "boolean" + // }, + // "updateMask": { + // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, // "path": "v2/{+sinkName}", + // "request": { + // "$ref": "LogSink" + // }, // "response": { // "$ref": "LogSink" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" + // "https://www.googleapis.com/auth/logging.admin" // ] // } } -// method id "logging.organizations.sinks.list": +// method id "logging.organizations.sinks.update": -type OrganizationsSinksListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type OrganizationsSinksUpdateCall struct { + s *Service + sinkNameid string + logsink *LogSink + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Lists sinks. -func (r *OrganizationsSinksService) List(parent string) *OrganizationsSinksListCall { - c := &OrganizationsSinksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent +// Update: Updates a sink. This method replaces the following fields in +// the existing sink with values from the new sink: destination, and +// filter.The updated sink might also have a new writer_identity; see +// the unique_writer_identity field. 
+func (r *OrganizationsSinksService) Update(sinkNameid string, logsink *LogSink) *OrganizationsSinksUpdateCall { + c := &OrganizationsSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sinkNameid = sinkNameid + c.logsink = logsink return c } -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return from this request. Non-positive values are -// ignored. The presence of nextPageToken in the response indicates that -// more results might be available. -func (c *OrganizationsSinksListCall) PageSize(pageSize int64) *OrganizationsSinksListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) +// UniqueWriterIdentity sets the optional parameter +// "uniqueWriterIdentity": See sinks.create for a description of this +// field. When updating a sink, the effect of this field on the value of +// writer_identity in the updated sink depends on both the old and new +// values of this field: If the old and new values of this field are +// both false or both true, then there is no change to the sink's +// writer_identity. If the old value is false and the new value is true, +// then writer_identity is changed to a unique service account. It is an +// error if the old value is true and the new value is set to false or +// defaulted to false. +func (c *OrganizationsSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *OrganizationsSinksUpdateCall { + c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) return c } -// PageToken sets the optional parameter "pageToken": If present, then -// retrieve the next batch of results from the preceding call to this -// method. pageToken must be the value of nextPageToken from the -// previous response. The values of other method parameters should be -// identical to those in the previous call. -func (c *OrganizationsSinksListCall) PageToken(pageToken string) *OrganizationsSinksListCall { - c.urlParams_.Set("pageToken", pageToken) +// UpdateMask sets the optional parameter "updateMask": Field mask that +// specifies the fields in sink that need an update. A sink field will +// be overwritten if, and only if, it is in the update mask. name and +// output only fields cannot be updated.An empty updateMask is +// temporarily treated as using the following mask for backwards +// compatibility purposes: destination,filter,includeChildren At some +// point in the future, behavior will be removed and specifying an empty +// updateMask will be an error.For a detailed FieldMask definition, see +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: +// updateMask=filter. +func (c *OrganizationsSinksUpdateCall) UpdateMask(updateMask string) *OrganizationsSinksUpdateCall { + c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsSinksListCall) Fields(s ...googleapi.Field) *OrganizationsSinksListCall { +func (c *OrganizationsSinksUpdateCall) Fields(s ...googleapi.Field) *OrganizationsSinksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *OrganizationsSinksListCall) IfNoneMatch(entityTag string) *OrganizationsSinksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationsSinksListCall) Context(ctx context.Context) *OrganizationsSinksListCall { +func (c *OrganizationsSinksUpdateCall) Context(ctx context.Context) *OrganizationsSinksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationsSinksListCall) Header() http.Header { +func (c *OrganizationsSinksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsSinksListCall) doRequest(alt string) (*http.Response, error) { +func (c *OrganizationsSinksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "sinkName": c.sinkNameid, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.sinks.list" call. -// Exactly one of *ListSinksResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListSinksResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *OrganizationsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksResponse, error) { +// Do executes the "logging.organizations.sinks.update" call. +// Exactly one of *LogSink or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *LogSink.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *OrganizationsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -12760,7 +14848,7 @@ func (c *OrganizationsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSink if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListSinksResponse{ + ret := &LogSink{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -12772,125 +14860,73 @@ func (c *OrganizationsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSink } return ret, nil // { - // "description": "Lists sinks.", - // "flatPath": "v2/organizations/{organizationsId}/sinks", - // "httpMethod": "GET", - // "id": "logging.organizations.sinks.list", + // "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", + // "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}", + // "httpMethod": "PUT", + // "id": "logging.organizations.sinks.update", // "parameterOrder": [ - // "parent" + // "sinkName" // ], // "parameters": { - // "pageSize": { - // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - // "format": "int32", - // "location": "query", - // "type": "integer" + // "sinkName": { + // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", + // "location": "path", + // "pattern": "^organizations/[^/]+/sinks/[^/]+$", + // "required": true, + // "type": "string" // }, - // "pageToken": { - // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + // "uniqueWriterIdentity": { + // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", // "location": "query", - // "type": "string" + // "type": "boolean" // }, - // "parent": { - // "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", - // "location": "path", - // "pattern": "^organizations/[^/]+$", - // "required": true, + // "updateMask": { + // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. 
name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + // "format": "google-fieldmask", + // "location": "query", // "type": "string" // } // }, - // "path": "v2/{+parent}/sinks", + // "path": "v2/{+sinkName}", + // "request": { + // "$ref": "LogSink" + // }, // "response": { - // "$ref": "ListSinksResponse" + // "$ref": "LogSink" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" + // "https://www.googleapis.com/auth/logging.admin" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *OrganizationsSinksListCall) Pages(ctx context.Context, f func(*ListSinksResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "logging.organizations.sinks.patch": - -type OrganizationsSinksPatchCall struct { - s *Service - sinkNameid string - logsink *LogSink - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates a sink. This method replaces the following fields in -// the existing sink with values from the new sink: destination, and -// filter.The updated sink might also have a new writer_identity; see -// the unique_writer_identity field. -func (r *OrganizationsSinksService) Patch(sinkNameid string, logsink *LogSink) *OrganizationsSinksPatchCall { - c := &OrganizationsSinksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sinkNameid = sinkNameid - c.logsink = logsink - return c -} +// method id "logging.projects.exclusions.create": -// UniqueWriterIdentity sets the optional parameter -// "uniqueWriterIdentity": See sinks.create for a description of this -// field. When updating a sink, the effect of this field on the value of -// writer_identity in the updated sink depends on both the old and new -// values of this field: -// If the old and new values of this field are both false or both true, -// then there is no change to the sink's writer_identity. -// If the old value is false and the new value is true, then -// writer_identity is changed to a unique service account. -// It is an error if the old value is true and the new value is set to -// false or defaulted to false. 
-func (c *OrganizationsSinksPatchCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *OrganizationsSinksPatchCall { - c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) - return c +type ProjectsExclusionsCreateCall struct { + s *Service + parent string + logexclusion *LogExclusion + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdateMask sets the optional parameter "updateMask": Field mask that -// specifies the fields in sink that need an update. A sink field will -// be overwritten if, and only if, it is in the update mask. name and -// output only fields cannot be updated.An empty updateMask is -// temporarily treated as using the following mask for backwards -// compatibility purposes: destination,filter,includeChildren At some -// point in the future, behavior will be removed and specifying an empty -// updateMask will be an error.For a detailed FieldMask definition, see -// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: -// updateMask=filter. -func (c *OrganizationsSinksPatchCall) UpdateMask(updateMask string) *OrganizationsSinksPatchCall { - c.urlParams_.Set("updateMask", updateMask) +// Create: Creates a new exclusion in a specified parent resource. Only +// log entries belonging to that resource can be excluded. You can have +// up to 10 exclusions in a resource. +func (r *ProjectsExclusionsService) Create(parent string, logexclusion *LogExclusion) *ProjectsExclusionsCreateCall { + c := &ProjectsExclusionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.logexclusion = logexclusion return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsSinksPatchCall) Fields(s ...googleapi.Field) *OrganizationsSinksPatchCall { +func (c *ProjectsExclusionsCreateCall) Fields(s ...googleapi.Field) *ProjectsExclusionsCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -12898,56 +14934,56 @@ func (c *OrganizationsSinksPatchCall) Fields(s ...googleapi.Field) *Organization // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationsSinksPatchCall) Context(ctx context.Context) *OrganizationsSinksPatchCall { +func (c *ProjectsExclusionsCreateCall) Context(ctx context.Context) *ProjectsExclusionsCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *OrganizationsSinksPatchCall) Header() http.Header { +func (c *ProjectsExclusionsCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsSinksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsExclusionsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logexclusion) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/exclusions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "sinkName": c.sinkNameid, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.organizations.sinks.patch" call. -// Exactly one of *LogSink or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *LogSink.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *OrganizationsSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { +} + +// Do executes the "logging.projects.exclusions.create" call. +// Exactly one of *LogExclusion or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LogExclusion.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsExclusionsCreateCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -12966,7 +15002,7 @@ func (c *OrganizationsSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSink if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogSink{ + ret := &LogExclusion{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -12978,39 +15014,28 @@ func (c *OrganizationsSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSink } return ret, nil // { - // "description": "Updates a sink. 
This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", - // "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}", - // "httpMethod": "PATCH", - // "id": "logging.organizations.sinks.patch", + // "description": "Creates a new exclusion in a specified parent resource. Only log entries belonging to that resource can be excluded. You can have up to 10 exclusions in a resource.", + // "flatPath": "v2/projects/{projectsId}/exclusions", + // "httpMethod": "POST", + // "id": "logging.projects.exclusions.create", // "parameterOrder": [ - // "sinkName" + // "parent" // ], // "parameters": { - // "sinkName": { - // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "parent": { + // "description": "Required. The parent resource in which to create the exclusion: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", // "location": "path", - // "pattern": "^organizations/[^/]+/sinks/[^/]+$", + // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" - // }, - // "uniqueWriterIdentity": { - // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", - // "location": "query", - // "type": "boolean" - // }, - // "updateMask": { - // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. 
name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", - // "format": "google-fieldmask", - // "location": "query", - // "type": "string" // } // }, - // "path": "v2/{+sinkName}", + // "path": "v2/{+parent}/exclusions", // "request": { - // "$ref": "LogSink" + // "$ref": "LogExclusion" // }, // "response": { - // "$ref": "LogSink" + // "$ref": "LogExclusion" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -13020,63 +15045,27 @@ func (c *OrganizationsSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSink } -// method id "logging.organizations.sinks.update": +// method id "logging.projects.exclusions.delete": -type OrganizationsSinksUpdateCall struct { +type ProjectsExclusionsDeleteCall struct { s *Service - sinkNameid string - logsink *LogSink + name string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Update: Updates a sink. This method replaces the following fields in -// the existing sink with values from the new sink: destination, and -// filter.The updated sink might also have a new writer_identity; see -// the unique_writer_identity field. -func (r *OrganizationsSinksService) Update(sinkNameid string, logsink *LogSink) *OrganizationsSinksUpdateCall { - c := &OrganizationsSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sinkNameid = sinkNameid - c.logsink = logsink - return c -} - -// UniqueWriterIdentity sets the optional parameter -// "uniqueWriterIdentity": See sinks.create for a description of this -// field. When updating a sink, the effect of this field on the value of -// writer_identity in the updated sink depends on both the old and new -// values of this field: -// If the old and new values of this field are both false or both true, -// then there is no change to the sink's writer_identity. -// If the old value is false and the new value is true, then -// writer_identity is changed to a unique service account. -// It is an error if the old value is true and the new value is set to -// false or defaulted to false. -func (c *OrganizationsSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *OrganizationsSinksUpdateCall { - c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) - return c -} - -// UpdateMask sets the optional parameter "updateMask": Field mask that -// specifies the fields in sink that need an update. A sink field will -// be overwritten if, and only if, it is in the update mask. name and -// output only fields cannot be updated.An empty updateMask is -// temporarily treated as using the following mask for backwards -// compatibility purposes: destination,filter,includeChildren At some -// point in the future, behavior will be removed and specifying an empty -// updateMask will be an error.For a detailed FieldMask definition, see -// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: -// updateMask=filter. 
-func (c *OrganizationsSinksUpdateCall) UpdateMask(updateMask string) *OrganizationsSinksUpdateCall { - c.urlParams_.Set("updateMask", updateMask) +// Delete: Deletes an exclusion. +func (r *ProjectsExclusionsService) Delete(name string) *ProjectsExclusionsDeleteCall { + c := &ProjectsExclusionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsSinksUpdateCall) Fields(s ...googleapi.Field) *OrganizationsSinksUpdateCall { +func (c *ProjectsExclusionsDeleteCall) Fields(s ...googleapi.Field) *ProjectsExclusionsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -13084,56 +15073,51 @@ func (c *OrganizationsSinksUpdateCall) Fields(s ...googleapi.Field) *Organizatio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationsSinksUpdateCall) Context(ctx context.Context) *OrganizationsSinksUpdateCall { +func (c *ProjectsExclusionsDeleteCall) Context(ctx context.Context) *ProjectsExclusionsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationsSinksUpdateCall) Header() http.Header { +func (c *ProjectsExclusionsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsSinksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsExclusionsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "sinkName": c.sinkNameid, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.organizations.sinks.update" call. -// Exactly one of *LogSink or error will be non-nil. Any non-2xx status +// Do executes the "logging.projects.exclusions.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *LogSink.ServerResponse.Header or (if a response was returned at all) +// *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. 
-func (c *OrganizationsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { +func (c *ProjectsExclusionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -13152,7 +15136,7 @@ func (c *OrganizationsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSin if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogSink{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -13164,39 +15148,25 @@ func (c *OrganizationsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSin } return ret, nil // { - // "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", - // "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}", - // "httpMethod": "PUT", - // "id": "logging.organizations.sinks.update", + // "description": "Deletes an exclusion.", + // "flatPath": "v2/projects/{projectsId}/exclusions/{exclusionsId}", + // "httpMethod": "DELETE", + // "id": "logging.projects.exclusions.delete", // "parameterOrder": [ - // "sinkName" + // "name" // ], // "parameters": { - // "sinkName": { - // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "name": { + // "description": "Required. The resource name of an existing exclusion to delete: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", // "location": "path", - // "pattern": "^organizations/[^/]+/sinks/[^/]+$", + // "pattern": "^projects/[^/]+/exclusions/[^/]+$", // "required": true, // "type": "string" - // }, - // "uniqueWriterIdentity": { - // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", - // "location": "query", - // "type": "boolean" - // }, - // "updateMask": { - // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. 
name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", - // "format": "google-fieldmask", - // "location": "query", - // "type": "string" // } // }, - // "path": "v2/{+sinkName}", - // "request": { - // "$ref": "LogSink" - // }, + // "path": "v2/{+name}", // "response": { - // "$ref": "LogSink" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -13206,88 +15176,93 @@ func (c *OrganizationsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSin } -// method id "logging.projects.exclusions.create": +// method id "logging.projects.exclusions.get": -type ProjectsExclusionsCreateCall struct { +type ProjectsExclusionsGetCall struct { s *Service - parent string - logexclusion *LogExclusion + name string urlParams_ gensupport.URLParams + ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Create: Creates a new exclusion in a specified parent resource. Only -// log entries belonging to that resource can be excluded. You can have -// up to 10 exclusions in a resource. -func (r *ProjectsExclusionsService) Create(parent string, logexclusion *LogExclusion) *ProjectsExclusionsCreateCall { - c := &ProjectsExclusionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.logexclusion = logexclusion +// Get: Gets the description of an exclusion. +func (r *ProjectsExclusionsService) Get(name string) *ProjectsExclusionsGetCall { + c := &ProjectsExclusionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsExclusionsCreateCall) Fields(s ...googleapi.Field) *ProjectsExclusionsCreateCall { +func (c *ProjectsExclusionsGetCall) Fields(s ...googleapi.Field) *ProjectsExclusionsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsExclusionsGetCall) IfNoneMatch(entityTag string) *ProjectsExclusionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsExclusionsCreateCall) Context(ctx context.Context) *ProjectsExclusionsCreateCall { +func (c *ProjectsExclusionsGetCall) Context(ctx context.Context) *ProjectsExclusionsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsExclusionsCreateCall) Header() http.Header { +func (c *ProjectsExclusionsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsExclusionsCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsExclusionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logexclusion) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/exclusions") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.projects.exclusions.create" call. +// Do executes the "logging.projects.exclusions.get" call. // Exactly one of *LogExclusion or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *LogExclusion.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsExclusionsCreateCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { +func (c *ProjectsExclusionsGetCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -13318,110 +15293,142 @@ func (c *ProjectsExclusionsCreateCall) Do(opts ...googleapi.CallOption) (*LogExc } return ret, nil // { - // "description": "Creates a new exclusion in a specified parent resource. Only log entries belonging to that resource can be excluded. You can have up to 10 exclusions in a resource.", - // "flatPath": "v2/projects/{projectsId}/exclusions", - // "httpMethod": "POST", - // "id": "logging.projects.exclusions.create", + // "description": "Gets the description of an exclusion.", + // "flatPath": "v2/projects/{projectsId}/exclusions/{exclusionsId}", + // "httpMethod": "GET", + // "id": "logging.projects.exclusions.get", // "parameterOrder": [ - // "parent" + // "name" // ], // "parameters": { - // "parent": { - // "description": "Required. The parent resource in which to create the exclusion:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "name": { + // "description": "Required. 
The resource name of an existing exclusion: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", // "location": "path", - // "pattern": "^projects/[^/]+$", + // "pattern": "^projects/[^/]+/exclusions/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+parent}/exclusions", - // "request": { - // "$ref": "LogExclusion" - // }, + // "path": "v2/{+name}", // "response": { // "$ref": "LogExclusion" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin" + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/logging.admin", + // "https://www.googleapis.com/auth/logging.read" // ] // } } -// method id "logging.projects.exclusions.delete": +// method id "logging.projects.exclusions.list": -type ProjectsExclusionsDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsExclusionsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes an exclusion. -func (r *ProjectsExclusionsService) Delete(name string) *ProjectsExclusionsDeleteCall { - c := &ProjectsExclusionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name +// List: Lists all the exclusions in a parent resource. +func (r *ProjectsExclusionsService) List(parent string) *ProjectsExclusionsListCall { + c := &ProjectsExclusionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of results to return from this request. Non-positive values are +// ignored. The presence of nextPageToken in the response indicates that +// more results might be available. +func (c *ProjectsExclusionsListCall) PageSize(pageSize int64) *ProjectsExclusionsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If present, then +// retrieve the next batch of results from the preceding call to this +// method. pageToken must be the value of nextPageToken from the +// previous response. The values of other method parameters should be +// identical to those in the previous call. +func (c *ProjectsExclusionsListCall) PageToken(pageToken string) *ProjectsExclusionsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsExclusionsDeleteCall) Fields(s ...googleapi.Field) *ProjectsExclusionsDeleteCall { +func (c *ProjectsExclusionsListCall) Fields(s ...googleapi.Field) *ProjectsExclusionsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *ProjectsExclusionsListCall) IfNoneMatch(entityTag string) *ProjectsExclusionsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsExclusionsDeleteCall) Context(ctx context.Context) *ProjectsExclusionsDeleteCall { +func (c *ProjectsExclusionsListCall) Context(ctx context.Context) *ProjectsExclusionsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsExclusionsDeleteCall) Header() http.Header { +func (c *ProjectsExclusionsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsExclusionsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsExclusionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/exclusions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.projects.exclusions.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsExclusionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +// Do executes the "logging.projects.exclusions.list" call. +// Exactly one of *ListExclusionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListExclusionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsExclusionsListCall) Do(opts ...googleapi.CallOption) (*ListExclusionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -13440,7 +15447,7 @@ func (c *ProjectsExclusionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Empty{ + ret := &ListExclusionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -13452,103 +15459,142 @@ func (c *ProjectsExclusionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, } return ret, nil // { - // "description": "Deletes an exclusion.", - // "flatPath": "v2/projects/{projectsId}/exclusions/{exclusionsId}", - // "httpMethod": "DELETE", - // "id": "logging.projects.exclusions.delete", + // "description": "Lists all the exclusions in a parent resource.", + // "flatPath": "v2/projects/{projectsId}/exclusions", + // "httpMethod": "GET", + // "id": "logging.projects.exclusions.list", // "parameterOrder": [ - // "name" + // "parent" // ], // "parameters": { - // "name": { - // "description": "Required. The resource name of an existing exclusion to delete:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + // "pageSize": { + // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The parent resource whose exclusions are to be listed. \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", // "location": "path", - // "pattern": "^projects/[^/]+/exclusions/[^/]+$", + // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+name}", + // "path": "v2/{+parent}/exclusions", // "response": { - // "$ref": "Empty" + // "$ref": "ListExclusionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin" + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/logging.admin", + // "https://www.googleapis.com/auth/logging.read" // ] // } } -// method id "logging.projects.exclusions.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *ProjectsExclusionsListCall) Pages(ctx context.Context, f func(*ListExclusionsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ProjectsExclusionsGetCall struct { +// method id "logging.projects.exclusions.patch": + +type ProjectsExclusionsPatchCall struct { s *Service name string + logexclusion *LogExclusion urlParams_ gensupport.URLParams - ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Gets the description of an exclusion. -func (r *ProjectsExclusionsService) Get(name string) *ProjectsExclusionsGetCall { - c := &ProjectsExclusionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Changes one or more properties of an existing exclusion. +func (r *ProjectsExclusionsService) Patch(name string, logexclusion *LogExclusion) *ProjectsExclusionsPatchCall { + c := &ProjectsExclusionsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name + c.logexclusion = logexclusion + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. A +// non-empty list of fields to change in the existing exclusion. New +// values for the fields are taken from the corresponding fields in the +// LogExclusion included in this request. Fields not mentioned in +// update_mask are not changed and are ignored in the request.For +// example, to change the filter and description of an exclusion, +// specify an update_mask of "filter,description". +func (c *ProjectsExclusionsPatchCall) UpdateMask(updateMask string) *ProjectsExclusionsPatchCall { + c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsExclusionsGetCall) Fields(s ...googleapi.Field) *ProjectsExclusionsGetCall { +func (c *ProjectsExclusionsPatchCall) Fields(s ...googleapi.Field) *ProjectsExclusionsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsExclusionsGetCall) IfNoneMatch(entityTag string) *ProjectsExclusionsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsExclusionsGetCall) Context(ctx context.Context) *ProjectsExclusionsGetCall { +func (c *ProjectsExclusionsPatchCall) Context(ctx context.Context) *ProjectsExclusionsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsExclusionsGetCall) Header() http.Header { +func (c *ProjectsExclusionsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsExclusionsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsExclusionsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logexclusion) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -13559,14 +15605,14 @@ func (c *ProjectsExclusionsGetCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.projects.exclusions.get" call. +// Do executes the "logging.projects.exclusions.patch" call. // Exactly one of *LogExclusion or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *LogExclusion.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsExclusionsGetCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { +func (c *ProjectsExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -13597,142 +15643,133 @@ func (c *ProjectsExclusionsGetCall) Do(opts ...googleapi.CallOption) (*LogExclus } return ret, nil // { - // "description": "Gets the description of an exclusion.", + // "description": "Changes one or more properties of an existing exclusion.", // "flatPath": "v2/projects/{projectsId}/exclusions/{exclusionsId}", - // "httpMethod": "GET", - // "id": "logging.projects.exclusions.get", + // "httpMethod": "PATCH", + // "id": "logging.projects.exclusions.patch", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The resource name of an existing exclusion:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + // "description": "Required. 
The resource name of the exclusion to update: \"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\" \"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\" \"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\" Example: \"projects/my-project-id/exclusions/my-exclusion-id\".", // "location": "path", // "pattern": "^projects/[^/]+/exclusions/[^/]+$", // "required": true, // "type": "string" + // }, + // "updateMask": { + // "description": "Required. A non-empty list of fields to change in the existing exclusion. New values for the fields are taken from the corresponding fields in the LogExclusion included in this request. Fields not mentioned in update_mask are not changed and are ignored in the request.For example, to change the filter and description of an exclusion, specify an update_mask of \"filter,description\".", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, // "path": "v2/{+name}", + // "request": { + // "$ref": "LogExclusion" + // }, // "response": { // "$ref": "LogExclusion" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" + // "https://www.googleapis.com/auth/logging.admin" // ] // } } -// method id "logging.projects.exclusions.list": +// method id "logging.projects.locations.buckets.create": -type ProjectsExclusionsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsLocationsBucketsCreateCall struct { + s *Service + parent string + logbucket *LogBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Lists all the exclusions in a parent resource. -func (r *ProjectsExclusionsService) List(parent string) *ProjectsExclusionsListCall { - c := &ProjectsExclusionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Create: Creates a bucket that can be used to store log entries. Once +// a bucket has been created, the region cannot be changed. +func (r *ProjectsLocationsBucketsService) Create(parent string, logbucket *LogBucket) *ProjectsLocationsBucketsCreateCall { + c := &ProjectsLocationsBucketsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent + c.logbucket = logbucket return c } -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return from this request. Non-positive values are -// ignored. The presence of nextPageToken in the response indicates that -// more results might be available. -func (c *ProjectsExclusionsListCall) PageSize(pageSize int64) *ProjectsExclusionsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If present, then -// retrieve the next batch of results from the preceding call to this -// method. pageToken must be the value of nextPageToken from the -// previous response. The values of other method parameters should be -// identical to those in the previous call. -func (c *ProjectsExclusionsListCall) PageToken(pageToken string) *ProjectsExclusionsListCall { - c.urlParams_.Set("pageToken", pageToken) +// BucketId sets the optional parameter "bucketId": Required. A +// client-assigned identifier such as "my-bucket". 
Identifiers are +// limited to 100 characters and can include only letters, digits, +// underscores, hyphens, and periods. +func (c *ProjectsLocationsBucketsCreateCall) BucketId(bucketId string) *ProjectsLocationsBucketsCreateCall { + c.urlParams_.Set("bucketId", bucketId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsExclusionsListCall) Fields(s ...googleapi.Field) *ProjectsExclusionsListCall { +func (c *ProjectsLocationsBucketsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsBucketsCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsExclusionsListCall) IfNoneMatch(entityTag string) *ProjectsExclusionsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsExclusionsListCall) Context(ctx context.Context) *ProjectsExclusionsListCall { +func (c *ProjectsLocationsBucketsCreateCall) Context(ctx context.Context) *ProjectsLocationsBucketsCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsExclusionsListCall) Header() http.Header { +func (c *ProjectsLocationsBucketsCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsExclusionsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsBucketsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.logbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/exclusions") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/buckets") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.projects.exclusions.list" call. -// Exactly one of *ListExclusionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListExclusionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsExclusionsListCall) Do(opts ...googleapi.CallOption) (*ListExclusionsResponse, error) { + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.projects.locations.buckets.create" call. +// Exactly one of *LogBucket or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LogBucket.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsBucketsCreateCall) Do(opts ...googleapi.CallOption) (*LogBucket, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -13751,7 +15788,7 @@ func (c *ProjectsExclusionsListCall) Do(opts ...googleapi.CallOption) (*ListExcl if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ListExclusionsResponse{ + ret := &LogBucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -13763,103 +15800,65 @@ func (c *ProjectsExclusionsListCall) Do(opts ...googleapi.CallOption) (*ListExcl } return ret, nil // { - // "description": "Lists all the exclusions in a parent resource.", - // "flatPath": "v2/projects/{projectsId}/exclusions", - // "httpMethod": "GET", - // "id": "logging.projects.exclusions.list", + // "description": "Creates a bucket that can be used to store log entries. Once a bucket has been created, the region cannot be changed.", + // "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets", + // "httpMethod": "POST", + // "id": "logging.projects.locations.buckets.create", // "parameterOrder": [ // "parent" // ], // "parameters": { - // "pageSize": { - // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + // "bucketId": { + // "description": "Required. A client-assigned identifier such as \"my-bucket\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The parent resource whose exclusions are to be listed.\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + // "description": "Required. 
The resource in which to create the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" Example: \"projects/my-logging-project/locations/global\"", // "location": "path", - // "pattern": "^projects/[^/]+$", + // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v2/{+parent}/exclusions", + // "path": "v2/{+parent}/buckets", + // "request": { + // "$ref": "LogBucket" + // }, // "response": { - // "$ref": "ListExclusionsResponse" + // "$ref": "LogBucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" + // "https://www.googleapis.com/auth/logging.admin" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsExclusionsListCall) Pages(ctx context.Context, f func(*ListExclusionsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "logging.projects.exclusions.patch": +// method id "logging.projects.locations.buckets.delete": -type ProjectsExclusionsPatchCall struct { - s *Service - name string - logexclusion *LogExclusion - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsBucketsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Changes one or more properties of an existing exclusion. -func (r *ProjectsExclusionsService) Patch(name string, logexclusion *LogExclusion) *ProjectsExclusionsPatchCall { - c := &ProjectsExclusionsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes a bucket. Moves the bucket to the DELETE_REQUESTED +// state. After 7 days, the bucket will be purged and all logs in the +// bucket will be permanently deleted. +func (r *ProjectsLocationsBucketsService) Delete(name string) *ProjectsLocationsBucketsDeleteCall { + c := &ProjectsLocationsBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.logexclusion = logexclusion - return c -} - -// UpdateMask sets the optional parameter "updateMask": Required. A -// non-empty list of fields to change in the existing exclusion. New -// values for the fields are taken from the corresponding fields in the -// LogExclusion included in this request. Fields not mentioned in -// update_mask are not changed and are ignored in the request.For -// example, to change the filter and description of an exclusion, -// specify an update_mask of "filter,description". -func (c *ProjectsExclusionsPatchCall) UpdateMask(updateMask string) *ProjectsExclusionsPatchCall { - c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ProjectsExclusionsPatchCall) Fields(s ...googleapi.Field) *ProjectsExclusionsPatchCall { +func (c *ProjectsLocationsBucketsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsBucketsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -13867,38 +15866,33 @@ func (c *ProjectsExclusionsPatchCall) Fields(s ...googleapi.Field) *ProjectsExcl // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsExclusionsPatchCall) Context(ctx context.Context) *ProjectsExclusionsPatchCall { +func (c *ProjectsLocationsBucketsDeleteCall) Context(ctx context.Context) *ProjectsLocationsBucketsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsExclusionsPatchCall) Header() http.Header { +func (c *ProjectsLocationsBucketsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsExclusionsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logexclusion) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -13909,14 +15903,14 @@ func (c *ProjectsExclusionsPatchCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "logging.projects.exclusions.patch" call. -// Exactly one of *LogExclusion or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *LogExclusion.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*LogExclusion, error) { +// Do executes the "logging.projects.locations.buckets.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsLocationsBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -13935,7 +15929,7 @@ func (c *ProjectsExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*LogExcl if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LogExclusion{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -13947,34 +15941,25 @@ func (c *ProjectsExclusionsPatchCall) Do(opts ...googleapi.CallOption) (*LogExcl } return ret, nil // { - // "description": "Changes one or more properties of an existing exclusion.", - // "flatPath": "v2/projects/{projectsId}/exclusions/{exclusionsId}", - // "httpMethod": "PATCH", - // "id": "logging.projects.exclusions.patch", + // "description": "Deletes a bucket. Moves the bucket to the DELETE_REQUESTED state. After 7 days, the bucket will be purged and all logs in the bucket will be permanently deleted.", + // "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}", + // "httpMethod": "DELETE", + // "id": "logging.projects.locations.buckets.delete", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the exclusion to update:\n\"projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]\"\n\"organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]\"\n\"folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]\"\nExample: \"projects/my-project-id/exclusions/my-exclusion-id\".", + // "description": "Required. The full resource name of the bucket to delete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", // "location": "path", - // "pattern": "^projects/[^/]+/exclusions/[^/]+$", + // "pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+$", // "required": true, // "type": "string" - // }, - // "updateMask": { - // "description": "Required. A non-empty list of fields to change in the existing exclusion. New values for the fields are taken from the corresponding fields in the LogExclusion included in this request. 
Fields not mentioned in update_mask are not changed and are ignored in the request.For example, to change the filter and description of an exclusion, specify an update_mask of \"filter,description\".", - // "format": "google-fieldmask", - // "location": "query", - // "type": "string" // } // }, // "path": "v2/{+name}", - // "request": { - // "$ref": "LogExclusion" - // }, // "response": { - // "$ref": "LogExclusion" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -14039,7 +16024,7 @@ func (c *ProjectsLocationsBucketsGetCall) Header() http.Header { func (c *ProjectsLocationsBucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14110,7 +16095,7 @@ func (c *ProjectsLocationsBucketsGetCall) Do(opts ...googleapi.CallOption) (*Log // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the bucket:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + // "description": "Required. The resource name of the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+$", // "required": true, @@ -14205,7 +16190,7 @@ func (c *ProjectsLocationsBucketsListCall) Header() http.Header { func (c *ProjectsLocationsBucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14287,7 +16272,7 @@ func (c *ProjectsLocationsBucketsListCall) Do(opts ...googleapi.CallOption) (*Li // "type": "string" // }, // "parent": { - // "description": "Required. The parent resource whose buckets are to be listed:\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]\"\nNote: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", + // "description": "Required. 
The parent resource whose buckets are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" Note: The locations portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all buckets.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -14393,7 +16378,7 @@ func (c *ProjectsLocationsBucketsPatchCall) Header() http.Header { func (c *ProjectsLocationsBucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14466,7 +16451,7 @@ func (c *ProjectsLocationsBucketsPatchCall) Do(opts ...googleapi.CallOption) (*L // ], // "parameters": { // "name": { - // "description": "Required. The full resource name of the bucket to update.\n\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\n\"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"\nExample: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", + // "description": "Required. The full resource name of the bucket to update. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\". Also requires permission \"resourcemanager.projects.updateLiens\" to set the locked property", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+$", // "required": true, @@ -14494,6 +16479,148 @@ func (c *ProjectsLocationsBucketsPatchCall) Do(opts ...googleapi.CallOption) (*L } +// method id "logging.projects.locations.buckets.undelete": + +type ProjectsLocationsBucketsUndeleteCall struct { + s *Service + name string + undeletebucketrequest *UndeleteBucketRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Undelete: Undeletes a bucket. A bucket that has been deleted may be +// undeleted within the grace period of 7 days. +func (r *ProjectsLocationsBucketsService) Undelete(name string, undeletebucketrequest *UndeleteBucketRequest) *ProjectsLocationsBucketsUndeleteCall { + c := &ProjectsLocationsBucketsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.undeletebucketrequest = undeletebucketrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsLocationsBucketsUndeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsBucketsUndeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsBucketsUndeleteCall) Context(ctx context.Context) *ProjectsLocationsBucketsUndeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsBucketsUndeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsBucketsUndeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeletebucketrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:undelete") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "logging.projects.locations.buckets.undelete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsLocationsBucketsUndeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Undeletes a bucket. A bucket that has been deleted may be undeleted within the grace period of 7 days.", + // "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}:undelete", + // "httpMethod": "POST", + // "id": "logging.projects.locations.buckets.undelete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The full resource name of the bucket to undelete. 
\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\".", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}:undelete", + // "request": { + // "$ref": "UndeleteBucketRequest" + // }, + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/logging.admin" + // ] + // } + +} + // method id "logging.projects.logs.delete": type ProjectsLogsDeleteCall struct { @@ -14541,7 +16668,7 @@ func (c *ProjectsLogsDeleteCall) Header() http.Header { func (c *ProjectsLogsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14609,7 +16736,7 @@ func (c *ProjectsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error // ], // "parameters": { // "logName": { - // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + // "description": "Required. The resource name of the log to delete: \"projects/[PROJECT_ID]/logs/[LOG_ID]\" \"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\" \"folders/[FOLDER_ID]/logs/[LOG_ID]\" [LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", // "location": "path", // "pattern": "^projects/[^/]+/logs/[^/]+$", // "required": true, @@ -14703,7 +16830,7 @@ func (c *ProjectsLogsListCall) Header() http.Header { func (c *ProjectsLogsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14785,7 +16912,7 @@ func (c *ProjectsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsRespon // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + // "description": "Required. 
The resource name that owns the logs: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -14873,7 +17000,7 @@ func (c *ProjectsMetricsCreateCall) Header() http.Header { func (c *ProjectsMetricsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14946,7 +17073,7 @@ func (c *ProjectsMetricsCreateCall) Do(opts ...googleapi.CallOption) (*LogMetric // ], // "parameters": { // "parent": { - // "description": "Required. The resource name of the project in which to create the metric:\n\"projects/[PROJECT_ID]\"\nThe new metric must be provided in the request.", + // "description": "Required. The resource name of the project in which to create the metric: \"projects/[PROJECT_ID]\" The new metric must be provided in the request.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -15013,7 +17140,7 @@ func (c *ProjectsMetricsDeleteCall) Header() http.Header { func (c *ProjectsMetricsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15081,7 +17208,7 @@ func (c *ProjectsMetricsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, er // ], // "parameters": { // "metricName": { - // "description": "Required. The resource name of the metric to delete:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", + // "description": "Required. The resource name of the metric to delete: \"projects/[PROJECT_ID]/metrics/[METRIC_ID]\" ", // "location": "path", // "pattern": "^projects/[^/]+/metrics/[^/]+$", // "required": true, @@ -15156,7 +17283,7 @@ func (c *ProjectsMetricsGetCall) Header() http.Header { func (c *ProjectsMetricsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15227,7 +17354,7 @@ func (c *ProjectsMetricsGetCall) Do(opts ...googleapi.CallOption) (*LogMetric, e // ], // "parameters": { // "metricName": { - // "description": "Required. The resource name of the desired metric:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", + // "description": "Required. 
The resource name of the desired metric: \"projects/[PROJECT_ID]/metrics/[METRIC_ID]\" ", // "location": "path", // "pattern": "^projects/[^/]+/metrics/[^/]+$", // "required": true, @@ -15322,7 +17449,7 @@ func (c *ProjectsMetricsListCall) Header() http.Header { func (c *ProjectsMetricsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15404,7 +17531,7 @@ func (c *ProjectsMetricsListCall) Do(opts ...googleapi.CallOption) (*ListLogMetr // "type": "string" // }, // "parent": { - // "description": "Required. The name of the project containing the metrics:\n\"projects/[PROJECT_ID]\"\n", + // "description": "Required. The name of the project containing the metrics: \"projects/[PROJECT_ID]\" ", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -15492,7 +17619,7 @@ func (c *ProjectsMetricsUpdateCall) Header() http.Header { func (c *ProjectsMetricsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15565,7 +17692,7 @@ func (c *ProjectsMetricsUpdateCall) Do(opts ...googleapi.CallOption) (*LogMetric // ], // "parameters": { // "metricName": { - // "description": "Required. The resource name of the metric to update:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\nThe updated metric must be provided in the request and it's name field must be the same as [METRIC_ID] If the metric does not exist in [PROJECT_ID], then a new metric is created.", + // "description": "Required. The resource name of the metric to update: \"projects/[PROJECT_ID]/metrics/[METRIC_ID]\" The updated metric must be provided in the request and it's name field must be the same as [METRIC_ID] If the metric does not exist in [PROJECT_ID], then a new metric is created.", // "location": "path", // "pattern": "^projects/[^/]+/metrics/[^/]+$", // "required": true, @@ -15654,7 +17781,7 @@ func (c *ProjectsSinksCreateCall) Header() http.Header { func (c *ProjectsSinksCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15727,7 +17854,7 @@ func (c *ProjectsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, er // ], // "parameters": { // "parent": { - // "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "description": "Required. 
The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -15799,7 +17926,7 @@ func (c *ProjectsSinksDeleteCall) Header() http.Header { func (c *ProjectsSinksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -15867,7 +17994,7 @@ func (c *ProjectsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, erro // ], // "parameters": { // "sinkName": { - // "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^projects/[^/]+/sinks/[^/]+$", // "required": true, @@ -15941,7 +18068,7 @@ func (c *ProjectsSinksGetCall) Header() http.Header { func (c *ProjectsSinksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -16012,7 +18139,7 @@ func (c *ProjectsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error // ], // "parameters": { // "sinkName": { - // "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. 
The resource name of the sink: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^projects/[^/]+/sinks/[^/]+$", // "required": true, @@ -16107,7 +18234,7 @@ func (c *ProjectsSinksListCall) Header() http.Header { func (c *ProjectsSinksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -16189,7 +18316,7 @@ func (c *ProjectsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksResp // "type": "string" // }, // "parent": { - // "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + // "description": "Required. The parent resource whose sinks are to be listed: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -16257,13 +18384,12 @@ func (r *ProjectsSinksService) Patch(sinkNameid string, logsink *LogSink) *Proje // "uniqueWriterIdentity": See sinks.create for a description of this // field. When updating a sink, the effect of this field on the value of // writer_identity in the updated sink depends on both the old and new -// values of this field: -// If the old and new values of this field are both false or both true, -// then there is no change to the sink's writer_identity. -// If the old value is false and the new value is true, then -// writer_identity is changed to a unique service account. -// It is an error if the old value is true and the new value is set to -// false or defaulted to false. +// values of this field: If the old and new values of this field are +// both false or both true, then there is no change to the sink's +// writer_identity. If the old value is false and the new value is true, +// then writer_identity is changed to a unique service account. It is an +// error if the old value is true and the new value is set to false or +// defaulted to false. func (c *ProjectsSinksPatchCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *ProjectsSinksPatchCall { c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) return c @@ -16274,7 +18400,7 @@ func (c *ProjectsSinksPatchCall) UniqueWriterIdentity(uniqueWriterIdentity bool) // be overwritten if, and only if, it is in the update mask. 
name and // output only fields cannot be updated.An empty updateMask is // temporarily treated as using the following mask for backwards -// compatibility purposes: destination,filter,includeChildren At some +// compatibility purposes: destination,filter,includeChildren At some // point in the future, behavior will be removed and specifying an empty // updateMask will be an error.For a detailed FieldMask definition, see // https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: @@ -16311,7 +18437,7 @@ func (c *ProjectsSinksPatchCall) Header() http.Header { func (c *ProjectsSinksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -16384,19 +18510,19 @@ func (c *ProjectsSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSink, err // ], // "parameters": { // "sinkName": { - // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^projects/[^/]+/sinks/[^/]+$", // "required": true, // "type": "string" // }, // "uniqueWriterIdentity": { - // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", + // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", // "location": "query", // "type": "boolean" // }, // "updateMask": { - // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. 
name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -16443,13 +18569,12 @@ func (r *ProjectsSinksService) Update(sinkNameid string, logsink *LogSink) *Proj // "uniqueWriterIdentity": See sinks.create for a description of this // field. When updating a sink, the effect of this field on the value of // writer_identity in the updated sink depends on both the old and new -// values of this field: -// If the old and new values of this field are both false or both true, -// then there is no change to the sink's writer_identity. -// If the old value is false and the new value is true, then -// writer_identity is changed to a unique service account. -// It is an error if the old value is true and the new value is set to -// false or defaulted to false. +// values of this field: If the old and new values of this field are +// both false or both true, then there is no change to the sink's +// writer_identity. If the old value is false and the new value is true, +// then writer_identity is changed to a unique service account. It is an +// error if the old value is true and the new value is set to false or +// defaulted to false. func (c *ProjectsSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *ProjectsSinksUpdateCall { c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) return c @@ -16460,7 +18585,7 @@ func (c *ProjectsSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool // be overwritten if, and only if, it is in the update mask. 
name and // output only fields cannot be updated.An empty updateMask is // temporarily treated as using the following mask for backwards -// compatibility purposes: destination,filter,includeChildren At some +// compatibility purposes: destination,filter,includeChildren At some // point in the future, behavior will be removed and specifying an empty // updateMask will be an error.For a detailed FieldMask definition, see // https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: @@ -16497,7 +18622,7 @@ func (c *ProjectsSinksUpdateCall) Header() http.Header { func (c *ProjectsSinksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -16570,19 +18695,19 @@ func (c *ProjectsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, er // ], // "parameters": { // "sinkName": { - // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^projects/[^/]+/sinks/[^/]+$", // "required": true, // "type": "string" // }, // "uniqueWriterIdentity": { - // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", + // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", // "location": "query", // "type": "boolean" // }, // "updateMask": { - // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. 
name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -16669,7 +18794,7 @@ func (c *SinksCreateCall) Header() http.Header { func (c *SinksCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -16742,7 +18867,7 @@ func (c *SinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { // ], // "parameters": { // "parent": { - // "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + // "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".", // "location": "path", // "pattern": "^[^/]+/[^/]+$", // "required": true, @@ -16814,7 +18939,7 @@ func (c *SinksDeleteCall) Header() http.Header { func (c *SinksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -16882,7 +19007,7 @@ func (c *SinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { // ], // "parameters": { // "sinkName": { - // "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. 
The full resource name of the sink to delete, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^[^/]+/[^/]+/sinks/[^/]+$", // "required": true, @@ -16956,7 +19081,7 @@ func (c *SinksGetCall) Header() http.Header { func (c *SinksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -17027,7 +19152,7 @@ func (c *SinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { // ], // "parameters": { // "sinkName": { - // "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. The resource name of the sink: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^[^/]+/[^/]+/sinks/[^/]+$", // "required": true, @@ -17122,7 +19247,7 @@ func (c *SinksListCall) Header() http.Header { func (c *SinksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -17204,7 +19329,7 @@ func (c *SinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksResponse, er // "type": "string" // }, // "parent": { - // "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + // "description": "Required. The parent resource whose sinks are to be listed: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" ", // "location": "path", // "pattern": "^[^/]+/[^/]+$", // "required": true, @@ -17272,13 +19397,12 @@ func (r *SinksService) Update(sinkNameid string, logsink *LogSink) *SinksUpdateC // "uniqueWriterIdentity": See sinks.create for a description of this // field. When updating a sink, the effect of this field on the value of // writer_identity in the updated sink depends on both the old and new -// values of this field: -// If the old and new values of this field are both false or both true, -// then there is no change to the sink's writer_identity. -// If the old value is false and the new value is true, then -// writer_identity is changed to a unique service account. -// It is an error if the old value is true and the new value is set to -// false or defaulted to false. 
+// values of this field: If the old and new values of this field are +// both false or both true, then there is no change to the sink's +// writer_identity. If the old value is false and the new value is true, +// then writer_identity is changed to a unique service account. It is an +// error if the old value is true and the new value is set to false or +// defaulted to false. func (c *SinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *SinksUpdateCall { c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) return c @@ -17289,7 +19413,7 @@ func (c *SinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *Sinks // be overwritten if, and only if, it is in the update mask. name and // output only fields cannot be updated.An empty updateMask is // temporarily treated as using the following mask for backwards -// compatibility purposes: destination,filter,includeChildren At some +// compatibility purposes: destination,filter,includeChildren At some // point in the future, behavior will be removed and specifying an empty // updateMask will be an error.For a detailed FieldMask definition, see // https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: @@ -17326,7 +19450,7 @@ func (c *SinksUpdateCall) Header() http.Header { func (c *SinksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -17399,19 +19523,19 @@ func (c *SinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { // ], // "parameters": { // "sinkName": { - // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" Example: \"projects/my-project-id/sinks/my-sink-id\".", // "location": "path", // "pattern": "^[^/]+/[^/]+/sinks/[^/]+$", // "required": true, // "type": "string" // }, // "uniqueWriterIdentity": { - // "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", + // "description": "Optional. See sinks.create for a description of this field. 
When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", // "location": "query", // "type": "boolean" // }, // "updateMask": { - // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + // "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -17493,7 +19617,7 @@ func (c *V2GetCmekSettingsCall) Header() http.Header { func (c *V2GetCmekSettingsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -17564,7 +19688,7 @@ func (c *V2GetCmekSettingsCall) Do(opts ...googleapi.CallOption) (*CmekSettings, // ], // "parameters": { // "name": { - // "description": "Required. The resource for which to retrieve CMEK settings.\n\"projects/[PROJECT_ID]/cmekSettings\"\n\"organizations/[ORGANIZATION_ID]/cmekSettings\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\"\n\"folders/[FOLDER_ID]/cmekSettings\"\nExample: \"organizations/12345/cmekSettings\".Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once configured, it applies to all projects and folders in the GCP organization.", + // "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" Example: \"organizations/12345/cmekSettings\".Note: CMEK for the Logs Router can currently only be configured for GCP organizations. 
Once configured, it applies to all projects and folders in the GCP organization.", // "location": "path", // "pattern": "^[^/]+/[^/]+$", // "required": true, @@ -17650,7 +19774,7 @@ func (c *V2UpdateCmekSettingsCall) Header() http.Header { func (c *V2UpdateCmekSettingsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -17723,7 +19847,7 @@ func (c *V2UpdateCmekSettingsCall) Do(opts ...googleapi.CallOption) (*CmekSettin // ], // "parameters": { // "name": { - // "description": "Required. The resource name for the CMEK settings to update.\n\"projects/[PROJECT_ID]/cmekSettings\"\n\"organizations/[ORGANIZATION_ID]/cmekSettings\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\"\n\"folders/[FOLDER_ID]/cmekSettings\"\nExample: \"organizations/12345/cmekSettings\".Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once configured, it applies to all projects and folders in the GCP organization.", + // "description": "Required. The resource name for the CMEK settings to update. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" Example: \"organizations/12345/cmekSettings\".Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once configured, it applies to all projects and folders in the GCP organization.", // "location": "path", // "pattern": "^[^/]+/[^/]+$", // "required": true, diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index 48121e42f90..b4d78a830ae 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -20,7 +20,33 @@ func (o defaultEndpointOption) Apply(settings *internal.DialSettings) { // // It should only be used internally by generated clients. // -// This is similar to WithEndpoint, but allows us to determine whether the user has overriden the default endpoint. +// This is similar to WithEndpoint, but allows us to determine whether the user has overridden the default endpoint. func WithDefaultEndpoint(url string) option.ClientOption { return defaultEndpointOption(url) } + +type defaultMTLSEndpointOption string + +func (o defaultMTLSEndpointOption) Apply(settings *internal.DialSettings) { + settings.DefaultMTLSEndpoint = string(o) +} + +// WithDefaultMTLSEndpoint is an option that indicates the default mTLS endpoint. +// +// It should only be used internally by generated clients. +func WithDefaultMTLSEndpoint(url string) option.ClientOption { + return defaultMTLSEndpointOption(url) +} + +// SkipDialSettingsValidation bypasses validation on ClientOptions. +// +// It should only be used internally. 
+func SkipDialSettingsValidation() option.ClientOption { + return skipDialSettingsValidation{} +} + +type skipDialSettingsValidation struct{} + +func (s skipDialSettingsValidation) Apply(settings *internal.DialSettings) { + settings.SkipValidation = true +} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index b7c40d60a45..686476f9cbb 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -11,6 +11,7 @@ import ( "golang.org/x/oauth2" "google.golang.org/api/internal" + "google.golang.org/api/internal/impersonate" "google.golang.org/grpc" ) @@ -269,3 +270,57 @@ type withClientCertSource struct{ s ClientCertSource } func (w withClientCertSource) Apply(o *internal.DialSettings) { o.ClientCertSource = w.s } + +// ImpersonateCredentials returns a ClientOption that will impersonate the +// target service account. +// +// In order to impersonate the target service account +// the base service account must have the Service Account Token Creator role, +// roles/iam.serviceAccountTokenCreator, on the target service account. +// See https://cloud.google.com/iam/docs/understanding-service-accounts. +// +// Optionally, delegates can be used during impersonation if the base service +// account lacks the token creator role on the target. When using delegates, +// each service account must be granted roles/iam.serviceAccountTokenCreator +// on the next service account in the chain. +// +// For example, if a base service account of SA1 is trying to impersonate target +// service account SA2 while using delegate service accounts DSA1 and DSA2, +// the following must be true: +// +// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on +// DSA1. +// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2. +// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2. +// +// The resulting impersonated credential will either have the default scopes of +// the client being instantiating or the scopes from WithScopes if provided. +// Scopes are required for creating impersonated credentials, so if this option +// is used while not using a NewClient/NewService function, WithScopes must also +// be explicitly passed in as well. +// +// If the base credential is an authorized user and not a service account, or if +// the option WithQuotaProject is set, the target service account must have a +// role that grants the serviceusage.services.use permission such as +// roles/serviceusage.serviceUsageConsumer. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future. 
+func ImpersonateCredentials(target string, delegates ...string) ClientOption { + return impersonateServiceAccount{ + target: target, + delegates: delegates, + } +} + +type impersonateServiceAccount struct { + target string + delegates []string +} + +func (i impersonateServiceAccount) Apply(o *internal.DialSettings) { + o.ImpersonationConfig = &impersonate.Config{ + Target: i.target, + } + o.ImpersonationConfig.Delegates = make([]string, len(i.delegates)) + copy(o.ImpersonationConfig.Delegates, i.delegates) +} diff --git a/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json b/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json index 89661d49c67..a03b1d915e0 100644 --- a/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json +++ b/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json @@ -15,7 +15,7 @@ "baseUrl": "https://pubsub.googleapis.com/", "batchPath": "batch", "canonicalName": "Pubsub", - "description": "Provides reliable, many-to-many, asynchronous messaging between applications.\n", + "description": "Provides reliable, many-to-many, asynchronous messaging between applications. ", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/pubsub/docs", "icons": { @@ -112,7 +112,7 @@ "snapshots": { "methods": { "create": { - "description": "Creates a snapshot from the requested subscription. Snapshots are used in\n\u003ca href=\"https://cloud.google.com/pubsub/docs/replay-overview\"\u003eSeek\u003c/a\u003e\noperations, which allow\nyou to manage message acknowledgments in bulk. That is, you can set the\nacknowledgment state of messages in an existing subscription to the state\ncaptured by a snapshot.\n\u003cbr\u003e\u003cbr\u003eIf the snapshot already exists, returns `ALREADY_EXISTS`.\nIf the requested subscription doesn't exist, returns `NOT_FOUND`.\nIf the backlog in the subscription is too old -- and the resulting snapshot\nwould expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned.\nSee also the `Snapshot.expire_time` field. If the name is not provided in\nthe request, the server will assign a random\nname for this snapshot on the same project as the subscription, conforming\nto the\n[resource name\nformat](https://cloud.google.com/pubsub/docs/admin#resource_names). The\ngenerated name is populated in the returned Snapshot object. Note that for\nREST API requests, you must specify a name in the request.", + "description": "Creates a snapshot from the requested subscription. Snapshots are used in [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot. If the snapshot already exists, returns `ALREADY_EXISTS`. If the requested subscription doesn't exist, returns `NOT_FOUND`. If the backlog in the subscription is too old -- and the resulting snapshot would expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned. See also the `Snapshot.expire_time` field. If the name is not provided in the request, the server will assign a random name for this snapshot on the same project as the subscription, conforming to the [resource name format] (https://cloud.google.com/pubsub/docs/admin#resource_names). The generated name is populated in the returned Snapshot object. 
Note that for REST API requests, you must specify a name in the request.", "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}", "httpMethod": "PUT", "id": "pubsub.projects.snapshots.create", @@ -121,7 +121,7 @@ ], "parameters": { "name": { - "description": "Required. User-provided name for this snapshot. If the name is not provided in the\nrequest, the server will assign a random name for this snapshot on the same\nproject as the subscription. Note that for REST API requests, you must\nspecify a name. See the \u003ca\nhref=\"https://cloud.google.com/pubsub/docs/admin#resource_names\"\u003e resource\nname rules\u003c/a\u003e. Format is `projects/{project}/snapshots/{snap}`.", + "description": "Required. User-provided name for this snapshot. If the name is not provided in the request, the server will assign a random name for this snapshot on the same project as the subscription. Note that for REST API requests, you must specify a name. See the resource name rules. Format is `projects/{project}/snapshots/{snap}`.", "location": "path", "pattern": "^projects/[^/]+/snapshots/[^/]+$", "required": true, @@ -141,7 +141,7 @@ ] }, "delete": { - "description": "Removes an existing snapshot. Snapshots are used in\n\u003ca href=\"https://cloud.google.com/pubsub/docs/replay-overview\"\u003eSeek\u003c/a\u003e\noperations, which allow\nyou to manage message acknowledgments in bulk. That is, you can set the\nacknowledgment state of messages in an existing subscription to the state\ncaptured by a snapshot.\u003cbr\u003e\u003cbr\u003e\nWhen the snapshot is deleted, all messages retained in the snapshot\nare immediately dropped. After a snapshot is deleted, a new one may be\ncreated with the same name, but the new one has no association with the old\nsnapshot or its subscription, unless the same subscription is specified.", + "description": "Removes an existing snapshot. Snapshots are used in [Seek] (https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot. When the snapshot is deleted, all messages retained in the snapshot are immediately dropped. After a snapshot is deleted, a new one may be created with the same name, but the new one has no association with the old snapshot or its subscription, unless the same subscription is specified.", "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}", "httpMethod": "DELETE", "id": "pubsub.projects.snapshots.delete", @@ -150,7 +150,7 @@ ], "parameters": { "snapshot": { - "description": "Required. The name of the snapshot to delete.\nFormat is `projects/{project}/snapshots/{snap}`.", + "description": "Required. The name of the snapshot to delete. Format is `projects/{project}/snapshots/{snap}`.", "location": "path", "pattern": "^projects/[^/]+/snapshots/[^/]+$", "required": true, @@ -167,7 +167,7 @@ ] }, "get": { - "description": "Gets the configuration details of a snapshot. Snapshots are used in\n\u003ca href=\"https://cloud.google.com/pubsub/docs/replay-overview\"\u003eSeek\u003c/a\u003e\noperations, which allow you to manage message acknowledgments in bulk. That\nis, you can set the acknowledgment state of messages in an existing\nsubscription to the state captured by a snapshot.", + "description": "Gets the configuration details of a snapshot. Snapshots are used in Seek operations, which allow you to manage message acknowledgments in bulk. 
That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot.", "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}", "httpMethod": "GET", "id": "pubsub.projects.snapshots.get", @@ -176,7 +176,7 @@ ], "parameters": { "snapshot": { - "description": "Required. The name of the snapshot to get.\nFormat is `projects/{project}/snapshots/{snap}`.", + "description": "Required. The name of the snapshot to get. Format is `projects/{project}/snapshots/{snap}`.", "location": "path", "pattern": "^projects/[^/]+/snapshots/[^/]+$", "required": true, @@ -193,7 +193,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:getIamPolicy", "httpMethod": "GET", "id": "pubsub.projects.snapshots.getIamPolicy", @@ -202,13 +202,13 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" }, "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/snapshots/[^/]+$", "required": true, @@ -225,7 +225,7 @@ ] }, "list": { - "description": "Lists the existing snapshots. Snapshots are used in\n\u003ca href=\"https://cloud.google.com/pubsub/docs/replay-overview\"\u003eSeek\u003c/a\u003e\noperations, which allow\nyou to manage message acknowledgments in bulk. That is, you can set the\nacknowledgment state of messages in an existing subscription to the state\ncaptured by a snapshot.", + "description": "Lists the existing snapshots. Snapshots are used in [Seek]( https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. 
That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot.", "flatPath": "v1/projects/{projectsId}/snapshots", "httpMethod": "GET", "id": "pubsub.projects.snapshots.list", @@ -240,12 +240,12 @@ "type": "integer" }, "pageToken": { - "description": "The value returned by the last `ListSnapshotsResponse`; indicates that this\nis a continuation of a prior `ListSnapshots` call, and that the system\nshould return the next page of data.", + "description": "The value returned by the last `ListSnapshotsResponse`; indicates that this is a continuation of a prior `ListSnapshots` call, and that the system should return the next page of data.", "location": "query", "type": "string" }, "project": { - "description": "Required. The name of the project in which to list snapshots.\nFormat is `projects/{project-id}`.", + "description": "Required. The name of the project in which to list snapshots. Format is `projects/{project-id}`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -262,7 +262,7 @@ ] }, "patch": { - "description": "Updates an existing snapshot. Snapshots are used in\n\u003ca href=\"https://cloud.google.com/pubsub/docs/replay-overview\"\u003eSeek\u003c/a\u003e\noperations, which allow\nyou to manage message acknowledgments in bulk. That is, you can set the\nacknowledgment state of messages in an existing subscription to the state\ncaptured by a snapshot.", + "description": "Updates an existing snapshot. Snapshots are used in Seek operations, which allow you to manage message acknowledgments in bulk. That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot.", "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}", "httpMethod": "PATCH", "id": "pubsub.projects.snapshots.patch", @@ -291,7 +291,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:setIamPolicy", "httpMethod": "POST", "id": "pubsub.projects.snapshots.setIamPolicy", @@ -300,7 +300,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/snapshots/[^/]+$", "required": true, @@ -320,7 +320,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. 
Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:testIamPermissions", "httpMethod": "POST", "id": "pubsub.projects.snapshots.testIamPermissions", @@ -329,7 +329,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/snapshots/[^/]+$", "required": true, @@ -353,7 +353,7 @@ "subscriptions": { "methods": { "acknowledge": { - "description": "Acknowledges the messages associated with the `ack_ids` in the\n`AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages\nfrom the subscription.\n\nAcknowledging a message whose ack deadline has expired may succeed,\nbut such a message may be redelivered later. Acknowledging a message more\nthan once will not result in an error.", + "description": "Acknowledges the messages associated with the `ack_ids` in the `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages from the subscription. Acknowledging a message whose ack deadline has expired may succeed, but such a message may be redelivered later. Acknowledging a message more than once will not result in an error.", "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:acknowledge", "httpMethod": "POST", "id": "pubsub.projects.subscriptions.acknowledge", @@ -362,7 +362,7 @@ ], "parameters": { "subscription": { - "description": "Required. The subscription whose message is being acknowledged.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "description": "Required. The subscription whose message is being acknowledged. Format is `projects/{project}/subscriptions/{sub}`.", "location": "path", "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "required": true, @@ -382,7 +382,7 @@ ] }, "create": { - "description": "Creates a subscription to a given topic. See the\n\u003ca href=\"https://cloud.google.com/pubsub/docs/admin#resource_names\"\u003e\nresource name rules\u003c/a\u003e.\nIf the subscription already exists, returns `ALREADY_EXISTS`.\nIf the corresponding topic doesn't exist, returns `NOT_FOUND`.\n\nIf the name is not provided in the request, the server will assign a random\nname for this subscription on the same project as the topic, conforming\nto the\n[resource name\nformat](https://cloud.google.com/pubsub/docs/admin#resource_names). The\ngenerated name is populated in the returned Subscription object. Note that\nfor REST API requests, you must specify a name in the request.", + "description": "Creates a subscription to a given topic. See the [resource name rules] (https://cloud.google.com/pubsub/docs/admin#resource_names). If the subscription already exists, returns `ALREADY_EXISTS`. If the corresponding topic doesn't exist, returns `NOT_FOUND`. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic, conforming to the [resource name format] (https://cloud.google.com/pubsub/docs/admin#resource_names). The generated name is populated in the returned Subscription object. 
Note that for REST API requests, you must specify a name in the request.", "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", "httpMethod": "PUT", "id": "pubsub.projects.subscriptions.create", @@ -391,7 +391,7 @@ ], "parameters": { "name": { - "description": "Required. The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", + "description": "Required. The name of the subscription. It must have the format `\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", "location": "path", "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "required": true, @@ -411,7 +411,7 @@ ] }, "delete": { - "description": "Deletes an existing subscription. All messages retained in the subscription\nare immediately dropped. Calls to `Pull` after deletion will return\n`NOT_FOUND`. After a subscription is deleted, a new one may be created with\nthe same name, but the new one has no association with the old\nsubscription or its topic unless the same topic is specified.", + "description": "Deletes an existing subscription. All messages retained in the subscription are immediately dropped. Calls to `Pull` after deletion will return `NOT_FOUND`. After a subscription is deleted, a new one may be created with the same name, but the new one has no association with the old subscription or its topic unless the same topic is specified.", "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", "httpMethod": "DELETE", "id": "pubsub.projects.subscriptions.delete", @@ -420,7 +420,7 @@ ], "parameters": { "subscription": { - "description": "Required. The subscription to delete.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "description": "Required. The subscription to delete. Format is `projects/{project}/subscriptions/{sub}`.", "location": "path", "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "required": true, @@ -436,6 +436,32 @@ "https://www.googleapis.com/auth/pubsub" ] }, + "detach": { + "description": "Detaches a subscription from this topic. All messages retained in the subscription are dropped. Subsequent `Pull` and `StreamingPull` requests will return FAILED_PRECONDITION. If the subscription is a push subscription, pushes to the endpoint will stop.", + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:detach", + "httpMethod": "POST", + "id": "pubsub.projects.subscriptions.detach", + "parameterOrder": [ + "subscription" + ], + "parameters": { + "subscription": { + "description": "Required. The subscription to detach. 
Format is `projects/{project}/subscriptions/{subscription}`.", + "location": "path", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+subscription}:detach", + "response": { + "$ref": "DetachSubscriptionResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, "get": { "description": "Gets the configuration details of a subscription.", "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", @@ -446,7 +472,7 @@ ], "parameters": { "subscription": { - "description": "Required. The name of the subscription to get.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "description": "Required. The name of the subscription to get. Format is `projects/{project}/subscriptions/{sub}`.", "location": "path", "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "required": true, @@ -463,7 +489,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:getIamPolicy", "httpMethod": "GET", "id": "pubsub.projects.subscriptions.getIamPolicy", @@ -472,13 +498,13 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" }, "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "required": true, @@ -510,12 +536,12 @@ "type": "integer" }, "pageToken": { - "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that\nthis is a continuation of a prior `ListSubscriptions` call, and that the\nsystem should return the next page of data.", + "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that this is a continuation of a prior `ListSubscriptions` call, and that the system should return the next page of data.", "location": "query", "type": "string" }, "project": { - "description": "Required. 
The name of the project in which to list subscriptions.\nFormat is `projects/{project-id}`.", + "description": "Required. The name of the project in which to list subscriptions. Format is `projects/{project-id}`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -532,7 +558,7 @@ ] }, "modifyAckDeadline": { - "description": "Modifies the ack deadline for a specific message. This method is useful\nto indicate that more time is needed to process a message by the\nsubscriber, or to make the message available for redelivery if the\nprocessing was interrupted. Note that this does not modify the\nsubscription-level `ackDeadlineSeconds` used for subsequent messages.", + "description": "Modifies the ack deadline for a specific message. This method is useful to indicate that more time is needed to process a message by the subscriber, or to make the message available for redelivery if the processing was interrupted. Note that this does not modify the subscription-level `ackDeadlineSeconds` used for subsequent messages.", "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyAckDeadline", "httpMethod": "POST", "id": "pubsub.projects.subscriptions.modifyAckDeadline", @@ -541,7 +567,7 @@ ], "parameters": { "subscription": { - "description": "Required. The name of the subscription.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "description": "Required. The name of the subscription. Format is `projects/{project}/subscriptions/{sub}`.", "location": "path", "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "required": true, @@ -561,7 +587,7 @@ ] }, "modifyPushConfig": { - "description": "Modifies the `PushConfig` for a specified subscription.\n\nThis may be used to change a push subscription to a pull one (signified by\nan empty `PushConfig`) or vice versa, or change the endpoint URL and other\nattributes of a push subscription. Messages will accumulate for delivery\ncontinuously through the call regardless of changes to the `PushConfig`.", + "description": "Modifies the `PushConfig` for a specified subscription. This may be used to change a push subscription to a pull one (signified by an empty `PushConfig`) or vice versa, or change the endpoint URL and other attributes of a push subscription. Messages will accumulate for delivery continuously through the call regardless of changes to the `PushConfig`.", "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyPushConfig", "httpMethod": "POST", "id": "pubsub.projects.subscriptions.modifyPushConfig", @@ -570,7 +596,7 @@ ], "parameters": { "subscription": { - "description": "Required. The name of the subscription.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "description": "Required. The name of the subscription. Format is `projects/{project}/subscriptions/{sub}`.", "location": "path", "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "required": true, @@ -590,7 +616,7 @@ ] }, "patch": { - "description": "Updates an existing subscription. Note that certain properties of a\nsubscription, such as its topic, are not modifiable.", + "description": "Updates an existing subscription. Note that certain properties of a subscription, such as its topic, are not modifiable.", "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", "httpMethod": "PATCH", "id": "pubsub.projects.subscriptions.patch", @@ -599,7 +625,7 @@ ], "parameters": { "name": { - "description": "Required. The name of the subscription. 
It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", + "description": "Required. The name of the subscription. It must have the format `\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", "location": "path", "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "required": true, @@ -619,7 +645,7 @@ ] }, "pull": { - "description": "Pulls messages from the server. The server may return `UNAVAILABLE` if\nthere are too many concurrent pull requests pending for the given\nsubscription.", + "description": "Pulls messages from the server. The server may return `UNAVAILABLE` if there are too many concurrent pull requests pending for the given subscription.", "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:pull", "httpMethod": "POST", "id": "pubsub.projects.subscriptions.pull", @@ -628,7 +654,7 @@ ], "parameters": { "subscription": { - "description": "Required. The subscription from which messages should be pulled.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "description": "Required. The subscription from which messages should be pulled. Format is `projects/{project}/subscriptions/{sub}`.", "location": "path", "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "required": true, @@ -648,7 +674,7 @@ ] }, "seek": { - "description": "Seeks an existing subscription to a point in time or to a given snapshot,\nwhichever is provided in the request. Snapshots are used in\n\u003ca href=\"https://cloud.google.com/pubsub/docs/replay-overview\"\u003eSeek\u003c/a\u003e\noperations, which allow\nyou to manage message acknowledgments in bulk. That is, you can set the\nacknowledgment state of messages in an existing subscription to the state\ncaptured by a snapshot. Note that both the subscription and the snapshot\nmust be on the same topic.", + "description": "Seeks an existing subscription to a point in time or to a given snapshot, whichever is provided in the request. Snapshots are used in [Seek]( https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot. Note that both the subscription and the snapshot must be on the same topic.", "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:seek", "httpMethod": "POST", "id": "pubsub.projects.subscriptions.seek", @@ -677,7 +703,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. 
Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:setIamPolicy", "httpMethod": "POST", "id": "pubsub.projects.subscriptions.setIamPolicy", @@ -686,7 +712,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "required": true, @@ -706,7 +732,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:testIamPermissions", "httpMethod": "POST", "id": "pubsub.projects.subscriptions.testIamPermissions", @@ -715,7 +741,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/subscriptions/[^/]+$", "required": true, @@ -739,7 +765,7 @@ "topics": { "methods": { "create": { - "description": "Creates the given topic with the given name. See the\n\u003ca href=\"https://cloud.google.com/pubsub/docs/admin#resource_names\"\u003e\nresource name rules\u003c/a\u003e.", + "description": "Creates the given topic with the given name. See the [resource name rules]( https://cloud.google.com/pubsub/docs/admin#resource_names).", "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", "httpMethod": "PUT", "id": "pubsub.projects.topics.create", @@ -748,7 +774,7 @@ ], "parameters": { "name": { - "description": "Required. The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", + "description": "Required. The name of the topic. It must have the format `\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). 
It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", "location": "path", "pattern": "^projects/[^/]+/topics/[^/]+$", "required": true, @@ -768,7 +794,7 @@ ] }, "delete": { - "description": "Deletes the topic with the given name. Returns `NOT_FOUND` if the topic\ndoes not exist. After a topic is deleted, a new topic may be created with\nthe same name; this is an entirely new topic with none of the old\nconfiguration or subscriptions. Existing subscriptions to this topic are\nnot deleted, but their `topic` field is set to `_deleted-topic_`.", + "description": "Deletes the topic with the given name. Returns `NOT_FOUND` if the topic does not exist. After a topic is deleted, a new topic may be created with the same name; this is an entirely new topic with none of the old configuration or subscriptions. Existing subscriptions to this topic are not deleted, but their `topic` field is set to `_deleted-topic_`.", "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", "httpMethod": "DELETE", "id": "pubsub.projects.topics.delete", @@ -777,7 +803,7 @@ ], "parameters": { "topic": { - "description": "Required. Name of the topic to delete.\nFormat is `projects/{project}/topics/{topic}`.", + "description": "Required. Name of the topic to delete. Format is `projects/{project}/topics/{topic}`.", "location": "path", "pattern": "^projects/[^/]+/topics/[^/]+$", "required": true, @@ -803,7 +829,7 @@ ], "parameters": { "topic": { - "description": "Required. The name of the topic to get.\nFormat is `projects/{project}/topics/{topic}`.", + "description": "Required. The name of the topic to get. Format is `projects/{project}/topics/{topic}`.", "location": "path", "pattern": "^projects/[^/]+/topics/[^/]+$", "required": true, @@ -820,7 +846,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:getIamPolicy", "httpMethod": "GET", "id": "pubsub.projects.topics.getIamPolicy", @@ -829,13 +855,13 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. 
To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" }, "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/topics/[^/]+$", "required": true, @@ -867,12 +893,12 @@ "type": "integer" }, "pageToken": { - "description": "The value returned by the last `ListTopicsResponse`; indicates that this is\na continuation of a prior `ListTopics` call, and that the system should\nreturn the next page of data.", + "description": "The value returned by the last `ListTopicsResponse`; indicates that this is a continuation of a prior `ListTopics` call, and that the system should return the next page of data.", "location": "query", "type": "string" }, "project": { - "description": "Required. The name of the project in which to list topics.\nFormat is `projects/{project-id}`.", + "description": "Required. The name of the project in which to list topics. Format is `projects/{project-id}`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -889,7 +915,7 @@ ] }, "patch": { - "description": "Updates an existing topic. Note that certain properties of a\ntopic are not modifiable.", + "description": "Updates an existing topic. Note that certain properties of a topic are not modifiable.", "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", "httpMethod": "PATCH", "id": "pubsub.projects.topics.patch", @@ -898,7 +924,7 @@ ], "parameters": { "name": { - "description": "Required. The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", + "description": "Required. The name of the topic. It must have the format `\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", "location": "path", "pattern": "^projects/[^/]+/topics/[^/]+$", "required": true, @@ -918,7 +944,7 @@ ] }, "publish": { - "description": "Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic\ndoes not exist.", + "description": "Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic does not exist.", "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:publish", "httpMethod": "POST", "id": "pubsub.projects.topics.publish", @@ -927,7 +953,7 @@ ], "parameters": { "topic": { - "description": "Required. The messages in the request will be published on this topic.\nFormat is `projects/{project}/topics/{topic}`.", + "description": "Required. The messages in the request will be published on this topic. 
Format is `projects/{project}/topics/{topic}`.", "location": "path", "pattern": "^projects/[^/]+/topics/[^/]+$", "required": true, @@ -947,7 +973,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:setIamPolicy", "httpMethod": "POST", "id": "pubsub.projects.topics.setIamPolicy", @@ -956,7 +982,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/topics/[^/]+$", "required": true, @@ -976,7 +1002,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:testIamPermissions", "httpMethod": "POST", "id": "pubsub.projects.topics.testIamPermissions", @@ -985,7 +1011,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/topics/[^/]+$", "required": true, @@ -1009,7 +1035,7 @@ "snapshots": { "methods": { "list": { - "description": "Lists the names of the snapshots on this topic. Snapshots are used in\n\u003ca href=\"https://cloud.google.com/pubsub/docs/replay-overview\"\u003eSeek\u003c/a\u003e\noperations, which allow\nyou to manage message acknowledgments in bulk. That is, you can set the\nacknowledgment state of messages in an existing subscription to the state\ncaptured by a snapshot.", + "description": "Lists the names of the snapshots on this topic. Snapshots are used in [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. 
That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot.", "flatPath": "v1/projects/{projectsId}/topics/{topicsId}/snapshots", "httpMethod": "GET", "id": "pubsub.projects.topics.snapshots.list", @@ -1024,12 +1050,12 @@ "type": "integer" }, "pageToken": { - "description": "The value returned by the last `ListTopicSnapshotsResponse`; indicates\nthat this is a continuation of a prior `ListTopicSnapshots` call, and\nthat the system should return the next page of data.", + "description": "The value returned by the last `ListTopicSnapshotsResponse`; indicates that this is a continuation of a prior `ListTopicSnapshots` call, and that the system should return the next page of data.", "location": "query", "type": "string" }, "topic": { - "description": "Required. The name of the topic that snapshots are attached to.\nFormat is `projects/{project}/topics/{topic}`.", + "description": "Required. The name of the topic that snapshots are attached to. Format is `projects/{project}/topics/{topic}`.", "location": "path", "pattern": "^projects/[^/]+/topics/[^/]+$", "required": true, @@ -1050,7 +1076,7 @@ "subscriptions": { "methods": { "list": { - "description": "Lists the names of the subscriptions on this topic.", + "description": "Lists the names of the attached subscriptions on this topic.", "flatPath": "v1/projects/{projectsId}/topics/{topicsId}/subscriptions", "httpMethod": "GET", "id": "pubsub.projects.topics.subscriptions.list", @@ -1065,12 +1091,12 @@ "type": "integer" }, "pageToken": { - "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates\nthat this is a continuation of a prior `ListTopicSubscriptions` call, and\nthat the system should return the next page of data.", + "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates that this is a continuation of a prior `ListTopicSubscriptions` call, and that the system should return the next page of data.", "location": "query", "type": "string" }, "topic": { - "description": "Required. The name of the topic that subscriptions are attached to.\nFormat is `projects/{project}/topics/{topic}`.", + "description": "Required. The name of the topic that subscriptions are attached to. Format is `projects/{project}/topics/{topic}`.", "location": "path", "pattern": "^projects/[^/]+/topics/[^/]+$", "required": true, @@ -1093,7 +1119,7 @@ } } }, - "revision": "20200505", + "revision": "20200909", "rootUrl": "https://pubsub.googleapis.com/", "schemas": { "AcknowledgeRequest": { @@ -1101,7 +1127,7 @@ "id": "AcknowledgeRequest", "properties": { "ackIds": { - "description": "Required. The acknowledgment ID for the messages being acknowledged that was returned\nby the Pub/Sub system in the `Pull` response. Must not be empty.", + "description": "Required. The acknowledgment ID for the messages being acknowledged that was returned by the Pub/Sub system in the `Pull` response. Must not be empty.", "items": { "type": "string" }, @@ -1114,19 +1140,23 @@ "description": "Associates `members` with a `role`.", "id": "Binding", "properties": { + "bindingId": { + "description": "A client-specified ID for this binding. 
Expected to be globally unique to support the internal bindings-by-ID API.", + "type": "string" + }, "condition": { "$ref": "Expr", - "description": "The condition that is associated with this binding.\n\nIf the condition evaluates to `true`, then this binding applies to the\ncurrent request.\n\nIf the condition evaluates to `false`, then this binding does not apply to\nthe current request. However, a different role binding might grant the same\nrole to one or more of the members in this binding.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies)." + "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a user that has been recently deleted. For\n example, `alice@example.com?uid=123456789012345678901`. If the user is\n recovered, this value reverts to `user:{emailid}` and the recovered user\n retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus\n unique identifier) representing a service account that has been recently\n deleted. For example,\n `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.\n If the service account is undeleted, this value reverts to\n `serviceAccount:{emailid}` and the undeleted service account retains the\n role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a Google group that has been recently\n deleted. For example, `admins@example.com?uid=123456789012345678901`. If\n the group is recovered, this value reverts to `group:{emailid}` and the\n recovered group retains the role in the binding.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. 
* `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", "items": { "type": "string" }, "type": "array" }, "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", "type": "string" } }, @@ -1140,44 +1170,50 @@ "additionalProperties": { "type": "string" }, - "description": "See \u003ca href=\"https://cloud.google.com/pubsub/docs/labels\"\u003e Creating and\nmanaging labels\u003c/a\u003e.", + "description": "See Creating and managing labels.", "type": "object" }, "subscription": { - "description": "Required. The subscription whose backlog the snapshot retains.\nSpecifically, the created snapshot is guaranteed to retain:\n (a) The existing backlog on the subscription. More precisely, this is\n defined as the messages in the subscription's backlog that are\n unacknowledged upon the successful completion of the\n `CreateSnapshot` request; as well as:\n (b) Any messages published to the subscription's topic following the\n successful completion of the CreateSnapshot request.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "description": "Required. The subscription whose backlog the snapshot retains. Specifically, the created snapshot is guaranteed to retain: (a) The existing backlog on the subscription. More precisely, this is defined as the messages in the subscription's backlog that are unacknowledged upon the successful completion of the `CreateSnapshot` request; as well as: (b) Any messages published to the subscription's topic following the successful completion of the CreateSnapshot request. Format is `projects/{project}/subscriptions/{sub}`.", "type": "string" } }, "type": "object" }, "DeadLetterPolicy": { - "description": "Dead lettering is done on a best effort basis. 
The same message might be\ndead lettered multiple times.\n\nIf validation on any of the fields fails at subscription creation/updation,\nthe create/update subscription request will fail.", + "description": "Dead lettering is done on a best effort basis. The same message might be dead lettered multiple times. If validation on any of the fields fails at subscription creation/updation, the create/update subscription request will fail.", "id": "DeadLetterPolicy", "properties": { "deadLetterTopic": { - "description": "The name of the topic to which dead letter messages should be published.\nFormat is `projects/{project}/topics/{topic}`.The Cloud Pub/Sub service\naccount associated with the enclosing subscription's parent project (i.e.,\nservice-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have\npermission to Publish() to this topic.\n\nThe operation will fail if the topic does not exist.\nUsers should ensure that there is a subscription attached to this topic\nsince messages published to a topic with no subscriptions are lost.", + "description": "The name of the topic to which dead letter messages should be published. Format is `projects/{project}/topics/{topic}`.The Cloud Pub/Sub service account associated with the enclosing subscription's parent project (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have permission to Publish() to this topic. The operation will fail if the topic does not exist. Users should ensure that there is a subscription attached to this topic since messages published to a topic with no subscriptions are lost.", "type": "string" }, "maxDeliveryAttempts": { - "description": "The maximum number of delivery attempts for any message. The value must be\nbetween 5 and 100.\n\nThe number of delivery attempts is defined as 1 + (the sum of number of\nNACKs and number of times the acknowledgement deadline has been exceeded\nfor the message).\n\nA NACK is any call to ModifyAckDeadline with a 0 deadline. Note that\nclient libraries may automatically extend ack_deadlines.\n\nThis field will be honored on a best effort basis.\n\nIf this parameter is 0, a default value of 5 is used.", + "description": "The maximum number of delivery attempts for any message. The value must be between 5 and 100. The number of delivery attempts is defined as 1 + (the sum of number of NACKs and number of times the acknowledgement deadline has been exceeded for the message). A NACK is any call to ModifyAckDeadline with a 0 deadline. Note that client libraries may automatically extend ack_deadlines. This field will be honored on a best effort basis. If this parameter is 0, a default value of 5 is used.", "format": "int32", "type": "integer" } }, "type": "object" }, + "DetachSubscriptionResponse": { + "description": "Response for the DetachSubscription method. Reserved for future use.", + "id": "DetachSubscriptionResponse", + "properties": {}, + "type": "object" + }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. 
For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" }, "ExpirationPolicy": { - "description": "A policy that specifies the conditions for resource expiration (i.e.,\nautomatic resource deletion).", + "description": "A policy that specifies the conditions for resource expiration (i.e., automatic resource deletion).", "id": "ExpirationPolicy", "properties": { "ttl": { - "description": "Specifies the \"time-to-live\" duration for an associated resource. The\nresource expires if it is not active for a period of `ttl`. The definition\nof \"activity\" depends on the type of the associated resource. The minimum\nand maximum allowed values for `ttl` depend on the type of the associated\nresource, as well. If `ttl` is not set, the associated resource never\nexpires.", + "description": "Specifies the \"time-to-live\" duration for an associated resource. The resource expires if it is not active for a period of `ttl`. The definition of \"activity\" depends on the type of the associated resource. The minimum and maximum allowed values for `ttl` depend on the type of the associated resource, as well. If `ttl` is not set, the associated resource never expires.", "format": "google-duration", "type": "string" } @@ -1185,23 +1221,23 @@ "type": "object" }, "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL)\nsyntax. CEL is a C-like expression language. The syntax and semantics of CEL\nare documented at https://github.com/google/cel-spec.\n\nExample (Comparison):\n\n title: \"Summary size limit\"\n description: \"Determines if a summary is less than 100 chars\"\n expression: \"document.summary.size() \u003c 100\"\n\nExample (Equality):\n\n title: \"Requestor is owner\"\n description: \"Determines if requestor is the document owner\"\n expression: \"document.owner == request.auth.claims.email\"\n\nExample (Logic):\n\n title: \"Public documents\"\n description: \"Determine whether the document should be publicly visible\"\n expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\n\nExample (Data Manipulation):\n\n title: \"Notification string\"\n description: \"Create a notification string with a timestamp.\"\n expression: \"'New message received at ' + string(document.create_time)\"\n\nThe exact variables and functions that may be referenced within an expression\nare determined by the service that evaluates it. See the service\ndocumentation for additional information.", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. 
Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { - "description": "Optional. Description of the expression. This is a longer text which\ndescribes the expression, e.g. when hovered over it in a UI.", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, "expression": { - "description": "Textual representation of an expression in Common Expression Language\nsyntax.", + "description": "Textual representation of an expression in Common Expression Language syntax.", "type": "string" }, "location": { - "description": "Optional. String indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", "type": "string" }, "title": { - "description": "Optional. Title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. 
in UIs which allow to enter the expression.", "type": "string" } }, @@ -1212,7 +1248,7 @@ "id": "ListSnapshotsResponse", "properties": { "nextPageToken": { - "description": "If not empty, indicates that there may be more snapshot that match the\nrequest; this value should be passed in a new `ListSnapshotsRequest`.", + "description": "If not empty, indicates that there may be more snapshot that match the request; this value should be passed in a new `ListSnapshotsRequest`.", "type": "string" }, "snapshots": { @@ -1230,7 +1266,7 @@ "id": "ListSubscriptionsResponse", "properties": { "nextPageToken": { - "description": "If not empty, indicates that there may be more subscriptions that match\nthe request; this value should be passed in a new\n`ListSubscriptionsRequest` to get more subscriptions.", + "description": "If not empty, indicates that there may be more subscriptions that match the request; this value should be passed in a new `ListSubscriptionsRequest` to get more subscriptions.", "type": "string" }, "subscriptions": { @@ -1248,7 +1284,7 @@ "id": "ListTopicSnapshotsResponse", "properties": { "nextPageToken": { - "description": "If not empty, indicates that there may be more snapshots that match\nthe request; this value should be passed in a new\n`ListTopicSnapshotsRequest` to get more snapshots.", + "description": "If not empty, indicates that there may be more snapshots that match the request; this value should be passed in a new `ListTopicSnapshotsRequest` to get more snapshots.", "type": "string" }, "snapshots": { @@ -1266,11 +1302,11 @@ "id": "ListTopicSubscriptionsResponse", "properties": { "nextPageToken": { - "description": "If not empty, indicates that there may be more subscriptions that match\nthe request; this value should be passed in a new\n`ListTopicSubscriptionsRequest` to get more subscriptions.", + "description": "If not empty, indicates that there may be more subscriptions that match the request; this value should be passed in a new `ListTopicSubscriptionsRequest` to get more subscriptions.", "type": "string" }, "subscriptions": { - "description": "The names of the subscriptions that match the request.", + "description": "The names of subscriptions attached to the topic specified in the request.", "items": { "type": "string" }, @@ -1284,7 +1320,7 @@ "id": "ListTopicsResponse", "properties": { "nextPageToken": { - "description": "If not empty, indicates that there may be more topics that match the\nrequest; this value should be passed in a new `ListTopicsRequest`.", + "description": "If not empty, indicates that there may be more topics that match the request; this value should be passed in a new `ListTopicsRequest`.", "type": "string" }, "topics": { @@ -1298,10 +1334,11 @@ "type": "object" }, "MessageStoragePolicy": { + "description": "A policy constraining the storage of messages published to the topic.", "id": "MessageStoragePolicy", "properties": { "allowedPersistenceRegions": { - "description": "A list of IDs of GCP regions where messages that are published to the topic\nmay be persisted in storage. Messages published by publishers running in\nnon-allowed GCP regions (or running outside of GCP altogether) will be\nrouted for storage in one of the allowed regions. An empty list means that\nno regions are allowed, and is not a valid configuration.", + "description": "A list of IDs of GCP regions where messages that are published to the topic may be persisted in storage. 
Messages published by publishers running in non-allowed GCP regions (or running outside of GCP altogether) will be routed for storage in one of the allowed regions. An empty list means that no regions are allowed, and is not a valid configuration.", "items": { "type": "string" }, @@ -1315,7 +1352,7 @@ "id": "ModifyAckDeadlineRequest", "properties": { "ackDeadlineSeconds": { - "description": "Required. The new ack deadline with respect to the time this request was sent to\nthe Pub/Sub system. For example, if the value is 10, the new\nack deadline will expire 10 seconds after the `ModifyAckDeadline` call\nwas made. Specifying zero might immediately make the message available for\ndelivery to another subscriber client. This typically results in an\nincrease in the rate of message redeliveries (that is, duplicates).\nThe minimum deadline you can specify is 0 seconds.\nThe maximum deadline you can specify is 600 seconds (10 minutes).", + "description": "Required. The new ack deadline with respect to the time this request was sent to the Pub/Sub system. For example, if the value is 10, the new ack deadline will expire 10 seconds after the `ModifyAckDeadline` call was made. Specifying zero might immediately make the message available for delivery to another subscriber client. This typically results in an increase in the rate of message redeliveries (that is, duplicates). The minimum deadline you can specify is 0 seconds. The maximum deadline you can specify is 600 seconds (10 minutes).", "format": "int32", "type": "integer" }, @@ -1335,44 +1372,44 @@ "properties": { "pushConfig": { "$ref": "PushConfig", - "description": "Required. The push configuration for future deliveries.\n\nAn empty `pushConfig` indicates that the Pub/Sub system should\nstop pushing messages from the given subscription and allow\nmessages to be pulled and acknowledged - effectively pausing\nthe subscription if `Pull` or `StreamingPull` is not called." + "description": "Required. The push configuration for future deliveries. An empty `pushConfig` indicates that the Pub/Sub system should stop pushing messages from the given subscription and allow messages to be pulled and acknowledged - effectively pausing the subscription if `Pull` or `StreamingPull` is not called." } }, "type": "object" }, "OidcToken": { - "description": "Contains information needed for generating an\n[OpenID Connect\ntoken](https://developers.google.com/identity/protocols/OpenIDConnect).", + "description": "Contains information needed for generating an [OpenID Connect token](https://developers.google.com/identity/protocols/OpenIDConnect).", "id": "OidcToken", "properties": { "audience": { - "description": "Audience to be used when generating OIDC token. The audience claim\nidentifies the recipients that the JWT is intended for. The audience\nvalue is a single case-sensitive string. Having multiple values (array)\nfor the audience field is not supported. More info about the OIDC JWT\ntoken audience here: https://tools.ietf.org/html/rfc7519#section-4.1.3\nNote: if not specified, the Push endpoint URL will be used.", + "description": "Audience to be used when generating OIDC token. The audience claim identifies the recipients that the JWT is intended for. The audience value is a single case-sensitive string. Having multiple values (array) for the audience field is not supported. 
More info about the OIDC JWT token audience here: https://tools.ietf.org/html/rfc7519#section-4.1.3 Note: if not specified, the Push endpoint URL will be used.", "type": "string" }, "serviceAccountEmail": { - "description": "[Service account\nemail](https://cloud.google.com/iam/docs/service-accounts)\nto be used for generating the OIDC token. The caller (for\nCreateSubscription, UpdateSubscription, and ModifyPushConfig RPCs) must\nhave the iam.serviceAccounts.actAs permission for the service account.", + "description": "[Service account email](https://cloud.google.com/iam/docs/service-accounts) to be used for generating the OIDC token. The caller (for CreateSubscription, UpdateSubscription, and ModifyPushConfig RPCs) must have the iam.serviceAccounts.actAs permission for the service account.", "type": "string" } }, "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access\ncontrols for Google Cloud resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). A `role` is a named list of\npermissions; each `role` can be an IAM predefined role or a user-created\ncustom role.\n\nFor some types of Google Cloud resources, a `binding` can also specify a\n`condition`, which is a logical expression that allows access to a resource\nonly if the expression evaluates to `true`. A condition can add constraints\nbased on attributes of the request, the resource, or both. To learn which\nresources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).\n\n**JSON example:**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\n \"user:eve@example.com\"\n ],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n }\n\n**YAML example:**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n - etag: BwWWja0YfJA=\n - version: 3\n\nFor a description of IAM and its features, see the\n[IAM documentation](https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. 
For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "bindings": { - "description": "Associates a list of `members` to a `role`. Optionally, may specify a\n`condition` that determines how and when the `bindings` are applied. Each\nof the `bindings` must contain at least one member.", + "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. 
**Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", "format": "byte", "type": "string" }, "version": { - "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. Requests that specify an invalid value\nare rejected.\n\nAny operation that affects conditional role bindings must specify version\n`3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding\n* Adding a conditional role binding to a policy\n* Changing a conditional role binding in a policy\n* Removing any role binding, with or without a condition, from a policy\n that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may\nspecify any valid version or leave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -1398,7 +1435,7 @@ "id": "PublishResponse", "properties": { "messageIds": { - "description": "The server-assigned ID of each published message, in the same order as\nthe messages in the request. IDs are guaranteed to be unique within\nthe topic.", + "description": "The server-assigned ID of each published message, in the same order as the messages in the request. IDs are guaranteed to be unique within the topic.", "items": { "type": "string" }, @@ -1408,27 +1445,31 @@ "type": "object" }, "PubsubMessage": { - "description": "A message that is published by publishers and consumed by subscribers. The\nmessage must contain either a non-empty data field or at least one attribute.\nNote that client libraries represent this object differently\ndepending on the language. See the corresponding\n\u003ca href=\"https://cloud.google.com/pubsub/docs/reference/libraries\"\u003eclient\nlibrary documentation\u003c/a\u003e for more information. 
See\n\u003ca href=\"https://cloud.google.com/pubsub/quotas\"\u003eQuotas and limits\u003c/a\u003e\nfor more information about message limits.", + "description": "A message that is published by publishers and consumed by subscribers. The message must contain either a non-empty data field or at least one attribute. Note that client libraries represent this object differently depending on the language. See the corresponding [client library documentation](https://cloud.google.com/pubsub/docs/reference/libraries) for more information. See [quotas and limits] (https://cloud.google.com/pubsub/quotas) for more information about message limits.", "id": "PubsubMessage", "properties": { "attributes": { "additionalProperties": { "type": "string" }, - "description": "Attributes for this message. If this field is empty, the message must\ncontain non-empty data.", + "description": "Attributes for this message. If this field is empty, the message must contain non-empty data. This can be used to filter messages on the subscription.", "type": "object" }, "data": { - "description": "The message data field. If this field is empty, the message must contain\nat least one attribute.", + "description": "The message data field. If this field is empty, the message must contain at least one attribute.", "format": "byte", "type": "string" }, "messageId": { - "description": "ID of this message, assigned by the server when the message is published.\nGuaranteed to be unique within the topic. This value may be read by a\nsubscriber that receives a `PubsubMessage` via a `Pull` call or a push\ndelivery. It must not be populated by the publisher in a `Publish` call.", + "description": "ID of this message, assigned by the server when the message is published. Guaranteed to be unique within the topic. This value may be read by a subscriber that receives a `PubsubMessage` via a `Pull` call or a push delivery. It must not be populated by the publisher in a `Publish` call.", + "type": "string" + }, + "orderingKey": { + "description": "If non-empty, identifies related messages for which publish order should be respected. If a `Subscription` has `enable_message_ordering` set to `true`, messages published with the same non-empty `ordering_key` value will be delivered to subscribers in the order in which they are received by the Pub/Sub system. All `PubsubMessage`s published in a given `PublishRequest` must specify the same `ordering_key` value.", "type": "string" }, "publishTime": { - "description": "The time at which the message was published, populated by the server when\nit receives the `Publish` call. It must not be populated by the\npublisher in a `Publish` call.", + "description": "The time at which the message was published, populated by the server when it receives the `Publish` call. It must not be populated by the publisher in a `Publish` call.", "format": "google-datetime", "type": "string" } @@ -1440,12 +1481,12 @@ "id": "PullRequest", "properties": { "maxMessages": { - "description": "Required. The maximum number of messages to return for this request. Must be a\npositive integer. The Pub/Sub system may return fewer than the number\nspecified.", + "description": "Required. The maximum number of messages to return for this request. Must be a positive integer. The Pub/Sub system may return fewer than the number specified.", "format": "int32", "type": "integer" }, "returnImmediately": { - "description": "Optional. 
If this field set to true, the system will respond immediately even if\nit there are no messages available to return in the `Pull` response.\nOtherwise, the system may wait (for a bounded amount of time) until at\nleast one message is available, rather than returning no messages. Warning:\nsetting this field to `true` is discouraged because it adversely impacts\nthe performance of `Pull` operations. We recommend that users do not set\nthis field.", + "description": "Optional. If this field set to true, the system will respond immediately even if it there are no messages available to return in the `Pull` response. Otherwise, the system may wait (for a bounded amount of time) until at least one message is available, rather than returning no messages. Warning: setting this field to `true` is discouraged because it adversely impacts the performance of `Pull` operations. We recommend that users do not set this field.", "type": "boolean" } }, @@ -1456,7 +1497,7 @@ "id": "PullResponse", "properties": { "receivedMessages": { - "description": "Received Pub/Sub messages. The list will be empty if there are no more\nmessages available in the backlog. For JSON, the response can be entirely\nempty. The Pub/Sub system may return fewer than the `maxMessages` requested\neven if there are more messages available in the backlog.", + "description": "Received Pub/Sub messages. The list will be empty if there are no more messages available in the backlog. For JSON, the response can be entirely empty. The Pub/Sub system may return fewer than the `maxMessages` requested even if there are more messages available in the backlog.", "items": { "$ref": "ReceivedMessage" }, @@ -1473,15 +1514,15 @@ "additionalProperties": { "type": "string" }, - "description": "Endpoint configuration attributes that can be used to control different\naspects of the message delivery.\n\nThe only currently supported attribute is `x-goog-version`, which you can\nuse to change the format of the pushed message. This attribute\nindicates the version of the data expected by the endpoint. This\ncontrols the shape of the pushed message (i.e., its fields and metadata).\n\nIf not present during the `CreateSubscription` call, it will default to\nthe version of the Pub/Sub API used to make such call. If not present in a\n`ModifyPushConfig` call, its value will not be changed. `GetSubscription`\ncalls will always return a valid version, even if the subscription was\ncreated without this attribute.\n\nThe only supported values for the `x-goog-version` attribute are:\n\n* `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API.\n* `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API.\n\nFor example:\n\u003cpre\u003e\u003ccode\u003eattributes { \"x-goog-version\": \"v1\" } \u003c/code\u003e\u003c/pre\u003e", + "description": "Endpoint configuration attributes that can be used to control different aspects of the message delivery. The only currently supported attribute is `x-goog-version`, which you can use to change the format of the pushed message. This attribute indicates the version of the data expected by the endpoint. This controls the shape of the pushed message (i.e., its fields and metadata). If not present during the `CreateSubscription` call, it will default to the version of the Pub/Sub API used to make such call. If not present in a `ModifyPushConfig` call, its value will not be changed. `GetSubscription` calls will always return a valid version, even if the subscription was created without this attribute. 
The only supported values for the `x-goog-version` attribute are: * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API. * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API. For example: attributes { \"x-goog-version\": \"v1\" } ", "type": "object" }, "oidcToken": { "$ref": "OidcToken", - "description": "If specified, Pub/Sub will generate and attach an OIDC JWT token as an\n`Authorization` header in the HTTP request for every pushed message." + "description": "If specified, Pub/Sub will generate and attach an OIDC JWT token as an `Authorization` header in the HTTP request for every pushed message." }, "pushEndpoint": { - "description": "A URL locating the endpoint to which messages should be pushed.\nFor example, a Webhook endpoint might use `https://example.com/push`.", + "description": "A URL locating the endpoint to which messages should be pushed. For example, a Webhook endpoint might use `https://example.com/push`.", "type": "string" } }, @@ -1496,7 +1537,7 @@ "type": "string" }, "deliveryAttempt": { - "description": "The approximate number of times that Cloud Pub/Sub has attempted to deliver\nthe associated message to a subscriber.\n\nMore precisely, this is 1 + (number of NACKs) +\n(number of ack_deadline exceeds) for this message.\n\nA NACK is any call to ModifyAckDeadline with a 0 deadline. An ack_deadline\nexceeds event is whenever a message is not acknowledged within\nack_deadline. Note that ack_deadline is initially\nSubscription.ackDeadlineSeconds, but may get extended automatically by\nthe client library.\n\nUpon the first delivery of a given message, `delivery_attempt` will have a\nvalue of 1. The value is calculated at best effort and is approximate.\n\nIf a DeadLetterPolicy is not set on the subscription, this will be 0.", + "description": "The approximate number of times that Cloud Pub/Sub has attempted to deliver the associated message to a subscriber. More precisely, this is 1 + (number of NACKs) + (number of ack_deadline exceeds) for this message. A NACK is any call to ModifyAckDeadline with a 0 deadline. An ack_deadline exceeds event is whenever a message is not acknowledged within ack_deadline. Note that ack_deadline is initially Subscription.ackDeadlineSeconds, but may get extended automatically by the client library. Upon the first delivery of a given message, `delivery_attempt` will have a value of 1. The value is calculated at best effort and is approximate. If a DeadLetterPolicy is not set on the subscription, this will be 0.", "format": "int32", "type": "integer" }, @@ -1507,16 +1548,33 @@ }, "type": "object" }, + "RetryPolicy": { + "description": "A policy that specifies how Cloud Pub/Sub retries message delivery. Retry delay will be exponential based on provided minimum and maximum backoffs. https://en.wikipedia.org/wiki/Exponential_backoff. RetryPolicy will be triggered on NACKs or acknowledgement deadline exceeded events for a given message. Retry Policy is implemented on a best effort basis. At times, the delay between consecutive deliveries may not match the configuration. That is, delay can be more or less than configured backoff.", + "id": "RetryPolicy", + "properties": { + "maximumBackoff": { + "description": "The maximum delay between consecutive deliveries of a given message. Value should be between 0 and 600 seconds. Defaults to 600 seconds.", + "format": "google-duration", + "type": "string" + }, + "minimumBackoff": { + "description": "The minimum delay between consecutive deliveries of a given message. 
Value should be between 0 and 600 seconds. Defaults to 10 seconds.", + "format": "google-duration", + "type": "string" + } + }, + "type": "object" + }, "SeekRequest": { "description": "Request for the `Seek` method.", "id": "SeekRequest", "properties": { "snapshot": { - "description": "The snapshot to seek to. The snapshot's topic must be the same as that of\nthe provided subscription.\nFormat is `projects/{project}/snapshots/{snap}`.", + "description": "The snapshot to seek to. The snapshot's topic must be the same as that of the provided subscription. Format is `projects/{project}/snapshots/{snap}`.", "type": "string" }, "time": { - "description": "The time to seek to.\nMessages retained in the subscription that were published before this\ntime are marked as acknowledged, and messages retained in the\nsubscription that were published after this time are marked as\nunacknowledged. Note that this operation affects only those messages\nretained in the subscription (configured by the combination of\n`message_retention_duration` and `retain_acked_messages`). For example,\nif `time` corresponds to a point before the message retention\nwindow (or to a point before the system's notion of the subscription\ncreation time), only retained messages will be marked as unacknowledged,\nand already-expunged messages will not be restored.", + "description": "The time to seek to. Messages retained in the subscription that were published before this time are marked as acknowledged, and messages retained in the subscription that were published after this time are marked as unacknowledged. Note that this operation affects only those messages retained in the subscription (configured by the combination of `message_retention_duration` and `retain_acked_messages`). For example, if `time` corresponds to a point before the message retention window (or to a point before the system's notion of the subscription creation time), only retained messages will be marked as unacknowledged, and already-expunged messages will not be restored.", "format": "google-datetime", "type": "string" } @@ -1535,17 +1593,17 @@ "properties": { "policy": { "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." } }, "type": "object" }, "Snapshot": { - "description": "A snapshot resource. Snapshots are used in\n\u003ca href=\"https://cloud.google.com/pubsub/docs/replay-overview\"\u003eSeek\u003c/a\u003e\noperations, which allow\nyou to manage message acknowledgments in bulk. That is, you can set the\nacknowledgment state of messages in an existing subscription to the state\ncaptured by a snapshot.", + "description": "A snapshot resource. Snapshots are used in [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. 
That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot.", "id": "Snapshot", "properties": { "expireTime": { - "description": "The snapshot is guaranteed to exist up until this time.\nA newly-created snapshot expires no later than 7 days from the time of its\ncreation. Its exact lifetime is determined at creation by the existing\nbacklog in the source subscription. Specifically, the lifetime of the\nsnapshot is `7 days - (age of oldest unacked message in the subscription)`.\nFor example, consider a subscription whose oldest unacked message is 3 days\nold. If a snapshot is created from this subscription, the snapshot -- which\nwill always capture this 3-day-old backlog as long as the snapshot\nexists -- will expire in 4 days. The service will refuse to create a\nsnapshot that would expire in less than 1 hour after creation.", + "description": "The snapshot is guaranteed to exist up until this time. A newly-created snapshot expires no later than 7 days from the time of its creation. Its exact lifetime is determined at creation by the existing backlog in the source subscription. Specifically, the lifetime of the snapshot is `7 days - (age of oldest unacked message in the subscription)`. For example, consider a subscription whose oldest unacked message is 3 days old. If a snapshot is created from this subscription, the snapshot -- which will always capture this 3-day-old backlog as long as the snapshot exists -- will expire in 4 days. The service will refuse to create a snapshot that would expire in less than 1 hour after creation.", "format": "google-datetime", "type": "string" }, @@ -1553,7 +1611,7 @@ "additionalProperties": { "type": "string" }, - "description": "See \u003ca href=\"https://cloud.google.com/pubsub/docs/labels\"\u003e Creating and\nmanaging labels\u003c/a\u003e.", + "description": "See [Creating and managing labels] (https://cloud.google.com/pubsub/docs/labels).", "type": "object" }, "name": { @@ -1572,48 +1630,60 @@ "id": "Subscription", "properties": { "ackDeadlineSeconds": { - "description": "The approximate amount of time (on a best-effort basis) Pub/Sub waits for\nthe subscriber to acknowledge receipt before resending the message. In the\ninterval after the message is delivered and before it is acknowledged, it\nis considered to be \u003ci\u003eoutstanding\u003c/i\u003e. During that time period, the\nmessage will not be redelivered (on a best-effort basis).\n\nFor pull subscriptions, this value is used as the initial value for the ack\ndeadline. To override this value for a given message, call\n`ModifyAckDeadline` with the corresponding `ack_id` if using\nnon-streaming pull or send the `ack_id` in a\n`StreamingModifyAckDeadlineRequest` if using streaming pull.\nThe minimum custom deadline you can specify is 10 seconds.\nThe maximum custom deadline you can specify is 600 seconds (10 minutes).\nIf this parameter is 0, a default value of 10 seconds is used.\n\nFor push delivery, this value is also used to set the request timeout for\nthe call to the push endpoint.\n\nIf the subscriber never acknowledges the message, the Pub/Sub\nsystem will eventually redeliver the message.", + "description": "The approximate amount of time (on a best-effort basis) Pub/Sub waits for the subscriber to acknowledge receipt before resending the message. In the interval after the message is delivered and before it is acknowledged, it is considered to be *outstanding*. 
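For orientation on the snapshot and seek semantics described in the `Snapshot` and `SeekRequest` schemas above, here is a minimal, hedged Go sketch using the generated `google.golang.org/api/pubsub/v1` client vendored in this diff. The project, subscription, and snapshot names are placeholders, and Application Default Credentials are assumed; this is an illustration of the call shape, not part of the vendored files.

```go
package main

import (
	"context"
	"log"

	pubsub "google.golang.org/api/pubsub/v1"
)

func main() {
	ctx := context.Background()

	// Assumes Application Default Credentials; NewService wires the default endpoints.
	svc, err := pubsub.NewService(ctx)
	if err != nil {
		log.Fatalf("pubsub.NewService: %v", err)
	}

	sub := "projects/my-project/subscriptions/my-sub"       // placeholder
	snapName := "projects/my-project/snapshots/my-snapshot" // placeholder

	// Create a snapshot capturing the subscription's current backlog.
	// Per the description above, it expires no later than 7 days after creation.
	snap, err := svc.Projects.Snapshots.Create(snapName, &pubsub.CreateSnapshotRequest{
		Subscription: sub,
	}).Context(ctx).Do()
	if err != nil {
		log.Fatalf("Snapshots.Create: %v", err)
	}
	log.Printf("snapshot %s expires at %s", snap.Name, snap.ExpireTime)

	// Later, rewind the subscription to the state captured by the snapshot.
	if _, err := svc.Projects.Subscriptions.Seek(sub, &pubsub.SeekRequest{
		Snapshot: snap.Name,
	}).Context(ctx).Do(); err != nil {
		log.Fatalf("Subscriptions.Seek: %v", err)
	}
}
```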
During that time period, the message will not be redelivered (on a best-effort basis). For pull subscriptions, this value is used as the initial value for the ack deadline. To override this value for a given message, call `ModifyAckDeadline` with the corresponding `ack_id` if using non-streaming pull or send the `ack_id` in a `StreamingModifyAckDeadlineRequest` if using streaming pull. The minimum custom deadline you can specify is 10 seconds. The maximum custom deadline you can specify is 600 seconds (10 minutes). If this parameter is 0, a default value of 10 seconds is used. For push delivery, this value is also used to set the request timeout for the call to the push endpoint. If the subscriber never acknowledges the message, the Pub/Sub system will eventually redeliver the message.", "format": "int32", "type": "integer" }, "deadLetterPolicy": { "$ref": "DeadLetterPolicy", - "description": "A policy that specifies the conditions for dead lettering messages in\nthis subscription. If dead_letter_policy is not set, dead lettering\nis disabled.\n\nThe Cloud Pub/Sub service account associated with this subscriptions's\nparent project (i.e.,\nservice-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have\npermission to Acknowledge() messages on this subscription." + "description": "A policy that specifies the conditions for dead lettering messages in this subscription. If dead_letter_policy is not set, dead lettering is disabled. The Cloud Pub/Sub service account associated with this subscriptions's parent project (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have permission to Acknowledge() messages on this subscription." + }, + "detached": { + "description": "Indicates whether the subscription is detached from its topic. Detached subscriptions don't receive messages from their topic and don't retain any backlog. `Pull` and `StreamingPull` requests will return FAILED_PRECONDITION. If the subscription is a push subscription, pushes to the endpoint will not be made.", + "type": "boolean" + }, + "enableMessageOrdering": { + "description": "If true, messages published with the same `ordering_key` in `PubsubMessage` will be delivered to the subscribers in the order in which they are received by the Pub/Sub system. Otherwise, they may be delivered in any order.", + "type": "boolean" }, "expirationPolicy": { "$ref": "ExpirationPolicy", - "description": "A policy that specifies the conditions for this subscription's expiration.\nA subscription is considered active as long as any connected subscriber is\nsuccessfully consuming messages from the subscription or is issuing\noperations on the subscription. If `expiration_policy` is not set, a\n*default policy* with `ttl` of 31 days will be used. The minimum allowed\nvalue for `expiration_policy.ttl` is 1 day." + "description": "A policy that specifies the conditions for this subscription's expiration. A subscription is considered active as long as any connected subscriber is successfully consuming messages from the subscription or is issuing operations on the subscription. If `expiration_policy` is not set, a *default policy* with `ttl` of 31 days will be used. The minimum allowed value for `expiration_policy.ttl` is 1 day." }, "filter": { - "description": "An expression written in the Cloud Pub/Sub filter language. If non-empty,\nthen only `PubsubMessage`s whose `attributes` field matches the filter are\ndelivered on this subscription. 
If empty, then no messages are filtered\nout.\n\u003cb\u003eEXPERIMENTAL:\u003c/b\u003e This feature is part of a closed alpha release. This\nAPI might be changed in backward-incompatible ways and is not recommended\nfor production use. It is not subject to any SLA or deprecation policy.", + "description": "An expression written in the Pub/Sub [filter language](https://cloud.google.com/pubsub/docs/filtering). If non-empty, then only `PubsubMessage`s whose `attributes` field matches the filter are delivered on this subscription. If empty, then no messages are filtered out.", "type": "string" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "See \u003ca href=\"https://cloud.google.com/pubsub/docs/labels\"\u003e Creating and\nmanaging labels\u003c/a\u003e.", + "description": "See Creating and managing labels.", "type": "object" }, "messageRetentionDuration": { - "description": "How long to retain unacknowledged messages in the subscription's backlog,\nfrom the moment a message is published.\nIf `retain_acked_messages` is true, then this also configures the retention\nof acknowledged messages, and thus configures how far back in time a `Seek`\ncan be done. Defaults to 7 days. Cannot be more than 7 days or less than 10\nminutes.", + "description": "How long to retain unacknowledged messages in the subscription's backlog, from the moment a message is published. If `retain_acked_messages` is true, then this also configures the retention of acknowledged messages, and thus configures how far back in time a `Seek` can be done. Defaults to 7 days. Cannot be more than 7 days or less than 10 minutes.", "format": "google-duration", "type": "string" }, "name": { - "description": "Required. The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", + "description": "Required. The name of the subscription. It must have the format `\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", "type": "string" }, "pushConfig": { "$ref": "PushConfig", - "description": "If push delivery is used with this subscription, this field is\nused to configure it. An empty `pushConfig` signifies that the subscriber\nwill pull and ack messages using API methods." + "description": "If push delivery is used with this subscription, this field is used to configure it. An empty `pushConfig` signifies that the subscriber will pull and ack messages using API methods." }, "retainAckedMessages": { - "description": "Indicates whether to retain acknowledged messages. If true, then\nmessages are not expunged from the subscription's backlog, even if they are\nacknowledged, until they fall out of the `message_retention_duration`\nwindow. This must be true if you would like to\n\u003ca\nhref=\"https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time\"\u003e\nSeek to a timestamp\u003c/a\u003e.", + "description": "Indicates whether to retain acknowledged messages. 
If true, then messages are not expunged from the subscription's backlog, even if they are acknowledged, until they fall out of the `message_retention_duration` window. This must be true if you would like to [Seek to a timestamp] (https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time).", "type": "boolean" }, + "retryPolicy": { + "$ref": "RetryPolicy", + "description": "A policy that specifies how Pub/Sub retries message delivery for this subscription. If not set, the default retry policy is applied. This generally implies that messages will be retried as soon as possible for healthy subscribers. RetryPolicy will be triggered on NACKs or acknowledgement deadline exceeded events for a given message." + }, "topic": { - "description": "Required. The name of the topic from which this subscription is receiving messages.\nFormat is `projects/{project}/topics/{topic}`.\nThe value of this field will be `_deleted-topic_` if the topic has been\ndeleted.", + "description": "Required. The name of the topic from which this subscription is receiving messages. Format is `projects/{project}/topics/{topic}`. The value of this field will be `_deleted-topic_` if the topic has been deleted.", "type": "string" } }, @@ -1624,7 +1694,7 @@ "id": "TestIamPermissionsRequest", "properties": { "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "items": { "type": "string" }, @@ -1638,7 +1708,7 @@ "id": "TestIamPermissionsResponse", "properties": { "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { "type": "string" }, @@ -1652,22 +1722,22 @@ "id": "Topic", "properties": { "kmsKeyName": { - "description": "The resource name of the Cloud KMS CryptoKey to be used to protect access\nto messages published on this topic.\n\nThe expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*`.", + "description": "The resource name of the Cloud KMS CryptoKey to be used to protect access to messages published on this topic. The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*`.", "type": "string" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "See \u003ca href=\"https://cloud.google.com/pubsub/docs/labels\"\u003e Creating and\nmanaging labels\u003c/a\u003e.", + "description": "See [Creating and managing labels] (https://cloud.google.com/pubsub/docs/labels).", "type": "object" }, "messageStoragePolicy": { "$ref": "MessageStoragePolicy", - "description": "Policy constraining the set of Google Cloud Platform regions where messages\npublished to the topic may be stored. If not present, then no constraints\nare in effect." + "description": "Policy constraining the set of Google Cloud Platform regions where messages published to the topic may be stored. If not present, then no constraints are in effect." }, "name": { - "description": "Required. The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. 
`{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", + "description": "Required. The name of the topic. It must have the format `\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", "type": "string" } }, @@ -1682,7 +1752,7 @@ "description": "Required. The updated snapshot object." }, "updateMask": { - "description": "Required. Indicates which fields in the provided snapshot to update.\nMust be specified and non-empty.", + "description": "Required. Indicates which fields in the provided snapshot to update. Must be specified and non-empty.", "format": "google-fieldmask", "type": "string" } @@ -1698,7 +1768,7 @@ "description": "Required. The updated subscription object." }, "updateMask": { - "description": "Required. Indicates which fields in the provided subscription to update.\nMust be specified and non-empty.", + "description": "Required. Indicates which fields in the provided subscription to update. Must be specified and non-empty.", "format": "google-fieldmask", "type": "string" } @@ -1714,7 +1784,7 @@ "description": "Required. The updated topic object." }, "updateMask": { - "description": "Required. Indicates which fields in the provided topic to update. Must be specified\nand non-empty. Note that if `update_mask` contains\n\"message_storage_policy\" but the `message_storage_policy` is not set in\nthe `topic` provided above, then the updated value is determined by the\npolicy configured at the project or organization level.", + "description": "Required. Indicates which fields in the provided topic to update. Must be specified and non-empty. Note that if `update_mask` contains \"message_storage_policy\" but the `message_storage_policy` is not set in the `topic` provided above, then the updated value is determined by the policy configured at the project or organization level.", "format": "google-fieldmask", "type": "string" } diff --git a/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go b/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go index 3266dcab1d2..e0d9f15bde7 100644 --- a/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go +++ b/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go @@ -81,6 +81,7 @@ const apiId = "pubsub:v1" const apiName = "pubsub" const apiVersion = "v1" const basePath = "https://pubsub.googleapis.com/" +const mtlsBasePath = "https://pubsub.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -100,6 +101,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -215,8 +217,8 @@ type ProjectsTopicsSubscriptionsService struct { // AcknowledgeRequest: Request for the Acknowledge method. type AcknowledgeRequest struct { // AckIds: Required. 
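The `Update*Request` shapes above pair the updated resource with a required `updateMask`, and the `pubsub-gen.go` hunk here adds the mTLS endpoint wiring to `NewService`. As a rough sketch under the assumption of the usual generated call surface (`Projects.Topics.Patch`), updating only a topic's message storage policy might look like the following; the resource name and region are placeholders.

```go
package main

import (
	"context"
	"log"

	pubsub "google.golang.org/api/pubsub/v1"
)

func main() {
	ctx := context.Background()
	// NewService prepends the default scopes and sets the default (and mTLS) endpoints.
	svc, err := pubsub.NewService(ctx)
	if err != nil {
		log.Fatalf("pubsub.NewService: %v", err)
	}

	topicName := "projects/my-project/topics/my-topic" // placeholder

	// Only the fields named in UpdateMask are changed; everything else is left as-is.
	req := &pubsub.UpdateTopicRequest{
		Topic: &pubsub.Topic{
			Name: topicName,
			MessageStoragePolicy: &pubsub.MessageStoragePolicy{
				AllowedPersistenceRegions: []string{"us-central1"},
			},
		},
		UpdateMask: "message_storage_policy",
	}
	topic, err := svc.Projects.Topics.Patch(topicName, req).Context(ctx).Do()
	if err != nil {
		log.Fatalf("Topics.Patch: %v", err)
	}
	log.Printf("updated topic %s", topic.Name)
}
```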
The acknowledgment ID for the messages being - // acknowledged that was returned - // by the Pub/Sub system in the `Pull` response. Must not be empty. + // acknowledged that was returned by the Pub/Sub system in the `Pull` + // response. Must not be empty. AckIds []string `json:"ackIds,omitempty"` // ForceSendFields is a list of field names (e.g. "AckIds") to @@ -244,98 +246,60 @@ func (s *AcknowledgeRequest) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: The condition that is associated with this binding. - // - // If the condition evaluates to `true`, then this binding applies to - // the - // current request. - // - // If the condition evaluates to `false`, then this binding does not - // apply to - // the current request. However, a different role binding might grant - // the same - // role to one or more of the members in this binding. - // - // To learn which resources support conditions in their IAM policies, - // see - // the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/r - // esource-policies). + // BindingId: A client-specified ID for this binding. Expected to be + // globally unique to support the internal bindings-by-ID API. + BindingId string `json:"bindingId,omitempty"` + + // Condition: The condition that is associated with this binding. If the + // condition evaluates to `true`, then this binding applies to the + // current request. If the condition evaluates to `false`, then this + // binding does not apply to the current request. However, a different + // role binding might grant the same role to one or more of the members + // in this binding. To learn which resources support conditions in their + // IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud - // Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@example.com` . - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. - // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a user that has been recently deleted. - // For - // example, `alice@example.com?uid=123456789012345678901`. If the - // user is - // recovered, this value reverts to `user:{emailid}` and the - // recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address - // (plus - // unique identifier) representing a service account that has been - // recently - // deleted. For example, - // + // Platform resource. `members` can have the following values: * + // `allUsers`: A special identifier that represents anyone who is on the + // internet; with or without a Google account. 
* + // `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. * + // `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . * + // `serviceAccount:{emailid}`: An email address that represents a + // service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An + // email address that represents a Google group. For example, + // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An + // email address (plus unique identifier) representing a user that has + // been recently deleted. For example, + // `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered + // user retains the role in the binding. * + // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account - // retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a Google group that has been recently - // deleted. For example, - // `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` - // and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. - // - // + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains + // the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: + // An email address (plus unique identifier) representing a Google group + // that has been recently deleted. For example, + // `admins@example.com?uid=123456789012345678901`. If the group is + // recovered, this value reverts to `group:{emailid}` and the recovered + // group retains the role in the binding. * `domain:{domain}`: The G + // Suite domain (primary) that represents all the users of that domain. + // For example, `google.com` or `example.com`. Members []string `json:"members,omitempty"` - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Role: Role that is assigned to `members`. For example, + // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` - // ForceSendFields is a list of field names (e.g. "Condition") to + // ForceSendFields is a list of field names (e.g. "BindingId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -343,7 +307,7 @@ type Binding struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Condition") to include in + // NullFields is a list of field names (e.g. "BindingId") to include in // API requests with the JSON null value. 
By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -360,23 +324,17 @@ func (s *Binding) MarshalJSON() ([]byte, error) { // CreateSnapshotRequest: Request for the `CreateSnapshot` method. type CreateSnapshotRequest struct { - // Labels: See - // Creating and - // managing labels. + // Labels: See Creating and managing labels. Labels map[string]string `json:"labels,omitempty"` // Subscription: Required. The subscription whose backlog the snapshot - // retains. - // Specifically, the created snapshot is guaranteed to retain: - // (a) The existing backlog on the subscription. More precisely, this - // is - // defined as the messages in the subscription's backlog that are - // unacknowledged upon the successful completion of the - // `CreateSnapshot` request; as well as: - // (b) Any messages published to the subscription's topic following - // the - // successful completion of the CreateSnapshot request. - // Format is `projects/{project}/subscriptions/{sub}`. + // retains. Specifically, the created snapshot is guaranteed to retain: + // (a) The existing backlog on the subscription. More precisely, this is + // defined as the messages in the subscription's backlog that are + // unacknowledged upon the successful completion of the `CreateSnapshot` + // request; as well as: (b) Any messages published to the subscription's + // topic following the successful completion of the CreateSnapshot + // request. Format is `projects/{project}/subscriptions/{sub}`. Subscription string `json:"subscription,omitempty"` // ForceSendFields is a list of field names (e.g. "Labels") to @@ -403,46 +361,29 @@ func (s *CreateSnapshotRequest) MarshalJSON() ([]byte, error) { } // DeadLetterPolicy: Dead lettering is done on a best effort basis. The -// same message might be -// dead lettered multiple times. -// -// If validation on any of the fields fails at subscription -// creation/updation, -// the create/update subscription request will fail. +// same message might be dead lettered multiple times. If validation on +// any of the fields fails at subscription creation/updation, the +// create/update subscription request will fail. type DeadLetterPolicy struct { // DeadLetterTopic: The name of the topic to which dead letter messages - // should be published. - // Format is `projects/{project}/topics/{topic}`.The Cloud Pub/Sub - // service - // account associated with the enclosing subscription's parent project - // (i.e., - // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) - // must have - // permission to Publish() to this topic. - // - // The operation will fail if the topic does not exist. - // Users should ensure that there is a subscription attached to this - // topic - // since messages published to a topic with no subscriptions are lost. + // should be published. Format is + // `projects/{project}/topics/{topic}`.The Cloud Pub/Sub service account + // associated with the enclosing subscription's parent project (i.e., + // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must + // have permission to Publish() to this topic. The operation will fail + // if the topic does not exist. Users should ensure that there is a + // subscription attached to this topic since messages published to a + // topic with no subscriptions are lost. 
DeadLetterTopic string `json:"deadLetterTopic,omitempty"` // MaxDeliveryAttempts: The maximum number of delivery attempts for any - // message. The value must be - // between 5 and 100. - // - // The number of delivery attempts is defined as 1 + (the sum of number - // of - // NACKs and number of times the acknowledgement deadline has been - // exceeded - // for the message). - // - // A NACK is any call to ModifyAckDeadline with a 0 deadline. Note - // that - // client libraries may automatically extend ack_deadlines. - // - // This field will be honored on a best effort basis. - // - // If this parameter is 0, a default value of 5 is used. + // message. The value must be between 5 and 100. The number of delivery + // attempts is defined as 1 + (the sum of number of NACKs and number of + // times the acknowledgement deadline has been exceeded for the + // message). A NACK is any call to ModifyAckDeadline with a 0 deadline. + // Note that client libraries may automatically extend ack_deadlines. + // This field will be honored on a best effort basis. If this parameter + // is 0, a default value of 5 is used. MaxDeliveryAttempts int64 `json:"maxDeliveryAttempts,omitempty"` // ForceSendFields is a list of field names (e.g. "DeadLetterTopic") to @@ -469,18 +410,20 @@ func (s *DeadLetterPolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DetachSubscriptionResponse: Response for the DetachSubscription +// method. Reserved for future use. +type DetachSubscriptionResponse struct { + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` +} + // Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -488,20 +431,14 @@ type Empty struct { } // ExpirationPolicy: A policy that specifies the conditions for resource -// expiration (i.e., -// automatic resource deletion). +// expiration (i.e., automatic resource deletion). type ExpirationPolicy struct { // Ttl: Specifies the "time-to-live" duration for an associated - // resource. The - // resource expires if it is not active for a period of `ttl`. The - // definition - // of "activity" depends on the type of the associated resource. The - // minimum - // and maximum allowed values for `ttl` depend on the type of the - // associated - // resource, as well. If `ttl` is not set, the associated resource - // never - // expires. + // resource. The resource expires if it is not active for a period of + // `ttl`. The definition of "activity" depends on the type of the + // associated resource. The minimum and maximum allowed values for `ttl` + // depend on the type of the associated resource, as well. If `ttl` is + // not set, the associated resource never expires. 
Ttl string `json:"ttl,omitempty"` // ForceSendFields is a list of field names (e.g. "Ttl") to @@ -528,65 +465,40 @@ func (s *ExpirationPolicy) MarshalJSON() ([]byte, error) { } // Expr: Represents a textual expression in the Common Expression -// Language (CEL) -// syntax. CEL is a C-like expression language. The syntax and semantics -// of CEL -// are documented at https://github.com/google/cel-spec. -// -// Example (Comparison): -// -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" -// -// Example (Equality): -// -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == -// request.auth.claims.email" -// -// Example (Logic): -// -// title: "Public documents" -// description: "Determine whether the document should be publicly -// visible" -// expression: "document.type != 'private' && document.type != -// 'internal'" -// -// Example (Data Manipulation): -// -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + -// string(document.create_time)" -// -// The exact variables and functions that may be referenced within an -// expression -// are determined by the service that evaluates it. See the -// service -// documentation for additional information. +// Language (CEL) syntax. CEL is a C-like expression language. The +// syntax and semantics of CEL are documented at +// https://github.com/google/cel-spec. Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" +// description: "Determine whether the document should be publicly +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. type Expr struct { // Description: Optional. Description of the expression. This is a - // longer text which - // describes the expression, e.g. when hovered over it in a UI. + // longer text which describes the expression, e.g. when hovered over it + // in a UI. Description string `json:"description,omitempty"` // Expression: Textual representation of an expression in Common - // Expression Language - // syntax. + // Expression Language syntax. Expression string `json:"expression,omitempty"` // Location: Optional. String indicating the location of the expression - // for error - // reporting, e.g. a file name and a position in the file. + // for error reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` // Title: Optional. Title for the expression, i.e. a short string - // describing - // its purpose. This can be used e.g. in UIs which allow to enter - // the - // expression. + // describing its purpose. This can be used e.g. 
in UIs which allow to + // enter the expression. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -615,8 +527,8 @@ func (s *Expr) MarshalJSON() ([]byte, error) { // ListSnapshotsResponse: Response for the `ListSnapshots` method. type ListSnapshotsResponse struct { // NextPageToken: If not empty, indicates that there may be more - // snapshot that match the - // request; this value should be passed in a new `ListSnapshotsRequest`. + // snapshot that match the request; this value should be passed in a new + // `ListSnapshotsRequest`. NextPageToken string `json:"nextPageToken,omitempty"` // Snapshots: The resulting snapshots. @@ -653,10 +565,8 @@ func (s *ListSnapshotsResponse) MarshalJSON() ([]byte, error) { // method. type ListSubscriptionsResponse struct { // NextPageToken: If not empty, indicates that there may be more - // subscriptions that match - // the request; this value should be passed in a - // new - // `ListSubscriptionsRequest` to get more subscriptions. + // subscriptions that match the request; this value should be passed in + // a new `ListSubscriptionsRequest` to get more subscriptions. NextPageToken string `json:"nextPageToken,omitempty"` // Subscriptions: The subscriptions that match the request. @@ -693,10 +603,8 @@ func (s *ListSubscriptionsResponse) MarshalJSON() ([]byte, error) { // method. type ListTopicSnapshotsResponse struct { // NextPageToken: If not empty, indicates that there may be more - // snapshots that match - // the request; this value should be passed in a - // new - // `ListTopicSnapshotsRequest` to get more snapshots. + // snapshots that match the request; this value should be passed in a + // new `ListTopicSnapshotsRequest` to get more snapshots. NextPageToken string `json:"nextPageToken,omitempty"` // Snapshots: The names of the snapshots that match the request. @@ -733,13 +641,12 @@ func (s *ListTopicSnapshotsResponse) MarshalJSON() ([]byte, error) { // `ListTopicSubscriptions` method. type ListTopicSubscriptionsResponse struct { // NextPageToken: If not empty, indicates that there may be more - // subscriptions that match - // the request; this value should be passed in a - // new - // `ListTopicSubscriptionsRequest` to get more subscriptions. + // subscriptions that match the request; this value should be passed in + // a new `ListTopicSubscriptionsRequest` to get more subscriptions. NextPageToken string `json:"nextPageToken,omitempty"` - // Subscriptions: The names of the subscriptions that match the request. + // Subscriptions: The names of subscriptions attached to the topic + // specified in the request. Subscriptions []string `json:"subscriptions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -772,8 +679,8 @@ func (s *ListTopicSubscriptionsResponse) MarshalJSON() ([]byte, error) { // ListTopicsResponse: Response for the `ListTopics` method. type ListTopicsResponse struct { // NextPageToken: If not empty, indicates that there may be more topics - // that match the - // request; this value should be passed in a new `ListTopicsRequest`. + // that match the request; this value should be passed in a new + // `ListTopicsRequest`. NextPageToken string `json:"nextPageToken,omitempty"` // Topics: The resulting topics. 
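The `Binding` and `SetIamPolicyRequest` types reworded above follow the standard IAM read-modify-write pattern: fetch the policy (which carries an etag), append a binding, and write it back. A minimal sketch, assuming the generated `GetIamPolicy`/`SetIamPolicy` calls on topics and placeholder resource and member names:

```go
package main

import (
	"context"
	"log"

	pubsub "google.golang.org/api/pubsub/v1"
)

func main() {
	ctx := context.Background()
	svc, err := pubsub.NewService(ctx)
	if err != nil {
		log.Fatalf("pubsub.NewService: %v", err)
	}

	topic := "projects/my-project/topics/my-topic" // placeholder resource name

	// Read-modify-write: fetch the current policy, append a binding, write it back.
	policy, err := svc.Projects.Topics.GetIamPolicy(topic).Context(ctx).Do()
	if err != nil {
		log.Fatalf("GetIamPolicy: %v", err)
	}
	policy.Bindings = append(policy.Bindings, &pubsub.Binding{
		Role:    "roles/pubsub.publisher",
		Members: []string{"serviceAccount:publisher@my-project.iam.gserviceaccount.com"}, // placeholder
	})
	if _, err := svc.Projects.Topics.SetIamPolicy(topic, &pubsub.SetIamPolicyRequest{
		// The etag carried in the fetched policy guards against concurrent updates.
		Policy: policy,
	}).Context(ctx).Do(); err != nil {
		log.Fatalf("SetIamPolicy: %v", err)
	}
}
```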
@@ -806,16 +713,15 @@ func (s *ListTopicsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// MessageStoragePolicy: A policy constraining the storage of messages +// published to the topic. type MessageStoragePolicy struct { // AllowedPersistenceRegions: A list of IDs of GCP regions where - // messages that are published to the topic - // may be persisted in storage. Messages published by publishers running - // in - // non-allowed GCP regions (or running outside of GCP altogether) will - // be - // routed for storage in one of the allowed regions. An empty list means - // that - // no regions are allowed, and is not a valid configuration. + // messages that are published to the topic may be persisted in storage. + // Messages published by publishers running in non-allowed GCP regions + // (or running outside of GCP altogether) will be routed for storage in + // one of the allowed regions. An empty list means that no regions are + // allowed, and is not a valid configuration. AllowedPersistenceRegions []string `json:"allowedPersistenceRegions,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -846,18 +752,14 @@ func (s *MessageStoragePolicy) MarshalJSON() ([]byte, error) { // ModifyAckDeadlineRequest: Request for the ModifyAckDeadline method. type ModifyAckDeadlineRequest struct { // AckDeadlineSeconds: Required. The new ack deadline with respect to - // the time this request was sent to - // the Pub/Sub system. For example, if the value is 10, the new - // ack deadline will expire 10 seconds after the `ModifyAckDeadline` - // call - // was made. Specifying zero might immediately make the message - // available for - // delivery to another subscriber client. This typically results in - // an - // increase in the rate of message redeliveries (that is, - // duplicates). - // The minimum deadline you can specify is 0 seconds. - // The maximum deadline you can specify is 600 seconds (10 minutes). + // the time this request was sent to the Pub/Sub system. For example, if + // the value is 10, the new ack deadline will expire 10 seconds after + // the `ModifyAckDeadline` call was made. Specifying zero might + // immediately make the message available for delivery to another + // subscriber client. This typically results in an increase in the rate + // of message redeliveries (that is, duplicates). The minimum deadline + // you can specify is 0 seconds. The maximum deadline you can specify is + // 600 seconds (10 minutes). AckDeadlineSeconds int64 `json:"ackDeadlineSeconds,omitempty"` // AckIds: Required. List of acknowledgment IDs. @@ -889,13 +791,11 @@ func (s *ModifyAckDeadlineRequest) MarshalJSON() ([]byte, error) { // ModifyPushConfigRequest: Request for the ModifyPushConfig method. type ModifyPushConfigRequest struct { - // PushConfig: Required. The push configuration for future - // deliveries. - // - // An empty `pushConfig` indicates that the Pub/Sub system should - // stop pushing messages from the given subscription and allow - // messages to be pulled and acknowledged - effectively pausing - // the subscription if `Pull` or `StreamingPull` is not called. + // PushConfig: Required. The push configuration for future deliveries. + // An empty `pushConfig` indicates that the Pub/Sub system should stop + // pushing messages from the given subscription and allow messages to be + // pulled and acknowledged - effectively pausing the subscription if + // `Pull` or `StreamingPull` is not called. 
PushConfig *PushConfig `json:"pushConfig,omitempty"` // ForceSendFields is a list of field names (e.g. "PushConfig") to @@ -921,34 +821,25 @@ func (s *ModifyPushConfigRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OidcToken: Contains information needed for generating an -// [OpenID +// OidcToken: Contains information needed for generating an [OpenID // Connect -// token](https://developers.google.com/identity/protocols/OpenID -// Connect). +// token](https://developers.google.com/identity/protocols/OpenIDConnect) +// . type OidcToken struct { // Audience: Audience to be used when generating OIDC token. The - // audience claim - // identifies the recipients that the JWT is intended for. The - // audience - // value is a single case-sensitive string. Having multiple values - // (array) - // for the audience field is not supported. More info about the OIDC - // JWT - // token audience here: - // https://tools.ietf.org/html/rfc7519#section-4.1.3 - // Note: if not specified, the Push endpoint URL will be used. + // audience claim identifies the recipients that the JWT is intended + // for. The audience value is a single case-sensitive string. Having + // multiple values (array) for the audience field is not supported. More + // info about the OIDC JWT token audience here: + // https://tools.ietf.org/html/rfc7519#section-4.1.3 Note: if not + // specified, the Push endpoint URL will be used. Audience string `json:"audience,omitempty"` - // ServiceAccountEmail: [Service - // account - // email](https://cloud.google.com/iam/docs/service-accounts) - // to be used for generating the OIDC token. The caller - // (for - // CreateSubscription, UpdateSubscription, and ModifyPushConfig RPCs) - // must - // have the iam.serviceAccounts.actAs permission for the service - // account. + // ServiceAccountEmail: [Service account + // email](https://cloud.google.com/iam/docs/service-accounts) to be used + // for generating the OIDC token. The caller (for CreateSubscription, + // UpdateSubscription, and ModifyPushConfig RPCs) must have the + // iam.serviceAccounts.actAs permission for the service account. ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"` // ForceSendFields is a list of field names (e.g. "Audience") to @@ -975,150 +866,73 @@ func (s *OidcToken) MarshalJSON() ([]byte, error) { } // Policy: An Identity and Access Management (IAM) policy, which -// specifies access -// controls for Google Cloud resources. -// -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or -// more -// `members` to a single `role`. Members can be user accounts, service -// accounts, +// specifies access controls for Google Cloud resources. A `Policy` is a +// collection of `bindings`. A `binding` binds one or more `members` to +// a single `role`. Members can be user accounts, service accounts, // Google groups, and domains (such as G Suite). A `role` is a named -// list of -// permissions; each `role` can be an IAM predefined role or a -// user-created -// custom role. -// -// For some types of Google Cloud resources, a `binding` can also -// specify a -// `condition`, which is a logical expression that allows access to a -// resource -// only if the expression evaluates to `true`. A condition can add -// constraints -// based on attributes of the request, the resource, or both. 
To learn -// which -// resources support conditions in their IAM policies, see the -// [IAM +// list of permissions; each `role` can be an IAM predefined role or a +// user-created custom role. For some types of Google Cloud resources, a +// `binding` can also specify a `condition`, which is a logical +// expression that allows access to a resource only if the expression +// evaluates to `true`. A condition can add constraints based on +// attributes of the request, the resource, or both. To learn which +// resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p -// olicies). -// -// **JSON example:** -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": [ -// "user:eve@example.com" -// ], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// -// **YAML example:** -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// - etag: BwWWja0YfJA= -// - version: 3 -// -// For a description of IAM and its features, see the -// [IAM documentation](https://cloud.google.com/iam/docs/). +// olicies). **JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: +// 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). type Policy struct { // Bindings: Associates a list of `members` to a `role`. Optionally, may - // specify a - // `condition` that determines how and when the `bindings` are applied. - // Each - // of the `bindings` must contain at least one member. 
+ // specify a `condition` that determines how and when the `bindings` are + // applied. Each of the `bindings` must contain at least one member. Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. - // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. **Important:** If you use IAM + // Conditions, you must include the `etag` field whenever you call + // `setIamPolicy`. If you omit this field, then IAM allows you to + // overwrite a version `3` policy with a version `1` policy, and all of // the conditions in the version `3` policy are lost. Etag string `json:"etag,omitempty"` - // Version: Specifies the format of the policy. - // - // Valid values are `0`, `1`, and `3`. Requests that specify an invalid - // value - // are rejected. - // + // Version: Specifies the format of the policy. Valid values are `0`, + // `1`, and `3`. Requests that specify an invalid value are rejected. // Any operation that affects conditional role bindings must specify - // version - // `3`. This requirement applies to the following operations: - // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a - // policy - // that includes conditions - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of - // the conditions in the version `3` policy are lost. - // - // If a policy does not include any conditions, operations on that - // policy may - // specify any valid version or leave the field unset. - // - // To learn which resources support conditions in their IAM policies, - // see the - // [IAM + // version `3`. 
This requirement applies to the following operations: * + // Getting a policy that includes a conditional role binding * Adding a + // conditional role binding to a policy * Changing a conditional role + // binding in a policy * Removing any role binding, with or without a + // condition, from a policy that includes conditions **Important:** If + // you use IAM Conditions, you must include the `etag` field whenever + // you call `setIamPolicy`. If you omit this field, then IAM allows you + // to overwrite a version `3` policy with a version `1` policy, and all + // of the conditions in the version `3` policy are lost. If a policy + // does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. To learn which + // resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p // olicies). Version int64 `json:"version,omitempty"` @@ -1181,10 +995,8 @@ func (s *PublishRequest) MarshalJSON() ([]byte, error) { // PublishResponse: Response for the `Publish` method. type PublishResponse struct { // MessageIds: The server-assigned ID of each published message, in the - // same order as - // the messages in the request. IDs are guaranteed to be unique - // within - // the topic. + // same order as the messages in the request. IDs are guaranteed to be + // unique within the topic. MessageIds []string `json:"messageIds,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1215,45 +1027,43 @@ func (s *PublishResponse) MarshalJSON() ([]byte, error) { } // PubsubMessage: A message that is published by publishers and consumed -// by subscribers. The -// message must contain either a non-empty data field or at least one -// attribute. -// Note that client libraries represent this object -// differently -// depending on the language. See the corresponding -// client -// -// library documentation for more information. See -// Quotas and -// limits -// for more information about message limits. +// by subscribers. The message must contain either a non-empty data +// field or at least one attribute. Note that client libraries represent +// this object differently depending on the language. See the +// corresponding [client library +// documentation](https://cloud.google.com/pubsub/docs/reference/librarie +// s) for more information. See [quotas and limits] +// (https://cloud.google.com/pubsub/quotas) for more information about +// message limits. type PubsubMessage struct { // Attributes: Attributes for this message. If this field is empty, the - // message must - // contain non-empty data. + // message must contain non-empty data. This can be used to filter + // messages on the subscription. Attributes map[string]string `json:"attributes,omitempty"` // Data: The message data field. If this field is empty, the message - // must contain - // at least one attribute. + // must contain at least one attribute. Data string `json:"data,omitempty"` // MessageId: ID of this message, assigned by the server when the - // message is published. - // Guaranteed to be unique within the topic. This value may be read by - // a - // subscriber that receives a `PubsubMessage` via a `Pull` call or a - // push - // delivery. It must not be populated by the publisher in a `Publish` - // call. + // message is published. Guaranteed to be unique within the topic. 
This + // value may be read by a subscriber that receives a `PubsubMessage` via + // a `Pull` call or a push delivery. It must not be populated by the + // publisher in a `Publish` call. MessageId string `json:"messageId,omitempty"` + // OrderingKey: If non-empty, identifies related messages for which + // publish order should be respected. If a `Subscription` has + // `enable_message_ordering` set to `true`, messages published with the + // same non-empty `ordering_key` value will be delivered to subscribers + // in the order in which they are received by the Pub/Sub system. All + // `PubsubMessage`s published in a given `PublishRequest` must specify + // the same `ordering_key` value. + OrderingKey string `json:"orderingKey,omitempty"` + // PublishTime: The time at which the message was published, populated - // by the server when - // it receives the `Publish` call. It must not be populated by - // the - // publisher in a `Publish` call. + // by the server when it receives the `Publish` call. It must not be + // populated by the publisher in a `Publish` call. PublishTime string `json:"publishTime,omitempty"` // ForceSendFields is a list of field names (e.g. "Attributes") to @@ -1282,25 +1092,17 @@ func (s *PubsubMessage) MarshalJSON() ([]byte, error) { // PullRequest: Request for the `Pull` method. type PullRequest struct { // MaxMessages: Required. The maximum number of messages to return for - // this request. Must be a - // positive integer. The Pub/Sub system may return fewer than the - // number - // specified. + // this request. Must be a positive integer. The Pub/Sub system may + // return fewer than the number specified. MaxMessages int64 `json:"maxMessages,omitempty"` // ReturnImmediately: Optional. If this field set to true, the system - // will respond immediately even if - // it there are no messages available to return in the `Pull` - // response. - // Otherwise, the system may wait (for a bounded amount of time) until - // at - // least one message is available, rather than returning no messages. - // Warning: - // setting this field to `true` is discouraged because it adversely - // impacts - // the performance of `Pull` operations. We recommend that users do not - // set - // this field. + // will respond immediately even if it there are no messages available + // to return in the `Pull` response. Otherwise, the system may wait (for + // a bounded amount of time) until at least one message is available, + // rather than returning no messages. Warning: setting this field to + // `true` is discouraged because it adversely impacts the performance of + // `Pull` operations. We recommend that users do not set this field. ReturnImmediately bool `json:"returnImmediately,omitempty"` // ForceSendFields is a list of field names (e.g. "MaxMessages") to @@ -1329,12 +1131,10 @@ func (s *PullRequest) MarshalJSON() ([]byte, error) { // PullResponse: Response for the `Pull` method. type PullResponse struct { // ReceivedMessages: Received Pub/Sub messages. The list will be empty - // if there are no more - // messages available in the backlog. For JSON, the response can be - // entirely - // empty. The Pub/Sub system may return fewer than the `maxMessages` - // requested - // even if there are more messages available in the backlog. + // if there are no more messages available in the backlog. For JSON, the + // response can be entirely empty. The Pub/Sub system may return fewer + // than the `maxMessages` requested even if there are more messages + // available in the backlog. 
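The new `OrderingKey` field described above only takes effect on subscriptions that have `enable_message_ordering` set, and all messages in a single `PublishRequest` must share the same key. A hedged sketch of publishing two related messages with the generated client (topic name is a placeholder; `Data` is base64-encoded as the REST API expects):

```go
package main

import (
	"context"
	"encoding/base64"
	"log"

	pubsub "google.golang.org/api/pubsub/v1"
)

func main() {
	ctx := context.Background()
	svc, err := pubsub.NewService(ctx)
	if err != nil {
		log.Fatalf("pubsub.NewService: %v", err)
	}

	topic := "projects/my-project/topics/my-topic" // placeholder

	// Messages sharing a non-empty OrderingKey are delivered in publish order
	// to subscriptions created with enable_message_ordering set to true.
	req := &pubsub.PublishRequest{
		Messages: []*pubsub.PubsubMessage{
			{Data: base64.StdEncoding.EncodeToString([]byte("step-1")), OrderingKey: "order-42"},
			{Data: base64.StdEncoding.EncodeToString([]byte("step-2")), OrderingKey: "order-42"},
		},
	}
	resp, err := svc.Projects.Topics.Publish(topic, req).Context(ctx).Do()
	if err != nil {
		log.Fatalf("Topics.Publish: %v", err)
	}
	log.Printf("published message IDs: %v", resp.MessageIds)
}
```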
ReceivedMessages []*ReceivedMessage `json:"receivedMessages,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1368,47 +1168,30 @@ func (s *PullResponse) MarshalJSON() ([]byte, error) { // PushConfig: Configuration for a push delivery endpoint. type PushConfig struct { // Attributes: Endpoint configuration attributes that can be used to - // control different - // aspects of the message delivery. - // - // The only currently supported attribute is `x-goog-version`, which you - // can - // use to change the format of the pushed message. This - // attribute - // indicates the version of the data expected by the endpoint. - // This - // controls the shape of the pushed message (i.e., its fields and - // metadata). - // - // If not present during the `CreateSubscription` call, it will default - // to - // the version of the Pub/Sub API used to make such call. If not present - // in a + // control different aspects of the message delivery. The only currently + // supported attribute is `x-goog-version`, which you can use to change + // the format of the pushed message. This attribute indicates the + // version of the data expected by the endpoint. This controls the shape + // of the pushed message (i.e., its fields and metadata). If not present + // during the `CreateSubscription` call, it will default to the version + // of the Pub/Sub API used to make such call. If not present in a // `ModifyPushConfig` call, its value will not be changed. - // `GetSubscription` - // calls will always return a valid version, even if the subscription - // was - // created without this attribute. - // - // The only supported values for the `x-goog-version` attribute are: - // - // * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub - // API. - // * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub - // API. - // - // For example: - //
attributes { "x-goog-version": "v1" } 
+ // `GetSubscription` calls will always return a valid version, even if + // the subscription was created without this attribute. The only + // supported values for the `x-goog-version` attribute are: * `v1beta1`: + // uses the push format defined in the v1beta1 Pub/Sub API. * `v1` or + // `v1beta2`: uses the push format defined in the v1 Pub/Sub API. For + // example: attributes { "x-goog-version": "v1" } Attributes map[string]string `json:"attributes,omitempty"` // OidcToken: If specified, Pub/Sub will generate and attach an OIDC JWT - // token as an - // `Authorization` header in the HTTP request for every pushed message. + // token as an `Authorization` header in the HTTP request for every + // pushed message. OidcToken *OidcToken `json:"oidcToken,omitempty"` // PushEndpoint: A URL locating the endpoint to which messages should be - // pushed. - // For example, a Webhook endpoint might use `https://example.com/push`. + // pushed. For example, a Webhook endpoint might use + // `https://example.com/push`. PushEndpoint string `json:"pushEndpoint,omitempty"` // ForceSendFields is a list of field names (e.g. "Attributes") to @@ -1440,28 +1223,16 @@ type ReceivedMessage struct { AckId string `json:"ackId,omitempty"` // DeliveryAttempt: The approximate number of times that Cloud Pub/Sub - // has attempted to deliver - // the associated message to a subscriber. - // - // More precisely, this is 1 + (number of NACKs) + - // (number of ack_deadline exceeds) for this message. - // - // A NACK is any call to ModifyAckDeadline with a 0 deadline. An - // ack_deadline - // exceeds event is whenever a message is not acknowledged - // within - // ack_deadline. Note that ack_deadline is - // initially - // Subscription.ackDeadlineSeconds, but may get extended automatically - // by - // the client library. - // - // Upon the first delivery of a given message, `delivery_attempt` will - // have a - // value of 1. The value is calculated at best effort and is - // approximate. - // - // If a DeadLetterPolicy is not set on the subscription, this will be 0. + // has attempted to deliver the associated message to a subscriber. More + // precisely, this is 1 + (number of NACKs) + (number of ack_deadline + // exceeds) for this message. A NACK is any call to ModifyAckDeadline + // with a 0 deadline. An ack_deadline exceeds event is whenever a + // message is not acknowledged within ack_deadline. Note that + // ack_deadline is initially Subscription.ackDeadlineSeconds, but may + // get extended automatically by the client library. Upon the first + // delivery of a given message, `delivery_attempt` will have a value of + // 1. The value is calculated at best effort and is approximate. If a + // DeadLetterPolicy is not set on the subscription, this will be 0. DeliveryAttempt int64 `json:"deliveryAttempt,omitempty"` // Message: The message. @@ -1490,33 +1261,68 @@ func (s *ReceivedMessage) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// RetryPolicy: A policy that specifies how Cloud Pub/Sub retries +// message delivery. Retry delay will be exponential based on provided +// minimum and maximum backoffs. +// https://en.wikipedia.org/wiki/Exponential_backoff. RetryPolicy will +// be triggered on NACKs or acknowledgement deadline exceeded events for +// a given message. Retry Policy is implemented on a best effort basis. +// At times, the delay between consecutive deliveries may not match the +// configuration. 
That is, delay can be more or less than configured +// backoff. +type RetryPolicy struct { + // MaximumBackoff: The maximum delay between consecutive deliveries of a + // given message. Value should be between 0 and 600 seconds. Defaults to + // 600 seconds. + MaximumBackoff string `json:"maximumBackoff,omitempty"` + + // MinimumBackoff: The minimum delay between consecutive deliveries of a + // given message. Value should be between 0 and 600 seconds. Defaults to + // 10 seconds. + MinimumBackoff string `json:"minimumBackoff,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaximumBackoff") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaximumBackoff") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RetryPolicy) MarshalJSON() ([]byte, error) { + type NoMethod RetryPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // SeekRequest: Request for the `Seek` method. type SeekRequest struct { // Snapshot: The snapshot to seek to. The snapshot's topic must be the - // same as that of - // the provided subscription. - // Format is `projects/{project}/snapshots/{snap}`. + // same as that of the provided subscription. Format is + // `projects/{project}/snapshots/{snap}`. Snapshot string `json:"snapshot,omitempty"` - // Time: The time to seek to. - // Messages retained in the subscription that were published before - // this - // time are marked as acknowledged, and messages retained in - // the - // subscription that were published after this time are marked - // as - // unacknowledged. Note that this operation affects only those - // messages - // retained in the subscription (configured by the combination - // of - // `message_retention_duration` and `retain_acked_messages`). For - // example, - // if `time` corresponds to a point before the message retention - // window (or to a point before the system's notion of the - // subscription - // creation time), only retained messages will be marked as - // unacknowledged, - // and already-expunged messages will not be restored. + // Time: The time to seek to. Messages retained in the subscription that + // were published before this time are marked as acknowledged, and + // messages retained in the subscription that were published after this + // time are marked as unacknowledged. Note that this operation affects + // only those messages retained in the subscription (configured by the + // combination of `message_retention_duration` and + // `retain_acked_messages`). 
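The RetryPolicy above is attached to a Subscription at creation (or later via Patch). A minimal sketch using this generated client, assuming placeholder project, topic, and subscription names; the backoff values are JSON duration strings, matching the string-typed fields above.

// Illustrative only: create a subscription with bounded exponential retry backoff.
package example

import pubsub "google.golang.org/api/pubsub/v1"

func createWithRetryPolicy(svc *pubsub.Service) (*pubsub.Subscription, error) {
	sub := &pubsub.Subscription{
		Topic:              "projects/my-project/topics/my-topic", // placeholder
		AckDeadlineSeconds: 30,
		// Redelivery delay grows exponentially between these bounds.
		RetryPolicy: &pubsub.RetryPolicy{
			MinimumBackoff: "10s",
			MaximumBackoff: "300s",
		},
	}
	return svc.Projects.Subscriptions.Create(
		"projects/my-project/subscriptions/my-sub", sub).Do()
}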
For example, if `time` corresponds to a + // point before the message retention window (or to a point before the + // system's notion of the subscription creation time), only retained + // messages will be marked as unacknowledged, and already-expunged + // messages will not be restored. Time string `json:"time,omitempty"` // ForceSendFields is a list of field names (e.g. "Snapshot") to @@ -1553,11 +1359,9 @@ type SeekResponse struct { // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. + // `resource`. The size of the policy is limited to a few 10s of KB. An + // empty policy is a valid policy but certain Cloud Platform services + // (such as Projects) might reject them. Policy *Policy `json:"policy,omitempty"` // ForceSendFields is a list of field names (e.g. "Policy") to @@ -1584,39 +1388,26 @@ func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { } // Snapshot: A snapshot resource. Snapshots are used in -// Seek -// o -// perations, which allow -// you to manage message acknowledgments in bulk. That is, you can set -// the -// acknowledgment state of messages in an existing subscription to the -// state -// captured by a snapshot. +// [Seek](https://cloud.google.com/pubsub/docs/replay-overview) +// operations, which allow you to manage message acknowledgments in +// bulk. That is, you can set the acknowledgment state of messages in an +// existing subscription to the state captured by a snapshot. type Snapshot struct { - // ExpireTime: The snapshot is guaranteed to exist up until this time. - // A newly-created snapshot expires no later than 7 days from the time - // of its - // creation. Its exact lifetime is determined at creation by the - // existing - // backlog in the source subscription. Specifically, the lifetime of - // the - // snapshot is `7 days - (age of oldest unacked message in the - // subscription)`. - // For example, consider a subscription whose oldest unacked message is - // 3 days - // old. If a snapshot is created from this subscription, the snapshot -- - // which - // will always capture this 3-day-old backlog as long as the - // snapshot - // exists -- will expire in 4 days. The service will refuse to create - // a - // snapshot that would expire in less than 1 hour after creation. + // ExpireTime: The snapshot is guaranteed to exist up until this time. A + // newly-created snapshot expires no later than 7 days from the time of + // its creation. Its exact lifetime is determined at creation by the + // existing backlog in the source subscription. Specifically, the + // lifetime of the snapshot is `7 days - (age of oldest unacked message + // in the subscription)`. For example, consider a subscription whose + // oldest unacked message is 3 days old. If a snapshot is created from + // this subscription, the snapshot -- which will always capture this + // 3-day-old backlog as long as the snapshot exists -- will expire in 4 + // days. The service will refuse to create a snapshot that would expire + // in less than 1 hour after creation. ExpireTime string `json:"expireTime,omitempty"` - // Labels: See - // Creating and - // managing labels. + // Labels: See [Creating and managing labels] + // (https://cloud.google.com/pubsub/docs/labels). 
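A sketch of the seek-to-time behaviour described above, assuming the subscription was created with `retain_acked_messages` enabled (otherwise only the retained, unacknowledged backlog is affected) and a placeholder subscription name. The Time field is an RFC 3339 timestamp string, matching the string-typed field above.

// Illustrative only: mark everything published in the last hour as unacknowledged
// so it will be redelivered.
package example

import (
	"time"

	pubsub "google.golang.org/api/pubsub/v1"
)

func replayLastHour(svc *pubsub.Service, subscription string) error {
	req := &pubsub.SeekRequest{
		Time: time.Now().Add(-1 * time.Hour).Format(time.RFC3339),
	}
	_, err := svc.Projects.Subscriptions.Seek(subscription, req).Do()
	return err
}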
Labels map[string]string `json:"labels,omitempty"` // Name: The name of the snapshot. @@ -1656,137 +1447,104 @@ func (s *Snapshot) MarshalJSON() ([]byte, error) { // Subscription: A subscription resource. type Subscription struct { // AckDeadlineSeconds: The approximate amount of time (on a best-effort - // basis) Pub/Sub waits for - // the subscriber to acknowledge receipt before resending the message. - // In the - // interval after the message is delivered and before it is - // acknowledged, it - // is considered to be outstanding. During that time period, - // the - // message will not be redelivered (on a best-effort basis). - // - // For pull subscriptions, this value is used as the initial value for - // the ack - // deadline. To override this value for a given message, - // call - // `ModifyAckDeadline` with the corresponding `ack_id` if - // using - // non-streaming pull or send the `ack_id` in - // a - // `StreamingModifyAckDeadlineRequest` if using streaming pull. - // The minimum custom deadline you can specify is 10 seconds. - // The maximum custom deadline you can specify is 600 seconds (10 - // minutes). - // If this parameter is 0, a default value of 10 seconds is used. - // - // For push delivery, this value is also used to set the request timeout - // for - // the call to the push endpoint. - // - // If the subscriber never acknowledges the message, the Pub/Sub - // system will eventually redeliver the message. + // basis) Pub/Sub waits for the subscriber to acknowledge receipt before + // resending the message. In the interval after the message is delivered + // and before it is acknowledged, it is considered to be *outstanding*. + // During that time period, the message will not be redelivered (on a + // best-effort basis). For pull subscriptions, this value is used as the + // initial value for the ack deadline. To override this value for a + // given message, call `ModifyAckDeadline` with the corresponding + // `ack_id` if using non-streaming pull or send the `ack_id` in a + // `StreamingModifyAckDeadlineRequest` if using streaming pull. The + // minimum custom deadline you can specify is 10 seconds. The maximum + // custom deadline you can specify is 600 seconds (10 minutes). If this + // parameter is 0, a default value of 10 seconds is used. For push + // delivery, this value is also used to set the request timeout for the + // call to the push endpoint. If the subscriber never acknowledges the + // message, the Pub/Sub system will eventually redeliver the message. AckDeadlineSeconds int64 `json:"ackDeadlineSeconds,omitempty"` // DeadLetterPolicy: A policy that specifies the conditions for dead - // lettering messages in - // this subscription. If dead_letter_policy is not set, dead - // lettering - // is disabled. - // - // The Cloud Pub/Sub service account associated with this - // subscriptions's - // parent project - // (i.e., - // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) - // must have - // permission to Acknowledge() messages on this subscription. + // lettering messages in this subscription. If dead_letter_policy is not + // set, dead lettering is disabled. The Cloud Pub/Sub service account + // associated with this subscriptions's parent project (i.e., + // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must + // have permission to Acknowledge() messages on this subscription. 
DeadLetterPolicy *DeadLetterPolicy `json:"deadLetterPolicy,omitempty"` + // Detached: Indicates whether the subscription is detached from its + // topic. Detached subscriptions don't receive messages from their topic + // and don't retain any backlog. `Pull` and `StreamingPull` requests + // will return FAILED_PRECONDITION. If the subscription is a push + // subscription, pushes to the endpoint will not be made. + Detached bool `json:"detached,omitempty"` + + // EnableMessageOrdering: If true, messages published with the same + // `ordering_key` in `PubsubMessage` will be delivered to the + // subscribers in the order in which they are received by the Pub/Sub + // system. Otherwise, they may be delivered in any order. + EnableMessageOrdering bool `json:"enableMessageOrdering,omitempty"` + // ExpirationPolicy: A policy that specifies the conditions for this - // subscription's expiration. - // A subscription is considered active as long as any connected - // subscriber is - // successfully consuming messages from the subscription or is - // issuing - // operations on the subscription. If `expiration_policy` is not set, - // a - // *default policy* with `ttl` of 31 days will be used. The minimum - // allowed - // value for `expiration_policy.ttl` is 1 day. + // subscription's expiration. A subscription is considered active as + // long as any connected subscriber is successfully consuming messages + // from the subscription or is issuing operations on the subscription. + // If `expiration_policy` is not set, a *default policy* with `ttl` of + // 31 days will be used. The minimum allowed value for + // `expiration_policy.ttl` is 1 day. ExpirationPolicy *ExpirationPolicy `json:"expirationPolicy,omitempty"` - // Filter: An expression written in the Cloud Pub/Sub filter language. - // If non-empty, - // then only `PubsubMessage`s whose `attributes` field matches the - // filter are - // delivered on this subscription. If empty, then no messages are - // filtered - // out. - // EXPERIMENTAL: This feature is part of a closed alpha release. - // This - // API might be changed in backward-incompatible ways and is not - // recommended - // for production use. It is not subject to any SLA or deprecation - // policy. + // Filter: An expression written in the Pub/Sub [filter + // language](https://cloud.google.com/pubsub/docs/filtering). If + // non-empty, then only `PubsubMessage`s whose `attributes` field + // matches the filter are delivered on this subscription. If empty, then + // no messages are filtered out. Filter string `json:"filter,omitempty"` - // Labels: See - // Creating and - // managing labels. + // Labels: See Creating and managing labels. Labels map[string]string `json:"labels,omitempty"` // MessageRetentionDuration: How long to retain unacknowledged messages - // in the subscription's backlog, - // from the moment a message is published. - // If `retain_acked_messages` is true, then this also configures the - // retention - // of acknowledged messages, and thus configures how far back in time a - // `Seek` - // can be done. Defaults to 7 days. Cannot be more than 7 days or less - // than 10 - // minutes. + // in the subscription's backlog, from the moment a message is + // published. If `retain_acked_messages` is true, then this also + // configures the retention of acknowledged messages, and thus + // configures how far back in time a `Seek` can be done. Defaults to 7 + // days. Cannot be more than 7 days or less than 10 minutes. 
MessageRetentionDuration string `json:"messageRetentionDuration,omitempty"` - // Name: Required. The name of the subscription. It must have the - // format + // Name: Required. The name of the subscription. It must have the format // "projects/{project}/subscriptions/{subscription}". `{subscription}` - // must - // start with a letter, and contain only letters (`[A-Za-z]`), - // numbers - // (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes - // (`~`), - // plus (`+`) or percent signs (`%`). It must be between 3 and 255 - // characters - // in length, and it must not start with "goog". + // must start with a letter, and contain only letters (`[A-Za-z]`), + // numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), + // tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 + // and 255 characters in length, and it must not start with "goog". Name string `json:"name,omitempty"` // PushConfig: If push delivery is used with this subscription, this - // field is - // used to configure it. An empty `pushConfig` signifies that the - // subscriber - // will pull and ack messages using API methods. + // field is used to configure it. An empty `pushConfig` signifies that + // the subscriber will pull and ack messages using API methods. PushConfig *PushConfig `json:"pushConfig,omitempty"` // RetainAckedMessages: Indicates whether to retain acknowledged - // messages. If true, then - // messages are not expunged from the subscription's backlog, even if - // they are - // acknowledged, until they fall out of the - // `message_retention_duration` - // window. This must be true if you would like - // to - // - // Seek to a timestamp. + // messages. If true, then messages are not expunged from the + // subscription's backlog, even if they are acknowledged, until they + // fall out of the `message_retention_duration` window. This must be + // true if you would like to [Seek to a timestamp] + // (https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time). RetainAckedMessages bool `json:"retainAckedMessages,omitempty"` + // RetryPolicy: A policy that specifies how Pub/Sub retries message + // delivery for this subscription. If not set, the default retry policy + // is applied. This generally implies that messages will be retried as + // soon as possible for healthy subscribers. RetryPolicy will be + // triggered on NACKs or acknowledgement deadline exceeded events for a + // given message. + RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` + // Topic: Required. The name of the topic from which this subscription - // is receiving messages. - // Format is `projects/{project}/topics/{topic}`. + // is receiving messages. Format is `projects/{project}/topics/{topic}`. // The value of this field will be `_deleted-topic_` if the topic has - // been - // deleted. + // been deleted. Topic string `json:"topic,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1821,11 +1579,8 @@ func (s *Subscription) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. - // Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For - // more - // information see - // [IAM + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. For more information see [IAM // Overview](https://cloud.google.com/iam/docs/overview#permissions). 
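Tying together the PushConfig and Subscription fields documented above, a hedged sketch of creating a push subscription; the endpoint and resource names are placeholders, and the `x-goog-version` attribute is the one attribute the comments above describe.

// Illustrative only: create a push subscription with message retention enabled.
package example

import pubsub "google.golang.org/api/pubsub/v1"

func createPushSubscription(svc *pubsub.Service) (*pubsub.Subscription, error) {
	sub := &pubsub.Subscription{
		Topic: "projects/my-project/topics/my-topic", // placeholder
		PushConfig: &pubsub.PushConfig{
			PushEndpoint: "https://example.com/push", // example endpoint, as in the docs above
			Attributes:   map[string]string{"x-goog-version": "v1"},
		},
		// Deliver same-ordering-key messages in publish order (see EnableMessageOrdering above).
		EnableMessageOrdering: true,
		// Retain acked messages so a later Seek to a timestamp is possible.
		RetainAckedMessages:      true,
		MessageRetentionDuration: "604800s", // 7 days, the documented default and maximum
	}
	return svc.Projects.Subscriptions.Create(
		"projects/my-project/subscriptions/my-push-sub", sub).Do()
}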
Permissions []string `json:"permissions,omitempty"` @@ -1856,8 +1611,7 @@ func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. + // the caller is allowed. Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1890,36 +1644,25 @@ func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { // Topic: A topic resource. type Topic struct { // KmsKeyName: The resource name of the Cloud KMS CryptoKey to be used - // to protect access - // to messages published on this topic. - // - // The expected format is - // `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + // to protect access to messages published on this topic. The expected + // format is `projects/*/locations/*/keyRings/*/cryptoKeys/*`. KmsKeyName string `json:"kmsKeyName,omitempty"` - // Labels: See - // Creating and - // managing labels. + // Labels: See [Creating and managing labels] + // (https://cloud.google.com/pubsub/docs/labels). Labels map[string]string `json:"labels,omitempty"` // MessageStoragePolicy: Policy constraining the set of Google Cloud - // Platform regions where messages - // published to the topic may be stored. If not present, then no - // constraints - // are in effect. + // Platform regions where messages published to the topic may be stored. + // If not present, then no constraints are in effect. MessageStoragePolicy *MessageStoragePolicy `json:"messageStoragePolicy,omitempty"` - // Name: Required. The name of the topic. It must have the - // format + // Name: Required. The name of the topic. It must have the format // "projects/{project}/topics/{topic}". `{topic}` must start with a - // letter, - // and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes - // (`-`), - // underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or - // percent - // signs (`%`). It must be between 3 and 255 characters in length, and - // it - // must not start with "goog". + // letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), + // dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus + // (`+`) or percent signs (`%`). It must be between 3 and 255 characters + // in length, and it must not start with "goog". Name string `json:"name,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1955,8 +1698,7 @@ type UpdateSnapshotRequest struct { Snapshot *Snapshot `json:"snapshot,omitempty"` // UpdateMask: Required. Indicates which fields in the provided snapshot - // to update. - // Must be specified and non-empty. + // to update. Must be specified and non-empty. UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. "Snapshot") to @@ -1988,8 +1730,7 @@ type UpdateSubscriptionRequest struct { Subscription *Subscription `json:"subscription,omitempty"` // UpdateMask: Required. Indicates which fields in the provided - // subscription to update. - // Must be specified and non-empty. + // subscription to update. Must be specified and non-empty. UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. "Subscription") to @@ -2021,14 +1762,11 @@ type UpdateTopicRequest struct { Topic *Topic `json:"topic,omitempty"` // UpdateMask: Required. Indicates which fields in the provided topic to - // update. Must be specified - // and non-empty. 
Note that if `update_mask` - // contains - // "message_storage_policy" but the `message_storage_policy` is not set - // in - // the `topic` provided above, then the updated value is determined by - // the - // policy configured at the project or organization level. + // update. Must be specified and non-empty. Note that if `update_mask` + // contains "message_storage_policy" but the `message_storage_policy` is + // not set in the `topic` provided above, then the updated value is + // determined by the policy configured at the project or organization + // level. UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. "Topic") to @@ -2067,34 +1805,21 @@ type ProjectsSnapshotsCreateCall struct { // Create: Creates a snapshot from the requested subscription. Snapshots // are used in -// Seek -// o -// perations, which allow -// you to manage message acknowledgments in bulk. That is, you can set -// the -// acknowledgment state of messages in an existing subscription to the -// state -// captured by a snapshot. -//

If the snapshot already exists, returns `ALREADY_EXISTS`. -// If the requested subscription doesn't exist, returns `NOT_FOUND`. -// If the backlog in the subscription is too old -- and the resulting -// snapshot -// would expire in less than 1 hour -- then `FAILED_PRECONDITION` is -// returned. +// [Seek](https://cloud.google.com/pubsub/docs/replay-overview) +// operations, which allow you to manage message acknowledgments in +// bulk. That is, you can set the acknowledgment state of messages in an +// existing subscription to the state captured by a snapshot. If the +// snapshot already exists, returns `ALREADY_EXISTS`. If the requested +// subscription doesn't exist, returns `NOT_FOUND`. If the backlog in +// the subscription is too old -- and the resulting snapshot would +// expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned. // See also the `Snapshot.expire_time` field. If the name is not -// provided in -// the request, the server will assign a random -// name for this snapshot on the same project as the subscription, -// conforming -// to the -// [resource -// name -// format](https://cloud.google.com/pubsub/docs/admin#resource_names -// ). The +// provided in the request, the server will assign a random name for +// this snapshot on the same project as the subscription, conforming to +// the [resource name format] +// (https://cloud.google.com/pubsub/docs/admin#resource_names). The // generated name is populated in the returned Snapshot object. Note -// that for -// REST API requests, you must specify a name in the request. +// that for REST API requests, you must specify a name in the request. func (r *ProjectsSnapshotsService) Create(name string, createsnapshotrequest *CreateSnapshotRequest) *ProjectsSnapshotsCreateCall { c := &ProjectsSnapshotsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2129,7 +1854,7 @@ func (c *ProjectsSnapshotsCreateCall) Header() http.Header { func (c *ProjectsSnapshotsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2193,7 +1918,7 @@ func (c *ProjectsSnapshotsCreateCall) Do(opts ...googleapi.CallOption) (*Snapsho } return ret, nil // { - // "description": "Creates a snapshot from the requested subscription. Snapshots are used in\n\u003ca href=\"https://cloud.google.com/pubsub/docs/replay-overview\"\u003eSeek\u003c/a\u003e\noperations, which allow\nyou to manage message acknowledgments in bulk. That is, you can set the\nacknowledgment state of messages in an existing subscription to the state\ncaptured by a snapshot.\n\u003cbr\u003e\u003cbr\u003eIf the snapshot already exists, returns `ALREADY_EXISTS`.\nIf the requested subscription doesn't exist, returns `NOT_FOUND`.\nIf the backlog in the subscription is too old -- and the resulting snapshot\nwould expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned.\nSee also the `Snapshot.expire_time` field. If the name is not provided in\nthe request, the server will assign a random\nname for this snapshot on the same project as the subscription, conforming\nto the\n[resource name\nformat](https://cloud.google.com/pubsub/docs/admin#resource_names). The\ngenerated name is populated in the returned Snapshot object. 
Note that for\nREST API requests, you must specify a name in the request.", + // "description": "Creates a snapshot from the requested subscription. Snapshots are used in [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot. If the snapshot already exists, returns `ALREADY_EXISTS`. If the requested subscription doesn't exist, returns `NOT_FOUND`. If the backlog in the subscription is too old -- and the resulting snapshot would expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned. See also the `Snapshot.expire_time` field. If the name is not provided in the request, the server will assign a random name for this snapshot on the same project as the subscription, conforming to the [resource name format] (https://cloud.google.com/pubsub/docs/admin#resource_names). The generated name is populated in the returned Snapshot object. Note that for REST API requests, you must specify a name in the request.", // "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}", // "httpMethod": "PUT", // "id": "pubsub.projects.snapshots.create", @@ -2202,7 +1927,7 @@ func (c *ProjectsSnapshotsCreateCall) Do(opts ...googleapi.CallOption) (*Snapsho // ], // "parameters": { // "name": { - // "description": "Required. User-provided name for this snapshot. If the name is not provided in the\nrequest, the server will assign a random name for this snapshot on the same\nproject as the subscription. Note that for REST API requests, you must\nspecify a name. See the \u003ca\nhref=\"https://cloud.google.com/pubsub/docs/admin#resource_names\"\u003e resource\nname rules\u003c/a\u003e. Format is `projects/{project}/snapshots/{snap}`.", + // "description": "Required. User-provided name for this snapshot. If the name is not provided in the request, the server will assign a random name for this snapshot on the same project as the subscription. Note that for REST API requests, you must specify a name. See the resource name rules. Format is `projects/{project}/snapshots/{snap}`.", // "location": "path", // "pattern": "^projects/[^/]+/snapshots/[^/]+$", // "required": true, @@ -2234,22 +1959,14 @@ type ProjectsSnapshotsDeleteCall struct { header_ http.Header } -// Delete: Removes an existing snapshot. Snapshots are used in -// Seek -// o -// perations, which allow -// you to manage message acknowledgments in bulk. That is, you can set -// the -// acknowledgment state of messages in an existing subscription to the -// state -// captured by a snapshot.
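As a sketch of the snapshot workflow these Create/Delete docs describe: capture a subscription's acknowledgment state in a snapshot, then later roll the subscription back to it with Seek. The names are placeholders; the snapshot name follows the `projects/{project}/snapshots/{snap}` format noted above.

// Illustrative only: snapshot a subscription and later seek back to it.
package example

import pubsub "google.golang.org/api/pubsub/v1"

func snapshotAndRestore(svc *pubsub.Service, subscription string) error {
	const snap = "projects/my-project/snapshots/my-snap" // placeholder name

	// Capture the current acknowledgment state of the subscription.
	if _, err := svc.Projects.Snapshots.Create(snap, &pubsub.CreateSnapshotRequest{
		Subscription: subscription,
	}).Do(); err != nil {
		return err
	}

	// Later: restore the subscription's acknowledgment state in bulk.
	_, err := svc.Projects.Subscriptions.Seek(subscription, &pubsub.SeekRequest{
		Snapshot: snap,
	}).Do()
	return err
}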

-// When the snapshot is deleted, all messages retained in the -// snapshot -// are immediately dropped. After a snapshot is deleted, a new one may -// be -// created with the same name, but the new one has no association with -// the old +// Delete: Removes an existing snapshot. Snapshots are used in [Seek] +// (https://cloud.google.com/pubsub/docs/replay-overview) operations, +// which allow you to manage message acknowledgments in bulk. That is, +// you can set the acknowledgment state of messages in an existing +// subscription to the state captured by a snapshot. When the snapshot +// is deleted, all messages retained in the snapshot are immediately +// dropped. After a snapshot is deleted, a new one may be created with +// the same name, but the new one has no association with the old // snapshot or its subscription, unless the same subscription is // specified. func (r *ProjectsSnapshotsService) Delete(snapshot string) *ProjectsSnapshotsDeleteCall { @@ -2285,7 +2002,7 @@ func (c *ProjectsSnapshotsDeleteCall) Header() http.Header { func (c *ProjectsSnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2344,7 +2061,7 @@ func (c *ProjectsSnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, } return ret, nil // { - // "description": "Removes an existing snapshot. Snapshots are used in\n\u003ca href=\"https://cloud.google.com/pubsub/docs/replay-overview\"\u003eSeek\u003c/a\u003e\noperations, which allow\nyou to manage message acknowledgments in bulk. That is, you can set the\nacknowledgment state of messages in an existing subscription to the state\ncaptured by a snapshot.\u003cbr\u003e\u003cbr\u003e\nWhen the snapshot is deleted, all messages retained in the snapshot\nare immediately dropped. After a snapshot is deleted, a new one may be\ncreated with the same name, but the new one has no association with the old\nsnapshot or its subscription, unless the same subscription is specified.", + // "description": "Removes an existing snapshot. Snapshots are used in [Seek] (https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot. When the snapshot is deleted, all messages retained in the snapshot are immediately dropped. After a snapshot is deleted, a new one may be created with the same name, but the new one has no association with the old snapshot or its subscription, unless the same subscription is specified.", // "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}", // "httpMethod": "DELETE", // "id": "pubsub.projects.snapshots.delete", @@ -2353,7 +2070,7 @@ func (c *ProjectsSnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, // ], // "parameters": { // "snapshot": { - // "description": "Required. The name of the snapshot to delete.\nFormat is `projects/{project}/snapshots/{snap}`.", + // "description": "Required. The name of the snapshot to delete. 
Format is `projects/{project}/snapshots/{snap}`.", // "location": "path", // "pattern": "^projects/[^/]+/snapshots/[^/]+$", // "required": true, @@ -2384,15 +2101,9 @@ type ProjectsSnapshotsGetCall struct { } // Get: Gets the configuration details of a snapshot. Snapshots are used -// in -// Seek -// o -// perations, which allow you to manage message acknowledgments in bulk. -// That -// is, you can set the acknowledgment state of messages in an -// existing -// subscription to the state captured by a snapshot. +// in Seek operations, which allow you to manage message acknowledgments +// in bulk. That is, you can set the acknowledgment state of messages in +// an existing subscription to the state captured by a snapshot. func (r *ProjectsSnapshotsService) Get(snapshot string) *ProjectsSnapshotsGetCall { c := &ProjectsSnapshotsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.snapshot = snapshot @@ -2436,7 +2147,7 @@ func (c *ProjectsSnapshotsGetCall) Header() http.Header { func (c *ProjectsSnapshotsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2498,7 +2209,7 @@ func (c *ProjectsSnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, } return ret, nil // { - // "description": "Gets the configuration details of a snapshot. Snapshots are used in\n\u003ca href=\"https://cloud.google.com/pubsub/docs/replay-overview\"\u003eSeek\u003c/a\u003e\noperations, which allow you to manage message acknowledgments in bulk. That\nis, you can set the acknowledgment state of messages in an existing\nsubscription to the state captured by a snapshot.", + // "description": "Gets the configuration details of a snapshot. Snapshots are used in Seek operations, which allow you to manage message acknowledgments in bulk. That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot.", // "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}", // "httpMethod": "GET", // "id": "pubsub.projects.snapshots.get", @@ -2507,7 +2218,7 @@ func (c *ProjectsSnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, // ], // "parameters": { // "snapshot": { - // "description": "Required. The name of the snapshot to get.\nFormat is `projects/{project}/snapshots/{snap}`.", + // "description": "Required. The name of the snapshot to get. Format is `projects/{project}/snapshots/{snap}`.", // "location": "path", // "pattern": "^projects/[^/]+/snapshots/[^/]+$", // "required": true, @@ -2537,9 +2248,8 @@ type ProjectsSnapshotsGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy // set. 
func (r *ProjectsSnapshotsService) GetIamPolicy(resource string) *ProjectsSnapshotsGetIamPolicyCall { c := &ProjectsSnapshotsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -2549,24 +2259,14 @@ func (r *ProjectsSnapshotsService) GetIamPolicy(resource string) *ProjectsSnapsh // OptionsRequestedPolicyVersion sets the optional parameter // "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. -// -// Requests for policies with any conditional bindings must specify -// version 3. -// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. -// -// To learn which resources support conditions in their IAM policies, -// see -// the -// [IAM -// documentation](https://cloud.google.com/iam/help/conditions/r -// esource-policies). +// returned. Valid values are 0, 1, and 3. Requests specifying an +// invalid value will be rejected. Requests for policies with any +// conditional bindings must specify version 3. Policies without any +// conditional bindings may specify any valid value or leave the field +// unset. To learn which resources support conditions in their IAM +// policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). func (c *ProjectsSnapshotsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsSnapshotsGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -2609,7 +2309,7 @@ func (c *ProjectsSnapshotsGetIamPolicyCall) Header() http.Header { func (c *ProjectsSnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2671,7 +2371,7 @@ func (c *ProjectsSnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", // "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:getIamPolicy", // "httpMethod": "GET", // "id": "pubsub.projects.snapshots.getIamPolicy", @@ -2680,13 +2380,13 @@ func (c *ProjectsSnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + // "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. 
Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" // }, // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/snapshots/[^/]+$", // "required": true, @@ -2716,16 +2416,11 @@ type ProjectsSnapshotsListCall struct { header_ http.Header } -// List: Lists the existing snapshots. Snapshots are used in -// Seek -// o -// perations, which allow -// you to manage message acknowledgments in bulk. That is, you can set -// the -// acknowledgment state of messages in an existing subscription to the -// state -// captured by a snapshot. +// List: Lists the existing snapshots. Snapshots are used in [Seek]( +// https://cloud.google.com/pubsub/docs/replay-overview) operations, +// which allow you to manage message acknowledgments in bulk. That is, +// you can set the acknowledgment state of messages in an existing +// subscription to the state captured by a snapshot. func (r *ProjectsSnapshotsService) List(project string) *ProjectsSnapshotsListCall { c := &ProjectsSnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -2740,9 +2435,8 @@ func (c *ProjectsSnapshotsListCall) PageSize(pageSize int64) *ProjectsSnapshotsL } // PageToken sets the optional parameter "pageToken": The value returned -// by the last `ListSnapshotsResponse`; indicates that this -// is a continuation of a prior `ListSnapshots` call, and that the -// system +// by the last `ListSnapshotsResponse`; indicates that this is a +// continuation of a prior `ListSnapshots` call, and that the system // should return the next page of data. func (c *ProjectsSnapshotsListCall) PageToken(pageToken string) *ProjectsSnapshotsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -2786,7 +2480,7 @@ func (c *ProjectsSnapshotsListCall) Header() http.Header { func (c *ProjectsSnapshotsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2848,7 +2542,7 @@ func (c *ProjectsSnapshotsListCall) Do(opts ...googleapi.CallOption) (*ListSnaps } return ret, nil // { - // "description": "Lists the existing snapshots. Snapshots are used in\n\u003ca href=\"https://cloud.google.com/pubsub/docs/replay-overview\"\u003eSeek\u003c/a\u003e\noperations, which allow\nyou to manage message acknowledgments in bulk. That is, you can set the\nacknowledgment state of messages in an existing subscription to the state\ncaptured by a snapshot.", + // "description": "Lists the existing snapshots. Snapshots are used in [Seek]( https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. 
That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot.", // "flatPath": "v1/projects/{projectsId}/snapshots", // "httpMethod": "GET", // "id": "pubsub.projects.snapshots.list", @@ -2863,12 +2557,12 @@ func (c *ProjectsSnapshotsListCall) Do(opts ...googleapi.CallOption) (*ListSnaps // "type": "integer" // }, // "pageToken": { - // "description": "The value returned by the last `ListSnapshotsResponse`; indicates that this\nis a continuation of a prior `ListSnapshots` call, and that the system\nshould return the next page of data.", + // "description": "The value returned by the last `ListSnapshotsResponse`; indicates that this is a continuation of a prior `ListSnapshots` call, and that the system should return the next page of data.", // "location": "query", // "type": "string" // }, // "project": { - // "description": "Required. The name of the project in which to list snapshots.\nFormat is `projects/{project-id}`.", + // "description": "Required. The name of the project in which to list snapshots. Format is `projects/{project-id}`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -2919,16 +2613,10 @@ type ProjectsSnapshotsPatchCall struct { header_ http.Header } -// Patch: Updates an existing snapshot. Snapshots are used in -// Seek -// o -// perations, which allow -// you to manage message acknowledgments in bulk. That is, you can set -// the -// acknowledgment state of messages in an existing subscription to the -// state -// captured by a snapshot. +// Patch: Updates an existing snapshot. Snapshots are used in Seek +// operations, which allow you to manage message acknowledgments in +// bulk. That is, you can set the acknowledgment state of messages in an +// existing subscription to the state captured by a snapshot. func (r *ProjectsSnapshotsService) Patch(name string, updatesnapshotrequest *UpdateSnapshotRequest) *ProjectsSnapshotsPatchCall { c := &ProjectsSnapshotsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2963,7 +2651,7 @@ func (c *ProjectsSnapshotsPatchCall) Header() http.Header { func (c *ProjectsSnapshotsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3027,7 +2715,7 @@ func (c *ProjectsSnapshotsPatchCall) Do(opts ...googleapi.CallOption) (*Snapshot } return ret, nil // { - // "description": "Updates an existing snapshot. Snapshots are used in\n\u003ca href=\"https://cloud.google.com/pubsub/docs/replay-overview\"\u003eSeek\u003c/a\u003e\noperations, which allow\nyou to manage message acknowledgments in bulk. That is, you can set the\nacknowledgment state of messages in an existing subscription to the state\ncaptured by a snapshot.", + // "description": "Updates an existing snapshot. Snapshots are used in Seek operations, which allow you to manage message acknowledgments in bulk. 
That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot.", // "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}", // "httpMethod": "PATCH", // "id": "pubsub.projects.snapshots.patch", @@ -3070,11 +2758,8 @@ type ProjectsSnapshotsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -// -// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` -// errors. +// resource. Replaces any existing policy. Can return `NOT_FOUND`, +// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. func (r *ProjectsSnapshotsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsSnapshotsSetIamPolicyCall { c := &ProjectsSnapshotsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -3109,7 +2794,7 @@ func (c *ProjectsSnapshotsSetIamPolicyCall) Header() http.Header { func (c *ProjectsSnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3173,7 +2858,7 @@ func (c *ProjectsSnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", // "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:setIamPolicy", // "httpMethod": "POST", // "id": "pubsub.projects.snapshots.setIamPolicy", @@ -3182,7 +2867,7 @@ func (c *ProjectsSnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/snapshots/[^/]+$", // "required": true, @@ -3216,16 +2901,11 @@ type ProjectsSnapshotsTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a `NOT_FOUND` error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. This +// operation may "fail open" without warning. 
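A hedged sketch of the read-modify-write pattern implied by these IAM wrappers and the etag/version discussion earlier, using a snapshot resource as in the surrounding methods; the role and member strings are placeholders.

// Illustrative only: read the policy at version 3, add a binding, write it back.
package example

import pubsub "google.golang.org/api/pubsub/v1"

func grantViewer(svc *pubsub.Service, resource string) error {
	// Request the version-3 format so conditional bindings and the etag come back intact.
	policy, err := svc.Projects.Snapshots.GetIamPolicy(resource).
		OptionsRequestedPolicyVersion(3).Do()
	if err != nil {
		return err
	}

	// The etag carried in `policy` lets the service reject the write if the
	// policy changed concurrently.
	policy.Bindings = append(policy.Bindings, &pubsub.Binding{
		Role:    "roles/pubsub.viewer",             // placeholder role
		Members: []string{"user:jane@example.com"}, // placeholder member
	})
	_, err = svc.Projects.Snapshots.SetIamPolicy(resource, &pubsub.SetIamPolicyRequest{
		Policy: policy,
	}).Do()
	return err
}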
func (r *ProjectsSnapshotsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsSnapshotsTestIamPermissionsCall { c := &ProjectsSnapshotsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -3260,7 +2940,7 @@ func (c *ProjectsSnapshotsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsSnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3324,7 +3004,7 @@ func (c *ProjectsSnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", // "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:testIamPermissions", // "httpMethod": "POST", // "id": "pubsub.projects.snapshots.testIamPermissions", @@ -3333,7 +3013,7 @@ func (c *ProjectsSnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/snapshots/[^/]+$", // "required": true, @@ -3367,16 +3047,11 @@ type ProjectsSubscriptionsAcknowledgeCall struct { } // Acknowledge: Acknowledges the messages associated with the `ack_ids` -// in the -// `AcknowledgeRequest`. The Pub/Sub system can remove the relevant -// messages -// from the subscription. -// -// Acknowledging a message whose ack deadline has expired may -// succeed, -// but such a message may be redelivered later. Acknowledging a message -// more -// than once will not result in an error. +// in the `AcknowledgeRequest`. The Pub/Sub system can remove the +// relevant messages from the subscription. Acknowledging a message +// whose ack deadline has expired may succeed, but such a message may be +// redelivered later. Acknowledging a message more than once will not +// result in an error. 
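To round out the Acknowledge semantics described above, a minimal synchronous pull-and-ack sketch; the subscription name is supplied by the caller as a placeholder, and the message Data is base64-decoded, mirroring how it is encoded on publish.

// Illustrative only: pull up to ten messages and acknowledge them.
package example

import (
	"encoding/base64"
	"fmt"

	pubsub "google.golang.org/api/pubsub/v1"
)

func pullAndAck(svc *pubsub.Service, subscription string) error {
	resp, err := svc.Projects.Subscriptions.Pull(subscription, &pubsub.PullRequest{
		MaxMessages: 10, // the service may return fewer
	}).Do()
	if err != nil {
		return err
	}

	var ackIDs []string
	for _, rm := range resp.ReceivedMessages {
		payload, _ := base64.StdEncoding.DecodeString(rm.Message.Data)
		fmt.Printf("message %s: %s\n", rm.Message.MessageId, payload)
		ackIDs = append(ackIDs, rm.AckId)
	}
	if len(ackIDs) == 0 {
		return nil
	}

	// Ack before the deadline so the messages are not redelivered;
	// acknowledging a message more than once is not an error.
	_, err = svc.Projects.Subscriptions.Acknowledge(subscription, &pubsub.AcknowledgeRequest{
		AckIds: ackIDs,
	}).Do()
	return err
}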
func (r *ProjectsSubscriptionsService) Acknowledge(subscription string, acknowledgerequest *AcknowledgeRequest) *ProjectsSubscriptionsAcknowledgeCall { c := &ProjectsSubscriptionsAcknowledgeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.subscription = subscription @@ -3411,7 +3086,7 @@ func (c *ProjectsSubscriptionsAcknowledgeCall) Header() http.Header { func (c *ProjectsSubscriptionsAcknowledgeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3475,7 +3150,7 @@ func (c *ProjectsSubscriptionsAcknowledgeCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Acknowledges the messages associated with the `ack_ids` in the\n`AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages\nfrom the subscription.\n\nAcknowledging a message whose ack deadline has expired may succeed,\nbut such a message may be redelivered later. Acknowledging a message more\nthan once will not result in an error.", + // "description": "Acknowledges the messages associated with the `ack_ids` in the `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages from the subscription. Acknowledging a message whose ack deadline has expired may succeed, but such a message may be redelivered later. Acknowledging a message more than once will not result in an error.", // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:acknowledge", // "httpMethod": "POST", // "id": "pubsub.projects.subscriptions.acknowledge", @@ -3484,7 +3159,7 @@ func (c *ProjectsSubscriptionsAcknowledgeCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "subscription": { - // "description": "Required. The subscription whose message is being acknowledged.\nFormat is `projects/{project}/subscriptions/{sub}`.", + // "description": "Required. The subscription whose message is being acknowledged. Format is `projects/{project}/subscriptions/{sub}`.", // "location": "path", // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", // "required": true, @@ -3517,26 +3192,17 @@ type ProjectsSubscriptionsCreateCall struct { header_ http.Header } -// Create: Creates a subscription to a given topic. See the -// -// reso -// urce name rules. -// If the subscription already exists, returns `ALREADY_EXISTS`. -// If the corresponding topic doesn't exist, returns `NOT_FOUND`. -// -// If the name is not provided in the request, the server will assign a -// random -// name for this subscription on the same project as the topic, -// conforming -// to the -// [resource -// name -// format](https://cloud.google.com/pubsub/docs/admin#resource_names -// ). The +// Create: Creates a subscription to a given topic. See the [resource +// name rules] +// (https://cloud.google.com/pubsub/docs/admin#resource_names). If the +// subscription already exists, returns `ALREADY_EXISTS`. If the +// corresponding topic doesn't exist, returns `NOT_FOUND`. If the name +// is not provided in the request, the server will assign a random name +// for this subscription on the same project as the topic, conforming to +// the [resource name format] +// (https://cloud.google.com/pubsub/docs/admin#resource_names). The // generated name is populated in the returned Subscription object. 
Note -// that -// for REST API requests, you must specify a name in the request. +// that for REST API requests, you must specify a name in the request. func (r *ProjectsSubscriptionsService) Create(name string, subscription *Subscription) *ProjectsSubscriptionsCreateCall { c := &ProjectsSubscriptionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3571,7 +3237,7 @@ func (c *ProjectsSubscriptionsCreateCall) Header() http.Header { func (c *ProjectsSubscriptionsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3635,7 +3301,7 @@ func (c *ProjectsSubscriptionsCreateCall) Do(opts ...googleapi.CallOption) (*Sub } return ret, nil // { - // "description": "Creates a subscription to a given topic. See the\n\u003ca href=\"https://cloud.google.com/pubsub/docs/admin#resource_names\"\u003e\nresource name rules\u003c/a\u003e.\nIf the subscription already exists, returns `ALREADY_EXISTS`.\nIf the corresponding topic doesn't exist, returns `NOT_FOUND`.\n\nIf the name is not provided in the request, the server will assign a random\nname for this subscription on the same project as the topic, conforming\nto the\n[resource name\nformat](https://cloud.google.com/pubsub/docs/admin#resource_names). The\ngenerated name is populated in the returned Subscription object. Note that\nfor REST API requests, you must specify a name in the request.", + // "description": "Creates a subscription to a given topic. See the [resource name rules] (https://cloud.google.com/pubsub/docs/admin#resource_names). If the subscription already exists, returns `ALREADY_EXISTS`. If the corresponding topic doesn't exist, returns `NOT_FOUND`. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic, conforming to the [resource name format] (https://cloud.google.com/pubsub/docs/admin#resource_names). The generated name is populated in the returned Subscription object. Note that for REST API requests, you must specify a name in the request.", // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", // "httpMethod": "PUT", // "id": "pubsub.projects.subscriptions.create", @@ -3644,7 +3310,7 @@ func (c *ProjectsSubscriptionsCreateCall) Do(opts ...googleapi.CallOption) (*Sub // ], // "parameters": { // "name": { - // "description": "Required. The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", + // "description": "Required. The name of the subscription. It must have the format `\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). 
It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", // "location": "path", // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", // "required": true, @@ -3677,14 +3343,11 @@ type ProjectsSubscriptionsDeleteCall struct { } // Delete: Deletes an existing subscription. All messages retained in -// the subscription -// are immediately dropped. Calls to `Pull` after deletion will -// return -// `NOT_FOUND`. After a subscription is deleted, a new one may be -// created with -// the same name, but the new one has no association with the -// old -// subscription or its topic unless the same topic is specified. +// the subscription are immediately dropped. Calls to `Pull` after +// deletion will return `NOT_FOUND`. After a subscription is deleted, a +// new one may be created with the same name, but the new one has no +// association with the old subscription or its topic unless the same +// topic is specified. func (r *ProjectsSubscriptionsService) Delete(subscription string) *ProjectsSubscriptionsDeleteCall { c := &ProjectsSubscriptionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.subscription = subscription @@ -3718,7 +3381,7 @@ func (c *ProjectsSubscriptionsDeleteCall) Header() http.Header { func (c *ProjectsSubscriptionsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3777,7 +3440,7 @@ func (c *ProjectsSubscriptionsDeleteCall) Do(opts ...googleapi.CallOption) (*Emp } return ret, nil // { - // "description": "Deletes an existing subscription. All messages retained in the subscription\nare immediately dropped. Calls to `Pull` after deletion will return\n`NOT_FOUND`. After a subscription is deleted, a new one may be created with\nthe same name, but the new one has no association with the old\nsubscription or its topic unless the same topic is specified.", + // "description": "Deletes an existing subscription. All messages retained in the subscription are immediately dropped. Calls to `Pull` after deletion will return `NOT_FOUND`. After a subscription is deleted, a new one may be created with the same name, but the new one has no association with the old subscription or its topic unless the same topic is specified.", // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", // "httpMethod": "DELETE", // "id": "pubsub.projects.subscriptions.delete", @@ -3786,7 +3449,7 @@ func (c *ProjectsSubscriptionsDeleteCall) Do(opts ...googleapi.CallOption) (*Emp // ], // "parameters": { // "subscription": { - // "description": "Required. The subscription to delete.\nFormat is `projects/{project}/subscriptions/{sub}`.", + // "description": "Required. The subscription to delete. Format is `projects/{project}/subscriptions/{sub}`.", // "location": "path", // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", // "required": true, @@ -3805,6 +3468,141 @@ func (c *ProjectsSubscriptionsDeleteCall) Do(opts ...googleapi.CallOption) (*Emp } +// method id "pubsub.projects.subscriptions.detach": + +type ProjectsSubscriptionsDetachCall struct { + s *Service + subscription string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Detach: Detaches a subscription from this topic. All messages +// retained in the subscription are dropped. 
Subsequent `Pull` and +// `StreamingPull` requests will return FAILED_PRECONDITION. If the +// subscription is a push subscription, pushes to the endpoint will +// stop. +func (r *ProjectsSubscriptionsService) Detach(subscription string) *ProjectsSubscriptionsDetachCall { + c := &ProjectsSubscriptionsDetachCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.subscription = subscription + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSubscriptionsDetachCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsDetachCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSubscriptionsDetachCall) Context(ctx context.Context) *ProjectsSubscriptionsDetachCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsSubscriptionsDetachCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsSubscriptionsDetachCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}:detach") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "subscription": c.subscription, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "pubsub.projects.subscriptions.detach" call. +// Exactly one of *DetachSubscriptionResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *DetachSubscriptionResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsSubscriptionsDetachCall) Do(opts ...googleapi.CallOption) (*DetachSubscriptionResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DetachSubscriptionResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Detaches a subscription from this topic. All messages retained in the subscription are dropped. 
Subsequent `Pull` and `StreamingPull` requests will return FAILED_PRECONDITION. If the subscription is a push subscription, pushes to the endpoint will stop.", + // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:detach", + // "httpMethod": "POST", + // "id": "pubsub.projects.subscriptions.detach", + // "parameterOrder": [ + // "subscription" + // ], + // "parameters": { + // "subscription": { + // "description": "Required. The subscription to detach. Format is `projects/{project}/subscriptions/{subscription}`.", + // "location": "path", + // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+subscription}:detach", + // "response": { + // "$ref": "DetachSubscriptionResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + // method id "pubsub.projects.subscriptions.get": type ProjectsSubscriptionsGetCall struct { @@ -3860,7 +3658,7 @@ func (c *ProjectsSubscriptionsGetCall) Header() http.Header { func (c *ProjectsSubscriptionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3931,7 +3729,7 @@ func (c *ProjectsSubscriptionsGetCall) Do(opts ...googleapi.CallOption) (*Subscr // ], // "parameters": { // "subscription": { - // "description": "Required. The name of the subscription to get.\nFormat is `projects/{project}/subscriptions/{sub}`.", + // "description": "Required. The name of the subscription to get. Format is `projects/{project}/subscriptions/{sub}`.", // "location": "path", // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", // "required": true, @@ -3961,9 +3759,8 @@ type ProjectsSubscriptionsGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy // set. func (r *ProjectsSubscriptionsService) GetIamPolicy(resource string) *ProjectsSubscriptionsGetIamPolicyCall { c := &ProjectsSubscriptionsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -3973,24 +3770,14 @@ func (r *ProjectsSubscriptionsService) GetIamPolicy(resource string) *ProjectsSu // OptionsRequestedPolicyVersion sets the optional parameter // "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. -// -// Requests for policies with any conditional bindings must specify -// version 3. -// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. -// -// To learn which resources support conditions in their IAM policies, -// see -// the -// [IAM -// documentation](https://cloud.google.com/iam/help/conditions/r -// esource-policies). +// returned. Valid values are 0, 1, and 3. Requests specifying an +// invalid value will be rejected. Requests for policies with any +// conditional bindings must specify version 3. 
Policies without any +// conditional bindings may specify any valid value or leave the field +// unset. To learn which resources support conditions in their IAM +// policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). func (c *ProjectsSubscriptionsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsSubscriptionsGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -4033,7 +3820,7 @@ func (c *ProjectsSubscriptionsGetIamPolicyCall) Header() http.Header { func (c *ProjectsSubscriptionsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4095,7 +3882,7 @@ func (c *ProjectsSubscriptionsGetIamPolicyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:getIamPolicy", // "httpMethod": "GET", // "id": "pubsub.projects.subscriptions.getIamPolicy", @@ -4104,13 +3891,13 @@ func (c *ProjectsSubscriptionsGetIamPolicyCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + // "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" // }, // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. 
See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", // "required": true, @@ -4155,10 +3942,9 @@ func (c *ProjectsSubscriptionsListCall) PageSize(pageSize int64) *ProjectsSubscr } // PageToken sets the optional parameter "pageToken": The value returned -// by the last `ListSubscriptionsResponse`; indicates that -// this is a continuation of a prior `ListSubscriptions` call, and that -// the -// system should return the next page of data. +// by the last `ListSubscriptionsResponse`; indicates that this is a +// continuation of a prior `ListSubscriptions` call, and that the system +// should return the next page of data. func (c *ProjectsSubscriptionsListCall) PageToken(pageToken string) *ProjectsSubscriptionsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -4201,7 +3987,7 @@ func (c *ProjectsSubscriptionsListCall) Header() http.Header { func (c *ProjectsSubscriptionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4278,12 +4064,12 @@ func (c *ProjectsSubscriptionsListCall) Do(opts ...googleapi.CallOption) (*ListS // "type": "integer" // }, // "pageToken": { - // "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that\nthis is a continuation of a prior `ListSubscriptions` call, and that the\nsystem should return the next page of data.", + // "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that this is a continuation of a prior `ListSubscriptions` call, and that the system should return the next page of data.", // "location": "query", // "type": "string" // }, // "project": { - // "description": "Required. The name of the project in which to list subscriptions.\nFormat is `projects/{project-id}`.", + // "description": "Required. The name of the project in which to list subscriptions. Format is `projects/{project-id}`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -4335,14 +4121,11 @@ type ProjectsSubscriptionsModifyAckDeadlineCall struct { } // ModifyAckDeadline: Modifies the ack deadline for a specific message. -// This method is useful -// to indicate that more time is needed to process a message by -// the -// subscriber, or to make the message available for redelivery if -// the -// processing was interrupted. Note that this does not modify -// the -// subscription-level `ackDeadlineSeconds` used for subsequent messages. +// This method is useful to indicate that more time is needed to process +// a message by the subscriber, or to make the message available for +// redelivery if the processing was interrupted. Note that this does not +// modify the subscription-level `ackDeadlineSeconds` used for +// subsequent messages. 
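A hedged sketch of the pull-and-extend flow that ModifyAckDeadline (defined below) supports, assuming `svc` is an initialized *pubsub.Service as in the earlier sketch and that the 60-second deadline is an arbitrary choice:

// extendAndAck pulls a small batch, extends the ack deadline while the
// messages are processed, then acknowledges them.
func extendAndAck(ctx context.Context, svc *pubsub.Service, sub string) error {
	resp, err := svc.Projects.Subscriptions.Pull(sub,
		&pubsub.PullRequest{MaxMessages: 10}).Context(ctx).Do()
	if err != nil {
		return err
	}
	var ackIDs []string
	for _, rm := range resp.ReceivedMessages {
		ackIDs = append(ackIDs, rm.AckId)
	}
	if len(ackIDs) == 0 {
		return nil
	}
	// More processing time for just these messages; the subscription-level
	// ackDeadlineSeconds is left untouched.
	if _, err := svc.Projects.Subscriptions.ModifyAckDeadline(sub,
		&pubsub.ModifyAckDeadlineRequest{AckIds: ackIDs, AckDeadlineSeconds: 60},
	).Context(ctx).Do(); err != nil {
		return err
	}
	_, err = svc.Projects.Subscriptions.Acknowledge(sub,
		&pubsub.AcknowledgeRequest{AckIds: ackIDs}).Context(ctx).Do()
	return err
}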
func (r *ProjectsSubscriptionsService) ModifyAckDeadline(subscription string, modifyackdeadlinerequest *ModifyAckDeadlineRequest) *ProjectsSubscriptionsModifyAckDeadlineCall { c := &ProjectsSubscriptionsModifyAckDeadlineCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.subscription = subscription @@ -4377,7 +4160,7 @@ func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Header() http.Header { func (c *ProjectsSubscriptionsModifyAckDeadlineCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4441,7 +4224,7 @@ func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Modifies the ack deadline for a specific message. This method is useful\nto indicate that more time is needed to process a message by the\nsubscriber, or to make the message available for redelivery if the\nprocessing was interrupted. Note that this does not modify the\nsubscription-level `ackDeadlineSeconds` used for subsequent messages.", + // "description": "Modifies the ack deadline for a specific message. This method is useful to indicate that more time is needed to process a message by the subscriber, or to make the message available for redelivery if the processing was interrupted. Note that this does not modify the subscription-level `ackDeadlineSeconds` used for subsequent messages.", // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyAckDeadline", // "httpMethod": "POST", // "id": "pubsub.projects.subscriptions.modifyAckDeadline", @@ -4450,7 +4233,7 @@ func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "subscription": { - // "description": "Required. The name of the subscription.\nFormat is `projects/{project}/subscriptions/{sub}`.", + // "description": "Required. The name of the subscription. Format is `projects/{project}/subscriptions/{sub}`.", // "location": "path", // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", // "required": true, @@ -4484,16 +4267,11 @@ type ProjectsSubscriptionsModifyPushConfigCall struct { } // ModifyPushConfig: Modifies the `PushConfig` for a specified -// subscription. -// -// This may be used to change a push subscription to a pull one -// (signified by -// an empty `PushConfig`) or vice versa, or change the endpoint URL and -// other -// attributes of a push subscription. Messages will accumulate for -// delivery -// continuously through the call regardless of changes to the -// `PushConfig`. +// subscription. This may be used to change a push subscription to a +// pull one (signified by an empty `PushConfig`) or vice versa, or +// change the endpoint URL and other attributes of a push subscription. +// Messages will accumulate for delivery continuously through the call +// regardless of changes to the `PushConfig`. 
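Per the description above, an empty PushConfig switches a push subscription to pull delivery; a sketch, assuming the same `svc` and a placeholder subscription name:

// toPullDelivery clears the push endpoint; an empty PushConfig signals
// that the subscription should be served via Pull instead.
func toPullDelivery(ctx context.Context, svc *pubsub.Service, sub string) error {
	_, err := svc.Projects.Subscriptions.ModifyPushConfig(sub,
		&pubsub.ModifyPushConfigRequest{PushConfig: &pubsub.PushConfig{}}).Context(ctx).Do()
	return err
}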
func (r *ProjectsSubscriptionsService) ModifyPushConfig(subscription string, modifypushconfigrequest *ModifyPushConfigRequest) *ProjectsSubscriptionsModifyPushConfigCall { c := &ProjectsSubscriptionsModifyPushConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.subscription = subscription @@ -4528,7 +4306,7 @@ func (c *ProjectsSubscriptionsModifyPushConfigCall) Header() http.Header { func (c *ProjectsSubscriptionsModifyPushConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4592,7 +4370,7 @@ func (c *ProjectsSubscriptionsModifyPushConfigCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Modifies the `PushConfig` for a specified subscription.\n\nThis may be used to change a push subscription to a pull one (signified by\nan empty `PushConfig`) or vice versa, or change the endpoint URL and other\nattributes of a push subscription. Messages will accumulate for delivery\ncontinuously through the call regardless of changes to the `PushConfig`.", + // "description": "Modifies the `PushConfig` for a specified subscription. This may be used to change a push subscription to a pull one (signified by an empty `PushConfig`) or vice versa, or change the endpoint URL and other attributes of a push subscription. Messages will accumulate for delivery continuously through the call regardless of changes to the `PushConfig`.", // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyPushConfig", // "httpMethod": "POST", // "id": "pubsub.projects.subscriptions.modifyPushConfig", @@ -4601,7 +4379,7 @@ func (c *ProjectsSubscriptionsModifyPushConfigCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "subscription": { - // "description": "Required. The name of the subscription.\nFormat is `projects/{project}/subscriptions/{sub}`.", + // "description": "Required. The name of the subscription. Format is `projects/{project}/subscriptions/{sub}`.", // "location": "path", // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", // "required": true, @@ -4635,8 +4413,7 @@ type ProjectsSubscriptionsPatchCall struct { } // Patch: Updates an existing subscription. Note that certain properties -// of a -// subscription, such as its topic, are not modifiable. +// of a subscription, such as its topic, are not modifiable. func (r *ProjectsSubscriptionsService) Patch(name string, updatesubscriptionrequest *UpdateSubscriptionRequest) *ProjectsSubscriptionsPatchCall { c := &ProjectsSubscriptionsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4671,7 +4448,7 @@ func (c *ProjectsSubscriptionsPatchCall) Header() http.Header { func (c *ProjectsSubscriptionsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4735,7 +4512,7 @@ func (c *ProjectsSubscriptionsPatchCall) Do(opts ...googleapi.CallOption) (*Subs } return ret, nil // { - // "description": "Updates an existing subscription. 
Note that certain properties of a\nsubscription, such as its topic, are not modifiable.", + // "description": "Updates an existing subscription. Note that certain properties of a subscription, such as its topic, are not modifiable.", // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", // "httpMethod": "PATCH", // "id": "pubsub.projects.subscriptions.patch", @@ -4744,7 +4521,7 @@ func (c *ProjectsSubscriptionsPatchCall) Do(opts ...googleapi.CallOption) (*Subs // ], // "parameters": { // "name": { - // "description": "Required. The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", + // "description": "Required. The name of the subscription. It must have the format `\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", // "location": "path", // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", // "required": true, @@ -4778,10 +4555,8 @@ type ProjectsSubscriptionsPullCall struct { } // Pull: Pulls messages from the server. The server may return -// `UNAVAILABLE` if -// there are too many concurrent pull requests pending for the -// given -// subscription. +// `UNAVAILABLE` if there are too many concurrent pull requests pending +// for the given subscription. func (r *ProjectsSubscriptionsService) Pull(subscription string, pullrequest *PullRequest) *ProjectsSubscriptionsPullCall { c := &ProjectsSubscriptionsPullCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.subscription = subscription @@ -4816,7 +4591,7 @@ func (c *ProjectsSubscriptionsPullCall) Header() http.Header { func (c *ProjectsSubscriptionsPullCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4880,7 +4655,7 @@ func (c *ProjectsSubscriptionsPullCall) Do(opts ...googleapi.CallOption) (*PullR } return ret, nil // { - // "description": "Pulls messages from the server. The server may return `UNAVAILABLE` if\nthere are too many concurrent pull requests pending for the given\nsubscription.", + // "description": "Pulls messages from the server. The server may return `UNAVAILABLE` if there are too many concurrent pull requests pending for the given subscription.", // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:pull", // "httpMethod": "POST", // "id": "pubsub.projects.subscriptions.pull", @@ -4889,7 +4664,7 @@ func (c *ProjectsSubscriptionsPullCall) Do(opts ...googleapi.CallOption) (*PullR // ], // "parameters": { // "subscription": { - // "description": "Required. The subscription from which messages should be pulled.\nFormat is `projects/{project}/subscriptions/{sub}`.", + // "description": "Required. The subscription from which messages should be pulled. 
Format is `projects/{project}/subscriptions/{sub}`.", // "location": "path", // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", // "required": true, @@ -4923,19 +4698,12 @@ type ProjectsSubscriptionsSeekCall struct { } // Seek: Seeks an existing subscription to a point in time or to a given -// snapshot, -// whichever is provided in the request. Snapshots are used in -// Seek -// o -// perations, which allow -// you to manage message acknowledgments in bulk. That is, you can set -// the -// acknowledgment state of messages in an existing subscription to the -// state -// captured by a snapshot. Note that both the subscription and the -// snapshot -// must be on the same topic. +// snapshot, whichever is provided in the request. Snapshots are used in +// [Seek]( https://cloud.google.com/pubsub/docs/replay-overview) +// operations, which allow you to manage message acknowledgments in +// bulk. That is, you can set the acknowledgment state of messages in an +// existing subscription to the state captured by a snapshot. Note that +// both the subscription and the snapshot must be on the same topic. func (r *ProjectsSubscriptionsService) Seek(subscription string, seekrequest *SeekRequest) *ProjectsSubscriptionsSeekCall { c := &ProjectsSubscriptionsSeekCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.subscription = subscription @@ -4970,7 +4738,7 @@ func (c *ProjectsSubscriptionsSeekCall) Header() http.Header { func (c *ProjectsSubscriptionsSeekCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5034,7 +4802,7 @@ func (c *ProjectsSubscriptionsSeekCall) Do(opts ...googleapi.CallOption) (*SeekR } return ret, nil // { - // "description": "Seeks an existing subscription to a point in time or to a given snapshot,\nwhichever is provided in the request. Snapshots are used in\n\u003ca href=\"https://cloud.google.com/pubsub/docs/replay-overview\"\u003eSeek\u003c/a\u003e\noperations, which allow\nyou to manage message acknowledgments in bulk. That is, you can set the\nacknowledgment state of messages in an existing subscription to the state\ncaptured by a snapshot. Note that both the subscription and the snapshot\nmust be on the same topic.", + // "description": "Seeks an existing subscription to a point in time or to a given snapshot, whichever is provided in the request. Snapshots are used in [Seek]( https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot. Note that both the subscription and the snapshot must be on the same topic.", // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:seek", // "httpMethod": "POST", // "id": "pubsub.projects.subscriptions.seek", @@ -5077,11 +4845,8 @@ type ProjectsSubscriptionsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -// -// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` -// errors. +// resource. Replaces any existing policy. Can return `NOT_FOUND`, +// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. 
func (r *ProjectsSubscriptionsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsSubscriptionsSetIamPolicyCall { c := &ProjectsSubscriptionsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5116,7 +4881,7 @@ func (c *ProjectsSubscriptionsSetIamPolicyCall) Header() http.Header { func (c *ProjectsSubscriptionsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5180,7 +4945,7 @@ func (c *ProjectsSubscriptionsSetIamPolicyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:setIamPolicy", // "httpMethod": "POST", // "id": "pubsub.projects.subscriptions.setIamPolicy", @@ -5189,7 +4954,7 @@ func (c *ProjectsSubscriptionsSetIamPolicyCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", // "required": true, @@ -5223,16 +4988,11 @@ type ProjectsSubscriptionsTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a `NOT_FOUND` error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. This +// operation may "fail open" without warning. 
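A sketch of a permission probe built on the TestIamPermissions call defined below; the permission strings are illustrative, and per the method docs this is meant for building permission-aware tooling, not for authorization enforcement:

// heldPermissions returns the subset of the requested permissions that the
// caller holds on the subscription. Not a substitute for server-side
// authorization checks.
func heldPermissions(ctx context.Context, svc *pubsub.Service, sub string) ([]string, error) {
	resp, err := svc.Projects.Subscriptions.TestIamPermissions(sub,
		&pubsub.TestIamPermissionsRequest{
			Permissions: []string{"pubsub.subscriptions.consume", "pubsub.subscriptions.get"},
		}).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil
}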
func (r *ProjectsSubscriptionsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsSubscriptionsTestIamPermissionsCall { c := &ProjectsSubscriptionsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5267,7 +5027,7 @@ func (c *ProjectsSubscriptionsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsSubscriptionsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5331,7 +5091,7 @@ func (c *ProjectsSubscriptionsTestIamPermissionsCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:testIamPermissions", // "httpMethod": "POST", // "id": "pubsub.projects.subscriptions.testIamPermissions", @@ -5340,7 +5100,7 @@ func (c *ProjectsSubscriptionsTestIamPermissionsCall) Do(opts ...googleapi.CallO // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", // "required": true, @@ -5374,10 +5134,8 @@ type ProjectsTopicsCreateCall struct { } // Create: Creates the given topic with the given name. See the -// -// reso -// urce name rules. +// [resource name rules]( +// https://cloud.google.com/pubsub/docs/admin#resource_names). func (r *ProjectsTopicsService) Create(name string, topic *Topic) *ProjectsTopicsCreateCall { c := &ProjectsTopicsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5412,7 +5170,7 @@ func (c *ProjectsTopicsCreateCall) Header() http.Header { func (c *ProjectsTopicsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5476,7 +5234,7 @@ func (c *ProjectsTopicsCreateCall) Do(opts ...googleapi.CallOption) (*Topic, err } return ret, nil // { - // "description": "Creates the given topic with the given name. 
See the\n\u003ca href=\"https://cloud.google.com/pubsub/docs/admin#resource_names\"\u003e\nresource name rules\u003c/a\u003e.", + // "description": "Creates the given topic with the given name. See the [resource name rules]( https://cloud.google.com/pubsub/docs/admin#resource_names).", // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", // "httpMethod": "PUT", // "id": "pubsub.projects.topics.create", @@ -5485,7 +5243,7 @@ func (c *ProjectsTopicsCreateCall) Do(opts ...googleapi.CallOption) (*Topic, err // ], // "parameters": { // "name": { - // "description": "Required. The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", + // "description": "Required. The name of the topic. It must have the format `\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", // "location": "path", // "pattern": "^projects/[^/]+/topics/[^/]+$", // "required": true, @@ -5518,14 +5276,11 @@ type ProjectsTopicsDeleteCall struct { } // Delete: Deletes the topic with the given name. Returns `NOT_FOUND` if -// the topic -// does not exist. After a topic is deleted, a new topic may be created -// with -// the same name; this is an entirely new topic with none of the -// old -// configuration or subscriptions. Existing subscriptions to this topic -// are -// not deleted, but their `topic` field is set to `_deleted-topic_`. +// the topic does not exist. After a topic is deleted, a new topic may +// be created with the same name; this is an entirely new topic with +// none of the old configuration or subscriptions. Existing +// subscriptions to this topic are not deleted, but their `topic` field +// is set to `_deleted-topic_`. func (r *ProjectsTopicsService) Delete(topic string) *ProjectsTopicsDeleteCall { c := &ProjectsTopicsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.topic = topic @@ -5559,7 +5314,7 @@ func (c *ProjectsTopicsDeleteCall) Header() http.Header { func (c *ProjectsTopicsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5618,7 +5373,7 @@ func (c *ProjectsTopicsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, err } return ret, nil // { - // "description": "Deletes the topic with the given name. Returns `NOT_FOUND` if the topic\ndoes not exist. After a topic is deleted, a new topic may be created with\nthe same name; this is an entirely new topic with none of the old\nconfiguration or subscriptions. Existing subscriptions to this topic are\nnot deleted, but their `topic` field is set to `_deleted-topic_`.", + // "description": "Deletes the topic with the given name. Returns `NOT_FOUND` if the topic does not exist. 
After a topic is deleted, a new topic may be created with the same name; this is an entirely new topic with none of the old configuration or subscriptions. Existing subscriptions to this topic are not deleted, but their `topic` field is set to `_deleted-topic_`.", // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", // "httpMethod": "DELETE", // "id": "pubsub.projects.topics.delete", @@ -5627,7 +5382,7 @@ func (c *ProjectsTopicsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, err // ], // "parameters": { // "topic": { - // "description": "Required. Name of the topic to delete.\nFormat is `projects/{project}/topics/{topic}`.", + // "description": "Required. Name of the topic to delete. Format is `projects/{project}/topics/{topic}`.", // "location": "path", // "pattern": "^projects/[^/]+/topics/[^/]+$", // "required": true, @@ -5701,7 +5456,7 @@ func (c *ProjectsTopicsGetCall) Header() http.Header { func (c *ProjectsTopicsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5772,7 +5527,7 @@ func (c *ProjectsTopicsGetCall) Do(opts ...googleapi.CallOption) (*Topic, error) // ], // "parameters": { // "topic": { - // "description": "Required. The name of the topic to get.\nFormat is `projects/{project}/topics/{topic}`.", + // "description": "Required. The name of the topic to get. Format is `projects/{project}/topics/{topic}`.", // "location": "path", // "pattern": "^projects/[^/]+/topics/[^/]+$", // "required": true, @@ -5802,9 +5557,8 @@ type ProjectsTopicsGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy // set. func (r *ProjectsTopicsService) GetIamPolicy(resource string) *ProjectsTopicsGetIamPolicyCall { c := &ProjectsTopicsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -5814,24 +5568,14 @@ func (r *ProjectsTopicsService) GetIamPolicy(resource string) *ProjectsTopicsGet // OptionsRequestedPolicyVersion sets the optional parameter // "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. -// -// Requests for policies with any conditional bindings must specify -// version 3. -// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. -// -// To learn which resources support conditions in their IAM policies, -// see -// the -// [IAM -// documentation](https://cloud.google.com/iam/help/conditions/r -// esource-policies). +// returned. Valid values are 0, 1, and 3. Requests specifying an +// invalid value will be rejected. Requests for policies with any +// conditional bindings must specify version 3. Policies without any +// conditional bindings may specify any valid value or leave the field +// unset. To learn which resources support conditions in their IAM +// policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). 
func (c *ProjectsTopicsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsTopicsGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -5874,7 +5618,7 @@ func (c *ProjectsTopicsGetIamPolicyCall) Header() http.Header { func (c *ProjectsTopicsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5936,7 +5680,7 @@ func (c *ProjectsTopicsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli } return ret, nil // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:getIamPolicy", // "httpMethod": "GET", // "id": "pubsub.projects.topics.getIamPolicy", @@ -5945,13 +5689,13 @@ func (c *ProjectsTopicsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + // "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" // }, // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/topics/[^/]+$", // "required": true, @@ -5996,9 +5740,8 @@ func (c *ProjectsTopicsListCall) PageSize(pageSize int64) *ProjectsTopicsListCal } // PageToken sets the optional parameter "pageToken": The value returned -// by the last `ListTopicsResponse`; indicates that this is -// a continuation of a prior `ListTopics` call, and that the system -// should +// by the last `ListTopicsResponse`; indicates that this is a +// continuation of a prior `ListTopics` call, and that the system should // return the next page of data. 
func (c *ProjectsTopicsListCall) PageToken(pageToken string) *ProjectsTopicsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -6042,7 +5785,7 @@ func (c *ProjectsTopicsListCall) Header() http.Header { func (c *ProjectsTopicsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6119,12 +5862,12 @@ func (c *ProjectsTopicsListCall) Do(opts ...googleapi.CallOption) (*ListTopicsRe // "type": "integer" // }, // "pageToken": { - // "description": "The value returned by the last `ListTopicsResponse`; indicates that this is\na continuation of a prior `ListTopics` call, and that the system should\nreturn the next page of data.", + // "description": "The value returned by the last `ListTopicsResponse`; indicates that this is a continuation of a prior `ListTopics` call, and that the system should return the next page of data.", // "location": "query", // "type": "string" // }, // "project": { - // "description": "Required. The name of the project in which to list topics.\nFormat is `projects/{project-id}`.", + // "description": "Required. The name of the project in which to list topics. Format is `projects/{project-id}`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -6175,8 +5918,7 @@ type ProjectsTopicsPatchCall struct { header_ http.Header } -// Patch: Updates an existing topic. Note that certain properties of -// a +// Patch: Updates an existing topic. Note that certain properties of a // topic are not modifiable. func (r *ProjectsTopicsService) Patch(name string, updatetopicrequest *UpdateTopicRequest) *ProjectsTopicsPatchCall { c := &ProjectsTopicsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -6212,7 +5954,7 @@ func (c *ProjectsTopicsPatchCall) Header() http.Header { func (c *ProjectsTopicsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6276,7 +6018,7 @@ func (c *ProjectsTopicsPatchCall) Do(opts ...googleapi.CallOption) (*Topic, erro } return ret, nil // { - // "description": "Updates an existing topic. Note that certain properties of a\ntopic are not modifiable.", + // "description": "Updates an existing topic. Note that certain properties of a topic are not modifiable.", // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", // "httpMethod": "PATCH", // "id": "pubsub.projects.topics.patch", @@ -6285,7 +6027,7 @@ func (c *ProjectsTopicsPatchCall) Do(opts ...googleapi.CallOption) (*Topic, erro // ], // "parameters": { // "name": { - // "description": "Required. The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", + // "description": "Required. The name of the topic. It must have the format `\"projects/{project}/topics/{topic}\"`. 
`{topic}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", // "location": "path", // "pattern": "^projects/[^/]+/topics/[^/]+$", // "required": true, @@ -6319,8 +6061,7 @@ type ProjectsTopicsPublishCall struct { } // Publish: Adds one or more messages to the topic. Returns `NOT_FOUND` -// if the topic -// does not exist. +// if the topic does not exist. func (r *ProjectsTopicsService) Publish(topic string, publishrequest *PublishRequest) *ProjectsTopicsPublishCall { c := &ProjectsTopicsPublishCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.topic = topic @@ -6355,7 +6096,7 @@ func (c *ProjectsTopicsPublishCall) Header() http.Header { func (c *ProjectsTopicsPublishCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6419,7 +6160,7 @@ func (c *ProjectsTopicsPublishCall) Do(opts ...googleapi.CallOption) (*PublishRe } return ret, nil // { - // "description": "Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic\ndoes not exist.", + // "description": "Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic does not exist.", // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:publish", // "httpMethod": "POST", // "id": "pubsub.projects.topics.publish", @@ -6428,7 +6169,7 @@ func (c *ProjectsTopicsPublishCall) Do(opts ...googleapi.CallOption) (*PublishRe // ], // "parameters": { // "topic": { - // "description": "Required. The messages in the request will be published on this topic.\nFormat is `projects/{project}/topics/{topic}`.", + // "description": "Required. The messages in the request will be published on this topic. Format is `projects/{project}/topics/{topic}`.", // "location": "path", // "pattern": "^projects/[^/]+/topics/[^/]+$", // "required": true, @@ -6462,11 +6203,8 @@ type ProjectsTopicsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -// -// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` -// errors. +// resource. Replaces any existing policy. Can return `NOT_FOUND`, +// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. func (r *ProjectsTopicsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsTopicsSetIamPolicyCall { c := &ProjectsTopicsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6501,7 +6239,7 @@ func (c *ProjectsTopicsSetIamPolicyCall) Header() http.Header { func (c *ProjectsTopicsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6565,7 +6303,7 @@ func (c *ProjectsTopicsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. 
Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:setIamPolicy", // "httpMethod": "POST", // "id": "pubsub.projects.topics.setIamPolicy", @@ -6574,7 +6312,7 @@ func (c *ProjectsTopicsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/topics/[^/]+$", // "required": true, @@ -6608,16 +6346,11 @@ type ProjectsTopicsTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a `NOT_FOUND` error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. This +// operation may "fail open" without warning. func (r *ProjectsTopicsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsTopicsTestIamPermissionsCall { c := &ProjectsTopicsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6652,7 +6385,7 @@ func (c *ProjectsTopicsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsTopicsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6716,7 +6449,7 @@ func (c *ProjectsTopicsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. 
This operation may \"fail open\" without warning.", // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:testIamPermissions", // "httpMethod": "POST", // "id": "pubsub.projects.topics.testIamPermissions", @@ -6725,7 +6458,7 @@ func (c *ProjectsTopicsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/topics/[^/]+$", // "required": true, @@ -6759,16 +6492,10 @@ type ProjectsTopicsSnapshotsListCall struct { } // List: Lists the names of the snapshots on this topic. Snapshots are -// used in -// Seek -// o -// perations, which allow -// you to manage message acknowledgments in bulk. That is, you can set -// the -// acknowledgment state of messages in an existing subscription to the -// state -// captured by a snapshot. +// used in [Seek](https://cloud.google.com/pubsub/docs/replay-overview) +// operations, which allow you to manage message acknowledgments in +// bulk. That is, you can set the acknowledgment state of messages in an +// existing subscription to the state captured by a snapshot. func (r *ProjectsTopicsSnapshotsService) List(topic string) *ProjectsTopicsSnapshotsListCall { c := &ProjectsTopicsSnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.topic = topic @@ -6783,10 +6510,9 @@ func (c *ProjectsTopicsSnapshotsListCall) PageSize(pageSize int64) *ProjectsTopi } // PageToken sets the optional parameter "pageToken": The value returned -// by the last `ListTopicSnapshotsResponse`; indicates -// that this is a continuation of a prior `ListTopicSnapshots` call, -// and -// that the system should return the next page of data. +// by the last `ListTopicSnapshotsResponse`; indicates that this is a +// continuation of a prior `ListTopicSnapshots` call, and that the +// system should return the next page of data. func (c *ProjectsTopicsSnapshotsListCall) PageToken(pageToken string) *ProjectsTopicsSnapshotsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -6829,7 +6555,7 @@ func (c *ProjectsTopicsSnapshotsListCall) Header() http.Header { func (c *ProjectsTopicsSnapshotsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6891,7 +6617,7 @@ func (c *ProjectsTopicsSnapshotsListCall) Do(opts ...googleapi.CallOption) (*Lis } return ret, nil // { - // "description": "Lists the names of the snapshots on this topic. Snapshots are used in\n\u003ca href=\"https://cloud.google.com/pubsub/docs/replay-overview\"\u003eSeek\u003c/a\u003e\noperations, which allow\nyou to manage message acknowledgments in bulk. That is, you can set the\nacknowledgment state of messages in an existing subscription to the state\ncaptured by a snapshot.", + // "description": "Lists the names of the snapshots on this topic. Snapshots are used in [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. 
That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot.", // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}/snapshots", // "httpMethod": "GET", // "id": "pubsub.projects.topics.snapshots.list", @@ -6906,12 +6632,12 @@ func (c *ProjectsTopicsSnapshotsListCall) Do(opts ...googleapi.CallOption) (*Lis // "type": "integer" // }, // "pageToken": { - // "description": "The value returned by the last `ListTopicSnapshotsResponse`; indicates\nthat this is a continuation of a prior `ListTopicSnapshots` call, and\nthat the system should return the next page of data.", + // "description": "The value returned by the last `ListTopicSnapshotsResponse`; indicates that this is a continuation of a prior `ListTopicSnapshots` call, and that the system should return the next page of data.", // "location": "query", // "type": "string" // }, // "topic": { - // "description": "Required. The name of the topic that snapshots are attached to.\nFormat is `projects/{project}/topics/{topic}`.", + // "description": "Required. The name of the topic that snapshots are attached to. Format is `projects/{project}/topics/{topic}`.", // "location": "path", // "pattern": "^projects/[^/]+/topics/[^/]+$", // "required": true, @@ -6962,7 +6688,7 @@ type ProjectsTopicsSubscriptionsListCall struct { header_ http.Header } -// List: Lists the names of the subscriptions on this topic. +// List: Lists the names of the attached subscriptions on this topic. func (r *ProjectsTopicsSubscriptionsService) List(topic string) *ProjectsTopicsSubscriptionsListCall { c := &ProjectsTopicsSubscriptionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.topic = topic @@ -6977,10 +6703,9 @@ func (c *ProjectsTopicsSubscriptionsListCall) PageSize(pageSize int64) *Projects } // PageToken sets the optional parameter "pageToken": The value returned -// by the last `ListTopicSubscriptionsResponse`; indicates -// that this is a continuation of a prior `ListTopicSubscriptions` call, -// and -// that the system should return the next page of data. +// by the last `ListTopicSubscriptionsResponse`; indicates that this is +// a continuation of a prior `ListTopicSubscriptions` call, and that the +// system should return the next page of data. 
func (c *ProjectsTopicsSubscriptionsListCall) PageToken(pageToken string) *ProjectsTopicsSubscriptionsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -7023,7 +6748,7 @@ func (c *ProjectsTopicsSubscriptionsListCall) Header() http.Header { func (c *ProjectsTopicsSubscriptionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7085,7 +6810,7 @@ func (c *ProjectsTopicsSubscriptionsListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Lists the names of the subscriptions on this topic.", + // "description": "Lists the names of the attached subscriptions on this topic.", // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}/subscriptions", // "httpMethod": "GET", // "id": "pubsub.projects.topics.subscriptions.list", @@ -7100,12 +6825,12 @@ func (c *ProjectsTopicsSubscriptionsListCall) Do(opts ...googleapi.CallOption) ( // "type": "integer" // }, // "pageToken": { - // "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates\nthat this is a continuation of a prior `ListTopicSubscriptions` call, and\nthat the system should return the next page of data.", + // "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates that this is a continuation of a prior `ListTopicSubscriptions` call, and that the system should return the next page of data.", // "location": "query", // "type": "string" // }, // "topic": { - // "description": "Required. The name of the topic that subscriptions are attached to.\nFormat is `projects/{project}/topics/{topic}`.", + // "description": "Required. The name of the topic that subscriptions are attached to. Format is `projects/{project}/topics/{topic}`.", // "location": "path", // "pattern": "^projects/[^/]+/topics/[^/]+$", // "required": true, diff --git a/vendor/google.golang.org/api/runtimeconfig/v1beta1/runtimeconfig-api.json b/vendor/google.golang.org/api/runtimeconfig/v1beta1/runtimeconfig-api.json index 281a8666e54..a0fc21f1b78 100644 --- a/vendor/google.golang.org/api/runtimeconfig/v1beta1/runtimeconfig-api.json +++ b/vendor/google.golang.org/api/runtimeconfig/v1beta1/runtimeconfig-api.json @@ -113,7 +113,7 @@ "configs": { "methods": { "create": { - "description": "Creates a new RuntimeConfig resource. The configuration name must be\nunique within project.", + "description": "Creates a new RuntimeConfig resource. The configuration name must be unique within project.", "flatPath": "v1beta1/projects/{projectsId}/configs", "httpMethod": "POST", "id": "runtimeconfig.projects.configs.create", @@ -122,14 +122,14 @@ ], "parameters": { "parent": { - "description": "The [project\nID](https://support.google.com/cloud/answer/6158840?hl=en\u0026ref_topic=6158848)\nfor this request, in the format `projects/[PROJECT_ID]`.", + "description": "The [project ID](https://support.google.com/cloud/answer/6158840?hl=en\u0026ref_topic=6158848) for this request, in the format `projects/[PROJECT_ID]`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, "type": "string" }, "requestId": { - "description": "An optional but recommended unique `request_id`. 
If the server\nreceives two `create()` requests with the same\n`request_id`, then the second request will be ignored and the\nfirst resource created and stored in the backend is returned.\nEmpty `request_id` fields are ignored.\n\nIt is responsibility of the client to ensure uniqueness of the\n`request_id` strings.\n\n`request_id` strings are limited to 64 characters.", + "description": "An optional but recommended unique `request_id`. If the server receives two `create()` requests with the same `request_id`, then the second request will be ignored and the first resource created and stored in the backend is returned. Empty `request_id` fields are ignored. It is responsibility of the client to ensure uniqueness of the `request_id` strings. `request_id` strings are limited to 64 characters.", "location": "query", "type": "string" } @@ -156,7 +156,7 @@ ], "parameters": { "name": { - "description": "The RuntimeConfig resource to delete, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + "description": "The RuntimeConfig resource to delete, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+$", "required": true, @@ -182,7 +182,7 @@ ], "parameters": { "name": { - "description": "The name of the RuntimeConfig resource to retrieve, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + "description": "The name of the RuntimeConfig resource to retrieve, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+$", "required": true, @@ -199,7 +199,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}:getIamPolicy", "httpMethod": "GET", "id": "runtimeconfig.projects.configs.getIamPolicy", @@ -208,13 +208,13 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" }, "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. 
See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+$", "required": true, @@ -240,18 +240,18 @@ ], "parameters": { "pageSize": { - "description": "Specifies the number of results to return per page. If there are fewer\nelements than the specified number, returns all elements.", + "description": "Specifies the number of results to return per page. If there are fewer elements than the specified number, returns all elements.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to a `nextPageToken`\nreturned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to a `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, "parent": { - "description": "The [project\nID](https://support.google.com/cloud/answer/6158840?hl=en\u0026ref_topic=6158848)\nfor this request, in the format `projects/[PROJECT_ID]`.", + "description": "The [project ID](https://support.google.com/cloud/answer/6158840?hl=en\u0026ref_topic=6158848) for this request, in the format `projects/[PROJECT_ID]`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -268,7 +268,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}:setIamPolicy", "httpMethod": "POST", "id": "runtimeconfig.projects.configs.setIamPolicy", @@ -277,7 +277,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+$", "required": true, @@ -297,7 +297,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. 
This operation may \"fail open\" without warning.", "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}:testIamPermissions", "httpMethod": "POST", "id": "runtimeconfig.projects.configs.testIamPermissions", @@ -306,7 +306,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+$", "required": true, @@ -335,7 +335,7 @@ ], "parameters": { "name": { - "description": "The name of the RuntimeConfig resource to update, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + "description": "The name of the RuntimeConfig resource to update, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+$", "required": true, @@ -359,7 +359,7 @@ "operations": { "methods": { "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/operations/{operationsId}", "httpMethod": "GET", "id": "runtimeconfig.projects.configs.operations.get", @@ -385,7 +385,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/operations/{operationsId}:testIamPermissions", "httpMethod": "POST", "id": "runtimeconfig.projects.configs.operations.testIamPermissions", @@ -394,7 +394,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+/operations/.*$", "required": true, @@ -418,7 +418,7 @@ "variables": { "methods": { "create": { - "description": "Creates a variable within the given configuration. 
You cannot create\na variable with a name that is a prefix of an existing variable name, or a\nname that has an existing variable name as a prefix.\n\nTo learn more about creating a variable, read the\n[Setting and Getting\nData](/deployment-manager/runtime-configurator/set-and-get-variables)\ndocumentation.", + "description": "Creates a variable within the given configuration. You cannot create a variable with a name that is a prefix of an existing variable name, or a name that has an existing variable name as a prefix. To learn more about creating a variable, read the [Setting and Getting Data](/deployment-manager/runtime-configurator/set-and-get-variables) documentation.", "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables", "httpMethod": "POST", "id": "runtimeconfig.projects.configs.variables.create", @@ -427,14 +427,14 @@ ], "parameters": { "parent": { - "description": "The path to the RutimeConfig resource that this variable should belong to.\nThe configuration must exist beforehand; the path must be in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + "description": "The path to the RutimeConfig resource that this variable should belong to. The configuration must exist beforehand; the path must be in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+$", "required": true, "type": "string" }, "requestId": { - "description": "An optional but recommended unique `request_id`. If the server\nreceives two `create()` requests with the same\n`request_id`, then the second request will be ignored and the\nfirst resource created and stored in the backend is returned.\nEmpty `request_id` fields are ignored.\n\nIt is responsibility of the client to ensure uniqueness of the\n`request_id` strings.\n\n`request_id` strings are limited to 64 characters.", + "description": "An optional but recommended unique `request_id`. If the server receives two `create()` requests with the same `request_id`, then the second request will be ignored and the first resource created and stored in the backend is returned. Empty `request_id` fields are ignored. It is responsibility of the client to ensure uniqueness of the `request_id` strings. `request_id` strings are limited to 64 characters.", "location": "query", "type": "string" } @@ -452,7 +452,7 @@ ] }, "delete": { - "description": "Deletes a variable or multiple variables.\n\nIf you specify a variable name, then that variable is deleted. If you\nspecify a prefix and `recursive` is true, then all variables with that\nprefix are deleted. You must set a `recursive` to true if you delete\nvariables by prefix.", + "description": "Deletes a variable or multiple variables. If you specify a variable name, then that variable is deleted. If you specify a prefix and `recursive` is true, then all variables with that prefix are deleted. 
You must set a `recursive` to true if you delete variables by prefix.", "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables/{variablesId}", "httpMethod": "DELETE", "id": "runtimeconfig.projects.configs.variables.delete", @@ -461,14 +461,14 @@ ], "parameters": { "name": { - "description": "The name of the variable to delete, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]`", + "description": "The name of the variable to delete, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]`", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+/variables/.*$", "required": true, "type": "string" }, "recursive": { - "description": "Set to `true` to recursively delete multiple variables with the same\nprefix.", + "description": "Set to `true` to recursively delete multiple variables with the same prefix.", "location": "query", "type": "boolean" } @@ -492,7 +492,7 @@ ], "parameters": { "name": { - "description": "The name of the variable to return, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIBLE_NAME]`", + "description": "The name of the variable to return, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIBLE_NAME]`", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+/variables/.*$", "required": true, @@ -509,7 +509,7 @@ ] }, "list": { - "description": "Lists variables within given a configuration, matching any provided\nfilters. This only lists variable names, not the values, unless\n`return_values` is true, in which case only variables that user has IAM\npermission to GetVariable will be returned.", + "description": "Lists variables within given a configuration, matching any provided filters. This only lists variable names, not the values, unless `return_values` is true, in which case only variables that user has IAM permission to GetVariable will be returned.", "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables", "httpMethod": "GET", "id": "runtimeconfig.projects.configs.variables.list", @@ -518,30 +518,30 @@ ], "parameters": { "filter": { - "description": "Filters variables by matching the specified filter. For example:\n\n`projects/example-project/config/[CONFIG_NAME]/variables/example-variable`.", + "description": "Filters variables by matching the specified filter. For example: `projects/example-project/config/[CONFIG_NAME]/variables/example-variable`.", "location": "query", "type": "string" }, "pageSize": { - "description": "Specifies the number of results to return per page. If there are fewer\nelements than the specified number, returns all elements.", + "description": "Specifies the number of results to return per page. If there are fewer elements than the specified number, returns all elements.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to a `nextPageToken`\nreturned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to a `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, "parent": { - "description": "The path to the RuntimeConfig resource for which you want to list\nvariables. 
The configuration must exist beforehand; the path must be in the\nformat:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + "description": "The path to the RuntimeConfig resource for which you want to list variables. The configuration must exist beforehand; the path must be in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+$", "required": true, "type": "string" }, "returnValues": { - "description": "The flag indicates whether the user wants to return values of variables.\nIf true, then only those variables that user has IAM GetVariable permission\nwill be returned along with their values.", + "description": "The flag indicates whether the user wants to return values of variables. If true, then only those variables that user has IAM GetVariable permission will be returned along with their values.", "location": "query", "type": "boolean" } @@ -556,7 +556,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables/{variablesId}:testIamPermissions", "httpMethod": "POST", "id": "runtimeconfig.projects.configs.variables.testIamPermissions", @@ -565,7 +565,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. 
See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+/variables/.*$", "required": true, @@ -594,7 +594,7 @@ ], "parameters": { "name": { - "description": "The name of the variable to update, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]`", + "description": "The name of the variable to update, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]`", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+/variables/.*$", "required": true, @@ -614,7 +614,7 @@ ] }, "watch": { - "description": "Watches a specific variable and waits for a change in the variable's value.\nWhen there is a change, this method returns the new value or times out.\n\nIf a variable is deleted while being watched, the `variableState` state is\nset to `DELETED` and the method returns the last known variable `value`.\n\nIf you set the deadline for watching to a larger value than internal\ntimeout (60 seconds), the current variable value is returned and the\n`variableState` will be `VARIABLE_STATE_UNSPECIFIED`.\n\nTo learn more about creating a watcher, read the\n[Watching a Variable for\nChanges](/deployment-manager/runtime-configurator/watching-a-variable)\ndocumentation.", + "description": "Watches a specific variable and waits for a change in the variable's value. When there is a change, this method returns the new value or times out. If a variable is deleted while being watched, the `variableState` state is set to `DELETED` and the method returns the last known variable `value`. If you set the deadline for watching to a larger value than internal timeout (60 seconds), the current variable value is returned and the `variableState` will be `VARIABLE_STATE_UNSPECIFIED`. To learn more about creating a watcher, read the [Watching a Variable for Changes](/deployment-manager/runtime-configurator/watching-a-variable) documentation.", "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables/{variablesId}:watch", "httpMethod": "POST", "id": "runtimeconfig.projects.configs.variables.watch", @@ -623,7 +623,7 @@ ], "parameters": { "name": { - "description": "The name of the variable to watch, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + "description": "The name of the variable to watch, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+/variables/.*$", "required": true, @@ -647,7 +647,7 @@ "waiters": { "methods": { "create": { - "description": "Creates a Waiter resource. This operation returns a long-running Operation\nresource which can be polled for completion. However, a waiter with the\ngiven name will exist (and can be retrieved) prior to the operation\ncompleting. If the operation fails, the failed Waiter resource will\nstill exist and must be deleted prior to subsequent creation attempts.", + "description": "Creates a Waiter resource. This operation returns a long-running Operation resource which can be polled for completion. However, a waiter with the given name will exist (and can be retrieved) prior to the operation completing. 
If the operation fails, the failed Waiter resource will still exist and must be deleted prior to subsequent creation attempts.", "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/waiters", "httpMethod": "POST", "id": "runtimeconfig.projects.configs.waiters.create", @@ -656,14 +656,14 @@ ], "parameters": { "parent": { - "description": "The path to the configuration that will own the waiter.\nThe configuration must exist beforehand; the path must be in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`.", + "description": "The path to the configuration that will own the waiter. The configuration must exist beforehand; the path must be in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`.", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+$", "required": true, "type": "string" }, "requestId": { - "description": "An optional but recommended unique `request_id`. If the server\nreceives two `create()` requests with the same\n`request_id`, then the second request will be ignored and the\nfirst resource created and stored in the backend is returned.\nEmpty `request_id` fields are ignored.\n\nIt is responsibility of the client to ensure uniqueness of the\n`request_id` strings.\n\n`request_id` strings are limited to 64 characters.", + "description": "An optional but recommended unique `request_id`. If the server receives two `create()` requests with the same `request_id`, then the second request will be ignored and the first resource created and stored in the backend is returned. Empty `request_id` fields are ignored. It is responsibility of the client to ensure uniqueness of the `request_id` strings. `request_id` strings are limited to 64 characters.", "location": "query", "type": "string" } @@ -690,7 +690,7 @@ ], "parameters": { "name": { - "description": "The Waiter resource to delete, in the format:\n\n `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]`", + "description": "The Waiter resource to delete, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]`", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+/waiters/[^/]+$", "required": true, @@ -716,7 +716,7 @@ ], "parameters": { "name": { - "description": "The fully-qualified name of the Waiter resource object to retrieve, in the\nformat:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]`", + "description": "The fully-qualified name of the Waiter resource object to retrieve, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]`", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+/waiters/[^/]+$", "required": true, @@ -742,18 +742,18 @@ ], "parameters": { "pageSize": { - "description": "Specifies the number of results to return per page. If there are fewer\nelements than the specified number, returns all elements.", + "description": "Specifies the number of results to return per page. If there are fewer elements than the specified number, returns all elements.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to a `nextPageToken`\nreturned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. 
Set `pageToken` to a `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, "parent": { - "description": "The path to the configuration for which you want to get a list of waiters.\nThe configuration must exist beforehand; the path must be in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + "description": "The path to the configuration for which you want to get a list of waiters. The configuration must exist beforehand; the path must be in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+$", "required": true, @@ -770,7 +770,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/waiters/{waitersId}:testIamPermissions", "httpMethod": "POST", "id": "runtimeconfig.projects.configs.waiters.testIamPermissions", @@ -779,7 +779,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/configs/[^/]+/waiters/[^/]+$", "required": true, @@ -805,37 +805,41 @@ } } }, - "revision": "20200504", + "revision": "20200831", "rootUrl": "https://runtimeconfig.googleapis.com/", "schemas": { "Binding": { "description": "Associates `members` with a `role`.", "id": "Binding", "properties": { + "bindingId": { + "description": "A client-specified ID for this binding. Expected to be globally unique to support the internal bindings-by-ID API.", + "type": "string" + }, "condition": { "$ref": "Expr", - "description": "The condition that is associated with this binding.\n\nIf the condition evaluates to `true`, then this binding applies to the\ncurrent request.\n\nIf the condition evaluates to `false`, then this binding does not apply to\nthe current request. However, a different role binding might grant the same\nrole to one or more of the members in this binding.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies)." + "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. 
To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a user that has been recently deleted. For\n example, `alice@example.com?uid=123456789012345678901`. If the user is\n recovered, this value reverts to `user:{emailid}` and the recovered user\n retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus\n unique identifier) representing a service account that has been recently\n deleted. For example,\n `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.\n If the service account is undeleted, this value reverts to\n `serviceAccount:{emailid}` and the undeleted service account retains the\n role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a Google group that has been recently\n deleted. For example, `admins@example.com?uid=123456789012345678901`. If\n the group is recovered, this value reverts to `group:{emailid}` and the\n recovered group retains the role in the binding.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. 
If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", "items": { "type": "string" }, "type": "array" }, "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", "type": "string" } }, "type": "object" }, "Cardinality": { - "description": "A Cardinality condition for the Waiter resource. A cardinality condition is\nmet when the number of variables under a specified path prefix reaches a\npredefined number. For example, if you set a Cardinality condition where\nthe `path` is set to `/foo` and the number of paths is set to `2`, the\nfollowing variables would meet the condition in a RuntimeConfig resource:\n\n+ `/foo/variable1 = \"value1\"`\n+ `/foo/variable2 = \"value2\"`\n+ `/bar/variable3 = \"value3\"`\n\nIt would not satisfy the same condition with the `number` set to\n`3`, however, because there is only 2 paths that start with `/foo`.\nCardinality conditions are recursive; all subtrees under the specific\npath prefix are counted.", + "description": "A Cardinality condition for the Waiter resource. A cardinality condition is met when the number of variables under a specified path prefix reaches a predefined number. For example, if you set a Cardinality condition where the `path` is set to `/foo` and the number of paths is set to `2`, the following variables would meet the condition in a RuntimeConfig resource: + `/foo/variable1 = \"value1\"` + `/foo/variable2 = \"value2\"` + `/bar/variable3 = \"value3\"` It would not satisfy the same condition with the `number` set to `3`, however, because there is only 2 paths that start with `/foo`. Cardinality conditions are recursive; all subtrees under the specific path prefix are counted.", "id": "Cardinality", "properties": { "number": { - "description": "The number variables under the `path` that must exist to meet this\ncondition. Defaults to 1 if not specified.", + "description": "The number variables under the `path` that must exist to meet this condition. Defaults to 1 if not specified.", "format": "int32", "type": "integer" }, @@ -847,7 +851,7 @@ "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. 
For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" @@ -864,41 +868,41 @@ "type": "object" }, "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL)\nsyntax. CEL is a C-like expression language. The syntax and semantics of CEL\nare documented at https://github.com/google/cel-spec.\n\nExample (Comparison):\n\n title: \"Summary size limit\"\n description: \"Determines if a summary is less than 100 chars\"\n expression: \"document.summary.size() \u003c 100\"\n\nExample (Equality):\n\n title: \"Requestor is owner\"\n description: \"Determines if requestor is the document owner\"\n expression: \"document.owner == request.auth.claims.email\"\n\nExample (Logic):\n\n title: \"Public documents\"\n description: \"Determine whether the document should be publicly visible\"\n expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\n\nExample (Data Manipulation):\n\n title: \"Notification string\"\n description: \"Create a notification string with a timestamp.\"\n expression: \"'New message received at ' + string(document.create_time)\"\n\nThe exact variables and functions that may be referenced within an expression\nare determined by the service that evaluates it. See the service\ndocumentation for additional information.", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { - "description": "Optional. Description of the expression. This is a longer text which\ndescribes the expression, e.g. when hovered over it in a UI.", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, "expression": { - "description": "Textual representation of an expression in Common Expression Language\nsyntax.", + "description": "Textual representation of an expression in Common Expression Language syntax.", "type": "string" }, "location": { - "description": "Optional. String indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. 
a file name and a position in the file.", "type": "string" }, "title": { - "description": "Optional. Title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", "type": "string" } }, "type": "object" }, "ListConfigsResponse": { - "description": "`ListConfigs()` returns the following response. The order of returned\nobjects is arbitrary; that is, it is not ordered in any particular way.", + "description": "`ListConfigs()` returns the following response. The order of returned objects is arbitrary; that is, it is not ordered in any particular way.", "id": "ListConfigsResponse", "properties": { "configs": { - "description": "A list of the configurations in the project. The order of returned\nobjects is arbitrary; that is, it is not ordered in any particular way.", + "description": "A list of the configurations in the project. The order of returned objects is arbitrary; that is, it is not ordered in any particular way.", "items": { "$ref": "RuntimeConfig" }, "type": "array" }, "nextPageToken": { - "description": "This token allows you to get the next page of results for list requests.\nIf the number of results is larger than `pageSize`, use the `nextPageToken`\nas a value for the query parameter `pageToken` in the next list request.\nSubsequent list requests will have their own `nextPageToken` to continue\npaging through the results", + "description": "This token allows you to get the next page of results for list requests. If the number of results is larger than `pageSize`, use the `nextPageToken` as a value for the query parameter `pageToken` in the next list request. Subsequent list requests will have their own `nextPageToken` to continue paging through the results", "type": "string" } }, @@ -909,11 +913,11 @@ "id": "ListVariablesResponse", "properties": { "nextPageToken": { - "description": "This token allows you to get the next page of results for list requests.\nIf the number of results is larger than `pageSize`, use the `nextPageToken`\nas a value for the query parameter `pageToken` in the next list request.\nSubsequent list requests will have their own `nextPageToken` to continue\npaging through the results", + "description": "This token allows you to get the next page of results for list requests. If the number of results is larger than `pageSize`, use the `nextPageToken` as a value for the query parameter `pageToken` in the next list request. Subsequent list requests will have their own `nextPageToken` to continue paging through the results", "type": "string" }, "variables": { - "description": "A list of variables and their values. The order of returned variable\nobjects is arbitrary.", + "description": "A list of variables and their values. The order of returned variable objects is arbitrary.", "items": { "$ref": "Variable" }, @@ -923,11 +927,11 @@ "type": "object" }, "ListWaitersResponse": { - "description": "Response for the `ListWaiters()` method.\nOrder of returned waiter objects is arbitrary.", + "description": "Response for the `ListWaiters()` method. 
Order of returned waiter objects is arbitrary.", "id": "ListWaitersResponse", "properties": { "nextPageToken": { - "description": "This token allows you to get the next page of results for list requests.\nIf the number of results is larger than `pageSize`, use the `nextPageToken`\nas a value for the query parameter `pageToken` in the next list request.\nSubsequent list requests will have their own `nextPageToken` to continue\npaging through the results", + "description": "This token allows you to get the next page of results for list requests. If the number of results is larger than `pageSize`, use the `nextPageToken` as a value for the query parameter `pageToken` in the next list request. Subsequent list requests will have their own `nextPageToken` to continue paging through the results", "type": "string" }, "waiters": { @@ -941,11 +945,11 @@ "type": "object" }, "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", "properties": { "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", "type": "boolean" }, "error": { @@ -957,11 +961,11 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", "type": "object" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, "response": { @@ -969,30 +973,30 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "description": "The normal response of the operation in case of success. 
If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", "type": "object" } }, "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access\ncontrols for Google Cloud resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). A `role` is a named list of\npermissions; each `role` can be an IAM predefined role or a user-created\ncustom role.\n\nFor some types of Google Cloud resources, a `binding` can also specify a\n`condition`, which is a logical expression that allows access to a resource\nonly if the expression evaluates to `true`. A condition can add constraints\nbased on attributes of the request, the resource, or both. To learn which\nresources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).\n\n**JSON example:**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\n \"user:eve@example.com\"\n ],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n }\n\n**YAML example:**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n - etag: BwWWja0YfJA=\n - version: 3\n\nFor a description of IAM and its features, see the\n[IAM documentation](https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "bindings": { - "description": "Associates a list of `members` to a `role`. Optionally, may specify a\n`condition` that determines how and when the `bindings` are applied. Each\nof the `bindings` must contain at least one member.", + "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", "format": "byte", "type": "string" }, "version": { - "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. 
Requests that specify an invalid value\nare rejected.\n\nAny operation that affects conditional role bindings must specify version\n`3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding\n* Adding a conditional role binding to a policy\n* Changing a conditional role binding in a policy\n* Removing any role binding, with or without a condition, from a policy\n that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may\nspecify any valid version or leave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -1000,7 +1004,7 @@ "type": "object" }, "RuntimeConfig": { - "description": "A RuntimeConfig resource is the primary resource in the Cloud RuntimeConfig\nservice. A RuntimeConfig resource consists of metadata and a hierarchy of\nvariables.", + "description": "A RuntimeConfig resource is the primary resource in the Cloud RuntimeConfig service. A RuntimeConfig resource consists of metadata and a hierarchy of variables.", "id": "RuntimeConfig", "properties": { "description": { @@ -1008,7 +1012,7 @@ "type": "string" }, "name": { - "description": "The resource name of a runtime config. The name must have the format:\n\n projects/[PROJECT_ID]/configs/[CONFIG_NAME]\n\nThe `[PROJECT_ID]` must be a valid project ID, and `[CONFIG_NAME]` is an\narbitrary name that matches the\n`[0-9A-Za-z](?:[_.A-Za-z0-9-]{0,62}[_.A-Za-z0-9])?` regular expression.\nThe length of `[CONFIG_NAME]` must be less than 64 characters.\n\nYou pick the RuntimeConfig resource name, but the server will validate that\nthe name adheres to this format. After you create the resource, you cannot\nchange the resource's name.", + "description": "The resource name of a runtime config. 
The name must have the format: projects/[PROJECT_ID]/configs/[CONFIG_NAME] The `[PROJECT_ID]` must be a valid project ID, and `[CONFIG_NAME]` is an arbitrary name that matches the `[0-9A-Za-z](?:[_.A-Za-z0-9-]{0,62}[_.A-Za-z0-9])?` regular expression. The length of `[CONFIG_NAME]` must be less than 64 characters. You pick the RuntimeConfig resource name, but the server will validate that the name adheres to this format. After you create the resource, you cannot change the resource's name.", "type": "string" } }, @@ -1020,13 +1024,13 @@ "properties": { "policy": { "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." } }, "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -1035,7 +1039,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -1046,7 +1050,7 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, @@ -1057,7 +1061,7 @@ "id": "TestIamPermissionsRequest", "properties": { "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. 
For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "items": { "type": "string" }, @@ -1071,7 +1075,7 @@ "id": "TestIamPermissionsResponse", "properties": { "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { "type": "string" }, @@ -1081,15 +1085,15 @@ "type": "object" }, "Variable": { - "description": "Describes a single variable within a RuntimeConfig resource.\nThe name denotes the hierarchical variable name. For example,\n`ports/serving_port` is a valid variable name. The variable value is an\nopaque string and only leaf variables can have values (that is, variables\nthat do not have any child variables).", + "description": "Describes a single variable within a RuntimeConfig resource. The name denotes the hierarchical variable name. For example, `ports/serving_port` is a valid variable name. The variable value is an opaque string and only leaf variables can have values (that is, variables that do not have any child variables).", "id": "Variable", "properties": { "name": { - "description": "The name of the variable resource, in the format:\n\n projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]\n\nThe `[PROJECT_ID]` must be a valid project ID, `[CONFIG_NAME]` must be a\nvalid RuntimeConfig resource and `[VARIABLE_NAME]` follows Unix file system\nfile path naming.\n\nThe `[VARIABLE_NAME]` can contain ASCII letters, numbers, slashes and\ndashes. Slashes are used as path element separators and are not part of the\n`[VARIABLE_NAME]` itself, so `[VARIABLE_NAME]` must contain at least one\nnon-slash character. Multiple slashes are coalesced into single slash\ncharacter. Each path segment should match\n[0-9A-Za-z](?:[_.A-Za-z0-9-]{0,62}[_.A-Za-z0-9])? regular expression.\nThe length of a `[VARIABLE_NAME]` must be less than 256 characters.\n\nOnce you create a variable, you cannot change the variable name.", + "description": "The name of the variable resource, in the format: projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME] The `[PROJECT_ID]` must be a valid project ID, `[CONFIG_NAME]` must be a valid RuntimeConfig resource and `[VARIABLE_NAME]` follows Unix file system file path naming. The `[VARIABLE_NAME]` can contain ASCII letters, numbers, slashes and dashes. Slashes are used as path element separators and are not part of the `[VARIABLE_NAME]` itself, so `[VARIABLE_NAME]` must contain at least one non-slash character. Multiple slashes are coalesced into single slash character. Each path segment should match [0-9A-Za-z](?:[_.A-Za-z0-9-]{0,62}[_.A-Za-z0-9])? regular expression. The length of a `[VARIABLE_NAME]` must be less than 256 characters. Once you create a variable, you cannot change the variable name.", "type": "string" }, "state": { - "description": "Output only. The current state of the variable. The variable state\nindicates the outcome of the `variables().watch` call and is visible\nthrough the `get` and `list` calls.", + "description": "Output only. The current state of the variable. The variable state indicates the outcome of the `variables().watch` call and is visible through the `get` and `list` calls.", "enum": [ "VARIABLE_STATE_UNSPECIFIED", "UPDATED", @@ -1103,16 +1107,16 @@ "type": "string" }, "text": { - "description": "The string value of the variable. The length of the value must be less\nthan 4096 bytes. 
Empty values are also accepted. For example,\n`text: \"my text value\"`. The string must be valid UTF-8.", + "description": "The string value of the variable. The length of the value must be less than 4096 bytes. Empty values are also accepted. For example, `text: \"my text value\"`. The string must be valid UTF-8.", "type": "string" }, "updateTime": { - "description": "Output only. The time of the last variable update.\nTimestamp will be UTC timestamp.", + "description": "Output only. The time of the last variable update. Timestamp will be UTC timestamp.", "format": "google-datetime", "type": "string" }, "value": { - "description": "The binary value of the variable. The length of the value must be less\nthan 4096 bytes. Empty values are also accepted. The value must be\nbase64 encoded, and must comply with IETF RFC4648\n(https://www.ietf.org/rfc/rfc4648.txt). Only one of `value` or `text`\ncan be set.", + "description": "The binary value of the variable. The length of the value must be less than 4096 bytes. Empty values are also accepted. The value must be base64 encoded, and must comply with IETF RFC4648 (https://www.ietf.org/rfc/rfc4648.txt). Only one of `value` or `text` can be set.", "format": "byte", "type": "string" } @@ -1120,36 +1124,36 @@ "type": "object" }, "Waiter": { - "description": "A Waiter resource waits for some end condition within a RuntimeConfig\nresource to be met before it returns. For example, assume you have a\ndistributed system where each node writes to a Variable resource indicating\nthe node's readiness as part of the startup process.\n\nYou then configure a Waiter resource with the success condition set to wait\nuntil some number of nodes have checked in. Afterwards, your application\nruns some arbitrary code after the condition has been met and the waiter\nreturns successfully.\n\nOnce created, a Waiter resource is immutable.\n\nTo learn more about using waiters, read the\n[Creating a\nWaiter](/deployment-manager/runtime-configurator/creating-a-waiter)\ndocumentation.", + "description": "A Waiter resource waits for some end condition within a RuntimeConfig resource to be met before it returns. For example, assume you have a distributed system where each node writes to a Variable resource indicating the node's readiness as part of the startup process. You then configure a Waiter resource with the success condition set to wait until some number of nodes have checked in. Afterwards, your application runs some arbitrary code after the condition has been met and the waiter returns successfully. Once created, a Waiter resource is immutable. To learn more about using waiters, read the [Creating a Waiter](/deployment-manager/runtime-configurator/creating-a-waiter) documentation.", "id": "Waiter", "properties": { "createTime": { - "description": "Output only. The instant at which this Waiter resource was created. Adding\nthe value of `timeout` to this instant yields the timeout deadline for the\nwaiter.", + "description": "Output only. The instant at which this Waiter resource was created. Adding the value of `timeout` to this instant yields the timeout deadline for the waiter.", "format": "google-datetime", "type": "string" }, "done": { - "description": "Output only. If the value is `false`, it means the waiter is still waiting\nfor one of its conditions to be met.\n\nIf true, the waiter has finished. If the waiter finished due to a timeout\nor failure, `error` will be set.", + "description": "Output only. 
If the value is `false`, it means the waiter is still waiting for one of its conditions to be met. If true, the waiter has finished. If the waiter finished due to a timeout or failure, `error` will be set.", "type": "boolean" }, "error": { "$ref": "Status", - "description": "Output only. If the waiter ended due to a failure or timeout, this value\nwill be set." + "description": "Output only. If the waiter ended due to a failure or timeout, this value will be set." }, "failure": { "$ref": "EndCondition", - "description": "[Optional] The failure condition of this waiter. If this condition is met,\n`done` will be set to `true` and the `error` code will be set to `ABORTED`.\nThe failure condition takes precedence over the success condition. If both\nconditions are met, a failure will be indicated. This value is optional; if\nno failure condition is set, the only failure scenario will be a timeout." + "description": "[Optional] The failure condition of this waiter. If this condition is met, `done` will be set to `true` and the `error` code will be set to `ABORTED`. The failure condition takes precedence over the success condition. If both conditions are met, a failure will be indicated. This value is optional; if no failure condition is set, the only failure scenario will be a timeout." }, "name": { - "description": "The name of the Waiter resource, in the format:\n\n projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]\n\nThe `[PROJECT_ID]` must be a valid Google Cloud project ID,\nthe `[CONFIG_NAME]` must be a valid RuntimeConfig resource, the\n`[WAITER_NAME]` must match RFC 1035 segment specification, and the length\nof `[WAITER_NAME]` must be less than 64 bytes.\n\nAfter you create a Waiter resource, you cannot change the resource name.", + "description": "The name of the Waiter resource, in the format: projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME] The `[PROJECT_ID]` must be a valid Google Cloud project ID, the `[CONFIG_NAME]` must be a valid RuntimeConfig resource, the `[WAITER_NAME]` must match RFC 1035 segment specification, and the length of `[WAITER_NAME]` must be less than 64 bytes. After you create a Waiter resource, you cannot change the resource name.", "type": "string" }, "success": { "$ref": "EndCondition", - "description": "[Required] The success condition. If this condition is met, `done` will be\nset to `true` and the `error` value will remain unset. The failure\ncondition takes precedence over the success condition. If both conditions\nare met, a failure will be indicated." + "description": "[Required] The success condition. If this condition is met, `done` will be set to `true` and the `error` value will remain unset. The failure condition takes precedence over the success condition. If both conditions are met, a failure will be indicated." }, "timeout": { - "description": "[Required] Specifies the timeout of the waiter in seconds, beginning from\nthe instant that `waiters().create` method is called. If this time elapses\nbefore the success or failure conditions are met, the waiter fails and sets\nthe `error` code to `DEADLINE_EXCEEDED`.", + "description": "[Required] Specifies the timeout of the waiter in seconds, beginning from the instant that `waiters().create` method is called. 
If this time elapses before the success or failure conditions are met, the waiter fails and sets the `error` code to `DEADLINE_EXCEEDED`.", "format": "google-duration", "type": "string" } @@ -1161,7 +1165,7 @@ "id": "WatchVariableRequest", "properties": { "newerThan": { - "description": "If specified, checks the current timestamp of the variable and if the\ncurrent timestamp is newer than `newerThan` timestamp, the method returns\nimmediately.\n\nIf not specified or the variable has an older timestamp, the watcher waits\nfor a the value to change before returning.", + "description": "If specified, checks the current timestamp of the variable and if the current timestamp is newer than `newerThan` timestamp, the method returns immediately. If not specified or the variable has an older timestamp, the watcher waits for a the value to change before returning.", "format": "google-datetime", "type": "string" } diff --git a/vendor/google.golang.org/api/runtimeconfig/v1beta1/runtimeconfig-gen.go b/vendor/google.golang.org/api/runtimeconfig/v1beta1/runtimeconfig-gen.go index 6466f877e9d..e78300b5822 100644 --- a/vendor/google.golang.org/api/runtimeconfig/v1beta1/runtimeconfig-gen.go +++ b/vendor/google.golang.org/api/runtimeconfig/v1beta1/runtimeconfig-gen.go @@ -79,6 +79,7 @@ const apiId = "runtimeconfig:v1beta1" const apiName = "runtimeconfig" const apiVersion = "v1beta1" const basePath = "https://runtimeconfig.googleapis.com/" +const mtlsBasePath = "https://runtimeconfig.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -98,6 +99,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -200,98 +202,60 @@ type ProjectsConfigsWaitersService struct { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: The condition that is associated with this binding. - // - // If the condition evaluates to `true`, then this binding applies to - // the - // current request. - // - // If the condition evaluates to `false`, then this binding does not - // apply to - // the current request. However, a different role binding might grant - // the same - // role to one or more of the members in this binding. - // - // To learn which resources support conditions in their IAM policies, - // see - // the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/r - // esource-policies). + // BindingId: A client-specified ID for this binding. Expected to be + // globally unique to support the internal bindings-by-ID API. + BindingId string `json:"bindingId,omitempty"` + + // Condition: The condition that is associated with this binding. If the + // condition evaluates to `true`, then this binding applies to the + // current request. If the condition evaluates to `false`, then this + // binding does not apply to the current request. However, a different + // role binding might grant the same role to one or more of the members + // in this binding. To learn which resources support conditions in their + // IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). 
Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud - // Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@example.com` . - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. - // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a user that has been recently deleted. - // For - // example, `alice@example.com?uid=123456789012345678901`. If the - // user is - // recovered, this value reverts to `user:{emailid}` and the - // recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address - // (plus - // unique identifier) representing a service account that has been - // recently - // deleted. For example, - // + // Platform resource. `members` can have the following values: * + // `allUsers`: A special identifier that represents anyone who is on the + // internet; with or without a Google account. * + // `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. * + // `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . * + // `serviceAccount:{emailid}`: An email address that represents a + // service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An + // email address that represents a Google group. For example, + // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An + // email address (plus unique identifier) representing a user that has + // been recently deleted. For example, + // `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered + // user retains the role in the binding. * + // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account - // retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a Google group that has been recently - // deleted. For example, - // `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` - // and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. 
- // - // + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains + // the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: + // An email address (plus unique identifier) representing a Google group + // that has been recently deleted. For example, + // `admins@example.com?uid=123456789012345678901`. If the group is + // recovered, this value reverts to `group:{emailid}` and the recovered + // group retains the role in the binding. * `domain:{domain}`: The G + // Suite domain (primary) that represents all the users of that domain. + // For example, `google.com` or `example.com`. Members []string `json:"members,omitempty"` - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Role: Role that is assigned to `members`. For example, + // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` - // ForceSendFields is a list of field names (e.g. "Condition") to + // ForceSendFields is a list of field names (e.g. "BindingId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -299,7 +263,7 @@ type Binding struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Condition") to include in + // NullFields is a list of field names (e.g. "BindingId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -315,30 +279,19 @@ func (s *Binding) MarshalJSON() ([]byte, error) { } // Cardinality: A Cardinality condition for the Waiter resource. A -// cardinality condition is -// met when the number of variables under a specified path prefix -// reaches a -// predefined number. For example, if you set a Cardinality condition -// where -// the `path` is set to `/foo` and the number of paths is set to `2`, -// the -// following variables would meet the condition in a RuntimeConfig -// resource: -// -// + `/foo/variable1 = "value1" -// + `/foo/variable2 = "value2" -// + `/bar/variable3 = "value3" -// -// It would not satisfy the same condition with the `number` set to -// `3`, however, because there is only 2 paths that start with -// `/foo`. -// Cardinality conditions are recursive; all subtrees under the -// specific +// cardinality condition is met when the number of variables under a +// specified path prefix reaches a predefined number. For example, if +// you set a Cardinality condition where the `path` is set to `/foo` and +// the number of paths is set to `2`, the following variables would meet +// the condition in a RuntimeConfig resource: + `/foo/variable1 = +// "value1" + `/foo/variable2 = "value2" + `/bar/variable3 = "value3" +// It would not satisfy the same condition with the `number` set to `3`, +// however, because there is only 2 paths that start with `/foo`. +// Cardinality conditions are recursive; all subtrees under the specific // path prefix are counted. type Cardinality struct { // Number: The number variables under the `path` that must exist to meet - // this - // condition. Defaults to 1 if not specified. + // this condition. Defaults to 1 if not specified. 
Number int64 `json:"number,omitempty"` // Path: The root of the variable subtree to monitor. For example, @@ -369,17 +322,11 @@ func (s *Cardinality) MarshalJSON() ([]byte, error) { } // Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -415,65 +362,40 @@ func (s *EndCondition) MarshalJSON() ([]byte, error) { } // Expr: Represents a textual expression in the Common Expression -// Language (CEL) -// syntax. CEL is a C-like expression language. The syntax and semantics -// of CEL -// are documented at https://github.com/google/cel-spec. -// -// Example (Comparison): -// -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" -// -// Example (Equality): -// -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == -// request.auth.claims.email" -// -// Example (Logic): -// -// title: "Public documents" -// description: "Determine whether the document should be publicly -// visible" -// expression: "document.type != 'private' && document.type != -// 'internal'" -// -// Example (Data Manipulation): -// -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + -// string(document.create_time)" -// -// The exact variables and functions that may be referenced within an -// expression -// are determined by the service that evaluates it. See the -// service -// documentation for additional information. +// Language (CEL) syntax. CEL is a C-like expression language. The +// syntax and semantics of CEL are documented at +// https://github.com/google/cel-spec. Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" +// description: "Determine whether the document should be publicly +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. type Expr struct { // Description: Optional. Description of the expression. 
This is a - // longer text which - // describes the expression, e.g. when hovered over it in a UI. + // longer text which describes the expression, e.g. when hovered over it + // in a UI. Description string `json:"description,omitempty"` // Expression: Textual representation of an expression in Common - // Expression Language - // syntax. + // Expression Language syntax. Expression string `json:"expression,omitempty"` // Location: Optional. String indicating the location of the expression - // for error - // reporting, e.g. a file name and a position in the file. + // for error reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` // Title: Optional. Title for the expression, i.e. a short string - // describing - // its purpose. This can be used e.g. in UIs which allow to enter - // the - // expression. + // describing its purpose. This can be used e.g. in UIs which allow to + // enter the expression. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -500,25 +422,20 @@ func (s *Expr) MarshalJSON() ([]byte, error) { } // ListConfigsResponse: `ListConfigs()` returns the following response. -// The order of returned -// objects is arbitrary; that is, it is not ordered in any particular -// way. +// The order of returned objects is arbitrary; that is, it is not +// ordered in any particular way. type ListConfigsResponse struct { // Configs: A list of the configurations in the project. The order of - // returned - // objects is arbitrary; that is, it is not ordered in any particular - // way. + // returned objects is arbitrary; that is, it is not ordered in any + // particular way. Configs []*RuntimeConfig `json:"configs,omitempty"` // NextPageToken: This token allows you to get the next page of results - // for list requests. - // If the number of results is larger than `pageSize`, use the - // `nextPageToken` - // as a value for the query parameter `pageToken` in the next list - // request. - // Subsequent list requests will have their own `nextPageToken` to - // continue - // paging through the results + // for list requests. If the number of results is larger than + // `pageSize`, use the `nextPageToken` as a value for the query + // parameter `pageToken` in the next list request. Subsequent list + // requests will have their own `nextPageToken` to continue paging + // through the results NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -551,19 +468,15 @@ func (s *ListConfigsResponse) MarshalJSON() ([]byte, error) { // ListVariablesResponse: Response for the `ListVariables()` method. type ListVariablesResponse struct { // NextPageToken: This token allows you to get the next page of results - // for list requests. - // If the number of results is larger than `pageSize`, use the - // `nextPageToken` - // as a value for the query parameter `pageToken` in the next list - // request. - // Subsequent list requests will have their own `nextPageToken` to - // continue - // paging through the results + // for list requests. If the number of results is larger than + // `pageSize`, use the `nextPageToken` as a value for the query + // parameter `pageToken` in the next list request. Subsequent list + // requests will have their own `nextPageToken` to continue paging + // through the results NextPageToken string `json:"nextPageToken,omitempty"` // Variables: A list of variables and their values. 
The order of - // returned variable - // objects is arbitrary. + // returned variable objects is arbitrary. Variables []*Variable `json:"variables,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -593,18 +506,15 @@ func (s *ListVariablesResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ListWaitersResponse: Response for the `ListWaiters()` method. -// Order of returned waiter objects is arbitrary. +// ListWaitersResponse: Response for the `ListWaiters()` method. Order +// of returned waiter objects is arbitrary. type ListWaitersResponse struct { // NextPageToken: This token allows you to get the next page of results - // for list requests. - // If the number of results is larger than `pageSize`, use the - // `nextPageToken` - // as a value for the query parameter `pageToken` in the next list - // request. - // Subsequent list requests will have their own `nextPageToken` to - // continue - // paging through the results + // for list requests. If the number of results is larger than + // `pageSize`, use the `nextPageToken` as a value for the query + // parameter `pageToken` in the next list request. Subsequent list + // requests will have their own `nextPageToken` to continue paging + // through the results NextPageToken string `json:"nextPageToken,omitempty"` // Waiters: Found waiters in the project. @@ -638,52 +548,38 @@ func (s *ListWaitersResponse) MarshalJSON() ([]byte, error) { } // Operation: This resource represents a long-running operation that is -// the result of a -// network API call. +// the result of a network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in - // progress. - // If `true`, the operation is completed, and either `error` or - // `response` is - // available. + // progress. If `true`, the operation is completed, and either `error` + // or `response` is available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. Error *Status `json:"error,omitempty"` - // Metadata: Service-specific metadata associated with the operation. - // It typically - // contains progress information and common metadata such as create - // time. - // Some services might not provide such metadata. Any method that - // returns a - // long-running operation should document the metadata type, if any. + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same - // service that - // originally returns it. If you use the default HTTP mapping, - // the - // `name` should be a resource name ending with + // service that originally returns it. If you use the default HTTP + // mapping, the `name` should be a resource name ending with // `operations/{unique_id}`. Name string `json:"name,omitempty"` - // Response: The normal response of the operation in case of success. - // If the original - // method returns no data on success, such as `Delete`, the response - // is - // `google.protobuf.Empty`. If the original method is - // standard - // `Get`/`Create`/`Update`, the response should be the resource. 
For - // other - // methods, the response should have the type `XxxResponse`, where - // `Xxx` - // is the original method name. For example, if the original method - // name - // is `TakeSnapshot()`, the inferred response type - // is - // `TakeSnapshotResponse`. + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as `Delete`, the + // response is `google.protobuf.Empty`. If the original method is + // standard `Get`/`Create`/`Update`, the response should be the + // resource. For other methods, the response should have the type + // `XxxResponse`, where `Xxx` is the original method name. For example, + // if the original method name is `TakeSnapshot()`, the inferred + // response type is `TakeSnapshotResponse`. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -714,150 +610,73 @@ func (s *Operation) MarshalJSON() ([]byte, error) { } // Policy: An Identity and Access Management (IAM) policy, which -// specifies access -// controls for Google Cloud resources. -// -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or -// more -// `members` to a single `role`. Members can be user accounts, service -// accounts, +// specifies access controls for Google Cloud resources. A `Policy` is a +// collection of `bindings`. A `binding` binds one or more `members` to +// a single `role`. Members can be user accounts, service accounts, // Google groups, and domains (such as G Suite). A `role` is a named -// list of -// permissions; each `role` can be an IAM predefined role or a -// user-created -// custom role. -// -// For some types of Google Cloud resources, a `binding` can also -// specify a -// `condition`, which is a logical expression that allows access to a -// resource -// only if the expression evaluates to `true`. A condition can add -// constraints -// based on attributes of the request, the resource, or both. To learn -// which -// resources support conditions in their IAM policies, see the -// [IAM +// list of permissions; each `role` can be an IAM predefined role or a +// user-created custom role. For some types of Google Cloud resources, a +// `binding` can also specify a `condition`, which is a logical +// expression that allows access to a resource only if the expression +// evaluates to `true`. A condition can add constraints based on +// attributes of the request, the resource, or both. To learn which +// resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p -// olicies). 
-// -// **JSON example:** -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": [ -// "user:eve@example.com" -// ], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// -// **YAML example:** -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// - etag: BwWWja0YfJA= -// - version: 3 -// -// For a description of IAM and its features, see the -// [IAM documentation](https://cloud.google.com/iam/docs/). +// olicies). **JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: +// 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). type Policy struct { // Bindings: Associates a list of `members` to a `role`. Optionally, may - // specify a - // `condition` that determines how and when the `bindings` are applied. - // Each - // of the `bindings` must contain at least one member. + // specify a `condition` that determines how and when the `bindings` are + // applied. Each of the `bindings` must contain at least one member. Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. 
- // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. **Important:** If you use IAM + // Conditions, you must include the `etag` field whenever you call + // `setIamPolicy`. If you omit this field, then IAM allows you to + // overwrite a version `3` policy with a version `1` policy, and all of // the conditions in the version `3` policy are lost. Etag string `json:"etag,omitempty"` - // Version: Specifies the format of the policy. - // - // Valid values are `0`, `1`, and `3`. Requests that specify an invalid - // value - // are rejected. - // + // Version: Specifies the format of the policy. Valid values are `0`, + // `1`, and `3`. Requests that specify an invalid value are rejected. // Any operation that affects conditional role bindings must specify - // version - // `3`. This requirement applies to the following operations: - // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a - // policy - // that includes conditions - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of - // the conditions in the version `3` policy are lost. - // - // If a policy does not include any conditions, operations on that - // policy may - // specify any valid version or leave the field unset. - // - // To learn which resources support conditions in their IAM policies, - // see the - // [IAM + // version `3`. This requirement applies to the following operations: * + // Getting a policy that includes a conditional role binding * Adding a + // conditional role binding to a policy * Changing a conditional role + // binding in a policy * Removing any role binding, with or without a + // condition, from a policy that includes conditions **Important:** If + // you use IAM Conditions, you must include the `etag` field whenever + // you call `setIamPolicy`. If you omit this field, then IAM allows you + // to overwrite a version `3` policy with a version `1` policy, and all + // of the conditions in the version `3` policy are lost. 
If a policy + // does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. To learn which + // resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p // olicies). Version int64 `json:"version,omitempty"` @@ -890,32 +709,21 @@ func (s *Policy) MarshalJSON() ([]byte, error) { } // RuntimeConfig: A RuntimeConfig resource is the primary resource in -// the Cloud RuntimeConfig -// service. A RuntimeConfig resource consists of metadata and a -// hierarchy of -// variables. +// the Cloud RuntimeConfig service. A RuntimeConfig resource consists of +// metadata and a hierarchy of variables. type RuntimeConfig struct { // Description: An optional description of the RuntimeConfig object. Description string `json:"description,omitempty"` // Name: The resource name of a runtime config. The name must have the - // format: - // - // projects/[PROJECT_ID]/configs/[CONFIG_NAME] - // - // The `[PROJECT_ID]` must be a valid project ID, and `[CONFIG_NAME]` is - // an - // arbitrary name that matches - // the + // format: projects/[PROJECT_ID]/configs/[CONFIG_NAME] The + // `[PROJECT_ID]` must be a valid project ID, and `[CONFIG_NAME]` is an + // arbitrary name that matches the // `[0-9A-Za-z](?:[_.A-Za-z0-9-]{0,62}[_.A-Za-z0-9])?` regular - // expression. - // The length of `[CONFIG_NAME]` must be less than 64 characters. - // - // You pick the RuntimeConfig resource name, but the server will - // validate that - // the name adheres to this format. After you create the resource, you - // cannot - // change the resource's name. + // expression. The length of `[CONFIG_NAME]` must be less than 64 + // characters. You pick the RuntimeConfig resource name, but the server + // will validate that the name adheres to this format. After you create + // the resource, you cannot change the resource's name. Name string `json:"name,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -948,11 +756,9 @@ func (s *RuntimeConfig) MarshalJSON() ([]byte, error) { // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. + // `resource`. The size of the policy is limited to a few 10s of KB. An + // empty policy is a valid policy but certain Cloud Platform services + // (such as Projects) might reject them. Policy *Policy `json:"policy,omitempty"` // ForceSendFields is a list of field names (e.g. "Policy") to @@ -979,32 +785,24 @@ func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. -// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. -// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). 
Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -1034,11 +832,8 @@ func (s *Status) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. - // Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For - // more - // information see - // [IAM + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. For more information see [IAM // Overview](https://cloud.google.com/iam/docs/overview#permissions). Permissions []string `json:"permissions,omitempty"` @@ -1069,8 +864,7 @@ func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. + // the caller is allowed. Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1101,50 +895,29 @@ func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { } // Variable: Describes a single variable within a RuntimeConfig -// resource. -// The name denotes the hierarchical variable name. For -// example, -// `ports/serving_port` is a valid variable name. The variable value is -// an -// opaque string and only leaf variables can have values (that is, -// variables -// that do not have any child variables). +// resource. The name denotes the hierarchical variable name. For +// example, `ports/serving_port` is a valid variable name. The variable +// value is an opaque string and only leaf variables can have values +// (that is, variables that do not have any child variables). type Variable struct { // Name: The name of the variable resource, in the format: - // - // // projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME] - // - // // The `[PROJECT_ID]` must be a valid project ID, `[CONFIG_NAME]` must - // be a - // valid RuntimeConfig resource and `[VARIABLE_NAME]` follows Unix file - // system - // file path naming. - // - // The `[VARIABLE_NAME]` can contain ASCII letters, numbers, slashes - // and - // dashes. Slashes are used as path element separators and are not part - // of the - // `[VARIABLE_NAME]` itself, so `[VARIABLE_NAME]` must contain at least - // one - // non-slash character. Multiple slashes are coalesced into single - // slash - // character. 
Each path segment should - // match - // [0-9A-Za-z](?:[_.A-Za-z0-9-]{0,62}[_.A-Za-z0-9])? regular - // expression. - // The length of a `[VARIABLE_NAME]` must be less than 256 - // characters. - // + // be a valid RuntimeConfig resource and `[VARIABLE_NAME]` follows Unix + // file system file path naming. The `[VARIABLE_NAME]` can contain ASCII + // letters, numbers, slashes and dashes. Slashes are used as path + // element separators and are not part of the `[VARIABLE_NAME]` itself, + // so `[VARIABLE_NAME]` must contain at least one non-slash character. + // Multiple slashes are coalesced into single slash character. Each path + // segment should match + // [0-9A-Za-z](?:[_.A-Za-z0-9-]{0,62}[_.A-Za-z0-9])? regular expression. + // The length of a `[VARIABLE_NAME]` must be less than 256 characters. // Once you create a variable, you cannot change the variable name. Name string `json:"name,omitempty"` // State: Output only. The current state of the variable. The variable - // state - // indicates the outcome of the `variables().watch` call and is - // visible - // through the `get` and `list` calls. + // state indicates the outcome of the `variables().watch` call and is + // visible through the `get` and `list` calls. // // Possible values: // "VARIABLE_STATE_UNSPECIFIED" - Default variable state. @@ -1155,24 +928,18 @@ type Variable struct { State string `json:"state,omitempty"` // Text: The string value of the variable. The length of the value must - // be less - // than 4096 bytes. Empty values are also accepted. For example, + // be less than 4096 bytes. Empty values are also accepted. For example, // `text: "my text value". The string must be valid UTF-8. Text string `json:"text,omitempty"` - // UpdateTime: Output only. The time of the last variable - // update. + // UpdateTime: Output only. The time of the last variable update. // Timestamp will be UTC timestamp. UpdateTime string `json:"updateTime,omitempty"` // Value: The binary value of the variable. The length of the value must - // be less - // than 4096 bytes. Empty values are also accepted. The value must - // be - // base64 encoded, and must comply with IETF - // RFC4648 - // (https://www.ietf.org/rfc/rfc4648.txt). Only one of `value` or - // `text` + // be less than 4096 bytes. Empty values are also accepted. The value + // must be base64 encoded, and must comply with IETF RFC4648 + // (https://www.ietf.org/rfc/rfc4648.txt). Only one of `value` or `text` // can be set. Value string `json:"value,omitempty"` @@ -1204,96 +971,61 @@ func (s *Variable) MarshalJSON() ([]byte, error) { } // Waiter: A Waiter resource waits for some end condition within a -// RuntimeConfig -// resource to be met before it returns. For example, assume you have -// a -// distributed system where each node writes to a Variable resource -// indicating -// the node's readiness as part of the startup process. -// -// You then configure a Waiter resource with the success condition set -// to wait -// until some number of nodes have checked in. Afterwards, your -// application -// runs some arbitrary code after the condition has been met and the -// waiter -// returns successfully. -// -// Once created, a Waiter resource is immutable. -// -// To learn more about using waiters, read the -// [Creating -// a +// RuntimeConfig resource to be met before it returns. For example, +// assume you have a distributed system where each node writes to a +// Variable resource indicating the node's readiness as part of the +// startup process. 
You then configure a Waiter resource with the +// success condition set to wait until some number of nodes have checked +// in. Afterwards, your application runs some arbitrary code after the +// condition has been met and the waiter returns successfully. Once +// created, a Waiter resource is immutable. To learn more about using +// waiters, read the [Creating a // Waiter](/deployment-manager/runtime-configurator/creating-a-waiter) -// // documentation. type Waiter struct { // CreateTime: Output only. The instant at which this Waiter resource - // was created. Adding - // the value of `timeout` to this instant yields the timeout deadline - // for the - // waiter. + // was created. Adding the value of `timeout` to this instant yields the + // timeout deadline for the waiter. CreateTime string `json:"createTime,omitempty"` // Done: Output only. If the value is `false`, it means the waiter is - // still waiting - // for one of its conditions to be met. - // - // If true, the waiter has finished. If the waiter finished due to a - // timeout - // or failure, `error` will be set. + // still waiting for one of its conditions to be met. If true, the + // waiter has finished. If the waiter finished due to a timeout or + // failure, `error` will be set. Done bool `json:"done,omitempty"` // Error: Output only. If the waiter ended due to a failure or timeout, - // this value - // will be set. + // this value will be set. Error *Status `json:"error,omitempty"` // Failure: [Optional] The failure condition of this waiter. If this - // condition is met, - // `done` will be set to `true` and the `error` code will be set to - // `ABORTED`. - // The failure condition takes precedence over the success condition. If - // both - // conditions are met, a failure will be indicated. This value is - // optional; if - // no failure condition is set, the only failure scenario will be a - // timeout. + // condition is met, `done` will be set to `true` and the `error` code + // will be set to `ABORTED`. The failure condition takes precedence over + // the success condition. If both conditions are met, a failure will be + // indicated. This value is optional; if no failure condition is set, + // the only failure scenario will be a timeout. Failure *EndCondition `json:"failure,omitempty"` // Name: The name of the Waiter resource, in the format: - // - // - // projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME] - // - // The - // `[PROJECT_ID]` must be a valid Google Cloud project ID, - // the `[CONFIG_NAME]` must be a valid RuntimeConfig resource, - // the + // projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME] The + // `[PROJECT_ID]` must be a valid Google Cloud project ID, the + // `[CONFIG_NAME]` must be a valid RuntimeConfig resource, the // `[WAITER_NAME]` must match RFC 1035 segment specification, and the - // length - // of `[WAITER_NAME]` must be less than 64 bytes. - // - // After you create a Waiter resource, you cannot change the resource - // name. + // length of `[WAITER_NAME]` must be less than 64 bytes. After you + // create a Waiter resource, you cannot change the resource name. Name string `json:"name,omitempty"` // Success: [Required] The success condition. If this condition is met, - // `done` will be - // set to `true` and the `error` value will remain unset. The - // failure - // condition takes precedence over the success condition. If both - // conditions - // are met, a failure will be indicated. 
+ // `done` will be set to `true` and the `error` value will remain unset. + // The failure condition takes precedence over the success condition. If + // both conditions are met, a failure will be indicated. Success *EndCondition `json:"success,omitempty"` // Timeout: [Required] Specifies the timeout of the waiter in seconds, - // beginning from - // the instant that `waiters().create` method is called. If this time - // elapses - // before the success or failure conditions are met, the waiter fails - // and sets - // the `error` code to `DEADLINE_EXCEEDED`. + // beginning from the instant that `waiters().create` method is called. + // If this time elapses before the success or failure conditions are + // met, the waiter fails and sets the `error` code to + // `DEADLINE_EXCEEDED`. Timeout string `json:"timeout,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1326,14 +1058,10 @@ func (s *Waiter) MarshalJSON() ([]byte, error) { // WatchVariableRequest: Request for the `WatchVariable()` method. type WatchVariableRequest struct { // NewerThan: If specified, checks the current timestamp of the variable - // and if the - // current timestamp is newer than `newerThan` timestamp, the method - // returns - // immediately. - // - // If not specified or the variable has an older timestamp, the watcher - // waits - // for a the value to change before returning. + // and if the current timestamp is newer than `newerThan` timestamp, the + // method returns immediately. If not specified or the variable has an + // older timestamp, the watcher waits for a the value to change before + // returning. NewerThan string `json:"newerThan,omitempty"` // ForceSendFields is a list of field names (e.g. "NewerThan") to @@ -1371,8 +1099,7 @@ type ProjectsConfigsCreateCall struct { } // Create: Creates a new RuntimeConfig resource. The configuration name -// must be -// unique within project. +// must be unique within project. func (r *ProjectsConfigsService) Create(parent string, runtimeconfig *RuntimeConfig) *ProjectsConfigsCreateCall { c := &ProjectsConfigsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -1381,17 +1108,12 @@ func (r *ProjectsConfigsService) Create(parent string, runtimeconfig *RuntimeCon } // RequestId sets the optional parameter "requestId": An optional but -// recommended unique `request_id`. If the server -// receives two `create()` requests with the same -// `request_id`, then the second request will be ignored and the -// first resource created and stored in the backend is returned. -// Empty `request_id` fields are ignored. -// -// It is responsibility of the client to ensure uniqueness of -// the -// `request_id` strings. -// -// `request_id` strings are limited to 64 characters. +// recommended unique `request_id`. If the server receives two +// `create()` requests with the same `request_id`, then the second +// request will be ignored and the first resource created and stored in +// the backend is returned. Empty `request_id` fields are ignored. It is +// responsibility of the client to ensure uniqueness of the `request_id` +// strings. `request_id` strings are limited to 64 characters. 
func (c *ProjectsConfigsCreateCall) RequestId(requestId string) *ProjectsConfigsCreateCall { c.urlParams_.Set("requestId", requestId) return c @@ -1424,7 +1146,7 @@ func (c *ProjectsConfigsCreateCall) Header() http.Header { func (c *ProjectsConfigsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1488,7 +1210,7 @@ func (c *ProjectsConfigsCreateCall) Do(opts ...googleapi.CallOption) (*RuntimeCo } return ret, nil // { - // "description": "Creates a new RuntimeConfig resource. The configuration name must be\nunique within project.", + // "description": "Creates a new RuntimeConfig resource. The configuration name must be unique within project.", // "flatPath": "v1beta1/projects/{projectsId}/configs", // "httpMethod": "POST", // "id": "runtimeconfig.projects.configs.create", @@ -1497,14 +1219,14 @@ func (c *ProjectsConfigsCreateCall) Do(opts ...googleapi.CallOption) (*RuntimeCo // ], // "parameters": { // "parent": { - // "description": "The [project\nID](https://support.google.com/cloud/answer/6158840?hl=en\u0026ref_topic=6158848)\nfor this request, in the format `projects/[PROJECT_ID]`.", + // "description": "The [project ID](https://support.google.com/cloud/answer/6158840?hl=en\u0026ref_topic=6158848) for this request, in the format `projects/[PROJECT_ID]`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" // }, // "requestId": { - // "description": "An optional but recommended unique `request_id`. If the server\nreceives two `create()` requests with the same\n`request_id`, then the second request will be ignored and the\nfirst resource created and stored in the backend is returned.\nEmpty `request_id` fields are ignored.\n\nIt is responsibility of the client to ensure uniqueness of the\n`request_id` strings.\n\n`request_id` strings are limited to 64 characters.", + // "description": "An optional but recommended unique `request_id`. If the server receives two `create()` requests with the same `request_id`, then the second request will be ignored and the first resource created and stored in the backend is returned. Empty `request_id` fields are ignored. It is responsibility of the client to ensure uniqueness of the `request_id` strings. 
`request_id` strings are limited to 64 characters.", // "location": "query", // "type": "string" // } @@ -1568,7 +1290,7 @@ func (c *ProjectsConfigsDeleteCall) Header() http.Header { func (c *ProjectsConfigsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1636,7 +1358,7 @@ func (c *ProjectsConfigsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, er // ], // "parameters": { // "name": { - // "description": "The RuntimeConfig resource to delete, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + // "description": "The RuntimeConfig resource to delete, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+$", // "required": true, @@ -1710,7 +1432,7 @@ func (c *ProjectsConfigsGetCall) Header() http.Header { func (c *ProjectsConfigsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1781,7 +1503,7 @@ func (c *ProjectsConfigsGetCall) Do(opts ...googleapi.CallOption) (*RuntimeConfi // ], // "parameters": { // "name": { - // "description": "The name of the RuntimeConfig resource to retrieve, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + // "description": "The name of the RuntimeConfig resource to retrieve, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+$", // "required": true, @@ -1811,9 +1533,8 @@ type ProjectsConfigsGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy // set. func (r *ProjectsConfigsService) GetIamPolicy(resource string) *ProjectsConfigsGetIamPolicyCall { c := &ProjectsConfigsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -1823,24 +1544,14 @@ func (r *ProjectsConfigsService) GetIamPolicy(resource string) *ProjectsConfigsG // OptionsRequestedPolicyVersion sets the optional parameter // "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. -// -// Requests for policies with any conditional bindings must specify -// version 3. -// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. -// -// To learn which resources support conditions in their IAM policies, -// see -// the -// [IAM -// documentation](https://cloud.google.com/iam/help/conditions/r -// esource-policies). +// returned. Valid values are 0, 1, and 3. Requests specifying an +// invalid value will be rejected. Requests for policies with any +// conditional bindings must specify version 3. Policies without any +// conditional bindings may specify any valid value or leave the field +// unset. 
To learn which resources support conditions in their IAM +// policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). func (c *ProjectsConfigsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsConfigsGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -1883,7 +1594,7 @@ func (c *ProjectsConfigsGetIamPolicyCall) Header() http.Header { func (c *ProjectsConfigsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1945,7 +1656,7 @@ func (c *ProjectsConfigsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Pol } return ret, nil // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", // "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}:getIamPolicy", // "httpMethod": "GET", // "id": "runtimeconfig.projects.configs.getIamPolicy", @@ -1954,13 +1665,13 @@ func (c *ProjectsConfigsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Pol // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + // "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" // }, // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+$", // "required": true, @@ -1998,16 +1709,16 @@ func (r *ProjectsConfigsService) List(parent string) *ProjectsConfigsListCall { } // PageSize sets the optional parameter "pageSize": Specifies the number -// of results to return per page. If there are fewer -// elements than the specified number, returns all elements. +// of results to return per page. If there are fewer elements than the +// specified number, returns all elements. 
func (c *ProjectsConfigsListCall) PageSize(pageSize int64) *ProjectsConfigsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to a `nextPageToken` -// returned by a previous list request to get the next page of results. +// token to use. Set `pageToken` to a `nextPageToken` returned by a +// previous list request to get the next page of results. func (c *ProjectsConfigsListCall) PageToken(pageToken string) *ProjectsConfigsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -2050,7 +1761,7 @@ func (c *ProjectsConfigsListCall) Header() http.Header { func (c *ProjectsConfigsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2121,18 +1832,18 @@ func (c *ProjectsConfigsListCall) Do(opts ...googleapi.CallOption) (*ListConfigs // ], // "parameters": { // "pageSize": { - // "description": "Specifies the number of results to return per page. If there are fewer\nelements than the specified number, returns all elements.", + // "description": "Specifies the number of results to return per page. If there are fewer elements than the specified number, returns all elements.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to a `nextPageToken`\nreturned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to a `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "The [project\nID](https://support.google.com/cloud/answer/6158840?hl=en\u0026ref_topic=6158848)\nfor this request, in the format `projects/[PROJECT_ID]`.", + // "description": "The [project ID](https://support.google.com/cloud/answer/6158840?hl=en\u0026ref_topic=6158848) for this request, in the format `projects/[PROJECT_ID]`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -2184,11 +1895,8 @@ type ProjectsConfigsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -// -// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` -// errors. +// resource. Replaces any existing policy. Can return `NOT_FOUND`, +// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. 
func (r *ProjectsConfigsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsConfigsSetIamPolicyCall { c := &ProjectsConfigsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2223,7 +1931,7 @@ func (c *ProjectsConfigsSetIamPolicyCall) Header() http.Header { func (c *ProjectsConfigsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2287,7 +1995,7 @@ func (c *ProjectsConfigsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Pol } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", // "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}:setIamPolicy", // "httpMethod": "POST", // "id": "runtimeconfig.projects.configs.setIamPolicy", @@ -2296,7 +2004,7 @@ func (c *ProjectsConfigsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Pol // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+$", // "required": true, @@ -2330,16 +2038,11 @@ type ProjectsConfigsTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a `NOT_FOUND` error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. This +// operation may "fail open" without warning. 
func (r *ProjectsConfigsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsConfigsTestIamPermissionsCall { c := &ProjectsConfigsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2374,7 +2077,7 @@ func (c *ProjectsConfigsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsConfigsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2438,7 +2141,7 @@ func (c *ProjectsConfigsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", // "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}:testIamPermissions", // "httpMethod": "POST", // "id": "runtimeconfig.projects.configs.testIamPermissions", @@ -2447,7 +2150,7 @@ func (c *ProjectsConfigsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+$", // "required": true, @@ -2516,7 +2219,7 @@ func (c *ProjectsConfigsUpdateCall) Header() http.Header { func (c *ProjectsConfigsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2589,7 +2292,7 @@ func (c *ProjectsConfigsUpdateCall) Do(opts ...googleapi.CallOption) (*RuntimeCo // ], // "parameters": { // "name": { - // "description": "The name of the RuntimeConfig resource to update, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + // "description": "The name of the RuntimeConfig resource to update, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+$", // "required": true, @@ -2622,11 +2325,9 @@ type ProjectsConfigsOperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. 
Clients can -// use this -// method to poll the operation result at intervals as recommended by -// the API -// service. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. func (r *ProjectsConfigsOperationsService) Get(name string) *ProjectsConfigsOperationsGetCall { c := &ProjectsConfigsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2670,7 +2371,7 @@ func (c *ProjectsConfigsOperationsGetCall) Header() http.Header { func (c *ProjectsConfigsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2732,7 +2433,7 @@ func (c *ProjectsConfigsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", // "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/operations/{operationsId}", // "httpMethod": "GET", // "id": "runtimeconfig.projects.configs.operations.get", @@ -2772,16 +2473,11 @@ type ProjectsConfigsOperationsTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a `NOT_FOUND` error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. This +// operation may "fail open" without warning. 
func (r *ProjectsConfigsOperationsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsConfigsOperationsTestIamPermissionsCall { c := &ProjectsConfigsOperationsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2816,7 +2512,7 @@ func (c *ProjectsConfigsOperationsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsConfigsOperationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2880,7 +2576,7 @@ func (c *ProjectsConfigsOperationsTestIamPermissionsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", // "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/operations/{operationsId}:testIamPermissions", // "httpMethod": "POST", // "id": "runtimeconfig.projects.configs.operations.testIamPermissions", @@ -2889,7 +2585,7 @@ func (c *ProjectsConfigsOperationsTestIamPermissionsCall) Do(opts ...googleapi.C // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+/operations/.*$", // "required": true, @@ -2923,16 +2619,11 @@ type ProjectsConfigsVariablesCreateCall struct { } // Create: Creates a variable within the given configuration. You cannot -// create -// a variable with a name that is a prefix of an existing variable name, -// or a -// name that has an existing variable name as a prefix. -// -// To learn more about creating a variable, read the -// [Setting and -// Getting -// Data](/deployment-manager/runtime-configurator/set-and-get-var -// iables) +// create a variable with a name that is a prefix of an existing +// variable name, or a name that has an existing variable name as a +// prefix. To learn more about creating a variable, read the [Setting +// and Getting +// Data](/deployment-manager/runtime-configurator/set-and-get-variables) // documentation. 
func (r *ProjectsConfigsVariablesService) Create(parent string, variable *Variable) *ProjectsConfigsVariablesCreateCall { c := &ProjectsConfigsVariablesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -2942,17 +2633,12 @@ func (r *ProjectsConfigsVariablesService) Create(parent string, variable *Variab } // RequestId sets the optional parameter "requestId": An optional but -// recommended unique `request_id`. If the server -// receives two `create()` requests with the same -// `request_id`, then the second request will be ignored and the -// first resource created and stored in the backend is returned. -// Empty `request_id` fields are ignored. -// -// It is responsibility of the client to ensure uniqueness of -// the -// `request_id` strings. -// -// `request_id` strings are limited to 64 characters. +// recommended unique `request_id`. If the server receives two +// `create()` requests with the same `request_id`, then the second +// request will be ignored and the first resource created and stored in +// the backend is returned. Empty `request_id` fields are ignored. It is +// responsibility of the client to ensure uniqueness of the `request_id` +// strings. `request_id` strings are limited to 64 characters. func (c *ProjectsConfigsVariablesCreateCall) RequestId(requestId string) *ProjectsConfigsVariablesCreateCall { c.urlParams_.Set("requestId", requestId) return c @@ -2985,7 +2671,7 @@ func (c *ProjectsConfigsVariablesCreateCall) Header() http.Header { func (c *ProjectsConfigsVariablesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3049,7 +2735,7 @@ func (c *ProjectsConfigsVariablesCreateCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Creates a variable within the given configuration. You cannot create\na variable with a name that is a prefix of an existing variable name, or a\nname that has an existing variable name as a prefix.\n\nTo learn more about creating a variable, read the\n[Setting and Getting\nData](/deployment-manager/runtime-configurator/set-and-get-variables)\ndocumentation.", + // "description": "Creates a variable within the given configuration. You cannot create a variable with a name that is a prefix of an existing variable name, or a name that has an existing variable name as a prefix. To learn more about creating a variable, read the [Setting and Getting Data](/deployment-manager/runtime-configurator/set-and-get-variables) documentation.", // "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables", // "httpMethod": "POST", // "id": "runtimeconfig.projects.configs.variables.create", @@ -3058,14 +2744,14 @@ func (c *ProjectsConfigsVariablesCreateCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "parent": { - // "description": "The path to the RutimeConfig resource that this variable should belong to.\nThe configuration must exist beforehand; the path must be in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + // "description": "The path to the RutimeConfig resource that this variable should belong to. 
The configuration must exist beforehand; the path must be in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+$", // "required": true, // "type": "string" // }, // "requestId": { - // "description": "An optional but recommended unique `request_id`. If the server\nreceives two `create()` requests with the same\n`request_id`, then the second request will be ignored and the\nfirst resource created and stored in the backend is returned.\nEmpty `request_id` fields are ignored.\n\nIt is responsibility of the client to ensure uniqueness of the\n`request_id` strings.\n\n`request_id` strings are limited to 64 characters.", + // "description": "An optional but recommended unique `request_id`. If the server receives two `create()` requests with the same `request_id`, then the second request will be ignored and the first resource created and stored in the backend is returned. Empty `request_id` fields are ignored. It is responsibility of the client to ensure uniqueness of the `request_id` strings. `request_id` strings are limited to 64 characters.", // "location": "query", // "type": "string" // } @@ -3095,15 +2781,11 @@ type ProjectsConfigsVariablesDeleteCall struct { header_ http.Header } -// Delete: Deletes a variable or multiple variables. -// -// If you specify a variable name, then that variable is deleted. If -// you -// specify a prefix and `recursive` is true, then all variables with -// that -// prefix are deleted. You must set a `recursive` to true if you -// delete -// variables by prefix. +// Delete: Deletes a variable or multiple variables. If you specify a +// variable name, then that variable is deleted. If you specify a prefix +// and `recursive` is true, then all variables with that prefix are +// deleted. You must set a `recursive` to true if you delete variables +// by prefix. func (r *ProjectsConfigsVariablesService) Delete(name string) *ProjectsConfigsVariablesDeleteCall { c := &ProjectsConfigsVariablesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3111,8 +2793,7 @@ func (r *ProjectsConfigsVariablesService) Delete(name string) *ProjectsConfigsVa } // Recursive sets the optional parameter "recursive": Set to `true` to -// recursively delete multiple variables with the same -// prefix. +// recursively delete multiple variables with the same prefix. func (c *ProjectsConfigsVariablesDeleteCall) Recursive(recursive bool) *ProjectsConfigsVariablesDeleteCall { c.urlParams_.Set("recursive", fmt.Sprint(recursive)) return c @@ -3145,7 +2826,7 @@ func (c *ProjectsConfigsVariablesDeleteCall) Header() http.Header { func (c *ProjectsConfigsVariablesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3204,7 +2885,7 @@ func (c *ProjectsConfigsVariablesDeleteCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Deletes a variable or multiple variables.\n\nIf you specify a variable name, then that variable is deleted. If you\nspecify a prefix and `recursive` is true, then all variables with that\nprefix are deleted. You must set a `recursive` to true if you delete\nvariables by prefix.", + // "description": "Deletes a variable or multiple variables. 
If you specify a variable name, then that variable is deleted. If you specify a prefix and `recursive` is true, then all variables with that prefix are deleted. You must set a `recursive` to true if you delete variables by prefix.", // "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables/{variablesId}", // "httpMethod": "DELETE", // "id": "runtimeconfig.projects.configs.variables.delete", @@ -3213,14 +2894,14 @@ func (c *ProjectsConfigsVariablesDeleteCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "name": { - // "description": "The name of the variable to delete, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]`", + // "description": "The name of the variable to delete, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]`", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+/variables/.*$", // "required": true, // "type": "string" // }, // "recursive": { - // "description": "Set to `true` to recursively delete multiple variables with the same\nprefix.", + // "description": "Set to `true` to recursively delete multiple variables with the same prefix.", // "location": "query", // "type": "boolean" // } @@ -3292,7 +2973,7 @@ func (c *ProjectsConfigsVariablesGetCall) Header() http.Header { func (c *ProjectsConfigsVariablesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3363,7 +3044,7 @@ func (c *ProjectsConfigsVariablesGetCall) Do(opts ...googleapi.CallOption) (*Var // ], // "parameters": { // "name": { - // "description": "The name of the variable to return, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIBLE_NAME]`", + // "description": "The name of the variable to return, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIBLE_NAME]`", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+/variables/.*$", // "required": true, @@ -3394,12 +3075,9 @@ type ProjectsConfigsVariablesListCall struct { } // List: Lists variables within given a configuration, matching any -// provided -// filters. This only lists variable names, not the values, -// unless -// `return_values` is true, in which case only variables that user has -// IAM -// permission to GetVariable will be returned. +// provided filters. This only lists variable names, not the values, +// unless `return_values` is true, in which case only variables that +// user has IAM permission to GetVariable will be returned. func (r *ProjectsConfigsVariablesService) List(parent string) *ProjectsConfigsVariablesListCall { c := &ProjectsConfigsVariablesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -3407,37 +3085,34 @@ func (r *ProjectsConfigsVariablesService) List(parent string) *ProjectsConfigsVa } // Filter sets the optional parameter "filter": Filters variables by -// matching the specified filter. For -// example: -// -// `projects/example-project/config/[CONFIG_NAME]/variables/exa -// mple-variable`. +// matching the specified filter. For example: +// `projects/example-project/config/[CONFIG_NAME]/variables/example-varia +// ble`. 
func (c *ProjectsConfigsVariablesListCall) Filter(filter string) *ProjectsConfigsVariablesListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": Specifies the number -// of results to return per page. If there are fewer -// elements than the specified number, returns all elements. +// of results to return per page. If there are fewer elements than the +// specified number, returns all elements. func (c *ProjectsConfigsVariablesListCall) PageSize(pageSize int64) *ProjectsConfigsVariablesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to a `nextPageToken` -// returned by a previous list request to get the next page of results. +// token to use. Set `pageToken` to a `nextPageToken` returned by a +// previous list request to get the next page of results. func (c *ProjectsConfigsVariablesListCall) PageToken(pageToken string) *ProjectsConfigsVariablesListCall { c.urlParams_.Set("pageToken", pageToken) return c } // ReturnValues sets the optional parameter "returnValues": The flag -// indicates whether the user wants to return values of variables. -// If true, then only those variables that user has IAM GetVariable -// permission -// will be returned along with their values. +// indicates whether the user wants to return values of variables. If +// true, then only those variables that user has IAM GetVariable +// permission will be returned along with their values. func (c *ProjectsConfigsVariablesListCall) ReturnValues(returnValues bool) *ProjectsConfigsVariablesListCall { c.urlParams_.Set("returnValues", fmt.Sprint(returnValues)) return c @@ -3480,7 +3155,7 @@ func (c *ProjectsConfigsVariablesListCall) Header() http.Header { func (c *ProjectsConfigsVariablesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3542,7 +3217,7 @@ func (c *ProjectsConfigsVariablesListCall) Do(opts ...googleapi.CallOption) (*Li } return ret, nil // { - // "description": "Lists variables within given a configuration, matching any provided\nfilters. This only lists variable names, not the values, unless\n`return_values` is true, in which case only variables that user has IAM\npermission to GetVariable will be returned.", + // "description": "Lists variables within given a configuration, matching any provided filters. This only lists variable names, not the values, unless `return_values` is true, in which case only variables that user has IAM permission to GetVariable will be returned.", // "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables", // "httpMethod": "GET", // "id": "runtimeconfig.projects.configs.variables.list", @@ -3551,30 +3226,30 @@ func (c *ProjectsConfigsVariablesListCall) Do(opts ...googleapi.CallOption) (*Li // ], // "parameters": { // "filter": { - // "description": "Filters variables by matching the specified filter. For example:\n\n`projects/example-project/config/[CONFIG_NAME]/variables/example-variable`.", + // "description": "Filters variables by matching the specified filter. 
For example: `projects/example-project/config/[CONFIG_NAME]/variables/example-variable`.", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Specifies the number of results to return per page. If there are fewer\nelements than the specified number, returns all elements.", + // "description": "Specifies the number of results to return per page. If there are fewer elements than the specified number, returns all elements.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to a `nextPageToken`\nreturned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to a `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "The path to the RuntimeConfig resource for which you want to list\nvariables. The configuration must exist beforehand; the path must be in the\nformat:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + // "description": "The path to the RuntimeConfig resource for which you want to list variables. The configuration must exist beforehand; the path must be in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+$", // "required": true, // "type": "string" // }, // "returnValues": { - // "description": "The flag indicates whether the user wants to return values of variables.\nIf true, then only those variables that user has IAM GetVariable permission\nwill be returned along with their values.", + // "description": "The flag indicates whether the user wants to return values of variables. If true, then only those variables that user has IAM GetVariable permission will be returned along with their values.", // "location": "query", // "type": "boolean" // } @@ -3624,16 +3299,11 @@ type ProjectsConfigsVariablesTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a `NOT_FOUND` error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. This +// operation may "fail open" without warning. 
func (r *ProjectsConfigsVariablesService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsConfigsVariablesTestIamPermissionsCall { c := &ProjectsConfigsVariablesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -3668,7 +3338,7 @@ func (c *ProjectsConfigsVariablesTestIamPermissionsCall) Header() http.Header { func (c *ProjectsConfigsVariablesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3732,7 +3402,7 @@ func (c *ProjectsConfigsVariablesTestIamPermissionsCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", // "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables/{variablesId}:testIamPermissions", // "httpMethod": "POST", // "id": "runtimeconfig.projects.configs.variables.testIamPermissions", @@ -3741,7 +3411,7 @@ func (c *ProjectsConfigsVariablesTestIamPermissionsCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. 
See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+/variables/.*$", // "required": true, @@ -3809,7 +3479,7 @@ func (c *ProjectsConfigsVariablesUpdateCall) Header() http.Header { func (c *ProjectsConfigsVariablesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3882,7 +3552,7 @@ func (c *ProjectsConfigsVariablesUpdateCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "name": { - // "description": "The name of the variable to update, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]`", + // "description": "The name of the variable to update, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]`", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+/variables/.*$", // "required": true, @@ -3916,27 +3586,16 @@ type ProjectsConfigsVariablesWatchCall struct { } // Watch: Watches a specific variable and waits for a change in the -// variable's value. -// When there is a change, this method returns the new value or times -// out. -// -// If a variable is deleted while being watched, the `variableState` -// state is -// set to `DELETED` and the method returns the last known variable -// `value`. -// -// If you set the deadline for watching to a larger value than -// internal -// timeout (60 seconds), the current variable value is returned and -// the -// `variableState` will be `VARIABLE_STATE_UNSPECIFIED`. -// -// To learn more about creating a watcher, read the -// [Watching a Variable -// for -// Changes](/deployment-manager/runtime-configurator/watching-a-varia -// ble) -// documentation. +// variable's value. When there is a change, this method returns the new +// value or times out. If a variable is deleted while being watched, the +// `variableState` state is set to `DELETED` and the method returns the +// last known variable `value`. If you set the deadline for watching to +// a larger value than internal timeout (60 seconds), the current +// variable value is returned and the `variableState` will be +// `VARIABLE_STATE_UNSPECIFIED`. To learn more about creating a watcher, +// read the [Watching a Variable for +// Changes](/deployment-manager/runtime-configurator/watching-a-variable) +// documentation. 
func (r *ProjectsConfigsVariablesService) Watch(name string, watchvariablerequest *WatchVariableRequest) *ProjectsConfigsVariablesWatchCall { c := &ProjectsConfigsVariablesWatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3971,7 +3630,7 @@ func (c *ProjectsConfigsVariablesWatchCall) Header() http.Header { func (c *ProjectsConfigsVariablesWatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4035,7 +3694,7 @@ func (c *ProjectsConfigsVariablesWatchCall) Do(opts ...googleapi.CallOption) (*V } return ret, nil // { - // "description": "Watches a specific variable and waits for a change in the variable's value.\nWhen there is a change, this method returns the new value or times out.\n\nIf a variable is deleted while being watched, the `variableState` state is\nset to `DELETED` and the method returns the last known variable `value`.\n\nIf you set the deadline for watching to a larger value than internal\ntimeout (60 seconds), the current variable value is returned and the\n`variableState` will be `VARIABLE_STATE_UNSPECIFIED`.\n\nTo learn more about creating a watcher, read the\n[Watching a Variable for\nChanges](/deployment-manager/runtime-configurator/watching-a-variable)\ndocumentation.", + // "description": "Watches a specific variable and waits for a change in the variable's value. When there is a change, this method returns the new value or times out. If a variable is deleted while being watched, the `variableState` state is set to `DELETED` and the method returns the last known variable `value`. If you set the deadline for watching to a larger value than internal timeout (60 seconds), the current variable value is returned and the `variableState` will be `VARIABLE_STATE_UNSPECIFIED`. To learn more about creating a watcher, read the [Watching a Variable for Changes](/deployment-manager/runtime-configurator/watching-a-variable) documentation.", // "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/variables/{variablesId}:watch", // "httpMethod": "POST", // "id": "runtimeconfig.projects.configs.variables.watch", @@ -4044,7 +3703,7 @@ func (c *ProjectsConfigsVariablesWatchCall) Do(opts ...googleapi.CallOption) (*V // ], // "parameters": { // "name": { - // "description": "The name of the variable to watch, in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + // "description": "The name of the variable to watch, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+/variables/.*$", // "required": true, @@ -4078,15 +3737,11 @@ type ProjectsConfigsWaitersCreateCall struct { } // Create: Creates a Waiter resource. This operation returns a -// long-running Operation -// resource which can be polled for completion. However, a waiter with -// the -// given name will exist (and can be retrieved) prior to the -// operation -// completing. If the operation fails, the failed Waiter resource -// will -// still exist and must be deleted prior to subsequent creation -// attempts. +// long-running Operation resource which can be polled for completion. +// However, a waiter with the given name will exist (and can be +// retrieved) prior to the operation completing. 
If the operation fails, +// the failed Waiter resource will still exist and must be deleted prior +// to subsequent creation attempts. func (r *ProjectsConfigsWaitersService) Create(parent string, waiter *Waiter) *ProjectsConfigsWaitersCreateCall { c := &ProjectsConfigsWaitersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -4095,17 +3750,12 @@ func (r *ProjectsConfigsWaitersService) Create(parent string, waiter *Waiter) *P } // RequestId sets the optional parameter "requestId": An optional but -// recommended unique `request_id`. If the server -// receives two `create()` requests with the same -// `request_id`, then the second request will be ignored and the -// first resource created and stored in the backend is returned. -// Empty `request_id` fields are ignored. -// -// It is responsibility of the client to ensure uniqueness of -// the -// `request_id` strings. -// -// `request_id` strings are limited to 64 characters. +// recommended unique `request_id`. If the server receives two +// `create()` requests with the same `request_id`, then the second +// request will be ignored and the first resource created and stored in +// the backend is returned. Empty `request_id` fields are ignored. It is +// responsibility of the client to ensure uniqueness of the `request_id` +// strings. `request_id` strings are limited to 64 characters. func (c *ProjectsConfigsWaitersCreateCall) RequestId(requestId string) *ProjectsConfigsWaitersCreateCall { c.urlParams_.Set("requestId", requestId) return c @@ -4138,7 +3788,7 @@ func (c *ProjectsConfigsWaitersCreateCall) Header() http.Header { func (c *ProjectsConfigsWaitersCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4202,7 +3852,7 @@ func (c *ProjectsConfigsWaitersCreateCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Creates a Waiter resource. This operation returns a long-running Operation\nresource which can be polled for completion. However, a waiter with the\ngiven name will exist (and can be retrieved) prior to the operation\ncompleting. If the operation fails, the failed Waiter resource will\nstill exist and must be deleted prior to subsequent creation attempts.", + // "description": "Creates a Waiter resource. This operation returns a long-running Operation resource which can be polled for completion. However, a waiter with the given name will exist (and can be retrieved) prior to the operation completing. If the operation fails, the failed Waiter resource will still exist and must be deleted prior to subsequent creation attempts.", // "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/waiters", // "httpMethod": "POST", // "id": "runtimeconfig.projects.configs.waiters.create", @@ -4211,14 +3861,14 @@ func (c *ProjectsConfigsWaitersCreateCall) Do(opts ...googleapi.CallOption) (*Op // ], // "parameters": { // "parent": { - // "description": "The path to the configuration that will own the waiter.\nThe configuration must exist beforehand; the path must be in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`.", + // "description": "The path to the configuration that will own the waiter. 
The configuration must exist beforehand; the path must be in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`.", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+$", // "required": true, // "type": "string" // }, // "requestId": { - // "description": "An optional but recommended unique `request_id`. If the server\nreceives two `create()` requests with the same\n`request_id`, then the second request will be ignored and the\nfirst resource created and stored in the backend is returned.\nEmpty `request_id` fields are ignored.\n\nIt is responsibility of the client to ensure uniqueness of the\n`request_id` strings.\n\n`request_id` strings are limited to 64 characters.", + // "description": "An optional but recommended unique `request_id`. If the server receives two `create()` requests with the same `request_id`, then the second request will be ignored and the first resource created and stored in the backend is returned. Empty `request_id` fields are ignored. It is responsibility of the client to ensure uniqueness of the `request_id` strings. `request_id` strings are limited to 64 characters.", // "location": "query", // "type": "string" // } @@ -4282,7 +3932,7 @@ func (c *ProjectsConfigsWaitersDeleteCall) Header() http.Header { func (c *ProjectsConfigsWaitersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4350,7 +4000,7 @@ func (c *ProjectsConfigsWaitersDeleteCall) Do(opts ...googleapi.CallOption) (*Em // ], // "parameters": { // "name": { - // "description": "The Waiter resource to delete, in the format:\n\n `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]`", + // "description": "The Waiter resource to delete, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]`", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+/waiters/[^/]+$", // "required": true, @@ -4424,7 +4074,7 @@ func (c *ProjectsConfigsWaitersGetCall) Header() http.Header { func (c *ProjectsConfigsWaitersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4495,7 +4145,7 @@ func (c *ProjectsConfigsWaitersGetCall) Do(opts ...googleapi.CallOption) (*Waite // ], // "parameters": { // "name": { - // "description": "The fully-qualified name of the Waiter resource object to retrieve, in the\nformat:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]`", + // "description": "The fully-qualified name of the Waiter resource object to retrieve, in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]`", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+/waiters/[^/]+$", // "required": true, @@ -4533,16 +4183,16 @@ func (r *ProjectsConfigsWaitersService) List(parent string) *ProjectsConfigsWait } // PageSize sets the optional parameter "pageSize": Specifies the number -// of results to return per page. If there are fewer -// elements than the specified number, returns all elements. +// of results to return per page. 
If there are fewer elements than the +// specified number, returns all elements. func (c *ProjectsConfigsWaitersListCall) PageSize(pageSize int64) *ProjectsConfigsWaitersListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to a `nextPageToken` -// returned by a previous list request to get the next page of results. +// token to use. Set `pageToken` to a `nextPageToken` returned by a +// previous list request to get the next page of results. func (c *ProjectsConfigsWaitersListCall) PageToken(pageToken string) *ProjectsConfigsWaitersListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -4585,7 +4235,7 @@ func (c *ProjectsConfigsWaitersListCall) Header() http.Header { func (c *ProjectsConfigsWaitersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4656,18 +4306,18 @@ func (c *ProjectsConfigsWaitersListCall) Do(opts ...googleapi.CallOption) (*List // ], // "parameters": { // "pageSize": { - // "description": "Specifies the number of results to return per page. If there are fewer\nelements than the specified number, returns all elements.", + // "description": "Specifies the number of results to return per page. If there are fewer elements than the specified number, returns all elements.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to a `nextPageToken`\nreturned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to a `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "The path to the configuration for which you want to get a list of waiters.\nThe configuration must exist beforehand; the path must be in the format:\n\n`projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", + // "description": "The path to the configuration for which you want to get a list of waiters. The configuration must exist beforehand; the path must be in the format: `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+$", // "required": true, @@ -4719,16 +4369,11 @@ type ProjectsConfigsWaitersTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a `NOT_FOUND` error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. This +// operation may "fail open" without warning. 
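As a companion to the watch sketch earlier, a hedged example of exercising the waiter TestIamPermissions call defined below. The permission string and resource name are hypothetical, and, per the description above, the result is only suitable for building permission-aware UIs, not for authorization decisions, since the operation may "fail open".

// checkWaiterPermissions is an illustrative helper, not part of the
// generated client. It reports which of the requested permissions the
// caller holds on a waiter resource.
func checkWaiterPermissions(ctx context.Context, svc *runtimeconfig.Service, resource string, perms []string) ([]string, error) {
	req := &runtimeconfig.TestIamPermissionsRequest{Permissions: perms}
	resp, err := svc.Projects.Configs.Waiters.TestIamPermissions(resource, req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil
}

// Example call, reusing ctx and svc from the previous sketch:
//   granted, err := checkWaiterPermissions(ctx, svc,
//       "projects/my-project/configs/my-config/waiters/my-waiter",
//       []string{"runtimeconfig.waiters.get"})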
func (r *ProjectsConfigsWaitersService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsConfigsWaitersTestIamPermissionsCall { c := &ProjectsConfigsWaitersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4763,7 +4408,7 @@ func (c *ProjectsConfigsWaitersTestIamPermissionsCall) Header() http.Header { func (c *ProjectsConfigsWaitersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4827,7 +4472,7 @@ func (c *ProjectsConfigsWaitersTestIamPermissionsCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", // "flatPath": "v1beta1/projects/{projectsId}/configs/{configsId}/waiters/{waitersId}:testIamPermissions", // "httpMethod": "POST", // "id": "runtimeconfig.projects.configs.waiters.testIamPermissions", @@ -4836,7 +4481,7 @@ func (c *ProjectsConfigsWaitersTestIamPermissionsCall) Do(opts ...googleapi.Call // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/configs/[^/]+/waiters/[^/]+$", // "required": true, diff --git a/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json b/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json index 793c1167dc3..8a6148b2260 100644 --- a/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json +++ b/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json @@ -117,7 +117,7 @@ "operations": { "methods": { "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v1/operations/{operationsId}", "httpMethod": "GET", "id": "servicemanagement.operations.get", @@ -150,7 +150,7 @@ "parameterOrder": [], "parameters": { "filter": { - "description": "A string for filtering Operations.\n The following filter fields are supported\u0026#58;\n\n * serviceName\u0026#58; Required. 
Only `=` operator is allowed.\n * startTime\u0026#58; The time this job was started, in ISO 8601 format.\n Allowed operators are `\u003e=`, `\u003e`, `\u003c=`, and `\u003c`.\n * status\u0026#58; Can be `done`, `in_progress`, or `failed`. Allowed\n operators are `=`, and `!=`.\n\n Filter expression supports conjunction (AND) and disjunction (OR)\n logical operators. However, the serviceName restriction must be at the\n top-level and can only be combined with other restrictions via the AND\n logical operator.\n\n Examples\u0026#58;\n\n * `serviceName={some-service}.googleapis.com`\n * `serviceName={some-service}.googleapis.com AND startTime\u003e=\"2017-02-01\"`\n * `serviceName={some-service}.googleapis.com AND status=done`\n * `serviceName={some-service}.googleapis.com AND (status=done OR startTime\u003e=\"2017-02-01\")`", + "description": "A string for filtering Operations. The following filter fields are supported: * serviceName: Required. Only `=` operator is allowed. * startTime: The time this job was started, in ISO 8601 format. Allowed operators are `\u003e=`, `\u003e`, `\u003c=`, and `\u003c`. * status: Can be `done`, `in_progress`, or `failed`. Allowed operators are `=`, and `!=`. Filter expression supports conjunction (AND) and disjunction (OR) logical operators. However, the serviceName restriction must be at the top-level and can only be combined with other restrictions via the AND logical operator. Examples: * `serviceName={some-service}.googleapis.com` * `serviceName={some-service}.googleapis.com AND startTime\u003e=\"2017-02-01\"` * `serviceName={some-service}.googleapis.com AND status=done` * `serviceName={some-service}.googleapis.com AND (status=done OR startTime\u003e=\"2017-02-01\")`", "location": "query", "type": "string" }, @@ -160,7 +160,7 @@ "type": "string" }, "pageSize": { - "description": "The maximum number of operations to return. If unspecified, defaults to\n50. The maximum value is 100.", + "description": "The maximum number of operations to return. If unspecified, defaults to 50. The maximum value is 100.", "format": "int32", "location": "query", "type": "integer" @@ -185,7 +185,7 @@ "services": { "methods": { "create": { - "description": "Creates a new managed service.\n\nA managed service is immutable, and is subject to mandatory 30-day\ndata retention. You cannot move a service or recreate it within 30 days\nafter deletion.\n\nOne producer project can own no more than 500 services. For security and\nreliability purposes, a production service should be hosted in a\ndedicated producer project.\n\nOperation\u003cresponse: ManagedService\u003e", + "description": "Creates a new managed service. A managed service is immutable, and is subject to mandatory 30-day data retention. You cannot move a service or recreate it within 30 days after deletion. One producer project can own no more than 500 services. For security and reliability purposes, a production service should be hosted in a dedicated producer project. Operation", "flatPath": "v1/services", "httpMethod": "POST", "id": "servicemanagement.services.create", @@ -204,7 +204,7 @@ ] }, "delete": { - "description": "Deletes a managed service. This method will change the service to the\n`Soft-Delete` state for 30 days. Within this period, service producers may\ncall UndeleteService to restore the service.\nAfter 30 days, the service will be permanently deleted.\n\nOperation\u003cresponse: google.protobuf.Empty\u003e", + "description": "Deletes a managed service. 
This method will change the service to the `Soft-Delete` state for 30 days. Within this period, service producers may call UndeleteService to restore the service. After 30 days, the service will be permanently deleted. Operation", "flatPath": "v1/services/{serviceName}", "httpMethod": "DELETE", "id": "servicemanagement.services.delete", @@ -213,7 +213,7 @@ ], "parameters": { "serviceName": { - "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", "location": "path", "required": true, "type": "string" @@ -228,36 +228,8 @@ "https://www.googleapis.com/auth/service.management" ] }, - "disable": { - "description": "Disables a service for a project, so it can no longer be\nbe used for the project. It prevents accidental usage that may cause\nunexpected billing charges or security leaks.\n\nOperation\u003cresponse: DisableServiceResponse\u003e", - "flatPath": "v1/services/{serviceName}:disable", - "httpMethod": "POST", - "id": "servicemanagement.services.disable", - "parameterOrder": [ - "serviceName" - ], - "parameters": { - "serviceName": { - "description": "Required. Name of the service to disable. Specifying an unknown service name\nwill cause the request to fail.", - "location": "path", - "required": true, - "type": "string" - } - }, - "path": "v1/services/{serviceName}:disable", - "request": { - "$ref": "DisableServiceRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/service.management" - ] - }, "enable": { - "description": "Enables a service for a project, so it can be used\nfor the project. See\n[Cloud Auth Guide](https://cloud.google.com/docs/authentication) for\nmore information.\n\nOperation\u003cresponse: EnableServiceResponse\u003e", + "description": "Enables a service for a project, so it can be used for the project. See [Cloud Auth Guide](https://cloud.google.com/docs/authentication) for more information. Operation", "flatPath": "v1/services/{serviceName}:enable", "httpMethod": "POST", "id": "servicemanagement.services.enable", @@ -266,7 +238,7 @@ ], "parameters": { "serviceName": { - "description": "Required. Name of the service to enable. Specifying an unknown service name will\ncause the request to fail.", + "description": "Required. Name of the service to enable. 
Specifying an unknown service name will cause the request to fail.", "location": "path", "required": true, "type": "string" @@ -285,7 +257,7 @@ ] }, "generateConfigReport": { - "description": "Generates and returns a report (errors, warnings and changes from\nexisting configurations) associated with\nGenerateConfigReportRequest.new_value\n\nIf GenerateConfigReportRequest.old_value is specified,\nGenerateConfigReportRequest will contain a single ChangeReport based on the\ncomparison between GenerateConfigReportRequest.new_value and\nGenerateConfigReportRequest.old_value.\nIf GenerateConfigReportRequest.old_value is not specified, this method\nwill compare GenerateConfigReportRequest.new_value with the last pushed\nservice configuration.", + "description": "Generates and returns a report (errors, warnings and changes from existing configurations) associated with GenerateConfigReportRequest.new_value If GenerateConfigReportRequest.old_value is specified, GenerateConfigReportRequest will contain a single ChangeReport based on the comparison between GenerateConfigReportRequest.new_value and GenerateConfigReportRequest.old_value. If GenerateConfigReportRequest.old_value is not specified, this method will compare GenerateConfigReportRequest.new_value with the last pushed service configuration.", "flatPath": "v1/services:generateConfigReport", "httpMethod": "POST", "id": "servicemanagement.services.generateConfigReport", @@ -304,7 +276,7 @@ ] }, "get": { - "description": "Gets a managed service. Authentication is required unless the service is\npublic.", + "description": "Gets a managed service. Authentication is required unless the service is public.", "flatPath": "v1/services/{serviceName}", "httpMethod": "GET", "id": "servicemanagement.services.get", @@ -313,7 +285,7 @@ ], "parameters": { "serviceName": { - "description": "Required. The name of the service. See the `ServiceManager` overview for naming\nrequirements. For example: `example.googleapis.com`.", + "description": "Required. The name of the service. See the `ServiceManager` overview for naming requirements. For example: `example.googleapis.com`.", "location": "path", "required": true, "type": "string" @@ -340,22 +312,26 @@ ], "parameters": { "configId": { - "description": "Required. The id of the service configuration resource.\n\nThis field must be specified for the server to return all fields, including\n`SourceInfo`.", + "description": "Required. The id of the service configuration resource. This field must be specified for the server to return all fields, including `SourceInfo`.", "location": "query", "type": "string" }, "serviceName": { - "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", "location": "path", "required": true, "type": "string" }, "view": { - "description": "Specifies which parts of the Service Config should be returned in the\nresponse.", + "description": "Specifies which parts of the Service Config should be returned in the response.", "enum": [ "BASIC", "FULL" ], + "enumDescriptions": [ + "Server response includes all fields except SourceInfo.", + "Server response includes all fields including SourceInfo. 
SourceFiles are of type 'google.api.servicemanagement.v1.ConfigFile' and are only available for configs created using the SubmitConfigSource method." + ], "location": "query", "type": "string" } @@ -372,7 +348,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "v1/services/{servicesId}:getIamPolicy", "httpMethod": "POST", "id": "servicemanagement.services.getIamPolicy", @@ -381,7 +357,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^services/[^/]+$", "required": true, @@ -403,25 +379,25 @@ ] }, "list": { - "description": "Lists managed services.\n\nReturns all public services. For authenticated users, also returns all\nservices the calling user has \"servicemanagement.services.get\" permission\nfor.\n\n**BETA:** If the caller specifies the `consumer_id`, it returns only the\nservices enabled on the consumer. The `consumer_id` must have the format\nof \"project:{PROJECT-ID}\".", + "description": "Lists managed services. Returns all public services. For authenticated users, also returns all services the calling user has \"servicemanagement.services.get\" permission for. **BETA:** If the caller specifies the `consumer_id`, it returns only the services enabled on the consumer. The `consumer_id` must have the format of \"project:{PROJECT-ID}\".", "flatPath": "v1/services", "httpMethod": "GET", "id": "servicemanagement.services.list", "parameterOrder": [], "parameters": { "consumerId": { - "description": "Include services consumed by the specified consumer.\n\nThe Google Service Management implementation accepts the following\nforms:\n- project:\u003cproject_id\u003e", + "description": "Include services consumed by the specified consumer. The Google Service Management implementation accepts the following forms: - project:", "location": "query", "type": "string" }, "pageSize": { - "description": "The max number of items to include in the response list. Page size is 50\nif not specified. Maximum value is 100.", + "description": "The max number of items to include in the response list. Page size is 50 if not specified. Maximum value is 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Token identifying which result to start with; returned by a previous list\ncall.", + "description": "Token identifying which result to start with; returned by a previous list call.", "location": "query", "type": "string" }, @@ -443,7 +419,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. 
Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", "flatPath": "v1/services/{servicesId}:setIamPolicy", "httpMethod": "POST", "id": "servicemanagement.services.setIamPolicy", @@ -452,7 +428,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^services/[^/]+$", "required": true, @@ -472,7 +448,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", "flatPath": "v1/services/{servicesId}:testIamPermissions", "httpMethod": "POST", "id": "servicemanagement.services.testIamPermissions", @@ -481,7 +457,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^services/[^/]+$", "required": true, @@ -503,7 +479,7 @@ ] }, "undelete": { - "description": "Revives a previously deleted managed service. The method restores the\nservice using the configuration at the time the service was deleted.\nThe target service must exist and must have been deleted within the\nlast 30 days.\n\nOperation\u003cresponse: UndeleteServiceResponse\u003e", + "description": "Revives a previously deleted managed service. The method restores the service using the configuration at the time the service was deleted. The target service must exist and must have been deleted within the last 30 days. Operation", "flatPath": "v1/services/{serviceName}:undelete", "httpMethod": "POST", "id": "servicemanagement.services.undelete", @@ -512,7 +488,7 @@ ], "parameters": { "serviceName": { - "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", "location": "path", "required": true, "type": "string" @@ -532,7 +508,7 @@ "configs": { "methods": { "create": { - "description": "Creates a new service configuration (version) for a managed service.\nThis method only stores the service configuration. 
To roll out the service\nconfiguration to backend systems please call\nCreateServiceRollout.\n\nOnly the 100 most recent service configurations and ones referenced by\nexisting rollouts are kept for each service. The rest will be deleted\neventually.", + "description": "Creates a new service configuration (version) for a managed service. This method only stores the service configuration. To roll out the service configuration to backend systems please call CreateServiceRollout. Only the 100 most recent service configurations and ones referenced by existing rollouts are kept for each service. The rest will be deleted eventually.", "flatPath": "v1/services/{serviceName}/configs", "httpMethod": "POST", "id": "servicemanagement.services.configs.create", @@ -541,7 +517,7 @@ ], "parameters": { "serviceName": { - "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", "location": "path", "required": true, "type": "string" @@ -570,23 +546,27 @@ ], "parameters": { "configId": { - "description": "Required. The id of the service configuration resource.\n\nThis field must be specified for the server to return all fields, including\n`SourceInfo`.", + "description": "Required. The id of the service configuration resource. This field must be specified for the server to return all fields, including `SourceInfo`.", "location": "path", "required": true, "type": "string" }, "serviceName": { - "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", "location": "path", "required": true, "type": "string" }, "view": { - "description": "Specifies which parts of the Service Config should be returned in the\nresponse.", + "description": "Specifies which parts of the Service Config should be returned in the response.", "enum": [ "BASIC", "FULL" ], + "enumDescriptions": [ + "Server response includes all fields except SourceInfo.", + "Server response includes all fields including SourceInfo. SourceFiles are of type 'google.api.servicemanagement.v1.ConfigFile' and are only available for configs created using the SubmitConfigSource method." + ], "location": "query", "type": "string" } @@ -603,7 +583,7 @@ ] }, "list": { - "description": "Lists the history of the service configuration for a managed service,\nfrom the newest to the oldest.", + "description": "Lists the history of the service configuration for a managed service, from the newest to the oldest.", "flatPath": "v1/services/{serviceName}/configs", "httpMethod": "GET", "id": "servicemanagement.services.configs.list", @@ -612,7 +592,7 @@ ], "parameters": { "pageSize": { - "description": "The max number of items to include in the response list. Page size is 50\nif not specified. Maximum value is 100.", + "description": "The max number of items to include in the response list. Page size is 50 if not specified. Maximum value is 100.", "format": "int32", "location": "query", "type": "integer" @@ -623,7 +603,7 @@ "type": "string" }, "serviceName": { - "description": "Required. The name of the service. 
See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", "location": "path", "required": true, "type": "string" @@ -641,7 +621,7 @@ ] }, "submit": { - "description": "Creates a new service configuration (version) for a managed service based\non\nuser-supplied configuration source files (for example: OpenAPI\nSpecification). This method stores the source configurations as well as the\ngenerated service configuration. To rollout the service configuration to\nother services,\nplease call CreateServiceRollout.\n\nOnly the 100 most recent configuration sources and ones referenced by\nexisting service configurtions are kept for each service. The rest will be\ndeleted eventually.\n\nOperation\u003cresponse: SubmitConfigSourceResponse\u003e", + "description": "Creates a new service configuration (version) for a managed service based on user-supplied configuration source files (for example: OpenAPI Specification). This method stores the source configurations as well as the generated service configuration. To rollout the service configuration to other services, please call CreateServiceRollout. Only the 100 most recent configuration sources and ones referenced by existing service configurtions are kept for each service. The rest will be deleted eventually. Operation", "flatPath": "v1/services/{serviceName}/configs:submit", "httpMethod": "POST", "id": "servicemanagement.services.configs.submit", @@ -650,7 +630,7 @@ ], "parameters": { "serviceName": { - "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", "location": "path", "required": true, "type": "string" @@ -673,7 +653,7 @@ "consumers": { "methods": { "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "v1/services/{servicesId}/consumers/{consumersId}:getIamPolicy", "httpMethod": "POST", "id": "servicemanagement.services.consumers.getIamPolicy", @@ -682,7 +662,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^services/[^/]+/consumers/[^/]+$", "required": true, @@ -704,7 +684,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. 
Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", "flatPath": "v1/services/{servicesId}/consumers/{consumersId}:setIamPolicy", "httpMethod": "POST", "id": "servicemanagement.services.consumers.setIamPolicy", @@ -713,7 +693,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^services/[^/]+/consumers/[^/]+$", "required": true, @@ -733,7 +713,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", "flatPath": "v1/services/{servicesId}/consumers/{consumersId}:testIamPermissions", "httpMethod": "POST", "id": "servicemanagement.services.consumers.testIamPermissions", @@ -742,7 +722,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^services/[^/]+/consumers/[^/]+$", "required": true, @@ -768,7 +748,7 @@ "rollouts": { "methods": { "create": { - "description": "Creates a new service configuration rollout. Based on rollout, the\nGoogle Service Management will roll out the service configurations to\ndifferent backend services. For example, the logging configuration will be\npushed to Google Cloud Logging.\n\nPlease note that any previous pending and running Rollouts and associated\nOperations will be automatically cancelled so that the latest Rollout will\nnot be blocked by previous Rollouts.\n\nOnly the 100 most recent (in any state) and the last 10 successful (if not\nalready part of the set of 100 most recent) rollouts are kept for each\nservice. The rest will be deleted eventually.\n\nOperation\u003cresponse: Rollout\u003e", + "description": "Creates a new service configuration rollout. Based on rollout, the Google Service Management will roll out the service configurations to different backend services. For example, the logging configuration will be pushed to Google Cloud Logging. Please note that any previous pending and running Rollouts and associated Operations will be automatically cancelled so that the latest Rollout will not be blocked by previous Rollouts. Only the 100 most recent (in any state) and the last 10 successful (if not already part of the set of 100 most recent) rollouts are kept for each service. The rest will be deleted eventually. 
Operation", "flatPath": "v1/services/{serviceName}/rollouts", "httpMethod": "POST", "id": "servicemanagement.services.rollouts.create", @@ -777,7 +757,7 @@ ], "parameters": { "serviceName": { - "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", "location": "path", "required": true, "type": "string" @@ -812,7 +792,7 @@ "type": "string" }, "serviceName": { - "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", "location": "path", "required": true, "type": "string" @@ -830,7 +810,7 @@ ] }, "list": { - "description": "Lists the history of the service configuration rollouts for a managed\nservice, from the newest to the oldest.", + "description": "Lists the history of the service configuration rollouts for a managed service, from the newest to the oldest.", "flatPath": "v1/services/{serviceName}/rollouts", "httpMethod": "GET", "id": "servicemanagement.services.rollouts.list", @@ -839,12 +819,12 @@ ], "parameters": { "filter": { - "description": "Required. Use `filter` to return subset of rollouts.\nThe following filters are supported:\n -- To limit the results to only those in\n [status](google.api.servicemanagement.v1.RolloutStatus) 'SUCCESS',\n use filter='status=SUCCESS'\n -- To limit the results to those in\n [status](google.api.servicemanagement.v1.RolloutStatus) 'CANCELLED'\n or 'FAILED', use filter='status=CANCELLED OR status=FAILED'", + "description": "Required. Use `filter` to return subset of rollouts. The following filters are supported: -- To limit the results to only those in [status](google.api.servicemanagement.v1.RolloutStatus) 'SUCCESS', use filter='status=SUCCESS' -- To limit the results to those in [status](google.api.servicemanagement.v1.RolloutStatus) 'CANCELLED' or 'FAILED', use filter='status=CANCELLED OR status=FAILED'", "location": "query", "type": "string" }, "pageSize": { - "description": "The max number of items to include in the response list. Page size is 50\nif not specified. Maximum value is 100.", + "description": "The max number of items to include in the response list. Page size is 50 if not specified. Maximum value is 100.", "format": "int32", "location": "query", "type": "integer" @@ -855,7 +835,7 @@ "type": "string" }, "serviceName": { - "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. 
For example: `example.googleapis.com`.", "location": "path", "required": true, "type": "string" @@ -877,22 +857,22 @@ } } }, - "revision": "20200506", + "revision": "20200921", "rootUrl": "https://servicemanagement.googleapis.com/", "schemas": { "Advice": { - "description": "Generated advice about this change, used for providing more\ninformation about how a change will affect the existing service.", + "description": "Generated advice about this change, used for providing more information about how a change will affect the existing service.", "id": "Advice", "properties": { "description": { - "description": "Useful description for why this advice was applied and what actions should\nbe taken to mitigate any implied risks.", + "description": "Useful description for why this advice was applied and what actions should be taken to mitigate any implied risks.", "type": "string" } }, "type": "object" }, "Api": { - "description": "Api is a light-weight descriptor for an API Interface.\n\nInterfaces are also described as \"protocol buffer services\" in some contexts,\nsuch as by the \"service\" keyword in a .proto file, but they are different\nfrom API Services, which represent a concrete implementation of an interface\nas opposed to simply a description of methods and bindings. They are also\nsometimes simply referred to as \"APIs\" in other contexts, such as the name of\nthis message itself. See https://cloud.google.com/apis/design/glossary for\ndetailed terminology.", + "description": "Api is a light-weight descriptor for an API Interface. Interfaces are also described as \"protocol buffer services\" in some contexts, such as by the \"service\" keyword in a .proto file, but they are different from API Services, which represent a concrete implementation of an interface as opposed to simply a description of methods and bindings. They are also sometimes simply referred to as \"APIs\" in other contexts, such as the name of this message itself. See https://cloud.google.com/apis/design/glossary for detailed terminology.", "id": "Api", "properties": { "methods": { @@ -910,7 +890,7 @@ "type": "array" }, "name": { - "description": "The fully qualified name of this interface, including package name\nfollowed by the interface's simple name.", + "description": "The fully qualified name of this interface, including package name followed by the interface's simple name.", "type": "string" }, "options": { @@ -922,7 +902,7 @@ }, "sourceContext": { "$ref": "SourceContext", - "description": "Source context for the protocol buffer service represented by this\nmessage." + "description": "Source context for the protocol buffer service represented by this message." }, "syntax": { "description": "The source syntax of the service.", @@ -937,14 +917,14 @@ "type": "string" }, "version": { - "description": "A version string for this interface. If specified, must have the form\n`major-version.minor-version`, as in `1.10`. If the minor version is\nomitted, it defaults to zero. If the entire version field is empty, the\nmajor version is derived from the package name, as outlined below. If the\nfield is not empty, the version in the package name will be verified to be\nconsistent with what is provided here.\n\nThe versioning schema uses [semantic\nversioning](http://semver.org) where the major version number\nindicates a breaking change and the minor version an additive,\nnon-breaking change. 
Both version numbers are signals to users\nwhat to expect from different versions, and should be carefully\nchosen based on the product plan.\n\nThe major version is also reflected in the package name of the\ninterface, which must end in `v\u003cmajor-version\u003e`, as in\n`google.feature.v1`. For major versions 0 and 1, the suffix can\nbe omitted. Zero major versions must only be used for\nexperimental, non-GA interfaces.\n", + "description": "A version string for this interface. If specified, must have the form `major-version.minor-version`, as in `1.10`. If the minor version is omitted, it defaults to zero. If the entire version field is empty, the major version is derived from the package name, as outlined below. If the field is not empty, the version in the package name will be verified to be consistent with what is provided here. The versioning schema uses [semantic versioning](http://semver.org) where the major version number indicates a breaking change and the minor version an additive, non-breaking change. Both version numbers are signals to users what to expect from different versions, and should be carefully chosen based on the product plan. The major version is also reflected in the package name of the interface, which must end in `v`, as in `google.feature.v1`. For major versions 0 and 1, the suffix can be omitted. Zero major versions must only be used for experimental, non-GA interfaces. ", "type": "string" } }, "type": "object" }, "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"sampleservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:aliya@example.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts jose@example.com from DATA_READ logging, and\naliya@example.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. 
Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -955,18 +935,18 @@ "type": "array" }, "service": { - "description": "Specifies a service that will be enabled for audit logging.\nFor example, `storage.googleapis.com`, `cloudsql.googleapis.com`.\n`allServices` is a special value that covers all services.", + "description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.", "type": "string" } }, "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\njose@example.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions. Example: { \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { - "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", + "description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.", "items": { "type": "string" }, @@ -992,31 +972,31 @@ "type": "object" }, "AuthProvider": { - "description": "Configuration for an authentication provider, including support for\n[JSON Web Token\n(JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", + "description": "Configuration for an authentication provider, including support for [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", "id": "AuthProvider", "properties": { "audiences": { - "description": "The list of JWT\n[audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3).\nthat are allowed to access. A JWT containing any of these audiences will\nbe accepted. 
When this setting is absent, JWTs with audiences:\n - \"https://[service.name]/[google.protobuf.Api.name]\"\n - \"https://[service.name]/\"\nwill be accepted.\nFor example, if no audiences are in the setting, LibraryService API will\naccept JWTs with the following audiences:\n -\n https://library-example.googleapis.com/google.example.library.v1.LibraryService\n - https://library-example.googleapis.com/\n\nExample:\n\n audiences: bookstore_android.apps.googleusercontent.com,\n bookstore_web.apps.googleusercontent.com", + "description": "The list of JWT [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). that are allowed to access. A JWT containing any of these audiences will be accepted. When this setting is absent, JWTs with audiences: - \"https://[service.name]/[google.protobuf.Api.name]\" - \"https://[service.name]/\" will be accepted. For example, if no audiences are in the setting, LibraryService API will accept JWTs with the following audiences: - https://library-example.googleapis.com/google.example.library.v1.LibraryService - https://library-example.googleapis.com/ Example: audiences: bookstore_android.apps.googleusercontent.com, bookstore_web.apps.googleusercontent.com", "type": "string" }, "authorizationUrl": { - "description": "Redirect URL if JWT token is required but not present or is expired.\nImplement authorizationUrl of securityDefinitions in OpenAPI spec.", + "description": "Redirect URL if JWT token is required but not present or is expired. Implement authorizationUrl of securityDefinitions in OpenAPI spec.", "type": "string" }, "id": { - "description": "The unique identifier of the auth provider. It will be referred to by\n`AuthRequirement.provider_id`.\n\nExample: \"bookstore_auth\".", + "description": "The unique identifier of the auth provider. It will be referred to by `AuthRequirement.provider_id`. Example: \"bookstore_auth\".", "type": "string" }, "issuer": { - "description": "Identifies the principal that issued the JWT. See\nhttps://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1\nUsually a URL or an email address.\n\nExample: https://securetoken.google.com\nExample: 1234567-compute@developer.gserviceaccount.com", + "description": "Identifies the principal that issued the JWT. See https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1 Usually a URL or an email address. Example: https://securetoken.google.com Example: 1234567-compute@developer.gserviceaccount.com", "type": "string" }, "jwksUri": { - "description": "URL of the provider's public key set to validate signature of the JWT. See\n[OpenID\nDiscovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).\nOptional if the key set document:\n - can be retrieved from\n [OpenID\n Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html of\n the issuer.\n - can be inferred from the email domain of the issuer (e.g. a Google\n service account).\n\nExample: https://www.googleapis.com/oauth2/v1/certs", + "description": "URL of the provider's public key set to validate signature of the JWT. See [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata). Optional if the key set document: - can be retrieved from [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html of the issuer. - can be inferred from the email domain of the issuer (e.g. a Google service account). 
Example: https://www.googleapis.com/oauth2/v1/certs", "type": "string" }, "jwtLocations": { - "description": "Defines the locations to extract the JWT.\n\nJWT locations can be either from HTTP headers or URL query parameters.\nThe rule is that the first match wins. The checking order is: checking\nall headers first, then URL query parameters.\n\nIf not specified, default to use following 3 locations:\n 1) Authorization: Bearer\n 2) x-goog-iap-jwt-assertion\n 3) access_token query parameter\n\nDefault locations can be specified as followings:\n jwt_locations:\n - header: Authorization\n value_prefix: \"Bearer \"\n - header: x-goog-iap-jwt-assertion\n - query: access_token", + "description": "Defines the locations to extract the JWT. JWT locations can be either from HTTP headers or URL query parameters. The rule is that the first match wins. The checking order is: checking all headers first, then URL query parameters. If not specified, default to use following 3 locations: 1) Authorization: Bearer 2) x-goog-iap-jwt-assertion 3) access_token query parameter Default locations can be specified as followings: jwt_locations: - header: Authorization value_prefix: \"Bearer \" - header: x-goog-iap-jwt-assertion - query: access_token", "items": { "$ref": "JwtLocation" }, @@ -1026,22 +1006,22 @@ "type": "object" }, "AuthRequirement": { - "description": "User-defined authentication requirements, including support for\n[JSON Web Token\n(JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", + "description": "User-defined authentication requirements, including support for [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", "id": "AuthRequirement", "properties": { "audiences": { - "description": "NOTE: This will be deprecated soon, once AuthProvider.audiences is\nimplemented and accepted in all the runtime components.\n\nThe list of JWT\n[audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3).\nthat are allowed to access. A JWT containing any of these audiences will\nbe accepted. When this setting is absent, only JWTs with audience\n\"https://Service_name/API_name\"\nwill be accepted. For example, if no audiences are in the setting,\nLibraryService API will only accept JWTs with the following audience\n\"https://library-example.googleapis.com/google.example.library.v1.LibraryService\".\n\nExample:\n\n audiences: bookstore_android.apps.googleusercontent.com,\n bookstore_web.apps.googleusercontent.com", + "description": "NOTE: This will be deprecated soon, once AuthProvider.audiences is implemented and accepted in all the runtime components. The list of JWT [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). that are allowed to access. A JWT containing any of these audiences will be accepted. When this setting is absent, only JWTs with audience \"https://Service_name/API_name\" will be accepted. For example, if no audiences are in the setting, LibraryService API will only accept JWTs with the following audience \"https://library-example.googleapis.com/google.example.library.v1.LibraryService\". Example: audiences: bookstore_android.apps.googleusercontent.com, bookstore_web.apps.googleusercontent.com", "type": "string" }, "providerId": { - "description": "id from authentication provider.\n\nExample:\n\n provider_id: bookstore_auth", + "description": "id from authentication provider. 
Example: provider_id: bookstore_auth", "type": "string" } }, "type": "object" }, "Authentication": { - "description": "`Authentication` defines the authentication configuration for an API.\n\nExample for an API targeted for external use:\n\n name: calendar.googleapis.com\n authentication:\n providers:\n - id: google_calendar_auth\n jwks_uri: https://www.googleapis.com/oauth2/v1/certs\n issuer: https://securetoken.google.com\n rules:\n - selector: \"*\"\n requirements:\n provider_id: google_calendar_auth", + "description": "`Authentication` defines the authentication configuration for an API. Example for an API targeted for external use: name: calendar.googleapis.com authentication: providers: - id: google_calendar_auth jwks_uri: https://www.googleapis.com/oauth2/v1/certs issuer: https://securetoken.google.com rules: - selector: \"*\" requirements: provider_id: google_calendar_auth", "id": "Authentication", "properties": { "providers": { @@ -1052,7 +1032,7 @@ "type": "array" }, "rules": { - "description": "A list of authentication rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of authentication rules that apply to individual API methods. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "AuthenticationRule" }, @@ -1062,7 +1042,7 @@ "type": "object" }, "AuthenticationRule": { - "description": "Authentication rules for the service.\n\nBy default, if a method has any authentication requirements, every request\nmust include a valid credential matching one of the requirements.\nIt's an error to include more than one kind of credential in a single\nrequest.\n\nIf a method doesn't have any auth requirements, request credentials will be\nignored.", + "description": "Authentication rules for the service. By default, if a method has any authentication requirements, every request must include a valid credential matching one of the requirements. It's an error to include more than one kind of credential in a single request. If a method doesn't have any auth requirements, request credentials will be ignored.", "id": "AuthenticationRule", "properties": { "allowWithoutCredential": { @@ -1081,7 +1061,7 @@ "type": "array" }, "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, @@ -1092,7 +1072,7 @@ "id": "Backend", "properties": { "rules": { - "description": "A list of API backend rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of API backend rules that apply to individual API methods. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "BackendRule" }, @@ -1106,29 +1086,29 @@ "id": "BackendRule", "properties": { "address": { - "description": "The address of the API backend.\n\nThe scheme is used to determine the backend protocol and security.\nThe following schemes are accepted:\n\n SCHEME PROTOCOL SECURITY\n http:// HTTP None\n https:// HTTP TLS\n grpc:// gRPC None\n grpcs:// gRPC TLS\n\nIt is recommended to explicitly include a scheme. 
Leaving out the scheme\nmay cause constrasting behaviors across platforms.\n\nIf the port is unspecified, the default is:\n- 80 for schemes without TLS\n- 443 for schemes with TLS\n\nFor HTTP backends, use protocol\nto specify the protocol version.", + "description": "The address of the API backend. The scheme is used to determine the backend protocol and security. The following schemes are accepted: SCHEME PROTOCOL SECURITY http:// HTTP None https:// HTTP TLS grpc:// gRPC None grpcs:// gRPC TLS It is recommended to explicitly include a scheme. Leaving out the scheme may cause constrasting behaviors across platforms. If the port is unspecified, the default is: - 80 for schemes without TLS - 443 for schemes with TLS For HTTP backends, use protocol to specify the protocol version.", "type": "string" }, "deadline": { - "description": "The number of seconds to wait for a response from a request. The default\nvaries based on the request protocol and deployment environment.", + "description": "The number of seconds to wait for a response from a request. The default varies based on the request protocol and deployment environment.", "format": "double", "type": "number" }, "disableAuth": { - "description": "When disable_auth is true, a JWT ID token won't be generated and the\noriginal \"Authorization\" HTTP header will be preserved. If the header is\nused to carry the original token and is expected by the backend, this\nfield must be set to true to preserve the header.", + "description": "When disable_auth is true, a JWT ID token won't be generated and the original \"Authorization\" HTTP header will be preserved. If the header is used to carry the original token and is expected by the backend, this field must be set to true to preserve the header.", "type": "boolean" }, "jwtAudience": { - "description": "The JWT audience is used when generating a JWT ID token for the backend.\nThis ID token will be added in the HTTP \"authorization\" header, and sent\nto the backend.", + "description": "The JWT audience is used when generating a JWT ID token for the backend. This ID token will be added in the HTTP \"authorization\" header, and sent to the backend.", "type": "string" }, "minDeadline": { - "description": "Minimum deadline in seconds needed for this method. Calls having deadline\nvalue lower than this will be rejected.", + "description": "Minimum deadline in seconds needed for this method. Calls having deadline value lower than this will be rejected.", "format": "double", "type": "number" }, "operationDeadline": { - "description": "The number of seconds to wait for the completion of a long running\noperation. The default is no deadline.", + "description": "The number of seconds to wait for the completion of a long running operation. The default is no deadline.", "format": "double", "type": "number" }, @@ -1140,32 +1120,28 @@ ], "enumDescriptions": [ "", - "Use the backend address as-is, with no modification to the path. If the\nURL pattern contains variables, the variable names and values will be\nappended to the query string. 
If a query string parameter and a URL\npattern variable have the same name, this may result in duplicate keys in\nthe query string.\n\n# Examples\n\nGiven the following operation config:\n\n Method path: /api/company/{cid}/user/{uid}\n Backend address: https://example.cloudfunctions.net/getUser\n\nRequests to the following request paths will call the backend at the\ntranslated path:\n\n Request path: /api/company/widgetworks/user/johndoe\n Translated:\n https://example.cloudfunctions.net/getUser?cid=widgetworks\u0026uid=johndoe\n\n Request path: /api/company/widgetworks/user/johndoe?timezone=EST\n Translated:\n https://example.cloudfunctions.net/getUser?timezone=EST\u0026cid=widgetworks\u0026uid=johndoe", - "The request path will be appended to the backend address.\n\n# Examples\n\nGiven the following operation config:\n\n Method path: /api/company/{cid}/user/{uid}\n Backend address: https://example.appspot.com\n\nRequests to the following request paths will call the backend at the\ntranslated path:\n\n Request path: /api/company/widgetworks/user/johndoe\n Translated:\n https://example.appspot.com/api/company/widgetworks/user/johndoe\n\n Request path: /api/company/widgetworks/user/johndoe?timezone=EST\n Translated:\n https://example.appspot.com/api/company/widgetworks/user/johndoe?timezone=EST" + "Use the backend address as-is, with no modification to the path. If the URL pattern contains variables, the variable names and values will be appended to the query string. If a query string parameter and a URL pattern variable have the same name, this may result in duplicate keys in the query string. # Examples Given the following operation config: Method path: /api/company/{cid}/user/{uid} Backend address: https://example.cloudfunctions.net/getUser Requests to the following request paths will call the backend at the translated path: Request path: /api/company/widgetworks/user/johndoe Translated: https://example.cloudfunctions.net/getUser?cid=widgetworks\u0026uid=johndoe Request path: /api/company/widgetworks/user/johndoe?timezone=EST Translated: https://example.cloudfunctions.net/getUser?timezone=EST\u0026cid=widgetworks\u0026uid=johndoe", + "The request path will be appended to the backend address. # Examples Given the following operation config: Method path: /api/company/{cid}/user/{uid} Backend address: https://example.appspot.com Requests to the following request paths will call the backend at the translated path: Request path: /api/company/widgetworks/user/johndoe Translated: https://example.appspot.com/api/company/widgetworks/user/johndoe Request path: /api/company/widgetworks/user/johndoe?timezone=EST Translated: https://example.appspot.com/api/company/widgetworks/user/johndoe?timezone=EST" ], "type": "string" }, "protocol": { - "description": "The protocol used for sending a request to the backend.\nThe supported values are \"http/1.1\" and \"h2\".\n\nThe default value is inferred from the scheme in the\naddress field:\n\n SCHEME PROTOCOL\n http:// http/1.1\n https:// http/1.1\n grpc:// h2\n grpcs:// h2\n\nFor secure HTTP backends (https://) that support HTTP/2, set this field\nto \"h2\" for improved performance.\n\nConfiguring this field to non-default values is only supported for secure\nHTTP backends. This field will be ignored for all other backends.\n\nSee\nhttps://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids\nfor more details on the supported values.", - "type": "string" - }, - "renameTo": { - "description": "Unimplemented. 
Do not use.\n\nThe new name the selected proto elements should be renamed to.\n\nThe package, the service and the method can all be renamed.\nThe backend server should implement the renamed proto. However, clients\nshould call the original method, and ESF routes the traffic to the renamed\nmethod.\n\nHTTP clients should call the URL mapped to the original method.\ngRPC and Stubby clients should call the original method with package name.\n\nFor legacy reasons, ESF allows Stubby clients to call with the\nshort name (without the package name). However, for API Versioning(or\nmultiple methods mapped to the same short name), all Stubby clients must\ncall the method's full name with the package name, otherwise the first one\n(selector) wins.\n\nIf this `rename_to` is specified with a trailing `*`, the `selector` must\nbe specified with a trailing `*` as well. The all element short names\nmatched by the `*` in the selector will be kept in the `rename_to`.\n\nFor example,\n rename_rules:\n - selector: |-\n google.example.library.v1.*\n rename_to: google.example.library.*\n\nThe selector matches `google.example.library.v1.Library.CreateShelf` and\n`google.example.library.v1.Library.CreateBook`, they will be renamed to\n`google.example.library.Library.CreateShelf` and\n`google.example.library.Library.CreateBook`. It essentially renames the\nproto package name section of the matched proto service and methods.", + "description": "The protocol used for sending a request to the backend. The supported values are \"http/1.1\" and \"h2\". The default value is inferred from the scheme in the address field: SCHEME PROTOCOL http:// http/1.1 https:// http/1.1 grpc:// h2 grpcs:// h2 For secure HTTP backends (https://) that support HTTP/2, set this field to \"h2\" for improved performance. Configuring this field to non-default values is only supported for secure HTTP backends. This field will be ignored for all other backends. See https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids for more details on the supported values.", "type": "string" }, "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. 
Refer to selector for syntax details.", "type": "string" } }, "type": "object" }, "Billing": { - "description": "Billing related configuration of the service.\n\nThe following example shows how to configure monitored resources and metrics\nfor billing, `consumer_destinations` is the only supported destination and\nthe monitored resources need at least one label key\n`cloud.googleapis.com/location` to indicate the location of the billing\nusage, using different monitored resources between monitoring and billing is\nrecommended so they can be evolved independently:\n\n\n monitored_resources:\n - type: library.googleapis.com/billing_branch\n labels:\n - key: cloud.googleapis.com/location\n description: |\n Predefined label to support billing location restriction.\n - key: city\n description: |\n Custom label to define the city where the library branch is located\n in.\n - key: name\n description: Custom label to define the name of the library branch.\n metrics:\n - name: library.googleapis.com/book/borrowed_count\n metric_kind: DELTA\n value_type: INT64\n unit: \"1\"\n billing:\n consumer_destinations:\n - monitored_resource: library.googleapis.com/billing_branch\n metrics:\n - library.googleapis.com/book/borrowed_count", + "description": "Billing related configuration of the service. The following example shows how to configure monitored resources and metrics for billing, `consumer_destinations` is the only supported destination and the monitored resources need at least one label key `cloud.googleapis.com/location` to indicate the location of the billing usage, using different monitored resources between monitoring and billing is recommended so they can be evolved independently: monitored_resources: - type: library.googleapis.com/billing_branch labels: - key: cloud.googleapis.com/location description: | Predefined label to support billing location restriction. - key: city description: | Custom label to define the city where the library branch is located in. - key: name description: Custom label to define the name of the library branch. metrics: - name: library.googleapis.com/book/borrowed_count metric_kind: DELTA value_type: INT64 unit: \"1\" billing: consumer_destinations: - monitored_resource: library.googleapis.com/billing_branch metrics: - library.googleapis.com/book/borrowed_count", "id": "Billing", "properties": { "consumerDestinations": { - "description": "Billing configurations for sending metrics to the consumer project.\nThere can be multiple consumer destinations per service, each one must have\na different monitored resource type. A metric can be used in at most\none consumer destination.", + "description": "Billing configurations for sending metrics to the consumer project. There can be multiple consumer destinations per service, each one must have a different monitored resource type. A metric can be used in at most one consumer destination.", "items": { "$ref": "BillingDestination" }, @@ -1175,18 +1151,18 @@ "type": "object" }, "BillingDestination": { - "description": "Configuration of a specific billing destination (Currently only support\nbill against consumer project).", + "description": "Configuration of a specific billing destination (Currently only support bill against consumer project).", "id": "BillingDestination", "properties": { "metrics": { - "description": "Names of the metrics to report to this billing destination.\nEach name must be defined in Service.metrics section.", + "description": "Names of the metrics to report to this billing destination. 
Each name must be defined in Service.metrics section.", "items": { "type": "string" }, "type": "array" }, "monitoredResource": { - "description": "The monitored resource type. The type must be defined in\nService.monitored_resources section.", + "description": "The monitored resource type. The type must be defined in Service.monitored_resources section.", "type": "string" } }, @@ -1198,28 +1174,28 @@ "properties": { "condition": { "$ref": "Expr", - "description": "The condition that is associated with this binding.\n\nIf the condition evaluates to `true`, then this binding applies to the\ncurrent request.\n\nIf the condition evaluates to `false`, then this binding does not apply to\nthe current request. However, a different role binding might grant the same\nrole to one or more of the members in this binding.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies)." + "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a user that has been recently deleted. For\n example, `alice@example.com?uid=123456789012345678901`. If the user is\n recovered, this value reverts to `user:{emailid}` and the recovered user\n retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus\n unique identifier) representing a service account that has been recently\n deleted. For example,\n `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.\n If the service account is undeleted, this value reverts to\n `serviceAccount:{emailid}` and the undeleted service account retains the\n role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a Google group that has been recently\n deleted. For example, `admins@example.com?uid=123456789012345678901`. If\n the group is recovered, this value reverts to `group:{emailid}` and the\n recovered group retains the role in the binding.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. 
For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", "items": { "type": "string" }, "type": "array" }, "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", "type": "string" } }, "type": "object" }, "ChangeReport": { - "description": "Change report associated with a particular service configuration.\n\nIt contains a list of ConfigChanges based on the comparison between\ntwo service configurations.", + "description": "Change report associated with a particular service configuration. It contains a list of ConfigChanges based on the comparison between two service configurations.", "id": "ChangeReport", "properties": { "configChanges": { - "description": "List of changes between two service configurations.\nThe changes will be alphabetically sorted based on the identifier\nof each change.\nA ConfigChange identifier is a dot separated path to the configuration.\nExample: visibility.rules[selector='LibraryService.CreateBook'].restriction", + "description": "List of changes between two service configurations. The changes will be alphabetically sorted based on the identifier of each change. A ConfigChange identifier is a dot separated path to the configuration. 
Example: visibility.rules[selector='LibraryService.CreateBook'].restriction", "items": { "$ref": "ConfigChange" }, @@ -1229,11 +1205,11 @@ "type": "object" }, "ConfigChange": { - "description": "Output generated from semantically comparing two versions of a service\nconfiguration.\n\nIncludes detailed information about a field that have changed with\napplicable advice about potential consequences for the change, such as\nbackwards-incompatibility.", + "description": "Output generated from semantically comparing two versions of a service configuration. Includes detailed information about a field that have changed with applicable advice about potential consequences for the change, such as backwards-incompatibility.", "id": "ConfigChange", "properties": { "advices": { - "description": "Collection of advice provided for this change, useful for determining the\npossible impact of this change.", + "description": "Collection of advice provided for this change, useful for determining the possible impact of this change.", "items": { "$ref": "Advice" }, @@ -1249,22 +1225,22 @@ ], "enumDescriptions": [ "No value was provided.", - "The changed object exists in the 'new' service configuration, but not\nin the 'old' service configuration.", - "The changed object exists in the 'old' service configuration, but not\nin the 'new' service configuration.", - "The changed object exists in both service configurations, but its value\nis different." + "The changed object exists in the 'new' service configuration, but not in the 'old' service configuration.", + "The changed object exists in the 'old' service configuration, but not in the 'new' service configuration.", + "The changed object exists in both service configurations, but its value is different." ], "type": "string" }, "element": { - "description": "Object hierarchy path to the change, with levels separated by a '.'\ncharacter. For repeated fields, an applicable unique identifier field is\nused for the index (usually selector, name, or id). For maps, the term\n'key' is used. If the field has no unique identifier, the numeric index\nis used.\nExamples:\n- visibility.rules[selector==\"google.LibraryService.ListBooks\"].restriction\n- quota.metric_rules[selector==\"google\"].metric_costs[key==\"reads\"].value\n- logging.producer_destinations[0]", + "description": "Object hierarchy path to the change, with levels separated by a '.' character. For repeated fields, an applicable unique identifier field is used for the index (usually selector, name, or id). For maps, the term 'key' is used. If the field has no unique identifier, the numeric index is used. Examples: - visibility.rules[selector==\"google.LibraryService.ListBooks\"].restriction - quota.metric_rules[selector==\"google\"].metric_costs[key==\"reads\"].value - logging.producer_destinations[0]", "type": "string" }, "newValue": { - "description": "Value of the changed object in the new Service configuration,\nin JSON format. This field will not be populated if ChangeType == REMOVED.", + "description": "Value of the changed object in the new Service configuration, in JSON format. This field will not be populated if ChangeType == REMOVED.", "type": "string" }, "oldValue": { - "description": "Value of the changed object in the old Service configuration,\nin JSON format. This field will not be populated if ChangeType == ADDED.", + "description": "Value of the changed object in the old Service configuration, in JSON format. 
This field will not be populated if ChangeType == ADDED.", "type": "string" } }, @@ -1298,8 +1274,8 @@ "YAML-specification of service.", "OpenAPI specification, serialized in JSON.", "OpenAPI specification, serialized in YAML.", - "FileDescriptorSet, generated by protoc.\n\nTo generate, use protoc with imports and source info included.\nFor an example test.proto file, the following command would put the value\nin a new file named out.pb.\n\n$protoc --include_imports --include_source_info test.proto -o out.pb", - "Uncompiled Proto file. Used for storage and display purposes only,\ncurrently server-side compilation is not supported. Should match the\ninputs to 'protoc' command used to generated FILE_DESCRIPTOR_SET_PROTO. A\nfile of this type can only be included if at least one file of type\nFILE_DESCRIPTOR_SET_PROTO is included." + "FileDescriptorSet, generated by protoc. To generate, use protoc with imports and source info included. For an example test.proto file, the following command would put the value in a new file named out.pb. $protoc --include_imports --include_source_info test.proto -o out.pb", + "Uncompiled Proto file. Used for storage and display purposes only, currently server-side compilation is not supported. Should match the inputs to 'protoc' command used to generated FILE_DESCRIPTOR_SET_PROTO. A file of this type can only be included if at least one file of type FILE_DESCRIPTOR_SET_PROTO is included." ], "type": "string" } @@ -1311,36 +1287,36 @@ "id": "ConfigRef", "properties": { "name": { - "description": "Resource name of a service config. It must have the following\nformat: \"services/{service name}/configs/{config id}\".", + "description": "Resource name of a service config. It must have the following format: \"services/{service name}/configs/{config id}\".", "type": "string" } }, "type": "object" }, "ConfigSource": { - "description": "Represents a source file which is used to generate the service configuration\ndefined by `google.api.Service`.", + "description": "Represents a source file which is used to generate the service configuration defined by `google.api.Service`.", "id": "ConfigSource", "properties": { "files": { - "description": "Set of source configuration files that are used to generate a service\nconfiguration (`google.api.Service`).", + "description": "Set of source configuration files that are used to generate a service configuration (`google.api.Service`).", "items": { "$ref": "ConfigFile" }, "type": "array" }, "id": { - "description": "A unique ID for a specific instance of this message, typically assigned\nby the client for tracking purpose. If empty, the server may choose to\ngenerate one instead.", + "description": "A unique ID for a specific instance of this message, typically assigned by the client for tracking purpose. 
If empty, the server may choose to generate one instead.", "type": "string" } }, "type": "object" }, "Context": { - "description": "`Context` defines which contexts an API requests.\n\nExample:\n\n context:\n rules:\n - selector: \"*\"\n requested:\n - google.rpc.context.ProjectContext\n - google.rpc.context.OriginContext\n\nThe above specifies that all methods in the API request\n`google.rpc.context.ProjectContext` and\n`google.rpc.context.OriginContext`.\n\nAvailable context types are defined in package\n`google.rpc.context`.\n\nThis also provides mechanism to whitelist any protobuf message extension that\ncan be sent in grpc metadata using “x-goog-ext-\u003cextension_id\u003e-bin” and\n“x-goog-ext-\u003cextension_id\u003e-jspb” format. For example, list any service\nspecific protobuf types that can appear in grpc metadata as follows in your\nyaml file:\n\nExample:\n\n context:\n rules:\n - selector: \"google.example.library.v1.LibraryService.CreateBook\"\n allowed_request_extensions:\n - google.foo.v1.NewExtension\n allowed_response_extensions:\n - google.foo.v1.NewExtension\n\nYou can also specify extension ID instead of fully qualified extension name\nhere.", + "description": "`Context` defines which contexts an API requests. Example: context: rules: - selector: \"*\" requested: - google.rpc.context.ProjectContext - google.rpc.context.OriginContext The above specifies that all methods in the API request `google.rpc.context.ProjectContext` and `google.rpc.context.OriginContext`. Available context types are defined in package `google.rpc.context`. This also provides mechanism to whitelist any protobuf message extension that can be sent in grpc metadata using “x-goog-ext--bin” and “x-goog-ext--jspb” format. For example, list any service specific protobuf types that can appear in grpc metadata as follows in your yaml file: Example: context: rules: - selector: \"google.example.library.v1.LibraryService.CreateBook\" allowed_request_extensions: - google.foo.v1.NewExtension allowed_response_extensions: - google.foo.v1.NewExtension You can also specify extension ID instead of fully qualified extension name here.", "id": "Context", "properties": { "rules": { - "description": "A list of RPC context rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of RPC context rules that apply to individual API methods. 
**NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "ContextRule" }, @@ -1350,18 +1326,18 @@ "type": "object" }, "ContextRule": { - "description": "A context rule provides information about the context for an individual API\nelement.", + "description": "A context rule provides information about the context for an individual API element.", "id": "ContextRule", "properties": { "allowedRequestExtensions": { - "description": "A list of full type names or extension IDs of extensions allowed in grpc\nside channel from client to backend.", + "description": "A list of full type names or extension IDs of extensions allowed in grpc side channel from client to backend.", "items": { "type": "string" }, "type": "array" }, "allowedResponseExtensions": { - "description": "A list of full type names or extension IDs of extensions allowed in grpc\nside channel from backend to client.", + "description": "A list of full type names or extension IDs of extensions allowed in grpc side channel from backend to client.", "items": { "type": "string" }, @@ -1382,29 +1358,29 @@ "type": "array" }, "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, "type": "object" }, "Control": { - "description": "Selects and configures the service controller used by the service. The\nservice controller handles features like abuse, quota, billing, logging,\nmonitoring, etc.", + "description": "Selects and configures the service controller used by the service. The service controller handles features like abuse, quota, billing, logging, monitoring, etc.", "id": "Control", "properties": { "environment": { - "description": "The service control environment to use. If empty, no control plane\nfeature (like quota and billing) will be enabled.", + "description": "The service control environment to use. If empty, no control plane feature (like quota and billing) will be enabled.", "type": "string" } }, "type": "object" }, "CustomError": { - "description": "Customize service error responses. For example, list any service\nspecific protobuf types that can appear in error detail lists of\nerror responses.\n\nExample:\n\n custom_error:\n types:\n - google.foo.v1.CustomError\n - google.foo.v1.AnotherError", + "description": "Customize service error responses. For example, list any service specific protobuf types that can appear in error detail lists of error responses. Example: custom_error: types: - google.foo.v1.CustomError - google.foo.v1.AnotherError", "id": "CustomError", "properties": { "rules": { - "description": "The list of custom error rules that apply to individual API messages.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "The list of custom error rules that apply to individual API messages. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "CustomErrorRule" }, @@ -1425,11 +1401,11 @@ "id": "CustomErrorRule", "properties": { "isErrorType": { - "description": "Mark this message as possible payload in error response. Otherwise,\nobjects of this type will be filtered when they appear in error payload.", + "description": "Mark this message as possible payload in error response. 
Otherwise, objects of this type will be filtered when they appear in error payload.", "type": "boolean" }, "selector": { - "description": "Selects messages to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects messages to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, @@ -1451,7 +1427,7 @@ "type": "object" }, "DeleteServiceStrategy": { - "description": "Strategy used to delete a service. This strategy is a placeholder only\nused by the system generated rollout to delete a service.", + "description": "Strategy used to delete a service. This strategy is a placeholder only used by the system generated rollout to delete a service.", "id": "DeleteServiceStrategy", "properties": {}, "type": "object" @@ -1483,17 +1459,6 @@ }, "type": "object" }, - "DisableServiceRequest": { - "description": "Request message for DisableService method.", - "id": "DisableServiceRequest", - "properties": { - "consumerId": { - "description": "Required. The identity of consumer resource which service disablement will be\napplied to.\n\nThe Google Service Management implementation accepts the following\nforms:\n- \"project:\u003cproject_id\u003e\"\n\nNote: this is made compatible with\ngoogle.api.servicecontrol.v1.Operation.consumer_id.", - "type": "string" - } - }, - "type": "object" - }, "DisableServiceResponse": { "description": "Operation payload for DisableService method.", "id": "DisableServiceResponse", @@ -1501,7 +1466,7 @@ "type": "object" }, "Documentation": { - "description": "`Documentation` provides the information for describing a service.\n\nExample:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: \u003e\n The Google Calendar API gives access\n to most calendar features.\n pages:\n - name: Overview\n content: \u0026#40;== include google/foo/overview.md ==\u0026#41;\n - name: Tutorial\n content: \u0026#40;== include google/foo/tutorial.md ==\u0026#41;\n subpages;\n - name: Java\n content: \u0026#40;== include google/foo/tutorial_java.md ==\u0026#41;\n rules:\n - selector: google.calendar.Calendar.Get\n description: \u003e\n ...\n - selector: google.calendar.Calendar.Put\n description: \u003e\n ...\n\u003c/code\u003e\u003c/pre\u003e\nDocumentation is provided in markdown syntax. In addition to\nstandard markdown features, definition lists, tables and fenced\ncode blocks are supported. Section headers can be provided and are\ninterpreted relative to the section nesting of the context where\na documentation fragment is embedded.\n\nDocumentation from the IDL is merged with documentation defined\nvia the config at normalization time, where documentation provided\nby config rules overrides IDL provided.\n\nA number of constructs specific to the API platform are supported\nin documentation text.\n\nIn order to reference a proto element, the following\nnotation can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;fully.qualified.proto.name]\u0026#91;]\u003c/code\u003e\u003c/pre\u003e\nTo override the display text used for the link, this can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;display text]\u0026#91;fully.qualified.proto.name]\u003c/code\u003e\u003c/pre\u003e\nText can be excluded from doc using the following notation:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;-- internal comment --\u0026#41;\u003c/code\u003e\u003c/pre\u003e\n\nA few directives are available in documentation. Note that\ndirectives must appear on a single line to be properly\nidentified. 
The `include` directive includes a markdown file from\nan external source:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== include path/to/file ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe `resource_for` directive marks a message to be the resource of\na collection in REST view. If it is not specified, tools attempt\nto infer the resource from the operations in a collection:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== resource_for v1.shelves.books ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe directive `suppress_warning` does not directly affect documentation\nand is documented together with service config validation.", + "description": "`Documentation` provides the information for describing a service. Example: documentation: summary: \u003e The Google Calendar API gives access to most calendar features. pages: - name: Overview content: (== include google/foo/overview.md ==) - name: Tutorial content: (== include google/foo/tutorial.md ==) subpages; - name: Java content: (== include google/foo/tutorial_java.md ==) rules: - selector: google.calendar.Calendar.Get description: \u003e ... - selector: google.calendar.Calendar.Put description: \u003e ... Documentation is provided in markdown syntax. In addition to standard markdown features, definition lists, tables and fenced code blocks are supported. Section headers can be provided and are interpreted relative to the section nesting of the context where a documentation fragment is embedded. Documentation from the IDL is merged with documentation defined via the config at normalization time, where documentation provided by config rules overrides IDL provided. A number of constructs specific to the API platform are supported in documentation text. In order to reference a proto element, the following notation can be used: [fully.qualified.proto.name][] To override the display text used for the link, this can be used: [display text][fully.qualified.proto.name] Text can be excluded from doc using the following notation: (-- internal comment --) A few directives are available in documentation. Note that directives must appear on a single line to be properly identified. The `include` directive includes a markdown file from an external source: (== include path/to/file ==) The `resource_for` directive marks a message to be the resource of a collection in REST view. If it is not specified, tools attempt to infer the resource from the operations in a collection: (== resource_for v1.shelves.books ==) The directive `suppress_warning` does not directly affect documentation and is documented together with service config validation.", "id": "Documentation", "properties": { "documentationRootUrl": { @@ -1509,7 +1474,7 @@ "type": "string" }, "overview": { - "description": "Declares a single overview page. For example:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: ...\n overview: \u0026#40;== include overview.md ==\u0026#41;\n\u003c/code\u003e\u003c/pre\u003e\nThis is a shortcut for the following declaration (using pages style):\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: ...\n pages:\n - name: Overview\n content: \u0026#40;== include overview.md ==\u0026#41;\n\u003c/code\u003e\u003c/pre\u003e\nNote: you cannot specify both `overview` field and `pages` field.", + "description": "Declares a single overview page. For example: documentation: summary: ... overview: (== include overview.md ==) This is a shortcut for the following declaration (using pages style): documentation: summary: ... 
pages: - name: Overview content: (== include overview.md ==) Note: you cannot specify both `overview` field and `pages` field.", "type": "string" }, "pages": { @@ -1520,18 +1485,18 @@ "type": "array" }, "rules": { - "description": "A list of documentation rules that apply to individual API elements.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of documentation rules that apply to individual API elements. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "DocumentationRule" }, "type": "array" }, "serviceRootUrl": { - "description": "Specifies the service root url if the default one (the service name\nfrom the yaml file) is not suitable. This can be seen in any fully\nspecified service urls as well as sections that show a base that other\nurls are relative to.", + "description": "Specifies the service root url if the default one (the service name from the yaml file) is not suitable. This can be seen in any fully specified service urls as well as sections that show a base that other urls are relative to.", "type": "string" }, "summary": { - "description": "A short summary of what the service does. Can only be provided by\nplain text.", + "description": "A short summary of what the service does. Can only be provided by plain text.", "type": "string" } }, @@ -1542,7 +1507,7 @@ "id": "DocumentationRule", "properties": { "deprecationDescription": { - "description": "Deprecation description of the selected element(s). It can be provided if\nan element is marked as `deprecated`.", + "description": "Deprecation description of the selected element(s). It can be provided if an element is marked as `deprecated`.", "type": "string" }, "description": { @@ -1550,7 +1515,7 @@ "type": "string" }, "selector": { - "description": "The selector is a comma-separated list of patterns. Each pattern is a\nqualified name of the element which may end in \"*\", indicating a wildcard.\nWildcards are only allowed at the end and for a whole component of the\nqualified name, i.e. \"foo.*\" is ok, but not \"foo.b*\" or \"foo.*.bar\". A\nwildcard will match one or more components. To specify a default for all\napplicable elements, the whole pattern \"*\" is used.", + "description": "The selector is a comma-separated list of patterns. Each pattern is a qualified name of the element which may end in \"*\", indicating a wildcard. Wildcards are only allowed at the end and for a whole component of the qualified name, i.e. \"foo.*\" is ok, but not \"foo.b*\" or \"foo.*.bar\". A wildcard will match one or more components. To specify a default for all applicable elements, the whole pattern \"*\" is used.", "type": "string" } }, @@ -1561,7 +1526,7 @@ "id": "EnableServiceRequest", "properties": { "consumerId": { - "description": "Required. The identity of consumer resource which service enablement will be\napplied to.\n\nThe Google Service Management implementation accepts the following\nforms:\n- \"project:\u003cproject_id\u003e\"\n\nNote: this is made compatible with\ngoogle.api.servicecontrol.v1.Operation.consumer_id.", + "description": "Required. The identity of consumer resource which service enablement will be applied to. 
The Google Service Management implementation accepts the following forms: - \"project:\" Note: this is made compatible with google.api.servicecontrol.v1.Operation.consumer_id.", "type": "string" } }, @@ -1574,33 +1539,26 @@ "type": "object" }, "Endpoint": { - "description": "`Endpoint` describes a network endpoint that serves a set of APIs.\nA service may expose any number of endpoints, and all endpoints share the\nsame service configuration, such as quota configuration and monitoring\nconfiguration.\n\nExample service configuration:\n\n name: library-example.googleapis.com\n endpoints:\n # Below entry makes 'google.example.library.v1.Library'\n # API be served from endpoint address library-example.googleapis.com.\n # It also allows HTTP OPTIONS calls to be passed to the backend, for\n # it to decide whether the subsequent cross-origin request is\n # allowed to proceed.\n - name: library-example.googleapis.com\n allow_cors: true", + "description": "`Endpoint` describes a network endpoint that serves a set of APIs. A service may expose any number of endpoints, and all endpoints share the same service configuration, such as quota configuration and monitoring configuration. Example service configuration: name: library-example.googleapis.com endpoints: # Below entry makes 'google.example.library.v1.Library' # API be served from endpoint address library-example.googleapis.com. # It also allows HTTP OPTIONS calls to be passed to the backend, for # it to decide whether the subsequent cross-origin request is # allowed to proceed. - name: library-example.googleapis.com allow_cors: true", "id": "Endpoint", "properties": { "aliases": { - "description": "DEPRECATED: This field is no longer supported. Instead of using aliases,\nplease specify multiple google.api.Endpoint for each of the intended\naliases.\n\nAdditional names that this endpoint will be hosted on.", + "description": "DEPRECATED: This field is no longer supported. Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on.", "items": { "type": "string" }, "type": "array" }, "allowCors": { - "description": "Allowing\n[CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka\ncross-domain traffic, would allow the backends served from this endpoint to\nreceive and respond to HTTP OPTIONS requests. The response will be used by\nthe browser to determine whether the subsequent cross-origin request is\nallowed to proceed.", + "description": "Allowing [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka cross-domain traffic, would allow the backends served from this endpoint to receive and respond to HTTP OPTIONS requests. The response will be used by the browser to determine whether the subsequent cross-origin request is allowed to proceed.", "type": "boolean" }, - "features": { - "description": "The list of features enabled on this endpoint.", - "items": { - "type": "string" - }, - "type": "array" - }, "name": { "description": "The canonical name of this endpoint.", "type": "string" }, "target": { - "description": "The specification of an Internet routable address of API frontend that will\nhandle requests to this [API\nEndpoint](https://cloud.google.com/apis/design/glossary). It should be\neither a valid IPv4 address or a fully-qualified domain name. 
For example,\n\"8.8.8.8\" or \"myservice.appspot.com\".", + "description": "The specification of an Internet routable address of API frontend that will handle requests to this [API Endpoint](https://cloud.google.com/apis/design/glossary). It should be either a valid IPv4 address or a fully-qualified domain name. For example, \"8.8.8.8\" or \"myservice.appspot.com\".", "type": "string" } }, @@ -1671,23 +1629,23 @@ "type": "object" }, "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL)\nsyntax. CEL is a C-like expression language. The syntax and semantics of CEL\nare documented at https://github.com/google/cel-spec.\n\nExample (Comparison):\n\n title: \"Summary size limit\"\n description: \"Determines if a summary is less than 100 chars\"\n expression: \"document.summary.size() \u003c 100\"\n\nExample (Equality):\n\n title: \"Requestor is owner\"\n description: \"Determines if requestor is the document owner\"\n expression: \"document.owner == request.auth.claims.email\"\n\nExample (Logic):\n\n title: \"Public documents\"\n description: \"Determine whether the document should be publicly visible\"\n expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\n\nExample (Data Manipulation):\n\n title: \"Notification string\"\n description: \"Create a notification string with a timestamp.\"\n expression: \"'New message received at ' + string(document.create_time)\"\n\nThe exact variables and functions that may be referenced within an expression\nare determined by the service that evaluates it. See the service\ndocumentation for additional information.", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { - "description": "Optional. Description of the expression. This is a longer text which\ndescribes the expression, e.g. when hovered over it in a UI.", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, "expression": { - "description": "Textual representation of an expression in Common Expression Language\nsyntax.", + "description": "Textual representation of an expression in Common Expression Language syntax.", "type": "string" }, "location": { - "description": "Optional. String indicating the location of the expression for error\nreporting, e.g. 
a file name and a position in the file.", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", "type": "string" }, "title": { - "description": "Optional. Title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", "type": "string" } }, @@ -1777,7 +1735,7 @@ "type": "integer" }, "oneofIndex": { - "description": "The index of the field type in `Type.oneofs`, for message or enumeration\ntypes. The first type has index 1; zero means the type is not in the list.", + "description": "The index of the field type in `Type.oneofs`, for message or enumeration types. The first type has index 1; zero means the type is not in the list.", "format": "int32", "type": "integer" }, @@ -1793,14 +1751,14 @@ "type": "boolean" }, "typeUrl": { - "description": "The field type URL, without the scheme, for message or enumeration\ntypes. Example: `\"type.googleapis.com/google.protobuf.Timestamp\"`.", + "description": "The field type URL, without the scheme, for message or enumeration types. Example: `\"type.googleapis.com/google.protobuf.Timestamp\"`.", "type": "string" } }, "type": "object" }, "FlowErrorDetails": { - "description": "Encapsulation of flow-specific error details for debugging.\nUsed as a details field on an error Status, not intended for external use.", + "description": "Encapsulation of flow-specific error details for debugging. Used as a details field on an error Status, not intended for external use.", "id": "FlowErrorDetails", "properties": { "exceptionType": { @@ -1823,7 +1781,7 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Required. Service configuration for which we want to generate the report.\nFor this version of API, the supported types are\ngoogle.api.servicemanagement.v1.ConfigRef,\ngoogle.api.servicemanagement.v1.ConfigSource,\nand google.api.Service", + "description": "Required. Service configuration for which we want to generate the report. For this version of API, the supported types are google.api.servicemanagement.v1.ConfigRef, google.api.servicemanagement.v1.ConfigSource, and google.api.Service", "type": "object" }, "oldConfig": { @@ -1831,7 +1789,7 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Optional. Service configuration against which the comparison will be done.\nFor this version of API, the supported types are\ngoogle.api.servicemanagement.v1.ConfigRef,\ngoogle.api.servicemanagement.v1.ConfigSource,\nand google.api.Service", + "description": "Optional. Service configuration against which the comparison will be done. 
For this version of API, the supported types are google.api.servicemanagement.v1.ConfigRef, google.api.servicemanagement.v1.ConfigSource, and google.api.Service", "type": "object" } }, @@ -1842,14 +1800,14 @@ "id": "GenerateConfigReportResponse", "properties": { "changeReports": { - "description": "list of ChangeReport, each corresponding to comparison between two\nservice configurations.", + "description": "list of ChangeReport, each corresponding to comparison between two service configurations.", "items": { "$ref": "ChangeReport" }, "type": "array" }, "diagnostics": { - "description": "Errors / Linter warnings associated with the service definition this\nreport\nbelongs to.", + "description": "Errors / Linter warnings associated with the service definition this report belongs to.", "items": { "$ref": "Diagnostic" }, @@ -1872,7 +1830,7 @@ "properties": { "options": { "$ref": "GetPolicyOptions", - "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to\n`GetIamPolicy`." + "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to `GetIamPolicy`." } }, "type": "object" @@ -1882,7 +1840,7 @@ "id": "GetPolicyOptions", "properties": { "requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -1890,15 +1848,15 @@ "type": "object" }, "Http": { - "description": "Defines the HTTP configuration for an API service. It contains a list of\nHttpRule, each specifying the mapping of an RPC method\nto one or more HTTP REST API methods.", + "description": "Defines the HTTP configuration for an API service. It contains a list of HttpRule, each specifying the mapping of an RPC method to one or more HTTP REST API methods.", "id": "Http", "properties": { "fullyDecodeReservedExpansion": { - "description": "When set to true, URL path parameters will be fully URI-decoded except in\ncases of single segment matches in reserved expansion, where \"%2F\" will be\nleft encoded.\n\nThe default behavior is to not decode RFC 6570 reserved characters in multi\nsegment matches.", + "description": "When set to true, URL path parameters will be fully URI-decoded except in cases of single segment matches in reserved expansion, where \"%2F\" will be left encoded. 
The default behavior is to not decode RFC 6570 reserved characters in multi segment matches.", "type": "boolean" }, "rules": { - "description": "A list of HTTP configuration rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of HTTP configuration rules that apply to individual API methods. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "HttpRule" }, @@ -1908,34 +1866,34 @@ "type": "object" }, "HttpRule": { - "description": "# gRPC Transcoding\n\ngRPC Transcoding is a feature for mapping between a gRPC method and one or\nmore HTTP REST endpoints. It allows developers to build a single API service\nthat supports both gRPC APIs and REST APIs. Many systems, including [Google\nAPIs](https://github.com/googleapis/googleapis),\n[Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC\nGateway](https://github.com/grpc-ecosystem/grpc-gateway),\nand [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature\nand use it for large scale production services.\n\n`HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies\nhow different portions of the gRPC request message are mapped to the URL\npath, URL query parameters, and HTTP request body. It also controls how the\ngRPC response message is mapped to the HTTP response body. `HttpRule` is\ntypically specified as an `google.api.http` annotation on the gRPC method.\n\nEach mapping specifies a URL path template and an HTTP method. The path\ntemplate may refer to one or more fields in the gRPC request message, as long\nas each field is a non-repeated field with a primitive (non-message) type.\nThe path template controls how fields of the request message are mapped to\nthe URL path.\n\nExample:\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http) = {\n get: \"/v1/{name=messages/*}\"\n };\n }\n }\n message GetMessageRequest {\n string name = 1; // Mapped to URL path.\n }\n message Message {\n string text = 1; // The resource content.\n }\n\nThis enables an HTTP REST to gRPC mapping as below:\n\nHTTP | gRPC\n-----|-----\n`GET /v1/messages/123456` | `GetMessage(name: \"messages/123456\")`\n\nAny fields in the request message which are not bound by the path template\nautomatically become HTTP query parameters if there is no HTTP request body.\nFor example:\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http) = {\n get:\"/v1/messages/{message_id}\"\n };\n }\n }\n message GetMessageRequest {\n message SubMessage {\n string subfield = 1;\n }\n string message_id = 1; // Mapped to URL path.\n int64 revision = 2; // Mapped to URL query parameter `revision`.\n SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`.\n }\n\nThis enables a HTTP JSON to RPC mapping as below:\n\nHTTP | gRPC\n-----|-----\n`GET /v1/messages/123456?revision=2\u0026sub.subfield=foo` |\n`GetMessage(message_id: \"123456\" revision: 2 sub: SubMessage(subfield:\n\"foo\"))`\n\nNote that fields which are mapped to URL query parameters must have a\nprimitive type or a repeated primitive type or a non-repeated message type.\nIn the case of a repeated type, the parameter can be repeated in the URL\nas `...?param=A\u0026param=B`. 
In the case of a message type, each field of the\nmessage is mapped to a separate parameter, such as\n`...?foo.a=A\u0026foo.b=B\u0026foo.c=C`.\n\nFor HTTP methods that allow a request body, the `body` field\nspecifies the mapping. Consider a REST update method on the\nmessage resource collection:\n\n service Messaging {\n rpc UpdateMessage(UpdateMessageRequest) returns (Message) {\n option (google.api.http) = {\n patch: \"/v1/messages/{message_id}\"\n body: \"message\"\n };\n }\n }\n message UpdateMessageRequest {\n string message_id = 1; // mapped to the URL\n Message message = 2; // mapped to the body\n }\n\nThe following HTTP JSON to RPC mapping is enabled, where the\nrepresentation of the JSON in the request body is determined by\nprotos JSON encoding:\n\nHTTP | gRPC\n-----|-----\n`PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id:\n\"123456\" message { text: \"Hi!\" })`\n\nThe special name `*` can be used in the body mapping to define that\nevery field not bound by the path template should be mapped to the\nrequest body. This enables the following alternative definition of\nthe update method:\n\n service Messaging {\n rpc UpdateMessage(Message) returns (Message) {\n option (google.api.http) = {\n patch: \"/v1/messages/{message_id}\"\n body: \"*\"\n };\n }\n }\n message Message {\n string message_id = 1;\n string text = 2;\n }\n\n\nThe following HTTP JSON to RPC mapping is enabled:\n\nHTTP | gRPC\n-----|-----\n`PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id:\n\"123456\" text: \"Hi!\")`\n\nNote that when using `*` in the body mapping, it is not possible to\nhave HTTP parameters, as all fields not bound by the path end in\nthe body. This makes this option more rarely used in practice when\ndefining REST APIs. The common usage of `*` is in custom methods\nwhich don't use the URL at all for transferring data.\n\nIt is possible to define multiple HTTP methods for one RPC by using\nthe `additional_bindings` option. Example:\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http) = {\n get: \"/v1/messages/{message_id}\"\n additional_bindings {\n get: \"/v1/users/{user_id}/messages/{message_id}\"\n }\n };\n }\n }\n message GetMessageRequest {\n string message_id = 1;\n string user_id = 2;\n }\n\nThis enables the following two alternative HTTP JSON to RPC mappings:\n\nHTTP | gRPC\n-----|-----\n`GET /v1/messages/123456` | `GetMessage(message_id: \"123456\")`\n`GET /v1/users/me/messages/123456` | `GetMessage(user_id: \"me\" message_id:\n\"123456\")`\n\n## Rules for HTTP mapping\n\n1. Leaf request fields (recursive expansion nested messages in the request\n message) are classified into three categories:\n - Fields referred by the path template. They are passed via the URL path.\n - Fields referred by the HttpRule.body. They are passed via the HTTP\n request body.\n - All other fields are passed via the URL query parameters, and the\n parameter name is the field path in the request message. A repeated\n field can be represented as multiple query parameters under the same\n name.\n 2. If HttpRule.body is \"*\", there is no URL query parameter, all fields\n are passed via URL path and HTTP request body.\n 3. 
If HttpRule.body is omitted, there is no HTTP request body, all\n fields are passed via URL path and URL query parameters.\n\n### Path template syntax\n\n Template = \"/\" Segments [ Verb ] ;\n Segments = Segment { \"/\" Segment } ;\n Segment = \"*\" | \"**\" | LITERAL | Variable ;\n Variable = \"{\" FieldPath [ \"=\" Segments ] \"}\" ;\n FieldPath = IDENT { \".\" IDENT } ;\n Verb = \":\" LITERAL ;\n\nThe syntax `*` matches a single URL path segment. The syntax `**` matches\nzero or more URL path segments, which must be the last part of the URL path\nexcept the `Verb`.\n\nThe syntax `Variable` matches part of the URL path as specified by its\ntemplate. A variable template must not contain other variables. If a variable\nmatches a single path segment, its template may be omitted, e.g. `{var}`\nis equivalent to `{var=*}`.\n\nThe syntax `LITERAL` matches literal text in the URL path. If the `LITERAL`\ncontains any reserved character, such characters should be percent-encoded\nbefore the matching.\n\nIf a variable contains exactly one path segment, such as `\"{var}\"` or\n`\"{var=*}\"`, when such a variable is expanded into a URL path on the client\nside, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The\nserver side does the reverse decoding. Such variables show up in the\n[Discovery\nDocument](https://developers.google.com/discovery/v1/reference/apis) as\n`{var}`.\n\nIf a variable contains multiple path segments, such as `\"{var=foo/*}\"`\nor `\"{var=**}\"`, when such a variable is expanded into a URL path on the\nclient side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded.\nThe server side does the reverse decoding, except \"%2F\" and \"%2f\" are left\nunchanged. Such variables show up in the\n[Discovery\nDocument](https://developers.google.com/discovery/v1/reference/apis) as\n`{+var}`.\n\n## Using gRPC API Service Configuration\n\ngRPC API Service Configuration (service config) is a configuration language\nfor configuring a gRPC service to become a user-facing product. The\nservice config is simply the YAML representation of the `google.api.Service`\nproto message.\n\nAs an alternative to annotating your proto file, you can configure gRPC\ntranscoding in your service config YAML files. You do this by specifying a\n`HttpRule` that maps the gRPC method to a REST endpoint, achieving the same\neffect as the proto annotation. This can be particularly useful if you\nhave a proto that is reused in multiple services. Note that any transcoding\nspecified in the service config will override any matching transcoding\nconfiguration in the proto.\n\nExample:\n\n http:\n rules:\n # Selects a gRPC method and applies HttpRule to it.\n - selector: example.v1.Messaging.GetMessage\n get: /v1/messages/{message_id}/{sub.subfield}\n\n## Special notes\n\nWhen gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the\nproto to JSON conversion must follow the [proto3\nspecification](https://developers.google.com/protocol-buffers/docs/proto3#json).\n\nWhile the single segment variable follows the semantics of\n[RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String\nExpansion, the multi segment variable **does not** follow RFC 6570 Section\n3.2.3 Reserved Expansion. The reason is that the Reserved Expansion\ndoes not expand special characters like `?` and `#`, which would lead\nto invalid URLs. 
As the result, gRPC Transcoding uses a custom encoding\nfor multi segment variables.\n\nThe path variables **must not** refer to any repeated or mapped field,\nbecause client libraries are not capable of handling such variable expansion.\n\nThe path variables **must not** capture the leading \"/\" character. The reason\nis that the most common use case \"{var}\" does not capture the leading \"/\"\ncharacter. For consistency, all path variables must share the same behavior.\n\nRepeated message fields must not be mapped to URL query parameters, because\nno client library can support such complicated mapping.\n\nIf an API needs to use a JSON array for request or response body, it can map\nthe request or response body to a repeated field. However, some gRPC\nTranscoding implementations may not support this feature.", + "description": "# gRPC Transcoding gRPC Transcoding is a feature for mapping between a gRPC method and one or more HTTP REST endpoints. It allows developers to build a single API service that supports both gRPC APIs and REST APIs. Many systems, including [Google APIs](https://github.com/googleapis/googleapis), [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC Gateway](https://github.com/grpc-ecosystem/grpc-gateway), and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature and use it for large scale production services. `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies how different portions of the gRPC request message are mapped to the URL path, URL query parameters, and HTTP request body. It also controls how the gRPC response message is mapped to the HTTP response body. `HttpRule` is typically specified as an `google.api.http` annotation on the gRPC method. Each mapping specifies a URL path template and an HTTP method. The path template may refer to one or more fields in the gRPC request message, as long as each field is a non-repeated field with a primitive (non-message) type. The path template controls how fields of the request message are mapped to the URL path. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/{name=messages/*}\" }; } } message GetMessageRequest { string name = 1; // Mapped to URL path. } message Message { string text = 1; // The resource content. } This enables an HTTP REST to gRPC mapping as below: HTTP | gRPC -----|----- `GET /v1/messages/123456` | `GetMessage(name: \"messages/123456\")` Any fields in the request message which are not bound by the path template automatically become HTTP query parameters if there is no HTTP request body. For example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get:\"/v1/messages/{message_id}\" }; } } message GetMessageRequest { message SubMessage { string subfield = 1; } string message_id = 1; // Mapped to URL path. int64 revision = 2; // Mapped to URL query parameter `revision`. SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. } This enables a HTTP JSON to RPC mapping as below: HTTP | gRPC -----|----- `GET /v1/messages/123456?revision=2\u0026sub.subfield=foo` | `GetMessage(message_id: \"123456\" revision: 2 sub: SubMessage(subfield: \"foo\"))` Note that fields which are mapped to URL query parameters must have a primitive type or a repeated primitive type or a non-repeated message type. In the case of a repeated type, the parameter can be repeated in the URL as `...?param=A\u0026param=B`. 
In the case of a message type, each field of the message is mapped to a separate parameter, such as `...?foo.a=A\u0026foo.b=B\u0026foo.c=C`. For HTTP methods that allow a request body, the `body` field specifies the mapping. Consider a REST update method on the message resource collection: service Messaging { rpc UpdateMessage(UpdateMessageRequest) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"message\" }; } } message UpdateMessageRequest { string message_id = 1; // mapped to the URL Message message = 2; // mapped to the body } The following HTTP JSON to RPC mapping is enabled, where the representation of the JSON in the request body is determined by protos JSON encoding: HTTP | gRPC -----|----- `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id: \"123456\" message { text: \"Hi!\" })` The special name `*` can be used in the body mapping to define that every field not bound by the path template should be mapped to the request body. This enables the following alternative definition of the update method: service Messaging { rpc UpdateMessage(Message) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"*\" }; } } message Message { string message_id = 1; string text = 2; } The following HTTP JSON to RPC mapping is enabled: HTTP | gRPC -----|----- `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id: \"123456\" text: \"Hi!\")` Note that when using `*` in the body mapping, it is not possible to have HTTP parameters, as all fields not bound by the path end in the body. This makes this option more rarely used in practice when defining REST APIs. The common usage of `*` is in custom methods which don't use the URL at all for transferring data. It is possible to define multiple HTTP methods for one RPC by using the `additional_bindings` option. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/messages/{message_id}\" additional_bindings { get: \"/v1/users/{user_id}/messages/{message_id}\" } }; } } message GetMessageRequest { string message_id = 1; string user_id = 2; } This enables the following two alternative HTTP JSON to RPC mappings: HTTP | gRPC -----|----- `GET /v1/messages/123456` | `GetMessage(message_id: \"123456\")` `GET /v1/users/me/messages/123456` | `GetMessage(user_id: \"me\" message_id: \"123456\")` ## Rules for HTTP mapping 1. Leaf request fields (recursive expansion nested messages in the request message) are classified into three categories: - Fields referred by the path template. They are passed via the URL path. - Fields referred by the HttpRule.body. They are passed via the HTTP request body. - All other fields are passed via the URL query parameters, and the parameter name is the field path in the request message. A repeated field can be represented as multiple query parameters under the same name. 2. If HttpRule.body is \"*\", there is no URL query parameter, all fields are passed via URL path and HTTP request body. 3. If HttpRule.body is omitted, there is no HTTP request body, all fields are passed via URL path and URL query parameters. ### Path template syntax Template = \"/\" Segments [ Verb ] ; Segments = Segment { \"/\" Segment } ; Segment = \"*\" | \"**\" | LITERAL | Variable ; Variable = \"{\" FieldPath [ \"=\" Segments ] \"}\" ; FieldPath = IDENT { \".\" IDENT } ; Verb = \":\" LITERAL ; The syntax `*` matches a single URL path segment. 
The syntax `**` matches zero or more URL path segments, which must be the last part of the URL path except the `Verb`. The syntax `Variable` matches part of the URL path as specified by its template. A variable template must not contain other variables. If a variable matches a single path segment, its template may be omitted, e.g. `{var}` is equivalent to `{var=*}`. The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` contains any reserved character, such characters should be percent-encoded before the matching. If a variable contains exactly one path segment, such as `\"{var}\"` or `\"{var=*}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{var}`. If a variable contains multiple path segments, such as `\"{var=foo/*}\"` or `\"{var=**}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding, except \"%2F\" and \"%2f\" are left unchanged. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{+var}`. ## Using gRPC API Service Configuration gRPC API Service Configuration (service config) is a configuration language for configuring a gRPC service to become a user-facing product. The service config is simply the YAML representation of the `google.api.Service` proto message. As an alternative to annotating your proto file, you can configure gRPC transcoding in your service config YAML files. You do this by specifying a `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same effect as the proto annotation. This can be particularly useful if you have a proto that is reused in multiple services. Note that any transcoding specified in the service config will override any matching transcoding configuration in the proto. Example: http: rules: # Selects a gRPC method and applies HttpRule to it. - selector: example.v1.Messaging.GetMessage get: /v1/messages/{message_id}/{sub.subfield} ## Special notes When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the proto to JSON conversion must follow the [proto3 specification](https://developers.google.com/protocol-buffers/docs/proto3#json). While the single segment variable follows the semantics of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String Expansion, the multi segment variable **does not** follow RFC 6570 Section 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion does not expand special characters like `?` and `#`, which would lead to invalid URLs. As the result, gRPC Transcoding uses a custom encoding for multi segment variables. The path variables **must not** refer to any repeated or mapped field, because client libraries are not capable of handling such variable expansion. The path variables **must not** capture the leading \"/\" character. The reason is that the most common use case \"{var}\" does not capture the leading \"/\" character. For consistency, all path variables must share the same behavior. Repeated message fields must not be mapped to URL query parameters, because no client library can support such complicated mapping. 
If an API needs to use a JSON array for request or response body, it can map the request or response body to a repeated field. However, some gRPC Transcoding implementations may not support this feature.", "id": "HttpRule", "properties": { "additionalBindings": { - "description": "Additional HTTP bindings for the selector. Nested bindings must\nnot contain an `additional_bindings` field themselves (that is,\nthe nesting may only be one level deep).", + "description": "Additional HTTP bindings for the selector. Nested bindings must not contain an `additional_bindings` field themselves (that is, the nesting may only be one level deep).", "items": { "$ref": "HttpRule" }, "type": "array" }, "allowHalfDuplex": { - "description": "When this flag is set to true, HTTP requests will be allowed to invoke a\nhalf-duplex streaming method.", + "description": "When this flag is set to true, HTTP requests will be allowed to invoke a half-duplex streaming method.", "type": "boolean" }, "body": { - "description": "The name of the request field whose value is mapped to the HTTP request\nbody, or `*` for mapping all request fields not captured by the path\npattern to the HTTP body, or omitted for not having any HTTP request body.\n\nNOTE: the referred field must be present at the top-level of the request\nmessage type.", + "description": "The name of the request field whose value is mapped to the HTTP request body, or `*` for mapping all request fields not captured by the path pattern to the HTTP body, or omitted for not having any HTTP request body. NOTE: the referred field must be present at the top-level of the request message type.", "type": "string" }, "custom": { "$ref": "CustomHttpPattern", - "description": "The custom pattern is used for specifying an HTTP method that is not\nincluded in the `pattern` field, such as HEAD, or \"*\" to leave the\nHTTP method unspecified for this rule. The wild-card rule is useful\nfor services that provide content to Web (HTML) clients." + "description": "The custom pattern is used for specifying an HTTP method that is not included in the `pattern` field, such as HEAD, or \"*\" to leave the HTTP method unspecified for this rule. The wild-card rule is useful for services that provide content to Web (HTML) clients." }, "delete": { "description": "Maps to HTTP DELETE. Used for deleting a resource.", "type": "string" }, "get": { - "description": "Maps to HTTP GET. Used for listing and getting information about\nresources.", + "description": "Maps to HTTP GET. Used for listing and getting information about resources.", "type": "string" }, "patch": { @@ -1951,11 +1909,11 @@ "type": "string" }, "responseBody": { - "description": "Optional. The name of the response field whose value is mapped to the HTTP\nresponse body. When omitted, the entire response message will be used\nas the HTTP response body.\n\nNOTE: The referred field must be present at the top-level of the response\nmessage type.", + "description": "Optional. The name of the response field whose value is mapped to the HTTP response body. When omitted, the entire response message will be used as the HTTP response body. NOTE: The referred field must be present at the top-level of the response message type.", "type": "string" }, "selector": { - "description": "Selects a method to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects a method to which this rule applies. 
Refer to selector for syntax details.", "type": "string" } }, @@ -1974,7 +1932,7 @@ "type": "string" }, "valuePrefix": { - "description": "The value prefix. The value format is \"value_prefix{token}\"\nOnly applies to \"in\" header type. Must be empty for \"in\" query type.\nIf not empty, the header value has to match (case sensitive) this prefix.\nIf not matched, JWT will not be extracted. If matched, JWT will be\nextracted after the prefix is removed.\n\nFor example, for \"Authorization: Bearer {JWT}\",\nvalue_prefix=\"Bearer \" with a space at the end.", + "description": "The value prefix. The value format is \"value_prefix{token}\" Only applies to \"in\" header type. Must be empty for \"in\" query type. If not empty, the header value has to match (case sensitive) this prefix. If not matched, JWT will not be extracted. If matched, JWT will be extracted after the prefix is removed. For example, for \"Authorization: Bearer {JWT}\", value_prefix=\"Bearer \" with a space at the end.", "type": "string" } }, @@ -2082,44 +2040,44 @@ "type": "object" }, "LogDescriptor": { - "description": "A description of a log type. Example in YAML format:\n\n - name: library.googleapis.com/activity_history\n description: The history of borrowing and returning library items.\n display_name: Activity\n labels:\n - key: /customer_id\n description: Identifier of a library customer", + "description": "A description of a log type. Example in YAML format: - name: library.googleapis.com/activity_history description: The history of borrowing and returning library items. display_name: Activity labels: - key: /customer_id description: Identifier of a library customer", "id": "LogDescriptor", "properties": { "description": { - "description": "A human-readable description of this log. This information appears in\nthe documentation and can contain details.", + "description": "A human-readable description of this log. This information appears in the documentation and can contain details.", "type": "string" }, "displayName": { - "description": "The human-readable name for this log. This information appears on\nthe user interface and should be concise.", + "description": "The human-readable name for this log. This information appears on the user interface and should be concise.", "type": "string" }, "labels": { - "description": "The set of labels that are available to describe a specific log entry.\nRuntime requests that contain labels not specified here are\nconsidered invalid.", + "description": "The set of labels that are available to describe a specific log entry. Runtime requests that contain labels not specified here are considered invalid.", "items": { "$ref": "LabelDescriptor" }, "type": "array" }, "name": { - "description": "The name of the log. It must be less than 512 characters long and can\ninclude the following characters: upper- and lower-case alphanumeric\ncharacters [A-Za-z0-9], and punctuation characters including\nslash, underscore, hyphen, period [/_-.].", + "description": "The name of the log. It must be less than 512 characters long and can include the following characters: upper- and lower-case alphanumeric characters [A-Za-z0-9], and punctuation characters including slash, underscore, hyphen, period [/_-.].", "type": "string" } }, "type": "object" }, "Logging": { - "description": "Logging configuration of the service.\n\nThe following example shows how to configure logs to be sent to the\nproducer and consumer projects. 
In the example, the `activity_history`\nlog is sent to both the producer and consumer projects, whereas the\n`purchase_history` log is only sent to the producer project.\n\n monitored_resources:\n - type: library.googleapis.com/branch\n labels:\n - key: /city\n description: The city where the library branch is located in.\n - key: /name\n description: The name of the branch.\n logs:\n - name: activity_history\n labels:\n - key: /customer_id\n - name: purchase_history\n logging:\n producer_destinations:\n - monitored_resource: library.googleapis.com/branch\n logs:\n - activity_history\n - purchase_history\n consumer_destinations:\n - monitored_resource: library.googleapis.com/branch\n logs:\n - activity_history", + "description": "Logging configuration of the service. The following example shows how to configure logs to be sent to the producer and consumer projects. In the example, the `activity_history` log is sent to both the producer and consumer projects, whereas the `purchase_history` log is only sent to the producer project. monitored_resources: - type: library.googleapis.com/branch labels: - key: /city description: The city where the library branch is located in. - key: /name description: The name of the branch. logs: - name: activity_history labels: - key: /customer_id - name: purchase_history logging: producer_destinations: - monitored_resource: library.googleapis.com/branch logs: - activity_history - purchase_history consumer_destinations: - monitored_resource: library.googleapis.com/branch logs: - activity_history", "id": "Logging", "properties": { "consumerDestinations": { - "description": "Logging configurations for sending logs to the consumer project.\nThere can be multiple consumer destinations, each one must have a\ndifferent monitored resource type. A log can be used in at most\none consumer destination.", + "description": "Logging configurations for sending logs to the consumer project. There can be multiple consumer destinations, each one must have a different monitored resource type. A log can be used in at most one consumer destination.", "items": { "$ref": "LoggingDestination" }, "type": "array" }, "producerDestinations": { - "description": "Logging configurations for sending logs to the producer project.\nThere can be multiple producer destinations, each one must have a\ndifferent monitored resource type. A log can be used in at most\none producer destination.", + "description": "Logging configurations for sending logs to the producer project. There can be multiple producer destinations, each one must have a different monitored resource type. A log can be used in at most one producer destination.", "items": { "$ref": "LoggingDestination" }, @@ -2129,25 +2087,25 @@ "type": "object" }, "LoggingDestination": { - "description": "Configuration of a specific logging destination (the producer project\nor the consumer project).", + "description": "Configuration of a specific logging destination (the producer project or the consumer project).", "id": "LoggingDestination", "properties": { "logs": { - "description": "Names of the logs to be sent to this destination. Each name must\nbe defined in the Service.logs section. If the log name is\nnot a domain scoped name, it will be automatically prefixed with\nthe service name followed by \"/\".", + "description": "Names of the logs to be sent to this destination. Each name must be defined in the Service.logs section. 
If the log name is not a domain scoped name, it will be automatically prefixed with the service name followed by \"/\".", "items": { "type": "string" }, "type": "array" }, "monitoredResource": { - "description": "The monitored resource type. The type must be defined in the\nService.monitored_resources section.", + "description": "The monitored resource type. The type must be defined in the Service.monitored_resources section.", "type": "string" } }, "type": "object" }, "ManagedService": { - "description": "The full representation of a Service that is managed by\nGoogle Service Management.", + "description": "The full representation of a Service that is managed by Google Service Management.", "id": "ManagedService", "properties": { "producerProjectId": { @@ -2155,7 +2113,7 @@ "type": "string" }, "serviceName": { - "description": "The name of the service. See the [overview](/service-management/overview)\nfor naming requirements.", + "description": "The name of the service. See the [overview](/service-management/overview) for naming requirements.", "type": "string" } }, @@ -2208,7 +2166,7 @@ "type": "object" }, "MetricDescriptor": { - "description": "Defines a metric type and its schema. Once a metric descriptor is created,\ndeleting or altering it stops data collection and makes the metric type's\nexisting data unusable.", + "description": "Defines a metric type and its schema. Once a metric descriptor is created, deleting or altering it stops data collection and makes the metric type's existing data unusable. ", "id": "MetricDescriptor", "properties": { "description": { @@ -2216,11 +2174,11 @@ "type": "string" }, "displayName": { - "description": "A concise name for the metric, which can be displayed in user interfaces.\nUse sentence case without an ending period, for example \"Request count\".\nThis field is optional but it is recommended to be set for any metrics\nassociated with user-visible concepts, such as Quota.", + "description": "A concise name for the metric, which can be displayed in user interfaces. Use sentence case without an ending period, for example \"Request count\". This field is optional but it is recommended to be set for any metrics associated with user-visible concepts, such as Quota.", "type": "string" }, "labels": { - "description": "The set of labels that can be used to describe a specific\ninstance of this metric type. For example, the\n`appengine.googleapis.com/http/server/response_latencies` metric\ntype has a label for the HTTP response code, `response_code`, so\nyou can look at latencies for successful responses or just\nfor responses that failed.", + "description": "The set of labels that can be used to describe a specific instance of this metric type. For example, the `appengine.googleapis.com/http/server/response_latencies` metric type has a label for the HTTP response code, `response_code`, so you can look at latencies for successful responses or just for responses that failed.", "items": { "$ref": "LabelDescriptor" }, @@ -2242,11 +2200,11 @@ "Do not use this default value.", "The feature is not yet implemented. Users can not use it.", "Prelaunch features are hidden from users and are only visible internally.", - "Early Access features are limited to a closed group of testers. To use\nthese features, you must sign up in advance and sign a Trusted Tester\nagreement (which includes confidentiality provisions). 
These features may\nbe unstable, changed in backward-incompatible ways, and are not\nguaranteed to be released.", - "Alpha is a limited availability test for releases before they are cleared\nfor widespread use. By Alpha, all significant design issues are resolved\nand we are in the process of verifying functionality. Alpha customers\nneed to apply for access, agree to applicable terms, and have their\nprojects whitelisted. Alpha releases don’t have to be feature complete,\nno SLAs are provided, and there are no technical support obligations, but\nthey will be far enough along that customers can actually use them in\ntest environments or for limited-use tests -- just like they would in\nnormal production cases.", - "Beta is the point at which we are ready to open a release for any\ncustomer to use. There are no SLA or technical support obligations in a\nBeta release. Products will be complete from a feature perspective, but\nmay have some open outstanding issues. Beta releases are suitable for\nlimited production use cases.", - "GA features are open to all developers and are considered stable and\nfully qualified for production use.", - "Deprecated features are scheduled to be shut down and removed. For more\ninformation, see the “Deprecation Policy” section of our [Terms of\nService](https://cloud.google.com/terms/)\nand the [Google Cloud Platform Subject to the Deprecation\nPolicy](https://cloud.google.com/terms/deprecation) documentation." + "Early Access features are limited to a closed group of testers. To use these features, you must sign up in advance and sign a Trusted Tester agreement (which includes confidentiality provisions). These features may be unstable, changed in backward-incompatible ways, and are not guaranteed to be released.", + "Alpha is a limited availability test for releases before they are cleared for widespread use. By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects whitelisted. Alpha releases don’t have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", + "Beta is the point at which we are ready to open a release for any customer to use. There are no SLA or technical support obligations in a Beta release. Products will be complete from a feature perspective, but may have some open outstanding issues. Beta releases are suitable for limited production use cases.", + "GA features are open to all developers and are considered stable and fully qualified for production use.", + "Deprecated features are scheduled to be shut down and removed. For more information, see the “Deprecation Policy” section of our [Terms of Service](https://cloud.google.com/terms/) and the [Google Cloud Platform Subject to the Deprecation Policy](https://cloud.google.com/terms/deprecation) documentation." ], "type": "string" }, @@ -2255,7 +2213,7 @@ "description": "Optional. Metadata which can be used to guide usage of the metric." }, "metricKind": { - "description": "Whether the metric records instantaneous values, changes to a value, etc.\nSome combinations of `metric_kind` and `value_type` might not be supported.", + "description": "Whether the metric records instantaneous values, changes to a value, etc. 
Some combinations of `metric_kind` and `value_type` might not be supported.", "enum": [ "METRIC_KIND_UNSPECIFIED", "GAUGE", @@ -2266,12 +2224,12 @@ "Do not use this default value.", "An instantaneous measurement of a value.", "The change in a value during a time interval.", - "A value accumulated over a time interval. Cumulative\nmeasurements in a time series should have the same start time\nand increasing end times, until an event resets the cumulative\nvalue to zero and sets a new start time for the following\npoints." + "A value accumulated over a time interval. Cumulative measurements in a time series should have the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." ], "type": "string" }, "monitoredResourceTypes": { - "description": "Read-only. If present, then a time\nseries, which is identified partially by\na metric type and a MonitoredResourceDescriptor, that is associated\nwith this metric type can only be associated with one of the monitored\nresource types listed here.", + "description": "Read-only. If present, then a time series, which is identified partially by a metric type and a MonitoredResourceDescriptor, that is associated with this metric type can only be associated with one of the monitored resource types listed here.", "items": { "type": "string" }, @@ -2282,15 +2240,15 @@ "type": "string" }, "type": { - "description": "The metric type, including its DNS name prefix. The type is not\nURL-encoded. All user-defined metric types have the DNS name\n`custom.googleapis.com` or `external.googleapis.com`. Metric types should\nuse a natural hierarchical grouping. For example:\n\n \"custom.googleapis.com/invoice/paid/amount\"\n \"external.googleapis.com/prometheus/up\"\n \"appengine.googleapis.com/http/server/response_latencies\"", + "description": "The metric type, including its DNS name prefix. The type is not URL-encoded. All user-defined metric types have the DNS name `custom.googleapis.com` or `external.googleapis.com`. Metric types should use a natural hierarchical grouping. For example: \"custom.googleapis.com/invoice/paid/amount\" \"external.googleapis.com/prometheus/up\" \"appengine.googleapis.com/http/server/response_latencies\"", "type": "string" }, "unit": { - "description": "The units in which the metric value is reported. It is only applicable\nif the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit`\ndefines the representation of the stored metric values.\n\nDifferent systems may scale the values to be more easily displayed (so a\nvalue of `0.02KBy` _might_ be displayed as `20By`, and a value of\n`3523KBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is\n`KBy`, then the value of the metric is always in thousands of bytes, no\nmatter how it may be displayed..\n\nIf you want a custom metric to record the exact number of CPU-seconds used\nby a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is\n`s{CPU}` (or equivalently `1s{CPU}` or just `s`). 
If the job uses 12,005\nCPU-seconds, then the value is written as `12005`.\n\nAlternatively, if you want a custom metric to record data in a more\ngranular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is\n`ks{CPU}`, and then write the value `12.005` (which is `12005/1000`),\nor use `Kis{CPU}` and write `11.723` (which is `12005/1024`).\n\nThe supported units are a subset of [The Unified Code for Units of\nMeasure](http://unitsofmeasure.org/ucum.html) standard:\n\n**Basic units (UNIT)**\n\n* `bit` bit\n* `By` byte\n* `s` second\n* `min` minute\n* `h` hour\n* `d` day\n\n**Prefixes (PREFIX)**\n\n* `k` kilo (10^3)\n* `M` mega (10^6)\n* `G` giga (10^9)\n* `T` tera (10^12)\n* `P` peta (10^15)\n* `E` exa (10^18)\n* `Z` zetta (10^21)\n* `Y` yotta (10^24)\n\n* `m` milli (10^-3)\n* `u` micro (10^-6)\n* `n` nano (10^-9)\n* `p` pico (10^-12)\n* `f` femto (10^-15)\n* `a` atto (10^-18)\n* `z` zepto (10^-21)\n* `y` yocto (10^-24)\n\n* `Ki` kibi (2^10)\n* `Mi` mebi (2^20)\n* `Gi` gibi (2^30)\n* `Ti` tebi (2^40)\n* `Pi` pebi (2^50)\n\n**Grammar**\n\nThe grammar also includes these connectors:\n\n* `/` division or ratio (as an infix operator). For examples,\n `kBy/{email}` or `MiBy/10ms` (although you should almost never\n have `/s` in a metric `unit`; rates should always be computed at\n query time from the underlying cumulative or delta value).\n* `.` multiplication or composition (as an infix operator). For\n examples, `GBy.d` or `k{watt}.h`.\n\nThe grammar for a unit is as follows:\n\n Expression = Component { \".\" Component } { \"/\" Component } ;\n\n Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ]\n | Annotation\n | \"1\"\n ;\n\n Annotation = \"{\" NAME \"}\" ;\n\nNotes:\n\n* `Annotation` is just a comment if it follows a `UNIT`. If the annotation\n is used alone, then the unit is equivalent to `1`. For examples,\n `{request}/s == 1/s`, `By{transmitted}/s == By/s`.\n* `NAME` is a sequence of non-blank printable ASCII characters not\n containing `{` or `}`.\n* `1` represents a unitary [dimensionless\n unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such\n as in `1/s`. It is typically used when none of the basic units are\n appropriate. For example, \"new users per day\" can be represented as\n `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new\n users). Alternatively, \"thousands of page views per day\" would be\n represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric\n value of `5.3` would mean \"5300 page views per day\").\n* `%` represents dimensionless value of 1/100, and annotates values giving\n a percentage (so the metric values are typically in the range of 0..100,\n and a metric value `3` means \"3 percent\").\n* `10^2.%` indicates a metric contains a ratio, typically in the range\n 0..1, that will be multiplied by 100 and displayed as a percentage\n (so a metric value `0.03` means \"3 percent\").", + "description": "The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems may scale the values to be more easily displayed (so a value of `0.02KBy` _might_ be displayed as `20By`, and a value of `3523KBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `KBy`, then the value of the metric is always in thousands of bytes, no matter how it may be displayed.. 
If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). The supported units are a subset of [The Unified Code for Units of Measure](http://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component { \".\" Component } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users). Alternatively, \"thousands of page views per day\" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean \"5300 page views per day\"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means \"3 percent\").", "type": "string" }, "valueType": { - "description": "Whether the measurement is an integer, a floating-point number, etc.\nSome combinations of `metric_kind` and `value_type` might not be supported.", + "description": "Whether the measurement is an integer, a floating-point number, etc. 
Some combinations of `metric_kind` and `value_type` might not be supported.", "enum": [ "VALUE_TYPE_UNSPECIFIED", "BOOL", @@ -2302,10 +2260,10 @@ ], "enumDescriptions": [ "Do not use this default value.", - "The value is a boolean.\nThis value type can be used only if the metric kind is `GAUGE`.", + "The value is a boolean. This value type can be used only if the metric kind is `GAUGE`.", "The value is a signed 64-bit integer.", "The value is a double precision floating point number.", - "The value is a text string.\nThis value type can be used only if the metric kind is `GAUGE`.", + "The value is a text string. This value type can be used only if the metric kind is `GAUGE`.", "The value is a `Distribution`.", "The value is money." ], @@ -2319,7 +2277,7 @@ "id": "MetricDescriptorMetadata", "properties": { "ingestDelay": { - "description": "The delay of data points caused by ingestion. Data points older than this\nage are guaranteed to be ingested and available to be read, excluding\ndata loss due to errors.", + "description": "The delay of data points caused by ingestion. Data points older than this age are guaranteed to be ingested and available to be read, excluding data loss due to errors.", "format": "google-duration", "type": "string" }, @@ -2339,16 +2297,16 @@ "Do not use this default value.", "The feature is not yet implemented. Users can not use it.", "Prelaunch features are hidden from users and are only visible internally.", - "Early Access features are limited to a closed group of testers. To use\nthese features, you must sign up in advance and sign a Trusted Tester\nagreement (which includes confidentiality provisions). These features may\nbe unstable, changed in backward-incompatible ways, and are not\nguaranteed to be released.", - "Alpha is a limited availability test for releases before they are cleared\nfor widespread use. By Alpha, all significant design issues are resolved\nand we are in the process of verifying functionality. Alpha customers\nneed to apply for access, agree to applicable terms, and have their\nprojects whitelisted. Alpha releases don’t have to be feature complete,\nno SLAs are provided, and there are no technical support obligations, but\nthey will be far enough along that customers can actually use them in\ntest environments or for limited-use tests -- just like they would in\nnormal production cases.", - "Beta is the point at which we are ready to open a release for any\ncustomer to use. There are no SLA or technical support obligations in a\nBeta release. Products will be complete from a feature perspective, but\nmay have some open outstanding issues. Beta releases are suitable for\nlimited production use cases.", - "GA features are open to all developers and are considered stable and\nfully qualified for production use.", - "Deprecated features are scheduled to be shut down and removed. For more\ninformation, see the “Deprecation Policy” section of our [Terms of\nService](https://cloud.google.com/terms/)\nand the [Google Cloud Platform Subject to the Deprecation\nPolicy](https://cloud.google.com/terms/deprecation) documentation." + "Early Access features are limited to a closed group of testers. To use these features, you must sign up in advance and sign a Trusted Tester agreement (which includes confidentiality provisions). These features may be unstable, changed in backward-incompatible ways, and are not guaranteed to be released.", + "Alpha is a limited availability test for releases before they are cleared for widespread use. 
By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects whitelisted. Alpha releases don’t have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", + "Beta is the point at which we are ready to open a release for any customer to use. There are no SLA or technical support obligations in a Beta release. Products will be complete from a feature perspective, but may have some open outstanding issues. Beta releases are suitable for limited production use cases.", + "GA features are open to all developers and are considered stable and fully qualified for production use.", + "Deprecated features are scheduled to be shut down and removed. For more information, see the “Deprecation Policy” section of our [Terms of Service](https://cloud.google.com/terms/) and the [Google Cloud Platform Subject to the Deprecation Policy](https://cloud.google.com/terms/deprecation) documentation." ], "type": "string" }, "samplePeriod": { - "description": "The sampling period of metric data points. For metrics which are written\nperiodically, consecutive data points are stored at this time interval,\nexcluding data loss due to errors. Metrics with a higher granularity have\na smaller sampling period.", + "description": "The sampling period of metric data points. For metrics which are written periodically, consecutive data points are stored at this time interval, excluding data loss due to errors. Metrics with a higher granularity have a smaller sampling period.", "format": "google-duration", "type": "string" } @@ -2356,7 +2314,7 @@ "type": "object" }, "MetricRule": { - "description": "Bind API methods to metrics. Binding a method to a metric causes that\nmetric's configured quota behaviors to apply to the method call.", + "description": "Bind API methods to metrics. Binding a method to a metric causes that metric's configured quota behaviors to apply to the method call.", "id": "MetricRule", "properties": { "metricCosts": { @@ -2364,18 +2322,18 @@ "format": "int64", "type": "string" }, - "description": "Metrics to update when the selected methods are called, and the associated\ncost applied to each metric.\n\nThe key of the map is the metric name, and the values are the amount\nincreased for the metric against which the quota limits are defined.\nThe value must not be negative.", + "description": "Metrics to update when the selected methods are called, and the associated cost applied to each metric. The key of the map is the metric name, and the values are the amount increased for the metric against which the quota limits are defined. The value must not be negative.", "type": "object" }, "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, "type": "object" }, "Mixin": { - "description": "Declares an API Interface to be included in this interface. 
The including\ninterface must redeclare all the methods from the included interface, but\ndocumentation and options are inherited as follows:\n\n- If after comment and whitespace stripping, the documentation\n string of the redeclared method is empty, it will be inherited\n from the original method.\n\n- Each annotation belonging to the service config (http,\n visibility) which is not set in the redeclared method will be\n inherited.\n\n- If an http annotation is inherited, the path pattern will be\n modified as follows. Any version prefix will be replaced by the\n version of the including interface plus the root path if\n specified.\n\nExample of a simple mixin:\n\n package google.acl.v1;\n service AccessControl {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v1/{resource=**}:getAcl\";\n }\n }\n\n package google.storage.v2;\n service Storage {\n // rpc GetAcl(GetAclRequest) returns (Acl);\n\n // Get a data record.\n rpc GetData(GetDataRequest) returns (Data) {\n option (google.api.http).get = \"/v2/{resource=**}\";\n }\n }\n\nExample of a mixin configuration:\n\n apis:\n - name: google.storage.v2.Storage\n mixins:\n - name: google.acl.v1.AccessControl\n\nThe mixin construct implies that all methods in `AccessControl` are\nalso declared with same name and request/response types in\n`Storage`. A documentation generator or annotation processor will\nsee the effective `Storage.GetAcl` method after inherting\ndocumentation and annotations as follows:\n\n service Storage {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v2/{resource=**}:getAcl\";\n }\n ...\n }\n\nNote how the version in the path pattern changed from `v1` to `v2`.\n\nIf the `root` field in the mixin is specified, it should be a\nrelative path under which inherited HTTP paths are placed. Example:\n\n apis:\n - name: google.storage.v2.Storage\n mixins:\n - name: google.acl.v1.AccessControl\n root: acls\n\nThis implies the following inherited HTTP annotation:\n\n service Storage {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\";\n }\n ...\n }", + "description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. 
rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", "id": "Mixin", "properties": { "name": { @@ -2383,26 +2341,26 @@ "type": "string" }, "root": { - "description": "If non-empty specifies a path under which inherited HTTP paths\nare rooted.", + "description": "If non-empty specifies a path under which inherited HTTP paths are rooted.", "type": "string" } }, "type": "object" }, "MonitoredResourceDescriptor": { - "description": "An object that describes the schema of a MonitoredResource object using a\ntype name and a set of labels. For example, the monitored resource\ndescriptor for Google Compute Engine VM instances has a type of\n`\"gce_instance\"` and specifies the use of the labels `\"instance_id\"` and\n`\"zone\"` to identify particular VM instances.\n\nDifferent APIs can support different monitored resource types. APIs generally\nprovide a `list` method that returns the monitored resource descriptors used\nby the API.", + "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of `\"gce_instance\"` and specifies the use of the labels `\"instance_id\"` and `\"zone\"` to identify particular VM instances. Different APIs can support different monitored resource types. APIs generally provide a `list` method that returns the monitored resource descriptors used by the API. ", "id": "MonitoredResourceDescriptor", "properties": { "description": { - "description": "Optional. A detailed description of the monitored resource type that might\nbe used in documentation.", + "description": "Optional. A detailed description of the monitored resource type that might be used in documentation.", "type": "string" }, "displayName": { - "description": "Optional. A concise name for the monitored resource type that might be\ndisplayed in user interfaces. It should be a Title Cased Noun Phrase,\nwithout any article or other determiners. For example,\n`\"Google Cloud SQL Database\"`.", + "description": "Optional. A concise name for the monitored resource type that might be displayed in user interfaces. It should be a Title Cased Noun Phrase, without any article or other determiners. For example, `\"Google Cloud SQL Database\"`.", "type": "string" }, "labels": { - "description": "Required. 
A set of labels used to describe instances of this monitored\nresource type. For example, an individual Google Cloud SQL database is\nidentified by values for the labels `\"database_id\"` and `\"zone\"`.", + "description": "Required. A set of labels used to describe instances of this monitored resource type. For example, an individual Google Cloud SQL database is identified by values for the labels `\"database_id\"` and `\"zone\"`.", "items": { "$ref": "LabelDescriptor" }, @@ -2424,38 +2382,38 @@ "Do not use this default value.", "The feature is not yet implemented. Users can not use it.", "Prelaunch features are hidden from users and are only visible internally.", - "Early Access features are limited to a closed group of testers. To use\nthese features, you must sign up in advance and sign a Trusted Tester\nagreement (which includes confidentiality provisions). These features may\nbe unstable, changed in backward-incompatible ways, and are not\nguaranteed to be released.", - "Alpha is a limited availability test for releases before they are cleared\nfor widespread use. By Alpha, all significant design issues are resolved\nand we are in the process of verifying functionality. Alpha customers\nneed to apply for access, agree to applicable terms, and have their\nprojects whitelisted. Alpha releases don’t have to be feature complete,\nno SLAs are provided, and there are no technical support obligations, but\nthey will be far enough along that customers can actually use them in\ntest environments or for limited-use tests -- just like they would in\nnormal production cases.", - "Beta is the point at which we are ready to open a release for any\ncustomer to use. There are no SLA or technical support obligations in a\nBeta release. Products will be complete from a feature perspective, but\nmay have some open outstanding issues. Beta releases are suitable for\nlimited production use cases.", - "GA features are open to all developers and are considered stable and\nfully qualified for production use.", - "Deprecated features are scheduled to be shut down and removed. For more\ninformation, see the “Deprecation Policy” section of our [Terms of\nService](https://cloud.google.com/terms/)\nand the [Google Cloud Platform Subject to the Deprecation\nPolicy](https://cloud.google.com/terms/deprecation) documentation." + "Early Access features are limited to a closed group of testers. To use these features, you must sign up in advance and sign a Trusted Tester agreement (which includes confidentiality provisions). These features may be unstable, changed in backward-incompatible ways, and are not guaranteed to be released.", + "Alpha is a limited availability test for releases before they are cleared for widespread use. By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects whitelisted. Alpha releases don’t have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", + "Beta is the point at which we are ready to open a release for any customer to use. There are no SLA or technical support obligations in a Beta release. Products will be complete from a feature perspective, but may have some open outstanding issues. 
Beta releases are suitable for limited production use cases.", + "GA features are open to all developers and are considered stable and fully qualified for production use.", + "Deprecated features are scheduled to be shut down and removed. For more information, see the “Deprecation Policy” section of our [Terms of Service](https://cloud.google.com/terms/) and the [Google Cloud Platform Subject to the Deprecation Policy](https://cloud.google.com/terms/deprecation) documentation." ], "type": "string" }, "name": { - "description": "Optional. The resource name of the monitored resource descriptor:\n`\"projects/{project_id}/monitoredResourceDescriptors/{type}\"` where\n{type} is the value of the `type` field in this object and\n{project_id} is a project ID that provides API-specific context for\naccessing the type. APIs that do not use project information can use the\nresource name format `\"monitoredResourceDescriptors/{type}\"`.", + "description": "Optional. The resource name of the monitored resource descriptor: `\"projects/{project_id}/monitoredResourceDescriptors/{type}\"` where {type} is the value of the `type` field in this object and {project_id} is a project ID that provides API-specific context for accessing the type. APIs that do not use project information can use the resource name format `\"monitoredResourceDescriptors/{type}\"`.", "type": "string" }, "type": { - "description": "Required. The monitored resource type. For example, the type\n`\"cloudsql_database\"` represents databases in Google Cloud SQL.\nThe maximum length of this value is 256 characters.", + "description": "Required. The monitored resource type. For example, the type `\"cloudsql_database\"` represents databases in Google Cloud SQL.", "type": "string" } }, "type": "object" }, "Monitoring": { - "description": "Monitoring configuration of the service.\n\nThe example below shows how to configure monitored resources and metrics\nfor monitoring. In the example, a monitored resource and two metrics are\ndefined. The `library.googleapis.com/book/returned_count` metric is sent\nto both producer and consumer projects, whereas the\n`library.googleapis.com/book/overdue_count` metric is only sent to the\nconsumer project.\n\n monitored_resources:\n - type: library.googleapis.com/branch\n labels:\n - key: /city\n description: The city where the library branch is located in.\n - key: /name\n description: The name of the branch.\n metrics:\n - name: library.googleapis.com/book/returned_count\n metric_kind: DELTA\n value_type: INT64\n labels:\n - key: /customer_id\n - name: library.googleapis.com/book/overdue_count\n metric_kind: GAUGE\n value_type: INT64\n labels:\n - key: /customer_id\n monitoring:\n producer_destinations:\n - monitored_resource: library.googleapis.com/branch\n metrics:\n - library.googleapis.com/book/returned_count\n consumer_destinations:\n - monitored_resource: library.googleapis.com/branch\n metrics:\n - library.googleapis.com/book/returned_count\n - library.googleapis.com/book/overdue_count", + "description": "Monitoring configuration of the service. The example below shows how to configure monitored resources and metrics for monitoring. In the example, a monitored resource and two metrics are defined. The `library.googleapis.com/book/returned_count` metric is sent to both producer and consumer projects, whereas the `library.googleapis.com/book/num_overdue` metric is only sent to the consumer project. 
monitored_resources: - type: library.googleapis.com/Branch display_name: \"Library Branch\" description: \"A branch of a library.\" launch_stage: GA labels: - key: resource_container description: \"The Cloud container (ie. project id) for the Branch.\" - key: location description: \"The location of the library branch.\" - key: branch_id description: \"The id of the branch.\" metrics: - name: library.googleapis.com/book/returned_count display_name: \"Books Returned\" description: \"The count of books that have been returned.\" launch_stage: GA metric_kind: DELTA value_type: INT64 unit: \"1\" labels: - key: customer_id description: \"The id of the customer.\" - name: library.googleapis.com/book/num_overdue display_name: \"Books Overdue\" description: \"The current number of overdue books.\" launch_stage: GA metric_kind: GAUGE value_type: INT64 unit: \"1\" labels: - key: customer_id description: \"The id of the customer.\" monitoring: producer_destinations: - monitored_resource: library.googleapis.com/Branch metrics: - library.googleapis.com/book/returned_count consumer_destinations: - monitored_resource: library.googleapis.com/Branch metrics: - library.googleapis.com/book/returned_count - library.googleapis.com/book/num_overdue", "id": "Monitoring", "properties": { "consumerDestinations": { - "description": "Monitoring configurations for sending metrics to the consumer project.\nThere can be multiple consumer destinations. A monitored resouce type may\nappear in multiple monitoring destinations if different aggregations are\nneeded for different sets of metrics associated with that monitored\nresource type. A monitored resource and metric pair may only be used once\nin the Monitoring configuration.", + "description": "Monitoring configurations for sending metrics to the consumer project. There can be multiple consumer destinations. A monitored resource type may appear in multiple monitoring destinations if different aggregations are needed for different sets of metrics associated with that monitored resource type. A monitored resource and metric pair may only be used once in the Monitoring configuration.", "items": { "$ref": "MonitoringDestination" }, "type": "array" }, "producerDestinations": { - "description": "Monitoring configurations for sending metrics to the producer project.\nThere can be multiple producer destinations. A monitored resouce type may\nappear in multiple monitoring destinations if different aggregations are\nneeded for different sets of metrics associated with that monitored\nresource type. A monitored resource and metric pair may only be used once\nin the Monitoring configuration.", + "description": "Monitoring configurations for sending metrics to the producer project. There can be multiple producer destinations. A monitored resource type may appear in multiple monitoring destinations if different aggregations are needed for different sets of metrics associated with that monitored resource type. 
A monitored resource and metric pair may only be used once in the Monitoring configuration.", "items": { "$ref": "MonitoringDestination" }, @@ -2465,40 +2423,40 @@ "type": "object" }, "MonitoringDestination": { - "description": "Configuration of a specific monitoring destination (the producer project\nor the consumer project).", + "description": "Configuration of a specific monitoring destination (the producer project or the consumer project).", "id": "MonitoringDestination", "properties": { "metrics": { - "description": "Types of the metrics to report to this monitoring destination.\nEach type must be defined in Service.metrics section.", + "description": "Types of the metrics to report to this monitoring destination. Each type must be defined in Service.metrics section.", "items": { "type": "string" }, "type": "array" }, "monitoredResource": { - "description": "The monitored resource type. The type must be defined in\nService.monitored_resources section.", + "description": "The monitored resource type. The type must be defined in Service.monitored_resources section.", "type": "string" } }, "type": "object" }, "OAuthRequirements": { - "description": "OAuth scopes are a way to define data and permissions on data. For example,\nthere are scopes defined for \"Read-only access to Google Calendar\" and\n\"Access to Cloud Platform\". Users can consent to a scope for an application,\ngiving it permission to access that data on their behalf.\n\nOAuth scope specifications should be fairly coarse grained; a user will need\nto see and understand the text description of what your scope means.\n\nIn most cases: use one or at most two OAuth scopes for an entire family of\nproducts. If your product has multiple APIs, you should probably be sharing\nthe OAuth scope across all of those APIs.\n\nWhen you need finer grained OAuth consent screens: talk with your product\nmanagement about how developers will use them in practice.\n\nPlease note that even though each of the canonical scopes is enough for a\nrequest to be accepted and passed to the backend, a request can still fail\ndue to the backend requiring additional scopes or permissions.", + "description": "OAuth scopes are a way to define data and permissions on data. For example, there are scopes defined for \"Read-only access to Google Calendar\" and \"Access to Cloud Platform\". Users can consent to a scope for an application, giving it permission to access that data on their behalf. OAuth scope specifications should be fairly coarse grained; a user will need to see and understand the text description of what your scope means. In most cases: use one or at most two OAuth scopes for an entire family of products. If your product has multiple APIs, you should probably be sharing the OAuth scope across all of those APIs. When you need finer grained OAuth consent screens: talk with your product management about how developers will use them in practice. Please note that even though each of the canonical scopes is enough for a request to be accepted and passed to the backend, a request can still fail due to the backend requiring additional scopes or permissions.", "id": "OAuthRequirements", "properties": { "canonicalScopes": { - "description": "The list of publicly documented OAuth scopes that are allowed access. 
An\nOAuth token containing any of these scopes will be accepted.\n\nExample:\n\n canonical_scopes: https://www.googleapis.com/auth/calendar,\n https://www.googleapis.com/auth/calendar.read", + "description": "The list of publicly documented OAuth scopes that are allowed access. An OAuth token containing any of these scopes will be accepted. Example: canonical_scopes: https://www.googleapis.com/auth/calendar, https://www.googleapis.com/auth/calendar.read", "type": "string" } }, "type": "object" }, "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", "properties": { "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", "type": "boolean" }, "error": { @@ -2510,11 +2468,11 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", "type": "object" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, "response": { @@ -2522,7 +2480,7 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. 
For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", "type": "object" } }, @@ -2538,7 +2496,7 @@ "type": "integer" }, "resourceNames": { - "description": "The full name of the resources that this operation is directly\nassociated with.", + "description": "The full name of the resources that this operation is directly associated with.", "items": { "type": "string" }, @@ -2560,11 +2518,11 @@ "type": "object" }, "Option": { - "description": "A protocol buffer option, which can be attached to a message, field,\nenumeration, etc.", + "description": "A protocol buffer option, which can be attached to a message, field, enumeration, etc.", "id": "Option", "properties": { "name": { - "description": "The option's name. For protobuf built-in options (options defined in\ndescriptor.proto), this is the short name. For example, `\"map_entry\"`.\nFor custom options, it should be the fully-qualified name. For example,\n`\"google.api.http\"`.", + "description": "The option's name. For protobuf built-in options (options defined in descriptor.proto), this is the short name. For example, `\"map_entry\"`. For custom options, it should be the fully-qualified name. For example, `\"google.api.http\"`.", "type": "string" }, "value": { @@ -2572,26 +2530,26 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The option's value packed in an Any message. If the value is a primitive,\nthe corresponding wrapper type defined in google/protobuf/wrappers.proto\nshould be used. If the value is an enum, it should be stored as an int32\nvalue using the google.protobuf.Int32Value type.", + "description": "The option's value packed in an Any message. If the value is a primitive, the corresponding wrapper type defined in google/protobuf/wrappers.proto should be used. If the value is an enum, it should be stored as an int32 value using the google.protobuf.Int32Value type.", "type": "object" } }, "type": "object" }, "Page": { - "description": "Represents a documentation page. A page can contain subpages to represent\nnested documentation set structure.", + "description": "Represents a documentation page. A page can contain subpages to represent nested documentation set structure.", "id": "Page", "properties": { "content": { - "description": "The Markdown content of the page. You can use \u003ccode\u003e\u0026#40;== include {path}\n==\u0026#41;\u003c/code\u003e to include content from a Markdown file.", + "description": "The Markdown content of the page. You can use (== include {path} ==) to include content from a Markdown file.", "type": "string" }, "name": { - "description": "The name of the page. It will be used as an identity of the page to\ngenerate URI of the page, text of the link to this page in navigation,\netc. The full page name (start from the root page name to this page\nconcatenated with `.`) can be used as reference to the page in your\ndocumentation. For example:\n\u003cpre\u003e\u003ccode\u003epages:\n- name: Tutorial\n content: \u0026#40;== include tutorial.md ==\u0026#41;\n subpages:\n - name: Java\n content: \u0026#40;== include tutorial_java.md ==\u0026#41;\n\u003c/code\u003e\u003c/pre\u003e\nYou can reference `Java` page using Markdown reference link syntax:\n`Java`.", + "description": "The name of the page. It will be used as an identity of the page to generate URI of the page, text of the link to this page in navigation, etc. 
The full page name (start from the root page name to this page concatenated with `.`) can be used as reference to the page in your documentation. For example: pages: - name: Tutorial content: (== include tutorial.md ==) subpages: - name: Java content: (== include tutorial_java.md ==) You can reference `Java` page using Markdown reference link syntax: `Java`.", "type": "string" }, "subpages": { - "description": "Subpages of this page. The order of subpages specified here will be\nhonored in the generated docset.", + "description": "Subpages of this page. The order of subpages specified here will be honored in the generated docset.", "items": { "$ref": "Page" }, @@ -2601,7 +2559,7 @@ "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access\ncontrols for Google Cloud resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). A `role` is a named list of\npermissions; each `role` can be an IAM predefined role or a user-created\ncustom role.\n\nFor some types of Google Cloud resources, a `binding` can also specify a\n`condition`, which is a logical expression that allows access to a resource\nonly if the expression evaluates to `true`. A condition can add constraints\nbased on attributes of the request, the resource, or both. To learn which\nresources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).\n\n**JSON example:**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\n \"user:eve@example.com\"\n ],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n }\n\n**YAML example:**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n - etag: BwWWja0YfJA=\n - version: 3\n\nFor a description of IAM and its features, see the\n[IAM documentation](https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. 
A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "auditConfigs": { @@ -2612,19 +2570,19 @@ "type": "array" }, "bindings": { - "description": "Associates a list of `members` to a `role`. Optionally, may specify a\n`condition` that determines how and when the `bindings` are applied. Each\nof the `bindings` must contain at least one member.", + "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. 
If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", "format": "byte", "type": "string" }, "version": { - "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. Requests that specify an invalid value\nare rejected.\n\nAny operation that affects conditional role bindings must specify version\n`3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding\n* Adding a conditional role binding to a policy\n* Changing a conditional role binding in a policy\n* Removing any role binding, with or without a condition, from a policy\n that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may\nspecify any valid version or leave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -2632,7 +2590,7 @@ "type": "object" }, "Quota": { - "description": "Quota configuration helps to achieve fairness and budgeting in service\nusage.\n\nThe metric based quota configuration works this way:\n- The service configuration defines a set of metrics.\n- For API calls, the quota.metric_rules maps methods to metrics with\n corresponding costs.\n- The quota.limits defines limits on the metrics, which will be used for\n quota checks at runtime.\n\nAn example quota configuration in yaml format:\n\n quota:\n limits:\n\n - name: apiWriteQpsPerProject\n metric: library.googleapis.com/write_calls\n unit: \"1/min/{project}\" # rate limit for consumer projects\n values:\n STANDARD: 10000\n\n\n # The metric rules bind all methods to the read_calls metric,\n # except for the UpdateBook and DeleteBook methods. 
These two methods\n # are mapped to the write_calls metric, with the UpdateBook method\n # consuming at twice rate as the DeleteBook method.\n metric_rules:\n - selector: \"*\"\n metric_costs:\n library.googleapis.com/read_calls: 1\n - selector: google.example.library.v1.LibraryService.UpdateBook\n metric_costs:\n library.googleapis.com/write_calls: 2\n - selector: google.example.library.v1.LibraryService.DeleteBook\n metric_costs:\n library.googleapis.com/write_calls: 1\n\n Corresponding Metric definition:\n\n metrics:\n - name: library.googleapis.com/read_calls\n display_name: Read requests\n metric_kind: DELTA\n value_type: INT64\n\n - name: library.googleapis.com/write_calls\n display_name: Write requests\n metric_kind: DELTA\n value_type: INT64\n\n", + "description": "Quota configuration helps to achieve fairness and budgeting in service usage. The metric based quota configuration works this way: - The service configuration defines a set of metrics. - For API calls, the quota.metric_rules maps methods to metrics with corresponding costs. - The quota.limits defines limits on the metrics, which will be used for quota checks at runtime. An example quota configuration in yaml format: quota: limits: - name: apiWriteQpsPerProject metric: library.googleapis.com/write_calls unit: \"1/min/{project}\" # rate limit for consumer projects values: STANDARD: 10000 # The metric rules bind all methods to the read_calls metric, # except for the UpdateBook and DeleteBook methods. These two methods # are mapped to the write_calls metric, with the UpdateBook method # consuming at twice rate as the DeleteBook method. metric_rules: - selector: \"*\" metric_costs: library.googleapis.com/read_calls: 1 - selector: google.example.library.v1.LibraryService.UpdateBook metric_costs: library.googleapis.com/write_calls: 2 - selector: google.example.library.v1.LibraryService.DeleteBook metric_costs: library.googleapis.com/write_calls: 1 Corresponding Metric definition: metrics: - name: library.googleapis.com/read_calls display_name: Read requests metric_kind: DELTA value_type: INT64 - name: library.googleapis.com/write_calls display_name: Write requests metric_kind: DELTA value_type: INT64 ", "id": "Quota", "properties": { "limits": { @@ -2643,7 +2601,7 @@ "type": "array" }, "metricRules": { - "description": "List of `MetricRule` definitions, each one mapping a selected method to one\nor more metrics.", + "description": "List of `MetricRule` definitions, each one mapping a selected method to one or more metrics.", "items": { "$ref": "MetricRule" }, @@ -2653,46 +2611,46 @@ "type": "object" }, "QuotaLimit": { - "description": "`QuotaLimit` defines a specific limit that applies over a specified duration\nfor a limit type. There can be at most one limit for a duration and limit\ntype combination defined within a `QuotaGroup`.", + "description": "`QuotaLimit` defines a specific limit that applies over a specified duration for a limit type. There can be at most one limit for a duration and limit type combination defined within a `QuotaGroup`.", "id": "QuotaLimit", "properties": { "defaultLimit": { - "description": "Default number of tokens that can be consumed during the specified\nduration. This is the number of tokens assigned when a client\napplication developer activates the service for his/her project.\n\nSpecifying a value of 0 will block all requests. This can be used if you\nare provisioning quota to selected consumers and blocking others.\nSimilarly, a value of -1 will indicate an unlimited quota. 
No other\nnegative values are allowed.\n\nUsed by group-based quotas only.", + "description": "Default number of tokens that can be consumed during the specified duration. This is the number of tokens assigned when a client application developer activates the service for his/her project. Specifying a value of 0 will block all requests. This can be used if you are provisioning quota to selected consumers and blocking others. Similarly, a value of -1 will indicate an unlimited quota. No other negative values are allowed. Used by group-based quotas only.", "format": "int64", "type": "string" }, "description": { - "description": "Optional. User-visible, extended description for this quota limit.\nShould be used only when more context is needed to understand this limit\nthan provided by the limit's display name (see: `display_name`).", + "description": "Optional. User-visible, extended description for this quota limit. Should be used only when more context is needed to understand this limit than provided by the limit's display name (see: `display_name`).", "type": "string" }, "displayName": { - "description": "User-visible display name for this limit.\nOptional. If not set, the UI will provide a default display name based on\nthe quota configuration. This field can be used to override the default\ndisplay name generated from the configuration.", + "description": "User-visible display name for this limit. Optional. If not set, the UI will provide a default display name based on the quota configuration. This field can be used to override the default display name generated from the configuration.", "type": "string" }, "duration": { - "description": "Duration of this limit in textual notation. Must be \"100s\" or \"1d\".\n\nUsed by group-based quotas only.", + "description": "Duration of this limit in textual notation. Must be \"100s\" or \"1d\". Used by group-based quotas only.", "type": "string" }, "freeTier": { - "description": "Free tier value displayed in the Developers Console for this limit.\nThe free tier is the number of tokens that will be subtracted from the\nbilled amount when billing is enabled.\nThis field can only be set on a limit with duration \"1d\", in a billable\ngroup; it is invalid on any other limit. If this field is not set, it\ndefaults to 0, indicating that there is no free tier for this service.\n\nUsed by group-based quotas only.", + "description": "Free tier value displayed in the Developers Console for this limit. The free tier is the number of tokens that will be subtracted from the billed amount when billing is enabled. This field can only be set on a limit with duration \"1d\", in a billable group; it is invalid on any other limit. If this field is not set, it defaults to 0, indicating that there is no free tier for this service. Used by group-based quotas only.", "format": "int64", "type": "string" }, "maxLimit": { - "description": "Maximum number of tokens that can be consumed during the specified\nduration. Client application developers can override the default limit up\nto this maximum. If specified, this value cannot be set to a value less\nthan the default limit. If not specified, it is set to the default limit.\n\nTo allow clients to apply overrides with no upper bound, set this to -1,\nindicating unlimited maximum quota.\n\nUsed by group-based quotas only.", + "description": "Maximum number of tokens that can be consumed during the specified duration. Client application developers can override the default limit up to this maximum. 
If specified, this value cannot be set to a value less than the default limit. If not specified, it is set to the default limit. To allow clients to apply overrides with no upper bound, set this to -1, indicating unlimited maximum quota. Used by group-based quotas only.", "format": "int64", "type": "string" }, "metric": { - "description": "The name of the metric this quota limit applies to. The quota limits with\nthe same metric will be checked together during runtime. The metric must be\ndefined within the service config.", + "description": "The name of the metric this quota limit applies to. The quota limits with the same metric will be checked together during runtime. The metric must be defined within the service config.", "type": "string" }, "name": { - "description": "Name of the quota limit.\n\nThe name must be provided, and it must be unique within the service. The\nname can only include alphanumeric characters as well as '-'.\n\nThe maximum length of the limit name is 64 characters.", + "description": "Name of the quota limit. The name must be provided, and it must be unique within the service. The name can only include alphanumeric characters as well as '-'. The maximum length of the limit name is 64 characters.", "type": "string" }, "unit": { - "description": "Specify the unit of the quota limit. It uses the same syntax as\nMetric.unit. The supported unit kinds are determined by the quota\nbackend system.\n\nHere are some examples:\n* \"1/min/{project}\" for quota per minute per project.\n\nNote: the order of unit components is insignificant.\nThe \"1\" at the beginning is required to follow the metric unit syntax.", + "description": "Specify the unit of the quota limit. It uses the same syntax as Metric.unit. The supported unit kinds are determined by the quota backend system. Here are some examples: * \"1/min/{project}\" for quota per minute per project. Note: the order of unit components is insignificant. The \"1\" at the beginning is required to follow the metric unit syntax.", "type": "string" }, "values": { @@ -2700,14 +2658,29 @@ "format": "int64", "type": "string" }, - "description": "Tiered limit values. You must specify this as a key:value pair, with an\ninteger value that is the maximum number of requests allowed for the\nspecified unit. Currently only STANDARD is supported.", + "description": "Tiered limit values. You must specify this as a key:value pair, with an integer value that is the maximum number of requests allowed for the specified unit. Currently only STANDARD is supported.", "type": "object" } }, "type": "object" }, + "ResourceReference": { + "description": "Defines a proto annotation that describes a string field that refers to an API resource.", + "id": "ResourceReference", + "properties": { + "childType": { + "description": "The resource type of a child collection that the annotated field references. This is useful for annotating the `parent` field that doesn't have a fixed resource type. Example: message ListLogEntriesRequest { string parent = 1 [(google.api.resource_reference) = { child_type: \"logging.googleapis.com/LogEntry\" }; }", + "type": "string" + }, + "type": { + "description": "The resource type that the annotated field references. Example: message Subscription { string topic = 2 [(google.api.resource_reference) = { type: \"pubsub.googleapis.com/Topic\" }]; } Occasionally, a field may reference an arbitrary resource. In this case, APIs use the special value * in their resource reference. 
Example: message GetIamPolicyRequest { string resource = 2 [(google.api.resource_reference) = { type: \"*\" }]; }", + "type": "string" + } + }, + "type": "object" + }, "Rollout": { - "description": "A rollout resource that defines how service configuration versions are pushed\nto control plane systems. Typically, you create a new version of the\nservice config, and then create a Rollout to push the service config.", + "description": "A rollout resource that defines how service configuration versions are pushed to control plane systems. Typically, you create a new version of the service config, and then create a Rollout to push the service config.", "id": "Rollout", "properties": { "createTime": { @@ -2716,15 +2689,15 @@ "type": "string" }, "createdBy": { - "description": "This field is deprecated and will be deleted. Please remove usage of\nthis field.", + "description": "This field is deprecated and will be deleted. Please remove usage of this field.", "type": "string" }, "deleteServiceStrategy": { "$ref": "DeleteServiceStrategy", - "description": "The strategy associated with a rollout to delete a `ManagedService`.\nReadonly." + "description": "The strategy associated with a rollout to delete a `ManagedService`. Readonly." }, "rolloutId": { - "description": "Optional. Unique identifier of this Rollout. Must be no longer than 63 characters\nand only lower case letters, digits, '.', '_' and '-' are allowed.\n\nIf not specified by client, the server will generate one. The generated id\nwill have the form of \u003cdate\u003e\u003crevision number\u003e, where \"date\" is the create\ndate in ISO 8601 format. \"revision number\" is a monotonically increasing\npositive number that is reset every day for each service.\nAn example of the generated rollout_id is '2016-02-16r1'", + "description": "Optional. Unique identifier of this Rollout. Must be no longer than 63 characters and only lower case letters, digits, '.', '_' and '-' are allowed. If not specified by client, the server will generate one. The generated id will have the form of , where \"date\" is the create date in ISO 8601 format. \"revision number\" is a monotonically increasing positive number that is reset every day for each service. An example of the generated rollout_id is '2016-02-16r1'", "type": "string" }, "serviceName": { @@ -2732,7 +2705,7 @@ "type": "string" }, "status": { - "description": "The status of this rollout. Readonly. In case of a failed rollout,\nthe system will automatically rollback to the current Rollout\nversion. Readonly.", + "description": "The status of this rollout. Readonly. In case of a failed rollout, the system will automatically rollback to the current Rollout version. Readonly.", "enum": [ "ROLLOUT_STATUS_UNSPECIFIED", "IN_PROGRESS", @@ -2746,26 +2719,26 @@ "No status specified.", "The Rollout is in progress.", "The Rollout has completed successfully.", - "The Rollout has been cancelled. This can happen if you have overlapping\nRollout pushes, and the previous ones will be cancelled.", + "The Rollout has been cancelled. This can happen if you have overlapping Rollout pushes, and the previous ones will be cancelled.", "The Rollout has failed and the rollback attempt has failed too.", "The Rollout has not started yet and is pending for execution.", - "The Rollout has failed and rolled back to the previous successful\nRollout." + "The Rollout has failed and rolled back to the previous successful Rollout." 
], "type": "string" }, "trafficPercentStrategy": { "$ref": "TrafficPercentStrategy", - "description": "Google Service Control selects service configurations based on\ntraffic percentage." + "description": "Google Service Control selects service configurations based on traffic percentage." } }, "type": "object" }, "Service": { - "description": "`Service` is the root object of Google service configuration schema. It\ndescribes basic information about a service, such as the name and the\ntitle, and delegates other aspects to sub-sections. Each sub-section is\neither a proto message or a repeated proto message that configures a\nspecific aspect, such as auth. See each proto message definition for details.\n\nExample:\n\n type: google.api.Service\n config_version: 3\n name: calendar.googleapis.com\n title: Google Calendar API\n apis:\n - name: google.calendar.v3.Calendar\n authentication:\n providers:\n - id: google_calendar_auth\n jwks_uri: https://www.googleapis.com/oauth2/v1/certs\n issuer: https://securetoken.google.com\n rules:\n - selector: \"*\"\n requirements:\n provider_id: google_calendar_auth", + "description": "`Service` is the root object of Google service configuration schema. It describes basic information about a service, such as the name and the title, and delegates other aspects to sub-sections. Each sub-section is either a proto message or a repeated proto message that configures a specific aspect, such as auth. See each proto message definition for details. Example: type: google.api.Service config_version: 3 name: calendar.googleapis.com title: Google Calendar API apis: - name: google.calendar.v3.Calendar authentication: providers: - id: google_calendar_auth jwks_uri: https://www.googleapis.com/oauth2/v1/certs issuer: https://securetoken.google.com rules: - selector: \"*\" requirements: provider_id: google_calendar_auth", "id": "Service", "properties": { "apis": { - "description": "A list of API interfaces exported by this service. Only the `name` field\nof the google.protobuf.Api needs to be provided by the configuration\nauthor, as the remaining fields will be derived from the IDL during the\nnormalization process. It is an error to specify an API interface here\nwhich cannot be resolved against the associated IDL files.", + "description": "A list of API interfaces exported by this service. Only the `name` field of the google.protobuf.Api needs to be provided by the configuration author, as the remaining fields will be derived from the IDL during the normalization process. It is an error to specify an API interface here which cannot be resolved against the associated IDL files.", "items": { "$ref": "Api" }, @@ -2784,7 +2757,7 @@ "description": "Billing configuration." }, "configVersion": { - "description": "The semantic version of the service configuration. The config version\naffects the interpretation of the service configuration. For example,\ncertain features are enabled by default for certain config versions.\n\nThe latest config version is `3`.", + "description": "The semantic version of the service configuration. The config version affects the interpretation of the service configuration. For example, certain features are enabled by default for certain config versions. The latest config version is `3`.", "format": "uint32", "type": "integer" }, @@ -2805,14 +2778,14 @@ "description": "Additional API documentation." }, "endpoints": { - "description": "Configuration for network endpoints. 
If this is empty, then an endpoint\nwith the same name as the service is automatically generated to service all\ndefined APIs.", + "description": "Configuration for network endpoints. If this is empty, then an endpoint with the same name as the service is automatically generated to service all defined APIs.", "items": { "$ref": "Endpoint" }, "type": "array" }, "enums": { - "description": "A list of all enum types included in this API service. Enums\nreferenced directly or indirectly by the `apis` are automatically\nincluded. Enums which are not referenced but shall be included\nshould be listed here by name. Example:\n\n enums:\n - name: google.someapi.v1.SomeEnum", + "description": "A list of all enum types included in this API service. Enums referenced directly or indirectly by the `apis` are automatically included. Enums which are not referenced but shall be included should be listed here by name. Example: enums: - name: google.someapi.v1.SomeEnum", "items": { "$ref": "Enum" }, @@ -2823,7 +2796,7 @@ "description": "HTTP configuration." }, "id": { - "description": "A unique ID for a specific instance of this message, typically assigned\nby the client for tracking purpose. Must be no longer than 63 characters\nand only lower case letters, digits, '.', '_' and '-' are allowed. If\nempty, the server may choose to generate one instead.", + "description": "A unique ID for a specific instance of this message, typically assigned by the client for tracking purpose. Must be no longer than 63 characters and only lower case letters, digits, '.', '_' and '-' are allowed. If empty, the server may choose to generate one instead.", "type": "string" }, "logging": { @@ -2845,7 +2818,7 @@ "type": "array" }, "monitoredResources": { - "description": "Defines the monitored resources used by this service. This is required\nby the Service.monitoring and Service.logging configurations.", + "description": "Defines the monitored resources used by this service. This is required by the Service.monitoring and Service.logging configurations.", "items": { "$ref": "MonitoredResourceDescriptor" }, @@ -2856,7 +2829,7 @@ "description": "Monitoring configuration." }, "name": { - "description": "The service name, which is a DNS-like logical identifier for the\nservice, such as `calendar.googleapis.com`. The service name\ntypically goes through DNS verification to make sure the owner\nof the service also owns the DNS name.", + "description": "The service name, which is a DNS-like logical identifier for the service, such as `calendar.googleapis.com`. The service name typically goes through DNS verification to make sure the owner of the service also owns the DNS name.", "type": "string" }, "producerProjectId": { @@ -2876,7 +2849,7 @@ "description": "System parameter configuration." }, "systemTypes": { - "description": "A list of all proto message types included in this API service.\nIt serves similar purpose as [google.api.Service.types], except that\nthese types are not needed by user-defined APIs. Therefore, they will not\nshow up in the generated discovery doc. This field should only be used\nto define system APIs in ESF.", + "description": "A list of all proto message types included in this API service. It serves similar purpose as [google.api.Service.types], except that these types are not needed by user-defined APIs. Therefore, they will not show up in the generated discovery doc. 
This field should only be used to define system APIs in ESF.", "items": { "$ref": "Type" }, @@ -2887,7 +2860,7 @@ "type": "string" }, "types": { - "description": "A list of all proto message types included in this API service.\nTypes referenced directly or indirectly by the `apis` are\nautomatically included. Messages which are not referenced but\nshall be included, such as types used by the `google.protobuf.Any` type,\nshould be listed here by name. Example:\n\n types:\n - name: google.protobuf.Int32", + "description": "A list of all proto message types included in this API service. Types referenced directly or indirectly by the `apis` are automatically included. Messages which are not referenced but shall be included, such as types used by the `google.protobuf.Any` type, should be listed here by name. Example: types: - name: google.protobuf.Int32", "items": { "$ref": "Type" }, @@ -2901,19 +2874,19 @@ "type": "object" }, "ServiceIdentity": { - "description": "The per-product per-project service identity for a service.\n\n\nUse this field to configure per-product per-project service identity.\nExample of a service identity configuration.\n\n usage:\n service_identity:\n - service_account_parent: \"projects/123456789\"\n display_name: \"Cloud XXX Service Agent\"\n description: \"Used as the identity of Cloud XXX to access resources\"", + "description": "The per-product per-project service identity for a service. Use this field to configure per-product per-project service identity. Example of a service identity configuration. usage: service_identity: - service_account_parent: \"projects/123456789\" display_name: \"Cloud XXX Service Agent\" description: \"Used as the identity of Cloud XXX to access resources\"", "id": "ServiceIdentity", "properties": { "description": { - "description": "Optional. A user-specified opaque description of the service account.\nMust be less than or equal to 256 UTF-8 bytes.", + "description": "Optional. A user-specified opaque description of the service account. Must be less than or equal to 256 UTF-8 bytes.", "type": "string" }, "displayName": { - "description": "Optional. A user-specified name for the service account.\nMust be less than or equal to 100 UTF-8 bytes.", + "description": "Optional. A user-specified name for the service account. Must be less than or equal to 100 UTF-8 bytes.", "type": "string" }, "serviceAccountParent": { - "description": "A service account project that hosts the service accounts.\n\nAn example name would be:\n`projects/123456789`", + "description": "A service account project that hosts the service accounts. An example name would be: `projects/123456789`", "type": "string" } }, @@ -2925,10 +2898,10 @@ "properties": { "policy": { "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." }, "updateMask": { - "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. 
If no mask is provided, the\nfollowing default mask is used:\n\n`paths: \"bindings, etag\"`", + "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: \"bindings, etag\"`", "format": "google-fieldmask", "type": "string" } @@ -2936,11 +2909,11 @@ "type": "object" }, "SourceContext": { - "description": "`SourceContext` represents information about the source of a\nprotobuf element, like the file in which it is defined.", + "description": "`SourceContext` represents information about the source of a protobuf element, like the file in which it is defined.", "id": "SourceContext", "properties": { "fileName": { - "description": "The path-qualified name of the .proto file that contained the associated\nprotobuf element. For example: `\"google/protobuf/source_context.proto\"`.", + "description": "The path-qualified name of the .proto file that contained the associated protobuf element. For example: `\"google/protobuf/source_context.proto\"`.", "type": "string" } }, @@ -2965,7 +2938,7 @@ "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -2974,7 +2947,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -2985,7 +2958,7 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, @@ -3014,7 +2987,7 @@ "The operation or step has completed without errors.", "The operation or step has not started yet.", "The operation or step is in progress.", - "The operation or step has completed with errors. If the operation is\nrollbackable, the rollback completed with errors too.", + "The operation or step has completed with errors. If the operation is rollbackable, the rollback completed with errors too.", "The operation or step has completed with cancellation." 
], "type": "string" @@ -3031,7 +3004,7 @@ "description": "Required. The source configuration for the service." }, "validateOnly": { - "description": "Optional. If set, this will result in the generation of a\n`google.api.Service` configuration based on the `ConfigSource` provided,\nbut the generated config and the sources will NOT be persisted.", + "description": "Optional. If set, this will result in the generation of a `google.api.Service` configuration based on the `ConfigSource` provided, but the generated config and the sources will NOT be persisted.", "type": "boolean" } }, @@ -3049,11 +3022,11 @@ "type": "object" }, "SystemParameter": { - "description": "Define a parameter's name and location. The parameter may be passed as either\nan HTTP header or a URL query parameter, and if both are passed the behavior\nis implementation-dependent.", + "description": "Define a parameter's name and location. The parameter may be passed as either an HTTP header or a URL query parameter, and if both are passed the behavior is implementation-dependent.", "id": "SystemParameter", "properties": { "httpHeader": { - "description": "Define the HTTP header name to use for the parameter. It is case\ninsensitive.", + "description": "Define the HTTP header name to use for the parameter. It is case insensitive.", "type": "string" }, "name": { @@ -3061,36 +3034,36 @@ "type": "string" }, "urlQueryParameter": { - "description": "Define the URL query parameter name to use for the parameter. It is case\nsensitive.", + "description": "Define the URL query parameter name to use for the parameter. It is case sensitive.", "type": "string" } }, "type": "object" }, "SystemParameterRule": { - "description": "Define a system parameter rule mapping system parameter definitions to\nmethods.", + "description": "Define a system parameter rule mapping system parameter definitions to methods.", "id": "SystemParameterRule", "properties": { "parameters": { - "description": "Define parameters. Multiple names may be defined for a parameter.\nFor a given method call, only one of them should be used. If multiple\nnames are used the behavior is implementation-dependent.\nIf none of the specified names are present the behavior is\nparameter-dependent.", + "description": "Define parameters. Multiple names may be defined for a parameter. For a given method call, only one of them should be used. If multiple names are used the behavior is implementation-dependent. If none of the specified names are present the behavior is parameter-dependent.", "items": { "$ref": "SystemParameter" }, "type": "array" }, "selector": { - "description": "Selects the methods to which this rule applies. Use '*' to indicate all\nmethods in all APIs.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Use '*' to indicate all methods in all APIs. Refer to selector for syntax details.", "type": "string" } }, "type": "object" }, "SystemParameters": { - "description": "### System parameter configuration\n\nA system parameter is a special kind of parameter defined by the API\nsystem, not by an individual API. It is typically mapped to an HTTP header\nand/or a URL query parameter. This configuration specifies which methods\nchange the names of the system parameters.", + "description": "### System parameter configuration A system parameter is a special kind of parameter defined by the API system, not by an individual API. It is typically mapped to an HTTP header and/or a URL query parameter. 
This configuration specifies which methods change the names of the system parameters.", "id": "SystemParameters", "properties": { "rules": { - "description": "Define system parameters.\n\nThe parameters defined here will override the default parameters\nimplemented by the system. If this field is missing from the service\nconfig, default system parameters will be used. Default system parameters\nand names is implementation-dependent.\n\nExample: define api key for all methods\n\n system_parameters\n rules:\n - selector: \"*\"\n parameters:\n - name: api_key\n url_query_parameter: api_key\n\n\nExample: define 2 api key names for a specific method.\n\n system_parameters\n rules:\n - selector: \"/ListShelves\"\n parameters:\n - name: api_key\n http_header: Api-Key1\n - name: api_key\n http_header: Api-Key2\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "Define system parameters. The parameters defined here will override the default parameters implemented by the system. If this field is missing from the service config, default system parameters will be used. Default system parameters and names is implementation-dependent. Example: define api key for all methods system_parameters rules: - selector: \"*\" parameters: - name: api_key url_query_parameter: api_key Example: define 2 api key names for a specific method. system_parameters rules: - selector: \"/ListShelves\" parameters: - name: api_key http_header: Api-Key1 - name: api_key http_header: Api-Key2 **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "SystemParameterRule" }, @@ -3104,7 +3077,7 @@ "id": "TestIamPermissionsRequest", "properties": { "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "items": { "type": "string" }, @@ -3118,7 +3091,7 @@ "id": "TestIamPermissionsResponse", "properties": { "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { "type": "string" }, @@ -3128,7 +3101,7 @@ "type": "object" }, "TrafficPercentStrategy": { - "description": "Strategy that specifies how clients of Google Service Controller want to\nsend traffic to use different config versions. This is generally\nused by API proxy to split traffic based on your configured percentage for\neach config version.\n\nOne example of how to gradually rollout a new service configuration using\nthis\nstrategy:\nDay 1\n\n Rollout {\n id: \"example.googleapis.com/rollout_20160206\"\n traffic_percent_strategy {\n percentages: {\n \"example.googleapis.com/20160201\": 70.00\n \"example.googleapis.com/20160206\": 30.00\n }\n }\n }\n\nDay 2\n\n Rollout {\n id: \"example.googleapis.com/rollout_20160207\"\n traffic_percent_strategy: {\n percentages: {\n \"example.googleapis.com/20160206\": 100.00\n }\n }\n }", + "description": "Strategy that specifies how clients of Google Service Controller want to send traffic to use different config versions. 
This is generally used by API proxy to split traffic based on your configured percentage for each config version. One example of how to gradually rollout a new service configuration using this strategy: Day 1 Rollout { id: \"example.googleapis.com/rollout_20160206\" traffic_percent_strategy { percentages: { \"example.googleapis.com/20160201\": 70.00 \"example.googleapis.com/20160206\": 30.00 } } } Day 2 Rollout { id: \"example.googleapis.com/rollout_20160207\" traffic_percent_strategy: { percentages: { \"example.googleapis.com/20160206\": 100.00 } } }", "id": "TrafficPercentStrategy", "properties": { "percentages": { @@ -3136,7 +3109,7 @@ "format": "double", "type": "number" }, - "description": "Maps service configuration IDs to their corresponding traffic percentage.\nKey is the service configuration ID, Value is the traffic percentage\nwhich must be greater than 0.0 and the sum must equal to 100.0.", + "description": "Maps service configuration IDs to their corresponding traffic percentage. Key is the service configuration ID, Value is the traffic percentage which must be greater than 0.0 and the sum must equal to 100.0.", "type": "object" } }, @@ -3206,18 +3179,18 @@ "id": "Usage", "properties": { "producerNotificationChannel": { - "description": "The full resource name of a channel used for sending notifications to the\nservice producer.\n\nGoogle Service Management currently only supports\n[Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a notification\nchannel. To use Google Cloud Pub/Sub as the channel, this must be the name\nof a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format\ndocumented in https://cloud.google.com/pubsub/docs/overview.", + "description": "The full resource name of a channel used for sending notifications to the service producer. Google Service Management currently only supports [Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a notification channel. To use Google Cloud Pub/Sub as the channel, this must be the name of a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format documented in https://cloud.google.com/pubsub/docs/overview.", "type": "string" }, "requirements": { - "description": "Requirements that must be satisfied before a consumer project can use the\nservice. Each requirement is of the form \u003cservice.name\u003e/\u003crequirement-id\u003e;\nfor example 'serviceusage.googleapis.com/billing-enabled'.", + "description": "Requirements that must be satisfied before a consumer project can use the service. Each requirement is of the form /; for example 'serviceusage.googleapis.com/billing-enabled'.", "items": { "type": "string" }, "type": "array" }, "rules": { - "description": "A list of usage rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of usage rules that apply to individual API methods. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "UsageRule" }, @@ -3231,19 +3204,19 @@ "type": "object" }, "UsageRule": { - "description": "Usage configuration rules for the service.\n\nNOTE: Under development.\n\n\nUse this rule to configure unregistered calls for the service. Unregistered\ncalls are calls that do not contain consumer project identity.\n(Example: calls that do not contain an API key).\nBy default, API methods do not allow unregistered calls, and each method call\nmust be identified by a consumer project identity. 
Use this rule to\nallow/disallow unregistered calls.\n\nExample of an API that wants to allow unregistered calls for entire service.\n\n usage:\n rules:\n - selector: \"*\"\n allow_unregistered_calls: true\n\nExample of a method that wants to allow unregistered calls.\n\n usage:\n rules:\n - selector: \"google.example.library.v1.LibraryService.CreateBook\"\n allow_unregistered_calls: true", + "description": "Usage configuration rules for the service. NOTE: Under development. Use this rule to configure unregistered calls for the service. Unregistered calls are calls that do not contain consumer project identity. (Example: calls that do not contain an API key). By default, API methods do not allow unregistered calls, and each method call must be identified by a consumer project identity. Use this rule to allow/disallow unregistered calls. Example of an API that wants to allow unregistered calls for entire service. usage: rules: - selector: \"*\" allow_unregistered_calls: true Example of a method that wants to allow unregistered calls. usage: rules: - selector: \"google.example.library.v1.LibraryService.CreateBook\" allow_unregistered_calls: true", "id": "UsageRule", "properties": { "allowUnregisteredCalls": { - "description": "If true, the selected method allows unregistered calls, e.g. calls\nthat don't identify any user or application.", + "description": "If true, the selected method allows unregistered calls, e.g. calls that don't identify any user or application.", "type": "boolean" }, "selector": { - "description": "Selects the methods to which this rule applies. Use '*' to indicate all\nmethods in all APIs.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Use '*' to indicate all methods in all APIs. Refer to selector for syntax details.", "type": "string" }, "skipServiceControl": { - "description": "If true, the selected method should skip service control and the control\nplane features, such as quota and billing, will not be available.\nThis flag is used by Google Cloud Endpoints to bypass checks for internal\nmethods, such as service health check methods.", + "description": "If true, the selected method should skip service control and the control plane features, such as quota and billing, will not be available. This flag is used by Google Cloud Endpoints to bypass checks for internal methods, such as service health check methods.", "type": "boolean" } }, diff --git a/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go b/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go index 89437c8b751..1840b78e3ce 100644 --- a/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go +++ b/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go @@ -79,6 +79,7 @@ const apiId = "servicemanagement:v1" const apiName = "servicemanagement" const apiVersion = "v1" const basePath = "https://servicemanagement.googleapis.com/" +const mtlsBasePath = "https://servicemanagement.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -106,6 +107,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*APIService, // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) 
if err != nil { return nil, err @@ -206,13 +208,11 @@ type ServicesRolloutsService struct { s *APIService } -// Advice: Generated advice about this change, used for providing -// more +// Advice: Generated advice about this change, used for providing more // information about how a change will affect the existing service. type Advice struct { // Description: Useful description for why this advice was applied and - // what actions should - // be taken to mitigate any implied risks. + // what actions should be taken to mitigate any implied risks. Description string `json:"description,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -238,22 +238,15 @@ func (s *Advice) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Api: Api is a light-weight descriptor for an API -// Interface. -// +// Api: Api is a light-weight descriptor for an API Interface. // Interfaces are also described as "protocol buffer services" in some -// contexts, -// such as by the "service" keyword in a .proto file, but they are -// different -// from API Services, which represent a concrete implementation of an -// interface -// as opposed to simply a description of methods and bindings. They are -// also -// sometimes simply referred to as "APIs" in other contexts, such as the -// name of -// this message itself. See -// https://cloud.google.com/apis/design/glossary for -// detailed terminology. +// contexts, such as by the "service" keyword in a .proto file, but they +// are different from API Services, which represent a concrete +// implementation of an interface as opposed to simply a description of +// methods and bindings. They are also sometimes simply referred to as +// "APIs" in other contexts, such as the name of this message itself. +// See https://cloud.google.com/apis/design/glossary for detailed +// terminology. type Api struct { // Methods: The methods of this interface, in unspecified order. Methods []*Method `json:"methods,omitempty"` @@ -262,16 +255,14 @@ type Api struct { Mixins []*Mixin `json:"mixins,omitempty"` // Name: The fully qualified name of this interface, including package - // name - // followed by the interface's simple name. + // name followed by the interface's simple name. Name string `json:"name,omitempty"` // Options: Any metadata attached to the interface. Options []*Option `json:"options,omitempty"` // SourceContext: Source context for the protocol buffer service - // represented by this - // message. + // represented by this message. SourceContext *SourceContext `json:"sourceContext,omitempty"` // Syntax: The source syntax of the service. @@ -282,35 +273,20 @@ type Api struct { Syntax string `json:"syntax,omitempty"` // Version: A version string for this interface. If specified, must have - // the form - // `major-version.minor-version`, as in `1.10`. If the minor version - // is - // omitted, it defaults to zero. If the entire version field is empty, - // the - // major version is derived from the package name, as outlined below. If - // the - // field is not empty, the version in the package name will be verified - // to be - // consistent with what is provided here. - // - // The versioning schema uses [semantic - // versioning](http://semver.org) where the major version - // number - // indicates a breaking change and the minor version an - // additive, - // non-breaking change. 
Both version numbers are signals to users - // what to expect from different versions, and should be - // carefully - // chosen based on the product plan. - // - // The major version is also reflected in the package name of - // the - // interface, which must end in `v`, as - // in - // `google.feature.v1`. For major versions 0 and 1, the suffix can - // be omitted. Zero major versions must only be used for - // experimental, non-GA interfaces. - // + // the form `major-version.minor-version`, as in `1.10`. If the minor + // version is omitted, it defaults to zero. If the entire version field + // is empty, the major version is derived from the package name, as + // outlined below. If the field is not empty, the version in the package + // name will be verified to be consistent with what is provided here. + // The versioning schema uses [semantic versioning](http://semver.org) + // where the major version number indicates a breaking change and the + // minor version an additive, non-breaking change. Both version numbers + // are signals to users what to expect from different versions, and + // should be carefully chosen based on the product plan. The major + // version is also reflected in the package name of the interface, which + // must end in `v`, as in `google.feature.v1`. For major versions 0 and + // 1, the suffix can be omitted. Zero major versions must only be used + // for experimental, non-GA interfaces. Version string `json:"version,omitempty"` // ForceSendFields is a list of field names (e.g. "Methods") to @@ -336,72 +312,31 @@ func (s *Api) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AuditConfig: Specifies the audit configuration for a service. -// The configuration determines which permission types are logged, and -// what -// identities, if any, are exempted from logging. -// An AuditConfig must have one or more AuditLogConfigs. -// -// If there are AuditConfigs for both `allServices` and a specific -// service, -// the union of the two AuditConfigs is used for that service: the -// log_types -// specified in each AuditConfig are enabled, and the exempted_members -// in each -// AuditLogConfig are exempted. -// -// Example Policy with multiple AuditConfigs: -// -// { -// "audit_configs": [ -// { -// "service": "allServices" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// }, -// { -// "log_type": "ADMIN_READ", -// } -// ] -// }, -// { -// "service": "sampleservice.googleapis.com" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// }, -// { -// "log_type": "DATA_WRITE", -// "exempted_members": [ -// "user:aliya@example.com" -// ] -// } -// ] -// } -// ] -// } -// -// For sampleservice, this policy enables DATA_READ, DATA_WRITE and -// ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, -// and -// aliya@example.com from DATA_WRITE logging. +// AuditConfig: Specifies the audit configuration for a service. The +// configuration determines which permission types are logged, and what +// identities, if any, are exempted from logging. An AuditConfig must +// have one or more AuditLogConfigs. If there are AuditConfigs for both +// `allServices` and a specific service, the union of the two +// AuditConfigs is used for that service: the log_types specified in +// each AuditConfig are enabled, and the exempted_members in each +// AuditLogConfig are exempted. 
Example Policy with multiple +// AuditConfigs: { "audit_configs": [ { "service": "allServices", +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": +// [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { +// "log_type": "ADMIN_READ" } ] }, { "service": +// "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": +// "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ +// "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy +// enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts +// jose@example.com from DATA_READ logging, and aliya@example.com from +// DATA_WRITE logging. type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of // permission. AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` - // Service: Specifies a service that will be enabled for audit - // logging. - // For example, `storage.googleapis.com`, - // `cloudsql.googleapis.com`. + // Service: Specifies a service that will be enabled for audit logging. + // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. // `allServices` is a special value that covers all services. Service string `json:"service,omitempty"` @@ -430,31 +365,15 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { } // AuditLogConfig: Provides the configuration for logging a type of -// permissions. -// Example: -// -// { -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// } -// ] -// } -// -// This enables 'DATA_READ' and 'DATA_WRITE' logging, while -// exempting -// jose@example.com from DATA_READ logging. +// permissions. Example: { "audit_log_configs": [ { "log_type": +// "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { +// "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and +// 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ +// logging. type AuditLogConfig struct { // ExemptedMembers: Specifies the identities that do not cause logging - // for this type of - // permission. - // Follows the same format of Binding.members. + // for this type of permission. Follows the same format of + // Binding.members. ExemptedMembers []string `json:"exemptedMembers,omitempty"` // LogType: The log type that this config enables. @@ -491,97 +410,57 @@ func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { } // AuthProvider: Configuration for an authentication provider, including -// support for -// [JSON Web -// Token -// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-tok -// en-32). +// support for [JSON Web Token +// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32) +// . type AuthProvider struct { - // Audiences: The list of - // JWT - // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web- - // token-32#section-4.1.3). - // that are allowed to access. A JWT containing any of these audiences - // will - // be accepted. When this setting is absent, JWTs with audiences: - // - "https://[service.name]/[google.protobuf.Api.name]" - // - "https://[service.name]/" - // will be accepted. 
- // For example, if no audiences are in the setting, LibraryService API - // will - // accept JWTs with the following audiences: - // - - // - // https://library-example.googleapis.com/google.example.library.v1.LibraryService - // - https://library-example.googleapis.com/ - // - // Example: - // - // audiences: bookstore_android.apps.googleusercontent.com, - // bookstore_web.apps.googleusercontent.com + // Audiences: The list of JWT + // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-toke + // n-32#section-4.1.3). that are allowed to access. A JWT containing any + // of these audiences will be accepted. When this setting is absent, + // JWTs with audiences: - + // "https://[service.name]/[google.protobuf.Api.name]" - + // "https://[service.name]/" will be accepted. For example, if no + // audiences are in the setting, LibraryService API will accept JWTs + // with the following audiences: - + // https://library-example.googleapis.com/google.example.library.v1.LibraryService - https://library-example.googleapis.com/ Example: audiences: bookstore_android.apps.googleusercontent.com, + // bookstore_web.apps.googleusercontent.com Audiences string `json:"audiences,omitempty"` // AuthorizationUrl: Redirect URL if JWT token is required but not - // present or is expired. - // Implement authorizationUrl of securityDefinitions in OpenAPI spec. + // present or is expired. Implement authorizationUrl of + // securityDefinitions in OpenAPI spec. AuthorizationUrl string `json:"authorizationUrl,omitempty"` // Id: The unique identifier of the auth provider. It will be referred - // to by - // `AuthRequirement.provider_id`. - // - // Example: "bookstore_auth". + // to by `AuthRequirement.provider_id`. Example: "bookstore_auth". Id string `json:"id,omitempty"` - // Issuer: Identifies the principal that issued the JWT. - // See - // https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#sec - // tion-4.1.1 - // Usually a URL or an email address. - // - // Example: https://securetoken.google.com - // Example: 1234567-compute@developer.gserviceaccount.com + // Issuer: Identifies the principal that issued the JWT. See + // https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1 Usually a URL or an email address. Example: https://securetoken.google.com Example: + // 1234567-compute@developer.gserviceaccount.com Issuer string `json:"issuer,omitempty"` // JwksUri: URL of the provider's public key set to validate signature - // of the JWT. - // See - // [OpenID - // Discovery](https://openid.net/specs/openid-connect-discove - // ry-1_0.html#ProviderMetadata). - // Optional if the key set document: - // - can be retrieved from - // [OpenID - // + // of the JWT. See [OpenID + // Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html# + // ProviderMetadata). Optional if the key set document: - can be + // retrieved from [OpenID // Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html - // of - // the issuer. - // - can be inferred from the email domain of the issuer (e.g. a - // Google - // service account). - // - // Example: https://www.googleapis.com/oauth2/v1/certs + // of the issuer. - can be inferred from the email domain of the issuer + // (e.g. a Google service account). Example: + // https://www.googleapis.com/oauth2/v1/certs JwksUri string `json:"jwksUri,omitempty"` - // JwtLocations: Defines the locations to extract the JWT. - // - // JWT locations can be either from HTTP headers or URL query - // parameters. 
- // The rule is that the first match wins. The checking order is: - // checking - // all headers first, then URL query parameters. - // - // If not specified, default to use following 3 locations: - // 1) Authorization: Bearer - // 2) x-goog-iap-jwt-assertion - // 3) access_token query parameter - // - // Default locations can be specified as followings: - // jwt_locations: - // - header: Authorization - // value_prefix: "Bearer " - // - header: x-goog-iap-jwt-assertion - // - query: access_token + // JwtLocations: Defines the locations to extract the JWT. JWT locations + // can be either from HTTP headers or URL query parameters. The rule is + // that the first match wins. The checking order is: checking all + // headers first, then URL query parameters. If not specified, default + // to use following 3 locations: 1) Authorization: Bearer 2) + // x-goog-iap-jwt-assertion 3) access_token query parameter Default + // locations can be specified as followings: jwt_locations: - header: + // Authorization value_prefix: "Bearer " - header: + // x-goog-iap-jwt-assertion - query: access_token JwtLocations []*JwtLocation `json:"jwtLocations,omitempty"` // ForceSendFields is a list of field names (e.g. "Audiences") to @@ -608,43 +487,27 @@ func (s *AuthProvider) MarshalJSON() ([]byte, error) { } // AuthRequirement: User-defined authentication requirements, including -// support for -// [JSON Web -// Token -// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-tok -// en-32). +// support for [JSON Web Token +// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32) +// . type AuthRequirement struct { // Audiences: NOTE: This will be deprecated soon, once - // AuthProvider.audiences is - // implemented and accepted in all the runtime components. - // - // The list of - // JWT - // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web- - // token-32#section-4.1.3). - // that are allowed to access. A JWT containing any of these audiences - // will - // be accepted. When this setting is absent, only JWTs with - // audience - // "https://Service_name/API_name" - // will be accepted. For example, if no audiences are in the - // setting, - // LibraryService API will only accept JWTs with the following - // audience - // "https://library-example.googleapis.com/google.example.librar - // y.v1.LibraryService". - // - // Example: - // - // audiences: bookstore_android.apps.googleusercontent.com, - // bookstore_web.apps.googleusercontent.com + // AuthProvider.audiences is implemented and accepted in all the runtime + // components. The list of JWT + // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-toke + // n-32#section-4.1.3). that are allowed to access. A JWT containing any + // of these audiences will be accepted. When this setting is absent, + // only JWTs with audience "https://Service_name/API_name" will be + // accepted. For example, if no audiences are in the setting, + // LibraryService API will only accept JWTs with the following audience + // "https://library-example.googleapis.com/google.example.library.v1.Libr + // aryService". Example: audiences: + // bookstore_android.apps.googleusercontent.com, + // bookstore_web.apps.googleusercontent.com Audiences string `json:"audiences,omitempty"` - // ProviderId: id from authentication provider. - // - // Example: - // - // provider_id: bookstore_auth + // ProviderId: id from authentication provider. 
Example: provider_id: + // bookstore_auth ProviderId string `json:"providerId,omitempty"` // ForceSendFields is a list of field names (e.g. "Audiences") to @@ -671,30 +534,20 @@ func (s *AuthRequirement) MarshalJSON() ([]byte, error) { } // Authentication: `Authentication` defines the authentication -// configuration for an API. -// -// Example for an API targeted for external use: -// -// name: calendar.googleapis.com -// authentication: -// providers: -// - id: google_calendar_auth -// jwks_uri: https://www.googleapis.com/oauth2/v1/certs -// issuer: https://securetoken.google.com -// rules: -// - selector: "*" -// requirements: -// provider_id: google_calendar_auth +// configuration for an API. Example for an API targeted for external +// use: name: calendar.googleapis.com authentication: providers: - id: +// google_calendar_auth jwks_uri: +// https://www.googleapis.com/oauth2/v1/certs issuer: +// https://securetoken.google.com rules: - selector: "*" requirements: +// provider_id: google_calendar_auth type Authentication struct { // Providers: Defines a set of authentication providers that a service // supports. Providers []*AuthProvider `json:"providers,omitempty"` // Rules: A list of authentication rules that apply to individual API - // methods. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // methods. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*AuthenticationRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. "Providers") to @@ -720,19 +573,12 @@ func (s *Authentication) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AuthenticationRule: Authentication rules for the service. -// -// By default, if a method has any authentication requirements, every -// request -// must include a valid credential matching one of the -// requirements. -// It's an error to include more than one kind of credential in a -// single -// request. -// -// If a method doesn't have any auth requirements, request credentials -// will be -// ignored. +// AuthenticationRule: Authentication rules for the service. By default, +// if a method has any authentication requirements, every request must +// include a valid credential matching one of the requirements. It's an +// error to include more than one kind of credential in a single +// request. If a method doesn't have any auth requirements, request +// credentials will be ignored. type AuthenticationRule struct { // AllowWithoutCredential: If true, the service accepts API keys without // any other credential. @@ -744,9 +590,8 @@ type AuthenticationRule struct { // Requirements: Requirements for additional authentication providers. Requirements []*AuthRequirement `json:"requirements,omitempty"` - // Selector: Selects the methods to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects the methods to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -777,10 +622,8 @@ func (s *AuthenticationRule) MarshalJSON() ([]byte, error) { // Backend: `Backend` defines the backend configuration for a service. type Backend struct { // Rules: A list of API backend rules that apply to individual API - // methods. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // methods. 
**NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*BackendRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. "Rules") to @@ -809,197 +652,81 @@ func (s *Backend) MarshalJSON() ([]byte, error) { // BackendRule: A backend rule provides configuration for an individual // API element. type BackendRule struct { - // Address: The address of the API backend. - // - // The scheme is used to determine the backend protocol and - // security. - // The following schemes are accepted: - // - // SCHEME PROTOCOL SECURITY - // http:// HTTP None - // https:// HTTP TLS - // grpc:// gRPC None - // grpcs:// gRPC TLS - // - // It is recommended to explicitly include a scheme. Leaving out the - // scheme - // may cause constrasting behaviors across platforms. - // - // If the port is unspecified, the default is: - // - 80 for schemes without TLS - // - 443 for schemes with TLS - // - // For HTTP backends, use protocol - // to specify the protocol version. + // Address: The address of the API backend. The scheme is used to + // determine the backend protocol and security. The following schemes + // are accepted: SCHEME PROTOCOL SECURITY http:// HTTP None https:// + // HTTP TLS grpc:// gRPC None grpcs:// gRPC TLS It is recommended to + // explicitly include a scheme. Leaving out the scheme may cause + // constrasting behaviors across platforms. If the port is unspecified, + // the default is: - 80 for schemes without TLS - 443 for schemes with + // TLS For HTTP backends, use protocol to specify the protocol version. Address string `json:"address,omitempty"` // Deadline: The number of seconds to wait for a response from a - // request. The default - // varies based on the request protocol and deployment environment. + // request. The default varies based on the request protocol and + // deployment environment. Deadline float64 `json:"deadline,omitempty"` // DisableAuth: When disable_auth is true, a JWT ID token won't be - // generated and the - // original "Authorization" HTTP header will be preserved. If the header - // is - // used to carry the original token and is expected by the backend, - // this - // field must be set to true to preserve the header. + // generated and the original "Authorization" HTTP header will be + // preserved. If the header is used to carry the original token and is + // expected by the backend, this field must be set to true to preserve + // the header. DisableAuth bool `json:"disableAuth,omitempty"` // JwtAudience: The JWT audience is used when generating a JWT ID token - // for the backend. - // This ID token will be added in the HTTP "authorization" header, and - // sent - // to the backend. + // for the backend. This ID token will be added in the HTTP + // "authorization" header, and sent to the backend. JwtAudience string `json:"jwtAudience,omitempty"` // MinDeadline: Minimum deadline in seconds needed for this method. - // Calls having deadline - // value lower than this will be rejected. + // Calls having deadline value lower than this will be rejected. MinDeadline float64 `json:"minDeadline,omitempty"` // OperationDeadline: The number of seconds to wait for the completion - // of a long running - // operation. The default is no deadline. + // of a long running operation. The default is no deadline. OperationDeadline float64 `json:"operationDeadline,omitempty"` // Possible values: // "PATH_TRANSLATION_UNSPECIFIED" // "CONSTANT_ADDRESS" - Use the backend address as-is, with no - // modification to the path. 
If the - // URL pattern contains variables, the variable names and values will - // be - // appended to the query string. If a query string parameter and a - // URL - // pattern variable have the same name, this may result in duplicate - // keys in - // the query string. - // - // # Examples - // - // Given the following operation config: - // - // Method path: /api/company/{cid}/user/{uid} - // Backend address: - // https://example.cloudfunctions.net/getUser - // - // Requests to the following request paths will call the backend at - // the - // translated path: - // - // Request path: /api/company/widgetworks/user/johndoe - // Translated: - // - // https://example.cloudfunctions.net/getUser?cid=widgetworks&uid=johndoe - // - // Request path: /api/company/widgetworks/user/johndoe?timezone=EST - // Translated: - // + // modification to the path. If the URL pattern contains variables, the + // variable names and values will be appended to the query string. If a + // query string parameter and a URL pattern variable have the same name, + // this may result in duplicate keys in the query string. # Examples + // Given the following operation config: Method path: + // /api/company/{cid}/user/{uid} Backend address: + // https://example.cloudfunctions.net/getUser Requests to the following + // request paths will call the backend at the translated path: Request + // path: /api/company/widgetworks/user/johndoe Translated: + // https://example.cloudfunctions.net/getUser?cid=widgetworks&uid=johndoe Request path: /api/company/widgetworks/user/johndoe?timezone=EST Translated: // https://example.cloudfunctions.net/getUser?timezone=EST&cid=widgetworks&uid=johndoe // "APPEND_PATH_TO_ADDRESS" - The request path will be appended to the - // backend address. - // - // # Examples - // - // Given the following operation config: - // - // Method path: /api/company/{cid}/user/{uid} - // Backend address: https://example.appspot.com - // - // Requests to the following request paths will call the backend at - // the - // translated path: - // - // Request path: /api/company/widgetworks/user/johndoe - // Translated: - // + // backend address. # Examples Given the following operation config: + // Method path: /api/company/{cid}/user/{uid} Backend address: + // https://example.appspot.com Requests to the following request paths + // will call the backend at the translated path: Request path: + // /api/company/widgetworks/user/johndoe Translated: // https://example.appspot.com/api/company/widgetworks/user/johndoe - // - // Request path: /api/company/widgetworks/user/johndoe?timezone=EST - // Translated: - // + // Request path: /api/company/widgetworks/user/johndoe?timezone=EST + // Translated: // https://example.appspot.com/api/company/widgetworks/user/johndoe?timezone=EST PathTranslation string `json:"pathTranslation,omitempty"` - // Protocol: The protocol used for sending a request to the backend. - // The supported values are "http/1.1" and "h2". - // - // The default value is inferred from the scheme in the - // address field: - // - // SCHEME PROTOCOL - // http:// http/1.1 - // https:// http/1.1 - // grpc:// h2 - // grpcs:// h2 - // - // For secure HTTP backends (https://) that support HTTP/2, set this - // field - // to "h2" for improved performance. - // - // Configuring this field to non-default values is only supported for - // secure - // HTTP backends. This field will be ignored for all other - // backends. 
- // - // See - // https://www.iana.org/assignments/tls-extensiontype-valu - // es/tls-extensiontype-values.xhtml#alpn-protocol-ids - // for more details on the supported values. + // Protocol: The protocol used for sending a request to the backend. The + // supported values are "http/1.1" and "h2". The default value is + // inferred from the scheme in the address field: SCHEME PROTOCOL + // http:// http/1.1 https:// http/1.1 grpc:// h2 grpcs:// h2 For secure + // HTTP backends (https://) that support HTTP/2, set this field to "h2" + // for improved performance. Configuring this field to non-default + // values is only supported for secure HTTP backends. This field will be + // ignored for all other backends. See + // https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids for more details on the supported + // values. Protocol string `json:"protocol,omitempty"` - // RenameTo: Unimplemented. Do not use. - // - // The new name the selected proto elements should be renamed to. - // - // The package, the service and the method can all be renamed. - // The backend server should implement the renamed proto. However, - // clients - // should call the original method, and ESF routes the traffic to the - // renamed - // method. - // - // HTTP clients should call the URL mapped to the original method. - // gRPC and Stubby clients should call the original method with package - // name. - // - // For legacy reasons, ESF allows Stubby clients to call with the - // short name (without the package name). However, for API - // Versioning(or - // multiple methods mapped to the same short name), all Stubby clients - // must - // call the method's full name with the package name, otherwise the - // first one - // (selector) wins. - // - // If this `rename_to` is specified with a trailing `*`, the `selector` - // must - // be specified with a trailing `*` as well. The all element short - // names - // matched by the `*` in the selector will be kept in the - // `rename_to`. - // - // For example, - // rename_rules: - // - selector: |- - // google.example.library.v1.* - // rename_to: google.example.library.* - // - // The selector matches `google.example.library.v1.Library.CreateShelf` - // and - // `google.example.library.v1.Library.CreateBook`, they will be renamed - // to - // `google.example.library.Library.CreateShelf` - // and - // `google.example.library.Library.CreateBook`. It essentially renames - // the - // proto package name section of the matched proto service and methods. - RenameTo string `json:"renameTo,omitempty"` - - // Selector: Selects the methods to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects the methods to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. "Address") to @@ -1043,53 +770,28 @@ func (s *BackendRule) UnmarshalJSON(data []byte) error { return nil } -// Billing: Billing related configuration of the service. -// -// The following example shows how to configure monitored resources and -// metrics -// for billing, `consumer_destinations` is the only supported -// destination and -// the monitored resources need at least one label -// key +// Billing: Billing related configuration of the service. 
The following +// example shows how to configure monitored resources and metrics for +// billing, `consumer_destinations` is the only supported destination +// and the monitored resources need at least one label key // `cloud.googleapis.com/location` to indicate the location of the -// billing -// usage, using different monitored resources between monitoring and -// billing is -// recommended so they can be evolved independently: -// -// -// monitored_resources: -// - type: library.googleapis.com/billing_branch -// labels: -// - key: cloud.googleapis.com/location -// description: | -// Predefined label to support billing location restriction. -// - key: city -// description: | -// Custom label to define the city where the library branch is -// located -// in. -// - key: name -// description: Custom label to define the name of the library -// branch. -// metrics: -// - name: library.googleapis.com/book/borrowed_count -// metric_kind: DELTA -// value_type: INT64 -// unit: "1" -// billing: -// consumer_destinations: -// - monitored_resource: library.googleapis.com/billing_branch -// metrics: -// - library.googleapis.com/book/borrowed_count +// billing usage, using different monitored resources between monitoring +// and billing is recommended so they can be evolved independently: +// monitored_resources: - type: library.googleapis.com/billing_branch +// labels: - key: cloud.googleapis.com/location description: | +// Predefined label to support billing location restriction. - key: city +// description: | Custom label to define the city where the library +// branch is located in. - key: name description: Custom label to define +// the name of the library branch. metrics: - name: +// library.googleapis.com/book/borrowed_count metric_kind: DELTA +// value_type: INT64 unit: "1" billing: consumer_destinations: - +// monitored_resource: library.googleapis.com/billing_branch metrics: - +// library.googleapis.com/book/borrowed_count type Billing struct { // ConsumerDestinations: Billing configurations for sending metrics to - // the consumer project. - // There can be multiple consumer destinations per service, each one - // must have - // a different monitored resource type. A metric can be used in at - // most - // one consumer destination. + // the consumer project. There can be multiple consumer destinations per + // service, each one must have a different monitored resource type. A + // metric can be used in at most one consumer destination. ConsumerDestinations []*BillingDestination `json:"consumerDestinations,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1118,17 +820,14 @@ func (s *Billing) MarshalJSON() ([]byte, error) { } // BillingDestination: Configuration of a specific billing destination -// (Currently only support -// bill against consumer project). +// (Currently only support bill against consumer project). type BillingDestination struct { - // Metrics: Names of the metrics to report to this billing - // destination. + // Metrics: Names of the metrics to report to this billing destination. // Each name must be defined in Service.metrics section. Metrics []string `json:"metrics,omitempty"` // MonitoredResource: The monitored resource type. The type must be - // defined in - // Service.monitored_resources section. + // defined in Service.monitored_resources section. MonitoredResource string `json:"monitoredResource,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Metrics") to @@ -1156,95 +855,53 @@ func (s *BillingDestination) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: The condition that is associated with this binding. - // - // If the condition evaluates to `true`, then this binding applies to - // the - // current request. - // - // If the condition evaluates to `false`, then this binding does not - // apply to - // the current request. However, a different role binding might grant - // the same - // role to one or more of the members in this binding. - // - // To learn which resources support conditions in their IAM policies, - // see - // the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/r - // esource-policies). + // Condition: The condition that is associated with this binding. If the + // condition evaluates to `true`, then this binding applies to the + // current request. If the condition evaluates to `false`, then this + // binding does not apply to the current request. However, a different + // role binding might grant the same role to one or more of the members + // in this binding. To learn which resources support conditions in their + // IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud - // Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@example.com` . - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. - // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a user that has been recently deleted. - // For - // example, `alice@example.com?uid=123456789012345678901`. If the - // user is - // recovered, this value reverts to `user:{emailid}` and the - // recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address - // (plus - // unique identifier) representing a service account that has been - // recently - // deleted. For example, - // + // Platform resource. `members` can have the following values: * + // `allUsers`: A special identifier that represents anyone who is on the + // internet; with or without a Google account. * + // `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. * + // `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . * + // `serviceAccount:{emailid}`: An email address that represents a + // service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An + // email address that represents a Google group. For example, + // `admins@example.com`. 
* `deleted:user:{emailid}?uid={uniqueid}`: An + // email address (plus unique identifier) representing a user that has + // been recently deleted. For example, + // `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered + // user retains the role in the binding. * + // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account - // retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a Google group that has been recently - // deleted. For example, - // `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` - // and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. - // - // + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains + // the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: + // An email address (plus unique identifier) representing a Google group + // that has been recently deleted. For example, + // `admins@example.com?uid=123456789012345678901`. If the group is + // recovered, this value reverts to `group:{emailid}` and the recovered + // group retains the role in the binding. * `domain:{domain}`: The G + // Suite domain (primary) that represents all the users of that domain. + // For example, `google.com` or `example.com`. Members []string `json:"members,omitempty"` - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Role: Role that is assigned to `members`. For example, + // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` // ForceSendFields is a list of field names (e.g. "Condition") to @@ -1271,19 +928,13 @@ func (s *Binding) MarshalJSON() ([]byte, error) { } // ChangeReport: Change report associated with a particular service -// configuration. -// -// It contains a list of ConfigChanges based on the comparison -// between -// two service configurations. +// configuration. It contains a list of ConfigChanges based on the +// comparison between two service configurations. type ChangeReport struct { - // ConfigChanges: List of changes between two service - // configurations. - // The changes will be alphabetically sorted based on the identifier - // of each change. - // A ConfigChange identifier is a dot separated path to the - // configuration. - // Example: + // ConfigChanges: List of changes between two service configurations. + // The changes will be alphabetically sorted based on the identifier of + // each change. A ConfigChange identifier is a dot separated path to the + // configuration. 
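The Binding documentation above condenses the member forms and the conditional-binding behaviour; a minimal sketch of a conditional binding follows, assuming the same generated servicemanagement/v1 package (the role, members, and CEL expression are illustrative).

package main

import (
	"fmt"

	servicemanagement "google.golang.org/api/servicemanagement/v1"
)

func main() {
	binding := &servicemanagement.Binding{
		Role: "roles/viewer",
		Members: []string{
			"user:alice@example.com",
			"serviceAccount:my-other-app@appspot.gserviceaccount.com",
		},
		// A conditional binding; policies carrying conditions must be
		// requested and written as policy version 3.
		Condition: &servicemanagement.Expr{
			Title:      "expires-end-of-2020",
			Expression: `request.time < timestamp("2021-01-01T00:00:00Z")`,
		},
	}
	fmt.Printf("%s granted to %d member(s), conditional=%v\n",
		binding.Role, len(binding.Members), binding.Condition != nil)
}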
Example: // visibility.rules[selector='LibraryService.CreateBook'].restriction ConfigChanges []*ConfigChange `json:"configChanges,omitempty"` @@ -1311,18 +962,13 @@ func (s *ChangeReport) MarshalJSON() ([]byte, error) { } // ConfigChange: Output generated from semantically comparing two -// versions of a service -// configuration. -// -// Includes detailed information about a field that have changed -// with -// applicable advice about potential consequences for the change, such -// as +// versions of a service configuration. Includes detailed information +// about a field that have changed with applicable advice about +// potential consequences for the change, such as // backwards-incompatibility. type ConfigChange struct { // Advices: Collection of advice provided for this change, useful for - // determining the - // possible impact of this change. + // determining the possible impact of this change. Advices []*Advice `json:"advices,omitempty"` // ChangeType: The type for this change, either ADDED, REMOVED, or @@ -1331,45 +977,32 @@ type ConfigChange struct { // Possible values: // "CHANGE_TYPE_UNSPECIFIED" - No value was provided. // "ADDED" - The changed object exists in the 'new' service - // configuration, but not - // in the 'old' service configuration. + // configuration, but not in the 'old' service configuration. // "REMOVED" - The changed object exists in the 'old' service - // configuration, but not - // in the 'new' service configuration. + // configuration, but not in the 'new' service configuration. // "MODIFIED" - The changed object exists in both service - // configurations, but its value - // is different. + // configurations, but its value is different. ChangeType string `json:"changeType,omitempty"` // Element: Object hierarchy path to the change, with levels separated - // by a '.' - // character. For repeated fields, an applicable unique identifier field - // is - // used for the index (usually selector, name, or id). For maps, the - // term - // 'key' is used. If the field has no unique identifier, the numeric - // index - // is used. - // Examples: - // - + // by a '.' character. For repeated fields, an applicable unique + // identifier field is used for the index (usually selector, name, or + // id). For maps, the term 'key' is used. If the field has no unique + // identifier, the numeric index is used. Examples: - // visibility.rules[selector=="google.LibraryService.ListBooks"].restrict - // ion - // - + // ion - // quota.metric_rules[selector=="google"].metric_costs[key=="reads"].valu - // e - // - logging.producer_destinations[0] + // e - logging.producer_destinations[0] Element string `json:"element,omitempty"` // NewValue: Value of the changed object in the new Service - // configuration, - // in JSON format. This field will not be populated if ChangeType == - // REMOVED. + // configuration, in JSON format. This field will not be populated if + // ChangeType == REMOVED. NewValue string `json:"newValue,omitempty"` // OldValue: Value of the changed object in the old Service - // configuration, - // in JSON format. This field will not be populated if ChangeType == - // ADDED. + // configuration, in JSON format. This field will not be populated if + // ChangeType == ADDED. OldValue string `json:"oldValue,omitempty"` // ForceSendFields is a list of field names (e.g. "Advices") to @@ -1412,23 +1045,15 @@ type ConfigFile struct { // "OPEN_API_JSON" - OpenAPI specification, serialized in JSON. // "OPEN_API_YAML" - OpenAPI specification, serialized in YAML. 
// "FILE_DESCRIPTOR_SET_PROTO" - FileDescriptorSet, generated by - // protoc. - // - // To generate, use protoc with imports and source info included. - // For an example test.proto file, the following command would put the - // value - // in a new file named out.pb. - // - // $protoc --include_imports --include_source_info test.proto -o out.pb + // protoc. To generate, use protoc with imports and source info + // included. For an example test.proto file, the following command would + // put the value in a new file named out.pb. $protoc --include_imports + // --include_source_info test.proto -o out.pb // "PROTO_FILE" - Uncompiled Proto file. Used for storage and display - // purposes only, - // currently server-side compilation is not supported. Should match - // the - // inputs to 'protoc' command used to generated - // FILE_DESCRIPTOR_SET_PROTO. A - // file of this type can only be included if at least one file of - // type - // FILE_DESCRIPTOR_SET_PROTO is included. + // purposes only, currently server-side compilation is not supported. + // Should match the inputs to 'protoc' command used to generated + // FILE_DESCRIPTOR_SET_PROTO. A file of this type can only be included + // if at least one file of type FILE_DESCRIPTOR_SET_PROTO is included. FileType string `json:"fileType,omitempty"` // ForceSendFields is a list of field names (e.g. "FileContents") to @@ -1456,8 +1081,7 @@ func (s *ConfigFile) MarshalJSON() ([]byte, error) { // ConfigRef: Represents a service configuration with its name and id. type ConfigRef struct { - // Name: Resource name of a service config. It must have the - // following + // Name: Resource name of a service config. It must have the following // format: "services/{service name}/configs/{config id}". Name string `json:"name,omitempty"` @@ -1485,19 +1109,15 @@ func (s *ConfigRef) MarshalJSON() ([]byte, error) { } // ConfigSource: Represents a source file which is used to generate the -// service configuration -// defined by `google.api.Service`. +// service configuration defined by `google.api.Service`. type ConfigSource struct { // Files: Set of source configuration files that are used to generate a - // service - // configuration (`google.api.Service`). + // service configuration (`google.api.Service`). Files []*ConfigFile `json:"files,omitempty"` // Id: A unique ID for a specific instance of this message, typically - // assigned - // by the client for tracking purpose. If empty, the server may choose - // to - // generate one instead. + // assigned by the client for tracking purpose. If empty, the server may + // choose to generate one instead. Id string `json:"id,omitempty"` // ForceSendFields is a list of field names (e.g. "Files") to @@ -1523,59 +1143,27 @@ func (s *ConfigSource) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Context: `Context` defines which contexts an API -// requests. -// -// Example: -// -// context: -// rules: -// - selector: "*" -// requested: -// - google.rpc.context.ProjectContext -// - google.rpc.context.OriginContext -// -// The above specifies that all methods in the API -// request -// `google.rpc.context.ProjectContext` -// and -// `google.rpc.context.OriginContext`. -// -// Available context types are defined in -// package -// `google.rpc.context`. -// -// This also provides mechanism to whitelist any protobuf message -// extension that -// can be sent in grpc metadata using -// “x-goog-ext--bin” -// and -// “x-goog-ext--jspb” format. 
For example, list any -// service -// specific protobuf types that can appear in grpc metadata as follows -// in your -// yaml file: -// -// Example: -// -// context: -// rules: -// - selector: +// Context: `Context` defines which contexts an API requests. Example: +// context: rules: - selector: "*" requested: - +// google.rpc.context.ProjectContext - google.rpc.context.OriginContext +// The above specifies that all methods in the API request +// `google.rpc.context.ProjectContext` and +// `google.rpc.context.OriginContext`. Available context types are +// defined in package `google.rpc.context`. This also provides mechanism +// to whitelist any protobuf message extension that can be sent in grpc +// metadata using “x-goog-ext--bin” and “x-goog-ext--jspb” +// format. For example, list any service specific protobuf types that +// can appear in grpc metadata as follows in your yaml file: Example: +// context: rules: - selector: // "google.example.library.v1.LibraryService.CreateBook" -// allowed_request_extensions: -// - google.foo.v1.NewExtension -// allowed_response_extensions: -// - google.foo.v1.NewExtension -// -// You can also specify extension ID instead of fully qualified -// extension name +// allowed_request_extensions: - google.foo.v1.NewExtension +// allowed_response_extensions: - google.foo.v1.NewExtension You can +// also specify extension ID instead of fully qualified extension name // here. type Context struct { // Rules: A list of RPC context rules that apply to individual API - // methods. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // methods. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*ContextRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. "Rules") to @@ -1602,17 +1190,14 @@ func (s *Context) MarshalJSON() ([]byte, error) { } // ContextRule: A context rule provides information about the context -// for an individual API -// element. +// for an individual API element. type ContextRule struct { // AllowedRequestExtensions: A list of full type names or extension IDs - // of extensions allowed in grpc - // side channel from client to backend. + // of extensions allowed in grpc side channel from client to backend. AllowedRequestExtensions []string `json:"allowedRequestExtensions,omitempty"` // AllowedResponseExtensions: A list of full type names or extension IDs - // of extensions allowed in grpc - // side channel from backend to client. + // of extensions allowed in grpc side channel from backend to client. AllowedResponseExtensions []string `json:"allowedResponseExtensions,omitempty"` // Provided: A list of full type names of provided contexts. @@ -1621,9 +1206,8 @@ type ContextRule struct { // Requested: A list of full type names of requested contexts. Requested []string `json:"requested,omitempty"` - // Selector: Selects the methods to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects the methods to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1652,14 +1236,11 @@ func (s *ContextRule) MarshalJSON() ([]byte, error) { } // Control: Selects and configures the service controller used by the -// service. The -// service controller handles features like abuse, quota, billing, -// logging, -// monitoring, etc. +// service. 
The service controller handles features like abuse, quota, +// billing, logging, monitoring, etc. type Control struct { // Environment: The service control environment to use. If empty, no - // control plane - // feature (like quota and billing) will be enabled. + // control plane feature (like quota and billing) will be enabled. Environment string `json:"environment,omitempty"` // ForceSendFields is a list of field names (e.g. "Environment") to @@ -1685,24 +1266,14 @@ func (s *Control) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// CustomError: Customize service error responses. For example, list -// any service -// specific protobuf types that can appear in error detail lists -// of -// error responses. -// -// Example: -// -// custom_error: -// types: -// - google.foo.v1.CustomError -// - google.foo.v1.AnotherError +// CustomError: Customize service error responses. For example, list any +// service specific protobuf types that can appear in error detail lists +// of error responses. Example: custom_error: types: - +// google.foo.v1.CustomError - google.foo.v1.AnotherError type CustomError struct { // Rules: The list of custom error rules that apply to individual API - // messages. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // messages. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*CustomErrorRule `json:"rules,omitempty"` // Types: The list of custom error detail types, e.g. @@ -1735,14 +1306,12 @@ func (s *CustomError) MarshalJSON() ([]byte, error) { // CustomErrorRule: A custom error rule. type CustomErrorRule struct { // IsErrorType: Mark this message as possible payload in error response. - // Otherwise, - // objects of this type will be filtered when they appear in error - // payload. + // Otherwise, objects of this type will be filtered when they appear in + // error payload. IsErrorType bool `json:"isErrorType,omitempty"` - // Selector: Selects messages to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects messages to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. "IsErrorType") to @@ -1801,8 +1370,8 @@ func (s *CustomHttpPattern) MarshalJSON() ([]byte, error) { } // DeleteServiceStrategy: Strategy used to delete a service. This -// strategy is a placeholder only -// used by the system generated rollout to delete a service. +// strategy is a placeholder only used by the system generated rollout +// to delete a service. type DeleteServiceStrategy struct { } @@ -1844,157 +1413,68 @@ func (s *Diagnostic) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// DisableServiceRequest: Request message for DisableService method. -type DisableServiceRequest struct { - // ConsumerId: Required. The identity of consumer resource which service - // disablement will be - // applied to. - // - // The Google Service Management implementation accepts the - // following - // forms: - // - "project:" - // - // Note: this is made compatible - // with - // google.api.servicecontrol.v1.Operation.consumer_id. - ConsumerId string `json:"consumerId,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ConsumerId") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
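A minimal sketch of the CustomError example above, assuming the same generated servicemanagement/v1 package; the google.foo.v1 type names are the illustrative ones from the comment.

package main

import (
	"fmt"

	servicemanagement "google.golang.org/api/servicemanagement/v1"
)

func main() {
	ce := &servicemanagement.CustomError{
		// Protobuf types allowed to appear in error detail lists.
		Types: []string{"google.foo.v1.CustomError", "google.foo.v1.AnotherError"},
		Rules: []*servicemanagement.CustomErrorRule{{
			Selector:    "google.foo.v1.AnotherError",
			IsErrorType: true, // keep this message when it appears in an error payload
		}},
	}
	fmt.Printf("%d custom error type(s), %d rule(s)\n", len(ce.Types), len(ce.Rules))
}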
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ConsumerId") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DisableServiceRequest) MarshalJSON() ([]byte, error) { - type NoMethod DisableServiceRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - // DisableServiceResponse: Operation payload for DisableService method. type DisableServiceResponse struct { } // Documentation: `Documentation` provides the information for -// describing a service. -// -// Example: -//
documentation:
-//   summary: >
-//     The Google Calendar API gives access
-//     to most calendar features.
-//   pages:
-//   - name: Overview
-//     content: (== include google/foo/overview.md ==)
-//   - name: Tutorial
-//     content: (== include google/foo/tutorial.md ==)
-//     subpages;
-//     - name: Java
-//       content: (== include google/foo/tutorial_java.md ==)
-//   rules:
-//   - selector: google.calendar.Calendar.Get
-//     description: >
-//       ...
-//   - selector: google.calendar.Calendar.Put
-//     description: >
-//       ...
-// 
-// Documentation is provided in markdown syntax. In addition to -// standard markdown features, definition lists, tables and fenced -// code blocks are supported. Section headers can be provided and -// are -// interpreted relative to the section nesting of the context where -// a documentation fragment is embedded. -// -// Documentation from the IDL is merged with documentation defined -// via the config at normalization time, where documentation provided -// by config rules overrides IDL provided. -// -// A number of constructs specific to the API platform are supported -// in documentation text. -// -// In order to reference a proto element, the following -// notation can be -// used: -//
[fully.qualified.proto.name][]
-// T -// o override the display text used for the link, this can be -// used: -//
[display
-// text][fully.qualified.proto.name]
-// Text can be excluded from doc using the following -// notation: -//
(-- internal comment --)
-// -// A few directives are available in documentation. Note that -// directives must appear on a single line to be properly -// identified. The `include` directive includes a markdown file from -// an external source: -//
(== include path/to/file ==)
-// The `resource_for` directive marks a message to be the resource of -// a collection in REST view. If it is not specified, tools attempt -// to infer the resource from the operations in a -// collection: -//
(== resource_for v1.shelves.books
-// ==)
-// The directive `suppress_warning` does not directly affect -// documentation -// and is documented together with service config validation. +// describing a service. Example: documentation: summary: > The Google +// Calendar API gives access to most calendar features. pages: - name: +// Overview content: (== include google/foo/overview.md ==) - name: +// Tutorial content: (== include google/foo/tutorial.md ==) subpages; - +// name: Java content: (== include google/foo/tutorial_java.md ==) +// rules: - selector: google.calendar.Calendar.Get description: > ... - +// selector: google.calendar.Calendar.Put description: > ... +// Documentation is provided in markdown syntax. In addition to standard +// markdown features, definition lists, tables and fenced code blocks +// are supported. Section headers can be provided and are interpreted +// relative to the section nesting of the context where a documentation +// fragment is embedded. Documentation from the IDL is merged with +// documentation defined via the config at normalization time, where +// documentation provided by config rules overrides IDL provided. A +// number of constructs specific to the API platform are supported in +// documentation text. In order to reference a proto element, the +// following notation can be used: [fully.qualified.proto.name][] To +// override the display text used for the link, this can be used: +// [display text][fully.qualified.proto.name] Text can be excluded from +// doc using the following notation: (-- internal comment --) A few +// directives are available in documentation. Note that directives must +// appear on a single line to be properly identified. The `include` +// directive includes a markdown file from an external source: (== +// include path/to/file ==) The `resource_for` directive marks a message +// to be the resource of a collection in REST view. If it is not +// specified, tools attempt to infer the resource from the operations in +// a collection: (== resource_for v1.shelves.books ==) The directive +// `suppress_warning` does not directly affect documentation and is +// documented together with service config validation. type Documentation struct { // DocumentationRootUrl: The URL to the root of documentation. DocumentationRootUrl string `json:"documentationRootUrl,omitempty"` - // Overview: Declares a single overview page. For - // example: - //
documentation:
-	//   summary: ...
-	//   overview: (== include overview.md ==)
-	// 
- // This is a shortcut for the following declaration (using pages - // style): - //
documentation:
-	//   summary: ...
-	//   pages:
-	//   - name: Overview
-	//     content: (== include overview.md ==)
-	// 
- // Note: you cannot specify both `overview` field and `pages` field. + // Overview: Declares a single overview page. For example: + // documentation: summary: ... overview: (== include overview.md ==) + // This is a shortcut for the following declaration (using pages style): + // documentation: summary: ... pages: - name: Overview content: (== + // include overview.md ==) Note: you cannot specify both `overview` + // field and `pages` field. Overview string `json:"overview,omitempty"` // Pages: The top level pages for the documentation set. Pages []*Page `json:"pages,omitempty"` // Rules: A list of documentation rules that apply to individual API - // elements. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // elements. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*DocumentationRule `json:"rules,omitempty"` // ServiceRootUrl: Specifies the service root url if the default one - // (the service name - // from the yaml file) is not suitable. This can be seen in any - // fully - // specified service urls as well as sections that show a base that - // other - // urls are relative to. + // (the service name from the yaml file) is not suitable. This can be + // seen in any fully specified service urls as well as sections that + // show a base that other urls are relative to. ServiceRootUrl string `json:"serviceRootUrl,omitempty"` // Summary: A short summary of what the service does. Can only be - // provided by - // plain text. + // provided by plain text. Summary string `json:"summary,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -2026,24 +1506,20 @@ func (s *Documentation) MarshalJSON() ([]byte, error) { // individual API elements. type DocumentationRule struct { // DeprecationDescription: Deprecation description of the selected - // element(s). It can be provided if - // an element is marked as `deprecated`. + // element(s). It can be provided if an element is marked as + // `deprecated`. DeprecationDescription string `json:"deprecationDescription,omitempty"` // Description: Description of the selected API(s). Description string `json:"description,omitempty"` // Selector: The selector is a comma-separated list of patterns. Each - // pattern is a - // qualified name of the element which may end in "*", indicating a - // wildcard. - // Wildcards are only allowed at the end and for a whole component of - // the - // qualified name, i.e. "foo.*" is ok, but not "foo.b*" or "foo.*.bar". - // A - // wildcard will match one or more components. To specify a default for - // all - // applicable elements, the whole pattern "*" is used. + // pattern is a qualified name of the element which may end in "*", + // indicating a wildcard. Wildcards are only allowed at the end and for + // a whole component of the qualified name, i.e. "foo.*" is ok, but not + // "foo.b*" or "foo.*.bar". A wildcard will match one or more + // components. To specify a default for all applicable elements, the + // whole pattern "*" is used. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -2074,16 +1550,9 @@ func (s *DocumentationRule) MarshalJSON() ([]byte, error) { // EnableServiceRequest: Request message for EnableService method. type EnableServiceRequest struct { // ConsumerId: Required. The identity of consumer resource which service - // enablement will be - // applied to. 
- // - // The Google Service Management implementation accepts the - // following - // forms: - // - "project:" - // - // Note: this is made compatible - // with + // enablement will be applied to. The Google Service Management + // implementation accepts the following forms: - "project:" Note: this + // is made compatible with // google.api.servicecontrol.v1.Operation.consumer_id. ConsumerId string `json:"consumerId,omitempty"` @@ -2115,64 +1584,38 @@ type EnableServiceResponse struct { } // Endpoint: `Endpoint` describes a network endpoint that serves a set -// of APIs. -// A service may expose any number of endpoints, and all endpoints share -// the -// same service configuration, such as quota configuration and -// monitoring -// configuration. -// -// Example service configuration: -// -// name: library-example.googleapis.com -// endpoints: -// # Below entry makes 'google.example.library.v1.Library' -// # API be served from endpoint address -// library-example.googleapis.com. -// # It also allows HTTP OPTIONS calls to be passed to the -// backend, for -// # it to decide whether the subsequent cross-origin request is -// # allowed to proceed. -// - name: library-example.googleapis.com -// allow_cors: true +// of APIs. A service may expose any number of endpoints, and all +// endpoints share the same service configuration, such as quota +// configuration and monitoring configuration. Example service +// configuration: name: library-example.googleapis.com endpoints: # +// Below entry makes 'google.example.library.v1.Library' # API be served +// from endpoint address library-example.googleapis.com. # It also +// allows HTTP OPTIONS calls to be passed to the backend, for # it to +// decide whether the subsequent cross-origin request is # allowed to +// proceed. - name: library-example.googleapis.com allow_cors: true type Endpoint struct { // Aliases: DEPRECATED: This field is no longer supported. Instead of - // using aliases, - // please specify multiple google.api.Endpoint for each of the - // intended - // aliases. - // - // Additional names that this endpoint will be hosted on. + // using aliases, please specify multiple google.api.Endpoint for each + // of the intended aliases. Additional names that this endpoint will be + // hosted on. Aliases []string `json:"aliases,omitempty"` - // AllowCors: - // Allowing - // [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sh - // aring), aka - // cross-domain traffic, would allow the backends served from this - // endpoint to - // receive and respond to HTTP OPTIONS requests. The response will be - // used by - // the browser to determine whether the subsequent cross-origin request - // is - // allowed to proceed. + // AllowCors: Allowing + // [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), + // aka cross-domain traffic, would allow the backends served from this + // endpoint to receive and respond to HTTP OPTIONS requests. The + // response will be used by the browser to determine whether the + // subsequent cross-origin request is allowed to proceed. AllowCors bool `json:"allowCors,omitempty"` - // Features: The list of features enabled on this endpoint. - Features []string `json:"features,omitempty"` - // Name: The canonical name of this endpoint. 
Name string `json:"name,omitempty"` // Target: The specification of an Internet routable address of API - // frontend that will - // handle requests to this - // [API + // frontend that will handle requests to this [API // Endpoint](https://cloud.google.com/apis/design/glossary). It should - // be - // either a valid IPv4 address or a fully-qualified domain name. For - // example, - // "8.8.8.8" or "myservice.appspot.com". + // be either a valid IPv4 address or a fully-qualified domain name. For + // example, "8.8.8.8" or "myservice.appspot.com". Target string `json:"target,omitempty"` // ForceSendFields is a list of field names (e.g. "Aliases") to @@ -2277,65 +1720,40 @@ func (s *EnumValue) MarshalJSON() ([]byte, error) { } // Expr: Represents a textual expression in the Common Expression -// Language (CEL) -// syntax. CEL is a C-like expression language. The syntax and semantics -// of CEL -// are documented at https://github.com/google/cel-spec. -// -// Example (Comparison): -// -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" -// -// Example (Equality): -// -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == -// request.auth.claims.email" -// -// Example (Logic): -// -// title: "Public documents" -// description: "Determine whether the document should be publicly -// visible" -// expression: "document.type != 'private' && document.type != -// 'internal'" -// -// Example (Data Manipulation): -// -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + -// string(document.create_time)" -// -// The exact variables and functions that may be referenced within an -// expression -// are determined by the service that evaluates it. See the -// service -// documentation for additional information. +// Language (CEL) syntax. CEL is a C-like expression language. The +// syntax and semantics of CEL are documented at +// https://github.com/google/cel-spec. Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" +// description: "Determine whether the document should be publicly +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. type Expr struct { // Description: Optional. Description of the expression. This is a - // longer text which - // describes the expression, e.g. when hovered over it in a UI. + // longer text which describes the expression, e.g. when hovered over it + // in a UI. Description string `json:"description,omitempty"` // Expression: Textual representation of an expression in Common - // Expression Language - // syntax. + // Expression Language syntax. 
Expression string `json:"expression,omitempty"` // Location: Optional. String indicating the location of the expression - // for error - // reporting, e.g. a file name and a position in the file. + // for error reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` // Title: Optional. Title for the expression, i.e. a short string - // describing - // its purpose. This can be used e.g. in UIs which allow to enter - // the - // expression. + // describing its purpose. This can be used e.g. in UIs which allow to + // enter the expression. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -2411,9 +1829,8 @@ type Field struct { Number int64 `json:"number,omitempty"` // OneofIndex: The index of the field type in `Type.oneofs`, for message - // or enumeration - // types. The first type has index 1; zero means the type is not in the - // list. + // or enumeration types. The first type has index 1; zero means the type + // is not in the list. OneofIndex int64 `json:"oneofIndex,omitempty"` // Options: The protocol buffer options. @@ -2423,8 +1840,8 @@ type Field struct { Packed bool `json:"packed,omitempty"` // TypeUrl: The field type URL, without the scheme, for message or - // enumeration - // types. Example: "type.googleapis.com/google.protobuf.Timestamp". + // enumeration types. Example: + // "type.googleapis.com/google.protobuf.Timestamp". TypeUrl string `json:"typeUrl,omitempty"` // ForceSendFields is a list of field names (e.g. "Cardinality") to @@ -2451,9 +1868,8 @@ func (s *Field) MarshalJSON() ([]byte, error) { } // FlowErrorDetails: Encapsulation of flow-specific error details for -// debugging. -// Used as a details field on an error Status, not intended for external -// use. +// debugging. Used as a details field on an error Status, not intended +// for external use. type FlowErrorDetails struct { // ExceptionType: The type of exception (as a class name). ExceptionType string `json:"exceptionType,omitempty"` @@ -2488,23 +1904,15 @@ func (s *FlowErrorDetails) MarshalJSON() ([]byte, error) { // method. type GenerateConfigReportRequest struct { // NewConfig: Required. Service configuration for which we want to - // generate the report. - // For this version of API, the supported types - // are + // generate the report. For this version of API, the supported types are // google.api.servicemanagement.v1.ConfigRef, - // google.api.servicemanag - // ement.v1.ConfigSource, - // and google.api.Service + // google.api.servicemanagement.v1.ConfigSource, and google.api.Service NewConfig googleapi.RawMessage `json:"newConfig,omitempty"` // OldConfig: Optional. Service configuration against which the - // comparison will be done. - // For this version of API, the supported types - // are - // google.api.servicemanagement.v1.ConfigRef, - // google.api.servicemanag - // ement.v1.ConfigSource, - // and google.api.Service + // comparison will be done. For this version of API, the supported types + // are google.api.servicemanagement.v1.ConfigRef, + // google.api.servicemanagement.v1.ConfigSource, and google.api.Service OldConfig googleapi.RawMessage `json:"oldConfig,omitempty"` // ForceSendFields is a list of field names (e.g. "NewConfig") to @@ -2534,14 +1942,11 @@ func (s *GenerateConfigReportRequest) MarshalJSON() ([]byte, error) { // GenerateConfigReport method. 
type GenerateConfigReportResponse struct { // ChangeReports: list of ChangeReport, each corresponding to comparison - // between two - // service configurations. + // between two service configurations. ChangeReports []*ChangeReport `json:"changeReports,omitempty"` // Diagnostics: Errors / Linter warnings associated with the service - // definition this - // report - // belongs to. + // definition this report belongs to. Diagnostics []*Diagnostic `json:"diagnostics,omitempty"` // Id: ID of the service configuration this report belongs to. @@ -2580,8 +1985,7 @@ func (s *GenerateConfigReportResponse) MarshalJSON() ([]byte, error) { // GetIamPolicyRequest: Request message for `GetIamPolicy` method. type GetIamPolicyRequest struct { // Options: OPTIONAL: A `GetPolicyOptions` object for specifying options - // to - // `GetIamPolicy`. + // to `GetIamPolicy`. Options *GetPolicyOptions `json:"options,omitempty"` // ForceSendFields is a list of field names (e.g. "Options") to @@ -2610,24 +2014,14 @@ func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) { // GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. type GetPolicyOptions struct { // RequestedPolicyVersion: Optional. The policy format version to be - // returned. - // - // Valid values are 0, 1, and 3. Requests specifying an invalid value - // will be - // rejected. - // - // Requests for policies with any conditional bindings must specify - // version 3. - // Policies without any conditional bindings may specify any valid value - // or - // leave the field unset. - // - // To learn which resources support conditions in their IAM policies, - // see - // the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/r - // esource-policies). + // returned. Valid values are 0, 1, and 3. Requests specifying an + // invalid value will be rejected. Requests for policies with any + // conditional bindings must specify version 3. Policies without any + // conditional bindings may specify any valid value or leave the field + // unset. To learn which resources support conditions in their IAM + // policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -2656,26 +2050,19 @@ func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) { } // Http: Defines the HTTP configuration for an API service. It contains -// a list of -// HttpRule, each specifying the mapping of an RPC method -// to one or more HTTP REST API methods. +// a list of HttpRule, each specifying the mapping of an RPC method to +// one or more HTTP REST API methods. type Http struct { // FullyDecodeReservedExpansion: When set to true, URL path parameters - // will be fully URI-decoded except in - // cases of single segment matches in reserved expansion, where "%2F" - // will be - // left encoded. - // - // The default behavior is to not decode RFC 6570 reserved characters in - // multi + // will be fully URI-decoded except in cases of single segment matches + // in reserved expansion, where "%2F" will be left encoded. The default + // behavior is to not decode RFC 6570 reserved characters in multi // segment matches. FullyDecodeReservedExpansion bool `json:"fullyDecodeReservedExpansion,omitempty"` // Rules: A list of HTTP configuration rules that apply to individual - // API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. 
+ // API methods. **NOTE:** All service configuration rules follow "last + // one wins" order. Rules []*HttpRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -2703,403 +2090,187 @@ func (s *Http) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// HttpRule: # gRPC Transcoding -// -// gRPC Transcoding is a feature for mapping between a gRPC method and -// one or -// more HTTP REST endpoints. It allows developers to build a single API -// service -// that supports both gRPC APIs and REST APIs. Many systems, including -// [Google -// APIs](https://github.com/googleapis/googleapis), -// [Cloud Endpoints](https://cloud.google.com/endpoints), -// [gRPC -// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), -// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this -// feature -// and use it for large scale production services. -// -// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping -// specifies +// HttpRule: # gRPC Transcoding gRPC Transcoding is a feature for +// mapping between a gRPC method and one or more HTTP REST endpoints. It +// allows developers to build a single API service that supports both +// gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), [Cloud +// Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), and +// [Envoy](https://github.com/envoyproxy/envoy) proxy support this +// feature and use it for large scale production services. `HttpRule` +// defines the schema of the gRPC/REST mapping. The mapping specifies // how different portions of the gRPC request message are mapped to the -// URL -// path, URL query parameters, and HTTP request body. It also controls -// how the -// gRPC response message is mapped to the HTTP response body. `HttpRule` -// is -// typically specified as an `google.api.http` annotation on the gRPC -// method. -// -// Each mapping specifies a URL path template and an HTTP method. The -// path -// template may refer to one or more fields in the gRPC request message, -// as long -// as each field is a non-repeated field with a primitive (non-message) -// type. -// The path template controls how fields of the request message are -// mapped to -// the URL path. -// -// Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/{name=messages/*}" -// }; -// } -// } -// message GetMessageRequest { -// string name = 1; // Mapped to URL path. -// } -// message Message { -// string text = 1; // The resource content. -// } -// -// This enables an HTTP REST to gRPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: -// "messages/123456")` -// -// Any fields in the request message which are not bound by the path -// template -// automatically become HTTP query parameters if there is no HTTP -// request body. -// For example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get:"/v1/messages/{message_id}" -// }; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // Mapped to URL path. -// int64 revision = 2; // Mapped to URL query parameter -// `revision`. -// SubMessage sub = 3; // Mapped to URL query parameter -// `sub.subfield`. 
-// } -// -// This enables a HTTP JSON to RPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` -// | +// URL path, URL query parameters, and HTTP request body. It also +// controls how the gRPC response message is mapped to the HTTP response +// body. `HttpRule` is typically specified as an `google.api.http` +// annotation on the gRPC method. Each mapping specifies a URL path +// template and an HTTP method. The path template may refer to one or +// more fields in the gRPC request message, as long as each field is a +// non-repeated field with a primitive (non-message) type. The path +// template controls how fields of the request message are mapped to the +// URL path. Example: service Messaging { rpc +// GetMessage(GetMessageRequest) returns (Message) { option +// (google.api.http) = { get: "/v1/{name=messages/*}" }; } } message +// GetMessageRequest { string name = 1; // Mapped to URL path. } message +// Message { string text = 1; // The resource content. } This enables an +// HTTP REST to gRPC mapping as below: HTTP | gRPC -----|----- `GET +// /v1/messages/123456` | `GetMessage(name: "messages/123456")` Any +// fields in the request message which are not bound by the path +// template automatically become HTTP query parameters if there is no +// HTTP request body. For example: service Messaging { rpc +// GetMessage(GetMessageRequest) returns (Message) { option +// (google.api.http) = { get:"/v1/messages/{message_id}" }; } } message +// GetMessageRequest { message SubMessage { string subfield = 1; } +// string message_id = 1; // Mapped to URL path. int64 revision = 2; // +// Mapped to URL query parameter `revision`. SubMessage sub = 3; // +// Mapped to URL query parameter `sub.subfield`. } This enables a HTTP +// JSON to RPC mapping as below: HTTP | gRPC -----|----- `GET +// /v1/messages/123456?revision=2&sub.subfield=foo` | // `GetMessage(message_id: "123456" revision: 2 sub: -// SubMessage(subfield: -// "foo"))` -// -// Note that fields which are mapped to URL query parameters must have -// a -// primitive type or a repeated primitive type or a non-repeated message -// type. -// In the case of a repeated type, the parameter can be repeated in the -// URL -// as `...?param=A¶m=B`. In the case of a message type, each field -// of the -// message is mapped to a separate parameter, such -// as -// `...?foo.a=A&foo.b=B&foo.c=C`. -// -// For HTTP methods that allow a request body, the `body` -// field -// specifies the mapping. Consider a REST update method on the -// message resource collection: -// -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } -// -// The following HTTP JSON to RPC mapping is enabled, where -// the -// representation of the JSON in the request body is determined -// by -// protos JSON encoding: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | -// `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` -// -// The special name `*` can be used in the body mapping to define -// that -// every field not bound by the path template should be mapped to -// the -// request body. 
This enables the following alternative definition -// of -// the update method: -// -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// -// -// The following HTTP JSON to RPC mapping is enabled: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | -// `UpdateMessage(message_id: -// "123456" text: "Hi!")` -// -// Note that when using `*` in the body mapping, it is not possible -// to -// have HTTP parameters, as all fields not bound by the path end in -// the body. This makes this option more rarely used in practice -// when -// defining REST APIs. The common usage of `*` is in custom -// methods -// which don't use the URL at all for transferring data. -// -// It is possible to define multiple HTTP methods for one RPC by -// using -// the `additional_bindings` option. Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } -// -// This enables the following two alternative HTTP JSON to RPC -// mappings: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" -// message_id: -// "123456")` -// -// ## Rules for HTTP mapping -// -// 1. Leaf request fields (recursive expansion nested messages in the -// request -// message) are classified into three categories: -// - Fields referred by the path template. They are passed via the -// URL path. -// - Fields referred by the HttpRule.body. They are passed via the -// HTTP -// request body. -// - All other fields are passed via the URL query parameters, and -// the -// parameter name is the field path in the request message. A -// repeated -// field can be represented as multiple query parameters under the -// same -// name. -// 2. If HttpRule.body is "*", there is no URL query parameter, all -// fields -// are passed via URL path and HTTP request body. -// 3. If HttpRule.body is omitted, there is no HTTP request body, all -// fields are passed via URL path and URL query parameters. -// -// ### Path template syntax -// -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; -// -// The syntax `*` matches a single URL path segment. The syntax `**` -// matches -// zero or more URL path segments, which must be the last part of the -// URL path -// except the `Verb`. -// -// The syntax `Variable` matches part of the URL path as specified by -// its -// template. A variable template must not contain other variables. If a -// variable -// matches a single path segment, its template may be omitted, e.g. -// `{var}` -// is equivalent to `{var=*}`. -// -// The syntax `LITERAL` matches literal text in the URL path. If the -// `LITERAL` -// contains any reserved character, such characters should be -// percent-encoded -// before the matching. 
-// -// If a variable contains exactly one path segment, such as "{var}" -// or -// "{var=*}", when such a variable is expanded into a URL path on the -// client -// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. -// The -// server side does the reverse decoding. Such variables show up in -// the -// [Discovery -// Document](https://developers.google.com/discovery/v1/re -// ference/apis) as -// `{var}`. -// -// If a variable contains multiple path segments, such as -// "{var=foo/*}" -// or "{var=**}", when such a variable is expanded into a URL path on -// the -// client side, all characters except `[-_.~/0-9a-zA-Z]` are -// percent-encoded. -// The server side does the reverse decoding, except "%2F" and "%2f" are -// left -// unchanged. Such variables show up in -// the +// SubMessage(subfield: "foo"))` Note that fields which are mapped to +// URL query parameters must have a primitive type or a repeated +// primitive type or a non-repeated message type. In the case of a +// repeated type, the parameter can be repeated in the URL as +// `...?param=A¶m=B`. In the case of a message type, each field of +// the message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. For HTTP methods that allow a request +// body, the `body` field specifies the mapping. Consider a REST update +// method on the message resource collection: service Messaging { rpc +// UpdateMessage(UpdateMessageRequest) returns (Message) { option +// (google.api.http) = { patch: "/v1/messages/{message_id}" body: +// "message" }; } } message UpdateMessageRequest { string message_id = +// 1; // mapped to the URL Message message = 2; // mapped to the body } +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: HTTP | gRPC -----|----- `PATCH +// /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" message { text: "Hi!" })` The special name `*` can be used +// in the body mapping to define that every field not bound by the path +// template should be mapped to the request body. This enables the +// following alternative definition of the update method: service +// Messaging { rpc UpdateMessage(Message) returns (Message) { option +// (google.api.http) = { patch: "/v1/messages/{message_id}" body: "*" }; +// } } message Message { string message_id = 1; string text = 2; } The +// following HTTP JSON to RPC mapping is enabled: HTTP | gRPC +// -----|----- `PATCH /v1/messages/123456 { "text": "Hi!" }` | +// `UpdateMessage(message_id: "123456" text: "Hi!")` Note that when +// using `*` in the body mapping, it is not possible to have HTTP +// parameters, as all fields not bound by the path end in the body. This +// makes this option more rarely used in practice when defining REST +// APIs. The common usage of `*` is in custom methods which don't use +// the URL at all for transferring data. It is possible to define +// multiple HTTP methods for one RPC by using the `additional_bindings` +// option. 
Example: service Messaging { rpc +// GetMessage(GetMessageRequest) returns (Message) { option +// (google.api.http) = { get: "/v1/messages/{message_id}" +// additional_bindings { get: +// "/v1/users/{user_id}/messages/{message_id}" } }; } } message +// GetMessageRequest { string message_id = 1; string user_id = 2; } This +// enables the following two alternative HTTP JSON to RPC mappings: HTTP +// | gRPC -----|----- `GET /v1/messages/123456` | +// `GetMessage(message_id: "123456")` `GET /v1/users/me/messages/123456` +// | `GetMessage(user_id: "me" message_id: "123456")` ## Rules for HTTP +// mapping 1. Leaf request fields (recursive expansion nested messages +// in the request message) are classified into three categories: - +// Fields referred by the path template. They are passed via the URL +// path. - Fields referred by the HttpRule.body. They are passed via the +// HTTP request body. - All other fields are passed via the URL query +// parameters, and the parameter name is the field path in the request +// message. A repeated field can be represented as multiple query +// parameters under the same name. 2. If HttpRule.body is "*", there is +// no URL query parameter, all fields are passed via URL path and HTTP +// request body. 3. If HttpRule.body is omitted, there is no HTTP +// request body, all fields are passed via URL path and URL query +// parameters. ### Path template syntax Template = "/" Segments [ Verb ] +// ; Segments = Segment { "/" Segment } ; Segment = "*" | "**" | LITERAL +// | Variable ; Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; Verb = ":" LITERAL ; The syntax `*` +// matches a single URL path segment. The syntax `**` matches zero or +// more URL path segments, which must be the last part of the URL path +// except the `Verb`. The syntax `Variable` matches part of the URL path +// as specified by its template. A variable template must not contain +// other variables. If a variable matches a single path segment, its +// template may be omitted, e.g. `{var}` is equivalent to `{var=*}`. The +// syntax `LITERAL` matches literal text in the URL path. If the +// `LITERAL` contains any reserved character, such characters should be +// percent-encoded before the matching. If a variable contains exactly +// one path segment, such as "{var}" or "{var=*}", when such a +// variable is expanded into a URL path on the client side, all +// characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The server +// side does the reverse decoding. Such variables show up in the // [Discovery -// Document](https://developers.google.com/discovery/v1/re -// ference/apis) as -// `{+var}`. -// -// ## Using gRPC API Service Configuration -// -// gRPC API Service Configuration (service config) is a configuration -// language -// for configuring a gRPC service to become a user-facing product. -// The +// Document](https://developers.google.com/discovery/v1/reference/apis) +// as `{var}`. If a variable contains multiple path segments, such as +// "{var=foo/*}" or "{var=**}", when such a variable is expanded +// into a URL path on the client side, all characters except +// `[-_.~/0-9a-zA-Z]` are percent-encoded. The server side does the +// reverse decoding, except "%2F" and "%2f" are left unchanged. Such +// variables show up in the [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) +// as `{+var}`. 
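The rewrapped `HttpRule` comment above describes how one gRPC method can be exposed on several REST paths via `additional_bindings`. The same mapping can be expressed with the Go struct this file defines; a minimal sketch only, where the field names `Selector`, `Get`, and `AdditionalBindings` come from the struct later in this hunk and the `google.golang.org/api/servicemanagement/v1` import path is an assumption about where this vendored file lives:

```go
package main

import (
	"encoding/json"
	"fmt"

	servicemanagement "google.golang.org/api/servicemanagement/v1"
)

func main() {
	// Mirrors the GetMessage example from the comment: one primary GET
	// binding plus one additional per-user binding.
	rule := &servicemanagement.HttpRule{
		Selector: "example.v1.Messaging.GetMessage",
		Get:      "/v1/messages/{message_id}",
		AdditionalBindings: []*servicemanagement.HttpRule{
			{Get: "/v1/users/{user_id}/messages/{message_id}"},
		},
	}

	// The json tags on the struct (e.g. `json:"get,omitempty"`) mean this
	// marshals to the same shape used in a service config file.
	out, _ := json.MarshalIndent(rule, "", "  ")
	fmt.Println(string(out))
}
```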
## Using gRPC API Service Configuration gRPC API Service +// Configuration (service config) is a configuration language for +// configuring a gRPC service to become a user-facing product. The // service config is simply the YAML representation of the -// `google.api.Service` -// proto message. -// -// As an alternative to annotating your proto file, you can configure -// gRPC -// transcoding in your service config YAML files. You do this by -// specifying a -// `HttpRule` that maps the gRPC method to a REST endpoint, achieving -// the same -// effect as the proto annotation. This can be particularly useful if -// you -// have a proto that is reused in multiple services. Note that any -// transcoding +// `google.api.Service` proto message. As an alternative to annotating +// your proto file, you can configure gRPC transcoding in your service +// config YAML files. You do this by specifying a `HttpRule` that maps +// the gRPC method to a REST endpoint, achieving the same effect as the +// proto annotation. This can be particularly useful if you have a proto +// that is reused in multiple services. Note that any transcoding // specified in the service config will override any matching -// transcoding -// configuration in the proto. -// -// Example: -// -// http: -// rules: -// # Selects a gRPC method and applies HttpRule to it. -// - selector: example.v1.Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} -// -// ## Special notes -// -// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, -// the -// proto to JSON conversion must follow the -// [proto3 -// specification](https://developers.google.com/protocol-buffers/ -// docs/proto3#json). -// -// While the single segment variable follows the semantics of +// transcoding configuration in the proto. Example: http: rules: # +// Selects a gRPC method and applies HttpRule to it. - selector: +// example.v1.Messaging.GetMessage get: +// /v1/messages/{message_id}/{sub.subfield} ## Special notes When gRPC +// Transcoding is used to map a gRPC to JSON REST endpoints, the proto +// to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/pro +// to3#json). While the single segment variable follows the semantics of // [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple -// String -// Expansion, the multi segment variable **does not** follow RFC 6570 -// Section -// 3.2.3 Reserved Expansion. The reason is that the Reserved -// Expansion -// does not expand special characters like `?` and `#`, which would -// lead -// to invalid URLs. As the result, gRPC Transcoding uses a custom -// encoding -// for multi segment variables. -// -// The path variables **must not** refer to any repeated or mapped -// field, +// String Expansion, the multi segment variable **does not** follow RFC +// 6570 Section 3.2.3 Reserved Expansion. The reason is that the +// Reserved Expansion does not expand special characters like `?` and +// `#`, which would lead to invalid URLs. As the result, gRPC +// Transcoding uses a custom encoding for multi segment variables. The +// path variables **must not** refer to any repeated or mapped field, // because client libraries are not capable of handling such variable -// expansion. -// -// The path variables **must not** capture the leading "/" character. -// The reason -// is that the most common use case "{var}" does not capture the leading -// "/" -// character. 
For consistency, all path variables must share the same -// behavior. -// -// Repeated message fields must not be mapped to URL query parameters, -// because -// no client library can support such complicated mapping. -// -// If an API needs to use a JSON array for request or response body, it -// can map -// the request or response body to a repeated field. However, some -// gRPC -// Transcoding implementations may not support this feature. +// expansion. The path variables **must not** capture the leading "/" +// character. The reason is that the most common use case "{var}" does +// not capture the leading "/" character. For consistency, all path +// variables must share the same behavior. Repeated message fields must +// not be mapped to URL query parameters, because no client library can +// support such complicated mapping. If an API needs to use a JSON array +// for request or response body, it can map the request or response body +// to a repeated field. However, some gRPC Transcoding implementations +// may not support this feature. type HttpRule struct { // AdditionalBindings: Additional HTTP bindings for the selector. Nested - // bindings must - // not contain an `additional_bindings` field themselves (that is, - // the nesting may only be one level deep). + // bindings must not contain an `additional_bindings` field themselves + // (that is, the nesting may only be one level deep). AdditionalBindings []*HttpRule `json:"additionalBindings,omitempty"` // AllowHalfDuplex: When this flag is set to true, HTTP requests will be - // allowed to invoke a - // half-duplex streaming method. + // allowed to invoke a half-duplex streaming method. AllowHalfDuplex bool `json:"allowHalfDuplex,omitempty"` // Body: The name of the request field whose value is mapped to the HTTP - // request - // body, or `*` for mapping all request fields not captured by the - // path - // pattern to the HTTP body, or omitted for not having any HTTP request - // body. - // - // NOTE: the referred field must be present at the top-level of the - // request - // message type. + // request body, or `*` for mapping all request fields not captured by + // the path pattern to the HTTP body, or omitted for not having any HTTP + // request body. NOTE: the referred field must be present at the + // top-level of the request message type. Body string `json:"body,omitempty"` // Custom: The custom pattern is used for specifying an HTTP method that - // is not - // included in the `pattern` field, such as HEAD, or "*" to leave - // the - // HTTP method unspecified for this rule. The wild-card rule is - // useful - // for services that provide content to Web (HTML) clients. + // is not included in the `pattern` field, such as HEAD, or "*" to leave + // the HTTP method unspecified for this rule. The wild-card rule is + // useful for services that provide content to Web (HTML) clients. Custom *CustomHttpPattern `json:"custom,omitempty"` // Delete: Maps to HTTP DELETE. Used for deleting a resource. Delete string `json:"delete,omitempty"` - // Get: Maps to HTTP GET. Used for listing and getting information - // about + // Get: Maps to HTTP GET. Used for listing and getting information about // resources. Get string `json:"get,omitempty"` @@ -3114,19 +2285,13 @@ type HttpRule struct { Put string `json:"put,omitempty"` // ResponseBody: Optional. The name of the response field whose value is - // mapped to the HTTP - // response body. When omitted, the entire response message will be - // used - // as the HTTP response body. 
- // - // NOTE: The referred field must be present at the top-level of the - // response - // message type. + // mapped to the HTTP response body. When omitted, the entire response + // message will be used as the HTTP response body. NOTE: The referred + // field must be present at the top-level of the response message type. ResponseBody string `json:"responseBody,omitempty"` - // Selector: Selects a method to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects a method to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. "AdditionalBindings") @@ -3162,16 +2327,11 @@ type JwtLocation struct { Query string `json:"query,omitempty"` // ValuePrefix: The value prefix. The value format is - // "value_prefix{token}" - // Only applies to "in" header type. Must be empty for "in" query - // type. - // If not empty, the header value has to match (case sensitive) this - // prefix. - // If not matched, JWT will not be extracted. If matched, JWT will - // be - // extracted after the prefix is removed. - // - // For example, for "Authorization: Bearer {JWT}", + // "value_prefix{token}" Only applies to "in" header type. Must be empty + // for "in" query type. If not empty, the header value has to match + // (case sensitive) this prefix. If not matched, JWT will not be + // extracted. If matched, JWT will be extracted after the prefix is + // removed. For example, for "Authorization: Bearer {JWT}", // value_prefix="Bearer " with a space at the end. ValuePrefix string `json:"valuePrefix,omitempty"` @@ -3382,39 +2542,29 @@ func (s *ListServicesResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// LogDescriptor: A description of a log type. Example in YAML format: -// -// - name: library.googleapis.com/activity_history -// description: The history of borrowing and returning library -// items. -// display_name: Activity -// labels: -// - key: /customer_id -// description: Identifier of a library customer +// LogDescriptor: A description of a log type. Example in YAML format: - +// name: library.googleapis.com/activity_history description: The +// history of borrowing and returning library items. display_name: +// Activity labels: - key: /customer_id description: Identifier of a +// library customer type LogDescriptor struct { // Description: A human-readable description of this log. This - // information appears in - // the documentation and can contain details. + // information appears in the documentation and can contain details. Description string `json:"description,omitempty"` // DisplayName: The human-readable name for this log. This information - // appears on - // the user interface and should be concise. + // appears on the user interface and should be concise. DisplayName string `json:"displayName,omitempty"` // Labels: The set of labels that are available to describe a specific - // log entry. - // Runtime requests that contain labels not specified here - // are - // considered invalid. + // log entry. Runtime requests that contain labels not specified here + // are considered invalid. Labels []*LabelDescriptor `json:"labels,omitempty"` // Name: The name of the log. 
It must be less than 512 characters long - // and can - // include the following characters: upper- and lower-case - // alphanumeric - // characters [A-Za-z0-9], and punctuation characters including - // slash, underscore, hyphen, period [/_-.]. + // and can include the following characters: upper- and lower-case + // alphanumeric characters [A-Za-z0-9], and punctuation characters + // including slash, underscore, hyphen, period [/_-.]. Name string `json:"name,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -3440,54 +2590,30 @@ func (s *LogDescriptor) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Logging: Logging configuration of the service. -// -// The following example shows how to configure logs to be sent to -// the -// producer and consumer projects. In the example, the -// `activity_history` -// log is sent to both the producer and consumer projects, whereas -// the -// `purchase_history` log is only sent to the producer project. -// -// monitored_resources: -// - type: library.googleapis.com/branch -// labels: -// - key: /city -// description: The city where the library branch is located -// in. -// - key: /name -// description: The name of the branch. -// logs: -// - name: activity_history -// labels: -// - key: /customer_id -// - name: purchase_history -// logging: -// producer_destinations: -// - monitored_resource: library.googleapis.com/branch -// logs: -// - activity_history -// - purchase_history -// consumer_destinations: -// - monitored_resource: library.googleapis.com/branch -// logs: -// - activity_history +// Logging: Logging configuration of the service. The following example +// shows how to configure logs to be sent to the producer and consumer +// projects. In the example, the `activity_history` log is sent to both +// the producer and consumer projects, whereas the `purchase_history` +// log is only sent to the producer project. monitored_resources: - +// type: library.googleapis.com/branch labels: - key: /city description: +// The city where the library branch is located in. - key: /name +// description: The name of the branch. logs: - name: activity_history +// labels: - key: /customer_id - name: purchase_history logging: +// producer_destinations: - monitored_resource: +// library.googleapis.com/branch logs: - activity_history - +// purchase_history consumer_destinations: - monitored_resource: +// library.googleapis.com/branch logs: - activity_history type Logging struct { // ConsumerDestinations: Logging configurations for sending logs to the - // consumer project. - // There can be multiple consumer destinations, each one must have - // a - // different monitored resource type. A log can be used in at most - // one consumer destination. + // consumer project. There can be multiple consumer destinations, each + // one must have a different monitored resource type. A log can be used + // in at most one consumer destination. ConsumerDestinations []*LoggingDestination `json:"consumerDestinations,omitempty"` // ProducerDestinations: Logging configurations for sending logs to the - // producer project. - // There can be multiple producer destinations, each one must have - // a - // different monitored resource type. A log can be used in at most - // one producer destination. + // producer project. There can be multiple producer destinations, each + // one must have a different monitored resource type. A log can be used + // in at most one producer destination. 
ProducerDestinations []*LoggingDestination `json:"producerDestinations,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -3516,19 +2642,16 @@ func (s *Logging) MarshalJSON() ([]byte, error) { } // LoggingDestination: Configuration of a specific logging destination -// (the producer project -// or the consumer project). +// (the producer project or the consumer project). type LoggingDestination struct { // Logs: Names of the logs to be sent to this destination. Each name - // must - // be defined in the Service.logs section. If the log name is - // not a domain scoped name, it will be automatically prefixed with - // the service name followed by "/". + // must be defined in the Service.logs section. If the log name is not a + // domain scoped name, it will be automatically prefixed with the + // service name followed by "/". Logs []string `json:"logs,omitempty"` // MonitoredResource: The monitored resource type. The type must be - // defined in the - // Service.monitored_resources section. + // defined in the Service.monitored_resources section. MonitoredResource string `json:"monitoredResource,omitempty"` // ForceSendFields is a list of field names (e.g. "Logs") to @@ -3555,16 +2678,14 @@ func (s *LoggingDestination) MarshalJSON() ([]byte, error) { } // ManagedService: The full representation of a Service that is managed -// by -// Google Service Management. +// by Google Service Management. type ManagedService struct { // ProducerProjectId: ID of the project that produces and owns this // service. ProducerProjectId string `json:"producerProjectId,omitempty"` // ServiceName: The name of the service. See the - // [overview](/service-management/overview) - // for naming requirements. + // [overview](/service-management/overview) for naming requirements. ServiceName string `json:"serviceName,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -3646,32 +2767,26 @@ func (s *Method) MarshalJSON() ([]byte, error) { } // MetricDescriptor: Defines a metric type and its schema. Once a metric -// descriptor is created, -// deleting or altering it stops data collection and makes the metric -// type's -// existing data unusable. +// descriptor is created, deleting or altering it stops data collection +// and makes the metric type's existing data unusable. type MetricDescriptor struct { // Description: A detailed description of the metric, which can be used // in documentation. Description string `json:"description,omitempty"` // DisplayName: A concise name for the metric, which can be displayed in - // user interfaces. - // Use sentence case without an ending period, for example "Request - // count". - // This field is optional but it is recommended to be set for any - // metrics - // associated with user-visible concepts, such as Quota. + // user interfaces. Use sentence case without an ending period, for + // example "Request count". This field is optional but it is recommended + // to be set for any metrics associated with user-visible concepts, such + // as Quota. DisplayName string `json:"displayName,omitempty"` - // Labels: The set of labels that can be used to describe a - // specific - // instance of this metric type. For example, - // the - // `appengine.googleapis.com/http/server/response_latencies` metric - // type has a label for the HTTP response code, `response_code`, so - // you can look at latencies for successful responses or just - // for responses that failed. 
+ // Labels: The set of labels that can be used to describe a specific + // instance of this metric type. For example, the + // `appengine.googleapis.com/http/server/response_latencies` metric type + // has a label for the HTTP response code, `response_code`, so you can + // look at latencies for successful responses or just for responses that + // failed. Labels []*LabelDescriptor `json:"labels,omitempty"` // LaunchStage: Optional. The launch stage of the metric definition. @@ -3683,50 +2798,31 @@ type MetricDescriptor struct { // "PRELAUNCH" - Prelaunch features are hidden from users and are only // visible internally. // "EARLY_ACCESS" - Early Access features are limited to a closed - // group of testers. To use - // these features, you must sign up in advance and sign a Trusted - // Tester - // agreement (which includes confidentiality provisions). These features - // may - // be unstable, changed in backward-incompatible ways, and are - // not - // guaranteed to be released. + // group of testers. To use these features, you must sign up in advance + // and sign a Trusted Tester agreement (which includes confidentiality + // provisions). These features may be unstable, changed in + // backward-incompatible ways, and are not guaranteed to be released. // "ALPHA" - Alpha is a limited availability test for releases before - // they are cleared - // for widespread use. By Alpha, all significant design issues are - // resolved - // and we are in the process of verifying functionality. Alpha - // customers - // need to apply for access, agree to applicable terms, and have - // their - // projects whitelisted. Alpha releases don’t have to be feature - // complete, - // no SLAs are provided, and there are no technical support obligations, - // but - // they will be far enough along that customers can actually use them - // in - // test environments or for limited-use tests -- just like they would - // in - // normal production cases. + // they are cleared for widespread use. By Alpha, all significant design + // issues are resolved and we are in the process of verifying + // functionality. Alpha customers need to apply for access, agree to + // applicable terms, and have their projects whitelisted. Alpha releases + // don’t have to be feature complete, no SLAs are provided, and there + // are no technical support obligations, but they will be far enough + // along that customers can actually use them in test environments or + // for limited-use tests -- just like they would in normal production + // cases. // "BETA" - Beta is the point at which we are ready to open a release - // for any - // customer to use. There are no SLA or technical support obligations in - // a - // Beta release. Products will be complete from a feature perspective, - // but - // may have some open outstanding issues. Beta releases are suitable - // for - // limited production use cases. + // for any customer to use. There are no SLA or technical support + // obligations in a Beta release. Products will be complete from a + // feature perspective, but may have some open outstanding issues. Beta + // releases are suitable for limited production use cases. // "GA" - GA features are open to all developers and are considered - // stable and - // fully qualified for production use. + // stable and fully qualified for production use. // "DEPRECATED" - Deprecated features are scheduled to be shut down - // and removed. 
For more - // information, see the “Deprecation Policy” section of our [Terms - // of - // Service](https://cloud.google.com/terms/) - // and the [Google Cloud Platform Subject to the - // Deprecation + // and removed. For more information, see the “Deprecation Policy” + // section of our [Terms of Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation // Policy](https://cloud.google.com/terms/deprecation) documentation. LaunchStage string `json:"launchStage,omitempty"` @@ -3735,188 +2831,106 @@ type MetricDescriptor struct { Metadata *MetricDescriptorMetadata `json:"metadata,omitempty"` // MetricKind: Whether the metric records instantaneous values, changes - // to a value, etc. - // Some combinations of `metric_kind` and `value_type` might not be - // supported. + // to a value, etc. Some combinations of `metric_kind` and `value_type` + // might not be supported. // // Possible values: // "METRIC_KIND_UNSPECIFIED" - Do not use this default value. // "GAUGE" - An instantaneous measurement of a value. // "DELTA" - The change in a value during a time interval. - // "CUMULATIVE" - A value accumulated over a time interval. - // Cumulative - // measurements in a time series should have the same start time - // and increasing end times, until an event resets the cumulative - // value to zero and sets a new start time for the following - // points. + // "CUMULATIVE" - A value accumulated over a time interval. Cumulative + // measurements in a time series should have the same start time and + // increasing end times, until an event resets the cumulative value to + // zero and sets a new start time for the following points. MetricKind string `json:"metricKind,omitempty"` - // MonitoredResourceTypes: Read-only. If present, then a time - // series, which is identified partially by - // a metric type and a MonitoredResourceDescriptor, that is - // associated - // with this metric type can only be associated with one of the - // monitored - // resource types listed here. + // MonitoredResourceTypes: Read-only. If present, then a time series, + // which is identified partially by a metric type and a + // MonitoredResourceDescriptor, that is associated with this metric type + // can only be associated with one of the monitored resource types + // listed here. MonitoredResourceTypes []string `json:"monitoredResourceTypes,omitempty"` // Name: The resource name of the metric descriptor. Name string `json:"name,omitempty"` - // Type: The metric type, including its DNS name prefix. The type is - // not - // URL-encoded. All user-defined metric types have the DNS - // name - // `custom.googleapis.com` or `external.googleapis.com`. Metric types - // should - // use a natural hierarchical grouping. For example: - // - // "custom.googleapis.com/invoice/paid/amount" - // "external.googleapis.com/prometheus/up" - // "appengine.googleapis.com/http/server/response_latencies" + // Type: The metric type, including its DNS name prefix. The type is not + // URL-encoded. All user-defined metric types have the DNS name + // `custom.googleapis.com` or `external.googleapis.com`. Metric types + // should use a natural hierarchical grouping. For example: + // "custom.googleapis.com/invoice/paid/amount" + // "external.googleapis.com/prometheus/up" + // "appengine.googleapis.com/http/server/response_latencies" Type string `json:"type,omitempty"` // Unit: The units in which the metric value is reported. 
It is only - // applicable - // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The - // `unit` - // defines the representation of the stored metric values. - // - // Different systems may scale the values to be more easily displayed - // (so a - // value of `0.02KBy` _might_ be displayed as `20By`, and a value - // of - // `3523KBy` _might_ be displayed as `3.5MBy`). However, if the `unit` - // is - // `KBy`, then the value of the metric is always in thousands of bytes, - // no - // matter how it may be displayed.. - // - // If you want a custom metric to record the exact number of CPU-seconds - // used - // by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` - // is - // `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses - // 12,005 - // CPU-seconds, then the value is written as `12005`. - // - // Alternatively, if you want a custom metric to record data in a - // more + // applicable if the `value_type` is `INT64`, `DOUBLE`, or + // `DISTRIBUTION`. The `unit` defines the representation of the stored + // metric values. Different systems may scale the values to be more + // easily displayed (so a value of `0.02KBy` _might_ be displayed as + // `20By`, and a value of `3523KBy` _might_ be displayed as `3.5MBy`). + // However, if the `unit` is `KBy`, then the value of the metric is + // always in thousands of bytes, no matter how it may be displayed.. If + // you want a custom metric to record the exact number of CPU-seconds + // used by a job, you can create an `INT64 CUMULATIVE` metric whose + // `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the + // job uses 12,005 CPU-seconds, then the value is written as `12005`. + // Alternatively, if you want a custom metric to record data in a more // granular way, you can create a `DOUBLE CUMULATIVE` metric whose - // `unit` is - // `ks{CPU}`, and then write the value `12.005` (which is - // `12005/1000`), - // or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). - // - // The supported units are a subset of [The Unified Code for Units - // of - // Measure](http://unitsofmeasure.org/ucum.html) standard: - // - // **Basic units (UNIT)** - // - // * `bit` bit - // * `By` byte - // * `s` second - // * `min` minute - // * `h` hour - // * `d` day - // - // **Prefixes (PREFIX)** - // - // * `k` kilo (10^3) - // * `M` mega (10^6) - // * `G` giga (10^9) - // * `T` tera (10^12) - // * `P` peta (10^15) - // * `E` exa (10^18) - // * `Z` zetta (10^21) - // * `Y` yotta (10^24) - // - // * `m` milli (10^-3) - // * `u` micro (10^-6) - // * `n` nano (10^-9) - // * `p` pico (10^-12) - // * `f` femto (10^-15) - // * `a` atto (10^-18) - // * `z` zepto (10^-21) - // * `y` yocto (10^-24) - // - // * `Ki` kibi (2^10) - // * `Mi` mebi (2^20) - // * `Gi` gibi (2^30) - // * `Ti` tebi (2^40) - // * `Pi` pebi (2^50) - // - // **Grammar** - // - // The grammar also includes these connectors: - // - // * `/` division or ratio (as an infix operator). For examples, - // `kBy/{email}` or `MiBy/10ms` (although you should almost - // never - // have `/s` in a metric `unit`; rates should always be - // computed at - // query time from the underlying cumulative or delta value). - // * `.` multiplication or composition (as an infix operator). For - // examples, `GBy.d` or `k{watt}.h`. - // - // The grammar for a unit is as follows: - // - // Expression = Component { "." 
Component } { "/" Component } ; - // - // Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] - // | Annotation - // | "1" - // ; - // - // Annotation = "{" NAME "}" ; - // - // Notes: - // - // * `Annotation` is just a comment if it follows a `UNIT`. If the - // annotation - // is used alone, then the unit is equivalent to `1`. For examples, - // `{request}/s == 1/s`, `By{transmitted}/s == By/s`. - // * `NAME` is a sequence of non-blank printable ASCII characters not - // containing `{` or `}`. - // * `1` represents a unitary [dimensionless - // unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, - // such - // as in `1/s`. It is typically used when none of the basic units - // are - // appropriate. For example, "new users per day" can be represented - // as - // `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 - // new - // users). Alternatively, "thousands of page views per day" would be - // represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a - // metric - // value of `5.3` would mean "5300 page views per day"). - // * `%` represents dimensionless value of 1/100, and annotates values - // giving - // a percentage (so the metric values are typically in the range of - // 0..100, - // and a metric value `3` means "3 percent"). - // * `10^2.%` indicates a metric contains a ratio, typically in the - // range - // 0..1, that will be multiplied by 100 and displayed as a - // percentage - // (so a metric value `0.03` means "3 percent"). + // `unit` is `ks{CPU}`, and then write the value `12.005` (which is + // `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is + // `12005/1024`). The supported units are a subset of [The Unified Code + // for Units of Measure](http://unitsofmeasure.org/ucum.html) standard: + // **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` + // minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** + // * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera + // (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * + // `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano + // (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) + // * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` + // mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) + // **Grammar** The grammar also includes these connectors: * `/` + // division or ratio (as an infix operator). For examples, `kBy/{email}` + // or `MiBy/10ms` (although you should almost never have `/s` in a + // metric `unit`; rates should always be computed at query time from the + // underlying cumulative or delta value). * `.` multiplication or + // composition (as an infix operator). For examples, `GBy.d` or + // `k{watt}.h`. The grammar for a unit is as follows: Expression = + // Component { "." Component } { "/" Component } ; Component = ( [ + // PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation + // = "{" NAME "}" ; Notes: * `Annotation` is just a comment if it + // follows a `UNIT`. If the annotation is used alone, then the unit is + // equivalent to `1`. For examples, `{request}/s == 1/s`, + // `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank + // printable ASCII characters not containing `{` or `}`. * `1` + // represents a unitary [dimensionless + // unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, + // such as in `1/s`. It is typically used when none of the basic units + // are appropriate. 
For example, "new users per day" can be represented + // as `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new + // users). Alternatively, "thousands of page views per day" would be + // represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric + // value of `5.3` would mean "5300 page views per day"). * `%` + // represents dimensionless value of 1/100, and annotates values giving + // a percentage (so the metric values are typically in the range of + // 0..100, and a metric value `3` means "3 percent"). * `10^2.%` + // indicates a metric contains a ratio, typically in the range 0..1, + // that will be multiplied by 100 and displayed as a percentage (so a + // metric value `0.03` means "3 percent"). Unit string `json:"unit,omitempty"` // ValueType: Whether the measurement is an integer, a floating-point - // number, etc. - // Some combinations of `metric_kind` and `value_type` might not be - // supported. + // number, etc. Some combinations of `metric_kind` and `value_type` + // might not be supported. // // Possible values: // "VALUE_TYPE_UNSPECIFIED" - Do not use this default value. - // "BOOL" - The value is a boolean. - // This value type can be used only if the metric kind is `GAUGE`. + // "BOOL" - The value is a boolean. This value type can be used only + // if the metric kind is `GAUGE`. // "INT64" - The value is a signed 64-bit integer. // "DOUBLE" - The value is a double precision floating point number. - // "STRING" - The value is a text string. - // This value type can be used only if the metric kind is `GAUGE`. + // "STRING" - The value is a text string. This value type can be used + // only if the metric kind is `GAUGE`. // "DISTRIBUTION" - The value is a `Distribution`. // "MONEY" - The value is money. ValueType string `json:"valueType,omitempty"` @@ -3948,10 +2962,8 @@ func (s *MetricDescriptor) MarshalJSON() ([]byte, error) { // guide the usage of a metric. type MetricDescriptorMetadata struct { // IngestDelay: The delay of data points caused by ingestion. Data - // points older than this - // age are guaranteed to be ingested and available to be read, - // excluding - // data loss due to errors. + // points older than this age are guaranteed to be ingested and + // available to be read, excluding data loss due to errors. IngestDelay string `json:"ingestDelay,omitempty"` // LaunchStage: Deprecated. Must use the MetricDescriptor.launch_stage @@ -3964,60 +2976,38 @@ type MetricDescriptorMetadata struct { // "PRELAUNCH" - Prelaunch features are hidden from users and are only // visible internally. // "EARLY_ACCESS" - Early Access features are limited to a closed - // group of testers. To use - // these features, you must sign up in advance and sign a Trusted - // Tester - // agreement (which includes confidentiality provisions). These features - // may - // be unstable, changed in backward-incompatible ways, and are - // not - // guaranteed to be released. + // group of testers. To use these features, you must sign up in advance + // and sign a Trusted Tester agreement (which includes confidentiality + // provisions). These features may be unstable, changed in + // backward-incompatible ways, and are not guaranteed to be released. // "ALPHA" - Alpha is a limited availability test for releases before - // they are cleared - // for widespread use. By Alpha, all significant design issues are - // resolved - // and we are in the process of verifying functionality. 
Alpha - // customers - // need to apply for access, agree to applicable terms, and have - // their - // projects whitelisted. Alpha releases don’t have to be feature - // complete, - // no SLAs are provided, and there are no technical support obligations, - // but - // they will be far enough along that customers can actually use them - // in - // test environments or for limited-use tests -- just like they would - // in - // normal production cases. + // they are cleared for widespread use. By Alpha, all significant design + // issues are resolved and we are in the process of verifying + // functionality. Alpha customers need to apply for access, agree to + // applicable terms, and have their projects whitelisted. Alpha releases + // don’t have to be feature complete, no SLAs are provided, and there + // are no technical support obligations, but they will be far enough + // along that customers can actually use them in test environments or + // for limited-use tests -- just like they would in normal production + // cases. // "BETA" - Beta is the point at which we are ready to open a release - // for any - // customer to use. There are no SLA or technical support obligations in - // a - // Beta release. Products will be complete from a feature perspective, - // but - // may have some open outstanding issues. Beta releases are suitable - // for - // limited production use cases. + // for any customer to use. There are no SLA or technical support + // obligations in a Beta release. Products will be complete from a + // feature perspective, but may have some open outstanding issues. Beta + // releases are suitable for limited production use cases. // "GA" - GA features are open to all developers and are considered - // stable and - // fully qualified for production use. + // stable and fully qualified for production use. // "DEPRECATED" - Deprecated features are scheduled to be shut down - // and removed. For more - // information, see the “Deprecation Policy” section of our [Terms - // of - // Service](https://cloud.google.com/terms/) - // and the [Google Cloud Platform Subject to the - // Deprecation + // and removed. For more information, see the “Deprecation Policy” + // section of our [Terms of Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation // Policy](https://cloud.google.com/terms/deprecation) documentation. LaunchStage string `json:"launchStage,omitempty"` // SamplePeriod: The sampling period of metric data points. For metrics - // which are written - // periodically, consecutive data points are stored at this time - // interval, - // excluding data loss due to errors. Metrics with a higher granularity - // have - // a smaller sampling period. + // which are written periodically, consecutive data points are stored at + // this time interval, excluding data loss due to errors. Metrics with a + // higher granularity have a smaller sampling period. SamplePeriod string `json:"samplePeriod,omitempty"` // ForceSendFields is a list of field names (e.g. "IngestDelay") to @@ -4044,23 +3034,18 @@ func (s *MetricDescriptorMetadata) MarshalJSON() ([]byte, error) { } // MetricRule: Bind API methods to metrics. Binding a method to a metric -// causes that -// metric's configured quota behaviors to apply to the method call. +// causes that metric's configured quota behaviors to apply to the +// method call. 
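The `MetricRule` comment rewritten just above ties a method selector to per-call metric costs. A small illustrative sketch under the same assumed package path as the earlier example; only the `Selector` and `MetricCosts` field names are taken from the diff, and the method and metric names are hypothetical:

```go
package main

import (
	"fmt"

	servicemanagement "google.golang.org/api/servicemanagement/v1"
)

func main() {
	// Each call to the selected method increases the named metric by the
	// given non-negative cost; quota limits for that metric are defined
	// elsewhere in the service config.
	rule := &servicemanagement.MetricRule{
		Selector: "example.v1.Library.ListShelves",
		MetricCosts: map[string]string{
			"library.googleapis.com/read_requests": "1",
		},
	}
	fmt.Printf("%s costs %v per call\n", rule.Selector, rule.MetricCosts)
}
```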
type MetricRule struct { // MetricCosts: Metrics to update when the selected methods are called, - // and the associated - // cost applied to each metric. - // - // The key of the map is the metric name, and the values are the - // amount - // increased for the metric against which the quota limits are - // defined. - // The value must not be negative. + // and the associated cost applied to each metric. The key of the map is + // the metric name, and the values are the amount increased for the + // metric against which the quota limits are defined. The value must not + // be negative. MetricCosts map[string]string `json:"metricCosts,omitempty"` - // Selector: Selects the methods to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects the methods to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. "MetricCosts") to @@ -4087,96 +3072,44 @@ func (s *MetricRule) MarshalJSON() ([]byte, error) { } // Mixin: Declares an API Interface to be included in this interface. -// The including -// interface must redeclare all the methods from the included interface, -// but -// documentation and options are inherited as follows: -// -// - If after comment and whitespace stripping, the documentation -// string of the redeclared method is empty, it will be inherited -// from the original method. -// -// - Each annotation belonging to the service config (http, -// visibility) which is not set in the redeclared method will be -// inherited. -// -// - If an http annotation is inherited, the path pattern will be -// modified as follows. Any version prefix will be replaced by the -// version of the including interface plus the root path if -// specified. -// -// Example of a simple mixin: -// -// package google.acl.v1; -// service AccessControl { -// // Get the underlying ACL object. -// rpc GetAcl(GetAclRequest) returns (Acl) { -// option (google.api.http).get = "/v1/{resource=**}:getAcl"; -// } -// } -// -// package google.storage.v2; -// service Storage { -// // rpc GetAcl(GetAclRequest) returns (Acl); -// -// // Get a data record. -// rpc GetData(GetDataRequest) returns (Data) { -// option (google.api.http).get = "/v2/{resource=**}"; -// } -// } -// -// Example of a mixin configuration: -// -// apis: -// - name: google.storage.v2.Storage -// mixins: -// - name: google.acl.v1.AccessControl -// -// The mixin construct implies that all methods in `AccessControl` -// are -// also declared with same name and request/response types in -// `Storage`. A documentation generator or annotation processor will -// see the effective `Storage.GetAcl` method after -// inherting -// documentation and annotations as follows: -// -// service Storage { -// // Get the underlying ACL object. -// rpc GetAcl(GetAclRequest) returns (Acl) { -// option (google.api.http).get = "/v2/{resource=**}:getAcl"; -// } -// ... -// } -// -// Note how the version in the path pattern changed from `v1` to -// `v2`. -// -// If the `root` field in the mixin is specified, it should be -// a -// relative path under which inherited HTTP paths are placed. Example: -// -// apis: -// - name: google.storage.v2.Storage -// mixins: -// - name: google.acl.v1.AccessControl -// root: acls -// -// This implies the following inherited HTTP annotation: -// -// service Storage { -// // Get the underlying ACL object. 
-// rpc GetAcl(GetAclRequest) returns (Acl) { -// option (google.api.http).get = -// "/v2/acls/{resource=**}:getAcl"; -// } -// ... -// } +// The including interface must redeclare all the methods from the +// included interface, but documentation and options are inherited as +// follows: - If after comment and whitespace stripping, the +// documentation string of the redeclared method is empty, it will be +// inherited from the original method. - Each annotation belonging to +// the service config (http, visibility) which is not set in the +// redeclared method will be inherited. - If an http annotation is +// inherited, the path pattern will be modified as follows. Any version +// prefix will be replaced by the version of the including interface +// plus the root path if specified. Example of a simple mixin: package +// google.acl.v1; service AccessControl { // Get the underlying ACL +// object. rpc GetAcl(GetAclRequest) returns (Acl) { option +// (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package +// google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) +// returns (Acl); // Get a data record. rpc GetData(GetDataRequest) +// returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; +// } } Example of a mixin configuration: apis: - name: +// google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in `Storage`. +// A documentation generator or annotation processor will see the +// effective `Storage.GetAcl` method after inheriting documentation and +// annotations as follows: service Storage { // Get the underlying ACL +// object. rpc GetAcl(GetAclRequest) returns (Acl) { option +// (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how +// the version in the path pattern changed from `v1` to `v2`. If the +// `root` field in the mixin is specified, it should be a relative path +// under which inherited HTTP paths are placed. Example: apis: - name: +// google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl +// root: acls This implies the following inherited HTTP annotation: +// service Storage { // Get the underlying ACL object. rpc +// GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = +// "/v2/acls/{resource=**}:getAcl"; } ... } type Mixin struct { // Name: The fully qualified name of the interface which is included. Name string `json:"name,omitempty"` - // Root: If non-empty specifies a path under which inherited HTTP - // paths + // Root: If non-empty specifies a path under which inherited HTTP paths // are rooted. Root string `json:"root,omitempty"` @@ -4204,39 +3137,28 @@ func (s *Mixin) MarshalJSON() ([]byte, error) { } // MonitoredResourceDescriptor: An object that describes the schema of a -// MonitoredResource object using a -// type name and a set of labels. For example, the monitored -// resource -// descriptor for Google Compute Engine VM instances has a type -// of -// "gce_instance" and specifies the use of the labels "instance_id" -// and -// "zone" to identify particular VM instances. -// -// Different APIs can support different monitored resource types. APIs -// generally -// provide a `list` method that returns the monitored resource -// descriptors used -// by the API. +// MonitoredResource object using a type name and a set of labels. 
For +// example, the monitored resource descriptor for Google Compute Engine +// VM instances has a type of "gce_instance" and specifies the use of +// the labels "instance_id" and "zone" to identify particular VM +// instances. Different APIs can support different monitored resource +// types. APIs generally provide a `list` method that returns the +// monitored resource descriptors used by the API. type MonitoredResourceDescriptor struct { // Description: Optional. A detailed description of the monitored - // resource type that might - // be used in documentation. + // resource type that might be used in documentation. Description string `json:"description,omitempty"` // DisplayName: Optional. A concise name for the monitored resource type - // that might be - // displayed in user interfaces. It should be a Title Cased Noun - // Phrase, - // without any article or other determiners. For example, - // "Google Cloud SQL Database". + // that might be displayed in user interfaces. It should be a Title + // Cased Noun Phrase, without any article or other determiners. For + // example, "Google Cloud SQL Database". DisplayName string `json:"displayName,omitempty"` // Labels: Required. A set of labels used to describe instances of this - // monitored - // resource type. For example, an individual Google Cloud SQL database - // is - // identified by values for the labels "database_id" and "zone". + // monitored resource type. For example, an individual Google Cloud SQL + // database is identified by values for the labels "database_id" and + // "zone". Labels []*LabelDescriptor `json:"labels,omitempty"` // LaunchStage: Optional. The launch stage of the monitored resource @@ -4249,70 +3171,45 @@ type MonitoredResourceDescriptor struct { // "PRELAUNCH" - Prelaunch features are hidden from users and are only // visible internally. // "EARLY_ACCESS" - Early Access features are limited to a closed - // group of testers. To use - // these features, you must sign up in advance and sign a Trusted - // Tester - // agreement (which includes confidentiality provisions). These features - // may - // be unstable, changed in backward-incompatible ways, and are - // not - // guaranteed to be released. + // group of testers. To use these features, you must sign up in advance + // and sign a Trusted Tester agreement (which includes confidentiality + // provisions). These features may be unstable, changed in + // backward-incompatible ways, and are not guaranteed to be released. // "ALPHA" - Alpha is a limited availability test for releases before - // they are cleared - // for widespread use. By Alpha, all significant design issues are - // resolved - // and we are in the process of verifying functionality. Alpha - // customers - // need to apply for access, agree to applicable terms, and have - // their - // projects whitelisted. Alpha releases don’t have to be feature - // complete, - // no SLAs are provided, and there are no technical support obligations, - // but - // they will be far enough along that customers can actually use them - // in - // test environments or for limited-use tests -- just like they would - // in - // normal production cases. + // they are cleared for widespread use. By Alpha, all significant design + // issues are resolved and we are in the process of verifying + // functionality. Alpha customers need to apply for access, agree to + // applicable terms, and have their projects whitelisted. 
Alpha releases + // don’t have to be feature complete, no SLAs are provided, and there + // are no technical support obligations, but they will be far enough + // along that customers can actually use them in test environments or + // for limited-use tests -- just like they would in normal production + // cases. // "BETA" - Beta is the point at which we are ready to open a release - // for any - // customer to use. There are no SLA or technical support obligations in - // a - // Beta release. Products will be complete from a feature perspective, - // but - // may have some open outstanding issues. Beta releases are suitable - // for - // limited production use cases. + // for any customer to use. There are no SLA or technical support + // obligations in a Beta release. Products will be complete from a + // feature perspective, but may have some open outstanding issues. Beta + // releases are suitable for limited production use cases. // "GA" - GA features are open to all developers and are considered - // stable and - // fully qualified for production use. + // stable and fully qualified for production use. // "DEPRECATED" - Deprecated features are scheduled to be shut down - // and removed. For more - // information, see the “Deprecation Policy” section of our [Terms - // of - // Service](https://cloud.google.com/terms/) - // and the [Google Cloud Platform Subject to the - // Deprecation + // and removed. For more information, see the “Deprecation Policy” + // section of our [Terms of Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation // Policy](https://cloud.google.com/terms/deprecation) documentation. LaunchStage string `json:"launchStage,omitempty"` // Name: Optional. The resource name of the monitored resource // descriptor: - // "projects/{project_id}/monitoredResourceDescriptors/{type - // }" where - // {type} is the value of the `type` field in this object - // and - // {project_id} is a project ID that provides API-specific context - // for - // accessing the type. APIs that do not use project information can use - // the - // resource name format "monitoredResourceDescriptors/{type}". + // "projects/{project_id}/monitoredResourceDescriptors/{type}" where + // {type} is the value of the `type` field in this object and + // {project_id} is a project ID that provides API-specific context for + // accessing the type. APIs that do not use project information can use + // the resource name format "monitoredResourceDescriptors/{type}". Name string `json:"name,omitempty"` - // Type: Required. The monitored resource type. For example, the - // type + // Type: Required. The monitored resource type. For example, the type // "cloudsql_database" represents databases in Google Cloud SQL. - // The maximum length of this value is 256 characters. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -4338,74 +3235,49 @@ func (s *MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Monitoring: Monitoring configuration of the service. -// -// The example below shows how to configure monitored resources and -// metrics -// for monitoring. In the example, a monitored resource and two metrics -// are +// Monitoring: Monitoring configuration of the service. The example +// below shows how to configure monitored resources and metrics for +// monitoring. 
In the example, a monitored resource and two metrics are // defined. The `library.googleapis.com/book/returned_count` metric is -// sent -// to both producer and consumer projects, whereas -// the -// `library.googleapis.com/book/overdue_count` metric is only sent to -// the -// consumer project. -// -// monitored_resources: -// - type: library.googleapis.com/branch -// labels: -// - key: /city -// description: The city where the library branch is located -// in. -// - key: /name -// description: The name of the branch. -// metrics: -// - name: library.googleapis.com/book/returned_count -// metric_kind: DELTA -// value_type: INT64 -// labels: -// - key: /customer_id -// - name: library.googleapis.com/book/overdue_count -// metric_kind: GAUGE -// value_type: INT64 -// labels: -// - key: /customer_id -// monitoring: -// producer_destinations: -// - monitored_resource: library.googleapis.com/branch -// metrics: -// - library.googleapis.com/book/returned_count -// consumer_destinations: -// - monitored_resource: library.googleapis.com/branch -// metrics: -// - library.googleapis.com/book/returned_count -// - library.googleapis.com/book/overdue_count +// sent to both producer and consumer projects, whereas the +// `library.googleapis.com/book/num_overdue` metric is only sent to the +// consumer project. monitored_resources: - type: +// library.googleapis.com/Branch display_name: "Library Branch" +// description: "A branch of a library." launch_stage: GA labels: - key: +// resource_container description: "The Cloud container (ie. project id) +// for the Branch." - key: location description: "The location of the +// library branch." - key: branch_id description: "The id of the +// branch." metrics: - name: library.googleapis.com/book/returned_count +// display_name: "Books Returned" description: "The count of books that +// have been returned." launch_stage: GA metric_kind: DELTA value_type: +// INT64 unit: "1" labels: - key: customer_id description: "The id of +// the customer." - name: library.googleapis.com/book/num_overdue +// display_name: "Books Overdue" description: "The current number of +// overdue books." launch_stage: GA metric_kind: GAUGE value_type: INT64 +// unit: "1" labels: - key: customer_id description: "The id of the +// customer." monitoring: producer_destinations: - monitored_resource: +// library.googleapis.com/Branch metrics: - +// library.googleapis.com/book/returned_count consumer_destinations: - +// monitored_resource: library.googleapis.com/Branch metrics: - +// library.googleapis.com/book/returned_count - +// library.googleapis.com/book/num_overdue type Monitoring struct { // ConsumerDestinations: Monitoring configurations for sending metrics - // to the consumer project. - // There can be multiple consumer destinations. A monitored resouce type - // may - // appear in multiple monitoring destinations if different aggregations - // are - // needed for different sets of metrics associated with that - // monitored - // resource type. A monitored resource and metric pair may only be used - // once - // in the Monitoring configuration. + // to the consumer project. There can be multiple consumer destinations. + // A monitored resource type may appear in multiple monitoring + // destinations if different aggregations are needed for different sets + // of metrics associated with that monitored resource type. A monitored + // resource and metric pair may only be used once in the Monitoring + // configuration. 
ConsumerDestinations []*MonitoringDestination `json:"consumerDestinations,omitempty"` // ProducerDestinations: Monitoring configurations for sending metrics - // to the producer project. - // There can be multiple producer destinations. A monitored resouce type - // may - // appear in multiple monitoring destinations if different aggregations - // are - // needed for different sets of metrics associated with that - // monitored - // resource type. A monitored resource and metric pair may only be used - // once - // in the Monitoring configuration. + // to the producer project. There can be multiple producer destinations. + // A monitored resource type may appear in multiple monitoring + // destinations if different aggregations are needed for different sets + // of metrics associated with that monitored resource type. A monitored + // resource and metric pair may only be used once in the Monitoring + // configuration. ProducerDestinations []*MonitoringDestination `json:"producerDestinations,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -4434,17 +3306,14 @@ func (s *Monitoring) MarshalJSON() ([]byte, error) { } // MonitoringDestination: Configuration of a specific monitoring -// destination (the producer project -// or the consumer project). +// destination (the producer project or the consumer project). type MonitoringDestination struct { // Metrics: Types of the metrics to report to this monitoring - // destination. - // Each type must be defined in Service.metrics section. + // destination. Each type must be defined in Service.metrics section. Metrics []string `json:"metrics,omitempty"` // MonitoredResource: The monitored resource type. The type must be - // defined in - // Service.monitored_resources section. + // defined in Service.monitored_resources section. MonitoredResource string `json:"monitoredResource,omitempty"` // ForceSendFields is a list of field names (e.g. "Metrics") to @@ -4471,43 +3340,26 @@ func (s *MonitoringDestination) MarshalJSON() ([]byte, error) { } // OAuthRequirements: OAuth scopes are a way to define data and -// permissions on data. For example, -// there are scopes defined for "Read-only access to Google Calendar" -// and -// "Access to Cloud Platform". Users can consent to a scope for an -// application, -// giving it permission to access that data on their behalf. -// -// OAuth scope specifications should be fairly coarse grained; a user -// will need -// to see and understand the text description of what your scope -// means. -// -// In most cases: use one or at most two OAuth scopes for an entire -// family of +// permissions on data. For example, there are scopes defined for +// "Read-only access to Google Calendar" and "Access to Cloud Platform". +// Users can consent to a scope for an application, giving it permission +// to access that data on their behalf. OAuth scope specifications +// should be fairly coarse grained; a user will need to see and +// understand the text description of what your scope means. In most +// cases: use one or at most two OAuth scopes for an entire family of // products. If your product has multiple APIs, you should probably be -// sharing -// the OAuth scope across all of those APIs. -// -// When you need finer grained OAuth consent screens: talk with your -// product -// management about how developers will use them in practice. 
-// -// Please note that even though each of the canonical scopes is enough -// for a -// request to be accepted and passed to the backend, a request can still -// fail -// due to the backend requiring additional scopes or permissions. +// sharing the OAuth scope across all of those APIs. When you need finer +// grained OAuth consent screens: talk with your product management +// about how developers will use them in practice. Please note that even +// though each of the canonical scopes is enough for a request to be +// accepted and passed to the backend, a request can still fail due to +// the backend requiring additional scopes or permissions. type OAuthRequirements struct { // CanonicalScopes: The list of publicly documented OAuth scopes that - // are allowed access. An - // OAuth token containing any of these scopes will be - // accepted. - // - // Example: - // - // canonical_scopes: https://www.googleapis.com/auth/calendar, - // https://www.googleapis.com/auth/calendar.read + // are allowed access. An OAuth token containing any of these scopes + // will be accepted. Example: canonical_scopes: + // https://www.googleapis.com/auth/calendar, + // https://www.googleapis.com/auth/calendar.read CanonicalScopes string `json:"canonicalScopes,omitempty"` // ForceSendFields is a list of field names (e.g. "CanonicalScopes") to @@ -4535,52 +3387,38 @@ func (s *OAuthRequirements) MarshalJSON() ([]byte, error) { } // Operation: This resource represents a long-running operation that is -// the result of a -// network API call. +// the result of a network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in - // progress. - // If `true`, the operation is completed, and either `error` or - // `response` is - // available. + // progress. If `true`, the operation is completed, and either `error` + // or `response` is available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. Error *Status `json:"error,omitempty"` - // Metadata: Service-specific metadata associated with the operation. - // It typically - // contains progress information and common metadata such as create - // time. - // Some services might not provide such metadata. Any method that - // returns a - // long-running operation should document the metadata type, if any. + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same - // service that - // originally returns it. If you use the default HTTP mapping, - // the - // `name` should be a resource name ending with + // service that originally returns it. If you use the default HTTP + // mapping, the `name` should be a resource name ending with // `operations/{unique_id}`. Name string `json:"name,omitempty"` - // Response: The normal response of the operation in case of success. - // If the original - // method returns no data on success, such as `Delete`, the response - // is - // `google.protobuf.Empty`. If the original method is - // standard - // `Get`/`Create`/`Update`, the response should be the resource. 
For - // other - // methods, the response should have the type `XxxResponse`, where - // `Xxx` - // is the original method name. For example, if the original method - // name - // is `TakeSnapshot()`, the inferred response type - // is - // `TakeSnapshotResponse`. + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as `Delete`, the + // response is `google.protobuf.Empty`. If the original method is + // standard `Get`/`Create`/`Update`, the response should be the + // resource. For other methods, the response should have the type + // `XxxResponse`, where `Xxx` is the original method name. For example, + // if the original method name is `TakeSnapshot()`, the inferred + // response type is `TakeSnapshotResponse`. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -4618,8 +3456,7 @@ type OperationMetadata struct { ProgressPercentage int64 `json:"progressPercentage,omitempty"` // ResourceNames: The full name of the resources that this operation is - // directly - // associated with. + // directly associated with. ResourceNames []string `json:"resourceNames,omitempty"` // StartTime: The start time of the operation. @@ -4654,25 +3491,19 @@ func (s *OperationMetadata) MarshalJSON() ([]byte, error) { } // Option: A protocol buffer option, which can be attached to a message, -// field, -// enumeration, etc. +// field, enumeration, etc. type Option struct { // Name: The option's name. For protobuf built-in options (options - // defined in - // descriptor.proto), this is the short name. For example, - // "map_entry". - // For custom options, it should be the fully-qualified name. For - // example, - // "google.api.http". + // defined in descriptor.proto), this is the short name. For example, + // "map_entry". For custom options, it should be the fully-qualified + // name. For example, "google.api.http". Name string `json:"name,omitempty"` // Value: The option's value packed in an Any message. If the value is a - // primitive, - // the corresponding wrapper type defined in - // google/protobuf/wrappers.proto - // should be used. If the value is an enum, it should be stored as an - // int32 - // value using the google.protobuf.Int32Value type. + // primitive, the corresponding wrapper type defined in + // google/protobuf/wrappers.proto should be used. If the value is an + // enum, it should be stored as an int32 value using the + // google.protobuf.Int32Value type. Value googleapi.RawMessage `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "Name") to @@ -4699,39 +3530,24 @@ func (s *Option) MarshalJSON() ([]byte, error) { } // Page: Represents a documentation page. A page can contain subpages to -// represent -// nested documentation set structure. +// represent nested documentation set structure. type Page struct { - // Content: The Markdown content of the page. You can use (== - // include {path} - // ==) to include content from a Markdown file. + // Content: The Markdown content of the page. You can use (== include + // {path} ==) to include content from a Markdown file. Content string `json:"content,omitempty"` // Name: The name of the page. It will be used as an identity of the - // page to - // generate URI of the page, text of the link to this page in - // navigation, - // etc. 
The full page name (start from the root page name to this - // page - // concatenated with `.`) can be used as reference to the page in - // your - // documentation. For example: - //
pages:
-	// - name: Tutorial
-	//   content: (== include tutorial.md ==)
-	//   subpages:
-	//   - name: Java
-	//     content: (== include tutorial_java.md
-	// ==)
-	// 
- // You can reference `Java` page using Markdown reference link - // syntax: - // `Java`. + // page to generate URI of the page, text of the link to this page in + // navigation, etc. The full page name (start from the root page name to + // this page concatenated with `.`) can be used as reference to the page + // in your documentation. For example: pages: - name: Tutorial content: + // (== include tutorial.md ==) subpages: - name: Java content: (== + // include tutorial_java.md ==) You can reference `Java` page using + // Markdown reference link syntax: `Java`. Name string `json:"name,omitempty"` // Subpages: Subpages of this page. The order of subpages specified here - // will be - // honored in the generated docset. + // will be honored in the generated docset. Subpages []*Page `json:"subpages,omitempty"` // ForceSendFields is a list of field names (e.g. "Content") to @@ -4758,154 +3574,77 @@ func (s *Page) MarshalJSON() ([]byte, error) { } // Policy: An Identity and Access Management (IAM) policy, which -// specifies access -// controls for Google Cloud resources. -// -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or -// more -// `members` to a single `role`. Members can be user accounts, service -// accounts, +// specifies access controls for Google Cloud resources. A `Policy` is a +// collection of `bindings`. A `binding` binds one or more `members` to +// a single `role`. Members can be user accounts, service accounts, // Google groups, and domains (such as G Suite). A `role` is a named -// list of -// permissions; each `role` can be an IAM predefined role or a -// user-created -// custom role. -// -// For some types of Google Cloud resources, a `binding` can also -// specify a -// `condition`, which is a logical expression that allows access to a -// resource -// only if the expression evaluates to `true`. A condition can add -// constraints -// based on attributes of the request, the resource, or both. To learn -// which -// resources support conditions in their IAM policies, see the -// [IAM +// list of permissions; each `role` can be an IAM predefined role or a +// user-created custom role. For some types of Google Cloud resources, a +// `binding` can also specify a `condition`, which is a logical +// expression that allows access to a resource only if the expression +// evaluates to `true`. A condition can add constraints based on +// attributes of the request, the resource, or both. To learn which +// resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p -// olicies). 
-// -// **JSON example:** -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": [ -// "user:eve@example.com" -// ], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// -// **YAML example:** -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// - etag: BwWWja0YfJA= -// - version: 3 -// -// For a description of IAM and its features, see the -// [IAM documentation](https://cloud.google.com/iam/docs/). +// olicies). **JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: +// 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). type Policy struct { // AuditConfigs: Specifies cloud audit logging configuration for this // policy. AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` // Bindings: Associates a list of `members` to a `role`. Optionally, may - // specify a - // `condition` that determines how and when the `bindings` are applied. - // Each - // of the `bindings` must contain at least one member. + // specify a `condition` that determines how and when the `bindings` are + // applied. Each of the `bindings` must contain at least one member. Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. 
- // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. **Important:** If you use IAM + // Conditions, you must include the `etag` field whenever you call + // `setIamPolicy`. If you omit this field, then IAM allows you to + // overwrite a version `3` policy with a version `1` policy, and all of // the conditions in the version `3` policy are lost. Etag string `json:"etag,omitempty"` - // Version: Specifies the format of the policy. - // - // Valid values are `0`, `1`, and `3`. Requests that specify an invalid - // value - // are rejected. - // + // Version: Specifies the format of the policy. Valid values are `0`, + // `1`, and `3`. Requests that specify an invalid value are rejected. // Any operation that affects conditional role bindings must specify - // version - // `3`. This requirement applies to the following operations: - // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a - // policy - // that includes conditions - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of - // the conditions in the version `3` policy are lost. - // - // If a policy does not include any conditions, operations on that - // policy may - // specify any valid version or leave the field unset. - // - // To learn which resources support conditions in their IAM policies, - // see the - // [IAM + // version `3`. This requirement applies to the following operations: * + // Getting a policy that includes a conditional role binding * Adding a + // conditional role binding to a policy * Changing a conditional role + // binding in a policy * Removing any role binding, with or without a + // condition, from a policy that includes conditions **Important:** If + // you use IAM Conditions, you must include the `etag` field whenever + // you call `setIamPolicy`. If you omit this field, then IAM allows you + // to overwrite a version `3` policy with a version `1` policy, and all + // of the conditions in the version `3` policy are lost. 
If a policy + // does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. To learn which + // resources support conditions in their IAM policies, see the [IAM // documentation](https://cloud.google.com/iam/help/conditions/resource-p // olicies). Version int64 `json:"version,omitempty"` @@ -4938,67 +3677,33 @@ func (s *Policy) MarshalJSON() ([]byte, error) { } // Quota: Quota configuration helps to achieve fairness and budgeting in -// service -// usage. -// -// The metric based quota configuration works this way: -// - The service configuration defines a set of metrics. -// - For API calls, the quota.metric_rules maps methods to metrics with -// corresponding costs. -// - The quota.limits defines limits on the metrics, which will be used -// for -// quota checks at runtime. -// -// An example quota configuration in yaml format: -// -// quota: -// limits: -// -// - name: apiWriteQpsPerProject -// metric: library.googleapis.com/write_calls -// unit: "1/min/{project}" # rate limit for consumer projects -// values: -// STANDARD: 10000 -// -// -// # The metric rules bind all methods to the read_calls metric, -// # except for the UpdateBook and DeleteBook methods. These two -// methods -// # are mapped to the write_calls metric, with the UpdateBook -// method -// # consuming at twice rate as the DeleteBook method. -// metric_rules: -// - selector: "*" -// metric_costs: -// library.googleapis.com/read_calls: 1 -// - selector: google.example.library.v1.LibraryService.UpdateBook -// metric_costs: -// library.googleapis.com/write_calls: 2 -// - selector: google.example.library.v1.LibraryService.DeleteBook -// metric_costs: -// library.googleapis.com/write_calls: 1 -// -// Corresponding Metric definition: -// -// metrics: -// - name: library.googleapis.com/read_calls -// display_name: Read requests -// metric_kind: DELTA -// value_type: INT64 -// -// - name: library.googleapis.com/write_calls -// display_name: Write requests -// metric_kind: DELTA -// value_type: INT64 -// -// +// service usage. The metric based quota configuration works this way: - +// The service configuration defines a set of metrics. - For API calls, +// the quota.metric_rules maps methods to metrics with corresponding +// costs. - The quota.limits defines limits on the metrics, which will +// be used for quota checks at runtime. An example quota configuration +// in yaml format: quota: limits: - name: apiWriteQpsPerProject metric: +// library.googleapis.com/write_calls unit: "1/min/{project}" # rate +// limit for consumer projects values: STANDARD: 10000 # The metric +// rules bind all methods to the read_calls metric, # except for the +// UpdateBook and DeleteBook methods. These two methods # are mapped to +// the write_calls metric, with the UpdateBook method # consuming at +// twice rate as the DeleteBook method. 
metric_rules: - selector: "*" +// metric_costs: library.googleapis.com/read_calls: 1 - selector: +// google.example.library.v1.LibraryService.UpdateBook metric_costs: +// library.googleapis.com/write_calls: 2 - selector: +// google.example.library.v1.LibraryService.DeleteBook metric_costs: +// library.googleapis.com/write_calls: 1 Corresponding Metric +// definition: metrics: - name: library.googleapis.com/read_calls +// display_name: Read requests metric_kind: DELTA value_type: INT64 - +// name: library.googleapis.com/write_calls display_name: Write requests +// metric_kind: DELTA value_type: INT64 type Quota struct { // Limits: List of `QuotaLimit` definitions for the service. Limits []*QuotaLimit `json:"limits,omitempty"` // MetricRules: List of `MetricRule` definitions, each one mapping a - // selected method to one - // or more metrics. + // selected method to one or more metrics. MetricRules []*MetricRule `json:"metricRules,omitempty"` // ForceSendFields is a list of field names (e.g. "Limits") to @@ -5025,116 +3730,75 @@ func (s *Quota) MarshalJSON() ([]byte, error) { } // QuotaLimit: `QuotaLimit` defines a specific limit that applies over a -// specified duration -// for a limit type. There can be at most one limit for a duration and -// limit -// type combination defined within a `QuotaGroup`. +// specified duration for a limit type. There can be at most one limit +// for a duration and limit type combination defined within a +// `QuotaGroup`. type QuotaLimit struct { // DefaultLimit: Default number of tokens that can be consumed during - // the specified - // duration. This is the number of tokens assigned when a - // client - // application developer activates the service for his/her - // project. - // - // Specifying a value of 0 will block all requests. This can be used if - // you - // are provisioning quota to selected consumers and blocking - // others. - // Similarly, a value of -1 will indicate an unlimited quota. No - // other - // negative values are allowed. - // - // Used by group-based quotas only. + // the specified duration. This is the number of tokens assigned when a + // client application developer activates the service for his/her + // project. Specifying a value of 0 will block all requests. This can be + // used if you are provisioning quota to selected consumers and blocking + // others. Similarly, a value of -1 will indicate an unlimited quota. No + // other negative values are allowed. Used by group-based quotas only. DefaultLimit int64 `json:"defaultLimit,omitempty,string"` // Description: Optional. User-visible, extended description for this - // quota limit. - // Should be used only when more context is needed to understand this - // limit - // than provided by the limit's display name (see: `display_name`). + // quota limit. Should be used only when more context is needed to + // understand this limit than provided by the limit's display name (see: + // `display_name`). Description string `json:"description,omitempty"` - // DisplayName: User-visible display name for this limit. - // Optional. If not set, the UI will provide a default display name - // based on - // the quota configuration. This field can be used to override the - // default + // DisplayName: User-visible display name for this limit. Optional. If + // not set, the UI will provide a default display name based on the + // quota configuration. This field can be used to override the default // display name generated from the configuration. 
DisplayName string `json:"displayName,omitempty"` // Duration: Duration of this limit in textual notation. Must be "100s" - // or "1d". - // - // Used by group-based quotas only. + // or "1d". Used by group-based quotas only. Duration string `json:"duration,omitempty"` // FreeTier: Free tier value displayed in the Developers Console for - // this limit. - // The free tier is the number of tokens that will be subtracted from - // the - // billed amount when billing is enabled. - // This field can only be set on a limit with duration "1d", in a - // billable - // group; it is invalid on any other limit. If this field is not set, - // it + // this limit. The free tier is the number of tokens that will be + // subtracted from the billed amount when billing is enabled. This field + // can only be set on a limit with duration "1d", in a billable group; + // it is invalid on any other limit. If this field is not set, it // defaults to 0, indicating that there is no free tier for this - // service. - // - // Used by group-based quotas only. + // service. Used by group-based quotas only. FreeTier int64 `json:"freeTier,omitempty,string"` // MaxLimit: Maximum number of tokens that can be consumed during the - // specified - // duration. Client application developers can override the default - // limit up - // to this maximum. If specified, this value cannot be set to a value - // less - // than the default limit. If not specified, it is set to the default - // limit. - // - // To allow clients to apply overrides with no upper bound, set this to - // -1, - // indicating unlimited maximum quota. - // - // Used by group-based quotas only. + // specified duration. Client application developers can override the + // default limit up to this maximum. If specified, this value cannot be + // set to a value less than the default limit. If not specified, it is + // set to the default limit. To allow clients to apply overrides with no + // upper bound, set this to -1, indicating unlimited maximum quota. Used + // by group-based quotas only. MaxLimit int64 `json:"maxLimit,omitempty,string"` // Metric: The name of the metric this quota limit applies to. The quota - // limits with - // the same metric will be checked together during runtime. The metric - // must be - // defined within the service config. + // limits with the same metric will be checked together during runtime. + // The metric must be defined within the service config. Metric string `json:"metric,omitempty"` - // Name: Name of the quota limit. - // - // The name must be provided, and it must be unique within the service. - // The - // name can only include alphanumeric characters as well as '-'. - // - // The maximum length of the limit name is 64 characters. + // Name: Name of the quota limit. The name must be provided, and it must + // be unique within the service. The name can only include alphanumeric + // characters as well as '-'. The maximum length of the limit name is 64 + // characters. Name string `json:"name,omitempty"` - // Unit: Specify the unit of the quota limit. It uses the same syntax - // as - // Metric.unit. The supported unit kinds are determined by the - // quota - // backend system. - // - // Here are some examples: - // * "1/min/{project}" for quota per minute per project. - // - // Note: the order of unit components is insignificant. - // The "1" at the beginning is required to follow the metric unit - // syntax. + // Unit: Specify the unit of the quota limit. It uses the same syntax as + // Metric.unit. 
The supported unit kinds are determined by the quota + // backend system. Here are some examples: * "1/min/{project}" for quota + // per minute per project. Note: the order of unit components is + // insignificant. The "1" at the beginning is required to follow the + // metric unit syntax. Unit string `json:"unit,omitempty"` // Values: Tiered limit values. You must specify this as a key:value - // pair, with an - // integer value that is the maximum number of requests allowed for - // the - // specified unit. Currently only STANDARD is supported. + // pair, with an integer value that is the maximum number of requests + // allowed for the specified unit. Currently only STANDARD is supported. Values map[string]string `json:"values,omitempty"` // ForceSendFields is a list of field names (e.g. "DefaultLimit") to @@ -5160,68 +3824,100 @@ func (s *QuotaLimit) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ResourceReference: Defines a proto annotation that describes a string +// field that refers to an API resource. +type ResourceReference struct { + // ChildType: The resource type of a child collection that the annotated + // field references. This is useful for annotating the `parent` field + // that doesn't have a fixed resource type. Example: message + // ListLogEntriesRequest { string parent = 1 + // [(google.api.resource_reference) = { child_type: + // "logging.googleapis.com/LogEntry" }; } + ChildType string `json:"childType,omitempty"` + + // Type: The resource type that the annotated field references. Example: + // message Subscription { string topic = 2 + // [(google.api.resource_reference) = { type: + // "pubsub.googleapis.com/Topic" }]; } Occasionally, a field may + // reference an arbitrary resource. In this case, APIs use the special + // value * in their resource reference. Example: message + // GetIamPolicyRequest { string resource = 2 + // [(google.api.resource_reference) = { type: "*" }]; } + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ChildType") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ChildType") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResourceReference) MarshalJSON() ([]byte, error) { + type NoMethod ResourceReference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Rollout: A rollout resource that defines how service configuration -// versions are pushed -// to control plane systems. Typically, you create a new version of -// the -// service config, and then create a Rollout to push the service config. +// versions are pushed to control plane systems. 
Typically, you create a +// new version of the service config, and then create a Rollout to push +// the service config. type Rollout struct { // CreateTime: Creation time of the rollout. Readonly. CreateTime string `json:"createTime,omitempty"` // CreatedBy: This field is deprecated and will be deleted. Please - // remove usage of - // this field. + // remove usage of this field. CreatedBy string `json:"createdBy,omitempty"` // DeleteServiceStrategy: The strategy associated with a rollout to - // delete a `ManagedService`. - // Readonly. + // delete a `ManagedService`. Readonly. DeleteServiceStrategy *DeleteServiceStrategy `json:"deleteServiceStrategy,omitempty"` // RolloutId: Optional. Unique identifier of this Rollout. Must be no - // longer than 63 characters - // and only lower case letters, digits, '.', '_' and '-' are - // allowed. - // - // If not specified by client, the server will generate one. The - // generated id - // will have the form of , where "date" is the - // create - // date in ISO 8601 format. "revision number" is a monotonically - // increasing - // positive number that is reset every day for each service. - // An example of the generated rollout_id is '2016-02-16r1' + // longer than 63 characters and only lower case letters, digits, '.', + // '_' and '-' are allowed. If not specified by client, the server will + // generate one. The generated id will have the form of , where "date" + // is the create date in ISO 8601 format. "revision number" is a + // monotonically increasing positive number that is reset every day for + // each service. An example of the generated rollout_id is + // '2016-02-16r1' RolloutId string `json:"rolloutId,omitempty"` // ServiceName: The name of the service associated with this Rollout. ServiceName string `json:"serviceName,omitempty"` // Status: The status of this rollout. Readonly. In case of a failed - // rollout, - // the system will automatically rollback to the current - // Rollout - // version. Readonly. + // rollout, the system will automatically rollback to the current + // Rollout version. Readonly. // // Possible values: // "ROLLOUT_STATUS_UNSPECIFIED" - No status specified. // "IN_PROGRESS" - The Rollout is in progress. // "SUCCESS" - The Rollout has completed successfully. // "CANCELLED" - The Rollout has been cancelled. This can happen if - // you have overlapping - // Rollout pushes, and the previous ones will be cancelled. + // you have overlapping Rollout pushes, and the previous ones will be + // cancelled. // "FAILED" - The Rollout has failed and the rollback attempt has // failed too. // "PENDING" - The Rollout has not started yet and is pending for // execution. // "FAILED_ROLLED_BACK" - The Rollout has failed and rolled back to - // the previous successful - // Rollout. + // the previous successful Rollout. Status string `json:"status,omitempty"` // TrafficPercentStrategy: Google Service Control selects service - // configurations based on - // traffic percentage. + // configurations based on traffic percentage. TrafficPercentStrategy *TrafficPercentStrategy `json:"trafficPercentStrategy,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -5252,43 +3948,24 @@ func (s *Rollout) MarshalJSON() ([]byte, error) { } // Service: `Service` is the root object of Google service configuration -// schema. It -// describes basic information about a service, such as the name and -// the -// title, and delegates other aspects to sub-sections. 
Each sub-section -// is -// either a proto message or a repeated proto message that configures -// a -// specific aspect, such as auth. See each proto message definition for -// details. -// -// Example: -// -// type: google.api.Service -// config_version: 3 -// name: calendar.googleapis.com -// title: Google Calendar API -// apis: -// - name: google.calendar.v3.Calendar -// authentication: -// providers: -// - id: google_calendar_auth -// jwks_uri: https://www.googleapis.com/oauth2/v1/certs -// issuer: https://securetoken.google.com -// rules: -// - selector: "*" -// requirements: -// provider_id: google_calendar_auth +// schema. It describes basic information about a service, such as the +// name and the title, and delegates other aspects to sub-sections. Each +// sub-section is either a proto message or a repeated proto message +// that configures a specific aspect, such as auth. See each proto +// message definition for details. Example: type: google.api.Service +// config_version: 3 name: calendar.googleapis.com title: Google +// Calendar API apis: - name: google.calendar.v3.Calendar +// authentication: providers: - id: google_calendar_auth jwks_uri: +// https://www.googleapis.com/oauth2/v1/certs issuer: +// https://securetoken.google.com rules: - selector: "*" requirements: +// provider_id: google_calendar_auth type Service struct { // Apis: A list of API interfaces exported by this service. Only the - // `name` field - // of the google.protobuf.Api needs to be provided by the - // configuration - // author, as the remaining fields will be derived from the IDL during - // the - // normalization process. It is an error to specify an API interface - // here - // which cannot be resolved against the associated IDL files. + // `name` field of the google.protobuf.Api needs to be provided by the + // configuration author, as the remaining fields will be derived from + // the IDL during the normalization process. It is an error to specify + // an API interface here which cannot be resolved against the associated + // IDL files. Apis []*Api `json:"apis,omitempty"` // Authentication: Auth configuration. @@ -5301,13 +3978,9 @@ type Service struct { Billing *Billing `json:"billing,omitempty"` // ConfigVersion: The semantic version of the service configuration. The - // config version - // affects the interpretation of the service configuration. For - // example, - // certain features are enabled by default for certain config - // versions. - // - // The latest config version is `3`. + // config version affects the interpretation of the service + // configuration. For example, certain features are enabled by default + // for certain config versions. The latest config version is `3`. ConfigVersion int64 `json:"configVersion,omitempty"` // Context: Context configuration. @@ -5322,35 +3995,25 @@ type Service struct { // Documentation: Additional API documentation. Documentation *Documentation `json:"documentation,omitempty"` - // Endpoints: Configuration for network endpoints. If this is empty, - // then an endpoint - // with the same name as the service is automatically generated to - // service all - // defined APIs. + // Endpoints: Configuration for network endpoints. If this is empty, + // then an endpoint with the same name as the service is automatically + // generated to service all defined APIs. Endpoints []*Endpoint `json:"endpoints,omitempty"` - // Enums: A list of all enum types included in this API service. 
- // Enums - // referenced directly or indirectly by the `apis` are - // automatically - // included. Enums which are not referenced but shall be - // included - // should be listed here by name. Example: - // - // enums: - // - name: google.someapi.v1.SomeEnum + // Enums: A list of all enum types included in this API service. Enums + // referenced directly or indirectly by the `apis` are automatically + // included. Enums which are not referenced but shall be included should + // be listed here by name. Example: enums: - name: + // google.someapi.v1.SomeEnum Enums []*Enum `json:"enums,omitempty"` // Http: HTTP configuration. Http *Http `json:"http,omitempty"` // Id: A unique ID for a specific instance of this message, typically - // assigned - // by the client for tracking purpose. Must be no longer than 63 - // characters - // and only lower case letters, digits, '.', '_' and '-' are allowed. - // If - // empty, the server may choose to generate one instead. + // assigned by the client for tracking purpose. Must be no longer than + // 63 characters and only lower case letters, digits, '.', '_' and '-' + // are allowed. If empty, the server may choose to generate one instead. Id string `json:"id,omitempty"` // Logging: Logging configuration. @@ -5363,19 +4026,17 @@ type Service struct { Metrics []*MetricDescriptor `json:"metrics,omitempty"` // MonitoredResources: Defines the monitored resources used by this - // service. This is required - // by the Service.monitoring and Service.logging configurations. + // service. This is required by the Service.monitoring and + // Service.logging configurations. MonitoredResources []*MonitoredResourceDescriptor `json:"monitoredResources,omitempty"` // Monitoring: Monitoring configuration. Monitoring *Monitoring `json:"monitoring,omitempty"` // Name: The service name, which is a DNS-like logical identifier for - // the - // service, such as `calendar.googleapis.com`. The service - // name - // typically goes through DNS verification to make sure the owner - // of the service also owns the DNS name. + // the service, such as `calendar.googleapis.com`. The service name + // typically goes through DNS verification to make sure the owner of the + // service also owns the DNS name. Name string `json:"name,omitempty"` // ProducerProjectId: The Google project that owns this service. @@ -5392,30 +4053,21 @@ type Service struct { SystemParameters *SystemParameters `json:"systemParameters,omitempty"` // SystemTypes: A list of all proto message types included in this API - // service. - // It serves similar purpose as [google.api.Service.types], except - // that - // these types are not needed by user-defined APIs. Therefore, they will - // not - // show up in the generated discovery doc. This field should only be - // used - // to define system APIs in ESF. + // service. It serves similar purpose as [google.api.Service.types], + // except that these types are not needed by user-defined APIs. + // Therefore, they will not show up in the generated discovery doc. This + // field should only be used to define system APIs in ESF. SystemTypes []*Type `json:"systemTypes,omitempty"` // Title: The product title for this service. Title string `json:"title,omitempty"` // Types: A list of all proto message types included in this API - // service. - // Types referenced directly or indirectly by the `apis` - // are - // automatically included. 
Messages which are not referenced but - // shall be included, such as types used by the `google.protobuf.Any` - // type, - // should be listed here by name. Example: - // - // types: - // - name: google.protobuf.Int32 + // service. Types referenced directly or indirectly by the `apis` are + // automatically included. Messages which are not referenced but shall + // be included, such as types used by the `google.protobuf.Any` type, + // should be listed here by name. Example: types: - name: + // google.protobuf.Int32 Types []*Type `json:"types,omitempty"` // Usage: Configuration controlling usage of this service. @@ -5449,35 +4101,22 @@ func (s *Service) MarshalJSON() ([]byte, error) { } // ServiceIdentity: The per-product per-project service identity for a -// service. -// -// -// Use this field to configure per-product per-project service -// identity. -// Example of a service identity configuration. -// -// usage: -// service_identity: -// - service_account_parent: "projects/123456789" -// display_name: "Cloud XXX Service Agent" -// description: "Used as the identity of Cloud XXX to access -// resources" +// service. Use this field to configure per-product per-project service +// identity. Example of a service identity configuration. usage: +// service_identity: - service_account_parent: "projects/123456789" +// display_name: "Cloud XXX Service Agent" description: "Used as the +// identity of Cloud XXX to access resources" type ServiceIdentity struct { // Description: Optional. A user-specified opaque description of the - // service account. - // Must be less than or equal to 256 UTF-8 bytes. + // service account. Must be less than or equal to 256 UTF-8 bytes. Description string `json:"description,omitempty"` - // DisplayName: Optional. A user-specified name for the service - // account. + // DisplayName: Optional. A user-specified name for the service account. // Must be less than or equal to 100 UTF-8 bytes. DisplayName string `json:"displayName,omitempty"` // ServiceAccountParent: A service account project that hosts the - // service accounts. - // - // An example name would be: - // `projects/123456789` + // service accounts. An example name would be: `projects/123456789` ServiceAccountParent string `json:"serviceAccountParent,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -5506,20 +4145,15 @@ func (s *ServiceIdentity) MarshalJSON() ([]byte, error) { // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. + // `resource`. The size of the policy is limited to a few 10s of KB. An + // empty policy is a valid policy but certain Cloud Platform services + // (such as Projects) might reject them. Policy *Policy `json:"policy,omitempty"` // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the - // policy to modify. Only - // the fields in the mask will be modified. If no mask is provided, - // the - // following default mask is used: - // - // `paths: "bindings, etag" + // policy to modify. Only the fields in the mask will be modified. If no + // mask is provided, the following default mask is used: `paths: + // "bindings, etag" UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Policy") to @@ -5546,12 +4180,10 @@ func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { } // SourceContext: `SourceContext` represents information about the -// source of a -// protobuf element, like the file in which it is defined. +// source of a protobuf element, like the file in which it is defined. type SourceContext struct { // FileName: The path-qualified name of the .proto file that contained - // the associated - // protobuf element. For example: + // the associated protobuf element. For example: // "google/protobuf/source_context.proto". FileName string `json:"fileName,omitempty"` @@ -5607,32 +4239,24 @@ func (s *SourceInfo) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. -// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. -// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -5671,8 +4295,7 @@ type Step struct { // "NOT_STARTED" - The operation or step has not started yet. // "IN_PROGRESS" - The operation or step is in progress. // "FAILED" - The operation or step has completed with errors. If the - // operation is - // rollbackable, the rollback completed with errors too. + // operation is rollbackable, the rollback completed with errors too. // "CANCELLED" - The operation or step has completed with // cancellation. Status string `json:"status,omitempty"` @@ -5707,10 +4330,9 @@ type SubmitConfigSourceRequest struct { ConfigSource *ConfigSource `json:"configSource,omitempty"` // ValidateOnly: Optional. If set, this will result in the generation of - // a - // `google.api.Service` configuration based on the `ConfigSource` - // provided, - // but the generated config and the sources will NOT be persisted. + // a `google.api.Service` configuration based on the `ConfigSource` + // provided, but the generated config and the sources will NOT be + // persisted. 
ValidateOnly bool `json:"validateOnly,omitempty"` // ForceSendFields is a list of field names (e.g. "ConfigSource") to @@ -5766,14 +4388,12 @@ func (s *SubmitConfigSourceResponse) MarshalJSON() ([]byte, error) { } // SystemParameter: Define a parameter's name and location. The -// parameter may be passed as either -// an HTTP header or a URL query parameter, and if both are passed the -// behavior -// is implementation-dependent. +// parameter may be passed as either an HTTP header or a URL query +// parameter, and if both are passed the behavior is +// implementation-dependent. type SystemParameter struct { // HttpHeader: Define the HTTP header name to use for the parameter. It - // is case - // insensitive. + // is case insensitive. HttpHeader string `json:"httpHeader,omitempty"` // Name: Define the name of the parameter, such as "api_key" . It is @@ -5781,8 +4401,7 @@ type SystemParameter struct { Name string `json:"name,omitempty"` // UrlQueryParameter: Define the URL query parameter name to use for the - // parameter. It is case - // sensitive. + // parameter. It is case sensitive. UrlQueryParameter string `json:"urlQueryParameter,omitempty"` // ForceSendFields is a list of field names (e.g. "HttpHeader") to @@ -5809,24 +4428,18 @@ func (s *SystemParameter) MarshalJSON() ([]byte, error) { } // SystemParameterRule: Define a system parameter rule mapping system -// parameter definitions to -// methods. +// parameter definitions to methods. type SystemParameterRule struct { // Parameters: Define parameters. Multiple names may be defined for a - // parameter. - // For a given method call, only one of them should be used. If - // multiple - // names are used the behavior is implementation-dependent. - // If none of the specified names are present the behavior - // is + // parameter. For a given method call, only one of them should be used. + // If multiple names are used the behavior is implementation-dependent. + // If none of the specified names are present the behavior is // parameter-dependent. Parameters []*SystemParameter `json:"parameters,omitempty"` // Selector: Selects the methods to which this rule applies. Use '*' to - // indicate all - // methods in all APIs. - // - // Refer to selector for syntax details. + // indicate all methods in all APIs. Refer to selector for syntax + // details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. "Parameters") to @@ -5852,49 +4465,23 @@ func (s *SystemParameterRule) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SystemParameters: ### System parameter configuration -// -// A system parameter is a special kind of parameter defined by the -// API -// system, not by an individual API. It is typically mapped to an HTTP -// header +// SystemParameters: ### System parameter configuration A system +// parameter is a special kind of parameter defined by the API system, +// not by an individual API. It is typically mapped to an HTTP header // and/or a URL query parameter. This configuration specifies which -// methods -// change the names of the system parameters. +// methods change the names of the system parameters. type SystemParameters struct { - // Rules: Define system parameters. - // - // The parameters defined here will override the default - // parameters - // implemented by the system. If this field is missing from the - // service - // config, default system parameters will be used. 
Default system - // parameters - // and names is implementation-dependent. - // - // Example: define api key for all methods - // - // system_parameters - // rules: - // - selector: "*" - // parameters: - // - name: api_key - // url_query_parameter: api_key - // - // - // Example: define 2 api key names for a specific method. - // - // system_parameters - // rules: - // - selector: "/ListShelves" - // parameters: - // - name: api_key - // http_header: Api-Key1 - // - name: api_key - // http_header: Api-Key2 - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // Rules: Define system parameters. The parameters defined here will + // override the default parameters implemented by the system. If this + // field is missing from the service config, default system parameters + // will be used. Default system parameters and names is + // implementation-dependent. Example: define api key for all methods + // system_parameters rules: - selector: "*" parameters: - name: api_key + // url_query_parameter: api_key Example: define 2 api key names for a + // specific method. system_parameters rules: - selector: "/ListShelves" + // parameters: - name: api_key http_header: Api-Key1 - name: api_key + // http_header: Api-Key2 **NOTE:** All service configuration rules + // follow "last one wins" order. Rules []*SystemParameterRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. "Rules") to @@ -5924,11 +4511,8 @@ func (s *SystemParameters) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. - // Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For - // more - // information see - // [IAM + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. For more information see [IAM // Overview](https://cloud.google.com/iam/docs/overview#permissions). Permissions []string `json:"permissions,omitempty"` @@ -5959,8 +4543,7 @@ func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. + // the caller is allowed. Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -5991,44 +4574,21 @@ func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { } // TrafficPercentStrategy: Strategy that specifies how clients of Google -// Service Controller want to -// send traffic to use different config versions. This is generally -// used by API proxy to split traffic based on your configured -// percentage for -// each config version. -// -// One example of how to gradually rollout a new service configuration -// using -// this -// strategy: -// Day 1 -// -// Rollout { -// id: "example.googleapis.com/rollout_20160206" -// traffic_percent_strategy { -// percentages: { -// "example.googleapis.com/20160201": 70.00 -// "example.googleapis.com/20160206": 30.00 -// } -// } -// } -// -// Day 2 -// -// Rollout { -// id: "example.googleapis.com/rollout_20160207" -// traffic_percent_strategy: { -// percentages: { -// "example.googleapis.com/20160206": 100.00 -// } -// } -// } +// Service Controller want to send traffic to use different config +// versions. This is generally used by API proxy to split traffic based +// on your configured percentage for each config version. 
One example of +// how to gradually rollout a new service configuration using this +// strategy: Day 1 Rollout { id: +// "example.googleapis.com/rollout_20160206" traffic_percent_strategy { +// percentages: { "example.googleapis.com/20160201": 70.00 +// "example.googleapis.com/20160206": 30.00 } } } Day 2 Rollout { id: +// "example.googleapis.com/rollout_20160207" traffic_percent_strategy: { +// percentages: { "example.googleapis.com/20160206": 100.00 } } } type TrafficPercentStrategy struct { // Percentages: Maps service configuration IDs to their corresponding - // traffic percentage. - // Key is the service configuration ID, Value is the traffic - // percentage - // which must be greater than 0.0 and the sum must equal to 100.0. + // traffic percentage. Key is the service configuration ID, Value is the + // traffic percentage which must be greater than 0.0 and the sum must + // equal to 100.0. Percentages map[string]float64 `json:"percentages,omitempty"` // ForceSendFields is a list of field names (e.g. "Percentages") to @@ -6133,29 +4693,20 @@ func (s *UndeleteServiceResponse) MarshalJSON() ([]byte, error) { // Usage: Configuration controlling usage of a service. type Usage struct { // ProducerNotificationChannel: The full resource name of a channel used - // for sending notifications to the - // service producer. - // - // Google Service Management currently only supports - // [Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a - // notification - // channel. To use Google Cloud Pub/Sub as the channel, this must be the - // name - // of a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name - // format + // for sending notifications to the service producer. Google Service + // Management currently only supports [Google Cloud + // Pub/Sub](https://cloud.google.com/pubsub) as a notification channel. + // To use Google Cloud Pub/Sub as the channel, this must be the name of + // a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format // documented in https://cloud.google.com/pubsub/docs/overview. ProducerNotificationChannel string `json:"producerNotificationChannel,omitempty"` // Requirements: Requirements that must be satisfied before a consumer - // project can use the - // service. Each requirement is of the form - // /; - // for example 'serviceusage.googleapis.com/billing-enabled'. + // project can use the service. Each requirement is of the form /; for + // example 'serviceusage.googleapis.com/billing-enabled'. Requirements []string `json:"requirements,omitempty"` - // Rules: A list of usage rules that apply to individual API - // methods. - // + // Rules: A list of usage rules that apply to individual API methods. // **NOTE:** All service configuration rules follow "last one wins" // order. Rules []*UsageRule `json:"rules,omitempty"` @@ -6189,57 +4740,34 @@ func (s *Usage) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UsageRule: Usage configuration rules for the service. -// -// NOTE: Under development. -// -// -// Use this rule to configure unregistered calls for the service. -// Unregistered -// calls are calls that do not contain consumer project -// identity. -// (Example: calls that do not contain an API key). -// By default, API methods do not allow unregistered calls, and each -// method call -// must be identified by a consumer project identity. Use this rule -// to -// allow/disallow unregistered calls. 
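The Day 1 rollout split quoted in the TrafficPercentStrategy comment above is just a Percentages map on the generated type; a sketch using the same example values (values must sum to 100.0):

// Sketch only. Assumes: import servicemanagement "google.golang.org/api/servicemanagement/v1"
// "Day 1" split from the example above: 70% stays on the old config, 30% moves to the new one.
var day1Strategy = &servicemanagement.TrafficPercentStrategy{
	Percentages: map[string]float64{
		"example.googleapis.com/20160201": 70.00,
		"example.googleapis.com/20160206": 30.00,
	},
}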
-// -// Example of an API that wants to allow unregistered calls for entire -// service. -// -// usage: -// rules: -// - selector: "*" -// allow_unregistered_calls: true -// -// Example of a method that wants to allow unregistered calls. -// -// usage: -// rules: -// - selector: +// UsageRule: Usage configuration rules for the service. NOTE: Under +// development. Use this rule to configure unregistered calls for the +// service. Unregistered calls are calls that do not contain consumer +// project identity. (Example: calls that do not contain an API key). By +// default, API methods do not allow unregistered calls, and each method +// call must be identified by a consumer project identity. Use this rule +// to allow/disallow unregistered calls. Example of an API that wants to +// allow unregistered calls for entire service. usage: rules: - +// selector: "*" allow_unregistered_calls: true Example of a method that +// wants to allow unregistered calls. usage: rules: - selector: // "google.example.library.v1.LibraryService.CreateBook" -// allow_unregistered_calls: true +// allow_unregistered_calls: true type UsageRule struct { // AllowUnregisteredCalls: If true, the selected method allows - // unregistered calls, e.g. calls - // that don't identify any user or application. + // unregistered calls, e.g. calls that don't identify any user or + // application. AllowUnregisteredCalls bool `json:"allowUnregisteredCalls,omitempty"` // Selector: Selects the methods to which this rule applies. Use '*' to - // indicate all - // methods in all APIs. - // - // Refer to selector for syntax details. + // indicate all methods in all APIs. Refer to selector for syntax + // details. Selector string `json:"selector,omitempty"` // SkipServiceControl: If true, the selected method should skip service - // control and the control - // plane features, such as quota and billing, will not be - // available. - // This flag is used by Google Cloud Endpoints to bypass checks for - // internal - // methods, such as service health check methods. + // control and the control plane features, such as quota and billing, + // will not be available. This flag is used by Google Cloud Endpoints to + // bypass checks for internal methods, such as service health check + // methods. SkipServiceControl bool `json:"skipServiceControl,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -6278,11 +4806,9 @@ type OperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this -// method to poll the operation result at intervals as recommended by -// the API -// service. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. 
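For the UsageRule documentation reflowed above, the "allow unregistered calls for the entire service" example corresponds to this struct literal; a sketch built only from the Usage and UsageRule fields shown in this hunk:

// Sketch only. Assumes: import servicemanagement "google.golang.org/api/servicemanagement/v1"
// Allow unregistered (keyless) calls for every method of the service, per the example above.
var openUsage = &servicemanagement.Usage{
	Rules: []*servicemanagement.UsageRule{{
		Selector:               "*",
		AllowUnregisteredCalls: true,
	}},
}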
func (r *OperationsService) Get(name string) *OperationsGetCall { c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -6326,7 +4852,7 @@ func (c *OperationsGetCall) Header() http.Header { func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6388,7 +4914,7 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", // "flatPath": "v1/operations/{operationsId}", // "httpMethod": "GET", // "id": "servicemanagement.operations.get", @@ -6434,31 +4960,19 @@ func (r *OperationsService) List() *OperationsListCall { } // Filter sets the optional parameter "filter": A string for filtering -// Operations. -// The following filter fields are supported: -// -// * serviceName: Required. Only `=` operator is allowed. -// * startTime: The time this job was started, in ISO 8601 -// format. -// Allowed operators are `>=`, `>`, `<=`, and `<`. -// * status: Can be `done`, `in_progress`, or `failed`. Allowed -// operators are `=`, and `!=`. -// -// Filter expression supports conjunction (AND) and disjunction (OR) -// logical operators. However, the serviceName restriction must be at -// the -// top-level and can only be combined with other restrictions via the -// AND -// logical operator. -// -// Examples: -// -// * `serviceName={some-service}.googleapis.com` -// * `serviceName={some-service}.googleapis.com AND -// startTime>="2017-02-01" -// * `serviceName={some-service}.googleapis.com AND status=done` -// * `serviceName={some-service}.googleapis.com AND (status=done OR -// startTime>="2017-02-01")` +// Operations. The following filter fields are supported: * serviceName: +// Required. Only `=` operator is allowed. * startTime: The time this +// job was started, in ISO 8601 format. Allowed operators are `>=`, `>`, +// `<=`, and `<`. * status: Can be `done`, `in_progress`, or `failed`. +// Allowed operators are `=`, and `!=`. Filter expression supports +// conjunction (AND) and disjunction (OR) logical operators. However, +// the serviceName restriction must be at the top-level and can only be +// combined with other restrictions via the AND logical operator. +// Examples: * `serviceName={some-service}.googleapis.com` * +// `serviceName={some-service}.googleapis.com AND +// startTime>="2017-02-01" * `serviceName={some-service}.googleapis.com +// AND status=done` * `serviceName={some-service}.googleapis.com AND +// (status=done OR startTime>="2017-02-01")` func (c *OperationsListCall) Filter(filter string) *OperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -6471,8 +4985,8 @@ func (c *OperationsListCall) Name(name string) *OperationsListCall { } // PageSize sets the optional parameter "pageSize": The maximum number -// of operations to return. If unspecified, defaults to -// 50. The maximum value is 100. +// of operations to return. If unspecified, defaults to 50. 
The maximum +// value is 100. func (c *OperationsListCall) PageSize(pageSize int64) *OperationsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c @@ -6522,7 +5036,7 @@ func (c *OperationsListCall) Header() http.Header { func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6588,7 +5102,7 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsRe // "parameterOrder": [], // "parameters": { // "filter": { - // "description": "A string for filtering Operations.\n The following filter fields are supported\u0026#58;\n\n * serviceName\u0026#58; Required. Only `=` operator is allowed.\n * startTime\u0026#58; The time this job was started, in ISO 8601 format.\n Allowed operators are `\u003e=`, `\u003e`, `\u003c=`, and `\u003c`.\n * status\u0026#58; Can be `done`, `in_progress`, or `failed`. Allowed\n operators are `=`, and `!=`.\n\n Filter expression supports conjunction (AND) and disjunction (OR)\n logical operators. However, the serviceName restriction must be at the\n top-level and can only be combined with other restrictions via the AND\n logical operator.\n\n Examples\u0026#58;\n\n * `serviceName={some-service}.googleapis.com`\n * `serviceName={some-service}.googleapis.com AND startTime\u003e=\"2017-02-01\"`\n * `serviceName={some-service}.googleapis.com AND status=done`\n * `serviceName={some-service}.googleapis.com AND (status=done OR startTime\u003e=\"2017-02-01\")`", + // "description": "A string for filtering Operations. The following filter fields are supported: * serviceName: Required. Only `=` operator is allowed. * startTime: The time this job was started, in ISO 8601 format. Allowed operators are `\u003e=`, `\u003e`, `\u003c=`, and `\u003c`. * status: Can be `done`, `in_progress`, or `failed`. Allowed operators are `=`, and `!=`. Filter expression supports conjunction (AND) and disjunction (OR) logical operators. However, the serviceName restriction must be at the top-level and can only be combined with other restrictions via the AND logical operator. Examples: * `serviceName={some-service}.googleapis.com` * `serviceName={some-service}.googleapis.com AND startTime\u003e=\"2017-02-01\"` * `serviceName={some-service}.googleapis.com AND status=done` * `serviceName={some-service}.googleapis.com AND (status=done OR startTime\u003e=\"2017-02-01\")`", // "location": "query", // "type": "string" // }, @@ -6598,7 +5112,7 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsRe // "type": "string" // }, // "pageSize": { - // "description": "The maximum number of operations to return. If unspecified, defaults to\n50. The maximum value is 100.", + // "description": "The maximum number of operations to return. If unspecified, defaults to 50. The maximum value is 100.", // "format": "int32", // "location": "query", // "type": "integer" @@ -6652,21 +5166,12 @@ type ServicesCreateCall struct { header_ http.Header } -// Create: Creates a new managed service. -// -// A managed service is immutable, and is subject to mandatory -// 30-day -// data retention. You cannot move a service or recreate it within 30 -// days -// after deletion. -// +// Create: Creates a new managed service. 
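The operations.list filter grammar described above can be driven through the generated builder; a hedged sketch, assuming the package's standard NewService constructor and the usual ListOperationsResponse shape:

// Sketch only. Assumes:
//   import (
//       "context"
//       servicemanagement "google.golang.org/api/servicemanagement/v1"
//   )
// NewService (Application Default Credentials) and resp.Operations are assumed from the
// standard generated-client layout.
func doneOperations(ctx context.Context) ([]*servicemanagement.Operation, error) {
	svc, err := servicemanagement.NewService(ctx)
	if err != nil {
		return nil, err
	}
	resp, err := svc.Operations.List().
		Filter(`serviceName=example.googleapis.com AND status=done`).
		PageSize(50). // defaults to 50, max 100 (see the description above)
		Do()
	if err != nil {
		return nil, err
	}
	return resp.Operations, nil
}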
A managed service is +// immutable, and is subject to mandatory 30-day data retention. You +// cannot move a service or recreate it within 30 days after deletion. // One producer project can own no more than 500 services. For security -// and -// reliability purposes, a production service should be hosted in -// a -// dedicated producer project. -// -// Operation +// and reliability purposes, a production service should be hosted in a +// dedicated producer project. Operation func (r *ServicesService) Create(managedservice *ManagedService) *ServicesCreateCall { c := &ServicesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.managedservice = managedservice @@ -6700,7 +5205,7 @@ func (c *ServicesCreateCall) Header() http.Header { func (c *ServicesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6761,7 +5266,7 @@ func (c *ServicesCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Creates a new managed service.\n\nA managed service is immutable, and is subject to mandatory 30-day\ndata retention. You cannot move a service or recreate it within 30 days\nafter deletion.\n\nOne producer project can own no more than 500 services. For security and\nreliability purposes, a production service should be hosted in a\ndedicated producer project.\n\nOperation\u003cresponse: ManagedService\u003e", + // "description": "Creates a new managed service. A managed service is immutable, and is subject to mandatory 30-day data retention. You cannot move a service or recreate it within 30 days after deletion. One producer project can own no more than 500 services. For security and reliability purposes, a production service should be hosted in a dedicated producer project. Operation", // "flatPath": "v1/services", // "httpMethod": "POST", // "id": "servicemanagement.services.create", @@ -6793,14 +5298,9 @@ type ServicesDeleteCall struct { } // Delete: Deletes a managed service. This method will change the -// service to the -// `Soft-Delete` state for 30 days. Within this period, service -// producers may -// call UndeleteService to restore the service. -// After 30 days, the service will be permanently -// deleted. -// -// Operation +// service to the `Soft-Delete` state for 30 days. Within this period, +// service producers may call UndeleteService to restore the service. +// After 30 days, the service will be permanently deleted. Operation func (r *ServicesService) Delete(serviceName string) *ServicesDeleteCall { c := &ServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.serviceName = serviceName @@ -6834,7 +5334,7 @@ func (c *ServicesDeleteCall) Header() http.Header { func (c *ServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6893,7 +5393,7 @@ func (c *ServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Deletes a managed service. This method will change the service to the\n`Soft-Delete` state for 30 days. 
Within this period, service producers may\ncall UndeleteService to restore the service.\nAfter 30 days, the service will be permanently deleted.\n\nOperation\u003cresponse: google.protobuf.Empty\u003e", + // "description": "Deletes a managed service. This method will change the service to the `Soft-Delete` state for 30 days. Within this period, service producers may call UndeleteService to restore the service. After 30 days, the service will be permanently deleted. Operation", // "flatPath": "v1/services/{serviceName}", // "httpMethod": "DELETE", // "id": "servicemanagement.services.delete", @@ -6902,7 +5402,7 @@ func (c *ServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error // ], // "parameters": { // "serviceName": { - // "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + // "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", // "location": "path", // "required": true, // "type": "string" @@ -6920,151 +5420,6 @@ func (c *ServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "servicemanagement.services.disable": - -type ServicesDisableCall struct { - s *APIService - serviceName string - disableservicerequest *DisableServiceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Disable: Disables a service for a project, so it can no longer be -// be used for the project. It prevents accidental usage that may -// cause -// unexpected billing charges or security leaks. -// -// Operation -func (r *ServicesService) Disable(serviceName string, disableservicerequest *DisableServiceRequest) *ServicesDisableCall { - c := &ServicesDisableCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.serviceName = serviceName - c.disableservicerequest = disableservicerequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ServicesDisableCall) Fields(s ...googleapi.Field) *ServicesDisableCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ServicesDisableCall) Context(ctx context.Context) *ServicesDisableCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ServicesDisableCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ServicesDisableCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disableservicerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}:disable") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "serviceName": c.serviceName, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "servicemanagement.services.disable" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ServicesDisableCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Disables a service for a project, so it can no longer be\nbe used for the project. It prevents accidental usage that may cause\nunexpected billing charges or security leaks.\n\nOperation\u003cresponse: DisableServiceResponse\u003e", - // "flatPath": "v1/services/{serviceName}:disable", - // "httpMethod": "POST", - // "id": "servicemanagement.services.disable", - // "parameterOrder": [ - // "serviceName" - // ], - // "parameters": { - // "serviceName": { - // "description": "Required. Name of the service to disable. Specifying an unknown service name\nwill cause the request to fail.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/services/{serviceName}:disable", - // "request": { - // "$ref": "DisableServiceRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/service.management" - // ] - // } - -} - // method id "servicemanagement.services.enable": type ServicesEnableCall struct { @@ -7076,13 +5431,10 @@ type ServicesEnableCall struct { header_ http.Header } -// Enable: Enables a service for a project, so it can be used -// for the project. See -// [Cloud Auth Guide](https://cloud.google.com/docs/authentication) -// for -// more information. -// -// Operation +// Enable: Enables a service for a project, so it can be used for the +// project. See [Cloud Auth +// Guide](https://cloud.google.com/docs/authentication) for more +// information. 
Operation func (r *ServicesService) Enable(serviceName string, enableservicerequest *EnableServiceRequest) *ServicesEnableCall { c := &ServicesEnableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.serviceName = serviceName @@ -7117,7 +5469,7 @@ func (c *ServicesEnableCall) Header() http.Header { func (c *ServicesEnableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7181,7 +5533,7 @@ func (c *ServicesEnableCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Enables a service for a project, so it can be used\nfor the project. See\n[Cloud Auth Guide](https://cloud.google.com/docs/authentication) for\nmore information.\n\nOperation\u003cresponse: EnableServiceResponse\u003e", + // "description": "Enables a service for a project, so it can be used for the project. See [Cloud Auth Guide](https://cloud.google.com/docs/authentication) for more information. Operation", // "flatPath": "v1/services/{serviceName}:enable", // "httpMethod": "POST", // "id": "servicemanagement.services.enable", @@ -7190,7 +5542,7 @@ func (c *ServicesEnableCall) Do(opts ...googleapi.CallOption) (*Operation, error // ], // "parameters": { // "serviceName": { - // "description": "Required. Name of the service to enable. Specifying an unknown service name will\ncause the request to fail.", + // "description": "Required. Name of the service to enable. Specifying an unknown service name will cause the request to fail.", // "location": "path", // "required": true, // "type": "string" @@ -7222,23 +5574,15 @@ type ServicesGenerateConfigReportCall struct { } // GenerateConfigReport: Generates and returns a report (errors, -// warnings and changes from -// existing configurations) associated -// with -// GenerateConfigReportRequest.new_value -// -// If GenerateConfigReportRequest.old_value is -// specified, +// warnings and changes from existing configurations) associated with +// GenerateConfigReportRequest.new_value If +// GenerateConfigReportRequest.old_value is specified, // GenerateConfigReportRequest will contain a single ChangeReport based -// on the -// comparison between GenerateConfigReportRequest.new_value -// and -// GenerateConfigReportRequest.old_value. -// If GenerateConfigReportRequest.old_value is not specified, this -// method +// on the comparison between GenerateConfigReportRequest.new_value and +// GenerateConfigReportRequest.old_value. If +// GenerateConfigReportRequest.old_value is not specified, this method // will compare GenerateConfigReportRequest.new_value with the last -// pushed -// service configuration. +// pushed service configuration. 
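Enabling a service for a consumer project via the Enable builder above might look as follows; this is a sketch, and the ConsumerId field (with its "project:{PROJECT-ID}" format) is assumed from the EnableServiceRequest type defined earlier in this file rather than shown in this hunk:

// Sketch only. Assumes: import servicemanagement "google.golang.org/api/servicemanagement/v1"
// ConsumerId is assumed from the EnableServiceRequest type defined earlier in this file.
func enableForProject(svc *servicemanagement.APIService) (*servicemanagement.Operation, error) {
	return svc.Services.Enable("example.googleapis.com", &servicemanagement.EnableServiceRequest{
		ConsumerId: "project:my-project", // hypothetical consumer project
	}).Do()
}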
func (r *ServicesService) GenerateConfigReport(generateconfigreportrequest *GenerateConfigReportRequest) *ServicesGenerateConfigReportCall { c := &ServicesGenerateConfigReportCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.generateconfigreportrequest = generateconfigreportrequest @@ -7272,7 +5616,7 @@ func (c *ServicesGenerateConfigReportCall) Header() http.Header { func (c *ServicesGenerateConfigReportCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7333,7 +5677,7 @@ func (c *ServicesGenerateConfigReportCall) Do(opts ...googleapi.CallOption) (*Ge } return ret, nil // { - // "description": "Generates and returns a report (errors, warnings and changes from\nexisting configurations) associated with\nGenerateConfigReportRequest.new_value\n\nIf GenerateConfigReportRequest.old_value is specified,\nGenerateConfigReportRequest will contain a single ChangeReport based on the\ncomparison between GenerateConfigReportRequest.new_value and\nGenerateConfigReportRequest.old_value.\nIf GenerateConfigReportRequest.old_value is not specified, this method\nwill compare GenerateConfigReportRequest.new_value with the last pushed\nservice configuration.", + // "description": "Generates and returns a report (errors, warnings and changes from existing configurations) associated with GenerateConfigReportRequest.new_value If GenerateConfigReportRequest.old_value is specified, GenerateConfigReportRequest will contain a single ChangeReport based on the comparison between GenerateConfigReportRequest.new_value and GenerateConfigReportRequest.old_value. If GenerateConfigReportRequest.old_value is not specified, this method will compare GenerateConfigReportRequest.new_value with the last pushed service configuration.", // "flatPath": "v1/services:generateConfigReport", // "httpMethod": "POST", // "id": "servicemanagement.services.generateConfigReport", @@ -7366,8 +5710,7 @@ type ServicesGetCall struct { } // Get: Gets a managed service. Authentication is required unless the -// service is -// public. +// service is public. func (r *ServicesService) Get(serviceName string) *ServicesGetCall { c := &ServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.serviceName = serviceName @@ -7411,7 +5754,7 @@ func (c *ServicesGetCall) Header() http.Header { func (c *ServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7473,7 +5816,7 @@ func (c *ServicesGetCall) Do(opts ...googleapi.CallOption) (*ManagedService, err } return ret, nil // { - // "description": "Gets a managed service. Authentication is required unless the service is\npublic.", + // "description": "Gets a managed service. Authentication is required unless the service is public.", // "flatPath": "v1/services/{serviceName}", // "httpMethod": "GET", // "id": "servicemanagement.services.get", @@ -7482,7 +5825,7 @@ func (c *ServicesGetCall) Do(opts ...googleapi.CallOption) (*ManagedService, err // ], // "parameters": { // "serviceName": { - // "description": "Required. The name of the service. 
See the `ServiceManager` overview for naming\nrequirements. For example: `example.googleapis.com`.", + // "description": "Required. The name of the service. See the `ServiceManager` overview for naming requirements. For example: `example.googleapis.com`.", // "location": "path", // "required": true, // "type": "string" @@ -7522,23 +5865,22 @@ func (r *ServicesService) GetConfig(serviceName string) *ServicesGetConfigCall { } // ConfigId sets the optional parameter "configId": Required. The id of -// the service configuration resource. -// -// This field must be specified for the server to return all fields, -// including -// `SourceInfo`. +// the service configuration resource. This field must be specified for +// the server to return all fields, including `SourceInfo`. func (c *ServicesGetConfigCall) ConfigId(configId string) *ServicesGetConfigCall { c.urlParams_.Set("configId", configId) return c } // View sets the optional parameter "view": Specifies which parts of the -// Service Config should be returned in the -// response. +// Service Config should be returned in the response. // // Possible values: -// "BASIC" -// "FULL" +// "BASIC" - Server response includes all fields except SourceInfo. +// "FULL" - Server response includes all fields including SourceInfo. +// SourceFiles are of type 'google.api.servicemanagement.v1.ConfigFile' +// and are only available for configs created using the +// SubmitConfigSource method. func (c *ServicesGetConfigCall) View(view string) *ServicesGetConfigCall { c.urlParams_.Set("view", view) return c @@ -7581,7 +5923,7 @@ func (c *ServicesGetConfigCall) Header() http.Header { func (c *ServicesGetConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7652,22 +5994,26 @@ func (c *ServicesGetConfigCall) Do(opts ...googleapi.CallOption) (*Service, erro // ], // "parameters": { // "configId": { - // "description": "Required. The id of the service configuration resource.\n\nThis field must be specified for the server to return all fields, including\n`SourceInfo`.", + // "description": "Required. The id of the service configuration resource. This field must be specified for the server to return all fields, including `SourceInfo`.", // "location": "query", // "type": "string" // }, // "serviceName": { - // "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + // "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", // "location": "path", // "required": true, // "type": "string" // }, // "view": { - // "description": "Specifies which parts of the Service Config should be returned in the\nresponse.", + // "description": "Specifies which parts of the Service Config should be returned in the response.", // "enum": [ // "BASIC", // "FULL" // ], + // "enumDescriptions": [ + // "Server response includes all fields except SourceInfo.", + // "Server response includes all fields including SourceInfo. SourceFiles are of type 'google.api.servicemanagement.v1.ConfigFile' and are only available for configs created using the SubmitConfigSource method." 
+ // ], // "location": "query", // "type": "string" // } @@ -7697,9 +6043,8 @@ type ServicesGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy // set. func (r *ServicesService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ServicesGetIamPolicyCall { c := &ServicesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -7735,7 +6080,7 @@ func (c *ServicesGetIamPolicyCall) Header() http.Header { func (c *ServicesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7799,7 +6144,7 @@ func (c *ServicesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", // "flatPath": "v1/services/{servicesId}:getIamPolicy", // "httpMethod": "POST", // "id": "servicemanagement.services.getIamPolicy", @@ -7808,7 +6153,7 @@ func (c *ServicesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^services/[^/]+$", // "required": true, @@ -7842,47 +6187,35 @@ type ServicesListCall struct { header_ http.Header } -// List: Lists managed services. -// -// Returns all public services. For authenticated users, also returns -// all -// services the calling user has "servicemanagement.services.get" -// permission -// for. -// -// **BETA:** If the caller specifies the `consumer_id`, it returns only -// the -// services enabled on the consumer. The `consumer_id` must have the -// format -// of "project:{PROJECT-ID}". +// List: Lists managed services. Returns all public services. For +// authenticated users, also returns all services the calling user has +// "servicemanagement.services.get" permission for. **BETA:** If the +// caller specifies the `consumer_id`, it returns only the services +// enabled on the consumer. The `consumer_id` must have the format of +// "project:{PROJECT-ID}". func (r *ServicesService) List() *ServicesListCall { c := &ServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c } // ConsumerId sets the optional parameter "consumerId": Include services -// consumed by the specified consumer. -// -// The Google Service Management implementation accepts the -// following -// forms: -// - project: +// consumed by the specified consumer. 
The Google Service Management +// implementation accepts the following forms: - project: func (c *ServicesListCall) ConsumerId(consumerId string) *ServicesListCall { c.urlParams_.Set("consumerId", consumerId) return c } // PageSize sets the optional parameter "pageSize": The max number of -// items to include in the response list. Page size is 50 -// if not specified. Maximum value is 100. +// items to include in the response list. Page size is 50 if not +// specified. Maximum value is 100. func (c *ServicesListCall) PageSize(pageSize int64) *ServicesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Token identifying -// which result to start with; returned by a previous list -// call. +// which result to start with; returned by a previous list call. func (c *ServicesListCall) PageToken(pageToken string) *ServicesListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -7932,7 +6265,7 @@ func (c *ServicesListCall) Header() http.Header { func (c *ServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7991,25 +6324,25 @@ func (c *ServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesRespon } return ret, nil // { - // "description": "Lists managed services.\n\nReturns all public services. For authenticated users, also returns all\nservices the calling user has \"servicemanagement.services.get\" permission\nfor.\n\n**BETA:** If the caller specifies the `consumer_id`, it returns only the\nservices enabled on the consumer. The `consumer_id` must have the format\nof \"project:{PROJECT-ID}\".", + // "description": "Lists managed services. Returns all public services. For authenticated users, also returns all services the calling user has \"servicemanagement.services.get\" permission for. **BETA:** If the caller specifies the `consumer_id`, it returns only the services enabled on the consumer. The `consumer_id` must have the format of \"project:{PROJECT-ID}\".", // "flatPath": "v1/services", // "httpMethod": "GET", // "id": "servicemanagement.services.list", // "parameterOrder": [], // "parameters": { // "consumerId": { - // "description": "Include services consumed by the specified consumer.\n\nThe Google Service Management implementation accepts the following\nforms:\n- project:\u003cproject_id\u003e", + // "description": "Include services consumed by the specified consumer. The Google Service Management implementation accepts the following forms: - project:", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "The max number of items to include in the response list. Page size is 50\nif not specified. Maximum value is 100.", + // "description": "The max number of items to include in the response list. Page size is 50 if not specified. 
Maximum value is 100.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Token identifying which result to start with; returned by a previous list\ncall.", + // "description": "Token identifying which result to start with; returned by a previous list call.", // "location": "query", // "type": "string" // }, @@ -8066,11 +6399,8 @@ type ServicesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -// -// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` -// errors. +// resource. Replaces any existing policy. Can return `NOT_FOUND`, +// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. func (r *ServicesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ServicesSetIamPolicyCall { c := &ServicesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -8105,7 +6435,7 @@ func (c *ServicesSetIamPolicyCall) Header() http.Header { func (c *ServicesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8169,7 +6499,7 @@ func (c *ServicesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", // "flatPath": "v1/services/{servicesId}:setIamPolicy", // "httpMethod": "POST", // "id": "servicemanagement.services.setIamPolicy", @@ -8178,7 +6508,7 @@ func (c *ServicesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^services/[^/]+$", // "required": true, @@ -8212,16 +6542,11 @@ type ServicesTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a `NOT_FOUND` error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. This +// operation may "fail open" without warning. 
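The BETA consumer-scoped listing described above maps onto the List builder like so; a sketch, with ManagedService.ServiceName assumed from the type defined earlier in this file:

// Sketch only. Assumes: import servicemanagement "google.golang.org/api/servicemanagement/v1"
// ManagedService.ServiceName is assumed from the type defined earlier in this file.
func consumerServiceNames(svc *servicemanagement.APIService) ([]string, error) {
	resp, err := svc.Services.List().
		ConsumerId("project:my-project"). // hypothetical consumer project
		PageSize(50).
		Do()
	if err != nil {
		return nil, err
	}
	var names []string
	for _, ms := range resp.Services {
		names = append(names, ms.ServiceName)
	}
	return names, nil
}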
func (r *ServicesService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ServicesTestIamPermissionsCall { c := &ServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -8256,7 +6581,7 @@ func (c *ServicesTestIamPermissionsCall) Header() http.Header { func (c *ServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8320,7 +6645,7 @@ func (c *ServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", // "flatPath": "v1/services/{servicesId}:testIamPermissions", // "httpMethod": "POST", // "id": "servicemanagement.services.testIamPermissions", @@ -8329,7 +6654,7 @@ func (c *ServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^services/[^/]+$", // "required": true, @@ -8364,14 +6689,9 @@ type ServicesUndeleteCall struct { } // Undelete: Revives a previously deleted managed service. The method -// restores the -// service using the configuration at the time the service was -// deleted. -// The target service must exist and must have been deleted within -// the -// last 30 days. -// -// Operation +// restores the service using the configuration at the time the service +// was deleted. The target service must exist and must have been deleted +// within the last 30 days. 
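Given the "fail open" caveat in the TestIamPermissions description above, the call is best treated as a UI hint rather than an authorization check; a sketch of the request itself, using the ^services/[^/]+$ resource pattern and the servicemanagement.services.get permission mentioned earlier in this hunk:

// Sketch only. Assumes: import servicemanagement "google.golang.org/api/servicemanagement/v1"
func canReadService(svc *servicemanagement.APIService) (bool, error) {
	resp, err := svc.Services.TestIamPermissions(
		"services/example.googleapis.com", // resource pattern: ^services/[^/]+$
		&servicemanagement.TestIamPermissionsRequest{
			Permissions: []string{"servicemanagement.services.get"},
		},
	).Do()
	if err != nil {
		return false, err
	}
	// The response echoes back only the permissions the caller actually holds.
	return len(resp.Permissions) == 1, nil
}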
Operation func (r *ServicesService) Undelete(serviceName string) *ServicesUndeleteCall { c := &ServicesUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.serviceName = serviceName @@ -8405,7 +6725,7 @@ func (c *ServicesUndeleteCall) Header() http.Header { func (c *ServicesUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8464,7 +6784,7 @@ func (c *ServicesUndeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Revives a previously deleted managed service. The method restores the\nservice using the configuration at the time the service was deleted.\nThe target service must exist and must have been deleted within the\nlast 30 days.\n\nOperation\u003cresponse: UndeleteServiceResponse\u003e", + // "description": "Revives a previously deleted managed service. The method restores the service using the configuration at the time the service was deleted. The target service must exist and must have been deleted within the last 30 days. Operation", // "flatPath": "v1/services/{serviceName}:undelete", // "httpMethod": "POST", // "id": "servicemanagement.services.undelete", @@ -8473,7 +6793,7 @@ func (c *ServicesUndeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err // ], // "parameters": { // "serviceName": { - // "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + // "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", // "location": "path", // "required": true, // "type": "string" @@ -8503,18 +6823,11 @@ type ServicesConfigsCreateCall struct { } // Create: Creates a new service configuration (version) for a managed -// service. -// This method only stores the service configuration. To roll out the -// service -// configuration to backend systems please -// call -// CreateServiceRollout. -// -// Only the 100 most recent service configurations and ones referenced -// by -// existing rollouts are kept for each service. The rest will be -// deleted -// eventually. +// service. This method only stores the service configuration. To roll +// out the service configuration to backend systems please call +// CreateServiceRollout. Only the 100 most recent service configurations +// and ones referenced by existing rollouts are kept for each service. +// The rest will be deleted eventually. 
func (r *ServicesConfigsService) Create(serviceName string, service *Service) *ServicesConfigsCreateCall { c := &ServicesConfigsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.serviceName = serviceName @@ -8549,7 +6862,7 @@ func (c *ServicesConfigsCreateCall) Header() http.Header { func (c *ServicesConfigsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8613,7 +6926,7 @@ func (c *ServicesConfigsCreateCall) Do(opts ...googleapi.CallOption) (*Service, } return ret, nil // { - // "description": "Creates a new service configuration (version) for a managed service.\nThis method only stores the service configuration. To roll out the service\nconfiguration to backend systems please call\nCreateServiceRollout.\n\nOnly the 100 most recent service configurations and ones referenced by\nexisting rollouts are kept for each service. The rest will be deleted\neventually.", + // "description": "Creates a new service configuration (version) for a managed service. This method only stores the service configuration. To roll out the service configuration to backend systems please call CreateServiceRollout. Only the 100 most recent service configurations and ones referenced by existing rollouts are kept for each service. The rest will be deleted eventually.", // "flatPath": "v1/services/{serviceName}/configs", // "httpMethod": "POST", // "id": "servicemanagement.services.configs.create", @@ -8622,7 +6935,7 @@ func (c *ServicesConfigsCreateCall) Do(opts ...googleapi.CallOption) (*Service, // ], // "parameters": { // "serviceName": { - // "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + // "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", // "location": "path", // "required": true, // "type": "string" @@ -8664,12 +6977,14 @@ func (r *ServicesConfigsService) Get(serviceName string, configId string) *Servi } // View sets the optional parameter "view": Specifies which parts of the -// Service Config should be returned in the -// response. +// Service Config should be returned in the response. // // Possible values: -// "BASIC" -// "FULL" +// "BASIC" - Server response includes all fields except SourceInfo. +// "FULL" - Server response includes all fields including SourceInfo. +// SourceFiles are of type 'google.api.servicemanagement.v1.ConfigFile' +// and are only available for configs created using the +// SubmitConfigSource method. 
func (c *ServicesConfigsGetCall) View(view string) *ServicesConfigsGetCall { c.urlParams_.Set("view", view) return c @@ -8712,7 +7027,7 @@ func (c *ServicesConfigsGetCall) Header() http.Header { func (c *ServicesConfigsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8785,23 +7100,27 @@ func (c *ServicesConfigsGetCall) Do(opts ...googleapi.CallOption) (*Service, err // ], // "parameters": { // "configId": { - // "description": "Required. The id of the service configuration resource.\n\nThis field must be specified for the server to return all fields, including\n`SourceInfo`.", + // "description": "Required. The id of the service configuration resource. This field must be specified for the server to return all fields, including `SourceInfo`.", // "location": "path", // "required": true, // "type": "string" // }, // "serviceName": { - // "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + // "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", // "location": "path", // "required": true, // "type": "string" // }, // "view": { - // "description": "Specifies which parts of the Service Config should be returned in the\nresponse.", + // "description": "Specifies which parts of the Service Config should be returned in the response.", // "enum": [ // "BASIC", // "FULL" // ], + // "enumDescriptions": [ + // "Server response includes all fields except SourceInfo.", + // "Server response includes all fields including SourceInfo. SourceFiles are of type 'google.api.servicemanagement.v1.ConfigFile' and are only available for configs created using the SubmitConfigSource method." + // ], // "location": "query", // "type": "string" // } @@ -8832,8 +7151,7 @@ type ServicesConfigsListCall struct { } // List: Lists the history of the service configuration for a managed -// service, -// from the newest to the oldest. +// service, from the newest to the oldest. func (r *ServicesConfigsService) List(serviceName string) *ServicesConfigsListCall { c := &ServicesConfigsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.serviceName = serviceName @@ -8841,8 +7159,8 @@ func (r *ServicesConfigsService) List(serviceName string) *ServicesConfigsListCa } // PageSize sets the optional parameter "pageSize": The max number of -// items to include in the response list. Page size is 50 -// if not specified. Maximum value is 100. +// items to include in the response list. Page size is 50 if not +// specified. Maximum value is 100. 
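The enumDescriptions added above clarify that only the FULL view returns SourceInfo, and only for configs created through SubmitConfigSource; requesting it looks roughly like this, assuming the same generated client as in the earlier sketches and a hypothetical config id:

// Sketch only. Assumes: import servicemanagement "google.golang.org/api/servicemanagement/v1"
func fetchFullConfig(svc *servicemanagement.APIService) (*servicemanagement.Service, error) {
	// "2020-10-09r0" is a hypothetical config id; FULL is required to get SourceInfo back.
	return svc.Services.Configs.Get("example.googleapis.com", "2020-10-09r0").
		View("FULL").
		Do()
}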
func (c *ServicesConfigsListCall) PageSize(pageSize int64) *ServicesConfigsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c @@ -8892,7 +7210,7 @@ func (c *ServicesConfigsListCall) Header() http.Header { func (c *ServicesConfigsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8954,7 +7272,7 @@ func (c *ServicesConfigsListCall) Do(opts ...googleapi.CallOption) (*ListService } return ret, nil // { - // "description": "Lists the history of the service configuration for a managed service,\nfrom the newest to the oldest.", + // "description": "Lists the history of the service configuration for a managed service, from the newest to the oldest.", // "flatPath": "v1/services/{serviceName}/configs", // "httpMethod": "GET", // "id": "servicemanagement.services.configs.list", @@ -8963,7 +7281,7 @@ func (c *ServicesConfigsListCall) Do(opts ...googleapi.CallOption) (*ListService // ], // "parameters": { // "pageSize": { - // "description": "The max number of items to include in the response list. Page size is 50\nif not specified. Maximum value is 100.", + // "description": "The max number of items to include in the response list. Page size is 50 if not specified. Maximum value is 100.", // "format": "int32", // "location": "query", // "type": "integer" @@ -8974,7 +7292,7 @@ func (c *ServicesConfigsListCall) Do(opts ...googleapi.CallOption) (*ListService // "type": "string" // }, // "serviceName": { - // "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + // "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", // "location": "path", // "required": true, // "type": "string" @@ -9027,24 +7345,13 @@ type ServicesConfigsSubmitCall struct { } // Submit: Creates a new service configuration (version) for a managed -// service based -// on -// user-supplied configuration source files (for example: -// OpenAPI -// Specification). This method stores the source configurations as well -// as the -// generated service configuration. To rollout the service configuration -// to -// other services, -// please call CreateServiceRollout. -// -// Only the 100 most recent configuration sources and ones referenced -// by -// existing service configurtions are kept for each service. The rest -// will be -// deleted eventually. -// -// Operation +// service based on user-supplied configuration source files (for +// example: OpenAPI Specification). This method stores the source +// configurations as well as the generated service configuration. To +// rollout the service configuration to other services, please call +// CreateServiceRollout. Only the 100 most recent configuration sources +// and ones referenced by existing service configurtions are kept for +// each service. The rest will be deleted eventually. 
Operation func (r *ServicesConfigsService) Submit(serviceName string, submitconfigsourcerequest *SubmitConfigSourceRequest) *ServicesConfigsSubmitCall { c := &ServicesConfigsSubmitCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.serviceName = serviceName @@ -9079,7 +7386,7 @@ func (c *ServicesConfigsSubmitCall) Header() http.Header { func (c *ServicesConfigsSubmitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9143,7 +7450,7 @@ func (c *ServicesConfigsSubmitCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a new service configuration (version) for a managed service based\non\nuser-supplied configuration source files (for example: OpenAPI\nSpecification). This method stores the source configurations as well as the\ngenerated service configuration. To rollout the service configuration to\nother services,\nplease call CreateServiceRollout.\n\nOnly the 100 most recent configuration sources and ones referenced by\nexisting service configurtions are kept for each service. The rest will be\ndeleted eventually.\n\nOperation\u003cresponse: SubmitConfigSourceResponse\u003e", + // "description": "Creates a new service configuration (version) for a managed service based on user-supplied configuration source files (for example: OpenAPI Specification). This method stores the source configurations as well as the generated service configuration. To rollout the service configuration to other services, please call CreateServiceRollout. Only the 100 most recent configuration sources and ones referenced by existing service configurtions are kept for each service. The rest will be deleted eventually. Operation", // "flatPath": "v1/services/{serviceName}/configs:submit", // "httpMethod": "POST", // "id": "servicemanagement.services.configs.submit", @@ -9152,7 +7459,7 @@ func (c *ServicesConfigsSubmitCall) Do(opts ...googleapi.CallOption) (*Operation // ], // "parameters": { // "serviceName": { - // "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + // "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", // "location": "path", // "required": true, // "type": "string" @@ -9184,9 +7491,8 @@ type ServicesConsumersGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy // set. 
func (r *ServicesConsumersService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ServicesConsumersGetIamPolicyCall { c := &ServicesConsumersGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -9222,7 +7528,7 @@ func (c *ServicesConsumersGetIamPolicyCall) Header() http.Header { func (c *ServicesConsumersGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9286,7 +7592,7 @@ func (c *ServicesConsumersGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", // "flatPath": "v1/services/{servicesId}/consumers/{consumersId}:getIamPolicy", // "httpMethod": "POST", // "id": "servicemanagement.services.consumers.getIamPolicy", @@ -9295,7 +7601,7 @@ func (c *ServicesConsumersGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^services/[^/]+/consumers/[^/]+$", // "required": true, @@ -9331,11 +7637,8 @@ type ServicesConsumersSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -// -// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` -// errors. +// resource. Replaces any existing policy. Can return `NOT_FOUND`, +// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. func (r *ServicesConsumersService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ServicesConsumersSetIamPolicyCall { c := &ServicesConsumersSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -9370,7 +7673,7 @@ func (c *ServicesConsumersSetIamPolicyCall) Header() http.Header { func (c *ServicesConsumersSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9434,7 +7737,7 @@ func (c *ServicesConsumersSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. 
Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", // "flatPath": "v1/services/{servicesId}/consumers/{consumersId}:setIamPolicy", // "httpMethod": "POST", // "id": "servicemanagement.services.consumers.setIamPolicy", @@ -9443,7 +7746,7 @@ func (c *ServicesConsumersSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^services/[^/]+/consumers/[^/]+$", // "required": true, @@ -9477,16 +7780,11 @@ type ServicesConsumersTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a `NOT_FOUND` error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. This +// operation may "fail open" without warning. func (r *ServicesConsumersService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ServicesConsumersTestIamPermissionsCall { c := &ServicesConsumersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -9521,7 +7819,7 @@ func (c *ServicesConsumersTestIamPermissionsCall) Header() http.Header { func (c *ServicesConsumersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9585,7 +7883,7 @@ func (c *ServicesConsumersTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. 
This operation may \"fail open\" without warning.", // "flatPath": "v1/services/{servicesId}/consumers/{consumersId}:testIamPermissions", // "httpMethod": "POST", // "id": "servicemanagement.services.consumers.testIamPermissions", @@ -9594,7 +7892,7 @@ func (c *ServicesConsumersTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^services/[^/]+/consumers/[^/]+$", // "required": true, @@ -9630,26 +7928,15 @@ type ServicesRolloutsCreateCall struct { } // Create: Creates a new service configuration rollout. Based on -// rollout, the -// Google Service Management will roll out the service configurations -// to -// different backend services. For example, the logging configuration -// will be -// pushed to Google Cloud Logging. -// -// Please note that any previous pending and running Rollouts and -// associated +// rollout, the Google Service Management will roll out the service +// configurations to different backend services. For example, the +// logging configuration will be pushed to Google Cloud Logging. Please +// note that any previous pending and running Rollouts and associated // Operations will be automatically cancelled so that the latest Rollout -// will -// not be blocked by previous Rollouts. -// -// Only the 100 most recent (in any state) and the last 10 successful -// (if not -// already part of the set of 100 most recent) rollouts are kept for -// each -// service. The rest will be deleted eventually. -// -// Operation +// will not be blocked by previous Rollouts. Only the 100 most recent +// (in any state) and the last 10 successful (if not already part of the +// set of 100 most recent) rollouts are kept for each service. The rest +// will be deleted eventually. Operation func (r *ServicesRolloutsService) Create(serviceName string, rollout *Rollout) *ServicesRolloutsCreateCall { c := &ServicesRolloutsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.serviceName = serviceName @@ -9684,7 +7971,7 @@ func (c *ServicesRolloutsCreateCall) Header() http.Header { func (c *ServicesRolloutsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9748,7 +8035,7 @@ func (c *ServicesRolloutsCreateCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a new service configuration rollout. Based on rollout, the\nGoogle Service Management will roll out the service configurations to\ndifferent backend services. For example, the logging configuration will be\npushed to Google Cloud Logging.\n\nPlease note that any previous pending and running Rollouts and associated\nOperations will be automatically cancelled so that the latest Rollout will\nnot be blocked by previous Rollouts.\n\nOnly the 100 most recent (in any state) and the last 10 successful (if not\nalready part of the set of 100 most recent) rollouts are kept for each\nservice. 
The rest will be deleted eventually.\n\nOperation\u003cresponse: Rollout\u003e", + // "description": "Creates a new service configuration rollout. Based on rollout, the Google Service Management will roll out the service configurations to different backend services. For example, the logging configuration will be pushed to Google Cloud Logging. Please note that any previous pending and running Rollouts and associated Operations will be automatically cancelled so that the latest Rollout will not be blocked by previous Rollouts. Only the 100 most recent (in any state) and the last 10 successful (if not already part of the set of 100 most recent) rollouts are kept for each service. The rest will be deleted eventually. Operation", // "flatPath": "v1/services/{serviceName}/rollouts", // "httpMethod": "POST", // "id": "servicemanagement.services.rollouts.create", @@ -9757,7 +8044,7 @@ func (c *ServicesRolloutsCreateCall) Do(opts ...googleapi.CallOption) (*Operatio // ], // "parameters": { // "serviceName": { - // "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + // "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", // "location": "path", // "required": true, // "type": "string" @@ -9835,7 +8122,7 @@ func (c *ServicesRolloutsGetCall) Header() http.Header { func (c *ServicesRolloutsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9914,7 +8201,7 @@ func (c *ServicesRolloutsGetCall) Do(opts ...googleapi.CallOption) (*Rollout, er // "type": "string" // }, // "serviceName": { - // "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. For example: `example.googleapis.com`.", + // "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", // "location": "path", // "required": true, // "type": "string" @@ -9946,8 +8233,7 @@ type ServicesRolloutsListCall struct { } // List: Lists the history of the service configuration rollouts for a -// managed -// service, from the newest to the oldest. +// managed service, from the newest to the oldest. func (r *ServicesRolloutsService) List(serviceName string) *ServicesRolloutsListCall { c := &ServicesRolloutsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.serviceName = serviceName @@ -9955,24 +8241,20 @@ func (r *ServicesRolloutsService) List(serviceName string) *ServicesRolloutsList } // Filter sets the optional parameter "filter": Required. Use `filter` -// to return subset of rollouts. -// The following filters are supported: -// -- To limit the results to only those in -// [status](google.api.servicemanagement.v1.RolloutStatus) -// 'SUCCESS', -// use filter='status=SUCCESS' -// -- To limit the results to those in -// [status](google.api.servicemanagement.v1.RolloutStatus) -// 'CANCELLED' -// or 'FAILED', use filter='status=CANCELLED OR status=FAILED' +// to return subset of rollouts. 
The following filters are supported: -- +// To limit the results to only those in +// [status](google.api.servicemanagement.v1.RolloutStatus) 'SUCCESS', +// use filter='status=SUCCESS' -- To limit the results to those in +// [status](google.api.servicemanagement.v1.RolloutStatus) 'CANCELLED' +// or 'FAILED', use filter='status=CANCELLED OR status=FAILED' func (c *ServicesRolloutsListCall) Filter(filter string) *ServicesRolloutsListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": The max number of -// items to include in the response list. Page size is 50 -// if not specified. Maximum value is 100. +// items to include in the response list. Page size is 50 if not +// specified. Maximum value is 100. func (c *ServicesRolloutsListCall) PageSize(pageSize int64) *ServicesRolloutsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c @@ -10022,7 +8304,7 @@ func (c *ServicesRolloutsListCall) Header() http.Header { func (c *ServicesRolloutsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10084,7 +8366,7 @@ func (c *ServicesRolloutsListCall) Do(opts ...googleapi.CallOption) (*ListServic } return ret, nil // { - // "description": "Lists the history of the service configuration rollouts for a managed\nservice, from the newest to the oldest.", + // "description": "Lists the history of the service configuration rollouts for a managed service, from the newest to the oldest.", // "flatPath": "v1/services/{serviceName}/rollouts", // "httpMethod": "GET", // "id": "servicemanagement.services.rollouts.list", @@ -10093,12 +8375,12 @@ func (c *ServicesRolloutsListCall) Do(opts ...googleapi.CallOption) (*ListServic // ], // "parameters": { // "filter": { - // "description": "Required. Use `filter` to return subset of rollouts.\nThe following filters are supported:\n -- To limit the results to only those in\n [status](google.api.servicemanagement.v1.RolloutStatus) 'SUCCESS',\n use filter='status=SUCCESS'\n -- To limit the results to those in\n [status](google.api.servicemanagement.v1.RolloutStatus) 'CANCELLED'\n or 'FAILED', use filter='status=CANCELLED OR status=FAILED'", + // "description": "Required. Use `filter` to return subset of rollouts. The following filters are supported: -- To limit the results to only those in [status](google.api.servicemanagement.v1.RolloutStatus) 'SUCCESS', use filter='status=SUCCESS' -- To limit the results to those in [status](google.api.servicemanagement.v1.RolloutStatus) 'CANCELLED' or 'FAILED', use filter='status=CANCELLED OR status=FAILED'", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "The max number of items to include in the response list. Page size is 50\nif not specified. Maximum value is 100.", + // "description": "The max number of items to include in the response list. Page size is 50 if not specified. Maximum value is 100.", // "format": "int32", // "location": "query", // "type": "integer" @@ -10109,7 +8391,7 @@ func (c *ServicesRolloutsListCall) Do(opts ...googleapi.CallOption) (*ListServic // "type": "string" // }, // "serviceName": { - // "description": "Required. The name of the service. See the [overview](/service-management/overview)\nfor naming requirements. 
For example: `example.googleapis.com`.", + // "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.", // "location": "path", // "required": true, // "type": "string" diff --git a/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-api.json b/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-api.json index baf992df071..c1f8592558b 100644 --- a/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-api.json +++ b/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-api.json @@ -111,7 +111,7 @@ "operations": { "methods": { "cancel": { - "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", "flatPath": "v1/operations/{operationsId}:cancel", "httpMethod": "POST", "id": "servicenetworking.operations.cancel", @@ -140,7 +140,7 @@ ] }, "delete": { - "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", "flatPath": "v1/operations/{operationsId}", "httpMethod": "DELETE", "id": "servicenetworking.operations.delete", @@ -166,7 +166,7 @@ ] }, "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v1/operations/{operationsId}", "httpMethod": "GET", "id": "servicenetworking.operations.get", @@ -192,7 +192,7 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. 
To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", "flatPath": "v1/operations", "httpMethod": "GET", "id": "servicenetworking.operations.list", @@ -238,7 +238,7 @@ "services": { "methods": { "addSubnetwork": { - "description": "For service producers, provisions a new subnet in a peered service's shared\nVPC network in the requested region and with the requested size that's\nexpressed as a CIDR range (number of leading bits of ipV4 network mask).\nThe method checks against the assigned allocated ranges to find a\nnon-conflicting IP address range. The method will reuse a subnet if\nsubsequent calls contain the same subnet name, region, and prefix length.\nThis method will make producer's tenant project to be a shared VPC service\nproject as needed.", + "description": "For service producers, provisions a new subnet in a peered service's shared VPC network in the requested region and with the requested size that's expressed as a CIDR range (number of leading bits of ipV4 network mask). The method checks against the assigned allocated ranges to find a non-conflicting IP address range. The method will reuse a subnet if subsequent calls contain the same subnet name, region, and prefix length. This method will make producer's tenant project to be a shared VPC service project as needed.", "flatPath": "v1/services/{servicesId}/{servicesId1}/{servicesId2}:addSubnetwork", "httpMethod": "POST", "id": "servicenetworking.services.addSubnetwork", @@ -247,7 +247,7 @@ ], "parameters": { "parent": { - "description": "Required. A tenant project in the service producer organization, in the\nfollowing format: services/{service}/{collection-id}/{resource-id}.\n{collection-id} is the cloud resource collection type that represents the\ntenant project. Only `projects` are supported.\n{resource-id} is the tenant project numeric id, such as\n`123456`. {service} the name of the peering service, such as\n`service-peering.example.com`. This service must already be\nenabled in the service consumer's project.", + "description": "Required. A tenant project in the service producer organization, in the following format: services/{service}/{collection-id}/{resource-id}. {collection-id} is the cloud resource collection type that represents the tenant project. Only `projects` are supported. {resource-id} is the tenant project numeric id, such as `123456`. {service} the name of the peering service, such as `service-peering.example.com`. 
This service must already be enabled in the service consumer's project.", "location": "path", "pattern": "^services/[^/]+/[^/]+/[^/]+$", "required": true, @@ -276,7 +276,7 @@ ], "parameters": { "parent": { - "description": "The service that is managing peering connectivity for a service producer's\norganization. For Google services that support this functionality, this\nvalue is `services/servicenetworking.googleapis.com`.", + "description": "The service that is managing peering connectivity for a service producer's organization. For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`.", "location": "path", "pattern": "^services/[^/]+$", "required": true, @@ -305,7 +305,7 @@ ], "parameters": { "parent": { - "description": "The service that is managing peering connectivity for a service producer's\norganization. For Google services that support this functionality, this\nvalue is `services/servicenetworking.googleapis.com`.", + "description": "The service that is managing peering connectivity for a service producer's organization. For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`.", "location": "path", "pattern": "^services/[^/]+$", "required": true, @@ -325,7 +325,7 @@ ] }, "searchRange": { - "description": "Service producers can use this method to find a currently unused range\nwithin consumer allocated ranges. This returned range is not reserved,\nand not guaranteed to remain unused. It will validate previously provided\nallocated ranges, find non-conflicting sub-range of requested size\n(expressed in number of leading bits of ipv4 network mask, as in CIDR range\nnotation).", + "description": "Service producers can use this method to find a currently unused range within consumer allocated ranges. This returned range is not reserved, and not guaranteed to remain unused. It will validate previously provided allocated ranges, find non-conflicting sub-range of requested size (expressed in number of leading bits of ipv4 network mask, as in CIDR range notation).", "flatPath": "v1/services/{servicesId}:searchRange", "httpMethod": "POST", "id": "servicenetworking.services.searchRange", @@ -334,7 +334,7 @@ ], "parameters": { "parent": { - "description": "Required. This is in a form services/{service}. {service} the name of the private\naccess management service, for example 'service-peering.example.com'.", + "description": "Required. This is in a form services/{service}. {service} the name of the private access management service, for example 'service-peering.example.com'.", "location": "path", "pattern": "^services/[^/]+$", "required": true, @@ -354,7 +354,7 @@ ] }, "validate": { - "description": "Service producers use this method to validate if the consumer provided\nnetwork, project and requested range are valid. This allows them to use\na fail-fast mechanism for consumer requests, and not have to wait for\nAddSubnetwork operation completion to determine if user request is invalid.", + "description": "Service producers use this method to validate if the consumer provided network, project and requested range are valid. This allows them to use a fail-fast mechanism for consumer requests, and not have to wait for AddSubnetwork operation completion to determine if user request is invalid.", "flatPath": "v1/services/{servicesId}:validate", "httpMethod": "POST", "id": "servicenetworking.services.validate", @@ -363,7 +363,7 @@ ], "parameters": { "parent": { - "description": "Required. 
This is in a form services/{service} where {service} is the name of the\nprivate access management service. For example\n'service-peering.example.com'.", + "description": "Required. This is in a form services/{service} where {service} is the name of the private access management service. For example 'service-peering.example.com'.", "location": "path", "pattern": "^services/[^/]+$", "required": true, @@ -387,7 +387,7 @@ "connections": { "methods": { "create": { - "description": "Creates a private connection that establishes a VPC Network Peering\nconnection to a VPC network in the service producer's organization.\nThe administrator of the service consumer's VPC network invokes this\nmethod. The administrator must assign one or more allocated IP ranges for\nprovisioning subnetworks in the service producer's VPC network. This\nconnection is used for all supported services in the service producer's\norganization, so it only needs to be invoked once.", + "description": "Creates a private connection that establishes a VPC Network Peering connection to a VPC network in the service producer's organization. The administrator of the service consumer's VPC network invokes this method. The administrator must assign one or more allocated IP ranges for provisioning subnetworks in the service producer's VPC network. This connection is used for all supported services in the service producer's organization, so it only needs to be invoked once.", "flatPath": "v1/services/{servicesId}/connections", "httpMethod": "POST", "id": "servicenetworking.services.connections.create", @@ -396,7 +396,7 @@ ], "parameters": { "parent": { - "description": "The service that is managing peering connectivity for a service producer's\norganization. For Google services that support this functionality, this\nvalue is `services/servicenetworking.googleapis.com`.", + "description": "The service that is managing peering connectivity for a service producer's organization. For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`.", "location": "path", "pattern": "^services/[^/]+$", "required": true, @@ -416,7 +416,7 @@ ] }, "list": { - "description": "List the private connections that are configured in a service consumer's\nVPC network.", + "description": "List the private connections that are configured in a service consumer's VPC network.", "flatPath": "v1/services/{servicesId}/connections", "httpMethod": "GET", "id": "servicenetworking.services.connections.list", @@ -425,12 +425,12 @@ ], "parameters": { "network": { - "description": "The name of service consumer's VPC network that's connected with service\nproducer network through a private connection. The network name must be in\nthe following format:\n`projects/{project}/global/networks/{network}`. {project} is a\nproject number, such as in `12345` that includes the VPC service\nconsumer's VPC network. {network} is the name of the service consumer's VPC\nnetwork.", + "description": "The name of service consumer's VPC network that's connected with service producer network through a private connection. The network name must be in the following format: `projects/{project}/global/networks/{network}`. {project} is a project number, such as in `12345` that includes the VPC service consumer's VPC network. {network} is the name of the service consumer's VPC network.", "location": "query", "type": "string" }, "parent": { - "description": "The service that is managing peering connectivity for a service producer's\norganization. 
For Google services that support this functionality, this\nvalue is `services/servicenetworking.googleapis.com`.\nIf you specify `services/-` as the parameter value, all configured peering\nservices are listed.", + "description": "The service that is managing peering connectivity for a service producer's organization. For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`. If you specify `services/-` as the parameter value, all configured peering services are listed.", "location": "path", "pattern": "^services/[^/]+$", "required": true, @@ -456,19 +456,19 @@ ], "parameters": { "force": { - "description": "If a previously defined allocated range is removed, force flag must be\nset to true.", + "description": "If a previously defined allocated range is removed, force flag must be set to true.", "location": "query", "type": "boolean" }, "name": { - "description": "The private service connection that connects to a service producer\norganization. The name includes both the private service name and the VPC\nnetwork peering name in the format of\n`services/{peering_service_name}/connections/{vpc_peering_name}`. For\nGoogle services that support this functionality, this is\n`services/servicenetworking.googleapis.com/connections/servicenetworking-googleapis-com`.", + "description": "The private service connection that connects to a service producer organization. The name includes both the private service name and the VPC network peering name in the format of `services/{peering_service_name}/connections/{vpc_peering_name}`. For Google services that support this functionality, this is `services/servicenetworking.googleapis.com/connections/servicenetworking-googleapis-com`.", "location": "path", "pattern": "^services/[^/]+/connections/[^/]+$", "required": true, "type": "string" }, "updateMask": { - "description": "The update mask. If this is omitted, it defaults to \"*\". You can only\nupdate the listed peering ranges.", + "description": "The update mask. If this is omitted, it defaults to \"*\". You can only update the listed peering ranges.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -488,10 +488,260 @@ } } }, + "dnsRecordSets": { + "methods": { + "add": { + "description": "Service producers can use this method to add DNS record sets to private DNS zones in the shared producer host project.", + "flatPath": "v1/services/{servicesId}/dnsRecordSets:add", + "httpMethod": "POST", + "id": "servicenetworking.services.dnsRecordSets.add", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The service that is managing peering connectivity for a service producer's organization. 
For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`.", + "location": "path", + "pattern": "^services/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/dnsRecordSets:add", + "request": { + "$ref": "AddDnsRecordSetRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ] + }, + "remove": { + "description": "Service producers can use this method to remove DNS record sets from private DNS zones in the shared producer host project.", + "flatPath": "v1/services/{servicesId}/dnsRecordSets:remove", + "httpMethod": "POST", + "id": "servicenetworking.services.dnsRecordSets.remove", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The service that is managing peering connectivity for a service producer's organization. For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`.", + "location": "path", + "pattern": "^services/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/dnsRecordSets:remove", + "request": { + "$ref": "RemoveDnsRecordSetRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ] + }, + "update": { + "description": "Service producers can use this method to update DNS record sets from private DNS zones in the shared producer host project.", + "flatPath": "v1/services/{servicesId}/dnsRecordSets:update", + "httpMethod": "POST", + "id": "servicenetworking.services.dnsRecordSets.update", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The service that is managing peering connectivity for a service producer's organization. For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`.", + "location": "path", + "pattern": "^services/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/dnsRecordSets:update", + "request": { + "$ref": "UpdateDnsRecordSetRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ] + } + } + }, + "dnsZones": { + "methods": { + "add": { + "description": "Service producers can use this method to add private DNS zones in the shared producer host project and matching peering zones in the consumer project.", + "flatPath": "v1/services/{servicesId}/dnsZones:add", + "httpMethod": "POST", + "id": "servicenetworking.services.dnsZones.add", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The service that is managing peering connectivity for a service producer's organization. 
For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`.", + "location": "path", + "pattern": "^services/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/dnsZones:add", + "request": { + "$ref": "AddDnsZoneRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ] + }, + "remove": { + "description": "Service producers can use this method to remove private DNS zones in the shared producer host project and matching peering zones in the consumer project.", + "flatPath": "v1/services/{servicesId}/dnsZones:remove", + "httpMethod": "POST", + "id": "servicenetworking.services.dnsZones.remove", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The service that is managing peering connectivity for a service producer's organization. For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`.", + "location": "path", + "pattern": "^services/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/dnsZones:remove", + "request": { + "$ref": "RemoveDnsZoneRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ] + } + } + }, + "projects": { + "resources": { + "global": { + "resources": { + "networks": { + "resources": { + "peeredDnsDomains": { + "methods": { + "create": { + "description": "Creates a peered DNS domain which sends requests for records in given namespace originating in the service producer VPC network to the consumer VPC network to be resolved.", + "flatPath": "v1/services/{servicesId}/projects/{projectsId}/global/networks/{networksId}/peeredDnsDomains", + "httpMethod": "POST", + "id": "servicenetworking.services.projects.global.networks.peeredDnsDomains.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. Parent resource identifying the connection for which the peered DNS domain will be created in the format: `services/{service}/projects/{project}/global/networks/{network}` {service} is the peering service that is managing connectivity for the service producer's organization. For Google services that support this functionality, this value is `servicenetworking.googleapis.com`. {project} is the number of the project that contains the service consumer's VPC network e.g. `12345`. {network} is the name of the service consumer's VPC network.", + "location": "path", + "pattern": "^services/[^/]+/projects/[^/]+/global/networks/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/peeredDnsDomains", + "request": { + "$ref": "PeeredDnsDomain" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ] + }, + "delete": { + "description": "Deletes a peered DNS domain.", + "flatPath": "v1/services/{servicesId}/projects/{projectsId}/global/networks/{networksId}/peeredDnsDomains/{peeredDnsDomainsId}", + "httpMethod": "DELETE", + "id": "servicenetworking.services.projects.global.networks.peeredDnsDomains.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. 
The name of the peered DNS domain to delete in the format: `services/{service}/projects/{project}/global/networks/{network}/peeredDnsDomains/{name}`. {service} is the peering service that is managing connectivity for the service producer's organization. For Google services that support this functionality, this value is `servicenetworking.googleapis.com`. {project} is the number of the project that contains the service consumer's VPC network e.g. `12345`. {network} is the name of the service consumer's VPC network. {name} is the name of the peered DNS domain.", + "location": "path", + "pattern": "^services/[^/]+/projects/[^/]+/global/networks/[^/]+/peeredDnsDomains/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ] + }, + "list": { + "description": "Lists peered DNS domains for a connection.", + "flatPath": "v1/services/{servicesId}/projects/{projectsId}/global/networks/{networksId}/peeredDnsDomains", + "httpMethod": "GET", + "id": "servicenetworking.services.projects.global.networks.peeredDnsDomains.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. Parent resource identifying the connection which owns this collection of peered DNS domains in the format: `services/{service}/projects/{project}/global/networks/{network}`. {service} is the peering service that is managing connectivity for the service producer's organization. For Google services that support this functionality, this value is `servicenetworking.googleapis.com`. {project} is a project number e.g. `12345` that contains the service consumer's VPC network. {network} is the name of the service consumer's VPC network.", + "location": "path", + "pattern": "^services/[^/]+/projects/[^/]+/global/networks/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/peeredDnsDomains", + "response": { + "$ref": "ListPeeredDnsDomainsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ] + } + } + } + } + } + } + } + } + }, "roles": { "methods": { "add": { - "description": "Service producers can use this method to add roles in the shared VPC host\nproject. Each role is bound to the provided member. Each role must be\nselected from within a whitelisted set of roles. Each role is applied at\nonly the granularity specified in the whitelist.", + "description": "Service producers can use this method to add roles in the shared VPC host project. Each role is bound to the provided member. Each role must be selected from within a whitelisted set of roles. Each role is applied at only the granularity specified in the whitelist.", "flatPath": "v1/services/{servicesId}/roles:add", "httpMethod": "POST", "id": "servicenetworking.services.roles.add", @@ -500,7 +750,7 @@ ], "parameters": { "parent": { - "description": "Required. This is in a form services/{service} where {service} is the name of the\nprivate access management service. For example\n'service-peering.example.com'.", + "description": "Required. This is in a form services/{service} where {service} is the name of the private access management service. 
For example 'service-peering.example.com'.", "location": "path", "pattern": "^services/[^/]+$", "required": true, @@ -524,21 +774,86 @@ } } }, - "revision": "20200508", + "revision": "20200909", "rootUrl": "https://servicenetworking.googleapis.com/", "schemas": { + "AddDnsRecordSetMetadata": { + "description": "Metadata provided through GetOperation request for the LRO generated by AddDnsRecordSet API", + "id": "AddDnsRecordSetMetadata", + "properties": {}, + "type": "object" + }, + "AddDnsRecordSetRequest": { + "description": "Request to add a record set to a private managed DNS zone in the shared producer host project.", + "id": "AddDnsRecordSetRequest", + "properties": { + "consumerNetwork": { + "description": "Required. The network that the consumer is using to connect with services. Must be in the form of projects/{project}/global/networks/{network} {project} is the project number, as in '12345' {network} is the network name.", + "type": "string" + }, + "dnsRecordSet": { + "$ref": "DnsRecordSet", + "description": "Required. The DNS record set to add." + }, + "zone": { + "description": "Required. The name of the private DNS zone in the shared producer host project to which the record set will be added.", + "type": "string" + } + }, + "type": "object" + }, + "AddDnsZoneMetadata": { + "description": "Metadata provided through GetOperation request for the LRO generated by AddDnsZone API", + "id": "AddDnsZoneMetadata", + "properties": {}, + "type": "object" + }, + "AddDnsZoneRequest": { + "description": "Request to add a private managed DNS zone in the shared producer host project and a matching DNS peering zone in the consumer project.", + "id": "AddDnsZoneRequest", + "properties": { + "consumerNetwork": { + "description": "Required. The network that the consumer is using to connect with services. Must be in the form of projects/{project}/global/networks/{network} {project} is the project number, as in '12345' {network} is the network name.", + "type": "string" + }, + "dnsSuffix": { + "description": "Required. The DNS name suffix for the zones e.g. `example.com`.", + "type": "string" + }, + "name": { + "description": "Required. The name for both the private zone in the shared producer host project and the peering zone in the consumer project. Must be unique within both projects. The name must be 1-63 characters long, must begin with a letter, end with a letter or digit, and only contain lowercase letters, digits or dashes.", + "type": "string" + } + }, + "type": "object" + }, + "AddDnsZoneResponse": { + "description": "Represents managed DNS zones created in the shared producer host and consumer projects.", + "id": "AddDnsZoneResponse", + "properties": { + "consumerPeeringZone": { + "$ref": "DnsZone", + "description": "The DNS peering zone created in the consumer project." + }, + "producerPrivateZone": { + "$ref": "DnsZone", + "description": "The private DNS zone created in the shared producer host project." 
+ } + }, + "type": "object" + }, "AddRolesMetadata": { - "description": "Metadata provided through GetOperation request for the LRO generated by\nAddRoles API", + "description": "Metadata provided through GetOperation request for the LRO generated by AddRoles API", "id": "AddRolesMetadata", "properties": {}, "type": "object" }, "AddRolesRequest": { - "description": "Request for AddRoles to allow Service Producers to add roles in the shared\nVPC host project for them to use.", + "description": "Request for AddRoles to allow Service Producers to add roles in the shared VPC host project for them to use.", "id": "AddRolesRequest", "properties": { "consumerNetwork": { - "description": "Required. The network that the consumer is using to connect with services. Must be in\nthe form of projects/{project}/global/networks/{network}\n{project} is a project number, as in '12345'\n{network} is a network name.", + "description": "Required. The network that the consumer is using to connect with services. Must be in the form of projects/{project}/global/networks/{network} {project} is a project number, as in '12345' {network} is a network name.", "type": "string" }, "policyBinding": { @@ -570,11 +885,11 @@ "id": "AddSubnetworkRequest", "properties": { "consumer": { - "description": "Required. A resource that represents the service consumer, such as\n`projects/123456`. The project number can be different from the\nvalue in the consumer network parameter. For example, the network might be\npart of a Shared VPC network. In those cases, Service Networking validates\nthat this resource belongs to that Shared VPC.", + "description": "Required. A resource that represents the service consumer, such as `projects/123456`. The project number can be different from the value in the consumer network parameter. For example, the network might be part of a Shared VPC network. In those cases, Service Networking validates that this resource belongs to that Shared VPC.", "type": "string" }, "consumerNetwork": { - "description": "Required. The name of the service consumer's VPC network. The network\nmust have an existing private connection that was provisioned through the\nconnections.create method. The name must be in the following format:\n`projects/{project}/global/networks/{network}`, where {project}\nis a project number, such as `12345`. {network} is the name of a\nVPC network in the project.", + "description": "Required. The name of the service consumer's VPC network. The network must have an existing private connection that was provisioned through the connections.create method. The name must be in the following format: `projects/{project}/global/networks/{network}`, where {project} is a project number, such as `12345`. {network} is the name of a VPC network in the project.", "type": "string" }, "description": { @@ -582,28 +897,24 @@ "type": "string" }, "ipPrefixLength": { - "description": "Required. The prefix length of the subnet's IP address range. Use CIDR\nrange notation, such as `30` to provision a subnet with an\n`x.x.x.x/30` CIDR range. The IP address range is drawn from a\npool of available ranges in the service consumer's allocated range.", + "description": "Required. The prefix length of the subnet's IP address range. Use CIDR range notation, such as `30` to provision a subnet with an `x.x.x.x/30` CIDR range. 
The IP address range is drawn from a pool of available ranges in the service consumer's allocated range.", "format": "int32", "type": "integer" }, - "privateIpv6GoogleAccess": { - "description": "Optional. The private IPv6 google access type for the VMs in this subnet.\nFor information about the access types that can be set using this field,\nsee [subnetwork](/compute/docs/reference/rest/v1/subnetworks)\nin the Compute API documentation.", - "type": "string" - }, "region": { - "description": "Required. The name of a [region](/compute/docs/regions-zones)\nfor the subnet, such `europe-west1`.", + "description": "Required. The name of a [region](/compute/docs/regions-zones) for the subnet, such `europe-west1`.", "type": "string" }, "requestedAddress": { - "description": "Optional. The starting address of a range. The address must be a valid\nIPv4 address in the x.x.x.x format. This value combined with the IP prefix\nrange is the CIDR range for the subnet. The range must be within the\nallocated range that is assigned to the private connection. If the CIDR\nrange isn't available, the call fails.", + "description": "Optional. The starting address of a range. The address must be a valid IPv4 address in the x.x.x.x format. This value combined with the IP prefix range is the CIDR range for the subnet. The range must be within the allocated range that is assigned to the private connection. If the CIDR range isn't available, the call fails.", "type": "string" }, "subnetwork": { - "description": "Required. A name for the new subnet. For information about the naming\nrequirements, see [subnetwork](/compute/docs/reference/rest/v1/subnetworks)\nin the Compute API documentation.", + "description": "Required. A name for the new subnet. For information about the naming requirements, see [subnetwork](/compute/docs/reference/rest/v1/subnetworks) in the Compute API documentation.", "type": "string" }, "subnetworkUsers": { - "description": "A list of members that are granted the `compute.networkUser`\nrole on the subnet.", + "description": "A list of members that are granted the `compute.networkUser` role on the subnet.", "items": { "type": "string" }, @@ -613,7 +924,7 @@ "type": "object" }, "Api": { - "description": "Api is a light-weight descriptor for an API Interface.\n\nInterfaces are also described as \"protocol buffer services\" in some contexts,\nsuch as by the \"service\" keyword in a .proto file, but they are different\nfrom API Services, which represent a concrete implementation of an interface\nas opposed to simply a description of methods and bindings. They are also\nsometimes simply referred to as \"APIs\" in other contexts, such as the name of\nthis message itself. See https://cloud.google.com/apis/design/glossary for\ndetailed terminology.", + "description": "Api is a light-weight descriptor for an API Interface. Interfaces are also described as \"protocol buffer services\" in some contexts, such as by the \"service\" keyword in a .proto file, but they are different from API Services, which represent a concrete implementation of an interface as opposed to simply a description of methods and bindings. They are also sometimes simply referred to as \"APIs\" in other contexts, such as the name of this message itself. 
See https://cloud.google.com/apis/design/glossary for detailed terminology.", "id": "Api", "properties": { "methods": { @@ -631,7 +942,7 @@ "type": "array" }, "name": { - "description": "The fully qualified name of this interface, including package name\nfollowed by the interface's simple name.", + "description": "The fully qualified name of this interface, including package name followed by the interface's simple name.", "type": "string" }, "options": { @@ -643,7 +954,7 @@ }, "sourceContext": { "$ref": "SourceContext", - "description": "Source context for the protocol buffer service represented by this\nmessage." + "description": "Source context for the protocol buffer service represented by this message." }, "syntax": { "description": "The source syntax of the service.", @@ -658,38 +969,38 @@ "type": "string" }, "version": { - "description": "A version string for this interface. If specified, must have the form\n`major-version.minor-version`, as in `1.10`. If the minor version is\nomitted, it defaults to zero. If the entire version field is empty, the\nmajor version is derived from the package name, as outlined below. If the\nfield is not empty, the version in the package name will be verified to be\nconsistent with what is provided here.\n\nThe versioning schema uses [semantic\nversioning](http://semver.org) where the major version number\nindicates a breaking change and the minor version an additive,\nnon-breaking change. Both version numbers are signals to users\nwhat to expect from different versions, and should be carefully\nchosen based on the product plan.\n\nThe major version is also reflected in the package name of the\ninterface, which must end in `v\u003cmajor-version\u003e`, as in\n`google.feature.v1`. For major versions 0 and 1, the suffix can\nbe omitted. Zero major versions must only be used for\nexperimental, non-GA interfaces.\n", + "description": "A version string for this interface. If specified, must have the form `major-version.minor-version`, as in `1.10`. If the minor version is omitted, it defaults to zero. If the entire version field is empty, the major version is derived from the package name, as outlined below. If the field is not empty, the version in the package name will be verified to be consistent with what is provided here. The versioning schema uses [semantic versioning](http://semver.org) where the major version number indicates a breaking change and the minor version an additive, non-breaking change. Both version numbers are signals to users what to expect from different versions, and should be carefully chosen based on the product plan. The major version is also reflected in the package name of the interface, which must end in `v`, as in `google.feature.v1`. For major versions 0 and 1, the suffix can be omitted. Zero major versions must only be used for experimental, non-GA interfaces. ", "type": "string" } }, "type": "object" }, "AuthProvider": { - "description": "Configuration for an authentication provider, including support for\n[JSON Web Token\n(JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", + "description": "Configuration for an authentication provider, including support for [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", "id": "AuthProvider", "properties": { "audiences": { - "description": "The list of JWT\n[audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3).\nthat are allowed to access. 
A JWT containing any of these audiences will\nbe accepted. When this setting is absent, JWTs with audiences:\n - \"https://[service.name]/[google.protobuf.Api.name]\"\n - \"https://[service.name]/\"\nwill be accepted.\nFor example, if no audiences are in the setting, LibraryService API will\naccept JWTs with the following audiences:\n -\n https://library-example.googleapis.com/google.example.library.v1.LibraryService\n - https://library-example.googleapis.com/\n\nExample:\n\n audiences: bookstore_android.apps.googleusercontent.com,\n bookstore_web.apps.googleusercontent.com", + "description": "The list of JWT [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). that are allowed to access. A JWT containing any of these audiences will be accepted. When this setting is absent, JWTs with audiences: - \"https://[service.name]/[google.protobuf.Api.name]\" - \"https://[service.name]/\" will be accepted. For example, if no audiences are in the setting, LibraryService API will accept JWTs with the following audiences: - https://library-example.googleapis.com/google.example.library.v1.LibraryService - https://library-example.googleapis.com/ Example: audiences: bookstore_android.apps.googleusercontent.com, bookstore_web.apps.googleusercontent.com", "type": "string" }, "authorizationUrl": { - "description": "Redirect URL if JWT token is required but not present or is expired.\nImplement authorizationUrl of securityDefinitions in OpenAPI spec.", + "description": "Redirect URL if JWT token is required but not present or is expired. Implement authorizationUrl of securityDefinitions in OpenAPI spec.", "type": "string" }, "id": { - "description": "The unique identifier of the auth provider. It will be referred to by\n`AuthRequirement.provider_id`.\n\nExample: \"bookstore_auth\".", + "description": "The unique identifier of the auth provider. It will be referred to by `AuthRequirement.provider_id`. Example: \"bookstore_auth\".", "type": "string" }, "issuer": { - "description": "Identifies the principal that issued the JWT. See\nhttps://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1\nUsually a URL or an email address.\n\nExample: https://securetoken.google.com\nExample: 1234567-compute@developer.gserviceaccount.com", + "description": "Identifies the principal that issued the JWT. See https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1 Usually a URL or an email address. Example: https://securetoken.google.com Example: 1234567-compute@developer.gserviceaccount.com", "type": "string" }, "jwksUri": { - "description": "URL of the provider's public key set to validate signature of the JWT. See\n[OpenID\nDiscovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).\nOptional if the key set document:\n - can be retrieved from\n [OpenID\n Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html of\n the issuer.\n - can be inferred from the email domain of the issuer (e.g. a Google\n service account).\n\nExample: https://www.googleapis.com/oauth2/v1/certs", + "description": "URL of the provider's public key set to validate signature of the JWT. See [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata). Optional if the key set document: - can be retrieved from [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html of the issuer. - can be inferred from the email domain of the issuer (e.g. a Google service account). 
Example: https://www.googleapis.com/oauth2/v1/certs", "type": "string" }, "jwtLocations": { - "description": "Defines the locations to extract the JWT.\n\nJWT locations can be either from HTTP headers or URL query parameters.\nThe rule is that the first match wins. The checking order is: checking\nall headers first, then URL query parameters.\n\nIf not specified, default to use following 3 locations:\n 1) Authorization: Bearer\n 2) x-goog-iap-jwt-assertion\n 3) access_token query parameter\n\nDefault locations can be specified as followings:\n jwt_locations:\n - header: Authorization\n value_prefix: \"Bearer \"\n - header: x-goog-iap-jwt-assertion\n - query: access_token", + "description": "Defines the locations to extract the JWT. JWT locations can be either from HTTP headers or URL query parameters. The rule is that the first match wins. The checking order is: checking all headers first, then URL query parameters. If not specified, default to use following 3 locations: 1) Authorization: Bearer 2) x-goog-iap-jwt-assertion 3) access_token query parameter Default locations can be specified as followings: jwt_locations: - header: Authorization value_prefix: \"Bearer \" - header: x-goog-iap-jwt-assertion - query: access_token", "items": { "$ref": "JwtLocation" }, @@ -699,22 +1010,22 @@ "type": "object" }, "AuthRequirement": { - "description": "User-defined authentication requirements, including support for\n[JSON Web Token\n(JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", + "description": "User-defined authentication requirements, including support for [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", "id": "AuthRequirement", "properties": { "audiences": { - "description": "NOTE: This will be deprecated soon, once AuthProvider.audiences is\nimplemented and accepted in all the runtime components.\n\nThe list of JWT\n[audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3).\nthat are allowed to access. A JWT containing any of these audiences will\nbe accepted. When this setting is absent, only JWTs with audience\n\"https://Service_name/API_name\"\nwill be accepted. For example, if no audiences are in the setting,\nLibraryService API will only accept JWTs with the following audience\n\"https://library-example.googleapis.com/google.example.library.v1.LibraryService\".\n\nExample:\n\n audiences: bookstore_android.apps.googleusercontent.com,\n bookstore_web.apps.googleusercontent.com", + "description": "NOTE: This will be deprecated soon, once AuthProvider.audiences is implemented and accepted in all the runtime components. The list of JWT [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). that are allowed to access. A JWT containing any of these audiences will be accepted. When this setting is absent, only JWTs with audience \"https://Service_name/API_name\" will be accepted. For example, if no audiences are in the setting, LibraryService API will only accept JWTs with the following audience \"https://library-example.googleapis.com/google.example.library.v1.LibraryService\". Example: audiences: bookstore_android.apps.googleusercontent.com, bookstore_web.apps.googleusercontent.com", "type": "string" }, "providerId": { - "description": "id from authentication provider.\n\nExample:\n\n provider_id: bookstore_auth", + "description": "id from authentication provider. 
Example: provider_id: bookstore_auth", "type": "string" } }, "type": "object" }, "Authentication": { - "description": "`Authentication` defines the authentication configuration for an API.\n\nExample for an API targeted for external use:\n\n name: calendar.googleapis.com\n authentication:\n providers:\n - id: google_calendar_auth\n jwks_uri: https://www.googleapis.com/oauth2/v1/certs\n issuer: https://securetoken.google.com\n rules:\n - selector: \"*\"\n requirements:\n provider_id: google_calendar_auth", + "description": "`Authentication` defines the authentication configuration for an API. Example for an API targeted for external use: name: calendar.googleapis.com authentication: providers: - id: google_calendar_auth jwks_uri: https://www.googleapis.com/oauth2/v1/certs issuer: https://securetoken.google.com rules: - selector: \"*\" requirements: provider_id: google_calendar_auth", "id": "Authentication", "properties": { "providers": { @@ -725,7 +1036,7 @@ "type": "array" }, "rules": { - "description": "A list of authentication rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of authentication rules that apply to individual API methods. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "AuthenticationRule" }, @@ -735,7 +1046,7 @@ "type": "object" }, "AuthenticationRule": { - "description": "Authentication rules for the service.\n\nBy default, if a method has any authentication requirements, every request\nmust include a valid credential matching one of the requirements.\nIt's an error to include more than one kind of credential in a single\nrequest.\n\nIf a method doesn't have any auth requirements, request credentials will be\nignored.", + "description": "Authentication rules for the service. By default, if a method has any authentication requirements, every request must include a valid credential matching one of the requirements. It's an error to include more than one kind of credential in a single request. If a method doesn't have any auth requirements, request credentials will be ignored.", "id": "AuthenticationRule", "properties": { "allowWithoutCredential": { @@ -754,7 +1065,7 @@ "type": "array" }, "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, @@ -765,7 +1076,7 @@ "id": "Backend", "properties": { "rules": { - "description": "A list of API backend rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of API backend rules that apply to individual API methods. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "BackendRule" }, @@ -779,29 +1090,29 @@ "id": "BackendRule", "properties": { "address": { - "description": "The address of the API backend.\n\nThe scheme is used to determine the backend protocol and security.\nThe following schemes are accepted:\n\n SCHEME PROTOCOL SECURITY\n http:// HTTP None\n https:// HTTP TLS\n grpc:// gRPC None\n grpcs:// gRPC TLS\n\nIt is recommended to explicitly include a scheme. 
Leaving out the scheme\nmay cause constrasting behaviors across platforms.\n\nIf the port is unspecified, the default is:\n- 80 for schemes without TLS\n- 443 for schemes with TLS\n\nFor HTTP backends, use protocol\nto specify the protocol version.", + "description": "The address of the API backend. The scheme is used to determine the backend protocol and security. The following schemes are accepted: SCHEME PROTOCOL SECURITY http:// HTTP None https:// HTTP TLS grpc:// gRPC None grpcs:// gRPC TLS It is recommended to explicitly include a scheme. Leaving out the scheme may cause constrasting behaviors across platforms. If the port is unspecified, the default is: - 80 for schemes without TLS - 443 for schemes with TLS For HTTP backends, use protocol to specify the protocol version.", "type": "string" }, "deadline": { - "description": "The number of seconds to wait for a response from a request. The default\nvaries based on the request protocol and deployment environment.", + "description": "The number of seconds to wait for a response from a request. The default varies based on the request protocol and deployment environment.", "format": "double", "type": "number" }, "disableAuth": { - "description": "When disable_auth is true, a JWT ID token won't be generated and the\noriginal \"Authorization\" HTTP header will be preserved. If the header is\nused to carry the original token and is expected by the backend, this\nfield must be set to true to preserve the header.", + "description": "When disable_auth is true, a JWT ID token won't be generated and the original \"Authorization\" HTTP header will be preserved. If the header is used to carry the original token and is expected by the backend, this field must be set to true to preserve the header.", "type": "boolean" }, "jwtAudience": { - "description": "The JWT audience is used when generating a JWT ID token for the backend.\nThis ID token will be added in the HTTP \"authorization\" header, and sent\nto the backend.", + "description": "The JWT audience is used when generating a JWT ID token for the backend. This ID token will be added in the HTTP \"authorization\" header, and sent to the backend.", "type": "string" }, "minDeadline": { - "description": "Minimum deadline in seconds needed for this method. Calls having deadline\nvalue lower than this will be rejected.", + "description": "Minimum deadline in seconds needed for this method. Calls having deadline value lower than this will be rejected.", "format": "double", "type": "number" }, "operationDeadline": { - "description": "The number of seconds to wait for the completion of a long running\noperation. The default is no deadline.", + "description": "The number of seconds to wait for the completion of a long running operation. The default is no deadline.", "format": "double", "type": "number" }, @@ -813,32 +1124,28 @@ ], "enumDescriptions": [ "", - "Use the backend address as-is, with no modification to the path. If the\nURL pattern contains variables, the variable names and values will be\nappended to the query string. 
If a query string parameter and a URL\npattern variable have the same name, this may result in duplicate keys in\nthe query string.\n\n# Examples\n\nGiven the following operation config:\n\n Method path: /api/company/{cid}/user/{uid}\n Backend address: https://example.cloudfunctions.net/getUser\n\nRequests to the following request paths will call the backend at the\ntranslated path:\n\n Request path: /api/company/widgetworks/user/johndoe\n Translated:\n https://example.cloudfunctions.net/getUser?cid=widgetworks\u0026uid=johndoe\n\n Request path: /api/company/widgetworks/user/johndoe?timezone=EST\n Translated:\n https://example.cloudfunctions.net/getUser?timezone=EST\u0026cid=widgetworks\u0026uid=johndoe", - "The request path will be appended to the backend address.\n\n# Examples\n\nGiven the following operation config:\n\n Method path: /api/company/{cid}/user/{uid}\n Backend address: https://example.appspot.com\n\nRequests to the following request paths will call the backend at the\ntranslated path:\n\n Request path: /api/company/widgetworks/user/johndoe\n Translated:\n https://example.appspot.com/api/company/widgetworks/user/johndoe\n\n Request path: /api/company/widgetworks/user/johndoe?timezone=EST\n Translated:\n https://example.appspot.com/api/company/widgetworks/user/johndoe?timezone=EST" + "Use the backend address as-is, with no modification to the path. If the URL pattern contains variables, the variable names and values will be appended to the query string. If a query string parameter and a URL pattern variable have the same name, this may result in duplicate keys in the query string. # Examples Given the following operation config: Method path: /api/company/{cid}/user/{uid} Backend address: https://example.cloudfunctions.net/getUser Requests to the following request paths will call the backend at the translated path: Request path: /api/company/widgetworks/user/johndoe Translated: https://example.cloudfunctions.net/getUser?cid=widgetworks\u0026uid=johndoe Request path: /api/company/widgetworks/user/johndoe?timezone=EST Translated: https://example.cloudfunctions.net/getUser?timezone=EST\u0026cid=widgetworks\u0026uid=johndoe", + "The request path will be appended to the backend address. # Examples Given the following operation config: Method path: /api/company/{cid}/user/{uid} Backend address: https://example.appspot.com Requests to the following request paths will call the backend at the translated path: Request path: /api/company/widgetworks/user/johndoe Translated: https://example.appspot.com/api/company/widgetworks/user/johndoe Request path: /api/company/widgetworks/user/johndoe?timezone=EST Translated: https://example.appspot.com/api/company/widgetworks/user/johndoe?timezone=EST" ], "type": "string" }, "protocol": { - "description": "The protocol used for sending a request to the backend.\nThe supported values are \"http/1.1\" and \"h2\".\n\nThe default value is inferred from the scheme in the\naddress field:\n\n SCHEME PROTOCOL\n http:// http/1.1\n https:// http/1.1\n grpc:// h2\n grpcs:// h2\n\nFor secure HTTP backends (https://) that support HTTP/2, set this field\nto \"h2\" for improved performance.\n\nConfiguring this field to non-default values is only supported for secure\nHTTP backends. This field will be ignored for all other backends.\n\nSee\nhttps://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids\nfor more details on the supported values.", - "type": "string" - }, - "renameTo": { - "description": "Unimplemented. 
Do not use.\n\nThe new name the selected proto elements should be renamed to.\n\nThe package, the service and the method can all be renamed.\nThe backend server should implement the renamed proto. However, clients\nshould call the original method, and ESF routes the traffic to the renamed\nmethod.\n\nHTTP clients should call the URL mapped to the original method.\ngRPC and Stubby clients should call the original method with package name.\n\nFor legacy reasons, ESF allows Stubby clients to call with the\nshort name (without the package name). However, for API Versioning(or\nmultiple methods mapped to the same short name), all Stubby clients must\ncall the method's full name with the package name, otherwise the first one\n(selector) wins.\n\nIf this `rename_to` is specified with a trailing `*`, the `selector` must\nbe specified with a trailing `*` as well. The all element short names\nmatched by the `*` in the selector will be kept in the `rename_to`.\n\nFor example,\n rename_rules:\n - selector: |-\n google.example.library.v1.*\n rename_to: google.example.library.*\n\nThe selector matches `google.example.library.v1.Library.CreateShelf` and\n`google.example.library.v1.Library.CreateBook`, they will be renamed to\n`google.example.library.Library.CreateShelf` and\n`google.example.library.Library.CreateBook`. It essentially renames the\nproto package name section of the matched proto service and methods.", + "description": "The protocol used for sending a request to the backend. The supported values are \"http/1.1\" and \"h2\". The default value is inferred from the scheme in the address field: SCHEME PROTOCOL http:// http/1.1 https:// http/1.1 grpc:// h2 grpcs:// h2 For secure HTTP backends (https://) that support HTTP/2, set this field to \"h2\" for improved performance. Configuring this field to non-default values is only supported for secure HTTP backends. This field will be ignored for all other backends. See https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids for more details on the supported values.", "type": "string" }, "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. 
Refer to selector for syntax details.", "type": "string" } }, "type": "object" }, "Billing": { - "description": "Billing related configuration of the service.\n\nThe following example shows how to configure monitored resources and metrics\nfor billing, `consumer_destinations` is the only supported destination and\nthe monitored resources need at least one label key\n`cloud.googleapis.com/location` to indicate the location of the billing\nusage, using different monitored resources between monitoring and billing is\nrecommended so they can be evolved independently:\n\n\n monitored_resources:\n - type: library.googleapis.com/billing_branch\n labels:\n - key: cloud.googleapis.com/location\n description: |\n Predefined label to support billing location restriction.\n - key: city\n description: |\n Custom label to define the city where the library branch is located\n in.\n - key: name\n description: Custom label to define the name of the library branch.\n metrics:\n - name: library.googleapis.com/book/borrowed_count\n metric_kind: DELTA\n value_type: INT64\n unit: \"1\"\n billing:\n consumer_destinations:\n - monitored_resource: library.googleapis.com/billing_branch\n metrics:\n - library.googleapis.com/book/borrowed_count", + "description": "Billing related configuration of the service. The following example shows how to configure monitored resources and metrics for billing, `consumer_destinations` is the only supported destination and the monitored resources need at least one label key `cloud.googleapis.com/location` to indicate the location of the billing usage, using different monitored resources between monitoring and billing is recommended so they can be evolved independently: monitored_resources: - type: library.googleapis.com/billing_branch labels: - key: cloud.googleapis.com/location description: | Predefined label to support billing location restriction. - key: city description: | Custom label to define the city where the library branch is located in. - key: name description: Custom label to define the name of the library branch. metrics: - name: library.googleapis.com/book/borrowed_count metric_kind: DELTA value_type: INT64 unit: \"1\" billing: consumer_destinations: - monitored_resource: library.googleapis.com/billing_branch metrics: - library.googleapis.com/book/borrowed_count", "id": "Billing", "properties": { "consumerDestinations": { - "description": "Billing configurations for sending metrics to the consumer project.\nThere can be multiple consumer destinations per service, each one must have\na different monitored resource type. A metric can be used in at most\none consumer destination.", + "description": "Billing configurations for sending metrics to the consumer project. There can be multiple consumer destinations per service, each one must have a different monitored resource type. A metric can be used in at most one consumer destination.", "items": { "$ref": "BillingDestination" }, @@ -848,18 +1155,18 @@ "type": "object" }, "BillingDestination": { - "description": "Configuration of a specific billing destination (Currently only support\nbill against consumer project).", + "description": "Configuration of a specific billing destination (Currently only support bill against consumer project).", "id": "BillingDestination", "properties": { "metrics": { - "description": "Names of the metrics to report to this billing destination.\nEach name must be defined in Service.metrics section.", + "description": "Names of the metrics to report to this billing destination. 
Each name must be defined in Service.metrics section.", "items": { "type": "string" }, "type": "array" }, "monitoredResource": { - "description": "The monitored resource type. The type must be defined in\nService.monitored_resources section.", + "description": "The monitored resource type. The type must be defined in Service.monitored_resources section.", "type": "string" } }, @@ -872,26 +1179,28 @@ "type": "object" }, "Connection": { - "description": "Represents a private connection resource. A private connection is implemented\nas a VPC Network Peering connection between a service producer's VPC network\nand a service consumer's VPC network.", + "description": "Represents a private connection resource. A private connection is implemented as a VPC Network Peering connection between a service producer's VPC network and a service consumer's VPC network.", "id": "Connection", "properties": { "network": { - "description": "The name of service consumer's VPC network that's connected with service\nproducer network, in the following format:\n`projects/{project}/global/networks/{network}`.\n`{project}` is a project number, such as in `12345` that includes\nthe VPC service consumer's VPC network. `{network}` is the name of the\nservice consumer's VPC network.", + "description": "The name of service consumer's VPC network that's connected with service producer network, in the following format: `projects/{project}/global/networks/{network}`. `{project}` is a project number, such as in `12345` that includes the VPC service consumer's VPC network. `{network}` is the name of the service consumer's VPC network.", "type": "string" }, "peering": { - "description": "Output only. The name of the VPC Network Peering connection that was created by the\nservice producer.", + "description": "Output only. The name of the VPC Network Peering connection that was created by the service producer.", + "readOnly": true, "type": "string" }, "reservedPeeringRanges": { - "description": "The name of one or more allocated IP address ranges for this service\nproducer of type `PEERING`.\nNote that invoking CreateConnection method with a different range when\nconnection is already established will not modify already provisioned\nservice producer subnetworks.\nIf CreateConnection method is invoked repeatedly to reconnect when peering\nconnection had been disconnected on the consumer side, leaving this field\nempty will restore previously allocated IP ranges.", + "description": "The name of one or more allocated IP address ranges for this service producer of type `PEERING`. Note that invoking CreateConnection method with a different range when connection is already established will not modify already provisioned service producer subnetworks. If CreateConnection method is invoked repeatedly to reconnect when peering connection had been disconnected on the consumer side, leaving this field empty will restore previously allocated IP ranges.", "items": { "type": "string" }, "type": "array" }, "service": { - "description": "Output only. The name of the peering service that's associated with this connection, in\nthe following format: `services/{service name}`.", + "description": "Output only. The name of the peering service that's associated with this connection, in the following format: `services/{service name}`.", + "readOnly": true, "type": "string" } }, @@ -902,7 +1211,7 @@ "id": "ConsumerProject", "properties": { "projectNum": { - "description": "Required. Project number of the consumer that is launching the service instance. 
It\ncan own the network that is peered with Google or, be a service project in\nan XPN where the host project has the network.", + "description": "Required. Project number of the consumer that is launching the service instance. It can own the network that is peered with Google or, be a service project in an XPN where the host project has the network.", "format": "int64", "type": "string" } @@ -910,11 +1219,11 @@ "type": "object" }, "Context": { - "description": "`Context` defines which contexts an API requests.\n\nExample:\n\n context:\n rules:\n - selector: \"*\"\n requested:\n - google.rpc.context.ProjectContext\n - google.rpc.context.OriginContext\n\nThe above specifies that all methods in the API request\n`google.rpc.context.ProjectContext` and\n`google.rpc.context.OriginContext`.\n\nAvailable context types are defined in package\n`google.rpc.context`.\n\nThis also provides mechanism to whitelist any protobuf message extension that\ncan be sent in grpc metadata using “x-goog-ext-\u003cextension_id\u003e-bin” and\n“x-goog-ext-\u003cextension_id\u003e-jspb” format. For example, list any service\nspecific protobuf types that can appear in grpc metadata as follows in your\nyaml file:\n\nExample:\n\n context:\n rules:\n - selector: \"google.example.library.v1.LibraryService.CreateBook\"\n allowed_request_extensions:\n - google.foo.v1.NewExtension\n allowed_response_extensions:\n - google.foo.v1.NewExtension\n\nYou can also specify extension ID instead of fully qualified extension name\nhere.", + "description": "`Context` defines which contexts an API requests. Example: context: rules: - selector: \"*\" requested: - google.rpc.context.ProjectContext - google.rpc.context.OriginContext The above specifies that all methods in the API request `google.rpc.context.ProjectContext` and `google.rpc.context.OriginContext`. Available context types are defined in package `google.rpc.context`. This also provides mechanism to whitelist any protobuf message extension that can be sent in grpc metadata using “x-goog-ext--bin” and “x-goog-ext--jspb” format. For example, list any service specific protobuf types that can appear in grpc metadata as follows in your yaml file: Example: context: rules: - selector: \"google.example.library.v1.LibraryService.CreateBook\" allowed_request_extensions: - google.foo.v1.NewExtension allowed_response_extensions: - google.foo.v1.NewExtension You can also specify extension ID instead of fully qualified extension name here.", "id": "Context", "properties": { "rules": { - "description": "A list of RPC context rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of RPC context rules that apply to individual API methods. 
**NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "ContextRule" }, @@ -924,18 +1233,18 @@ "type": "object" }, "ContextRule": { - "description": "A context rule provides information about the context for an individual API\nelement.", + "description": "A context rule provides information about the context for an individual API element.", "id": "ContextRule", "properties": { "allowedRequestExtensions": { - "description": "A list of full type names or extension IDs of extensions allowed in grpc\nside channel from client to backend.", + "description": "A list of full type names or extension IDs of extensions allowed in grpc side channel from client to backend.", "items": { "type": "string" }, "type": "array" }, "allowedResponseExtensions": { - "description": "A list of full type names or extension IDs of extensions allowed in grpc\nside channel from backend to client.", + "description": "A list of full type names or extension IDs of extensions allowed in grpc side channel from backend to client.", "items": { "type": "string" }, @@ -956,29 +1265,29 @@ "type": "array" }, "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, "type": "object" }, "Control": { - "description": "Selects and configures the service controller used by the service. The\nservice controller handles features like abuse, quota, billing, logging,\nmonitoring, etc.", + "description": "Selects and configures the service controller used by the service. The service controller handles features like abuse, quota, billing, logging, monitoring, etc.", "id": "Control", "properties": { "environment": { - "description": "The service control environment to use. If empty, no control plane\nfeature (like quota and billing) will be enabled.", + "description": "The service control environment to use. If empty, no control plane feature (like quota and billing) will be enabled.", "type": "string" } }, "type": "object" }, "CustomError": { - "description": "Customize service error responses. For example, list any service\nspecific protobuf types that can appear in error detail lists of\nerror responses.\n\nExample:\n\n custom_error:\n types:\n - google.foo.v1.CustomError\n - google.foo.v1.AnotherError", + "description": "Customize service error responses. For example, list any service specific protobuf types that can appear in error detail lists of error responses. Example: custom_error: types: - google.foo.v1.CustomError - google.foo.v1.AnotherError", "id": "CustomError", "properties": { "rules": { - "description": "The list of custom error rules that apply to individual API messages.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "The list of custom error rules that apply to individual API messages. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "CustomErrorRule" }, @@ -999,11 +1308,11 @@ "id": "CustomErrorRule", "properties": { "isErrorType": { - "description": "Mark this message as possible payload in error response. Otherwise,\nobjects of this type will be filtered when they appear in error payload.", + "description": "Mark this message as possible payload in error response. 
Otherwise, objects of this type will be filtered when they appear in error payload.", "type": "boolean" }, "selector": { - "description": "Selects messages to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects messages to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, @@ -1024,19 +1333,67 @@ }, "type": "object" }, + "DeletePeeredDnsDomainMetadata": { + "description": "Metadata provided through GetOperation request for the LRO generated by DeletePeeredDnsDomain API.", + "id": "DeletePeeredDnsDomainMetadata", + "properties": {}, + "type": "object" + }, "DisableVpcServiceControlsRequest": { "description": "Request to disable VPC service controls.", "id": "DisableVpcServiceControlsRequest", "properties": { "consumerNetwork": { - "description": "Required. The network that the consumer is using to connect with services.\nMust be in the form of projects/{project}/global/networks/{network}\n{project} is a project number, as in '12345'\n{network} is network name.", + "description": "Required. The network that the consumer is using to connect with services. Must be in the form of projects/{project}/global/networks/{network} {project} is a project number, as in '12345' {network} is network name.", + "type": "string" + } + }, + "type": "object" + }, + "DnsRecordSet": { + "description": "Represents a DNS record set resource.", + "id": "DnsRecordSet", + "properties": { + "data": { + "description": "Required. As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) for examples see https://cloud.google.com/dns/records/json-record.", + "items": { + "type": "string" + }, + "type": "array" + }, + "domain": { + "description": "Required. The DNS or domain name of the record set, e.g. `test.example.com`.", + "type": "string" + }, + "ttl": { + "description": "Required. The period of time for which this RecordSet can be cached by resolvers.", + "format": "google-duration", + "type": "string" + }, + "type": { + "description": "Required. The identifier of a supported record type.", + "type": "string" + } + }, + "type": "object" + }, + "DnsZone": { + "description": "Represents a DNS zone resource.", + "id": "DnsZone", + "properties": { + "dnsSuffix": { + "description": "The DNS name suffix of this zone e.g. `example.com.`.", + "type": "string" + }, + "name": { + "description": "User assigned name for this resource. Must be unique within the project. The name must be 1-63 characters long, must begin with a letter, end with a letter or digit, and only contain lowercase letters, digits or dashes.", "type": "string" } }, "type": "object" }, "Documentation": { - "description": "`Documentation` provides the information for describing a service.\n\nExample:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: \u003e\n The Google Calendar API gives access\n to most calendar features.\n pages:\n - name: Overview\n content: \u0026#40;== include google/foo/overview.md ==\u0026#41;\n - name: Tutorial\n content: \u0026#40;== include google/foo/tutorial.md ==\u0026#41;\n subpages;\n - name: Java\n content: \u0026#40;== include google/foo/tutorial_java.md ==\u0026#41;\n rules:\n - selector: google.calendar.Calendar.Get\n description: \u003e\n ...\n - selector: google.calendar.Calendar.Put\n description: \u003e\n ...\n\u003c/code\u003e\u003c/pre\u003e\nDocumentation is provided in markdown syntax. In addition to\nstandard markdown features, definition lists, tables and fenced\ncode blocks are supported. 
Section headers can be provided and are\ninterpreted relative to the section nesting of the context where\na documentation fragment is embedded.\n\nDocumentation from the IDL is merged with documentation defined\nvia the config at normalization time, where documentation provided\nby config rules overrides IDL provided.\n\nA number of constructs specific to the API platform are supported\nin documentation text.\n\nIn order to reference a proto element, the following\nnotation can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;fully.qualified.proto.name]\u0026#91;]\u003c/code\u003e\u003c/pre\u003e\nTo override the display text used for the link, this can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;display text]\u0026#91;fully.qualified.proto.name]\u003c/code\u003e\u003c/pre\u003e\nText can be excluded from doc using the following notation:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;-- internal comment --\u0026#41;\u003c/code\u003e\u003c/pre\u003e\n\nA few directives are available in documentation. Note that\ndirectives must appear on a single line to be properly\nidentified. The `include` directive includes a markdown file from\nan external source:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== include path/to/file ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe `resource_for` directive marks a message to be the resource of\na collection in REST view. If it is not specified, tools attempt\nto infer the resource from the operations in a collection:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== resource_for v1.shelves.books ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe directive `suppress_warning` does not directly affect documentation\nand is documented together with service config validation.", + "description": "`Documentation` provides the information for describing a service. Example: documentation: summary: \u003e The Google Calendar API gives access to most calendar features. pages: - name: Overview content: (== include google/foo/overview.md ==) - name: Tutorial content: (== include google/foo/tutorial.md ==) subpages; - name: Java content: (== include google/foo/tutorial_java.md ==) rules: - selector: google.calendar.Calendar.Get description: \u003e ... - selector: google.calendar.Calendar.Put description: \u003e ... Documentation is provided in markdown syntax. In addition to standard markdown features, definition lists, tables and fenced code blocks are supported. Section headers can be provided and are interpreted relative to the section nesting of the context where a documentation fragment is embedded. Documentation from the IDL is merged with documentation defined via the config at normalization time, where documentation provided by config rules overrides IDL provided. A number of constructs specific to the API platform are supported in documentation text. In order to reference a proto element, the following notation can be used: [fully.qualified.proto.name][] To override the display text used for the link, this can be used: [display text][fully.qualified.proto.name] Text can be excluded from doc using the following notation: (-- internal comment --) A few directives are available in documentation. Note that directives must appear on a single line to be properly identified. The `include` directive includes a markdown file from an external source: (== include path/to/file ==) The `resource_for` directive marks a message to be the resource of a collection in REST view. 
If it is not specified, tools attempt to infer the resource from the operations in a collection: (== resource_for v1.shelves.books ==) The directive `suppress_warning` does not directly affect documentation and is documented together with service config validation.", "id": "Documentation", "properties": { "documentationRootUrl": { @@ -1044,7 +1401,7 @@ "type": "string" }, "overview": { - "description": "Declares a single overview page. For example:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: ...\n overview: \u0026#40;== include overview.md ==\u0026#41;\n\u003c/code\u003e\u003c/pre\u003e\nThis is a shortcut for the following declaration (using pages style):\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: ...\n pages:\n - name: Overview\n content: \u0026#40;== include overview.md ==\u0026#41;\n\u003c/code\u003e\u003c/pre\u003e\nNote: you cannot specify both `overview` field and `pages` field.", + "description": "Declares a single overview page. For example: documentation: summary: ... overview: (== include overview.md ==) This is a shortcut for the following declaration (using pages style): documentation: summary: ... pages: - name: Overview content: (== include overview.md ==) Note: you cannot specify both `overview` field and `pages` field.", "type": "string" }, "pages": { @@ -1055,18 +1412,18 @@ "type": "array" }, "rules": { - "description": "A list of documentation rules that apply to individual API elements.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of documentation rules that apply to individual API elements. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "DocumentationRule" }, "type": "array" }, "serviceRootUrl": { - "description": "Specifies the service root url if the default one (the service name\nfrom the yaml file) is not suitable. This can be seen in any fully\nspecified service urls as well as sections that show a base that other\nurls are relative to.", + "description": "Specifies the service root url if the default one (the service name from the yaml file) is not suitable. This can be seen in any fully specified service urls as well as sections that show a base that other urls are relative to.", "type": "string" }, "summary": { - "description": "A short summary of what the service does. Can only be provided by\nplain text.", + "description": "A short summary of what the service does. Can only be provided by plain text.", "type": "string" } }, @@ -1077,7 +1434,7 @@ "id": "DocumentationRule", "properties": { "deprecationDescription": { - "description": "Deprecation description of the selected element(s). It can be provided if\nan element is marked as `deprecated`.", + "description": "Deprecation description of the selected element(s). It can be provided if an element is marked as `deprecated`.", "type": "string" }, "description": { @@ -1085,14 +1442,14 @@ "type": "string" }, "selector": { - "description": "The selector is a comma-separated list of patterns. Each pattern is a\nqualified name of the element which may end in \"*\", indicating a wildcard.\nWildcards are only allowed at the end and for a whole component of the\nqualified name, i.e. \"foo.*\" is ok, but not \"foo.b*\" or \"foo.*.bar\". A\nwildcard will match one or more components. To specify a default for all\napplicable elements, the whole pattern \"*\" is used.", + "description": "The selector is a comma-separated list of patterns. 
Each pattern is a qualified name of the element which may end in \"*\", indicating a wildcard. Wildcards are only allowed at the end and for a whole component of the qualified name, i.e. \"foo.*\" is ok, but not \"foo.b*\" or \"foo.*.bar\". A wildcard will match one or more components. To specify a default for all applicable elements, the whole pattern \"*\" is used.", "type": "string" } }, "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" @@ -1102,40 +1459,33 @@ "id": "EnableVpcServiceControlsRequest", "properties": { "consumerNetwork": { - "description": "Required. The network that the consumer is using to connect with services.\nMust be in the form of projects/{project}/global/networks/{network}\n{project} is a project number, as in '12345'\n{network} is network name.", + "description": "Required. The network that the consumer is using to connect with services. Must be in the form of projects/{project}/global/networks/{network} {project} is a project number, as in '12345' {network} is network name.", "type": "string" } }, "type": "object" }, "Endpoint": { - "description": "`Endpoint` describes a network endpoint that serves a set of APIs.\nA service may expose any number of endpoints, and all endpoints share the\nsame service configuration, such as quota configuration and monitoring\nconfiguration.\n\nExample service configuration:\n\n name: library-example.googleapis.com\n endpoints:\n # Below entry makes 'google.example.library.v1.Library'\n # API be served from endpoint address library-example.googleapis.com.\n # It also allows HTTP OPTIONS calls to be passed to the backend, for\n # it to decide whether the subsequent cross-origin request is\n # allowed to proceed.\n - name: library-example.googleapis.com\n allow_cors: true", + "description": "`Endpoint` describes a network endpoint that serves a set of APIs. A service may expose any number of endpoints, and all endpoints share the same service configuration, such as quota configuration and monitoring configuration. Example service configuration: name: library-example.googleapis.com endpoints: # Below entry makes 'google.example.library.v1.Library' # API be served from endpoint address library-example.googleapis.com. # It also allows HTTP OPTIONS calls to be passed to the backend, for # it to decide whether the subsequent cross-origin request is # allowed to proceed. - name: library-example.googleapis.com allow_cors: true", "id": "Endpoint", "properties": { "aliases": { - "description": "DEPRECATED: This field is no longer supported. Instead of using aliases,\nplease specify multiple google.api.Endpoint for each of the intended\naliases.\n\nAdditional names that this endpoint will be hosted on.", + "description": "DEPRECATED: This field is no longer supported. 
Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on.", "items": { "type": "string" }, "type": "array" }, "allowCors": { - "description": "Allowing\n[CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka\ncross-domain traffic, would allow the backends served from this endpoint to\nreceive and respond to HTTP OPTIONS requests. The response will be used by\nthe browser to determine whether the subsequent cross-origin request is\nallowed to proceed.", + "description": "Allowing [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka cross-domain traffic, would allow the backends served from this endpoint to receive and respond to HTTP OPTIONS requests. The response will be used by the browser to determine whether the subsequent cross-origin request is allowed to proceed.", "type": "boolean" }, - "features": { - "description": "The list of features enabled on this endpoint.", - "items": { - "type": "string" - }, - "type": "array" - }, "name": { "description": "The canonical name of this endpoint.", "type": "string" }, "target": { - "description": "The specification of an Internet routable address of API frontend that will\nhandle requests to this [API\nEndpoint](https://cloud.google.com/apis/design/glossary). It should be\neither a valid IPv4 address or a fully-qualified domain name. For example,\n\"8.8.8.8\" or \"myservice.appspot.com\".", + "description": "The specification of an Internet routable address of API frontend that will handle requests to this [API Endpoint](https://cloud.google.com/apis/design/glossary). It should be either a valid IPv4 address or a fully-qualified domain name. For example, \"8.8.8.8\" or \"myservice.appspot.com\".", "type": "string" } }, @@ -1289,7 +1639,7 @@ "type": "integer" }, "oneofIndex": { - "description": "The index of the field type in `Type.oneofs`, for message or enumeration\ntypes. The first type has index 1; zero means the type is not in the list.", + "description": "The index of the field type in `Type.oneofs`, for message or enumeration types. The first type has index 1; zero means the type is not in the list.", "format": "int32", "type": "integer" }, @@ -1305,14 +1655,14 @@ "type": "boolean" }, "typeUrl": { - "description": "The field type URL, without the scheme, for message or enumeration\ntypes. Example: `\"type.googleapis.com/google.protobuf.Timestamp\"`.", + "description": "The field type URL, without the scheme, for message or enumeration types. Example: `\"type.googleapis.com/google.protobuf.Timestamp\"`.", "type": "string" } }, "type": "object" }, "GoogleCloudServicenetworkingV1betaSubnetwork": { - "description": "Represents a subnet that was created or discovered by a private access\nmanagement service.", + "description": "Represents a subnet that was created or discovered by a private access management service.", "id": "GoogleCloudServicenetworkingV1betaSubnetwork", "properties": { "ipCidrRange": { @@ -1320,30 +1670,30 @@ "type": "string" }, "name": { - "description": "Subnetwork name.\nSee https://cloud.google.com/compute/docs/vpc/", + "description": "Subnetwork name. See https://cloud.google.com/compute/docs/vpc/", "type": "string" }, "network": { - "description": "In the Shared VPC host project, the VPC network that's peered with the\nconsumer network. 
For example:\n`projects/1234321/global/networks/host-network`", + "description": "In the Shared VPC host project, the VPC network that's peered with the consumer network. For example: `projects/1234321/global/networks/host-network`", "type": "string" }, "outsideAllocation": { - "description": "This is a discovered subnet that is not within the current consumer\nallocated ranges.", + "description": "This is a discovered subnet that is not within the current consumer allocated ranges.", "type": "boolean" } }, "type": "object" }, "Http": { - "description": "Defines the HTTP configuration for an API service. It contains a list of\nHttpRule, each specifying the mapping of an RPC method\nto one or more HTTP REST API methods.", + "description": "Defines the HTTP configuration for an API service. It contains a list of HttpRule, each specifying the mapping of an RPC method to one or more HTTP REST API methods.", "id": "Http", "properties": { "fullyDecodeReservedExpansion": { - "description": "When set to true, URL path parameters will be fully URI-decoded except in\ncases of single segment matches in reserved expansion, where \"%2F\" will be\nleft encoded.\n\nThe default behavior is to not decode RFC 6570 reserved characters in multi\nsegment matches.", + "description": "When set to true, URL path parameters will be fully URI-decoded except in cases of single segment matches in reserved expansion, where \"%2F\" will be left encoded. The default behavior is to not decode RFC 6570 reserved characters in multi segment matches.", "type": "boolean" }, "rules": { - "description": "A list of HTTP configuration rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of HTTP configuration rules that apply to individual API methods. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "HttpRule" }, @@ -1353,34 +1703,34 @@ "type": "object" }, "HttpRule": { - "description": "# gRPC Transcoding\n\ngRPC Transcoding is a feature for mapping between a gRPC method and one or\nmore HTTP REST endpoints. It allows developers to build a single API service\nthat supports both gRPC APIs and REST APIs. Many systems, including [Google\nAPIs](https://github.com/googleapis/googleapis),\n[Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC\nGateway](https://github.com/grpc-ecosystem/grpc-gateway),\nand [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature\nand use it for large scale production services.\n\n`HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies\nhow different portions of the gRPC request message are mapped to the URL\npath, URL query parameters, and HTTP request body. It also controls how the\ngRPC response message is mapped to the HTTP response body. `HttpRule` is\ntypically specified as an `google.api.http` annotation on the gRPC method.\n\nEach mapping specifies a URL path template and an HTTP method. 
The path\ntemplate may refer to one or more fields in the gRPC request message, as long\nas each field is a non-repeated field with a primitive (non-message) type.\nThe path template controls how fields of the request message are mapped to\nthe URL path.\n\nExample:\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http) = {\n get: \"/v1/{name=messages/*}\"\n };\n }\n }\n message GetMessageRequest {\n string name = 1; // Mapped to URL path.\n }\n message Message {\n string text = 1; // The resource content.\n }\n\nThis enables an HTTP REST to gRPC mapping as below:\n\nHTTP | gRPC\n-----|-----\n`GET /v1/messages/123456` | `GetMessage(name: \"messages/123456\")`\n\nAny fields in the request message which are not bound by the path template\nautomatically become HTTP query parameters if there is no HTTP request body.\nFor example:\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http) = {\n get:\"/v1/messages/{message_id}\"\n };\n }\n }\n message GetMessageRequest {\n message SubMessage {\n string subfield = 1;\n }\n string message_id = 1; // Mapped to URL path.\n int64 revision = 2; // Mapped to URL query parameter `revision`.\n SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`.\n }\n\nThis enables a HTTP JSON to RPC mapping as below:\n\nHTTP | gRPC\n-----|-----\n`GET /v1/messages/123456?revision=2\u0026sub.subfield=foo` |\n`GetMessage(message_id: \"123456\" revision: 2 sub: SubMessage(subfield:\n\"foo\"))`\n\nNote that fields which are mapped to URL query parameters must have a\nprimitive type or a repeated primitive type or a non-repeated message type.\nIn the case of a repeated type, the parameter can be repeated in the URL\nas `...?param=A\u0026param=B`. In the case of a message type, each field of the\nmessage is mapped to a separate parameter, such as\n`...?foo.a=A\u0026foo.b=B\u0026foo.c=C`.\n\nFor HTTP methods that allow a request body, the `body` field\nspecifies the mapping. Consider a REST update method on the\nmessage resource collection:\n\n service Messaging {\n rpc UpdateMessage(UpdateMessageRequest) returns (Message) {\n option (google.api.http) = {\n patch: \"/v1/messages/{message_id}\"\n body: \"message\"\n };\n }\n }\n message UpdateMessageRequest {\n string message_id = 1; // mapped to the URL\n Message message = 2; // mapped to the body\n }\n\nThe following HTTP JSON to RPC mapping is enabled, where the\nrepresentation of the JSON in the request body is determined by\nprotos JSON encoding:\n\nHTTP | gRPC\n-----|-----\n`PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id:\n\"123456\" message { text: \"Hi!\" })`\n\nThe special name `*` can be used in the body mapping to define that\nevery field not bound by the path template should be mapped to the\nrequest body. This enables the following alternative definition of\nthe update method:\n\n service Messaging {\n rpc UpdateMessage(Message) returns (Message) {\n option (google.api.http) = {\n patch: \"/v1/messages/{message_id}\"\n body: \"*\"\n };\n }\n }\n message Message {\n string message_id = 1;\n string text = 2;\n }\n\n\nThe following HTTP JSON to RPC mapping is enabled:\n\nHTTP | gRPC\n-----|-----\n`PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id:\n\"123456\" text: \"Hi!\")`\n\nNote that when using `*` in the body mapping, it is not possible to\nhave HTTP parameters, as all fields not bound by the path end in\nthe body. 
This makes this option more rarely used in practice when\ndefining REST APIs. The common usage of `*` is in custom methods\nwhich don't use the URL at all for transferring data.\n\nIt is possible to define multiple HTTP methods for one RPC by using\nthe `additional_bindings` option. Example:\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http) = {\n get: \"/v1/messages/{message_id}\"\n additional_bindings {\n get: \"/v1/users/{user_id}/messages/{message_id}\"\n }\n };\n }\n }\n message GetMessageRequest {\n string message_id = 1;\n string user_id = 2;\n }\n\nThis enables the following two alternative HTTP JSON to RPC mappings:\n\nHTTP | gRPC\n-----|-----\n`GET /v1/messages/123456` | `GetMessage(message_id: \"123456\")`\n`GET /v1/users/me/messages/123456` | `GetMessage(user_id: \"me\" message_id:\n\"123456\")`\n\n## Rules for HTTP mapping\n\n1. Leaf request fields (recursive expansion nested messages in the request\n message) are classified into three categories:\n - Fields referred by the path template. They are passed via the URL path.\n - Fields referred by the HttpRule.body. They are passed via the HTTP\n request body.\n - All other fields are passed via the URL query parameters, and the\n parameter name is the field path in the request message. A repeated\n field can be represented as multiple query parameters under the same\n name.\n 2. If HttpRule.body is \"*\", there is no URL query parameter, all fields\n are passed via URL path and HTTP request body.\n 3. If HttpRule.body is omitted, there is no HTTP request body, all\n fields are passed via URL path and URL query parameters.\n\n### Path template syntax\n\n Template = \"/\" Segments [ Verb ] ;\n Segments = Segment { \"/\" Segment } ;\n Segment = \"*\" | \"**\" | LITERAL | Variable ;\n Variable = \"{\" FieldPath [ \"=\" Segments ] \"}\" ;\n FieldPath = IDENT { \".\" IDENT } ;\n Verb = \":\" LITERAL ;\n\nThe syntax `*` matches a single URL path segment. The syntax `**` matches\nzero or more URL path segments, which must be the last part of the URL path\nexcept the `Verb`.\n\nThe syntax `Variable` matches part of the URL path as specified by its\ntemplate. A variable template must not contain other variables. If a variable\nmatches a single path segment, its template may be omitted, e.g. `{var}`\nis equivalent to `{var=*}`.\n\nThe syntax `LITERAL` matches literal text in the URL path. If the `LITERAL`\ncontains any reserved character, such characters should be percent-encoded\nbefore the matching.\n\nIf a variable contains exactly one path segment, such as `\"{var}\"` or\n`\"{var=*}\"`, when such a variable is expanded into a URL path on the client\nside, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The\nserver side does the reverse decoding. Such variables show up in the\n[Discovery\nDocument](https://developers.google.com/discovery/v1/reference/apis) as\n`{var}`.\n\nIf a variable contains multiple path segments, such as `\"{var=foo/*}\"`\nor `\"{var=**}\"`, when such a variable is expanded into a URL path on the\nclient side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded.\nThe server side does the reverse decoding, except \"%2F\" and \"%2f\" are left\nunchanged. 
Such variables show up in the\n[Discovery\nDocument](https://developers.google.com/discovery/v1/reference/apis) as\n`{+var}`.\n\n## Using gRPC API Service Configuration\n\ngRPC API Service Configuration (service config) is a configuration language\nfor configuring a gRPC service to become a user-facing product. The\nservice config is simply the YAML representation of the `google.api.Service`\nproto message.\n\nAs an alternative to annotating your proto file, you can configure gRPC\ntranscoding in your service config YAML files. You do this by specifying a\n`HttpRule` that maps the gRPC method to a REST endpoint, achieving the same\neffect as the proto annotation. This can be particularly useful if you\nhave a proto that is reused in multiple services. Note that any transcoding\nspecified in the service config will override any matching transcoding\nconfiguration in the proto.\n\nExample:\n\n http:\n rules:\n # Selects a gRPC method and applies HttpRule to it.\n - selector: example.v1.Messaging.GetMessage\n get: /v1/messages/{message_id}/{sub.subfield}\n\n## Special notes\n\nWhen gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the\nproto to JSON conversion must follow the [proto3\nspecification](https://developers.google.com/protocol-buffers/docs/proto3#json).\n\nWhile the single segment variable follows the semantics of\n[RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String\nExpansion, the multi segment variable **does not** follow RFC 6570 Section\n3.2.3 Reserved Expansion. The reason is that the Reserved Expansion\ndoes not expand special characters like `?` and `#`, which would lead\nto invalid URLs. As the result, gRPC Transcoding uses a custom encoding\nfor multi segment variables.\n\nThe path variables **must not** refer to any repeated or mapped field,\nbecause client libraries are not capable of handling such variable expansion.\n\nThe path variables **must not** capture the leading \"/\" character. The reason\nis that the most common use case \"{var}\" does not capture the leading \"/\"\ncharacter. For consistency, all path variables must share the same behavior.\n\nRepeated message fields must not be mapped to URL query parameters, because\nno client library can support such complicated mapping.\n\nIf an API needs to use a JSON array for request or response body, it can map\nthe request or response body to a repeated field. However, some gRPC\nTranscoding implementations may not support this feature.", + "description": "# gRPC Transcoding gRPC Transcoding is a feature for mapping between a gRPC method and one or more HTTP REST endpoints. It allows developers to build a single API service that supports both gRPC APIs and REST APIs. Many systems, including [Google APIs](https://github.com/googleapis/googleapis), [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC Gateway](https://github.com/grpc-ecosystem/grpc-gateway), and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature and use it for large scale production services. `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies how different portions of the gRPC request message are mapped to the URL path, URL query parameters, and HTTP request body. It also controls how the gRPC response message is mapped to the HTTP response body. `HttpRule` is typically specified as an `google.api.http` annotation on the gRPC method. Each mapping specifies a URL path template and an HTTP method. 
The path template may refer to one or more fields in the gRPC request message, as long as each field is a non-repeated field with a primitive (non-message) type. The path template controls how fields of the request message are mapped to the URL path. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/{name=messages/*}\" }; } } message GetMessageRequest { string name = 1; // Mapped to URL path. } message Message { string text = 1; // The resource content. } This enables an HTTP REST to gRPC mapping as below: HTTP | gRPC -----|----- `GET /v1/messages/123456` | `GetMessage(name: \"messages/123456\")` Any fields in the request message which are not bound by the path template automatically become HTTP query parameters if there is no HTTP request body. For example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get:\"/v1/messages/{message_id}\" }; } } message GetMessageRequest { message SubMessage { string subfield = 1; } string message_id = 1; // Mapped to URL path. int64 revision = 2; // Mapped to URL query parameter `revision`. SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. } This enables a HTTP JSON to RPC mapping as below: HTTP | gRPC -----|----- `GET /v1/messages/123456?revision=2\u0026sub.subfield=foo` | `GetMessage(message_id: \"123456\" revision: 2 sub: SubMessage(subfield: \"foo\"))` Note that fields which are mapped to URL query parameters must have a primitive type or a repeated primitive type or a non-repeated message type. In the case of a repeated type, the parameter can be repeated in the URL as `...?param=A\u0026param=B`. In the case of a message type, each field of the message is mapped to a separate parameter, such as `...?foo.a=A\u0026foo.b=B\u0026foo.c=C`. For HTTP methods that allow a request body, the `body` field specifies the mapping. Consider a REST update method on the message resource collection: service Messaging { rpc UpdateMessage(UpdateMessageRequest) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"message\" }; } } message UpdateMessageRequest { string message_id = 1; // mapped to the URL Message message = 2; // mapped to the body } The following HTTP JSON to RPC mapping is enabled, where the representation of the JSON in the request body is determined by protos JSON encoding: HTTP | gRPC -----|----- `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id: \"123456\" message { text: \"Hi!\" })` The special name `*` can be used in the body mapping to define that every field not bound by the path template should be mapped to the request body. This enables the following alternative definition of the update method: service Messaging { rpc UpdateMessage(Message) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"*\" }; } } message Message { string message_id = 1; string text = 2; } The following HTTP JSON to RPC mapping is enabled: HTTP | gRPC -----|----- `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id: \"123456\" text: \"Hi!\")` Note that when using `*` in the body mapping, it is not possible to have HTTP parameters, as all fields not bound by the path end in the body. This makes this option more rarely used in practice when defining REST APIs. The common usage of `*` is in custom methods which don't use the URL at all for transferring data. 
It is possible to define multiple HTTP methods for one RPC by using the `additional_bindings` option. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/messages/{message_id}\" additional_bindings { get: \"/v1/users/{user_id}/messages/{message_id}\" } }; } } message GetMessageRequest { string message_id = 1; string user_id = 2; } This enables the following two alternative HTTP JSON to RPC mappings: HTTP | gRPC -----|----- `GET /v1/messages/123456` | `GetMessage(message_id: \"123456\")` `GET /v1/users/me/messages/123456` | `GetMessage(user_id: \"me\" message_id: \"123456\")` ## Rules for HTTP mapping 1. Leaf request fields (recursive expansion nested messages in the request message) are classified into three categories: - Fields referred by the path template. They are passed via the URL path. - Fields referred by the HttpRule.body. They are passed via the HTTP request body. - All other fields are passed via the URL query parameters, and the parameter name is the field path in the request message. A repeated field can be represented as multiple query parameters under the same name. 2. If HttpRule.body is \"*\", there is no URL query parameter, all fields are passed via URL path and HTTP request body. 3. If HttpRule.body is omitted, there is no HTTP request body, all fields are passed via URL path and URL query parameters. ### Path template syntax Template = \"/\" Segments [ Verb ] ; Segments = Segment { \"/\" Segment } ; Segment = \"*\" | \"**\" | LITERAL | Variable ; Variable = \"{\" FieldPath [ \"=\" Segments ] \"}\" ; FieldPath = IDENT { \".\" IDENT } ; Verb = \":\" LITERAL ; The syntax `*` matches a single URL path segment. The syntax `**` matches zero or more URL path segments, which must be the last part of the URL path except the `Verb`. The syntax `Variable` matches part of the URL path as specified by its template. A variable template must not contain other variables. If a variable matches a single path segment, its template may be omitted, e.g. `{var}` is equivalent to `{var=*}`. The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` contains any reserved character, such characters should be percent-encoded before the matching. If a variable contains exactly one path segment, such as `\"{var}\"` or `\"{var=*}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{var}`. If a variable contains multiple path segments, such as `\"{var=foo/*}\"` or `\"{var=**}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding, except \"%2F\" and \"%2f\" are left unchanged. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{+var}`. ## Using gRPC API Service Configuration gRPC API Service Configuration (service config) is a configuration language for configuring a gRPC service to become a user-facing product. The service config is simply the YAML representation of the `google.api.Service` proto message. As an alternative to annotating your proto file, you can configure gRPC transcoding in your service config YAML files. 
You do this by specifying a `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same effect as the proto annotation. This can be particularly useful if you have a proto that is reused in multiple services. Note that any transcoding specified in the service config will override any matching transcoding configuration in the proto. Example: http: rules: # Selects a gRPC method and applies HttpRule to it. - selector: example.v1.Messaging.GetMessage get: /v1/messages/{message_id}/{sub.subfield} ## Special notes When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the proto to JSON conversion must follow the [proto3 specification](https://developers.google.com/protocol-buffers/docs/proto3#json). While the single segment variable follows the semantics of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String Expansion, the multi segment variable **does not** follow RFC 6570 Section 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion does not expand special characters like `?` and `#`, which would lead to invalid URLs. As the result, gRPC Transcoding uses a custom encoding for multi segment variables. The path variables **must not** refer to any repeated or mapped field, because client libraries are not capable of handling such variable expansion. The path variables **must not** capture the leading \"/\" character. The reason is that the most common use case \"{var}\" does not capture the leading \"/\" character. For consistency, all path variables must share the same behavior. Repeated message fields must not be mapped to URL query parameters, because no client library can support such complicated mapping. If an API needs to use a JSON array for request or response body, it can map the request or response body to a repeated field. However, some gRPC Transcoding implementations may not support this feature.", "id": "HttpRule", "properties": { "additionalBindings": { - "description": "Additional HTTP bindings for the selector. Nested bindings must\nnot contain an `additional_bindings` field themselves (that is,\nthe nesting may only be one level deep).", + "description": "Additional HTTP bindings for the selector. Nested bindings must not contain an `additional_bindings` field themselves (that is, the nesting may only be one level deep).", "items": { "$ref": "HttpRule" }, "type": "array" }, "allowHalfDuplex": { - "description": "When this flag is set to true, HTTP requests will be allowed to invoke a\nhalf-duplex streaming method.", + "description": "When this flag is set to true, HTTP requests will be allowed to invoke a half-duplex streaming method.", "type": "boolean" }, "body": { - "description": "The name of the request field whose value is mapped to the HTTP request\nbody, or `*` for mapping all request fields not captured by the path\npattern to the HTTP body, or omitted for not having any HTTP request body.\n\nNOTE: the referred field must be present at the top-level of the request\nmessage type.", + "description": "The name of the request field whose value is mapped to the HTTP request body, or `*` for mapping all request fields not captured by the path pattern to the HTTP body, or omitted for not having any HTTP request body. 
NOTE: the referred field must be present at the top-level of the request message type.", "type": "string" }, "custom": { "$ref": "CustomHttpPattern", - "description": "The custom pattern is used for specifying an HTTP method that is not\nincluded in the `pattern` field, such as HEAD, or \"*\" to leave the\nHTTP method unspecified for this rule. The wild-card rule is useful\nfor services that provide content to Web (HTML) clients." + "description": "The custom pattern is used for specifying an HTTP method that is not included in the `pattern` field, such as HEAD, or \"*\" to leave the HTTP method unspecified for this rule. The wild-card rule is useful for services that provide content to Web (HTML) clients." }, "delete": { "description": "Maps to HTTP DELETE. Used for deleting a resource.", "type": "string" }, "get": { - "description": "Maps to HTTP GET. Used for listing and getting information about\nresources.", + "description": "Maps to HTTP GET. Used for listing and getting information about resources.", "type": "string" }, "patch": { @@ -1396,11 +1746,11 @@ "type": "string" }, "responseBody": { - "description": "Optional. The name of the response field whose value is mapped to the HTTP\nresponse body. When omitted, the entire response message will be used\nas the HTTP response body.\n\nNOTE: The referred field must be present at the top-level of the response\nmessage type.", + "description": "Optional. The name of the response field whose value is mapped to the HTTP response body. When omitted, the entire response message will be used as the HTTP response body. NOTE: The referred field must be present at the top-level of the response message type.", "type": "string" }, "selector": { - "description": "Selects a method to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects a method to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, @@ -1419,7 +1769,7 @@ "type": "string" }, "valuePrefix": { - "description": "The value prefix. The value format is \"value_prefix{token}\"\nOnly applies to \"in\" header type. Must be empty for \"in\" query type.\nIf not empty, the header value has to match (case sensitive) this prefix.\nIf not matched, JWT will not be extracted. If matched, JWT will be\nextracted after the prefix is removed.\n\nFor example, for \"Authorization: Bearer {JWT}\",\nvalue_prefix=\"Bearer \" with a space at the end.", + "description": "The value prefix. The value format is \"value_prefix{token}\" Only applies to \"in\" header type. Must be empty for \"in\" query type. If not empty, the header value has to match (case sensitive) this prefix. If not matched, JWT will not be extracted. If matched, JWT will be extracted after the prefix is removed. 
For example, for \"Authorization: Bearer {JWT}\", value_prefix=\"Bearer \" with a space at the end.", "type": "string" } }, @@ -1455,7 +1805,7 @@ "type": "object" }, "ListConnectionsResponse": { - "description": "ListConnectionsResponse is the response to list peering states for the\ngiven service and consumer project.", + "description": "ListConnectionsResponse is the response to list peering states for the given service and consumer project.", "id": "ListConnectionsResponse", "properties": { "connections": { @@ -1486,45 +1836,59 @@ }, "type": "object" }, + "ListPeeredDnsDomainsResponse": { + "description": "Response to list peered DNS domains for a given connection.", + "id": "ListPeeredDnsDomainsResponse", + "properties": { + "peeredDnsDomains": { + "description": "The list of peered DNS domains.", + "items": { + "$ref": "PeeredDnsDomain" + }, + "type": "array" + } + }, + "type": "object" + }, "LogDescriptor": { - "description": "A description of a log type. Example in YAML format:\n\n - name: library.googleapis.com/activity_history\n description: The history of borrowing and returning library items.\n display_name: Activity\n labels:\n - key: /customer_id\n description: Identifier of a library customer", + "description": "A description of a log type. Example in YAML format: - name: library.googleapis.com/activity_history description: The history of borrowing and returning library items. display_name: Activity labels: - key: /customer_id description: Identifier of a library customer", "id": "LogDescriptor", "properties": { "description": { - "description": "A human-readable description of this log. This information appears in\nthe documentation and can contain details.", + "description": "A human-readable description of this log. This information appears in the documentation and can contain details.", "type": "string" }, "displayName": { - "description": "The human-readable name for this log. This information appears on\nthe user interface and should be concise.", + "description": "The human-readable name for this log. This information appears on the user interface and should be concise.", "type": "string" }, "labels": { - "description": "The set of labels that are available to describe a specific log entry.\nRuntime requests that contain labels not specified here are\nconsidered invalid.", + "description": "The set of labels that are available to describe a specific log entry. Runtime requests that contain labels not specified here are considered invalid.", "items": { "$ref": "LabelDescriptor" }, "type": "array" }, "name": { - "description": "The name of the log. It must be less than 512 characters long and can\ninclude the following characters: upper- and lower-case alphanumeric\ncharacters [A-Za-z0-9], and punctuation characters including\nslash, underscore, hyphen, period [/_-.].", + "description": "The name of the log. It must be less than 512 characters long and can include the following characters: upper- and lower-case alphanumeric characters [A-Za-z0-9], and punctuation characters including slash, underscore, hyphen, period [/_-.].", "type": "string" } }, "type": "object" }, "Logging": { - "description": "Logging configuration of the service.\n\nThe following example shows how to configure logs to be sent to the\nproducer and consumer projects. 
In the example, the `activity_history`\nlog is sent to both the producer and consumer projects, whereas the\n`purchase_history` log is only sent to the producer project.\n\n monitored_resources:\n - type: library.googleapis.com/branch\n labels:\n - key: /city\n description: The city where the library branch is located in.\n - key: /name\n description: The name of the branch.\n logs:\n - name: activity_history\n labels:\n - key: /customer_id\n - name: purchase_history\n logging:\n producer_destinations:\n - monitored_resource: library.googleapis.com/branch\n logs:\n - activity_history\n - purchase_history\n consumer_destinations:\n - monitored_resource: library.googleapis.com/branch\n logs:\n - activity_history", + "description": "Logging configuration of the service. The following example shows how to configure logs to be sent to the producer and consumer projects. In the example, the `activity_history` log is sent to both the producer and consumer projects, whereas the `purchase_history` log is only sent to the producer project. monitored_resources: - type: library.googleapis.com/branch labels: - key: /city description: The city where the library branch is located in. - key: /name description: The name of the branch. logs: - name: activity_history labels: - key: /customer_id - name: purchase_history logging: producer_destinations: - monitored_resource: library.googleapis.com/branch logs: - activity_history - purchase_history consumer_destinations: - monitored_resource: library.googleapis.com/branch logs: - activity_history", "id": "Logging", "properties": { "consumerDestinations": { - "description": "Logging configurations for sending logs to the consumer project.\nThere can be multiple consumer destinations, each one must have a\ndifferent monitored resource type. A log can be used in at most\none consumer destination.", + "description": "Logging configurations for sending logs to the consumer project. There can be multiple consumer destinations, each one must have a different monitored resource type. A log can be used in at most one consumer destination.", "items": { "$ref": "LoggingDestination" }, "type": "array" }, "producerDestinations": { - "description": "Logging configurations for sending logs to the producer project.\nThere can be multiple producer destinations, each one must have a\ndifferent monitored resource type. A log can be used in at most\none producer destination.", + "description": "Logging configurations for sending logs to the producer project. There can be multiple producer destinations, each one must have a different monitored resource type. A log can be used in at most one producer destination.", "items": { "$ref": "LoggingDestination" }, @@ -1534,18 +1898,18 @@ "type": "object" }, "LoggingDestination": { - "description": "Configuration of a specific logging destination (the producer project\nor the consumer project).", + "description": "Configuration of a specific logging destination (the producer project or the consumer project).", "id": "LoggingDestination", "properties": { "logs": { - "description": "Names of the logs to be sent to this destination. Each name must\nbe defined in the Service.logs section. If the log name is\nnot a domain scoped name, it will be automatically prefixed with\nthe service name followed by \"/\".", + "description": "Names of the logs to be sent to this destination. Each name must be defined in the Service.logs section. 
If the log name is not a domain scoped name, it will be automatically prefixed with the service name followed by \"/\".", "items": { "type": "string" }, "type": "array" }, "monitoredResource": { - "description": "The monitored resource type. The type must be defined in the\nService.monitored_resources section.", + "description": "The monitored resource type. The type must be defined in the Service.monitored_resources section.", "type": "string" } }, @@ -1598,7 +1962,7 @@ "type": "object" }, "MetricDescriptor": { - "description": "Defines a metric type and its schema. Once a metric descriptor is created,\ndeleting or altering it stops data collection and makes the metric type's\nexisting data unusable.", + "description": "Defines a metric type and its schema. Once a metric descriptor is created, deleting or altering it stops data collection and makes the metric type's existing data unusable. ", "id": "MetricDescriptor", "properties": { "description": { @@ -1606,11 +1970,11 @@ "type": "string" }, "displayName": { - "description": "A concise name for the metric, which can be displayed in user interfaces.\nUse sentence case without an ending period, for example \"Request count\".\nThis field is optional but it is recommended to be set for any metrics\nassociated with user-visible concepts, such as Quota.", + "description": "A concise name for the metric, which can be displayed in user interfaces. Use sentence case without an ending period, for example \"Request count\". This field is optional but it is recommended to be set for any metrics associated with user-visible concepts, such as Quota.", "type": "string" }, "labels": { - "description": "The set of labels that can be used to describe a specific\ninstance of this metric type. For example, the\n`appengine.googleapis.com/http/server/response_latencies` metric\ntype has a label for the HTTP response code, `response_code`, so\nyou can look at latencies for successful responses or just\nfor responses that failed.", + "description": "The set of labels that can be used to describe a specific instance of this metric type. For example, the `appengine.googleapis.com/http/server/response_latencies` metric type has a label for the HTTP response code, `response_code`, so you can look at latencies for successful responses or just for responses that failed.", "items": { "$ref": "LabelDescriptor" }, @@ -1632,11 +1996,11 @@ "Do not use this default value.", "The feature is not yet implemented. Users can not use it.", "Prelaunch features are hidden from users and are only visible internally.", - "Early Access features are limited to a closed group of testers. To use\nthese features, you must sign up in advance and sign a Trusted Tester\nagreement (which includes confidentiality provisions). These features may\nbe unstable, changed in backward-incompatible ways, and are not\nguaranteed to be released.", - "Alpha is a limited availability test for releases before they are cleared\nfor widespread use. By Alpha, all significant design issues are resolved\nand we are in the process of verifying functionality. Alpha customers\nneed to apply for access, agree to applicable terms, and have their\nprojects whitelisted. 
Alpha releases don’t have to be feature complete,\nno SLAs are provided, and there are no technical support obligations, but\nthey will be far enough along that customers can actually use them in\ntest environments or for limited-use tests -- just like they would in\nnormal production cases.", - "Beta is the point at which we are ready to open a release for any\ncustomer to use. There are no SLA or technical support obligations in a\nBeta release. Products will be complete from a feature perspective, but\nmay have some open outstanding issues. Beta releases are suitable for\nlimited production use cases.", - "GA features are open to all developers and are considered stable and\nfully qualified for production use.", - "Deprecated features are scheduled to be shut down and removed. For more\ninformation, see the “Deprecation Policy” section of our [Terms of\nService](https://cloud.google.com/terms/)\nand the [Google Cloud Platform Subject to the Deprecation\nPolicy](https://cloud.google.com/terms/deprecation) documentation." + "Early Access features are limited to a closed group of testers. To use these features, you must sign up in advance and sign a Trusted Tester agreement (which includes confidentiality provisions). These features may be unstable, changed in backward-incompatible ways, and are not guaranteed to be released.", + "Alpha is a limited availability test for releases before they are cleared for widespread use. By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects whitelisted. Alpha releases don’t have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", + "Beta is the point at which we are ready to open a release for any customer to use. There are no SLA or technical support obligations in a Beta release. Products will be complete from a feature perspective, but may have some open outstanding issues. Beta releases are suitable for limited production use cases.", + "GA features are open to all developers and are considered stable and fully qualified for production use.", + "Deprecated features are scheduled to be shut down and removed. For more information, see the “Deprecation Policy” section of our [Terms of Service](https://cloud.google.com/terms/) and the [Google Cloud Platform Subject to the Deprecation Policy](https://cloud.google.com/terms/deprecation) documentation." ], "type": "string" }, @@ -1645,7 +2009,7 @@ "description": "Optional. Metadata which can be used to guide usage of the metric." }, "metricKind": { - "description": "Whether the metric records instantaneous values, changes to a value, etc.\nSome combinations of `metric_kind` and `value_type` might not be supported.", + "description": "Whether the metric records instantaneous values, changes to a value, etc. Some combinations of `metric_kind` and `value_type` might not be supported.", "enum": [ "METRIC_KIND_UNSPECIFIED", "GAUGE", @@ -1656,12 +2020,12 @@ "Do not use this default value.", "An instantaneous measurement of a value.", "The change in a value during a time interval.", - "A value accumulated over a time interval. 
Cumulative\nmeasurements in a time series should have the same start time\nand increasing end times, until an event resets the cumulative\nvalue to zero and sets a new start time for the following\npoints." + "A value accumulated over a time interval. Cumulative measurements in a time series should have the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." ], "type": "string" }, "monitoredResourceTypes": { - "description": "Read-only. If present, then a time\nseries, which is identified partially by\na metric type and a MonitoredResourceDescriptor, that is associated\nwith this metric type can only be associated with one of the monitored\nresource types listed here.", + "description": "Read-only. If present, then a time series, which is identified partially by a metric type and a MonitoredResourceDescriptor, that is associated with this metric type can only be associated with one of the monitored resource types listed here.", "items": { "type": "string" }, @@ -1672,15 +2036,15 @@ "type": "string" }, "type": { - "description": "The metric type, including its DNS name prefix. The type is not\nURL-encoded. All user-defined metric types have the DNS name\n`custom.googleapis.com` or `external.googleapis.com`. Metric types should\nuse a natural hierarchical grouping. For example:\n\n \"custom.googleapis.com/invoice/paid/amount\"\n \"external.googleapis.com/prometheus/up\"\n \"appengine.googleapis.com/http/server/response_latencies\"", + "description": "The metric type, including its DNS name prefix. The type is not URL-encoded. All user-defined metric types have the DNS name `custom.googleapis.com` or `external.googleapis.com`. Metric types should use a natural hierarchical grouping. For example: \"custom.googleapis.com/invoice/paid/amount\" \"external.googleapis.com/prometheus/up\" \"appengine.googleapis.com/http/server/response_latencies\"", "type": "string" }, "unit": { - "description": "The units in which the metric value is reported. It is only applicable\nif the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit`\ndefines the representation of the stored metric values.\n\nDifferent systems may scale the values to be more easily displayed (so a\nvalue of `0.02KBy` _might_ be displayed as `20By`, and a value of\n`3523KBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is\n`KBy`, then the value of the metric is always in thousands of bytes, no\nmatter how it may be displayed..\n\nIf you want a custom metric to record the exact number of CPU-seconds used\nby a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is\n`s{CPU}` (or equivalently `1s{CPU}` or just `s`). 
If the job uses 12,005\nCPU-seconds, then the value is written as `12005`.\n\nAlternatively, if you want a custom metric to record data in a more\ngranular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is\n`ks{CPU}`, and then write the value `12.005` (which is `12005/1000`),\nor use `Kis{CPU}` and write `11.723` (which is `12005/1024`).\n\nThe supported units are a subset of [The Unified Code for Units of\nMeasure](http://unitsofmeasure.org/ucum.html) standard:\n\n**Basic units (UNIT)**\n\n* `bit` bit\n* `By` byte\n* `s` second\n* `min` minute\n* `h` hour\n* `d` day\n\n**Prefixes (PREFIX)**\n\n* `k` kilo (10^3)\n* `M` mega (10^6)\n* `G` giga (10^9)\n* `T` tera (10^12)\n* `P` peta (10^15)\n* `E` exa (10^18)\n* `Z` zetta (10^21)\n* `Y` yotta (10^24)\n\n* `m` milli (10^-3)\n* `u` micro (10^-6)\n* `n` nano (10^-9)\n* `p` pico (10^-12)\n* `f` femto (10^-15)\n* `a` atto (10^-18)\n* `z` zepto (10^-21)\n* `y` yocto (10^-24)\n\n* `Ki` kibi (2^10)\n* `Mi` mebi (2^20)\n* `Gi` gibi (2^30)\n* `Ti` tebi (2^40)\n* `Pi` pebi (2^50)\n\n**Grammar**\n\nThe grammar also includes these connectors:\n\n* `/` division or ratio (as an infix operator). For examples,\n `kBy/{email}` or `MiBy/10ms` (although you should almost never\n have `/s` in a metric `unit`; rates should always be computed at\n query time from the underlying cumulative or delta value).\n* `.` multiplication or composition (as an infix operator). For\n examples, `GBy.d` or `k{watt}.h`.\n\nThe grammar for a unit is as follows:\n\n Expression = Component { \".\" Component } { \"/\" Component } ;\n\n Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ]\n | Annotation\n | \"1\"\n ;\n\n Annotation = \"{\" NAME \"}\" ;\n\nNotes:\n\n* `Annotation` is just a comment if it follows a `UNIT`. If the annotation\n is used alone, then the unit is equivalent to `1`. For examples,\n `{request}/s == 1/s`, `By{transmitted}/s == By/s`.\n* `NAME` is a sequence of non-blank printable ASCII characters not\n containing `{` or `}`.\n* `1` represents a unitary [dimensionless\n unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such\n as in `1/s`. It is typically used when none of the basic units are\n appropriate. For example, \"new users per day\" can be represented as\n `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new\n users). Alternatively, \"thousands of page views per day\" would be\n represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric\n value of `5.3` would mean \"5300 page views per day\").\n* `%` represents dimensionless value of 1/100, and annotates values giving\n a percentage (so the metric values are typically in the range of 0..100,\n and a metric value `3` means \"3 percent\").\n* `10^2.%` indicates a metric contains a ratio, typically in the range\n 0..1, that will be multiplied by 100 and displayed as a percentage\n (so a metric value `0.03` means \"3 percent\").", + "description": "The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems may scale the values to be more easily displayed (so a value of `0.02KBy` _might_ be displayed as `20By`, and a value of `3523KBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `KBy`, then the value of the metric is always in thousands of bytes, no matter how it may be displayed.. 
If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). The supported units are a subset of [The Unified Code for Units of Measure](http://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component { \".\" Component } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users). Alternatively, \"thousands of page views per day\" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean \"5300 page views per day\"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means \"3 percent\").", "type": "string" }, "valueType": { - "description": "Whether the measurement is an integer, a floating-point number, etc.\nSome combinations of `metric_kind` and `value_type` might not be supported.", + "description": "Whether the measurement is an integer, a floating-point number, etc. 
Some combinations of `metric_kind` and `value_type` might not be supported.", "enum": [ "VALUE_TYPE_UNSPECIFIED", "BOOL", @@ -1692,10 +2056,10 @@ ], "enumDescriptions": [ "Do not use this default value.", - "The value is a boolean.\nThis value type can be used only if the metric kind is `GAUGE`.", + "The value is a boolean. This value type can be used only if the metric kind is `GAUGE`.", "The value is a signed 64-bit integer.", "The value is a double precision floating point number.", - "The value is a text string.\nThis value type can be used only if the metric kind is `GAUGE`.", + "The value is a text string. This value type can be used only if the metric kind is `GAUGE`.", "The value is a `Distribution`.", "The value is money." ], @@ -1709,7 +2073,7 @@ "id": "MetricDescriptorMetadata", "properties": { "ingestDelay": { - "description": "The delay of data points caused by ingestion. Data points older than this\nage are guaranteed to be ingested and available to be read, excluding\ndata loss due to errors.", + "description": "The delay of data points caused by ingestion. Data points older than this age are guaranteed to be ingested and available to be read, excluding data loss due to errors.", "format": "google-duration", "type": "string" }, @@ -1729,16 +2093,16 @@ "Do not use this default value.", "The feature is not yet implemented. Users can not use it.", "Prelaunch features are hidden from users and are only visible internally.", - "Early Access features are limited to a closed group of testers. To use\nthese features, you must sign up in advance and sign a Trusted Tester\nagreement (which includes confidentiality provisions). These features may\nbe unstable, changed in backward-incompatible ways, and are not\nguaranteed to be released.", - "Alpha is a limited availability test for releases before they are cleared\nfor widespread use. By Alpha, all significant design issues are resolved\nand we are in the process of verifying functionality. Alpha customers\nneed to apply for access, agree to applicable terms, and have their\nprojects whitelisted. Alpha releases don’t have to be feature complete,\nno SLAs are provided, and there are no technical support obligations, but\nthey will be far enough along that customers can actually use them in\ntest environments or for limited-use tests -- just like they would in\nnormal production cases.", - "Beta is the point at which we are ready to open a release for any\ncustomer to use. There are no SLA or technical support obligations in a\nBeta release. Products will be complete from a feature perspective, but\nmay have some open outstanding issues. Beta releases are suitable for\nlimited production use cases.", - "GA features are open to all developers and are considered stable and\nfully qualified for production use.", - "Deprecated features are scheduled to be shut down and removed. For more\ninformation, see the “Deprecation Policy” section of our [Terms of\nService](https://cloud.google.com/terms/)\nand the [Google Cloud Platform Subject to the Deprecation\nPolicy](https://cloud.google.com/terms/deprecation) documentation." + "Early Access features are limited to a closed group of testers. To use these features, you must sign up in advance and sign a Trusted Tester agreement (which includes confidentiality provisions). These features may be unstable, changed in backward-incompatible ways, and are not guaranteed to be released.", + "Alpha is a limited availability test for releases before they are cleared for widespread use. 
By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects whitelisted. Alpha releases don’t have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", + "Beta is the point at which we are ready to open a release for any customer to use. There are no SLA or technical support obligations in a Beta release. Products will be complete from a feature perspective, but may have some open outstanding issues. Beta releases are suitable for limited production use cases.", + "GA features are open to all developers and are considered stable and fully qualified for production use.", + "Deprecated features are scheduled to be shut down and removed. For more information, see the “Deprecation Policy” section of our [Terms of Service](https://cloud.google.com/terms/) and the [Google Cloud Platform Subject to the Deprecation Policy](https://cloud.google.com/terms/deprecation) documentation." ], "type": "string" }, "samplePeriod": { - "description": "The sampling period of metric data points. For metrics which are written\nperiodically, consecutive data points are stored at this time interval,\nexcluding data loss due to errors. Metrics with a higher granularity have\na smaller sampling period.", + "description": "The sampling period of metric data points. For metrics which are written periodically, consecutive data points are stored at this time interval, excluding data loss due to errors. Metrics with a higher granularity have a smaller sampling period.", "format": "google-duration", "type": "string" } @@ -1746,7 +2110,7 @@ "type": "object" }, "MetricRule": { - "description": "Bind API methods to metrics. Binding a method to a metric causes that\nmetric's configured quota behaviors to apply to the method call.", + "description": "Bind API methods to metrics. Binding a method to a metric causes that metric's configured quota behaviors to apply to the method call.", "id": "MetricRule", "properties": { "metricCosts": { @@ -1754,18 +2118,18 @@ "format": "int64", "type": "string" }, - "description": "Metrics to update when the selected methods are called, and the associated\ncost applied to each metric.\n\nThe key of the map is the metric name, and the values are the amount\nincreased for the metric against which the quota limits are defined.\nThe value must not be negative.", + "description": "Metrics to update when the selected methods are called, and the associated cost applied to each metric. The key of the map is the metric name, and the values are the amount increased for the metric against which the quota limits are defined. The value must not be negative.", "type": "object" }, "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, "type": "object" }, "Mixin": { - "description": "Declares an API Interface to be included in this interface. 
The including\ninterface must redeclare all the methods from the included interface, but\ndocumentation and options are inherited as follows:\n\n- If after comment and whitespace stripping, the documentation\n string of the redeclared method is empty, it will be inherited\n from the original method.\n\n- Each annotation belonging to the service config (http,\n visibility) which is not set in the redeclared method will be\n inherited.\n\n- If an http annotation is inherited, the path pattern will be\n modified as follows. Any version prefix will be replaced by the\n version of the including interface plus the root path if\n specified.\n\nExample of a simple mixin:\n\n package google.acl.v1;\n service AccessControl {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v1/{resource=**}:getAcl\";\n }\n }\n\n package google.storage.v2;\n service Storage {\n // rpc GetAcl(GetAclRequest) returns (Acl);\n\n // Get a data record.\n rpc GetData(GetDataRequest) returns (Data) {\n option (google.api.http).get = \"/v2/{resource=**}\";\n }\n }\n\nExample of a mixin configuration:\n\n apis:\n - name: google.storage.v2.Storage\n mixins:\n - name: google.acl.v1.AccessControl\n\nThe mixin construct implies that all methods in `AccessControl` are\nalso declared with same name and request/response types in\n`Storage`. A documentation generator or annotation processor will\nsee the effective `Storage.GetAcl` method after inherting\ndocumentation and annotations as follows:\n\n service Storage {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v2/{resource=**}:getAcl\";\n }\n ...\n }\n\nNote how the version in the path pattern changed from `v1` to `v2`.\n\nIf the `root` field in the mixin is specified, it should be a\nrelative path under which inherited HTTP paths are placed. Example:\n\n apis:\n - name: google.storage.v2.Storage\n mixins:\n - name: google.acl.v1.AccessControl\n root: acls\n\nThis implies the following inherited HTTP annotation:\n\n service Storage {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\";\n }\n ...\n }", + "description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. 
rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", "id": "Mixin", "properties": { "name": { @@ -1773,26 +2137,26 @@ "type": "string" }, "root": { - "description": "If non-empty specifies a path under which inherited HTTP paths\nare rooted.", + "description": "If non-empty specifies a path under which inherited HTTP paths are rooted.", "type": "string" } }, "type": "object" }, "MonitoredResourceDescriptor": { - "description": "An object that describes the schema of a MonitoredResource object using a\ntype name and a set of labels. For example, the monitored resource\ndescriptor for Google Compute Engine VM instances has a type of\n`\"gce_instance\"` and specifies the use of the labels `\"instance_id\"` and\n`\"zone\"` to identify particular VM instances.\n\nDifferent APIs can support different monitored resource types. APIs generally\nprovide a `list` method that returns the monitored resource descriptors used\nby the API.", + "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of `\"gce_instance\"` and specifies the use of the labels `\"instance_id\"` and `\"zone\"` to identify particular VM instances. Different APIs can support different monitored resource types. APIs generally provide a `list` method that returns the monitored resource descriptors used by the API. ", "id": "MonitoredResourceDescriptor", "properties": { "description": { - "description": "Optional. A detailed description of the monitored resource type that might\nbe used in documentation.", + "description": "Optional. A detailed description of the monitored resource type that might be used in documentation.", "type": "string" }, "displayName": { - "description": "Optional. A concise name for the monitored resource type that might be\ndisplayed in user interfaces. It should be a Title Cased Noun Phrase,\nwithout any article or other determiners. For example,\n`\"Google Cloud SQL Database\"`.", + "description": "Optional. A concise name for the monitored resource type that might be displayed in user interfaces. It should be a Title Cased Noun Phrase, without any article or other determiners. For example, `\"Google Cloud SQL Database\"`.", "type": "string" }, "labels": { - "description": "Required. 
A set of labels used to describe instances of this monitored\nresource type. For example, an individual Google Cloud SQL database is\nidentified by values for the labels `\"database_id\"` and `\"zone\"`.", + "description": "Required. A set of labels used to describe instances of this monitored resource type. For example, an individual Google Cloud SQL database is identified by values for the labels `\"database_id\"` and `\"zone\"`.", "items": { "$ref": "LabelDescriptor" }, @@ -1814,38 +2178,38 @@ "Do not use this default value.", "The feature is not yet implemented. Users can not use it.", "Prelaunch features are hidden from users and are only visible internally.", - "Early Access features are limited to a closed group of testers. To use\nthese features, you must sign up in advance and sign a Trusted Tester\nagreement (which includes confidentiality provisions). These features may\nbe unstable, changed in backward-incompatible ways, and are not\nguaranteed to be released.", - "Alpha is a limited availability test for releases before they are cleared\nfor widespread use. By Alpha, all significant design issues are resolved\nand we are in the process of verifying functionality. Alpha customers\nneed to apply for access, agree to applicable terms, and have their\nprojects whitelisted. Alpha releases don’t have to be feature complete,\nno SLAs are provided, and there are no technical support obligations, but\nthey will be far enough along that customers can actually use them in\ntest environments or for limited-use tests -- just like they would in\nnormal production cases.", - "Beta is the point at which we are ready to open a release for any\ncustomer to use. There are no SLA or technical support obligations in a\nBeta release. Products will be complete from a feature perspective, but\nmay have some open outstanding issues. Beta releases are suitable for\nlimited production use cases.", - "GA features are open to all developers and are considered stable and\nfully qualified for production use.", - "Deprecated features are scheduled to be shut down and removed. For more\ninformation, see the “Deprecation Policy” section of our [Terms of\nService](https://cloud.google.com/terms/)\nand the [Google Cloud Platform Subject to the Deprecation\nPolicy](https://cloud.google.com/terms/deprecation) documentation." + "Early Access features are limited to a closed group of testers. To use these features, you must sign up in advance and sign a Trusted Tester agreement (which includes confidentiality provisions). These features may be unstable, changed in backward-incompatible ways, and are not guaranteed to be released.", + "Alpha is a limited availability test for releases before they are cleared for widespread use. By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects whitelisted. Alpha releases don’t have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", + "Beta is the point at which we are ready to open a release for any customer to use. There are no SLA or technical support obligations in a Beta release. Products will be complete from a feature perspective, but may have some open outstanding issues. 
Beta releases are suitable for limited production use cases.", + "GA features are open to all developers and are considered stable and fully qualified for production use.", + "Deprecated features are scheduled to be shut down and removed. For more information, see the “Deprecation Policy” section of our [Terms of Service](https://cloud.google.com/terms/) and the [Google Cloud Platform Subject to the Deprecation Policy](https://cloud.google.com/terms/deprecation) documentation." ], "type": "string" }, "name": { - "description": "Optional. The resource name of the monitored resource descriptor:\n`\"projects/{project_id}/monitoredResourceDescriptors/{type}\"` where\n{type} is the value of the `type` field in this object and\n{project_id} is a project ID that provides API-specific context for\naccessing the type. APIs that do not use project information can use the\nresource name format `\"monitoredResourceDescriptors/{type}\"`.", + "description": "Optional. The resource name of the monitored resource descriptor: `\"projects/{project_id}/monitoredResourceDescriptors/{type}\"` where {type} is the value of the `type` field in this object and {project_id} is a project ID that provides API-specific context for accessing the type. APIs that do not use project information can use the resource name format `\"monitoredResourceDescriptors/{type}\"`.", "type": "string" }, "type": { - "description": "Required. The monitored resource type. For example, the type\n`\"cloudsql_database\"` represents databases in Google Cloud SQL.\nThe maximum length of this value is 256 characters.", + "description": "Required. The monitored resource type. For example, the type `\"cloudsql_database\"` represents databases in Google Cloud SQL.", "type": "string" } }, "type": "object" }, "Monitoring": { - "description": "Monitoring configuration of the service.\n\nThe example below shows how to configure monitored resources and metrics\nfor monitoring. In the example, a monitored resource and two metrics are\ndefined. The `library.googleapis.com/book/returned_count` metric is sent\nto both producer and consumer projects, whereas the\n`library.googleapis.com/book/overdue_count` metric is only sent to the\nconsumer project.\n\n monitored_resources:\n - type: library.googleapis.com/branch\n labels:\n - key: /city\n description: The city where the library branch is located in.\n - key: /name\n description: The name of the branch.\n metrics:\n - name: library.googleapis.com/book/returned_count\n metric_kind: DELTA\n value_type: INT64\n labels:\n - key: /customer_id\n - name: library.googleapis.com/book/overdue_count\n metric_kind: GAUGE\n value_type: INT64\n labels:\n - key: /customer_id\n monitoring:\n producer_destinations:\n - monitored_resource: library.googleapis.com/branch\n metrics:\n - library.googleapis.com/book/returned_count\n consumer_destinations:\n - monitored_resource: library.googleapis.com/branch\n metrics:\n - library.googleapis.com/book/returned_count\n - library.googleapis.com/book/overdue_count", + "description": "Monitoring configuration of the service. The example below shows how to configure monitored resources and metrics for monitoring. In the example, a monitored resource and two metrics are defined. The `library.googleapis.com/book/returned_count` metric is sent to both producer and consumer projects, whereas the `library.googleapis.com/book/num_overdue` metric is only sent to the consumer project. 
monitored_resources: - type: library.googleapis.com/Branch display_name: \"Library Branch\" description: \"A branch of a library.\" launch_stage: GA labels: - key: resource_container description: \"The Cloud container (ie. project id) for the Branch.\" - key: location description: \"The location of the library branch.\" - key: branch_id description: \"The id of the branch.\" metrics: - name: library.googleapis.com/book/returned_count display_name: \"Books Returned\" description: \"The count of books that have been returned.\" launch_stage: GA metric_kind: DELTA value_type: INT64 unit: \"1\" labels: - key: customer_id description: \"The id of the customer.\" - name: library.googleapis.com/book/num_overdue display_name: \"Books Overdue\" description: \"The current number of overdue books.\" launch_stage: GA metric_kind: GAUGE value_type: INT64 unit: \"1\" labels: - key: customer_id description: \"The id of the customer.\" monitoring: producer_destinations: - monitored_resource: library.googleapis.com/Branch metrics: - library.googleapis.com/book/returned_count consumer_destinations: - monitored_resource: library.googleapis.com/Branch metrics: - library.googleapis.com/book/returned_count - library.googleapis.com/book/num_overdue", "id": "Monitoring", "properties": { "consumerDestinations": { - "description": "Monitoring configurations for sending metrics to the consumer project.\nThere can be multiple consumer destinations. A monitored resouce type may\nappear in multiple monitoring destinations if different aggregations are\nneeded for different sets of metrics associated with that monitored\nresource type. A monitored resource and metric pair may only be used once\nin the Monitoring configuration.", + "description": "Monitoring configurations for sending metrics to the consumer project. There can be multiple consumer destinations. A monitored resource type may appear in multiple monitoring destinations if different aggregations are needed for different sets of metrics associated with that monitored resource type. A monitored resource and metric pair may only be used once in the Monitoring configuration.", "items": { "$ref": "MonitoringDestination" }, "type": "array" }, "producerDestinations": { - "description": "Monitoring configurations for sending metrics to the producer project.\nThere can be multiple producer destinations. A monitored resouce type may\nappear in multiple monitoring destinations if different aggregations are\nneeded for different sets of metrics associated with that monitored\nresource type. A monitored resource and metric pair may only be used once\nin the Monitoring configuration.", + "description": "Monitoring configurations for sending metrics to the producer project. There can be multiple producer destinations. A monitored resource type may appear in multiple monitoring destinations if different aggregations are needed for different sets of metrics associated with that monitored resource type. 
A monitored resource and metric pair may only be used once in the Monitoring configuration.", "items": { "$ref": "MonitoringDestination" }, @@ -1855,40 +2219,40 @@ "type": "object" }, "MonitoringDestination": { - "description": "Configuration of a specific monitoring destination (the producer project\nor the consumer project).", + "description": "Configuration of a specific monitoring destination (the producer project or the consumer project).", "id": "MonitoringDestination", "properties": { "metrics": { - "description": "Types of the metrics to report to this monitoring destination.\nEach type must be defined in Service.metrics section.", + "description": "Types of the metrics to report to this monitoring destination. Each type must be defined in Service.metrics section.", "items": { "type": "string" }, "type": "array" }, "monitoredResource": { - "description": "The monitored resource type. The type must be defined in\nService.monitored_resources section.", + "description": "The monitored resource type. The type must be defined in Service.monitored_resources section.", "type": "string" } }, "type": "object" }, "OAuthRequirements": { - "description": "OAuth scopes are a way to define data and permissions on data. For example,\nthere are scopes defined for \"Read-only access to Google Calendar\" and\n\"Access to Cloud Platform\". Users can consent to a scope for an application,\ngiving it permission to access that data on their behalf.\n\nOAuth scope specifications should be fairly coarse grained; a user will need\nto see and understand the text description of what your scope means.\n\nIn most cases: use one or at most two OAuth scopes for an entire family of\nproducts. If your product has multiple APIs, you should probably be sharing\nthe OAuth scope across all of those APIs.\n\nWhen you need finer grained OAuth consent screens: talk with your product\nmanagement about how developers will use them in practice.\n\nPlease note that even though each of the canonical scopes is enough for a\nrequest to be accepted and passed to the backend, a request can still fail\ndue to the backend requiring additional scopes or permissions.", + "description": "OAuth scopes are a way to define data and permissions on data. For example, there are scopes defined for \"Read-only access to Google Calendar\" and \"Access to Cloud Platform\". Users can consent to a scope for an application, giving it permission to access that data on their behalf. OAuth scope specifications should be fairly coarse grained; a user will need to see and understand the text description of what your scope means. In most cases: use one or at most two OAuth scopes for an entire family of products. If your product has multiple APIs, you should probably be sharing the OAuth scope across all of those APIs. When you need finer grained OAuth consent screens: talk with your product management about how developers will use them in practice. Please note that even though each of the canonical scopes is enough for a request to be accepted and passed to the backend, a request can still fail due to the backend requiring additional scopes or permissions.", "id": "OAuthRequirements", "properties": { "canonicalScopes": { - "description": "The list of publicly documented OAuth scopes that are allowed access. 
An\nOAuth token containing any of these scopes will be accepted.\n\nExample:\n\n canonical_scopes: https://www.googleapis.com/auth/calendar,\n https://www.googleapis.com/auth/calendar.read", + "description": "The list of publicly documented OAuth scopes that are allowed access. An OAuth token containing any of these scopes will be accepted. Example: canonical_scopes: https://www.googleapis.com/auth/calendar, https://www.googleapis.com/auth/calendar.read", "type": "string" } }, "type": "object" }, "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", "properties": { "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", "type": "boolean" }, "error": { @@ -1900,11 +2264,11 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", "type": "object" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, "response": { @@ -1912,18 +2276,18 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. 
For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", "type": "object" } }, "type": "object" }, "Option": { - "description": "A protocol buffer option, which can be attached to a message, field,\nenumeration, etc.", + "description": "A protocol buffer option, which can be attached to a message, field, enumeration, etc.", "id": "Option", "properties": { "name": { - "description": "The option's name. For protobuf built-in options (options defined in\ndescriptor.proto), this is the short name. For example, `\"map_entry\"`.\nFor custom options, it should be the fully-qualified name. For example,\n`\"google.api.http\"`.", + "description": "The option's name. For protobuf built-in options (options defined in descriptor.proto), this is the short name. For example, `\"map_entry\"`. For custom options, it should be the fully-qualified name. For example, `\"google.api.http\"`.", "type": "string" }, "value": { @@ -1931,26 +2295,26 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The option's value packed in an Any message. If the value is a primitive,\nthe corresponding wrapper type defined in google/protobuf/wrappers.proto\nshould be used. If the value is an enum, it should be stored as an int32\nvalue using the google.protobuf.Int32Value type.", + "description": "The option's value packed in an Any message. If the value is a primitive, the corresponding wrapper type defined in google/protobuf/wrappers.proto should be used. If the value is an enum, it should be stored as an int32 value using the google.protobuf.Int32Value type.", "type": "object" } }, "type": "object" }, "Page": { - "description": "Represents a documentation page. A page can contain subpages to represent\nnested documentation set structure.", + "description": "Represents a documentation page. A page can contain subpages to represent nested documentation set structure.", "id": "Page", "properties": { "content": { - "description": "The Markdown content of the page. You can use \u003ccode\u003e\u0026#40;== include {path}\n==\u0026#41;\u003c/code\u003e to include content from a Markdown file.", + "description": "The Markdown content of the page. You can use (== include {path} ==) to include content from a Markdown file.", "type": "string" }, "name": { - "description": "The name of the page. It will be used as an identity of the page to\ngenerate URI of the page, text of the link to this page in navigation,\netc. The full page name (start from the root page name to this page\nconcatenated with `.`) can be used as reference to the page in your\ndocumentation. For example:\n\u003cpre\u003e\u003ccode\u003epages:\n- name: Tutorial\n content: \u0026#40;== include tutorial.md ==\u0026#41;\n subpages:\n - name: Java\n content: \u0026#40;== include tutorial_java.md ==\u0026#41;\n\u003c/code\u003e\u003c/pre\u003e\nYou can reference `Java` page using Markdown reference link syntax:\n`Java`.", + "description": "The name of the page. It will be used as an identity of the page to generate URI of the page, text of the link to this page in navigation, etc. The full page name (start from the root page name to this page concatenated with `.`) can be used as reference to the page in your documentation. 
For example: pages: - name: Tutorial content: (== include tutorial.md ==) subpages: - name: Java content: (== include tutorial_java.md ==) You can reference `Java` page using Markdown reference link syntax: `Java`.", "type": "string" }, "subpages": { - "description": "Subpages of this page. The order of subpages specified here will be\nhonored in the generated docset.", + "description": "Subpages of this page. The order of subpages specified here will be honored in the generated docset.", "items": { "$ref": "Page" }, @@ -1959,23 +2323,44 @@ }, "type": "object" }, + "PeeredDnsDomain": { + "description": "DNS domain suffix for which requests originating in the producer VPC network are resolved in the associated consumer VPC network.", + "id": "PeeredDnsDomain", + "properties": { + "dnsSuffix": { + "description": "The DNS domain name suffix e.g. `example.com.`.", + "type": "string" + }, + "name": { + "description": "User assigned name for this resource. Must be unique within the consumer network. The name must be 1-63 characters long, must begin with a letter, end with a letter or digit, and only contain lowercase letters, digits or dashes.", + "type": "string" + } + }, + "type": "object" + }, + "PeeredDnsDomainMetadata": { + "description": "Metadata provided through GetOperation request for the LRO generated by CreatePeeredDnsDomain API.", + "id": "PeeredDnsDomainMetadata", + "properties": {}, + "type": "object" + }, "PolicyBinding": { "description": "Grouping of IAM role and IAM member.", "id": "PolicyBinding", "properties": { "member": { - "description": "Required. Member to bind the role with. See\n/iam/docs/reference/rest/v1/Policy#Binding for how to format each member.\nEg.\n - user:myuser@mydomain.com\n - serviceAccount:my-service-account@app.gserviceaccount.com", + "description": "Required. Member to bind the role with. See /iam/docs/reference/rest/v1/Policy#Binding for how to format each member. Eg. - user:myuser@mydomain.com - serviceAccount:my-service-account@app.gserviceaccount.com", "type": "string" }, "role": { - "description": "Required. Role to apply. Only whitelisted roles can be used at the specified\ngranularity. The role must be one of the following:\n - 'roles/container.hostServiceAgentUser' applied on the shared VPC host\n project", + "description": "Required. Role to apply. Only whitelisted roles can be used at the specified granularity. The role must be one of the following: - 'roles/container.hostServiceAgentUser' applied on the shared VPC host project - 'roles/compute.securityAdmin' applied on the shared VPC host project", "type": "string" } }, "type": "object" }, "Quota": { - "description": "Quota configuration helps to achieve fairness and budgeting in service\nusage.\n\nThe metric based quota configuration works this way:\n- The service configuration defines a set of metrics.\n- For API calls, the quota.metric_rules maps methods to metrics with\n corresponding costs.\n- The quota.limits defines limits on the metrics, which will be used for\n quota checks at runtime.\n\nAn example quota configuration in yaml format:\n\n quota:\n limits:\n\n - name: apiWriteQpsPerProject\n metric: library.googleapis.com/write_calls\n unit: \"1/min/{project}\" # rate limit for consumer projects\n values:\n STANDARD: 10000\n\n\n # The metric rules bind all methods to the read_calls metric,\n # except for the UpdateBook and DeleteBook methods. 
These two methods\n # are mapped to the write_calls metric, with the UpdateBook method\n # consuming at twice rate as the DeleteBook method.\n metric_rules:\n - selector: \"*\"\n metric_costs:\n library.googleapis.com/read_calls: 1\n - selector: google.example.library.v1.LibraryService.UpdateBook\n metric_costs:\n library.googleapis.com/write_calls: 2\n - selector: google.example.library.v1.LibraryService.DeleteBook\n metric_costs:\n library.googleapis.com/write_calls: 1\n\n Corresponding Metric definition:\n\n metrics:\n - name: library.googleapis.com/read_calls\n display_name: Read requests\n metric_kind: DELTA\n value_type: INT64\n\n - name: library.googleapis.com/write_calls\n display_name: Write requests\n metric_kind: DELTA\n value_type: INT64\n\n", + "description": "Quota configuration helps to achieve fairness and budgeting in service usage. The metric based quota configuration works this way: - The service configuration defines a set of metrics. - For API calls, the quota.metric_rules maps methods to metrics with corresponding costs. - The quota.limits defines limits on the metrics, which will be used for quota checks at runtime. An example quota configuration in yaml format: quota: limits: - name: apiWriteQpsPerProject metric: library.googleapis.com/write_calls unit: \"1/min/{project}\" # rate limit for consumer projects values: STANDARD: 10000 # The metric rules bind all methods to the read_calls metric, # except for the UpdateBook and DeleteBook methods. These two methods # are mapped to the write_calls metric, with the UpdateBook method # consuming at twice rate as the DeleteBook method. metric_rules: - selector: \"*\" metric_costs: library.googleapis.com/read_calls: 1 - selector: google.example.library.v1.LibraryService.UpdateBook metric_costs: library.googleapis.com/write_calls: 2 - selector: google.example.library.v1.LibraryService.DeleteBook metric_costs: library.googleapis.com/write_calls: 1 Corresponding Metric definition: metrics: - name: library.googleapis.com/read_calls display_name: Read requests metric_kind: DELTA value_type: INT64 - name: library.googleapis.com/write_calls display_name: Write requests metric_kind: DELTA value_type: INT64 ", "id": "Quota", "properties": { "limits": { @@ -1986,7 +2371,7 @@ "type": "array" }, "metricRules": { - "description": "List of `MetricRule` definitions, each one mapping a selected method to one\nor more metrics.", + "description": "List of `MetricRule` definitions, each one mapping a selected method to one or more metrics.", "items": { "$ref": "MetricRule" }, @@ -1996,46 +2381,46 @@ "type": "object" }, "QuotaLimit": { - "description": "`QuotaLimit` defines a specific limit that applies over a specified duration\nfor a limit type. There can be at most one limit for a duration and limit\ntype combination defined within a `QuotaGroup`.", + "description": "`QuotaLimit` defines a specific limit that applies over a specified duration for a limit type. There can be at most one limit for a duration and limit type combination defined within a `QuotaGroup`.", "id": "QuotaLimit", "properties": { "defaultLimit": { - "description": "Default number of tokens that can be consumed during the specified\nduration. This is the number of tokens assigned when a client\napplication developer activates the service for his/her project.\n\nSpecifying a value of 0 will block all requests. This can be used if you\nare provisioning quota to selected consumers and blocking others.\nSimilarly, a value of -1 will indicate an unlimited quota. 
No other\nnegative values are allowed.\n\nUsed by group-based quotas only.", + "description": "Default number of tokens that can be consumed during the specified duration. This is the number of tokens assigned when a client application developer activates the service for his/her project. Specifying a value of 0 will block all requests. This can be used if you are provisioning quota to selected consumers and blocking others. Similarly, a value of -1 will indicate an unlimited quota. No other negative values are allowed. Used by group-based quotas only.", "format": "int64", "type": "string" }, "description": { - "description": "Optional. User-visible, extended description for this quota limit.\nShould be used only when more context is needed to understand this limit\nthan provided by the limit's display name (see: `display_name`).", + "description": "Optional. User-visible, extended description for this quota limit. Should be used only when more context is needed to understand this limit than provided by the limit's display name (see: `display_name`).", "type": "string" }, "displayName": { - "description": "User-visible display name for this limit.\nOptional. If not set, the UI will provide a default display name based on\nthe quota configuration. This field can be used to override the default\ndisplay name generated from the configuration.", + "description": "User-visible display name for this limit. Optional. If not set, the UI will provide a default display name based on the quota configuration. This field can be used to override the default display name generated from the configuration.", "type": "string" }, "duration": { - "description": "Duration of this limit in textual notation. Must be \"100s\" or \"1d\".\n\nUsed by group-based quotas only.", + "description": "Duration of this limit in textual notation. Must be \"100s\" or \"1d\". Used by group-based quotas only.", "type": "string" }, "freeTier": { - "description": "Free tier value displayed in the Developers Console for this limit.\nThe free tier is the number of tokens that will be subtracted from the\nbilled amount when billing is enabled.\nThis field can only be set on a limit with duration \"1d\", in a billable\ngroup; it is invalid on any other limit. If this field is not set, it\ndefaults to 0, indicating that there is no free tier for this service.\n\nUsed by group-based quotas only.", + "description": "Free tier value displayed in the Developers Console for this limit. The free tier is the number of tokens that will be subtracted from the billed amount when billing is enabled. This field can only be set on a limit with duration \"1d\", in a billable group; it is invalid on any other limit. If this field is not set, it defaults to 0, indicating that there is no free tier for this service. Used by group-based quotas only.", "format": "int64", "type": "string" }, "maxLimit": { - "description": "Maximum number of tokens that can be consumed during the specified\nduration. Client application developers can override the default limit up\nto this maximum. If specified, this value cannot be set to a value less\nthan the default limit. If not specified, it is set to the default limit.\n\nTo allow clients to apply overrides with no upper bound, set this to -1,\nindicating unlimited maximum quota.\n\nUsed by group-based quotas only.", + "description": "Maximum number of tokens that can be consumed during the specified duration. Client application developers can override the default limit up to this maximum. 
If specified, this value cannot be set to a value less than the default limit. If not specified, it is set to the default limit. To allow clients to apply overrides with no upper bound, set this to -1, indicating unlimited maximum quota. Used by group-based quotas only.", "format": "int64", "type": "string" }, "metric": { - "description": "The name of the metric this quota limit applies to. The quota limits with\nthe same metric will be checked together during runtime. The metric must be\ndefined within the service config.", + "description": "The name of the metric this quota limit applies to. The quota limits with the same metric will be checked together during runtime. The metric must be defined within the service config.", "type": "string" }, "name": { - "description": "Name of the quota limit.\n\nThe name must be provided, and it must be unique within the service. The\nname can only include alphanumeric characters as well as '-'.\n\nThe maximum length of the limit name is 64 characters.", + "description": "Name of the quota limit. The name must be provided, and it must be unique within the service. The name can only include alphanumeric characters as well as '-'. The maximum length of the limit name is 64 characters.", "type": "string" }, "unit": { - "description": "Specify the unit of the quota limit. It uses the same syntax as\nMetric.unit. The supported unit kinds are determined by the quota\nbackend system.\n\nHere are some examples:\n* \"1/min/{project}\" for quota per minute per project.\n\nNote: the order of unit components is insignificant.\nThe \"1\" at the beginning is required to follow the metric unit syntax.", + "description": "Specify the unit of the quota limit. It uses the same syntax as Metric.unit. The supported unit kinds are determined by the quota backend system. Here are some examples: * \"1/min/{project}\" for quota per minute per project. Note: the order of unit components is insignificant. The \"1\" at the beginning is required to follow the metric unit syntax.", "type": "string" }, "values": { @@ -2043,7 +2428,7 @@ "format": "int64", "type": "string" }, - "description": "Tiered limit values. You must specify this as a key:value pair, with an\ninteger value that is the maximum number of requests allowed for the\nspecified unit. Currently only STANDARD is supported.", + "description": "Tiered limit values. You must specify this as a key:value pair, with an integer value that is the maximum number of requests allowed for the specified unit. Currently only STANDARD is supported.", "type": "object" } }, @@ -2054,11 +2439,11 @@ "id": "Range", "properties": { "ipCidrRange": { - "description": "CIDR range in \"10.x.x.x/y\" format that is within the\nallocated ranges and currently unused.", + "description": "CIDR range in \"10.x.x.x/y\" format that is within the allocated ranges and currently unused.", "type": "string" }, "network": { - "description": "In the Shared VPC host project, the VPC network that's peered with the\nconsumer network. For example:\n`projects/1234321/global/networks/host-network`", + "description": "In the Shared VPC host project, the VPC network that's peered with the consumer network. For example: `projects/1234321/global/networks/host-network`", "type": "string" } }, @@ -2069,12 +2454,12 @@ "id": "RangeReservation", "properties": { "ipPrefixLength": { - "description": "Required. The size of the desired subnet. Use usual CIDR range notation. For example,\n'30' to find unused x.x.x.x/30 CIDR range. 
The goal is to determine if one\nof the allocated ranges has enough free space for a subnet of the requested\nsize.", + "description": "Required. The size of the desired subnet. Use usual CIDR range notation. For example, '30' to find unused x.x.x.x/30 CIDR range. The goal is to determine if one of the allocated ranges has enough free space for a subnet of the requested size.", "format": "int32", "type": "integer" }, "secondaryRangeIpPrefixLengths": { - "description": "Optional. DO NOT USE - Under development.\nThe size of the desired secondary ranges for the subnet. Use usual CIDR\nrange notation. For example, '30' to find unused x.x.x.x/30 CIDR range. The\ngoal is to determine that the allocated ranges have enough free space for\nall the requested secondary ranges.", + "description": "Optional. DO NOT USE - Under development. The size of the desired secondary ranges for the subnet. Use usual CIDR range notation. For example, '30' to find unused x.x.x.x/30 CIDR range. The goal is to determine that the allocated ranges have enough free space for all the requested secondary ranges.", "items": { "format": "int32", "type": "integer" @@ -2084,8 +2469,66 @@ }, "type": "object" }, + "RemoveDnsRecordSetMetadata": { + "description": "Metadata provided through GetOperation request for the LRO generated by RemoveDnsRecordSet API", + "id": "RemoveDnsRecordSetMetadata", + "properties": {}, + "type": "object" + }, + "RemoveDnsRecordSetRequest": { + "description": "Request to remove a record set from a private managed DNS zone in the shared producer host project. The name, type, ttl, and data values must all exactly match an existing record set in the specified zone.", + "id": "RemoveDnsRecordSetRequest", + "properties": { + "consumerNetwork": { + "description": "Required. The network that the consumer is using to connect with services. Must be in the form of projects/{project}/global/networks/{network} {project} is the project number, as in '12345' {network} is the network name.", + "type": "string" + }, + "dnsRecordSet": { + "$ref": "DnsRecordSet", + "description": "Required. The DNS record set to remove." + }, + "zone": { + "description": "Required. The name of the private DNS zone in the shared producer host project from which the record set will be removed.", + "type": "string" + } + }, + "type": "object" + }, + "RemoveDnsRecordSetResponse": { + "description": "Blank message response type for RemoveDnsRecordSet API", + "id": "RemoveDnsRecordSetResponse", + "properties": {}, + "type": "object" + }, + "RemoveDnsZoneMetadata": { + "description": "Metadata provided through GetOperation request for the LRO generated by RemoveDnsZone API", + "id": "RemoveDnsZoneMetadata", + "properties": {}, + "type": "object" + }, + "RemoveDnsZoneRequest": { + "description": "Request to remove a private managed DNS zone in the shared producer host project and a matching DNS peering zone in the consumer project.", + "id": "RemoveDnsZoneRequest", + "properties": { + "consumerNetwork": { + "description": "Required. The network that the consumer is using to connect with services. Must be in the form of projects/{project}/global/networks/{network} {project} is the project number, as in '12345' {network} is the network name.", + "type": "string" + }, + "name": { + "description": "Required. 
The name for both the private zone in the shared producer host project and the peering zone in the consumer project.", + "type": "string" + } + }, + "type": "object" + }, + "RemoveDnsZoneResponse": { + "description": "Blank message response type for RemoveDnsZone API", + "id": "RemoveDnsZoneResponse", + "properties": {}, + "type": "object" + }, "Route": { - "description": "Represents a route that was created or discovered by a private access\nmanagement service.", + "description": "Represents a route that was created or discovered by a private access management service.", "id": "Route", "properties": { "destRange": { @@ -2097,11 +2540,11 @@ "type": "string" }, "network": { - "description": "Fully-qualified URL of the VPC network in the producer host tenant project\nthat this route applies to. For example:\n`projects/123456/global/networks/host-network`", + "description": "Fully-qualified URL of the VPC network in the producer host tenant project that this route applies to. For example: `projects/123456/global/networks/host-network`", "type": "string" }, "nextHopGateway": { - "description": "Fully-qualified URL of the gateway that should handle matching packets that\nthis route applies to. For example:\n`projects/123456/global/gateways/default-internet-gateway`", + "description": "Fully-qualified URL of the gateway that should handle matching packets that this route applies to. For example: `projects/123456/global/gateways/default-internet-gateway`", "type": "string" } }, @@ -2112,23 +2555,23 @@ "id": "SearchRangeRequest", "properties": { "ipPrefixLength": { - "description": "Required. The prefix length of the IP range. Use usual CIDR range notation. For\nexample, '30' to find unused x.x.x.x/30 CIDR range. Actual range will be\ndetermined using allocated range for the consumer peered network and\nreturned in the result.", + "description": "Required. The prefix length of the IP range. Use usual CIDR range notation. For example, '30' to find unused x.x.x.x/30 CIDR range. Actual range will be determined using allocated range for the consumer peered network and returned in the result.", "format": "int32", "type": "integer" }, "network": { - "description": "Network name in the consumer project. This network must have been\nalready peered with a shared VPC network using CreateConnection\nmethod. Must be in a form 'projects/{project}/global/networks/{network}'.\n{project} is a project number, as in '12345' {network} is network name.", + "description": "Network name in the consumer project. This network must have been already peered with a shared VPC network using CreateConnection method. Must be in a form 'projects/{project}/global/networks/{network}'. {project} is a project number, as in '12345' {network} is network name.", "type": "string" } }, "type": "object" }, "Service": { - "description": "`Service` is the root object of Google service configuration schema. It\ndescribes basic information about a service, such as the name and the\ntitle, and delegates other aspects to sub-sections. Each sub-section is\neither a proto message or a repeated proto message that configures a\nspecific aspect, such as auth. 
See each proto message definition for details.\n\nExample:\n\n type: google.api.Service\n config_version: 3\n name: calendar.googleapis.com\n title: Google Calendar API\n apis:\n - name: google.calendar.v3.Calendar\n authentication:\n providers:\n - id: google_calendar_auth\n jwks_uri: https://www.googleapis.com/oauth2/v1/certs\n issuer: https://securetoken.google.com\n rules:\n - selector: \"*\"\n requirements:\n provider_id: google_calendar_auth", + "description": "`Service` is the root object of Google service configuration schema. It describes basic information about a service, such as the name and the title, and delegates other aspects to sub-sections. Each sub-section is either a proto message or a repeated proto message that configures a specific aspect, such as auth. See each proto message definition for details. Example: type: google.api.Service config_version: 3 name: calendar.googleapis.com title: Google Calendar API apis: - name: google.calendar.v3.Calendar authentication: providers: - id: google_calendar_auth jwks_uri: https://www.googleapis.com/oauth2/v1/certs issuer: https://securetoken.google.com rules: - selector: \"*\" requirements: provider_id: google_calendar_auth", "id": "Service", "properties": { "apis": { - "description": "A list of API interfaces exported by this service. Only the `name` field\nof the google.protobuf.Api needs to be provided by the configuration\nauthor, as the remaining fields will be derived from the IDL during the\nnormalization process. It is an error to specify an API interface here\nwhich cannot be resolved against the associated IDL files.", + "description": "A list of API interfaces exported by this service. Only the `name` field of the google.protobuf.Api needs to be provided by the configuration author, as the remaining fields will be derived from the IDL during the normalization process. It is an error to specify an API interface here which cannot be resolved against the associated IDL files.", "items": { "$ref": "Api" }, @@ -2147,7 +2590,7 @@ "description": "Billing configuration." }, "configVersion": { - "description": "The semantic version of the service configuration. The config version\naffects the interpretation of the service configuration. For example,\ncertain features are enabled by default for certain config versions.\n\nThe latest config version is `3`.", + "description": "The semantic version of the service configuration. The config version affects the interpretation of the service configuration. For example, certain features are enabled by default for certain config versions. The latest config version is `3`.", "format": "uint32", "type": "integer" }, @@ -2168,14 +2611,14 @@ "description": "Additional API documentation." }, "endpoints": { - "description": "Configuration for network endpoints. If this is empty, then an endpoint\nwith the same name as the service is automatically generated to service all\ndefined APIs.", + "description": "Configuration for network endpoints. If this is empty, then an endpoint with the same name as the service is automatically generated to service all defined APIs.", "items": { "$ref": "Endpoint" }, "type": "array" }, "enums": { - "description": "A list of all enum types included in this API service. Enums\nreferenced directly or indirectly by the `apis` are automatically\nincluded. Enums which are not referenced but shall be included\nshould be listed here by name. 
Example:\n\n enums:\n - name: google.someapi.v1.SomeEnum", + "description": "A list of all enum types included in this API service. Enums referenced directly or indirectly by the `apis` are automatically included. Enums which are not referenced but shall be included should be listed here by name. Example: enums: - name: google.someapi.v1.SomeEnum", "items": { "$ref": "Enum" }, @@ -2186,7 +2629,7 @@ "description": "HTTP configuration." }, "id": { - "description": "A unique ID for a specific instance of this message, typically assigned\nby the client for tracking purpose. Must be no longer than 63 characters\nand only lower case letters, digits, '.', '_' and '-' are allowed. If\nempty, the server may choose to generate one instead.", + "description": "A unique ID for a specific instance of this message, typically assigned by the client for tracking purpose. Must be no longer than 63 characters and only lower case letters, digits, '.', '_' and '-' are allowed. If empty, the server may choose to generate one instead.", "type": "string" }, "logging": { @@ -2208,7 +2651,7 @@ "type": "array" }, "monitoredResources": { - "description": "Defines the monitored resources used by this service. This is required\nby the Service.monitoring and Service.logging configurations.", + "description": "Defines the monitored resources used by this service. This is required by the Service.monitoring and Service.logging configurations.", "items": { "$ref": "MonitoredResourceDescriptor" }, @@ -2219,7 +2662,7 @@ "description": "Monitoring configuration." }, "name": { - "description": "The service name, which is a DNS-like logical identifier for the\nservice, such as `calendar.googleapis.com`. The service name\ntypically goes through DNS verification to make sure the owner\nof the service also owns the DNS name.", + "description": "The service name, which is a DNS-like logical identifier for the service, such as `calendar.googleapis.com`. The service name typically goes through DNS verification to make sure the owner of the service also owns the DNS name.", "type": "string" }, "producerProjectId": { @@ -2239,7 +2682,7 @@ "description": "System parameter configuration." }, "systemTypes": { - "description": "A list of all proto message types included in this API service.\nIt serves similar purpose as [google.api.Service.types], except that\nthese types are not needed by user-defined APIs. Therefore, they will not\nshow up in the generated discovery doc. This field should only be used\nto define system APIs in ESF.", + "description": "A list of all proto message types included in this API service. It serves similar purpose as [google.api.Service.types], except that these types are not needed by user-defined APIs. Therefore, they will not show up in the generated discovery doc. This field should only be used to define system APIs in ESF.", "items": { "$ref": "Type" }, @@ -2250,7 +2693,7 @@ "type": "string" }, "types": { - "description": "A list of all proto message types included in this API service.\nTypes referenced directly or indirectly by the `apis` are\nautomatically included. Messages which are not referenced but\nshall be included, such as types used by the `google.protobuf.Any` type,\nshould be listed here by name. Example:\n\n types:\n - name: google.protobuf.Int32", + "description": "A list of all proto message types included in this API service. Types referenced directly or indirectly by the `apis` are automatically included. 
Messages which are not referenced but shall be included, such as types used by the `google.protobuf.Any` type, should be listed here by name. Example: types: - name: google.protobuf.Int32", "items": { "$ref": "Type" }, @@ -2264,30 +2707,30 @@ "type": "object" }, "ServiceIdentity": { - "description": "The per-product per-project service identity for a service.\n\n\nUse this field to configure per-product per-project service identity.\nExample of a service identity configuration.\n\n usage:\n service_identity:\n - service_account_parent: \"projects/123456789\"\n display_name: \"Cloud XXX Service Agent\"\n description: \"Used as the identity of Cloud XXX to access resources\"", + "description": "The per-product per-project service identity for a service. Use this field to configure per-product per-project service identity. Example of a service identity configuration. usage: service_identity: - service_account_parent: \"projects/123456789\" display_name: \"Cloud XXX Service Agent\" description: \"Used as the identity of Cloud XXX to access resources\"", "id": "ServiceIdentity", "properties": { "description": { - "description": "Optional. A user-specified opaque description of the service account.\nMust be less than or equal to 256 UTF-8 bytes.", + "description": "Optional. A user-specified opaque description of the service account. Must be less than or equal to 256 UTF-8 bytes.", "type": "string" }, "displayName": { - "description": "Optional. A user-specified name for the service account.\nMust be less than or equal to 100 UTF-8 bytes.", + "description": "Optional. A user-specified name for the service account. Must be less than or equal to 100 UTF-8 bytes.", "type": "string" }, "serviceAccountParent": { - "description": "A service account project that hosts the service accounts.\n\nAn example name would be:\n`projects/123456789`", + "description": "A service account project that hosts the service accounts. An example name would be: `projects/123456789`", "type": "string" } }, "type": "object" }, "SourceContext": { - "description": "`SourceContext` represents information about the source of a\nprotobuf element, like the file in which it is defined.", + "description": "`SourceContext` represents information about the source of a protobuf element, like the file in which it is defined.", "id": "SourceContext", "properties": { "fileName": { - "description": "The path-qualified name of the .proto file that contained the associated\nprotobuf element. For example: `\"google/protobuf/source_context.proto\"`.", + "description": "The path-qualified name of the .proto file that contained the associated protobuf element. For example: `\"google/protobuf/source_context.proto\"`.", "type": "string" } }, @@ -2312,7 +2755,7 @@ "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. 
You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -2321,7 +2764,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -2332,14 +2775,14 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, "type": "object" }, "Subnetwork": { - "description": "Represents a subnet that was created or discovered by a private access\nmanagement service.", + "description": "Represents a subnet that was created or discovered by a private access management service.", "id": "Subnetwork", "properties": { "ipCidrRange": { @@ -2347,26 +2790,26 @@ "type": "string" }, "name": { - "description": "Subnetwork name.\nSee https://cloud.google.com/compute/docs/vpc/", + "description": "Subnetwork name. See https://cloud.google.com/compute/docs/vpc/", "type": "string" }, "network": { - "description": "In the Shared VPC host project, the VPC network that's peered with the\nconsumer network. For example:\n`projects/1234321/global/networks/host-network`", + "description": "In the Shared VPC host project, the VPC network that's peered with the consumer network. For example: `projects/1234321/global/networks/host-network`", "type": "string" }, "outsideAllocation": { - "description": "This is a discovered subnet that is not within the current consumer\nallocated ranges.", + "description": "This is a discovered subnet that is not within the current consumer allocated ranges.", "type": "boolean" } }, "type": "object" }, "SystemParameter": { - "description": "Define a parameter's name and location. The parameter may be passed as either\nan HTTP header or a URL query parameter, and if both are passed the behavior\nis implementation-dependent.", + "description": "Define a parameter's name and location. The parameter may be passed as either an HTTP header or a URL query parameter, and if both are passed the behavior is implementation-dependent.", "id": "SystemParameter", "properties": { "httpHeader": { - "description": "Define the HTTP header name to use for the parameter. It is case\ninsensitive.", + "description": "Define the HTTP header name to use for the parameter. It is case insensitive.", "type": "string" }, "name": { @@ -2374,36 +2817,36 @@ "type": "string" }, "urlQueryParameter": { - "description": "Define the URL query parameter name to use for the parameter. It is case\nsensitive.", + "description": "Define the URL query parameter name to use for the parameter. 
It is case sensitive.", "type": "string" } }, "type": "object" }, "SystemParameterRule": { - "description": "Define a system parameter rule mapping system parameter definitions to\nmethods.", + "description": "Define a system parameter rule mapping system parameter definitions to methods.", "id": "SystemParameterRule", "properties": { "parameters": { - "description": "Define parameters. Multiple names may be defined for a parameter.\nFor a given method call, only one of them should be used. If multiple\nnames are used the behavior is implementation-dependent.\nIf none of the specified names are present the behavior is\nparameter-dependent.", + "description": "Define parameters. Multiple names may be defined for a parameter. For a given method call, only one of them should be used. If multiple names are used the behavior is implementation-dependent. If none of the specified names are present the behavior is parameter-dependent.", "items": { "$ref": "SystemParameter" }, "type": "array" }, "selector": { - "description": "Selects the methods to which this rule applies. Use '*' to indicate all\nmethods in all APIs.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Use '*' to indicate all methods in all APIs. Refer to selector for syntax details.", "type": "string" } }, "type": "object" }, "SystemParameters": { - "description": "### System parameter configuration\n\nA system parameter is a special kind of parameter defined by the API\nsystem, not by an individual API. It is typically mapped to an HTTP header\nand/or a URL query parameter. This configuration specifies which methods\nchange the names of the system parameters.", + "description": "### System parameter configuration A system parameter is a special kind of parameter defined by the API system, not by an individual API. It is typically mapped to an HTTP header and/or a URL query parameter. This configuration specifies which methods change the names of the system parameters.", "id": "SystemParameters", "properties": { "rules": { - "description": "Define system parameters.\n\nThe parameters defined here will override the default parameters\nimplemented by the system. If this field is missing from the service\nconfig, default system parameters will be used. Default system parameters\nand names is implementation-dependent.\n\nExample: define api key for all methods\n\n system_parameters\n rules:\n - selector: \"*\"\n parameters:\n - name: api_key\n url_query_parameter: api_key\n\n\nExample: define 2 api key names for a specific method.\n\n system_parameters\n rules:\n - selector: \"/ListShelves\"\n parameters:\n - name: api_key\n http_header: Api-Key1\n - name: api_key\n http_header: Api-Key2\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "Define system parameters. The parameters defined here will override the default parameters implemented by the system. If this field is missing from the service config, default system parameters will be used. Default system parameters and names is implementation-dependent. Example: define api key for all methods system_parameters rules: - selector: \"*\" parameters: - name: api_key url_query_parameter: api_key Example: define 2 api key names for a specific method. 
system_parameters rules: - selector: \"/ListShelves\" parameters: - name: api_key http_header: Api-Key1 - name: api_key http_header: Api-Key2 **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "SystemParameterRule" }, @@ -2460,23 +2903,52 @@ }, "type": "object" }, + "UpdateDnsRecordSetMetadata": { + "description": "Metadata provided through GetOperation request for the LRO generated by UpdateDnsRecordSet API", + "id": "UpdateDnsRecordSetMetadata", + "properties": {}, + "type": "object" + }, + "UpdateDnsRecordSetRequest": { + "description": "Request to update a record set from a private managed DNS zone in the shared producer host project. The name, type, ttl, and data values of the existing record set must all exactly match an existing record set in the specified zone.", + "id": "UpdateDnsRecordSetRequest", + "properties": { + "consumerNetwork": { + "description": "Required. The network that the consumer is using to connect with services. Must be in the form of projects/{project}/global/networks/{network} {project} is the project number, as in '12345' {network} is the network name.", + "type": "string" + }, + "existingDnsRecordSet": { + "$ref": "DnsRecordSet", + "description": "Required. The existing DNS record set to update." + }, + "newDnsRecordSet": { + "$ref": "DnsRecordSet", + "description": "Required. The new values that the DNS record set should be updated to hold." + }, + "zone": { + "description": "Required. The name of the private DNS zone in the shared producer host project from which the record set will be removed.", + "type": "string" + } + }, + "type": "object" + }, "Usage": { "description": "Configuration controlling usage of a service.", "id": "Usage", "properties": { "producerNotificationChannel": { - "description": "The full resource name of a channel used for sending notifications to the\nservice producer.\n\nGoogle Service Management currently only supports\n[Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a notification\nchannel. To use Google Cloud Pub/Sub as the channel, this must be the name\nof a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format\ndocumented in https://cloud.google.com/pubsub/docs/overview.", + "description": "The full resource name of a channel used for sending notifications to the service producer. Google Service Management currently only supports [Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a notification channel. To use Google Cloud Pub/Sub as the channel, this must be the name of a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format documented in https://cloud.google.com/pubsub/docs/overview.", "type": "string" }, "requirements": { - "description": "Requirements that must be satisfied before a consumer project can use the\nservice. Each requirement is of the form \u003cservice.name\u003e/\u003crequirement-id\u003e;\nfor example 'serviceusage.googleapis.com/billing-enabled'.", + "description": "Requirements that must be satisfied before a consumer project can use the service. Each requirement is of the form /; for example 'serviceusage.googleapis.com/billing-enabled'.", "items": { "type": "string" }, "type": "array" }, "rules": { - "description": "A list of usage rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of usage rules that apply to individual API methods. 
**NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "UsageRule" }, @@ -2490,19 +2962,19 @@ "type": "object" }, "UsageRule": { - "description": "Usage configuration rules for the service.\n\nNOTE: Under development.\n\n\nUse this rule to configure unregistered calls for the service. Unregistered\ncalls are calls that do not contain consumer project identity.\n(Example: calls that do not contain an API key).\nBy default, API methods do not allow unregistered calls, and each method call\nmust be identified by a consumer project identity. Use this rule to\nallow/disallow unregistered calls.\n\nExample of an API that wants to allow unregistered calls for entire service.\n\n usage:\n rules:\n - selector: \"*\"\n allow_unregistered_calls: true\n\nExample of a method that wants to allow unregistered calls.\n\n usage:\n rules:\n - selector: \"google.example.library.v1.LibraryService.CreateBook\"\n allow_unregistered_calls: true", + "description": "Usage configuration rules for the service. NOTE: Under development. Use this rule to configure unregistered calls for the service. Unregistered calls are calls that do not contain consumer project identity. (Example: calls that do not contain an API key). By default, API methods do not allow unregistered calls, and each method call must be identified by a consumer project identity. Use this rule to allow/disallow unregistered calls. Example of an API that wants to allow unregistered calls for entire service. usage: rules: - selector: \"*\" allow_unregistered_calls: true Example of a method that wants to allow unregistered calls. usage: rules: - selector: \"google.example.library.v1.LibraryService.CreateBook\" allow_unregistered_calls: true", "id": "UsageRule", "properties": { "allowUnregisteredCalls": { - "description": "If true, the selected method allows unregistered calls, e.g. calls\nthat don't identify any user or application.", + "description": "If true, the selected method allows unregistered calls, e.g. calls that don't identify any user or application.", "type": "boolean" }, "selector": { - "description": "Selects the methods to which this rule applies. Use '*' to indicate all\nmethods in all APIs.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Use '*' to indicate all methods in all APIs. Refer to selector for syntax details.", "type": "string" }, "skipServiceControl": { - "description": "If true, the selected method should skip service control and the control\nplane features, such as quota and billing, will not be available.\nThis flag is used by Google Cloud Endpoints to bypass checks for internal\nmethods, such as service health check methods.", + "description": "If true, the selected method should skip service control and the control plane features, such as quota and billing, will not be available. This flag is used by Google Cloud Endpoints to bypass checks for internal methods, such as service health check methods.", "type": "boolean" } }, @@ -2512,19 +2984,19 @@ "id": "ValidateConsumerConfigRequest", "properties": { "consumerNetwork": { - "description": "Required. The network that the consumer is using to connect with services. Must be in\nthe form of projects/{project}/global/networks/{network} {project} is a\nproject number, as in '12345' {network} is network name.", + "description": "Required. The network that the consumer is using to connect with services. 
Must be in the form of projects/{project}/global/networks/{network} {project} is a project number, as in '12345' {network} is network name.", "type": "string" }, "consumerProject": { "$ref": "ConsumerProject", - "description": "NETWORK_NOT_IN_CONSUMERS_PROJECT, NETWORK_NOT_IN_CONSUMERS_HOST_PROJECT,\nand HOST_PROJECT_NOT_FOUND are done when consumer_project is provided." + "description": "NETWORK_NOT_IN_CONSUMERS_PROJECT, NETWORK_NOT_IN_CONSUMERS_HOST_PROJECT, and HOST_PROJECT_NOT_FOUND are done when consumer_project is provided." }, "rangeReservation": { "$ref": "RangeReservation", - "description": "RANGES_EXHAUSTED, RANGES_EXHAUSTED, and RANGES_DELETED_LATER are done\nwhen range_reservation is provided." + "description": "RANGES_EXHAUSTED, RANGES_EXHAUSTED, and RANGES_DELETED_LATER are done when range_reservation is provided." }, "validateNetwork": { - "description": "The validations will be performed in the order listed in the\nValidationError enum. The first failure will return. If a validation is not\nrequested, then the next one will be performed.\nSERVICE_NETWORKING_NOT_ENABLED and NETWORK_NOT_PEERED checks are performed\nfor all requests where validation is requested. NETWORK_NOT_FOUND and\nNETWORK_DISCONNECTED checks are done for requests that have\nvalidate_network set to true.", + "description": "The validations will be performed in the order listed in the ValidationError enum. The first failure will return. If a validation is not requested, then the next one will be performed. SERVICE_NETWORKING_NOT_ENABLED and NETWORK_NOT_PEERED checks are performed for all requests where validation is requested. NETWORK_NOT_FOUND and NETWORK_DISCONNECTED checks are done for requests that have validate_network set to true.", "type": "boolean" } }, @@ -2560,11 +3032,11 @@ "The network provided by the consumer does not exist.", "The network has not been peered with the producer org.", "The peering was created and later deleted.", - "The network is a regular VPC but the network is not in the consumer's\nproject.", - "The consumer project is a service project, and network is a shared VPC,\nbut the network is not in the host project of this consumer project.", - "The host project associated with the consumer project\nwas not found.", - "The consumer project is not a service project for\nthe specified host project.", - "The reserved IP ranges do not have enough space to create\na subnet of desired size.", + "The network is a regular VPC but the network is not in the consumer's project.", + "The consumer project is a service project, and network is a shared VPC, but the network is not in the host project of this consumer project.", + "The host project associated with the consumer project was not found.", + "The consumer project is not a service project for the specified host project.", + "The reserved IP ranges do not have enough space to create a subnet of desired size.", "The IP ranges were not reserved.", "The IP ranges were reserved but deleted later.", "The consumer project does not have the compute api enabled." 
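The hunks above add the UpdateDnsRecordSetRequest and related DNS schemas to the vendored discovery document, and the regenerated Go client in the next file wires up the matching Services.DnsRecordSets, Services.DnsZones, and Services.Projects surfaces. As a hedged illustration only (not part of the patch), the Go sketch below shows how a caller might drive the new record-set update RPC through this vendored package. The project number, network, zone, and record values are placeholders, and the sketch assumes the regenerated client exposes the conventional Update(parent, request) call builder with Context/Do for Services.DnsRecordSets, plus DnsRecordSet field names (Domain, Type, Data, DnsTtl) matching the discovery schema; neither appears in the hunks shown here.

package main

import (
	"context"
	"log"

	servicenetworking "google.golang.org/api/servicenetworking/v1"
)

func main() {
	ctx := context.Background()

	// NewService uses Application Default Credentials; see the vendored
	// package's NewService for the option.ClientOption hooks.
	svc, err := servicenetworking.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Per the UpdateDnsRecordSetRequest schema, the existing record set must
	// exactly match (name, type, ttl, data) a record in the producer zone.
	// Field names on DnsRecordSet are assumed from the discovery document.
	existing := &servicenetworking.DnsRecordSet{
		Domain: "api.example.internal.",
		Type:   "A",
		DnsTtl: "300s",
		Data:   []string{"10.0.0.10"},
	}
	updated := &servicenetworking.DnsRecordSet{
		Domain: "api.example.internal.",
		Type:   "A",
		DnsTtl: "300s",
		Data:   []string{"10.0.0.20"},
	}

	req := &servicenetworking.UpdateDnsRecordSetRequest{
		// {project} is the project number, {network} the network name.
		ConsumerNetwork:      "projects/12345/global/networks/default",
		Zone:                 "example-private-zone",
		ExistingDnsRecordSet: existing,
		NewDnsRecordSet:      updated,
	}

	// Assumed generated call pattern: Update(parent, request).Context(ctx).Do()
	// returning a long-running Operation, as with the other services here.
	op, err := svc.Services.DnsRecordSets.Update("services/servicenetworking.googleapis.com", req).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("started DNS record set update LRO: %s", op.Name)
}

The snippet is a sketch of the new vendored surface only; it does not reflect any call made by the installer code in this patch.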
diff --git a/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-gen.go b/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-gen.go index 9680aa4cc23..0d5c3d5b14c 100644 --- a/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-gen.go +++ b/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-gen.go @@ -79,6 +79,7 @@ const apiId = "servicenetworking:v1" const apiName = "servicenetworking" const apiVersion = "v1" const basePath = "https://servicenetworking.googleapis.com/" +const mtlsBasePath = "https://servicenetworking.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -98,6 +99,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*APIService, // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -156,6 +158,9 @@ type OperationsService struct { func NewServicesService(s *APIService) *ServicesService { rs := &ServicesService{s: s} rs.Connections = NewServicesConnectionsService(s) + rs.DnsRecordSets = NewServicesDnsRecordSetsService(s) + rs.DnsZones = NewServicesDnsZonesService(s) + rs.Projects = NewServicesProjectsService(s) rs.Roles = NewServicesRolesService(s) return rs } @@ -165,6 +170,12 @@ type ServicesService struct { Connections *ServicesConnectionsService + DnsRecordSets *ServicesDnsRecordSetsService + + DnsZones *ServicesDnsZonesService + + Projects *ServicesProjectsService + Roles *ServicesRolesService } @@ -177,6 +188,69 @@ type ServicesConnectionsService struct { s *APIService } +func NewServicesDnsRecordSetsService(s *APIService) *ServicesDnsRecordSetsService { + rs := &ServicesDnsRecordSetsService{s: s} + return rs +} + +type ServicesDnsRecordSetsService struct { + s *APIService +} + +func NewServicesDnsZonesService(s *APIService) *ServicesDnsZonesService { + rs := &ServicesDnsZonesService{s: s} + return rs +} + +type ServicesDnsZonesService struct { + s *APIService +} + +func NewServicesProjectsService(s *APIService) *ServicesProjectsService { + rs := &ServicesProjectsService{s: s} + rs.Global = NewServicesProjectsGlobalService(s) + return rs +} + +type ServicesProjectsService struct { + s *APIService + + Global *ServicesProjectsGlobalService +} + +func NewServicesProjectsGlobalService(s *APIService) *ServicesProjectsGlobalService { + rs := &ServicesProjectsGlobalService{s: s} + rs.Networks = NewServicesProjectsGlobalNetworksService(s) + return rs +} + +type ServicesProjectsGlobalService struct { + s *APIService + + Networks *ServicesProjectsGlobalNetworksService +} + +func NewServicesProjectsGlobalNetworksService(s *APIService) *ServicesProjectsGlobalNetworksService { + rs := &ServicesProjectsGlobalNetworksService{s: s} + rs.PeeredDnsDomains = NewServicesProjectsGlobalNetworksPeeredDnsDomainsService(s) + return rs +} + +type ServicesProjectsGlobalNetworksService struct { + s *APIService + + PeeredDnsDomains *ServicesProjectsGlobalNetworksPeeredDnsDomainsService +} + +func NewServicesProjectsGlobalNetworksPeeredDnsDomainsService(s *APIService) *ServicesProjectsGlobalNetworksPeeredDnsDomainsService { + rs := &ServicesProjectsGlobalNetworksPeeredDnsDomainsService{s: s} + return rs +} + +type ServicesProjectsGlobalNetworksPeeredDnsDomainsService struct { + s *APIService +} + func 
NewServicesRolesService(s *APIService) *ServicesRolesService { rs := &ServicesRolesService{s: s} return rs @@ -186,21 +260,148 @@ type ServicesRolesService struct { s *APIService } +// AddDnsRecordSetMetadata: Metadata provided through GetOperation +// request for the LRO generated by AddDnsRecordSet API +type AddDnsRecordSetMetadata struct { +} + +// AddDnsRecordSetRequest: Request to add a record set to a private +// managed DNS zone in the shared producer host project. +type AddDnsRecordSetRequest struct { + // ConsumerNetwork: Required. The network that the consumer is using to + // connect with services. Must be in the form of + // projects/{project}/global/networks/{network} {project} is the project + // number, as in '12345' {network} is the network name. + ConsumerNetwork string `json:"consumerNetwork,omitempty"` + + // DnsRecordSet: Required. The DNS record set to add. + DnsRecordSet *DnsRecordSet `json:"dnsRecordSet,omitempty"` + + // Zone: Required. The name of the private DNS zone in the shared + // producer host project to which the record set will be added. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConsumerNetwork") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConsumerNetwork") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AddDnsRecordSetRequest) MarshalJSON() ([]byte, error) { + type NoMethod AddDnsRecordSetRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AddDnsZoneMetadata: Metadata provided through GetOperation request +// for the LRO generated by AddDnsZone API +type AddDnsZoneMetadata struct { +} + +// AddDnsZoneRequest: Request to add a private managed DNS zone in the +// shared producer host project and a matching DNS peering zone in the +// consumer project. +type AddDnsZoneRequest struct { + // ConsumerNetwork: Required. The network that the consumer is using to + // connect with services. Must be in the form of + // projects/{project}/global/networks/{network} {project} is the project + // number, as in '12345' {network} is the network name. + ConsumerNetwork string `json:"consumerNetwork,omitempty"` + + // DnsSuffix: Required. The DNS name suffix for the zones e.g. + // `example.com`. + DnsSuffix string `json:"dnsSuffix,omitempty"` + + // Name: Required. The name for both the private zone in the shared + // producer host project and the peering zone in the consumer project. + // Must be unique within both projects. The name must be 1-63 characters + // long, must begin with a letter, end with a letter or digit, and only + // contain lowercase letters, digits or dashes. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"ConsumerNetwork") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConsumerNetwork") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AddDnsZoneRequest) MarshalJSON() ([]byte, error) { + type NoMethod AddDnsZoneRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AddDnsZoneResponse: Represents managed DNS zones created in the +// shared producer host and consumer projects. +type AddDnsZoneResponse struct { + // ConsumerPeeringZone: The DNS peering zone created in the consumer + // project. + ConsumerPeeringZone *DnsZone `json:"consumerPeeringZone,omitempty"` + + // ProducerPrivateZone: The private DNS zone created in the shared + // producer host project. + ProducerPrivateZone *DnsZone `json:"producerPrivateZone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConsumerPeeringZone") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConsumerPeeringZone") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AddDnsZoneResponse) MarshalJSON() ([]byte, error) { + type NoMethod AddDnsZoneResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AddRolesMetadata: Metadata provided through GetOperation request for -// the LRO generated by -// AddRoles API +// the LRO generated by AddRoles API type AddRolesMetadata struct { } // AddRolesRequest: Request for AddRoles to allow Service Producers to -// add roles in the shared -// VPC host project for them to use. +// add roles in the shared VPC host project for them to use. type AddRolesRequest struct { // ConsumerNetwork: Required. The network that the consumer is using to - // connect with services. Must be in - // the form of projects/{project}/global/networks/{network} - // {project} is a project number, as in '12345' - // {network} is a network name. + // connect with services. Must be in the form of + // projects/{project}/global/networks/{network} {project} is a project + // number, as in '12345' {network} is a network name. 
ConsumerNetwork string `json:"consumerNetwork,omitempty"` // PolicyBinding: Required. List of policy bindings to add to shared VPC @@ -265,71 +466,51 @@ func (s *AddRolesResponse) MarshalJSON() ([]byte, error) { // peered service network. type AddSubnetworkRequest struct { // Consumer: Required. A resource that represents the service consumer, - // such as - // `projects/123456`. The project number can be different from the - // value in the consumer network parameter. For example, the network - // might be - // part of a Shared VPC network. In those cases, Service Networking - // validates - // that this resource belongs to that Shared VPC. + // such as `projects/123456`. The project number can be different from + // the value in the consumer network parameter. For example, the network + // might be part of a Shared VPC network. In those cases, Service + // Networking validates that this resource belongs to that Shared VPC. Consumer string `json:"consumer,omitempty"` // ConsumerNetwork: Required. The name of the service consumer's VPC - // network. The network - // must have an existing private connection that was provisioned through - // the - // connections.create method. The name must be in the following - // format: - // `projects/{project}/global/networks/{network}`, where {project} - // is a project number, such as `12345`. {network} is the name of a - // VPC network in the project. + // network. The network must have an existing private connection that + // was provisioned through the connections.create method. The name must + // be in the following format: + // `projects/{project}/global/networks/{network}`, where {project} is a + // project number, such as `12345`. {network} is the name of a VPC + // network in the project. ConsumerNetwork string `json:"consumerNetwork,omitempty"` // Description: Optional. Description of the subnet. Description string `json:"description,omitempty"` // IpPrefixLength: Required. The prefix length of the subnet's IP - // address range. Use CIDR - // range notation, such as `30` to provision a subnet with - // an - // `x.x.x.x/30` CIDR range. The IP address range is drawn from a - // pool of available ranges in the service consumer's allocated range. + // address range. Use CIDR range notation, such as `30` to provision a + // subnet with an `x.x.x.x/30` CIDR range. The IP address range is drawn + // from a pool of available ranges in the service consumer's allocated + // range. IpPrefixLength int64 `json:"ipPrefixLength,omitempty"` - // PrivateIpv6GoogleAccess: Optional. The private IPv6 google access - // type for the VMs in this subnet. - // For information about the access types that can be set using this - // field, - // see [subnetwork](/compute/docs/reference/rest/v1/subnetworks) - // in the Compute API documentation. - PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` - - // Region: Required. The name of a - // [region](/compute/docs/regions-zones) + // Region: Required. The name of a [region](/compute/docs/regions-zones) // for the subnet, such `europe-west1`. Region string `json:"region,omitempty"` // RequestedAddress: Optional. The starting address of a range. The - // address must be a valid - // IPv4 address in the x.x.x.x format. This value combined with the IP - // prefix - // range is the CIDR range for the subnet. The range must be within - // the - // allocated range that is assigned to the private connection. If the - // CIDR - // range isn't available, the call fails. 
+ // address must be a valid IPv4 address in the x.x.x.x format. This + // value combined with the IP prefix range is the CIDR range for the + // subnet. The range must be within the allocated range that is assigned + // to the private connection. If the CIDR range isn't available, the + // call fails. RequestedAddress string `json:"requestedAddress,omitempty"` // Subnetwork: Required. A name for the new subnet. For information - // about the naming - // requirements, see - // [subnetwork](/compute/docs/reference/rest/v1/subnetworks) - // in the Compute API documentation. + // about the naming requirements, see + // [subnetwork](/compute/docs/reference/rest/v1/subnetworks) in the + // Compute API documentation. Subnetwork string `json:"subnetwork,omitempty"` // SubnetworkUsers: A list of members that are granted the - // `compute.networkUser` - // role on the subnet. + // `compute.networkUser` role on the subnet. SubnetworkUsers []string `json:"subnetworkUsers,omitempty"` // ForceSendFields is a list of field names (e.g. "Consumer") to @@ -355,22 +536,15 @@ func (s *AddSubnetworkRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Api: Api is a light-weight descriptor for an API -// Interface. -// +// Api: Api is a light-weight descriptor for an API Interface. // Interfaces are also described as "protocol buffer services" in some -// contexts, -// such as by the "service" keyword in a .proto file, but they are -// different -// from API Services, which represent a concrete implementation of an -// interface -// as opposed to simply a description of methods and bindings. They are -// also -// sometimes simply referred to as "APIs" in other contexts, such as the -// name of -// this message itself. See -// https://cloud.google.com/apis/design/glossary for -// detailed terminology. +// contexts, such as by the "service" keyword in a .proto file, but they +// are different from API Services, which represent a concrete +// implementation of an interface as opposed to simply a description of +// methods and bindings. They are also sometimes simply referred to as +// "APIs" in other contexts, such as the name of this message itself. +// See https://cloud.google.com/apis/design/glossary for detailed +// terminology. type Api struct { // Methods: The methods of this interface, in unspecified order. Methods []*Method `json:"methods,omitempty"` @@ -379,16 +553,14 @@ type Api struct { Mixins []*Mixin `json:"mixins,omitempty"` // Name: The fully qualified name of this interface, including package - // name - // followed by the interface's simple name. + // name followed by the interface's simple name. Name string `json:"name,omitempty"` // Options: Any metadata attached to the interface. Options []*Option `json:"options,omitempty"` // SourceContext: Source context for the protocol buffer service - // represented by this - // message. + // represented by this message. SourceContext *SourceContext `json:"sourceContext,omitempty"` // Syntax: The source syntax of the service. @@ -399,35 +571,20 @@ type Api struct { Syntax string `json:"syntax,omitempty"` // Version: A version string for this interface. If specified, must have - // the form - // `major-version.minor-version`, as in `1.10`. If the minor version - // is - // omitted, it defaults to zero. If the entire version field is empty, - // the - // major version is derived from the package name, as outlined below. 
If - // the - // field is not empty, the version in the package name will be verified - // to be - // consistent with what is provided here. - // - // The versioning schema uses [semantic - // versioning](http://semver.org) where the major version - // number - // indicates a breaking change and the minor version an - // additive, - // non-breaking change. Both version numbers are signals to users - // what to expect from different versions, and should be - // carefully - // chosen based on the product plan. - // - // The major version is also reflected in the package name of - // the - // interface, which must end in `v`, as - // in - // `google.feature.v1`. For major versions 0 and 1, the suffix can - // be omitted. Zero major versions must only be used for - // experimental, non-GA interfaces. - // + // the form `major-version.minor-version`, as in `1.10`. If the minor + // version is omitted, it defaults to zero. If the entire version field + // is empty, the major version is derived from the package name, as + // outlined below. If the field is not empty, the version in the package + // name will be verified to be consistent with what is provided here. + // The versioning schema uses [semantic versioning](http://semver.org) + // where the major version number indicates a breaking change and the + // minor version an additive, non-breaking change. Both version numbers + // are signals to users what to expect from different versions, and + // should be carefully chosen based on the product plan. The major + // version is also reflected in the package name of the interface, which + // must end in `v`, as in `google.feature.v1`. For major versions 0 and + // 1, the suffix can be omitted. Zero major versions must only be used + // for experimental, non-GA interfaces. Version string `json:"version,omitempty"` // ForceSendFields is a list of field names (e.g. "Methods") to @@ -454,97 +611,57 @@ func (s *Api) MarshalJSON() ([]byte, error) { } // AuthProvider: Configuration for an authentication provider, including -// support for -// [JSON Web -// Token -// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-tok -// en-32). +// support for [JSON Web Token +// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32) +// . type AuthProvider struct { - // Audiences: The list of - // JWT - // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web- - // token-32#section-4.1.3). - // that are allowed to access. A JWT containing any of these audiences - // will - // be accepted. When this setting is absent, JWTs with audiences: - // - "https://[service.name]/[google.protobuf.Api.name]" - // - "https://[service.name]/" - // will be accepted. - // For example, if no audiences are in the setting, LibraryService API - // will - // accept JWTs with the following audiences: - // - - // - // https://library-example.googleapis.com/google.example.library.v1.LibraryService - // - https://library-example.googleapis.com/ - // - // Example: - // - // audiences: bookstore_android.apps.googleusercontent.com, - // bookstore_web.apps.googleusercontent.com + // Audiences: The list of JWT + // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-toke + // n-32#section-4.1.3). that are allowed to access. A JWT containing any + // of these audiences will be accepted. When this setting is absent, + // JWTs with audiences: - + // "https://[service.name]/[google.protobuf.Api.name]" - + // "https://[service.name]/" will be accepted. 
For example, if no + // audiences are in the setting, LibraryService API will accept JWTs + // with the following audiences: - + // https://library-example.googleapis.com/google.example.library.v1.LibraryService - https://library-example.googleapis.com/ Example: audiences: bookstore_android.apps.googleusercontent.com, + // bookstore_web.apps.googleusercontent.com Audiences string `json:"audiences,omitempty"` // AuthorizationUrl: Redirect URL if JWT token is required but not - // present or is expired. - // Implement authorizationUrl of securityDefinitions in OpenAPI spec. + // present or is expired. Implement authorizationUrl of + // securityDefinitions in OpenAPI spec. AuthorizationUrl string `json:"authorizationUrl,omitempty"` // Id: The unique identifier of the auth provider. It will be referred - // to by - // `AuthRequirement.provider_id`. - // - // Example: "bookstore_auth". + // to by `AuthRequirement.provider_id`. Example: "bookstore_auth". Id string `json:"id,omitempty"` - // Issuer: Identifies the principal that issued the JWT. - // See - // https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#sec - // tion-4.1.1 - // Usually a URL or an email address. - // - // Example: https://securetoken.google.com - // Example: 1234567-compute@developer.gserviceaccount.com + // Issuer: Identifies the principal that issued the JWT. See + // https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1 Usually a URL or an email address. Example: https://securetoken.google.com Example: + // 1234567-compute@developer.gserviceaccount.com Issuer string `json:"issuer,omitempty"` // JwksUri: URL of the provider's public key set to validate signature - // of the JWT. - // See - // [OpenID - // Discovery](https://openid.net/specs/openid-connect-discove - // ry-1_0.html#ProviderMetadata). - // Optional if the key set document: - // - can be retrieved from - // [OpenID - // + // of the JWT. See [OpenID + // Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html# + // ProviderMetadata). Optional if the key set document: - can be + // retrieved from [OpenID // Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html - // of - // the issuer. - // - can be inferred from the email domain of the issuer (e.g. a - // Google - // service account). - // - // Example: https://www.googleapis.com/oauth2/v1/certs + // of the issuer. - can be inferred from the email domain of the issuer + // (e.g. a Google service account). Example: + // https://www.googleapis.com/oauth2/v1/certs JwksUri string `json:"jwksUri,omitempty"` - // JwtLocations: Defines the locations to extract the JWT. - // - // JWT locations can be either from HTTP headers or URL query - // parameters. - // The rule is that the first match wins. The checking order is: - // checking - // all headers first, then URL query parameters. - // - // If not specified, default to use following 3 locations: - // 1) Authorization: Bearer - // 2) x-goog-iap-jwt-assertion - // 3) access_token query parameter - // - // Default locations can be specified as followings: - // jwt_locations: - // - header: Authorization - // value_prefix: "Bearer " - // - header: x-goog-iap-jwt-assertion - // - query: access_token + // JwtLocations: Defines the locations to extract the JWT. JWT locations + // can be either from HTTP headers or URL query parameters. The rule is + // that the first match wins. The checking order is: checking all + // headers first, then URL query parameters. 
If not specified, default + // to use following 3 locations: 1) Authorization: Bearer 2) + // x-goog-iap-jwt-assertion 3) access_token query parameter Default + // locations can be specified as followings: jwt_locations: - header: + // Authorization value_prefix: "Bearer " - header: + // x-goog-iap-jwt-assertion - query: access_token JwtLocations []*JwtLocation `json:"jwtLocations,omitempty"` // ForceSendFields is a list of field names (e.g. "Audiences") to @@ -571,43 +688,27 @@ func (s *AuthProvider) MarshalJSON() ([]byte, error) { } // AuthRequirement: User-defined authentication requirements, including -// support for -// [JSON Web -// Token -// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-tok -// en-32). +// support for [JSON Web Token +// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32) +// . type AuthRequirement struct { // Audiences: NOTE: This will be deprecated soon, once - // AuthProvider.audiences is - // implemented and accepted in all the runtime components. - // - // The list of - // JWT - // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web- - // token-32#section-4.1.3). - // that are allowed to access. A JWT containing any of these audiences - // will - // be accepted. When this setting is absent, only JWTs with - // audience - // "https://Service_name/API_name" - // will be accepted. For example, if no audiences are in the - // setting, - // LibraryService API will only accept JWTs with the following - // audience - // "https://library-example.googleapis.com/google.example.librar - // y.v1.LibraryService". - // - // Example: - // - // audiences: bookstore_android.apps.googleusercontent.com, - // bookstore_web.apps.googleusercontent.com + // AuthProvider.audiences is implemented and accepted in all the runtime + // components. The list of JWT + // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-toke + // n-32#section-4.1.3). that are allowed to access. A JWT containing any + // of these audiences will be accepted. When this setting is absent, + // only JWTs with audience "https://Service_name/API_name" will be + // accepted. For example, if no audiences are in the setting, + // LibraryService API will only accept JWTs with the following audience + // "https://library-example.googleapis.com/google.example.library.v1.Libr + // aryService". Example: audiences: + // bookstore_android.apps.googleusercontent.com, + // bookstore_web.apps.googleusercontent.com Audiences string `json:"audiences,omitempty"` - // ProviderId: id from authentication provider. - // - // Example: - // - // provider_id: bookstore_auth + // ProviderId: id from authentication provider. Example: provider_id: + // bookstore_auth ProviderId string `json:"providerId,omitempty"` // ForceSendFields is a list of field names (e.g. "Audiences") to @@ -634,30 +735,20 @@ func (s *AuthRequirement) MarshalJSON() ([]byte, error) { } // Authentication: `Authentication` defines the authentication -// configuration for an API. -// -// Example for an API targeted for external use: -// -// name: calendar.googleapis.com -// authentication: -// providers: -// - id: google_calendar_auth -// jwks_uri: https://www.googleapis.com/oauth2/v1/certs -// issuer: https://securetoken.google.com -// rules: -// - selector: "*" -// requirements: -// provider_id: google_calendar_auth +// configuration for an API. 
Example for an API targeted for external +// use: name: calendar.googleapis.com authentication: providers: - id: +// google_calendar_auth jwks_uri: +// https://www.googleapis.com/oauth2/v1/certs issuer: +// https://securetoken.google.com rules: - selector: "*" requirements: +// provider_id: google_calendar_auth type Authentication struct { // Providers: Defines a set of authentication providers that a service // supports. Providers []*AuthProvider `json:"providers,omitempty"` // Rules: A list of authentication rules that apply to individual API - // methods. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // methods. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*AuthenticationRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. "Providers") to @@ -683,19 +774,12 @@ func (s *Authentication) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AuthenticationRule: Authentication rules for the service. -// -// By default, if a method has any authentication requirements, every -// request -// must include a valid credential matching one of the -// requirements. -// It's an error to include more than one kind of credential in a -// single -// request. -// -// If a method doesn't have any auth requirements, request credentials -// will be -// ignored. +// AuthenticationRule: Authentication rules for the service. By default, +// if a method has any authentication requirements, every request must +// include a valid credential matching one of the requirements. It's an +// error to include more than one kind of credential in a single +// request. If a method doesn't have any auth requirements, request +// credentials will be ignored. type AuthenticationRule struct { // AllowWithoutCredential: If true, the service accepts API keys without // any other credential. @@ -707,9 +791,8 @@ type AuthenticationRule struct { // Requirements: Requirements for additional authentication providers. Requirements []*AuthRequirement `json:"requirements,omitempty"` - // Selector: Selects the methods to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects the methods to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -740,10 +823,8 @@ func (s *AuthenticationRule) MarshalJSON() ([]byte, error) { // Backend: `Backend` defines the backend configuration for a service. type Backend struct { // Rules: A list of API backend rules that apply to individual API - // methods. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // methods. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*BackendRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. "Rules") to @@ -772,197 +853,81 @@ func (s *Backend) MarshalJSON() ([]byte, error) { // BackendRule: A backend rule provides configuration for an individual // API element. type BackendRule struct { - // Address: The address of the API backend. - // - // The scheme is used to determine the backend protocol and - // security. - // The following schemes are accepted: - // - // SCHEME PROTOCOL SECURITY - // http:// HTTP None - // https:// HTTP TLS - // grpc:// gRPC None - // grpcs:// gRPC TLS - // - // It is recommended to explicitly include a scheme. 
Leaving out the - // scheme - // may cause constrasting behaviors across platforms. - // - // If the port is unspecified, the default is: - // - 80 for schemes without TLS - // - 443 for schemes with TLS - // - // For HTTP backends, use protocol - // to specify the protocol version. + // Address: The address of the API backend. The scheme is used to + // determine the backend protocol and security. The following schemes + // are accepted: SCHEME PROTOCOL SECURITY http:// HTTP None https:// + // HTTP TLS grpc:// gRPC None grpcs:// gRPC TLS It is recommended to + // explicitly include a scheme. Leaving out the scheme may cause + // constrasting behaviors across platforms. If the port is unspecified, + // the default is: - 80 for schemes without TLS - 443 for schemes with + // TLS For HTTP backends, use protocol to specify the protocol version. Address string `json:"address,omitempty"` // Deadline: The number of seconds to wait for a response from a - // request. The default - // varies based on the request protocol and deployment environment. + // request. The default varies based on the request protocol and + // deployment environment. Deadline float64 `json:"deadline,omitempty"` // DisableAuth: When disable_auth is true, a JWT ID token won't be - // generated and the - // original "Authorization" HTTP header will be preserved. If the header - // is - // used to carry the original token and is expected by the backend, - // this - // field must be set to true to preserve the header. + // generated and the original "Authorization" HTTP header will be + // preserved. If the header is used to carry the original token and is + // expected by the backend, this field must be set to true to preserve + // the header. DisableAuth bool `json:"disableAuth,omitempty"` // JwtAudience: The JWT audience is used when generating a JWT ID token - // for the backend. - // This ID token will be added in the HTTP "authorization" header, and - // sent - // to the backend. + // for the backend. This ID token will be added in the HTTP + // "authorization" header, and sent to the backend. JwtAudience string `json:"jwtAudience,omitempty"` // MinDeadline: Minimum deadline in seconds needed for this method. - // Calls having deadline - // value lower than this will be rejected. + // Calls having deadline value lower than this will be rejected. MinDeadline float64 `json:"minDeadline,omitempty"` // OperationDeadline: The number of seconds to wait for the completion - // of a long running - // operation. The default is no deadline. + // of a long running operation. The default is no deadline. OperationDeadline float64 `json:"operationDeadline,omitempty"` // Possible values: // "PATH_TRANSLATION_UNSPECIFIED" // "CONSTANT_ADDRESS" - Use the backend address as-is, with no - // modification to the path. If the - // URL pattern contains variables, the variable names and values will - // be - // appended to the query string. If a query string parameter and a - // URL - // pattern variable have the same name, this may result in duplicate - // keys in - // the query string. 
- // - // # Examples - // - // Given the following operation config: - // - // Method path: /api/company/{cid}/user/{uid} - // Backend address: - // https://example.cloudfunctions.net/getUser - // - // Requests to the following request paths will call the backend at - // the - // translated path: - // - // Request path: /api/company/widgetworks/user/johndoe - // Translated: - // - // https://example.cloudfunctions.net/getUser?cid=widgetworks&uid=johndoe - // - // Request path: /api/company/widgetworks/user/johndoe?timezone=EST - // Translated: - // + // modification to the path. If the URL pattern contains variables, the + // variable names and values will be appended to the query string. If a + // query string parameter and a URL pattern variable have the same name, + // this may result in duplicate keys in the query string. # Examples + // Given the following operation config: Method path: + // /api/company/{cid}/user/{uid} Backend address: + // https://example.cloudfunctions.net/getUser Requests to the following + // request paths will call the backend at the translated path: Request + // path: /api/company/widgetworks/user/johndoe Translated: + // https://example.cloudfunctions.net/getUser?cid=widgetworks&uid=johndoe Request path: /api/company/widgetworks/user/johndoe?timezone=EST Translated: // https://example.cloudfunctions.net/getUser?timezone=EST&cid=widgetworks&uid=johndoe // "APPEND_PATH_TO_ADDRESS" - The request path will be appended to the - // backend address. - // - // # Examples - // - // Given the following operation config: - // - // Method path: /api/company/{cid}/user/{uid} - // Backend address: https://example.appspot.com - // - // Requests to the following request paths will call the backend at - // the - // translated path: - // - // Request path: /api/company/widgetworks/user/johndoe - // Translated: - // + // backend address. # Examples Given the following operation config: + // Method path: /api/company/{cid}/user/{uid} Backend address: + // https://example.appspot.com Requests to the following request paths + // will call the backend at the translated path: Request path: + // /api/company/widgetworks/user/johndoe Translated: // https://example.appspot.com/api/company/widgetworks/user/johndoe - // - // Request path: /api/company/widgetworks/user/johndoe?timezone=EST - // Translated: - // + // Request path: /api/company/widgetworks/user/johndoe?timezone=EST + // Translated: // https://example.appspot.com/api/company/widgetworks/user/johndoe?timezone=EST PathTranslation string `json:"pathTranslation,omitempty"` - // Protocol: The protocol used for sending a request to the backend. - // The supported values are "http/1.1" and "h2". - // - // The default value is inferred from the scheme in the - // address field: - // - // SCHEME PROTOCOL - // http:// http/1.1 - // https:// http/1.1 - // grpc:// h2 - // grpcs:// h2 - // - // For secure HTTP backends (https://) that support HTTP/2, set this - // field - // to "h2" for improved performance. - // - // Configuring this field to non-default values is only supported for - // secure - // HTTP backends. This field will be ignored for all other - // backends. - // - // See - // https://www.iana.org/assignments/tls-extensiontype-valu - // es/tls-extensiontype-values.xhtml#alpn-protocol-ids - // for more details on the supported values. + // Protocol: The protocol used for sending a request to the backend. The + // supported values are "http/1.1" and "h2". 
The default value is + // inferred from the scheme in the address field: SCHEME PROTOCOL + // http:// http/1.1 https:// http/1.1 grpc:// h2 grpcs:// h2 For secure + // HTTP backends (https://) that support HTTP/2, set this field to "h2" + // for improved performance. Configuring this field to non-default + // values is only supported for secure HTTP backends. This field will be + // ignored for all other backends. See + // https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids for more details on the supported + // values. Protocol string `json:"protocol,omitempty"` - // RenameTo: Unimplemented. Do not use. - // - // The new name the selected proto elements should be renamed to. - // - // The package, the service and the method can all be renamed. - // The backend server should implement the renamed proto. However, - // clients - // should call the original method, and ESF routes the traffic to the - // renamed - // method. - // - // HTTP clients should call the URL mapped to the original method. - // gRPC and Stubby clients should call the original method with package - // name. - // - // For legacy reasons, ESF allows Stubby clients to call with the - // short name (without the package name). However, for API - // Versioning(or - // multiple methods mapped to the same short name), all Stubby clients - // must - // call the method's full name with the package name, otherwise the - // first one - // (selector) wins. - // - // If this `rename_to` is specified with a trailing `*`, the `selector` - // must - // be specified with a trailing `*` as well. The all element short - // names - // matched by the `*` in the selector will be kept in the - // `rename_to`. - // - // For example, - // rename_rules: - // - selector: |- - // google.example.library.v1.* - // rename_to: google.example.library.* - // - // The selector matches `google.example.library.v1.Library.CreateShelf` - // and - // `google.example.library.v1.Library.CreateBook`, they will be renamed - // to - // `google.example.library.Library.CreateShelf` - // and - // `google.example.library.Library.CreateBook`. It essentially renames - // the - // proto package name section of the matched proto service and methods. - RenameTo string `json:"renameTo,omitempty"` - - // Selector: Selects the methods to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects the methods to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. "Address") to @@ -1006,53 +971,28 @@ func (s *BackendRule) UnmarshalJSON(data []byte) error { return nil } -// Billing: Billing related configuration of the service. -// -// The following example shows how to configure monitored resources and -// metrics -// for billing, `consumer_destinations` is the only supported -// destination and -// the monitored resources need at least one label -// key +// Billing: Billing related configuration of the service. 
The following +// example shows how to configure monitored resources and metrics for +// billing, `consumer_destinations` is the only supported destination +// and the monitored resources need at least one label key // `cloud.googleapis.com/location` to indicate the location of the -// billing -// usage, using different monitored resources between monitoring and -// billing is -// recommended so they can be evolved independently: -// -// -// monitored_resources: -// - type: library.googleapis.com/billing_branch -// labels: -// - key: cloud.googleapis.com/location -// description: | -// Predefined label to support billing location restriction. -// - key: city -// description: | -// Custom label to define the city where the library branch is -// located -// in. -// - key: name -// description: Custom label to define the name of the library -// branch. -// metrics: -// - name: library.googleapis.com/book/borrowed_count -// metric_kind: DELTA -// value_type: INT64 -// unit: "1" -// billing: -// consumer_destinations: -// - monitored_resource: library.googleapis.com/billing_branch -// metrics: -// - library.googleapis.com/book/borrowed_count +// billing usage, using different monitored resources between monitoring +// and billing is recommended so they can be evolved independently: +// monitored_resources: - type: library.googleapis.com/billing_branch +// labels: - key: cloud.googleapis.com/location description: | +// Predefined label to support billing location restriction. - key: city +// description: | Custom label to define the city where the library +// branch is located in. - key: name description: Custom label to define +// the name of the library branch. metrics: - name: +// library.googleapis.com/book/borrowed_count metric_kind: DELTA +// value_type: INT64 unit: "1" billing: consumer_destinations: - +// monitored_resource: library.googleapis.com/billing_branch metrics: - +// library.googleapis.com/book/borrowed_count type Billing struct { // ConsumerDestinations: Billing configurations for sending metrics to - // the consumer project. - // There can be multiple consumer destinations per service, each one - // must have - // a different monitored resource type. A metric can be used in at - // most - // one consumer destination. + // the consumer project. There can be multiple consumer destinations per + // service, each one must have a different monitored resource type. A + // metric can be used in at most one consumer destination. ConsumerDestinations []*BillingDestination `json:"consumerDestinations,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1081,17 +1021,14 @@ func (s *Billing) MarshalJSON() ([]byte, error) { } // BillingDestination: Configuration of a specific billing destination -// (Currently only support -// bill against consumer project). +// (Currently only support bill against consumer project). type BillingDestination struct { - // Metrics: Names of the metrics to report to this billing - // destination. + // Metrics: Names of the metrics to report to this billing destination. // Each name must be defined in Service.metrics section. Metrics []string `json:"metrics,omitempty"` // MonitoredResource: The monitored resource type. The type must be - // defined in - // Service.monitored_resources section. + // defined in Service.monitored_resources section. MonitoredResource string `json:"monitoredResource,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Metrics") to @@ -1123,45 +1060,35 @@ type CancelOperationRequest struct { } // Connection: Represents a private connection resource. A private -// connection is implemented -// as a VPC Network Peering connection between a service producer's VPC -// network -// and a service consumer's VPC network. +// connection is implemented as a VPC Network Peering connection between +// a service producer's VPC network and a service consumer's VPC +// network. type Connection struct { // Network: The name of service consumer's VPC network that's connected - // with service - // producer network, in the following - // format: - // `projects/{project}/global/networks/{network}`. - // `{project}` is a project number, such as in `12345` that includes - // the VPC service consumer's VPC network. `{network}` is the name of - // the - // service consumer's VPC network. + // with service producer network, in the following format: + // `projects/{project}/global/networks/{network}`. `{project}` is a + // project number, such as in `12345` that includes the VPC service + // consumer's VPC network. `{network}` is the name of the service + // consumer's VPC network. Network string `json:"network,omitempty"` // Peering: Output only. The name of the VPC Network Peering connection - // that was created by the - // service producer. + // that was created by the service producer. Peering string `json:"peering,omitempty"` // ReservedPeeringRanges: The name of one or more allocated IP address - // ranges for this service - // producer of type `PEERING`. - // Note that invoking CreateConnection method with a different range - // when - // connection is already established will not modify already - // provisioned - // service producer subnetworks. - // If CreateConnection method is invoked repeatedly to reconnect when - // peering - // connection had been disconnected on the consumer side, leaving this - // field - // empty will restore previously allocated IP ranges. + // ranges for this service producer of type `PEERING`. Note that + // invoking CreateConnection method with a different range when + // connection is already established will not modify already provisioned + // service producer subnetworks. If CreateConnection method is invoked + // repeatedly to reconnect when peering connection had been disconnected + // on the consumer side, leaving this field empty will restore + // previously allocated IP ranges. ReservedPeeringRanges []string `json:"reservedPeeringRanges,omitempty"` // Service: Output only. The name of the peering service that's - // associated with this connection, in - // the following format: `services/{service name}`. + // associated with this connection, in the following format: + // `services/{service name}`. Service string `json:"service,omitempty"` // ForceSendFields is a list of field names (e.g. "Network") to @@ -1190,10 +1117,9 @@ func (s *Connection) MarshalJSON() ([]byte, error) { // ConsumerProject: Represents a consumer project. type ConsumerProject struct { // ProjectNum: Required. Project number of the consumer that is - // launching the service instance. It - // can own the network that is peered with Google or, be a service - // project in - // an XPN where the host project has the network. + // launching the service instance. It can own the network that is peered + // with Google or, be a service project in an XPN where the host project + // has the network. ProjectNum int64 `json:"projectNum,omitempty,string"` // ForceSendFields is a list of field names (e.g. 
"ProjectNum") to @@ -1219,59 +1145,27 @@ func (s *ConsumerProject) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Context: `Context` defines which contexts an API -// requests. -// -// Example: -// -// context: -// rules: -// - selector: "*" -// requested: -// - google.rpc.context.ProjectContext -// - google.rpc.context.OriginContext -// -// The above specifies that all methods in the API -// request -// `google.rpc.context.ProjectContext` -// and -// `google.rpc.context.OriginContext`. -// -// Available context types are defined in -// package -// `google.rpc.context`. -// -// This also provides mechanism to whitelist any protobuf message -// extension that -// can be sent in grpc metadata using -// “x-goog-ext--bin” -// and -// “x-goog-ext--jspb” format. For example, list any -// service -// specific protobuf types that can appear in grpc metadata as follows -// in your -// yaml file: -// -// Example: -// -// context: -// rules: -// - selector: +// Context: `Context` defines which contexts an API requests. Example: +// context: rules: - selector: "*" requested: - +// google.rpc.context.ProjectContext - google.rpc.context.OriginContext +// The above specifies that all methods in the API request +// `google.rpc.context.ProjectContext` and +// `google.rpc.context.OriginContext`. Available context types are +// defined in package `google.rpc.context`. This also provides mechanism +// to whitelist any protobuf message extension that can be sent in grpc +// metadata using “x-goog-ext--bin” and “x-goog-ext--jspb” +// format. For example, list any service specific protobuf types that +// can appear in grpc metadata as follows in your yaml file: Example: +// context: rules: - selector: // "google.example.library.v1.LibraryService.CreateBook" -// allowed_request_extensions: -// - google.foo.v1.NewExtension -// allowed_response_extensions: -// - google.foo.v1.NewExtension -// -// You can also specify extension ID instead of fully qualified -// extension name +// allowed_request_extensions: - google.foo.v1.NewExtension +// allowed_response_extensions: - google.foo.v1.NewExtension You can +// also specify extension ID instead of fully qualified extension name // here. type Context struct { // Rules: A list of RPC context rules that apply to individual API - // methods. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // methods. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*ContextRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. "Rules") to @@ -1298,17 +1192,14 @@ func (s *Context) MarshalJSON() ([]byte, error) { } // ContextRule: A context rule provides information about the context -// for an individual API -// element. +// for an individual API element. type ContextRule struct { // AllowedRequestExtensions: A list of full type names or extension IDs - // of extensions allowed in grpc - // side channel from client to backend. + // of extensions allowed in grpc side channel from client to backend. AllowedRequestExtensions []string `json:"allowedRequestExtensions,omitempty"` // AllowedResponseExtensions: A list of full type names or extension IDs - // of extensions allowed in grpc - // side channel from backend to client. + // of extensions allowed in grpc side channel from backend to client. AllowedResponseExtensions []string `json:"allowedResponseExtensions,omitempty"` // Provided: A list of full type names of provided contexts. 
@@ -1317,9 +1208,8 @@ type ContextRule struct { // Requested: A list of full type names of requested contexts. Requested []string `json:"requested,omitempty"` - // Selector: Selects the methods to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects the methods to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1348,14 +1238,11 @@ func (s *ContextRule) MarshalJSON() ([]byte, error) { } // Control: Selects and configures the service controller used by the -// service. The -// service controller handles features like abuse, quota, billing, -// logging, -// monitoring, etc. +// service. The service controller handles features like abuse, quota, +// billing, logging, monitoring, etc. type Control struct { // Environment: The service control environment to use. If empty, no - // control plane - // feature (like quota and billing) will be enabled. + // control plane feature (like quota and billing) will be enabled. Environment string `json:"environment,omitempty"` // ForceSendFields is a list of field names (e.g. "Environment") to @@ -1381,24 +1268,14 @@ func (s *Control) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// CustomError: Customize service error responses. For example, list -// any service -// specific protobuf types that can appear in error detail lists -// of -// error responses. -// -// Example: -// -// custom_error: -// types: -// - google.foo.v1.CustomError -// - google.foo.v1.AnotherError +// CustomError: Customize service error responses. For example, list any +// service specific protobuf types that can appear in error detail lists +// of error responses. Example: custom_error: types: - +// google.foo.v1.CustomError - google.foo.v1.AnotherError type CustomError struct { // Rules: The list of custom error rules that apply to individual API - // messages. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // messages. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*CustomErrorRule `json:"rules,omitempty"` // Types: The list of custom error detail types, e.g. @@ -1431,14 +1308,12 @@ func (s *CustomError) MarshalJSON() ([]byte, error) { // CustomErrorRule: A custom error rule. type CustomErrorRule struct { // IsErrorType: Mark this message as possible payload in error response. - // Otherwise, - // objects of this type will be filtered when they appear in error - // payload. + // Otherwise, objects of this type will be filtered when they appear in + // error payload. IsErrorType bool `json:"isErrorType,omitempty"` - // Selector: Selects messages to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects messages to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. "IsErrorType") to @@ -1496,15 +1371,18 @@ func (s *CustomHttpPattern) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DeletePeeredDnsDomainMetadata: Metadata provided through GetOperation +// request for the LRO generated by DeletePeeredDnsDomain API. +type DeletePeeredDnsDomainMetadata struct { +} + // DisableVpcServiceControlsRequest: Request to disable VPC service // controls. 
type DisableVpcServiceControlsRequest struct { // ConsumerNetwork: Required. The network that the consumer is using to - // connect with services. - // Must be in the form of - // projects/{project}/global/networks/{network} - // {project} is a project number, as in '12345' - // {network} is network name. + // connect with services. Must be in the form of + // projects/{project}/global/networks/{network} {project} is a project + // number, as in '12345' {network} is network name. ConsumerNetwork string `json:"consumerNetwork,omitempty"` // ForceSendFields is a list of field names (e.g. "ConsumerNetwork") to @@ -1531,128 +1409,153 @@ func (s *DisableVpcServiceControlsRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Documentation: `Documentation` provides the information for -// describing a service. -// -// Example: -//
documentation:
-//   summary: >
-//     The Google Calendar API gives access
-//     to most calendar features.
-//   pages:
-//   - name: Overview
-//     content: (== include google/foo/overview.md ==)
-//   - name: Tutorial
-//     content: (== include google/foo/tutorial.md ==)
-//     subpages;
-//     - name: Java
-//       content: (== include google/foo/tutorial_java.md ==)
-//   rules:
-//   - selector: google.calendar.Calendar.Get
-//     description: >
-//       ...
-//   - selector: google.calendar.Calendar.Put
-//     description: >
-//       ...
-//
-// Documentation is provided in markdown syntax. In addition to -// standard markdown features, definition lists, tables and fenced -// code blocks are supported. Section headers can be provided and -// are -// interpreted relative to the section nesting of the context where -// a documentation fragment is embedded. -// -// Documentation from the IDL is merged with documentation defined -// via the config at normalization time, where documentation provided -// by config rules overrides IDL provided. -// -// A number of constructs specific to the API platform are supported -// in documentation text. -// -// In order to reference a proto element, the following -// notation can be -// used: -//
[fully.qualified.proto.name][]
-// T -// o override the display text used for the link, this can be -// used: -//
[display
-// text][fully.qualified.proto.name]
-// Text can be excluded from doc using the following -// notation: -//
(-- internal comment --)
-// -// A few directives are available in documentation. Note that -// directives must appear on a single line to be properly -// identified. The `include` directive includes a markdown file from -// an external source: -//
(== include path/to/file ==)
-// The `resource_for` directive marks a message to be the resource of -// a collection in REST view. If it is not specified, tools attempt -// to infer the resource from the operations in a -// collection: -//
(== resource_for v1.shelves.books
-// ==)
-// The directive `suppress_warning` does not directly affect -// documentation -// and is documented together with service config validation. -type Documentation struct { - // DocumentationRootUrl: The URL to the root of documentation. - DocumentationRootUrl string `json:"documentationRootUrl,omitempty"` - - // Overview: Declares a single overview page. For - // example: - //
documentation:
-	//   summary: ...
-	//   overview: (== include overview.md ==)
-	// 
- // This is a shortcut for the following declaration (using pages - // style): - //
documentation:
-	//   summary: ...
-	//   pages:
-	//   - name: Overview
-	//     content: (== include overview.md ==)
-	// 
- // Note: you cannot specify both `overview` field and `pages` field. - Overview string `json:"overview,omitempty"` - - // Pages: The top level pages for the documentation set. - Pages []*Page `json:"pages,omitempty"` +// DnsRecordSet: Represents a DNS record set resource. +type DnsRecordSet struct { + // Data: Required. As defined in RFC 1035 (section 5) and RFC 1034 + // (section 3.6.1) for examples see + // https://cloud.google.com/dns/records/json-record. + Data []string `json:"data,omitempty"` - // Rules: A list of documentation rules that apply to individual API - // elements. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. - Rules []*DocumentationRule `json:"rules,omitempty"` + // Domain: Required. The DNS or domain name of the record set, e.g. + // `test.example.com`. + Domain string `json:"domain,omitempty"` - // ServiceRootUrl: Specifies the service root url if the default one - // (the service name - // from the yaml file) is not suitable. This can be seen in any - // fully - // specified service urls as well as sections that show a base that - // other - // urls are relative to. - ServiceRootUrl string `json:"serviceRootUrl,omitempty"` + // Ttl: Required. The period of time for which this RecordSet can be + // cached by resolvers. + Ttl string `json:"ttl,omitempty"` - // Summary: A short summary of what the service does. Can only be - // provided by - // plain text. - Summary string `json:"summary,omitempty"` + // Type: Required. The identifier of a supported record type. + Type string `json:"type,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "DocumentationRootUrl") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // ForceSendFields is a list of field names (e.g. "Data") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DocumentationRootUrl") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field + // NullFields is a list of field names (e.g. "Data") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DnsRecordSet) MarshalJSON() ([]byte, error) { + type NoMethod DnsRecordSet + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DnsZone: Represents a DNS zone resource. +type DnsZone struct { + // DnsSuffix: The DNS name suffix of this zone e.g. `example.com.`. 
+ DnsSuffix string `json:"dnsSuffix,omitempty"` + + // Name: User assigned name for this resource. Must be unique within the + // project. The name must be 1-63 characters long, must begin with a + // letter, end with a letter or digit, and only contain lowercase + // letters, digits or dashes. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DnsSuffix") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DnsSuffix") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DnsZone) MarshalJSON() ([]byte, error) { + type NoMethod DnsZone + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Documentation: `Documentation` provides the information for +// describing a service. Example: documentation: summary: > The Google +// Calendar API gives access to most calendar features. pages: - name: +// Overview content: (== include google/foo/overview.md ==) - name: +// Tutorial content: (== include google/foo/tutorial.md ==) subpages; - +// name: Java content: (== include google/foo/tutorial_java.md ==) +// rules: - selector: google.calendar.Calendar.Get description: > ... - +// selector: google.calendar.Calendar.Put description: > ... +// Documentation is provided in markdown syntax. In addition to standard +// markdown features, definition lists, tables and fenced code blocks +// are supported. Section headers can be provided and are interpreted +// relative to the section nesting of the context where a documentation +// fragment is embedded. Documentation from the IDL is merged with +// documentation defined via the config at normalization time, where +// documentation provided by config rules overrides IDL provided. A +// number of constructs specific to the API platform are supported in +// documentation text. In order to reference a proto element, the +// following notation can be used: [fully.qualified.proto.name][] To +// override the display text used for the link, this can be used: +// [display text][fully.qualified.proto.name] Text can be excluded from +// doc using the following notation: (-- internal comment --) A few +// directives are available in documentation. Note that directives must +// appear on a single line to be properly identified. The `include` +// directive includes a markdown file from an external source: (== +// include path/to/file ==) The `resource_for` directive marks a message +// to be the resource of a collection in REST view. If it is not +// specified, tools attempt to infer the resource from the operations in +// a collection: (== resource_for v1.shelves.books ==) The directive +// `suppress_warning` does not directly affect documentation and is +// documented together with service config validation. 
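`DnsRecordSet` and `DnsZone` are new plain data types with the usual ForceSendFields/NullFields plumbing. A short sketch of filling them in follows, under the same package assumption as above; the record data and the duration-style TTL string are illustrative values, since this hunk only documents the TTL as a resolver cache period.

package main

import (
	"encoding/json"
	"fmt"

	servicenetworking "google.golang.org/api/servicenetworking/v1"
)

func main() {
	// All four DnsRecordSet fields are documented as required.
	rs := &servicenetworking.DnsRecordSet{
		Data:   []string{"192.0.2.10"}, // illustrative record data
		Domain: "test.example.com",
		Ttl:    "300s", // assumed duration-style string; the format is not spelled out in this hunk
		Type:   "A",
	}

	// DnsZone name rules: 1-63 characters, begins with a letter, ends with a
	// letter or digit, lowercase letters, digits or dashes only.
	zone := &servicenetworking.DnsZone{
		DnsSuffix: "example.com.",
		Name:      "example-zone",
	}

	for _, v := range []interface{}{rs, zone} {
		b, _ := json.Marshal(v)
		fmt.Println(string(b))
	}
}

The ForceSendFields and NullFields boilerplate repeated throughout the diff behaves the same for these new types: naming a field there makes the generated MarshalJSON emit it even when it holds the zero value, which is how callers distinguish unset fields from explicitly empty ones in Patch-style requests.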
+type Documentation struct { + // DocumentationRootUrl: The URL to the root of documentation. + DocumentationRootUrl string `json:"documentationRootUrl,omitempty"` + + // Overview: Declares a single overview page. For example: + // documentation: summary: ... overview: (== include overview.md ==) + // This is a shortcut for the following declaration (using pages style): + // documentation: summary: ... pages: - name: Overview content: (== + // include overview.md ==) Note: you cannot specify both `overview` + // field and `pages` field. + Overview string `json:"overview,omitempty"` + + // Pages: The top level pages for the documentation set. + Pages []*Page `json:"pages,omitempty"` + + // Rules: A list of documentation rules that apply to individual API + // elements. **NOTE:** All service configuration rules follow "last one + // wins" order. + Rules []*DocumentationRule `json:"rules,omitempty"` + + // ServiceRootUrl: Specifies the service root url if the default one + // (the service name from the yaml file) is not suitable. This can be + // seen in any fully specified service urls as well as sections that + // show a base that other urls are relative to. + ServiceRootUrl string `json:"serviceRootUrl,omitempty"` + + // Summary: A short summary of what the service does. Can only be + // provided by plain text. + Summary string `json:"summary,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "DocumentationRootUrl") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DocumentationRootUrl") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch @@ -1670,24 +1573,20 @@ func (s *Documentation) MarshalJSON() ([]byte, error) { // individual API elements. type DocumentationRule struct { // DeprecationDescription: Deprecation description of the selected - // element(s). It can be provided if - // an element is marked as `deprecated`. + // element(s). It can be provided if an element is marked as + // `deprecated`. DeprecationDescription string `json:"deprecationDescription,omitempty"` // Description: Description of the selected API(s). Description string `json:"description,omitempty"` // Selector: The selector is a comma-separated list of patterns. Each - // pattern is a - // qualified name of the element which may end in "*", indicating a - // wildcard. - // Wildcards are only allowed at the end and for a whole component of - // the - // qualified name, i.e. "foo.*" is ok, but not "foo.b*" or "foo.*.bar". - // A - // wildcard will match one or more components. To specify a default for - // all - // applicable elements, the whole pattern "*" is used. + // pattern is a qualified name of the element which may end in "*", + // indicating a wildcard. Wildcards are only allowed at the end and for + // a whole component of the qualified name, i.e. "foo.*" is ok, but not + // "foo.b*" or "foo.*.bar". 
A wildcard will match one or more + // components. To specify a default for all applicable elements, the + // whole pattern "*" is used. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1716,17 +1615,11 @@ func (s *DocumentationRule) MarshalJSON() ([]byte, error) { } // Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -1737,11 +1630,9 @@ type Empty struct { // controls. type EnableVpcServiceControlsRequest struct { // ConsumerNetwork: Required. The network that the consumer is using to - // connect with services. - // Must be in the form of - // projects/{project}/global/networks/{network} - // {project} is a project number, as in '12345' - // {network} is network name. + // connect with services. Must be in the form of + // projects/{project}/global/networks/{network} {project} is a project + // number, as in '12345' {network} is network name. ConsumerNetwork string `json:"consumerNetwork,omitempty"` // ForceSendFields is a list of field names (e.g. "ConsumerNetwork") to @@ -1769,64 +1660,38 @@ func (s *EnableVpcServiceControlsRequest) MarshalJSON() ([]byte, error) { } // Endpoint: `Endpoint` describes a network endpoint that serves a set -// of APIs. -// A service may expose any number of endpoints, and all endpoints share -// the -// same service configuration, such as quota configuration and -// monitoring -// configuration. -// -// Example service configuration: -// -// name: library-example.googleapis.com -// endpoints: -// # Below entry makes 'google.example.library.v1.Library' -// # API be served from endpoint address -// library-example.googleapis.com. -// # It also allows HTTP OPTIONS calls to be passed to the -// backend, for -// # it to decide whether the subsequent cross-origin request is -// # allowed to proceed. -// - name: library-example.googleapis.com -// allow_cors: true +// of APIs. A service may expose any number of endpoints, and all +// endpoints share the same service configuration, such as quota +// configuration and monitoring configuration. Example service +// configuration: name: library-example.googleapis.com endpoints: # +// Below entry makes 'google.example.library.v1.Library' # API be served +// from endpoint address library-example.googleapis.com. # It also +// allows HTTP OPTIONS calls to be passed to the backend, for # it to +// decide whether the subsequent cross-origin request is # allowed to +// proceed. - name: library-example.googleapis.com allow_cors: true type Endpoint struct { // Aliases: DEPRECATED: This field is no longer supported. Instead of - // using aliases, - // please specify multiple google.api.Endpoint for each of the - // intended - // aliases. - // - // Additional names that this endpoint will be hosted on. 
+ // using aliases, please specify multiple google.api.Endpoint for each + // of the intended aliases. Additional names that this endpoint will be + // hosted on. Aliases []string `json:"aliases,omitempty"` - // AllowCors: - // Allowing - // [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sh - // aring), aka - // cross-domain traffic, would allow the backends served from this - // endpoint to - // receive and respond to HTTP OPTIONS requests. The response will be - // used by - // the browser to determine whether the subsequent cross-origin request - // is - // allowed to proceed. + // AllowCors: Allowing + // [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), + // aka cross-domain traffic, would allow the backends served from this + // endpoint to receive and respond to HTTP OPTIONS requests. The + // response will be used by the browser to determine whether the + // subsequent cross-origin request is allowed to proceed. AllowCors bool `json:"allowCors,omitempty"` - // Features: The list of features enabled on this endpoint. - Features []string `json:"features,omitempty"` - // Name: The canonical name of this endpoint. Name string `json:"name,omitempty"` // Target: The specification of an Internet routable address of API - // frontend that will - // handle requests to this - // [API + // frontend that will handle requests to this [API // Endpoint](https://cloud.google.com/apis/design/glossary). It should - // be - // either a valid IPv4 address or a fully-qualified domain name. For - // example, - // "8.8.8.8" or "myservice.appspot.com". + // be either a valid IPv4 address or a fully-qualified domain name. For + // example, "8.8.8.8" or "myservice.appspot.com". Target string `json:"target,omitempty"` // ForceSendFields is a list of field names (e.g. "Aliases") to @@ -1980,9 +1845,8 @@ type Field struct { Number int64 `json:"number,omitempty"` // OneofIndex: The index of the field type in `Type.oneofs`, for message - // or enumeration - // types. The first type has index 1; zero means the type is not in the - // list. + // or enumeration types. The first type has index 1; zero means the type + // is not in the list. OneofIndex int64 `json:"oneofIndex,omitempty"` // Options: The protocol buffer options. @@ -1992,8 +1856,8 @@ type Field struct { Packed bool `json:"packed,omitempty"` // TypeUrl: The field type URL, without the scheme, for message or - // enumeration - // types. Example: "type.googleapis.com/google.protobuf.Timestamp". + // enumeration types. Example: + // "type.googleapis.com/google.protobuf.Timestamp". TypeUrl string `json:"typeUrl,omitempty"` // ForceSendFields is a list of field names (e.g. "Cardinality") to @@ -2020,26 +1884,22 @@ func (s *Field) MarshalJSON() ([]byte, error) { } // GoogleCloudServicenetworkingV1betaSubnetwork: Represents a subnet -// that was created or discovered by a private access -// management service. +// that was created or discovered by a private access management +// service. type GoogleCloudServicenetworkingV1betaSubnetwork struct { // IpCidrRange: Subnetwork CIDR range in `10.x.x.x/y` format. IpCidrRange string `json:"ipCidrRange,omitempty"` - // Name: Subnetwork name. - // See https://cloud.google.com/compute/docs/vpc/ + // Name: Subnetwork name. See https://cloud.google.com/compute/docs/vpc/ Name string `json:"name,omitempty"` // Network: In the Shared VPC host project, the VPC network that's - // peered with the - // consumer network. For - // example: + // peered with the consumer network. 
For example: // `projects/1234321/global/networks/host-network` Network string `json:"network,omitempty"` // OutsideAllocation: This is a discovered subnet that is not within the - // current consumer - // allocated ranges. + // current consumer allocated ranges. OutsideAllocation bool `json:"outsideAllocation,omitempty"` // ForceSendFields is a list of field names (e.g. "IpCidrRange") to @@ -2066,26 +1926,19 @@ func (s *GoogleCloudServicenetworkingV1betaSubnetwork) MarshalJSON() ([]byte, er } // Http: Defines the HTTP configuration for an API service. It contains -// a list of -// HttpRule, each specifying the mapping of an RPC method -// to one or more HTTP REST API methods. +// a list of HttpRule, each specifying the mapping of an RPC method to +// one or more HTTP REST API methods. type Http struct { // FullyDecodeReservedExpansion: When set to true, URL path parameters - // will be fully URI-decoded except in - // cases of single segment matches in reserved expansion, where "%2F" - // will be - // left encoded. - // - // The default behavior is to not decode RFC 6570 reserved characters in - // multi + // will be fully URI-decoded except in cases of single segment matches + // in reserved expansion, where "%2F" will be left encoded. The default + // behavior is to not decode RFC 6570 reserved characters in multi // segment matches. FullyDecodeReservedExpansion bool `json:"fullyDecodeReservedExpansion,omitempty"` // Rules: A list of HTTP configuration rules that apply to individual - // API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // API methods. **NOTE:** All service configuration rules follow "last + // one wins" order. Rules []*HttpRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -2113,403 +1966,187 @@ func (s *Http) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// HttpRule: # gRPC Transcoding -// -// gRPC Transcoding is a feature for mapping between a gRPC method and -// one or -// more HTTP REST endpoints. It allows developers to build a single API -// service -// that supports both gRPC APIs and REST APIs. Many systems, including -// [Google -// APIs](https://github.com/googleapis/googleapis), -// [Cloud Endpoints](https://cloud.google.com/endpoints), -// [gRPC -// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), -// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this -// feature -// and use it for large scale production services. -// -// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping -// specifies +// HttpRule: # gRPC Transcoding gRPC Transcoding is a feature for +// mapping between a gRPC method and one or more HTTP REST endpoints. It +// allows developers to build a single API service that supports both +// gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), [Cloud +// Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), and +// [Envoy](https://github.com/envoyproxy/envoy) proxy support this +// feature and use it for large scale production services. `HttpRule` +// defines the schema of the gRPC/REST mapping. The mapping specifies // how different portions of the gRPC request message are mapped to the -// URL -// path, URL query parameters, and HTTP request body. It also controls -// how the -// gRPC response message is mapped to the HTTP response body. 
`HttpRule` -// is -// typically specified as an `google.api.http` annotation on the gRPC -// method. -// -// Each mapping specifies a URL path template and an HTTP method. The -// path -// template may refer to one or more fields in the gRPC request message, -// as long -// as each field is a non-repeated field with a primitive (non-message) -// type. -// The path template controls how fields of the request message are -// mapped to -// the URL path. -// -// Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/{name=messages/*}" -// }; -// } -// } -// message GetMessageRequest { -// string name = 1; // Mapped to URL path. -// } -// message Message { -// string text = 1; // The resource content. -// } -// -// This enables an HTTP REST to gRPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: -// "messages/123456")` -// -// Any fields in the request message which are not bound by the path -// template -// automatically become HTTP query parameters if there is no HTTP -// request body. -// For example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get:"/v1/messages/{message_id}" -// }; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // Mapped to URL path. -// int64 revision = 2; // Mapped to URL query parameter -// `revision`. -// SubMessage sub = 3; // Mapped to URL query parameter -// `sub.subfield`. -// } -// -// This enables a HTTP JSON to RPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` -// | +// URL path, URL query parameters, and HTTP request body. It also +// controls how the gRPC response message is mapped to the HTTP response +// body. `HttpRule` is typically specified as an `google.api.http` +// annotation on the gRPC method. Each mapping specifies a URL path +// template and an HTTP method. The path template may refer to one or +// more fields in the gRPC request message, as long as each field is a +// non-repeated field with a primitive (non-message) type. The path +// template controls how fields of the request message are mapped to the +// URL path. Example: service Messaging { rpc +// GetMessage(GetMessageRequest) returns (Message) { option +// (google.api.http) = { get: "/v1/{name=messages/*}" }; } } message +// GetMessageRequest { string name = 1; // Mapped to URL path. } message +// Message { string text = 1; // The resource content. } This enables an +// HTTP REST to gRPC mapping as below: HTTP | gRPC -----|----- `GET +// /v1/messages/123456` | `GetMessage(name: "messages/123456")` Any +// fields in the request message which are not bound by the path +// template automatically become HTTP query parameters if there is no +// HTTP request body. For example: service Messaging { rpc +// GetMessage(GetMessageRequest) returns (Message) { option +// (google.api.http) = { get:"/v1/messages/{message_id}" }; } } message +// GetMessageRequest { message SubMessage { string subfield = 1; } +// string message_id = 1; // Mapped to URL path. int64 revision = 2; // +// Mapped to URL query parameter `revision`. SubMessage sub = 3; // +// Mapped to URL query parameter `sub.subfield`. 
} This enables a HTTP +// JSON to RPC mapping as below: HTTP | gRPC -----|----- `GET +// /v1/messages/123456?revision=2&sub.subfield=foo` | // `GetMessage(message_id: "123456" revision: 2 sub: -// SubMessage(subfield: -// "foo"))` -// -// Note that fields which are mapped to URL query parameters must have -// a -// primitive type or a repeated primitive type or a non-repeated message -// type. -// In the case of a repeated type, the parameter can be repeated in the -// URL -// as `...?param=A¶m=B`. In the case of a message type, each field -// of the -// message is mapped to a separate parameter, such -// as -// `...?foo.a=A&foo.b=B&foo.c=C`. -// -// For HTTP methods that allow a request body, the `body` -// field -// specifies the mapping. Consider a REST update method on the -// message resource collection: -// -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } -// -// The following HTTP JSON to RPC mapping is enabled, where -// the -// representation of the JSON in the request body is determined -// by -// protos JSON encoding: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | -// `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` -// -// The special name `*` can be used in the body mapping to define -// that -// every field not bound by the path template should be mapped to -// the -// request body. This enables the following alternative definition -// of -// the update method: -// -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// -// -// The following HTTP JSON to RPC mapping is enabled: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | -// `UpdateMessage(message_id: -// "123456" text: "Hi!")` -// -// Note that when using `*` in the body mapping, it is not possible -// to -// have HTTP parameters, as all fields not bound by the path end in -// the body. This makes this option more rarely used in practice -// when -// defining REST APIs. The common usage of `*` is in custom -// methods -// which don't use the URL at all for transferring data. -// -// It is possible to define multiple HTTP methods for one RPC by -// using -// the `additional_bindings` option. Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } -// -// This enables the following two alternative HTTP JSON to RPC -// mappings: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" -// message_id: -// "123456")` -// -// ## Rules for HTTP mapping -// -// 1. Leaf request fields (recursive expansion nested messages in the -// request -// message) are classified into three categories: -// - Fields referred by the path template. 
They are passed via the -// URL path. -// - Fields referred by the HttpRule.body. They are passed via the -// HTTP -// request body. -// - All other fields are passed via the URL query parameters, and -// the -// parameter name is the field path in the request message. A -// repeated -// field can be represented as multiple query parameters under the -// same -// name. -// 2. If HttpRule.body is "*", there is no URL query parameter, all -// fields -// are passed via URL path and HTTP request body. -// 3. If HttpRule.body is omitted, there is no HTTP request body, all -// fields are passed via URL path and URL query parameters. -// -// ### Path template syntax -// -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; -// -// The syntax `*` matches a single URL path segment. The syntax `**` -// matches -// zero or more URL path segments, which must be the last part of the -// URL path -// except the `Verb`. -// -// The syntax `Variable` matches part of the URL path as specified by -// its -// template. A variable template must not contain other variables. If a -// variable -// matches a single path segment, its template may be omitted, e.g. -// `{var}` -// is equivalent to `{var=*}`. -// -// The syntax `LITERAL` matches literal text in the URL path. If the -// `LITERAL` -// contains any reserved character, such characters should be -// percent-encoded -// before the matching. -// -// If a variable contains exactly one path segment, such as "{var}" -// or -// "{var=*}", when such a variable is expanded into a URL path on the -// client -// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. -// The -// server side does the reverse decoding. Such variables show up in -// the -// [Discovery -// Document](https://developers.google.com/discovery/v1/re -// ference/apis) as -// `{var}`. -// -// If a variable contains multiple path segments, such as -// "{var=foo/*}" -// or "{var=**}", when such a variable is expanded into a URL path on -// the -// client side, all characters except `[-_.~/0-9a-zA-Z]` are -// percent-encoded. -// The server side does the reverse decoding, except "%2F" and "%2f" are -// left -// unchanged. Such variables show up in -// the +// SubMessage(subfield: "foo"))` Note that fields which are mapped to +// URL query parameters must have a primitive type or a repeated +// primitive type or a non-repeated message type. In the case of a +// repeated type, the parameter can be repeated in the URL as +// `...?param=A¶m=B`. In the case of a message type, each field of +// the message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. For HTTP methods that allow a request +// body, the `body` field specifies the mapping. Consider a REST update +// method on the message resource collection: service Messaging { rpc +// UpdateMessage(UpdateMessageRequest) returns (Message) { option +// (google.api.http) = { patch: "/v1/messages/{message_id}" body: +// "message" }; } } message UpdateMessageRequest { string message_id = +// 1; // mapped to the URL Message message = 2; // mapped to the body } +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: HTTP | gRPC -----|----- `PATCH +// /v1/messages/123456 { "text": "Hi!" 
}` | `UpdateMessage(message_id: +// "123456" message { text: "Hi!" })` The special name `*` can be used +// in the body mapping to define that every field not bound by the path +// template should be mapped to the request body. This enables the +// following alternative definition of the update method: service +// Messaging { rpc UpdateMessage(Message) returns (Message) { option +// (google.api.http) = { patch: "/v1/messages/{message_id}" body: "*" }; +// } } message Message { string message_id = 1; string text = 2; } The +// following HTTP JSON to RPC mapping is enabled: HTTP | gRPC +// -----|----- `PATCH /v1/messages/123456 { "text": "Hi!" }` | +// `UpdateMessage(message_id: "123456" text: "Hi!")` Note that when +// using `*` in the body mapping, it is not possible to have HTTP +// parameters, as all fields not bound by the path end in the body. This +// makes this option more rarely used in practice when defining REST +// APIs. The common usage of `*` is in custom methods which don't use +// the URL at all for transferring data. It is possible to define +// multiple HTTP methods for one RPC by using the `additional_bindings` +// option. Example: service Messaging { rpc +// GetMessage(GetMessageRequest) returns (Message) { option +// (google.api.http) = { get: "/v1/messages/{message_id}" +// additional_bindings { get: +// "/v1/users/{user_id}/messages/{message_id}" } }; } } message +// GetMessageRequest { string message_id = 1; string user_id = 2; } This +// enables the following two alternative HTTP JSON to RPC mappings: HTTP +// | gRPC -----|----- `GET /v1/messages/123456` | +// `GetMessage(message_id: "123456")` `GET /v1/users/me/messages/123456` +// | `GetMessage(user_id: "me" message_id: "123456")` ## Rules for HTTP +// mapping 1. Leaf request fields (recursive expansion nested messages +// in the request message) are classified into three categories: - +// Fields referred by the path template. They are passed via the URL +// path. - Fields referred by the HttpRule.body. They are passed via the +// HTTP request body. - All other fields are passed via the URL query +// parameters, and the parameter name is the field path in the request +// message. A repeated field can be represented as multiple query +// parameters under the same name. 2. If HttpRule.body is "*", there is +// no URL query parameter, all fields are passed via URL path and HTTP +// request body. 3. If HttpRule.body is omitted, there is no HTTP +// request body, all fields are passed via URL path and URL query +// parameters. ### Path template syntax Template = "/" Segments [ Verb ] +// ; Segments = Segment { "/" Segment } ; Segment = "*" | "**" | LITERAL +// | Variable ; Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; Verb = ":" LITERAL ; The syntax `*` +// matches a single URL path segment. The syntax `**` matches zero or +// more URL path segments, which must be the last part of the URL path +// except the `Verb`. The syntax `Variable` matches part of the URL path +// as specified by its template. A variable template must not contain +// other variables. If a variable matches a single path segment, its +// template may be omitted, e.g. `{var}` is equivalent to `{var=*}`. The +// syntax `LITERAL` matches literal text in the URL path. If the +// `LITERAL` contains any reserved character, such characters should be +// percent-encoded before the matching. 
If a variable contains exactly +// one path segment, such as "{var}" or "{var=*}", when such a +// variable is expanded into a URL path on the client side, all +// characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The server +// side does the reverse decoding. Such variables show up in the // [Discovery -// Document](https://developers.google.com/discovery/v1/re -// ference/apis) as -// `{+var}`. -// -// ## Using gRPC API Service Configuration -// -// gRPC API Service Configuration (service config) is a configuration -// language -// for configuring a gRPC service to become a user-facing product. -// The +// Document](https://developers.google.com/discovery/v1/reference/apis) +// as `{var}`. If a variable contains multiple path segments, such as +// "{var=foo/*}" or "{var=**}", when such a variable is expanded +// into a URL path on the client side, all characters except +// `[-_.~/0-9a-zA-Z]` are percent-encoded. The server side does the +// reverse decoding, except "%2F" and "%2f" are left unchanged. Such +// variables show up in the [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) +// as `{+var}`. ## Using gRPC API Service Configuration gRPC API Service +// Configuration (service config) is a configuration language for +// configuring a gRPC service to become a user-facing product. The // service config is simply the YAML representation of the -// `google.api.Service` -// proto message. -// -// As an alternative to annotating your proto file, you can configure -// gRPC -// transcoding in your service config YAML files. You do this by -// specifying a -// `HttpRule` that maps the gRPC method to a REST endpoint, achieving -// the same -// effect as the proto annotation. This can be particularly useful if -// you -// have a proto that is reused in multiple services. Note that any -// transcoding +// `google.api.Service` proto message. As an alternative to annotating +// your proto file, you can configure gRPC transcoding in your service +// config YAML files. You do this by specifying a `HttpRule` that maps +// the gRPC method to a REST endpoint, achieving the same effect as the +// proto annotation. This can be particularly useful if you have a proto +// that is reused in multiple services. Note that any transcoding // specified in the service config will override any matching -// transcoding -// configuration in the proto. -// -// Example: -// -// http: -// rules: -// # Selects a gRPC method and applies HttpRule to it. -// - selector: example.v1.Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} -// -// ## Special notes -// -// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, -// the -// proto to JSON conversion must follow the -// [proto3 -// specification](https://developers.google.com/protocol-buffers/ -// docs/proto3#json). -// -// While the single segment variable follows the semantics of +// transcoding configuration in the proto. Example: http: rules: # +// Selects a gRPC method and applies HttpRule to it. - selector: +// example.v1.Messaging.GetMessage get: +// /v1/messages/{message_id}/{sub.subfield} ## Special notes When gRPC +// Transcoding is used to map a gRPC to JSON REST endpoints, the proto +// to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/pro +// to3#json). 
While the single segment variable follows the semantics of // [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple -// String -// Expansion, the multi segment variable **does not** follow RFC 6570 -// Section -// 3.2.3 Reserved Expansion. The reason is that the Reserved -// Expansion -// does not expand special characters like `?` and `#`, which would -// lead -// to invalid URLs. As the result, gRPC Transcoding uses a custom -// encoding -// for multi segment variables. -// -// The path variables **must not** refer to any repeated or mapped -// field, +// String Expansion, the multi segment variable **does not** follow RFC +// 6570 Section 3.2.3 Reserved Expansion. The reason is that the +// Reserved Expansion does not expand special characters like `?` and +// `#`, which would lead to invalid URLs. As the result, gRPC +// Transcoding uses a custom encoding for multi segment variables. The +// path variables **must not** refer to any repeated or mapped field, // because client libraries are not capable of handling such variable -// expansion. -// -// The path variables **must not** capture the leading "/" character. -// The reason -// is that the most common use case "{var}" does not capture the leading -// "/" -// character. For consistency, all path variables must share the same -// behavior. -// -// Repeated message fields must not be mapped to URL query parameters, -// because -// no client library can support such complicated mapping. -// -// If an API needs to use a JSON array for request or response body, it -// can map -// the request or response body to a repeated field. However, some -// gRPC -// Transcoding implementations may not support this feature. +// expansion. The path variables **must not** capture the leading "/" +// character. The reason is that the most common use case "{var}" does +// not capture the leading "/" character. For consistency, all path +// variables must share the same behavior. Repeated message fields must +// not be mapped to URL query parameters, because no client library can +// support such complicated mapping. If an API needs to use a JSON array +// for request or response body, it can map the request or response body +// to a repeated field. However, some gRPC Transcoding implementations +// may not support this feature. type HttpRule struct { // AdditionalBindings: Additional HTTP bindings for the selector. Nested - // bindings must - // not contain an `additional_bindings` field themselves (that is, - // the nesting may only be one level deep). + // bindings must not contain an `additional_bindings` field themselves + // (that is, the nesting may only be one level deep). AdditionalBindings []*HttpRule `json:"additionalBindings,omitempty"` // AllowHalfDuplex: When this flag is set to true, HTTP requests will be - // allowed to invoke a - // half-duplex streaming method. + // allowed to invoke a half-duplex streaming method. AllowHalfDuplex bool `json:"allowHalfDuplex,omitempty"` // Body: The name of the request field whose value is mapped to the HTTP - // request - // body, or `*` for mapping all request fields not captured by the - // path - // pattern to the HTTP body, or omitted for not having any HTTP request - // body. - // - // NOTE: the referred field must be present at the top-level of the - // request - // message type. + // request body, or `*` for mapping all request fields not captured by + // the path pattern to the HTTP body, or omitted for not having any HTTP + // request body. 
NOTE: the referred field must be present at the + // top-level of the request message type. Body string `json:"body,omitempty"` // Custom: The custom pattern is used for specifying an HTTP method that - // is not - // included in the `pattern` field, such as HEAD, or "*" to leave - // the - // HTTP method unspecified for this rule. The wild-card rule is - // useful - // for services that provide content to Web (HTML) clients. + // is not included in the `pattern` field, such as HEAD, or "*" to leave + // the HTTP method unspecified for this rule. The wild-card rule is + // useful for services that provide content to Web (HTML) clients. Custom *CustomHttpPattern `json:"custom,omitempty"` // Delete: Maps to HTTP DELETE. Used for deleting a resource. Delete string `json:"delete,omitempty"` - // Get: Maps to HTTP GET. Used for listing and getting information - // about + // Get: Maps to HTTP GET. Used for listing and getting information about // resources. Get string `json:"get,omitempty"` @@ -2524,19 +2161,13 @@ type HttpRule struct { Put string `json:"put,omitempty"` // ResponseBody: Optional. The name of the response field whose value is - // mapped to the HTTP - // response body. When omitted, the entire response message will be - // used - // as the HTTP response body. - // - // NOTE: The referred field must be present at the top-level of the - // response - // message type. + // mapped to the HTTP response body. When omitted, the entire response + // message will be used as the HTTP response body. NOTE: The referred + // field must be present at the top-level of the response message type. ResponseBody string `json:"responseBody,omitempty"` - // Selector: Selects a method to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects a method to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. "AdditionalBindings") @@ -2572,16 +2203,11 @@ type JwtLocation struct { Query string `json:"query,omitempty"` // ValuePrefix: The value prefix. The value format is - // "value_prefix{token}" - // Only applies to "in" header type. Must be empty for "in" query - // type. - // If not empty, the header value has to match (case sensitive) this - // prefix. - // If not matched, JWT will not be extracted. If matched, JWT will - // be - // extracted after the prefix is removed. - // - // For example, for "Authorization: Bearer {JWT}", + // "value_prefix{token}" Only applies to "in" header type. Must be empty + // for "in" query type. If not empty, the header value has to match + // (case sensitive) this prefix. If not matched, JWT will not be + // extracted. If matched, JWT will be extracted after the prefix is + // removed. For example, for "Authorization: Bearer {JWT}", // value_prefix="Bearer " with a space at the end. ValuePrefix string `json:"valuePrefix,omitempty"` @@ -2648,8 +2274,7 @@ func (s *LabelDescriptor) MarshalJSON() ([]byte, error) { } // ListConnectionsResponse: ListConnectionsResponse is the response to -// list peering states for the -// given service and consumer project. +// list peering states for the given service and consumer project. type ListConnectionsResponse struct { // Connections: The list of Connections. 
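The transcoding walkthrough above is long, but the generated `HttpRule` struct it documents stays small. Below is a sketch of the `additional_bindings` example from that comment expressed against the generated type, again assuming the vendored servicenetworking/v1 package; note the documented constraint that nested bindings may only go one level deep.

package main

import (
	"encoding/json"
	"fmt"

	servicenetworking "google.golang.org/api/servicenetworking/v1"
)

func main() {
	// From the HttpRule comment: GetMessage is reachable both as
	// GET /v1/messages/{message_id} and under a per-user path.
	rule := &servicenetworking.HttpRule{
		Selector: "example.v1.Messaging.GetMessage",
		Get:      "/v1/messages/{message_id}",
		AdditionalBindings: []*servicenetworking.HttpRule{{
			// Nested bindings must not contain additional_bindings of
			// their own (one level deep only).
			Get: "/v1/users/{user_id}/messages/{message_id}",
		}},
	}

	b, _ := json.Marshal(rule)
	fmt.Println(string(b))
}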
Connections []*Connection `json:"connections,omitempty"` @@ -2718,39 +2343,63 @@ func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// LogDescriptor: A description of a log type. Example in YAML format: -// -// - name: library.googleapis.com/activity_history -// description: The history of borrowing and returning library -// items. -// display_name: Activity -// labels: -// - key: /customer_id -// description: Identifier of a library customer +// ListPeeredDnsDomainsResponse: Response to list peered DNS domains for +// a given connection. +type ListPeeredDnsDomainsResponse struct { + // PeeredDnsDomains: The list of peered DNS domains. + PeeredDnsDomains []*PeeredDnsDomain `json:"peeredDnsDomains,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "PeeredDnsDomains") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PeeredDnsDomains") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ListPeeredDnsDomainsResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListPeeredDnsDomainsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LogDescriptor: A description of a log type. Example in YAML format: - +// name: library.googleapis.com/activity_history description: The +// history of borrowing and returning library items. display_name: +// Activity labels: - key: /customer_id description: Identifier of a +// library customer type LogDescriptor struct { // Description: A human-readable description of this log. This - // information appears in - // the documentation and can contain details. + // information appears in the documentation and can contain details. Description string `json:"description,omitempty"` // DisplayName: The human-readable name for this log. This information - // appears on - // the user interface and should be concise. + // appears on the user interface and should be concise. DisplayName string `json:"displayName,omitempty"` // Labels: The set of labels that are available to describe a specific - // log entry. - // Runtime requests that contain labels not specified here - // are - // considered invalid. + // log entry. Runtime requests that contain labels not specified here + // are considered invalid. Labels []*LabelDescriptor `json:"labels,omitempty"` // Name: The name of the log. It must be less than 512 characters long - // and can - // include the following characters: upper- and lower-case - // alphanumeric - // characters [A-Za-z0-9], and punctuation characters including - // slash, underscore, hyphen, period [/_-.]. 
+ // and can include the following characters: upper- and lower-case + // alphanumeric characters [A-Za-z0-9], and punctuation characters + // including slash, underscore, hyphen, period [/_-.]. Name string `json:"name,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -2776,54 +2425,30 @@ func (s *LogDescriptor) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Logging: Logging configuration of the service. -// -// The following example shows how to configure logs to be sent to -// the -// producer and consumer projects. In the example, the -// `activity_history` -// log is sent to both the producer and consumer projects, whereas -// the -// `purchase_history` log is only sent to the producer project. -// -// monitored_resources: -// - type: library.googleapis.com/branch -// labels: -// - key: /city -// description: The city where the library branch is located -// in. -// - key: /name -// description: The name of the branch. -// logs: -// - name: activity_history -// labels: -// - key: /customer_id -// - name: purchase_history -// logging: -// producer_destinations: -// - monitored_resource: library.googleapis.com/branch -// logs: -// - activity_history -// - purchase_history -// consumer_destinations: -// - monitored_resource: library.googleapis.com/branch -// logs: -// - activity_history +// Logging: Logging configuration of the service. The following example +// shows how to configure logs to be sent to the producer and consumer +// projects. In the example, the `activity_history` log is sent to both +// the producer and consumer projects, whereas the `purchase_history` +// log is only sent to the producer project. monitored_resources: - +// type: library.googleapis.com/branch labels: - key: /city description: +// The city where the library branch is located in. - key: /name +// description: The name of the branch. logs: - name: activity_history +// labels: - key: /customer_id - name: purchase_history logging: +// producer_destinations: - monitored_resource: +// library.googleapis.com/branch logs: - activity_history - +// purchase_history consumer_destinations: - monitored_resource: +// library.googleapis.com/branch logs: - activity_history type Logging struct { // ConsumerDestinations: Logging configurations for sending logs to the - // consumer project. - // There can be multiple consumer destinations, each one must have - // a - // different monitored resource type. A log can be used in at most - // one consumer destination. + // consumer project. There can be multiple consumer destinations, each + // one must have a different monitored resource type. A log can be used + // in at most one consumer destination. ConsumerDestinations []*LoggingDestination `json:"consumerDestinations,omitempty"` // ProducerDestinations: Logging configurations for sending logs to the - // producer project. - // There can be multiple producer destinations, each one must have - // a - // different monitored resource type. A log can be used in at most - // one producer destination. + // producer project. There can be multiple producer destinations, each + // one must have a different monitored resource type. A log can be used + // in at most one producer destination. ProducerDestinations []*LoggingDestination `json:"producerDestinations,omitempty"` // ForceSendFields is a list of field names (e.g. 
@@ -2852,19 +2477,16 @@ func (s *Logging) MarshalJSON() ([]byte, error) { } // LoggingDestination: Configuration of a specific logging destination -// (the producer project -// or the consumer project). +// (the producer project or the consumer project). type LoggingDestination struct { // Logs: Names of the logs to be sent to this destination. Each name - // must - // be defined in the Service.logs section. If the log name is - // not a domain scoped name, it will be automatically prefixed with - // the service name followed by "/". + // must be defined in the Service.logs section. If the log name is not a + // domain scoped name, it will be automatically prefixed with the + // service name followed by "/". Logs []string `json:"logs,omitempty"` // MonitoredResource: The monitored resource type. The type must be - // defined in the - // Service.monitored_resources section. + // defined in the Service.monitored_resources section. MonitoredResource string `json:"monitoredResource,omitempty"` // ForceSendFields is a list of field names (e.g. "Logs") to @@ -2941,32 +2563,26 @@ func (s *Method) MarshalJSON() ([]byte, error) { } // MetricDescriptor: Defines a metric type and its schema. Once a metric -// descriptor is created, -// deleting or altering it stops data collection and makes the metric -// type's -// existing data unusable. +// descriptor is created, deleting or altering it stops data collection +// and makes the metric type's existing data unusable. type MetricDescriptor struct { // Description: A detailed description of the metric, which can be used // in documentation. Description string `json:"description,omitempty"` // DisplayName: A concise name for the metric, which can be displayed in - // user interfaces. - // Use sentence case without an ending period, for example "Request - // count". - // This field is optional but it is recommended to be set for any - // metrics - // associated with user-visible concepts, such as Quota. + // user interfaces. Use sentence case without an ending period, for + // example "Request count". This field is optional but it is recommended + // to be set for any metrics associated with user-visible concepts, such + // as Quota. DisplayName string `json:"displayName,omitempty"` - // Labels: The set of labels that can be used to describe a - // specific - // instance of this metric type. For example, - // the - // `appengine.googleapis.com/http/server/response_latencies` metric - // type has a label for the HTTP response code, `response_code`, so - // you can look at latencies for successful responses or just - // for responses that failed. + // Labels: The set of labels that can be used to describe a specific + // instance of this metric type. For example, the + // `appengine.googleapis.com/http/server/response_latencies` metric type + // has a label for the HTTP response code, `response_code`, so you can + // look at latencies for successful responses or just for responses that + // failed. Labels []*LabelDescriptor `json:"labels,omitempty"` // LaunchStage: Optional. The launch stage of the metric definition. @@ -2978,50 +2594,31 @@ type MetricDescriptor struct { // "PRELAUNCH" - Prelaunch features are hidden from users and are only // visible internally. // "EARLY_ACCESS" - Early Access features are limited to a closed - // group of testers. To use - // these features, you must sign up in advance and sign a Trusted - // Tester - // agreement (which includes confidentiality provisions). 
These features - // may - // be unstable, changed in backward-incompatible ways, and are - // not - // guaranteed to be released. + // group of testers. To use these features, you must sign up in advance + // and sign a Trusted Tester agreement (which includes confidentiality + // provisions). These features may be unstable, changed in + // backward-incompatible ways, and are not guaranteed to be released. // "ALPHA" - Alpha is a limited availability test for releases before - // they are cleared - // for widespread use. By Alpha, all significant design issues are - // resolved - // and we are in the process of verifying functionality. Alpha - // customers - // need to apply for access, agree to applicable terms, and have - // their - // projects whitelisted. Alpha releases don’t have to be feature - // complete, - // no SLAs are provided, and there are no technical support obligations, - // but - // they will be far enough along that customers can actually use them - // in - // test environments or for limited-use tests -- just like they would - // in - // normal production cases. + // they are cleared for widespread use. By Alpha, all significant design + // issues are resolved and we are in the process of verifying + // functionality. Alpha customers need to apply for access, agree to + // applicable terms, and have their projects whitelisted. Alpha releases + // don’t have to be feature complete, no SLAs are provided, and there + // are no technical support obligations, but they will be far enough + // along that customers can actually use them in test environments or + // for limited-use tests -- just like they would in normal production + // cases. // "BETA" - Beta is the point at which we are ready to open a release - // for any - // customer to use. There are no SLA or technical support obligations in - // a - // Beta release. Products will be complete from a feature perspective, - // but - // may have some open outstanding issues. Beta releases are suitable - // for - // limited production use cases. + // for any customer to use. There are no SLA or technical support + // obligations in a Beta release. Products will be complete from a + // feature perspective, but may have some open outstanding issues. Beta + // releases are suitable for limited production use cases. // "GA" - GA features are open to all developers and are considered - // stable and - // fully qualified for production use. + // stable and fully qualified for production use. // "DEPRECATED" - Deprecated features are scheduled to be shut down - // and removed. For more - // information, see the “Deprecation Policy” section of our [Terms - // of - // Service](https://cloud.google.com/terms/) - // and the [Google Cloud Platform Subject to the - // Deprecation + // and removed. For more information, see the “Deprecation Policy” + // section of our [Terms of Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation // Policy](https://cloud.google.com/terms/deprecation) documentation. LaunchStage string `json:"launchStage,omitempty"` @@ -3030,188 +2627,106 @@ type MetricDescriptor struct { Metadata *MetricDescriptorMetadata `json:"metadata,omitempty"` // MetricKind: Whether the metric records instantaneous values, changes - // to a value, etc. - // Some combinations of `metric_kind` and `value_type` might not be - // supported. + // to a value, etc. Some combinations of `metric_kind` and `value_type` + // might not be supported. 
// // Possible values: // "METRIC_KIND_UNSPECIFIED" - Do not use this default value. // "GAUGE" - An instantaneous measurement of a value. // "DELTA" - The change in a value during a time interval. - // "CUMULATIVE" - A value accumulated over a time interval. - // Cumulative - // measurements in a time series should have the same start time - // and increasing end times, until an event resets the cumulative - // value to zero and sets a new start time for the following - // points. + // "CUMULATIVE" - A value accumulated over a time interval. Cumulative + // measurements in a time series should have the same start time and + // increasing end times, until an event resets the cumulative value to + // zero and sets a new start time for the following points. MetricKind string `json:"metricKind,omitempty"` - // MonitoredResourceTypes: Read-only. If present, then a time - // series, which is identified partially by - // a metric type and a MonitoredResourceDescriptor, that is - // associated - // with this metric type can only be associated with one of the - // monitored - // resource types listed here. + // MonitoredResourceTypes: Read-only. If present, then a time series, + // which is identified partially by a metric type and a + // MonitoredResourceDescriptor, that is associated with this metric type + // can only be associated with one of the monitored resource types + // listed here. MonitoredResourceTypes []string `json:"monitoredResourceTypes,omitempty"` // Name: The resource name of the metric descriptor. Name string `json:"name,omitempty"` - // Type: The metric type, including its DNS name prefix. The type is - // not - // URL-encoded. All user-defined metric types have the DNS - // name - // `custom.googleapis.com` or `external.googleapis.com`. Metric types - // should - // use a natural hierarchical grouping. For example: - // - // "custom.googleapis.com/invoice/paid/amount" - // "external.googleapis.com/prometheus/up" - // "appengine.googleapis.com/http/server/response_latencies" + // Type: The metric type, including its DNS name prefix. The type is not + // URL-encoded. All user-defined metric types have the DNS name + // `custom.googleapis.com` or `external.googleapis.com`. Metric types + // should use a natural hierarchical grouping. For example: + // "custom.googleapis.com/invoice/paid/amount" + // "external.googleapis.com/prometheus/up" + // "appengine.googleapis.com/http/server/response_latencies" Type string `json:"type,omitempty"` // Unit: The units in which the metric value is reported. It is only - // applicable - // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The - // `unit` - // defines the representation of the stored metric values. - // - // Different systems may scale the values to be more easily displayed - // (so a - // value of `0.02KBy` _might_ be displayed as `20By`, and a value - // of - // `3523KBy` _might_ be displayed as `3.5MBy`). However, if the `unit` - // is - // `KBy`, then the value of the metric is always in thousands of bytes, - // no - // matter how it may be displayed.. - // - // If you want a custom metric to record the exact number of CPU-seconds - // used - // by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` - // is - // `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses - // 12,005 - // CPU-seconds, then the value is written as `12005`. 
- // - // Alternatively, if you want a custom metric to record data in a - // more + // applicable if the `value_type` is `INT64`, `DOUBLE`, or + // `DISTRIBUTION`. The `unit` defines the representation of the stored + // metric values. Different systems may scale the values to be more + // easily displayed (so a value of `0.02KBy` _might_ be displayed as + // `20By`, and a value of `3523KBy` _might_ be displayed as `3.5MBy`). + // However, if the `unit` is `KBy`, then the value of the metric is + // always in thousands of bytes, no matter how it may be displayed.. If + // you want a custom metric to record the exact number of CPU-seconds + // used by a job, you can create an `INT64 CUMULATIVE` metric whose + // `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the + // job uses 12,005 CPU-seconds, then the value is written as `12005`. + // Alternatively, if you want a custom metric to record data in a more // granular way, you can create a `DOUBLE CUMULATIVE` metric whose - // `unit` is - // `ks{CPU}`, and then write the value `12.005` (which is - // `12005/1000`), - // or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). - // - // The supported units are a subset of [The Unified Code for Units - // of - // Measure](http://unitsofmeasure.org/ucum.html) standard: - // - // **Basic units (UNIT)** - // - // * `bit` bit - // * `By` byte - // * `s` second - // * `min` minute - // * `h` hour - // * `d` day - // - // **Prefixes (PREFIX)** - // - // * `k` kilo (10^3) - // * `M` mega (10^6) - // * `G` giga (10^9) - // * `T` tera (10^12) - // * `P` peta (10^15) - // * `E` exa (10^18) - // * `Z` zetta (10^21) - // * `Y` yotta (10^24) - // - // * `m` milli (10^-3) - // * `u` micro (10^-6) - // * `n` nano (10^-9) - // * `p` pico (10^-12) - // * `f` femto (10^-15) - // * `a` atto (10^-18) - // * `z` zepto (10^-21) - // * `y` yocto (10^-24) - // - // * `Ki` kibi (2^10) - // * `Mi` mebi (2^20) - // * `Gi` gibi (2^30) - // * `Ti` tebi (2^40) - // * `Pi` pebi (2^50) - // - // **Grammar** - // - // The grammar also includes these connectors: - // - // * `/` division or ratio (as an infix operator). For examples, - // `kBy/{email}` or `MiBy/10ms` (although you should almost - // never - // have `/s` in a metric `unit`; rates should always be - // computed at - // query time from the underlying cumulative or delta value). - // * `.` multiplication or composition (as an infix operator). For - // examples, `GBy.d` or `k{watt}.h`. - // - // The grammar for a unit is as follows: - // - // Expression = Component { "." Component } { "/" Component } ; - // - // Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] - // | Annotation - // | "1" - // ; - // - // Annotation = "{" NAME "}" ; - // - // Notes: - // - // * `Annotation` is just a comment if it follows a `UNIT`. If the - // annotation - // is used alone, then the unit is equivalent to `1`. For examples, - // `{request}/s == 1/s`, `By{transmitted}/s == By/s`. - // * `NAME` is a sequence of non-blank printable ASCII characters not - // containing `{` or `}`. - // * `1` represents a unitary [dimensionless - // unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, - // such - // as in `1/s`. It is typically used when none of the basic units - // are - // appropriate. For example, "new users per day" can be represented - // as - // `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 - // new - // users). 
Alternatively, "thousands of page views per day" would be - // represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a - // metric - // value of `5.3` would mean "5300 page views per day"). - // * `%` represents dimensionless value of 1/100, and annotates values - // giving - // a percentage (so the metric values are typically in the range of - // 0..100, - // and a metric value `3` means "3 percent"). - // * `10^2.%` indicates a metric contains a ratio, typically in the - // range - // 0..1, that will be multiplied by 100 and displayed as a - // percentage - // (so a metric value `0.03` means "3 percent"). + // `unit` is `ks{CPU}`, and then write the value `12.005` (which is + // `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is + // `12005/1024`). The supported units are a subset of [The Unified Code + // for Units of Measure](http://unitsofmeasure.org/ucum.html) standard: + // **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` + // minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** + // * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera + // (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * + // `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano + // (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) + // * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` + // mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) + // **Grammar** The grammar also includes these connectors: * `/` + // division or ratio (as an infix operator). For examples, `kBy/{email}` + // or `MiBy/10ms` (although you should almost never have `/s` in a + // metric `unit`; rates should always be computed at query time from the + // underlying cumulative or delta value). * `.` multiplication or + // composition (as an infix operator). For examples, `GBy.d` or + // `k{watt}.h`. The grammar for a unit is as follows: Expression = + // Component { "." Component } { "/" Component } ; Component = ( [ + // PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation + // = "{" NAME "}" ; Notes: * `Annotation` is just a comment if it + // follows a `UNIT`. If the annotation is used alone, then the unit is + // equivalent to `1`. For examples, `{request}/s == 1/s`, + // `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank + // printable ASCII characters not containing `{` or `}`. * `1` + // represents a unitary [dimensionless + // unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, + // such as in `1/s`. It is typically used when none of the basic units + // are appropriate. For example, "new users per day" can be represented + // as `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new + // users). Alternatively, "thousands of page views per day" would be + // represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric + // value of `5.3` would mean "5300 page views per day"). * `%` + // represents dimensionless value of 1/100, and annotates values giving + // a percentage (so the metric values are typically in the range of + // 0..100, and a metric value `3` means "3 percent"). * `10^2.%` + // indicates a metric contains a ratio, typically in the range 0..1, + // that will be multiplied by 100 and displayed as a percentage (so a + // metric value `0.03` means "3 percent"). Unit string `json:"unit,omitempty"` // ValueType: Whether the measurement is an integer, a floating-point - // number, etc. 
- // Some combinations of `metric_kind` and `value_type` might not be - // supported. + // number, etc. Some combinations of `metric_kind` and `value_type` + // might not be supported. // // Possible values: // "VALUE_TYPE_UNSPECIFIED" - Do not use this default value. - // "BOOL" - The value is a boolean. - // This value type can be used only if the metric kind is `GAUGE`. + // "BOOL" - The value is a boolean. This value type can be used only + // if the metric kind is `GAUGE`. // "INT64" - The value is a signed 64-bit integer. // "DOUBLE" - The value is a double precision floating point number. - // "STRING" - The value is a text string. - // This value type can be used only if the metric kind is `GAUGE`. + // "STRING" - The value is a text string. This value type can be used + // only if the metric kind is `GAUGE`. // "DISTRIBUTION" - The value is a `Distribution`. // "MONEY" - The value is money. ValueType string `json:"valueType,omitempty"` @@ -3243,10 +2758,8 @@ func (s *MetricDescriptor) MarshalJSON() ([]byte, error) { // guide the usage of a metric. type MetricDescriptorMetadata struct { // IngestDelay: The delay of data points caused by ingestion. Data - // points older than this - // age are guaranteed to be ingested and available to be read, - // excluding - // data loss due to errors. + // points older than this age are guaranteed to be ingested and + // available to be read, excluding data loss due to errors. IngestDelay string `json:"ingestDelay,omitempty"` // LaunchStage: Deprecated. Must use the MetricDescriptor.launch_stage @@ -3259,60 +2772,38 @@ type MetricDescriptorMetadata struct { // "PRELAUNCH" - Prelaunch features are hidden from users and are only // visible internally. // "EARLY_ACCESS" - Early Access features are limited to a closed - // group of testers. To use - // these features, you must sign up in advance and sign a Trusted - // Tester - // agreement (which includes confidentiality provisions). These features - // may - // be unstable, changed in backward-incompatible ways, and are - // not - // guaranteed to be released. + // group of testers. To use these features, you must sign up in advance + // and sign a Trusted Tester agreement (which includes confidentiality + // provisions). These features may be unstable, changed in + // backward-incompatible ways, and are not guaranteed to be released. // "ALPHA" - Alpha is a limited availability test for releases before - // they are cleared - // for widespread use. By Alpha, all significant design issues are - // resolved - // and we are in the process of verifying functionality. Alpha - // customers - // need to apply for access, agree to applicable terms, and have - // their - // projects whitelisted. Alpha releases don’t have to be feature - // complete, - // no SLAs are provided, and there are no technical support obligations, - // but - // they will be far enough along that customers can actually use them - // in - // test environments or for limited-use tests -- just like they would - // in - // normal production cases. + // they are cleared for widespread use. By Alpha, all significant design + // issues are resolved and we are in the process of verifying + // functionality. Alpha customers need to apply for access, agree to + // applicable terms, and have their projects whitelisted. 
Alpha releases + // don’t have to be feature complete, no SLAs are provided, and there + // are no technical support obligations, but they will be far enough + // along that customers can actually use them in test environments or + // for limited-use tests -- just like they would in normal production + // cases. // "BETA" - Beta is the point at which we are ready to open a release - // for any - // customer to use. There are no SLA or technical support obligations in - // a - // Beta release. Products will be complete from a feature perspective, - // but - // may have some open outstanding issues. Beta releases are suitable - // for - // limited production use cases. + // for any customer to use. There are no SLA or technical support + // obligations in a Beta release. Products will be complete from a + // feature perspective, but may have some open outstanding issues. Beta + // releases are suitable for limited production use cases. // "GA" - GA features are open to all developers and are considered - // stable and - // fully qualified for production use. + // stable and fully qualified for production use. // "DEPRECATED" - Deprecated features are scheduled to be shut down - // and removed. For more - // information, see the “Deprecation Policy” section of our [Terms - // of - // Service](https://cloud.google.com/terms/) - // and the [Google Cloud Platform Subject to the - // Deprecation + // and removed. For more information, see the “Deprecation Policy” + // section of our [Terms of Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation // Policy](https://cloud.google.com/terms/deprecation) documentation. LaunchStage string `json:"launchStage,omitempty"` // SamplePeriod: The sampling period of metric data points. For metrics - // which are written - // periodically, consecutive data points are stored at this time - // interval, - // excluding data loss due to errors. Metrics with a higher granularity - // have - // a smaller sampling period. + // which are written periodically, consecutive data points are stored at + // this time interval, excluding data loss due to errors. Metrics with a + // higher granularity have a smaller sampling period. SamplePeriod string `json:"samplePeriod,omitempty"` // ForceSendFields is a list of field names (e.g. "IngestDelay") to @@ -3339,23 +2830,18 @@ func (s *MetricDescriptorMetadata) MarshalJSON() ([]byte, error) { } // MetricRule: Bind API methods to metrics. Binding a method to a metric -// causes that -// metric's configured quota behaviors to apply to the method call. +// causes that metric's configured quota behaviors to apply to the +// method call. type MetricRule struct { // MetricCosts: Metrics to update when the selected methods are called, - // and the associated - // cost applied to each metric. - // - // The key of the map is the metric name, and the values are the - // amount - // increased for the metric against which the quota limits are - // defined. - // The value must not be negative. + // and the associated cost applied to each metric. The key of the map is + // the metric name, and the values are the amount increased for the + // metric against which the quota limits are defined. The value must not + // be negative. MetricCosts map[string]string `json:"metricCosts,omitempty"` - // Selector: Selects the methods to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects the methods to which this rule applies. 
Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. "MetricCosts") to @@ -3382,96 +2868,44 @@ func (s *MetricRule) MarshalJSON() ([]byte, error) { } // Mixin: Declares an API Interface to be included in this interface. -// The including -// interface must redeclare all the methods from the included interface, -// but -// documentation and options are inherited as follows: -// -// - If after comment and whitespace stripping, the documentation -// string of the redeclared method is empty, it will be inherited -// from the original method. -// -// - Each annotation belonging to the service config (http, -// visibility) which is not set in the redeclared method will be -// inherited. -// -// - If an http annotation is inherited, the path pattern will be -// modified as follows. Any version prefix will be replaced by the -// version of the including interface plus the root path if -// specified. -// -// Example of a simple mixin: -// -// package google.acl.v1; -// service AccessControl { -// // Get the underlying ACL object. -// rpc GetAcl(GetAclRequest) returns (Acl) { -// option (google.api.http).get = "/v1/{resource=**}:getAcl"; -// } -// } -// -// package google.storage.v2; -// service Storage { -// // rpc GetAcl(GetAclRequest) returns (Acl); -// -// // Get a data record. -// rpc GetData(GetDataRequest) returns (Data) { -// option (google.api.http).get = "/v2/{resource=**}"; -// } -// } -// -// Example of a mixin configuration: -// -// apis: -// - name: google.storage.v2.Storage -// mixins: -// - name: google.acl.v1.AccessControl -// -// The mixin construct implies that all methods in `AccessControl` -// are -// also declared with same name and request/response types in -// `Storage`. A documentation generator or annotation processor will -// see the effective `Storage.GetAcl` method after -// inherting -// documentation and annotations as follows: -// -// service Storage { -// // Get the underlying ACL object. -// rpc GetAcl(GetAclRequest) returns (Acl) { -// option (google.api.http).get = "/v2/{resource=**}:getAcl"; -// } -// ... -// } -// -// Note how the version in the path pattern changed from `v1` to -// `v2`. -// -// If the `root` field in the mixin is specified, it should be -// a -// relative path under which inherited HTTP paths are placed. Example: -// -// apis: -// - name: google.storage.v2.Storage -// mixins: -// - name: google.acl.v1.AccessControl -// root: acls -// -// This implies the following inherited HTTP annotation: -// -// service Storage { -// // Get the underlying ACL object. -// rpc GetAcl(GetAclRequest) returns (Acl) { -// option (google.api.http).get = -// "/v2/acls/{resource=**}:getAcl"; -// } -// ... -// } +// The including interface must redeclare all the methods from the +// included interface, but documentation and options are inherited as +// follows: - If after comment and whitespace stripping, the +// documentation string of the redeclared method is empty, it will be +// inherited from the original method. - Each annotation belonging to +// the service config (http, visibility) which is not set in the +// redeclared method will be inherited. - If an http annotation is +// inherited, the path pattern will be modified as follows. Any version +// prefix will be replaced by the version of the including interface +// plus the root path if specified. Example of a simple mixin: package +// google.acl.v1; service AccessControl { // Get the underlying ACL +// object. 
rpc GetAcl(GetAclRequest) returns (Acl) { option +// (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package +// google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) +// returns (Acl); // Get a data record. rpc GetData(GetDataRequest) +// returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; +// } } Example of a mixin configuration: apis: - name: +// google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in `Storage`. +// A documentation generator or annotation processor will see the +// effective `Storage.GetAcl` method after inheriting documentation and +// annotations as follows: service Storage { // Get the underlying ACL +// object. rpc GetAcl(GetAclRequest) returns (Acl) { option +// (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how +// the version in the path pattern changed from `v1` to `v2`. If the +// `root` field in the mixin is specified, it should be a relative path +// under which inherited HTTP paths are placed. Example: apis: - name: +// google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl +// root: acls This implies the following inherited HTTP annotation: +// service Storage { // Get the underlying ACL object. rpc +// GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = +// "/v2/acls/{resource=**}:getAcl"; } ... } type Mixin struct { // Name: The fully qualified name of the interface which is included. Name string `json:"name,omitempty"` - // Root: If non-empty specifies a path under which inherited HTTP - // paths + // Root: If non-empty specifies a path under which inherited HTTP paths // are rooted. Root string `json:"root,omitempty"` @@ -3499,39 +2933,28 @@ func (s *Mixin) MarshalJSON() ([]byte, error) { } // MonitoredResourceDescriptor: An object that describes the schema of a -// MonitoredResource object using a -// type name and a set of labels. For example, the monitored -// resource -// descriptor for Google Compute Engine VM instances has a type -// of -// "gce_instance" and specifies the use of the labels "instance_id" -// and -// "zone" to identify particular VM instances. -// -// Different APIs can support different monitored resource types. APIs -// generally -// provide a `list` method that returns the monitored resource -// descriptors used -// by the API. +// MonitoredResource object using a type name and a set of labels. For +// example, the monitored resource descriptor for Google Compute Engine +// VM instances has a type of "gce_instance" and specifies the use of +// the labels "instance_id" and "zone" to identify particular VM +// instances. Different APIs can support different monitored resource +// types. APIs generally provide a `list` method that returns the +// monitored resource descriptors used by the API. type MonitoredResourceDescriptor struct { // Description: Optional. A detailed description of the monitored - // resource type that might - // be used in documentation. + // resource type that might be used in documentation. Description string `json:"description,omitempty"` // DisplayName: Optional. A concise name for the monitored resource type - // that might be - // displayed in user interfaces. It should be a Title Cased Noun - // Phrase, - // without any article or other determiners. For example, - // "Google Cloud SQL Database". + // that might be displayed in user interfaces. 
It should be a Title + // Cased Noun Phrase, without any article or other determiners. For + // example, "Google Cloud SQL Database". DisplayName string `json:"displayName,omitempty"` // Labels: Required. A set of labels used to describe instances of this - // monitored - // resource type. For example, an individual Google Cloud SQL database - // is - // identified by values for the labels "database_id" and "zone". + // monitored resource type. For example, an individual Google Cloud SQL + // database is identified by values for the labels "database_id" and + // "zone". Labels []*LabelDescriptor `json:"labels,omitempty"` // LaunchStage: Optional. The launch stage of the monitored resource @@ -3544,70 +2967,45 @@ type MonitoredResourceDescriptor struct { // "PRELAUNCH" - Prelaunch features are hidden from users and are only // visible internally. // "EARLY_ACCESS" - Early Access features are limited to a closed - // group of testers. To use - // these features, you must sign up in advance and sign a Trusted - // Tester - // agreement (which includes confidentiality provisions). These features - // may - // be unstable, changed in backward-incompatible ways, and are - // not - // guaranteed to be released. + // group of testers. To use these features, you must sign up in advance + // and sign a Trusted Tester agreement (which includes confidentiality + // provisions). These features may be unstable, changed in + // backward-incompatible ways, and are not guaranteed to be released. // "ALPHA" - Alpha is a limited availability test for releases before - // they are cleared - // for widespread use. By Alpha, all significant design issues are - // resolved - // and we are in the process of verifying functionality. Alpha - // customers - // need to apply for access, agree to applicable terms, and have - // their - // projects whitelisted. Alpha releases don’t have to be feature - // complete, - // no SLAs are provided, and there are no technical support obligations, - // but - // they will be far enough along that customers can actually use them - // in - // test environments or for limited-use tests -- just like they would - // in - // normal production cases. + // they are cleared for widespread use. By Alpha, all significant design + // issues are resolved and we are in the process of verifying + // functionality. Alpha customers need to apply for access, agree to + // applicable terms, and have their projects whitelisted. Alpha releases + // don’t have to be feature complete, no SLAs are provided, and there + // are no technical support obligations, but they will be far enough + // along that customers can actually use them in test environments or + // for limited-use tests -- just like they would in normal production + // cases. // "BETA" - Beta is the point at which we are ready to open a release - // for any - // customer to use. There are no SLA or technical support obligations in - // a - // Beta release. Products will be complete from a feature perspective, - // but - // may have some open outstanding issues. Beta releases are suitable - // for - // limited production use cases. + // for any customer to use. There are no SLA or technical support + // obligations in a Beta release. Products will be complete from a + // feature perspective, but may have some open outstanding issues. Beta + // releases are suitable for limited production use cases. // "GA" - GA features are open to all developers and are considered - // stable and - // fully qualified for production use. 
+ // stable and fully qualified for production use. // "DEPRECATED" - Deprecated features are scheduled to be shut down - // and removed. For more - // information, see the “Deprecation Policy” section of our [Terms - // of - // Service](https://cloud.google.com/terms/) - // and the [Google Cloud Platform Subject to the - // Deprecation + // and removed. For more information, see the “Deprecation Policy” + // section of our [Terms of Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation // Policy](https://cloud.google.com/terms/deprecation) documentation. LaunchStage string `json:"launchStage,omitempty"` // Name: Optional. The resource name of the monitored resource // descriptor: - // "projects/{project_id}/monitoredResourceDescriptors/{type - // }" where - // {type} is the value of the `type` field in this object - // and - // {project_id} is a project ID that provides API-specific context - // for - // accessing the type. APIs that do not use project information can use - // the - // resource name format "monitoredResourceDescriptors/{type}". + // "projects/{project_id}/monitoredResourceDescriptors/{type}" where + // {type} is the value of the `type` field in this object and + // {project_id} is a project ID that provides API-specific context for + // accessing the type. APIs that do not use project information can use + // the resource name format "monitoredResourceDescriptors/{type}". Name string `json:"name,omitempty"` - // Type: Required. The monitored resource type. For example, the - // type + // Type: Required. The monitored resource type. For example, the type // "cloudsql_database" represents databases in Google Cloud SQL. - // The maximum length of this value is 256 characters. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -3633,74 +3031,49 @@ func (s *MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Monitoring: Monitoring configuration of the service. -// -// The example below shows how to configure monitored resources and -// metrics -// for monitoring. In the example, a monitored resource and two metrics -// are +// Monitoring: Monitoring configuration of the service. The example +// below shows how to configure monitored resources and metrics for +// monitoring. In the example, a monitored resource and two metrics are // defined. The `library.googleapis.com/book/returned_count` metric is -// sent -// to both producer and consumer projects, whereas -// the -// `library.googleapis.com/book/overdue_count` metric is only sent to -// the -// consumer project. -// -// monitored_resources: -// - type: library.googleapis.com/branch -// labels: -// - key: /city -// description: The city where the library branch is located -// in. -// - key: /name -// description: The name of the branch. 
-// metrics: -// - name: library.googleapis.com/book/returned_count -// metric_kind: DELTA -// value_type: INT64 -// labels: -// - key: /customer_id -// - name: library.googleapis.com/book/overdue_count -// metric_kind: GAUGE -// value_type: INT64 -// labels: -// - key: /customer_id -// monitoring: -// producer_destinations: -// - monitored_resource: library.googleapis.com/branch -// metrics: -// - library.googleapis.com/book/returned_count -// consumer_destinations: -// - monitored_resource: library.googleapis.com/branch -// metrics: -// - library.googleapis.com/book/returned_count -// - library.googleapis.com/book/overdue_count +// sent to both producer and consumer projects, whereas the +// `library.googleapis.com/book/num_overdue` metric is only sent to the +// consumer project. monitored_resources: - type: +// library.googleapis.com/Branch display_name: "Library Branch" +// description: "A branch of a library." launch_stage: GA labels: - key: +// resource_container description: "The Cloud container (ie. project id) +// for the Branch." - key: location description: "The location of the +// library branch." - key: branch_id description: "The id of the +// branch." metrics: - name: library.googleapis.com/book/returned_count +// display_name: "Books Returned" description: "The count of books that +// have been returned." launch_stage: GA metric_kind: DELTA value_type: +// INT64 unit: "1" labels: - key: customer_id description: "The id of +// the customer." - name: library.googleapis.com/book/num_overdue +// display_name: "Books Overdue" description: "The current number of +// overdue books." launch_stage: GA metric_kind: GAUGE value_type: INT64 +// unit: "1" labels: - key: customer_id description: "The id of the +// customer." monitoring: producer_destinations: - monitored_resource: +// library.googleapis.com/Branch metrics: - +// library.googleapis.com/book/returned_count consumer_destinations: - +// monitored_resource: library.googleapis.com/Branch metrics: - +// library.googleapis.com/book/returned_count - +// library.googleapis.com/book/num_overdue type Monitoring struct { // ConsumerDestinations: Monitoring configurations for sending metrics - // to the consumer project. - // There can be multiple consumer destinations. A monitored resouce type - // may - // appear in multiple monitoring destinations if different aggregations - // are - // needed for different sets of metrics associated with that - // monitored - // resource type. A monitored resource and metric pair may only be used - // once - // in the Monitoring configuration. + // to the consumer project. There can be multiple consumer destinations. + // A monitored resource type may appear in multiple monitoring + // destinations if different aggregations are needed for different sets + // of metrics associated with that monitored resource type. A monitored + // resource and metric pair may only be used once in the Monitoring + // configuration. ConsumerDestinations []*MonitoringDestination `json:"consumerDestinations,omitempty"` // ProducerDestinations: Monitoring configurations for sending metrics - // to the producer project. - // There can be multiple producer destinations. A monitored resouce type - // may - // appear in multiple monitoring destinations if different aggregations - // are - // needed for different sets of metrics associated with that - // monitored - // resource type. A monitored resource and metric pair may only be used - // once - // in the Monitoring configuration. + // to the producer project. 
There can be multiple producer destinations. + // A monitored resource type may appear in multiple monitoring + // destinations if different aggregations are needed for different sets + // of metrics associated with that monitored resource type. A monitored + // resource and metric pair may only be used once in the Monitoring + // configuration. ProducerDestinations []*MonitoringDestination `json:"producerDestinations,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -3729,17 +3102,14 @@ func (s *Monitoring) MarshalJSON() ([]byte, error) { } // MonitoringDestination: Configuration of a specific monitoring -// destination (the producer project -// or the consumer project). +// destination (the producer project or the consumer project). type MonitoringDestination struct { // Metrics: Types of the metrics to report to this monitoring - // destination. - // Each type must be defined in Service.metrics section. + // destination. Each type must be defined in Service.metrics section. Metrics []string `json:"metrics,omitempty"` // MonitoredResource: The monitored resource type. The type must be - // defined in - // Service.monitored_resources section. + // defined in Service.monitored_resources section. MonitoredResource string `json:"monitoredResource,omitempty"` // ForceSendFields is a list of field names (e.g. "Metrics") to @@ -3766,43 +3136,26 @@ func (s *MonitoringDestination) MarshalJSON() ([]byte, error) { } // OAuthRequirements: OAuth scopes are a way to define data and -// permissions on data. For example, -// there are scopes defined for "Read-only access to Google Calendar" -// and -// "Access to Cloud Platform". Users can consent to a scope for an -// application, -// giving it permission to access that data on their behalf. -// -// OAuth scope specifications should be fairly coarse grained; a user -// will need -// to see and understand the text description of what your scope -// means. -// -// In most cases: use one or at most two OAuth scopes for an entire -// family of +// permissions on data. For example, there are scopes defined for +// "Read-only access to Google Calendar" and "Access to Cloud Platform". +// Users can consent to a scope for an application, giving it permission +// to access that data on their behalf. OAuth scope specifications +// should be fairly coarse grained; a user will need to see and +// understand the text description of what your scope means. In most +// cases: use one or at most two OAuth scopes for an entire family of // products. If your product has multiple APIs, you should probably be -// sharing -// the OAuth scope across all of those APIs. -// -// When you need finer grained OAuth consent screens: talk with your -// product -// management about how developers will use them in practice. -// -// Please note that even though each of the canonical scopes is enough -// for a -// request to be accepted and passed to the backend, a request can still -// fail -// due to the backend requiring additional scopes or permissions. +// sharing the OAuth scope across all of those APIs. When you need finer +// grained OAuth consent screens: talk with your product management +// about how developers will use them in practice. Please note that even +// though each of the canonical scopes is enough for a request to be +// accepted and passed to the backend, a request can still fail due to +// the backend requiring additional scopes or permissions. 
type OAuthRequirements struct { // CanonicalScopes: The list of publicly documented OAuth scopes that - // are allowed access. An - // OAuth token containing any of these scopes will be - // accepted. - // - // Example: - // - // canonical_scopes: https://www.googleapis.com/auth/calendar, - // https://www.googleapis.com/auth/calendar.read + // are allowed access. An OAuth token containing any of these scopes + // will be accepted. Example: canonical_scopes: + // https://www.googleapis.com/auth/calendar, + // https://www.googleapis.com/auth/calendar.read CanonicalScopes string `json:"canonicalScopes,omitempty"` // ForceSendFields is a list of field names (e.g. "CanonicalScopes") to @@ -3830,52 +3183,38 @@ func (s *OAuthRequirements) MarshalJSON() ([]byte, error) { } // Operation: This resource represents a long-running operation that is -// the result of a -// network API call. +// the result of a network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in - // progress. - // If `true`, the operation is completed, and either `error` or - // `response` is - // available. + // progress. If `true`, the operation is completed, and either `error` + // or `response` is available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. Error *Status `json:"error,omitempty"` - // Metadata: Service-specific metadata associated with the operation. - // It typically - // contains progress information and common metadata such as create - // time. - // Some services might not provide such metadata. Any method that - // returns a - // long-running operation should document the metadata type, if any. + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same - // service that - // originally returns it. If you use the default HTTP mapping, - // the - // `name` should be a resource name ending with + // service that originally returns it. If you use the default HTTP + // mapping, the `name` should be a resource name ending with // `operations/{unique_id}`. Name string `json:"name,omitempty"` - // Response: The normal response of the operation in case of success. - // If the original - // method returns no data on success, such as `Delete`, the response - // is - // `google.protobuf.Empty`. If the original method is - // standard - // `Get`/`Create`/`Update`, the response should be the resource. For - // other - // methods, the response should have the type `XxxResponse`, where - // `Xxx` - // is the original method name. For example, if the original method - // name - // is `TakeSnapshot()`, the inferred response type - // is - // `TakeSnapshotResponse`. + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as `Delete`, the + // response is `google.protobuf.Empty`. If the original method is + // standard `Get`/`Create`/`Update`, the response should be the + // resource. For other methods, the response should have the type + // `XxxResponse`, where `Xxx` is the original method name. 
For example, + // if the original method name is `TakeSnapshot()`, the inferred + // response type is `TakeSnapshotResponse`. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -3906,25 +3245,19 @@ func (s *Operation) MarshalJSON() ([]byte, error) { } // Option: A protocol buffer option, which can be attached to a message, -// field, -// enumeration, etc. +// field, enumeration, etc. type Option struct { // Name: The option's name. For protobuf built-in options (options - // defined in - // descriptor.proto), this is the short name. For example, - // "map_entry". - // For custom options, it should be the fully-qualified name. For - // example, - // "google.api.http". + // defined in descriptor.proto), this is the short name. For example, + // "map_entry". For custom options, it should be the fully-qualified + // name. For example, "google.api.http". Name string `json:"name,omitempty"` // Value: The option's value packed in an Any message. If the value is a - // primitive, - // the corresponding wrapper type defined in - // google/protobuf/wrappers.proto - // should be used. If the value is an enum, it should be stored as an - // int32 - // value using the google.protobuf.Int32Value type. + // primitive, the corresponding wrapper type defined in + // google/protobuf/wrappers.proto should be used. If the value is an + // enum, it should be stored as an int32 value using the + // google.protobuf.Int32Value type. Value googleapi.RawMessage `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "Name") to @@ -3951,39 +3284,24 @@ func (s *Option) MarshalJSON() ([]byte, error) { } // Page: Represents a documentation page. A page can contain subpages to -// represent -// nested documentation set structure. +// represent nested documentation set structure. type Page struct { - // Content: The Markdown content of the page. You can use (== - // include {path} - // ==) to include content from a Markdown file. + // Content: The Markdown content of the page. You can use (== include + // {path} ==) to include content from a Markdown file. Content string `json:"content,omitempty"` // Name: The name of the page. It will be used as an identity of the - // page to - // generate URI of the page, text of the link to this page in - // navigation, - // etc. The full page name (start from the root page name to this - // page - // concatenated with `.`) can be used as reference to the page in - // your - // documentation. For example: - //
pages:
-	// - name: Tutorial
-	//   content: (== include tutorial.md ==)
-	//   subpages:
-	//   - name: Java
-	//     content: (== include tutorial_java.md
-	// ==)
-	// 
- // You can reference `Java` page using Markdown reference link - // syntax: - // `Java`. + // page to generate URI of the page, text of the link to this page in + // navigation, etc. The full page name (start from the root page name to + // this page concatenated with `.`) can be used as reference to the page + // in your documentation. For example: pages: - name: Tutorial content: + // (== include tutorial.md ==) subpages: - name: Java content: (== + // include tutorial_java.md ==) You can reference `Java` page using + // Markdown reference link syntax: `Java`. Name string `json:"name,omitempty"` // Subpages: Subpages of this page. The order of subpages specified here - // will be - // honored in the generated docset. + // will be honored in the generated docset. Subpages []*Page `json:"subpages,omitempty"` // ForceSendFields is a list of field names (e.g. "Content") to @@ -4009,23 +3327,60 @@ func (s *Page) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// PeeredDnsDomain: DNS domain suffix for which requests originating in +// the producer VPC network are resolved in the associated consumer VPC +// network. +type PeeredDnsDomain struct { + // DnsSuffix: The DNS domain name suffix e.g. `example.com.`. + DnsSuffix string `json:"dnsSuffix,omitempty"` + + // Name: User assigned name for this resource. Must be unique within the + // consumer network. The name must be 1-63 characters long, must begin + // with a letter, end with a letter or digit, and only contain lowercase + // letters, digits or dashes. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DnsSuffix") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DnsSuffix") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PeeredDnsDomain) MarshalJSON() ([]byte, error) { + type NoMethod PeeredDnsDomain + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PeeredDnsDomainMetadata: Metadata provided through GetOperation +// request for the LRO generated by CreatePeeredDnsDomain API. +type PeeredDnsDomainMetadata struct { +} + // PolicyBinding: Grouping of IAM role and IAM member. type PolicyBinding struct { - // Member: Required. Member to bind the role with. - // See + // Member: Required. Member to bind the role with. See // /iam/docs/reference/rest/v1/Policy#Binding for how to format each - // member. - // Eg. - // - user:myuser@mydomain.com - // - serviceAccount:my-service-account@app.gserviceaccount.com + // member. Eg. - user:myuser@mydomain.com - + // serviceAccount:my-service-account@app.gserviceaccount.com Member string `json:"member,omitempty"` // Role: Required. Role to apply. Only whitelisted roles can be used at - // the specified - // granularity. 
The role must be one of the following: - // - 'roles/container.hostServiceAgentUser' applied on the shared VPC - // host - // project + // the specified granularity. The role must be one of the following: - + // 'roles/container.hostServiceAgentUser' applied on the shared VPC host + // project - 'roles/compute.securityAdmin' applied on the shared VPC + // host project Role string `json:"role,omitempty"` // ForceSendFields is a list of field names (e.g. "Member") to @@ -4052,67 +3407,33 @@ func (s *PolicyBinding) MarshalJSON() ([]byte, error) { } // Quota: Quota configuration helps to achieve fairness and budgeting in -// service -// usage. -// -// The metric based quota configuration works this way: -// - The service configuration defines a set of metrics. -// - For API calls, the quota.metric_rules maps methods to metrics with -// corresponding costs. -// - The quota.limits defines limits on the metrics, which will be used -// for -// quota checks at runtime. -// -// An example quota configuration in yaml format: -// -// quota: -// limits: -// -// - name: apiWriteQpsPerProject -// metric: library.googleapis.com/write_calls -// unit: "1/min/{project}" # rate limit for consumer projects -// values: -// STANDARD: 10000 -// -// -// # The metric rules bind all methods to the read_calls metric, -// # except for the UpdateBook and DeleteBook methods. These two -// methods -// # are mapped to the write_calls metric, with the UpdateBook -// method -// # consuming at twice rate as the DeleteBook method. -// metric_rules: -// - selector: "*" -// metric_costs: -// library.googleapis.com/read_calls: 1 -// - selector: google.example.library.v1.LibraryService.UpdateBook -// metric_costs: -// library.googleapis.com/write_calls: 2 -// - selector: google.example.library.v1.LibraryService.DeleteBook -// metric_costs: -// library.googleapis.com/write_calls: 1 -// -// Corresponding Metric definition: -// -// metrics: -// - name: library.googleapis.com/read_calls -// display_name: Read requests -// metric_kind: DELTA -// value_type: INT64 -// -// - name: library.googleapis.com/write_calls -// display_name: Write requests -// metric_kind: DELTA -// value_type: INT64 -// -// +// service usage. The metric based quota configuration works this way: - +// The service configuration defines a set of metrics. - For API calls, +// the quota.metric_rules maps methods to metrics with corresponding +// costs. - The quota.limits defines limits on the metrics, which will +// be used for quota checks at runtime. An example quota configuration +// in yaml format: quota: limits: - name: apiWriteQpsPerProject metric: +// library.googleapis.com/write_calls unit: "1/min/{project}" # rate +// limit for consumer projects values: STANDARD: 10000 # The metric +// rules bind all methods to the read_calls metric, # except for the +// UpdateBook and DeleteBook methods. These two methods # are mapped to +// the write_calls metric, with the UpdateBook method # consuming at +// twice rate as the DeleteBook method. 
metric_rules: - selector: "*" +// metric_costs: library.googleapis.com/read_calls: 1 - selector: +// google.example.library.v1.LibraryService.UpdateBook metric_costs: +// library.googleapis.com/write_calls: 2 - selector: +// google.example.library.v1.LibraryService.DeleteBook metric_costs: +// library.googleapis.com/write_calls: 1 Corresponding Metric +// definition: metrics: - name: library.googleapis.com/read_calls +// display_name: Read requests metric_kind: DELTA value_type: INT64 - +// name: library.googleapis.com/write_calls display_name: Write requests +// metric_kind: DELTA value_type: INT64 type Quota struct { // Limits: List of `QuotaLimit` definitions for the service. Limits []*QuotaLimit `json:"limits,omitempty"` // MetricRules: List of `MetricRule` definitions, each one mapping a - // selected method to one - // or more metrics. + // selected method to one or more metrics. MetricRules []*MetricRule `json:"metricRules,omitempty"` // ForceSendFields is a list of field names (e.g. "Limits") to @@ -4139,116 +3460,75 @@ func (s *Quota) MarshalJSON() ([]byte, error) { } // QuotaLimit: `QuotaLimit` defines a specific limit that applies over a -// specified duration -// for a limit type. There can be at most one limit for a duration and -// limit -// type combination defined within a `QuotaGroup`. +// specified duration for a limit type. There can be at most one limit +// for a duration and limit type combination defined within a +// `QuotaGroup`. type QuotaLimit struct { // DefaultLimit: Default number of tokens that can be consumed during - // the specified - // duration. This is the number of tokens assigned when a - // client - // application developer activates the service for his/her - // project. - // - // Specifying a value of 0 will block all requests. This can be used if - // you - // are provisioning quota to selected consumers and blocking - // others. - // Similarly, a value of -1 will indicate an unlimited quota. No - // other - // negative values are allowed. - // - // Used by group-based quotas only. + // the specified duration. This is the number of tokens assigned when a + // client application developer activates the service for his/her + // project. Specifying a value of 0 will block all requests. This can be + // used if you are provisioning quota to selected consumers and blocking + // others. Similarly, a value of -1 will indicate an unlimited quota. No + // other negative values are allowed. Used by group-based quotas only. DefaultLimit int64 `json:"defaultLimit,omitempty,string"` // Description: Optional. User-visible, extended description for this - // quota limit. - // Should be used only when more context is needed to understand this - // limit - // than provided by the limit's display name (see: `display_name`). + // quota limit. Should be used only when more context is needed to + // understand this limit than provided by the limit's display name (see: + // `display_name`). Description string `json:"description,omitempty"` - // DisplayName: User-visible display name for this limit. - // Optional. If not set, the UI will provide a default display name - // based on - // the quota configuration. This field can be used to override the - // default + // DisplayName: User-visible display name for this limit. Optional. If + // not set, the UI will provide a default display name based on the + // quota configuration. This field can be used to override the default // display name generated from the configuration. 
DisplayName string `json:"displayName,omitempty"` // Duration: Duration of this limit in textual notation. Must be "100s" - // or "1d". - // - // Used by group-based quotas only. + // or "1d". Used by group-based quotas only. Duration string `json:"duration,omitempty"` // FreeTier: Free tier value displayed in the Developers Console for - // this limit. - // The free tier is the number of tokens that will be subtracted from - // the - // billed amount when billing is enabled. - // This field can only be set on a limit with duration "1d", in a - // billable - // group; it is invalid on any other limit. If this field is not set, - // it + // this limit. The free tier is the number of tokens that will be + // subtracted from the billed amount when billing is enabled. This field + // can only be set on a limit with duration "1d", in a billable group; + // it is invalid on any other limit. If this field is not set, it // defaults to 0, indicating that there is no free tier for this - // service. - // - // Used by group-based quotas only. + // service. Used by group-based quotas only. FreeTier int64 `json:"freeTier,omitempty,string"` // MaxLimit: Maximum number of tokens that can be consumed during the - // specified - // duration. Client application developers can override the default - // limit up - // to this maximum. If specified, this value cannot be set to a value - // less - // than the default limit. If not specified, it is set to the default - // limit. - // - // To allow clients to apply overrides with no upper bound, set this to - // -1, - // indicating unlimited maximum quota. - // - // Used by group-based quotas only. + // specified duration. Client application developers can override the + // default limit up to this maximum. If specified, this value cannot be + // set to a value less than the default limit. If not specified, it is + // set to the default limit. To allow clients to apply overrides with no + // upper bound, set this to -1, indicating unlimited maximum quota. Used + // by group-based quotas only. MaxLimit int64 `json:"maxLimit,omitempty,string"` // Metric: The name of the metric this quota limit applies to. The quota - // limits with - // the same metric will be checked together during runtime. The metric - // must be - // defined within the service config. + // limits with the same metric will be checked together during runtime. + // The metric must be defined within the service config. Metric string `json:"metric,omitempty"` - // Name: Name of the quota limit. - // - // The name must be provided, and it must be unique within the service. - // The - // name can only include alphanumeric characters as well as '-'. - // - // The maximum length of the limit name is 64 characters. + // Name: Name of the quota limit. The name must be provided, and it must + // be unique within the service. The name can only include alphanumeric + // characters as well as '-'. The maximum length of the limit name is 64 + // characters. Name string `json:"name,omitempty"` - // Unit: Specify the unit of the quota limit. It uses the same syntax - // as - // Metric.unit. The supported unit kinds are determined by the - // quota - // backend system. - // - // Here are some examples: - // * "1/min/{project}" for quota per minute per project. - // - // Note: the order of unit components is insignificant. - // The "1" at the beginning is required to follow the metric unit - // syntax. + // Unit: Specify the unit of the quota limit. It uses the same syntax as + // Metric.unit. 
The supported unit kinds are determined by the quota + // backend system. Here are some examples: * "1/min/{project}" for quota + // per minute per project. Note: the order of unit components is + // insignificant. The "1" at the beginning is required to follow the + // metric unit syntax. Unit string `json:"unit,omitempty"` // Values: Tiered limit values. You must specify this as a key:value - // pair, with an - // integer value that is the maximum number of requests allowed for - // the - // specified unit. Currently only STANDARD is supported. + // pair, with an integer value that is the maximum number of requests + // allowed for the specified unit. Currently only STANDARD is supported. Values map[string]string `json:"values,omitempty"` // ForceSendFields is a list of field names (e.g. "DefaultLimit") to @@ -4276,15 +3556,12 @@ func (s *QuotaLimit) MarshalJSON() ([]byte, error) { // Range: Represents a found unused range. type Range struct { - // IpCidrRange: CIDR range in "10.x.x.x/y" format that is within - // the + // IpCidrRange: CIDR range in "10.x.x.x/y" format that is within the // allocated ranges and currently unused. IpCidrRange string `json:"ipCidrRange,omitempty"` // Network: In the Shared VPC host project, the VPC network that's - // peered with the - // consumer network. For - // example: + // peered with the consumer network. For example: // `projects/1234321/global/networks/host-network` Network string `json:"network,omitempty"` @@ -4314,23 +3591,16 @@ func (s *Range) MarshalJSON() ([]byte, error) { // RangeReservation: Represents a range reservation. type RangeReservation struct { // IpPrefixLength: Required. The size of the desired subnet. Use usual - // CIDR range notation. For example, - // '30' to find unused x.x.x.x/30 CIDR range. The goal is to determine - // if one - // of the allocated ranges has enough free space for a subnet of the - // requested - // size. + // CIDR range notation. For example, '30' to find unused x.x.x.x/30 CIDR + // range. The goal is to determine if one of the allocated ranges has + // enough free space for a subnet of the requested size. IpPrefixLength int64 `json:"ipPrefixLength,omitempty"` // SecondaryRangeIpPrefixLengths: Optional. DO NOT USE - Under - // development. - // The size of the desired secondary ranges for the subnet. Use usual - // CIDR - // range notation. For example, '30' to find unused x.x.x.x/30 CIDR - // range. The - // goal is to determine that the allocated ranges have enough free space - // for - // all the requested secondary ranges. + // development. The size of the desired secondary ranges for the subnet. + // Use usual CIDR range notation. For example, '30' to find unused + // x.x.x.x/30 CIDR range. The goal is to determine that the allocated + // ranges have enough free space for all the requested secondary ranges. SecondaryRangeIpPrefixLengths []int64 `json:"secondaryRangeIpPrefixLengths,omitempty"` // ForceSendFields is a list of field names (e.g. "IpPrefixLength") to @@ -4357,27 +3627,122 @@ func (s *RangeReservation) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Route: Represents a route that was created or discovered by a private -// access -// management service. -type Route struct { - // DestRange: Destination CIDR range that this route applies to. 
- DestRange string `json:"destRange,omitempty"` +// RemoveDnsRecordSetMetadata: Metadata provided through GetOperation +// request for the LRO generated by RemoveDnsRecordSet API +type RemoveDnsRecordSetMetadata struct { +} + +// RemoveDnsRecordSetRequest: Request to remove a record set from a +// private managed DNS zone in the shared producer host project. The +// name, type, ttl, and data values must all exactly match an existing +// record set in the specified zone. +type RemoveDnsRecordSetRequest struct { + // ConsumerNetwork: Required. The network that the consumer is using to + // connect with services. Must be in the form of + // projects/{project}/global/networks/{network} {project} is the project + // number, as in '12345' {network} is the network name. + ConsumerNetwork string `json:"consumerNetwork,omitempty"` + + // DnsRecordSet: Required. The DNS record set to remove. + DnsRecordSet *DnsRecordSet `json:"dnsRecordSet,omitempty"` + + // Zone: Required. The name of the private DNS zone in the shared + // producer host project from which the record set will be removed. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConsumerNetwork") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConsumerNetwork") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RemoveDnsRecordSetRequest) MarshalJSON() ([]byte, error) { + type NoMethod RemoveDnsRecordSetRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RemoveDnsRecordSetResponse: Blank message response type for +// RemoveDnsRecordSet API +type RemoveDnsRecordSetResponse struct { +} + +// RemoveDnsZoneMetadata: Metadata provided through GetOperation request +// for the LRO generated by RemoveDnsZone API +type RemoveDnsZoneMetadata struct { +} + +// RemoveDnsZoneRequest: Request to remove a private managed DNS zone in +// the shared producer host project and a matching DNS peering zone in +// the consumer project. +type RemoveDnsZoneRequest struct { + // ConsumerNetwork: Required. The network that the consumer is using to + // connect with services. Must be in the form of + // projects/{project}/global/networks/{network} {project} is the project + // number, as in '12345' {network} is the network name. + ConsumerNetwork string `json:"consumerNetwork,omitempty"` + + // Name: Required. The name for both the private zone in the shared + // producer host project and the peering zone in the consumer project. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConsumerNetwork") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConsumerNetwork") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RemoveDnsZoneRequest) MarshalJSON() ([]byte, error) { + type NoMethod RemoveDnsZoneRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RemoveDnsZoneResponse: Blank message response type for RemoveDnsZone +// API +type RemoveDnsZoneResponse struct { +} + +// Route: Represents a route that was created or discovered by a private +// access management service. +type Route struct { + // DestRange: Destination CIDR range that this route applies to. + DestRange string `json:"destRange,omitempty"` // Name: Route name. See https://cloud.google.com/vpc/docs/routes Name string `json:"name,omitempty"` // Network: Fully-qualified URL of the VPC network in the producer host - // tenant project - // that this route applies to. For - // example: + // tenant project that this route applies to. For example: // `projects/123456/global/networks/host-network` Network string `json:"network,omitempty"` // NextHopGateway: Fully-qualified URL of the gateway that should handle - // matching packets that - // this route applies to. For - // example: + // matching packets that this route applies to. For example: // `projects/123456/global/gateways/default-internet-gateway` NextHopGateway string `json:"nextHopGateway,omitempty"` @@ -4408,22 +3773,17 @@ func (s *Route) MarshalJSON() ([]byte, error) { // allocated ranges. type SearchRangeRequest struct { // IpPrefixLength: Required. The prefix length of the IP range. Use - // usual CIDR range notation. For - // example, '30' to find unused x.x.x.x/30 CIDR range. Actual range will - // be - // determined using allocated range for the consumer peered network - // and - // returned in the result. + // usual CIDR range notation. For example, '30' to find unused + // x.x.x.x/30 CIDR range. Actual range will be determined using + // allocated range for the consumer peered network and returned in the + // result. IpPrefixLength int64 `json:"ipPrefixLength,omitempty"` // Network: Network name in the consumer project. This network must have - // been - // already peered with a shared VPC network using - // CreateConnection + // been already peered with a shared VPC network using CreateConnection // method. Must be in a form - // 'projects/{project}/global/networks/{network}'. - // {project} is a project number, as in '12345' {network} is network - // name. + // 'projects/{project}/global/networks/{network}'. {project} is a + // project number, as in '12345' {network} is network name. Network string `json:"network,omitempty"` // ForceSendFields is a list of field names (e.g. "IpPrefixLength") to @@ -4451,43 +3811,24 @@ func (s *SearchRangeRequest) MarshalJSON() ([]byte, error) { } // Service: `Service` is the root object of Google service configuration -// schema. 
It -// describes basic information about a service, such as the name and -// the -// title, and delegates other aspects to sub-sections. Each sub-section -// is -// either a proto message or a repeated proto message that configures -// a -// specific aspect, such as auth. See each proto message definition for -// details. -// -// Example: -// -// type: google.api.Service -// config_version: 3 -// name: calendar.googleapis.com -// title: Google Calendar API -// apis: -// - name: google.calendar.v3.Calendar -// authentication: -// providers: -// - id: google_calendar_auth -// jwks_uri: https://www.googleapis.com/oauth2/v1/certs -// issuer: https://securetoken.google.com -// rules: -// - selector: "*" -// requirements: -// provider_id: google_calendar_auth +// schema. It describes basic information about a service, such as the +// name and the title, and delegates other aspects to sub-sections. Each +// sub-section is either a proto message or a repeated proto message +// that configures a specific aspect, such as auth. See each proto +// message definition for details. Example: type: google.api.Service +// config_version: 3 name: calendar.googleapis.com title: Google +// Calendar API apis: - name: google.calendar.v3.Calendar +// authentication: providers: - id: google_calendar_auth jwks_uri: +// https://www.googleapis.com/oauth2/v1/certs issuer: +// https://securetoken.google.com rules: - selector: "*" requirements: +// provider_id: google_calendar_auth type Service struct { // Apis: A list of API interfaces exported by this service. Only the - // `name` field - // of the google.protobuf.Api needs to be provided by the - // configuration - // author, as the remaining fields will be derived from the IDL during - // the - // normalization process. It is an error to specify an API interface - // here - // which cannot be resolved against the associated IDL files. + // `name` field of the google.protobuf.Api needs to be provided by the + // configuration author, as the remaining fields will be derived from + // the IDL during the normalization process. It is an error to specify + // an API interface here which cannot be resolved against the associated + // IDL files. Apis []*Api `json:"apis,omitempty"` // Authentication: Auth configuration. @@ -4500,13 +3841,9 @@ type Service struct { Billing *Billing `json:"billing,omitempty"` // ConfigVersion: The semantic version of the service configuration. The - // config version - // affects the interpretation of the service configuration. For - // example, - // certain features are enabled by default for certain config - // versions. - // - // The latest config version is `3`. + // config version affects the interpretation of the service + // configuration. For example, certain features are enabled by default + // for certain config versions. The latest config version is `3`. ConfigVersion int64 `json:"configVersion,omitempty"` // Context: Context configuration. @@ -4521,35 +3858,25 @@ type Service struct { // Documentation: Additional API documentation. Documentation *Documentation `json:"documentation,omitempty"` - // Endpoints: Configuration for network endpoints. If this is empty, - // then an endpoint - // with the same name as the service is automatically generated to - // service all - // defined APIs. + // Endpoints: Configuration for network endpoints. If this is empty, + // then an endpoint with the same name as the service is automatically + // generated to service all defined APIs. 
Endpoints []*Endpoint `json:"endpoints,omitempty"` - // Enums: A list of all enum types included in this API service. - // Enums - // referenced directly or indirectly by the `apis` are - // automatically - // included. Enums which are not referenced but shall be - // included - // should be listed here by name. Example: - // - // enums: - // - name: google.someapi.v1.SomeEnum + // Enums: A list of all enum types included in this API service. Enums + // referenced directly or indirectly by the `apis` are automatically + // included. Enums which are not referenced but shall be included should + // be listed here by name. Example: enums: - name: + // google.someapi.v1.SomeEnum Enums []*Enum `json:"enums,omitempty"` // Http: HTTP configuration. Http *Http `json:"http,omitempty"` // Id: A unique ID for a specific instance of this message, typically - // assigned - // by the client for tracking purpose. Must be no longer than 63 - // characters - // and only lower case letters, digits, '.', '_' and '-' are allowed. - // If - // empty, the server may choose to generate one instead. + // assigned by the client for tracking purpose. Must be no longer than + // 63 characters and only lower case letters, digits, '.', '_' and '-' + // are allowed. If empty, the server may choose to generate one instead. Id string `json:"id,omitempty"` // Logging: Logging configuration. @@ -4562,19 +3889,17 @@ type Service struct { Metrics []*MetricDescriptor `json:"metrics,omitempty"` // MonitoredResources: Defines the monitored resources used by this - // service. This is required - // by the Service.monitoring and Service.logging configurations. + // service. This is required by the Service.monitoring and + // Service.logging configurations. MonitoredResources []*MonitoredResourceDescriptor `json:"monitoredResources,omitempty"` // Monitoring: Monitoring configuration. Monitoring *Monitoring `json:"monitoring,omitempty"` // Name: The service name, which is a DNS-like logical identifier for - // the - // service, such as `calendar.googleapis.com`. The service - // name - // typically goes through DNS verification to make sure the owner - // of the service also owns the DNS name. + // the service, such as `calendar.googleapis.com`. The service name + // typically goes through DNS verification to make sure the owner of the + // service also owns the DNS name. Name string `json:"name,omitempty"` // ProducerProjectId: The Google project that owns this service. @@ -4591,30 +3916,21 @@ type Service struct { SystemParameters *SystemParameters `json:"systemParameters,omitempty"` // SystemTypes: A list of all proto message types included in this API - // service. - // It serves similar purpose as [google.api.Service.types], except - // that - // these types are not needed by user-defined APIs. Therefore, they will - // not - // show up in the generated discovery doc. This field should only be - // used - // to define system APIs in ESF. + // service. It serves similar purpose as [google.api.Service.types], + // except that these types are not needed by user-defined APIs. + // Therefore, they will not show up in the generated discovery doc. This + // field should only be used to define system APIs in ESF. SystemTypes []*Type `json:"systemTypes,omitempty"` // Title: The product title for this service. Title string `json:"title,omitempty"` // Types: A list of all proto message types included in this API - // service. - // Types referenced directly or indirectly by the `apis` - // are - // automatically included. 
Messages which are not referenced but - // shall be included, such as types used by the `google.protobuf.Any` - // type, - // should be listed here by name. Example: - // - // types: - // - name: google.protobuf.Int32 + // service. Types referenced directly or indirectly by the `apis` are + // automatically included. Messages which are not referenced but shall + // be included, such as types used by the `google.protobuf.Any` type, + // should be listed here by name. Example: types: - name: + // google.protobuf.Int32 Types []*Type `json:"types,omitempty"` // Usage: Configuration controlling usage of this service. @@ -4644,35 +3960,22 @@ func (s *Service) MarshalJSON() ([]byte, error) { } // ServiceIdentity: The per-product per-project service identity for a -// service. -// -// -// Use this field to configure per-product per-project service -// identity. -// Example of a service identity configuration. -// -// usage: -// service_identity: -// - service_account_parent: "projects/123456789" -// display_name: "Cloud XXX Service Agent" -// description: "Used as the identity of Cloud XXX to access -// resources" +// service. Use this field to configure per-product per-project service +// identity. Example of a service identity configuration. usage: +// service_identity: - service_account_parent: "projects/123456789" +// display_name: "Cloud XXX Service Agent" description: "Used as the +// identity of Cloud XXX to access resources" type ServiceIdentity struct { // Description: Optional. A user-specified opaque description of the - // service account. - // Must be less than or equal to 256 UTF-8 bytes. + // service account. Must be less than or equal to 256 UTF-8 bytes. Description string `json:"description,omitempty"` - // DisplayName: Optional. A user-specified name for the service - // account. + // DisplayName: Optional. A user-specified name for the service account. // Must be less than or equal to 100 UTF-8 bytes. DisplayName string `json:"displayName,omitempty"` // ServiceAccountParent: A service account project that hosts the - // service accounts. - // - // An example name would be: - // `projects/123456789` + // service accounts. An example name would be: `projects/123456789` ServiceAccountParent string `json:"serviceAccountParent,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -4699,12 +4002,10 @@ func (s *ServiceIdentity) MarshalJSON() ([]byte, error) { } // SourceContext: `SourceContext` represents information about the -// source of a -// protobuf element, like the file in which it is defined. +// source of a protobuf element, like the file in which it is defined. type SourceContext struct { // FileName: The path-qualified name of the .proto file that contained - // the associated - // protobuf element. For example: + // the associated protobuf element. For example: // "google/protobuf/source_context.proto". FileName string `json:"fileName,omitempty"` @@ -4760,32 +4061,24 @@ func (s *SourceInfo) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. -// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. -// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). 
+// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -4812,26 +4105,21 @@ func (s *Status) MarshalJSON() ([]byte, error) { } // Subnetwork: Represents a subnet that was created or discovered by a -// private access -// management service. +// private access management service. type Subnetwork struct { // IpCidrRange: Subnetwork CIDR range in `10.x.x.x/y` format. IpCidrRange string `json:"ipCidrRange,omitempty"` - // Name: Subnetwork name. - // See https://cloud.google.com/compute/docs/vpc/ + // Name: Subnetwork name. See https://cloud.google.com/compute/docs/vpc/ Name string `json:"name,omitempty"` // Network: In the Shared VPC host project, the VPC network that's - // peered with the - // consumer network. For - // example: + // peered with the consumer network. For example: // `projects/1234321/global/networks/host-network` Network string `json:"network,omitempty"` // OutsideAllocation: This is a discovered subnet that is not within the - // current consumer - // allocated ranges. + // current consumer allocated ranges. OutsideAllocation bool `json:"outsideAllocation,omitempty"` // ForceSendFields is a list of field names (e.g. "IpCidrRange") to @@ -4858,14 +4146,12 @@ func (s *Subnetwork) MarshalJSON() ([]byte, error) { } // SystemParameter: Define a parameter's name and location. The -// parameter may be passed as either -// an HTTP header or a URL query parameter, and if both are passed the -// behavior -// is implementation-dependent. +// parameter may be passed as either an HTTP header or a URL query +// parameter, and if both are passed the behavior is +// implementation-dependent. type SystemParameter struct { // HttpHeader: Define the HTTP header name to use for the parameter. It - // is case - // insensitive. + // is case insensitive. HttpHeader string `json:"httpHeader,omitempty"` // Name: Define the name of the parameter, such as "api_key" . It is @@ -4873,8 +4159,7 @@ type SystemParameter struct { Name string `json:"name,omitempty"` // UrlQueryParameter: Define the URL query parameter name to use for the - // parameter. It is case - // sensitive. + // parameter. It is case sensitive. UrlQueryParameter string `json:"urlQueryParameter,omitempty"` // ForceSendFields is a list of field names (e.g. 
"HttpHeader") to @@ -4901,24 +4186,18 @@ func (s *SystemParameter) MarshalJSON() ([]byte, error) { } // SystemParameterRule: Define a system parameter rule mapping system -// parameter definitions to -// methods. +// parameter definitions to methods. type SystemParameterRule struct { // Parameters: Define parameters. Multiple names may be defined for a - // parameter. - // For a given method call, only one of them should be used. If - // multiple - // names are used the behavior is implementation-dependent. - // If none of the specified names are present the behavior - // is + // parameter. For a given method call, only one of them should be used. + // If multiple names are used the behavior is implementation-dependent. + // If none of the specified names are present the behavior is // parameter-dependent. Parameters []*SystemParameter `json:"parameters,omitempty"` // Selector: Selects the methods to which this rule applies. Use '*' to - // indicate all - // methods in all APIs. - // - // Refer to selector for syntax details. + // indicate all methods in all APIs. Refer to selector for syntax + // details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. "Parameters") to @@ -4944,49 +4223,23 @@ func (s *SystemParameterRule) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SystemParameters: ### System parameter configuration -// -// A system parameter is a special kind of parameter defined by the -// API -// system, not by an individual API. It is typically mapped to an HTTP -// header +// SystemParameters: ### System parameter configuration A system +// parameter is a special kind of parameter defined by the API system, +// not by an individual API. It is typically mapped to an HTTP header // and/or a URL query parameter. This configuration specifies which -// methods -// change the names of the system parameters. +// methods change the names of the system parameters. type SystemParameters struct { - // Rules: Define system parameters. - // - // The parameters defined here will override the default - // parameters - // implemented by the system. If this field is missing from the - // service - // config, default system parameters will be used. Default system - // parameters - // and names is implementation-dependent. - // - // Example: define api key for all methods - // - // system_parameters - // rules: - // - selector: "*" - // parameters: - // - name: api_key - // url_query_parameter: api_key - // - // - // Example: define 2 api key names for a specific method. - // - // system_parameters - // rules: - // - selector: "/ListShelves" - // parameters: - // - name: api_key - // http_header: Api-Key1 - // - name: api_key - // http_header: Api-Key2 - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // Rules: Define system parameters. The parameters defined here will + // override the default parameters implemented by the system. If this + // field is missing from the service config, default system parameters + // will be used. Default system parameters and names is + // implementation-dependent. Example: define api key for all methods + // system_parameters rules: - selector: "*" parameters: - name: api_key + // url_query_parameter: api_key Example: define 2 api key names for a + // specific method. 
system_parameters rules: - selector: "/ListShelves" + // parameters: - name: api_key http_header: Api-Key1 - name: api_key + // http_header: Api-Key2 **NOTE:** All service configuration rules + // follow "last one wins" order. Rules []*SystemParameterRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. "Rules") to @@ -5060,32 +4313,75 @@ func (s *Type) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// UpdateDnsRecordSetMetadata: Metadata provided through GetOperation +// request for the LRO generated by UpdateDnsRecordSet API +type UpdateDnsRecordSetMetadata struct { +} + +// UpdateDnsRecordSetRequest: Request to update a record set from a +// private managed DNS zone in the shared producer host project. The +// name, type, ttl, and data values of the existing record set must all +// exactly match an existing record set in the specified zone. +type UpdateDnsRecordSetRequest struct { + // ConsumerNetwork: Required. The network that the consumer is using to + // connect with services. Must be in the form of + // projects/{project}/global/networks/{network} {project} is the project + // number, as in '12345' {network} is the network name. + ConsumerNetwork string `json:"consumerNetwork,omitempty"` + + // ExistingDnsRecordSet: Required. The existing DNS record set to + // update. + ExistingDnsRecordSet *DnsRecordSet `json:"existingDnsRecordSet,omitempty"` + + // NewDnsRecordSet: Required. The new values that the DNS record set + // should be updated to hold. + NewDnsRecordSet *DnsRecordSet `json:"newDnsRecordSet,omitempty"` + + // Zone: Required. The name of the private DNS zone in the shared + // producer host project from which the record set will be removed. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConsumerNetwork") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConsumerNetwork") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *UpdateDnsRecordSetRequest) MarshalJSON() ([]byte, error) { + type NoMethod UpdateDnsRecordSetRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Usage: Configuration controlling usage of a service. type Usage struct { // ProducerNotificationChannel: The full resource name of a channel used - // for sending notifications to the - // service producer. - // - // Google Service Management currently only supports - // [Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a - // notification - // channel. To use Google Cloud Pub/Sub as the channel, this must be the - // name - // of a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name - // format + // for sending notifications to the service producer. 
Google Service + // Management currently only supports [Google Cloud + // Pub/Sub](https://cloud.google.com/pubsub) as a notification channel. + // To use Google Cloud Pub/Sub as the channel, this must be the name of + // a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format // documented in https://cloud.google.com/pubsub/docs/overview. ProducerNotificationChannel string `json:"producerNotificationChannel,omitempty"` // Requirements: Requirements that must be satisfied before a consumer - // project can use the - // service. Each requirement is of the form - // /; - // for example 'serviceusage.googleapis.com/billing-enabled'. + // project can use the service. Each requirement is of the form /; for + // example 'serviceusage.googleapis.com/billing-enabled'. Requirements []string `json:"requirements,omitempty"` - // Rules: A list of usage rules that apply to individual API - // methods. - // + // Rules: A list of usage rules that apply to individual API methods. // **NOTE:** All service configuration rules follow "last one wins" // order. Rules []*UsageRule `json:"rules,omitempty"` @@ -5119,57 +4415,34 @@ func (s *Usage) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UsageRule: Usage configuration rules for the service. -// -// NOTE: Under development. -// -// -// Use this rule to configure unregistered calls for the service. -// Unregistered -// calls are calls that do not contain consumer project -// identity. -// (Example: calls that do not contain an API key). -// By default, API methods do not allow unregistered calls, and each -// method call -// must be identified by a consumer project identity. Use this rule -// to -// allow/disallow unregistered calls. -// -// Example of an API that wants to allow unregistered calls for entire -// service. -// -// usage: -// rules: -// - selector: "*" -// allow_unregistered_calls: true -// -// Example of a method that wants to allow unregistered calls. -// -// usage: -// rules: -// - selector: +// UsageRule: Usage configuration rules for the service. NOTE: Under +// development. Use this rule to configure unregistered calls for the +// service. Unregistered calls are calls that do not contain consumer +// project identity. (Example: calls that do not contain an API key). By +// default, API methods do not allow unregistered calls, and each method +// call must be identified by a consumer project identity. Use this rule +// to allow/disallow unregistered calls. Example of an API that wants to +// allow unregistered calls for entire service. usage: rules: - +// selector: "*" allow_unregistered_calls: true Example of a method that +// wants to allow unregistered calls. usage: rules: - selector: // "google.example.library.v1.LibraryService.CreateBook" -// allow_unregistered_calls: true +// allow_unregistered_calls: true type UsageRule struct { // AllowUnregisteredCalls: If true, the selected method allows - // unregistered calls, e.g. calls - // that don't identify any user or application. + // unregistered calls, e.g. calls that don't identify any user or + // application. AllowUnregisteredCalls bool `json:"allowUnregisteredCalls,omitempty"` // Selector: Selects the methods to which this rule applies. Use '*' to - // indicate all - // methods in all APIs. - // - // Refer to selector for syntax details. + // indicate all methods in all APIs. Refer to selector for syntax + // details. 
Selector string `json:"selector,omitempty"` // SkipServiceControl: If true, the selected method should skip service - // control and the control - // plane features, such as quota and billing, will not be - // available. - // This flag is used by Google Cloud Endpoints to bypass checks for - // internal - // methods, such as service health check methods. + // control and the control plane features, such as quota and billing, + // will not be available. This flag is used by Google Cloud Endpoints to + // bypass checks for internal methods, such as service health check + // methods. SkipServiceControl bool `json:"skipServiceControl,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -5199,36 +4472,27 @@ func (s *UsageRule) MarshalJSON() ([]byte, error) { type ValidateConsumerConfigRequest struct { // ConsumerNetwork: Required. The network that the consumer is using to - // connect with services. Must be in - // the form of projects/{project}/global/networks/{network} {project} is - // a - // project number, as in '12345' {network} is network name. + // connect with services. Must be in the form of + // projects/{project}/global/networks/{network} {project} is a project + // number, as in '12345' {network} is network name. ConsumerNetwork string `json:"consumerNetwork,omitempty"` // ConsumerProject: NETWORK_NOT_IN_CONSUMERS_PROJECT, - // NETWORK_NOT_IN_CONSUMERS_HOST_PROJECT, - // and HOST_PROJECT_NOT_FOUND are done when consumer_project is - // provided. + // NETWORK_NOT_IN_CONSUMERS_HOST_PROJECT, and HOST_PROJECT_NOT_FOUND are + // done when consumer_project is provided. ConsumerProject *ConsumerProject `json:"consumerProject,omitempty"` // RangeReservation: RANGES_EXHAUSTED, RANGES_EXHAUSTED, and - // RANGES_DELETED_LATER are done - // when range_reservation is provided. + // RANGES_DELETED_LATER are done when range_reservation is provided. RangeReservation *RangeReservation `json:"rangeReservation,omitempty"` // ValidateNetwork: The validations will be performed in the order - // listed in the - // ValidationError enum. The first failure will return. If a validation - // is not - // requested, then the next one will be - // performed. + // listed in the ValidationError enum. The first failure will return. If + // a validation is not requested, then the next one will be performed. // SERVICE_NETWORKING_NOT_ENABLED and NETWORK_NOT_PEERED checks are - // performed - // for all requests where validation is requested. NETWORK_NOT_FOUND - // and - // NETWORK_DISCONNECTED checks are done for requests that - // have - // validate_network set to true. + // performed for all requests where validation is requested. + // NETWORK_NOT_FOUND and NETWORK_DISCONNECTED checks are done for + // requests that have validate_network set to true. ValidateNetwork bool `json:"validateNetwork,omitempty"` // ForceSendFields is a list of field names (e.g. "ConsumerNetwork") to @@ -5270,20 +4534,16 @@ type ValidateConsumerConfigResponse struct { // "NETWORK_PEERING_DELETED" - The peering was created and later // deleted. // "NETWORK_NOT_IN_CONSUMERS_PROJECT" - The network is a regular VPC - // but the network is not in the consumer's - // project. + // but the network is not in the consumer's project. // "NETWORK_NOT_IN_CONSUMERS_HOST_PROJECT" - The consumer project is a - // service project, and network is a shared VPC, - // but the network is not in the host project of this consumer project. 
+ // service project, and network is a shared VPC, but the network is not + // in the host project of this consumer project. // "HOST_PROJECT_NOT_FOUND" - The host project associated with the - // consumer project - // was not found. + // consumer project was not found. // "CONSUMER_PROJECT_NOT_SERVICE_PROJECT" - The consumer project is - // not a service project for - // the specified host project. + // not a service project for the specified host project. // "RANGES_EXHAUSTED" - The reserved IP ranges do not have enough - // space to create - // a subnet of desired size. + // space to create a subnet of desired size. // "RANGES_NOT_RESERVED" - The IP ranges were not reserved. // "RANGES_DELETED_LATER" - The IP ranges were reserved but deleted // later. @@ -5330,23 +4590,15 @@ type OperationsCancelCall struct { } // Cancel: Starts asynchronous cancellation on a long-running operation. -// The server -// makes a best effort to cancel the operation, but success is -// not -// guaranteed. If the server doesn't support this method, it -// returns -// `google.rpc.Code.UNIMPLEMENTED`. Clients can -// use -// Operations.GetOperation or -// other methods to check whether the cancellation succeeded or whether -// the -// operation completed despite cancellation. On successful -// cancellation, -// the operation is not deleted; instead, it becomes an operation -// with -// an Operation.error value with a google.rpc.Status.code of -// 1, -// corresponding to `Code.CANCELLED`. +// The server makes a best effort to cancel the operation, but success +// is not guaranteed. If the server doesn't support this method, it +// returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use +// Operations.GetOperation or other methods to check whether the +// cancellation succeeded or whether the operation completed despite +// cancellation. On successful cancellation, the operation is not +// deleted; instead, it becomes an operation with an Operation.error +// value with a google.rpc.Status.code of 1, corresponding to +// `Code.CANCELLED`. func (r *OperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *OperationsCancelCall { c := &OperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5381,7 +4633,7 @@ func (c *OperationsCancelCall) Header() http.Header { func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5445,7 +4697,7 @@ func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) } return ret, nil // { - // "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + // "description": "Starts asynchronous cancellation on a long-running operation. 
The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", // "flatPath": "v1/operations/{operationsId}:cancel", // "httpMethod": "POST", // "id": "servicenetworking.operations.cancel", @@ -5487,12 +4739,9 @@ type OperationsDeleteCall struct { } // Delete: Deletes a long-running operation. This method indicates that -// the client is -// no longer interested in the operation result. It does not cancel -// the -// operation. If the server doesn't support this method, it -// returns -// `google.rpc.Code.UNIMPLEMENTED`. +// the client is no longer interested in the operation result. It does +// not cancel the operation. If the server doesn't support this method, +// it returns `google.rpc.Code.UNIMPLEMENTED`. func (r *OperationsService) Delete(name string) *OperationsDeleteCall { c := &OperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5526,7 +4775,7 @@ func (c *OperationsDeleteCall) Header() http.Header { func (c *OperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5585,7 +4834,7 @@ func (c *OperationsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) } return ret, nil // { - // "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + // "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", // "flatPath": "v1/operations/{operationsId}", // "httpMethod": "DELETE", // "id": "servicenetworking.operations.delete", @@ -5624,11 +4873,9 @@ type OperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this -// method to poll the operation result at intervals as recommended by -// the API -// service. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. 
func (r *OperationsService) Get(name string) *OperationsGetCall { c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5672,7 +4919,7 @@ func (c *OperationsGetCall) Header() http.Header { func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5734,7 +4981,7 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", // "flatPath": "v1/operations/{operationsId}", // "httpMethod": "GET", // "id": "servicenetworking.operations.get", @@ -5774,22 +5021,15 @@ type OperationsListCall struct { } // List: Lists operations that match the specified filter in the -// request. If the -// server doesn't support this method, it returns -// `UNIMPLEMENTED`. -// -// NOTE: the `name` binding allows API services to override the -// binding -// to use different resource name schemes, such as `users/*/operations`. -// To -// override the binding, API services can add a binding such -// as -// "/v1/{name=users/*}/operations" to their service configuration. -// For backwards compatibility, the default name includes the -// operations -// collection id, however overriding users must ensure the name -// binding -// is the parent resource, without the operations collection id. +// request. If the server doesn't support this method, it returns +// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to +// override the binding to use different resource name schemes, such as +// `users/*/operations`. To override the binding, API services can add a +// binding such as "/v1/{name=users/*}/operations" to their service +// configuration. For backwards compatibility, the default name includes +// the operations collection id, however overriding users must ensure +// the name binding is the parent resource, without the operations +// collection id. func (r *OperationsService) List(name string) *OperationsListCall { c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5854,7 +5094,7 @@ func (c *OperationsListCall) Header() http.Header { func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5916,7 +5156,7 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsRe } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. 
To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", // "flatPath": "v1/operations", // "httpMethod": "GET", // "id": "servicenetworking.operations.list", @@ -5993,20 +5233,14 @@ type ServicesAddSubnetworkCall struct { } // AddSubnetwork: For service producers, provisions a new subnet in a -// peered service's shared -// VPC network in the requested region and with the requested size -// that's -// expressed as a CIDR range (number of leading bits of ipV4 network -// mask). -// The method checks against the assigned allocated ranges to find -// a -// non-conflicting IP address range. The method will reuse a subnet -// if -// subsequent calls contain the same subnet name, region, and prefix -// length. -// This method will make producer's tenant project to be a shared VPC -// service -// project as needed. +// peered service's shared VPC network in the requested region and with +// the requested size that's expressed as a CIDR range (number of +// leading bits of ipV4 network mask). The method checks against the +// assigned allocated ranges to find a non-conflicting IP address range. +// The method will reuse a subnet if subsequent calls contain the same +// subnet name, region, and prefix length. This method will make +// producer's tenant project to be a shared VPC service project as +// needed. func (r *ServicesService) AddSubnetwork(parent string, addsubnetworkrequest *AddSubnetworkRequest) *ServicesAddSubnetworkCall { c := &ServicesAddSubnetworkCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6041,7 +5275,7 @@ func (c *ServicesAddSubnetworkCall) Header() http.Header { func (c *ServicesAddSubnetworkCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6105,7 +5339,7 @@ func (c *ServicesAddSubnetworkCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "For service producers, provisions a new subnet in a peered service's shared\nVPC network in the requested region and with the requested size that's\nexpressed as a CIDR range (number of leading bits of ipV4 network mask).\nThe method checks against the assigned allocated ranges to find a\nnon-conflicting IP address range. 
The method will reuse a subnet if\nsubsequent calls contain the same subnet name, region, and prefix length.\nThis method will make producer's tenant project to be a shared VPC service\nproject as needed.", + // "description": "For service producers, provisions a new subnet in a peered service's shared VPC network in the requested region and with the requested size that's expressed as a CIDR range (number of leading bits of ipV4 network mask). The method checks against the assigned allocated ranges to find a non-conflicting IP address range. The method will reuse a subnet if subsequent calls contain the same subnet name, region, and prefix length. This method will make producer's tenant project to be a shared VPC service project as needed.", // "flatPath": "v1/services/{servicesId}/{servicesId1}/{servicesId2}:addSubnetwork", // "httpMethod": "POST", // "id": "servicenetworking.services.addSubnetwork", @@ -6114,7 +5348,7 @@ func (c *ServicesAddSubnetworkCall) Do(opts ...googleapi.CallOption) (*Operation // ], // "parameters": { // "parent": { - // "description": "Required. A tenant project in the service producer organization, in the\nfollowing format: services/{service}/{collection-id}/{resource-id}.\n{collection-id} is the cloud resource collection type that represents the\ntenant project. Only `projects` are supported.\n{resource-id} is the tenant project numeric id, such as\n`123456`. {service} the name of the peering service, such as\n`service-peering.example.com`. This service must already be\nenabled in the service consumer's project.", + // "description": "Required. A tenant project in the service producer organization, in the following format: services/{service}/{collection-id}/{resource-id}. {collection-id} is the cloud resource collection type that represents the tenant project. Only `projects` are supported. {resource-id} is the tenant project numeric id, such as `123456`. {service} the name of the peering service, such as `service-peering.example.com`. This service must already be enabled in the service consumer's project.", // "location": "path", // "pattern": "^services/[^/]+/[^/]+/[^/]+$", // "required": true, @@ -6183,7 +5417,7 @@ func (c *ServicesDisableVpcServiceControlsCall) Header() http.Header { func (c *ServicesDisableVpcServiceControlsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6256,7 +5490,7 @@ func (c *ServicesDisableVpcServiceControlsCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "parent": { - // "description": "The service that is managing peering connectivity for a service producer's\norganization. For Google services that support this functionality, this\nvalue is `services/servicenetworking.googleapis.com`.", + // "description": "The service that is managing peering connectivity for a service producer's organization. 
For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`.", // "location": "path", // "pattern": "^services/[^/]+$", // "required": true, @@ -6325,7 +5559,7 @@ func (c *ServicesEnableVpcServiceControlsCall) Header() http.Header { func (c *ServicesEnableVpcServiceControlsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6398,7 +5632,7 @@ func (c *ServicesEnableVpcServiceControlsCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "parent": { - // "description": "The service that is managing peering connectivity for a service producer's\norganization. For Google services that support this functionality, this\nvalue is `services/servicenetworking.googleapis.com`.", + // "description": "The service that is managing peering connectivity for a service producer's organization. For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`.", // "location": "path", // "pattern": "^services/[^/]+$", // "required": true, @@ -6432,16 +5666,11 @@ type ServicesSearchRangeCall struct { } // SearchRange: Service producers can use this method to find a -// currently unused range -// within consumer allocated ranges. This returned range is not -// reserved, -// and not guaranteed to remain unused. It will validate previously -// provided -// allocated ranges, find non-conflicting sub-range of requested -// size -// (expressed in number of leading bits of ipv4 network mask, as in CIDR -// range -// notation). +// currently unused range within consumer allocated ranges. This +// returned range is not reserved, and not guaranteed to remain unused. +// It will validate previously provided allocated ranges, find +// non-conflicting sub-range of requested size (expressed in number of +// leading bits of ipv4 network mask, as in CIDR range notation). func (r *ServicesService) SearchRange(parent string, searchrangerequest *SearchRangeRequest) *ServicesSearchRangeCall { c := &ServicesSearchRangeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6476,7 +5705,7 @@ func (c *ServicesSearchRangeCall) Header() http.Header { func (c *ServicesSearchRangeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6540,7 +5769,7 @@ func (c *ServicesSearchRangeCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Service producers can use this method to find a currently unused range\nwithin consumer allocated ranges. This returned range is not reserved,\nand not guaranteed to remain unused. It will validate previously provided\nallocated ranges, find non-conflicting sub-range of requested size\n(expressed in number of leading bits of ipv4 network mask, as in CIDR range\nnotation).", + // "description": "Service producers can use this method to find a currently unused range within consumer allocated ranges. This returned range is not reserved, and not guaranteed to remain unused. 
It will validate previously provided allocated ranges, find non-conflicting sub-range of requested size (expressed in number of leading bits of ipv4 network mask, as in CIDR range notation).", // "flatPath": "v1/services/{servicesId}:searchRange", // "httpMethod": "POST", // "id": "servicenetworking.services.searchRange", @@ -6549,7 +5778,7 @@ func (c *ServicesSearchRangeCall) Do(opts ...googleapi.CallOption) (*Operation, // ], // "parameters": { // "parent": { - // "description": "Required. This is in a form services/{service}. {service} the name of the private\naccess management service, for example 'service-peering.example.com'.", + // "description": "Required. This is in a form services/{service}. {service} the name of the private access management service, for example 'service-peering.example.com'.", // "location": "path", // "pattern": "^services/[^/]+$", // "required": true, @@ -6583,13 +5812,10 @@ type ServicesValidateCall struct { } // Validate: Service producers use this method to validate if the -// consumer provided -// network, project and requested range are valid. This allows them to -// use -// a fail-fast mechanism for consumer requests, and not have to wait -// for -// AddSubnetwork operation completion to determine if user request is -// invalid. +// consumer provided network, project and requested range are valid. +// This allows them to use a fail-fast mechanism for consumer requests, +// and not have to wait for AddSubnetwork operation completion to +// determine if user request is invalid. func (r *ServicesService) Validate(parent string, validateconsumerconfigrequest *ValidateConsumerConfigRequest) *ServicesValidateCall { c := &ServicesValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6624,7 +5850,7 @@ func (c *ServicesValidateCall) Header() http.Header { func (c *ServicesValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6688,7 +5914,7 @@ func (c *ServicesValidateCall) Do(opts ...googleapi.CallOption) (*ValidateConsum } return ret, nil // { - // "description": "Service producers use this method to validate if the consumer provided\nnetwork, project and requested range are valid. This allows them to use\na fail-fast mechanism for consumer requests, and not have to wait for\nAddSubnetwork operation completion to determine if user request is invalid.", + // "description": "Service producers use this method to validate if the consumer provided network, project and requested range are valid. This allows them to use a fail-fast mechanism for consumer requests, and not have to wait for AddSubnetwork operation completion to determine if user request is invalid.", // "flatPath": "v1/services/{servicesId}:validate", // "httpMethod": "POST", // "id": "servicenetworking.services.validate", @@ -6697,7 +5923,7 @@ func (c *ServicesValidateCall) Do(opts ...googleapi.CallOption) (*ValidateConsum // ], // "parameters": { // "parent": { - // "description": "Required. This is in a form services/{service} where {service} is the name of the\nprivate access management service. For example\n'service-peering.example.com'.", + // "description": "Required. This is in a form services/{service} where {service} is the name of the private access management service. 
For example 'service-peering.example.com'.", // "location": "path", // "pattern": "^services/[^/]+$", // "required": true, @@ -6731,18 +5957,13 @@ type ServicesConnectionsCreateCall struct { } // Create: Creates a private connection that establishes a VPC Network -// Peering -// connection to a VPC network in the service producer's -// organization. -// The administrator of the service consumer's VPC network invokes -// this -// method. The administrator must assign one or more allocated IP ranges -// for -// provisioning subnetworks in the service producer's VPC network. -// This -// connection is used for all supported services in the service -// producer's -// organization, so it only needs to be invoked once. +// Peering connection to a VPC network in the service producer's +// organization. The administrator of the service consumer's VPC network +// invokes this method. The administrator must assign one or more +// allocated IP ranges for provisioning subnetworks in the service +// producer's VPC network. This connection is used for all supported +// services in the service producer's organization, so it only needs to +// be invoked once. func (r *ServicesConnectionsService) Create(parent string, connection *Connection) *ServicesConnectionsCreateCall { c := &ServicesConnectionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6777,7 +5998,7 @@ func (c *ServicesConnectionsCreateCall) Header() http.Header { func (c *ServicesConnectionsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6841,7 +6062,7 @@ func (c *ServicesConnectionsCreateCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Creates a private connection that establishes a VPC Network Peering\nconnection to a VPC network in the service producer's organization.\nThe administrator of the service consumer's VPC network invokes this\nmethod. The administrator must assign one or more allocated IP ranges for\nprovisioning subnetworks in the service producer's VPC network. This\nconnection is used for all supported services in the service producer's\norganization, so it only needs to be invoked once.", + // "description": "Creates a private connection that establishes a VPC Network Peering connection to a VPC network in the service producer's organization. The administrator of the service consumer's VPC network invokes this method. The administrator must assign one or more allocated IP ranges for provisioning subnetworks in the service producer's VPC network. This connection is used for all supported services in the service producer's organization, so it only needs to be invoked once.", // "flatPath": "v1/services/{servicesId}/connections", // "httpMethod": "POST", // "id": "servicenetworking.services.connections.create", @@ -6850,7 +6071,7 @@ func (c *ServicesConnectionsCreateCall) Do(opts ...googleapi.CallOption) (*Opera // ], // "parameters": { // "parent": { - // "description": "The service that is managing peering connectivity for a service producer's\norganization. For Google services that support this functionality, this\nvalue is `services/servicenetworking.googleapis.com`.", + // "description": "The service that is managing peering connectivity for a service producer's organization. 
For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`.", // "location": "path", // "pattern": "^services/[^/]+$", // "required": true, @@ -6884,8 +6105,7 @@ type ServicesConnectionsListCall struct { } // List: List the private connections that are configured in a service -// consumer's -// VPC network. +// consumer's VPC network. func (r *ServicesConnectionsService) List(parent string) *ServicesConnectionsListCall { c := &ServicesConnectionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6893,17 +6113,12 @@ func (r *ServicesConnectionsService) List(parent string) *ServicesConnectionsLis } // Network sets the optional parameter "network": The name of service -// consumer's VPC network that's connected with service -// producer network through a private connection. The network name must -// be in -// the following format: -// `projects/{project}/global/networks/{network}`. {project} is -// a -// project number, such as in `12345` that includes the VPC -// service -// consumer's VPC network. {network} is the name of the service -// consumer's VPC -// network. +// consumer's VPC network that's connected with service producer network +// through a private connection. The network name must be in the +// following format: `projects/{project}/global/networks/{network}`. +// {project} is a project number, such as in `12345` that includes the +// VPC service consumer's VPC network. {network} is the name of the +// service consumer's VPC network. func (c *ServicesConnectionsListCall) Network(network string) *ServicesConnectionsListCall { c.urlParams_.Set("network", network) return c @@ -6946,7 +6161,7 @@ func (c *ServicesConnectionsListCall) Header() http.Header { func (c *ServicesConnectionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7008,7 +6223,7 @@ func (c *ServicesConnectionsListCall) Do(opts ...googleapi.CallOption) (*ListCon } return ret, nil // { - // "description": "List the private connections that are configured in a service consumer's\nVPC network.", + // "description": "List the private connections that are configured in a service consumer's VPC network.", // "flatPath": "v1/services/{servicesId}/connections", // "httpMethod": "GET", // "id": "servicenetworking.services.connections.list", @@ -7017,12 +6232,12 @@ func (c *ServicesConnectionsListCall) Do(opts ...googleapi.CallOption) (*ListCon // ], // "parameters": { // "network": { - // "description": "The name of service consumer's VPC network that's connected with service\nproducer network through a private connection. The network name must be in\nthe following format:\n`projects/{project}/global/networks/{network}`. {project} is a\nproject number, such as in `12345` that includes the VPC service\nconsumer's VPC network. {network} is the name of the service consumer's VPC\nnetwork.", + // "description": "The name of service consumer's VPC network that's connected with service producer network through a private connection. The network name must be in the following format: `projects/{project}/global/networks/{network}`. {project} is a project number, such as in `12345` that includes the VPC service consumer's VPC network. 
{network} is the name of the service consumer's VPC network.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "The service that is managing peering connectivity for a service producer's\norganization. For Google services that support this functionality, this\nvalue is `services/servicenetworking.googleapis.com`.\nIf you specify `services/-` as the parameter value, all configured peering\nservices are listed.", + // "description": "The service that is managing peering connectivity for a service producer's organization. For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`. If you specify `services/-` as the parameter value, all configured peering services are listed.", // "location": "path", // "pattern": "^services/[^/]+$", // "required": true, @@ -7062,16 +6277,15 @@ func (r *ServicesConnectionsService) Patch(name string, connection *Connection) } // Force sets the optional parameter "force": If a previously defined -// allocated range is removed, force flag must be -// set to true. +// allocated range is removed, force flag must be set to true. func (c *ServicesConnectionsPatchCall) Force(force bool) *ServicesConnectionsPatchCall { c.urlParams_.Set("force", fmt.Sprint(force)) return c } // UpdateMask sets the optional parameter "updateMask": The update mask. -// If this is omitted, it defaults to "*". You can only -// update the listed peering ranges. +// If this is omitted, it defaults to "*". You can only update the +// listed peering ranges. func (c *ServicesConnectionsPatchCall) UpdateMask(updateMask string) *ServicesConnectionsPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -7104,7 +6318,7 @@ func (c *ServicesConnectionsPatchCall) Header() http.Header { func (c *ServicesConnectionsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7177,19 +6391,19 @@ func (c *ServicesConnectionsPatchCall) Do(opts ...googleapi.CallOption) (*Operat // ], // "parameters": { // "force": { - // "description": "If a previously defined allocated range is removed, force flag must be\nset to true.", + // "description": "If a previously defined allocated range is removed, force flag must be set to true.", // "location": "query", // "type": "boolean" // }, // "name": { - // "description": "The private service connection that connects to a service producer\norganization. The name includes both the private service name and the VPC\nnetwork peering name in the format of\n`services/{peering_service_name}/connections/{vpc_peering_name}`. For\nGoogle services that support this functionality, this is\n`services/servicenetworking.googleapis.com/connections/servicenetworking-googleapis-com`.", + // "description": "The private service connection that connects to a service producer organization. The name includes both the private service name and the VPC network peering name in the format of `services/{peering_service_name}/connections/{vpc_peering_name}`. 
For Google services that support this functionality, this is `services/servicenetworking.googleapis.com/connections/servicenetworking-googleapis-com`.", // "location": "path", // "pattern": "^services/[^/]+/connections/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "The update mask. If this is omitted, it defaults to \"*\". You can only\nupdate the listed peering ranges.", + // "description": "The update mask. If this is omitted, it defaults to \"*\". You can only update the listed peering ranges.", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -7210,6 +6424,1137 @@ func (c *ServicesConnectionsPatchCall) Do(opts ...googleapi.CallOption) (*Operat } +// method id "servicenetworking.services.dnsRecordSets.add": + +type ServicesDnsRecordSetsAddCall struct { + s *APIService + parent string + adddnsrecordsetrequest *AddDnsRecordSetRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Add: Service producers can use this method to add DNS record sets to +// private DNS zones in the shared producer host project. +func (r *ServicesDnsRecordSetsService) Add(parent string, adddnsrecordsetrequest *AddDnsRecordSetRequest) *ServicesDnsRecordSetsAddCall { + c := &ServicesDnsRecordSetsAddCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.adddnsrecordsetrequest = adddnsrecordsetrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ServicesDnsRecordSetsAddCall) Fields(s ...googleapi.Field) *ServicesDnsRecordSetsAddCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ServicesDnsRecordSetsAddCall) Context(ctx context.Context) *ServicesDnsRecordSetsAddCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ServicesDnsRecordSetsAddCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ServicesDnsRecordSetsAddCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.adddnsrecordsetrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/dnsRecordSets:add") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "servicenetworking.services.dnsRecordSets.add" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ServicesDnsRecordSetsAddCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Service producers can use this method to add DNS record sets to private DNS zones in the shared producer host project.", + // "flatPath": "v1/services/{servicesId}/dnsRecordSets:add", + // "httpMethod": "POST", + // "id": "servicenetworking.services.dnsRecordSets.add", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The service that is managing peering connectivity for a service producer's organization. For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`.", + // "location": "path", + // "pattern": "^services/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/dnsRecordSets:add", + // "request": { + // "$ref": "AddDnsRecordSetRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/service.management" + // ] + // } + +} + +// method id "servicenetworking.services.dnsRecordSets.remove": + +type ServicesDnsRecordSetsRemoveCall struct { + s *APIService + parent string + removednsrecordsetrequest *RemoveDnsRecordSetRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Remove: Service producers can use this method to remove DNS record +// sets from private DNS zones in the shared producer host project. +func (r *ServicesDnsRecordSetsService) Remove(parent string, removednsrecordsetrequest *RemoveDnsRecordSetRequest) *ServicesDnsRecordSetsRemoveCall { + c := &ServicesDnsRecordSetsRemoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.removednsrecordsetrequest = removednsrecordsetrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ServicesDnsRecordSetsRemoveCall) Fields(s ...googleapi.Field) *ServicesDnsRecordSetsRemoveCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
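+// The context is only consulted when Do is called to send the request.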
+func (c *ServicesDnsRecordSetsRemoveCall) Context(ctx context.Context) *ServicesDnsRecordSetsRemoveCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ServicesDnsRecordSetsRemoveCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ServicesDnsRecordSetsRemoveCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.removednsrecordsetrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/dnsRecordSets:remove") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "servicenetworking.services.dnsRecordSets.remove" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ServicesDnsRecordSetsRemoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Service producers can use this method to remove DNS record sets from private DNS zones in the shared producer host project.", + // "flatPath": "v1/services/{servicesId}/dnsRecordSets:remove", + // "httpMethod": "POST", + // "id": "servicenetworking.services.dnsRecordSets.remove", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The service that is managing peering connectivity for a service producer's organization. 
For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`.", + // "location": "path", + // "pattern": "^services/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/dnsRecordSets:remove", + // "request": { + // "$ref": "RemoveDnsRecordSetRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/service.management" + // ] + // } + +} + +// method id "servicenetworking.services.dnsRecordSets.update": + +type ServicesDnsRecordSetsUpdateCall struct { + s *APIService + parent string + updatednsrecordsetrequest *UpdateDnsRecordSetRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Service producers can use this method to update DNS record +// sets from private DNS zones in the shared producer host project. +func (r *ServicesDnsRecordSetsService) Update(parent string, updatednsrecordsetrequest *UpdateDnsRecordSetRequest) *ServicesDnsRecordSetsUpdateCall { + c := &ServicesDnsRecordSetsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.updatednsrecordsetrequest = updatednsrecordsetrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ServicesDnsRecordSetsUpdateCall) Fields(s ...googleapi.Field) *ServicesDnsRecordSetsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ServicesDnsRecordSetsUpdateCall) Context(ctx context.Context) *ServicesDnsRecordSetsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ServicesDnsRecordSetsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ServicesDnsRecordSetsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.updatednsrecordsetrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/dnsRecordSets:update") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "servicenetworking.services.dnsRecordSets.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ServicesDnsRecordSetsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Service producers can use this method to update DNS record sets from private DNS zones in the shared producer host project.", + // "flatPath": "v1/services/{servicesId}/dnsRecordSets:update", + // "httpMethod": "POST", + // "id": "servicenetworking.services.dnsRecordSets.update", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The service that is managing peering connectivity for a service producer's organization. For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`.", + // "location": "path", + // "pattern": "^services/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/dnsRecordSets:update", + // "request": { + // "$ref": "UpdateDnsRecordSetRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/service.management" + // ] + // } + +} + +// method id "servicenetworking.services.dnsZones.add": + +type ServicesDnsZonesAddCall struct { + s *APIService + parent string + adddnszonerequest *AddDnsZoneRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Add: Service producers can use this method to add private DNS zones +// in the shared producer host project and matching peering zones in the +// consumer project. +func (r *ServicesDnsZonesService) Add(parent string, adddnszonerequest *AddDnsZoneRequest) *ServicesDnsZonesAddCall { + c := &ServicesDnsZonesAddCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.adddnszonerequest = adddnszonerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ServicesDnsZonesAddCall) Fields(s ...googleapi.Field) *ServicesDnsZonesAddCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ServicesDnsZonesAddCall) Context(ctx context.Context) *ServicesDnsZonesAddCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
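+// Any values set on the returned header are copied onto the outgoing HTTP request when Do is invoked.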
+func (c *ServicesDnsZonesAddCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ServicesDnsZonesAddCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.adddnszonerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/dnsZones:add") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "servicenetworking.services.dnsZones.add" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ServicesDnsZonesAddCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Service producers can use this method to add private DNS zones in the shared producer host project and matching peering zones in the consumer project.", + // "flatPath": "v1/services/{servicesId}/dnsZones:add", + // "httpMethod": "POST", + // "id": "servicenetworking.services.dnsZones.add", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The service that is managing peering connectivity for a service producer's organization. 
For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`.", + // "location": "path", + // "pattern": "^services/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/dnsZones:add", + // "request": { + // "$ref": "AddDnsZoneRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/service.management" + // ] + // } + +} + +// method id "servicenetworking.services.dnsZones.remove": + +type ServicesDnsZonesRemoveCall struct { + s *APIService + parent string + removednszonerequest *RemoveDnsZoneRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Remove: Service producers can use this method to remove private DNS +// zones in the shared producer host project and matching peering zones +// in the consumer project. +func (r *ServicesDnsZonesService) Remove(parent string, removednszonerequest *RemoveDnsZoneRequest) *ServicesDnsZonesRemoveCall { + c := &ServicesDnsZonesRemoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.removednszonerequest = removednszonerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ServicesDnsZonesRemoveCall) Fields(s ...googleapi.Field) *ServicesDnsZonesRemoveCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ServicesDnsZonesRemoveCall) Context(ctx context.Context) *ServicesDnsZonesRemoveCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ServicesDnsZonesRemoveCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ServicesDnsZonesRemoveCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.removednszonerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/dnsZones:remove") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "servicenetworking.services.dnsZones.remove" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
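+// The returned Operation is long-running; callers typically poll it through the operations API to learn when the private DNS zone and the matching peering zone have been created.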
+func (c *ServicesDnsZonesRemoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Service producers can use this method to remove private DNS zones in the shared producer host project and matching peering zones in the consumer project.", + // "flatPath": "v1/services/{servicesId}/dnsZones:remove", + // "httpMethod": "POST", + // "id": "servicenetworking.services.dnsZones.remove", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The service that is managing peering connectivity for a service producer's organization. For Google services that support this functionality, this value is `services/servicenetworking.googleapis.com`.", + // "location": "path", + // "pattern": "^services/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/dnsZones:remove", + // "request": { + // "$ref": "RemoveDnsZoneRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/service.management" + // ] + // } + +} + +// method id "servicenetworking.services.projects.global.networks.peeredDnsDomains.create": + +type ServicesProjectsGlobalNetworksPeeredDnsDomainsCreateCall struct { + s *APIService + parent string + peereddnsdomain *PeeredDnsDomain + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a peered DNS domain which sends requests for records +// in given namespace originating in the service producer VPC network to +// the consumer VPC network to be resolved. +func (r *ServicesProjectsGlobalNetworksPeeredDnsDomainsService) Create(parent string, peereddnsdomain *PeeredDnsDomain) *ServicesProjectsGlobalNetworksPeeredDnsDomainsCreateCall { + c := &ServicesProjectsGlobalNetworksPeeredDnsDomainsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.peereddnsdomain = peereddnsdomain + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ServicesProjectsGlobalNetworksPeeredDnsDomainsCreateCall) Fields(s ...googleapi.Field) *ServicesProjectsGlobalNetworksPeeredDnsDomainsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ServicesProjectsGlobalNetworksPeeredDnsDomainsCreateCall) Context(ctx context.Context) *ServicesProjectsGlobalNetworksPeeredDnsDomainsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ServicesProjectsGlobalNetworksPeeredDnsDomainsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ServicesProjectsGlobalNetworksPeeredDnsDomainsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.peereddnsdomain) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/peeredDnsDomains") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "servicenetworking.services.projects.global.networks.peeredDnsDomains.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ServicesProjectsGlobalNetworksPeeredDnsDomainsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a peered DNS domain which sends requests for records in given namespace originating in the service producer VPC network to the consumer VPC network to be resolved.", + // "flatPath": "v1/services/{servicesId}/projects/{projectsId}/global/networks/{networksId}/peeredDnsDomains", + // "httpMethod": "POST", + // "id": "servicenetworking.services.projects.global.networks.peeredDnsDomains.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. Parent resource identifying the connection for which the peered DNS domain will be created in the format: `services/{service}/projects/{project}/global/networks/{network}` {service} is the peering service that is managing connectivity for the service producer's organization. 
For Google services that support this functionality, this value is `servicenetworking.googleapis.com`. {project} is the number of the project that contains the service consumer's VPC network e.g. `12345`. {network} is the name of the service consumer's VPC network.", + // "location": "path", + // "pattern": "^services/[^/]+/projects/[^/]+/global/networks/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/peeredDnsDomains", + // "request": { + // "$ref": "PeeredDnsDomain" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/service.management" + // ] + // } + +} + +// method id "servicenetworking.services.projects.global.networks.peeredDnsDomains.delete": + +type ServicesProjectsGlobalNetworksPeeredDnsDomainsDeleteCall struct { + s *APIService + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a peered DNS domain. +func (r *ServicesProjectsGlobalNetworksPeeredDnsDomainsService) Delete(name string) *ServicesProjectsGlobalNetworksPeeredDnsDomainsDeleteCall { + c := &ServicesProjectsGlobalNetworksPeeredDnsDomainsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ServicesProjectsGlobalNetworksPeeredDnsDomainsDeleteCall) Fields(s ...googleapi.Field) *ServicesProjectsGlobalNetworksPeeredDnsDomainsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ServicesProjectsGlobalNetworksPeeredDnsDomainsDeleteCall) Context(ctx context.Context) *ServicesProjectsGlobalNetworksPeeredDnsDomainsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ServicesProjectsGlobalNetworksPeeredDnsDomainsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ServicesProjectsGlobalNetworksPeeredDnsDomainsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "servicenetworking.services.projects.global.networks.peeredDnsDomains.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ServicesProjectsGlobalNetworksPeeredDnsDomainsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a peered DNS domain.", + // "flatPath": "v1/services/{servicesId}/projects/{projectsId}/global/networks/{networksId}/peeredDnsDomains/{peeredDnsDomainsId}", + // "httpMethod": "DELETE", + // "id": "servicenetworking.services.projects.global.networks.peeredDnsDomains.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The name of the peered DNS domain to delete in the format: `services/{service}/projects/{project}/global/networks/{network}/peeredDnsDomains/{name}`. {service} is the peering service that is managing connectivity for the service producer's organization. For Google services that support this functionality, this value is `servicenetworking.googleapis.com`. {project} is the number of the project that contains the service consumer's VPC network e.g. `12345`. {network} is the name of the service consumer's VPC network. {name} is the name of the peered DNS domain.", + // "location": "path", + // "pattern": "^services/[^/]+/projects/[^/]+/global/networks/[^/]+/peeredDnsDomains/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/service.management" + // ] + // } + +} + +// method id "servicenetworking.services.projects.global.networks.peeredDnsDomains.list": + +type ServicesProjectsGlobalNetworksPeeredDnsDomainsListCall struct { + s *APIService + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists peered DNS domains for a connection. +func (r *ServicesProjectsGlobalNetworksPeeredDnsDomainsService) List(parent string) *ServicesProjectsGlobalNetworksPeeredDnsDomainsListCall { + c := &ServicesProjectsGlobalNetworksPeeredDnsDomainsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ServicesProjectsGlobalNetworksPeeredDnsDomainsListCall) Fields(s ...googleapi.Field) *ServicesProjectsGlobalNetworksPeeredDnsDomainsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ServicesProjectsGlobalNetworksPeeredDnsDomainsListCall) IfNoneMatch(entityTag string) *ServicesProjectsGlobalNetworksPeeredDnsDomainsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ServicesProjectsGlobalNetworksPeeredDnsDomainsListCall) Context(ctx context.Context) *ServicesProjectsGlobalNetworksPeeredDnsDomainsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ServicesProjectsGlobalNetworksPeeredDnsDomainsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ServicesProjectsGlobalNetworksPeeredDnsDomainsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/peeredDnsDomains") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "servicenetworking.services.projects.global.networks.peeredDnsDomains.list" call. +// Exactly one of *ListPeeredDnsDomainsResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ListPeeredDnsDomainsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ServicesProjectsGlobalNetworksPeeredDnsDomainsListCall) Do(opts ...googleapi.CallOption) (*ListPeeredDnsDomainsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
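+	// doRequest issues the GET built above; a 304 Not Modified response (possible when IfNoneMatch was set) is translated into a *googleapi.Error below rather than being decoded.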
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListPeeredDnsDomainsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists peered DNS domains for a connection.", + // "flatPath": "v1/services/{servicesId}/projects/{projectsId}/global/networks/{networksId}/peeredDnsDomains", + // "httpMethod": "GET", + // "id": "servicenetworking.services.projects.global.networks.peeredDnsDomains.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. Parent resource identifying the connection which owns this collection of peered DNS domains in the format: `services/{service}/projects/{project}/global/networks/{network}`. {service} is the peering service that is managing connectivity for the service producer's organization. For Google services that support this functionality, this value is `servicenetworking.googleapis.com`. {project} is a project number e.g. `12345` that contains the service consumer's VPC network. {network} is the name of the service consumer's VPC network.", + // "location": "path", + // "pattern": "^services/[^/]+/projects/[^/]+/global/networks/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/peeredDnsDomains", + // "response": { + // "$ref": "ListPeeredDnsDomainsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/service.management" + // ] + // } + +} + // method id "servicenetworking.services.roles.add": type ServicesRolesAddCall struct { @@ -7222,12 +7567,9 @@ type ServicesRolesAddCall struct { } // Add: Service producers can use this method to add roles in the shared -// VPC host -// project. Each role is bound to the provided member. Each role must -// be -// selected from within a whitelisted set of roles. Each role is applied -// at -// only the granularity specified in the whitelist. +// VPC host project. Each role is bound to the provided member. Each +// role must be selected from within a whitelisted set of roles. Each +// role is applied at only the granularity specified in the whitelist. func (r *ServicesRolesService) Add(parent string, addrolesrequest *AddRolesRequest) *ServicesRolesAddCall { c := &ServicesRolesAddCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -7262,7 +7604,7 @@ func (c *ServicesRolesAddCall) Header() http.Header { func (c *ServicesRolesAddCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7326,7 +7668,7 @@ func (c *ServicesRolesAddCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Service producers can use this method to add roles in the shared VPC host\nproject. 
Each role is bound to the provided member. Each role must be\nselected from within a whitelisted set of roles. Each role is applied at\nonly the granularity specified in the whitelist.", + // "description": "Service producers can use this method to add roles in the shared VPC host project. Each role is bound to the provided member. Each role must be selected from within a whitelisted set of roles. Each role is applied at only the granularity specified in the whitelist.", // "flatPath": "v1/services/{servicesId}/roles:add", // "httpMethod": "POST", // "id": "servicenetworking.services.roles.add", @@ -7335,7 +7677,7 @@ func (c *ServicesRolesAddCall) Do(opts ...googleapi.CallOption) (*Operation, err // ], // "parameters": { // "parent": { - // "description": "Required. This is in a form services/{service} where {service} is the name of the\nprivate access management service. For example\n'service-peering.example.com'.", + // "description": "Required. This is in a form services/{service} where {service} is the name of the private access management service. For example 'service-peering.example.com'.", // "location": "path", // "pattern": "^services/[^/]+$", // "required": true, diff --git a/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json b/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json index 22621e0a8bd..5fe587001a0 100644 --- a/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json +++ b/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json @@ -114,7 +114,7 @@ "operations": { "methods": { "cancel": { - "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", "flatPath": "v1/operations/{operationsId}:cancel", "httpMethod": "POST", "id": "serviceusage.operations.cancel", @@ -143,7 +143,7 @@ ] }, "delete": { - "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. 
If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", "flatPath": "v1/operations/{operationsId}", "httpMethod": "DELETE", "id": "serviceusage.operations.delete", @@ -169,7 +169,7 @@ ] }, "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v1/operations/{operationsId}", "httpMethod": "GET", "id": "serviceusage.operations.get", @@ -195,7 +195,7 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", "flatPath": "v1/operations", "httpMethod": "GET", "id": "serviceusage.operations.list", @@ -237,7 +237,7 @@ "services": { "methods": { "batchEnable": { - "description": "Enable multiple services on a project. The operation is atomic: if enabling\nany service fails, then the entire batch fails, and no state changes occur.\nTo enable a single service, use the `EnableService` method instead.", + "description": "Enable multiple services on a project. The operation is atomic: if enabling any service fails, then the entire batch fails, and no state changes occur. To enable a single service, use the `EnableService` method instead.", "flatPath": "v1/{v1Id}/{v1Id1}/services:batchEnable", "httpMethod": "POST", "id": "serviceusage.services.batchEnable", @@ -246,7 +246,7 @@ ], "parameters": { "parent": { - "description": "Parent to enable services on.\n\nAn example name would be:\n`projects/123` where `123` is the project number.\n\nThe `BatchEnableServices` method currently only supports projects.", + "description": "Parent to enable services on. An example name would be: `projects/123` where `123` is the project number. 
The `BatchEnableServices` method currently only supports projects.", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -266,7 +266,7 @@ ] }, "batchGet": { - "description": "Returns the service configurations and enabled states for a given list of\nservices.", + "description": "Returns the service configurations and enabled states for a given list of services.", "flatPath": "v1/{v1Id}/{v1Id1}/services:batchGet", "httpMethod": "GET", "id": "serviceusage.services.batchGet", @@ -275,13 +275,13 @@ ], "parameters": { "names": { - "description": "Names of the services to retrieve.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com` where `123` is the\nproject number.\nA single request can get a maximum of 30 services at a time.", + "description": "Names of the services to retrieve. An example name would be: `projects/123/services/serviceusage.googleapis.com` where `123` is the project number. A single request can get a maximum of 30 services at a time.", "location": "query", "repeated": true, "type": "string" }, "parent": { - "description": "Parent to retrieve services from.\nIf this is set, the parent of all of the services specified in `names` must\nmatch this field. An example name would be: `projects/123` where `123` is\nthe project number. The `BatchGetServices` method currently only supports\nprojects.", + "description": "Parent to retrieve services from. If this is set, the parent of all of the services specified in `names` must match this field. An example name would be: `projects/123` where `123` is the project number. The `BatchGetServices` method currently only supports projects.", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -298,7 +298,7 @@ ] }, "disable": { - "description": "Disable a service so that it can no longer be used with a project.\nThis prevents unintended usage that may cause unexpected billing\ncharges or security leaks.\n\nIt is not valid to call the disable method on a service that is not\ncurrently enabled. Callers will receive a `FAILED_PRECONDITION` status if\nthe target service is not currently enabled.", + "description": "Disable a service so that it can no longer be used with a project. This prevents unintended usage that may cause unexpected billing charges or security leaks. It is not valid to call the disable method on a service that is not currently enabled. Callers will receive a `FAILED_PRECONDITION` status if the target service is not currently enabled.", "flatPath": "v1/{v1Id}/{v1Id1}/services/{servicesId}:disable", "httpMethod": "POST", "id": "serviceusage.services.disable", @@ -307,7 +307,7 @@ ], "parameters": { "name": { - "description": "Name of the consumer and service to disable the service on.\n\nThe enable and disable methods currently only support projects.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com` where `123` is the\nproject number.", + "description": "Name of the consumer and service to disable the service on. The enable and disable methods currently only support projects. 
An example name would be: `projects/123/services/serviceusage.googleapis.com` where `123` is the project number.", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+$", "required": true, @@ -336,7 +336,7 @@ ], "parameters": { "name": { - "description": "Name of the consumer and service to enable the service on.\n\nThe `EnableService` and `DisableService` methods currently only support\nprojects.\n\nEnabling a service requires that the service is public or is shared with\nthe user enabling the service.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com` where `123` is the\nproject number.", + "description": "Name of the consumer and service to enable the service on. The `EnableService` and `DisableService` methods currently only support projects. Enabling a service requires that the service is public or is shared with the user enabling the service. An example name would be: `projects/123/services/serviceusage.googleapis.com` where `123` is the project number.", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+$", "required": true, @@ -365,7 +365,7 @@ ], "parameters": { "name": { - "description": "Name of the consumer and service to get the `ConsumerState` for.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com` where `123` is the\nproject number.", + "description": "Name of the consumer and service to get the `ConsumerState` for. An example name would be: `projects/123/services/serviceusage.googleapis.com` where `123` is the project number.", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+$", "required": true, @@ -382,7 +382,7 @@ ] }, "list": { - "description": "List all services available to the specified project, and the current\nstate of those services with respect to the project. The list includes\nall public services, all services for which the calling user has the\n`servicemanagement.services.bind` permission, and all services that have\nalready been enabled on the project. The list can be filtered to\nonly include services in a specific state, for example to only include\nservices enabled on the project.", + "description": "List all services available to the specified project, and the current state of those services with respect to the project. The list includes all public services, all services for which the calling user has the `servicemanagement.services.bind` permission, and all services that have already been enabled on the project. The list can be filtered to only include services in a specific state, for example to only include services enabled on the project. WARNING: If you need to query enabled services frequently or across an organization, you should use [Cloud Asset Inventory API](https://cloud.google.com/asset-inventory/docs/apis), which provides higher throughput and richer filtering capability.", "flatPath": "v1/{v1Id}/{v1Id1}/services", "httpMethod": "GET", "id": "serviceusage.services.list", @@ -391,23 +391,23 @@ ], "parameters": { "filter": { - "description": "Only list services that conform to the given filter.\nThe allowed filter strings are `state:ENABLED` and `state:DISABLED`.", + "description": "Only list services that conform to the given filter. The allowed filter strings are `state:ENABLED` and `state:DISABLED`.", "location": "query", "type": "string" }, "pageSize": { - "description": "Requested size of the next page of data.\nRequested page size cannot exceed 200.\n If not set, the default page size is 50.", + "description": "Requested size of the next page of data. 
Requested page size cannot exceed 200. If not set, the default page size is 50.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Token identifying which result to start with, which is returned by a\nprevious list call.", + "description": "Token identifying which result to start with, which is returned by a previous list call.", "location": "query", "type": "string" }, "parent": { - "description": "Parent to search for services on.\n\nAn example name would be:\n`projects/123` where `123` is the project number.", + "description": "Parent to search for services on. An example name would be: `projects/123` where `123` is the project number.", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -426,11 +426,46 @@ } } }, - "revision": "20200508", + "revision": "20200821", "rootUrl": "https://serviceusage.googleapis.com/", "schemas": { + "AdminQuotaPolicy": { + "description": "Quota policy created by quota administrator.", + "id": "AdminQuotaPolicy", + "properties": { + "container": { + "description": "The cloud resource container at which the quota policy is created. The format is {container_type}/{container_number}", + "type": "string" + }, + "dimensions": { + "additionalProperties": { + "type": "string" + }, + "description": " If this map is nonempty, then this policy applies only to specific values for dimensions defined in the limit unit. For example, an policy on a limit with the unit 1/{project}/{region} could contain an entry with the key \"region\" and the value \"us-east-1\"; the policy is only applied to quota consumed in that region. This map has the following restrictions: * If \"region\" appears as a key, its value must be a valid Cloud region. * If \"zone\" appears as a key, its value must be a valid Cloud zone. * Keys other than \"region\" or \"zone\" are not valid.", + "type": "object" + }, + "metric": { + "description": "The name of the metric to which this policy applies. An example name would be: `compute.googleapis.com/cpus`", + "type": "string" + }, + "name": { + "description": "The resource name of the policy. This name is generated by the server when the policy is created. Example names would be: `organizations/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminQuotaPolicies/4a3f2c1d`", + "type": "string" + }, + "policyValue": { + "description": "The quota policy value. Can be any nonnegative integer, or -1 (unlimited quota).", + "format": "int64", + "type": "string" + }, + "unit": { + "description": "The limit unit of the limit to which this policy applies. An example unit would be: `1/{project}/{region}` Note that `{project}` and `{region}` are not placeholders in this example; the literal characters `{` and `}` occur in the string.", + "type": "string" + } + }, + "type": "object" + }, "Api": { - "description": "Api is a light-weight descriptor for an API Interface.\n\nInterfaces are also described as \"protocol buffer services\" in some contexts,\nsuch as by the \"service\" keyword in a .proto file, but they are different\nfrom API Services, which represent a concrete implementation of an interface\nas opposed to simply a description of methods and bindings. They are also\nsometimes simply referred to as \"APIs\" in other contexts, such as the name of\nthis message itself. See https://cloud.google.com/apis/design/glossary for\ndetailed terminology.", + "description": "Api is a light-weight descriptor for an API Interface. 
Interfaces are also described as \"protocol buffer services\" in some contexts, such as by the \"service\" keyword in a .proto file, but they are different from API Services, which represent a concrete implementation of an interface as opposed to simply a description of methods and bindings. They are also sometimes simply referred to as \"APIs\" in other contexts, such as the name of this message itself. See https://cloud.google.com/apis/design/glossary for detailed terminology.", "id": "Api", "properties": { "methods": { @@ -448,7 +483,7 @@ "type": "array" }, "name": { - "description": "The fully qualified name of this interface, including package name\nfollowed by the interface's simple name.", + "description": "The fully qualified name of this interface, including package name followed by the interface's simple name.", "type": "string" }, "options": { @@ -460,7 +495,7 @@ }, "sourceContext": { "$ref": "SourceContext", - "description": "Source context for the protocol buffer service represented by this\nmessage." + "description": "Source context for the protocol buffer service represented by this message." }, "syntax": { "description": "The source syntax of the service.", @@ -475,38 +510,38 @@ "type": "string" }, "version": { - "description": "A version string for this interface. If specified, must have the form\n`major-version.minor-version`, as in `1.10`. If the minor version is\nomitted, it defaults to zero. If the entire version field is empty, the\nmajor version is derived from the package name, as outlined below. If the\nfield is not empty, the version in the package name will be verified to be\nconsistent with what is provided here.\n\nThe versioning schema uses [semantic\nversioning](http://semver.org) where the major version number\nindicates a breaking change and the minor version an additive,\nnon-breaking change. Both version numbers are signals to users\nwhat to expect from different versions, and should be carefully\nchosen based on the product plan.\n\nThe major version is also reflected in the package name of the\ninterface, which must end in `v\u003cmajor-version\u003e`, as in\n`google.feature.v1`. For major versions 0 and 1, the suffix can\nbe omitted. Zero major versions must only be used for\nexperimental, non-GA interfaces.\n", + "description": "A version string for this interface. If specified, must have the form `major-version.minor-version`, as in `1.10`. If the minor version is omitted, it defaults to zero. If the entire version field is empty, the major version is derived from the package name, as outlined below. If the field is not empty, the version in the package name will be verified to be consistent with what is provided here. The versioning schema uses [semantic versioning](http://semver.org) where the major version number indicates a breaking change and the minor version an additive, non-breaking change. Both version numbers are signals to users what to expect from different versions, and should be carefully chosen based on the product plan. The major version is also reflected in the package name of the interface, which must end in `v`, as in `google.feature.v1`. For major versions 0 and 1, the suffix can be omitted. Zero major versions must only be used for experimental, non-GA interfaces. 
", "type": "string" } }, "type": "object" }, "AuthProvider": { - "description": "Configuration for an authentication provider, including support for\n[JSON Web Token\n(JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", + "description": "Configuration for an authentication provider, including support for [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", "id": "AuthProvider", "properties": { "audiences": { - "description": "The list of JWT\n[audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3).\nthat are allowed to access. A JWT containing any of these audiences will\nbe accepted. When this setting is absent, JWTs with audiences:\n - \"https://[service.name]/[google.protobuf.Api.name]\"\n - \"https://[service.name]/\"\nwill be accepted.\nFor example, if no audiences are in the setting, LibraryService API will\naccept JWTs with the following audiences:\n -\n https://library-example.googleapis.com/google.example.library.v1.LibraryService\n - https://library-example.googleapis.com/\n\nExample:\n\n audiences: bookstore_android.apps.googleusercontent.com,\n bookstore_web.apps.googleusercontent.com", + "description": "The list of JWT [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). that are allowed to access. A JWT containing any of these audiences will be accepted. When this setting is absent, JWTs with audiences: - \"https://[service.name]/[google.protobuf.Api.name]\" - \"https://[service.name]/\" will be accepted. For example, if no audiences are in the setting, LibraryService API will accept JWTs with the following audiences: - https://library-example.googleapis.com/google.example.library.v1.LibraryService - https://library-example.googleapis.com/ Example: audiences: bookstore_android.apps.googleusercontent.com, bookstore_web.apps.googleusercontent.com", "type": "string" }, "authorizationUrl": { - "description": "Redirect URL if JWT token is required but not present or is expired.\nImplement authorizationUrl of securityDefinitions in OpenAPI spec.", + "description": "Redirect URL if JWT token is required but not present or is expired. Implement authorizationUrl of securityDefinitions in OpenAPI spec.", "type": "string" }, "id": { - "description": "The unique identifier of the auth provider. It will be referred to by\n`AuthRequirement.provider_id`.\n\nExample: \"bookstore_auth\".", + "description": "The unique identifier of the auth provider. It will be referred to by `AuthRequirement.provider_id`. Example: \"bookstore_auth\".", "type": "string" }, "issuer": { - "description": "Identifies the principal that issued the JWT. See\nhttps://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1\nUsually a URL or an email address.\n\nExample: https://securetoken.google.com\nExample: 1234567-compute@developer.gserviceaccount.com", + "description": "Identifies the principal that issued the JWT. See https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1 Usually a URL or an email address. Example: https://securetoken.google.com Example: 1234567-compute@developer.gserviceaccount.com", "type": "string" }, "jwksUri": { - "description": "URL of the provider's public key set to validate signature of the JWT. 
See\n[OpenID\nDiscovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).\nOptional if the key set document:\n - can be retrieved from\n [OpenID\n Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html of\n the issuer.\n - can be inferred from the email domain of the issuer (e.g. a Google\n service account).\n\nExample: https://www.googleapis.com/oauth2/v1/certs", + "description": "URL of the provider's public key set to validate signature of the JWT. See [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata). Optional if the key set document: - can be retrieved from [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html of the issuer. - can be inferred from the email domain of the issuer (e.g. a Google service account). Example: https://www.googleapis.com/oauth2/v1/certs", "type": "string" }, "jwtLocations": { - "description": "Defines the locations to extract the JWT.\n\nJWT locations can be either from HTTP headers or URL query parameters.\nThe rule is that the first match wins. The checking order is: checking\nall headers first, then URL query parameters.\n\nIf not specified, default to use following 3 locations:\n 1) Authorization: Bearer\n 2) x-goog-iap-jwt-assertion\n 3) access_token query parameter\n\nDefault locations can be specified as followings:\n jwt_locations:\n - header: Authorization\n value_prefix: \"Bearer \"\n - header: x-goog-iap-jwt-assertion\n - query: access_token", + "description": "Defines the locations to extract the JWT. JWT locations can be either from HTTP headers or URL query parameters. The rule is that the first match wins. The checking order is: checking all headers first, then URL query parameters. If not specified, default to use following 3 locations: 1) Authorization: Bearer 2) x-goog-iap-jwt-assertion 3) access_token query parameter Default locations can be specified as followings: jwt_locations: - header: Authorization value_prefix: \"Bearer \" - header: x-goog-iap-jwt-assertion - query: access_token", "items": { "$ref": "JwtLocation" }, @@ -516,22 +551,22 @@ "type": "object" }, "AuthRequirement": { - "description": "User-defined authentication requirements, including support for\n[JSON Web Token\n(JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", + "description": "User-defined authentication requirements, including support for [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", "id": "AuthRequirement", "properties": { "audiences": { - "description": "NOTE: This will be deprecated soon, once AuthProvider.audiences is\nimplemented and accepted in all the runtime components.\n\nThe list of JWT\n[audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3).\nthat are allowed to access. A JWT containing any of these audiences will\nbe accepted. When this setting is absent, only JWTs with audience\n\"https://Service_name/API_name\"\nwill be accepted. For example, if no audiences are in the setting,\nLibraryService API will only accept JWTs with the following audience\n\"https://library-example.googleapis.com/google.example.library.v1.LibraryService\".\n\nExample:\n\n audiences: bookstore_android.apps.googleusercontent.com,\n bookstore_web.apps.googleusercontent.com", + "description": "NOTE: This will be deprecated soon, once AuthProvider.audiences is implemented and accepted in all the runtime components. 
The list of JWT [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). that are allowed to access. A JWT containing any of these audiences will be accepted. When this setting is absent, only JWTs with audience \"https://Service_name/API_name\" will be accepted. For example, if no audiences are in the setting, LibraryService API will only accept JWTs with the following audience \"https://library-example.googleapis.com/google.example.library.v1.LibraryService\". Example: audiences: bookstore_android.apps.googleusercontent.com, bookstore_web.apps.googleusercontent.com", "type": "string" }, "providerId": { - "description": "id from authentication provider.\n\nExample:\n\n provider_id: bookstore_auth", + "description": "id from authentication provider. Example: provider_id: bookstore_auth", "type": "string" } }, "type": "object" }, "Authentication": { - "description": "`Authentication` defines the authentication configuration for an API.\n\nExample for an API targeted for external use:\n\n name: calendar.googleapis.com\n authentication:\n providers:\n - id: google_calendar_auth\n jwks_uri: https://www.googleapis.com/oauth2/v1/certs\n issuer: https://securetoken.google.com\n rules:\n - selector: \"*\"\n requirements:\n provider_id: google_calendar_auth", + "description": "`Authentication` defines the authentication configuration for an API. Example for an API targeted for external use: name: calendar.googleapis.com authentication: providers: - id: google_calendar_auth jwks_uri: https://www.googleapis.com/oauth2/v1/certs issuer: https://securetoken.google.com rules: - selector: \"*\" requirements: provider_id: google_calendar_auth", "id": "Authentication", "properties": { "providers": { @@ -542,7 +577,7 @@ "type": "array" }, "rules": { - "description": "A list of authentication rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of authentication rules that apply to individual API methods. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "AuthenticationRule" }, @@ -552,7 +587,7 @@ "type": "object" }, "AuthenticationRule": { - "description": "Authentication rules for the service.\n\nBy default, if a method has any authentication requirements, every request\nmust include a valid credential matching one of the requirements.\nIt's an error to include more than one kind of credential in a single\nrequest.\n\nIf a method doesn't have any auth requirements, request credentials will be\nignored.", + "description": "Authentication rules for the service. By default, if a method has any authentication requirements, every request must include a valid credential matching one of the requirements. It's an error to include more than one kind of credential in a single request. If a method doesn't have any auth requirements, request credentials will be ignored.", "id": "AuthenticationRule", "properties": { "allowWithoutCredential": { @@ -571,7 +606,7 @@ "type": "array" }, "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. 
Refer to selector for syntax details.", "type": "string" } }, @@ -582,7 +617,7 @@ "id": "Backend", "properties": { "rules": { - "description": "A list of API backend rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of API backend rules that apply to individual API methods. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "BackendRule" }, @@ -596,29 +631,29 @@ "id": "BackendRule", "properties": { "address": { - "description": "The address of the API backend.\n\nThe scheme is used to determine the backend protocol and security.\nThe following schemes are accepted:\n\n SCHEME PROTOCOL SECURITY\n http:// HTTP None\n https:// HTTP TLS\n grpc:// gRPC None\n grpcs:// gRPC TLS\n\nIt is recommended to explicitly include a scheme. Leaving out the scheme\nmay cause constrasting behaviors across platforms.\n\nIf the port is unspecified, the default is:\n- 80 for schemes without TLS\n- 443 for schemes with TLS\n\nFor HTTP backends, use protocol\nto specify the protocol version.", + "description": "The address of the API backend. The scheme is used to determine the backend protocol and security. The following schemes are accepted: SCHEME PROTOCOL SECURITY http:// HTTP None https:// HTTP TLS grpc:// gRPC None grpcs:// gRPC TLS It is recommended to explicitly include a scheme. Leaving out the scheme may cause constrasting behaviors across platforms. If the port is unspecified, the default is: - 80 for schemes without TLS - 443 for schemes with TLS For HTTP backends, use protocol to specify the protocol version.", "type": "string" }, "deadline": { - "description": "The number of seconds to wait for a response from a request. The default\nvaries based on the request protocol and deployment environment.", + "description": "The number of seconds to wait for a response from a request. The default varies based on the request protocol and deployment environment.", "format": "double", "type": "number" }, "disableAuth": { - "description": "When disable_auth is true, a JWT ID token won't be generated and the\noriginal \"Authorization\" HTTP header will be preserved. If the header is\nused to carry the original token and is expected by the backend, this\nfield must be set to true to preserve the header.", + "description": "When disable_auth is true, a JWT ID token won't be generated and the original \"Authorization\" HTTP header will be preserved. If the header is used to carry the original token and is expected by the backend, this field must be set to true to preserve the header.", "type": "boolean" }, "jwtAudience": { - "description": "The JWT audience is used when generating a JWT ID token for the backend.\nThis ID token will be added in the HTTP \"authorization\" header, and sent\nto the backend.", + "description": "The JWT audience is used when generating a JWT ID token for the backend. This ID token will be added in the HTTP \"authorization\" header, and sent to the backend.", "type": "string" }, "minDeadline": { - "description": "Minimum deadline in seconds needed for this method. Calls having deadline\nvalue lower than this will be rejected.", + "description": "Minimum deadline in seconds needed for this method. Calls having deadline value lower than this will be rejected.", "format": "double", "type": "number" }, "operationDeadline": { - "description": "The number of seconds to wait for the completion of a long running\noperation. 
The default is no deadline.", + "description": "The number of seconds to wait for the completion of a long running operation. The default is no deadline.", "format": "double", "type": "number" }, @@ -630,21 +665,17 @@ ], "enumDescriptions": [ "", - "Use the backend address as-is, with no modification to the path. If the\nURL pattern contains variables, the variable names and values will be\nappended to the query string. If a query string parameter and a URL\npattern variable have the same name, this may result in duplicate keys in\nthe query string.\n\n# Examples\n\nGiven the following operation config:\n\n Method path: /api/company/{cid}/user/{uid}\n Backend address: https://example.cloudfunctions.net/getUser\n\nRequests to the following request paths will call the backend at the\ntranslated path:\n\n Request path: /api/company/widgetworks/user/johndoe\n Translated:\n https://example.cloudfunctions.net/getUser?cid=widgetworks\u0026uid=johndoe\n\n Request path: /api/company/widgetworks/user/johndoe?timezone=EST\n Translated:\n https://example.cloudfunctions.net/getUser?timezone=EST\u0026cid=widgetworks\u0026uid=johndoe", - "The request path will be appended to the backend address.\n\n# Examples\n\nGiven the following operation config:\n\n Method path: /api/company/{cid}/user/{uid}\n Backend address: https://example.appspot.com\n\nRequests to the following request paths will call the backend at the\ntranslated path:\n\n Request path: /api/company/widgetworks/user/johndoe\n Translated:\n https://example.appspot.com/api/company/widgetworks/user/johndoe\n\n Request path: /api/company/widgetworks/user/johndoe?timezone=EST\n Translated:\n https://example.appspot.com/api/company/widgetworks/user/johndoe?timezone=EST" + "Use the backend address as-is, with no modification to the path. If the URL pattern contains variables, the variable names and values will be appended to the query string. If a query string parameter and a URL pattern variable have the same name, this may result in duplicate keys in the query string. # Examples Given the following operation config: Method path: /api/company/{cid}/user/{uid} Backend address: https://example.cloudfunctions.net/getUser Requests to the following request paths will call the backend at the translated path: Request path: /api/company/widgetworks/user/johndoe Translated: https://example.cloudfunctions.net/getUser?cid=widgetworks\u0026uid=johndoe Request path: /api/company/widgetworks/user/johndoe?timezone=EST Translated: https://example.cloudfunctions.net/getUser?timezone=EST\u0026cid=widgetworks\u0026uid=johndoe", + "The request path will be appended to the backend address. 
# Examples Given the following operation config: Method path: /api/company/{cid}/user/{uid} Backend address: https://example.appspot.com Requests to the following request paths will call the backend at the translated path: Request path: /api/company/widgetworks/user/johndoe Translated: https://example.appspot.com/api/company/widgetworks/user/johndoe Request path: /api/company/widgetworks/user/johndoe?timezone=EST Translated: https://example.appspot.com/api/company/widgetworks/user/johndoe?timezone=EST" ], "type": "string" }, "protocol": { - "description": "The protocol used for sending a request to the backend.\nThe supported values are \"http/1.1\" and \"h2\".\n\nThe default value is inferred from the scheme in the\naddress field:\n\n SCHEME PROTOCOL\n http:// http/1.1\n https:// http/1.1\n grpc:// h2\n grpcs:// h2\n\nFor secure HTTP backends (https://) that support HTTP/2, set this field\nto \"h2\" for improved performance.\n\nConfiguring this field to non-default values is only supported for secure\nHTTP backends. This field will be ignored for all other backends.\n\nSee\nhttps://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids\nfor more details on the supported values.", - "type": "string" - }, - "renameTo": { - "description": "Unimplemented. Do not use.\n\nThe new name the selected proto elements should be renamed to.\n\nThe package, the service and the method can all be renamed.\nThe backend server should implement the renamed proto. However, clients\nshould call the original method, and ESF routes the traffic to the renamed\nmethod.\n\nHTTP clients should call the URL mapped to the original method.\ngRPC and Stubby clients should call the original method with package name.\n\nFor legacy reasons, ESF allows Stubby clients to call with the\nshort name (without the package name). However, for API Versioning(or\nmultiple methods mapped to the same short name), all Stubby clients must\ncall the method's full name with the package name, otherwise the first one\n(selector) wins.\n\nIf this `rename_to` is specified with a trailing `*`, the `selector` must\nbe specified with a trailing `*` as well. The all element short names\nmatched by the `*` in the selector will be kept in the `rename_to`.\n\nFor example,\n rename_rules:\n - selector: |-\n google.example.library.v1.*\n rename_to: google.example.library.*\n\nThe selector matches `google.example.library.v1.Library.CreateShelf` and\n`google.example.library.v1.Library.CreateBook`, they will be renamed to\n`google.example.library.Library.CreateShelf` and\n`google.example.library.Library.CreateBook`. It essentially renames the\nproto package name section of the matched proto service and methods.", + "description": "The protocol used for sending a request to the backend. The supported values are \"http/1.1\" and \"h2\". The default value is inferred from the scheme in the address field: SCHEME PROTOCOL http:// http/1.1 https:// http/1.1 grpc:// h2 grpcs:// h2 For secure HTTP backends (https://) that support HTTP/2, set this field to \"h2\" for improved performance. Configuring this field to non-default values is only supported for secure HTTP backends. This field will be ignored for all other backends. 
See https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids for more details on the supported values.", "type": "string" }, "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, @@ -683,7 +714,7 @@ "id": "BatchEnableServicesRequest", "properties": { "serviceIds": { - "description": "The identifiers of the services to enable on the project.\n\nA valid identifier would be:\nserviceusage.googleapis.com\n\nEnabling services requires that each service is public or is shared with\nthe user enabling the service.\n\nA single request can enable a maximum of 20 services at a time. If more\nthan 20 services are specified, the request will fail, and no state changes\nwill occur.", + "description": "The identifiers of the services to enable on the project. A valid identifier would be: serviceusage.googleapis.com Enabling services requires that each service is public or is shared with the user enabling the service. A single request can enable a maximum of 20 services at a time. If more than 20 services are specified, the request will fail, and no state changes will occur.", "items": { "type": "string" }, @@ -693,11 +724,11 @@ "type": "object" }, "BatchEnableServicesResponse": { - "description": "Response message for the `BatchEnableServices` method.\nThis response message is assigned to the `response` field of the returned\nOperation when that operation is done.", + "description": "Response message for the `BatchEnableServices` method. This response message is assigned to the `response` field of the returned Operation when that operation is done.", "id": "BatchEnableServicesResponse", "properties": { "failures": { - "description": "If allow_partial_success is true, and one or more services could not be\nenabled, this field contains the details about each failure.", + "description": "If allow_partial_success is true, and one or more services could not be enabled, this field contains the details about each failure.", "items": { "$ref": "EnableFailure" }, @@ -728,11 +759,11 @@ "type": "object" }, "Billing": { - "description": "Billing related configuration of the service.\n\nThe following example shows how to configure monitored resources and metrics\nfor billing, `consumer_destinations` is the only supported destination and\nthe monitored resources need at least one label key\n`cloud.googleapis.com/location` to indicate the location of the billing\nusage, using different monitored resources between monitoring and billing is\nrecommended so they can be evolved independently:\n\n\n monitored_resources:\n - type: library.googleapis.com/billing_branch\n labels:\n - key: cloud.googleapis.com/location\n description: |\n Predefined label to support billing location restriction.\n - key: city\n description: |\n Custom label to define the city where the library branch is located\n in.\n - key: name\n description: Custom label to define the name of the library branch.\n metrics:\n - name: library.googleapis.com/book/borrowed_count\n metric_kind: DELTA\n value_type: INT64\n unit: \"1\"\n billing:\n consumer_destinations:\n - monitored_resource: library.googleapis.com/billing_branch\n metrics:\n - library.googleapis.com/book/borrowed_count", + "description": "Billing related configuration of the service. 
The following example shows how to configure monitored resources and metrics for billing, `consumer_destinations` is the only supported destination and the monitored resources need at least one label key `cloud.googleapis.com/location` to indicate the location of the billing usage, using different monitored resources between monitoring and billing is recommended so they can be evolved independently: monitored_resources: - type: library.googleapis.com/billing_branch labels: - key: cloud.googleapis.com/location description: | Predefined label to support billing location restriction. - key: city description: | Custom label to define the city where the library branch is located in. - key: name description: Custom label to define the name of the library branch. metrics: - name: library.googleapis.com/book/borrowed_count metric_kind: DELTA value_type: INT64 unit: \"1\" billing: consumer_destinations: - monitored_resource: library.googleapis.com/billing_branch metrics: - library.googleapis.com/book/borrowed_count", "id": "Billing", "properties": { "consumerDestinations": { - "description": "Billing configurations for sending metrics to the consumer project.\nThere can be multiple consumer destinations per service, each one must have\na different monitored resource type. A metric can be used in at most\none consumer destination.", + "description": "Billing configurations for sending metrics to the consumer project. There can be multiple consumer destinations per service, each one must have a different monitored resource type. A metric can be used in at most one consumer destination.", "items": { "$ref": "BillingDestination" }, @@ -742,18 +773,18 @@ "type": "object" }, "BillingDestination": { - "description": "Configuration of a specific billing destination (Currently only support\nbill against consumer project).", + "description": "Configuration of a specific billing destination (Currently only support bill against consumer project).", "id": "BillingDestination", "properties": { "metrics": { - "description": "Names of the metrics to report to this billing destination.\nEach name must be defined in Service.metrics section.", + "description": "Names of the metrics to report to this billing destination. Each name must be defined in Service.metrics section.", "items": { "type": "string" }, "type": "array" }, "monitoredResource": { - "description": "The monitored resource type. The type must be defined in\nService.monitored_resources section.", + "description": "The monitored resource type. The type must be defined in Service.monitored_resources section.", "type": "string" } }, @@ -766,11 +797,11 @@ "type": "object" }, "Context": { - "description": "`Context` defines which contexts an API requests.\n\nExample:\n\n context:\n rules:\n - selector: \"*\"\n requested:\n - google.rpc.context.ProjectContext\n - google.rpc.context.OriginContext\n\nThe above specifies that all methods in the API request\n`google.rpc.context.ProjectContext` and\n`google.rpc.context.OriginContext`.\n\nAvailable context types are defined in package\n`google.rpc.context`.\n\nThis also provides mechanism to whitelist any protobuf message extension that\ncan be sent in grpc metadata using “x-goog-ext-\u003cextension_id\u003e-bin” and\n“x-goog-ext-\u003cextension_id\u003e-jspb” format. 
For example, list any service\nspecific protobuf types that can appear in grpc metadata as follows in your\nyaml file:\n\nExample:\n\n context:\n rules:\n - selector: \"google.example.library.v1.LibraryService.CreateBook\"\n allowed_request_extensions:\n - google.foo.v1.NewExtension\n allowed_response_extensions:\n - google.foo.v1.NewExtension\n\nYou can also specify extension ID instead of fully qualified extension name\nhere.", + "description": "`Context` defines which contexts an API requests. Example: context: rules: - selector: \"*\" requested: - google.rpc.context.ProjectContext - google.rpc.context.OriginContext The above specifies that all methods in the API request `google.rpc.context.ProjectContext` and `google.rpc.context.OriginContext`. Available context types are defined in package `google.rpc.context`. This also provides mechanism to whitelist any protobuf message extension that can be sent in grpc metadata using “x-goog-ext--bin” and “x-goog-ext--jspb” format. For example, list any service specific protobuf types that can appear in grpc metadata as follows in your yaml file: Example: context: rules: - selector: \"google.example.library.v1.LibraryService.CreateBook\" allowed_request_extensions: - google.foo.v1.NewExtension allowed_response_extensions: - google.foo.v1.NewExtension You can also specify extension ID instead of fully qualified extension name here.", "id": "Context", "properties": { "rules": { - "description": "A list of RPC context rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of RPC context rules that apply to individual API methods. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "ContextRule" }, @@ -780,18 +811,18 @@ "type": "object" }, "ContextRule": { - "description": "A context rule provides information about the context for an individual API\nelement.", + "description": "A context rule provides information about the context for an individual API element.", "id": "ContextRule", "properties": { "allowedRequestExtensions": { - "description": "A list of full type names or extension IDs of extensions allowed in grpc\nside channel from client to backend.", + "description": "A list of full type names or extension IDs of extensions allowed in grpc side channel from client to backend.", "items": { "type": "string" }, "type": "array" }, "allowedResponseExtensions": { - "description": "A list of full type names or extension IDs of extensions allowed in grpc\nside channel from backend to client.", + "description": "A list of full type names or extension IDs of extensions allowed in grpc side channel from backend to client.", "items": { "type": "string" }, @@ -812,29 +843,29 @@ "type": "array" }, "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, "type": "object" }, "Control": { - "description": "Selects and configures the service controller used by the service. The\nservice controller handles features like abuse, quota, billing, logging,\nmonitoring, etc.", + "description": "Selects and configures the service controller used by the service. 
The service controller handles features like abuse, quota, billing, logging, monitoring, etc.", "id": "Control", "properties": { "environment": { - "description": "The service control environment to use. If empty, no control plane\nfeature (like quota and billing) will be enabled.", + "description": "The service control environment to use. If empty, no control plane feature (like quota and billing) will be enabled.", "type": "string" } }, "type": "object" }, "CustomError": { - "description": "Customize service error responses. For example, list any service\nspecific protobuf types that can appear in error detail lists of\nerror responses.\n\nExample:\n\n custom_error:\n types:\n - google.foo.v1.CustomError\n - google.foo.v1.AnotherError", + "description": "Customize service error responses. For example, list any service specific protobuf types that can appear in error detail lists of error responses. Example: custom_error: types: - google.foo.v1.CustomError - google.foo.v1.AnotherError", "id": "CustomError", "properties": { "rules": { - "description": "The list of custom error rules that apply to individual API messages.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "The list of custom error rules that apply to individual API messages. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "CustomErrorRule" }, @@ -855,11 +886,11 @@ "id": "CustomErrorRule", "properties": { "isErrorType": { - "description": "Mark this message as possible payload in error response. Otherwise,\nobjects of this type will be filtered when they appear in error payload.", + "description": "Mark this message as possible payload in error response. Otherwise, objects of this type will be filtered when they appear in error payload.", "type": "boolean" }, "selector": { - "description": "Selects messages to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects messages to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, @@ -884,15 +915,29 @@ "description": "Request message for the `DisableService` method.", "id": "DisableServiceRequest", "properties": { + "checkIfServiceHasUsage": { + "description": "Defines the behavior for checking service usage when disabling a service.", + "enum": [ + "CHECK_IF_SERVICE_HAS_USAGE_UNSPECIFIED", + "SKIP", + "CHECK" + ], + "enumDescriptions": [ + "When unset, the default behavior is used, which is SKIP.", + "If set, skip checking service usage when disabling a service.", + "If set, service usage is checked when disabling the service. If a service, or its dependents, has usage in the last 30 days, the request returns a FAILED_PRECONDITION error." + ], + "type": "string" + }, "disableDependentServices": { - "description": "Indicates if services that are enabled and which depend on this service\nshould also be disabled. If not set, an error will be generated if any\nenabled services depend on the service to be disabled. When set, the\nservice, and any enabled services that depend on it, will be disabled\ntogether.", + "description": "Indicates if services that are enabled and which depend on this service should also be disabled. If not set, an error will be generated if any enabled services depend on the service to be disabled. 
When set, the service, and any enabled services that depend on it, will be disabled together.", "type": "boolean" } }, "type": "object" }, "DisableServiceResponse": { - "description": "Response message for the `DisableService` method.\nThis response message is assigned to the `response` field of the returned\nOperation when that operation is done.", + "description": "Response message for the `DisableService` method. This response message is assigned to the `response` field of the returned Operation when that operation is done.", "id": "DisableServiceResponse", "properties": { "service": { @@ -903,7 +948,7 @@ "type": "object" }, "Documentation": { - "description": "`Documentation` provides the information for describing a service.\n\nExample:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: \u003e\n The Google Calendar API gives access\n to most calendar features.\n pages:\n - name: Overview\n content: \u0026#40;== include google/foo/overview.md ==\u0026#41;\n - name: Tutorial\n content: \u0026#40;== include google/foo/tutorial.md ==\u0026#41;\n subpages;\n - name: Java\n content: \u0026#40;== include google/foo/tutorial_java.md ==\u0026#41;\n rules:\n - selector: google.calendar.Calendar.Get\n description: \u003e\n ...\n - selector: google.calendar.Calendar.Put\n description: \u003e\n ...\n\u003c/code\u003e\u003c/pre\u003e\nDocumentation is provided in markdown syntax. In addition to\nstandard markdown features, definition lists, tables and fenced\ncode blocks are supported. Section headers can be provided and are\ninterpreted relative to the section nesting of the context where\na documentation fragment is embedded.\n\nDocumentation from the IDL is merged with documentation defined\nvia the config at normalization time, where documentation provided\nby config rules overrides IDL provided.\n\nA number of constructs specific to the API platform are supported\nin documentation text.\n\nIn order to reference a proto element, the following\nnotation can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;fully.qualified.proto.name]\u0026#91;]\u003c/code\u003e\u003c/pre\u003e\nTo override the display text used for the link, this can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;display text]\u0026#91;fully.qualified.proto.name]\u003c/code\u003e\u003c/pre\u003e\nText can be excluded from doc using the following notation:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;-- internal comment --\u0026#41;\u003c/code\u003e\u003c/pre\u003e\n\nA few directives are available in documentation. Note that\ndirectives must appear on a single line to be properly\nidentified. The `include` directive includes a markdown file from\nan external source:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== include path/to/file ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe `resource_for` directive marks a message to be the resource of\na collection in REST view. If it is not specified, tools attempt\nto infer the resource from the operations in a collection:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== resource_for v1.shelves.books ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe directive `suppress_warning` does not directly affect documentation\nand is documented together with service config validation.", + "description": "`Documentation` provides the information for describing a service. Example: documentation: summary: \u003e The Google Calendar API gives access to most calendar features. 
pages: - name: Overview content: (== include google/foo/overview.md ==) - name: Tutorial content: (== include google/foo/tutorial.md ==) subpages; - name: Java content: (== include google/foo/tutorial_java.md ==) rules: - selector: google.calendar.Calendar.Get description: \u003e ... - selector: google.calendar.Calendar.Put description: \u003e ... Documentation is provided in markdown syntax. In addition to standard markdown features, definition lists, tables and fenced code blocks are supported. Section headers can be provided and are interpreted relative to the section nesting of the context where a documentation fragment is embedded. Documentation from the IDL is merged with documentation defined via the config at normalization time, where documentation provided by config rules overrides IDL provided. A number of constructs specific to the API platform are supported in documentation text. In order to reference a proto element, the following notation can be used: [fully.qualified.proto.name][] To override the display text used for the link, this can be used: [display text][fully.qualified.proto.name] Text can be excluded from doc using the following notation: (-- internal comment --) A few directives are available in documentation. Note that directives must appear on a single line to be properly identified. The `include` directive includes a markdown file from an external source: (== include path/to/file ==) The `resource_for` directive marks a message to be the resource of a collection in REST view. If it is not specified, tools attempt to infer the resource from the operations in a collection: (== resource_for v1.shelves.books ==) The directive `suppress_warning` does not directly affect documentation and is documented together with service config validation.", "id": "Documentation", "properties": { "documentationRootUrl": { @@ -911,7 +956,7 @@ "type": "string" }, "overview": { - "description": "Declares a single overview page. For example:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: ...\n overview: \u0026#40;== include overview.md ==\u0026#41;\n\u003c/code\u003e\u003c/pre\u003e\nThis is a shortcut for the following declaration (using pages style):\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: ...\n pages:\n - name: Overview\n content: \u0026#40;== include overview.md ==\u0026#41;\n\u003c/code\u003e\u003c/pre\u003e\nNote: you cannot specify both `overview` field and `pages` field.", + "description": "Declares a single overview page. For example: documentation: summary: ... overview: (== include overview.md ==) This is a shortcut for the following declaration (using pages style): documentation: summary: ... pages: - name: Overview content: (== include overview.md ==) Note: you cannot specify both `overview` field and `pages` field.", "type": "string" }, "pages": { @@ -922,18 +967,18 @@ "type": "array" }, "rules": { - "description": "A list of documentation rules that apply to individual API elements.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of documentation rules that apply to individual API elements. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "DocumentationRule" }, "type": "array" }, "serviceRootUrl": { - "description": "Specifies the service root url if the default one (the service name\nfrom the yaml file) is not suitable. 
This can be seen in any fully\nspecified service urls as well as sections that show a base that other\nurls are relative to.", + "description": "Specifies the service root url if the default one (the service name from the yaml file) is not suitable. This can be seen in any fully specified service urls as well as sections that show a base that other urls are relative to.", "type": "string" }, "summary": { - "description": "A short summary of what the service does. Can only be provided by\nplain text.", + "description": "A short summary of what the service does. Can only be provided by plain text.", "type": "string" } }, @@ -944,7 +989,7 @@ "id": "DocumentationRule", "properties": { "deprecationDescription": { - "description": "Deprecation description of the selected element(s). It can be provided if\nan element is marked as `deprecated`.", + "description": "Deprecation description of the selected element(s). It can be provided if an element is marked as `deprecated`.", "type": "string" }, "description": { @@ -952,14 +997,14 @@ "type": "string" }, "selector": { - "description": "The selector is a comma-separated list of patterns. Each pattern is a\nqualified name of the element which may end in \"*\", indicating a wildcard.\nWildcards are only allowed at the end and for a whole component of the\nqualified name, i.e. \"foo.*\" is ok, but not \"foo.b*\" or \"foo.*.bar\". A\nwildcard will match one or more components. To specify a default for all\napplicable elements, the whole pattern \"*\" is used.", + "description": "The selector is a comma-separated list of patterns. Each pattern is a qualified name of the element which may end in \"*\", indicating a wildcard. Wildcards are only allowed at the end and for a whole component of the qualified name, i.e. \"foo.*\" is ok, but not \"foo.b*\" or \"foo.*.bar\". A wildcard will match one or more components. To specify a default for all applicable elements, the whole pattern \"*\" is used.", "type": "string" } }, "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" @@ -986,7 +1031,7 @@ "type": "object" }, "EnableServiceResponse": { - "description": "Response message for the `EnableService` method.\nThis response message is assigned to the `response` field of the returned\nOperation when that operation is done.", + "description": "Response message for the `EnableService` method. 
This response message is assigned to the `response` field of the returned Operation when that operation is done.", "id": "EnableServiceResponse", "properties": { "service": { @@ -997,33 +1042,26 @@ "type": "object" }, "Endpoint": { - "description": "`Endpoint` describes a network endpoint that serves a set of APIs.\nA service may expose any number of endpoints, and all endpoints share the\nsame service configuration, such as quota configuration and monitoring\nconfiguration.\n\nExample service configuration:\n\n name: library-example.googleapis.com\n endpoints:\n # Below entry makes 'google.example.library.v1.Library'\n # API be served from endpoint address library-example.googleapis.com.\n # It also allows HTTP OPTIONS calls to be passed to the backend, for\n # it to decide whether the subsequent cross-origin request is\n # allowed to proceed.\n - name: library-example.googleapis.com\n allow_cors: true", + "description": "`Endpoint` describes a network endpoint that serves a set of APIs. A service may expose any number of endpoints, and all endpoints share the same service configuration, such as quota configuration and monitoring configuration. Example service configuration: name: library-example.googleapis.com endpoints: # Below entry makes 'google.example.library.v1.Library' # API be served from endpoint address library-example.googleapis.com. # It also allows HTTP OPTIONS calls to be passed to the backend, for # it to decide whether the subsequent cross-origin request is # allowed to proceed. - name: library-example.googleapis.com allow_cors: true", "id": "Endpoint", "properties": { "aliases": { - "description": "DEPRECATED: This field is no longer supported. Instead of using aliases,\nplease specify multiple google.api.Endpoint for each of the intended\naliases.\n\nAdditional names that this endpoint will be hosted on.", + "description": "DEPRECATED: This field is no longer supported. Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on.", "items": { "type": "string" }, "type": "array" }, "allowCors": { - "description": "Allowing\n[CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka\ncross-domain traffic, would allow the backends served from this endpoint to\nreceive and respond to HTTP OPTIONS requests. The response will be used by\nthe browser to determine whether the subsequent cross-origin request is\nallowed to proceed.", + "description": "Allowing [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka cross-domain traffic, would allow the backends served from this endpoint to receive and respond to HTTP OPTIONS requests. The response will be used by the browser to determine whether the subsequent cross-origin request is allowed to proceed.", "type": "boolean" }, - "features": { - "description": "The list of features enabled on this endpoint.", - "items": { - "type": "string" - }, - "type": "array" - }, "name": { "description": "The canonical name of this endpoint.", "type": "string" }, "target": { - "description": "The specification of an Internet routable address of API frontend that will\nhandle requests to this [API\nEndpoint](https://cloud.google.com/apis/design/glossary). It should be\neither a valid IPv4 address or a fully-qualified domain name. 
For example,\n\"8.8.8.8\" or \"myservice.appspot.com\".", + "description": "The specification of an Internet routable address of API frontend that will handle requests to this [API Endpoint](https://cloud.google.com/apis/design/glossary). It should be either a valid IPv4 address or a fully-qualified domain name. For example, \"8.8.8.8\" or \"myservice.appspot.com\".", "type": "string" } }, @@ -1177,7 +1215,7 @@ "type": "integer" }, "oneofIndex": { - "description": "The index of the field type in `Type.oneofs`, for message or enumeration\ntypes. The first type has index 1; zero means the type is not in the list.", + "description": "The index of the field type in `Type.oneofs`, for message or enumeration types. The first type has index 1; zero means the type is not in the list.", "format": "int32", "type": "integer" }, @@ -1193,7 +1231,7 @@ "type": "boolean" }, "typeUrl": { - "description": "The field type URL, without the scheme, for message or enumeration\ntypes. Example: `\"type.googleapis.com/google.protobuf.Timestamp\"`.", + "description": "The field type URL, without the scheme, for message or enumeration types. Example: `\"type.googleapis.com/google.protobuf.Timestamp\"`.", "type": "string" } }, @@ -1205,7 +1243,7 @@ "properties": { "identity": { "$ref": "ServiceIdentity", - "description": "Service identity that service producer can use to access consumer\nresources. If exists is true, it contains email and unique_id. If exists is\nfalse, it contains pre-constructed email and empty unique_id." + "description": "Service identity that service producer can use to access consumer resources. If exists is true, it contains email and unique_id. If exists is false, it contains pre-constructed email and empty unique_id." }, "state": { "description": "Service identity state.", @@ -1214,7 +1252,7 @@ "ACTIVE" ], "enumDescriptions": [ - "Default service identity state. This value is used if the state is\nomitted.", + "Default service identity state. This value is used if the state is omitted.", "Service identity has been created and can be used." ], "type": "string" @@ -1223,11 +1261,11 @@ "type": "object" }, "GoogleApiService": { - "description": "`Service` is the root object of Google service configuration schema. It\ndescribes basic information about a service, such as the name and the\ntitle, and delegates other aspects to sub-sections. Each sub-section is\neither a proto message or a repeated proto message that configures a\nspecific aspect, such as auth. See each proto message definition for details.\n\nExample:\n\n type: google.api.Service\n config_version: 3\n name: calendar.googleapis.com\n title: Google Calendar API\n apis:\n - name: google.calendar.v3.Calendar\n authentication:\n providers:\n - id: google_calendar_auth\n jwks_uri: https://www.googleapis.com/oauth2/v1/certs\n issuer: https://securetoken.google.com\n rules:\n - selector: \"*\"\n requirements:\n provider_id: google_calendar_auth", + "description": "`Service` is the root object of Google service configuration schema. It describes basic information about a service, such as the name and the title, and delegates other aspects to sub-sections. Each sub-section is either a proto message or a repeated proto message that configures a specific aspect, such as auth. See each proto message definition for details. 
Example: type: google.api.Service config_version: 3 name: calendar.googleapis.com title: Google Calendar API apis: - name: google.calendar.v3.Calendar authentication: providers: - id: google_calendar_auth jwks_uri: https://www.googleapis.com/oauth2/v1/certs issuer: https://securetoken.google.com rules: - selector: \"*\" requirements: provider_id: google_calendar_auth", "id": "GoogleApiService", "properties": { "apis": { - "description": "A list of API interfaces exported by this service. Only the `name` field\nof the google.protobuf.Api needs to be provided by the configuration\nauthor, as the remaining fields will be derived from the IDL during the\nnormalization process. It is an error to specify an API interface here\nwhich cannot be resolved against the associated IDL files.", + "description": "A list of API interfaces exported by this service. Only the `name` field of the google.protobuf.Api needs to be provided by the configuration author, as the remaining fields will be derived from the IDL during the normalization process. It is an error to specify an API interface here which cannot be resolved against the associated IDL files.", "items": { "$ref": "Api" }, @@ -1246,7 +1284,7 @@ "description": "Billing configuration." }, "configVersion": { - "description": "The semantic version of the service configuration. The config version\naffects the interpretation of the service configuration. For example,\ncertain features are enabled by default for certain config versions.\n\nThe latest config version is `3`.", + "description": "The semantic version of the service configuration. The config version affects the interpretation of the service configuration. For example, certain features are enabled by default for certain config versions. The latest config version is `3`.", "format": "uint32", "type": "integer" }, @@ -1267,14 +1305,14 @@ "description": "Additional API documentation." }, "endpoints": { - "description": "Configuration for network endpoints. If this is empty, then an endpoint\nwith the same name as the service is automatically generated to service all\ndefined APIs.", + "description": "Configuration for network endpoints. If this is empty, then an endpoint with the same name as the service is automatically generated to service all defined APIs.", "items": { "$ref": "Endpoint" }, "type": "array" }, "enums": { - "description": "A list of all enum types included in this API service. Enums\nreferenced directly or indirectly by the `apis` are automatically\nincluded. Enums which are not referenced but shall be included\nshould be listed here by name. Example:\n\n enums:\n - name: google.someapi.v1.SomeEnum", + "description": "A list of all enum types included in this API service. Enums referenced directly or indirectly by the `apis` are automatically included. Enums which are not referenced but shall be included should be listed here by name. Example: enums: - name: google.someapi.v1.SomeEnum", "items": { "$ref": "Enum" }, @@ -1285,7 +1323,7 @@ "description": "HTTP configuration." }, "id": { - "description": "A unique ID for a specific instance of this message, typically assigned\nby the client for tracking purpose. Must be no longer than 63 characters\nand only lower case letters, digits, '.', '_' and '-' are allowed. If\nempty, the server may choose to generate one instead.", + "description": "A unique ID for a specific instance of this message, typically assigned by the client for tracking purpose. 
Must be no longer than 63 characters and only lower case letters, digits, '.', '_' and '-' are allowed. If empty, the server may choose to generate one instead.", "type": "string" }, "logging": { @@ -1307,7 +1345,7 @@ "type": "array" }, "monitoredResources": { - "description": "Defines the monitored resources used by this service. This is required\nby the Service.monitoring and Service.logging configurations.", + "description": "Defines the monitored resources used by this service. This is required by the Service.monitoring and Service.logging configurations.", "items": { "$ref": "MonitoredResourceDescriptor" }, @@ -1318,7 +1356,7 @@ "description": "Monitoring configuration." }, "name": { - "description": "The service name, which is a DNS-like logical identifier for the\nservice, such as `calendar.googleapis.com`. The service name\ntypically goes through DNS verification to make sure the owner\nof the service also owns the DNS name.", + "description": "The service name, which is a DNS-like logical identifier for the service, such as `calendar.googleapis.com`. The service name typically goes through DNS verification to make sure the owner of the service also owns the DNS name.", "type": "string" }, "producerProjectId": { @@ -1338,7 +1376,7 @@ "description": "System parameter configuration." }, "systemTypes": { - "description": "A list of all proto message types included in this API service.\nIt serves similar purpose as [google.api.Service.types], except that\nthese types are not needed by user-defined APIs. Therefore, they will not\nshow up in the generated discovery doc. This field should only be used\nto define system APIs in ESF.", + "description": "A list of all proto message types included in this API service. It serves similar purpose as [google.api.Service.types], except that these types are not needed by user-defined APIs. Therefore, they will not show up in the generated discovery doc. This field should only be used to define system APIs in ESF.", "items": { "$ref": "Type" }, @@ -1349,7 +1387,7 @@ "type": "string" }, "types": { - "description": "A list of all proto message types included in this API service.\nTypes referenced directly or indirectly by the `apis` are\nautomatically included. Messages which are not referenced but\nshall be included, such as types used by the `google.protobuf.Any` type,\nshould be listed here by name. Example:\n\n types:\n - name: google.protobuf.Int32", + "description": "A list of all proto message types included in this API service. Types referenced directly or indirectly by the `apis` are automatically included. Messages which are not referenced but shall be included, such as types used by the `google.protobuf.Any` type, should be listed here by name. Example: types: - name: google.protobuf.Int32", "items": { "$ref": "Type" }, @@ -1363,19 +1401,19 @@ "type": "object" }, "GoogleApiServiceIdentity": { - "description": "The per-product per-project service identity for a service.\n\n\nUse this field to configure per-product per-project service identity.\nExample of a service identity configuration.\n\n usage:\n service_identity:\n - service_account_parent: \"projects/123456789\"\n display_name: \"Cloud XXX Service Agent\"\n description: \"Used as the identity of Cloud XXX to access resources\"", + "description": "The per-product per-project service identity for a service. Use this field to configure per-product per-project service identity. Example of a service identity configuration. 
usage: service_identity: - service_account_parent: \"projects/123456789\" display_name: \"Cloud XXX Service Agent\" description: \"Used as the identity of Cloud XXX to access resources\"", "id": "GoogleApiServiceIdentity", "properties": { "description": { - "description": "Optional. A user-specified opaque description of the service account.\nMust be less than or equal to 256 UTF-8 bytes.", + "description": "Optional. A user-specified opaque description of the service account. Must be less than or equal to 256 UTF-8 bytes.", "type": "string" }, "displayName": { - "description": "Optional. A user-specified name for the service account.\nMust be less than or equal to 100 UTF-8 bytes.", + "description": "Optional. A user-specified name for the service account. Must be less than or equal to 100 UTF-8 bytes.", "type": "string" }, "serviceAccountParent": { - "description": "A service account project that hosts the service accounts.\n\nAn example name would be:\n`projects/123456789`", + "description": "A service account project that hosts the service accounts. An example name would be: `projects/123456789`", "type": "string" } }, @@ -1386,7 +1424,7 @@ "id": "GoogleApiServiceusageV1OperationMetadata", "properties": { "resourceNames": { - "description": "The full name of the resources that this operation is directly\nassociated with.", + "description": "The full name of the resources that this operation is directly associated with.", "items": { "type": "string" }, @@ -1401,14 +1439,14 @@ "properties": { "config": { "$ref": "GoogleApiServiceusageV1ServiceConfig", - "description": "The service configuration of the available service.\nSome fields may be filtered out of the configuration in responses to\nthe `ListServices` method. These fields are present only in responses to\nthe `GetService` method." + "description": "The service configuration of the available service. Some fields may be filtered out of the configuration in responses to the `ListServices` method. These fields are present only in responses to the `GetService` method." }, "name": { - "description": "The resource name of the consumer and service.\n\nA valid name would be:\n- projects/123/services/serviceusage.googleapis.com", + "description": "The resource name of the consumer and service. A valid name would be: - projects/123/services/serviceusage.googleapis.com", "type": "string" }, "parent": { - "description": "The resource name of the consumer.\n\nA valid name would be:\n- projects/123", + "description": "The resource name of the consumer. A valid name would be: - projects/123", "type": "string" }, "state": { @@ -1419,8 +1457,8 @@ "ENABLED" ], "enumDescriptions": [ - "The default value, which indicates that the enabled state of the service\nis unspecified or not meaningful. Currently, all consumers other than\nprojects (such as folders and organizations) are always in this state.", - "The service cannot be used by this consumer. It has either been explicitly\ndisabled, or has never been enabled.", + "The default value, which indicates that the enabled state of the service is unspecified or not meaningful. Currently, all consumers other than projects (such as folders and organizations) are always in this state.", + "The service cannot be used by this consumer. It has either been explicitly disabled, or has never been enabled.", "The service has been explicitly enabled for use by this consumer." 
], "type": "string" @@ -1433,7 +1471,7 @@ "id": "GoogleApiServiceusageV1ServiceConfig", "properties": { "apis": { - "description": "A list of API interfaces exported by this service. Contains only the names,\nversions, and method names of the interfaces.", + "description": "A list of API interfaces exported by this service. Contains only the names, versions, and method names of the interfaces.", "items": { "$ref": "Api" }, @@ -1445,17 +1483,28 @@ }, "documentation": { "$ref": "Documentation", - "description": "Additional API documentation. Contains only the summary and the\ndocumentation URL." + "description": "Additional API documentation. Contains only the summary and the documentation URL." }, "endpoints": { - "description": "Configuration for network endpoints. Contains only the names and aliases\nof the endpoints.", + "description": "Configuration for network endpoints. Contains only the names and aliases of the endpoints.", "items": { "$ref": "Endpoint" }, "type": "array" }, + "monitoredResources": { + "description": "Defines the monitored resources used by this service. This is required by the Service.monitoring and Service.logging configurations.", + "items": { + "$ref": "MonitoredResourceDescriptor" + }, + "type": "array" + }, + "monitoring": { + "$ref": "Monitoring", + "description": "Monitoring configuration. This should not include the 'producer_destinations' field." + }, "name": { - "description": "The DNS address at which this service is available.\n\nAn example DNS address would be:\n`calendar.googleapis.com`.", + "description": "The DNS address at which this service is available. An example DNS address would be: `calendar.googleapis.com`.", "type": "string" }, "quota": { @@ -1479,7 +1528,7 @@ "properties": { "identity": { "$ref": "GoogleApiServiceusageV1beta1ServiceIdentity", - "description": "Service identity that service producer can use to access consumer\nresources. If exists is true, it contains email and unique_id. If exists is\nfalse, it contains pre-constructed email and empty unique_id." + "description": "Service identity that service producer can use to access consumer resources. If exists is true, it contains email and unique_id. If exists is false, it contains pre-constructed email and empty unique_id." }, "state": { "description": "Service identity state.", @@ -1488,7 +1537,7 @@ "ACTIVE" ], "enumDescriptions": [ - "Default service identity state. This value is used if the state is\nomitted.", + "Default service identity state. This value is used if the state is omitted.", "Service identity has been created and can be used." ], "type": "string" @@ -1497,30 +1546,30 @@ "type": "object" }, "GoogleApiServiceusageV1beta1ServiceIdentity": { - "description": "Service identity for a service. This is the identity that service producer\nshould use to access consumer resources.", + "description": "Service identity for a service. 
This is the identity that service producer should use to access consumer resources.", "id": "GoogleApiServiceusageV1beta1ServiceIdentity", "properties": { "email": { - "description": "The email address of the service account that a service producer would use\nto access consumer resources.", + "description": "The email address of the service account that a service producer would use to access consumer resources.", "type": "string" }, "uniqueId": { - "description": "The unique and stable id of the service account.\nhttps://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts#ServiceAccount", + "description": "The unique and stable id of the service account. https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts#ServiceAccount", "type": "string" } }, "type": "object" }, "Http": { - "description": "Defines the HTTP configuration for an API service. It contains a list of\nHttpRule, each specifying the mapping of an RPC method\nto one or more HTTP REST API methods.", + "description": "Defines the HTTP configuration for an API service. It contains a list of HttpRule, each specifying the mapping of an RPC method to one or more HTTP REST API methods.", "id": "Http", "properties": { "fullyDecodeReservedExpansion": { - "description": "When set to true, URL path parameters will be fully URI-decoded except in\ncases of single segment matches in reserved expansion, where \"%2F\" will be\nleft encoded.\n\nThe default behavior is to not decode RFC 6570 reserved characters in multi\nsegment matches.", + "description": "When set to true, URL path parameters will be fully URI-decoded except in cases of single segment matches in reserved expansion, where \"%2F\" will be left encoded. The default behavior is to not decode RFC 6570 reserved characters in multi segment matches.", "type": "boolean" }, "rules": { - "description": "A list of HTTP configuration rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of HTTP configuration rules that apply to individual API methods. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "HttpRule" }, @@ -1530,34 +1579,34 @@ "type": "object" }, "HttpRule": { - "description": "# gRPC Transcoding\n\ngRPC Transcoding is a feature for mapping between a gRPC method and one or\nmore HTTP REST endpoints. It allows developers to build a single API service\nthat supports both gRPC APIs and REST APIs. Many systems, including [Google\nAPIs](https://github.com/googleapis/googleapis),\n[Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC\nGateway](https://github.com/grpc-ecosystem/grpc-gateway),\nand [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature\nand use it for large scale production services.\n\n`HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies\nhow different portions of the gRPC request message are mapped to the URL\npath, URL query parameters, and HTTP request body. It also controls how the\ngRPC response message is mapped to the HTTP response body. `HttpRule` is\ntypically specified as an `google.api.http` annotation on the gRPC method.\n\nEach mapping specifies a URL path template and an HTTP method. 
The path\ntemplate may refer to one or more fields in the gRPC request message, as long\nas each field is a non-repeated field with a primitive (non-message) type.\nThe path template controls how fields of the request message are mapped to\nthe URL path.\n\nExample:\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http) = {\n get: \"/v1/{name=messages/*}\"\n };\n }\n }\n message GetMessageRequest {\n string name = 1; // Mapped to URL path.\n }\n message Message {\n string text = 1; // The resource content.\n }\n\nThis enables an HTTP REST to gRPC mapping as below:\n\nHTTP | gRPC\n-----|-----\n`GET /v1/messages/123456` | `GetMessage(name: \"messages/123456\")`\n\nAny fields in the request message which are not bound by the path template\nautomatically become HTTP query parameters if there is no HTTP request body.\nFor example:\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http) = {\n get:\"/v1/messages/{message_id}\"\n };\n }\n }\n message GetMessageRequest {\n message SubMessage {\n string subfield = 1;\n }\n string message_id = 1; // Mapped to URL path.\n int64 revision = 2; // Mapped to URL query parameter `revision`.\n SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`.\n }\n\nThis enables a HTTP JSON to RPC mapping as below:\n\nHTTP | gRPC\n-----|-----\n`GET /v1/messages/123456?revision=2\u0026sub.subfield=foo` |\n`GetMessage(message_id: \"123456\" revision: 2 sub: SubMessage(subfield:\n\"foo\"))`\n\nNote that fields which are mapped to URL query parameters must have a\nprimitive type or a repeated primitive type or a non-repeated message type.\nIn the case of a repeated type, the parameter can be repeated in the URL\nas `...?param=A\u0026param=B`. In the case of a message type, each field of the\nmessage is mapped to a separate parameter, such as\n`...?foo.a=A\u0026foo.b=B\u0026foo.c=C`.\n\nFor HTTP methods that allow a request body, the `body` field\nspecifies the mapping. Consider a REST update method on the\nmessage resource collection:\n\n service Messaging {\n rpc UpdateMessage(UpdateMessageRequest) returns (Message) {\n option (google.api.http) = {\n patch: \"/v1/messages/{message_id}\"\n body: \"message\"\n };\n }\n }\n message UpdateMessageRequest {\n string message_id = 1; // mapped to the URL\n Message message = 2; // mapped to the body\n }\n\nThe following HTTP JSON to RPC mapping is enabled, where the\nrepresentation of the JSON in the request body is determined by\nprotos JSON encoding:\n\nHTTP | gRPC\n-----|-----\n`PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id:\n\"123456\" message { text: \"Hi!\" })`\n\nThe special name `*` can be used in the body mapping to define that\nevery field not bound by the path template should be mapped to the\nrequest body. This enables the following alternative definition of\nthe update method:\n\n service Messaging {\n rpc UpdateMessage(Message) returns (Message) {\n option (google.api.http) = {\n patch: \"/v1/messages/{message_id}\"\n body: \"*\"\n };\n }\n }\n message Message {\n string message_id = 1;\n string text = 2;\n }\n\n\nThe following HTTP JSON to RPC mapping is enabled:\n\nHTTP | gRPC\n-----|-----\n`PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id:\n\"123456\" text: \"Hi!\")`\n\nNote that when using `*` in the body mapping, it is not possible to\nhave HTTP parameters, as all fields not bound by the path end in\nthe body. 
This makes this option more rarely used in practice when\ndefining REST APIs. The common usage of `*` is in custom methods\nwhich don't use the URL at all for transferring data.\n\nIt is possible to define multiple HTTP methods for one RPC by using\nthe `additional_bindings` option. Example:\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http) = {\n get: \"/v1/messages/{message_id}\"\n additional_bindings {\n get: \"/v1/users/{user_id}/messages/{message_id}\"\n }\n };\n }\n }\n message GetMessageRequest {\n string message_id = 1;\n string user_id = 2;\n }\n\nThis enables the following two alternative HTTP JSON to RPC mappings:\n\nHTTP | gRPC\n-----|-----\n`GET /v1/messages/123456` | `GetMessage(message_id: \"123456\")`\n`GET /v1/users/me/messages/123456` | `GetMessage(user_id: \"me\" message_id:\n\"123456\")`\n\n## Rules for HTTP mapping\n\n1. Leaf request fields (recursive expansion nested messages in the request\n message) are classified into three categories:\n - Fields referred by the path template. They are passed via the URL path.\n - Fields referred by the HttpRule.body. They are passed via the HTTP\n request body.\n - All other fields are passed via the URL query parameters, and the\n parameter name is the field path in the request message. A repeated\n field can be represented as multiple query parameters under the same\n name.\n 2. If HttpRule.body is \"*\", there is no URL query parameter, all fields\n are passed via URL path and HTTP request body.\n 3. If HttpRule.body is omitted, there is no HTTP request body, all\n fields are passed via URL path and URL query parameters.\n\n### Path template syntax\n\n Template = \"/\" Segments [ Verb ] ;\n Segments = Segment { \"/\" Segment } ;\n Segment = \"*\" | \"**\" | LITERAL | Variable ;\n Variable = \"{\" FieldPath [ \"=\" Segments ] \"}\" ;\n FieldPath = IDENT { \".\" IDENT } ;\n Verb = \":\" LITERAL ;\n\nThe syntax `*` matches a single URL path segment. The syntax `**` matches\nzero or more URL path segments, which must be the last part of the URL path\nexcept the `Verb`.\n\nThe syntax `Variable` matches part of the URL path as specified by its\ntemplate. A variable template must not contain other variables. If a variable\nmatches a single path segment, its template may be omitted, e.g. `{var}`\nis equivalent to `{var=*}`.\n\nThe syntax `LITERAL` matches literal text in the URL path. If the `LITERAL`\ncontains any reserved character, such characters should be percent-encoded\nbefore the matching.\n\nIf a variable contains exactly one path segment, such as `\"{var}\"` or\n`\"{var=*}\"`, when such a variable is expanded into a URL path on the client\nside, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The\nserver side does the reverse decoding. Such variables show up in the\n[Discovery\nDocument](https://developers.google.com/discovery/v1/reference/apis) as\n`{var}`.\n\nIf a variable contains multiple path segments, such as `\"{var=foo/*}\"`\nor `\"{var=**}\"`, when such a variable is expanded into a URL path on the\nclient side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded.\nThe server side does the reverse decoding, except \"%2F\" and \"%2f\" are left\nunchanged. 
Such variables show up in the\n[Discovery\nDocument](https://developers.google.com/discovery/v1/reference/apis) as\n`{+var}`.\n\n## Using gRPC API Service Configuration\n\ngRPC API Service Configuration (service config) is a configuration language\nfor configuring a gRPC service to become a user-facing product. The\nservice config is simply the YAML representation of the `google.api.Service`\nproto message.\n\nAs an alternative to annotating your proto file, you can configure gRPC\ntranscoding in your service config YAML files. You do this by specifying a\n`HttpRule` that maps the gRPC method to a REST endpoint, achieving the same\neffect as the proto annotation. This can be particularly useful if you\nhave a proto that is reused in multiple services. Note that any transcoding\nspecified in the service config will override any matching transcoding\nconfiguration in the proto.\n\nExample:\n\n http:\n rules:\n # Selects a gRPC method and applies HttpRule to it.\n - selector: example.v1.Messaging.GetMessage\n get: /v1/messages/{message_id}/{sub.subfield}\n\n## Special notes\n\nWhen gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the\nproto to JSON conversion must follow the [proto3\nspecification](https://developers.google.com/protocol-buffers/docs/proto3#json).\n\nWhile the single segment variable follows the semantics of\n[RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String\nExpansion, the multi segment variable **does not** follow RFC 6570 Section\n3.2.3 Reserved Expansion. The reason is that the Reserved Expansion\ndoes not expand special characters like `?` and `#`, which would lead\nto invalid URLs. As the result, gRPC Transcoding uses a custom encoding\nfor multi segment variables.\n\nThe path variables **must not** refer to any repeated or mapped field,\nbecause client libraries are not capable of handling such variable expansion.\n\nThe path variables **must not** capture the leading \"/\" character. The reason\nis that the most common use case \"{var}\" does not capture the leading \"/\"\ncharacter. For consistency, all path variables must share the same behavior.\n\nRepeated message fields must not be mapped to URL query parameters, because\nno client library can support such complicated mapping.\n\nIf an API needs to use a JSON array for request or response body, it can map\nthe request or response body to a repeated field. However, some gRPC\nTranscoding implementations may not support this feature.", + "description": "# gRPC Transcoding gRPC Transcoding is a feature for mapping between a gRPC method and one or more HTTP REST endpoints. It allows developers to build a single API service that supports both gRPC APIs and REST APIs. Many systems, including [Google APIs](https://github.com/googleapis/googleapis), [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC Gateway](https://github.com/grpc-ecosystem/grpc-gateway), and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature and use it for large scale production services. `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies how different portions of the gRPC request message are mapped to the URL path, URL query parameters, and HTTP request body. It also controls how the gRPC response message is mapped to the HTTP response body. `HttpRule` is typically specified as an `google.api.http` annotation on the gRPC method. Each mapping specifies a URL path template and an HTTP method. 
The path template may refer to one or more fields in the gRPC request message, as long as each field is a non-repeated field with a primitive (non-message) type. The path template controls how fields of the request message are mapped to the URL path. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/{name=messages/*}\" }; } } message GetMessageRequest { string name = 1; // Mapped to URL path. } message Message { string text = 1; // The resource content. } This enables an HTTP REST to gRPC mapping as below: HTTP | gRPC -----|----- `GET /v1/messages/123456` | `GetMessage(name: \"messages/123456\")` Any fields in the request message which are not bound by the path template automatically become HTTP query parameters if there is no HTTP request body. For example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get:\"/v1/messages/{message_id}\" }; } } message GetMessageRequest { message SubMessage { string subfield = 1; } string message_id = 1; // Mapped to URL path. int64 revision = 2; // Mapped to URL query parameter `revision`. SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. } This enables a HTTP JSON to RPC mapping as below: HTTP | gRPC -----|----- `GET /v1/messages/123456?revision=2\u0026sub.subfield=foo` | `GetMessage(message_id: \"123456\" revision: 2 sub: SubMessage(subfield: \"foo\"))` Note that fields which are mapped to URL query parameters must have a primitive type or a repeated primitive type or a non-repeated message type. In the case of a repeated type, the parameter can be repeated in the URL as `...?param=A\u0026param=B`. In the case of a message type, each field of the message is mapped to a separate parameter, such as `...?foo.a=A\u0026foo.b=B\u0026foo.c=C`. For HTTP methods that allow a request body, the `body` field specifies the mapping. Consider a REST update method on the message resource collection: service Messaging { rpc UpdateMessage(UpdateMessageRequest) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"message\" }; } } message UpdateMessageRequest { string message_id = 1; // mapped to the URL Message message = 2; // mapped to the body } The following HTTP JSON to RPC mapping is enabled, where the representation of the JSON in the request body is determined by protos JSON encoding: HTTP | gRPC -----|----- `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id: \"123456\" message { text: \"Hi!\" })` The special name `*` can be used in the body mapping to define that every field not bound by the path template should be mapped to the request body. This enables the following alternative definition of the update method: service Messaging { rpc UpdateMessage(Message) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"*\" }; } } message Message { string message_id = 1; string text = 2; } The following HTTP JSON to RPC mapping is enabled: HTTP | gRPC -----|----- `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id: \"123456\" text: \"Hi!\")` Note that when using `*` in the body mapping, it is not possible to have HTTP parameters, as all fields not bound by the path end in the body. This makes this option more rarely used in practice when defining REST APIs. The common usage of `*` is in custom methods which don't use the URL at all for transferring data. 
It is possible to define multiple HTTP methods for one RPC by using the `additional_bindings` option. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/messages/{message_id}\" additional_bindings { get: \"/v1/users/{user_id}/messages/{message_id}\" } }; } } message GetMessageRequest { string message_id = 1; string user_id = 2; } This enables the following two alternative HTTP JSON to RPC mappings: HTTP | gRPC -----|----- `GET /v1/messages/123456` | `GetMessage(message_id: \"123456\")` `GET /v1/users/me/messages/123456` | `GetMessage(user_id: \"me\" message_id: \"123456\")` ## Rules for HTTP mapping 1. Leaf request fields (recursive expansion nested messages in the request message) are classified into three categories: - Fields referred by the path template. They are passed via the URL path. - Fields referred by the HttpRule.body. They are passed via the HTTP request body. - All other fields are passed via the URL query parameters, and the parameter name is the field path in the request message. A repeated field can be represented as multiple query parameters under the same name. 2. If HttpRule.body is \"*\", there is no URL query parameter, all fields are passed via URL path and HTTP request body. 3. If HttpRule.body is omitted, there is no HTTP request body, all fields are passed via URL path and URL query parameters. ### Path template syntax Template = \"/\" Segments [ Verb ] ; Segments = Segment { \"/\" Segment } ; Segment = \"*\" | \"**\" | LITERAL | Variable ; Variable = \"{\" FieldPath [ \"=\" Segments ] \"}\" ; FieldPath = IDENT { \".\" IDENT } ; Verb = \":\" LITERAL ; The syntax `*` matches a single URL path segment. The syntax `**` matches zero or more URL path segments, which must be the last part of the URL path except the `Verb`. The syntax `Variable` matches part of the URL path as specified by its template. A variable template must not contain other variables. If a variable matches a single path segment, its template may be omitted, e.g. `{var}` is equivalent to `{var=*}`. The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` contains any reserved character, such characters should be percent-encoded before the matching. If a variable contains exactly one path segment, such as `\"{var}\"` or `\"{var=*}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{var}`. If a variable contains multiple path segments, such as `\"{var=foo/*}\"` or `\"{var=**}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding, except \"%2F\" and \"%2f\" are left unchanged. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{+var}`. ## Using gRPC API Service Configuration gRPC API Service Configuration (service config) is a configuration language for configuring a gRPC service to become a user-facing product. The service config is simply the YAML representation of the `google.api.Service` proto message. As an alternative to annotating your proto file, you can configure gRPC transcoding in your service config YAML files. 
You do this by specifying a `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same effect as the proto annotation. This can be particularly useful if you have a proto that is reused in multiple services. Note that any transcoding specified in the service config will override any matching transcoding configuration in the proto. Example: http: rules: # Selects a gRPC method and applies HttpRule to it. - selector: example.v1.Messaging.GetMessage get: /v1/messages/{message_id}/{sub.subfield} ## Special notes When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the proto to JSON conversion must follow the [proto3 specification](https://developers.google.com/protocol-buffers/docs/proto3#json). While the single segment variable follows the semantics of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String Expansion, the multi segment variable **does not** follow RFC 6570 Section 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion does not expand special characters like `?` and `#`, which would lead to invalid URLs. As the result, gRPC Transcoding uses a custom encoding for multi segment variables. The path variables **must not** refer to any repeated or mapped field, because client libraries are not capable of handling such variable expansion. The path variables **must not** capture the leading \"/\" character. The reason is that the most common use case \"{var}\" does not capture the leading \"/\" character. For consistency, all path variables must share the same behavior. Repeated message fields must not be mapped to URL query parameters, because no client library can support such complicated mapping. If an API needs to use a JSON array for request or response body, it can map the request or response body to a repeated field. However, some gRPC Transcoding implementations may not support this feature.", "id": "HttpRule", "properties": { "additionalBindings": { - "description": "Additional HTTP bindings for the selector. Nested bindings must\nnot contain an `additional_bindings` field themselves (that is,\nthe nesting may only be one level deep).", + "description": "Additional HTTP bindings for the selector. Nested bindings must not contain an `additional_bindings` field themselves (that is, the nesting may only be one level deep).", "items": { "$ref": "HttpRule" }, "type": "array" }, "allowHalfDuplex": { - "description": "When this flag is set to true, HTTP requests will be allowed to invoke a\nhalf-duplex streaming method.", + "description": "When this flag is set to true, HTTP requests will be allowed to invoke a half-duplex streaming method.", "type": "boolean" }, "body": { - "description": "The name of the request field whose value is mapped to the HTTP request\nbody, or `*` for mapping all request fields not captured by the path\npattern to the HTTP body, or omitted for not having any HTTP request body.\n\nNOTE: the referred field must be present at the top-level of the request\nmessage type.", + "description": "The name of the request field whose value is mapped to the HTTP request body, or `*` for mapping all request fields not captured by the path pattern to the HTTP body, or omitted for not having any HTTP request body. 
NOTE: the referred field must be present at the top-level of the request message type.", "type": "string" }, "custom": { "$ref": "CustomHttpPattern", - "description": "The custom pattern is used for specifying an HTTP method that is not\nincluded in the `pattern` field, such as HEAD, or \"*\" to leave the\nHTTP method unspecified for this rule. The wild-card rule is useful\nfor services that provide content to Web (HTML) clients." + "description": "The custom pattern is used for specifying an HTTP method that is not included in the `pattern` field, such as HEAD, or \"*\" to leave the HTTP method unspecified for this rule. The wild-card rule is useful for services that provide content to Web (HTML) clients." }, "delete": { "description": "Maps to HTTP DELETE. Used for deleting a resource.", "type": "string" }, "get": { - "description": "Maps to HTTP GET. Used for listing and getting information about\nresources.", + "description": "Maps to HTTP GET. Used for listing and getting information about resources.", "type": "string" }, "patch": { @@ -1573,11 +1622,11 @@ "type": "string" }, "responseBody": { - "description": "Optional. The name of the response field whose value is mapped to the HTTP\nresponse body. When omitted, the entire response message will be used\nas the HTTP response body.\n\nNOTE: The referred field must be present at the top-level of the response\nmessage type.", + "description": "Optional. The name of the response field whose value is mapped to the HTTP response body. When omitted, the entire response message will be used as the HTTP response body. NOTE: The referred field must be present at the top-level of the response message type.", "type": "string" }, "selector": { - "description": "Selects a method to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects a method to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, @@ -1597,6 +1646,20 @@ }, "type": "object" }, + "ImportAdminQuotaPoliciesResponse": { + "description": "Response message for ImportAdminQuotaPolicies", + "id": "ImportAdminQuotaPoliciesResponse", + "properties": { + "policies": { + "description": "The policies that were created from the imported data.", + "items": { + "$ref": "AdminQuotaPolicy" + }, + "type": "array" + } + }, + "type": "object" + }, "ImportConsumerOverridesResponse": { "description": "Response message for ImportConsumerOverrides", "id": "ImportConsumerOverridesResponse", @@ -1624,7 +1687,7 @@ "type": "string" }, "valuePrefix": { - "description": "The value prefix. The value format is \"value_prefix{token}\"\nOnly applies to \"in\" header type. Must be empty for \"in\" query type.\nIf not empty, the header value has to match (case sensitive) this prefix.\nIf not matched, JWT will not be extracted. If matched, JWT will be\nextracted after the prefix is removed.\n\nFor example, for \"Authorization: Bearer {JWT}\",\nvalue_prefix=\"Bearer \" with a space at the end.", + "description": "The value prefix. The value format is \"value_prefix{token}\" Only applies to \"in\" header type. Must be empty for \"in\" query type. If not empty, the header value has to match (case sensitive) this prefix. If not matched, JWT will not be extracted. If matched, JWT will be extracted after the prefix is removed. 
For example, for \"Authorization: Bearer {JWT}\", value_prefix=\"Bearer \" with a space at the end.", "type": "string" } }, @@ -1682,7 +1745,7 @@ "id": "ListServicesResponse", "properties": { "nextPageToken": { - "description": "Token that can be passed to `ListServices` to resume a paginated\nquery.", + "description": "Token that can be passed to `ListServices` to resume a paginated query.", "type": "string" }, "services": { @@ -1696,44 +1759,44 @@ "type": "object" }, "LogDescriptor": { - "description": "A description of a log type. Example in YAML format:\n\n - name: library.googleapis.com/activity_history\n description: The history of borrowing and returning library items.\n display_name: Activity\n labels:\n - key: /customer_id\n description: Identifier of a library customer", + "description": "A description of a log type. Example in YAML format: - name: library.googleapis.com/activity_history description: The history of borrowing and returning library items. display_name: Activity labels: - key: /customer_id description: Identifier of a library customer", "id": "LogDescriptor", "properties": { "description": { - "description": "A human-readable description of this log. This information appears in\nthe documentation and can contain details.", + "description": "A human-readable description of this log. This information appears in the documentation and can contain details.", "type": "string" }, "displayName": { - "description": "The human-readable name for this log. This information appears on\nthe user interface and should be concise.", + "description": "The human-readable name for this log. This information appears on the user interface and should be concise.", "type": "string" }, "labels": { - "description": "The set of labels that are available to describe a specific log entry.\nRuntime requests that contain labels not specified here are\nconsidered invalid.", + "description": "The set of labels that are available to describe a specific log entry. Runtime requests that contain labels not specified here are considered invalid.", "items": { "$ref": "LabelDescriptor" }, "type": "array" }, "name": { - "description": "The name of the log. It must be less than 512 characters long and can\ninclude the following characters: upper- and lower-case alphanumeric\ncharacters [A-Za-z0-9], and punctuation characters including\nslash, underscore, hyphen, period [/_-.].", + "description": "The name of the log. It must be less than 512 characters long and can include the following characters: upper- and lower-case alphanumeric characters [A-Za-z0-9], and punctuation characters including slash, underscore, hyphen, period [/_-.].", "type": "string" } }, "type": "object" }, "Logging": { - "description": "Logging configuration of the service.\n\nThe following example shows how to configure logs to be sent to the\nproducer and consumer projects. 
In the example, the `activity_history`\nlog is sent to both the producer and consumer projects, whereas the\n`purchase_history` log is only sent to the producer project.\n\n monitored_resources:\n - type: library.googleapis.com/branch\n labels:\n - key: /city\n description: The city where the library branch is located in.\n - key: /name\n description: The name of the branch.\n logs:\n - name: activity_history\n labels:\n - key: /customer_id\n - name: purchase_history\n logging:\n producer_destinations:\n - monitored_resource: library.googleapis.com/branch\n logs:\n - activity_history\n - purchase_history\n consumer_destinations:\n - monitored_resource: library.googleapis.com/branch\n logs:\n - activity_history", + "description": "Logging configuration of the service. The following example shows how to configure logs to be sent to the producer and consumer projects. In the example, the `activity_history` log is sent to both the producer and consumer projects, whereas the `purchase_history` log is only sent to the producer project. monitored_resources: - type: library.googleapis.com/branch labels: - key: /city description: The city where the library branch is located in. - key: /name description: The name of the branch. logs: - name: activity_history labels: - key: /customer_id - name: purchase_history logging: producer_destinations: - monitored_resource: library.googleapis.com/branch logs: - activity_history - purchase_history consumer_destinations: - monitored_resource: library.googleapis.com/branch logs: - activity_history", "id": "Logging", "properties": { "consumerDestinations": { - "description": "Logging configurations for sending logs to the consumer project.\nThere can be multiple consumer destinations, each one must have a\ndifferent monitored resource type. A log can be used in at most\none consumer destination.", + "description": "Logging configurations for sending logs to the consumer project. There can be multiple consumer destinations, each one must have a different monitored resource type. A log can be used in at most one consumer destination.", "items": { "$ref": "LoggingDestination" }, "type": "array" }, "producerDestinations": { - "description": "Logging configurations for sending logs to the producer project.\nThere can be multiple producer destinations, each one must have a\ndifferent monitored resource type. A log can be used in at most\none producer destination.", + "description": "Logging configurations for sending logs to the producer project. There can be multiple producer destinations, each one must have a different monitored resource type. A log can be used in at most one producer destination.", "items": { "$ref": "LoggingDestination" }, @@ -1743,18 +1806,18 @@ "type": "object" }, "LoggingDestination": { - "description": "Configuration of a specific logging destination (the producer project\nor the consumer project).", + "description": "Configuration of a specific logging destination (the producer project or the consumer project).", "id": "LoggingDestination", "properties": { "logs": { - "description": "Names of the logs to be sent to this destination. Each name must\nbe defined in the Service.logs section. If the log name is\nnot a domain scoped name, it will be automatically prefixed with\nthe service name followed by \"/\".", + "description": "Names of the logs to be sent to this destination. Each name must be defined in the Service.logs section. 
If the log name is not a domain scoped name, it will be automatically prefixed with the service name followed by \"/\".", "items": { "type": "string" }, "type": "array" }, "monitoredResource": { - "description": "The monitored resource type. The type must be defined in the\nService.monitored_resources section.", + "description": "The monitored resource type. The type must be defined in the Service.monitored_resources section.", "type": "string" } }, @@ -1807,7 +1870,7 @@ "type": "object" }, "MetricDescriptor": { - "description": "Defines a metric type and its schema. Once a metric descriptor is created,\ndeleting or altering it stops data collection and makes the metric type's\nexisting data unusable.", + "description": "Defines a metric type and its schema. Once a metric descriptor is created, deleting or altering it stops data collection and makes the metric type's existing data unusable. ", "id": "MetricDescriptor", "properties": { "description": { @@ -1815,11 +1878,11 @@ "type": "string" }, "displayName": { - "description": "A concise name for the metric, which can be displayed in user interfaces.\nUse sentence case without an ending period, for example \"Request count\".\nThis field is optional but it is recommended to be set for any metrics\nassociated with user-visible concepts, such as Quota.", + "description": "A concise name for the metric, which can be displayed in user interfaces. Use sentence case without an ending period, for example \"Request count\". This field is optional but it is recommended to be set for any metrics associated with user-visible concepts, such as Quota.", "type": "string" }, "labels": { - "description": "The set of labels that can be used to describe a specific\ninstance of this metric type. For example, the\n`appengine.googleapis.com/http/server/response_latencies` metric\ntype has a label for the HTTP response code, `response_code`, so\nyou can look at latencies for successful responses or just\nfor responses that failed.", + "description": "The set of labels that can be used to describe a specific instance of this metric type. For example, the `appengine.googleapis.com/http/server/response_latencies` metric type has a label for the HTTP response code, `response_code`, so you can look at latencies for successful responses or just for responses that failed.", "items": { "$ref": "LabelDescriptor" }, @@ -1841,11 +1904,11 @@ "Do not use this default value.", "The feature is not yet implemented. Users can not use it.", "Prelaunch features are hidden from users and are only visible internally.", - "Early Access features are limited to a closed group of testers. To use\nthese features, you must sign up in advance and sign a Trusted Tester\nagreement (which includes confidentiality provisions). These features may\nbe unstable, changed in backward-incompatible ways, and are not\nguaranteed to be released.", - "Alpha is a limited availability test for releases before they are cleared\nfor widespread use. By Alpha, all significant design issues are resolved\nand we are in the process of verifying functionality. Alpha customers\nneed to apply for access, agree to applicable terms, and have their\nprojects whitelisted. 
Alpha releases don’t have to be feature complete,\nno SLAs are provided, and there are no technical support obligations, but\nthey will be far enough along that customers can actually use them in\ntest environments or for limited-use tests -- just like they would in\nnormal production cases.", - "Beta is the point at which we are ready to open a release for any\ncustomer to use. There are no SLA or technical support obligations in a\nBeta release. Products will be complete from a feature perspective, but\nmay have some open outstanding issues. Beta releases are suitable for\nlimited production use cases.", - "GA features are open to all developers and are considered stable and\nfully qualified for production use.", - "Deprecated features are scheduled to be shut down and removed. For more\ninformation, see the “Deprecation Policy” section of our [Terms of\nService](https://cloud.google.com/terms/)\nand the [Google Cloud Platform Subject to the Deprecation\nPolicy](https://cloud.google.com/terms/deprecation) documentation." + "Early Access features are limited to a closed group of testers. To use these features, you must sign up in advance and sign a Trusted Tester agreement (which includes confidentiality provisions). These features may be unstable, changed in backward-incompatible ways, and are not guaranteed to be released.", + "Alpha is a limited availability test for releases before they are cleared for widespread use. By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects whitelisted. Alpha releases don’t have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", + "Beta is the point at which we are ready to open a release for any customer to use. There are no SLA or technical support obligations in a Beta release. Products will be complete from a feature perspective, but may have some open outstanding issues. Beta releases are suitable for limited production use cases.", + "GA features are open to all developers and are considered stable and fully qualified for production use.", + "Deprecated features are scheduled to be shut down and removed. For more information, see the “Deprecation Policy” section of our [Terms of Service](https://cloud.google.com/terms/) and the [Google Cloud Platform Subject to the Deprecation Policy](https://cloud.google.com/terms/deprecation) documentation." ], "type": "string" }, @@ -1854,7 +1917,7 @@ "description": "Optional. Metadata which can be used to guide usage of the metric." }, "metricKind": { - "description": "Whether the metric records instantaneous values, changes to a value, etc.\nSome combinations of `metric_kind` and `value_type` might not be supported.", + "description": "Whether the metric records instantaneous values, changes to a value, etc. Some combinations of `metric_kind` and `value_type` might not be supported.", "enum": [ "METRIC_KIND_UNSPECIFIED", "GAUGE", @@ -1865,12 +1928,12 @@ "Do not use this default value.", "An instantaneous measurement of a value.", "The change in a value during a time interval.", - "A value accumulated over a time interval. 
Cumulative\nmeasurements in a time series should have the same start time\nand increasing end times, until an event resets the cumulative\nvalue to zero and sets a new start time for the following\npoints." + "A value accumulated over a time interval. Cumulative measurements in a time series should have the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." ], "type": "string" }, "monitoredResourceTypes": { - "description": "Read-only. If present, then a time\nseries, which is identified partially by\na metric type and a MonitoredResourceDescriptor, that is associated\nwith this metric type can only be associated with one of the monitored\nresource types listed here.", + "description": "Read-only. If present, then a time series, which is identified partially by a metric type and a MonitoredResourceDescriptor, that is associated with this metric type can only be associated with one of the monitored resource types listed here.", "items": { "type": "string" }, @@ -1881,15 +1944,15 @@ "type": "string" }, "type": { - "description": "The metric type, including its DNS name prefix. The type is not\nURL-encoded. All user-defined metric types have the DNS name\n`custom.googleapis.com` or `external.googleapis.com`. Metric types should\nuse a natural hierarchical grouping. For example:\n\n \"custom.googleapis.com/invoice/paid/amount\"\n \"external.googleapis.com/prometheus/up\"\n \"appengine.googleapis.com/http/server/response_latencies\"", + "description": "The metric type, including its DNS name prefix. The type is not URL-encoded. All user-defined metric types have the DNS name `custom.googleapis.com` or `external.googleapis.com`. Metric types should use a natural hierarchical grouping. For example: \"custom.googleapis.com/invoice/paid/amount\" \"external.googleapis.com/prometheus/up\" \"appengine.googleapis.com/http/server/response_latencies\"", "type": "string" }, "unit": { - "description": "The units in which the metric value is reported. It is only applicable\nif the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit`\ndefines the representation of the stored metric values.\n\nDifferent systems may scale the values to be more easily displayed (so a\nvalue of `0.02KBy` _might_ be displayed as `20By`, and a value of\n`3523KBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is\n`KBy`, then the value of the metric is always in thousands of bytes, no\nmatter how it may be displayed..\n\nIf you want a custom metric to record the exact number of CPU-seconds used\nby a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is\n`s{CPU}` (or equivalently `1s{CPU}` or just `s`). 
If the job uses 12,005\nCPU-seconds, then the value is written as `12005`.\n\nAlternatively, if you want a custom metric to record data in a more\ngranular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is\n`ks{CPU}`, and then write the value `12.005` (which is `12005/1000`),\nor use `Kis{CPU}` and write `11.723` (which is `12005/1024`).\n\nThe supported units are a subset of [The Unified Code for Units of\nMeasure](http://unitsofmeasure.org/ucum.html) standard:\n\n**Basic units (UNIT)**\n\n* `bit` bit\n* `By` byte\n* `s` second\n* `min` minute\n* `h` hour\n* `d` day\n\n**Prefixes (PREFIX)**\n\n* `k` kilo (10^3)\n* `M` mega (10^6)\n* `G` giga (10^9)\n* `T` tera (10^12)\n* `P` peta (10^15)\n* `E` exa (10^18)\n* `Z` zetta (10^21)\n* `Y` yotta (10^24)\n\n* `m` milli (10^-3)\n* `u` micro (10^-6)\n* `n` nano (10^-9)\n* `p` pico (10^-12)\n* `f` femto (10^-15)\n* `a` atto (10^-18)\n* `z` zepto (10^-21)\n* `y` yocto (10^-24)\n\n* `Ki` kibi (2^10)\n* `Mi` mebi (2^20)\n* `Gi` gibi (2^30)\n* `Ti` tebi (2^40)\n* `Pi` pebi (2^50)\n\n**Grammar**\n\nThe grammar also includes these connectors:\n\n* `/` division or ratio (as an infix operator). For examples,\n `kBy/{email}` or `MiBy/10ms` (although you should almost never\n have `/s` in a metric `unit`; rates should always be computed at\n query time from the underlying cumulative or delta value).\n* `.` multiplication or composition (as an infix operator). For\n examples, `GBy.d` or `k{watt}.h`.\n\nThe grammar for a unit is as follows:\n\n Expression = Component { \".\" Component } { \"/\" Component } ;\n\n Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ]\n | Annotation\n | \"1\"\n ;\n\n Annotation = \"{\" NAME \"}\" ;\n\nNotes:\n\n* `Annotation` is just a comment if it follows a `UNIT`. If the annotation\n is used alone, then the unit is equivalent to `1`. For examples,\n `{request}/s == 1/s`, `By{transmitted}/s == By/s`.\n* `NAME` is a sequence of non-blank printable ASCII characters not\n containing `{` or `}`.\n* `1` represents a unitary [dimensionless\n unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such\n as in `1/s`. It is typically used when none of the basic units are\n appropriate. For example, \"new users per day\" can be represented as\n `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new\n users). Alternatively, \"thousands of page views per day\" would be\n represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric\n value of `5.3` would mean \"5300 page views per day\").\n* `%` represents dimensionless value of 1/100, and annotates values giving\n a percentage (so the metric values are typically in the range of 0..100,\n and a metric value `3` means \"3 percent\").\n* `10^2.%` indicates a metric contains a ratio, typically in the range\n 0..1, that will be multiplied by 100 and displayed as a percentage\n (so a metric value `0.03` means \"3 percent\").", + "description": "The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems may scale the values to be more easily displayed (so a value of `0.02KBy` _might_ be displayed as `20By`, and a value of `3523KBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `KBy`, then the value of the metric is always in thousands of bytes, no matter how it may be displayed.. 
If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). The supported units are a subset of [The Unified Code for Units of Measure](http://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component { \".\" Component } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users). Alternatively, \"thousands of page views per day\" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean \"5300 page views per day\"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means \"3 percent\").", "type": "string" }, "valueType": { - "description": "Whether the measurement is an integer, a floating-point number, etc.\nSome combinations of `metric_kind` and `value_type` might not be supported.", + "description": "Whether the measurement is an integer, a floating-point number, etc. 
Some combinations of `metric_kind` and `value_type` might not be supported.", "enum": [ "VALUE_TYPE_UNSPECIFIED", "BOOL", @@ -1901,10 +1964,10 @@ ], "enumDescriptions": [ "Do not use this default value.", - "The value is a boolean.\nThis value type can be used only if the metric kind is `GAUGE`.", + "The value is a boolean. This value type can be used only if the metric kind is `GAUGE`.", "The value is a signed 64-bit integer.", "The value is a double precision floating point number.", - "The value is a text string.\nThis value type can be used only if the metric kind is `GAUGE`.", + "The value is a text string. This value type can be used only if the metric kind is `GAUGE`.", "The value is a `Distribution`.", "The value is money." ], @@ -1918,7 +1981,7 @@ "id": "MetricDescriptorMetadata", "properties": { "ingestDelay": { - "description": "The delay of data points caused by ingestion. Data points older than this\nage are guaranteed to be ingested and available to be read, excluding\ndata loss due to errors.", + "description": "The delay of data points caused by ingestion. Data points older than this age are guaranteed to be ingested and available to be read, excluding data loss due to errors.", "format": "google-duration", "type": "string" }, @@ -1938,16 +2001,16 @@ "Do not use this default value.", "The feature is not yet implemented. Users can not use it.", "Prelaunch features are hidden from users and are only visible internally.", - "Early Access features are limited to a closed group of testers. To use\nthese features, you must sign up in advance and sign a Trusted Tester\nagreement (which includes confidentiality provisions). These features may\nbe unstable, changed in backward-incompatible ways, and are not\nguaranteed to be released.", - "Alpha is a limited availability test for releases before they are cleared\nfor widespread use. By Alpha, all significant design issues are resolved\nand we are in the process of verifying functionality. Alpha customers\nneed to apply for access, agree to applicable terms, and have their\nprojects whitelisted. Alpha releases don’t have to be feature complete,\nno SLAs are provided, and there are no technical support obligations, but\nthey will be far enough along that customers can actually use them in\ntest environments or for limited-use tests -- just like they would in\nnormal production cases.", - "Beta is the point at which we are ready to open a release for any\ncustomer to use. There are no SLA or technical support obligations in a\nBeta release. Products will be complete from a feature perspective, but\nmay have some open outstanding issues. Beta releases are suitable for\nlimited production use cases.", - "GA features are open to all developers and are considered stable and\nfully qualified for production use.", - "Deprecated features are scheduled to be shut down and removed. For more\ninformation, see the “Deprecation Policy” section of our [Terms of\nService](https://cloud.google.com/terms/)\nand the [Google Cloud Platform Subject to the Deprecation\nPolicy](https://cloud.google.com/terms/deprecation) documentation." + "Early Access features are limited to a closed group of testers. To use these features, you must sign up in advance and sign a Trusted Tester agreement (which includes confidentiality provisions). These features may be unstable, changed in backward-incompatible ways, and are not guaranteed to be released.", + "Alpha is a limited availability test for releases before they are cleared for widespread use. 
By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects whitelisted. Alpha releases don’t have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", + "Beta is the point at which we are ready to open a release for any customer to use. There are no SLA or technical support obligations in a Beta release. Products will be complete from a feature perspective, but may have some open outstanding issues. Beta releases are suitable for limited production use cases.", + "GA features are open to all developers and are considered stable and fully qualified for production use.", + "Deprecated features are scheduled to be shut down and removed. For more information, see the “Deprecation Policy” section of our [Terms of Service](https://cloud.google.com/terms/) and the [Google Cloud Platform Subject to the Deprecation Policy](https://cloud.google.com/terms/deprecation) documentation." ], "type": "string" }, "samplePeriod": { - "description": "The sampling period of metric data points. For metrics which are written\nperiodically, consecutive data points are stored at this time interval,\nexcluding data loss due to errors. Metrics with a higher granularity have\na smaller sampling period.", + "description": "The sampling period of metric data points. For metrics which are written periodically, consecutive data points are stored at this time interval, excluding data loss due to errors. Metrics with a higher granularity have a smaller sampling period.", "format": "google-duration", "type": "string" } @@ -1955,7 +2018,7 @@ "type": "object" }, "MetricRule": { - "description": "Bind API methods to metrics. Binding a method to a metric causes that\nmetric's configured quota behaviors to apply to the method call.", + "description": "Bind API methods to metrics. Binding a method to a metric causes that metric's configured quota behaviors to apply to the method call.", "id": "MetricRule", "properties": { "metricCosts": { @@ -1963,18 +2026,18 @@ "format": "int64", "type": "string" }, - "description": "Metrics to update when the selected methods are called, and the associated\ncost applied to each metric.\n\nThe key of the map is the metric name, and the values are the amount\nincreased for the metric against which the quota limits are defined.\nThe value must not be negative.", + "description": "Metrics to update when the selected methods are called, and the associated cost applied to each metric. The key of the map is the metric name, and the values are the amount increased for the metric against which the quota limits are defined. The value must not be negative.", "type": "object" }, "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, "type": "object" }, "Mixin": { - "description": "Declares an API Interface to be included in this interface. 
The including\ninterface must redeclare all the methods from the included interface, but\ndocumentation and options are inherited as follows:\n\n- If after comment and whitespace stripping, the documentation\n string of the redeclared method is empty, it will be inherited\n from the original method.\n\n- Each annotation belonging to the service config (http,\n visibility) which is not set in the redeclared method will be\n inherited.\n\n- If an http annotation is inherited, the path pattern will be\n modified as follows. Any version prefix will be replaced by the\n version of the including interface plus the root path if\n specified.\n\nExample of a simple mixin:\n\n package google.acl.v1;\n service AccessControl {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v1/{resource=**}:getAcl\";\n }\n }\n\n package google.storage.v2;\n service Storage {\n // rpc GetAcl(GetAclRequest) returns (Acl);\n\n // Get a data record.\n rpc GetData(GetDataRequest) returns (Data) {\n option (google.api.http).get = \"/v2/{resource=**}\";\n }\n }\n\nExample of a mixin configuration:\n\n apis:\n - name: google.storage.v2.Storage\n mixins:\n - name: google.acl.v1.AccessControl\n\nThe mixin construct implies that all methods in `AccessControl` are\nalso declared with same name and request/response types in\n`Storage`. A documentation generator or annotation processor will\nsee the effective `Storage.GetAcl` method after inherting\ndocumentation and annotations as follows:\n\n service Storage {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v2/{resource=**}:getAcl\";\n }\n ...\n }\n\nNote how the version in the path pattern changed from `v1` to `v2`.\n\nIf the `root` field in the mixin is specified, it should be a\nrelative path under which inherited HTTP paths are placed. Example:\n\n apis:\n - name: google.storage.v2.Storage\n mixins:\n - name: google.acl.v1.AccessControl\n root: acls\n\nThis implies the following inherited HTTP annotation:\n\n service Storage {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\";\n }\n ...\n }", + "description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. 
rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", "id": "Mixin", "properties": { "name": { @@ -1982,26 +2045,26 @@ "type": "string" }, "root": { - "description": "If non-empty specifies a path under which inherited HTTP paths\nare rooted.", + "description": "If non-empty specifies a path under which inherited HTTP paths are rooted.", "type": "string" } }, "type": "object" }, "MonitoredResourceDescriptor": { - "description": "An object that describes the schema of a MonitoredResource object using a\ntype name and a set of labels. For example, the monitored resource\ndescriptor for Google Compute Engine VM instances has a type of\n`\"gce_instance\"` and specifies the use of the labels `\"instance_id\"` and\n`\"zone\"` to identify particular VM instances.\n\nDifferent APIs can support different monitored resource types. APIs generally\nprovide a `list` method that returns the monitored resource descriptors used\nby the API.", + "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of `\"gce_instance\"` and specifies the use of the labels `\"instance_id\"` and `\"zone\"` to identify particular VM instances. Different APIs can support different monitored resource types. APIs generally provide a `list` method that returns the monitored resource descriptors used by the API. ", "id": "MonitoredResourceDescriptor", "properties": { "description": { - "description": "Optional. A detailed description of the monitored resource type that might\nbe used in documentation.", + "description": "Optional. A detailed description of the monitored resource type that might be used in documentation.", "type": "string" }, "displayName": { - "description": "Optional. A concise name for the monitored resource type that might be\ndisplayed in user interfaces. It should be a Title Cased Noun Phrase,\nwithout any article or other determiners. For example,\n`\"Google Cloud SQL Database\"`.", + "description": "Optional. A concise name for the monitored resource type that might be displayed in user interfaces. It should be a Title Cased Noun Phrase, without any article or other determiners. For example, `\"Google Cloud SQL Database\"`.", "type": "string" }, "labels": { - "description": "Required. 
A set of labels used to describe instances of this monitored\nresource type. For example, an individual Google Cloud SQL database is\nidentified by values for the labels `\"database_id\"` and `\"zone\"`.", + "description": "Required. A set of labels used to describe instances of this monitored resource type. For example, an individual Google Cloud SQL database is identified by values for the labels `\"database_id\"` and `\"zone\"`.", "items": { "$ref": "LabelDescriptor" }, @@ -2023,38 +2086,38 @@ "Do not use this default value.", "The feature is not yet implemented. Users can not use it.", "Prelaunch features are hidden from users and are only visible internally.", - "Early Access features are limited to a closed group of testers. To use\nthese features, you must sign up in advance and sign a Trusted Tester\nagreement (which includes confidentiality provisions). These features may\nbe unstable, changed in backward-incompatible ways, and are not\nguaranteed to be released.", - "Alpha is a limited availability test for releases before they are cleared\nfor widespread use. By Alpha, all significant design issues are resolved\nand we are in the process of verifying functionality. Alpha customers\nneed to apply for access, agree to applicable terms, and have their\nprojects whitelisted. Alpha releases don’t have to be feature complete,\nno SLAs are provided, and there are no technical support obligations, but\nthey will be far enough along that customers can actually use them in\ntest environments or for limited-use tests -- just like they would in\nnormal production cases.", - "Beta is the point at which we are ready to open a release for any\ncustomer to use. There are no SLA or technical support obligations in a\nBeta release. Products will be complete from a feature perspective, but\nmay have some open outstanding issues. Beta releases are suitable for\nlimited production use cases.", - "GA features are open to all developers and are considered stable and\nfully qualified for production use.", - "Deprecated features are scheduled to be shut down and removed. For more\ninformation, see the “Deprecation Policy” section of our [Terms of\nService](https://cloud.google.com/terms/)\nand the [Google Cloud Platform Subject to the Deprecation\nPolicy](https://cloud.google.com/terms/deprecation) documentation." + "Early Access features are limited to a closed group of testers. To use these features, you must sign up in advance and sign a Trusted Tester agreement (which includes confidentiality provisions). These features may be unstable, changed in backward-incompatible ways, and are not guaranteed to be released.", + "Alpha is a limited availability test for releases before they are cleared for widespread use. By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects whitelisted. Alpha releases don’t have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", + "Beta is the point at which we are ready to open a release for any customer to use. There are no SLA or technical support obligations in a Beta release. Products will be complete from a feature perspective, but may have some open outstanding issues. 
Beta releases are suitable for limited production use cases.", + "GA features are open to all developers and are considered stable and fully qualified for production use.", + "Deprecated features are scheduled to be shut down and removed. For more information, see the “Deprecation Policy” section of our [Terms of Service](https://cloud.google.com/terms/) and the [Google Cloud Platform Subject to the Deprecation Policy](https://cloud.google.com/terms/deprecation) documentation." ], "type": "string" }, "name": { - "description": "Optional. The resource name of the monitored resource descriptor:\n`\"projects/{project_id}/monitoredResourceDescriptors/{type}\"` where\n{type} is the value of the `type` field in this object and\n{project_id} is a project ID that provides API-specific context for\naccessing the type. APIs that do not use project information can use the\nresource name format `\"monitoredResourceDescriptors/{type}\"`.", + "description": "Optional. The resource name of the monitored resource descriptor: `\"projects/{project_id}/monitoredResourceDescriptors/{type}\"` where {type} is the value of the `type` field in this object and {project_id} is a project ID that provides API-specific context for accessing the type. APIs that do not use project information can use the resource name format `\"monitoredResourceDescriptors/{type}\"`.", "type": "string" }, "type": { - "description": "Required. The monitored resource type. For example, the type\n`\"cloudsql_database\"` represents databases in Google Cloud SQL.\nThe maximum length of this value is 256 characters.", + "description": "Required. The monitored resource type. For example, the type `\"cloudsql_database\"` represents databases in Google Cloud SQL.", "type": "string" } }, "type": "object" }, "Monitoring": { - "description": "Monitoring configuration of the service.\n\nThe example below shows how to configure monitored resources and metrics\nfor monitoring. In the example, a monitored resource and two metrics are\ndefined. The `library.googleapis.com/book/returned_count` metric is sent\nto both producer and consumer projects, whereas the\n`library.googleapis.com/book/overdue_count` metric is only sent to the\nconsumer project.\n\n monitored_resources:\n - type: library.googleapis.com/branch\n labels:\n - key: /city\n description: The city where the library branch is located in.\n - key: /name\n description: The name of the branch.\n metrics:\n - name: library.googleapis.com/book/returned_count\n metric_kind: DELTA\n value_type: INT64\n labels:\n - key: /customer_id\n - name: library.googleapis.com/book/overdue_count\n metric_kind: GAUGE\n value_type: INT64\n labels:\n - key: /customer_id\n monitoring:\n producer_destinations:\n - monitored_resource: library.googleapis.com/branch\n metrics:\n - library.googleapis.com/book/returned_count\n consumer_destinations:\n - monitored_resource: library.googleapis.com/branch\n metrics:\n - library.googleapis.com/book/returned_count\n - library.googleapis.com/book/overdue_count", + "description": "Monitoring configuration of the service. The example below shows how to configure monitored resources and metrics for monitoring. In the example, a monitored resource and two metrics are defined. The `library.googleapis.com/book/returned_count` metric is sent to both producer and consumer projects, whereas the `library.googleapis.com/book/num_overdue` metric is only sent to the consumer project. 
monitored_resources: - type: library.googleapis.com/Branch display_name: \"Library Branch\" description: \"A branch of a library.\" launch_stage: GA labels: - key: resource_container description: \"The Cloud container (ie. project id) for the Branch.\" - key: location description: \"The location of the library branch.\" - key: branch_id description: \"The id of the branch.\" metrics: - name: library.googleapis.com/book/returned_count display_name: \"Books Returned\" description: \"The count of books that have been returned.\" launch_stage: GA metric_kind: DELTA value_type: INT64 unit: \"1\" labels: - key: customer_id description: \"The id of the customer.\" - name: library.googleapis.com/book/num_overdue display_name: \"Books Overdue\" description: \"The current number of overdue books.\" launch_stage: GA metric_kind: GAUGE value_type: INT64 unit: \"1\" labels: - key: customer_id description: \"The id of the customer.\" monitoring: producer_destinations: - monitored_resource: library.googleapis.com/Branch metrics: - library.googleapis.com/book/returned_count consumer_destinations: - monitored_resource: library.googleapis.com/Branch metrics: - library.googleapis.com/book/returned_count - library.googleapis.com/book/num_overdue", "id": "Monitoring", "properties": { "consumerDestinations": { - "description": "Monitoring configurations for sending metrics to the consumer project.\nThere can be multiple consumer destinations. A monitored resouce type may\nappear in multiple monitoring destinations if different aggregations are\nneeded for different sets of metrics associated with that monitored\nresource type. A monitored resource and metric pair may only be used once\nin the Monitoring configuration.", + "description": "Monitoring configurations for sending metrics to the consumer project. There can be multiple consumer destinations. A monitored resource type may appear in multiple monitoring destinations if different aggregations are needed for different sets of metrics associated with that monitored resource type. A monitored resource and metric pair may only be used once in the Monitoring configuration.", "items": { "$ref": "MonitoringDestination" }, "type": "array" }, "producerDestinations": { - "description": "Monitoring configurations for sending metrics to the producer project.\nThere can be multiple producer destinations. A monitored resouce type may\nappear in multiple monitoring destinations if different aggregations are\nneeded for different sets of metrics associated with that monitored\nresource type. A monitored resource and metric pair may only be used once\nin the Monitoring configuration.", + "description": "Monitoring configurations for sending metrics to the producer project. There can be multiple producer destinations. A monitored resource type may appear in multiple monitoring destinations if different aggregations are needed for different sets of metrics associated with that monitored resource type. 
A monitored resource and metric pair may only be used once in the Monitoring configuration.", "items": { "$ref": "MonitoringDestination" }, @@ -2064,40 +2127,40 @@ "type": "object" }, "MonitoringDestination": { - "description": "Configuration of a specific monitoring destination (the producer project\nor the consumer project).", + "description": "Configuration of a specific monitoring destination (the producer project or the consumer project).", "id": "MonitoringDestination", "properties": { "metrics": { - "description": "Types of the metrics to report to this monitoring destination.\nEach type must be defined in Service.metrics section.", + "description": "Types of the metrics to report to this monitoring destination. Each type must be defined in Service.metrics section.", "items": { "type": "string" }, "type": "array" }, "monitoredResource": { - "description": "The monitored resource type. The type must be defined in\nService.monitored_resources section.", + "description": "The monitored resource type. The type must be defined in Service.monitored_resources section.", "type": "string" } }, "type": "object" }, "OAuthRequirements": { - "description": "OAuth scopes are a way to define data and permissions on data. For example,\nthere are scopes defined for \"Read-only access to Google Calendar\" and\n\"Access to Cloud Platform\". Users can consent to a scope for an application,\ngiving it permission to access that data on their behalf.\n\nOAuth scope specifications should be fairly coarse grained; a user will need\nto see and understand the text description of what your scope means.\n\nIn most cases: use one or at most two OAuth scopes for an entire family of\nproducts. If your product has multiple APIs, you should probably be sharing\nthe OAuth scope across all of those APIs.\n\nWhen you need finer grained OAuth consent screens: talk with your product\nmanagement about how developers will use them in practice.\n\nPlease note that even though each of the canonical scopes is enough for a\nrequest to be accepted and passed to the backend, a request can still fail\ndue to the backend requiring additional scopes or permissions.", + "description": "OAuth scopes are a way to define data and permissions on data. For example, there are scopes defined for \"Read-only access to Google Calendar\" and \"Access to Cloud Platform\". Users can consent to a scope for an application, giving it permission to access that data on their behalf. OAuth scope specifications should be fairly coarse grained; a user will need to see and understand the text description of what your scope means. In most cases: use one or at most two OAuth scopes for an entire family of products. If your product has multiple APIs, you should probably be sharing the OAuth scope across all of those APIs. When you need finer grained OAuth consent screens: talk with your product management about how developers will use them in practice. Please note that even though each of the canonical scopes is enough for a request to be accepted and passed to the backend, a request can still fail due to the backend requiring additional scopes or permissions.", "id": "OAuthRequirements", "properties": { "canonicalScopes": { - "description": "The list of publicly documented OAuth scopes that are allowed access. 
An\nOAuth token containing any of these scopes will be accepted.\n\nExample:\n\n canonical_scopes: https://www.googleapis.com/auth/calendar,\n https://www.googleapis.com/auth/calendar.read", + "description": "The list of publicly documented OAuth scopes that are allowed access. An OAuth token containing any of these scopes will be accepted. Example: canonical_scopes: https://www.googleapis.com/auth/calendar, https://www.googleapis.com/auth/calendar.read", "type": "string" } }, "type": "object" }, "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", "properties": { "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", "type": "boolean" }, "error": { @@ -2109,11 +2172,11 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", "type": "object" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, "response": { @@ -2121,7 +2184,7 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. 
For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", "type": "object" } }, @@ -2132,7 +2195,7 @@ "id": "OperationMetadata", "properties": { "resourceNames": { - "description": "The full name of the resources that this operation is directly\nassociated with.", + "description": "The full name of the resources that this operation is directly associated with.", "items": { "type": "string" }, @@ -2142,11 +2205,11 @@ "type": "object" }, "Option": { - "description": "A protocol buffer option, which can be attached to a message, field,\nenumeration, etc.", + "description": "A protocol buffer option, which can be attached to a message, field, enumeration, etc.", "id": "Option", "properties": { "name": { - "description": "The option's name. For protobuf built-in options (options defined in\ndescriptor.proto), this is the short name. For example, `\"map_entry\"`.\nFor custom options, it should be the fully-qualified name. For example,\n`\"google.api.http\"`.", + "description": "The option's name. For protobuf built-in options (options defined in descriptor.proto), this is the short name. For example, `\"map_entry\"`. For custom options, it should be the fully-qualified name. For example, `\"google.api.http\"`.", "type": "string" }, "value": { @@ -2154,26 +2217,26 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The option's value packed in an Any message. If the value is a primitive,\nthe corresponding wrapper type defined in google/protobuf/wrappers.proto\nshould be used. If the value is an enum, it should be stored as an int32\nvalue using the google.protobuf.Int32Value type.", + "description": "The option's value packed in an Any message. If the value is a primitive, the corresponding wrapper type defined in google/protobuf/wrappers.proto should be used. If the value is an enum, it should be stored as an int32 value using the google.protobuf.Int32Value type.", "type": "object" } }, "type": "object" }, "Page": { - "description": "Represents a documentation page. A page can contain subpages to represent\nnested documentation set structure.", + "description": "Represents a documentation page. A page can contain subpages to represent nested documentation set structure.", "id": "Page", "properties": { "content": { - "description": "The Markdown content of the page. You can use \u003ccode\u003e\u0026#40;== include {path}\n==\u0026#41;\u003c/code\u003e to include content from a Markdown file.", + "description": "The Markdown content of the page. You can use (== include {path} ==) to include content from a Markdown file.", "type": "string" }, "name": { - "description": "The name of the page. It will be used as an identity of the page to\ngenerate URI of the page, text of the link to this page in navigation,\netc. The full page name (start from the root page name to this page\nconcatenated with `.`) can be used as reference to the page in your\ndocumentation. For example:\n\u003cpre\u003e\u003ccode\u003epages:\n- name: Tutorial\n content: \u0026#40;== include tutorial.md ==\u0026#41;\n subpages:\n - name: Java\n content: \u0026#40;== include tutorial_java.md ==\u0026#41;\n\u003c/code\u003e\u003c/pre\u003e\nYou can reference `Java` page using Markdown reference link syntax:\n`Java`.", + "description": "The name of the page. It will be used as an identity of the page to generate URI of the page, text of the link to this page in navigation, etc. 
The full page name (start from the root page name to this page concatenated with `.`) can be used as reference to the page in your documentation. For example: pages: - name: Tutorial content: (== include tutorial.md ==) subpages: - name: Java content: (== include tutorial_java.md ==) You can reference `Java` page using Markdown reference link syntax: `Java`.", "type": "string" }, "subpages": { - "description": "Subpages of this page. The order of subpages specified here will be\nhonored in the generated docset.", + "description": "Subpages of this page. The order of subpages specified here will be honored in the generated docset.", "items": { "$ref": "Page" }, @@ -2183,7 +2246,7 @@ "type": "object" }, "Quota": { - "description": "Quota configuration helps to achieve fairness and budgeting in service\nusage.\n\nThe metric based quota configuration works this way:\n- The service configuration defines a set of metrics.\n- For API calls, the quota.metric_rules maps methods to metrics with\n corresponding costs.\n- The quota.limits defines limits on the metrics, which will be used for\n quota checks at runtime.\n\nAn example quota configuration in yaml format:\n\n quota:\n limits:\n\n - name: apiWriteQpsPerProject\n metric: library.googleapis.com/write_calls\n unit: \"1/min/{project}\" # rate limit for consumer projects\n values:\n STANDARD: 10000\n\n\n # The metric rules bind all methods to the read_calls metric,\n # except for the UpdateBook and DeleteBook methods. These two methods\n # are mapped to the write_calls metric, with the UpdateBook method\n # consuming at twice rate as the DeleteBook method.\n metric_rules:\n - selector: \"*\"\n metric_costs:\n library.googleapis.com/read_calls: 1\n - selector: google.example.library.v1.LibraryService.UpdateBook\n metric_costs:\n library.googleapis.com/write_calls: 2\n - selector: google.example.library.v1.LibraryService.DeleteBook\n metric_costs:\n library.googleapis.com/write_calls: 1\n\n Corresponding Metric definition:\n\n metrics:\n - name: library.googleapis.com/read_calls\n display_name: Read requests\n metric_kind: DELTA\n value_type: INT64\n\n - name: library.googleapis.com/write_calls\n display_name: Write requests\n metric_kind: DELTA\n value_type: INT64\n\n", + "description": "Quota configuration helps to achieve fairness and budgeting in service usage. The metric based quota configuration works this way: - The service configuration defines a set of metrics. - For API calls, the quota.metric_rules maps methods to metrics with corresponding costs. - The quota.limits defines limits on the metrics, which will be used for quota checks at runtime. An example quota configuration in yaml format: quota: limits: - name: apiWriteQpsPerProject metric: library.googleapis.com/write_calls unit: \"1/min/{project}\" # rate limit for consumer projects values: STANDARD: 10000 # The metric rules bind all methods to the read_calls metric, # except for the UpdateBook and DeleteBook methods. These two methods # are mapped to the write_calls metric, with the UpdateBook method # consuming at twice rate as the DeleteBook method. 
metric_rules: - selector: \"*\" metric_costs: library.googleapis.com/read_calls: 1 - selector: google.example.library.v1.LibraryService.UpdateBook metric_costs: library.googleapis.com/write_calls: 2 - selector: google.example.library.v1.LibraryService.DeleteBook metric_costs: library.googleapis.com/write_calls: 1 Corresponding Metric definition: metrics: - name: library.googleapis.com/read_calls display_name: Read requests metric_kind: DELTA value_type: INT64 - name: library.googleapis.com/write_calls display_name: Write requests metric_kind: DELTA value_type: INT64 ", "id": "Quota", "properties": { "limits": { @@ -2194,7 +2257,7 @@ "type": "array" }, "metricRules": { - "description": "List of `MetricRule` definitions, each one mapping a selected method to one\nor more metrics.", + "description": "List of `MetricRule` definitions, each one mapping a selected method to one or more metrics.", "items": { "$ref": "MetricRule" }, @@ -2204,46 +2267,46 @@ "type": "object" }, "QuotaLimit": { - "description": "`QuotaLimit` defines a specific limit that applies over a specified duration\nfor a limit type. There can be at most one limit for a duration and limit\ntype combination defined within a `QuotaGroup`.", + "description": "`QuotaLimit` defines a specific limit that applies over a specified duration for a limit type. There can be at most one limit for a duration and limit type combination defined within a `QuotaGroup`.", "id": "QuotaLimit", "properties": { "defaultLimit": { - "description": "Default number of tokens that can be consumed during the specified\nduration. This is the number of tokens assigned when a client\napplication developer activates the service for his/her project.\n\nSpecifying a value of 0 will block all requests. This can be used if you\nare provisioning quota to selected consumers and blocking others.\nSimilarly, a value of -1 will indicate an unlimited quota. No other\nnegative values are allowed.\n\nUsed by group-based quotas only.", + "description": "Default number of tokens that can be consumed during the specified duration. This is the number of tokens assigned when a client application developer activates the service for his/her project. Specifying a value of 0 will block all requests. This can be used if you are provisioning quota to selected consumers and blocking others. Similarly, a value of -1 will indicate an unlimited quota. No other negative values are allowed. Used by group-based quotas only.", "format": "int64", "type": "string" }, "description": { - "description": "Optional. User-visible, extended description for this quota limit.\nShould be used only when more context is needed to understand this limit\nthan provided by the limit's display name (see: `display_name`).", + "description": "Optional. User-visible, extended description for this quota limit. Should be used only when more context is needed to understand this limit than provided by the limit's display name (see: `display_name`).", "type": "string" }, "displayName": { - "description": "User-visible display name for this limit.\nOptional. If not set, the UI will provide a default display name based on\nthe quota configuration. This field can be used to override the default\ndisplay name generated from the configuration.", + "description": "User-visible display name for this limit. Optional. If not set, the UI will provide a default display name based on the quota configuration. 
This field can be used to override the default display name generated from the configuration.", "type": "string" }, "duration": { - "description": "Duration of this limit in textual notation. Must be \"100s\" or \"1d\".\n\nUsed by group-based quotas only.", + "description": "Duration of this limit in textual notation. Must be \"100s\" or \"1d\". Used by group-based quotas only.", "type": "string" }, "freeTier": { - "description": "Free tier value displayed in the Developers Console for this limit.\nThe free tier is the number of tokens that will be subtracted from the\nbilled amount when billing is enabled.\nThis field can only be set on a limit with duration \"1d\", in a billable\ngroup; it is invalid on any other limit. If this field is not set, it\ndefaults to 0, indicating that there is no free tier for this service.\n\nUsed by group-based quotas only.", + "description": "Free tier value displayed in the Developers Console for this limit. The free tier is the number of tokens that will be subtracted from the billed amount when billing is enabled. This field can only be set on a limit with duration \"1d\", in a billable group; it is invalid on any other limit. If this field is not set, it defaults to 0, indicating that there is no free tier for this service. Used by group-based quotas only.", "format": "int64", "type": "string" }, "maxLimit": { - "description": "Maximum number of tokens that can be consumed during the specified\nduration. Client application developers can override the default limit up\nto this maximum. If specified, this value cannot be set to a value less\nthan the default limit. If not specified, it is set to the default limit.\n\nTo allow clients to apply overrides with no upper bound, set this to -1,\nindicating unlimited maximum quota.\n\nUsed by group-based quotas only.", + "description": "Maximum number of tokens that can be consumed during the specified duration. Client application developers can override the default limit up to this maximum. If specified, this value cannot be set to a value less than the default limit. If not specified, it is set to the default limit. To allow clients to apply overrides with no upper bound, set this to -1, indicating unlimited maximum quota. Used by group-based quotas only.", "format": "int64", "type": "string" }, "metric": { - "description": "The name of the metric this quota limit applies to. The quota limits with\nthe same metric will be checked together during runtime. The metric must be\ndefined within the service config.", + "description": "The name of the metric this quota limit applies to. The quota limits with the same metric will be checked together during runtime. The metric must be defined within the service config.", "type": "string" }, "name": { - "description": "Name of the quota limit.\n\nThe name must be provided, and it must be unique within the service. The\nname can only include alphanumeric characters as well as '-'.\n\nThe maximum length of the limit name is 64 characters.", + "description": "Name of the quota limit. The name must be provided, and it must be unique within the service. The name can only include alphanumeric characters as well as '-'. The maximum length of the limit name is 64 characters.", "type": "string" }, "unit": { - "description": "Specify the unit of the quota limit. It uses the same syntax as\nMetric.unit. 
The supported unit kinds are determined by the quota\nbackend system.\n\nHere are some examples:\n* \"1/min/{project}\" for quota per minute per project.\n\nNote: the order of unit components is insignificant.\nThe \"1\" at the beginning is required to follow the metric unit syntax.", + "description": "Specify the unit of the quota limit. It uses the same syntax as Metric.unit. The supported unit kinds are determined by the quota backend system. Here are some examples: * \"1/min/{project}\" for quota per minute per project. Note: the order of unit components is insignificant. The \"1\" at the beginning is required to follow the metric unit syntax.", "type": "string" }, "values": { @@ -2251,7 +2314,7 @@ "format": "int64", "type": "string" }, - "description": "Tiered limit values. You must specify this as a key:value pair, with an\ninteger value that is the maximum number of requests allowed for the\nspecified unit. Currently only STANDARD is supported.", + "description": "Tiered limit values. You must specify this as a key:value pair, with an integer value that is the maximum number of requests allowed for the specified unit. Currently only STANDARD is supported.", "type": "object" } }, @@ -2261,54 +2324,58 @@ "description": "A quota override", "id": "QuotaOverride", "properties": { + "adminOverrideAncestor": { + "description": "The resource name of the ancestor that requested the override. For example: \"organizations/12345\" or \"folders/67890\". Used by admin overrides only.", + "type": "string" + }, "dimensions": { "additionalProperties": { "type": "string" }, - "description": "If this map is nonempty, then this override applies only to specific values\nfor dimensions defined in the limit unit.\n\nFor example, an override on a limit with the unit 1/{project}/{region}\ncould contain an entry with the key \"region\" and the value \"us-east-1\";\nthe override is only applied to quota consumed in that region.\n\nThis map has the following restrictions:\n\n* Keys that are not defined in the limit's unit are not valid keys.\n Any string appearing in {brackets} in the unit (besides {project} or\n {user}) is a defined key.\n* \"project\" is not a valid key; the project is already specified in\n the parent resource name.\n* \"user\" is not a valid key; the API does not support quota overrides\n that apply only to a specific user.\n* If \"region\" appears as a key, its value must be a valid Cloud region.\n* If \"zone\" appears as a key, its value must be a valid Cloud zone.\n* If any valid key other than \"region\" or \"zone\" appears in the map, then\n all valid keys other than \"region\" or \"zone\" must also appear in the\n map.", + "description": "If this map is nonempty, then this override applies only to specific values for dimensions defined in the limit unit. For example, an override on a limit with the unit 1/{project}/{region} could contain an entry with the key \"region\" and the value \"us-east-1\"; the override is only applied to quota consumed in that region. This map has the following restrictions: * Keys that are not defined in the limit's unit are not valid keys. Any string appearing in {brackets} in the unit (besides {project} or {user}) is a defined key. * \"project\" is not a valid key; the project is already specified in the parent resource name. * \"user\" is not a valid key; the API does not support quota overrides that apply only to a specific user. * If \"region\" appears as a key, its value must be a valid Cloud region. 
* If \"zone\" appears as a key, its value must be a valid Cloud zone. * If any valid key other than \"region\" or \"zone\" appears in the map, then all valid keys other than \"region\" or \"zone\" must also appear in the map.", "type": "object" }, "metric": { - "description": "The name of the metric to which this override applies.\n\nAn example name would be:\n`compute.googleapis.com/cpus`", + "description": "The name of the metric to which this override applies. An example name would be: `compute.googleapis.com/cpus`", "type": "string" }, "name": { - "description": "The resource name of the override.\nThis name is generated by the server when the override is created.\n\nExample names would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminOverrides/4a3f2c1d`\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/consumerOverrides/4a3f2c1d`\n\nThe resource name is intended to be opaque and should not be parsed for\nits component strings, since its representation could change in the future.", + "description": "The resource name of the override. This name is generated by the server when the override is created. Example names would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminOverrides/4a3f2c1d` `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/consumerOverrides/4a3f2c1d` The resource name is intended to be opaque and should not be parsed for its component strings, since its representation could change in the future.", "type": "string" }, "overrideValue": { - "description": "The overriding quota limit value.\nCan be any nonnegative integer, or -1 (unlimited quota).", + "description": "The overriding quota limit value. Can be any nonnegative integer, or -1 (unlimited quota).", "format": "int64", "type": "string" }, "unit": { - "description": "The limit unit of the limit to which this override applies.\n\nAn example unit would be:\n`1/{project}/{region}`\nNote that `{project}` and `{region}` are not placeholders in this example;\nthe literal characters `{` and `}` occur in the string.", + "description": "The limit unit of the limit to which this override applies. An example unit would be: `1/{project}/{region}` Note that `{project}` and `{region}` are not placeholders in this example; the literal characters `{` and `}` occur in the string.", "type": "string" } }, "type": "object" }, "ServiceIdentity": { - "description": "Service identity for a service. This is the identity that service producer\nshould use to access consumer resources.", + "description": "Service identity for a service. This is the identity that service producer should use to access consumer resources.", "id": "ServiceIdentity", "properties": { "email": { - "description": "The email address of the service account that a service producer would use\nto access consumer resources.", + "description": "The email address of the service account that a service producer would use to access consumer resources.", "type": "string" }, "uniqueId": { - "description": "The unique and stable id of the service account.\nhttps://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts#ServiceAccount", + "description": "The unique and stable id of the service account. 
https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts#ServiceAccount", "type": "string" } }, "type": "object" }, "SourceContext": { - "description": "`SourceContext` represents information about the source of a\nprotobuf element, like the file in which it is defined.", + "description": "`SourceContext` represents information about the source of a protobuf element, like the file in which it is defined.", "id": "SourceContext", "properties": { "fileName": { - "description": "The path-qualified name of the .proto file that contained the associated\nprotobuf element. For example: `\"google/protobuf/source_context.proto\"`.", + "description": "The path-qualified name of the .proto file that contained the associated protobuf element. For example: `\"google/protobuf/source_context.proto\"`.", "type": "string" } }, @@ -2333,7 +2400,7 @@ "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -2342,7 +2409,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -2353,18 +2420,18 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, "type": "object" }, "SystemParameter": { - "description": "Define a parameter's name and location. The parameter may be passed as either\nan HTTP header or a URL query parameter, and if both are passed the behavior\nis implementation-dependent.", + "description": "Define a parameter's name and location. The parameter may be passed as either an HTTP header or a URL query parameter, and if both are passed the behavior is implementation-dependent.", "id": "SystemParameter", "properties": { "httpHeader": { - "description": "Define the HTTP header name to use for the parameter. It is case\ninsensitive.", + "description": "Define the HTTP header name to use for the parameter. 
It is case insensitive.", "type": "string" }, "name": { @@ -2372,36 +2439,36 @@ "type": "string" }, "urlQueryParameter": { - "description": "Define the URL query parameter name to use for the parameter. It is case\nsensitive.", + "description": "Define the URL query parameter name to use for the parameter. It is case sensitive.", "type": "string" } }, "type": "object" }, "SystemParameterRule": { - "description": "Define a system parameter rule mapping system parameter definitions to\nmethods.", + "description": "Define a system parameter rule mapping system parameter definitions to methods.", "id": "SystemParameterRule", "properties": { "parameters": { - "description": "Define parameters. Multiple names may be defined for a parameter.\nFor a given method call, only one of them should be used. If multiple\nnames are used the behavior is implementation-dependent.\nIf none of the specified names are present the behavior is\nparameter-dependent.", + "description": "Define parameters. Multiple names may be defined for a parameter. For a given method call, only one of them should be used. If multiple names are used the behavior is implementation-dependent. If none of the specified names are present the behavior is parameter-dependent.", "items": { "$ref": "SystemParameter" }, "type": "array" }, "selector": { - "description": "Selects the methods to which this rule applies. Use '*' to indicate all\nmethods in all APIs.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Use '*' to indicate all methods in all APIs. Refer to selector for syntax details.", "type": "string" } }, "type": "object" }, "SystemParameters": { - "description": "### System parameter configuration\n\nA system parameter is a special kind of parameter defined by the API\nsystem, not by an individual API. It is typically mapped to an HTTP header\nand/or a URL query parameter. This configuration specifies which methods\nchange the names of the system parameters.", + "description": "### System parameter configuration A system parameter is a special kind of parameter defined by the API system, not by an individual API. It is typically mapped to an HTTP header and/or a URL query parameter. This configuration specifies which methods change the names of the system parameters.", "id": "SystemParameters", "properties": { "rules": { - "description": "Define system parameters.\n\nThe parameters defined here will override the default parameters\nimplemented by the system. If this field is missing from the service\nconfig, default system parameters will be used. Default system parameters\nand names is implementation-dependent.\n\nExample: define api key for all methods\n\n system_parameters\n rules:\n - selector: \"*\"\n parameters:\n - name: api_key\n url_query_parameter: api_key\n\n\nExample: define 2 api key names for a specific method.\n\n system_parameters\n rules:\n - selector: \"/ListShelves\"\n parameters:\n - name: api_key\n http_header: Api-Key1\n - name: api_key\n http_header: Api-Key2\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "Define system parameters. The parameters defined here will override the default parameters implemented by the system. If this field is missing from the service config, default system parameters will be used. Default system parameters and names is implementation-dependent. 
Example: define api key for all methods system_parameters rules: - selector: \"*\" parameters: - name: api_key url_query_parameter: api_key Example: define 2 api key names for a specific method. system_parameters rules: - selector: \"/ListShelves\" parameters: - name: api_key http_header: Api-Key1 - name: api_key http_header: Api-Key2 **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "SystemParameterRule" }, @@ -2463,18 +2530,18 @@ "id": "Usage", "properties": { "producerNotificationChannel": { - "description": "The full resource name of a channel used for sending notifications to the\nservice producer.\n\nGoogle Service Management currently only supports\n[Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a notification\nchannel. To use Google Cloud Pub/Sub as the channel, this must be the name\nof a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format\ndocumented in https://cloud.google.com/pubsub/docs/overview.", + "description": "The full resource name of a channel used for sending notifications to the service producer. Google Service Management currently only supports [Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a notification channel. To use Google Cloud Pub/Sub as the channel, this must be the name of a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format documented in https://cloud.google.com/pubsub/docs/overview.", "type": "string" }, "requirements": { - "description": "Requirements that must be satisfied before a consumer project can use the\nservice. Each requirement is of the form \u003cservice.name\u003e/\u003crequirement-id\u003e;\nfor example 'serviceusage.googleapis.com/billing-enabled'.", + "description": "Requirements that must be satisfied before a consumer project can use the service. Each requirement is of the form /; for example 'serviceusage.googleapis.com/billing-enabled'.", "items": { "type": "string" }, "type": "array" }, "rules": { - "description": "A list of usage rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of usage rules that apply to individual API methods. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "UsageRule" }, @@ -2488,19 +2555,19 @@ "type": "object" }, "UsageRule": { - "description": "Usage configuration rules for the service.\n\nNOTE: Under development.\n\n\nUse this rule to configure unregistered calls for the service. Unregistered\ncalls are calls that do not contain consumer project identity.\n(Example: calls that do not contain an API key).\nBy default, API methods do not allow unregistered calls, and each method call\nmust be identified by a consumer project identity. Use this rule to\nallow/disallow unregistered calls.\n\nExample of an API that wants to allow unregistered calls for entire service.\n\n usage:\n rules:\n - selector: \"*\"\n allow_unregistered_calls: true\n\nExample of a method that wants to allow unregistered calls.\n\n usage:\n rules:\n - selector: \"google.example.library.v1.LibraryService.CreateBook\"\n allow_unregistered_calls: true", + "description": "Usage configuration rules for the service. NOTE: Under development. Use this rule to configure unregistered calls for the service. Unregistered calls are calls that do not contain consumer project identity. (Example: calls that do not contain an API key). 
By default, API methods do not allow unregistered calls, and each method call must be identified by a consumer project identity. Use this rule to allow/disallow unregistered calls. Example of an API that wants to allow unregistered calls for entire service. usage: rules: - selector: \"*\" allow_unregistered_calls: true Example of a method that wants to allow unregistered calls. usage: rules: - selector: \"google.example.library.v1.LibraryService.CreateBook\" allow_unregistered_calls: true", "id": "UsageRule", "properties": { "allowUnregisteredCalls": { - "description": "If true, the selected method allows unregistered calls, e.g. calls\nthat don't identify any user or application.", + "description": "If true, the selected method allows unregistered calls, e.g. calls that don't identify any user or application.", "type": "boolean" }, "selector": { - "description": "Selects the methods to which this rule applies. Use '*' to indicate all\nmethods in all APIs.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Use '*' to indicate all methods in all APIs. Refer to selector for syntax details.", "type": "string" }, "skipServiceControl": { - "description": "If true, the selected method should skip service control and the control\nplane features, such as quota and billing, will not be available.\nThis flag is used by Google Cloud Endpoints to bypass checks for internal\nmethods, such as service health check methods.", + "description": "If true, the selected method should skip service control and the control plane features, such as quota and billing, will not be available. This flag is used by Google Cloud Endpoints to bypass checks for internal methods, such as service health check methods.", "type": "boolean" } }, diff --git a/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go b/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go index 2aa1fd88e04..308c2f785cd 100644 --- a/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go +++ b/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go @@ -79,6 +79,7 @@ const apiId = "serviceusage:v1" const apiName = "serviceusage" const apiVersion = "v1" const basePath = "https://serviceusage.googleapis.com/" +const mtlsBasePath = "https://serviceusage.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -102,6 +103,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -166,22 +168,76 @@ type ServicesService struct { s *Service } -// Api: Api is a light-weight descriptor for an API -// Interface. -// +// AdminQuotaPolicy: Quota policy created by quota administrator. +type AdminQuotaPolicy struct { + // Container: The cloud resource container at which the quota policy is + // created. The format is {container_type}/{container_number} + Container string `json:"container,omitempty"` + + // Dimensions: If this map is nonempty, then this policy applies only + // to specific values for dimensions defined in the limit unit. 
For + // example, an policy on a limit with the unit 1/{project}/{region} + // could contain an entry with the key "region" and the value + // "us-east-1"; the policy is only applied to quota consumed in that + // region. This map has the following restrictions: * If "region" + // appears as a key, its value must be a valid Cloud region. * If "zone" + // appears as a key, its value must be a valid Cloud zone. * Keys other + // than "region" or "zone" are not valid. + Dimensions map[string]string `json:"dimensions,omitempty"` + + // Metric: The name of the metric to which this policy applies. An + // example name would be: `compute.googleapis.com/cpus` + Metric string `json:"metric,omitempty"` + + // Name: The resource name of the policy. This name is generated by the + // server when the policy is created. Example names would be: + // `organizations/123/services/compute.googleapis.com/consumerQuotaMetric + // s/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminQuotaP + // olicies/4a3f2c1d` + Name string `json:"name,omitempty"` + + // PolicyValue: The quota policy value. Can be any nonnegative integer, + // or -1 (unlimited quota). + PolicyValue int64 `json:"policyValue,omitempty,string"` + + // Unit: The limit unit of the limit to which this policy applies. An + // example unit would be: `1/{project}/{region}` Note that `{project}` + // and `{region}` are not placeholders in this example; the literal + // characters `{` and `}` occur in the string. + Unit string `json:"unit,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Container") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Container") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AdminQuotaPolicy) MarshalJSON() ([]byte, error) { + type NoMethod AdminQuotaPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Api: Api is a light-weight descriptor for an API Interface. // Interfaces are also described as "protocol buffer services" in some -// contexts, -// such as by the "service" keyword in a .proto file, but they are -// different -// from API Services, which represent a concrete implementation of an -// interface -// as opposed to simply a description of methods and bindings. They are -// also -// sometimes simply referred to as "APIs" in other contexts, such as the -// name of -// this message itself. See -// https://cloud.google.com/apis/design/glossary for -// detailed terminology. +// contexts, such as by the "service" keyword in a .proto file, but they +// are different from API Services, which represent a concrete +// implementation of an interface as opposed to simply a description of +// methods and bindings. 
They are also sometimes simply referred to as +// "APIs" in other contexts, such as the name of this message itself. +// See https://cloud.google.com/apis/design/glossary for detailed +// terminology. type Api struct { // Methods: The methods of this interface, in unspecified order. Methods []*Method `json:"methods,omitempty"` @@ -190,16 +246,14 @@ type Api struct { Mixins []*Mixin `json:"mixins,omitempty"` // Name: The fully qualified name of this interface, including package - // name - // followed by the interface's simple name. + // name followed by the interface's simple name. Name string `json:"name,omitempty"` // Options: Any metadata attached to the interface. Options []*Option `json:"options,omitempty"` // SourceContext: Source context for the protocol buffer service - // represented by this - // message. + // represented by this message. SourceContext *SourceContext `json:"sourceContext,omitempty"` // Syntax: The source syntax of the service. @@ -210,35 +264,20 @@ type Api struct { Syntax string `json:"syntax,omitempty"` // Version: A version string for this interface. If specified, must have - // the form - // `major-version.minor-version`, as in `1.10`. If the minor version - // is - // omitted, it defaults to zero. If the entire version field is empty, - // the - // major version is derived from the package name, as outlined below. If - // the - // field is not empty, the version in the package name will be verified - // to be - // consistent with what is provided here. - // - // The versioning schema uses [semantic - // versioning](http://semver.org) where the major version - // number - // indicates a breaking change and the minor version an - // additive, - // non-breaking change. Both version numbers are signals to users - // what to expect from different versions, and should be - // carefully - // chosen based on the product plan. - // - // The major version is also reflected in the package name of - // the - // interface, which must end in `v`, as - // in - // `google.feature.v1`. For major versions 0 and 1, the suffix can - // be omitted. Zero major versions must only be used for - // experimental, non-GA interfaces. - // + // the form `major-version.minor-version`, as in `1.10`. If the minor + // version is omitted, it defaults to zero. If the entire version field + // is empty, the major version is derived from the package name, as + // outlined below. If the field is not empty, the version in the package + // name will be verified to be consistent with what is provided here. + // The versioning schema uses [semantic versioning](http://semver.org) + // where the major version number indicates a breaking change and the + // minor version an additive, non-breaking change. Both version numbers + // are signals to users what to expect from different versions, and + // should be carefully chosen based on the product plan. The major + // version is also reflected in the package name of the interface, which + // must end in `v`, as in `google.feature.v1`. For major versions 0 and + // 1, the suffix can be omitted. Zero major versions must only be used + // for experimental, non-GA interfaces. Version string `json:"version,omitempty"` // ForceSendFields is a list of field names (e.g. "Methods") to @@ -265,97 +304,57 @@ func (s *Api) MarshalJSON() ([]byte, error) { } // AuthProvider: Configuration for an authentication provider, including -// support for -// [JSON Web -// Token -// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-tok -// en-32). 
+// support for [JSON Web Token +// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32) +// . type AuthProvider struct { - // Audiences: The list of - // JWT - // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web- - // token-32#section-4.1.3). - // that are allowed to access. A JWT containing any of these audiences - // will - // be accepted. When this setting is absent, JWTs with audiences: - // - "https://[service.name]/[google.protobuf.Api.name]" - // - "https://[service.name]/" - // will be accepted. - // For example, if no audiences are in the setting, LibraryService API - // will - // accept JWTs with the following audiences: - // - - // - // https://library-example.googleapis.com/google.example.library.v1.LibraryService - // - https://library-example.googleapis.com/ - // - // Example: - // - // audiences: bookstore_android.apps.googleusercontent.com, - // bookstore_web.apps.googleusercontent.com + // Audiences: The list of JWT + // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-toke + // n-32#section-4.1.3). that are allowed to access. A JWT containing any + // of these audiences will be accepted. When this setting is absent, + // JWTs with audiences: - + // "https://[service.name]/[google.protobuf.Api.name]" - + // "https://[service.name]/" will be accepted. For example, if no + // audiences are in the setting, LibraryService API will accept JWTs + // with the following audiences: - + // https://library-example.googleapis.com/google.example.library.v1.LibraryService - https://library-example.googleapis.com/ Example: audiences: bookstore_android.apps.googleusercontent.com, + // bookstore_web.apps.googleusercontent.com Audiences string `json:"audiences,omitempty"` // AuthorizationUrl: Redirect URL if JWT token is required but not - // present or is expired. - // Implement authorizationUrl of securityDefinitions in OpenAPI spec. + // present or is expired. Implement authorizationUrl of + // securityDefinitions in OpenAPI spec. AuthorizationUrl string `json:"authorizationUrl,omitempty"` // Id: The unique identifier of the auth provider. It will be referred - // to by - // `AuthRequirement.provider_id`. - // - // Example: "bookstore_auth". + // to by `AuthRequirement.provider_id`. Example: "bookstore_auth". Id string `json:"id,omitempty"` - // Issuer: Identifies the principal that issued the JWT. - // See - // https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#sec - // tion-4.1.1 - // Usually a URL or an email address. - // - // Example: https://securetoken.google.com - // Example: 1234567-compute@developer.gserviceaccount.com + // Issuer: Identifies the principal that issued the JWT. See + // https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1 Usually a URL or an email address. Example: https://securetoken.google.com Example: + // 1234567-compute@developer.gserviceaccount.com Issuer string `json:"issuer,omitempty"` // JwksUri: URL of the provider's public key set to validate signature - // of the JWT. - // See - // [OpenID - // Discovery](https://openid.net/specs/openid-connect-discove - // ry-1_0.html#ProviderMetadata). - // Optional if the key set document: - // - can be retrieved from - // [OpenID - // + // of the JWT. See [OpenID + // Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html# + // ProviderMetadata). 
Optional if the key set document: - can be + // retrieved from [OpenID // Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html - // of - // the issuer. - // - can be inferred from the email domain of the issuer (e.g. a - // Google - // service account). - // - // Example: https://www.googleapis.com/oauth2/v1/certs + // of the issuer. - can be inferred from the email domain of the issuer + // (e.g. a Google service account). Example: + // https://www.googleapis.com/oauth2/v1/certs JwksUri string `json:"jwksUri,omitempty"` - // JwtLocations: Defines the locations to extract the JWT. - // - // JWT locations can be either from HTTP headers or URL query - // parameters. - // The rule is that the first match wins. The checking order is: - // checking - // all headers first, then URL query parameters. - // - // If not specified, default to use following 3 locations: - // 1) Authorization: Bearer - // 2) x-goog-iap-jwt-assertion - // 3) access_token query parameter - // - // Default locations can be specified as followings: - // jwt_locations: - // - header: Authorization - // value_prefix: "Bearer " - // - header: x-goog-iap-jwt-assertion - // - query: access_token + // JwtLocations: Defines the locations to extract the JWT. JWT locations + // can be either from HTTP headers or URL query parameters. The rule is + // that the first match wins. The checking order is: checking all + // headers first, then URL query parameters. If not specified, default + // to use following 3 locations: 1) Authorization: Bearer 2) + // x-goog-iap-jwt-assertion 3) access_token query parameter Default + // locations can be specified as followings: jwt_locations: - header: + // Authorization value_prefix: "Bearer " - header: + // x-goog-iap-jwt-assertion - query: access_token JwtLocations []*JwtLocation `json:"jwtLocations,omitempty"` // ForceSendFields is a list of field names (e.g. "Audiences") to @@ -382,43 +381,27 @@ func (s *AuthProvider) MarshalJSON() ([]byte, error) { } // AuthRequirement: User-defined authentication requirements, including -// support for -// [JSON Web -// Token -// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-tok -// en-32). +// support for [JSON Web Token +// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32) +// . type AuthRequirement struct { // Audiences: NOTE: This will be deprecated soon, once - // AuthProvider.audiences is - // implemented and accepted in all the runtime components. - // - // The list of - // JWT - // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web- - // token-32#section-4.1.3). - // that are allowed to access. A JWT containing any of these audiences - // will - // be accepted. When this setting is absent, only JWTs with - // audience - // "https://Service_name/API_name" - // will be accepted. For example, if no audiences are in the - // setting, - // LibraryService API will only accept JWTs with the following - // audience - // "https://library-example.googleapis.com/google.example.librar - // y.v1.LibraryService". - // - // Example: - // - // audiences: bookstore_android.apps.googleusercontent.com, - // bookstore_web.apps.googleusercontent.com + // AuthProvider.audiences is implemented and accepted in all the runtime + // components. The list of JWT + // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-toke + // n-32#section-4.1.3). that are allowed to access. A JWT containing any + // of these audiences will be accepted. 
When this setting is absent, + // only JWTs with audience "https://Service_name/API_name" will be + // accepted. For example, if no audiences are in the setting, + // LibraryService API will only accept JWTs with the following audience + // "https://library-example.googleapis.com/google.example.library.v1.Libr + // aryService". Example: audiences: + // bookstore_android.apps.googleusercontent.com, + // bookstore_web.apps.googleusercontent.com Audiences string `json:"audiences,omitempty"` - // ProviderId: id from authentication provider. - // - // Example: - // - // provider_id: bookstore_auth + // ProviderId: id from authentication provider. Example: provider_id: + // bookstore_auth ProviderId string `json:"providerId,omitempty"` // ForceSendFields is a list of field names (e.g. "Audiences") to @@ -445,30 +428,20 @@ func (s *AuthRequirement) MarshalJSON() ([]byte, error) { } // Authentication: `Authentication` defines the authentication -// configuration for an API. -// -// Example for an API targeted for external use: -// -// name: calendar.googleapis.com -// authentication: -// providers: -// - id: google_calendar_auth -// jwks_uri: https://www.googleapis.com/oauth2/v1/certs -// issuer: https://securetoken.google.com -// rules: -// - selector: "*" -// requirements: -// provider_id: google_calendar_auth +// configuration for an API. Example for an API targeted for external +// use: name: calendar.googleapis.com authentication: providers: - id: +// google_calendar_auth jwks_uri: +// https://www.googleapis.com/oauth2/v1/certs issuer: +// https://securetoken.google.com rules: - selector: "*" requirements: +// provider_id: google_calendar_auth type Authentication struct { // Providers: Defines a set of authentication providers that a service // supports. Providers []*AuthProvider `json:"providers,omitempty"` // Rules: A list of authentication rules that apply to individual API - // methods. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // methods. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*AuthenticationRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. "Providers") to @@ -494,19 +467,12 @@ func (s *Authentication) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AuthenticationRule: Authentication rules for the service. -// -// By default, if a method has any authentication requirements, every -// request -// must include a valid credential matching one of the -// requirements. -// It's an error to include more than one kind of credential in a -// single -// request. -// -// If a method doesn't have any auth requirements, request credentials -// will be -// ignored. +// AuthenticationRule: Authentication rules for the service. By default, +// if a method has any authentication requirements, every request must +// include a valid credential matching one of the requirements. It's an +// error to include more than one kind of credential in a single +// request. If a method doesn't have any auth requirements, request +// credentials will be ignored. type AuthenticationRule struct { // AllowWithoutCredential: If true, the service accepts API keys without // any other credential. @@ -518,9 +484,8 @@ type AuthenticationRule struct { // Requirements: Requirements for additional authentication providers. Requirements []*AuthRequirement `json:"requirements,omitempty"` - // Selector: Selects the methods to which this rule applies. 
- // - // Refer to selector for syntax details. + // Selector: Selects the methods to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -551,10 +516,8 @@ func (s *AuthenticationRule) MarshalJSON() ([]byte, error) { // Backend: `Backend` defines the backend configuration for a service. type Backend struct { // Rules: A list of API backend rules that apply to individual API - // methods. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // methods. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*BackendRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. "Rules") to @@ -583,197 +546,81 @@ func (s *Backend) MarshalJSON() ([]byte, error) { // BackendRule: A backend rule provides configuration for an individual // API element. type BackendRule struct { - // Address: The address of the API backend. - // - // The scheme is used to determine the backend protocol and - // security. - // The following schemes are accepted: - // - // SCHEME PROTOCOL SECURITY - // http:// HTTP None - // https:// HTTP TLS - // grpc:// gRPC None - // grpcs:// gRPC TLS - // - // It is recommended to explicitly include a scheme. Leaving out the - // scheme - // may cause constrasting behaviors across platforms. - // - // If the port is unspecified, the default is: - // - 80 for schemes without TLS - // - 443 for schemes with TLS - // - // For HTTP backends, use protocol - // to specify the protocol version. + // Address: The address of the API backend. The scheme is used to + // determine the backend protocol and security. The following schemes + // are accepted: SCHEME PROTOCOL SECURITY http:// HTTP None https:// + // HTTP TLS grpc:// gRPC None grpcs:// gRPC TLS It is recommended to + // explicitly include a scheme. Leaving out the scheme may cause + // constrasting behaviors across platforms. If the port is unspecified, + // the default is: - 80 for schemes without TLS - 443 for schemes with + // TLS For HTTP backends, use protocol to specify the protocol version. Address string `json:"address,omitempty"` // Deadline: The number of seconds to wait for a response from a - // request. The default - // varies based on the request protocol and deployment environment. + // request. The default varies based on the request protocol and + // deployment environment. Deadline float64 `json:"deadline,omitempty"` // DisableAuth: When disable_auth is true, a JWT ID token won't be - // generated and the - // original "Authorization" HTTP header will be preserved. If the header - // is - // used to carry the original token and is expected by the backend, - // this - // field must be set to true to preserve the header. + // generated and the original "Authorization" HTTP header will be + // preserved. If the header is used to carry the original token and is + // expected by the backend, this field must be set to true to preserve + // the header. DisableAuth bool `json:"disableAuth,omitempty"` // JwtAudience: The JWT audience is used when generating a JWT ID token - // for the backend. - // This ID token will be added in the HTTP "authorization" header, and - // sent - // to the backend. + // for the backend. This ID token will be added in the HTTP + // "authorization" header, and sent to the backend. JwtAudience string `json:"jwtAudience,omitempty"` // MinDeadline: Minimum deadline in seconds needed for this method. 
- // Calls having deadline - // value lower than this will be rejected. + // Calls having deadline value lower than this will be rejected. MinDeadline float64 `json:"minDeadline,omitempty"` // OperationDeadline: The number of seconds to wait for the completion - // of a long running - // operation. The default is no deadline. + // of a long running operation. The default is no deadline. OperationDeadline float64 `json:"operationDeadline,omitempty"` // Possible values: // "PATH_TRANSLATION_UNSPECIFIED" // "CONSTANT_ADDRESS" - Use the backend address as-is, with no - // modification to the path. If the - // URL pattern contains variables, the variable names and values will - // be - // appended to the query string. If a query string parameter and a - // URL - // pattern variable have the same name, this may result in duplicate - // keys in - // the query string. - // - // # Examples - // - // Given the following operation config: - // - // Method path: /api/company/{cid}/user/{uid} - // Backend address: - // https://example.cloudfunctions.net/getUser - // - // Requests to the following request paths will call the backend at - // the - // translated path: - // - // Request path: /api/company/widgetworks/user/johndoe - // Translated: - // - // https://example.cloudfunctions.net/getUser?cid=widgetworks&uid=johndoe - // - // Request path: /api/company/widgetworks/user/johndoe?timezone=EST - // Translated: - // + // modification to the path. If the URL pattern contains variables, the + // variable names and values will be appended to the query string. If a + // query string parameter and a URL pattern variable have the same name, + // this may result in duplicate keys in the query string. # Examples + // Given the following operation config: Method path: + // /api/company/{cid}/user/{uid} Backend address: + // https://example.cloudfunctions.net/getUser Requests to the following + // request paths will call the backend at the translated path: Request + // path: /api/company/widgetworks/user/johndoe Translated: + // https://example.cloudfunctions.net/getUser?cid=widgetworks&uid=johndoe Request path: /api/company/widgetworks/user/johndoe?timezone=EST Translated: // https://example.cloudfunctions.net/getUser?timezone=EST&cid=widgetworks&uid=johndoe // "APPEND_PATH_TO_ADDRESS" - The request path will be appended to the - // backend address. - // - // # Examples - // - // Given the following operation config: - // - // Method path: /api/company/{cid}/user/{uid} - // Backend address: https://example.appspot.com - // - // Requests to the following request paths will call the backend at - // the - // translated path: - // - // Request path: /api/company/widgetworks/user/johndoe - // Translated: - // + // backend address. # Examples Given the following operation config: + // Method path: /api/company/{cid}/user/{uid} Backend address: + // https://example.appspot.com Requests to the following request paths + // will call the backend at the translated path: Request path: + // /api/company/widgetworks/user/johndoe Translated: // https://example.appspot.com/api/company/widgetworks/user/johndoe - // - // Request path: /api/company/widgetworks/user/johndoe?timezone=EST - // Translated: - // + // Request path: /api/company/widgetworks/user/johndoe?timezone=EST + // Translated: // https://example.appspot.com/api/company/widgetworks/user/johndoe?timezone=EST PathTranslation string `json:"pathTranslation,omitempty"` - // Protocol: The protocol used for sending a request to the backend. 
- // The supported values are "http/1.1" and "h2". - // - // The default value is inferred from the scheme in the - // address field: - // - // SCHEME PROTOCOL - // http:// http/1.1 - // https:// http/1.1 - // grpc:// h2 - // grpcs:// h2 - // - // For secure HTTP backends (https://) that support HTTP/2, set this - // field - // to "h2" for improved performance. - // - // Configuring this field to non-default values is only supported for - // secure - // HTTP backends. This field will be ignored for all other - // backends. - // - // See - // https://www.iana.org/assignments/tls-extensiontype-valu - // es/tls-extensiontype-values.xhtml#alpn-protocol-ids - // for more details on the supported values. + // Protocol: The protocol used for sending a request to the backend. The + // supported values are "http/1.1" and "h2". The default value is + // inferred from the scheme in the address field: SCHEME PROTOCOL + // http:// http/1.1 https:// http/1.1 grpc:// h2 grpcs:// h2 For secure + // HTTP backends (https://) that support HTTP/2, set this field to "h2" + // for improved performance. Configuring this field to non-default + // values is only supported for secure HTTP backends. This field will be + // ignored for all other backends. See + // https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids for more details on the supported + // values. Protocol string `json:"protocol,omitempty"` - // RenameTo: Unimplemented. Do not use. - // - // The new name the selected proto elements should be renamed to. - // - // The package, the service and the method can all be renamed. - // The backend server should implement the renamed proto. However, - // clients - // should call the original method, and ESF routes the traffic to the - // renamed - // method. - // - // HTTP clients should call the URL mapped to the original method. - // gRPC and Stubby clients should call the original method with package - // name. - // - // For legacy reasons, ESF allows Stubby clients to call with the - // short name (without the package name). However, for API - // Versioning(or - // multiple methods mapped to the same short name), all Stubby clients - // must - // call the method's full name with the package name, otherwise the - // first one - // (selector) wins. - // - // If this `rename_to` is specified with a trailing `*`, the `selector` - // must - // be specified with a trailing `*` as well. The all element short - // names - // matched by the `*` in the selector will be kept in the - // `rename_to`. - // - // For example, - // rename_rules: - // - selector: |- - // google.example.library.v1.* - // rename_to: google.example.library.* - // - // The selector matches `google.example.library.v1.Library.CreateShelf` - // and - // `google.example.library.v1.Library.CreateBook`, they will be renamed - // to - // `google.example.library.Library.CreateShelf` - // and - // `google.example.library.Library.CreateBook`. It essentially renames - // the - // proto package name section of the matched proto service and methods. - RenameTo string `json:"renameTo,omitempty"` - - // Selector: Selects the methods to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects the methods to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Address") to @@ -878,21 +725,12 @@ func (s *BatchCreateConsumerOverridesResponse) MarshalJSON() ([]byte, error) { // BatchEnableServicesRequest: Request message for the // `BatchEnableServices` method. type BatchEnableServicesRequest struct { - // ServiceIds: The identifiers of the services to enable on the - // project. - // - // A valid identifier would be: - // serviceusage.googleapis.com - // - // Enabling services requires that each service is public or is shared - // with - // the user enabling the service. - // - // A single request can enable a maximum of 20 services at a time. If - // more - // than 20 services are specified, the request will fail, and no state - // changes - // will occur. + // ServiceIds: The identifiers of the services to enable on the project. + // A valid identifier would be: serviceusage.googleapis.com Enabling + // services requires that each service is public or is shared with the + // user enabling the service. A single request can enable a maximum of + // 20 services at a time. If more than 20 services are specified, the + // request will fail, and no state changes will occur. ServiceIds []string `json:"serviceIds,omitempty"` // ForceSendFields is a list of field names (e.g. "ServiceIds") to @@ -919,14 +757,13 @@ func (s *BatchEnableServicesRequest) MarshalJSON() ([]byte, error) { } // BatchEnableServicesResponse: Response message for the -// `BatchEnableServices` method. -// This response message is assigned to the `response` field of the -// returned -// Operation when that operation is done. +// `BatchEnableServices` method. This response message is assigned to +// the `response` field of the returned Operation when that operation is +// done. type BatchEnableServicesResponse struct { // Failures: If allow_partial_success is true, and one or more services - // could not be - // enabled, this field contains the details about each failure. + // could not be enabled, this field contains the details about each + // failure. Failures []*EnableFailure `json:"failures,omitempty"` // Services: The new state of the services after enabling. @@ -988,53 +825,28 @@ func (s *BatchGetServicesResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Billing: Billing related configuration of the service. -// -// The following example shows how to configure monitored resources and -// metrics -// for billing, `consumer_destinations` is the only supported -// destination and -// the monitored resources need at least one label -// key +// Billing: Billing related configuration of the service. The following +// example shows how to configure monitored resources and metrics for +// billing, `consumer_destinations` is the only supported destination +// and the monitored resources need at least one label key // `cloud.googleapis.com/location` to indicate the location of the -// billing -// usage, using different monitored resources between monitoring and -// billing is -// recommended so they can be evolved independently: -// -// -// monitored_resources: -// - type: library.googleapis.com/billing_branch -// labels: -// - key: cloud.googleapis.com/location -// description: | -// Predefined label to support billing location restriction. -// - key: city -// description: | -// Custom label to define the city where the library branch is -// located -// in. -// - key: name -// description: Custom label to define the name of the library -// branch. 
-// metrics: -// - name: library.googleapis.com/book/borrowed_count -// metric_kind: DELTA -// value_type: INT64 -// unit: "1" -// billing: -// consumer_destinations: -// - monitored_resource: library.googleapis.com/billing_branch -// metrics: -// - library.googleapis.com/book/borrowed_count +// billing usage, using different monitored resources between monitoring +// and billing is recommended so they can be evolved independently: +// monitored_resources: - type: library.googleapis.com/billing_branch +// labels: - key: cloud.googleapis.com/location description: | +// Predefined label to support billing location restriction. - key: city +// description: | Custom label to define the city where the library +// branch is located in. - key: name description: Custom label to define +// the name of the library branch. metrics: - name: +// library.googleapis.com/book/borrowed_count metric_kind: DELTA +// value_type: INT64 unit: "1" billing: consumer_destinations: - +// monitored_resource: library.googleapis.com/billing_branch metrics: - +// library.googleapis.com/book/borrowed_count type Billing struct { // ConsumerDestinations: Billing configurations for sending metrics to - // the consumer project. - // There can be multiple consumer destinations per service, each one - // must have - // a different monitored resource type. A metric can be used in at - // most - // one consumer destination. + // the consumer project. There can be multiple consumer destinations per + // service, each one must have a different monitored resource type. A + // metric can be used in at most one consumer destination. ConsumerDestinations []*BillingDestination `json:"consumerDestinations,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1063,17 +875,14 @@ func (s *Billing) MarshalJSON() ([]byte, error) { } // BillingDestination: Configuration of a specific billing destination -// (Currently only support -// bill against consumer project). +// (Currently only support bill against consumer project). type BillingDestination struct { - // Metrics: Names of the metrics to report to this billing - // destination. + // Metrics: Names of the metrics to report to this billing destination. // Each name must be defined in Service.metrics section. Metrics []string `json:"metrics,omitempty"` // MonitoredResource: The monitored resource type. The type must be - // defined in - // Service.monitored_resources section. + // defined in Service.monitored_resources section. MonitoredResource string `json:"monitoredResource,omitempty"` // ForceSendFields is a list of field names (e.g. "Metrics") to @@ -1104,59 +913,27 @@ func (s *BillingDestination) MarshalJSON() ([]byte, error) { type CancelOperationRequest struct { } -// Context: `Context` defines which contexts an API -// requests. -// -// Example: -// -// context: -// rules: -// - selector: "*" -// requested: -// - google.rpc.context.ProjectContext -// - google.rpc.context.OriginContext -// -// The above specifies that all methods in the API -// request -// `google.rpc.context.ProjectContext` -// and -// `google.rpc.context.OriginContext`. -// -// Available context types are defined in -// package -// `google.rpc.context`. -// -// This also provides mechanism to whitelist any protobuf message -// extension that -// can be sent in grpc metadata using -// “x-goog-ext--bin” -// and -// “x-goog-ext--jspb” format. 
For example, list any -// service -// specific protobuf types that can appear in grpc metadata as follows -// in your -// yaml file: -// -// Example: -// -// context: -// rules: -// - selector: +// Context: `Context` defines which contexts an API requests. Example: +// context: rules: - selector: "*" requested: - +// google.rpc.context.ProjectContext - google.rpc.context.OriginContext +// The above specifies that all methods in the API request +// `google.rpc.context.ProjectContext` and +// `google.rpc.context.OriginContext`. Available context types are +// defined in package `google.rpc.context`. This also provides mechanism +// to whitelist any protobuf message extension that can be sent in grpc +// metadata using “x-goog-ext--bin” and “x-goog-ext--jspb” +// format. For example, list any service specific protobuf types that +// can appear in grpc metadata as follows in your yaml file: Example: +// context: rules: - selector: // "google.example.library.v1.LibraryService.CreateBook" -// allowed_request_extensions: -// - google.foo.v1.NewExtension -// allowed_response_extensions: -// - google.foo.v1.NewExtension -// -// You can also specify extension ID instead of fully qualified -// extension name +// allowed_request_extensions: - google.foo.v1.NewExtension +// allowed_response_extensions: - google.foo.v1.NewExtension You can +// also specify extension ID instead of fully qualified extension name // here. type Context struct { // Rules: A list of RPC context rules that apply to individual API - // methods. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // methods. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*ContextRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. "Rules") to @@ -1183,17 +960,14 @@ func (s *Context) MarshalJSON() ([]byte, error) { } // ContextRule: A context rule provides information about the context -// for an individual API -// element. +// for an individual API element. type ContextRule struct { // AllowedRequestExtensions: A list of full type names or extension IDs - // of extensions allowed in grpc - // side channel from client to backend. + // of extensions allowed in grpc side channel from client to backend. AllowedRequestExtensions []string `json:"allowedRequestExtensions,omitempty"` // AllowedResponseExtensions: A list of full type names or extension IDs - // of extensions allowed in grpc - // side channel from backend to client. + // of extensions allowed in grpc side channel from backend to client. AllowedResponseExtensions []string `json:"allowedResponseExtensions,omitempty"` // Provided: A list of full type names of provided contexts. @@ -1202,9 +976,8 @@ type ContextRule struct { // Requested: A list of full type names of requested contexts. Requested []string `json:"requested,omitempty"` - // Selector: Selects the methods to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects the methods to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1233,14 +1006,11 @@ func (s *ContextRule) MarshalJSON() ([]byte, error) { } // Control: Selects and configures the service controller used by the -// service. The -// service controller handles features like abuse, quota, billing, -// logging, -// monitoring, etc. +// service. 
The service controller handles features like abuse, quota, +// billing, logging, monitoring, etc. type Control struct { // Environment: The service control environment to use. If empty, no - // control plane - // feature (like quota and billing) will be enabled. + // control plane feature (like quota and billing) will be enabled. Environment string `json:"environment,omitempty"` // ForceSendFields is a list of field names (e.g. "Environment") to @@ -1266,24 +1036,14 @@ func (s *Control) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// CustomError: Customize service error responses. For example, list -// any service -// specific protobuf types that can appear in error detail lists -// of -// error responses. -// -// Example: -// -// custom_error: -// types: -// - google.foo.v1.CustomError -// - google.foo.v1.AnotherError +// CustomError: Customize service error responses. For example, list any +// service specific protobuf types that can appear in error detail lists +// of error responses. Example: custom_error: types: - +// google.foo.v1.CustomError - google.foo.v1.AnotherError type CustomError struct { // Rules: The list of custom error rules that apply to individual API - // messages. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // messages. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*CustomErrorRule `json:"rules,omitempty"` // Types: The list of custom error detail types, e.g. @@ -1316,14 +1076,12 @@ func (s *CustomError) MarshalJSON() ([]byte, error) { // CustomErrorRule: A custom error rule. type CustomErrorRule struct { // IsErrorType: Mark this message as possible payload in error response. - // Otherwise, - // objects of this type will be filtered when they appear in error - // payload. + // Otherwise, objects of this type will be filtered when they appear in + // error payload. IsErrorType bool `json:"isErrorType,omitempty"` - // Selector: Selects messages to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects messages to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. "IsErrorType") to @@ -1384,27 +1142,36 @@ func (s *CustomHttpPattern) MarshalJSON() ([]byte, error) { // DisableServiceRequest: Request message for the `DisableService` // method. type DisableServiceRequest struct { + // CheckIfServiceHasUsage: Defines the behavior for checking service + // usage when disabling a service. + // + // Possible values: + // "CHECK_IF_SERVICE_HAS_USAGE_UNSPECIFIED" - When unset, the default + // behavior is used, which is SKIP. + // "SKIP" - If set, skip checking service usage when disabling a + // service. + // "CHECK" - If set, service usage is checked when disabling the + // service. If a service, or its dependents, has usage in the last 30 + // days, the request returns a FAILED_PRECONDITION error. + CheckIfServiceHasUsage string `json:"checkIfServiceHasUsage,omitempty"` + // DisableDependentServices: Indicates if services that are enabled and - // which depend on this service - // should also be disabled. If not set, an error will be generated if - // any - // enabled services depend on the service to be disabled. When set, - // the - // service, and any enabled services that depend on it, will be - // disabled - // together. + // which depend on this service should also be disabled. 
If not set, an + // error will be generated if any enabled services depend on the service + // to be disabled. When set, the service, and any enabled services that + // depend on it, will be disabled together. DisableDependentServices bool `json:"disableDependentServices,omitempty"` // ForceSendFields is a list of field names (e.g. - // "DisableDependentServices") to unconditionally include in API - // requests. By default, fields with empty values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in + // "CheckIfServiceHasUsage") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in // ForceSendFields will be sent to the server regardless of whether the // field is empty or not. This may be used to include empty fields in // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DisableDependentServices") + // NullFields is a list of field names (e.g. "CheckIfServiceHasUsage") // to include in API requests with the JSON null value. By default, // fields with empty values are omitted from API requests. However, any // field with an empty value appearing in NullFields will be sent to the @@ -1421,10 +1188,8 @@ func (s *DisableServiceRequest) MarshalJSON() ([]byte, error) { } // DisableServiceResponse: Response message for the `DisableService` -// method. -// This response message is assigned to the `response` field of the -// returned -// Operation when that operation is done. +// method. This response message is assigned to the `response` field of +// the returned Operation when that operation is done. type DisableServiceResponse struct { // Service: The new state of the service after disabling. Service *GoogleApiServiceusageV1Service `json:"service,omitempty"` @@ -1453,113 +1218,63 @@ func (s *DisableServiceResponse) MarshalJSON() ([]byte, error) { } // Documentation: `Documentation` provides the information for -// describing a service. -// -// Example: -//
documentation:
-//   summary: >
-//     The Google Calendar API gives access
-//     to most calendar features.
-//   pages:
-//   - name: Overview
-//     content: (== include google/foo/overview.md ==)
-//   - name: Tutorial
-//     content: (== include google/foo/tutorial.md ==)
-//     subpages;
-//     - name: Java
-//       content: (== include google/foo/tutorial_java.md ==)
-//   rules:
-//   - selector: google.calendar.Calendar.Get
-//     description: >
-//       ...
-//   - selector: google.calendar.Calendar.Put
-//     description: >
-//       ...
-// 
-// Documentation is provided in markdown syntax. In addition to -// standard markdown features, definition lists, tables and fenced -// code blocks are supported. Section headers can be provided and -// are -// interpreted relative to the section nesting of the context where -// a documentation fragment is embedded. -// -// Documentation from the IDL is merged with documentation defined -// via the config at normalization time, where documentation provided -// by config rules overrides IDL provided. -// -// A number of constructs specific to the API platform are supported -// in documentation text. -// -// In order to reference a proto element, the following -// notation can be -// used: -//
[fully.qualified.proto.name][]
-// T -// o override the display text used for the link, this can be -// used: -//
[display
-// text][fully.qualified.proto.name]
-// Text can be excluded from doc using the following -// notation: -//
(-- internal comment --)
-// -// A few directives are available in documentation. Note that -// directives must appear on a single line to be properly -// identified. The `include` directive includes a markdown file from -// an external source: -//
(== include path/to/file ==)
-// The `resource_for` directive marks a message to be the resource of -// a collection in REST view. If it is not specified, tools attempt -// to infer the resource from the operations in a -// collection: -//
(== resource_for v1.shelves.books
-// ==)
-// The directive `suppress_warning` does not directly affect -// documentation -// and is documented together with service config validation. +// describing a service. Example: documentation: summary: > The Google +// Calendar API gives access to most calendar features. pages: - name: +// Overview content: (== include google/foo/overview.md ==) - name: +// Tutorial content: (== include google/foo/tutorial.md ==) subpages; - +// name: Java content: (== include google/foo/tutorial_java.md ==) +// rules: - selector: google.calendar.Calendar.Get description: > ... - +// selector: google.calendar.Calendar.Put description: > ... +// Documentation is provided in markdown syntax. In addition to standard +// markdown features, definition lists, tables and fenced code blocks +// are supported. Section headers can be provided and are interpreted +// relative to the section nesting of the context where a documentation +// fragment is embedded. Documentation from the IDL is merged with +// documentation defined via the config at normalization time, where +// documentation provided by config rules overrides IDL provided. A +// number of constructs specific to the API platform are supported in +// documentation text. In order to reference a proto element, the +// following notation can be used: [fully.qualified.proto.name][] To +// override the display text used for the link, this can be used: +// [display text][fully.qualified.proto.name] Text can be excluded from +// doc using the following notation: (-- internal comment --) A few +// directives are available in documentation. Note that directives must +// appear on a single line to be properly identified. The `include` +// directive includes a markdown file from an external source: (== +// include path/to/file ==) The `resource_for` directive marks a message +// to be the resource of a collection in REST view. If it is not +// specified, tools attempt to infer the resource from the operations in +// a collection: (== resource_for v1.shelves.books ==) The directive +// `suppress_warning` does not directly affect documentation and is +// documented together with service config validation. type Documentation struct { // DocumentationRootUrl: The URL to the root of documentation. DocumentationRootUrl string `json:"documentationRootUrl,omitempty"` - // Overview: Declares a single overview page. For - // example: - //
documentation:
-	//   summary: ...
-	//   overview: (== include overview.md ==)
-	// 
- // This is a shortcut for the following declaration (using pages - // style): - //
documentation:
-	//   summary: ...
-	//   pages:
-	//   - name: Overview
-	//     content: (== include overview.md ==)
-	// 
- // Note: you cannot specify both `overview` field and `pages` field. + // Overview: Declares a single overview page. For example: + // documentation: summary: ... overview: (== include overview.md ==) + // This is a shortcut for the following declaration (using pages style): + // documentation: summary: ... pages: - name: Overview content: (== + // include overview.md ==) Note: you cannot specify both `overview` + // field and `pages` field. Overview string `json:"overview,omitempty"` // Pages: The top level pages for the documentation set. Pages []*Page `json:"pages,omitempty"` // Rules: A list of documentation rules that apply to individual API - // elements. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // elements. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*DocumentationRule `json:"rules,omitempty"` // ServiceRootUrl: Specifies the service root url if the default one - // (the service name - // from the yaml file) is not suitable. This can be seen in any - // fully - // specified service urls as well as sections that show a base that - // other - // urls are relative to. + // (the service name from the yaml file) is not suitable. This can be + // seen in any fully specified service urls as well as sections that + // show a base that other urls are relative to. ServiceRootUrl string `json:"serviceRootUrl,omitempty"` // Summary: A short summary of what the service does. Can only be - // provided by - // plain text. + // provided by plain text. Summary string `json:"summary,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1591,24 +1306,20 @@ func (s *Documentation) MarshalJSON() ([]byte, error) { // individual API elements. type DocumentationRule struct { // DeprecationDescription: Deprecation description of the selected - // element(s). It can be provided if - // an element is marked as `deprecated`. + // element(s). It can be provided if an element is marked as + // `deprecated`. DeprecationDescription string `json:"deprecationDescription,omitempty"` // Description: Description of the selected API(s). Description string `json:"description,omitempty"` // Selector: The selector is a comma-separated list of patterns. Each - // pattern is a - // qualified name of the element which may end in "*", indicating a - // wildcard. - // Wildcards are only allowed at the end and for a whole component of - // the - // qualified name, i.e. "foo.*" is ok, but not "foo.b*" or "foo.*.bar". - // A - // wildcard will match one or more components. To specify a default for - // all - // applicable elements, the whole pattern "*" is used. + // pattern is a qualified name of the element which may end in "*", + // indicating a wildcard. Wildcards are only allowed at the end and for + // a whole component of the qualified name, i.e. "foo.*" is ok, but not + // "foo.b*" or "foo.*.bar". A wildcard will match one or more + // components. To specify a default for all applicable elements, the + // whole pattern "*" is used. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1637,17 +1348,11 @@ func (s *DocumentationRule) MarshalJSON() ([]byte, error) { } // Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. 
For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -1691,10 +1396,8 @@ type EnableServiceRequest struct { } // EnableServiceResponse: Response message for the `EnableService` -// method. -// This response message is assigned to the `response` field of the -// returned -// Operation when that operation is done. +// method. This response message is assigned to the `response` field of +// the returned Operation when that operation is done. type EnableServiceResponse struct { // Service: The new state of the service after enabling. Service *GoogleApiServiceusageV1Service `json:"service,omitempty"` @@ -1723,64 +1426,38 @@ func (s *EnableServiceResponse) MarshalJSON() ([]byte, error) { } // Endpoint: `Endpoint` describes a network endpoint that serves a set -// of APIs. -// A service may expose any number of endpoints, and all endpoints share -// the -// same service configuration, such as quota configuration and -// monitoring -// configuration. -// -// Example service configuration: -// -// name: library-example.googleapis.com -// endpoints: -// # Below entry makes 'google.example.library.v1.Library' -// # API be served from endpoint address -// library-example.googleapis.com. -// # It also allows HTTP OPTIONS calls to be passed to the -// backend, for -// # it to decide whether the subsequent cross-origin request is -// # allowed to proceed. -// - name: library-example.googleapis.com -// allow_cors: true +// of APIs. A service may expose any number of endpoints, and all +// endpoints share the same service configuration, such as quota +// configuration and monitoring configuration. Example service +// configuration: name: library-example.googleapis.com endpoints: # +// Below entry makes 'google.example.library.v1.Library' # API be served +// from endpoint address library-example.googleapis.com. # It also +// allows HTTP OPTIONS calls to be passed to the backend, for # it to +// decide whether the subsequent cross-origin request is # allowed to +// proceed. - name: library-example.googleapis.com allow_cors: true type Endpoint struct { // Aliases: DEPRECATED: This field is no longer supported. Instead of - // using aliases, - // please specify multiple google.api.Endpoint for each of the - // intended - // aliases. - // - // Additional names that this endpoint will be hosted on. + // using aliases, please specify multiple google.api.Endpoint for each + // of the intended aliases. Additional names that this endpoint will be + // hosted on. Aliases []string `json:"aliases,omitempty"` - // AllowCors: - // Allowing - // [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sh - // aring), aka - // cross-domain traffic, would allow the backends served from this - // endpoint to - // receive and respond to HTTP OPTIONS requests. The response will be - // used by - // the browser to determine whether the subsequent cross-origin request - // is - // allowed to proceed. 
+ // AllowCors: Allowing + // [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), + // aka cross-domain traffic, would allow the backends served from this + // endpoint to receive and respond to HTTP OPTIONS requests. The + // response will be used by the browser to determine whether the + // subsequent cross-origin request is allowed to proceed. AllowCors bool `json:"allowCors,omitempty"` - // Features: The list of features enabled on this endpoint. - Features []string `json:"features,omitempty"` - // Name: The canonical name of this endpoint. Name string `json:"name,omitempty"` // Target: The specification of an Internet routable address of API - // frontend that will - // handle requests to this - // [API + // frontend that will handle requests to this [API // Endpoint](https://cloud.google.com/apis/design/glossary). It should - // be - // either a valid IPv4 address or a fully-qualified domain name. For - // example, - // "8.8.8.8" or "myservice.appspot.com". + // be either a valid IPv4 address or a fully-qualified domain name. For + // example, "8.8.8.8" or "myservice.appspot.com". Target string `json:"target,omitempty"` // ForceSendFields is a list of field names (e.g. "Aliases") to @@ -1934,9 +1611,8 @@ type Field struct { Number int64 `json:"number,omitempty"` // OneofIndex: The index of the field type in `Type.oneofs`, for message - // or enumeration - // types. The first type has index 1; zero means the type is not in the - // list. + // or enumeration types. The first type has index 1; zero means the type + // is not in the list. OneofIndex int64 `json:"oneofIndex,omitempty"` // Options: The protocol buffer options. @@ -1946,8 +1622,8 @@ type Field struct { Packed bool `json:"packed,omitempty"` // TypeUrl: The field type URL, without the scheme, for message or - // enumeration - // types. Example: "type.googleapis.com/google.protobuf.Timestamp". + // enumeration types. Example: + // "type.googleapis.com/google.protobuf.Timestamp". TypeUrl string `json:"typeUrl,omitempty"` // ForceSendFields is a list of field names (e.g. "Cardinality") to @@ -1977,18 +1653,16 @@ func (s *Field) MarshalJSON() ([]byte, error) { // identity. type GetServiceIdentityResponse struct { // Identity: Service identity that service producer can use to access - // consumer - // resources. If exists is true, it contains email and unique_id. If - // exists is - // false, it contains pre-constructed email and empty unique_id. + // consumer resources. If exists is true, it contains email and + // unique_id. If exists is false, it contains pre-constructed email and + // empty unique_id. Identity *ServiceIdentity `json:"identity,omitempty"` // State: Service identity state. // // Possible values: // "IDENTITY_STATE_UNSPECIFIED" - Default service identity state. This - // value is used if the state is - // omitted. + // value is used if the state is omitted. // "ACTIVE" - Service identity has been created and can be used. State string `json:"state,omitempty"` @@ -2016,43 +1690,24 @@ func (s *GetServiceIdentityResponse) MarshalJSON() ([]byte, error) { } // GoogleApiService: `Service` is the root object of Google service -// configuration schema. It -// describes basic information about a service, such as the name and -// the -// title, and delegates other aspects to sub-sections. Each sub-section -// is -// either a proto message or a repeated proto message that configures -// a -// specific aspect, such as auth. See each proto message definition for -// details. 
-// -// Example: -// -// type: google.api.Service -// config_version: 3 -// name: calendar.googleapis.com -// title: Google Calendar API -// apis: -// - name: google.calendar.v3.Calendar -// authentication: -// providers: -// - id: google_calendar_auth -// jwks_uri: https://www.googleapis.com/oauth2/v1/certs -// issuer: https://securetoken.google.com -// rules: -// - selector: "*" -// requirements: -// provider_id: google_calendar_auth +// configuration schema. It describes basic information about a service, +// such as the name and the title, and delegates other aspects to +// sub-sections. Each sub-section is either a proto message or a +// repeated proto message that configures a specific aspect, such as +// auth. See each proto message definition for details. Example: type: +// google.api.Service config_version: 3 name: calendar.googleapis.com +// title: Google Calendar API apis: - name: google.calendar.v3.Calendar +// authentication: providers: - id: google_calendar_auth jwks_uri: +// https://www.googleapis.com/oauth2/v1/certs issuer: +// https://securetoken.google.com rules: - selector: "*" requirements: +// provider_id: google_calendar_auth type GoogleApiService struct { // Apis: A list of API interfaces exported by this service. Only the - // `name` field - // of the google.protobuf.Api needs to be provided by the - // configuration - // author, as the remaining fields will be derived from the IDL during - // the - // normalization process. It is an error to specify an API interface - // here - // which cannot be resolved against the associated IDL files. + // `name` field of the google.protobuf.Api needs to be provided by the + // configuration author, as the remaining fields will be derived from + // the IDL during the normalization process. It is an error to specify + // an API interface here which cannot be resolved against the associated + // IDL files. Apis []*Api `json:"apis,omitempty"` // Authentication: Auth configuration. @@ -2065,13 +1720,9 @@ type GoogleApiService struct { Billing *Billing `json:"billing,omitempty"` // ConfigVersion: The semantic version of the service configuration. The - // config version - // affects the interpretation of the service configuration. For - // example, - // certain features are enabled by default for certain config - // versions. - // - // The latest config version is `3`. + // config version affects the interpretation of the service + // configuration. For example, certain features are enabled by default + // for certain config versions. The latest config version is `3`. ConfigVersion int64 `json:"configVersion,omitempty"` // Context: Context configuration. @@ -2086,35 +1737,25 @@ type GoogleApiService struct { // Documentation: Additional API documentation. Documentation *Documentation `json:"documentation,omitempty"` - // Endpoints: Configuration for network endpoints. If this is empty, - // then an endpoint - // with the same name as the service is automatically generated to - // service all - // defined APIs. + // Endpoints: Configuration for network endpoints. If this is empty, + // then an endpoint with the same name as the service is automatically + // generated to service all defined APIs. Endpoints []*Endpoint `json:"endpoints,omitempty"` - // Enums: A list of all enum types included in this API service. - // Enums - // referenced directly or indirectly by the `apis` are - // automatically - // included. Enums which are not referenced but shall be - // included - // should be listed here by name. 
Example: - // - // enums: - // - name: google.someapi.v1.SomeEnum + // Enums: A list of all enum types included in this API service. Enums + // referenced directly or indirectly by the `apis` are automatically + // included. Enums which are not referenced but shall be included should + // be listed here by name. Example: enums: - name: + // google.someapi.v1.SomeEnum Enums []*Enum `json:"enums,omitempty"` // Http: HTTP configuration. Http *Http `json:"http,omitempty"` // Id: A unique ID for a specific instance of this message, typically - // assigned - // by the client for tracking purpose. Must be no longer than 63 - // characters - // and only lower case letters, digits, '.', '_' and '-' are allowed. - // If - // empty, the server may choose to generate one instead. + // assigned by the client for tracking purpose. Must be no longer than + // 63 characters and only lower case letters, digits, '.', '_' and '-' + // are allowed. If empty, the server may choose to generate one instead. Id string `json:"id,omitempty"` // Logging: Logging configuration. @@ -2127,19 +1768,17 @@ type GoogleApiService struct { Metrics []*MetricDescriptor `json:"metrics,omitempty"` // MonitoredResources: Defines the monitored resources used by this - // service. This is required - // by the Service.monitoring and Service.logging configurations. + // service. This is required by the Service.monitoring and + // Service.logging configurations. MonitoredResources []*MonitoredResourceDescriptor `json:"monitoredResources,omitempty"` // Monitoring: Monitoring configuration. Monitoring *Monitoring `json:"monitoring,omitempty"` // Name: The service name, which is a DNS-like logical identifier for - // the - // service, such as `calendar.googleapis.com`. The service - // name - // typically goes through DNS verification to make sure the owner - // of the service also owns the DNS name. + // the service, such as `calendar.googleapis.com`. The service name + // typically goes through DNS verification to make sure the owner of the + // service also owns the DNS name. Name string `json:"name,omitempty"` // ProducerProjectId: The Google project that owns this service. @@ -2156,30 +1795,21 @@ type GoogleApiService struct { SystemParameters *SystemParameters `json:"systemParameters,omitempty"` // SystemTypes: A list of all proto message types included in this API - // service. - // It serves similar purpose as [google.api.Service.types], except - // that - // these types are not needed by user-defined APIs. Therefore, they will - // not - // show up in the generated discovery doc. This field should only be - // used - // to define system APIs in ESF. + // service. It serves similar purpose as [google.api.Service.types], + // except that these types are not needed by user-defined APIs. + // Therefore, they will not show up in the generated discovery doc. This + // field should only be used to define system APIs in ESF. SystemTypes []*Type `json:"systemTypes,omitempty"` // Title: The product title for this service. Title string `json:"title,omitempty"` // Types: A list of all proto message types included in this API - // service. - // Types referenced directly or indirectly by the `apis` - // are - // automatically included. Messages which are not referenced but - // shall be included, such as types used by the `google.protobuf.Any` - // type, - // should be listed here by name. Example: - // - // types: - // - name: google.protobuf.Int32 + // service. 
Types referenced directly or indirectly by the `apis` are + // automatically included. Messages which are not referenced but shall + // be included, such as types used by the `google.protobuf.Any` type, + // should be listed here by name. Example: types: - name: + // google.protobuf.Int32 Types []*Type `json:"types,omitempty"` // Usage: Configuration controlling usage of this service. @@ -2209,35 +1839,22 @@ func (s *GoogleApiService) MarshalJSON() ([]byte, error) { } // GoogleApiServiceIdentity: The per-product per-project service -// identity for a service. -// -// -// Use this field to configure per-product per-project service -// identity. -// Example of a service identity configuration. -// -// usage: -// service_identity: -// - service_account_parent: "projects/123456789" -// display_name: "Cloud XXX Service Agent" -// description: "Used as the identity of Cloud XXX to access -// resources" +// identity for a service. Use this field to configure per-product +// per-project service identity. Example of a service identity +// configuration. usage: service_identity: - service_account_parent: +// "projects/123456789" display_name: "Cloud XXX Service Agent" +// description: "Used as the identity of Cloud XXX to access resources" type GoogleApiServiceIdentity struct { // Description: Optional. A user-specified opaque description of the - // service account. - // Must be less than or equal to 256 UTF-8 bytes. + // service account. Must be less than or equal to 256 UTF-8 bytes. Description string `json:"description,omitempty"` - // DisplayName: Optional. A user-specified name for the service - // account. + // DisplayName: Optional. A user-specified name for the service account. // Must be less than or equal to 100 UTF-8 bytes. DisplayName string `json:"displayName,omitempty"` // ServiceAccountParent: A service account project that hosts the - // service accounts. - // - // An example name would be: - // `projects/123456789` + // service accounts. An example name would be: `projects/123456789` ServiceAccountParent string `json:"serviceAccountParent,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -2267,8 +1884,7 @@ func (s *GoogleApiServiceIdentity) MarshalJSON() ([]byte, error) { // returned for the batchend services operation. type GoogleApiServiceusageV1OperationMetadata struct { // ResourceNames: The full name of the resources that this operation is - // directly - // associated with. + // directly associated with. ResourceNames []string `json:"resourceNames,omitempty"` // ForceSendFields is a list of field names (e.g. "ResourceNames") to @@ -2297,24 +1913,18 @@ func (s *GoogleApiServiceusageV1OperationMetadata) MarshalJSON() ([]byte, error) // GoogleApiServiceusageV1Service: A service that is available for use // by the consumer. type GoogleApiServiceusageV1Service struct { - // Config: The service configuration of the available service. - // Some fields may be filtered out of the configuration in responses - // to - // the `ListServices` method. These fields are present only in responses - // to + // Config: The service configuration of the available service. Some + // fields may be filtered out of the configuration in responses to the + // `ListServices` method. These fields are present only in responses to // the `GetService` method. Config *GoogleApiServiceusageV1ServiceConfig `json:"config,omitempty"` - // Name: The resource name of the consumer and service. 
- // - // A valid name would be: - // - projects/123/services/serviceusage.googleapis.com + // Name: The resource name of the consumer and service. A valid name + // would be: - projects/123/services/serviceusage.googleapis.com Name string `json:"name,omitempty"` - // Parent: The resource name of the consumer. - // - // A valid name would be: - // - projects/123 + // Parent: The resource name of the consumer. A valid name would be: - + // projects/123 Parent string `json:"parent,omitempty"` // State: Whether or not the service has been enabled for use by the @@ -2322,14 +1932,11 @@ type GoogleApiServiceusageV1Service struct { // // Possible values: // "STATE_UNSPECIFIED" - The default value, which indicates that the - // enabled state of the service - // is unspecified or not meaningful. Currently, all consumers other - // than - // projects (such as folders and organizations) are always in this - // state. + // enabled state of the service is unspecified or not meaningful. + // Currently, all consumers other than projects (such as folders and + // organizations) are always in this state. // "DISABLED" - The service cannot be used by this consumer. It has - // either been explicitly - // disabled, or has never been enabled. + // either been explicitly disabled, or has never been enabled. // "ENABLED" - The service has been explicitly enabled for use by this // consumer. State string `json:"state,omitempty"` @@ -2365,27 +1972,31 @@ func (s *GoogleApiServiceusageV1Service) MarshalJSON() ([]byte, error) { // service. type GoogleApiServiceusageV1ServiceConfig struct { // Apis: A list of API interfaces exported by this service. Contains - // only the names, - // versions, and method names of the interfaces. + // only the names, versions, and method names of the interfaces. Apis []*Api `json:"apis,omitempty"` // Authentication: Auth configuration. Contains only the OAuth rules. Authentication *Authentication `json:"authentication,omitempty"` // Documentation: Additional API documentation. Contains only the - // summary and the - // documentation URL. + // summary and the documentation URL. Documentation *Documentation `json:"documentation,omitempty"` // Endpoints: Configuration for network endpoints. Contains only the - // names and aliases - // of the endpoints. + // names and aliases of the endpoints. Endpoints []*Endpoint `json:"endpoints,omitempty"` - // Name: The DNS address at which this service is available. - // - // An example DNS address would be: - // `calendar.googleapis.com`. + // MonitoredResources: Defines the monitored resources used by this + // service. This is required by the Service.monitoring and + // Service.logging configurations. + MonitoredResources []*MonitoredResourceDescriptor `json:"monitoredResources,omitempty"` + + // Monitoring: Monitoring configuration. This should not include the + // 'producer_destinations' field. + Monitoring *Monitoring `json:"monitoring,omitempty"` + + // Name: The DNS address at which this service is available. An example + // DNS address would be: `calendar.googleapis.com`. Name string `json:"name,omitempty"` // Quota: Quota configuration. @@ -2424,18 +2035,16 @@ func (s *GoogleApiServiceusageV1ServiceConfig) MarshalJSON() ([]byte, error) { // message for getting service identity. type GoogleApiServiceusageV1beta1GetServiceIdentityResponse struct { // Identity: Service identity that service producer can use to access - // consumer - // resources. If exists is true, it contains email and unique_id. 
If - // exists is - // false, it contains pre-constructed email and empty unique_id. + // consumer resources. If exists is true, it contains email and + // unique_id. If exists is false, it contains pre-constructed email and + // empty unique_id. Identity *GoogleApiServiceusageV1beta1ServiceIdentity `json:"identity,omitempty"` // State: Service identity state. // // Possible values: // "IDENTITY_STATE_UNSPECIFIED" - Default service identity state. This - // value is used if the state is - // omitted. + // value is used if the state is omitted. // "ACTIVE" - Service identity has been created and can be used. State string `json:"state,omitempty"` @@ -2463,18 +2072,15 @@ func (s *GoogleApiServiceusageV1beta1GetServiceIdentityResponse) MarshalJSON() ( } // GoogleApiServiceusageV1beta1ServiceIdentity: Service identity for a -// service. This is the identity that service producer -// should use to access consumer resources. +// service. This is the identity that service producer should use to +// access consumer resources. type GoogleApiServiceusageV1beta1ServiceIdentity struct { // Email: The email address of the service account that a service - // producer would use - // to access consumer resources. + // producer would use to access consumer resources. Email string `json:"email,omitempty"` - // UniqueId: The unique and stable id of the service - // account. - // https://cloud.google.com/iam/reference/rest/v1/projects.servi - // ceAccounts#ServiceAccount + // UniqueId: The unique and stable id of the service account. + // https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts#ServiceAccount UniqueId string `json:"uniqueId,omitempty"` // ForceSendFields is a list of field names (e.g. "Email") to @@ -2501,26 +2107,19 @@ func (s *GoogleApiServiceusageV1beta1ServiceIdentity) MarshalJSON() ([]byte, err } // Http: Defines the HTTP configuration for an API service. It contains -// a list of -// HttpRule, each specifying the mapping of an RPC method -// to one or more HTTP REST API methods. +// a list of HttpRule, each specifying the mapping of an RPC method to +// one or more HTTP REST API methods. type Http struct { // FullyDecodeReservedExpansion: When set to true, URL path parameters - // will be fully URI-decoded except in - // cases of single segment matches in reserved expansion, where "%2F" - // will be - // left encoded. - // - // The default behavior is to not decode RFC 6570 reserved characters in - // multi + // will be fully URI-decoded except in cases of single segment matches + // in reserved expansion, where "%2F" will be left encoded. The default + // behavior is to not decode RFC 6570 reserved characters in multi // segment matches. FullyDecodeReservedExpansion bool `json:"fullyDecodeReservedExpansion,omitempty"` // Rules: A list of HTTP configuration rules that apply to individual - // API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // API methods. **NOTE:** All service configuration rules follow "last + // one wins" order. Rules []*HttpRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -2548,403 +2147,187 @@ func (s *Http) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// HttpRule: # gRPC Transcoding -// -// gRPC Transcoding is a feature for mapping between a gRPC method and -// one or -// more HTTP REST endpoints. It allows developers to build a single API -// service -// that supports both gRPC APIs and REST APIs. 
Many systems, including -// [Google -// APIs](https://github.com/googleapis/googleapis), -// [Cloud Endpoints](https://cloud.google.com/endpoints), -// [gRPC -// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), -// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this -// feature -// and use it for large scale production services. -// -// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping -// specifies +// HttpRule: # gRPC Transcoding gRPC Transcoding is a feature for +// mapping between a gRPC method and one or more HTTP REST endpoints. It +// allows developers to build a single API service that supports both +// gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), [Cloud +// Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), and +// [Envoy](https://github.com/envoyproxy/envoy) proxy support this +// feature and use it for large scale production services. `HttpRule` +// defines the schema of the gRPC/REST mapping. The mapping specifies // how different portions of the gRPC request message are mapped to the -// URL -// path, URL query parameters, and HTTP request body. It also controls -// how the -// gRPC response message is mapped to the HTTP response body. `HttpRule` -// is -// typically specified as an `google.api.http` annotation on the gRPC -// method. -// -// Each mapping specifies a URL path template and an HTTP method. The -// path -// template may refer to one or more fields in the gRPC request message, -// as long -// as each field is a non-repeated field with a primitive (non-message) -// type. -// The path template controls how fields of the request message are -// mapped to -// the URL path. -// -// Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/{name=messages/*}" -// }; -// } -// } -// message GetMessageRequest { -// string name = 1; // Mapped to URL path. -// } -// message Message { -// string text = 1; // The resource content. -// } -// -// This enables an HTTP REST to gRPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: -// "messages/123456")` -// -// Any fields in the request message which are not bound by the path -// template -// automatically become HTTP query parameters if there is no HTTP -// request body. -// For example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get:"/v1/messages/{message_id}" -// }; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // Mapped to URL path. -// int64 revision = 2; // Mapped to URL query parameter -// `revision`. -// SubMessage sub = 3; // Mapped to URL query parameter -// `sub.subfield`. -// } -// -// This enables a HTTP JSON to RPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` -// | +// URL path, URL query parameters, and HTTP request body. It also +// controls how the gRPC response message is mapped to the HTTP response +// body. `HttpRule` is typically specified as an `google.api.http` +// annotation on the gRPC method. Each mapping specifies a URL path +// template and an HTTP method. 
The path template may refer to one or +// more fields in the gRPC request message, as long as each field is a +// non-repeated field with a primitive (non-message) type. The path +// template controls how fields of the request message are mapped to the +// URL path. Example: service Messaging { rpc +// GetMessage(GetMessageRequest) returns (Message) { option +// (google.api.http) = { get: "/v1/{name=messages/*}" }; } } message +// GetMessageRequest { string name = 1; // Mapped to URL path. } message +// Message { string text = 1; // The resource content. } This enables an +// HTTP REST to gRPC mapping as below: HTTP | gRPC -----|----- `GET +// /v1/messages/123456` | `GetMessage(name: "messages/123456")` Any +// fields in the request message which are not bound by the path +// template automatically become HTTP query parameters if there is no +// HTTP request body. For example: service Messaging { rpc +// GetMessage(GetMessageRequest) returns (Message) { option +// (google.api.http) = { get:"/v1/messages/{message_id}" }; } } message +// GetMessageRequest { message SubMessage { string subfield = 1; } +// string message_id = 1; // Mapped to URL path. int64 revision = 2; // +// Mapped to URL query parameter `revision`. SubMessage sub = 3; // +// Mapped to URL query parameter `sub.subfield`. } This enables a HTTP +// JSON to RPC mapping as below: HTTP | gRPC -----|----- `GET +// /v1/messages/123456?revision=2&sub.subfield=foo` | // `GetMessage(message_id: "123456" revision: 2 sub: -// SubMessage(subfield: -// "foo"))` -// -// Note that fields which are mapped to URL query parameters must have -// a -// primitive type or a repeated primitive type or a non-repeated message -// type. -// In the case of a repeated type, the parameter can be repeated in the -// URL -// as `...?param=A¶m=B`. In the case of a message type, each field -// of the -// message is mapped to a separate parameter, such -// as -// `...?foo.a=A&foo.b=B&foo.c=C`. -// -// For HTTP methods that allow a request body, the `body` -// field -// specifies the mapping. Consider a REST update method on the -// message resource collection: -// -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } -// -// The following HTTP JSON to RPC mapping is enabled, where -// the -// representation of the JSON in the request body is determined -// by -// protos JSON encoding: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | -// `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` -// -// The special name `*` can be used in the body mapping to define -// that -// every field not bound by the path template should be mapped to -// the -// request body. This enables the following alternative definition -// of -// the update method: -// -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// -// -// The following HTTP JSON to RPC mapping is enabled: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" 
}` | -// `UpdateMessage(message_id: -// "123456" text: "Hi!")` -// -// Note that when using `*` in the body mapping, it is not possible -// to -// have HTTP parameters, as all fields not bound by the path end in -// the body. This makes this option more rarely used in practice -// when -// defining REST APIs. The common usage of `*` is in custom -// methods -// which don't use the URL at all for transferring data. -// -// It is possible to define multiple HTTP methods for one RPC by -// using -// the `additional_bindings` option. Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } -// -// This enables the following two alternative HTTP JSON to RPC -// mappings: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" -// message_id: -// "123456")` -// -// ## Rules for HTTP mapping -// -// 1. Leaf request fields (recursive expansion nested messages in the -// request -// message) are classified into three categories: -// - Fields referred by the path template. They are passed via the -// URL path. -// - Fields referred by the HttpRule.body. They are passed via the -// HTTP -// request body. -// - All other fields are passed via the URL query parameters, and -// the -// parameter name is the field path in the request message. A -// repeated -// field can be represented as multiple query parameters under the -// same -// name. -// 2. If HttpRule.body is "*", there is no URL query parameter, all -// fields -// are passed via URL path and HTTP request body. -// 3. If HttpRule.body is omitted, there is no HTTP request body, all -// fields are passed via URL path and URL query parameters. -// -// ### Path template syntax -// -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; -// -// The syntax `*` matches a single URL path segment. The syntax `**` -// matches -// zero or more URL path segments, which must be the last part of the -// URL path -// except the `Verb`. -// -// The syntax `Variable` matches part of the URL path as specified by -// its -// template. A variable template must not contain other variables. If a -// variable -// matches a single path segment, its template may be omitted, e.g. -// `{var}` -// is equivalent to `{var=*}`. -// -// The syntax `LITERAL` matches literal text in the URL path. If the -// `LITERAL` -// contains any reserved character, such characters should be -// percent-encoded -// before the matching. -// -// If a variable contains exactly one path segment, such as "{var}" -// or -// "{var=*}", when such a variable is expanded into a URL path on the -// client -// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. -// The -// server side does the reverse decoding. Such variables show up in -// the -// [Discovery -// Document](https://developers.google.com/discovery/v1/re -// ference/apis) as -// `{var}`. 
-// -// If a variable contains multiple path segments, such as -// "{var=foo/*}" -// or "{var=**}", when such a variable is expanded into a URL path on -// the -// client side, all characters except `[-_.~/0-9a-zA-Z]` are -// percent-encoded. -// The server side does the reverse decoding, except "%2F" and "%2f" are -// left -// unchanged. Such variables show up in -// the +// SubMessage(subfield: "foo"))` Note that fields which are mapped to +// URL query parameters must have a primitive type or a repeated +// primitive type or a non-repeated message type. In the case of a +// repeated type, the parameter can be repeated in the URL as +// `...?param=A¶m=B`. In the case of a message type, each field of +// the message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. For HTTP methods that allow a request +// body, the `body` field specifies the mapping. Consider a REST update +// method on the message resource collection: service Messaging { rpc +// UpdateMessage(UpdateMessageRequest) returns (Message) { option +// (google.api.http) = { patch: "/v1/messages/{message_id}" body: +// "message" }; } } message UpdateMessageRequest { string message_id = +// 1; // mapped to the URL Message message = 2; // mapped to the body } +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: HTTP | gRPC -----|----- `PATCH +// /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" message { text: "Hi!" })` The special name `*` can be used +// in the body mapping to define that every field not bound by the path +// template should be mapped to the request body. This enables the +// following alternative definition of the update method: service +// Messaging { rpc UpdateMessage(Message) returns (Message) { option +// (google.api.http) = { patch: "/v1/messages/{message_id}" body: "*" }; +// } } message Message { string message_id = 1; string text = 2; } The +// following HTTP JSON to RPC mapping is enabled: HTTP | gRPC +// -----|----- `PATCH /v1/messages/123456 { "text": "Hi!" }` | +// `UpdateMessage(message_id: "123456" text: "Hi!")` Note that when +// using `*` in the body mapping, it is not possible to have HTTP +// parameters, as all fields not bound by the path end in the body. This +// makes this option more rarely used in practice when defining REST +// APIs. The common usage of `*` is in custom methods which don't use +// the URL at all for transferring data. It is possible to define +// multiple HTTP methods for one RPC by using the `additional_bindings` +// option. Example: service Messaging { rpc +// GetMessage(GetMessageRequest) returns (Message) { option +// (google.api.http) = { get: "/v1/messages/{message_id}" +// additional_bindings { get: +// "/v1/users/{user_id}/messages/{message_id}" } }; } } message +// GetMessageRequest { string message_id = 1; string user_id = 2; } This +// enables the following two alternative HTTP JSON to RPC mappings: HTTP +// | gRPC -----|----- `GET /v1/messages/123456` | +// `GetMessage(message_id: "123456")` `GET /v1/users/me/messages/123456` +// | `GetMessage(user_id: "me" message_id: "123456")` ## Rules for HTTP +// mapping 1. Leaf request fields (recursive expansion nested messages +// in the request message) are classified into three categories: - +// Fields referred by the path template. They are passed via the URL +// path. - Fields referred by the HttpRule.body. 
They are passed via the +// HTTP request body. - All other fields are passed via the URL query +// parameters, and the parameter name is the field path in the request +// message. A repeated field can be represented as multiple query +// parameters under the same name. 2. If HttpRule.body is "*", there is +// no URL query parameter, all fields are passed via URL path and HTTP +// request body. 3. If HttpRule.body is omitted, there is no HTTP +// request body, all fields are passed via URL path and URL query +// parameters. ### Path template syntax Template = "/" Segments [ Verb ] +// ; Segments = Segment { "/" Segment } ; Segment = "*" | "**" | LITERAL +// | Variable ; Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; Verb = ":" LITERAL ; The syntax `*` +// matches a single URL path segment. The syntax `**` matches zero or +// more URL path segments, which must be the last part of the URL path +// except the `Verb`. The syntax `Variable` matches part of the URL path +// as specified by its template. A variable template must not contain +// other variables. If a variable matches a single path segment, its +// template may be omitted, e.g. `{var}` is equivalent to `{var=*}`. The +// syntax `LITERAL` matches literal text in the URL path. If the +// `LITERAL` contains any reserved character, such characters should be +// percent-encoded before the matching. If a variable contains exactly +// one path segment, such as "{var}" or "{var=*}", when such a +// variable is expanded into a URL path on the client side, all +// characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The server +// side does the reverse decoding. Such variables show up in the // [Discovery -// Document](https://developers.google.com/discovery/v1/re -// ference/apis) as -// `{+var}`. -// -// ## Using gRPC API Service Configuration -// -// gRPC API Service Configuration (service config) is a configuration -// language -// for configuring a gRPC service to become a user-facing product. -// The +// Document](https://developers.google.com/discovery/v1/reference/apis) +// as `{var}`. If a variable contains multiple path segments, such as +// "{var=foo/*}" or "{var=**}", when such a variable is expanded +// into a URL path on the client side, all characters except +// `[-_.~/0-9a-zA-Z]` are percent-encoded. The server side does the +// reverse decoding, except "%2F" and "%2f" are left unchanged. Such +// variables show up in the [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) +// as `{+var}`. ## Using gRPC API Service Configuration gRPC API Service +// Configuration (service config) is a configuration language for +// configuring a gRPC service to become a user-facing product. The // service config is simply the YAML representation of the -// `google.api.Service` -// proto message. -// -// As an alternative to annotating your proto file, you can configure -// gRPC -// transcoding in your service config YAML files. You do this by -// specifying a -// `HttpRule` that maps the gRPC method to a REST endpoint, achieving -// the same -// effect as the proto annotation. This can be particularly useful if -// you -// have a proto that is reused in multiple services. Note that any -// transcoding +// `google.api.Service` proto message. As an alternative to annotating +// your proto file, you can configure gRPC transcoding in your service +// config YAML files. 
You do this by specifying a `HttpRule` that maps +// the gRPC method to a REST endpoint, achieving the same effect as the +// proto annotation. This can be particularly useful if you have a proto +// that is reused in multiple services. Note that any transcoding // specified in the service config will override any matching -// transcoding -// configuration in the proto. -// -// Example: -// -// http: -// rules: -// # Selects a gRPC method and applies HttpRule to it. -// - selector: example.v1.Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} -// -// ## Special notes -// -// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, -// the -// proto to JSON conversion must follow the -// [proto3 -// specification](https://developers.google.com/protocol-buffers/ -// docs/proto3#json). -// -// While the single segment variable follows the semantics of +// transcoding configuration in the proto. Example: http: rules: # +// Selects a gRPC method and applies HttpRule to it. - selector: +// example.v1.Messaging.GetMessage get: +// /v1/messages/{message_id}/{sub.subfield} ## Special notes When gRPC +// Transcoding is used to map a gRPC to JSON REST endpoints, the proto +// to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/pro +// to3#json). While the single segment variable follows the semantics of // [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple -// String -// Expansion, the multi segment variable **does not** follow RFC 6570 -// Section -// 3.2.3 Reserved Expansion. The reason is that the Reserved -// Expansion -// does not expand special characters like `?` and `#`, which would -// lead -// to invalid URLs. As the result, gRPC Transcoding uses a custom -// encoding -// for multi segment variables. -// -// The path variables **must not** refer to any repeated or mapped -// field, +// String Expansion, the multi segment variable **does not** follow RFC +// 6570 Section 3.2.3 Reserved Expansion. The reason is that the +// Reserved Expansion does not expand special characters like `?` and +// `#`, which would lead to invalid URLs. As the result, gRPC +// Transcoding uses a custom encoding for multi segment variables. The +// path variables **must not** refer to any repeated or mapped field, // because client libraries are not capable of handling such variable -// expansion. -// -// The path variables **must not** capture the leading "/" character. -// The reason -// is that the most common use case "{var}" does not capture the leading -// "/" -// character. For consistency, all path variables must share the same -// behavior. -// -// Repeated message fields must not be mapped to URL query parameters, -// because -// no client library can support such complicated mapping. -// -// If an API needs to use a JSON array for request or response body, it -// can map -// the request or response body to a repeated field. However, some -// gRPC -// Transcoding implementations may not support this feature. +// expansion. The path variables **must not** capture the leading "/" +// character. The reason is that the most common use case "{var}" does +// not capture the leading "/" character. For consistency, all path +// variables must share the same behavior. Repeated message fields must +// not be mapped to URL query parameters, because no client library can +// support such complicated mapping. 
If an API needs to use a JSON array +// for request or response body, it can map the request or response body +// to a repeated field. However, some gRPC Transcoding implementations +// may not support this feature. type HttpRule struct { // AdditionalBindings: Additional HTTP bindings for the selector. Nested - // bindings must - // not contain an `additional_bindings` field themselves (that is, - // the nesting may only be one level deep). + // bindings must not contain an `additional_bindings` field themselves + // (that is, the nesting may only be one level deep). AdditionalBindings []*HttpRule `json:"additionalBindings,omitempty"` // AllowHalfDuplex: When this flag is set to true, HTTP requests will be - // allowed to invoke a - // half-duplex streaming method. + // allowed to invoke a half-duplex streaming method. AllowHalfDuplex bool `json:"allowHalfDuplex,omitempty"` // Body: The name of the request field whose value is mapped to the HTTP - // request - // body, or `*` for mapping all request fields not captured by the - // path - // pattern to the HTTP body, or omitted for not having any HTTP request - // body. - // - // NOTE: the referred field must be present at the top-level of the - // request - // message type. + // request body, or `*` for mapping all request fields not captured by + // the path pattern to the HTTP body, or omitted for not having any HTTP + // request body. NOTE: the referred field must be present at the + // top-level of the request message type. Body string `json:"body,omitempty"` // Custom: The custom pattern is used for specifying an HTTP method that - // is not - // included in the `pattern` field, such as HEAD, or "*" to leave - // the - // HTTP method unspecified for this rule. The wild-card rule is - // useful - // for services that provide content to Web (HTML) clients. + // is not included in the `pattern` field, such as HEAD, or "*" to leave + // the HTTP method unspecified for this rule. The wild-card rule is + // useful for services that provide content to Web (HTML) clients. Custom *CustomHttpPattern `json:"custom,omitempty"` // Delete: Maps to HTTP DELETE. Used for deleting a resource. Delete string `json:"delete,omitempty"` - // Get: Maps to HTTP GET. Used for listing and getting information - // about + // Get: Maps to HTTP GET. Used for listing and getting information about // resources. Get string `json:"get,omitempty"` @@ -2959,19 +2342,13 @@ type HttpRule struct { Put string `json:"put,omitempty"` // ResponseBody: Optional. The name of the response field whose value is - // mapped to the HTTP - // response body. When omitted, the entire response message will be - // used - // as the HTTP response body. - // - // NOTE: The referred field must be present at the top-level of the - // response - // message type. + // mapped to the HTTP response body. When omitted, the entire response + // message will be used as the HTTP response body. NOTE: The referred + // field must be present at the top-level of the response message type. ResponseBody string `json:"responseBody,omitempty"` - // Selector: Selects a method to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects a method to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. 
"AdditionalBindings") @@ -3027,6 +2404,35 @@ func (s *ImportAdminOverridesResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ImportAdminQuotaPoliciesResponse: Response message for +// ImportAdminQuotaPolicies +type ImportAdminQuotaPoliciesResponse struct { + // Policies: The policies that were created from the imported data. + Policies []*AdminQuotaPolicy `json:"policies,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Policies") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Policies") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ImportAdminQuotaPoliciesResponse) MarshalJSON() ([]byte, error) { + type NoMethod ImportAdminQuotaPoliciesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ImportConsumerOverridesResponse: Response message for // ImportConsumerOverrides type ImportConsumerOverridesResponse struct { @@ -3065,16 +2471,11 @@ type JwtLocation struct { Query string `json:"query,omitempty"` // ValuePrefix: The value prefix. The value format is - // "value_prefix{token}" - // Only applies to "in" header type. Must be empty for "in" query - // type. - // If not empty, the header value has to match (case sensitive) this - // prefix. - // If not matched, JWT will not be extracted. If matched, JWT will - // be - // extracted after the prefix is removed. - // - // For example, for "Authorization: Bearer {JWT}", + // "value_prefix{token}" Only applies to "in" header type. Must be empty + // for "in" query type. If not empty, the header value has to match + // (case sensitive) this prefix. If not matched, JWT will not be + // extracted. If matched, JWT will be extracted after the prefix is + // removed. For example, for "Authorization: Bearer {JWT}", // value_prefix="Bearer " with a space at the end. ValuePrefix string `json:"valuePrefix,omitempty"` @@ -3180,8 +2581,7 @@ func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { // ListServicesResponse: Response message for the `ListServices` method. type ListServicesResponse struct { // NextPageToken: Token that can be passed to `ListServices` to resume a - // paginated - // query. + // paginated query. NextPageToken string `json:"nextPageToken,omitempty"` // Services: The available services for the requested project. @@ -3214,39 +2614,29 @@ func (s *ListServicesResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// LogDescriptor: A description of a log type. Example in YAML format: -// -// - name: library.googleapis.com/activity_history -// description: The history of borrowing and returning library -// items. 
-// display_name: Activity -// labels: -// - key: /customer_id -// description: Identifier of a library customer +// LogDescriptor: A description of a log type. Example in YAML format: - +// name: library.googleapis.com/activity_history description: The +// history of borrowing and returning library items. display_name: +// Activity labels: - key: /customer_id description: Identifier of a +// library customer type LogDescriptor struct { // Description: A human-readable description of this log. This - // information appears in - // the documentation and can contain details. + // information appears in the documentation and can contain details. Description string `json:"description,omitempty"` // DisplayName: The human-readable name for this log. This information - // appears on - // the user interface and should be concise. + // appears on the user interface and should be concise. DisplayName string `json:"displayName,omitempty"` // Labels: The set of labels that are available to describe a specific - // log entry. - // Runtime requests that contain labels not specified here - // are - // considered invalid. + // log entry. Runtime requests that contain labels not specified here + // are considered invalid. Labels []*LabelDescriptor `json:"labels,omitempty"` // Name: The name of the log. It must be less than 512 characters long - // and can - // include the following characters: upper- and lower-case - // alphanumeric - // characters [A-Za-z0-9], and punctuation characters including - // slash, underscore, hyphen, period [/_-.]. + // and can include the following characters: upper- and lower-case + // alphanumeric characters [A-Za-z0-9], and punctuation characters + // including slash, underscore, hyphen, period [/_-.]. Name string `json:"name,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -3272,54 +2662,30 @@ func (s *LogDescriptor) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Logging: Logging configuration of the service. -// -// The following example shows how to configure logs to be sent to -// the -// producer and consumer projects. In the example, the -// `activity_history` -// log is sent to both the producer and consumer projects, whereas -// the -// `purchase_history` log is only sent to the producer project. -// -// monitored_resources: -// - type: library.googleapis.com/branch -// labels: -// - key: /city -// description: The city where the library branch is located -// in. -// - key: /name -// description: The name of the branch. -// logs: -// - name: activity_history -// labels: -// - key: /customer_id -// - name: purchase_history -// logging: -// producer_destinations: -// - monitored_resource: library.googleapis.com/branch -// logs: -// - activity_history -// - purchase_history -// consumer_destinations: -// - monitored_resource: library.googleapis.com/branch -// logs: -// - activity_history +// Logging: Logging configuration of the service. The following example +// shows how to configure logs to be sent to the producer and consumer +// projects. In the example, the `activity_history` log is sent to both +// the producer and consumer projects, whereas the `purchase_history` +// log is only sent to the producer project. monitored_resources: - +// type: library.googleapis.com/branch labels: - key: /city description: +// The city where the library branch is located in. - key: /name +// description: The name of the branch. 
logs: - name: activity_history +// labels: - key: /customer_id - name: purchase_history logging: +// producer_destinations: - monitored_resource: +// library.googleapis.com/branch logs: - activity_history - +// purchase_history consumer_destinations: - monitored_resource: +// library.googleapis.com/branch logs: - activity_history type Logging struct { // ConsumerDestinations: Logging configurations for sending logs to the - // consumer project. - // There can be multiple consumer destinations, each one must have - // a - // different monitored resource type. A log can be used in at most - // one consumer destination. + // consumer project. There can be multiple consumer destinations, each + // one must have a different monitored resource type. A log can be used + // in at most one consumer destination. ConsumerDestinations []*LoggingDestination `json:"consumerDestinations,omitempty"` // ProducerDestinations: Logging configurations for sending logs to the - // producer project. - // There can be multiple producer destinations, each one must have - // a - // different monitored resource type. A log can be used in at most - // one producer destination. + // producer project. There can be multiple producer destinations, each + // one must have a different monitored resource type. A log can be used + // in at most one producer destination. ProducerDestinations []*LoggingDestination `json:"producerDestinations,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -3348,19 +2714,16 @@ func (s *Logging) MarshalJSON() ([]byte, error) { } // LoggingDestination: Configuration of a specific logging destination -// (the producer project -// or the consumer project). +// (the producer project or the consumer project). type LoggingDestination struct { // Logs: Names of the logs to be sent to this destination. Each name - // must - // be defined in the Service.logs section. If the log name is - // not a domain scoped name, it will be automatically prefixed with - // the service name followed by "/". + // must be defined in the Service.logs section. If the log name is not a + // domain scoped name, it will be automatically prefixed with the + // service name followed by "/". Logs []string `json:"logs,omitempty"` // MonitoredResource: The monitored resource type. The type must be - // defined in the - // Service.monitored_resources section. + // defined in the Service.monitored_resources section. MonitoredResource string `json:"monitoredResource,omitempty"` // ForceSendFields is a list of field names (e.g. "Logs") to @@ -3437,32 +2800,26 @@ func (s *Method) MarshalJSON() ([]byte, error) { } // MetricDescriptor: Defines a metric type and its schema. Once a metric -// descriptor is created, -// deleting or altering it stops data collection and makes the metric -// type's -// existing data unusable. +// descriptor is created, deleting or altering it stops data collection +// and makes the metric type's existing data unusable. type MetricDescriptor struct { // Description: A detailed description of the metric, which can be used // in documentation. Description string `json:"description,omitempty"` // DisplayName: A concise name for the metric, which can be displayed in - // user interfaces. - // Use sentence case without an ending period, for example "Request - // count". - // This field is optional but it is recommended to be set for any - // metrics - // associated with user-visible concepts, such as Quota. + // user interfaces. 
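A minimal sketch (again not part of the patch, and assuming the same serviceusage import as the HttpRule sketch above): the producer/consumer split from the Logging YAML example above, built with the generated Logging and LoggingDestination types. The monitored-resource and log names are the ones used in that example.

func exampleLogging() *serviceusage.Logging {
	branch := "library.googleapis.com/branch"
	return &serviceusage.Logging{
		// Both logs are sent to the producer project...
		ProducerDestinations: []*serviceusage.LoggingDestination{{
			MonitoredResource: branch,
			Logs:              []string{"activity_history", "purchase_history"},
		}},
		// ...but only activity_history is sent to the consumer project.
		ConsumerDestinations: []*serviceusage.LoggingDestination{{
			MonitoredResource: branch,
			Logs:              []string{"activity_history"},
		}},
	}
}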
Use sentence case without an ending period, for + // example "Request count". This field is optional but it is recommended + // to be set for any metrics associated with user-visible concepts, such + // as Quota. DisplayName string `json:"displayName,omitempty"` - // Labels: The set of labels that can be used to describe a - // specific - // instance of this metric type. For example, - // the - // `appengine.googleapis.com/http/server/response_latencies` metric - // type has a label for the HTTP response code, `response_code`, so - // you can look at latencies for successful responses or just - // for responses that failed. + // Labels: The set of labels that can be used to describe a specific + // instance of this metric type. For example, the + // `appengine.googleapis.com/http/server/response_latencies` metric type + // has a label for the HTTP response code, `response_code`, so you can + // look at latencies for successful responses or just for responses that + // failed. Labels []*LabelDescriptor `json:"labels,omitempty"` // LaunchStage: Optional. The launch stage of the metric definition. @@ -3474,50 +2831,31 @@ type MetricDescriptor struct { // "PRELAUNCH" - Prelaunch features are hidden from users and are only // visible internally. // "EARLY_ACCESS" - Early Access features are limited to a closed - // group of testers. To use - // these features, you must sign up in advance and sign a Trusted - // Tester - // agreement (which includes confidentiality provisions). These features - // may - // be unstable, changed in backward-incompatible ways, and are - // not - // guaranteed to be released. + // group of testers. To use these features, you must sign up in advance + // and sign a Trusted Tester agreement (which includes confidentiality + // provisions). These features may be unstable, changed in + // backward-incompatible ways, and are not guaranteed to be released. // "ALPHA" - Alpha is a limited availability test for releases before - // they are cleared - // for widespread use. By Alpha, all significant design issues are - // resolved - // and we are in the process of verifying functionality. Alpha - // customers - // need to apply for access, agree to applicable terms, and have - // their - // projects whitelisted. Alpha releases don’t have to be feature - // complete, - // no SLAs are provided, and there are no technical support obligations, - // but - // they will be far enough along that customers can actually use them - // in - // test environments or for limited-use tests -- just like they would - // in - // normal production cases. + // they are cleared for widespread use. By Alpha, all significant design + // issues are resolved and we are in the process of verifying + // functionality. Alpha customers need to apply for access, agree to + // applicable terms, and have their projects whitelisted. Alpha releases + // don’t have to be feature complete, no SLAs are provided, and there + // are no technical support obligations, but they will be far enough + // along that customers can actually use them in test environments or + // for limited-use tests -- just like they would in normal production + // cases. // "BETA" - Beta is the point at which we are ready to open a release - // for any - // customer to use. There are no SLA or technical support obligations in - // a - // Beta release. Products will be complete from a feature perspective, - // but - // may have some open outstanding issues. Beta releases are suitable - // for - // limited production use cases. 
+ // for any customer to use. There are no SLA or technical support + // obligations in a Beta release. Products will be complete from a + // feature perspective, but may have some open outstanding issues. Beta + // releases are suitable for limited production use cases. // "GA" - GA features are open to all developers and are considered - // stable and - // fully qualified for production use. + // stable and fully qualified for production use. // "DEPRECATED" - Deprecated features are scheduled to be shut down - // and removed. For more - // information, see the “Deprecation Policy” section of our [Terms - // of - // Service](https://cloud.google.com/terms/) - // and the [Google Cloud Platform Subject to the - // Deprecation + // and removed. For more information, see the “Deprecation Policy” + // section of our [Terms of Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation // Policy](https://cloud.google.com/terms/deprecation) documentation. LaunchStage string `json:"launchStage,omitempty"` @@ -3526,188 +2864,106 @@ type MetricDescriptor struct { Metadata *MetricDescriptorMetadata `json:"metadata,omitempty"` // MetricKind: Whether the metric records instantaneous values, changes - // to a value, etc. - // Some combinations of `metric_kind` and `value_type` might not be - // supported. + // to a value, etc. Some combinations of `metric_kind` and `value_type` + // might not be supported. // // Possible values: // "METRIC_KIND_UNSPECIFIED" - Do not use this default value. // "GAUGE" - An instantaneous measurement of a value. // "DELTA" - The change in a value during a time interval. - // "CUMULATIVE" - A value accumulated over a time interval. - // Cumulative - // measurements in a time series should have the same start time - // and increasing end times, until an event resets the cumulative - // value to zero and sets a new start time for the following - // points. + // "CUMULATIVE" - A value accumulated over a time interval. Cumulative + // measurements in a time series should have the same start time and + // increasing end times, until an event resets the cumulative value to + // zero and sets a new start time for the following points. MetricKind string `json:"metricKind,omitempty"` - // MonitoredResourceTypes: Read-only. If present, then a time - // series, which is identified partially by - // a metric type and a MonitoredResourceDescriptor, that is - // associated - // with this metric type can only be associated with one of the - // monitored - // resource types listed here. + // MonitoredResourceTypes: Read-only. If present, then a time series, + // which is identified partially by a metric type and a + // MonitoredResourceDescriptor, that is associated with this metric type + // can only be associated with one of the monitored resource types + // listed here. MonitoredResourceTypes []string `json:"monitoredResourceTypes,omitempty"` // Name: The resource name of the metric descriptor. Name string `json:"name,omitempty"` - // Type: The metric type, including its DNS name prefix. The type is - // not - // URL-encoded. All user-defined metric types have the DNS - // name - // `custom.googleapis.com` or `external.googleapis.com`. Metric types - // should - // use a natural hierarchical grouping. For example: - // - // "custom.googleapis.com/invoice/paid/amount" - // "external.googleapis.com/prometheus/up" - // "appengine.googleapis.com/http/server/response_latencies" + // Type: The metric type, including its DNS name prefix. 
The type is not + // URL-encoded. All user-defined metric types have the DNS name + // `custom.googleapis.com` or `external.googleapis.com`. Metric types + // should use a natural hierarchical grouping. For example: + // "custom.googleapis.com/invoice/paid/amount" + // "external.googleapis.com/prometheus/up" + // "appengine.googleapis.com/http/server/response_latencies" Type string `json:"type,omitempty"` // Unit: The units in which the metric value is reported. It is only - // applicable - // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The - // `unit` - // defines the representation of the stored metric values. - // - // Different systems may scale the values to be more easily displayed - // (so a - // value of `0.02KBy` _might_ be displayed as `20By`, and a value - // of - // `3523KBy` _might_ be displayed as `3.5MBy`). However, if the `unit` - // is - // `KBy`, then the value of the metric is always in thousands of bytes, - // no - // matter how it may be displayed.. - // - // If you want a custom metric to record the exact number of CPU-seconds - // used - // by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` - // is - // `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses - // 12,005 - // CPU-seconds, then the value is written as `12005`. - // - // Alternatively, if you want a custom metric to record data in a - // more + // applicable if the `value_type` is `INT64`, `DOUBLE`, or + // `DISTRIBUTION`. The `unit` defines the representation of the stored + // metric values. Different systems may scale the values to be more + // easily displayed (so a value of `0.02KBy` _might_ be displayed as + // `20By`, and a value of `3523KBy` _might_ be displayed as `3.5MBy`). + // However, if the `unit` is `KBy`, then the value of the metric is + // always in thousands of bytes, no matter how it may be displayed.. If + // you want a custom metric to record the exact number of CPU-seconds + // used by a job, you can create an `INT64 CUMULATIVE` metric whose + // `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the + // job uses 12,005 CPU-seconds, then the value is written as `12005`. + // Alternatively, if you want a custom metric to record data in a more // granular way, you can create a `DOUBLE CUMULATIVE` metric whose - // `unit` is - // `ks{CPU}`, and then write the value `12.005` (which is - // `12005/1000`), - // or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). - // - // The supported units are a subset of [The Unified Code for Units - // of - // Measure](http://unitsofmeasure.org/ucum.html) standard: - // - // **Basic units (UNIT)** - // - // * `bit` bit - // * `By` byte - // * `s` second - // * `min` minute - // * `h` hour - // * `d` day - // - // **Prefixes (PREFIX)** - // - // * `k` kilo (10^3) - // * `M` mega (10^6) - // * `G` giga (10^9) - // * `T` tera (10^12) - // * `P` peta (10^15) - // * `E` exa (10^18) - // * `Z` zetta (10^21) - // * `Y` yotta (10^24) - // - // * `m` milli (10^-3) - // * `u` micro (10^-6) - // * `n` nano (10^-9) - // * `p` pico (10^-12) - // * `f` femto (10^-15) - // * `a` atto (10^-18) - // * `z` zepto (10^-21) - // * `y` yocto (10^-24) - // - // * `Ki` kibi (2^10) - // * `Mi` mebi (2^20) - // * `Gi` gibi (2^30) - // * `Ti` tebi (2^40) - // * `Pi` pebi (2^50) - // - // **Grammar** - // - // The grammar also includes these connectors: - // - // * `/` division or ratio (as an infix operator). 
For examples, - // `kBy/{email}` or `MiBy/10ms` (although you should almost - // never - // have `/s` in a metric `unit`; rates should always be - // computed at - // query time from the underlying cumulative or delta value). - // * `.` multiplication or composition (as an infix operator). For - // examples, `GBy.d` or `k{watt}.h`. - // - // The grammar for a unit is as follows: - // - // Expression = Component { "." Component } { "/" Component } ; - // - // Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] - // | Annotation - // | "1" - // ; - // - // Annotation = "{" NAME "}" ; - // - // Notes: - // - // * `Annotation` is just a comment if it follows a `UNIT`. If the - // annotation - // is used alone, then the unit is equivalent to `1`. For examples, - // `{request}/s == 1/s`, `By{transmitted}/s == By/s`. - // * `NAME` is a sequence of non-blank printable ASCII characters not - // containing `{` or `}`. - // * `1` represents a unitary [dimensionless - // unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, - // such - // as in `1/s`. It is typically used when none of the basic units - // are - // appropriate. For example, "new users per day" can be represented - // as - // `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 - // new - // users). Alternatively, "thousands of page views per day" would be - // represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a - // metric - // value of `5.3` would mean "5300 page views per day"). - // * `%` represents dimensionless value of 1/100, and annotates values - // giving - // a percentage (so the metric values are typically in the range of - // 0..100, - // and a metric value `3` means "3 percent"). - // * `10^2.%` indicates a metric contains a ratio, typically in the - // range - // 0..1, that will be multiplied by 100 and displayed as a - // percentage - // (so a metric value `0.03` means "3 percent"). + // `unit` is `ks{CPU}`, and then write the value `12.005` (which is + // `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is + // `12005/1024`). The supported units are a subset of [The Unified Code + // for Units of Measure](http://unitsofmeasure.org/ucum.html) standard: + // **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` + // minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** + // * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera + // (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * + // `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano + // (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) + // * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` + // mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) + // **Grammar** The grammar also includes these connectors: * `/` + // division or ratio (as an infix operator). For examples, `kBy/{email}` + // or `MiBy/10ms` (although you should almost never have `/s` in a + // metric `unit`; rates should always be computed at query time from the + // underlying cumulative or delta value). * `.` multiplication or + // composition (as an infix operator). For examples, `GBy.d` or + // `k{watt}.h`. The grammar for a unit is as follows: Expression = + // Component { "." Component } { "/" Component } ; Component = ( [ + // PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation + // = "{" NAME "}" ; Notes: * `Annotation` is just a comment if it + // follows a `UNIT`. 
If the annotation is used alone, then the unit is + // equivalent to `1`. For examples, `{request}/s == 1/s`, + // `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank + // printable ASCII characters not containing `{` or `}`. * `1` + // represents a unitary [dimensionless + // unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, + // such as in `1/s`. It is typically used when none of the basic units + // are appropriate. For example, "new users per day" can be represented + // as `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new + // users). Alternatively, "thousands of page views per day" would be + // represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric + // value of `5.3` would mean "5300 page views per day"). * `%` + // represents dimensionless value of 1/100, and annotates values giving + // a percentage (so the metric values are typically in the range of + // 0..100, and a metric value `3` means "3 percent"). * `10^2.%` + // indicates a metric contains a ratio, typically in the range 0..1, + // that will be multiplied by 100 and displayed as a percentage (so a + // metric value `0.03` means "3 percent"). Unit string `json:"unit,omitempty"` // ValueType: Whether the measurement is an integer, a floating-point - // number, etc. - // Some combinations of `metric_kind` and `value_type` might not be - // supported. + // number, etc. Some combinations of `metric_kind` and `value_type` + // might not be supported. // // Possible values: // "VALUE_TYPE_UNSPECIFIED" - Do not use this default value. - // "BOOL" - The value is a boolean. - // This value type can be used only if the metric kind is `GAUGE`. + // "BOOL" - The value is a boolean. This value type can be used only + // if the metric kind is `GAUGE`. // "INT64" - The value is a signed 64-bit integer. // "DOUBLE" - The value is a double precision floating point number. - // "STRING" - The value is a text string. - // This value type can be used only if the metric kind is `GAUGE`. + // "STRING" - The value is a text string. This value type can be used + // only if the metric kind is `GAUGE`. // "DISTRIBUTION" - The value is a `Distribution`. // "MONEY" - The value is money. ValueType string `json:"valueType,omitempty"` @@ -3739,10 +2995,8 @@ func (s *MetricDescriptor) MarshalJSON() ([]byte, error) { // guide the usage of a metric. type MetricDescriptorMetadata struct { // IngestDelay: The delay of data points caused by ingestion. Data - // points older than this - // age are guaranteed to be ingested and available to be read, - // excluding - // data loss due to errors. + // points older than this age are guaranteed to be ingested and + // available to be read, excluding data loss due to errors. IngestDelay string `json:"ingestDelay,omitempty"` // LaunchStage: Deprecated. Must use the MetricDescriptor.launch_stage @@ -3755,60 +3009,38 @@ type MetricDescriptorMetadata struct { // "PRELAUNCH" - Prelaunch features are hidden from users and are only // visible internally. // "EARLY_ACCESS" - Early Access features are limited to a closed - // group of testers. To use - // these features, you must sign up in advance and sign a Trusted - // Tester - // agreement (which includes confidentiality provisions). These features - // may - // be unstable, changed in backward-incompatible ways, and are - // not - // guaranteed to be released. + // group of testers. 
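Illustrative sketch only (same assumed serviceusage import as above): a DELTA/INT64 counter metric of the kind these comments describe, with a `Unit` value taken from the grammar spelled out for the Unit field. The display name and description mirror the "Books Returned" example used later in this file; the alternative units in the comment are just other strings the grammar permits.

func exampleMetric() *serviceusage.MetricDescriptor {
	return &serviceusage.MetricDescriptor{
		Type:        "library.googleapis.com/book/returned_count",
		DisplayName: "Books Returned",
		Description: "The count of books that have been returned.",
		MetricKind:  "DELTA",
		ValueType:   "INT64",
		// "1" is the dimensionless unit; per the grammar above,
		// "1/min" or "{book}/d" would also be valid unit strings.
		Unit: "1",
	}
}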
To use these features, you must sign up in advance + // and sign a Trusted Tester agreement (which includes confidentiality + // provisions). These features may be unstable, changed in + // backward-incompatible ways, and are not guaranteed to be released. // "ALPHA" - Alpha is a limited availability test for releases before - // they are cleared - // for widespread use. By Alpha, all significant design issues are - // resolved - // and we are in the process of verifying functionality. Alpha - // customers - // need to apply for access, agree to applicable terms, and have - // their - // projects whitelisted. Alpha releases don’t have to be feature - // complete, - // no SLAs are provided, and there are no technical support obligations, - // but - // they will be far enough along that customers can actually use them - // in - // test environments or for limited-use tests -- just like they would - // in - // normal production cases. + // they are cleared for widespread use. By Alpha, all significant design + // issues are resolved and we are in the process of verifying + // functionality. Alpha customers need to apply for access, agree to + // applicable terms, and have their projects whitelisted. Alpha releases + // don’t have to be feature complete, no SLAs are provided, and there + // are no technical support obligations, but they will be far enough + // along that customers can actually use them in test environments or + // for limited-use tests -- just like they would in normal production + // cases. // "BETA" - Beta is the point at which we are ready to open a release - // for any - // customer to use. There are no SLA or technical support obligations in - // a - // Beta release. Products will be complete from a feature perspective, - // but - // may have some open outstanding issues. Beta releases are suitable - // for - // limited production use cases. + // for any customer to use. There are no SLA or technical support + // obligations in a Beta release. Products will be complete from a + // feature perspective, but may have some open outstanding issues. Beta + // releases are suitable for limited production use cases. // "GA" - GA features are open to all developers and are considered - // stable and - // fully qualified for production use. + // stable and fully qualified for production use. // "DEPRECATED" - Deprecated features are scheduled to be shut down - // and removed. For more - // information, see the “Deprecation Policy” section of our [Terms - // of - // Service](https://cloud.google.com/terms/) - // and the [Google Cloud Platform Subject to the - // Deprecation + // and removed. For more information, see the “Deprecation Policy” + // section of our [Terms of Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation // Policy](https://cloud.google.com/terms/deprecation) documentation. LaunchStage string `json:"launchStage,omitempty"` // SamplePeriod: The sampling period of metric data points. For metrics - // which are written - // periodically, consecutive data points are stored at this time - // interval, - // excluding data loss due to errors. Metrics with a higher granularity - // have - // a smaller sampling period. + // which are written periodically, consecutive data points are stored at + // this time interval, excluding data loss due to errors. Metrics with a + // higher granularity have a smaller sampling period. SamplePeriod string `json:"samplePeriod,omitempty"` // ForceSendFields is a list of field names (e.g. 
"IngestDelay") to @@ -3835,23 +3067,18 @@ func (s *MetricDescriptorMetadata) MarshalJSON() ([]byte, error) { } // MetricRule: Bind API methods to metrics. Binding a method to a metric -// causes that -// metric's configured quota behaviors to apply to the method call. +// causes that metric's configured quota behaviors to apply to the +// method call. type MetricRule struct { // MetricCosts: Metrics to update when the selected methods are called, - // and the associated - // cost applied to each metric. - // - // The key of the map is the metric name, and the values are the - // amount - // increased for the metric against which the quota limits are - // defined. - // The value must not be negative. + // and the associated cost applied to each metric. The key of the map is + // the metric name, and the values are the amount increased for the + // metric against which the quota limits are defined. The value must not + // be negative. MetricCosts map[string]string `json:"metricCosts,omitempty"` - // Selector: Selects the methods to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects the methods to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. "MetricCosts") to @@ -3878,96 +3105,44 @@ func (s *MetricRule) MarshalJSON() ([]byte, error) { } // Mixin: Declares an API Interface to be included in this interface. -// The including -// interface must redeclare all the methods from the included interface, -// but -// documentation and options are inherited as follows: -// -// - If after comment and whitespace stripping, the documentation -// string of the redeclared method is empty, it will be inherited -// from the original method. -// -// - Each annotation belonging to the service config (http, -// visibility) which is not set in the redeclared method will be -// inherited. -// -// - If an http annotation is inherited, the path pattern will be -// modified as follows. Any version prefix will be replaced by the -// version of the including interface plus the root path if -// specified. -// -// Example of a simple mixin: -// -// package google.acl.v1; -// service AccessControl { -// // Get the underlying ACL object. -// rpc GetAcl(GetAclRequest) returns (Acl) { -// option (google.api.http).get = "/v1/{resource=**}:getAcl"; -// } -// } -// -// package google.storage.v2; -// service Storage { -// // rpc GetAcl(GetAclRequest) returns (Acl); -// -// // Get a data record. -// rpc GetData(GetDataRequest) returns (Data) { -// option (google.api.http).get = "/v2/{resource=**}"; -// } -// } -// -// Example of a mixin configuration: -// -// apis: -// - name: google.storage.v2.Storage -// mixins: -// - name: google.acl.v1.AccessControl -// -// The mixin construct implies that all methods in `AccessControl` -// are -// also declared with same name and request/response types in -// `Storage`. A documentation generator or annotation processor will -// see the effective `Storage.GetAcl` method after -// inherting -// documentation and annotations as follows: -// -// service Storage { -// // Get the underlying ACL object. -// rpc GetAcl(GetAclRequest) returns (Acl) { -// option (google.api.http).get = "/v2/{resource=**}:getAcl"; -// } -// ... -// } -// -// Note how the version in the path pattern changed from `v1` to -// `v2`. 
-// -// If the `root` field in the mixin is specified, it should be -// a -// relative path under which inherited HTTP paths are placed. Example: -// -// apis: -// - name: google.storage.v2.Storage -// mixins: -// - name: google.acl.v1.AccessControl -// root: acls -// -// This implies the following inherited HTTP annotation: -// -// service Storage { -// // Get the underlying ACL object. -// rpc GetAcl(GetAclRequest) returns (Acl) { -// option (google.api.http).get = -// "/v2/acls/{resource=**}:getAcl"; -// } -// ... -// } +// The including interface must redeclare all the methods from the +// included interface, but documentation and options are inherited as +// follows: - If after comment and whitespace stripping, the +// documentation string of the redeclared method is empty, it will be +// inherited from the original method. - Each annotation belonging to +// the service config (http, visibility) which is not set in the +// redeclared method will be inherited. - If an http annotation is +// inherited, the path pattern will be modified as follows. Any version +// prefix will be replaced by the version of the including interface +// plus the root path if specified. Example of a simple mixin: package +// google.acl.v1; service AccessControl { // Get the underlying ACL +// object. rpc GetAcl(GetAclRequest) returns (Acl) { option +// (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package +// google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) +// returns (Acl); // Get a data record. rpc GetData(GetDataRequest) +// returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; +// } } Example of a mixin configuration: apis: - name: +// google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in `Storage`. +// A documentation generator or annotation processor will see the +// effective `Storage.GetAcl` method after inheriting documentation and +// annotations as follows: service Storage { // Get the underlying ACL +// object. rpc GetAcl(GetAclRequest) returns (Acl) { option +// (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how +// the version in the path pattern changed from `v1` to `v2`. If the +// `root` field in the mixin is specified, it should be a relative path +// under which inherited HTTP paths are placed. Example: apis: - name: +// google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl +// root: acls This implies the following inherited HTTP annotation: +// service Storage { // Get the underlying ACL object. rpc +// GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = +// "/v2/acls/{resource=**}:getAcl"; } ... } type Mixin struct { // Name: The fully qualified name of the interface which is included. Name string `json:"name,omitempty"` - // Root: If non-empty specifies a path under which inherited HTTP - // paths + // Root: If non-empty specifies a path under which inherited HTTP paths // are rooted. Root string `json:"root,omitempty"` @@ -3995,39 +3170,28 @@ func (s *Mixin) MarshalJSON() ([]byte, error) { } // MonitoredResourceDescriptor: An object that describes the schema of a -// MonitoredResource object using a -// type name and a set of labels. 
For example, the monitored -// resource -// descriptor for Google Compute Engine VM instances has a type -// of -// "gce_instance" and specifies the use of the labels "instance_id" -// and -// "zone" to identify particular VM instances. -// -// Different APIs can support different monitored resource types. APIs -// generally -// provide a `list` method that returns the monitored resource -// descriptors used -// by the API. +// MonitoredResource object using a type name and a set of labels. For +// example, the monitored resource descriptor for Google Compute Engine +// VM instances has a type of "gce_instance" and specifies the use of +// the labels "instance_id" and "zone" to identify particular VM +// instances. Different APIs can support different monitored resource +// types. APIs generally provide a `list` method that returns the +// monitored resource descriptors used by the API. type MonitoredResourceDescriptor struct { // Description: Optional. A detailed description of the monitored - // resource type that might - // be used in documentation. + // resource type that might be used in documentation. Description string `json:"description,omitempty"` // DisplayName: Optional. A concise name for the monitored resource type - // that might be - // displayed in user interfaces. It should be a Title Cased Noun - // Phrase, - // without any article or other determiners. For example, - // "Google Cloud SQL Database". + // that might be displayed in user interfaces. It should be a Title + // Cased Noun Phrase, without any article or other determiners. For + // example, "Google Cloud SQL Database". DisplayName string `json:"displayName,omitempty"` // Labels: Required. A set of labels used to describe instances of this - // monitored - // resource type. For example, an individual Google Cloud SQL database - // is - // identified by values for the labels "database_id" and "zone". + // monitored resource type. For example, an individual Google Cloud SQL + // database is identified by values for the labels "database_id" and + // "zone". Labels []*LabelDescriptor `json:"labels,omitempty"` // LaunchStage: Optional. The launch stage of the monitored resource @@ -4040,70 +3204,45 @@ type MonitoredResourceDescriptor struct { // "PRELAUNCH" - Prelaunch features are hidden from users and are only // visible internally. // "EARLY_ACCESS" - Early Access features are limited to a closed - // group of testers. To use - // these features, you must sign up in advance and sign a Trusted - // Tester - // agreement (which includes confidentiality provisions). These features - // may - // be unstable, changed in backward-incompatible ways, and are - // not - // guaranteed to be released. + // group of testers. To use these features, you must sign up in advance + // and sign a Trusted Tester agreement (which includes confidentiality + // provisions). These features may be unstable, changed in + // backward-incompatible ways, and are not guaranteed to be released. // "ALPHA" - Alpha is a limited availability test for releases before - // they are cleared - // for widespread use. By Alpha, all significant design issues are - // resolved - // and we are in the process of verifying functionality. Alpha - // customers - // need to apply for access, agree to applicable terms, and have - // their - // projects whitelisted. 
Alpha releases don’t have to be feature - // complete, - // no SLAs are provided, and there are no technical support obligations, - // but - // they will be far enough along that customers can actually use them - // in - // test environments or for limited-use tests -- just like they would - // in - // normal production cases. + // they are cleared for widespread use. By Alpha, all significant design + // issues are resolved and we are in the process of verifying + // functionality. Alpha customers need to apply for access, agree to + // applicable terms, and have their projects whitelisted. Alpha releases + // don’t have to be feature complete, no SLAs are provided, and there + // are no technical support obligations, but they will be far enough + // along that customers can actually use them in test environments or + // for limited-use tests -- just like they would in normal production + // cases. // "BETA" - Beta is the point at which we are ready to open a release - // for any - // customer to use. There are no SLA or technical support obligations in - // a - // Beta release. Products will be complete from a feature perspective, - // but - // may have some open outstanding issues. Beta releases are suitable - // for - // limited production use cases. + // for any customer to use. There are no SLA or technical support + // obligations in a Beta release. Products will be complete from a + // feature perspective, but may have some open outstanding issues. Beta + // releases are suitable for limited production use cases. // "GA" - GA features are open to all developers and are considered - // stable and - // fully qualified for production use. + // stable and fully qualified for production use. // "DEPRECATED" - Deprecated features are scheduled to be shut down - // and removed. For more - // information, see the “Deprecation Policy” section of our [Terms - // of - // Service](https://cloud.google.com/terms/) - // and the [Google Cloud Platform Subject to the - // Deprecation + // and removed. For more information, see the “Deprecation Policy” + // section of our [Terms of Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation // Policy](https://cloud.google.com/terms/deprecation) documentation. LaunchStage string `json:"launchStage,omitempty"` // Name: Optional. The resource name of the monitored resource // descriptor: - // "projects/{project_id}/monitoredResourceDescriptors/{type - // }" where - // {type} is the value of the `type` field in this object - // and - // {project_id} is a project ID that provides API-specific context - // for - // accessing the type. APIs that do not use project information can use - // the - // resource name format "monitoredResourceDescriptors/{type}". + // "projects/{project_id}/monitoredResourceDescriptors/{type}" where + // {type} is the value of the `type` field in this object and + // {project_id} is a project ID that provides API-specific context for + // accessing the type. APIs that do not use project information can use + // the resource name format "monitoredResourceDescriptors/{type}". Name string `json:"name,omitempty"` - // Type: Required. The monitored resource type. For example, the - // type + // Type: Required. The monitored resource type. For example, the type // "cloudsql_database" represents databases in Google Cloud SQL. - // The maximum length of this value is 256 characters. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Description") to @@ -4129,74 +3268,49 @@ func (s *MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Monitoring: Monitoring configuration of the service. -// -// The example below shows how to configure monitored resources and -// metrics -// for monitoring. In the example, a monitored resource and two metrics -// are +// Monitoring: Monitoring configuration of the service. The example +// below shows how to configure monitored resources and metrics for +// monitoring. In the example, a monitored resource and two metrics are // defined. The `library.googleapis.com/book/returned_count` metric is -// sent -// to both producer and consumer projects, whereas -// the -// `library.googleapis.com/book/overdue_count` metric is only sent to -// the -// consumer project. -// -// monitored_resources: -// - type: library.googleapis.com/branch -// labels: -// - key: /city -// description: The city where the library branch is located -// in. -// - key: /name -// description: The name of the branch. -// metrics: -// - name: library.googleapis.com/book/returned_count -// metric_kind: DELTA -// value_type: INT64 -// labels: -// - key: /customer_id -// - name: library.googleapis.com/book/overdue_count -// metric_kind: GAUGE -// value_type: INT64 -// labels: -// - key: /customer_id -// monitoring: -// producer_destinations: -// - monitored_resource: library.googleapis.com/branch -// metrics: -// - library.googleapis.com/book/returned_count -// consumer_destinations: -// - monitored_resource: library.googleapis.com/branch -// metrics: -// - library.googleapis.com/book/returned_count -// - library.googleapis.com/book/overdue_count +// sent to both producer and consumer projects, whereas the +// `library.googleapis.com/book/num_overdue` metric is only sent to the +// consumer project. monitored_resources: - type: +// library.googleapis.com/Branch display_name: "Library Branch" +// description: "A branch of a library." launch_stage: GA labels: - key: +// resource_container description: "The Cloud container (ie. project id) +// for the Branch." - key: location description: "The location of the +// library branch." - key: branch_id description: "The id of the +// branch." metrics: - name: library.googleapis.com/book/returned_count +// display_name: "Books Returned" description: "The count of books that +// have been returned." launch_stage: GA metric_kind: DELTA value_type: +// INT64 unit: "1" labels: - key: customer_id description: "The id of +// the customer." - name: library.googleapis.com/book/num_overdue +// display_name: "Books Overdue" description: "The current number of +// overdue books." launch_stage: GA metric_kind: GAUGE value_type: INT64 +// unit: "1" labels: - key: customer_id description: "The id of the +// customer." monitoring: producer_destinations: - monitored_resource: +// library.googleapis.com/Branch metrics: - +// library.googleapis.com/book/returned_count consumer_destinations: - +// monitored_resource: library.googleapis.com/Branch metrics: - +// library.googleapis.com/book/returned_count - +// library.googleapis.com/book/num_overdue type Monitoring struct { // ConsumerDestinations: Monitoring configurations for sending metrics - // to the consumer project. - // There can be multiple consumer destinations. 
A monitored resouce type - // may - // appear in multiple monitoring destinations if different aggregations - // are - // needed for different sets of metrics associated with that - // monitored - // resource type. A monitored resource and metric pair may only be used - // once - // in the Monitoring configuration. + // to the consumer project. There can be multiple consumer destinations. + // A monitored resource type may appear in multiple monitoring + // destinations if different aggregations are needed for different sets + // of metrics associated with that monitored resource type. A monitored + // resource and metric pair may only be used once in the Monitoring + // configuration. ConsumerDestinations []*MonitoringDestination `json:"consumerDestinations,omitempty"` // ProducerDestinations: Monitoring configurations for sending metrics - // to the producer project. - // There can be multiple producer destinations. A monitored resouce type - // may - // appear in multiple monitoring destinations if different aggregations - // are - // needed for different sets of metrics associated with that - // monitored - // resource type. A monitored resource and metric pair may only be used - // once - // in the Monitoring configuration. + // to the producer project. There can be multiple producer destinations. + // A monitored resource type may appear in multiple monitoring + // destinations if different aggregations are needed for different sets + // of metrics associated with that monitored resource type. A monitored + // resource and metric pair may only be used once in the Monitoring + // configuration. ProducerDestinations []*MonitoringDestination `json:"producerDestinations,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -4225,17 +3339,14 @@ func (s *Monitoring) MarshalJSON() ([]byte, error) { } // MonitoringDestination: Configuration of a specific monitoring -// destination (the producer project -// or the consumer project). +// destination (the producer project or the consumer project). type MonitoringDestination struct { // Metrics: Types of the metrics to report to this monitoring - // destination. - // Each type must be defined in Service.metrics section. + // destination. Each type must be defined in Service.metrics section. Metrics []string `json:"metrics,omitempty"` // MonitoredResource: The monitored resource type. The type must be - // defined in - // Service.monitored_resources section. + // defined in Service.monitored_resources section. MonitoredResource string `json:"monitoredResource,omitempty"` // ForceSendFields is a list of field names (e.g. "Metrics") to @@ -4262,43 +3373,26 @@ func (s *MonitoringDestination) MarshalJSON() ([]byte, error) { } // OAuthRequirements: OAuth scopes are a way to define data and -// permissions on data. For example, -// there are scopes defined for "Read-only access to Google Calendar" -// and -// "Access to Cloud Platform". Users can consent to a scope for an -// application, -// giving it permission to access that data on their behalf. -// -// OAuth scope specifications should be fairly coarse grained; a user -// will need -// to see and understand the text description of what your scope -// means. -// -// In most cases: use one or at most two OAuth scopes for an entire -// family of +// permissions on data. For example, there are scopes defined for +// "Read-only access to Google Calendar" and "Access to Cloud Platform". 
+// Users can consent to a scope for an application, giving it permission +// to access that data on their behalf. OAuth scope specifications +// should be fairly coarse grained; a user will need to see and +// understand the text description of what your scope means. In most +// cases: use one or at most two OAuth scopes for an entire family of // products. If your product has multiple APIs, you should probably be -// sharing -// the OAuth scope across all of those APIs. -// -// When you need finer grained OAuth consent screens: talk with your -// product -// management about how developers will use them in practice. -// -// Please note that even though each of the canonical scopes is enough -// for a -// request to be accepted and passed to the backend, a request can still -// fail -// due to the backend requiring additional scopes or permissions. +// sharing the OAuth scope across all of those APIs. When you need finer +// grained OAuth consent screens: talk with your product management +// about how developers will use them in practice. Please note that even +// though each of the canonical scopes is enough for a request to be +// accepted and passed to the backend, a request can still fail due to +// the backend requiring additional scopes or permissions. type OAuthRequirements struct { // CanonicalScopes: The list of publicly documented OAuth scopes that - // are allowed access. An - // OAuth token containing any of these scopes will be - // accepted. - // - // Example: - // - // canonical_scopes: https://www.googleapis.com/auth/calendar, - // https://www.googleapis.com/auth/calendar.read + // are allowed access. An OAuth token containing any of these scopes + // will be accepted. Example: canonical_scopes: + // https://www.googleapis.com/auth/calendar, + // https://www.googleapis.com/auth/calendar.read CanonicalScopes string `json:"canonicalScopes,omitempty"` // ForceSendFields is a list of field names (e.g. "CanonicalScopes") to @@ -4326,52 +3420,38 @@ func (s *OAuthRequirements) MarshalJSON() ([]byte, error) { } // Operation: This resource represents a long-running operation that is -// the result of a -// network API call. +// the result of a network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in - // progress. - // If `true`, the operation is completed, and either `error` or - // `response` is - // available. + // progress. If `true`, the operation is completed, and either `error` + // or `response` is available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. Error *Status `json:"error,omitempty"` - // Metadata: Service-specific metadata associated with the operation. - // It typically - // contains progress information and common metadata such as create - // time. - // Some services might not provide such metadata. Any method that - // returns a - // long-running operation should document the metadata type, if any. + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same - // service that - // originally returns it. 
If you use the default HTTP mapping, - // the - // `name` should be a resource name ending with + // service that originally returns it. If you use the default HTTP + // mapping, the `name` should be a resource name ending with // `operations/{unique_id}`. Name string `json:"name,omitempty"` - // Response: The normal response of the operation in case of success. - // If the original - // method returns no data on success, such as `Delete`, the response - // is - // `google.protobuf.Empty`. If the original method is - // standard - // `Get`/`Create`/`Update`, the response should be the resource. For - // other - // methods, the response should have the type `XxxResponse`, where - // `Xxx` - // is the original method name. For example, if the original method - // name - // is `TakeSnapshot()`, the inferred response type - // is - // `TakeSnapshotResponse`. + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as `Delete`, the + // response is `google.protobuf.Empty`. If the original method is + // standard `Get`/`Create`/`Update`, the response should be the + // resource. For other methods, the response should have the type + // `XxxResponse`, where `Xxx` is the original method name. For example, + // if the original method name is `TakeSnapshot()`, the inferred + // response type is `TakeSnapshotResponse`. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -4405,8 +3485,7 @@ func (s *Operation) MarshalJSON() ([]byte, error) { // services operation. type OperationMetadata struct { // ResourceNames: The full name of the resources that this operation is - // directly - // associated with. + // directly associated with. ResourceNames []string `json:"resourceNames,omitempty"` // ForceSendFields is a list of field names (e.g. "ResourceNames") to @@ -4433,25 +3512,19 @@ func (s *OperationMetadata) MarshalJSON() ([]byte, error) { } // Option: A protocol buffer option, which can be attached to a message, -// field, -// enumeration, etc. +// field, enumeration, etc. type Option struct { // Name: The option's name. For protobuf built-in options (options - // defined in - // descriptor.proto), this is the short name. For example, - // "map_entry". - // For custom options, it should be the fully-qualified name. For - // example, - // "google.api.http". + // defined in descriptor.proto), this is the short name. For example, + // "map_entry". For custom options, it should be the fully-qualified + // name. For example, "google.api.http". Name string `json:"name,omitempty"` // Value: The option's value packed in an Any message. If the value is a - // primitive, - // the corresponding wrapper type defined in - // google/protobuf/wrappers.proto - // should be used. If the value is an enum, it should be stored as an - // int32 - // value using the google.protobuf.Int32Value type. + // primitive, the corresponding wrapper type defined in + // google/protobuf/wrappers.proto should be used. If the value is an + // enum, it should be stored as an int32 value using the + // google.protobuf.Int32Value type. Value googleapi.RawMessage `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "Name") to @@ -4478,39 +3551,24 @@ func (s *Option) MarshalJSON() ([]byte, error) { } // Page: Represents a documentation page. A page can contain subpages to -// represent -// nested documentation set structure. 
+// represent nested documentation set structure. type Page struct { - // Content: The Markdown content of the page. You can use (== - // include {path} - // ==) to include content from a Markdown file. + // Content: The Markdown content of the page. You can use (== include + // {path} ==) to include content from a Markdown file. Content string `json:"content,omitempty"` // Name: The name of the page. It will be used as an identity of the - // page to - // generate URI of the page, text of the link to this page in - // navigation, - // etc. The full page name (start from the root page name to this - // page - // concatenated with `.`) can be used as reference to the page in - // your - // documentation. For example: - //
pages:
-	// - name: Tutorial
-	//   content: (== include tutorial.md ==)
-	//   subpages:
-	//   - name: Java
-	//     content: (== include tutorial_java.md
-	// ==)
-	// 
- // You can reference `Java` page using Markdown reference link - // syntax: - // `Java`. + // page to generate URI of the page, text of the link to this page in + // navigation, etc. The full page name (start from the root page name to + // this page concatenated with `.`) can be used as reference to the page + // in your documentation. For example: pages: - name: Tutorial content: + // (== include tutorial.md ==) subpages: - name: Java content: (== + // include tutorial_java.md ==) You can reference `Java` page using + // Markdown reference link syntax: `Java`. Name string `json:"name,omitempty"` // Subpages: Subpages of this page. The order of subpages specified here - // will be - // honored in the generated docset. + // will be honored in the generated docset. Subpages []*Page `json:"subpages,omitempty"` // ForceSendFields is a list of field names (e.g. "Content") to @@ -4537,67 +3595,33 @@ func (s *Page) MarshalJSON() ([]byte, error) { } // Quota: Quota configuration helps to achieve fairness and budgeting in -// service -// usage. -// -// The metric based quota configuration works this way: -// - The service configuration defines a set of metrics. -// - For API calls, the quota.metric_rules maps methods to metrics with -// corresponding costs. -// - The quota.limits defines limits on the metrics, which will be used -// for -// quota checks at runtime. -// -// An example quota configuration in yaml format: -// -// quota: -// limits: -// -// - name: apiWriteQpsPerProject -// metric: library.googleapis.com/write_calls -// unit: "1/min/{project}" # rate limit for consumer projects -// values: -// STANDARD: 10000 -// -// -// # The metric rules bind all methods to the read_calls metric, -// # except for the UpdateBook and DeleteBook methods. These two -// methods -// # are mapped to the write_calls metric, with the UpdateBook -// method -// # consuming at twice rate as the DeleteBook method. -// metric_rules: -// - selector: "*" -// metric_costs: -// library.googleapis.com/read_calls: 1 -// - selector: google.example.library.v1.LibraryService.UpdateBook -// metric_costs: -// library.googleapis.com/write_calls: 2 -// - selector: google.example.library.v1.LibraryService.DeleteBook -// metric_costs: -// library.googleapis.com/write_calls: 1 -// -// Corresponding Metric definition: -// -// metrics: -// - name: library.googleapis.com/read_calls -// display_name: Read requests -// metric_kind: DELTA -// value_type: INT64 -// -// - name: library.googleapis.com/write_calls -// display_name: Write requests -// metric_kind: DELTA -// value_type: INT64 -// -// +// service usage. The metric based quota configuration works this way: - +// The service configuration defines a set of metrics. - For API calls, +// the quota.metric_rules maps methods to metrics with corresponding +// costs. - The quota.limits defines limits on the metrics, which will +// be used for quota checks at runtime. An example quota configuration +// in yaml format: quota: limits: - name: apiWriteQpsPerProject metric: +// library.googleapis.com/write_calls unit: "1/min/{project}" # rate +// limit for consumer projects values: STANDARD: 10000 # The metric +// rules bind all methods to the read_calls metric, # except for the +// UpdateBook and DeleteBook methods. These two methods # are mapped to +// the write_calls metric, with the UpdateBook method # consuming at +// twice rate as the DeleteBook method. 
metric_rules: - selector: "*" +// metric_costs: library.googleapis.com/read_calls: 1 - selector: +// google.example.library.v1.LibraryService.UpdateBook metric_costs: +// library.googleapis.com/write_calls: 2 - selector: +// google.example.library.v1.LibraryService.DeleteBook metric_costs: +// library.googleapis.com/write_calls: 1 Corresponding Metric +// definition: metrics: - name: library.googleapis.com/read_calls +// display_name: Read requests metric_kind: DELTA value_type: INT64 - +// name: library.googleapis.com/write_calls display_name: Write requests +// metric_kind: DELTA value_type: INT64 type Quota struct { // Limits: List of `QuotaLimit` definitions for the service. Limits []*QuotaLimit `json:"limits,omitempty"` // MetricRules: List of `MetricRule` definitions, each one mapping a - // selected method to one - // or more metrics. + // selected method to one or more metrics. MetricRules []*MetricRule `json:"metricRules,omitempty"` // ForceSendFields is a list of field names (e.g. "Limits") to @@ -4624,116 +3648,75 @@ func (s *Quota) MarshalJSON() ([]byte, error) { } // QuotaLimit: `QuotaLimit` defines a specific limit that applies over a -// specified duration -// for a limit type. There can be at most one limit for a duration and -// limit -// type combination defined within a `QuotaGroup`. +// specified duration for a limit type. There can be at most one limit +// for a duration and limit type combination defined within a +// `QuotaGroup`. type QuotaLimit struct { // DefaultLimit: Default number of tokens that can be consumed during - // the specified - // duration. This is the number of tokens assigned when a - // client - // application developer activates the service for his/her - // project. - // - // Specifying a value of 0 will block all requests. This can be used if - // you - // are provisioning quota to selected consumers and blocking - // others. - // Similarly, a value of -1 will indicate an unlimited quota. No - // other - // negative values are allowed. - // - // Used by group-based quotas only. + // the specified duration. This is the number of tokens assigned when a + // client application developer activates the service for his/her + // project. Specifying a value of 0 will block all requests. This can be + // used if you are provisioning quota to selected consumers and blocking + // others. Similarly, a value of -1 will indicate an unlimited quota. No + // other negative values are allowed. Used by group-based quotas only. DefaultLimit int64 `json:"defaultLimit,omitempty,string"` // Description: Optional. User-visible, extended description for this - // quota limit. - // Should be used only when more context is needed to understand this - // limit - // than provided by the limit's display name (see: `display_name`). + // quota limit. Should be used only when more context is needed to + // understand this limit than provided by the limit's display name (see: + // `display_name`). Description string `json:"description,omitempty"` - // DisplayName: User-visible display name for this limit. - // Optional. If not set, the UI will provide a default display name - // based on - // the quota configuration. This field can be used to override the - // default + // DisplayName: User-visible display name for this limit. Optional. If + // not set, the UI will provide a default display name based on the + // quota configuration. This field can be used to override the default // display name generated from the configuration. 
DisplayName string `json:"displayName,omitempty"` // Duration: Duration of this limit in textual notation. Must be "100s" - // or "1d". - // - // Used by group-based quotas only. + // or "1d". Used by group-based quotas only. Duration string `json:"duration,omitempty"` // FreeTier: Free tier value displayed in the Developers Console for - // this limit. - // The free tier is the number of tokens that will be subtracted from - // the - // billed amount when billing is enabled. - // This field can only be set on a limit with duration "1d", in a - // billable - // group; it is invalid on any other limit. If this field is not set, - // it + // this limit. The free tier is the number of tokens that will be + // subtracted from the billed amount when billing is enabled. This field + // can only be set on a limit with duration "1d", in a billable group; + // it is invalid on any other limit. If this field is not set, it // defaults to 0, indicating that there is no free tier for this - // service. - // - // Used by group-based quotas only. + // service. Used by group-based quotas only. FreeTier int64 `json:"freeTier,omitempty,string"` // MaxLimit: Maximum number of tokens that can be consumed during the - // specified - // duration. Client application developers can override the default - // limit up - // to this maximum. If specified, this value cannot be set to a value - // less - // than the default limit. If not specified, it is set to the default - // limit. - // - // To allow clients to apply overrides with no upper bound, set this to - // -1, - // indicating unlimited maximum quota. - // - // Used by group-based quotas only. + // specified duration. Client application developers can override the + // default limit up to this maximum. If specified, this value cannot be + // set to a value less than the default limit. If not specified, it is + // set to the default limit. To allow clients to apply overrides with no + // upper bound, set this to -1, indicating unlimited maximum quota. Used + // by group-based quotas only. MaxLimit int64 `json:"maxLimit,omitempty,string"` // Metric: The name of the metric this quota limit applies to. The quota - // limits with - // the same metric will be checked together during runtime. The metric - // must be - // defined within the service config. + // limits with the same metric will be checked together during runtime. + // The metric must be defined within the service config. Metric string `json:"metric,omitempty"` - // Name: Name of the quota limit. - // - // The name must be provided, and it must be unique within the service. - // The - // name can only include alphanumeric characters as well as '-'. - // - // The maximum length of the limit name is 64 characters. + // Name: Name of the quota limit. The name must be provided, and it must + // be unique within the service. The name can only include alphanumeric + // characters as well as '-'. The maximum length of the limit name is 64 + // characters. Name string `json:"name,omitempty"` - // Unit: Specify the unit of the quota limit. It uses the same syntax - // as - // Metric.unit. The supported unit kinds are determined by the - // quota - // backend system. - // - // Here are some examples: - // * "1/min/{project}" for quota per minute per project. - // - // Note: the order of unit components is insignificant. - // The "1" at the beginning is required to follow the metric unit - // syntax. + // Unit: Specify the unit of the quota limit. It uses the same syntax as + // Metric.unit. 
The supported unit kinds are determined by the quota + // backend system. Here are some examples: * "1/min/{project}" for quota + // per minute per project. Note: the order of unit components is + // insignificant. The "1" at the beginning is required to follow the + // metric unit syntax. Unit string `json:"unit,omitempty"` // Values: Tiered limit values. You must specify this as a key:value - // pair, with an - // integer value that is the maximum number of requests allowed for - // the - // specified unit. Currently only STANDARD is supported. + // pair, with an integer value that is the maximum number of requests + // allowed for the specified unit. Currently only STANDARD is supported. Values map[string]string `json:"values,omitempty"` // ForceSendFields is a list of field names (e.g. "DefaultLimit") to @@ -4761,92 +3744,71 @@ func (s *QuotaLimit) MarshalJSON() ([]byte, error) { // QuotaOverride: A quota override type QuotaOverride struct { + // AdminOverrideAncestor: The resource name of the ancestor that + // requested the override. For example: "organizations/12345" or + // "folders/67890". Used by admin overrides only. + AdminOverrideAncestor string `json:"adminOverrideAncestor,omitempty"` + // Dimensions: If this map is nonempty, then this override applies only - // to specific values - // for dimensions defined in the limit unit. - // - // For example, an override on a limit with the unit - // 1/{project}/{region} + // to specific values for dimensions defined in the limit unit. For + // example, an override on a limit with the unit 1/{project}/{region} // could contain an entry with the key "region" and the value - // "us-east-1"; - // the override is only applied to quota consumed in that region. - // - // This map has the following restrictions: - // - // * Keys that are not defined in the limit's unit are not valid - // keys. - // Any string appearing in {brackets} in the unit (besides {project} - // or - // {user}) is a defined key. - // * "project" is not a valid key; the project is already specified - // in - // the parent resource name. - // * "user" is not a valid key; the API does not support quota - // overrides - // that apply only to a specific user. - // * If "region" appears as a key, its value must be a valid Cloud - // region. - // * If "zone" appears as a key, its value must be a valid Cloud - // zone. - // * If any valid key other than "region" or "zone" appears in the - // map, then - // all valid keys other than "region" or "zone" must also appear in - // the - // map. + // "us-east-1"; the override is only applied to quota consumed in that + // region. This map has the following restrictions: * Keys that are not + // defined in the limit's unit are not valid keys. Any string appearing + // in {brackets} in the unit (besides {project} or {user}) is a defined + // key. * "project" is not a valid key; the project is already specified + // in the parent resource name. * "user" is not a valid key; the API + // does not support quota overrides that apply only to a specific user. + // * If "region" appears as a key, its value must be a valid Cloud + // region. * If "zone" appears as a key, its value must be a valid Cloud + // zone. * If any valid key other than "region" or "zone" appears in the + // map, then all valid keys other than "region" or "zone" must also + // appear in the map. Dimensions map[string]string `json:"dimensions,omitempty"` - // Metric: The name of the metric to which this override applies. 
- // - // An example name would be: - // `compute.googleapis.com/cpus` + // Metric: The name of the metric to which this override applies. An + // example name would be: `compute.googleapis.com/cpus` Metric string `json:"metric,omitempty"` - // Name: The resource name of the override. - // This name is generated by the server when the override is - // created. - // - // Example names would - // be: - // `projects/123/services/compute.googleapis.com/consumerQuotaMetrics - // /compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminOverrid - // es/4a3f2c1d` - // `projects/123/services/compute.googleapis.com/consumerQuo - // taMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/con - // sumerOverrides/4a3f2c1d` - // - // The resource name is intended to be opaque and should not be parsed - // for - // its component strings, since its representation could change in the - // future. + // Name: The resource name of the override. This name is generated by + // the server when the override is created. Example names would be: + // `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/com + // pute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminOverrides/4 + // a3f2c1d` + // `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/com + // pute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/consumerOverride + // s/4a3f2c1d` The resource name is intended to be opaque and should not + // be parsed for its component strings, since its representation could + // change in the future. Name string `json:"name,omitempty"` - // OverrideValue: The overriding quota limit value. - // Can be any nonnegative integer, or -1 (unlimited quota). + // OverrideValue: The overriding quota limit value. Can be any + // nonnegative integer, or -1 (unlimited quota). OverrideValue int64 `json:"overrideValue,omitempty,string"` - // Unit: The limit unit of the limit to which this override applies. - // - // An example unit would be: - // `1/{project}/{region}` - // Note that `{project}` and `{region}` are not placeholders in this - // example; - // the literal characters `{` and `}` occur in the string. + // Unit: The limit unit of the limit to which this override applies. An + // example unit would be: `1/{project}/{region}` Note that `{project}` + // and `{region}` are not placeholders in this example; the literal + // characters `{` and `}` occur in the string. Unit string `json:"unit,omitempty"` - // ForceSendFields is a list of field names (e.g. "Dimensions") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "AdminOverrideAncestor") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Dimensions") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AdminOverrideAncestor") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -4857,18 +3819,14 @@ func (s *QuotaOverride) MarshalJSON() ([]byte, error) { } // ServiceIdentity: Service identity for a service. This is the identity -// that service producer -// should use to access consumer resources. +// that service producer should use to access consumer resources. type ServiceIdentity struct { // Email: The email address of the service account that a service - // producer would use - // to access consumer resources. + // producer would use to access consumer resources. Email string `json:"email,omitempty"` - // UniqueId: The unique and stable id of the service - // account. - // https://cloud.google.com/iam/reference/rest/v1/projects.servi - // ceAccounts#ServiceAccount + // UniqueId: The unique and stable id of the service account. + // https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts#ServiceAccount UniqueId string `json:"uniqueId,omitempty"` // ForceSendFields is a list of field names (e.g. "Email") to @@ -4895,12 +3853,10 @@ func (s *ServiceIdentity) MarshalJSON() ([]byte, error) { } // SourceContext: `SourceContext` represents information about the -// source of a -// protobuf element, like the file in which it is defined. +// source of a protobuf element, like the file in which it is defined. type SourceContext struct { // FileName: The path-qualified name of the .proto file that contained - // the associated - // protobuf element. For example: + // the associated protobuf element. For example: // "google/protobuf/source_context.proto". FileName string `json:"fileName,omitempty"` @@ -4956,32 +3912,24 @@ func (s *SourceInfo) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. -// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. -// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. + // Details: A list of messages that carry the error details. 
There is a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -5008,14 +3956,12 @@ func (s *Status) MarshalJSON() ([]byte, error) { } // SystemParameter: Define a parameter's name and location. The -// parameter may be passed as either -// an HTTP header or a URL query parameter, and if both are passed the -// behavior -// is implementation-dependent. +// parameter may be passed as either an HTTP header or a URL query +// parameter, and if both are passed the behavior is +// implementation-dependent. type SystemParameter struct { // HttpHeader: Define the HTTP header name to use for the parameter. It - // is case - // insensitive. + // is case insensitive. HttpHeader string `json:"httpHeader,omitempty"` // Name: Define the name of the parameter, such as "api_key" . It is @@ -5023,8 +3969,7 @@ type SystemParameter struct { Name string `json:"name,omitempty"` // UrlQueryParameter: Define the URL query parameter name to use for the - // parameter. It is case - // sensitive. + // parameter. It is case sensitive. UrlQueryParameter string `json:"urlQueryParameter,omitempty"` // ForceSendFields is a list of field names (e.g. "HttpHeader") to @@ -5051,24 +3996,18 @@ func (s *SystemParameter) MarshalJSON() ([]byte, error) { } // SystemParameterRule: Define a system parameter rule mapping system -// parameter definitions to -// methods. +// parameter definitions to methods. type SystemParameterRule struct { // Parameters: Define parameters. Multiple names may be defined for a - // parameter. - // For a given method call, only one of them should be used. If - // multiple - // names are used the behavior is implementation-dependent. - // If none of the specified names are present the behavior - // is + // parameter. For a given method call, only one of them should be used. + // If multiple names are used the behavior is implementation-dependent. + // If none of the specified names are present the behavior is // parameter-dependent. Parameters []*SystemParameter `json:"parameters,omitempty"` // Selector: Selects the methods to which this rule applies. Use '*' to - // indicate all - // methods in all APIs. - // - // Refer to selector for syntax details. + // indicate all methods in all APIs. Refer to selector for syntax + // details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. "Parameters") to @@ -5094,49 +4033,23 @@ func (s *SystemParameterRule) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SystemParameters: ### System parameter configuration -// -// A system parameter is a special kind of parameter defined by the -// API -// system, not by an individual API. It is typically mapped to an HTTP -// header +// SystemParameters: ### System parameter configuration A system +// parameter is a special kind of parameter defined by the API system, +// not by an individual API. It is typically mapped to an HTTP header // and/or a URL query parameter. 
This configuration specifies which -// methods -// change the names of the system parameters. +// methods change the names of the system parameters. type SystemParameters struct { - // Rules: Define system parameters. - // - // The parameters defined here will override the default - // parameters - // implemented by the system. If this field is missing from the - // service - // config, default system parameters will be used. Default system - // parameters - // and names is implementation-dependent. - // - // Example: define api key for all methods - // - // system_parameters - // rules: - // - selector: "*" - // parameters: - // - name: api_key - // url_query_parameter: api_key - // - // - // Example: define 2 api key names for a specific method. - // - // system_parameters - // rules: - // - selector: "/ListShelves" - // parameters: - // - name: api_key - // http_header: Api-Key1 - // - name: api_key - // http_header: Api-Key2 - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // Rules: Define system parameters. The parameters defined here will + // override the default parameters implemented by the system. If this + // field is missing from the service config, default system parameters + // will be used. Default system parameters and names is + // implementation-dependent. Example: define api key for all methods + // system_parameters rules: - selector: "*" parameters: - name: api_key + // url_query_parameter: api_key Example: define 2 api key names for a + // specific method. system_parameters rules: - selector: "/ListShelves" + // parameters: - name: api_key http_header: Api-Key1 - name: api_key + // http_header: Api-Key2 **NOTE:** All service configuration rules + // follow "last one wins" order. Rules []*SystemParameterRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. "Rules") to @@ -5213,29 +4126,20 @@ func (s *Type) MarshalJSON() ([]byte, error) { // Usage: Configuration controlling usage of a service. type Usage struct { // ProducerNotificationChannel: The full resource name of a channel used - // for sending notifications to the - // service producer. - // - // Google Service Management currently only supports - // [Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a - // notification - // channel. To use Google Cloud Pub/Sub as the channel, this must be the - // name - // of a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name - // format + // for sending notifications to the service producer. Google Service + // Management currently only supports [Google Cloud + // Pub/Sub](https://cloud.google.com/pubsub) as a notification channel. + // To use Google Cloud Pub/Sub as the channel, this must be the name of + // a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format // documented in https://cloud.google.com/pubsub/docs/overview. ProducerNotificationChannel string `json:"producerNotificationChannel,omitempty"` // Requirements: Requirements that must be satisfied before a consumer - // project can use the - // service. Each requirement is of the form - // /; - // for example 'serviceusage.googleapis.com/billing-enabled'. + // project can use the service. Each requirement is of the form /; for + // example 'serviceusage.googleapis.com/billing-enabled'. Requirements []string `json:"requirements,omitempty"` - // Rules: A list of usage rules that apply to individual API - // methods. - // + // Rules: A list of usage rules that apply to individual API methods. 
// **NOTE:** All service configuration rules follow "last one wins" // order. Rules []*UsageRule `json:"rules,omitempty"` @@ -5269,57 +4173,34 @@ func (s *Usage) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UsageRule: Usage configuration rules for the service. -// -// NOTE: Under development. -// -// -// Use this rule to configure unregistered calls for the service. -// Unregistered -// calls are calls that do not contain consumer project -// identity. -// (Example: calls that do not contain an API key). -// By default, API methods do not allow unregistered calls, and each -// method call -// must be identified by a consumer project identity. Use this rule -// to -// allow/disallow unregistered calls. -// -// Example of an API that wants to allow unregistered calls for entire -// service. -// -// usage: -// rules: -// - selector: "*" -// allow_unregistered_calls: true -// -// Example of a method that wants to allow unregistered calls. -// -// usage: -// rules: -// - selector: +// UsageRule: Usage configuration rules for the service. NOTE: Under +// development. Use this rule to configure unregistered calls for the +// service. Unregistered calls are calls that do not contain consumer +// project identity. (Example: calls that do not contain an API key). By +// default, API methods do not allow unregistered calls, and each method +// call must be identified by a consumer project identity. Use this rule +// to allow/disallow unregistered calls. Example of an API that wants to +// allow unregistered calls for entire service. usage: rules: - +// selector: "*" allow_unregistered_calls: true Example of a method that +// wants to allow unregistered calls. usage: rules: - selector: // "google.example.library.v1.LibraryService.CreateBook" -// allow_unregistered_calls: true +// allow_unregistered_calls: true type UsageRule struct { // AllowUnregisteredCalls: If true, the selected method allows - // unregistered calls, e.g. calls - // that don't identify any user or application. + // unregistered calls, e.g. calls that don't identify any user or + // application. AllowUnregisteredCalls bool `json:"allowUnregisteredCalls,omitempty"` // Selector: Selects the methods to which this rule applies. Use '*' to - // indicate all - // methods in all APIs. - // - // Refer to selector for syntax details. + // indicate all methods in all APIs. Refer to selector for syntax + // details. Selector string `json:"selector,omitempty"` // SkipServiceControl: If true, the selected method should skip service - // control and the control - // plane features, such as quota and billing, will not be - // available. - // This flag is used by Google Cloud Endpoints to bypass checks for - // internal - // methods, such as service health check methods. + // control and the control plane features, such as quota and billing, + // will not be available. This flag is used by Google Cloud Endpoints to + // bypass checks for internal methods, such as service health check + // methods. SkipServiceControl bool `json:"skipServiceControl,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -5359,23 +4240,15 @@ type OperationsCancelCall struct { } // Cancel: Starts asynchronous cancellation on a long-running operation. -// The server -// makes a best effort to cancel the operation, but success is -// not -// guaranteed. If the server doesn't support this method, it -// returns -// `google.rpc.Code.UNIMPLEMENTED`. 
Clients can -// use -// Operations.GetOperation or -// other methods to check whether the cancellation succeeded or whether -// the -// operation completed despite cancellation. On successful -// cancellation, -// the operation is not deleted; instead, it becomes an operation -// with -// an Operation.error value with a google.rpc.Status.code of -// 1, -// corresponding to `Code.CANCELLED`. +// The server makes a best effort to cancel the operation, but success +// is not guaranteed. If the server doesn't support this method, it +// returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use +// Operations.GetOperation or other methods to check whether the +// cancellation succeeded or whether the operation completed despite +// cancellation. On successful cancellation, the operation is not +// deleted; instead, it becomes an operation with an Operation.error +// value with a google.rpc.Status.code of 1, corresponding to +// `Code.CANCELLED`. func (r *OperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *OperationsCancelCall { c := &OperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5410,7 +4283,7 @@ func (c *OperationsCancelCall) Header() http.Header { func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5474,7 +4347,7 @@ func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) } return ret, nil // { - // "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", // "flatPath": "v1/operations/{operationsId}:cancel", // "httpMethod": "POST", // "id": "serviceusage.operations.cancel", @@ -5516,12 +4389,9 @@ type OperationsDeleteCall struct { } // Delete: Deletes a long-running operation. This method indicates that -// the client is -// no longer interested in the operation result. It does not cancel -// the -// operation. If the server doesn't support this method, it -// returns -// `google.rpc.Code.UNIMPLEMENTED`. +// the client is no longer interested in the operation result. It does +// not cancel the operation. 
If the server doesn't support this method, +// it returns `google.rpc.Code.UNIMPLEMENTED`. func (r *OperationsService) Delete(name string) *OperationsDeleteCall { c := &OperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5555,7 +4425,7 @@ func (c *OperationsDeleteCall) Header() http.Header { func (c *OperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5614,7 +4484,7 @@ func (c *OperationsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) } return ret, nil // { - // "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + // "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", // "flatPath": "v1/operations/{operationsId}", // "httpMethod": "DELETE", // "id": "serviceusage.operations.delete", @@ -5653,11 +4523,9 @@ type OperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this -// method to poll the operation result at intervals as recommended by -// the API -// service. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. func (r *OperationsService) Get(name string) *OperationsGetCall { c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5701,7 +4569,7 @@ func (c *OperationsGetCall) Header() http.Header { func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5763,7 +4631,7 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", // "flatPath": "v1/operations/{operationsId}", // "httpMethod": "GET", // "id": "serviceusage.operations.get", @@ -5802,22 +4670,15 @@ type OperationsListCall struct { } // List: Lists operations that match the specified filter in the -// request. If the -// server doesn't support this method, it returns -// `UNIMPLEMENTED`. -// -// NOTE: the `name` binding allows API services to override the -// binding -// to use different resource name schemes, such as `users/*/operations`. -// To -// override the binding, API services can add a binding such -// as -// "/v1/{name=users/*}/operations" to their service configuration. 
-// For backwards compatibility, the default name includes the -// operations -// collection id, however overriding users must ensure the name -// binding -// is the parent resource, without the operations collection id. +// request. If the server doesn't support this method, it returns +// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to +// override the binding to use different resource name schemes, such as +// `users/*/operations`. To override the binding, API services can add a +// binding such as "/v1/{name=users/*}/operations" to their service +// configuration. For backwards compatibility, the default name includes +// the operations collection id, however overriding users must ensure +// the name binding is the parent resource, without the operations +// collection id. func (r *OperationsService) List() *OperationsListCall { c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c @@ -5888,7 +4749,7 @@ func (c *OperationsListCall) Header() http.Header { func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5947,7 +4808,7 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsRe } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", // "flatPath": "v1/operations", // "httpMethod": "GET", // "id": "serviceusage.operations.list", @@ -6020,10 +4881,9 @@ type ServicesBatchEnableCall struct { } // BatchEnable: Enable multiple services on a project. The operation is -// atomic: if enabling -// any service fails, then the entire batch fails, and no state changes -// occur. -// To enable a single service, use the `EnableService` method instead. +// atomic: if enabling any service fails, then the entire batch fails, +// and no state changes occur. To enable a single service, use the +// `EnableService` method instead. 
func (r *ServicesService) BatchEnable(parent string, batchenableservicesrequest *BatchEnableServicesRequest) *ServicesBatchEnableCall { c := &ServicesBatchEnableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6058,7 +4918,7 @@ func (c *ServicesBatchEnableCall) Header() http.Header { func (c *ServicesBatchEnableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6122,7 +4982,7 @@ func (c *ServicesBatchEnableCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Enable multiple services on a project. The operation is atomic: if enabling\nany service fails, then the entire batch fails, and no state changes occur.\nTo enable a single service, use the `EnableService` method instead.", + // "description": "Enable multiple services on a project. The operation is atomic: if enabling any service fails, then the entire batch fails, and no state changes occur. To enable a single service, use the `EnableService` method instead.", // "flatPath": "v1/{v1Id}/{v1Id1}/services:batchEnable", // "httpMethod": "POST", // "id": "serviceusage.services.batchEnable", @@ -6131,7 +4991,7 @@ func (c *ServicesBatchEnableCall) Do(opts ...googleapi.CallOption) (*Operation, // ], // "parameters": { // "parent": { - // "description": "Parent to enable services on.\n\nAn example name would be:\n`projects/123` where `123` is the project number.\n\nThe `BatchEnableServices` method currently only supports projects.", + // "description": "Parent to enable services on. An example name would be: `projects/123` where `123` is the project number. The `BatchEnableServices` method currently only supports projects.", // "location": "path", // "pattern": "^[^/]+/[^/]+$", // "required": true, @@ -6165,8 +5025,7 @@ type ServicesBatchGetCall struct { } // BatchGet: Returns the service configurations and enabled states for a -// given list of -// services. +// given list of services. func (r *ServicesService) BatchGet(parent string) *ServicesBatchGetCall { c := &ServicesBatchGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6174,14 +5033,10 @@ func (r *ServicesService) BatchGet(parent string) *ServicesBatchGetCall { } // Names sets the optional parameter "names": Names of the services to -// retrieve. -// -// An example name would -// be: +// retrieve. An example name would be: // `projects/123/services/serviceusage.googleapis.com` where `123` is -// the -// project number. -// A single request can get a maximum of 30 services at a time. +// the project number. A single request can get a maximum of 30 services +// at a time. 
func (c *ServicesBatchGetCall) Names(names ...string) *ServicesBatchGetCall { c.urlParams_.SetMulti("names", append([]string{}, names...)) return c @@ -6224,7 +5079,7 @@ func (c *ServicesBatchGetCall) Header() http.Header { func (c *ServicesBatchGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6286,7 +5141,7 @@ func (c *ServicesBatchGetCall) Do(opts ...googleapi.CallOption) (*BatchGetServic } return ret, nil // { - // "description": "Returns the service configurations and enabled states for a given list of\nservices.", + // "description": "Returns the service configurations and enabled states for a given list of services.", // "flatPath": "v1/{v1Id}/{v1Id1}/services:batchGet", // "httpMethod": "GET", // "id": "serviceusage.services.batchGet", @@ -6295,13 +5150,13 @@ func (c *ServicesBatchGetCall) Do(opts ...googleapi.CallOption) (*BatchGetServic // ], // "parameters": { // "names": { - // "description": "Names of the services to retrieve.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com` where `123` is the\nproject number.\nA single request can get a maximum of 30 services at a time.", + // "description": "Names of the services to retrieve. An example name would be: `projects/123/services/serviceusage.googleapis.com` where `123` is the project number. A single request can get a maximum of 30 services at a time.", // "location": "query", // "repeated": true, // "type": "string" // }, // "parent": { - // "description": "Parent to retrieve services from.\nIf this is set, the parent of all of the services specified in `names` must\nmatch this field. An example name would be: `projects/123` where `123` is\nthe project number. The `BatchGetServices` method currently only supports\nprojects.", + // "description": "Parent to retrieve services from. If this is set, the parent of all of the services specified in `names` must match this field. An example name would be: `projects/123` where `123` is the project number. The `BatchGetServices` method currently only supports projects.", // "location": "path", // "pattern": "^[^/]+/[^/]+$", // "required": true, @@ -6332,16 +5187,11 @@ type ServicesDisableCall struct { } // Disable: Disable a service so that it can no longer be used with a -// project. -// This prevents unintended usage that may cause unexpected -// billing -// charges or security leaks. -// -// It is not valid to call the disable method on a service that is -// not -// currently enabled. Callers will receive a `FAILED_PRECONDITION` -// status if -// the target service is not currently enabled. +// project. This prevents unintended usage that may cause unexpected +// billing charges or security leaks. It is not valid to call the +// disable method on a service that is not currently enabled. Callers +// will receive a `FAILED_PRECONDITION` status if the target service is +// not currently enabled. 
func (r *ServicesService) Disable(name string, disableservicerequest *DisableServiceRequest) *ServicesDisableCall { c := &ServicesDisableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -6376,7 +5226,7 @@ func (c *ServicesDisableCall) Header() http.Header { func (c *ServicesDisableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6440,7 +5290,7 @@ func (c *ServicesDisableCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Disable a service so that it can no longer be used with a project.\nThis prevents unintended usage that may cause unexpected billing\ncharges or security leaks.\n\nIt is not valid to call the disable method on a service that is not\ncurrently enabled. Callers will receive a `FAILED_PRECONDITION` status if\nthe target service is not currently enabled.", + // "description": "Disable a service so that it can no longer be used with a project. This prevents unintended usage that may cause unexpected billing charges or security leaks. It is not valid to call the disable method on a service that is not currently enabled. Callers will receive a `FAILED_PRECONDITION` status if the target service is not currently enabled.", // "flatPath": "v1/{v1Id}/{v1Id1}/services/{servicesId}:disable", // "httpMethod": "POST", // "id": "serviceusage.services.disable", @@ -6449,7 +5299,7 @@ func (c *ServicesDisableCall) Do(opts ...googleapi.CallOption) (*Operation, erro // ], // "parameters": { // "name": { - // "description": "Name of the consumer and service to disable the service on.\n\nThe enable and disable methods currently only support projects.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com` where `123` is the\nproject number.", + // "description": "Name of the consumer and service to disable the service on. The enable and disable methods currently only support projects. An example name would be: `projects/123/services/serviceusage.googleapis.com` where `123` is the project number.", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+$", // "required": true, @@ -6517,7 +5367,7 @@ func (c *ServicesEnableCall) Header() http.Header { func (c *ServicesEnableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6590,7 +5440,7 @@ func (c *ServicesEnableCall) Do(opts ...googleapi.CallOption) (*Operation, error // ], // "parameters": { // "name": { - // "description": "Name of the consumer and service to enable the service on.\n\nThe `EnableService` and `DisableService` methods currently only support\nprojects.\n\nEnabling a service requires that the service is public or is shared with\nthe user enabling the service.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com` where `123` is the\nproject number.", + // "description": "Name of the consumer and service to enable the service on. The `EnableService` and `DisableService` methods currently only support projects. 
Enabling a service requires that the service is public or is shared with the user enabling the service. An example name would be: `projects/123/services/serviceusage.googleapis.com` where `123` is the project number.", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+$", // "required": true, @@ -6668,7 +5518,7 @@ func (c *ServicesGetCall) Header() http.Header { func (c *ServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6739,7 +5589,7 @@ func (c *ServicesGetCall) Do(opts ...googleapi.CallOption) (*GoogleApiServiceusa // ], // "parameters": { // "name": { - // "description": "Name of the consumer and service to get the `ConsumerState` for.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com` where `123` is the\nproject number.", + // "description": "Name of the consumer and service to get the `ConsumerState` for. An example name would be: `projects/123/services/serviceusage.googleapis.com` where `123` is the project number.", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+$", // "required": true, @@ -6770,17 +5620,16 @@ type ServicesListCall struct { } // List: List all services available to the specified project, and the -// current -// state of those services with respect to the project. The list -// includes -// all public services, all services for which the calling user has -// the -// `servicemanagement.services.bind` permission, and all services that -// have -// already been enabled on the project. The list can be filtered to -// only include services in a specific state, for example to only -// include -// services enabled on the project. +// current state of those services with respect to the project. The list +// includes all public services, all services for which the calling user +// has the `servicemanagement.services.bind` permission, and all +// services that have already been enabled on the project. The list can +// be filtered to only include services in a specific state, for example +// to only include services enabled on the project. WARNING: If you need +// to query enabled services frequently or across an organization, you +// should use [Cloud Asset Inventory +// API](https://cloud.google.com/asset-inventory/docs/apis), which +// provides higher throughput and richer filtering capability. func (r *ServicesService) List(parent string) *ServicesListCall { c := &ServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6788,25 +5637,24 @@ func (r *ServicesService) List(parent string) *ServicesListCall { } // Filter sets the optional parameter "filter": Only list services that -// conform to the given filter. -// The allowed filter strings are `state:ENABLED` and `state:DISABLED`. +// conform to the given filter. The allowed filter strings are +// `state:ENABLED` and `state:DISABLED`. func (c *ServicesListCall) Filter(filter string) *ServicesListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": Requested size of -// the next page of data. -// Requested page size cannot exceed 200. -// If not set, the default page size is 50. +// the next page of data. Requested page size cannot exceed 200. If not +// set, the default page size is 50. 
func (c *ServicesListCall) PageSize(pageSize int64) *ServicesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Token identifying -// which result to start with, which is returned by a -// previous list call. +// which result to start with, which is returned by a previous list +// call. func (c *ServicesListCall) PageToken(pageToken string) *ServicesListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -6849,7 +5697,7 @@ func (c *ServicesListCall) Header() http.Header { func (c *ServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6911,7 +5759,7 @@ func (c *ServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesRespon } return ret, nil // { - // "description": "List all services available to the specified project, and the current\nstate of those services with respect to the project. The list includes\nall public services, all services for which the calling user has the\n`servicemanagement.services.bind` permission, and all services that have\nalready been enabled on the project. The list can be filtered to\nonly include services in a specific state, for example to only include\nservices enabled on the project.", + // "description": "List all services available to the specified project, and the current state of those services with respect to the project. The list includes all public services, all services for which the calling user has the `servicemanagement.services.bind` permission, and all services that have already been enabled on the project. The list can be filtered to only include services in a specific state, for example to only include services enabled on the project. WARNING: If you need to query enabled services frequently or across an organization, you should use [Cloud Asset Inventory API](https://cloud.google.com/asset-inventory/docs/apis), which provides higher throughput and richer filtering capability.", // "flatPath": "v1/{v1Id}/{v1Id1}/services", // "httpMethod": "GET", // "id": "serviceusage.services.list", @@ -6920,23 +5768,23 @@ func (c *ServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesRespon // ], // "parameters": { // "filter": { - // "description": "Only list services that conform to the given filter.\nThe allowed filter strings are `state:ENABLED` and `state:DISABLED`.", + // "description": "Only list services that conform to the given filter. The allowed filter strings are `state:ENABLED` and `state:DISABLED`.", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Requested size of the next page of data.\nRequested page size cannot exceed 200.\n If not set, the default page size is 50.", + // "description": "Requested size of the next page of data. Requested page size cannot exceed 200. 
If not set, the default page size is 50.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Token identifying which result to start with, which is returned by a\nprevious list call.", + // "description": "Token identifying which result to start with, which is returned by a previous list call.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Parent to search for services on.\n\nAn example name would be:\n`projects/123` where `123` is the project number.", + // "description": "Parent to search for services on. An example name would be: `projects/123` where `123` is the project number.", // "location": "path", // "pattern": "^[^/]+/[^/]+$", // "required": true, diff --git a/vendor/google.golang.org/api/serviceusage/v1beta1/serviceusage-api.json b/vendor/google.golang.org/api/serviceusage/v1beta1/serviceusage-api.json index 390124c265e..a1fd019b8a4 100644 --- a/vendor/google.golang.org/api/serviceusage/v1beta1/serviceusage-api.json +++ b/vendor/google.golang.org/api/serviceusage/v1beta1/serviceusage-api.json @@ -114,7 +114,7 @@ "operations": { "methods": { "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v1beta1/operations/{operationsId}", "httpMethod": "GET", "id": "serviceusage.operations.get", @@ -140,7 +140,7 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", "flatPath": "v1beta1/operations", "httpMethod": "GET", "id": "serviceusage.operations.list", @@ -182,7 +182,7 @@ "services": { "methods": { "batchEnable": { - "description": "Enable multiple services on a project. The operation is atomic: if enabling\nany service fails, then the entire batch fails, and no state changes occur.\n\nOperation\u003cresponse: google.protobuf.Empty\u003e", + "description": "Enable multiple services on a project. The operation is atomic: if enabling any service fails, then the entire batch fails, and no state changes occur. 
Operation", "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services:batchEnable", "httpMethod": "POST", "id": "serviceusage.services.batchEnable", @@ -191,7 +191,7 @@ ], "parameters": { "parent": { - "description": "Parent to enable services on.\n\nAn example name would be:\n`projects/123`\nwhere `123` is the project number (not project ID).\n\nThe `BatchEnableServices` method currently only supports projects.", + "description": "Parent to enable services on. An example name would be: `projects/123` where `123` is the project number (not project ID). The `BatchEnableServices` method currently only supports projects.", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -211,7 +211,7 @@ ] }, "disable": { - "description": "Disable a service so that it can no longer be used with a project.\nThis prevents unintended usage that may cause unexpected billing\ncharges or security leaks.\n\nIt is not valid to call the disable method on a service that is not\ncurrently enabled. Callers will receive a `FAILED_PRECONDITION` status if\nthe target service is not currently enabled.\n\nOperation\u003cresponse: google.protobuf.Empty\u003e", + "description": "Disable a service so that it can no longer be used with a project. This prevents unintended usage that may cause unexpected billing charges or security leaks. It is not valid to call the disable method on a service that is not currently enabled. Callers will receive a `FAILED_PRECONDITION` status if the target service is not currently enabled. Operation", "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services/{servicesId}:disable", "httpMethod": "POST", "id": "serviceusage.services.disable", @@ -220,7 +220,7 @@ ], "parameters": { "name": { - "description": "Name of the consumer and service to disable the service on.\n\nThe enable and disable methods currently only support projects.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com`\nwhere `123` is the project number (not project ID).", + "description": "Name of the consumer and service to disable the service on. The enable and disable methods currently only support projects. An example name would be: `projects/123/services/serviceusage.googleapis.com` where `123` is the project number (not project ID).", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+$", "required": true, @@ -240,7 +240,7 @@ ] }, "enable": { - "description": "Enable a service so that it can be used with a project.\n\nOperation\u003cresponse: google.protobuf.Empty\u003e", + "description": "Enable a service so that it can be used with a project. Operation", "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services/{servicesId}:enable", "httpMethod": "POST", "id": "serviceusage.services.enable", @@ -249,7 +249,7 @@ ], "parameters": { "name": { - "description": "Name of the consumer and service to enable the service on.\n\nThe `EnableService` and `DisableService` methods currently only support\nprojects.\n\nEnabling a service requires that the service is public or is shared with\nthe user enabling the service.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com`\nwhere `123` is the project number (not project ID).", + "description": "Name of the consumer and service to enable the service on. The `EnableService` and `DisableService` methods currently only support projects. Enabling a service requires that the service is public or is shared with the user enabling the service. 
An example name would be: `projects/123/services/serviceusage.googleapis.com` where `123` is the project number (not project ID).", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+$", "required": true, @@ -268,6 +268,32 @@ "https://www.googleapis.com/auth/service.management" ] }, + "generateServiceIdentity": { + "description": "Generate service identity for service.", + "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services/{servicesId}:generateServiceIdentity", + "httpMethod": "POST", + "id": "serviceusage.services.generateServiceIdentity", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Name of the consumer and service to generate an identity for. The `GenerateServiceIdentity` methods currently only support projects. An example name would be: `projects/123/services/example.googleapis.com` where `123` is the project number.", + "location": "path", + "pattern": "^[^/]+/[^/]+/services/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1beta1/{+parent}:generateServiceIdentity", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ] + }, "get": { "description": "Returns the service configuration and enabled state for a given service.", "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services/{servicesId}", @@ -278,7 +304,7 @@ ], "parameters": { "name": { - "description": "Name of the consumer and service to get the `ConsumerState` for.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com`\nwhere `123` is the project number (not project ID).", + "description": "Name of the consumer and service to get the `ConsumerState` for. An example name would be: `projects/123/services/serviceusage.googleapis.com` where `123` is the project number (not project ID).", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+$", "required": true, @@ -295,7 +321,7 @@ ] }, "list": { - "description": "List all services available to the specified project, and the current\nstate of those services with respect to the project. The list includes\nall public services, all services for which the calling user has the\n`servicemanagement.services.bind` permission, and all services that have\nalready been enabled on the project. The list can be filtered to\nonly include services in a specific state, for example to only include\nservices enabled on the project.", + "description": "List all services available to the specified project, and the current state of those services with respect to the project. The list includes all public services, all services for which the calling user has the `servicemanagement.services.bind` permission, and all services that have already been enabled on the project. The list can be filtered to only include services in a specific state, for example to only include services enabled on the project.", "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services", "httpMethod": "GET", "id": "serviceusage.services.list", @@ -304,23 +330,23 @@ ], "parameters": { "filter": { - "description": "Only list services that conform to the given filter.\nThe allowed filter strings are `state:ENABLED` and `state:DISABLED`.", + "description": "Only list services that conform to the given filter. 
The allowed filter strings are `state:ENABLED` and `state:DISABLED`.", "location": "query", "type": "string" }, "pageSize": { - "description": "Requested size of the next page of data.\nRequested page size cannot exceed 200.\n If not set, the default page size is 50.", + "description": "Requested size of the next page of data. Requested page size cannot exceed 200. If not set, the default page size is 50.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Token identifying which result to start with, which is returned by a\nprevious list call.", + "description": "Token identifying which result to start with, which is returned by a previous list call.", "location": "query", "type": "string" }, "parent": { - "description": "Parent to search for services on.\n\nAn example name would be:\n`projects/123`\nwhere `123` is the project number (not project ID).", + "description": "Parent to search for services on. An example name would be: `projects/123` where `123` is the project number (not project ID).", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -350,7 +376,7 @@ ], "parameters": { "name": { - "description": "The resource name of the quota limit.\n\nAn example name would be:\nprojects/123/services/serviceusage.googleapis.com/quotas/metrics/serviceusage.googleapis.com%2Fmutate_requests", + "description": "The resource name of the quota limit. An example name would be: projects/123/services/serviceusage.googleapis.com/quotas/metrics/serviceusage.googleapis.com%2Fmutate_requests", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+$", "required": true, @@ -363,6 +389,11 @@ "BASIC", "FULL" ], + "enumDescriptions": [ + "No quota view specified. Requests that do not specify a quota view will typically default to the BASIC view.", + "Only buckets with overrides are shown in the response.", + "Include per-location buckets even if they do not have overrides. When the view is FULL, and a limit has regional or zonal quota, the limit will include buckets for all regions or zones that could support overrides, even if none are currently present. In some cases this will cause the response to become very large; callers that do not need this extra information should use the BASIC view instead." + ], "location": "query", "type": "string" } @@ -376,8 +407,37 @@ "https://www.googleapis.com/auth/cloud-platform.read-only" ] }, + "importConsumerOverrides": { + "description": "Create or update multiple consumer overrides atomically, all on the same consumer, but on many different metrics or limits. The name field in the quota override message should not be set.", + "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services/{servicesId}/consumerQuotaMetrics:importConsumerOverrides", + "httpMethod": "POST", + "id": "serviceusage.services.consumerQuotaMetrics.importConsumerOverrides", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The resource name of the consumer. 
An example name would be: `projects/123/services/compute.googleapis.com`", + "location": "path", + "pattern": "^[^/]+/[^/]+/services/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1beta1/{+parent}/consumerQuotaMetrics:importConsumerOverrides", + "request": { + "$ref": "ImportConsumerOverridesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ] + }, "list": { - "description": "Retrieves a summary of all quota information visible to the service\nconsumer, organized by service metric. Each metric includes information\nabout all of its defined limits. Each limit includes the limit\nconfiguration (quota unit, preciseness, default value), the current\neffective limit value, and all of the overrides applied to the limit.", + "description": "Retrieves a summary of all quota information visible to the service consumer, organized by service metric. Each metric includes information about all of its defined limits. Each limit includes the limit configuration (quota unit, preciseness, default value), the current effective limit value, and all of the overrides applied to the limit.", "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services/{servicesId}/consumerQuotaMetrics", "httpMethod": "GET", "id": "serviceusage.services.consumerQuotaMetrics.list", @@ -392,12 +452,12 @@ "type": "integer" }, "pageToken": { - "description": "Token identifying which result to start with; returned by a previous list\ncall.", + "description": "Token identifying which result to start with; returned by a previous list call.", "location": "query", "type": "string" }, "parent": { - "description": "Parent of the quotas resource.\n\nSome example names would be:\nprojects/123/services/serviceconsumermanagement.googleapis.com\nfolders/345/services/serviceconsumermanagement.googleapis.com\norganizations/456/services/serviceconsumermanagement.googleapis.com", + "description": "Parent of the quotas resource. Some example names would be: projects/123/services/serviceconsumermanagement.googleapis.com folders/345/services/serviceconsumermanagement.googleapis.com organizations/456/services/serviceconsumermanagement.googleapis.com", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+$", "required": true, @@ -410,6 +470,11 @@ "BASIC", "FULL" ], + "enumDescriptions": [ + "No quota view specified. Requests that do not specify a quota view will typically default to the BASIC view.", + "Only buckets with overrides are shown in the response.", + "Include per-location buckets even if they do not have overrides. When the view is FULL, and a limit has regional or zonal quota, the limit will include buckets for all regions or zones that could support overrides, even if none are currently present. In some cases this will cause the response to become very large; callers that do not need this extra information should use the BASIC view instead." + ], "location": "query", "type": "string" } @@ -437,7 +502,7 @@ ], "parameters": { "name": { - "description": "The resource name of the quota limit.\n\nUse the quota limit resource name returned by previous\nListConsumerQuotaMetrics and GetConsumerQuotaMetric API calls.", + "description": "The resource name of the quota limit. 
Use the quota limit resource name returned by previous ListConsumerQuotaMetrics and GetConsumerQuotaMetric API calls.", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+$", "required": true, @@ -450,6 +515,11 @@ "BASIC", "FULL" ], + "enumDescriptions": [ + "No quota view specified. Requests that do not specify a quota view will typically default to the BASIC view.", + "Only buckets with overrides are shown in the response.", + "Include per-location buckets even if they do not have overrides. When the view is FULL, and a limit has regional or zonal quota, the limit will include buckets for all regions or zones that could support overrides, even if none are currently present. In some cases this will cause the response to become very large; callers that do not need this extra information should use the BASIC view instead." + ], "location": "query", "type": "string" } @@ -468,7 +538,7 @@ "adminOverrides": { "methods": { "create": { - "description": "Creates an admin override.\nAn admin override is applied by an administrator of a parent folder or\nparent organization of the consumer receiving the override. An admin\noverride is intended to limit the amount of quota the consumer can use out\nof the total quota pool allocated to all children of the folder or\norganization.", + "description": "Creates an admin override. An admin override is applied by an administrator of a parent folder or parent organization of the consumer receiving the override. An admin override is intended to limit the amount of quota the consumer can use out of the total quota pool allocated to all children of the folder or organization.", "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services/{servicesId}/consumerQuotaMetrics/{consumerQuotaMetricsId}/limits/{limitsId}/adminOverrides", "httpMethod": "POST", "id": "serviceusage.services.consumerQuotaMetrics.limits.adminOverrides.create", @@ -477,12 +547,12 @@ ], "parameters": { "force": { - "description": "Whether to force the creation of the quota override.\nIf creating an override would cause the effective quota for the consumer to\ndecrease by more than 10 percent, the call is rejected, as a safety measure\nto avoid accidentally decreasing quota too quickly. Setting the force\nparameter to true ignores this restriction.", + "description": "Whether to force the creation of the quota override. If creating an override would cause the effective quota for the consumer to decrease by more than 10 percent, the call is rejected, as a safety measure to avoid accidentally decreasing quota too quickly. Setting the force parameter to true ignores this restriction.", "location": "query", "type": "boolean" }, "parent": { - "description": "The resource name of the parent quota limit, returned by a\nListConsumerQuotaMetrics or GetConsumerQuotaMetric call.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion`", + "description": "The resource name of the parent quota limit, returned by a ListConsumerQuotaMetrics or GetConsumerQuotaMetric call. 
An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion`", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+$", "required": true, @@ -511,12 +581,12 @@ ], "parameters": { "force": { - "description": "Whether to force the deletion of the quota override.\nIf deleting an override would cause the effective quota for the consumer to\ndecrease by more than 10 percent, the call is rejected, as a safety measure\nto avoid accidentally decreasing quota too quickly. Setting the force\nparameter to true ignores this restriction.", + "description": "Whether to force the deletion of the quota override. If deleting an override would cause the effective quota for the consumer to decrease by more than 10 percent, the call is rejected, as a safety measure to avoid accidentally decreasing quota too quickly. Setting the force parameter to true ignores this restriction.", "location": "query", "type": "boolean" }, "name": { - "description": "The resource name of the override to delete.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminOverrides/4a3f2c1d`", + "description": "The resource name of the override to delete. An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminOverrides/4a3f2c1d`", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+/adminOverrides/[^/]+$", "required": true, @@ -548,12 +618,12 @@ "type": "integer" }, "pageToken": { - "description": "Token identifying which result to start with; returned by a previous list\ncall.", + "description": "Token identifying which result to start with; returned by a previous list call.", "location": "query", "type": "string" }, "parent": { - "description": "The resource name of the parent quota limit, returned by a\nListConsumerQuotaMetrics or GetConsumerQuotaMetric call.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion`", + "description": "The resource name of the parent quota limit, returned by a ListConsumerQuotaMetrics or GetConsumerQuotaMetric call. An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion`", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+$", "required": true, @@ -579,19 +649,19 @@ ], "parameters": { "force": { - "description": "Whether to force the update of the quota override.\nIf updating an override would cause the effective quota for the consumer to\ndecrease by more than 10 percent, the call is rejected, as a safety measure\nto avoid accidentally decreasing quota too quickly. Setting the force\nparameter to true ignores this restriction.", + "description": "Whether to force the update of the quota override. If updating an override would cause the effective quota for the consumer to decrease by more than 10 percent, the call is rejected, as a safety measure to avoid accidentally decreasing quota too quickly. 
Setting the force parameter to true ignores this restriction.", "location": "query", "type": "boolean" }, "name": { - "description": "The resource name of the override to update.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminOverrides/4a3f2c1d`", + "description": "The resource name of the override to update. An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminOverrides/4a3f2c1d`", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+/adminOverrides/[^/]+$", "required": true, "type": "string" }, "updateMask": { - "description": "Update only the specified fields of the override.\nIf unset, all fields will be updated.", + "description": "Update only the specified fields of the override. If unset, all fields will be updated.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -614,7 +684,7 @@ "consumerOverrides": { "methods": { "create": { - "description": "Creates a consumer override.\nA consumer override is applied to the consumer on its own authority to\nlimit its own quota usage. Consumer overrides cannot be used to grant more\nquota than would be allowed by admin overrides, producer overrides, or the\ndefault limit of the service.", + "description": "Creates a consumer override. A consumer override is applied to the consumer on its own authority to limit its own quota usage. Consumer overrides cannot be used to grant more quota than would be allowed by admin overrides, producer overrides, or the default limit of the service.", "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services/{servicesId}/consumerQuotaMetrics/{consumerQuotaMetricsId}/limits/{limitsId}/consumerOverrides", "httpMethod": "POST", "id": "serviceusage.services.consumerQuotaMetrics.limits.consumerOverrides.create", @@ -623,12 +693,12 @@ ], "parameters": { "force": { - "description": "Whether to force the creation of the quota override.\nIf creating an override would cause the effective quota for the consumer to\ndecrease by more than 10 percent, the call is rejected, as a safety measure\nto avoid accidentally decreasing quota too quickly. Setting the force\nparameter to true ignores this restriction.", + "description": "Whether to force the creation of the quota override. If creating an override would cause the effective quota for the consumer to decrease by more than 10 percent, the call is rejected, as a safety measure to avoid accidentally decreasing quota too quickly. Setting the force parameter to true ignores this restriction.", "location": "query", "type": "boolean" }, "parent": { - "description": "The resource name of the parent quota limit, returned by a\nListConsumerQuotaMetrics or GetConsumerQuotaMetric call.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion`", + "description": "The resource name of the parent quota limit, returned by a ListConsumerQuotaMetrics or GetConsumerQuotaMetric call. 
An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion`", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+$", "required": true, @@ -657,12 +727,12 @@ ], "parameters": { "force": { - "description": "Whether to force the deletion of the quota override.\nIf deleting an override would cause the effective quota for the consumer to\ndecrease by more than 10 percent, the call is rejected, as a safety measure\nto avoid accidentally decreasing quota too quickly. Setting the force\nparameter to true ignores this restriction.", + "description": "Whether to force the deletion of the quota override. If deleting an override would cause the effective quota for the consumer to decrease by more than 10 percent, the call is rejected, as a safety measure to avoid accidentally decreasing quota too quickly. Setting the force parameter to true ignores this restriction.", "location": "query", "type": "boolean" }, "name": { - "description": "The resource name of the override to delete.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/consumerOverrides/4a3f2c1d`", + "description": "The resource name of the override to delete. An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/consumerOverrides/4a3f2c1d`", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+/consumerOverrides/[^/]+$", "required": true, @@ -694,12 +764,12 @@ "type": "integer" }, "pageToken": { - "description": "Token identifying which result to start with; returned by a previous list\ncall.", + "description": "Token identifying which result to start with; returned by a previous list call.", "location": "query", "type": "string" }, "parent": { - "description": "The resource name of the parent quota limit, returned by a\nListConsumerQuotaMetrics or GetConsumerQuotaMetric call.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion`", + "description": "The resource name of the parent quota limit, returned by a ListConsumerQuotaMetrics or GetConsumerQuotaMetric call. An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion`", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+$", "required": true, @@ -725,19 +795,19 @@ ], "parameters": { "force": { - "description": "Whether to force the update of the quota override.\nIf updating an override would cause the effective quota for the consumer to\ndecrease by more than 10 percent, the call is rejected, as a safety measure\nto avoid accidentally decreasing quota too quickly. Setting the force\nparameter to true ignores this restriction.", + "description": "Whether to force the update of the quota override. If updating an override would cause the effective quota for the consumer to decrease by more than 10 percent, the call is rejected, as a safety measure to avoid accidentally decreasing quota too quickly. 
Setting the force parameter to true ignores this restriction.", "location": "query", "type": "boolean" }, "name": { - "description": "The resource name of the override to update.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/consumerOverrides/4a3f2c1d`", + "description": "The resource name of the override to update. An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/consumerOverrides/4a3f2c1d`", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+/consumerOverrides/[^/]+$", "required": true, "type": "string" }, "updateMask": { - "description": "Update only the specified fields of the override.\nIf unset, all fields will be updated.", + "description": "Update only the specified fields of the override. If unset, all fields will be updated.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -764,11 +834,46 @@ } } }, - "revision": "20200508", + "revision": "20200821", "rootUrl": "https://serviceusage.googleapis.com/", "schemas": { + "AdminQuotaPolicy": { + "description": "Quota policy created by quota administrator.", + "id": "AdminQuotaPolicy", + "properties": { + "container": { + "description": "The cloud resource container at which the quota policy is created. The format is {container_type}/{container_number}", + "type": "string" + }, + "dimensions": { + "additionalProperties": { + "type": "string" + }, + "description": " If this map is nonempty, then this policy applies only to specific values for dimensions defined in the limit unit. For example, an policy on a limit with the unit 1/{project}/{region} could contain an entry with the key \"region\" and the value \"us-east-1\"; the policy is only applied to quota consumed in that region. This map has the following restrictions: * If \"region\" appears as a key, its value must be a valid Cloud region. * If \"zone\" appears as a key, its value must be a valid Cloud zone. * Keys other than \"region\" or \"zone\" are not valid.", + "type": "object" + }, + "metric": { + "description": "The name of the metric to which this policy applies. An example name would be: `compute.googleapis.com/cpus`", + "type": "string" + }, + "name": { + "description": "The resource name of the policy. This name is generated by the server when the policy is created. Example names would be: `organizations/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminQuotaPolicies/4a3f2c1d`", + "type": "string" + }, + "policyValue": { + "description": "The quota policy value. Can be any nonnegative integer, or -1 (unlimited quota).", + "format": "int64", + "type": "string" + }, + "unit": { + "description": "The limit unit of the limit to which this policy applies. 
An example unit would be: `1/{project}/{region}` Note that `{project}` and `{region}` are not placeholders in this example; the literal characters `{` and `}` occur in the string.", + "type": "string" + } + }, + "type": "object" + }, "Api": { - "description": "Api is a light-weight descriptor for an API Interface.\n\nInterfaces are also described as \"protocol buffer services\" in some contexts,\nsuch as by the \"service\" keyword in a .proto file, but they are different\nfrom API Services, which represent a concrete implementation of an interface\nas opposed to simply a description of methods and bindings. They are also\nsometimes simply referred to as \"APIs\" in other contexts, such as the name of\nthis message itself. See https://cloud.google.com/apis/design/glossary for\ndetailed terminology.", + "description": "Api is a light-weight descriptor for an API Interface. Interfaces are also described as \"protocol buffer services\" in some contexts, such as by the \"service\" keyword in a .proto file, but they are different from API Services, which represent a concrete implementation of an interface as opposed to simply a description of methods and bindings. They are also sometimes simply referred to as \"APIs\" in other contexts, such as the name of this message itself. See https://cloud.google.com/apis/design/glossary for detailed terminology.", "id": "Api", "properties": { "methods": { @@ -786,7 +891,7 @@ "type": "array" }, "name": { - "description": "The fully qualified name of this interface, including package name\nfollowed by the interface's simple name.", + "description": "The fully qualified name of this interface, including package name followed by the interface's simple name.", "type": "string" }, "options": { @@ -798,7 +903,7 @@ }, "sourceContext": { "$ref": "SourceContext", - "description": "Source context for the protocol buffer service represented by this\nmessage." + "description": "Source context for the protocol buffer service represented by this message." }, "syntax": { "description": "The source syntax of the service.", @@ -813,38 +918,38 @@ "type": "string" }, "version": { - "description": "A version string for this interface. If specified, must have the form\n`major-version.minor-version`, as in `1.10`. If the minor version is\nomitted, it defaults to zero. If the entire version field is empty, the\nmajor version is derived from the package name, as outlined below. If the\nfield is not empty, the version in the package name will be verified to be\nconsistent with what is provided here.\n\nThe versioning schema uses [semantic\nversioning](http://semver.org) where the major version number\nindicates a breaking change and the minor version an additive,\nnon-breaking change. Both version numbers are signals to users\nwhat to expect from different versions, and should be carefully\nchosen based on the product plan.\n\nThe major version is also reflected in the package name of the\ninterface, which must end in `v\u003cmajor-version\u003e`, as in\n`google.feature.v1`. For major versions 0 and 1, the suffix can\nbe omitted. Zero major versions must only be used for\nexperimental, non-GA interfaces.\n", + "description": "A version string for this interface. If specified, must have the form `major-version.minor-version`, as in `1.10`. If the minor version is omitted, it defaults to zero. If the entire version field is empty, the major version is derived from the package name, as outlined below. 
If the field is not empty, the version in the package name will be verified to be consistent with what is provided here. The versioning schema uses [semantic versioning](http://semver.org) where the major version number indicates a breaking change and the minor version an additive, non-breaking change. Both version numbers are signals to users what to expect from different versions, and should be carefully chosen based on the product plan. The major version is also reflected in the package name of the interface, which must end in `v`, as in `google.feature.v1`. For major versions 0 and 1, the suffix can be omitted. Zero major versions must only be used for experimental, non-GA interfaces. ", "type": "string" } }, "type": "object" }, "AuthProvider": { - "description": "Configuration for an authentication provider, including support for\n[JSON Web Token\n(JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", + "description": "Configuration for an authentication provider, including support for [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", "id": "AuthProvider", "properties": { "audiences": { - "description": "The list of JWT\n[audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3).\nthat are allowed to access. A JWT containing any of these audiences will\nbe accepted. When this setting is absent, JWTs with audiences:\n - \"https://[service.name]/[google.protobuf.Api.name]\"\n - \"https://[service.name]/\"\nwill be accepted.\nFor example, if no audiences are in the setting, LibraryService API will\naccept JWTs with the following audiences:\n -\n https://library-example.googleapis.com/google.example.library.v1.LibraryService\n - https://library-example.googleapis.com/\n\nExample:\n\n audiences: bookstore_android.apps.googleusercontent.com,\n bookstore_web.apps.googleusercontent.com", + "description": "The list of JWT [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). that are allowed to access. A JWT containing any of these audiences will be accepted. When this setting is absent, JWTs with audiences: - \"https://[service.name]/[google.protobuf.Api.name]\" - \"https://[service.name]/\" will be accepted. For example, if no audiences are in the setting, LibraryService API will accept JWTs with the following audiences: - https://library-example.googleapis.com/google.example.library.v1.LibraryService - https://library-example.googleapis.com/ Example: audiences: bookstore_android.apps.googleusercontent.com, bookstore_web.apps.googleusercontent.com", "type": "string" }, "authorizationUrl": { - "description": "Redirect URL if JWT token is required but not present or is expired.\nImplement authorizationUrl of securityDefinitions in OpenAPI spec.", + "description": "Redirect URL if JWT token is required but not present or is expired. Implement authorizationUrl of securityDefinitions in OpenAPI spec.", "type": "string" }, "id": { - "description": "The unique identifier of the auth provider. It will be referred to by\n`AuthRequirement.provider_id`.\n\nExample: \"bookstore_auth\".", + "description": "The unique identifier of the auth provider. It will be referred to by `AuthRequirement.provider_id`. Example: \"bookstore_auth\".", "type": "string" }, "issuer": { - "description": "Identifies the principal that issued the JWT. 
See\nhttps://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1\nUsually a URL or an email address.\n\nExample: https://securetoken.google.com\nExample: 1234567-compute@developer.gserviceaccount.com", + "description": "Identifies the principal that issued the JWT. See https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1 Usually a URL or an email address. Example: https://securetoken.google.com Example: 1234567-compute@developer.gserviceaccount.com", "type": "string" }, "jwksUri": { - "description": "URL of the provider's public key set to validate signature of the JWT. See\n[OpenID\nDiscovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).\nOptional if the key set document:\n - can be retrieved from\n [OpenID\n Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html of\n the issuer.\n - can be inferred from the email domain of the issuer (e.g. a Google\n service account).\n\nExample: https://www.googleapis.com/oauth2/v1/certs", + "description": "URL of the provider's public key set to validate signature of the JWT. See [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata). Optional if the key set document: - can be retrieved from [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html of the issuer. - can be inferred from the email domain of the issuer (e.g. a Google service account). Example: https://www.googleapis.com/oauth2/v1/certs", "type": "string" }, "jwtLocations": { - "description": "Defines the locations to extract the JWT.\n\nJWT locations can be either from HTTP headers or URL query parameters.\nThe rule is that the first match wins. The checking order is: checking\nall headers first, then URL query parameters.\n\nIf not specified, default to use following 3 locations:\n 1) Authorization: Bearer\n 2) x-goog-iap-jwt-assertion\n 3) access_token query parameter\n\nDefault locations can be specified as followings:\n jwt_locations:\n - header: Authorization\n value_prefix: \"Bearer \"\n - header: x-goog-iap-jwt-assertion\n - query: access_token", + "description": "Defines the locations to extract the JWT. JWT locations can be either from HTTP headers or URL query parameters. The rule is that the first match wins. The checking order is: checking all headers first, then URL query parameters. If not specified, default to use following 3 locations: 1) Authorization: Bearer 2) x-goog-iap-jwt-assertion 3) access_token query parameter Default locations can be specified as followings: jwt_locations: - header: Authorization value_prefix: \"Bearer \" - header: x-goog-iap-jwt-assertion - query: access_token", "items": { "$ref": "JwtLocation" }, @@ -854,22 +959,22 @@ "type": "object" }, "AuthRequirement": { - "description": "User-defined authentication requirements, including support for\n[JSON Web Token\n(JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", + "description": "User-defined authentication requirements, including support for [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).", "id": "AuthRequirement", "properties": { "audiences": { - "description": "NOTE: This will be deprecated soon, once AuthProvider.audiences is\nimplemented and accepted in all the runtime components.\n\nThe list of JWT\n[audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3).\nthat are allowed to access. A JWT containing any of these audiences will\nbe accepted. 
When this setting is absent, only JWTs with audience\n\"https://Service_name/API_name\"\nwill be accepted. For example, if no audiences are in the setting,\nLibraryService API will only accept JWTs with the following audience\n\"https://library-example.googleapis.com/google.example.library.v1.LibraryService\".\n\nExample:\n\n audiences: bookstore_android.apps.googleusercontent.com,\n bookstore_web.apps.googleusercontent.com", + "description": "NOTE: This will be deprecated soon, once AuthProvider.audiences is implemented and accepted in all the runtime components. The list of JWT [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). that are allowed to access. A JWT containing any of these audiences will be accepted. When this setting is absent, only JWTs with audience \"https://Service_name/API_name\" will be accepted. For example, if no audiences are in the setting, LibraryService API will only accept JWTs with the following audience \"https://library-example.googleapis.com/google.example.library.v1.LibraryService\". Example: audiences: bookstore_android.apps.googleusercontent.com, bookstore_web.apps.googleusercontent.com", "type": "string" }, "providerId": { - "description": "id from authentication provider.\n\nExample:\n\n provider_id: bookstore_auth", + "description": "id from authentication provider. Example: provider_id: bookstore_auth", "type": "string" } }, "type": "object" }, "Authentication": { - "description": "`Authentication` defines the authentication configuration for an API.\n\nExample for an API targeted for external use:\n\n name: calendar.googleapis.com\n authentication:\n providers:\n - id: google_calendar_auth\n jwks_uri: https://www.googleapis.com/oauth2/v1/certs\n issuer: https://securetoken.google.com\n rules:\n - selector: \"*\"\n requirements:\n provider_id: google_calendar_auth", + "description": "`Authentication` defines the authentication configuration for an API. Example for an API targeted for external use: name: calendar.googleapis.com authentication: providers: - id: google_calendar_auth jwks_uri: https://www.googleapis.com/oauth2/v1/certs issuer: https://securetoken.google.com rules: - selector: \"*\" requirements: provider_id: google_calendar_auth", "id": "Authentication", "properties": { "providers": { @@ -880,7 +985,7 @@ "type": "array" }, "rules": { - "description": "A list of authentication rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of authentication rules that apply to individual API methods. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "AuthenticationRule" }, @@ -890,7 +995,7 @@ "type": "object" }, "AuthenticationRule": { - "description": "Authentication rules for the service.\n\nBy default, if a method has any authentication requirements, every request\nmust include a valid credential matching one of the requirements.\nIt's an error to include more than one kind of credential in a single\nrequest.\n\nIf a method doesn't have any auth requirements, request credentials will be\nignored.", + "description": "Authentication rules for the service. By default, if a method has any authentication requirements, every request must include a valid credential matching one of the requirements. It's an error to include more than one kind of credential in a single request. 
If a method doesn't have any auth requirements, request credentials will be ignored.", "id": "AuthenticationRule", "properties": { "allowWithoutCredential": { @@ -909,7 +1014,7 @@ "type": "array" }, "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, @@ -920,7 +1025,7 @@ "id": "Backend", "properties": { "rules": { - "description": "A list of API backend rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of API backend rules that apply to individual API methods. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "BackendRule" }, @@ -934,29 +1039,29 @@ "id": "BackendRule", "properties": { "address": { - "description": "The address of the API backend.\n\nThe scheme is used to determine the backend protocol and security.\nThe following schemes are accepted:\n\n SCHEME PROTOCOL SECURITY\n http:// HTTP None\n https:// HTTP TLS\n grpc:// gRPC None\n grpcs:// gRPC TLS\n\nIt is recommended to explicitly include a scheme. Leaving out the scheme\nmay cause constrasting behaviors across platforms.\n\nIf the port is unspecified, the default is:\n- 80 for schemes without TLS\n- 443 for schemes with TLS\n\nFor HTTP backends, use protocol\nto specify the protocol version.", + "description": "The address of the API backend. The scheme is used to determine the backend protocol and security. The following schemes are accepted: SCHEME PROTOCOL SECURITY http:// HTTP None https:// HTTP TLS grpc:// gRPC None grpcs:// gRPC TLS It is recommended to explicitly include a scheme. Leaving out the scheme may cause constrasting behaviors across platforms. If the port is unspecified, the default is: - 80 for schemes without TLS - 443 for schemes with TLS For HTTP backends, use protocol to specify the protocol version.", "type": "string" }, "deadline": { - "description": "The number of seconds to wait for a response from a request. The default\nvaries based on the request protocol and deployment environment.", + "description": "The number of seconds to wait for a response from a request. The default varies based on the request protocol and deployment environment.", "format": "double", "type": "number" }, "disableAuth": { - "description": "When disable_auth is true, a JWT ID token won't be generated and the\noriginal \"Authorization\" HTTP header will be preserved. If the header is\nused to carry the original token and is expected by the backend, this\nfield must be set to true to preserve the header.", + "description": "When disable_auth is true, a JWT ID token won't be generated and the original \"Authorization\" HTTP header will be preserved. If the header is used to carry the original token and is expected by the backend, this field must be set to true to preserve the header.", "type": "boolean" }, "jwtAudience": { - "description": "The JWT audience is used when generating a JWT ID token for the backend.\nThis ID token will be added in the HTTP \"authorization\" header, and sent\nto the backend.", + "description": "The JWT audience is used when generating a JWT ID token for the backend. This ID token will be added in the HTTP \"authorization\" header, and sent to the backend.", "type": "string" }, "minDeadline": { - "description": "Minimum deadline in seconds needed for this method. 
Calls having deadline\nvalue lower than this will be rejected.", + "description": "Minimum deadline in seconds needed for this method. Calls having deadline value lower than this will be rejected.", "format": "double", "type": "number" }, "operationDeadline": { - "description": "The number of seconds to wait for the completion of a long running\noperation. The default is no deadline.", + "description": "The number of seconds to wait for the completion of a long running operation. The default is no deadline.", "format": "double", "type": "number" }, @@ -968,21 +1073,17 @@ ], "enumDescriptions": [ "", - "Use the backend address as-is, with no modification to the path. If the\nURL pattern contains variables, the variable names and values will be\nappended to the query string. If a query string parameter and a URL\npattern variable have the same name, this may result in duplicate keys in\nthe query string.\n\n# Examples\n\nGiven the following operation config:\n\n Method path: /api/company/{cid}/user/{uid}\n Backend address: https://example.cloudfunctions.net/getUser\n\nRequests to the following request paths will call the backend at the\ntranslated path:\n\n Request path: /api/company/widgetworks/user/johndoe\n Translated:\n https://example.cloudfunctions.net/getUser?cid=widgetworks\u0026uid=johndoe\n\n Request path: /api/company/widgetworks/user/johndoe?timezone=EST\n Translated:\n https://example.cloudfunctions.net/getUser?timezone=EST\u0026cid=widgetworks\u0026uid=johndoe", - "The request path will be appended to the backend address.\n\n# Examples\n\nGiven the following operation config:\n\n Method path: /api/company/{cid}/user/{uid}\n Backend address: https://example.appspot.com\n\nRequests to the following request paths will call the backend at the\ntranslated path:\n\n Request path: /api/company/widgetworks/user/johndoe\n Translated:\n https://example.appspot.com/api/company/widgetworks/user/johndoe\n\n Request path: /api/company/widgetworks/user/johndoe?timezone=EST\n Translated:\n https://example.appspot.com/api/company/widgetworks/user/johndoe?timezone=EST" + "Use the backend address as-is, with no modification to the path. If the URL pattern contains variables, the variable names and values will be appended to the query string. If a query string parameter and a URL pattern variable have the same name, this may result in duplicate keys in the query string. # Examples Given the following operation config: Method path: /api/company/{cid}/user/{uid} Backend address: https://example.cloudfunctions.net/getUser Requests to the following request paths will call the backend at the translated path: Request path: /api/company/widgetworks/user/johndoe Translated: https://example.cloudfunctions.net/getUser?cid=widgetworks\u0026uid=johndoe Request path: /api/company/widgetworks/user/johndoe?timezone=EST Translated: https://example.cloudfunctions.net/getUser?timezone=EST\u0026cid=widgetworks\u0026uid=johndoe", + "The request path will be appended to the backend address. 
# Examples Given the following operation config: Method path: /api/company/{cid}/user/{uid} Backend address: https://example.appspot.com Requests to the following request paths will call the backend at the translated path: Request path: /api/company/widgetworks/user/johndoe Translated: https://example.appspot.com/api/company/widgetworks/user/johndoe Request path: /api/company/widgetworks/user/johndoe?timezone=EST Translated: https://example.appspot.com/api/company/widgetworks/user/johndoe?timezone=EST" ], "type": "string" }, "protocol": { - "description": "The protocol used for sending a request to the backend.\nThe supported values are \"http/1.1\" and \"h2\".\n\nThe default value is inferred from the scheme in the\naddress field:\n\n SCHEME PROTOCOL\n http:// http/1.1\n https:// http/1.1\n grpc:// h2\n grpcs:// h2\n\nFor secure HTTP backends (https://) that support HTTP/2, set this field\nto \"h2\" for improved performance.\n\nConfiguring this field to non-default values is only supported for secure\nHTTP backends. This field will be ignored for all other backends.\n\nSee\nhttps://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids\nfor more details on the supported values.", - "type": "string" - }, - "renameTo": { - "description": "Unimplemented. Do not use.\n\nThe new name the selected proto elements should be renamed to.\n\nThe package, the service and the method can all be renamed.\nThe backend server should implement the renamed proto. However, clients\nshould call the original method, and ESF routes the traffic to the renamed\nmethod.\n\nHTTP clients should call the URL mapped to the original method.\ngRPC and Stubby clients should call the original method with package name.\n\nFor legacy reasons, ESF allows Stubby clients to call with the\nshort name (without the package name). However, for API Versioning(or\nmultiple methods mapped to the same short name), all Stubby clients must\ncall the method's full name with the package name, otherwise the first one\n(selector) wins.\n\nIf this `rename_to` is specified with a trailing `*`, the `selector` must\nbe specified with a trailing `*` as well. The all element short names\nmatched by the `*` in the selector will be kept in the `rename_to`.\n\nFor example,\n rename_rules:\n - selector: |-\n google.example.library.v1.*\n rename_to: google.example.library.*\n\nThe selector matches `google.example.library.v1.Library.CreateShelf` and\n`google.example.library.v1.Library.CreateBook`, they will be renamed to\n`google.example.library.Library.CreateShelf` and\n`google.example.library.Library.CreateBook`. It essentially renames the\nproto package name section of the matched proto service and methods.", + "description": "The protocol used for sending a request to the backend. The supported values are \"http/1.1\" and \"h2\". The default value is inferred from the scheme in the address field: SCHEME PROTOCOL http:// http/1.1 https:// http/1.1 grpc:// h2 grpcs:// h2 For secure HTTP backends (https://) that support HTTP/2, set this field to \"h2\" for improved performance. Configuring this field to non-default values is only supported for secure HTTP backends. This field will be ignored for all other backends. 
See https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids for more details on the supported values.", "type": "string" }, "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, @@ -1021,7 +1122,7 @@ "id": "BatchEnableServicesRequest", "properties": { "serviceIds": { - "description": "The identifiers of the services to enable on the project.\n\nA valid identifier would be:\nserviceusage.googleapis.com\n\nEnabling services requires that each service is public or is shared with\nthe user enabling the service.\n\nTwo or more services must be specified. To enable a single service,\nuse the `EnableService` method instead.\n\nA single request can enable a maximum of 20 services at a time. If more\nthan 20 services are specified, the request will fail, and no state changes\nwill occur.", + "description": "The identifiers of the services to enable on the project. A valid identifier would be: serviceusage.googleapis.com Enabling services requires that each service is public or is shared with the user enabling the service. Two or more services must be specified. To enable a single service, use the `EnableService` method instead. A single request can enable a maximum of 20 services at a time. If more than 20 services are specified, the request will fail, and no state changes will occur.", "items": { "type": "string" }, @@ -1031,11 +1132,11 @@ "type": "object" }, "BatchEnableServicesResponse": { - "description": "Response message for the `BatchEnableServices` method.\nThis response message is assigned to the `response` field of the returned\nOperation when that operation is done.", + "description": "Response message for the `BatchEnableServices` method. 
This response message is assigned to the `response` field of the returned Operation when that operation is done.", "id": "BatchEnableServicesResponse", "properties": { "failures": { - "description": "If allow_partial_success is true, and one or more services could not be\nenabled, this field contains the details about each failure.", + "description": "If allow_partial_success is true, and one or more services could not be enabled, this field contains the details about each failure.", "items": { "$ref": "EnableFailure" }, @@ -1052,11 +1153,11 @@ "type": "object" }, "Billing": { - "description": "Billing related configuration of the service.\n\nThe following example shows how to configure monitored resources and metrics\nfor billing, `consumer_destinations` is the only supported destination and\nthe monitored resources need at least one label key\n`cloud.googleapis.com/location` to indicate the location of the billing\nusage, using different monitored resources between monitoring and billing is\nrecommended so they can be evolved independently:\n\n\n monitored_resources:\n - type: library.googleapis.com/billing_branch\n labels:\n - key: cloud.googleapis.com/location\n description: |\n Predefined label to support billing location restriction.\n - key: city\n description: |\n Custom label to define the city where the library branch is located\n in.\n - key: name\n description: Custom label to define the name of the library branch.\n metrics:\n - name: library.googleapis.com/book/borrowed_count\n metric_kind: DELTA\n value_type: INT64\n unit: \"1\"\n billing:\n consumer_destinations:\n - monitored_resource: library.googleapis.com/billing_branch\n metrics:\n - library.googleapis.com/book/borrowed_count", + "description": "Billing related configuration of the service. The following example shows how to configure monitored resources and metrics for billing, `consumer_destinations` is the only supported destination and the monitored resources need at least one label key `cloud.googleapis.com/location` to indicate the location of the billing usage, using different monitored resources between monitoring and billing is recommended so they can be evolved independently: monitored_resources: - type: library.googleapis.com/billing_branch labels: - key: cloud.googleapis.com/location description: | Predefined label to support billing location restriction. - key: city description: | Custom label to define the city where the library branch is located in. - key: name description: Custom label to define the name of the library branch. metrics: - name: library.googleapis.com/book/borrowed_count metric_kind: DELTA value_type: INT64 unit: \"1\" billing: consumer_destinations: - monitored_resource: library.googleapis.com/billing_branch metrics: - library.googleapis.com/book/borrowed_count", "id": "Billing", "properties": { "consumerDestinations": { - "description": "Billing configurations for sending metrics to the consumer project.\nThere can be multiple consumer destinations per service, each one must have\na different monitored resource type. A metric can be used in at most\none consumer destination.", + "description": "Billing configurations for sending metrics to the consumer project. There can be multiple consumer destinations per service, each one must have a different monitored resource type. 
A metric can be used in at most one consumer destination.", "items": { "$ref": "BillingDestination" }, @@ -1066,18 +1167,18 @@ "type": "object" }, "BillingDestination": { - "description": "Configuration of a specific billing destination (Currently only support\nbill against consumer project).", + "description": "Configuration of a specific billing destination (Currently only support bill against consumer project).", "id": "BillingDestination", "properties": { "metrics": { - "description": "Names of the metrics to report to this billing destination.\nEach name must be defined in Service.metrics section.", + "description": "Names of the metrics to report to this billing destination. Each name must be defined in Service.metrics section.", "items": { "type": "string" }, "type": "array" }, "monitoredResource": { - "description": "The monitored resource type. The type must be defined in\nService.monitored_resources section.", + "description": "The monitored resource type. The type must be defined in Service.monitored_resources section.", "type": "string" } }, @@ -1096,22 +1197,22 @@ "type": "boolean" }, "metric": { - "description": "The name of the parent metric of this limit.\n\nAn example name would be:\n`compute.googleapis.com/cpus`", + "description": "The name of the parent metric of this limit. An example name would be: `compute.googleapis.com/cpus`", "type": "string" }, "name": { - "description": "The resource name of the quota limit.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion`\n\nThe resource name is intended to be opaque and should not be parsed for\nits component strings, since its representation could change in the future.", + "description": "The resource name of the quota limit. An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion` The resource name is intended to be opaque and should not be parsed for its component strings, since its representation could change in the future.", "type": "string" }, "quotaBuckets": { - "description": "Summary of the enforced quota buckets, organized by quota dimension,\nordered from least specific to most specific (for example, the global\ndefault bucket, with no quota dimensions, will always appear first).", + "description": "Summary of the enforced quota buckets, organized by quota dimension, ordered from least specific to most specific (for example, the global default bucket, with no quota dimensions, will always appear first).", "items": { "$ref": "QuotaBucket" }, "type": "array" }, "unit": { - "description": "The limit unit.\n\nAn example unit would be\n`1/{project}/{region}`\nNote that `{project}` and `{region}` are not placeholders in this example;\nthe literal characters `{` and `}` occur in the string.", + "description": "The limit unit. An example unit would be `1/{project}/{region}` Note that `{project}` and `{region}` are not placeholders in this example; the literal characters `{` and `}` occur in the string.", "type": "string" } }, @@ -1129,26 +1230,30 @@ "type": "array" }, "displayName": { - "description": "The display name of the metric.\n\nAn example name would be:\n\"CPUs\"", + "description": "The display name of the metric. 
An example name would be: \"CPUs\"", "type": "string" }, "metric": { - "description": "The name of the metric.\n\nAn example name would be:\n`compute.googleapis.com/cpus`", + "description": "The name of the metric. An example name would be: `compute.googleapis.com/cpus`", "type": "string" }, "name": { - "description": "The resource name of the quota settings on this metric for this consumer.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus\n\nThe resource name is intended to be opaque and should not be parsed for\nits component strings, since its representation could change in the future.", + "description": "The resource name of the quota settings on this metric for this consumer. An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus The resource name is intended to be opaque and should not be parsed for its component strings, since its representation could change in the future.", + "type": "string" + }, + "unit": { + "description": "The units in which the metric value is reported.", "type": "string" } }, "type": "object" }, "Context": { - "description": "`Context` defines which contexts an API requests.\n\nExample:\n\n context:\n rules:\n - selector: \"*\"\n requested:\n - google.rpc.context.ProjectContext\n - google.rpc.context.OriginContext\n\nThe above specifies that all methods in the API request\n`google.rpc.context.ProjectContext` and\n`google.rpc.context.OriginContext`.\n\nAvailable context types are defined in package\n`google.rpc.context`.\n\nThis also provides mechanism to whitelist any protobuf message extension that\ncan be sent in grpc metadata using “x-goog-ext-\u003cextension_id\u003e-bin” and\n“x-goog-ext-\u003cextension_id\u003e-jspb” format. For example, list any service\nspecific protobuf types that can appear in grpc metadata as follows in your\nyaml file:\n\nExample:\n\n context:\n rules:\n - selector: \"google.example.library.v1.LibraryService.CreateBook\"\n allowed_request_extensions:\n - google.foo.v1.NewExtension\n allowed_response_extensions:\n - google.foo.v1.NewExtension\n\nYou can also specify extension ID instead of fully qualified extension name\nhere.", + "description": "`Context` defines which contexts an API requests. Example: context: rules: - selector: \"*\" requested: - google.rpc.context.ProjectContext - google.rpc.context.OriginContext The above specifies that all methods in the API request `google.rpc.context.ProjectContext` and `google.rpc.context.OriginContext`. Available context types are defined in package `google.rpc.context`. This also provides mechanism to whitelist any protobuf message extension that can be sent in grpc metadata using “x-goog-ext--bin” and “x-goog-ext--jspb” format. For example, list any service specific protobuf types that can appear in grpc metadata as follows in your yaml file: Example: context: rules: - selector: \"google.example.library.v1.LibraryService.CreateBook\" allowed_request_extensions: - google.foo.v1.NewExtension allowed_response_extensions: - google.foo.v1.NewExtension You can also specify extension ID instead of fully qualified extension name here.", "id": "Context", "properties": { "rules": { - "description": "A list of RPC context rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of RPC context rules that apply to individual API methods. 
**NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "ContextRule" }, @@ -1158,18 +1263,18 @@ "type": "object" }, "ContextRule": { - "description": "A context rule provides information about the context for an individual API\nelement.", + "description": "A context rule provides information about the context for an individual API element.", "id": "ContextRule", "properties": { "allowedRequestExtensions": { - "description": "A list of full type names or extension IDs of extensions allowed in grpc\nside channel from client to backend.", + "description": "A list of full type names or extension IDs of extensions allowed in grpc side channel from client to backend.", "items": { "type": "string" }, "type": "array" }, "allowedResponseExtensions": { - "description": "A list of full type names or extension IDs of extensions allowed in grpc\nside channel from backend to client.", + "description": "A list of full type names or extension IDs of extensions allowed in grpc side channel from backend to client.", "items": { "type": "string" }, @@ -1190,29 +1295,29 @@ "type": "array" }, "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, "type": "object" }, "Control": { - "description": "Selects and configures the service controller used by the service. The\nservice controller handles features like abuse, quota, billing, logging,\nmonitoring, etc.", + "description": "Selects and configures the service controller used by the service. The service controller handles features like abuse, quota, billing, logging, monitoring, etc.", "id": "Control", "properties": { "environment": { - "description": "The service control environment to use. If empty, no control plane\nfeature (like quota and billing) will be enabled.", + "description": "The service control environment to use. If empty, no control plane feature (like quota and billing) will be enabled.", "type": "string" } }, "type": "object" }, "CustomError": { - "description": "Customize service error responses. For example, list any service\nspecific protobuf types that can appear in error detail lists of\nerror responses.\n\nExample:\n\n custom_error:\n types:\n - google.foo.v1.CustomError\n - google.foo.v1.AnotherError", + "description": "Customize service error responses. For example, list any service specific protobuf types that can appear in error detail lists of error responses. Example: custom_error: types: - google.foo.v1.CustomError - google.foo.v1.AnotherError", "id": "CustomError", "properties": { "rules": { - "description": "The list of custom error rules that apply to individual API messages.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "The list of custom error rules that apply to individual API messages. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "CustomErrorRule" }, @@ -1233,11 +1338,11 @@ "id": "CustomErrorRule", "properties": { "isErrorType": { - "description": "Mark this message as possible payload in error response. Otherwise,\nobjects of this type will be filtered when they appear in error payload.", + "description": "Mark this message as possible payload in error response. 
Otherwise, objects of this type will be filtered when they appear in error payload.", "type": "boolean" }, "selector": { - "description": "Selects messages to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects messages to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, @@ -1265,7 +1370,7 @@ "type": "object" }, "DisableServiceResponse": { - "description": "Response message for the `DisableService` method.\nThis response message is assigned to the `response` field of the returned\nOperation when that operation is done.", + "description": "Response message for the `DisableService` method. This response message is assigned to the `response` field of the returned Operation when that operation is done.", "id": "DisableServiceResponse", "properties": { "service": { @@ -1276,7 +1381,7 @@ "type": "object" }, "Documentation": { - "description": "`Documentation` provides the information for describing a service.\n\nExample:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: \u003e\n The Google Calendar API gives access\n to most calendar features.\n pages:\n - name: Overview\n content: \u0026#40;== include google/foo/overview.md ==\u0026#41;\n - name: Tutorial\n content: \u0026#40;== include google/foo/tutorial.md ==\u0026#41;\n subpages;\n - name: Java\n content: \u0026#40;== include google/foo/tutorial_java.md ==\u0026#41;\n rules:\n - selector: google.calendar.Calendar.Get\n description: \u003e\n ...\n - selector: google.calendar.Calendar.Put\n description: \u003e\n ...\n\u003c/code\u003e\u003c/pre\u003e\nDocumentation is provided in markdown syntax. In addition to\nstandard markdown features, definition lists, tables and fenced\ncode blocks are supported. Section headers can be provided and are\ninterpreted relative to the section nesting of the context where\na documentation fragment is embedded.\n\nDocumentation from the IDL is merged with documentation defined\nvia the config at normalization time, where documentation provided\nby config rules overrides IDL provided.\n\nA number of constructs specific to the API platform are supported\nin documentation text.\n\nIn order to reference a proto element, the following\nnotation can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;fully.qualified.proto.name]\u0026#91;]\u003c/code\u003e\u003c/pre\u003e\nTo override the display text used for the link, this can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;display text]\u0026#91;fully.qualified.proto.name]\u003c/code\u003e\u003c/pre\u003e\nText can be excluded from doc using the following notation:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;-- internal comment --\u0026#41;\u003c/code\u003e\u003c/pre\u003e\n\nA few directives are available in documentation. Note that\ndirectives must appear on a single line to be properly\nidentified. The `include` directive includes a markdown file from\nan external source:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== include path/to/file ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe `resource_for` directive marks a message to be the resource of\na collection in REST view. 
If it is not specified, tools attempt\nto infer the resource from the operations in a collection:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== resource_for v1.shelves.books ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe directive `suppress_warning` does not directly affect documentation\nand is documented together with service config validation.", + "description": "`Documentation` provides the information for describing a service. Example: documentation: summary: \u003e The Google Calendar API gives access to most calendar features. pages: - name: Overview content: (== include google/foo/overview.md ==) - name: Tutorial content: (== include google/foo/tutorial.md ==) subpages; - name: Java content: (== include google/foo/tutorial_java.md ==) rules: - selector: google.calendar.Calendar.Get description: \u003e ... - selector: google.calendar.Calendar.Put description: \u003e ... Documentation is provided in markdown syntax. In addition to standard markdown features, definition lists, tables and fenced code blocks are supported. Section headers can be provided and are interpreted relative to the section nesting of the context where a documentation fragment is embedded. Documentation from the IDL is merged with documentation defined via the config at normalization time, where documentation provided by config rules overrides IDL provided. A number of constructs specific to the API platform are supported in documentation text. In order to reference a proto element, the following notation can be used: [fully.qualified.proto.name][] To override the display text used for the link, this can be used: [display text][fully.qualified.proto.name] Text can be excluded from doc using the following notation: (-- internal comment --) A few directives are available in documentation. Note that directives must appear on a single line to be properly identified. The `include` directive includes a markdown file from an external source: (== include path/to/file ==) The `resource_for` directive marks a message to be the resource of a collection in REST view. If it is not specified, tools attempt to infer the resource from the operations in a collection: (== resource_for v1.shelves.books ==) The directive `suppress_warning` does not directly affect documentation and is documented together with service config validation.", "id": "Documentation", "properties": { "documentationRootUrl": { @@ -1284,7 +1389,7 @@ "type": "string" }, "overview": { - "description": "Declares a single overview page. For example:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: ...\n overview: \u0026#40;== include overview.md ==\u0026#41;\n\u003c/code\u003e\u003c/pre\u003e\nThis is a shortcut for the following declaration (using pages style):\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: ...\n pages:\n - name: Overview\n content: \u0026#40;== include overview.md ==\u0026#41;\n\u003c/code\u003e\u003c/pre\u003e\nNote: you cannot specify both `overview` field and `pages` field.", + "description": "Declares a single overview page. For example: documentation: summary: ... overview: (== include overview.md ==) This is a shortcut for the following declaration (using pages style): documentation: summary: ... 
pages: - name: Overview content: (== include overview.md ==) Note: you cannot specify both `overview` field and `pages` field.", "type": "string" }, "pages": { @@ -1295,18 +1400,18 @@ "type": "array" }, "rules": { - "description": "A list of documentation rules that apply to individual API elements.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of documentation rules that apply to individual API elements. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "DocumentationRule" }, "type": "array" }, "serviceRootUrl": { - "description": "Specifies the service root url if the default one (the service name\nfrom the yaml file) is not suitable. This can be seen in any fully\nspecified service urls as well as sections that show a base that other\nurls are relative to.", + "description": "Specifies the service root url if the default one (the service name from the yaml file) is not suitable. This can be seen in any fully specified service urls as well as sections that show a base that other urls are relative to.", "type": "string" }, "summary": { - "description": "A short summary of what the service does. Can only be provided by\nplain text.", + "description": "A short summary of what the service does. Can only be provided by plain text.", "type": "string" } }, @@ -1317,7 +1422,7 @@ "id": "DocumentationRule", "properties": { "deprecationDescription": { - "description": "Deprecation description of the selected element(s). It can be provided if\nan element is marked as `deprecated`.", + "description": "Deprecation description of the selected element(s). It can be provided if an element is marked as `deprecated`.", "type": "string" }, "description": { @@ -1325,14 +1430,14 @@ "type": "string" }, "selector": { - "description": "The selector is a comma-separated list of patterns. Each pattern is a\nqualified name of the element which may end in \"*\", indicating a wildcard.\nWildcards are only allowed at the end and for a whole component of the\nqualified name, i.e. \"foo.*\" is ok, but not \"foo.b*\" or \"foo.*.bar\". A\nwildcard will match one or more components. To specify a default for all\napplicable elements, the whole pattern \"*\" is used.", + "description": "The selector is a comma-separated list of patterns. Each pattern is a qualified name of the element which may end in \"*\", indicating a wildcard. Wildcards are only allowed at the end and for a whole component of the qualified name, i.e. \"foo.*\" is ok, but not \"foo.b*\" or \"foo.*.bar\". A wildcard will match one or more components. To specify a default for all applicable elements, the whole pattern \"*\" is used.", "type": "string" } }, "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. 
For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" @@ -1359,7 +1464,7 @@ "type": "object" }, "EnableServiceResponse": { - "description": "Response message for the `EnableService` method.\nThis response message is assigned to the `response` field of the returned\nOperation when that operation is done.", + "description": "Response message for the `EnableService` method. This response message is assigned to the `response` field of the returned Operation when that operation is done.", "id": "EnableServiceResponse", "properties": { "service": { @@ -1370,33 +1475,26 @@ "type": "object" }, "Endpoint": { - "description": "`Endpoint` describes a network endpoint that serves a set of APIs.\nA service may expose any number of endpoints, and all endpoints share the\nsame service configuration, such as quota configuration and monitoring\nconfiguration.\n\nExample service configuration:\n\n name: library-example.googleapis.com\n endpoints:\n # Below entry makes 'google.example.library.v1.Library'\n # API be served from endpoint address library-example.googleapis.com.\n # It also allows HTTP OPTIONS calls to be passed to the backend, for\n # it to decide whether the subsequent cross-origin request is\n # allowed to proceed.\n - name: library-example.googleapis.com\n allow_cors: true", + "description": "`Endpoint` describes a network endpoint that serves a set of APIs. A service may expose any number of endpoints, and all endpoints share the same service configuration, such as quota configuration and monitoring configuration. Example service configuration: name: library-example.googleapis.com endpoints: # Below entry makes 'google.example.library.v1.Library' # API be served from endpoint address library-example.googleapis.com. # It also allows HTTP OPTIONS calls to be passed to the backend, for # it to decide whether the subsequent cross-origin request is # allowed to proceed. - name: library-example.googleapis.com allow_cors: true", "id": "Endpoint", "properties": { "aliases": { - "description": "DEPRECATED: This field is no longer supported. Instead of using aliases,\nplease specify multiple google.api.Endpoint for each of the intended\naliases.\n\nAdditional names that this endpoint will be hosted on.", + "description": "DEPRECATED: This field is no longer supported. Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on.", "items": { "type": "string" }, "type": "array" }, "allowCors": { - "description": "Allowing\n[CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka\ncross-domain traffic, would allow the backends served from this endpoint to\nreceive and respond to HTTP OPTIONS requests. The response will be used by\nthe browser to determine whether the subsequent cross-origin request is\nallowed to proceed.", + "description": "Allowing [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka cross-domain traffic, would allow the backends served from this endpoint to receive and respond to HTTP OPTIONS requests. 
The response will be used by the browser to determine whether the subsequent cross-origin request is allowed to proceed.", "type": "boolean" }, - "features": { - "description": "The list of features enabled on this endpoint.", - "items": { - "type": "string" - }, - "type": "array" - }, "name": { "description": "The canonical name of this endpoint.", "type": "string" }, "target": { - "description": "The specification of an Internet routable address of API frontend that will\nhandle requests to this [API\nEndpoint](https://cloud.google.com/apis/design/glossary). It should be\neither a valid IPv4 address or a fully-qualified domain name. For example,\n\"8.8.8.8\" or \"myservice.appspot.com\".", + "description": "The specification of an Internet routable address of API frontend that will handle requests to this [API Endpoint](https://cloud.google.com/apis/design/glossary). It should be either a valid IPv4 address or a fully-qualified domain name. For example, \"8.8.8.8\" or \"myservice.appspot.com\".", "type": "string" } }, @@ -1550,7 +1648,7 @@ "type": "integer" }, "oneofIndex": { - "description": "The index of the field type in `Type.oneofs`, for message or enumeration\ntypes. The first type has index 1; zero means the type is not in the list.", + "description": "The index of the field type in `Type.oneofs`, for message or enumeration types. The first type has index 1; zero means the type is not in the list.", "format": "int32", "type": "integer" }, @@ -1566,7 +1664,7 @@ "type": "boolean" }, "typeUrl": { - "description": "The field type URL, without the scheme, for message or enumeration\ntypes. Example: `\"type.googleapis.com/google.protobuf.Timestamp\"`.", + "description": "The field type URL, without the scheme, for message or enumeration types. Example: `\"type.googleapis.com/google.protobuf.Timestamp\"`.", "type": "string" } }, @@ -1578,7 +1676,7 @@ "properties": { "identity": { "$ref": "ServiceIdentity", - "description": "Service identity that service producer can use to access consumer\nresources. If exists is true, it contains email and unique_id. If exists is\nfalse, it contains pre-constructed email and empty unique_id." + "description": "Service identity that service producer can use to access consumer resources. If exists is true, it contains email and unique_id. If exists is false, it contains pre-constructed email and empty unique_id." }, "state": { "description": "Service identity state.", @@ -1587,7 +1685,7 @@ "ACTIVE" ], "enumDescriptions": [ - "Default service identity state. This value is used if the state is\nomitted.", + "Default service identity state. This value is used if the state is omitted.", "Service identity has been created and can be used." ], "type": "string" @@ -1596,11 +1694,11 @@ "type": "object" }, "GoogleApiService": { - "description": "`Service` is the root object of Google service configuration schema. It\ndescribes basic information about a service, such as the name and the\ntitle, and delegates other aspects to sub-sections. Each sub-section is\neither a proto message or a repeated proto message that configures a\nspecific aspect, such as auth. 
See each proto message definition for details.\n\nExample:\n\n type: google.api.Service\n config_version: 3\n name: calendar.googleapis.com\n title: Google Calendar API\n apis:\n - name: google.calendar.v3.Calendar\n authentication:\n providers:\n - id: google_calendar_auth\n jwks_uri: https://www.googleapis.com/oauth2/v1/certs\n issuer: https://securetoken.google.com\n rules:\n - selector: \"*\"\n requirements:\n provider_id: google_calendar_auth", + "description": "`Service` is the root object of Google service configuration schema. It describes basic information about a service, such as the name and the title, and delegates other aspects to sub-sections. Each sub-section is either a proto message or a repeated proto message that configures a specific aspect, such as auth. See each proto message definition for details. Example: type: google.api.Service config_version: 3 name: calendar.googleapis.com title: Google Calendar API apis: - name: google.calendar.v3.Calendar authentication: providers: - id: google_calendar_auth jwks_uri: https://www.googleapis.com/oauth2/v1/certs issuer: https://securetoken.google.com rules: - selector: \"*\" requirements: provider_id: google_calendar_auth", "id": "GoogleApiService", "properties": { "apis": { - "description": "A list of API interfaces exported by this service. Only the `name` field\nof the google.protobuf.Api needs to be provided by the configuration\nauthor, as the remaining fields will be derived from the IDL during the\nnormalization process. It is an error to specify an API interface here\nwhich cannot be resolved against the associated IDL files.", + "description": "A list of API interfaces exported by this service. Only the `name` field of the google.protobuf.Api needs to be provided by the configuration author, as the remaining fields will be derived from the IDL during the normalization process. It is an error to specify an API interface here which cannot be resolved against the associated IDL files.", "items": { "$ref": "Api" }, @@ -1619,7 +1717,7 @@ "description": "Billing configuration." }, "configVersion": { - "description": "The semantic version of the service configuration. The config version\naffects the interpretation of the service configuration. For example,\ncertain features are enabled by default for certain config versions.\n\nThe latest config version is `3`.", + "description": "The semantic version of the service configuration. The config version affects the interpretation of the service configuration. For example, certain features are enabled by default for certain config versions. The latest config version is `3`.", "format": "uint32", "type": "integer" }, @@ -1640,14 +1738,14 @@ "description": "Additional API documentation." }, "endpoints": { - "description": "Configuration for network endpoints. If this is empty, then an endpoint\nwith the same name as the service is automatically generated to service all\ndefined APIs.", + "description": "Configuration for network endpoints. If this is empty, then an endpoint with the same name as the service is automatically generated to service all defined APIs.", "items": { "$ref": "Endpoint" }, "type": "array" }, "enums": { - "description": "A list of all enum types included in this API service. Enums\nreferenced directly or indirectly by the `apis` are automatically\nincluded. Enums which are not referenced but shall be included\nshould be listed here by name. 
Example:\n\n enums:\n - name: google.someapi.v1.SomeEnum", + "description": "A list of all enum types included in this API service. Enums referenced directly or indirectly by the `apis` are automatically included. Enums which are not referenced but shall be included should be listed here by name. Example: enums: - name: google.someapi.v1.SomeEnum", "items": { "$ref": "Enum" }, @@ -1658,7 +1756,7 @@ "description": "HTTP configuration." }, "id": { - "description": "A unique ID for a specific instance of this message, typically assigned\nby the client for tracking purpose. Must be no longer than 63 characters\nand only lower case letters, digits, '.', '_' and '-' are allowed. If\nempty, the server may choose to generate one instead.", + "description": "A unique ID for a specific instance of this message, typically assigned by the client for tracking purpose. Must be no longer than 63 characters and only lower case letters, digits, '.', '_' and '-' are allowed. If empty, the server may choose to generate one instead.", "type": "string" }, "logging": { @@ -1680,7 +1778,7 @@ "type": "array" }, "monitoredResources": { - "description": "Defines the monitored resources used by this service. This is required\nby the Service.monitoring and Service.logging configurations.", + "description": "Defines the monitored resources used by this service. This is required by the Service.monitoring and Service.logging configurations.", "items": { "$ref": "MonitoredResourceDescriptor" }, @@ -1691,7 +1789,7 @@ "description": "Monitoring configuration." }, "name": { - "description": "The service name, which is a DNS-like logical identifier for the\nservice, such as `calendar.googleapis.com`. The service name\ntypically goes through DNS verification to make sure the owner\nof the service also owns the DNS name.", + "description": "The service name, which is a DNS-like logical identifier for the service, such as `calendar.googleapis.com`. The service name typically goes through DNS verification to make sure the owner of the service also owns the DNS name.", "type": "string" }, "producerProjectId": { @@ -1711,7 +1809,7 @@ "description": "System parameter configuration." }, "systemTypes": { - "description": "A list of all proto message types included in this API service.\nIt serves similar purpose as [google.api.Service.types], except that\nthese types are not needed by user-defined APIs. Therefore, they will not\nshow up in the generated discovery doc. This field should only be used\nto define system APIs in ESF.", + "description": "A list of all proto message types included in this API service. It serves similar purpose as [google.api.Service.types], except that these types are not needed by user-defined APIs. Therefore, they will not show up in the generated discovery doc. This field should only be used to define system APIs in ESF.", "items": { "$ref": "Type" }, @@ -1722,7 +1820,7 @@ "type": "string" }, "types": { - "description": "A list of all proto message types included in this API service.\nTypes referenced directly or indirectly by the `apis` are\nautomatically included. Messages which are not referenced but\nshall be included, such as types used by the `google.protobuf.Any` type,\nshould be listed here by name. Example:\n\n types:\n - name: google.protobuf.Int32", + "description": "A list of all proto message types included in this API service. Types referenced directly or indirectly by the `apis` are automatically included. 
Messages which are not referenced but shall be included, such as types used by the `google.protobuf.Any` type, should be listed here by name. Example: types: - name: google.protobuf.Int32", "items": { "$ref": "Type" }, @@ -1736,19 +1834,19 @@ "type": "object" }, "GoogleApiServiceIdentity": { - "description": "The per-product per-project service identity for a service.\n\n\nUse this field to configure per-product per-project service identity.\nExample of a service identity configuration.\n\n usage:\n service_identity:\n - service_account_parent: \"projects/123456789\"\n display_name: \"Cloud XXX Service Agent\"\n description: \"Used as the identity of Cloud XXX to access resources\"", + "description": "The per-product per-project service identity for a service. Use this field to configure per-product per-project service identity. Example of a service identity configuration. usage: service_identity: - service_account_parent: \"projects/123456789\" display_name: \"Cloud XXX Service Agent\" description: \"Used as the identity of Cloud XXX to access resources\"", "id": "GoogleApiServiceIdentity", "properties": { "description": { - "description": "Optional. A user-specified opaque description of the service account.\nMust be less than or equal to 256 UTF-8 bytes.", + "description": "Optional. A user-specified opaque description of the service account. Must be less than or equal to 256 UTF-8 bytes.", "type": "string" }, "displayName": { - "description": "Optional. A user-specified name for the service account.\nMust be less than or equal to 100 UTF-8 bytes.", + "description": "Optional. A user-specified name for the service account. Must be less than or equal to 100 UTF-8 bytes.", "type": "string" }, "serviceAccountParent": { - "description": "A service account project that hosts the service accounts.\n\nAn example name would be:\n`projects/123456789`", + "description": "A service account project that hosts the service accounts. An example name would be: `projects/123456789`", "type": "string" } }, @@ -1759,7 +1857,7 @@ "id": "GoogleApiServiceusageV1OperationMetadata", "properties": { "resourceNames": { - "description": "The full name of the resources that this operation is directly\nassociated with.", + "description": "The full name of the resources that this operation is directly associated with.", "items": { "type": "string" }, @@ -1774,14 +1872,14 @@ "properties": { "config": { "$ref": "GoogleApiServiceusageV1ServiceConfig", - "description": "The service configuration of the available service.\nSome fields may be filtered out of the configuration in responses to\nthe `ListServices` method. These fields are present only in responses to\nthe `GetService` method." + "description": "The service configuration of the available service. Some fields may be filtered out of the configuration in responses to the `ListServices` method. These fields are present only in responses to the `GetService` method." }, "name": { - "description": "The resource name of the consumer and service.\n\nA valid name would be:\n- projects/123/services/serviceusage.googleapis.com", + "description": "The resource name of the consumer and service. A valid name would be: - projects/123/services/serviceusage.googleapis.com", "type": "string" }, "parent": { - "description": "The resource name of the consumer.\n\nA valid name would be:\n- projects/123", + "description": "The resource name of the consumer. 
A valid name would be: - projects/123", "type": "string" }, "state": { @@ -1792,8 +1890,8 @@ "ENABLED" ], "enumDescriptions": [ - "The default value, which indicates that the enabled state of the service\nis unspecified or not meaningful. Currently, all consumers other than\nprojects (such as folders and organizations) are always in this state.", - "The service cannot be used by this consumer. It has either been explicitly\ndisabled, or has never been enabled.", + "The default value, which indicates that the enabled state of the service is unspecified or not meaningful. Currently, all consumers other than projects (such as folders and organizations) are always in this state.", + "The service cannot be used by this consumer. It has either been explicitly disabled, or has never been enabled.", "The service has been explicitly enabled for use by this consumer." ], "type": "string" @@ -1806,7 +1904,7 @@ "id": "GoogleApiServiceusageV1ServiceConfig", "properties": { "apis": { - "description": "A list of API interfaces exported by this service. Contains only the names,\nversions, and method names of the interfaces.", + "description": "A list of API interfaces exported by this service. Contains only the names, versions, and method names of the interfaces.", "items": { "$ref": "Api" }, @@ -1818,17 +1916,28 @@ }, "documentation": { "$ref": "Documentation", - "description": "Additional API documentation. Contains only the summary and the\ndocumentation URL." + "description": "Additional API documentation. Contains only the summary and the documentation URL." }, "endpoints": { - "description": "Configuration for network endpoints. Contains only the names and aliases\nof the endpoints.", + "description": "Configuration for network endpoints. Contains only the names and aliases of the endpoints.", "items": { "$ref": "Endpoint" }, "type": "array" }, + "monitoredResources": { + "description": "Defines the monitored resources used by this service. This is required by the Service.monitoring and Service.logging configurations.", + "items": { + "$ref": "MonitoredResourceDescriptor" + }, + "type": "array" + }, + "monitoring": { + "$ref": "Monitoring", + "description": "Monitoring configuration. This should not include the 'producer_destinations' field." + }, "name": { - "description": "The DNS address at which this service is available.\n\nAn example DNS address would be:\n`calendar.googleapis.com`.", + "description": "The DNS address at which this service is available. An example DNS address would be: `calendar.googleapis.com`.", "type": "string" }, "quota": { @@ -1852,7 +1961,7 @@ "properties": { "identity": { "$ref": "GoogleApiServiceusageV1beta1ServiceIdentity", - "description": "Service identity that service producer can use to access consumer\nresources. If exists is true, it contains email and unique_id. If exists is\nfalse, it contains pre-constructed email and empty unique_id." + "description": "Service identity that service producer can use to access consumer resources. If exists is true, it contains email and unique_id. If exists is false, it contains pre-constructed email and empty unique_id." }, "state": { "description": "Service identity state.", @@ -1861,7 +1970,7 @@ "ACTIVE" ], "enumDescriptions": [ - "Default service identity state. This value is used if the state is\nomitted.", + "Default service identity state. This value is used if the state is omitted.", "Service identity has been created and can be used." 
], "type": "string" @@ -1870,30 +1979,30 @@ "type": "object" }, "GoogleApiServiceusageV1beta1ServiceIdentity": { - "description": "Service identity for a service. This is the identity that service producer\nshould use to access consumer resources.", + "description": "Service identity for a service. This is the identity that service producer should use to access consumer resources.", "id": "GoogleApiServiceusageV1beta1ServiceIdentity", "properties": { "email": { - "description": "The email address of the service account that a service producer would use\nto access consumer resources.", + "description": "The email address of the service account that a service producer would use to access consumer resources.", "type": "string" }, "uniqueId": { - "description": "The unique and stable id of the service account.\nhttps://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts#ServiceAccount", + "description": "The unique and stable id of the service account. https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts#ServiceAccount", "type": "string" } }, "type": "object" }, "Http": { - "description": "Defines the HTTP configuration for an API service. It contains a list of\nHttpRule, each specifying the mapping of an RPC method\nto one or more HTTP REST API methods.", + "description": "Defines the HTTP configuration for an API service. It contains a list of HttpRule, each specifying the mapping of an RPC method to one or more HTTP REST API methods.", "id": "Http", "properties": { "fullyDecodeReservedExpansion": { - "description": "When set to true, URL path parameters will be fully URI-decoded except in\ncases of single segment matches in reserved expansion, where \"%2F\" will be\nleft encoded.\n\nThe default behavior is to not decode RFC 6570 reserved characters in multi\nsegment matches.", + "description": "When set to true, URL path parameters will be fully URI-decoded except in cases of single segment matches in reserved expansion, where \"%2F\" will be left encoded. The default behavior is to not decode RFC 6570 reserved characters in multi segment matches.", "type": "boolean" }, "rules": { - "description": "A list of HTTP configuration rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of HTTP configuration rules that apply to individual API methods. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "HttpRule" }, @@ -1903,34 +2012,34 @@ "type": "object" }, "HttpRule": { - "description": "# gRPC Transcoding\n\ngRPC Transcoding is a feature for mapping between a gRPC method and one or\nmore HTTP REST endpoints. It allows developers to build a single API service\nthat supports both gRPC APIs and REST APIs. Many systems, including [Google\nAPIs](https://github.com/googleapis/googleapis),\n[Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC\nGateway](https://github.com/grpc-ecosystem/grpc-gateway),\nand [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature\nand use it for large scale production services.\n\n`HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies\nhow different portions of the gRPC request message are mapped to the URL\npath, URL query parameters, and HTTP request body. It also controls how the\ngRPC response message is mapped to the HTTP response body. 
`HttpRule` is\ntypically specified as an `google.api.http` annotation on the gRPC method.\n\nEach mapping specifies a URL path template and an HTTP method. The path\ntemplate may refer to one or more fields in the gRPC request message, as long\nas each field is a non-repeated field with a primitive (non-message) type.\nThe path template controls how fields of the request message are mapped to\nthe URL path.\n\nExample:\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http) = {\n get: \"/v1/{name=messages/*}\"\n };\n }\n }\n message GetMessageRequest {\n string name = 1; // Mapped to URL path.\n }\n message Message {\n string text = 1; // The resource content.\n }\n\nThis enables an HTTP REST to gRPC mapping as below:\n\nHTTP | gRPC\n-----|-----\n`GET /v1/messages/123456` | `GetMessage(name: \"messages/123456\")`\n\nAny fields in the request message which are not bound by the path template\nautomatically become HTTP query parameters if there is no HTTP request body.\nFor example:\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http) = {\n get:\"/v1/messages/{message_id}\"\n };\n }\n }\n message GetMessageRequest {\n message SubMessage {\n string subfield = 1;\n }\n string message_id = 1; // Mapped to URL path.\n int64 revision = 2; // Mapped to URL query parameter `revision`.\n SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`.\n }\n\nThis enables a HTTP JSON to RPC mapping as below:\n\nHTTP | gRPC\n-----|-----\n`GET /v1/messages/123456?revision=2\u0026sub.subfield=foo` |\n`GetMessage(message_id: \"123456\" revision: 2 sub: SubMessage(subfield:\n\"foo\"))`\n\nNote that fields which are mapped to URL query parameters must have a\nprimitive type or a repeated primitive type or a non-repeated message type.\nIn the case of a repeated type, the parameter can be repeated in the URL\nas `...?param=A\u0026param=B`. In the case of a message type, each field of the\nmessage is mapped to a separate parameter, such as\n`...?foo.a=A\u0026foo.b=B\u0026foo.c=C`.\n\nFor HTTP methods that allow a request body, the `body` field\nspecifies the mapping. Consider a REST update method on the\nmessage resource collection:\n\n service Messaging {\n rpc UpdateMessage(UpdateMessageRequest) returns (Message) {\n option (google.api.http) = {\n patch: \"/v1/messages/{message_id}\"\n body: \"message\"\n };\n }\n }\n message UpdateMessageRequest {\n string message_id = 1; // mapped to the URL\n Message message = 2; // mapped to the body\n }\n\nThe following HTTP JSON to RPC mapping is enabled, where the\nrepresentation of the JSON in the request body is determined by\nprotos JSON encoding:\n\nHTTP | gRPC\n-----|-----\n`PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id:\n\"123456\" message { text: \"Hi!\" })`\n\nThe special name `*` can be used in the body mapping to define that\nevery field not bound by the path template should be mapped to the\nrequest body. 
This enables the following alternative definition of\nthe update method:\n\n service Messaging {\n rpc UpdateMessage(Message) returns (Message) {\n option (google.api.http) = {\n patch: \"/v1/messages/{message_id}\"\n body: \"*\"\n };\n }\n }\n message Message {\n string message_id = 1;\n string text = 2;\n }\n\n\nThe following HTTP JSON to RPC mapping is enabled:\n\nHTTP | gRPC\n-----|-----\n`PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id:\n\"123456\" text: \"Hi!\")`\n\nNote that when using `*` in the body mapping, it is not possible to\nhave HTTP parameters, as all fields not bound by the path end in\nthe body. This makes this option more rarely used in practice when\ndefining REST APIs. The common usage of `*` is in custom methods\nwhich don't use the URL at all for transferring data.\n\nIt is possible to define multiple HTTP methods for one RPC by using\nthe `additional_bindings` option. Example:\n\n service Messaging {\n rpc GetMessage(GetMessageRequest) returns (Message) {\n option (google.api.http) = {\n get: \"/v1/messages/{message_id}\"\n additional_bindings {\n get: \"/v1/users/{user_id}/messages/{message_id}\"\n }\n };\n }\n }\n message GetMessageRequest {\n string message_id = 1;\n string user_id = 2;\n }\n\nThis enables the following two alternative HTTP JSON to RPC mappings:\n\nHTTP | gRPC\n-----|-----\n`GET /v1/messages/123456` | `GetMessage(message_id: \"123456\")`\n`GET /v1/users/me/messages/123456` | `GetMessage(user_id: \"me\" message_id:\n\"123456\")`\n\n## Rules for HTTP mapping\n\n1. Leaf request fields (recursive expansion nested messages in the request\n message) are classified into three categories:\n - Fields referred by the path template. They are passed via the URL path.\n - Fields referred by the HttpRule.body. They are passed via the HTTP\n request body.\n - All other fields are passed via the URL query parameters, and the\n parameter name is the field path in the request message. A repeated\n field can be represented as multiple query parameters under the same\n name.\n 2. If HttpRule.body is \"*\", there is no URL query parameter, all fields\n are passed via URL path and HTTP request body.\n 3. If HttpRule.body is omitted, there is no HTTP request body, all\n fields are passed via URL path and URL query parameters.\n\n### Path template syntax\n\n Template = \"/\" Segments [ Verb ] ;\n Segments = Segment { \"/\" Segment } ;\n Segment = \"*\" | \"**\" | LITERAL | Variable ;\n Variable = \"{\" FieldPath [ \"=\" Segments ] \"}\" ;\n FieldPath = IDENT { \".\" IDENT } ;\n Verb = \":\" LITERAL ;\n\nThe syntax `*` matches a single URL path segment. The syntax `**` matches\nzero or more URL path segments, which must be the last part of the URL path\nexcept the `Verb`.\n\nThe syntax `Variable` matches part of the URL path as specified by its\ntemplate. A variable template must not contain other variables. If a variable\nmatches a single path segment, its template may be omitted, e.g. `{var}`\nis equivalent to `{var=*}`.\n\nThe syntax `LITERAL` matches literal text in the URL path. If the `LITERAL`\ncontains any reserved character, such characters should be percent-encoded\nbefore the matching.\n\nIf a variable contains exactly one path segment, such as `\"{var}\"` or\n`\"{var=*}\"`, when such a variable is expanded into a URL path on the client\nside, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The\nserver side does the reverse decoding. 
Such variables show up in the\n[Discovery\nDocument](https://developers.google.com/discovery/v1/reference/apis) as\n`{var}`.\n\nIf a variable contains multiple path segments, such as `\"{var=foo/*}\"`\nor `\"{var=**}\"`, when such a variable is expanded into a URL path on the\nclient side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded.\nThe server side does the reverse decoding, except \"%2F\" and \"%2f\" are left\nunchanged. Such variables show up in the\n[Discovery\nDocument](https://developers.google.com/discovery/v1/reference/apis) as\n`{+var}`.\n\n## Using gRPC API Service Configuration\n\ngRPC API Service Configuration (service config) is a configuration language\nfor configuring a gRPC service to become a user-facing product. The\nservice config is simply the YAML representation of the `google.api.Service`\nproto message.\n\nAs an alternative to annotating your proto file, you can configure gRPC\ntranscoding in your service config YAML files. You do this by specifying a\n`HttpRule` that maps the gRPC method to a REST endpoint, achieving the same\neffect as the proto annotation. This can be particularly useful if you\nhave a proto that is reused in multiple services. Note that any transcoding\nspecified in the service config will override any matching transcoding\nconfiguration in the proto.\n\nExample:\n\n http:\n rules:\n # Selects a gRPC method and applies HttpRule to it.\n - selector: example.v1.Messaging.GetMessage\n get: /v1/messages/{message_id}/{sub.subfield}\n\n## Special notes\n\nWhen gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the\nproto to JSON conversion must follow the [proto3\nspecification](https://developers.google.com/protocol-buffers/docs/proto3#json).\n\nWhile the single segment variable follows the semantics of\n[RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String\nExpansion, the multi segment variable **does not** follow RFC 6570 Section\n3.2.3 Reserved Expansion. The reason is that the Reserved Expansion\ndoes not expand special characters like `?` and `#`, which would lead\nto invalid URLs. As the result, gRPC Transcoding uses a custom encoding\nfor multi segment variables.\n\nThe path variables **must not** refer to any repeated or mapped field,\nbecause client libraries are not capable of handling such variable expansion.\n\nThe path variables **must not** capture the leading \"/\" character. The reason\nis that the most common use case \"{var}\" does not capture the leading \"/\"\ncharacter. For consistency, all path variables must share the same behavior.\n\nRepeated message fields must not be mapped to URL query parameters, because\nno client library can support such complicated mapping.\n\nIf an API needs to use a JSON array for request or response body, it can map\nthe request or response body to a repeated field. However, some gRPC\nTranscoding implementations may not support this feature.", + "description": "# gRPC Transcoding gRPC Transcoding is a feature for mapping between a gRPC method and one or more HTTP REST endpoints. It allows developers to build a single API service that supports both gRPC APIs and REST APIs. Many systems, including [Google APIs](https://github.com/googleapis/googleapis), [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC Gateway](https://github.com/grpc-ecosystem/grpc-gateway), and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature and use it for large scale production services. `HttpRule` defines the schema of the gRPC/REST mapping. 
The mapping specifies how different portions of the gRPC request message are mapped to the URL path, URL query parameters, and HTTP request body. It also controls how the gRPC response message is mapped to the HTTP response body. `HttpRule` is typically specified as an `google.api.http` annotation on the gRPC method. Each mapping specifies a URL path template and an HTTP method. The path template may refer to one or more fields in the gRPC request message, as long as each field is a non-repeated field with a primitive (non-message) type. The path template controls how fields of the request message are mapped to the URL path. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/{name=messages/*}\" }; } } message GetMessageRequest { string name = 1; // Mapped to URL path. } message Message { string text = 1; // The resource content. } This enables an HTTP REST to gRPC mapping as below: HTTP | gRPC -----|----- `GET /v1/messages/123456` | `GetMessage(name: \"messages/123456\")` Any fields in the request message which are not bound by the path template automatically become HTTP query parameters if there is no HTTP request body. For example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get:\"/v1/messages/{message_id}\" }; } } message GetMessageRequest { message SubMessage { string subfield = 1; } string message_id = 1; // Mapped to URL path. int64 revision = 2; // Mapped to URL query parameter `revision`. SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. } This enables a HTTP JSON to RPC mapping as below: HTTP | gRPC -----|----- `GET /v1/messages/123456?revision=2\u0026sub.subfield=foo` | `GetMessage(message_id: \"123456\" revision: 2 sub: SubMessage(subfield: \"foo\"))` Note that fields which are mapped to URL query parameters must have a primitive type or a repeated primitive type or a non-repeated message type. In the case of a repeated type, the parameter can be repeated in the URL as `...?param=A\u0026param=B`. In the case of a message type, each field of the message is mapped to a separate parameter, such as `...?foo.a=A\u0026foo.b=B\u0026foo.c=C`. For HTTP methods that allow a request body, the `body` field specifies the mapping. Consider a REST update method on the message resource collection: service Messaging { rpc UpdateMessage(UpdateMessageRequest) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"message\" }; } } message UpdateMessageRequest { string message_id = 1; // mapped to the URL Message message = 2; // mapped to the body } The following HTTP JSON to RPC mapping is enabled, where the representation of the JSON in the request body is determined by protos JSON encoding: HTTP | gRPC -----|----- `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id: \"123456\" message { text: \"Hi!\" })` The special name `*` can be used in the body mapping to define that every field not bound by the path template should be mapped to the request body. 
This enables the following alternative definition of the update method: service Messaging { rpc UpdateMessage(Message) returns (Message) { option (google.api.http) = { patch: \"/v1/messages/{message_id}\" body: \"*\" }; } } message Message { string message_id = 1; string text = 2; } The following HTTP JSON to RPC mapping is enabled: HTTP | gRPC -----|----- `PATCH /v1/messages/123456 { \"text\": \"Hi!\" }` | `UpdateMessage(message_id: \"123456\" text: \"Hi!\")` Note that when using `*` in the body mapping, it is not possible to have HTTP parameters, as all fields not bound by the path end in the body. This makes this option more rarely used in practice when defining REST APIs. The common usage of `*` is in custom methods which don't use the URL at all for transferring data. It is possible to define multiple HTTP methods for one RPC by using the `additional_bindings` option. Example: service Messaging { rpc GetMessage(GetMessageRequest) returns (Message) { option (google.api.http) = { get: \"/v1/messages/{message_id}\" additional_bindings { get: \"/v1/users/{user_id}/messages/{message_id}\" } }; } } message GetMessageRequest { string message_id = 1; string user_id = 2; } This enables the following two alternative HTTP JSON to RPC mappings: HTTP | gRPC -----|----- `GET /v1/messages/123456` | `GetMessage(message_id: \"123456\")` `GET /v1/users/me/messages/123456` | `GetMessage(user_id: \"me\" message_id: \"123456\")` ## Rules for HTTP mapping 1. Leaf request fields (recursive expansion nested messages in the request message) are classified into three categories: - Fields referred by the path template. They are passed via the URL path. - Fields referred by the HttpRule.body. They are passed via the HTTP request body. - All other fields are passed via the URL query parameters, and the parameter name is the field path in the request message. A repeated field can be represented as multiple query parameters under the same name. 2. If HttpRule.body is \"*\", there is no URL query parameter, all fields are passed via URL path and HTTP request body. 3. If HttpRule.body is omitted, there is no HTTP request body, all fields are passed via URL path and URL query parameters. ### Path template syntax Template = \"/\" Segments [ Verb ] ; Segments = Segment { \"/\" Segment } ; Segment = \"*\" | \"**\" | LITERAL | Variable ; Variable = \"{\" FieldPath [ \"=\" Segments ] \"}\" ; FieldPath = IDENT { \".\" IDENT } ; Verb = \":\" LITERAL ; The syntax `*` matches a single URL path segment. The syntax `**` matches zero or more URL path segments, which must be the last part of the URL path except the `Verb`. The syntax `Variable` matches part of the URL path as specified by its template. A variable template must not contain other variables. If a variable matches a single path segment, its template may be omitted, e.g. `{var}` is equivalent to `{var=*}`. The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` contains any reserved character, such characters should be percent-encoded before the matching. If a variable contains exactly one path segment, such as `\"{var}\"` or `\"{var=*}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{var}`. 
If a variable contains multiple path segments, such as `\"{var=foo/*}\"` or `\"{var=**}\"`, when such a variable is expanded into a URL path on the client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. The server side does the reverse decoding, except \"%2F\" and \"%2f\" are left unchanged. Such variables show up in the [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) as `{+var}`. ## Using gRPC API Service Configuration gRPC API Service Configuration (service config) is a configuration language for configuring a gRPC service to become a user-facing product. The service config is simply the YAML representation of the `google.api.Service` proto message. As an alternative to annotating your proto file, you can configure gRPC transcoding in your service config YAML files. You do this by specifying a `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same effect as the proto annotation. This can be particularly useful if you have a proto that is reused in multiple services. Note that any transcoding specified in the service config will override any matching transcoding configuration in the proto. Example: http: rules: # Selects a gRPC method and applies HttpRule to it. - selector: example.v1.Messaging.GetMessage get: /v1/messages/{message_id}/{sub.subfield} ## Special notes When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the proto to JSON conversion must follow the [proto3 specification](https://developers.google.com/protocol-buffers/docs/proto3#json). While the single segment variable follows the semantics of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String Expansion, the multi segment variable **does not** follow RFC 6570 Section 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion does not expand special characters like `?` and `#`, which would lead to invalid URLs. As the result, gRPC Transcoding uses a custom encoding for multi segment variables. The path variables **must not** refer to any repeated or mapped field, because client libraries are not capable of handling such variable expansion. The path variables **must not** capture the leading \"/\" character. The reason is that the most common use case \"{var}\" does not capture the leading \"/\" character. For consistency, all path variables must share the same behavior. Repeated message fields must not be mapped to URL query parameters, because no client library can support such complicated mapping. If an API needs to use a JSON array for request or response body, it can map the request or response body to a repeated field. However, some gRPC Transcoding implementations may not support this feature.", "id": "HttpRule", "properties": { "additionalBindings": { - "description": "Additional HTTP bindings for the selector. Nested bindings must\nnot contain an `additional_bindings` field themselves (that is,\nthe nesting may only be one level deep).", + "description": "Additional HTTP bindings for the selector. 
Nested bindings must not contain an `additional_bindings` field themselves (that is, the nesting may only be one level deep).", "items": { "$ref": "HttpRule" }, "type": "array" }, "allowHalfDuplex": { - "description": "When this flag is set to true, HTTP requests will be allowed to invoke a\nhalf-duplex streaming method.", + "description": "When this flag is set to true, HTTP requests will be allowed to invoke a half-duplex streaming method.", "type": "boolean" }, "body": { - "description": "The name of the request field whose value is mapped to the HTTP request\nbody, or `*` for mapping all request fields not captured by the path\npattern to the HTTP body, or omitted for not having any HTTP request body.\n\nNOTE: the referred field must be present at the top-level of the request\nmessage type.", + "description": "The name of the request field whose value is mapped to the HTTP request body, or `*` for mapping all request fields not captured by the path pattern to the HTTP body, or omitted for not having any HTTP request body. NOTE: the referred field must be present at the top-level of the request message type.", "type": "string" }, "custom": { "$ref": "CustomHttpPattern", - "description": "The custom pattern is used for specifying an HTTP method that is not\nincluded in the `pattern` field, such as HEAD, or \"*\" to leave the\nHTTP method unspecified for this rule. The wild-card rule is useful\nfor services that provide content to Web (HTML) clients." + "description": "The custom pattern is used for specifying an HTTP method that is not included in the `pattern` field, such as HEAD, or \"*\" to leave the HTTP method unspecified for this rule. The wild-card rule is useful for services that provide content to Web (HTML) clients." }, "delete": { "description": "Maps to HTTP DELETE. Used for deleting a resource.", "type": "string" }, "get": { - "description": "Maps to HTTP GET. Used for listing and getting information about\nresources.", + "description": "Maps to HTTP GET. Used for listing and getting information about resources.", "type": "string" }, "patch": { @@ -1946,11 +2055,11 @@ "type": "string" }, "responseBody": { - "description": "Optional. The name of the response field whose value is mapped to the HTTP\nresponse body. When omitted, the entire response message will be used\nas the HTTP response body.\n\nNOTE: The referred field must be present at the top-level of the response\nmessage type.", + "description": "Optional. The name of the response field whose value is mapped to the HTTP response body. When omitted, the entire response message will be used as the HTTP response body. NOTE: The referred field must be present at the top-level of the response message type.", "type": "string" }, "selector": { - "description": "Selects a method to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects a method to which this rule applies. 
Refer to selector for syntax details.", "type": "string" } }, @@ -1970,6 +2079,35 @@ }, "type": "object" }, + "ImportAdminQuotaPoliciesResponse": { + "description": "Response message for ImportAdminQuotaPolicies", + "id": "ImportAdminQuotaPoliciesResponse", + "properties": { + "policies": { + "description": "The policies that were created from the imported data.", + "items": { + "$ref": "AdminQuotaPolicy" + }, + "type": "array" + } + }, + "type": "object" + }, + "ImportConsumerOverridesRequest": { + "description": "Request message for ImportConsumerOverrides", + "id": "ImportConsumerOverridesRequest", + "properties": { + "force": { + "description": "Whether to force the creation of the quota overrides. If creating an override would cause the effective quota for the consumer to decrease by more than 10 percent, the call is rejected, as a safety measure to avoid accidentally decreasing quota too quickly. Setting the force parameter to true ignores this restriction.", + "type": "boolean" + }, + "inlineSource": { + "$ref": "OverrideInlineSource", + "description": "The import data is specified in the request message itself" + } + }, + "type": "object" + }, "ImportConsumerOverridesResponse": { "description": "Response message for ImportConsumerOverrides", "id": "ImportConsumerOverridesResponse", @@ -1997,7 +2135,7 @@ "type": "string" }, "valuePrefix": { - "description": "The value prefix. The value format is \"value_prefix{token}\"\nOnly applies to \"in\" header type. Must be empty for \"in\" query type.\nIf not empty, the header value has to match (case sensitive) this prefix.\nIf not matched, JWT will not be extracted. If matched, JWT will be\nextracted after the prefix is removed.\n\nFor example, for \"Authorization: Bearer {JWT}\",\nvalue_prefix=\"Bearer \" with a space at the end.", + "description": "The value prefix. The value format is \"value_prefix{token}\" Only applies to \"in\" header type. Must be empty for \"in\" query type. If not empty, the header value has to match (case sensitive) this prefix. If not matched, JWT will not be extracted. If matched, JWT will be extracted after the prefix is removed. 
For example, for \"Authorization: Bearer {JWT}\", value_prefix=\"Bearer \" with a space at the end.", "type": "string" } }, @@ -2037,7 +2175,7 @@ "id": "ListAdminOverridesResponse", "properties": { "nextPageToken": { - "description": "Token identifying which result to start with; returned by a previous list\ncall.", + "description": "Token identifying which result to start with; returned by a previous list call.", "type": "string" }, "overrides": { @@ -2055,7 +2193,7 @@ "id": "ListConsumerOverridesResponse", "properties": { "nextPageToken": { - "description": "Token identifying which result to start with; returned by a previous list\ncall.", + "description": "Token identifying which result to start with; returned by a previous list call.", "type": "string" }, "overrides": { @@ -2080,7 +2218,7 @@ "type": "array" }, "nextPageToken": { - "description": "Token identifying which result to start with; returned by a previous list\ncall.", + "description": "Token identifying which result to start with; returned by a previous list call.", "type": "string" } }, @@ -2109,7 +2247,7 @@ "id": "ListServicesResponse", "properties": { "nextPageToken": { - "description": "Token that can be passed to `ListServices` to resume a paginated\nquery.", + "description": "Token that can be passed to `ListServices` to resume a paginated query.", "type": "string" }, "services": { @@ -2123,44 +2261,44 @@ "type": "object" }, "LogDescriptor": { - "description": "A description of a log type. Example in YAML format:\n\n - name: library.googleapis.com/activity_history\n description: The history of borrowing and returning library items.\n display_name: Activity\n labels:\n - key: /customer_id\n description: Identifier of a library customer", + "description": "A description of a log type. Example in YAML format: - name: library.googleapis.com/activity_history description: The history of borrowing and returning library items. display_name: Activity labels: - key: /customer_id description: Identifier of a library customer", "id": "LogDescriptor", "properties": { "description": { - "description": "A human-readable description of this log. This information appears in\nthe documentation and can contain details.", + "description": "A human-readable description of this log. This information appears in the documentation and can contain details.", "type": "string" }, "displayName": { - "description": "The human-readable name for this log. This information appears on\nthe user interface and should be concise.", + "description": "The human-readable name for this log. This information appears on the user interface and should be concise.", "type": "string" }, "labels": { - "description": "The set of labels that are available to describe a specific log entry.\nRuntime requests that contain labels not specified here are\nconsidered invalid.", + "description": "The set of labels that are available to describe a specific log entry. Runtime requests that contain labels not specified here are considered invalid.", "items": { "$ref": "LabelDescriptor" }, "type": "array" }, "name": { - "description": "The name of the log. It must be less than 512 characters long and can\ninclude the following characters: upper- and lower-case alphanumeric\ncharacters [A-Za-z0-9], and punctuation characters including\nslash, underscore, hyphen, period [/_-.].", + "description": "The name of the log. 
It must be less than 512 characters long and can include the following characters: upper- and lower-case alphanumeric characters [A-Za-z0-9], and punctuation characters including slash, underscore, hyphen, period [/_-.].", "type": "string" } }, "type": "object" }, "Logging": { - "description": "Logging configuration of the service.\n\nThe following example shows how to configure logs to be sent to the\nproducer and consumer projects. In the example, the `activity_history`\nlog is sent to both the producer and consumer projects, whereas the\n`purchase_history` log is only sent to the producer project.\n\n monitored_resources:\n - type: library.googleapis.com/branch\n labels:\n - key: /city\n description: The city where the library branch is located in.\n - key: /name\n description: The name of the branch.\n logs:\n - name: activity_history\n labels:\n - key: /customer_id\n - name: purchase_history\n logging:\n producer_destinations:\n - monitored_resource: library.googleapis.com/branch\n logs:\n - activity_history\n - purchase_history\n consumer_destinations:\n - monitored_resource: library.googleapis.com/branch\n logs:\n - activity_history", + "description": "Logging configuration of the service. The following example shows how to configure logs to be sent to the producer and consumer projects. In the example, the `activity_history` log is sent to both the producer and consumer projects, whereas the `purchase_history` log is only sent to the producer project. monitored_resources: - type: library.googleapis.com/branch labels: - key: /city description: The city where the library branch is located in. - key: /name description: The name of the branch. logs: - name: activity_history labels: - key: /customer_id - name: purchase_history logging: producer_destinations: - monitored_resource: library.googleapis.com/branch logs: - activity_history - purchase_history consumer_destinations: - monitored_resource: library.googleapis.com/branch logs: - activity_history", "id": "Logging", "properties": { "consumerDestinations": { - "description": "Logging configurations for sending logs to the consumer project.\nThere can be multiple consumer destinations, each one must have a\ndifferent monitored resource type. A log can be used in at most\none consumer destination.", + "description": "Logging configurations for sending logs to the consumer project. There can be multiple consumer destinations, each one must have a different monitored resource type. A log can be used in at most one consumer destination.", "items": { "$ref": "LoggingDestination" }, "type": "array" }, "producerDestinations": { - "description": "Logging configurations for sending logs to the producer project.\nThere can be multiple producer destinations, each one must have a\ndifferent monitored resource type. A log can be used in at most\none producer destination.", + "description": "Logging configurations for sending logs to the producer project. There can be multiple producer destinations, each one must have a different monitored resource type. 
A log can be used in at most one producer destination.", "items": { "$ref": "LoggingDestination" }, @@ -2170,18 +2308,18 @@ "type": "object" }, "LoggingDestination": { - "description": "Configuration of a specific logging destination (the producer project\nor the consumer project).", + "description": "Configuration of a specific logging destination (the producer project or the consumer project).", "id": "LoggingDestination", "properties": { "logs": { - "description": "Names of the logs to be sent to this destination. Each name must\nbe defined in the Service.logs section. If the log name is\nnot a domain scoped name, it will be automatically prefixed with\nthe service name followed by \"/\".", + "description": "Names of the logs to be sent to this destination. Each name must be defined in the Service.logs section. If the log name is not a domain scoped name, it will be automatically prefixed with the service name followed by \"/\".", "items": { "type": "string" }, "type": "array" }, "monitoredResource": { - "description": "The monitored resource type. The type must be defined in the\nService.monitored_resources section.", + "description": "The monitored resource type. The type must be defined in the Service.monitored_resources section.", "type": "string" } }, @@ -2234,7 +2372,7 @@ "type": "object" }, "MetricDescriptor": { - "description": "Defines a metric type and its schema. Once a metric descriptor is created,\ndeleting or altering it stops data collection and makes the metric type's\nexisting data unusable.", + "description": "Defines a metric type and its schema. Once a metric descriptor is created, deleting or altering it stops data collection and makes the metric type's existing data unusable. ", "id": "MetricDescriptor", "properties": { "description": { @@ -2242,11 +2380,11 @@ "type": "string" }, "displayName": { - "description": "A concise name for the metric, which can be displayed in user interfaces.\nUse sentence case without an ending period, for example \"Request count\".\nThis field is optional but it is recommended to be set for any metrics\nassociated with user-visible concepts, such as Quota.", + "description": "A concise name for the metric, which can be displayed in user interfaces. Use sentence case without an ending period, for example \"Request count\". This field is optional but it is recommended to be set for any metrics associated with user-visible concepts, such as Quota.", "type": "string" }, "labels": { - "description": "The set of labels that can be used to describe a specific\ninstance of this metric type. For example, the\n`appengine.googleapis.com/http/server/response_latencies` metric\ntype has a label for the HTTP response code, `response_code`, so\nyou can look at latencies for successful responses or just\nfor responses that failed.", + "description": "The set of labels that can be used to describe a specific instance of this metric type. For example, the `appengine.googleapis.com/http/server/response_latencies` metric type has a label for the HTTP response code, `response_code`, so you can look at latencies for successful responses or just for responses that failed.", "items": { "$ref": "LabelDescriptor" }, @@ -2268,11 +2406,11 @@ "Do not use this default value.", "The feature is not yet implemented. Users can not use it.", "Prelaunch features are hidden from users and are only visible internally.", - "Early Access features are limited to a closed group of testers. 
To use\nthese features, you must sign up in advance and sign a Trusted Tester\nagreement (which includes confidentiality provisions). These features may\nbe unstable, changed in backward-incompatible ways, and are not\nguaranteed to be released.", - "Alpha is a limited availability test for releases before they are cleared\nfor widespread use. By Alpha, all significant design issues are resolved\nand we are in the process of verifying functionality. Alpha customers\nneed to apply for access, agree to applicable terms, and have their\nprojects whitelisted. Alpha releases don’t have to be feature complete,\nno SLAs are provided, and there are no technical support obligations, but\nthey will be far enough along that customers can actually use them in\ntest environments or for limited-use tests -- just like they would in\nnormal production cases.", - "Beta is the point at which we are ready to open a release for any\ncustomer to use. There are no SLA or technical support obligations in a\nBeta release. Products will be complete from a feature perspective, but\nmay have some open outstanding issues. Beta releases are suitable for\nlimited production use cases.", - "GA features are open to all developers and are considered stable and\nfully qualified for production use.", - "Deprecated features are scheduled to be shut down and removed. For more\ninformation, see the “Deprecation Policy” section of our [Terms of\nService](https://cloud.google.com/terms/)\nand the [Google Cloud Platform Subject to the Deprecation\nPolicy](https://cloud.google.com/terms/deprecation) documentation." + "Early Access features are limited to a closed group of testers. To use these features, you must sign up in advance and sign a Trusted Tester agreement (which includes confidentiality provisions). These features may be unstable, changed in backward-incompatible ways, and are not guaranteed to be released.", + "Alpha is a limited availability test for releases before they are cleared for widespread use. By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects whitelisted. Alpha releases don’t have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", + "Beta is the point at which we are ready to open a release for any customer to use. There are no SLA or technical support obligations in a Beta release. Products will be complete from a feature perspective, but may have some open outstanding issues. Beta releases are suitable for limited production use cases.", + "GA features are open to all developers and are considered stable and fully qualified for production use.", + "Deprecated features are scheduled to be shut down and removed. For more information, see the “Deprecation Policy” section of our [Terms of Service](https://cloud.google.com/terms/) and the [Google Cloud Platform Subject to the Deprecation Policy](https://cloud.google.com/terms/deprecation) documentation." ], "type": "string" }, @@ -2281,7 +2419,7 @@ "description": "Optional. Metadata which can be used to guide usage of the metric." 
}, "metricKind": { - "description": "Whether the metric records instantaneous values, changes to a value, etc.\nSome combinations of `metric_kind` and `value_type` might not be supported.", + "description": "Whether the metric records instantaneous values, changes to a value, etc. Some combinations of `metric_kind` and `value_type` might not be supported.", "enum": [ "METRIC_KIND_UNSPECIFIED", "GAUGE", @@ -2292,12 +2430,12 @@ "Do not use this default value.", "An instantaneous measurement of a value.", "The change in a value during a time interval.", - "A value accumulated over a time interval. Cumulative\nmeasurements in a time series should have the same start time\nand increasing end times, until an event resets the cumulative\nvalue to zero and sets a new start time for the following\npoints." + "A value accumulated over a time interval. Cumulative measurements in a time series should have the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." ], "type": "string" }, "monitoredResourceTypes": { - "description": "Read-only. If present, then a time\nseries, which is identified partially by\na metric type and a MonitoredResourceDescriptor, that is associated\nwith this metric type can only be associated with one of the monitored\nresource types listed here.", + "description": "Read-only. If present, then a time series, which is identified partially by a metric type and a MonitoredResourceDescriptor, that is associated with this metric type can only be associated with one of the monitored resource types listed here.", "items": { "type": "string" }, @@ -2308,15 +2446,15 @@ "type": "string" }, "type": { - "description": "The metric type, including its DNS name prefix. The type is not\nURL-encoded. All user-defined metric types have the DNS name\n`custom.googleapis.com` or `external.googleapis.com`. Metric types should\nuse a natural hierarchical grouping. For example:\n\n \"custom.googleapis.com/invoice/paid/amount\"\n \"external.googleapis.com/prometheus/up\"\n \"appengine.googleapis.com/http/server/response_latencies\"", + "description": "The metric type, including its DNS name prefix. The type is not URL-encoded. All user-defined metric types have the DNS name `custom.googleapis.com` or `external.googleapis.com`. Metric types should use a natural hierarchical grouping. For example: \"custom.googleapis.com/invoice/paid/amount\" \"external.googleapis.com/prometheus/up\" \"appengine.googleapis.com/http/server/response_latencies\"", "type": "string" }, "unit": { - "description": "The units in which the metric value is reported. It is only applicable\nif the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit`\ndefines the representation of the stored metric values.\n\nDifferent systems may scale the values to be more easily displayed (so a\nvalue of `0.02KBy` _might_ be displayed as `20By`, and a value of\n`3523KBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is\n`KBy`, then the value of the metric is always in thousands of bytes, no\nmatter how it may be displayed..\n\nIf you want a custom metric to record the exact number of CPU-seconds used\nby a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is\n`s{CPU}` (or equivalently `1s{CPU}` or just `s`). 
If the job uses 12,005\nCPU-seconds, then the value is written as `12005`.\n\nAlternatively, if you want a custom metric to record data in a more\ngranular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is\n`ks{CPU}`, and then write the value `12.005` (which is `12005/1000`),\nor use `Kis{CPU}` and write `11.723` (which is `12005/1024`).\n\nThe supported units are a subset of [The Unified Code for Units of\nMeasure](http://unitsofmeasure.org/ucum.html) standard:\n\n**Basic units (UNIT)**\n\n* `bit` bit\n* `By` byte\n* `s` second\n* `min` minute\n* `h` hour\n* `d` day\n\n**Prefixes (PREFIX)**\n\n* `k` kilo (10^3)\n* `M` mega (10^6)\n* `G` giga (10^9)\n* `T` tera (10^12)\n* `P` peta (10^15)\n* `E` exa (10^18)\n* `Z` zetta (10^21)\n* `Y` yotta (10^24)\n\n* `m` milli (10^-3)\n* `u` micro (10^-6)\n* `n` nano (10^-9)\n* `p` pico (10^-12)\n* `f` femto (10^-15)\n* `a` atto (10^-18)\n* `z` zepto (10^-21)\n* `y` yocto (10^-24)\n\n* `Ki` kibi (2^10)\n* `Mi` mebi (2^20)\n* `Gi` gibi (2^30)\n* `Ti` tebi (2^40)\n* `Pi` pebi (2^50)\n\n**Grammar**\n\nThe grammar also includes these connectors:\n\n* `/` division or ratio (as an infix operator). For examples,\n `kBy/{email}` or `MiBy/10ms` (although you should almost never\n have `/s` in a metric `unit`; rates should always be computed at\n query time from the underlying cumulative or delta value).\n* `.` multiplication or composition (as an infix operator). For\n examples, `GBy.d` or `k{watt}.h`.\n\nThe grammar for a unit is as follows:\n\n Expression = Component { \".\" Component } { \"/\" Component } ;\n\n Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ]\n | Annotation\n | \"1\"\n ;\n\n Annotation = \"{\" NAME \"}\" ;\n\nNotes:\n\n* `Annotation` is just a comment if it follows a `UNIT`. If the annotation\n is used alone, then the unit is equivalent to `1`. For examples,\n `{request}/s == 1/s`, `By{transmitted}/s == By/s`.\n* `NAME` is a sequence of non-blank printable ASCII characters not\n containing `{` or `}`.\n* `1` represents a unitary [dimensionless\n unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such\n as in `1/s`. It is typically used when none of the basic units are\n appropriate. For example, \"new users per day\" can be represented as\n `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new\n users). Alternatively, \"thousands of page views per day\" would be\n represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric\n value of `5.3` would mean \"5300 page views per day\").\n* `%` represents dimensionless value of 1/100, and annotates values giving\n a percentage (so the metric values are typically in the range of 0..100,\n and a metric value `3` means \"3 percent\").\n* `10^2.%` indicates a metric contains a ratio, typically in the range\n 0..1, that will be multiplied by 100 and displayed as a percentage\n (so a metric value `0.03` means \"3 percent\").", + "description": "The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems may scale the values to be more easily displayed (so a value of `0.02KBy` _might_ be displayed as `20By`, and a value of `3523KBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `KBy`, then the value of the metric is always in thousands of bytes, no matter how it may be displayed.. 
If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). The supported units are a subset of [The Unified Code for Units of Measure](http://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component { \".\" Component } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users). Alternatively, \"thousands of page views per day\" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean \"5300 page views per day\"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means \"3 percent\").", "type": "string" }, "valueType": { - "description": "Whether the measurement is an integer, a floating-point number, etc.\nSome combinations of `metric_kind` and `value_type` might not be supported.", + "description": "Whether the measurement is an integer, a floating-point number, etc. 
Some combinations of `metric_kind` and `value_type` might not be supported.", "enum": [ "VALUE_TYPE_UNSPECIFIED", "BOOL", @@ -2328,10 +2466,10 @@ ], "enumDescriptions": [ "Do not use this default value.", - "The value is a boolean.\nThis value type can be used only if the metric kind is `GAUGE`.", + "The value is a boolean. This value type can be used only if the metric kind is `GAUGE`.", "The value is a signed 64-bit integer.", "The value is a double precision floating point number.", - "The value is a text string.\nThis value type can be used only if the metric kind is `GAUGE`.", + "The value is a text string. This value type can be used only if the metric kind is `GAUGE`.", "The value is a `Distribution`.", "The value is money." ], @@ -2345,7 +2483,7 @@ "id": "MetricDescriptorMetadata", "properties": { "ingestDelay": { - "description": "The delay of data points caused by ingestion. Data points older than this\nage are guaranteed to be ingested and available to be read, excluding\ndata loss due to errors.", + "description": "The delay of data points caused by ingestion. Data points older than this age are guaranteed to be ingested and available to be read, excluding data loss due to errors.", "format": "google-duration", "type": "string" }, @@ -2365,16 +2503,16 @@ "Do not use this default value.", "The feature is not yet implemented. Users can not use it.", "Prelaunch features are hidden from users and are only visible internally.", - "Early Access features are limited to a closed group of testers. To use\nthese features, you must sign up in advance and sign a Trusted Tester\nagreement (which includes confidentiality provisions). These features may\nbe unstable, changed in backward-incompatible ways, and are not\nguaranteed to be released.", - "Alpha is a limited availability test for releases before they are cleared\nfor widespread use. By Alpha, all significant design issues are resolved\nand we are in the process of verifying functionality. Alpha customers\nneed to apply for access, agree to applicable terms, and have their\nprojects whitelisted. Alpha releases don’t have to be feature complete,\nno SLAs are provided, and there are no technical support obligations, but\nthey will be far enough along that customers can actually use them in\ntest environments or for limited-use tests -- just like they would in\nnormal production cases.", - "Beta is the point at which we are ready to open a release for any\ncustomer to use. There are no SLA or technical support obligations in a\nBeta release. Products will be complete from a feature perspective, but\nmay have some open outstanding issues. Beta releases are suitable for\nlimited production use cases.", - "GA features are open to all developers and are considered stable and\nfully qualified for production use.", - "Deprecated features are scheduled to be shut down and removed. For more\ninformation, see the “Deprecation Policy” section of our [Terms of\nService](https://cloud.google.com/terms/)\nand the [Google Cloud Platform Subject to the Deprecation\nPolicy](https://cloud.google.com/terms/deprecation) documentation." + "Early Access features are limited to a closed group of testers. To use these features, you must sign up in advance and sign a Trusted Tester agreement (which includes confidentiality provisions). These features may be unstable, changed in backward-incompatible ways, and are not guaranteed to be released.", + "Alpha is a limited availability test for releases before they are cleared for widespread use. 
By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects whitelisted. Alpha releases don’t have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", + "Beta is the point at which we are ready to open a release for any customer to use. There are no SLA or technical support obligations in a Beta release. Products will be complete from a feature perspective, but may have some open outstanding issues. Beta releases are suitable for limited production use cases.", + "GA features are open to all developers and are considered stable and fully qualified for production use.", + "Deprecated features are scheduled to be shut down and removed. For more information, see the “Deprecation Policy” section of our [Terms of Service](https://cloud.google.com/terms/) and the [Google Cloud Platform Subject to the Deprecation Policy](https://cloud.google.com/terms/deprecation) documentation." ], "type": "string" }, "samplePeriod": { - "description": "The sampling period of metric data points. For metrics which are written\nperiodically, consecutive data points are stored at this time interval,\nexcluding data loss due to errors. Metrics with a higher granularity have\na smaller sampling period.", + "description": "The sampling period of metric data points. For metrics which are written periodically, consecutive data points are stored at this time interval, excluding data loss due to errors. Metrics with a higher granularity have a smaller sampling period.", "format": "google-duration", "type": "string" } @@ -2382,7 +2520,7 @@ "type": "object" }, "MetricRule": { - "description": "Bind API methods to metrics. Binding a method to a metric causes that\nmetric's configured quota behaviors to apply to the method call.", + "description": "Bind API methods to metrics. Binding a method to a metric causes that metric's configured quota behaviors to apply to the method call.", "id": "MetricRule", "properties": { "metricCosts": { @@ -2390,18 +2528,18 @@ "format": "int64", "type": "string" }, - "description": "Metrics to update when the selected methods are called, and the associated\ncost applied to each metric.\n\nThe key of the map is the metric name, and the values are the amount\nincreased for the metric against which the quota limits are defined.\nThe value must not be negative.", + "description": "Metrics to update when the selected methods are called, and the associated cost applied to each metric. The key of the map is the metric name, and the values are the amount increased for the metric against which the quota limits are defined. The value must not be negative.", "type": "object" }, "selector": { - "description": "Selects the methods to which this rule applies.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Refer to selector for syntax details.", "type": "string" } }, "type": "object" }, "Mixin": { - "description": "Declares an API Interface to be included in this interface. 
The including\ninterface must redeclare all the methods from the included interface, but\ndocumentation and options are inherited as follows:\n\n- If after comment and whitespace stripping, the documentation\n string of the redeclared method is empty, it will be inherited\n from the original method.\n\n- Each annotation belonging to the service config (http,\n visibility) which is not set in the redeclared method will be\n inherited.\n\n- If an http annotation is inherited, the path pattern will be\n modified as follows. Any version prefix will be replaced by the\n version of the including interface plus the root path if\n specified.\n\nExample of a simple mixin:\n\n package google.acl.v1;\n service AccessControl {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v1/{resource=**}:getAcl\";\n }\n }\n\n package google.storage.v2;\n service Storage {\n // rpc GetAcl(GetAclRequest) returns (Acl);\n\n // Get a data record.\n rpc GetData(GetDataRequest) returns (Data) {\n option (google.api.http).get = \"/v2/{resource=**}\";\n }\n }\n\nExample of a mixin configuration:\n\n apis:\n - name: google.storage.v2.Storage\n mixins:\n - name: google.acl.v1.AccessControl\n\nThe mixin construct implies that all methods in `AccessControl` are\nalso declared with same name and request/response types in\n`Storage`. A documentation generator or annotation processor will\nsee the effective `Storage.GetAcl` method after inherting\ndocumentation and annotations as follows:\n\n service Storage {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v2/{resource=**}:getAcl\";\n }\n ...\n }\n\nNote how the version in the path pattern changed from `v1` to `v2`.\n\nIf the `root` field in the mixin is specified, it should be a\nrelative path under which inherited HTTP paths are placed. Example:\n\n apis:\n - name: google.storage.v2.Storage\n mixins:\n - name: google.acl.v1.AccessControl\n root: acls\n\nThis implies the following inherited HTTP annotation:\n\n service Storage {\n // Get the underlying ACL object.\n rpc GetAcl(GetAclRequest) returns (Acl) {\n option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\";\n }\n ...\n }", + "description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. 
rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", "id": "Mixin", "properties": { "name": { @@ -2409,26 +2547,26 @@ "type": "string" }, "root": { - "description": "If non-empty specifies a path under which inherited HTTP paths\nare rooted.", + "description": "If non-empty specifies a path under which inherited HTTP paths are rooted.", "type": "string" } }, "type": "object" }, "MonitoredResourceDescriptor": { - "description": "An object that describes the schema of a MonitoredResource object using a\ntype name and a set of labels. For example, the monitored resource\ndescriptor for Google Compute Engine VM instances has a type of\n`\"gce_instance\"` and specifies the use of the labels `\"instance_id\"` and\n`\"zone\"` to identify particular VM instances.\n\nDifferent APIs can support different monitored resource types. APIs generally\nprovide a `list` method that returns the monitored resource descriptors used\nby the API.", + "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of `\"gce_instance\"` and specifies the use of the labels `\"instance_id\"` and `\"zone\"` to identify particular VM instances. Different APIs can support different monitored resource types. APIs generally provide a `list` method that returns the monitored resource descriptors used by the API. ", "id": "MonitoredResourceDescriptor", "properties": { "description": { - "description": "Optional. A detailed description of the monitored resource type that might\nbe used in documentation.", + "description": "Optional. A detailed description of the monitored resource type that might be used in documentation.", "type": "string" }, "displayName": { - "description": "Optional. A concise name for the monitored resource type that might be\ndisplayed in user interfaces. It should be a Title Cased Noun Phrase,\nwithout any article or other determiners. For example,\n`\"Google Cloud SQL Database\"`.", + "description": "Optional. A concise name for the monitored resource type that might be displayed in user interfaces. It should be a Title Cased Noun Phrase, without any article or other determiners. For example, `\"Google Cloud SQL Database\"`.", "type": "string" }, "labels": { - "description": "Required. 
A set of labels used to describe instances of this monitored\nresource type. For example, an individual Google Cloud SQL database is\nidentified by values for the labels `\"database_id\"` and `\"zone\"`.", + "description": "Required. A set of labels used to describe instances of this monitored resource type. For example, an individual Google Cloud SQL database is identified by values for the labels `\"database_id\"` and `\"zone\"`.", "items": { "$ref": "LabelDescriptor" }, @@ -2450,38 +2588,38 @@ "Do not use this default value.", "The feature is not yet implemented. Users can not use it.", "Prelaunch features are hidden from users and are only visible internally.", - "Early Access features are limited to a closed group of testers. To use\nthese features, you must sign up in advance and sign a Trusted Tester\nagreement (which includes confidentiality provisions). These features may\nbe unstable, changed in backward-incompatible ways, and are not\nguaranteed to be released.", - "Alpha is a limited availability test for releases before they are cleared\nfor widespread use. By Alpha, all significant design issues are resolved\nand we are in the process of verifying functionality. Alpha customers\nneed to apply for access, agree to applicable terms, and have their\nprojects whitelisted. Alpha releases don’t have to be feature complete,\nno SLAs are provided, and there are no technical support obligations, but\nthey will be far enough along that customers can actually use them in\ntest environments or for limited-use tests -- just like they would in\nnormal production cases.", - "Beta is the point at which we are ready to open a release for any\ncustomer to use. There are no SLA or technical support obligations in a\nBeta release. Products will be complete from a feature perspective, but\nmay have some open outstanding issues. Beta releases are suitable for\nlimited production use cases.", - "GA features are open to all developers and are considered stable and\nfully qualified for production use.", - "Deprecated features are scheduled to be shut down and removed. For more\ninformation, see the “Deprecation Policy” section of our [Terms of\nService](https://cloud.google.com/terms/)\nand the [Google Cloud Platform Subject to the Deprecation\nPolicy](https://cloud.google.com/terms/deprecation) documentation." + "Early Access features are limited to a closed group of testers. To use these features, you must sign up in advance and sign a Trusted Tester agreement (which includes confidentiality provisions). These features may be unstable, changed in backward-incompatible ways, and are not guaranteed to be released.", + "Alpha is a limited availability test for releases before they are cleared for widespread use. By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects whitelisted. Alpha releases don’t have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", + "Beta is the point at which we are ready to open a release for any customer to use. There are no SLA or technical support obligations in a Beta release. Products will be complete from a feature perspective, but may have some open outstanding issues. 
Beta releases are suitable for limited production use cases.", + "GA features are open to all developers and are considered stable and fully qualified for production use.", + "Deprecated features are scheduled to be shut down and removed. For more information, see the “Deprecation Policy” section of our [Terms of Service](https://cloud.google.com/terms/) and the [Google Cloud Platform Subject to the Deprecation Policy](https://cloud.google.com/terms/deprecation) documentation." ], "type": "string" }, "name": { - "description": "Optional. The resource name of the monitored resource descriptor:\n`\"projects/{project_id}/monitoredResourceDescriptors/{type}\"` where\n{type} is the value of the `type` field in this object and\n{project_id} is a project ID that provides API-specific context for\naccessing the type. APIs that do not use project information can use the\nresource name format `\"monitoredResourceDescriptors/{type}\"`.", + "description": "Optional. The resource name of the monitored resource descriptor: `\"projects/{project_id}/monitoredResourceDescriptors/{type}\"` where {type} is the value of the `type` field in this object and {project_id} is a project ID that provides API-specific context for accessing the type. APIs that do not use project information can use the resource name format `\"monitoredResourceDescriptors/{type}\"`.", "type": "string" }, "type": { - "description": "Required. The monitored resource type. For example, the type\n`\"cloudsql_database\"` represents databases in Google Cloud SQL.\nThe maximum length of this value is 256 characters.", + "description": "Required. The monitored resource type. For example, the type `\"cloudsql_database\"` represents databases in Google Cloud SQL.", "type": "string" } }, "type": "object" }, "Monitoring": { - "description": "Monitoring configuration of the service.\n\nThe example below shows how to configure monitored resources and metrics\nfor monitoring. In the example, a monitored resource and two metrics are\ndefined. The `library.googleapis.com/book/returned_count` metric is sent\nto both producer and consumer projects, whereas the\n`library.googleapis.com/book/overdue_count` metric is only sent to the\nconsumer project.\n\n monitored_resources:\n - type: library.googleapis.com/branch\n labels:\n - key: /city\n description: The city where the library branch is located in.\n - key: /name\n description: The name of the branch.\n metrics:\n - name: library.googleapis.com/book/returned_count\n metric_kind: DELTA\n value_type: INT64\n labels:\n - key: /customer_id\n - name: library.googleapis.com/book/overdue_count\n metric_kind: GAUGE\n value_type: INT64\n labels:\n - key: /customer_id\n monitoring:\n producer_destinations:\n - monitored_resource: library.googleapis.com/branch\n metrics:\n - library.googleapis.com/book/returned_count\n consumer_destinations:\n - monitored_resource: library.googleapis.com/branch\n metrics:\n - library.googleapis.com/book/returned_count\n - library.googleapis.com/book/overdue_count", + "description": "Monitoring configuration of the service. The example below shows how to configure monitored resources and metrics for monitoring. In the example, a monitored resource and two metrics are defined. The `library.googleapis.com/book/returned_count` metric is sent to both producer and consumer projects, whereas the `library.googleapis.com/book/num_overdue` metric is only sent to the consumer project. 
monitored_resources: - type: library.googleapis.com/Branch display_name: \"Library Branch\" description: \"A branch of a library.\" launch_stage: GA labels: - key: resource_container description: \"The Cloud container (ie. project id) for the Branch.\" - key: location description: \"The location of the library branch.\" - key: branch_id description: \"The id of the branch.\" metrics: - name: library.googleapis.com/book/returned_count display_name: \"Books Returned\" description: \"The count of books that have been returned.\" launch_stage: GA metric_kind: DELTA value_type: INT64 unit: \"1\" labels: - key: customer_id description: \"The id of the customer.\" - name: library.googleapis.com/book/num_overdue display_name: \"Books Overdue\" description: \"The current number of overdue books.\" launch_stage: GA metric_kind: GAUGE value_type: INT64 unit: \"1\" labels: - key: customer_id description: \"The id of the customer.\" monitoring: producer_destinations: - monitored_resource: library.googleapis.com/Branch metrics: - library.googleapis.com/book/returned_count consumer_destinations: - monitored_resource: library.googleapis.com/Branch metrics: - library.googleapis.com/book/returned_count - library.googleapis.com/book/num_overdue", "id": "Monitoring", "properties": { "consumerDestinations": { - "description": "Monitoring configurations for sending metrics to the consumer project.\nThere can be multiple consumer destinations. A monitored resouce type may\nappear in multiple monitoring destinations if different aggregations are\nneeded for different sets of metrics associated with that monitored\nresource type. A monitored resource and metric pair may only be used once\nin the Monitoring configuration.", + "description": "Monitoring configurations for sending metrics to the consumer project. There can be multiple consumer destinations. A monitored resource type may appear in multiple monitoring destinations if different aggregations are needed for different sets of metrics associated with that monitored resource type. A monitored resource and metric pair may only be used once in the Monitoring configuration.", "items": { "$ref": "MonitoringDestination" }, "type": "array" }, "producerDestinations": { - "description": "Monitoring configurations for sending metrics to the producer project.\nThere can be multiple producer destinations. A monitored resouce type may\nappear in multiple monitoring destinations if different aggregations are\nneeded for different sets of metrics associated with that monitored\nresource type. A monitored resource and metric pair may only be used once\nin the Monitoring configuration.", + "description": "Monitoring configurations for sending metrics to the producer project. There can be multiple producer destinations. A monitored resource type may appear in multiple monitoring destinations if different aggregations are needed for different sets of metrics associated with that monitored resource type. 
A monitored resource and metric pair may only be used once in the Monitoring configuration.", "items": { "$ref": "MonitoringDestination" }, @@ -2491,40 +2629,40 @@ "type": "object" }, "MonitoringDestination": { - "description": "Configuration of a specific monitoring destination (the producer project\nor the consumer project).", + "description": "Configuration of a specific monitoring destination (the producer project or the consumer project).", "id": "MonitoringDestination", "properties": { "metrics": { - "description": "Types of the metrics to report to this monitoring destination.\nEach type must be defined in Service.metrics section.", + "description": "Types of the metrics to report to this monitoring destination. Each type must be defined in Service.metrics section.", "items": { "type": "string" }, "type": "array" }, "monitoredResource": { - "description": "The monitored resource type. The type must be defined in\nService.monitored_resources section.", + "description": "The monitored resource type. The type must be defined in Service.monitored_resources section.", "type": "string" } }, "type": "object" }, "OAuthRequirements": { - "description": "OAuth scopes are a way to define data and permissions on data. For example,\nthere are scopes defined for \"Read-only access to Google Calendar\" and\n\"Access to Cloud Platform\". Users can consent to a scope for an application,\ngiving it permission to access that data on their behalf.\n\nOAuth scope specifications should be fairly coarse grained; a user will need\nto see and understand the text description of what your scope means.\n\nIn most cases: use one or at most two OAuth scopes for an entire family of\nproducts. If your product has multiple APIs, you should probably be sharing\nthe OAuth scope across all of those APIs.\n\nWhen you need finer grained OAuth consent screens: talk with your product\nmanagement about how developers will use them in practice.\n\nPlease note that even though each of the canonical scopes is enough for a\nrequest to be accepted and passed to the backend, a request can still fail\ndue to the backend requiring additional scopes or permissions.", + "description": "OAuth scopes are a way to define data and permissions on data. For example, there are scopes defined for \"Read-only access to Google Calendar\" and \"Access to Cloud Platform\". Users can consent to a scope for an application, giving it permission to access that data on their behalf. OAuth scope specifications should be fairly coarse grained; a user will need to see and understand the text description of what your scope means. In most cases: use one or at most two OAuth scopes for an entire family of products. If your product has multiple APIs, you should probably be sharing the OAuth scope across all of those APIs. When you need finer grained OAuth consent screens: talk with your product management about how developers will use them in practice. Please note that even though each of the canonical scopes is enough for a request to be accepted and passed to the backend, a request can still fail due to the backend requiring additional scopes or permissions.", "id": "OAuthRequirements", "properties": { "canonicalScopes": { - "description": "The list of publicly documented OAuth scopes that are allowed access. 
An\nOAuth token containing any of these scopes will be accepted.\n\nExample:\n\n canonical_scopes: https://www.googleapis.com/auth/calendar,\n https://www.googleapis.com/auth/calendar.read", + "description": "The list of publicly documented OAuth scopes that are allowed access. An OAuth token containing any of these scopes will be accepted. Example: canonical_scopes: https://www.googleapis.com/auth/calendar, https://www.googleapis.com/auth/calendar.read", "type": "string" } }, "type": "object" }, "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", "properties": { "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", "type": "boolean" }, "error": { @@ -2536,11 +2674,11 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", "type": "object" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, "response": { @@ -2548,7 +2686,7 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. 
For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", "type": "object" } }, @@ -2559,7 +2697,7 @@ "id": "OperationMetadata", "properties": { "resourceNames": { - "description": "The full name of the resources that this operation is directly\nassociated with.", + "description": "The full name of the resources that this operation is directly associated with.", "items": { "type": "string" }, @@ -2569,11 +2707,11 @@ "type": "object" }, "Option": { - "description": "A protocol buffer option, which can be attached to a message, field,\nenumeration, etc.", + "description": "A protocol buffer option, which can be attached to a message, field, enumeration, etc.", "id": "Option", "properties": { "name": { - "description": "The option's name. For protobuf built-in options (options defined in\ndescriptor.proto), this is the short name. For example, `\"map_entry\"`.\nFor custom options, it should be the fully-qualified name. For example,\n`\"google.api.http\"`.", + "description": "The option's name. For protobuf built-in options (options defined in descriptor.proto), this is the short name. For example, `\"map_entry\"`. For custom options, it should be the fully-qualified name. For example, `\"google.api.http\"`.", "type": "string" }, "value": { @@ -2581,26 +2719,40 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The option's value packed in an Any message. If the value is a primitive,\nthe corresponding wrapper type defined in google/protobuf/wrappers.proto\nshould be used. If the value is an enum, it should be stored as an int32\nvalue using the google.protobuf.Int32Value type.", + "description": "The option's value packed in an Any message. If the value is a primitive, the corresponding wrapper type defined in google/protobuf/wrappers.proto should be used. If the value is an enum, it should be stored as an int32 value using the google.protobuf.Int32Value type.", "type": "object" } }, "type": "object" }, + "OverrideInlineSource": { + "description": "Import data embedded in the request message", + "id": "OverrideInlineSource", + "properties": { + "overrides": { + "description": "The overrides to create. Each override must have a value for 'metric' and 'unit', to specify which metric and which limit the override should be applied to. The 'name' field of the override does not need to be set; it is ignored.", + "items": { + "$ref": "QuotaOverride" + }, + "type": "array" + } + }, + "type": "object" + }, "Page": { - "description": "Represents a documentation page. A page can contain subpages to represent\nnested documentation set structure.", + "description": "Represents a documentation page. A page can contain subpages to represent nested documentation set structure.", "id": "Page", "properties": { "content": { - "description": "The Markdown content of the page. You can use \u003ccode\u003e\u0026#40;== include {path}\n==\u0026#41;\u003c/code\u003e to include content from a Markdown file.", + "description": "The Markdown content of the page. You can use (== include {path} ==) to include content from a Markdown file.", "type": "string" }, "name": { - "description": "The name of the page. It will be used as an identity of the page to\ngenerate URI of the page, text of the link to this page in navigation,\netc. The full page name (start from the root page name to this page\nconcatenated with `.`) can be used as reference to the page in your\ndocumentation. 
For example:\n\u003cpre\u003e\u003ccode\u003epages:\n- name: Tutorial\n content: \u0026#40;== include tutorial.md ==\u0026#41;\n subpages:\n - name: Java\n content: \u0026#40;== include tutorial_java.md ==\u0026#41;\n\u003c/code\u003e\u003c/pre\u003e\nYou can reference `Java` page using Markdown reference link syntax:\n`Java`.", + "description": "The name of the page. It will be used as an identity of the page to generate URI of the page, text of the link to this page in navigation, etc. The full page name (start from the root page name to this page concatenated with `.`) can be used as reference to the page in your documentation. For example: pages: - name: Tutorial content: (== include tutorial.md ==) subpages: - name: Java content: (== include tutorial_java.md ==) You can reference `Java` page using Markdown reference link syntax: `Java`.", "type": "string" }, "subpages": { - "description": "Subpages of this page. The order of subpages specified here will be\nhonored in the generated docset.", + "description": "Subpages of this page. The order of subpages specified here will be honored in the generated docset.", "items": { "$ref": "Page" }, @@ -2610,7 +2762,7 @@ "type": "object" }, "Quota": { - "description": "Quota configuration helps to achieve fairness and budgeting in service\nusage.\n\nThe metric based quota configuration works this way:\n- The service configuration defines a set of metrics.\n- For API calls, the quota.metric_rules maps methods to metrics with\n corresponding costs.\n- The quota.limits defines limits on the metrics, which will be used for\n quota checks at runtime.\n\nAn example quota configuration in yaml format:\n\n quota:\n limits:\n\n - name: apiWriteQpsPerProject\n metric: library.googleapis.com/write_calls\n unit: \"1/min/{project}\" # rate limit for consumer projects\n values:\n STANDARD: 10000\n\n\n # The metric rules bind all methods to the read_calls metric,\n # except for the UpdateBook and DeleteBook methods. These two methods\n # are mapped to the write_calls metric, with the UpdateBook method\n # consuming at twice rate as the DeleteBook method.\n metric_rules:\n - selector: \"*\"\n metric_costs:\n library.googleapis.com/read_calls: 1\n - selector: google.example.library.v1.LibraryService.UpdateBook\n metric_costs:\n library.googleapis.com/write_calls: 2\n - selector: google.example.library.v1.LibraryService.DeleteBook\n metric_costs:\n library.googleapis.com/write_calls: 1\n\n Corresponding Metric definition:\n\n metrics:\n - name: library.googleapis.com/read_calls\n display_name: Read requests\n metric_kind: DELTA\n value_type: INT64\n\n - name: library.googleapis.com/write_calls\n display_name: Write requests\n metric_kind: DELTA\n value_type: INT64\n\n", + "description": "Quota configuration helps to achieve fairness and budgeting in service usage. The metric based quota configuration works this way: - The service configuration defines a set of metrics. - For API calls, the quota.metric_rules maps methods to metrics with corresponding costs. - The quota.limits defines limits on the metrics, which will be used for quota checks at runtime. An example quota configuration in yaml format: quota: limits: - name: apiWriteQpsPerProject metric: library.googleapis.com/write_calls unit: \"1/min/{project}\" # rate limit for consumer projects values: STANDARD: 10000 # The metric rules bind all methods to the read_calls metric, # except for the UpdateBook and DeleteBook methods. 
These two methods # are mapped to the write_calls metric, with the UpdateBook method # consuming at twice rate as the DeleteBook method. metric_rules: - selector: \"*\" metric_costs: library.googleapis.com/read_calls: 1 - selector: google.example.library.v1.LibraryService.UpdateBook metric_costs: library.googleapis.com/write_calls: 2 - selector: google.example.library.v1.LibraryService.DeleteBook metric_costs: library.googleapis.com/write_calls: 1 Corresponding Metric definition: metrics: - name: library.googleapis.com/read_calls display_name: Read requests metric_kind: DELTA value_type: INT64 - name: library.googleapis.com/write_calls display_name: Write requests metric_kind: DELTA value_type: INT64 ", "id": "Quota", "properties": { "limits": { @@ -2621,7 +2773,7 @@ "type": "array" }, "metricRules": { - "description": "List of `MetricRule` definitions, each one mapping a selected method to one\nor more metrics.", + "description": "List of `MetricRule` definitions, each one mapping a selected method to one or more metrics.", "items": { "$ref": "MetricRule" }, @@ -2643,7 +2795,7 @@ "description": "Consumer override on this quota bucket." }, "defaultLimit": { - "description": "The default limit of this quota bucket, as specified by the service\nconfiguration.", + "description": "The default limit of this quota bucket, as specified by the service configuration.", "format": "int64", "type": "string" }, @@ -2651,11 +2803,11 @@ "additionalProperties": { "type": "string" }, - "description": "The dimensions of this quota bucket.\n\nIf this map is empty, this is the global bucket, which is the default quota\nvalue applied to all requests that do not have a more specific override.\n\nIf this map is nonempty, the default limit, effective limit, and quota\noverrides apply only to requests that have the dimensions given in the map.\n\nFor example, if the map has key \"region\" and value \"us-east-1\", then the\nspecified effective limit is only effective in that region, and the\nspecified overrides apply only in that region.", + "description": "The dimensions of this quota bucket. If this map is empty, this is the global bucket, which is the default quota value applied to all requests that do not have a more specific override. If this map is nonempty, the default limit, effective limit, and quota overrides apply only to requests that have the dimensions given in the map. For example, if the map has key \"region\" and value \"us-east-1\", then the specified effective limit is only effective in that region, and the specified overrides apply only in that region.", "type": "object" }, "effectiveLimit": { - "description": "The effective limit of this quota bucket. Equal to default_limit if there\nare no overrides.", + "description": "The effective limit of this quota bucket. Equal to default_limit if there are no overrides.", "format": "int64", "type": "string" }, @@ -2667,46 +2819,46 @@ "type": "object" }, "QuotaLimit": { - "description": "`QuotaLimit` defines a specific limit that applies over a specified duration\nfor a limit type. There can be at most one limit for a duration and limit\ntype combination defined within a `QuotaGroup`.", + "description": "`QuotaLimit` defines a specific limit that applies over a specified duration for a limit type. There can be at most one limit for a duration and limit type combination defined within a `QuotaGroup`.", "id": "QuotaLimit", "properties": { "defaultLimit": { - "description": "Default number of tokens that can be consumed during the specified\nduration. 
This is the number of tokens assigned when a client\napplication developer activates the service for his/her project.\n\nSpecifying a value of 0 will block all requests. This can be used if you\nare provisioning quota to selected consumers and blocking others.\nSimilarly, a value of -1 will indicate an unlimited quota. No other\nnegative values are allowed.\n\nUsed by group-based quotas only.", + "description": "Default number of tokens that can be consumed during the specified duration. This is the number of tokens assigned when a client application developer activates the service for his/her project. Specifying a value of 0 will block all requests. This can be used if you are provisioning quota to selected consumers and blocking others. Similarly, a value of -1 will indicate an unlimited quota. No other negative values are allowed. Used by group-based quotas only.", "format": "int64", "type": "string" }, "description": { - "description": "Optional. User-visible, extended description for this quota limit.\nShould be used only when more context is needed to understand this limit\nthan provided by the limit's display name (see: `display_name`).", + "description": "Optional. User-visible, extended description for this quota limit. Should be used only when more context is needed to understand this limit than provided by the limit's display name (see: `display_name`).", "type": "string" }, "displayName": { - "description": "User-visible display name for this limit.\nOptional. If not set, the UI will provide a default display name based on\nthe quota configuration. This field can be used to override the default\ndisplay name generated from the configuration.", + "description": "User-visible display name for this limit. Optional. If not set, the UI will provide a default display name based on the quota configuration. This field can be used to override the default display name generated from the configuration.", "type": "string" }, "duration": { - "description": "Duration of this limit in textual notation. Must be \"100s\" or \"1d\".\n\nUsed by group-based quotas only.", + "description": "Duration of this limit in textual notation. Must be \"100s\" or \"1d\". Used by group-based quotas only.", "type": "string" }, "freeTier": { - "description": "Free tier value displayed in the Developers Console for this limit.\nThe free tier is the number of tokens that will be subtracted from the\nbilled amount when billing is enabled.\nThis field can only be set on a limit with duration \"1d\", in a billable\ngroup; it is invalid on any other limit. If this field is not set, it\ndefaults to 0, indicating that there is no free tier for this service.\n\nUsed by group-based quotas only.", + "description": "Free tier value displayed in the Developers Console for this limit. The free tier is the number of tokens that will be subtracted from the billed amount when billing is enabled. This field can only be set on a limit with duration \"1d\", in a billable group; it is invalid on any other limit. If this field is not set, it defaults to 0, indicating that there is no free tier for this service. Used by group-based quotas only.", "format": "int64", "type": "string" }, "maxLimit": { - "description": "Maximum number of tokens that can be consumed during the specified\nduration. Client application developers can override the default limit up\nto this maximum. If specified, this value cannot be set to a value less\nthan the default limit. 
If not specified, it is set to the default limit.\n\nTo allow clients to apply overrides with no upper bound, set this to -1,\nindicating unlimited maximum quota.\n\nUsed by group-based quotas only.", + "description": "Maximum number of tokens that can be consumed during the specified duration. Client application developers can override the default limit up to this maximum. If specified, this value cannot be set to a value less than the default limit. If not specified, it is set to the default limit. To allow clients to apply overrides with no upper bound, set this to -1, indicating unlimited maximum quota. Used by group-based quotas only.", "format": "int64", "type": "string" }, "metric": { - "description": "The name of the metric this quota limit applies to. The quota limits with\nthe same metric will be checked together during runtime. The metric must be\ndefined within the service config.", + "description": "The name of the metric this quota limit applies to. The quota limits with the same metric will be checked together during runtime. The metric must be defined within the service config.", "type": "string" }, "name": { - "description": "Name of the quota limit.\n\nThe name must be provided, and it must be unique within the service. The\nname can only include alphanumeric characters as well as '-'.\n\nThe maximum length of the limit name is 64 characters.", + "description": "Name of the quota limit. The name must be provided, and it must be unique within the service. The name can only include alphanumeric characters as well as '-'. The maximum length of the limit name is 64 characters.", "type": "string" }, "unit": { - "description": "Specify the unit of the quota limit. It uses the same syntax as\nMetric.unit. The supported unit kinds are determined by the quota\nbackend system.\n\nHere are some examples:\n* \"1/min/{project}\" for quota per minute per project.\n\nNote: the order of unit components is insignificant.\nThe \"1\" at the beginning is required to follow the metric unit syntax.", + "description": "Specify the unit of the quota limit. It uses the same syntax as Metric.unit. The supported unit kinds are determined by the quota backend system. Here are some examples: * \"1/min/{project}\" for quota per minute per project. Note: the order of unit components is insignificant. The \"1\" at the beginning is required to follow the metric unit syntax.", "type": "string" }, "values": { @@ -2714,7 +2866,7 @@ "format": "int64", "type": "string" }, - "description": "Tiered limit values. You must specify this as a key:value pair, with an\ninteger value that is the maximum number of requests allowed for the\nspecified unit. Currently only STANDARD is supported.", + "description": "Tiered limit values. You must specify this as a key:value pair, with an integer value that is the maximum number of requests allowed for the specified unit. Currently only STANDARD is supported.", "type": "object" } }, @@ -2724,28 +2876,32 @@ "description": "A quota override", "id": "QuotaOverride", "properties": { + "adminOverrideAncestor": { + "description": "The resource name of the ancestor that requested the override. For example: \"organizations/12345\" or \"folders/67890\". 
Used by admin overrides only.", + "type": "string" + }, "dimensions": { "additionalProperties": { "type": "string" }, - "description": "If this map is nonempty, then this override applies only to specific values\nfor dimensions defined in the limit unit.\n\nFor example, an override on a limit with the unit 1/{project}/{region}\ncould contain an entry with the key \"region\" and the value \"us-east-1\";\nthe override is only applied to quota consumed in that region.\n\nThis map has the following restrictions:\n\n* Keys that are not defined in the limit's unit are not valid keys.\n Any string appearing in {brackets} in the unit (besides {project} or\n {user}) is a defined key.\n* \"project\" is not a valid key; the project is already specified in\n the parent resource name.\n* \"user\" is not a valid key; the API does not support quota overrides\n that apply only to a specific user.\n* If \"region\" appears as a key, its value must be a valid Cloud region.\n* If \"zone\" appears as a key, its value must be a valid Cloud zone.\n* If any valid key other than \"region\" or \"zone\" appears in the map, then\n all valid keys other than \"region\" or \"zone\" must also appear in the\n map.", + "description": "If this map is nonempty, then this override applies only to specific values for dimensions defined in the limit unit. For example, an override on a limit with the unit 1/{project}/{region} could contain an entry with the key \"region\" and the value \"us-east-1\"; the override is only applied to quota consumed in that region. This map has the following restrictions: * Keys that are not defined in the limit's unit are not valid keys. Any string appearing in {brackets} in the unit (besides {project} or {user}) is a defined key. * \"project\" is not a valid key; the project is already specified in the parent resource name. * \"user\" is not a valid key; the API does not support quota overrides that apply only to a specific user. * If \"region\" appears as a key, its value must be a valid Cloud region. * If \"zone\" appears as a key, its value must be a valid Cloud zone. * If any valid key other than \"region\" or \"zone\" appears in the map, then all valid keys other than \"region\" or \"zone\" must also appear in the map.", "type": "object" }, "metric": { - "description": "The name of the metric to which this override applies.\n\nAn example name would be:\n`compute.googleapis.com/cpus`", + "description": "The name of the metric to which this override applies. An example name would be: `compute.googleapis.com/cpus`", "type": "string" }, "name": { - "description": "The resource name of the override.\nThis name is generated by the server when the override is created.\n\nExample names would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminOverrides/4a3f2c1d`\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/consumerOverrides/4a3f2c1d`\n\nThe resource name is intended to be opaque and should not be parsed for\nits component strings, since its representation could change in the future.", + "description": "The resource name of the override. This name is generated by the server when the override is created. 
Example names would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminOverrides/4a3f2c1d` `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/consumerOverrides/4a3f2c1d` The resource name is intended to be opaque and should not be parsed for its component strings, since its representation could change in the future.", "type": "string" }, "overrideValue": { - "description": "The overriding quota limit value.\nCan be any nonnegative integer, or -1 (unlimited quota).", + "description": "The overriding quota limit value. Can be any nonnegative integer, or -1 (unlimited quota).", "format": "int64", "type": "string" }, "unit": { - "description": "The limit unit of the limit to which this override applies.\n\nAn example unit would be:\n`1/{project}/{region}`\nNote that `{project}` and `{region}` are not placeholders in this example;\nthe literal characters `{` and `}` occur in the string.", + "description": "The limit unit of the limit to which this override applies. An example unit would be: `1/{project}/{region}` Note that `{project}` and `{region}` are not placeholders in this example; the literal characters `{` and `}` occur in the string.", "type": "string" } }, @@ -2757,14 +2913,14 @@ "properties": { "config": { "$ref": "ServiceConfig", - "description": "The service configuration of the available service.\nSome fields may be filtered out of the configuration in responses to\nthe `ListServices` method. These fields are present only in responses to\nthe `GetService` method." + "description": "The service configuration of the available service. Some fields may be filtered out of the configuration in responses to the `ListServices` method. These fields are present only in responses to the `GetService` method." }, "name": { - "description": "The resource name of the consumer and service.\n\nA valid name would be:\n- projects/123/services/serviceusage.googleapis.com", + "description": "The resource name of the consumer and service. A valid name would be: - projects/123/services/serviceusage.googleapis.com", "type": "string" }, "parent": { - "description": "The resource name of the consumer.\n\nA valid name would be:\n- projects/123", + "description": "The resource name of the consumer. A valid name would be: - projects/123", "type": "string" }, "state": { @@ -2775,8 +2931,8 @@ "ENABLED" ], "enumDescriptions": [ - "The default value, which indicates that the enabled state of the service\nis unspecified or not meaningful. Currently, all consumers other than\nprojects (such as folders and organizations) are always in this state.", - "The service cannot be used by this consumer. It has either been explicitly\ndisabled, or has never been enabled.", + "The default value, which indicates that the enabled state of the service is unspecified or not meaningful. Currently, all consumers other than projects (such as folders and organizations) are always in this state.", + "The service cannot be used by this consumer. It has either been explicitly disabled, or has never been enabled.", "The service has been explicitly enabled for use by this consumer." ], "type": "string" @@ -2789,7 +2945,7 @@ "id": "ServiceConfig", "properties": { "apis": { - "description": "A list of API interfaces exported by this service. Contains only the names,\nversions, and method names of the interfaces.", + "description": "A list of API interfaces exported by this service. 
Contains only the names, versions, and method names of the interfaces.", "items": { "$ref": "Api" }, @@ -2801,17 +2957,28 @@ }, "documentation": { "$ref": "Documentation", - "description": "Additional API documentation. Contains only the summary and the\ndocumentation URL." + "description": "Additional API documentation. Contains only the summary and the documentation URL." }, "endpoints": { - "description": "Configuration for network endpoints. Contains only the names and aliases\nof the endpoints.", + "description": "Configuration for network endpoints. Contains only the names and aliases of the endpoints.", "items": { "$ref": "Endpoint" }, "type": "array" }, + "monitoredResources": { + "description": "Defines the monitored resources used by this service. This is required by the Service.monitoring and Service.logging configurations.", + "items": { + "$ref": "MonitoredResourceDescriptor" + }, + "type": "array" + }, + "monitoring": { + "$ref": "Monitoring", + "description": "Monitoring configuration. This should not include the 'producer_destinations' field." + }, "name": { - "description": "The DNS address at which this service is available.\n\nAn example DNS address would be:\n`calendar.googleapis.com`.", + "description": "The DNS address at which this service is available. An example DNS address would be: `calendar.googleapis.com`.", "type": "string" }, "quota": { @@ -2830,26 +2997,26 @@ "type": "object" }, "ServiceIdentity": { - "description": "Service identity for a service. This is the identity that service producer\nshould use to access consumer resources.", + "description": "Service identity for a service. This is the identity that service producer should use to access consumer resources.", "id": "ServiceIdentity", "properties": { "email": { - "description": "The email address of the service account that a service producer would use\nto access consumer resources.", + "description": "The email address of the service account that a service producer would use to access consumer resources.", "type": "string" }, "uniqueId": { - "description": "The unique and stable id of the service account.\nhttps://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts#ServiceAccount", + "description": "The unique and stable id of the service account. https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts#ServiceAccount", "type": "string" } }, "type": "object" }, "SourceContext": { - "description": "`SourceContext` represents information about the source of a\nprotobuf element, like the file in which it is defined.", + "description": "`SourceContext` represents information about the source of a protobuf element, like the file in which it is defined.", "id": "SourceContext", "properties": { "fileName": { - "description": "The path-qualified name of the .proto file that contained the associated\nprotobuf element. For example: `\"google/protobuf/source_context.proto\"`.", + "description": "The path-qualified name of the .proto file that contained the associated protobuf element. For example: `\"google/protobuf/source_context.proto\"`.", "type": "string" } }, @@ -2874,7 +3041,7 @@ "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). 
Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -2883,7 +3050,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -2894,18 +3061,18 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, "type": "object" }, "SystemParameter": { - "description": "Define a parameter's name and location. The parameter may be passed as either\nan HTTP header or a URL query parameter, and if both are passed the behavior\nis implementation-dependent.", + "description": "Define a parameter's name and location. The parameter may be passed as either an HTTP header or a URL query parameter, and if both are passed the behavior is implementation-dependent.", "id": "SystemParameter", "properties": { "httpHeader": { - "description": "Define the HTTP header name to use for the parameter. It is case\ninsensitive.", + "description": "Define the HTTP header name to use for the parameter. It is case insensitive.", "type": "string" }, "name": { @@ -2913,36 +3080,36 @@ "type": "string" }, "urlQueryParameter": { - "description": "Define the URL query parameter name to use for the parameter. It is case\nsensitive.", + "description": "Define the URL query parameter name to use for the parameter. It is case sensitive.", "type": "string" } }, "type": "object" }, "SystemParameterRule": { - "description": "Define a system parameter rule mapping system parameter definitions to\nmethods.", + "description": "Define a system parameter rule mapping system parameter definitions to methods.", "id": "SystemParameterRule", "properties": { "parameters": { - "description": "Define parameters. Multiple names may be defined for a parameter.\nFor a given method call, only one of them should be used. If multiple\nnames are used the behavior is implementation-dependent.\nIf none of the specified names are present the behavior is\nparameter-dependent.", + "description": "Define parameters. Multiple names may be defined for a parameter. For a given method call, only one of them should be used. If multiple names are used the behavior is implementation-dependent. 
If none of the specified names are present the behavior is parameter-dependent.", "items": { "$ref": "SystemParameter" }, "type": "array" }, "selector": { - "description": "Selects the methods to which this rule applies. Use '*' to indicate all\nmethods in all APIs.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Use '*' to indicate all methods in all APIs. Refer to selector for syntax details.", "type": "string" } }, "type": "object" }, "SystemParameters": { - "description": "### System parameter configuration\n\nA system parameter is a special kind of parameter defined by the API\nsystem, not by an individual API. It is typically mapped to an HTTP header\nand/or a URL query parameter. This configuration specifies which methods\nchange the names of the system parameters.", + "description": "### System parameter configuration A system parameter is a special kind of parameter defined by the API system, not by an individual API. It is typically mapped to an HTTP header and/or a URL query parameter. This configuration specifies which methods change the names of the system parameters.", "id": "SystemParameters", "properties": { "rules": { - "description": "Define system parameters.\n\nThe parameters defined here will override the default parameters\nimplemented by the system. If this field is missing from the service\nconfig, default system parameters will be used. Default system parameters\nand names is implementation-dependent.\n\nExample: define api key for all methods\n\n system_parameters\n rules:\n - selector: \"*\"\n parameters:\n - name: api_key\n url_query_parameter: api_key\n\n\nExample: define 2 api key names for a specific method.\n\n system_parameters\n rules:\n - selector: \"/ListShelves\"\n parameters:\n - name: api_key\n http_header: Api-Key1\n - name: api_key\n http_header: Api-Key2\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "Define system parameters. The parameters defined here will override the default parameters implemented by the system. If this field is missing from the service config, default system parameters will be used. Default system parameters and names is implementation-dependent. Example: define api key for all methods system_parameters rules: - selector: \"*\" parameters: - name: api_key url_query_parameter: api_key Example: define 2 api key names for a specific method. system_parameters rules: - selector: \"/ListShelves\" parameters: - name: api_key http_header: Api-Key1 - name: api_key http_header: Api-Key2 **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "SystemParameterRule" }, @@ -3004,18 +3171,18 @@ "id": "Usage", "properties": { "producerNotificationChannel": { - "description": "The full resource name of a channel used for sending notifications to the\nservice producer.\n\nGoogle Service Management currently only supports\n[Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a notification\nchannel. To use Google Cloud Pub/Sub as the channel, this must be the name\nof a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format\ndocumented in https://cloud.google.com/pubsub/docs/overview.", + "description": "The full resource name of a channel used for sending notifications to the service producer. Google Service Management currently only supports [Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a notification channel. 
To use Google Cloud Pub/Sub as the channel, this must be the name of a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format documented in https://cloud.google.com/pubsub/docs/overview.", "type": "string" }, "requirements": { - "description": "Requirements that must be satisfied before a consumer project can use the\nservice. Each requirement is of the form \u003cservice.name\u003e/\u003crequirement-id\u003e;\nfor example 'serviceusage.googleapis.com/billing-enabled'.", + "description": "Requirements that must be satisfied before a consumer project can use the service. Each requirement is of the form /; for example 'serviceusage.googleapis.com/billing-enabled'.", "items": { "type": "string" }, "type": "array" }, "rules": { - "description": "A list of usage rules that apply to individual API methods.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", + "description": "A list of usage rules that apply to individual API methods. **NOTE:** All service configuration rules follow \"last one wins\" order.", "items": { "$ref": "UsageRule" }, @@ -3029,19 +3196,19 @@ "type": "object" }, "UsageRule": { - "description": "Usage configuration rules for the service.\n\nNOTE: Under development.\n\n\nUse this rule to configure unregistered calls for the service. Unregistered\ncalls are calls that do not contain consumer project identity.\n(Example: calls that do not contain an API key).\nBy default, API methods do not allow unregistered calls, and each method call\nmust be identified by a consumer project identity. Use this rule to\nallow/disallow unregistered calls.\n\nExample of an API that wants to allow unregistered calls for entire service.\n\n usage:\n rules:\n - selector: \"*\"\n allow_unregistered_calls: true\n\nExample of a method that wants to allow unregistered calls.\n\n usage:\n rules:\n - selector: \"google.example.library.v1.LibraryService.CreateBook\"\n allow_unregistered_calls: true", + "description": "Usage configuration rules for the service. NOTE: Under development. Use this rule to configure unregistered calls for the service. Unregistered calls are calls that do not contain consumer project identity. (Example: calls that do not contain an API key). By default, API methods do not allow unregistered calls, and each method call must be identified by a consumer project identity. Use this rule to allow/disallow unregistered calls. Example of an API that wants to allow unregistered calls for entire service. usage: rules: - selector: \"*\" allow_unregistered_calls: true Example of a method that wants to allow unregistered calls. usage: rules: - selector: \"google.example.library.v1.LibraryService.CreateBook\" allow_unregistered_calls: true", "id": "UsageRule", "properties": { "allowUnregisteredCalls": { - "description": "If true, the selected method allows unregistered calls, e.g. calls\nthat don't identify any user or application.", + "description": "If true, the selected method allows unregistered calls, e.g. calls that don't identify any user or application.", "type": "boolean" }, "selector": { - "description": "Selects the methods to which this rule applies. Use '*' to indicate all\nmethods in all APIs.\n\nRefer to selector for syntax details.", + "description": "Selects the methods to which this rule applies. Use '*' to indicate all methods in all APIs. 
Refer to selector for syntax details.", "type": "string" }, "skipServiceControl": { - "description": "If true, the selected method should skip service control and the control\nplane features, such as quota and billing, will not be available.\nThis flag is used by Google Cloud Endpoints to bypass checks for internal\nmethods, such as service health check methods.", + "description": "If true, the selected method should skip service control and the control plane features, such as quota and billing, will not be available. This flag is used by Google Cloud Endpoints to bypass checks for internal methods, such as service health check methods.", "type": "boolean" } }, diff --git a/vendor/google.golang.org/api/serviceusage/v1beta1/serviceusage-gen.go b/vendor/google.golang.org/api/serviceusage/v1beta1/serviceusage-gen.go index f4d1b8a97cd..66b124c0546 100644 --- a/vendor/google.golang.org/api/serviceusage/v1beta1/serviceusage-gen.go +++ b/vendor/google.golang.org/api/serviceusage/v1beta1/serviceusage-gen.go @@ -79,6 +79,7 @@ const apiId = "serviceusage:v1beta1" const apiName = "serviceusage" const apiVersion = "v1beta1" const basePath = "https://serviceusage.googleapis.com/" +const mtlsBasePath = "https://serviceusage.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -102,6 +103,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*APIService, // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -214,22 +216,76 @@ type ServicesConsumerQuotaMetricsLimitsConsumerOverridesService struct { s *APIService } -// Api: Api is a light-weight descriptor for an API -// Interface. -// +// AdminQuotaPolicy: Quota policy created by quota administrator. +type AdminQuotaPolicy struct { + // Container: The cloud resource container at which the quota policy is + // created. The format is {container_type}/{container_number} + Container string `json:"container,omitempty"` + + // Dimensions: If this map is nonempty, then this policy applies only + // to specific values for dimensions defined in the limit unit. For + // example, an policy on a limit with the unit 1/{project}/{region} + // could contain an entry with the key "region" and the value + // "us-east-1"; the policy is only applied to quota consumed in that + // region. This map has the following restrictions: * If "region" + // appears as a key, its value must be a valid Cloud region. * If "zone" + // appears as a key, its value must be a valid Cloud zone. * Keys other + // than "region" or "zone" are not valid. + Dimensions map[string]string `json:"dimensions,omitempty"` + + // Metric: The name of the metric to which this policy applies. An + // example name would be: `compute.googleapis.com/cpus` + Metric string `json:"metric,omitempty"` + + // Name: The resource name of the policy. This name is generated by the + // server when the policy is created. Example names would be: + // `organizations/123/services/compute.googleapis.com/consumerQuotaMetric + // s/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminQuotaP + // olicies/4a3f2c1d` + Name string `json:"name,omitempty"` + + // PolicyValue: The quota policy value. Can be any nonnegative integer, + // or -1 (unlimited quota). 
+ PolicyValue int64 `json:"policyValue,omitempty,string"` + + // Unit: The limit unit of the limit to which this policy applies. An + // example unit would be: `1/{project}/{region}` Note that `{project}` + // and `{region}` are not placeholders in this example; the literal + // characters `{` and `}` occur in the string. + Unit string `json:"unit,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Container") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Container") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AdminQuotaPolicy) MarshalJSON() ([]byte, error) { + type NoMethod AdminQuotaPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Api: Api is a light-weight descriptor for an API Interface. // Interfaces are also described as "protocol buffer services" in some -// contexts, -// such as by the "service" keyword in a .proto file, but they are -// different -// from API Services, which represent a concrete implementation of an -// interface -// as opposed to simply a description of methods and bindings. They are -// also -// sometimes simply referred to as "APIs" in other contexts, such as the -// name of -// this message itself. See -// https://cloud.google.com/apis/design/glossary for -// detailed terminology. +// contexts, such as by the "service" keyword in a .proto file, but they +// are different from API Services, which represent a concrete +// implementation of an interface as opposed to simply a description of +// methods and bindings. They are also sometimes simply referred to as +// "APIs" in other contexts, such as the name of this message itself. +// See https://cloud.google.com/apis/design/glossary for detailed +// terminology. type Api struct { // Methods: The methods of this interface, in unspecified order. Methods []*Method `json:"methods,omitempty"` @@ -238,16 +294,14 @@ type Api struct { Mixins []*Mixin `json:"mixins,omitempty"` // Name: The fully qualified name of this interface, including package - // name - // followed by the interface's simple name. + // name followed by the interface's simple name. Name string `json:"name,omitempty"` // Options: Any metadata attached to the interface. Options []*Option `json:"options,omitempty"` // SourceContext: Source context for the protocol buffer service - // represented by this - // message. + // represented by this message. SourceContext *SourceContext `json:"sourceContext,omitempty"` // Syntax: The source syntax of the service. @@ -258,35 +312,20 @@ type Api struct { Syntax string `json:"syntax,omitempty"` // Version: A version string for this interface. If specified, must have - // the form - // `major-version.minor-version`, as in `1.10`. If the minor version - // is - // omitted, it defaults to zero. 
If the entire version field is empty, - // the - // major version is derived from the package name, as outlined below. If - // the - // field is not empty, the version in the package name will be verified - // to be - // consistent with what is provided here. - // - // The versioning schema uses [semantic - // versioning](http://semver.org) where the major version - // number - // indicates a breaking change and the minor version an - // additive, - // non-breaking change. Both version numbers are signals to users - // what to expect from different versions, and should be - // carefully - // chosen based on the product plan. - // - // The major version is also reflected in the package name of - // the - // interface, which must end in `v`, as - // in - // `google.feature.v1`. For major versions 0 and 1, the suffix can - // be omitted. Zero major versions must only be used for - // experimental, non-GA interfaces. - // + // the form `major-version.minor-version`, as in `1.10`. If the minor + // version is omitted, it defaults to zero. If the entire version field + // is empty, the major version is derived from the package name, as + // outlined below. If the field is not empty, the version in the package + // name will be verified to be consistent with what is provided here. + // The versioning schema uses [semantic versioning](http://semver.org) + // where the major version number indicates a breaking change and the + // minor version an additive, non-breaking change. Both version numbers + // are signals to users what to expect from different versions, and + // should be carefully chosen based on the product plan. The major + // version is also reflected in the package name of the interface, which + // must end in `v`, as in `google.feature.v1`. For major versions 0 and + // 1, the suffix can be omitted. Zero major versions must only be used + // for experimental, non-GA interfaces. Version string `json:"version,omitempty"` // ForceSendFields is a list of field names (e.g. "Methods") to @@ -313,97 +352,57 @@ func (s *Api) MarshalJSON() ([]byte, error) { } // AuthProvider: Configuration for an authentication provider, including -// support for -// [JSON Web -// Token -// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-tok -// en-32). +// support for [JSON Web Token +// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32) +// . type AuthProvider struct { - // Audiences: The list of - // JWT - // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web- - // token-32#section-4.1.3). - // that are allowed to access. A JWT containing any of these audiences - // will - // be accepted. When this setting is absent, JWTs with audiences: - // - "https://[service.name]/[google.protobuf.Api.name]" - // - "https://[service.name]/" - // will be accepted. - // For example, if no audiences are in the setting, LibraryService API - // will - // accept JWTs with the following audiences: - // - - // - // https://library-example.googleapis.com/google.example.library.v1.LibraryService - // - https://library-example.googleapis.com/ - // - // Example: - // - // audiences: bookstore_android.apps.googleusercontent.com, - // bookstore_web.apps.googleusercontent.com + // Audiences: The list of JWT + // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-toke + // n-32#section-4.1.3). that are allowed to access. A JWT containing any + // of these audiences will be accepted. 
When this setting is absent, + // JWTs with audiences: - + // "https://[service.name]/[google.protobuf.Api.name]" - + // "https://[service.name]/" will be accepted. For example, if no + // audiences are in the setting, LibraryService API will accept JWTs + // with the following audiences: - + // https://library-example.googleapis.com/google.example.library.v1.LibraryService - https://library-example.googleapis.com/ Example: audiences: bookstore_android.apps.googleusercontent.com, + // bookstore_web.apps.googleusercontent.com Audiences string `json:"audiences,omitempty"` // AuthorizationUrl: Redirect URL if JWT token is required but not - // present or is expired. - // Implement authorizationUrl of securityDefinitions in OpenAPI spec. + // present or is expired. Implement authorizationUrl of + // securityDefinitions in OpenAPI spec. AuthorizationUrl string `json:"authorizationUrl,omitempty"` // Id: The unique identifier of the auth provider. It will be referred - // to by - // `AuthRequirement.provider_id`. - // - // Example: "bookstore_auth". + // to by `AuthRequirement.provider_id`. Example: "bookstore_auth". Id string `json:"id,omitempty"` - // Issuer: Identifies the principal that issued the JWT. - // See - // https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#sec - // tion-4.1.1 - // Usually a URL or an email address. - // - // Example: https://securetoken.google.com - // Example: 1234567-compute@developer.gserviceaccount.com + // Issuer: Identifies the principal that issued the JWT. See + // https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1 Usually a URL or an email address. Example: https://securetoken.google.com Example: + // 1234567-compute@developer.gserviceaccount.com Issuer string `json:"issuer,omitempty"` // JwksUri: URL of the provider's public key set to validate signature - // of the JWT. - // See - // [OpenID - // Discovery](https://openid.net/specs/openid-connect-discove - // ry-1_0.html#ProviderMetadata). - // Optional if the key set document: - // - can be retrieved from - // [OpenID - // + // of the JWT. See [OpenID + // Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html# + // ProviderMetadata). Optional if the key set document: - can be + // retrieved from [OpenID // Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html - // of - // the issuer. - // - can be inferred from the email domain of the issuer (e.g. a - // Google - // service account). - // - // Example: https://www.googleapis.com/oauth2/v1/certs + // of the issuer. - can be inferred from the email domain of the issuer + // (e.g. a Google service account). Example: + // https://www.googleapis.com/oauth2/v1/certs JwksUri string `json:"jwksUri,omitempty"` - // JwtLocations: Defines the locations to extract the JWT. - // - // JWT locations can be either from HTTP headers or URL query - // parameters. - // The rule is that the first match wins. The checking order is: - // checking - // all headers first, then URL query parameters. - // - // If not specified, default to use following 3 locations: - // 1) Authorization: Bearer - // 2) x-goog-iap-jwt-assertion - // 3) access_token query parameter - // - // Default locations can be specified as followings: - // jwt_locations: - // - header: Authorization - // value_prefix: "Bearer " - // - header: x-goog-iap-jwt-assertion - // - query: access_token + // JwtLocations: Defines the locations to extract the JWT. JWT locations + // can be either from HTTP headers or URL query parameters. 
The rule is + // that the first match wins. The checking order is: checking all + // headers first, then URL query parameters. If not specified, default + // to use following 3 locations: 1) Authorization: Bearer 2) + // x-goog-iap-jwt-assertion 3) access_token query parameter Default + // locations can be specified as followings: jwt_locations: - header: + // Authorization value_prefix: "Bearer " - header: + // x-goog-iap-jwt-assertion - query: access_token JwtLocations []*JwtLocation `json:"jwtLocations,omitempty"` // ForceSendFields is a list of field names (e.g. "Audiences") to @@ -430,43 +429,27 @@ func (s *AuthProvider) MarshalJSON() ([]byte, error) { } // AuthRequirement: User-defined authentication requirements, including -// support for -// [JSON Web -// Token -// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-tok -// en-32). +// support for [JSON Web Token +// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32) +// . type AuthRequirement struct { // Audiences: NOTE: This will be deprecated soon, once - // AuthProvider.audiences is - // implemented and accepted in all the runtime components. - // - // The list of - // JWT - // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web- - // token-32#section-4.1.3). - // that are allowed to access. A JWT containing any of these audiences - // will - // be accepted. When this setting is absent, only JWTs with - // audience - // "https://Service_name/API_name" - // will be accepted. For example, if no audiences are in the - // setting, - // LibraryService API will only accept JWTs with the following - // audience - // "https://library-example.googleapis.com/google.example.librar - // y.v1.LibraryService". - // - // Example: - // - // audiences: bookstore_android.apps.googleusercontent.com, - // bookstore_web.apps.googleusercontent.com + // AuthProvider.audiences is implemented and accepted in all the runtime + // components. The list of JWT + // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-toke + // n-32#section-4.1.3). that are allowed to access. A JWT containing any + // of these audiences will be accepted. When this setting is absent, + // only JWTs with audience "https://Service_name/API_name" will be + // accepted. For example, if no audiences are in the setting, + // LibraryService API will only accept JWTs with the following audience + // "https://library-example.googleapis.com/google.example.library.v1.Libr + // aryService". Example: audiences: + // bookstore_android.apps.googleusercontent.com, + // bookstore_web.apps.googleusercontent.com Audiences string `json:"audiences,omitempty"` - // ProviderId: id from authentication provider. - // - // Example: - // - // provider_id: bookstore_auth + // ProviderId: id from authentication provider. Example: provider_id: + // bookstore_auth ProviderId string `json:"providerId,omitempty"` // ForceSendFields is a list of field names (e.g. "Audiences") to @@ -493,30 +476,20 @@ func (s *AuthRequirement) MarshalJSON() ([]byte, error) { } // Authentication: `Authentication` defines the authentication -// configuration for an API. -// -// Example for an API targeted for external use: -// -// name: calendar.googleapis.com -// authentication: -// providers: -// - id: google_calendar_auth -// jwks_uri: https://www.googleapis.com/oauth2/v1/certs -// issuer: https://securetoken.google.com -// rules: -// - selector: "*" -// requirements: -// provider_id: google_calendar_auth +// configuration for an API. 
Example for an API targeted for external +// use: name: calendar.googleapis.com authentication: providers: - id: +// google_calendar_auth jwks_uri: +// https://www.googleapis.com/oauth2/v1/certs issuer: +// https://securetoken.google.com rules: - selector: "*" requirements: +// provider_id: google_calendar_auth type Authentication struct { // Providers: Defines a set of authentication providers that a service // supports. Providers []*AuthProvider `json:"providers,omitempty"` // Rules: A list of authentication rules that apply to individual API - // methods. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // methods. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*AuthenticationRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. "Providers") to @@ -542,19 +515,12 @@ func (s *Authentication) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AuthenticationRule: Authentication rules for the service. -// -// By default, if a method has any authentication requirements, every -// request -// must include a valid credential matching one of the -// requirements. -// It's an error to include more than one kind of credential in a -// single -// request. -// -// If a method doesn't have any auth requirements, request credentials -// will be -// ignored. +// AuthenticationRule: Authentication rules for the service. By default, +// if a method has any authentication requirements, every request must +// include a valid credential matching one of the requirements. It's an +// error to include more than one kind of credential in a single +// request. If a method doesn't have any auth requirements, request +// credentials will be ignored. type AuthenticationRule struct { // AllowWithoutCredential: If true, the service accepts API keys without // any other credential. @@ -566,9 +532,8 @@ type AuthenticationRule struct { // Requirements: Requirements for additional authentication providers. Requirements []*AuthRequirement `json:"requirements,omitempty"` - // Selector: Selects the methods to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects the methods to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -599,10 +564,8 @@ func (s *AuthenticationRule) MarshalJSON() ([]byte, error) { // Backend: `Backend` defines the backend configuration for a service. type Backend struct { // Rules: A list of API backend rules that apply to individual API - // methods. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // methods. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*BackendRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. "Rules") to @@ -631,197 +594,81 @@ func (s *Backend) MarshalJSON() ([]byte, error) { // BackendRule: A backend rule provides configuration for an individual // API element. type BackendRule struct { - // Address: The address of the API backend. - // - // The scheme is used to determine the backend protocol and - // security. - // The following schemes are accepted: - // - // SCHEME PROTOCOL SECURITY - // http:// HTTP None - // https:// HTTP TLS - // grpc:// gRPC None - // grpcs:// gRPC TLS - // - // It is recommended to explicitly include a scheme. 
Leaving out the - // scheme - // may cause constrasting behaviors across platforms. - // - // If the port is unspecified, the default is: - // - 80 for schemes without TLS - // - 443 for schemes with TLS - // - // For HTTP backends, use protocol - // to specify the protocol version. + // Address: The address of the API backend. The scheme is used to + // determine the backend protocol and security. The following schemes + // are accepted: SCHEME PROTOCOL SECURITY http:// HTTP None https:// + // HTTP TLS grpc:// gRPC None grpcs:// gRPC TLS It is recommended to + // explicitly include a scheme. Leaving out the scheme may cause + // constrasting behaviors across platforms. If the port is unspecified, + // the default is: - 80 for schemes without TLS - 443 for schemes with + // TLS For HTTP backends, use protocol to specify the protocol version. Address string `json:"address,omitempty"` // Deadline: The number of seconds to wait for a response from a - // request. The default - // varies based on the request protocol and deployment environment. + // request. The default varies based on the request protocol and + // deployment environment. Deadline float64 `json:"deadline,omitempty"` // DisableAuth: When disable_auth is true, a JWT ID token won't be - // generated and the - // original "Authorization" HTTP header will be preserved. If the header - // is - // used to carry the original token and is expected by the backend, - // this - // field must be set to true to preserve the header. + // generated and the original "Authorization" HTTP header will be + // preserved. If the header is used to carry the original token and is + // expected by the backend, this field must be set to true to preserve + // the header. DisableAuth bool `json:"disableAuth,omitempty"` // JwtAudience: The JWT audience is used when generating a JWT ID token - // for the backend. - // This ID token will be added in the HTTP "authorization" header, and - // sent - // to the backend. + // for the backend. This ID token will be added in the HTTP + // "authorization" header, and sent to the backend. JwtAudience string `json:"jwtAudience,omitempty"` // MinDeadline: Minimum deadline in seconds needed for this method. - // Calls having deadline - // value lower than this will be rejected. + // Calls having deadline value lower than this will be rejected. MinDeadline float64 `json:"minDeadline,omitempty"` // OperationDeadline: The number of seconds to wait for the completion - // of a long running - // operation. The default is no deadline. + // of a long running operation. The default is no deadline. OperationDeadline float64 `json:"operationDeadline,omitempty"` // Possible values: // "PATH_TRANSLATION_UNSPECIFIED" // "CONSTANT_ADDRESS" - Use the backend address as-is, with no - // modification to the path. If the - // URL pattern contains variables, the variable names and values will - // be - // appended to the query string. If a query string parameter and a - // URL - // pattern variable have the same name, this may result in duplicate - // keys in - // the query string. 
- // - // # Examples - // - // Given the following operation config: - // - // Method path: /api/company/{cid}/user/{uid} - // Backend address: - // https://example.cloudfunctions.net/getUser - // - // Requests to the following request paths will call the backend at - // the - // translated path: - // - // Request path: /api/company/widgetworks/user/johndoe - // Translated: - // - // https://example.cloudfunctions.net/getUser?cid=widgetworks&uid=johndoe - // - // Request path: /api/company/widgetworks/user/johndoe?timezone=EST - // Translated: - // + // modification to the path. If the URL pattern contains variables, the + // variable names and values will be appended to the query string. If a + // query string parameter and a URL pattern variable have the same name, + // this may result in duplicate keys in the query string. # Examples + // Given the following operation config: Method path: + // /api/company/{cid}/user/{uid} Backend address: + // https://example.cloudfunctions.net/getUser Requests to the following + // request paths will call the backend at the translated path: Request + // path: /api/company/widgetworks/user/johndoe Translated: + // https://example.cloudfunctions.net/getUser?cid=widgetworks&uid=johndoe Request path: /api/company/widgetworks/user/johndoe?timezone=EST Translated: // https://example.cloudfunctions.net/getUser?timezone=EST&cid=widgetworks&uid=johndoe // "APPEND_PATH_TO_ADDRESS" - The request path will be appended to the - // backend address. - // - // # Examples - // - // Given the following operation config: - // - // Method path: /api/company/{cid}/user/{uid} - // Backend address: https://example.appspot.com - // - // Requests to the following request paths will call the backend at - // the - // translated path: - // - // Request path: /api/company/widgetworks/user/johndoe - // Translated: - // + // backend address. # Examples Given the following operation config: + // Method path: /api/company/{cid}/user/{uid} Backend address: + // https://example.appspot.com Requests to the following request paths + // will call the backend at the translated path: Request path: + // /api/company/widgetworks/user/johndoe Translated: // https://example.appspot.com/api/company/widgetworks/user/johndoe - // - // Request path: /api/company/widgetworks/user/johndoe?timezone=EST - // Translated: - // + // Request path: /api/company/widgetworks/user/johndoe?timezone=EST + // Translated: // https://example.appspot.com/api/company/widgetworks/user/johndoe?timezone=EST PathTranslation string `json:"pathTranslation,omitempty"` - // Protocol: The protocol used for sending a request to the backend. - // The supported values are "http/1.1" and "h2". - // - // The default value is inferred from the scheme in the - // address field: - // - // SCHEME PROTOCOL - // http:// http/1.1 - // https:// http/1.1 - // grpc:// h2 - // grpcs:// h2 - // - // For secure HTTP backends (https://) that support HTTP/2, set this - // field - // to "h2" for improved performance. - // - // Configuring this field to non-default values is only supported for - // secure - // HTTP backends. This field will be ignored for all other - // backends. - // - // See - // https://www.iana.org/assignments/tls-extensiontype-valu - // es/tls-extensiontype-values.xhtml#alpn-protocol-ids - // for more details on the supported values. + // Protocol: The protocol used for sending a request to the backend. The + // supported values are "http/1.1" and "h2". 
The default value is + // inferred from the scheme in the address field: SCHEME PROTOCOL + // http:// http/1.1 https:// http/1.1 grpc:// h2 grpcs:// h2 For secure + // HTTP backends (https://) that support HTTP/2, set this field to "h2" + // for improved performance. Configuring this field to non-default + // values is only supported for secure HTTP backends. This field will be + // ignored for all other backends. See + // https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids for more details on the supported + // values. Protocol string `json:"protocol,omitempty"` - // RenameTo: Unimplemented. Do not use. - // - // The new name the selected proto elements should be renamed to. - // - // The package, the service and the method can all be renamed. - // The backend server should implement the renamed proto. However, - // clients - // should call the original method, and ESF routes the traffic to the - // renamed - // method. - // - // HTTP clients should call the URL mapped to the original method. - // gRPC and Stubby clients should call the original method with package - // name. - // - // For legacy reasons, ESF allows Stubby clients to call with the - // short name (without the package name). However, for API - // Versioning(or - // multiple methods mapped to the same short name), all Stubby clients - // must - // call the method's full name with the package name, otherwise the - // first one - // (selector) wins. - // - // If this `rename_to` is specified with a trailing `*`, the `selector` - // must - // be specified with a trailing `*` as well. The all element short - // names - // matched by the `*` in the selector will be kept in the - // `rename_to`. - // - // For example, - // rename_rules: - // - selector: |- - // google.example.library.v1.* - // rename_to: google.example.library.* - // - // The selector matches `google.example.library.v1.Library.CreateShelf` - // and - // `google.example.library.v1.Library.CreateBook`, they will be renamed - // to - // `google.example.library.Library.CreateShelf` - // and - // `google.example.library.Library.CreateBook`. It essentially renames - // the - // proto package name section of the matched proto service and methods. - RenameTo string `json:"renameTo,omitempty"` - - // Selector: Selects the methods to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects the methods to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. "Address") to @@ -926,25 +773,14 @@ func (s *BatchCreateConsumerOverridesResponse) MarshalJSON() ([]byte, error) { // BatchEnableServicesRequest: Request message for the // `BatchEnableServices` method. type BatchEnableServicesRequest struct { - // ServiceIds: The identifiers of the services to enable on the - // project. - // - // A valid identifier would be: - // serviceusage.googleapis.com - // - // Enabling services requires that each service is public or is shared - // with - // the user enabling the service. - // - // Two or more services must be specified. To enable a single - // service, - // use the `EnableService` method instead. - // - // A single request can enable a maximum of 20 services at a time. If - // more + // ServiceIds: The identifiers of the services to enable on the project. 
+ // A valid identifier would be: serviceusage.googleapis.com Enabling + // services requires that each service is public or is shared with the + // user enabling the service. Two or more services must be specified. To + // enable a single service, use the `EnableService` method instead. A + // single request can enable a maximum of 20 services at a time. If more // than 20 services are specified, the request will fail, and no state - // changes - // will occur. + // changes will occur. ServiceIds []string `json:"serviceIds,omitempty"` // ForceSendFields is a list of field names (e.g. "ServiceIds") to @@ -971,14 +807,13 @@ func (s *BatchEnableServicesRequest) MarshalJSON() ([]byte, error) { } // BatchEnableServicesResponse: Response message for the -// `BatchEnableServices` method. -// This response message is assigned to the `response` field of the -// returned -// Operation when that operation is done. +// `BatchEnableServices` method. This response message is assigned to +// the `response` field of the returned Operation when that operation is +// done. type BatchEnableServicesResponse struct { // Failures: If allow_partial_success is true, and one or more services - // could not be - // enabled, this field contains the details about each failure. + // could not be enabled, this field contains the details about each + // failure. Failures []*EnableFailure `json:"failures,omitempty"` // Services: The new state of the services after enabling. @@ -1007,53 +842,28 @@ func (s *BatchEnableServicesResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Billing: Billing related configuration of the service. -// -// The following example shows how to configure monitored resources and -// metrics -// for billing, `consumer_destinations` is the only supported -// destination and -// the monitored resources need at least one label -// key +// Billing: Billing related configuration of the service. The following +// example shows how to configure monitored resources and metrics for +// billing, `consumer_destinations` is the only supported destination +// and the monitored resources need at least one label key // `cloud.googleapis.com/location` to indicate the location of the -// billing -// usage, using different monitored resources between monitoring and -// billing is -// recommended so they can be evolved independently: -// -// -// monitored_resources: -// - type: library.googleapis.com/billing_branch -// labels: -// - key: cloud.googleapis.com/location -// description: | -// Predefined label to support billing location restriction. -// - key: city -// description: | -// Custom label to define the city where the library branch is -// located -// in. -// - key: name -// description: Custom label to define the name of the library -// branch. -// metrics: -// - name: library.googleapis.com/book/borrowed_count -// metric_kind: DELTA -// value_type: INT64 -// unit: "1" -// billing: -// consumer_destinations: -// - monitored_resource: library.googleapis.com/billing_branch -// metrics: -// - library.googleapis.com/book/borrowed_count +// billing usage, using different monitored resources between monitoring +// and billing is recommended so they can be evolved independently: +// monitored_resources: - type: library.googleapis.com/billing_branch +// labels: - key: cloud.googleapis.com/location description: | +// Predefined label to support billing location restriction. 
- key: city +// description: | Custom label to define the city where the library +// branch is located in. - key: name description: Custom label to define +// the name of the library branch. metrics: - name: +// library.googleapis.com/book/borrowed_count metric_kind: DELTA +// value_type: INT64 unit: "1" billing: consumer_destinations: - +// monitored_resource: library.googleapis.com/billing_branch metrics: - +// library.googleapis.com/book/borrowed_count type Billing struct { // ConsumerDestinations: Billing configurations for sending metrics to - // the consumer project. - // There can be multiple consumer destinations per service, each one - // must have - // a different monitored resource type. A metric can be used in at - // most - // one consumer destination. + // the consumer project. There can be multiple consumer destinations per + // service, each one must have a different monitored resource type. A + // metric can be used in at most one consumer destination. ConsumerDestinations []*BillingDestination `json:"consumerDestinations,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1082,17 +892,14 @@ func (s *Billing) MarshalJSON() ([]byte, error) { } // BillingDestination: Configuration of a specific billing destination -// (Currently only support -// bill against consumer project). +// (Currently only support bill against consumer project). type BillingDestination struct { - // Metrics: Names of the metrics to report to this billing - // destination. + // Metrics: Names of the metrics to report to this billing destination. // Each name must be defined in Service.metrics section. Metrics []string `json:"metrics,omitempty"` // MonitoredResource: The monitored resource type. The type must be - // defined in - // Service.monitored_resources section. + // defined in Service.monitored_resources section. MonitoredResource string `json:"monitoredResource,omitempty"` // ForceSendFields is a list of field names (e.g. "Metrics") to @@ -1127,39 +934,27 @@ type ConsumerQuotaLimit struct { // IsPrecise: Whether this limit is precise or imprecise. IsPrecise bool `json:"isPrecise,omitempty"` - // Metric: The name of the parent metric of this limit. - // - // An example name would be: - // `compute.googleapis.com/cpus` + // Metric: The name of the parent metric of this limit. An example name + // would be: `compute.googleapis.com/cpus` Metric string `json:"metric,omitempty"` - // Name: The resource name of the quota limit. - // - // An example name would - // be: - // `projects/123/services/compute.googleapis.com/consumerQuotaMetrics - // /compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion` - // - // The resource name is intended to be opaque and should not be parsed - // for - // its component strings, since its representation could change in the + // Name: The resource name of the quota limit. An example name would be: + // `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/com + // pute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion` The resource + // name is intended to be opaque and should not be parsed for its + // component strings, since its representation could change in the // future. Name string `json:"name,omitempty"` // QuotaBuckets: Summary of the enforced quota buckets, organized by - // quota dimension, - // ordered from least specific to most specific (for example, the - // global - // default bucket, with no quota dimensions, will always appear first). 
+ // quota dimension, ordered from least specific to most specific (for + // example, the global default bucket, with no quota dimensions, will + // always appear first). QuotaBuckets []*QuotaBucket `json:"quotaBuckets,omitempty"` - // Unit: The limit unit. - // - // An example unit would be - // `1/{project}/{region}` + // Unit: The limit unit. An example unit would be `1/{project}/{region}` // Note that `{project}` and `{region}` are not placeholders in this - // example; - // the literal characters `{` and `}` occur in the string. + // example; the literal characters `{` and `}` occur in the string. Unit string `json:"unit,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1197,32 +992,25 @@ type ConsumerQuotaMetric struct { // on the metric. ConsumerQuotaLimits []*ConsumerQuotaLimit `json:"consumerQuotaLimits,omitempty"` - // DisplayName: The display name of the metric. - // - // An example name would be: - // "CPUs" + // DisplayName: The display name of the metric. An example name would + // be: "CPUs" DisplayName string `json:"displayName,omitempty"` - // Metric: The name of the metric. - // - // An example name would be: + // Metric: The name of the metric. An example name would be: // `compute.googleapis.com/cpus` Metric string `json:"metric,omitempty"` // Name: The resource name of the quota settings on this metric for this - // consumer. - // - // An example name would - // be: - // `projects/123/services/compute.googleapis.com/consumerQuotaMetrics - // /compute.googleapis.com%2Fcpus - // - // The resource name is intended to be opaque and should not be parsed - // for - // its component strings, since its representation could change in the - // future. + // consumer. An example name would be: + // `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/com + // pute.googleapis.com%2Fcpus The resource name is intended to be opaque + // and should not be parsed for its component strings, since its + // representation could change in the future. Name string `json:"name,omitempty"` + // Unit: The units in which the metric value is reported. + Unit string `json:"unit,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -1251,59 +1039,27 @@ func (s *ConsumerQuotaMetric) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Context: `Context` defines which contexts an API -// requests. -// -// Example: -// -// context: -// rules: -// - selector: "*" -// requested: -// - google.rpc.context.ProjectContext -// - google.rpc.context.OriginContext -// -// The above specifies that all methods in the API -// request -// `google.rpc.context.ProjectContext` -// and -// `google.rpc.context.OriginContext`. -// -// Available context types are defined in -// package -// `google.rpc.context`. -// -// This also provides mechanism to whitelist any protobuf message -// extension that -// can be sent in grpc metadata using -// “x-goog-ext--bin” -// and -// “x-goog-ext--jspb” format. For example, list any -// service -// specific protobuf types that can appear in grpc metadata as follows -// in your -// yaml file: -// -// Example: -// -// context: -// rules: -// - selector: +// Context: `Context` defines which contexts an API requests. 
Example: +// context: rules: - selector: "*" requested: - +// google.rpc.context.ProjectContext - google.rpc.context.OriginContext +// The above specifies that all methods in the API request +// `google.rpc.context.ProjectContext` and +// `google.rpc.context.OriginContext`. Available context types are +// defined in package `google.rpc.context`. This also provides mechanism +// to whitelist any protobuf message extension that can be sent in grpc +// metadata using “x-goog-ext--bin” and “x-goog-ext--jspb” +// format. For example, list any service specific protobuf types that +// can appear in grpc metadata as follows in your yaml file: Example: +// context: rules: - selector: // "google.example.library.v1.LibraryService.CreateBook" -// allowed_request_extensions: -// - google.foo.v1.NewExtension -// allowed_response_extensions: -// - google.foo.v1.NewExtension -// -// You can also specify extension ID instead of fully qualified -// extension name +// allowed_request_extensions: - google.foo.v1.NewExtension +// allowed_response_extensions: - google.foo.v1.NewExtension You can +// also specify extension ID instead of fully qualified extension name // here. type Context struct { // Rules: A list of RPC context rules that apply to individual API - // methods. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // methods. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*ContextRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. "Rules") to @@ -1330,17 +1086,14 @@ func (s *Context) MarshalJSON() ([]byte, error) { } // ContextRule: A context rule provides information about the context -// for an individual API -// element. +// for an individual API element. type ContextRule struct { // AllowedRequestExtensions: A list of full type names or extension IDs - // of extensions allowed in grpc - // side channel from client to backend. + // of extensions allowed in grpc side channel from client to backend. AllowedRequestExtensions []string `json:"allowedRequestExtensions,omitempty"` // AllowedResponseExtensions: A list of full type names or extension IDs - // of extensions allowed in grpc - // side channel from backend to client. + // of extensions allowed in grpc side channel from backend to client. AllowedResponseExtensions []string `json:"allowedResponseExtensions,omitempty"` // Provided: A list of full type names of provided contexts. @@ -1349,9 +1102,8 @@ type ContextRule struct { // Requested: A list of full type names of requested contexts. Requested []string `json:"requested,omitempty"` - // Selector: Selects the methods to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects the methods to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1380,14 +1132,11 @@ func (s *ContextRule) MarshalJSON() ([]byte, error) { } // Control: Selects and configures the service controller used by the -// service. The -// service controller handles features like abuse, quota, billing, -// logging, -// monitoring, etc. +// service. The service controller handles features like abuse, quota, +// billing, logging, monitoring, etc. type Control struct { // Environment: The service control environment to use. If empty, no - // control plane - // feature (like quota and billing) will be enabled. + // control plane feature (like quota and billing) will be enabled. 
Environment string `json:"environment,omitempty"` // ForceSendFields is a list of field names (e.g. "Environment") to @@ -1413,24 +1162,14 @@ func (s *Control) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// CustomError: Customize service error responses. For example, list -// any service -// specific protobuf types that can appear in error detail lists -// of -// error responses. -// -// Example: -// -// custom_error: -// types: -// - google.foo.v1.CustomError -// - google.foo.v1.AnotherError +// CustomError: Customize service error responses. For example, list any +// service specific protobuf types that can appear in error detail lists +// of error responses. Example: custom_error: types: - +// google.foo.v1.CustomError - google.foo.v1.AnotherError type CustomError struct { // Rules: The list of custom error rules that apply to individual API - // messages. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // messages. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*CustomErrorRule `json:"rules,omitempty"` // Types: The list of custom error detail types, e.g. @@ -1463,14 +1202,12 @@ func (s *CustomError) MarshalJSON() ([]byte, error) { // CustomErrorRule: A custom error rule. type CustomErrorRule struct { // IsErrorType: Mark this message as possible payload in error response. - // Otherwise, - // objects of this type will be filtered when they appear in error - // payload. + // Otherwise, objects of this type will be filtered when they appear in + // error payload. IsErrorType bool `json:"isErrorType,omitempty"` - // Selector: Selects messages to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects messages to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. "IsErrorType") to @@ -1534,10 +1271,8 @@ type DisableServiceRequest struct { } // DisableServiceResponse: Response message for the `DisableService` -// method. -// This response message is assigned to the `response` field of the -// returned -// Operation when that operation is done. +// method. This response message is assigned to the `response` field of +// the returned Operation when that operation is done. type DisableServiceResponse struct { // Service: The new state of the service after disabling. Service *GoogleApiServiceusageV1Service `json:"service,omitempty"` @@ -1566,113 +1301,63 @@ func (s *DisableServiceResponse) MarshalJSON() ([]byte, error) { } // Documentation: `Documentation` provides the information for -// describing a service. -// -// Example: -//
-// documentation:
-//   summary: >
-//     The Google Calendar API gives access
-//     to most calendar features.
-//   pages:
-//   - name: Overview
-//     content: (== include google/foo/overview.md ==)
-//   - name: Tutorial
-//     content: (== include google/foo/tutorial.md ==)
-//     subpages;
-//     - name: Java
-//       content: (== include google/foo/tutorial_java.md ==)
-//   rules:
-//   - selector: google.calendar.Calendar.Get
-//     description: >
-//       ...
-//   - selector: google.calendar.Calendar.Put
-//     description: >
-//       ...
-// 
-// Documentation is provided in markdown syntax. In addition to -// standard markdown features, definition lists, tables and fenced -// code blocks are supported. Section headers can be provided and -// are -// interpreted relative to the section nesting of the context where -// a documentation fragment is embedded. -// -// Documentation from the IDL is merged with documentation defined -// via the config at normalization time, where documentation provided -// by config rules overrides IDL provided. -// -// A number of constructs specific to the API platform are supported -// in documentation text. -// -// In order to reference a proto element, the following -// notation can be -// used: -//
-// [fully.qualified.proto.name][]
-// T -// o override the display text used for the link, this can be -// used: -//
-// [display
-// text][fully.qualified.proto.name]
-// Text can be excluded from doc using the following -// notation: -//
-// (-- internal comment --)
-// -// A few directives are available in documentation. Note that -// directives must appear on a single line to be properly -// identified. The `include` directive includes a markdown file from -// an external source: -//
-// (== include path/to/file ==)
-// The `resource_for` directive marks a message to be the resource of -// a collection in REST view. If it is not specified, tools attempt -// to infer the resource from the operations in a -// collection: -//
-// (== resource_for v1.shelves.books
-// ==)
-// The directive `suppress_warning` does not directly affect -// documentation -// and is documented together with service config validation. +// describing a service. Example: documentation: summary: > The Google +// Calendar API gives access to most calendar features. pages: - name: +// Overview content: (== include google/foo/overview.md ==) - name: +// Tutorial content: (== include google/foo/tutorial.md ==) subpages; - +// name: Java content: (== include google/foo/tutorial_java.md ==) +// rules: - selector: google.calendar.Calendar.Get description: > ... - +// selector: google.calendar.Calendar.Put description: > ... +// Documentation is provided in markdown syntax. In addition to standard +// markdown features, definition lists, tables and fenced code blocks +// are supported. Section headers can be provided and are interpreted +// relative to the section nesting of the context where a documentation +// fragment is embedded. Documentation from the IDL is merged with +// documentation defined via the config at normalization time, where +// documentation provided by config rules overrides IDL provided. A +// number of constructs specific to the API platform are supported in +// documentation text. In order to reference a proto element, the +// following notation can be used: [fully.qualified.proto.name][] To +// override the display text used for the link, this can be used: +// [display text][fully.qualified.proto.name] Text can be excluded from +// doc using the following notation: (-- internal comment --) A few +// directives are available in documentation. Note that directives must +// appear on a single line to be properly identified. The `include` +// directive includes a markdown file from an external source: (== +// include path/to/file ==) The `resource_for` directive marks a message +// to be the resource of a collection in REST view. If it is not +// specified, tools attempt to infer the resource from the operations in +// a collection: (== resource_for v1.shelves.books ==) The directive +// `suppress_warning` does not directly affect documentation and is +// documented together with service config validation. type Documentation struct { // DocumentationRootUrl: The URL to the root of documentation. DocumentationRootUrl string `json:"documentationRootUrl,omitempty"` - // Overview: Declares a single overview page. For - // example: - //
-	// documentation:
-	//   summary: ...
-	//   overview: (== include overview.md ==)
-	// 
- // This is a shortcut for the following declaration (using pages - // style): - //
-	// documentation:
-	//   summary: ...
-	//   pages:
-	//   - name: Overview
-	//     content: (== include overview.md ==)
-	// 
- // Note: you cannot specify both `overview` field and `pages` field. + // Overview: Declares a single overview page. For example: + // documentation: summary: ... overview: (== include overview.md ==) + // This is a shortcut for the following declaration (using pages style): + // documentation: summary: ... pages: - name: Overview content: (== + // include overview.md ==) Note: you cannot specify both `overview` + // field and `pages` field. Overview string `json:"overview,omitempty"` // Pages: The top level pages for the documentation set. Pages []*Page `json:"pages,omitempty"` // Rules: A list of documentation rules that apply to individual API - // elements. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // elements. **NOTE:** All service configuration rules follow "last one + // wins" order. Rules []*DocumentationRule `json:"rules,omitempty"` // ServiceRootUrl: Specifies the service root url if the default one - // (the service name - // from the yaml file) is not suitable. This can be seen in any - // fully - // specified service urls as well as sections that show a base that - // other - // urls are relative to. + // (the service name from the yaml file) is not suitable. This can be + // seen in any fully specified service urls as well as sections that + // show a base that other urls are relative to. ServiceRootUrl string `json:"serviceRootUrl,omitempty"` // Summary: A short summary of what the service does. Can only be - // provided by - // plain text. + // provided by plain text. Summary string `json:"summary,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1704,24 +1389,20 @@ func (s *Documentation) MarshalJSON() ([]byte, error) { // individual API elements. type DocumentationRule struct { // DeprecationDescription: Deprecation description of the selected - // element(s). It can be provided if - // an element is marked as `deprecated`. + // element(s). It can be provided if an element is marked as + // `deprecated`. DeprecationDescription string `json:"deprecationDescription,omitempty"` // Description: Description of the selected API(s). Description string `json:"description,omitempty"` // Selector: The selector is a comma-separated list of patterns. Each - // pattern is a - // qualified name of the element which may end in "*", indicating a - // wildcard. - // Wildcards are only allowed at the end and for a whole component of - // the - // qualified name, i.e. "foo.*" is ok, but not "foo.b*" or "foo.*.bar". - // A - // wildcard will match one or more components. To specify a default for - // all - // applicable elements, the whole pattern "*" is used. + // pattern is a qualified name of the element which may end in "*", + // indicating a wildcard. Wildcards are only allowed at the end and for + // a whole component of the qualified name, i.e. "foo.*" is ok, but not + // "foo.b*" or "foo.*.bar". A wildcard will match one or more + // components. To specify a default for all applicable elements, the + // whole pattern "*" is used. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1750,17 +1431,11 @@ func (s *DocumentationRule) MarshalJSON() ([]byte, error) { } // Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. 
For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. type Empty struct { } @@ -1801,10 +1476,8 @@ type EnableServiceRequest struct { } // EnableServiceResponse: Response message for the `EnableService` -// method. -// This response message is assigned to the `response` field of the -// returned -// Operation when that operation is done. +// method. This response message is assigned to the `response` field of +// the returned Operation when that operation is done. type EnableServiceResponse struct { // Service: The new state of the service after enabling. Service *GoogleApiServiceusageV1Service `json:"service,omitempty"` @@ -1833,64 +1506,38 @@ func (s *EnableServiceResponse) MarshalJSON() ([]byte, error) { } // Endpoint: `Endpoint` describes a network endpoint that serves a set -// of APIs. -// A service may expose any number of endpoints, and all endpoints share -// the -// same service configuration, such as quota configuration and -// monitoring -// configuration. -// -// Example service configuration: -// -// name: library-example.googleapis.com -// endpoints: -// # Below entry makes 'google.example.library.v1.Library' -// # API be served from endpoint address -// library-example.googleapis.com. -// # It also allows HTTP OPTIONS calls to be passed to the -// backend, for -// # it to decide whether the subsequent cross-origin request is -// # allowed to proceed. -// - name: library-example.googleapis.com -// allow_cors: true +// of APIs. A service may expose any number of endpoints, and all +// endpoints share the same service configuration, such as quota +// configuration and monitoring configuration. Example service +// configuration: name: library-example.googleapis.com endpoints: # +// Below entry makes 'google.example.library.v1.Library' # API be served +// from endpoint address library-example.googleapis.com. # It also +// allows HTTP OPTIONS calls to be passed to the backend, for # it to +// decide whether the subsequent cross-origin request is # allowed to +// proceed. - name: library-example.googleapis.com allow_cors: true type Endpoint struct { // Aliases: DEPRECATED: This field is no longer supported. Instead of - // using aliases, - // please specify multiple google.api.Endpoint for each of the - // intended - // aliases. - // - // Additional names that this endpoint will be hosted on. + // using aliases, please specify multiple google.api.Endpoint for each + // of the intended aliases. Additional names that this endpoint will be + // hosted on. Aliases []string `json:"aliases,omitempty"` - // AllowCors: - // Allowing - // [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sh - // aring), aka - // cross-domain traffic, would allow the backends served from this - // endpoint to - // receive and respond to HTTP OPTIONS requests. The response will be - // used by - // the browser to determine whether the subsequent cross-origin request - // is - // allowed to proceed. 
+ // AllowCors: Allowing + // [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), + // aka cross-domain traffic, would allow the backends served from this + // endpoint to receive and respond to HTTP OPTIONS requests. The + // response will be used by the browser to determine whether the + // subsequent cross-origin request is allowed to proceed. AllowCors bool `json:"allowCors,omitempty"` - // Features: The list of features enabled on this endpoint. - Features []string `json:"features,omitempty"` - // Name: The canonical name of this endpoint. Name string `json:"name,omitempty"` // Target: The specification of an Internet routable address of API - // frontend that will - // handle requests to this - // [API + // frontend that will handle requests to this [API // Endpoint](https://cloud.google.com/apis/design/glossary). It should - // be - // either a valid IPv4 address or a fully-qualified domain name. For - // example, - // "8.8.8.8" or "myservice.appspot.com". + // be either a valid IPv4 address or a fully-qualified domain name. For + // example, "8.8.8.8" or "myservice.appspot.com". Target string `json:"target,omitempty"` // ForceSendFields is a list of field names (e.g. "Aliases") to @@ -2044,9 +1691,8 @@ type Field struct { Number int64 `json:"number,omitempty"` // OneofIndex: The index of the field type in `Type.oneofs`, for message - // or enumeration - // types. The first type has index 1; zero means the type is not in the - // list. + // or enumeration types. The first type has index 1; zero means the type + // is not in the list. OneofIndex int64 `json:"oneofIndex,omitempty"` // Options: The protocol buffer options. @@ -2056,8 +1702,8 @@ type Field struct { Packed bool `json:"packed,omitempty"` // TypeUrl: The field type URL, without the scheme, for message or - // enumeration - // types. Example: "type.googleapis.com/google.protobuf.Timestamp". + // enumeration types. Example: + // "type.googleapis.com/google.protobuf.Timestamp". TypeUrl string `json:"typeUrl,omitempty"` // ForceSendFields is a list of field names (e.g. "Cardinality") to @@ -2087,18 +1733,16 @@ func (s *Field) MarshalJSON() ([]byte, error) { // identity. type GetServiceIdentityResponse struct { // Identity: Service identity that service producer can use to access - // consumer - // resources. If exists is true, it contains email and unique_id. If - // exists is - // false, it contains pre-constructed email and empty unique_id. + // consumer resources. If exists is true, it contains email and + // unique_id. If exists is false, it contains pre-constructed email and + // empty unique_id. Identity *ServiceIdentity `json:"identity,omitempty"` // State: Service identity state. // // Possible values: // "IDENTITY_STATE_UNSPECIFIED" - Default service identity state. This - // value is used if the state is - // omitted. + // value is used if the state is omitted. // "ACTIVE" - Service identity has been created and can be used. State string `json:"state,omitempty"` @@ -2126,43 +1770,24 @@ func (s *GetServiceIdentityResponse) MarshalJSON() ([]byte, error) { } // GoogleApiService: `Service` is the root object of Google service -// configuration schema. It -// describes basic information about a service, such as the name and -// the -// title, and delegates other aspects to sub-sections. Each sub-section -// is -// either a proto message or a repeated proto message that configures -// a -// specific aspect, such as auth. See each proto message definition for -// details. 
-// -// Example: -// -// type: google.api.Service -// config_version: 3 -// name: calendar.googleapis.com -// title: Google Calendar API -// apis: -// - name: google.calendar.v3.Calendar -// authentication: -// providers: -// - id: google_calendar_auth -// jwks_uri: https://www.googleapis.com/oauth2/v1/certs -// issuer: https://securetoken.google.com -// rules: -// - selector: "*" -// requirements: -// provider_id: google_calendar_auth +// configuration schema. It describes basic information about a service, +// such as the name and the title, and delegates other aspects to +// sub-sections. Each sub-section is either a proto message or a +// repeated proto message that configures a specific aspect, such as +// auth. See each proto message definition for details. Example: type: +// google.api.Service config_version: 3 name: calendar.googleapis.com +// title: Google Calendar API apis: - name: google.calendar.v3.Calendar +// authentication: providers: - id: google_calendar_auth jwks_uri: +// https://www.googleapis.com/oauth2/v1/certs issuer: +// https://securetoken.google.com rules: - selector: "*" requirements: +// provider_id: google_calendar_auth type GoogleApiService struct { // Apis: A list of API interfaces exported by this service. Only the - // `name` field - // of the google.protobuf.Api needs to be provided by the - // configuration - // author, as the remaining fields will be derived from the IDL during - // the - // normalization process. It is an error to specify an API interface - // here - // which cannot be resolved against the associated IDL files. + // `name` field of the google.protobuf.Api needs to be provided by the + // configuration author, as the remaining fields will be derived from + // the IDL during the normalization process. It is an error to specify + // an API interface here which cannot be resolved against the associated + // IDL files. Apis []*Api `json:"apis,omitempty"` // Authentication: Auth configuration. @@ -2175,13 +1800,9 @@ type GoogleApiService struct { Billing *Billing `json:"billing,omitempty"` // ConfigVersion: The semantic version of the service configuration. The - // config version - // affects the interpretation of the service configuration. For - // example, - // certain features are enabled by default for certain config - // versions. - // - // The latest config version is `3`. + // config version affects the interpretation of the service + // configuration. For example, certain features are enabled by default + // for certain config versions. The latest config version is `3`. ConfigVersion int64 `json:"configVersion,omitempty"` // Context: Context configuration. @@ -2196,35 +1817,25 @@ type GoogleApiService struct { // Documentation: Additional API documentation. Documentation *Documentation `json:"documentation,omitempty"` - // Endpoints: Configuration for network endpoints. If this is empty, - // then an endpoint - // with the same name as the service is automatically generated to - // service all - // defined APIs. + // Endpoints: Configuration for network endpoints. If this is empty, + // then an endpoint with the same name as the service is automatically + // generated to service all defined APIs. Endpoints []*Endpoint `json:"endpoints,omitempty"` - // Enums: A list of all enum types included in this API service. - // Enums - // referenced directly or indirectly by the `apis` are - // automatically - // included. Enums which are not referenced but shall be - // included - // should be listed here by name. 
Example: - // - // enums: - // - name: google.someapi.v1.SomeEnum + // Enums: A list of all enum types included in this API service. Enums + // referenced directly or indirectly by the `apis` are automatically + // included. Enums which are not referenced but shall be included should + // be listed here by name. Example: enums: - name: + // google.someapi.v1.SomeEnum Enums []*Enum `json:"enums,omitempty"` // Http: HTTP configuration. Http *Http `json:"http,omitempty"` // Id: A unique ID for a specific instance of this message, typically - // assigned - // by the client for tracking purpose. Must be no longer than 63 - // characters - // and only lower case letters, digits, '.', '_' and '-' are allowed. - // If - // empty, the server may choose to generate one instead. + // assigned by the client for tracking purpose. Must be no longer than + // 63 characters and only lower case letters, digits, '.', '_' and '-' + // are allowed. If empty, the server may choose to generate one instead. Id string `json:"id,omitempty"` // Logging: Logging configuration. @@ -2237,19 +1848,17 @@ type GoogleApiService struct { Metrics []*MetricDescriptor `json:"metrics,omitempty"` // MonitoredResources: Defines the monitored resources used by this - // service. This is required - // by the Service.monitoring and Service.logging configurations. + // service. This is required by the Service.monitoring and + // Service.logging configurations. MonitoredResources []*MonitoredResourceDescriptor `json:"monitoredResources,omitempty"` // Monitoring: Monitoring configuration. Monitoring *Monitoring `json:"monitoring,omitempty"` // Name: The service name, which is a DNS-like logical identifier for - // the - // service, such as `calendar.googleapis.com`. The service - // name - // typically goes through DNS verification to make sure the owner - // of the service also owns the DNS name. + // the service, such as `calendar.googleapis.com`. The service name + // typically goes through DNS verification to make sure the owner of the + // service also owns the DNS name. Name string `json:"name,omitempty"` // ProducerProjectId: The Google project that owns this service. @@ -2266,30 +1875,21 @@ type GoogleApiService struct { SystemParameters *SystemParameters `json:"systemParameters,omitempty"` // SystemTypes: A list of all proto message types included in this API - // service. - // It serves similar purpose as [google.api.Service.types], except - // that - // these types are not needed by user-defined APIs. Therefore, they will - // not - // show up in the generated discovery doc. This field should only be - // used - // to define system APIs in ESF. + // service. It serves similar purpose as [google.api.Service.types], + // except that these types are not needed by user-defined APIs. + // Therefore, they will not show up in the generated discovery doc. This + // field should only be used to define system APIs in ESF. SystemTypes []*Type `json:"systemTypes,omitempty"` // Title: The product title for this service. Title string `json:"title,omitempty"` // Types: A list of all proto message types included in this API - // service. - // Types referenced directly or indirectly by the `apis` - // are - // automatically included. Messages which are not referenced but - // shall be included, such as types used by the `google.protobuf.Any` - // type, - // should be listed here by name. Example: - // - // types: - // - name: google.protobuf.Int32 + // service. 
Types referenced directly or indirectly by the `apis` are + // automatically included. Messages which are not referenced but shall + // be included, such as types used by the `google.protobuf.Any` type, + // should be listed here by name. Example: types: - name: + // google.protobuf.Int32 Types []*Type `json:"types,omitempty"` // Usage: Configuration controlling usage of this service. @@ -2319,35 +1919,22 @@ func (s *GoogleApiService) MarshalJSON() ([]byte, error) { } // GoogleApiServiceIdentity: The per-product per-project service -// identity for a service. -// -// -// Use this field to configure per-product per-project service -// identity. -// Example of a service identity configuration. -// -// usage: -// service_identity: -// - service_account_parent: "projects/123456789" -// display_name: "Cloud XXX Service Agent" -// description: "Used as the identity of Cloud XXX to access -// resources" +// identity for a service. Use this field to configure per-product +// per-project service identity. Example of a service identity +// configuration. usage: service_identity: - service_account_parent: +// "projects/123456789" display_name: "Cloud XXX Service Agent" +// description: "Used as the identity of Cloud XXX to access resources" type GoogleApiServiceIdentity struct { // Description: Optional. A user-specified opaque description of the - // service account. - // Must be less than or equal to 256 UTF-8 bytes. + // service account. Must be less than or equal to 256 UTF-8 bytes. Description string `json:"description,omitempty"` - // DisplayName: Optional. A user-specified name for the service - // account. + // DisplayName: Optional. A user-specified name for the service account. // Must be less than or equal to 100 UTF-8 bytes. DisplayName string `json:"displayName,omitempty"` // ServiceAccountParent: A service account project that hosts the - // service accounts. - // - // An example name would be: - // `projects/123456789` + // service accounts. An example name would be: `projects/123456789` ServiceAccountParent string `json:"serviceAccountParent,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -2377,8 +1964,7 @@ func (s *GoogleApiServiceIdentity) MarshalJSON() ([]byte, error) { // returned for the batchend services operation. type GoogleApiServiceusageV1OperationMetadata struct { // ResourceNames: The full name of the resources that this operation is - // directly - // associated with. + // directly associated with. ResourceNames []string `json:"resourceNames,omitempty"` // ForceSendFields is a list of field names (e.g. "ResourceNames") to @@ -2407,24 +1993,18 @@ func (s *GoogleApiServiceusageV1OperationMetadata) MarshalJSON() ([]byte, error) // GoogleApiServiceusageV1Service: A service that is available for use // by the consumer. type GoogleApiServiceusageV1Service struct { - // Config: The service configuration of the available service. - // Some fields may be filtered out of the configuration in responses - // to - // the `ListServices` method. These fields are present only in responses - // to + // Config: The service configuration of the available service. Some + // fields may be filtered out of the configuration in responses to the + // `ListServices` method. These fields are present only in responses to // the `GetService` method. Config *GoogleApiServiceusageV1ServiceConfig `json:"config,omitempty"` - // Name: The resource name of the consumer and service. 
- // - // A valid name would be: - // - projects/123/services/serviceusage.googleapis.com + // Name: The resource name of the consumer and service. A valid name + // would be: - projects/123/services/serviceusage.googleapis.com Name string `json:"name,omitempty"` - // Parent: The resource name of the consumer. - // - // A valid name would be: - // - projects/123 + // Parent: The resource name of the consumer. A valid name would be: - + // projects/123 Parent string `json:"parent,omitempty"` // State: Whether or not the service has been enabled for use by the @@ -2432,14 +2012,11 @@ type GoogleApiServiceusageV1Service struct { // // Possible values: // "STATE_UNSPECIFIED" - The default value, which indicates that the - // enabled state of the service - // is unspecified or not meaningful. Currently, all consumers other - // than - // projects (such as folders and organizations) are always in this - // state. + // enabled state of the service is unspecified or not meaningful. + // Currently, all consumers other than projects (such as folders and + // organizations) are always in this state. // "DISABLED" - The service cannot be used by this consumer. It has - // either been explicitly - // disabled, or has never been enabled. + // either been explicitly disabled, or has never been enabled. // "ENABLED" - The service has been explicitly enabled for use by this // consumer. State string `json:"state,omitempty"` @@ -2471,27 +2048,31 @@ func (s *GoogleApiServiceusageV1Service) MarshalJSON() ([]byte, error) { // service. type GoogleApiServiceusageV1ServiceConfig struct { // Apis: A list of API interfaces exported by this service. Contains - // only the names, - // versions, and method names of the interfaces. + // only the names, versions, and method names of the interfaces. Apis []*Api `json:"apis,omitempty"` // Authentication: Auth configuration. Contains only the OAuth rules. Authentication *Authentication `json:"authentication,omitempty"` // Documentation: Additional API documentation. Contains only the - // summary and the - // documentation URL. + // summary and the documentation URL. Documentation *Documentation `json:"documentation,omitempty"` // Endpoints: Configuration for network endpoints. Contains only the - // names and aliases - // of the endpoints. + // names and aliases of the endpoints. Endpoints []*Endpoint `json:"endpoints,omitempty"` - // Name: The DNS address at which this service is available. - // - // An example DNS address would be: - // `calendar.googleapis.com`. + // MonitoredResources: Defines the monitored resources used by this + // service. This is required by the Service.monitoring and + // Service.logging configurations. + MonitoredResources []*MonitoredResourceDescriptor `json:"monitoredResources,omitempty"` + + // Monitoring: Monitoring configuration. This should not include the + // 'producer_destinations' field. + Monitoring *Monitoring `json:"monitoring,omitempty"` + + // Name: The DNS address at which this service is available. An example + // DNS address would be: `calendar.googleapis.com`. Name string `json:"name,omitempty"` // Quota: Quota configuration. @@ -2530,18 +2111,16 @@ func (s *GoogleApiServiceusageV1ServiceConfig) MarshalJSON() ([]byte, error) { // message for getting service identity. type GoogleApiServiceusageV1beta1GetServiceIdentityResponse struct { // Identity: Service identity that service producer can use to access - // consumer - // resources. If exists is true, it contains email and unique_id. 
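As a rough sketch of the resource-name conventions described above, a consumer/service pair could look like the following, assuming the surrounding generated package; the field names come from the GoogleApiServiceusageV1Service and GoogleApiServiceusageV1ServiceConfig definitions in this hunk.

// Sketch only: an enabled service shaped like the resource names described
// above. Assumes the surrounding generated package.
func exampleEnabledService() *GoogleApiServiceusageV1Service {
	return &GoogleApiServiceusageV1Service{
		// Consumer plus service, e.g. projects/123/services/<service name>.
		Name: "projects/123/services/serviceusage.googleapis.com",
		// Consumer resource name.
		Parent: "projects/123",
		// One of STATE_UNSPECIFIED, DISABLED, ENABLED.
		State: "ENABLED",
		// Filtered service config; only the DNS name is set here.
		Config: &GoogleApiServiceusageV1ServiceConfig{
			Name: "serviceusage.googleapis.com",
		},
	}
}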
If - // exists is - // false, it contains pre-constructed email and empty unique_id. + // consumer resources. If exists is true, it contains email and + // unique_id. If exists is false, it contains pre-constructed email and + // empty unique_id. Identity *GoogleApiServiceusageV1beta1ServiceIdentity `json:"identity,omitempty"` // State: Service identity state. // // Possible values: // "IDENTITY_STATE_UNSPECIFIED" - Default service identity state. This - // value is used if the state is - // omitted. + // value is used if the state is omitted. // "ACTIVE" - Service identity has been created and can be used. State string `json:"state,omitempty"` @@ -2569,18 +2148,15 @@ func (s *GoogleApiServiceusageV1beta1GetServiceIdentityResponse) MarshalJSON() ( } // GoogleApiServiceusageV1beta1ServiceIdentity: Service identity for a -// service. This is the identity that service producer -// should use to access consumer resources. +// service. This is the identity that service producer should use to +// access consumer resources. type GoogleApiServiceusageV1beta1ServiceIdentity struct { // Email: The email address of the service account that a service - // producer would use - // to access consumer resources. + // producer would use to access consumer resources. Email string `json:"email,omitempty"` - // UniqueId: The unique and stable id of the service - // account. - // https://cloud.google.com/iam/reference/rest/v1/projects.servi - // ceAccounts#ServiceAccount + // UniqueId: The unique and stable id of the service account. + // https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts#ServiceAccount UniqueId string `json:"uniqueId,omitempty"` // ForceSendFields is a list of field names (e.g. "Email") to @@ -2607,26 +2183,19 @@ func (s *GoogleApiServiceusageV1beta1ServiceIdentity) MarshalJSON() ([]byte, err } // Http: Defines the HTTP configuration for an API service. It contains -// a list of -// HttpRule, each specifying the mapping of an RPC method -// to one or more HTTP REST API methods. +// a list of HttpRule, each specifying the mapping of an RPC method to +// one or more HTTP REST API methods. type Http struct { // FullyDecodeReservedExpansion: When set to true, URL path parameters - // will be fully URI-decoded except in - // cases of single segment matches in reserved expansion, where "%2F" - // will be - // left encoded. - // - // The default behavior is to not decode RFC 6570 reserved characters in - // multi + // will be fully URI-decoded except in cases of single segment matches + // in reserved expansion, where "%2F" will be left encoded. The default + // behavior is to not decode RFC 6570 reserved characters in multi // segment matches. FullyDecodeReservedExpansion bool `json:"fullyDecodeReservedExpansion,omitempty"` // Rules: A list of HTTP configuration rules that apply to individual - // API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // API methods. **NOTE:** All service configuration rules follow "last + // one wins" order. Rules []*HttpRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -2654,403 +2223,187 @@ func (s *Http) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// HttpRule: # gRPC Transcoding -// -// gRPC Transcoding is a feature for mapping between a gRPC method and -// one or -// more HTTP REST endpoints. It allows developers to build a single API -// service -// that supports both gRPC APIs and REST APIs. 
Many systems, including -// [Google -// APIs](https://github.com/googleapis/googleapis), -// [Cloud Endpoints](https://cloud.google.com/endpoints), -// [gRPC -// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), -// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this -// feature -// and use it for large scale production services. -// -// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping -// specifies +// HttpRule: # gRPC Transcoding gRPC Transcoding is a feature for +// mapping between a gRPC method and one or more HTTP REST endpoints. It +// allows developers to build a single API service that supports both +// gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), [Cloud +// Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), and +// [Envoy](https://github.com/envoyproxy/envoy) proxy support this +// feature and use it for large scale production services. `HttpRule` +// defines the schema of the gRPC/REST mapping. The mapping specifies // how different portions of the gRPC request message are mapped to the -// URL -// path, URL query parameters, and HTTP request body. It also controls -// how the -// gRPC response message is mapped to the HTTP response body. `HttpRule` -// is -// typically specified as an `google.api.http` annotation on the gRPC -// method. -// -// Each mapping specifies a URL path template and an HTTP method. The -// path -// template may refer to one or more fields in the gRPC request message, -// as long -// as each field is a non-repeated field with a primitive (non-message) -// type. -// The path template controls how fields of the request message are -// mapped to -// the URL path. -// -// Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/{name=messages/*}" -// }; -// } -// } -// message GetMessageRequest { -// string name = 1; // Mapped to URL path. -// } -// message Message { -// string text = 1; // The resource content. -// } -// -// This enables an HTTP REST to gRPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: -// "messages/123456")` -// -// Any fields in the request message which are not bound by the path -// template -// automatically become HTTP query parameters if there is no HTTP -// request body. -// For example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get:"/v1/messages/{message_id}" -// }; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // Mapped to URL path. -// int64 revision = 2; // Mapped to URL query parameter -// `revision`. -// SubMessage sub = 3; // Mapped to URL query parameter -// `sub.subfield`. -// } -// -// This enables a HTTP JSON to RPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` -// | +// URL path, URL query parameters, and HTTP request body. It also +// controls how the gRPC response message is mapped to the HTTP response +// body. `HttpRule` is typically specified as an `google.api.http` +// annotation on the gRPC method. Each mapping specifies a URL path +// template and an HTTP method. 
The path template may refer to one or +// more fields in the gRPC request message, as long as each field is a +// non-repeated field with a primitive (non-message) type. The path +// template controls how fields of the request message are mapped to the +// URL path. Example: service Messaging { rpc +// GetMessage(GetMessageRequest) returns (Message) { option +// (google.api.http) = { get: "/v1/{name=messages/*}" }; } } message +// GetMessageRequest { string name = 1; // Mapped to URL path. } message +// Message { string text = 1; // The resource content. } This enables an +// HTTP REST to gRPC mapping as below: HTTP | gRPC -----|----- `GET +// /v1/messages/123456` | `GetMessage(name: "messages/123456")` Any +// fields in the request message which are not bound by the path +// template automatically become HTTP query parameters if there is no +// HTTP request body. For example: service Messaging { rpc +// GetMessage(GetMessageRequest) returns (Message) { option +// (google.api.http) = { get:"/v1/messages/{message_id}" }; } } message +// GetMessageRequest { message SubMessage { string subfield = 1; } +// string message_id = 1; // Mapped to URL path. int64 revision = 2; // +// Mapped to URL query parameter `revision`. SubMessage sub = 3; // +// Mapped to URL query parameter `sub.subfield`. } This enables a HTTP +// JSON to RPC mapping as below: HTTP | gRPC -----|----- `GET +// /v1/messages/123456?revision=2&sub.subfield=foo` | // `GetMessage(message_id: "123456" revision: 2 sub: -// SubMessage(subfield: -// "foo"))` -// -// Note that fields which are mapped to URL query parameters must have -// a -// primitive type or a repeated primitive type or a non-repeated message -// type. -// In the case of a repeated type, the parameter can be repeated in the -// URL -// as `...?param=A¶m=B`. In the case of a message type, each field -// of the -// message is mapped to a separate parameter, such -// as -// `...?foo.a=A&foo.b=B&foo.c=C`. -// -// For HTTP methods that allow a request body, the `body` -// field -// specifies the mapping. Consider a REST update method on the -// message resource collection: -// -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } -// -// The following HTTP JSON to RPC mapping is enabled, where -// the -// representation of the JSON in the request body is determined -// by -// protos JSON encoding: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | -// `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` -// -// The special name `*` can be used in the body mapping to define -// that -// every field not bound by the path template should be mapped to -// the -// request body. This enables the following alternative definition -// of -// the update method: -// -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// -// -// The following HTTP JSON to RPC mapping is enabled: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" 
}` | -// `UpdateMessage(message_id: -// "123456" text: "Hi!")` -// -// Note that when using `*` in the body mapping, it is not possible -// to -// have HTTP parameters, as all fields not bound by the path end in -// the body. This makes this option more rarely used in practice -// when -// defining REST APIs. The common usage of `*` is in custom -// methods -// which don't use the URL at all for transferring data. -// -// It is possible to define multiple HTTP methods for one RPC by -// using -// the `additional_bindings` option. Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } -// -// This enables the following two alternative HTTP JSON to RPC -// mappings: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" -// message_id: -// "123456")` -// -// ## Rules for HTTP mapping -// -// 1. Leaf request fields (recursive expansion nested messages in the -// request -// message) are classified into three categories: -// - Fields referred by the path template. They are passed via the -// URL path. -// - Fields referred by the HttpRule.body. They are passed via the -// HTTP -// request body. -// - All other fields are passed via the URL query parameters, and -// the -// parameter name is the field path in the request message. A -// repeated -// field can be represented as multiple query parameters under the -// same -// name. -// 2. If HttpRule.body is "*", there is no URL query parameter, all -// fields -// are passed via URL path and HTTP request body. -// 3. If HttpRule.body is omitted, there is no HTTP request body, all -// fields are passed via URL path and URL query parameters. -// -// ### Path template syntax -// -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; -// -// The syntax `*` matches a single URL path segment. The syntax `**` -// matches -// zero or more URL path segments, which must be the last part of the -// URL path -// except the `Verb`. -// -// The syntax `Variable` matches part of the URL path as specified by -// its -// template. A variable template must not contain other variables. If a -// variable -// matches a single path segment, its template may be omitted, e.g. -// `{var}` -// is equivalent to `{var=*}`. -// -// The syntax `LITERAL` matches literal text in the URL path. If the -// `LITERAL` -// contains any reserved character, such characters should be -// percent-encoded -// before the matching. -// -// If a variable contains exactly one path segment, such as "{var}" -// or -// "{var=*}", when such a variable is expanded into a URL path on the -// client -// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. -// The -// server side does the reverse decoding. Such variables show up in -// the -// [Discovery -// Document](https://developers.google.com/discovery/v1/re -// ference/apis) as -// `{var}`. 
-// -// If a variable contains multiple path segments, such as -// "{var=foo/*}" -// or "{var=**}", when such a variable is expanded into a URL path on -// the -// client side, all characters except `[-_.~/0-9a-zA-Z]` are -// percent-encoded. -// The server side does the reverse decoding, except "%2F" and "%2f" are -// left -// unchanged. Such variables show up in -// the +// SubMessage(subfield: "foo"))` Note that fields which are mapped to +// URL query parameters must have a primitive type or a repeated +// primitive type or a non-repeated message type. In the case of a +// repeated type, the parameter can be repeated in the URL as +// `...?param=A¶m=B`. In the case of a message type, each field of +// the message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. For HTTP methods that allow a request +// body, the `body` field specifies the mapping. Consider a REST update +// method on the message resource collection: service Messaging { rpc +// UpdateMessage(UpdateMessageRequest) returns (Message) { option +// (google.api.http) = { patch: "/v1/messages/{message_id}" body: +// "message" }; } } message UpdateMessageRequest { string message_id = +// 1; // mapped to the URL Message message = 2; // mapped to the body } +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: HTTP | gRPC -----|----- `PATCH +// /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" message { text: "Hi!" })` The special name `*` can be used +// in the body mapping to define that every field not bound by the path +// template should be mapped to the request body. This enables the +// following alternative definition of the update method: service +// Messaging { rpc UpdateMessage(Message) returns (Message) { option +// (google.api.http) = { patch: "/v1/messages/{message_id}" body: "*" }; +// } } message Message { string message_id = 1; string text = 2; } The +// following HTTP JSON to RPC mapping is enabled: HTTP | gRPC +// -----|----- `PATCH /v1/messages/123456 { "text": "Hi!" }` | +// `UpdateMessage(message_id: "123456" text: "Hi!")` Note that when +// using `*` in the body mapping, it is not possible to have HTTP +// parameters, as all fields not bound by the path end in the body. This +// makes this option more rarely used in practice when defining REST +// APIs. The common usage of `*` is in custom methods which don't use +// the URL at all for transferring data. It is possible to define +// multiple HTTP methods for one RPC by using the `additional_bindings` +// option. Example: service Messaging { rpc +// GetMessage(GetMessageRequest) returns (Message) { option +// (google.api.http) = { get: "/v1/messages/{message_id}" +// additional_bindings { get: +// "/v1/users/{user_id}/messages/{message_id}" } }; } } message +// GetMessageRequest { string message_id = 1; string user_id = 2; } This +// enables the following two alternative HTTP JSON to RPC mappings: HTTP +// | gRPC -----|----- `GET /v1/messages/123456` | +// `GetMessage(message_id: "123456")` `GET /v1/users/me/messages/123456` +// | `GetMessage(user_id: "me" message_id: "123456")` ## Rules for HTTP +// mapping 1. Leaf request fields (recursive expansion nested messages +// in the request message) are classified into three categories: - +// Fields referred by the path template. They are passed via the URL +// path. - Fields referred by the HttpRule.body. 
They are passed via the +// HTTP request body. - All other fields are passed via the URL query +// parameters, and the parameter name is the field path in the request +// message. A repeated field can be represented as multiple query +// parameters under the same name. 2. If HttpRule.body is "*", there is +// no URL query parameter, all fields are passed via URL path and HTTP +// request body. 3. If HttpRule.body is omitted, there is no HTTP +// request body, all fields are passed via URL path and URL query +// parameters. ### Path template syntax Template = "/" Segments [ Verb ] +// ; Segments = Segment { "/" Segment } ; Segment = "*" | "**" | LITERAL +// | Variable ; Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; Verb = ":" LITERAL ; The syntax `*` +// matches a single URL path segment. The syntax `**` matches zero or +// more URL path segments, which must be the last part of the URL path +// except the `Verb`. The syntax `Variable` matches part of the URL path +// as specified by its template. A variable template must not contain +// other variables. If a variable matches a single path segment, its +// template may be omitted, e.g. `{var}` is equivalent to `{var=*}`. The +// syntax `LITERAL` matches literal text in the URL path. If the +// `LITERAL` contains any reserved character, such characters should be +// percent-encoded before the matching. If a variable contains exactly +// one path segment, such as "{var}" or "{var=*}", when such a +// variable is expanded into a URL path on the client side, all +// characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The server +// side does the reverse decoding. Such variables show up in the // [Discovery -// Document](https://developers.google.com/discovery/v1/re -// ference/apis) as -// `{+var}`. -// -// ## Using gRPC API Service Configuration -// -// gRPC API Service Configuration (service config) is a configuration -// language -// for configuring a gRPC service to become a user-facing product. -// The +// Document](https://developers.google.com/discovery/v1/reference/apis) +// as `{var}`. If a variable contains multiple path segments, such as +// "{var=foo/*}" or "{var=**}", when such a variable is expanded +// into a URL path on the client side, all characters except +// `[-_.~/0-9a-zA-Z]` are percent-encoded. The server side does the +// reverse decoding, except "%2F" and "%2f" are left unchanged. Such +// variables show up in the [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) +// as `{+var}`. ## Using gRPC API Service Configuration gRPC API Service +// Configuration (service config) is a configuration language for +// configuring a gRPC service to become a user-facing product. The // service config is simply the YAML representation of the -// `google.api.Service` -// proto message. -// -// As an alternative to annotating your proto file, you can configure -// gRPC -// transcoding in your service config YAML files. You do this by -// specifying a -// `HttpRule` that maps the gRPC method to a REST endpoint, achieving -// the same -// effect as the proto annotation. This can be particularly useful if -// you -// have a proto that is reused in multiple services. Note that any -// transcoding +// `google.api.Service` proto message. As an alternative to annotating +// your proto file, you can configure gRPC transcoding in your service +// config YAML files. 
You do this by specifying a `HttpRule` that maps +// the gRPC method to a REST endpoint, achieving the same effect as the +// proto annotation. This can be particularly useful if you have a proto +// that is reused in multiple services. Note that any transcoding // specified in the service config will override any matching -// transcoding -// configuration in the proto. -// -// Example: -// -// http: -// rules: -// # Selects a gRPC method and applies HttpRule to it. -// - selector: example.v1.Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} -// -// ## Special notes -// -// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, -// the -// proto to JSON conversion must follow the -// [proto3 -// specification](https://developers.google.com/protocol-buffers/ -// docs/proto3#json). -// -// While the single segment variable follows the semantics of +// transcoding configuration in the proto. Example: http: rules: # +// Selects a gRPC method and applies HttpRule to it. - selector: +// example.v1.Messaging.GetMessage get: +// /v1/messages/{message_id}/{sub.subfield} ## Special notes When gRPC +// Transcoding is used to map a gRPC to JSON REST endpoints, the proto +// to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/pro +// to3#json). While the single segment variable follows the semantics of // [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple -// String -// Expansion, the multi segment variable **does not** follow RFC 6570 -// Section -// 3.2.3 Reserved Expansion. The reason is that the Reserved -// Expansion -// does not expand special characters like `?` and `#`, which would -// lead -// to invalid URLs. As the result, gRPC Transcoding uses a custom -// encoding -// for multi segment variables. -// -// The path variables **must not** refer to any repeated or mapped -// field, +// String Expansion, the multi segment variable **does not** follow RFC +// 6570 Section 3.2.3 Reserved Expansion. The reason is that the +// Reserved Expansion does not expand special characters like `?` and +// `#`, which would lead to invalid URLs. As the result, gRPC +// Transcoding uses a custom encoding for multi segment variables. The +// path variables **must not** refer to any repeated or mapped field, // because client libraries are not capable of handling such variable -// expansion. -// -// The path variables **must not** capture the leading "/" character. -// The reason -// is that the most common use case "{var}" does not capture the leading -// "/" -// character. For consistency, all path variables must share the same -// behavior. -// -// Repeated message fields must not be mapped to URL query parameters, -// because -// no client library can support such complicated mapping. -// -// If an API needs to use a JSON array for request or response body, it -// can map -// the request or response body to a repeated field. However, some -// gRPC -// Transcoding implementations may not support this feature. +// expansion. The path variables **must not** capture the leading "/" +// character. The reason is that the most common use case "{var}" does +// not capture the leading "/" character. For consistency, all path +// variables must share the same behavior. Repeated message fields must +// not be mapped to URL query parameters, because no client library can +// support such complicated mapping. 
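The service-config example above (selector example.v1.Messaging.GetMessage mapped to GET /v1/messages/{message_id}/{sub.subfield}) could be written with the Http and HttpRule types defined in this hunk roughly as follows; this is a sketch under the assumption that it compiles alongside the generated package.

// Sketch only: the HttpRule from the service-config example above,
// expressed with the Go types defined in this file.
func exampleGetMessageHTTP() *Http {
	return &Http{
		Rules: []*HttpRule{{
			// Selects the gRPC method this rule applies to.
			Selector: "example.v1.Messaging.GetMessage",
			// Maps it to GET /v1/messages/{message_id}/{sub.subfield}.
			Get: "/v1/messages/{message_id}/{sub.subfield}",
		}},
	}
}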
If an API needs to use a JSON array +// for request or response body, it can map the request or response body +// to a repeated field. However, some gRPC Transcoding implementations +// may not support this feature. type HttpRule struct { // AdditionalBindings: Additional HTTP bindings for the selector. Nested - // bindings must - // not contain an `additional_bindings` field themselves (that is, - // the nesting may only be one level deep). + // bindings must not contain an `additional_bindings` field themselves + // (that is, the nesting may only be one level deep). AdditionalBindings []*HttpRule `json:"additionalBindings,omitempty"` // AllowHalfDuplex: When this flag is set to true, HTTP requests will be - // allowed to invoke a - // half-duplex streaming method. + // allowed to invoke a half-duplex streaming method. AllowHalfDuplex bool `json:"allowHalfDuplex,omitempty"` // Body: The name of the request field whose value is mapped to the HTTP - // request - // body, or `*` for mapping all request fields not captured by the - // path - // pattern to the HTTP body, or omitted for not having any HTTP request - // body. - // - // NOTE: the referred field must be present at the top-level of the - // request - // message type. + // request body, or `*` for mapping all request fields not captured by + // the path pattern to the HTTP body, or omitted for not having any HTTP + // request body. NOTE: the referred field must be present at the + // top-level of the request message type. Body string `json:"body,omitempty"` // Custom: The custom pattern is used for specifying an HTTP method that - // is not - // included in the `pattern` field, such as HEAD, or "*" to leave - // the - // HTTP method unspecified for this rule. The wild-card rule is - // useful - // for services that provide content to Web (HTML) clients. + // is not included in the `pattern` field, such as HEAD, or "*" to leave + // the HTTP method unspecified for this rule. The wild-card rule is + // useful for services that provide content to Web (HTML) clients. Custom *CustomHttpPattern `json:"custom,omitempty"` // Delete: Maps to HTTP DELETE. Used for deleting a resource. Delete string `json:"delete,omitempty"` - // Get: Maps to HTTP GET. Used for listing and getting information - // about + // Get: Maps to HTTP GET. Used for listing and getting information about // resources. Get string `json:"get,omitempty"` @@ -3065,19 +2418,13 @@ type HttpRule struct { Put string `json:"put,omitempty"` // ResponseBody: Optional. The name of the response field whose value is - // mapped to the HTTP - // response body. When omitted, the entire response message will be - // used - // as the HTTP response body. - // - // NOTE: The referred field must be present at the top-level of the - // response - // message type. + // mapped to the HTTP response body. When omitted, the entire response + // message will be used as the HTTP response body. NOTE: The referred + // field must be present at the top-level of the response message type. ResponseBody string `json:"responseBody,omitempty"` - // Selector: Selects a method to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects a method to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. 
"AdditionalBindings") @@ -3133,6 +2480,72 @@ func (s *ImportAdminOverridesResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ImportAdminQuotaPoliciesResponse: Response message for +// ImportAdminQuotaPolicies +type ImportAdminQuotaPoliciesResponse struct { + // Policies: The policies that were created from the imported data. + Policies []*AdminQuotaPolicy `json:"policies,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Policies") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Policies") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ImportAdminQuotaPoliciesResponse) MarshalJSON() ([]byte, error) { + type NoMethod ImportAdminQuotaPoliciesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ImportConsumerOverridesRequest: Request message for +// ImportConsumerOverrides +type ImportConsumerOverridesRequest struct { + // Force: Whether to force the creation of the quota overrides. If + // creating an override would cause the effective quota for the consumer + // to decrease by more than 10 percent, the call is rejected, as a + // safety measure to avoid accidentally decreasing quota too quickly. + // Setting the force parameter to true ignores this restriction. + Force bool `json:"force,omitempty"` + + // InlineSource: The import data is specified in the request message + // itself + InlineSource *OverrideInlineSource `json:"inlineSource,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Force") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Force") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ImportConsumerOverridesRequest) MarshalJSON() ([]byte, error) { + type NoMethod ImportConsumerOverridesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ImportConsumerOverridesResponse: Response message for // ImportConsumerOverrides type ImportConsumerOverridesResponse struct { @@ -3171,16 +2584,11 @@ type JwtLocation struct { Query string `json:"query,omitempty"` // ValuePrefix: The value prefix. The value format is - // "value_prefix{token}" - // Only applies to "in" header type. Must be empty for "in" query - // type. - // If not empty, the header value has to match (case sensitive) this - // prefix. - // If not matched, JWT will not be extracted. If matched, JWT will - // be - // extracted after the prefix is removed. - // - // For example, for "Authorization: Bearer {JWT}", + // "value_prefix{token}" Only applies to "in" header type. Must be empty + // for "in" query type. If not empty, the header value has to match + // (case sensitive) this prefix. If not matched, JWT will not be + // extracted. If matched, JWT will be extracted after the prefix is + // removed. For example, for "Authorization: Bearer {JWT}", // value_prefix="Bearer " with a space at the end. ValuePrefix string `json:"valuePrefix,omitempty"` @@ -3249,8 +2657,7 @@ func (s *LabelDescriptor) MarshalJSON() ([]byte, error) { // ListAdminOverridesResponse: Response message for ListAdminOverrides. type ListAdminOverridesResponse struct { // NextPageToken: Token identifying which result to start with; returned - // by a previous list - // call. + // by a previous list call. NextPageToken string `json:"nextPageToken,omitempty"` // Overrides: Admin overrides on this limit. @@ -3287,8 +2694,7 @@ func (s *ListAdminOverridesResponse) MarshalJSON() ([]byte, error) { // ListConsumerOverrides. type ListConsumerOverridesResponse struct { // NextPageToken: Token identifying which result to start with; returned - // by a previous list - // call. + // by a previous list call. NextPageToken string `json:"nextPageToken,omitempty"` // Overrides: Consumer overrides on this limit. @@ -3328,8 +2734,7 @@ type ListConsumerQuotaMetricsResponse struct { Metrics []*ConsumerQuotaMetric `json:"metrics,omitempty"` // NextPageToken: Token identifying which result to start with; returned - // by a previous list - // call. + // by a previous list call. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -3399,8 +2804,7 @@ func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { // ListServicesResponse: Response message for the `ListServices` method. type ListServicesResponse struct { // NextPageToken: Token that can be passed to `ListServices` to resume a - // paginated - // query. + // paginated query. NextPageToken string `json:"nextPageToken,omitempty"` // Services: The available services for the requested project. @@ -3433,39 +2837,29 @@ func (s *ListServicesResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// LogDescriptor: A description of a log type. Example in YAML format: -// -// - name: library.googleapis.com/activity_history -// description: The history of borrowing and returning library -// items. -// display_name: Activity -// labels: -// - key: /customer_id -// description: Identifier of a library customer +// LogDescriptor: A description of a log type. 
Example in YAML format: - +// name: library.googleapis.com/activity_history description: The +// history of borrowing and returning library items. display_name: +// Activity labels: - key: /customer_id description: Identifier of a +// library customer type LogDescriptor struct { // Description: A human-readable description of this log. This - // information appears in - // the documentation and can contain details. + // information appears in the documentation and can contain details. Description string `json:"description,omitempty"` // DisplayName: The human-readable name for this log. This information - // appears on - // the user interface and should be concise. + // appears on the user interface and should be concise. DisplayName string `json:"displayName,omitempty"` // Labels: The set of labels that are available to describe a specific - // log entry. - // Runtime requests that contain labels not specified here - // are - // considered invalid. + // log entry. Runtime requests that contain labels not specified here + // are considered invalid. Labels []*LabelDescriptor `json:"labels,omitempty"` // Name: The name of the log. It must be less than 512 characters long - // and can - // include the following characters: upper- and lower-case - // alphanumeric - // characters [A-Za-z0-9], and punctuation characters including - // slash, underscore, hyphen, period [/_-.]. + // and can include the following characters: upper- and lower-case + // alphanumeric characters [A-Za-z0-9], and punctuation characters + // including slash, underscore, hyphen, period [/_-.]. Name string `json:"name,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -3491,54 +2885,30 @@ func (s *LogDescriptor) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Logging: Logging configuration of the service. -// -// The following example shows how to configure logs to be sent to -// the -// producer and consumer projects. In the example, the -// `activity_history` -// log is sent to both the producer and consumer projects, whereas -// the -// `purchase_history` log is only sent to the producer project. -// -// monitored_resources: -// - type: library.googleapis.com/branch -// labels: -// - key: /city -// description: The city where the library branch is located -// in. -// - key: /name -// description: The name of the branch. -// logs: -// - name: activity_history -// labels: -// - key: /customer_id -// - name: purchase_history -// logging: -// producer_destinations: -// - monitored_resource: library.googleapis.com/branch -// logs: -// - activity_history -// - purchase_history -// consumer_destinations: -// - monitored_resource: library.googleapis.com/branch -// logs: -// - activity_history +// Logging: Logging configuration of the service. The following example +// shows how to configure logs to be sent to the producer and consumer +// projects. In the example, the `activity_history` log is sent to both +// the producer and consumer projects, whereas the `purchase_history` +// log is only sent to the producer project. monitored_resources: - +// type: library.googleapis.com/branch labels: - key: /city description: +// The city where the library branch is located in. - key: /name +// description: The name of the branch. 
logs: - name: activity_history +// labels: - key: /customer_id - name: purchase_history logging: +// producer_destinations: - monitored_resource: +// library.googleapis.com/branch logs: - activity_history - +// purchase_history consumer_destinations: - monitored_resource: +// library.googleapis.com/branch logs: - activity_history type Logging struct { // ConsumerDestinations: Logging configurations for sending logs to the - // consumer project. - // There can be multiple consumer destinations, each one must have - // a - // different monitored resource type. A log can be used in at most - // one consumer destination. + // consumer project. There can be multiple consumer destinations, each + // one must have a different monitored resource type. A log can be used + // in at most one consumer destination. ConsumerDestinations []*LoggingDestination `json:"consumerDestinations,omitempty"` // ProducerDestinations: Logging configurations for sending logs to the - // producer project. - // There can be multiple producer destinations, each one must have - // a - // different monitored resource type. A log can be used in at most - // one producer destination. + // producer project. There can be multiple producer destinations, each + // one must have a different monitored resource type. A log can be used + // in at most one producer destination. ProducerDestinations []*LoggingDestination `json:"producerDestinations,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -3567,19 +2937,16 @@ func (s *Logging) MarshalJSON() ([]byte, error) { } // LoggingDestination: Configuration of a specific logging destination -// (the producer project -// or the consumer project). +// (the producer project or the consumer project). type LoggingDestination struct { // Logs: Names of the logs to be sent to this destination. Each name - // must - // be defined in the Service.logs section. If the log name is - // not a domain scoped name, it will be automatically prefixed with - // the service name followed by "/". + // must be defined in the Service.logs section. If the log name is not a + // domain scoped name, it will be automatically prefixed with the + // service name followed by "/". Logs []string `json:"logs,omitempty"` // MonitoredResource: The monitored resource type. The type must be - // defined in the - // Service.monitored_resources section. + // defined in the Service.monitored_resources section. MonitoredResource string `json:"monitoredResource,omitempty"` // ForceSendFields is a list of field names (e.g. "Logs") to @@ -3656,32 +3023,26 @@ func (s *Method) MarshalJSON() ([]byte, error) { } // MetricDescriptor: Defines a metric type and its schema. Once a metric -// descriptor is created, -// deleting or altering it stops data collection and makes the metric -// type's -// existing data unusable. +// descriptor is created, deleting or altering it stops data collection +// and makes the metric type's existing data unusable. type MetricDescriptor struct { // Description: A detailed description of the metric, which can be used // in documentation. Description string `json:"description,omitempty"` // DisplayName: A concise name for the metric, which can be displayed in - // user interfaces. - // Use sentence case without an ending period, for example "Request - // count". - // This field is optional but it is recommended to be set for any - // metrics - // associated with user-visible concepts, such as Quota. + // user interfaces. 
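The logging example above (activity_history sent to both projects, purchase_history only to the producer project) maps onto the Logging and LoggingDestination types defined in this hunk roughly as follows; a sketch only, assuming the surrounding generated package.

// Sketch only: the branch-library logging example from the comment above.
func exampleBranchLogging() *Logging {
	return &Logging{
		ProducerDestinations: []*LoggingDestination{{
			// Must match a type from Service.monitored_resources.
			MonitoredResource: "library.googleapis.com/branch",
			Logs:              []string{"activity_history", "purchase_history"},
		}},
		ConsumerDestinations: []*LoggingDestination{{
			MonitoredResource: "library.googleapis.com/branch",
			Logs:              []string{"activity_history"},
		}},
	}
}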
Use sentence case without an ending period, for + // example "Request count". This field is optional but it is recommended + // to be set for any metrics associated with user-visible concepts, such + // as Quota. DisplayName string `json:"displayName,omitempty"` - // Labels: The set of labels that can be used to describe a - // specific - // instance of this metric type. For example, - // the - // `appengine.googleapis.com/http/server/response_latencies` metric - // type has a label for the HTTP response code, `response_code`, so - // you can look at latencies for successful responses or just - // for responses that failed. + // Labels: The set of labels that can be used to describe a specific + // instance of this metric type. For example, the + // `appengine.googleapis.com/http/server/response_latencies` metric type + // has a label for the HTTP response code, `response_code`, so you can + // look at latencies for successful responses or just for responses that + // failed. Labels []*LabelDescriptor `json:"labels,omitempty"` // LaunchStage: Optional. The launch stage of the metric definition. @@ -3693,50 +3054,31 @@ type MetricDescriptor struct { // "PRELAUNCH" - Prelaunch features are hidden from users and are only // visible internally. // "EARLY_ACCESS" - Early Access features are limited to a closed - // group of testers. To use - // these features, you must sign up in advance and sign a Trusted - // Tester - // agreement (which includes confidentiality provisions). These features - // may - // be unstable, changed in backward-incompatible ways, and are - // not - // guaranteed to be released. + // group of testers. To use these features, you must sign up in advance + // and sign a Trusted Tester agreement (which includes confidentiality + // provisions). These features may be unstable, changed in + // backward-incompatible ways, and are not guaranteed to be released. // "ALPHA" - Alpha is a limited availability test for releases before - // they are cleared - // for widespread use. By Alpha, all significant design issues are - // resolved - // and we are in the process of verifying functionality. Alpha - // customers - // need to apply for access, agree to applicable terms, and have - // their - // projects whitelisted. Alpha releases don’t have to be feature - // complete, - // no SLAs are provided, and there are no technical support obligations, - // but - // they will be far enough along that customers can actually use them - // in - // test environments or for limited-use tests -- just like they would - // in - // normal production cases. + // they are cleared for widespread use. By Alpha, all significant design + // issues are resolved and we are in the process of verifying + // functionality. Alpha customers need to apply for access, agree to + // applicable terms, and have their projects whitelisted. Alpha releases + // don’t have to be feature complete, no SLAs are provided, and there + // are no technical support obligations, but they will be far enough + // along that customers can actually use them in test environments or + // for limited-use tests -- just like they would in normal production + // cases. // "BETA" - Beta is the point at which we are ready to open a release - // for any - // customer to use. There are no SLA or technical support obligations in - // a - // Beta release. Products will be complete from a feature perspective, - // but - // may have some open outstanding issues. Beta releases are suitable - // for - // limited production use cases. 
+ // for any customer to use. There are no SLA or technical support + // obligations in a Beta release. Products will be complete from a + // feature perspective, but may have some open outstanding issues. Beta + // releases are suitable for limited production use cases. // "GA" - GA features are open to all developers and are considered - // stable and - // fully qualified for production use. + // stable and fully qualified for production use. // "DEPRECATED" - Deprecated features are scheduled to be shut down - // and removed. For more - // information, see the “Deprecation Policy” section of our [Terms - // of - // Service](https://cloud.google.com/terms/) - // and the [Google Cloud Platform Subject to the - // Deprecation + // and removed. For more information, see the “Deprecation Policy” + // section of our [Terms of Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation // Policy](https://cloud.google.com/terms/deprecation) documentation. LaunchStage string `json:"launchStage,omitempty"` @@ -3745,188 +3087,106 @@ type MetricDescriptor struct { Metadata *MetricDescriptorMetadata `json:"metadata,omitempty"` // MetricKind: Whether the metric records instantaneous values, changes - // to a value, etc. - // Some combinations of `metric_kind` and `value_type` might not be - // supported. + // to a value, etc. Some combinations of `metric_kind` and `value_type` + // might not be supported. // // Possible values: // "METRIC_KIND_UNSPECIFIED" - Do not use this default value. // "GAUGE" - An instantaneous measurement of a value. // "DELTA" - The change in a value during a time interval. - // "CUMULATIVE" - A value accumulated over a time interval. - // Cumulative - // measurements in a time series should have the same start time - // and increasing end times, until an event resets the cumulative - // value to zero and sets a new start time for the following - // points. + // "CUMULATIVE" - A value accumulated over a time interval. Cumulative + // measurements in a time series should have the same start time and + // increasing end times, until an event resets the cumulative value to + // zero and sets a new start time for the following points. MetricKind string `json:"metricKind,omitempty"` - // MonitoredResourceTypes: Read-only. If present, then a time - // series, which is identified partially by - // a metric type and a MonitoredResourceDescriptor, that is - // associated - // with this metric type can only be associated with one of the - // monitored - // resource types listed here. + // MonitoredResourceTypes: Read-only. If present, then a time series, + // which is identified partially by a metric type and a + // MonitoredResourceDescriptor, that is associated with this metric type + // can only be associated with one of the monitored resource types + // listed here. MonitoredResourceTypes []string `json:"monitoredResourceTypes,omitempty"` // Name: The resource name of the metric descriptor. Name string `json:"name,omitempty"` - // Type: The metric type, including its DNS name prefix. The type is - // not - // URL-encoded. All user-defined metric types have the DNS - // name - // `custom.googleapis.com` or `external.googleapis.com`. Metric types - // should - // use a natural hierarchical grouping. For example: - // - // "custom.googleapis.com/invoice/paid/amount" - // "external.googleapis.com/prometheus/up" - // "appengine.googleapis.com/http/server/response_latencies" + // Type: The metric type, including its DNS name prefix. 
The type is not + // URL-encoded. All user-defined metric types have the DNS name + // `custom.googleapis.com` or `external.googleapis.com`. Metric types + // should use a natural hierarchical grouping. For example: + // "custom.googleapis.com/invoice/paid/amount" + // "external.googleapis.com/prometheus/up" + // "appengine.googleapis.com/http/server/response_latencies" Type string `json:"type,omitempty"` // Unit: The units in which the metric value is reported. It is only - // applicable - // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The - // `unit` - // defines the representation of the stored metric values. - // - // Different systems may scale the values to be more easily displayed - // (so a - // value of `0.02KBy` _might_ be displayed as `20By`, and a value - // of - // `3523KBy` _might_ be displayed as `3.5MBy`). However, if the `unit` - // is - // `KBy`, then the value of the metric is always in thousands of bytes, - // no - // matter how it may be displayed.. - // - // If you want a custom metric to record the exact number of CPU-seconds - // used - // by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` - // is - // `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses - // 12,005 - // CPU-seconds, then the value is written as `12005`. - // - // Alternatively, if you want a custom metric to record data in a - // more + // applicable if the `value_type` is `INT64`, `DOUBLE`, or + // `DISTRIBUTION`. The `unit` defines the representation of the stored + // metric values. Different systems may scale the values to be more + // easily displayed (so a value of `0.02KBy` _might_ be displayed as + // `20By`, and a value of `3523KBy` _might_ be displayed as `3.5MBy`). + // However, if the `unit` is `KBy`, then the value of the metric is + // always in thousands of bytes, no matter how it may be displayed.. If + // you want a custom metric to record the exact number of CPU-seconds + // used by a job, you can create an `INT64 CUMULATIVE` metric whose + // `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the + // job uses 12,005 CPU-seconds, then the value is written as `12005`. + // Alternatively, if you want a custom metric to record data in a more // granular way, you can create a `DOUBLE CUMULATIVE` metric whose - // `unit` is - // `ks{CPU}`, and then write the value `12.005` (which is - // `12005/1000`), - // or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). - // - // The supported units are a subset of [The Unified Code for Units - // of - // Measure](http://unitsofmeasure.org/ucum.html) standard: - // - // **Basic units (UNIT)** - // - // * `bit` bit - // * `By` byte - // * `s` second - // * `min` minute - // * `h` hour - // * `d` day - // - // **Prefixes (PREFIX)** - // - // * `k` kilo (10^3) - // * `M` mega (10^6) - // * `G` giga (10^9) - // * `T` tera (10^12) - // * `P` peta (10^15) - // * `E` exa (10^18) - // * `Z` zetta (10^21) - // * `Y` yotta (10^24) - // - // * `m` milli (10^-3) - // * `u` micro (10^-6) - // * `n` nano (10^-9) - // * `p` pico (10^-12) - // * `f` femto (10^-15) - // * `a` atto (10^-18) - // * `z` zepto (10^-21) - // * `y` yocto (10^-24) - // - // * `Ki` kibi (2^10) - // * `Mi` mebi (2^20) - // * `Gi` gibi (2^30) - // * `Ti` tebi (2^40) - // * `Pi` pebi (2^50) - // - // **Grammar** - // - // The grammar also includes these connectors: - // - // * `/` division or ratio (as an infix operator). 
For examples, - // `kBy/{email}` or `MiBy/10ms` (although you should almost - // never - // have `/s` in a metric `unit`; rates should always be - // computed at - // query time from the underlying cumulative or delta value). - // * `.` multiplication or composition (as an infix operator). For - // examples, `GBy.d` or `k{watt}.h`. - // - // The grammar for a unit is as follows: - // - // Expression = Component { "." Component } { "/" Component } ; - // - // Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] - // | Annotation - // | "1" - // ; - // - // Annotation = "{" NAME "}" ; - // - // Notes: - // - // * `Annotation` is just a comment if it follows a `UNIT`. If the - // annotation - // is used alone, then the unit is equivalent to `1`. For examples, - // `{request}/s == 1/s`, `By{transmitted}/s == By/s`. - // * `NAME` is a sequence of non-blank printable ASCII characters not - // containing `{` or `}`. - // * `1` represents a unitary [dimensionless - // unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, - // such - // as in `1/s`. It is typically used when none of the basic units - // are - // appropriate. For example, "new users per day" can be represented - // as - // `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 - // new - // users). Alternatively, "thousands of page views per day" would be - // represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a - // metric - // value of `5.3` would mean "5300 page views per day"). - // * `%` represents dimensionless value of 1/100, and annotates values - // giving - // a percentage (so the metric values are typically in the range of - // 0..100, - // and a metric value `3` means "3 percent"). - // * `10^2.%` indicates a metric contains a ratio, typically in the - // range - // 0..1, that will be multiplied by 100 and displayed as a - // percentage - // (so a metric value `0.03` means "3 percent"). + // `unit` is `ks{CPU}`, and then write the value `12.005` (which is + // `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is + // `12005/1024`). The supported units are a subset of [The Unified Code + // for Units of Measure](http://unitsofmeasure.org/ucum.html) standard: + // **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` + // minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** + // * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera + // (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * + // `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano + // (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) + // * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` + // mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) + // **Grammar** The grammar also includes these connectors: * `/` + // division or ratio (as an infix operator). For examples, `kBy/{email}` + // or `MiBy/10ms` (although you should almost never have `/s` in a + // metric `unit`; rates should always be computed at query time from the + // underlying cumulative or delta value). * `.` multiplication or + // composition (as an infix operator). For examples, `GBy.d` or + // `k{watt}.h`. The grammar for a unit is as follows: Expression = + // Component { "." Component } { "/" Component } ; Component = ( [ + // PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation + // = "{" NAME "}" ; Notes: * `Annotation` is just a comment if it + // follows a `UNIT`. 
If the annotation is used alone, then the unit is + // equivalent to `1`. For examples, `{request}/s == 1/s`, + // `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank + // printable ASCII characters not containing `{` or `}`. * `1` + // represents a unitary [dimensionless + // unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, + // such as in `1/s`. It is typically used when none of the basic units + // are appropriate. For example, "new users per day" can be represented + // as `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new + // users). Alternatively, "thousands of page views per day" would be + // represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric + // value of `5.3` would mean "5300 page views per day"). * `%` + // represents dimensionless value of 1/100, and annotates values giving + // a percentage (so the metric values are typically in the range of + // 0..100, and a metric value `3` means "3 percent"). * `10^2.%` + // indicates a metric contains a ratio, typically in the range 0..1, + // that will be multiplied by 100 and displayed as a percentage (so a + // metric value `0.03` means "3 percent"). Unit string `json:"unit,omitempty"` // ValueType: Whether the measurement is an integer, a floating-point - // number, etc. - // Some combinations of `metric_kind` and `value_type` might not be - // supported. + // number, etc. Some combinations of `metric_kind` and `value_type` + // might not be supported. // // Possible values: // "VALUE_TYPE_UNSPECIFIED" - Do not use this default value. - // "BOOL" - The value is a boolean. - // This value type can be used only if the metric kind is `GAUGE`. + // "BOOL" - The value is a boolean. This value type can be used only + // if the metric kind is `GAUGE`. // "INT64" - The value is a signed 64-bit integer. // "DOUBLE" - The value is a double precision floating point number. - // "STRING" - The value is a text string. - // This value type can be used only if the metric kind is `GAUGE`. + // "STRING" - The value is a text string. This value type can be used + // only if the metric kind is `GAUGE`. // "DISTRIBUTION" - The value is a `Distribution`. // "MONEY" - The value is money. ValueType string `json:"valueType,omitempty"` @@ -3958,10 +3218,8 @@ func (s *MetricDescriptor) MarshalJSON() ([]byte, error) { // guide the usage of a metric. type MetricDescriptorMetadata struct { // IngestDelay: The delay of data points caused by ingestion. Data - // points older than this - // age are guaranteed to be ingested and available to be read, - // excluding - // data loss due to errors. + // points older than this age are guaranteed to be ingested and + // available to be read, excluding data loss due to errors. IngestDelay string `json:"ingestDelay,omitempty"` // LaunchStage: Deprecated. Must use the MetricDescriptor.launch_stage @@ -3974,60 +3232,38 @@ type MetricDescriptorMetadata struct { // "PRELAUNCH" - Prelaunch features are hidden from users and are only // visible internally. // "EARLY_ACCESS" - Early Access features are limited to a closed - // group of testers. To use - // these features, you must sign up in advance and sign a Trusted - // Tester - // agreement (which includes confidentiality provisions). These features - // may - // be unstable, changed in backward-incompatible ways, and are - // not - // guaranteed to be released. + // group of testers. 
To use these features, you must sign up in advance + // and sign a Trusted Tester agreement (which includes confidentiality + // provisions). These features may be unstable, changed in + // backward-incompatible ways, and are not guaranteed to be released. // "ALPHA" - Alpha is a limited availability test for releases before - // they are cleared - // for widespread use. By Alpha, all significant design issues are - // resolved - // and we are in the process of verifying functionality. Alpha - // customers - // need to apply for access, agree to applicable terms, and have - // their - // projects whitelisted. Alpha releases don’t have to be feature - // complete, - // no SLAs are provided, and there are no technical support obligations, - // but - // they will be far enough along that customers can actually use them - // in - // test environments or for limited-use tests -- just like they would - // in - // normal production cases. + // they are cleared for widespread use. By Alpha, all significant design + // issues are resolved and we are in the process of verifying + // functionality. Alpha customers need to apply for access, agree to + // applicable terms, and have their projects whitelisted. Alpha releases + // don’t have to be feature complete, no SLAs are provided, and there + // are no technical support obligations, but they will be far enough + // along that customers can actually use them in test environments or + // for limited-use tests -- just like they would in normal production + // cases. // "BETA" - Beta is the point at which we are ready to open a release - // for any - // customer to use. There are no SLA or technical support obligations in - // a - // Beta release. Products will be complete from a feature perspective, - // but - // may have some open outstanding issues. Beta releases are suitable - // for - // limited production use cases. + // for any customer to use. There are no SLA or technical support + // obligations in a Beta release. Products will be complete from a + // feature perspective, but may have some open outstanding issues. Beta + // releases are suitable for limited production use cases. // "GA" - GA features are open to all developers and are considered - // stable and - // fully qualified for production use. + // stable and fully qualified for production use. // "DEPRECATED" - Deprecated features are scheduled to be shut down - // and removed. For more - // information, see the “Deprecation Policy” section of our [Terms - // of - // Service](https://cloud.google.com/terms/) - // and the [Google Cloud Platform Subject to the - // Deprecation + // and removed. For more information, see the “Deprecation Policy” + // section of our [Terms of Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation // Policy](https://cloud.google.com/terms/deprecation) documentation. LaunchStage string `json:"launchStage,omitempty"` // SamplePeriod: The sampling period of metric data points. For metrics - // which are written - // periodically, consecutive data points are stored at this time - // interval, - // excluding data loss due to errors. Metrics with a higher granularity - // have - // a smaller sampling period. + // which are written periodically, consecutive data points are stored at + // this time interval, excluding data loss due to errors. Metrics with a + // higher granularity have a smaller sampling period. SamplePeriod string `json:"samplePeriod,omitempty"` // ForceSendFields is a list of field names (e.g. 
"IngestDelay") to @@ -4054,23 +3290,18 @@ func (s *MetricDescriptorMetadata) MarshalJSON() ([]byte, error) { } // MetricRule: Bind API methods to metrics. Binding a method to a metric -// causes that -// metric's configured quota behaviors to apply to the method call. +// causes that metric's configured quota behaviors to apply to the +// method call. type MetricRule struct { // MetricCosts: Metrics to update when the selected methods are called, - // and the associated - // cost applied to each metric. - // - // The key of the map is the metric name, and the values are the - // amount - // increased for the metric against which the quota limits are - // defined. - // The value must not be negative. + // and the associated cost applied to each metric. The key of the map is + // the metric name, and the values are the amount increased for the + // metric against which the quota limits are defined. The value must not + // be negative. MetricCosts map[string]string `json:"metricCosts,omitempty"` - // Selector: Selects the methods to which this rule applies. - // - // Refer to selector for syntax details. + // Selector: Selects the methods to which this rule applies. Refer to + // selector for syntax details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. "MetricCosts") to @@ -4097,96 +3328,44 @@ func (s *MetricRule) MarshalJSON() ([]byte, error) { } // Mixin: Declares an API Interface to be included in this interface. -// The including -// interface must redeclare all the methods from the included interface, -// but -// documentation and options are inherited as follows: -// -// - If after comment and whitespace stripping, the documentation -// string of the redeclared method is empty, it will be inherited -// from the original method. -// -// - Each annotation belonging to the service config (http, -// visibility) which is not set in the redeclared method will be -// inherited. -// -// - If an http annotation is inherited, the path pattern will be -// modified as follows. Any version prefix will be replaced by the -// version of the including interface plus the root path if -// specified. -// -// Example of a simple mixin: -// -// package google.acl.v1; -// service AccessControl { -// // Get the underlying ACL object. -// rpc GetAcl(GetAclRequest) returns (Acl) { -// option (google.api.http).get = "/v1/{resource=**}:getAcl"; -// } -// } -// -// package google.storage.v2; -// service Storage { -// // rpc GetAcl(GetAclRequest) returns (Acl); -// -// // Get a data record. -// rpc GetData(GetDataRequest) returns (Data) { -// option (google.api.http).get = "/v2/{resource=**}"; -// } -// } -// -// Example of a mixin configuration: -// -// apis: -// - name: google.storage.v2.Storage -// mixins: -// - name: google.acl.v1.AccessControl -// -// The mixin construct implies that all methods in `AccessControl` -// are -// also declared with same name and request/response types in -// `Storage`. A documentation generator or annotation processor will -// see the effective `Storage.GetAcl` method after -// inherting -// documentation and annotations as follows: -// -// service Storage { -// // Get the underlying ACL object. -// rpc GetAcl(GetAclRequest) returns (Acl) { -// option (google.api.http).get = "/v2/{resource=**}:getAcl"; -// } -// ... -// } -// -// Note how the version in the path pattern changed from `v1` to -// `v2`. 
-// -// If the `root` field in the mixin is specified, it should be -// a -// relative path under which inherited HTTP paths are placed. Example: -// -// apis: -// - name: google.storage.v2.Storage -// mixins: -// - name: google.acl.v1.AccessControl -// root: acls -// -// This implies the following inherited HTTP annotation: -// -// service Storage { -// // Get the underlying ACL object. -// rpc GetAcl(GetAclRequest) returns (Acl) { -// option (google.api.http).get = -// "/v2/acls/{resource=**}:getAcl"; -// } -// ... -// } +// The including interface must redeclare all the methods from the +// included interface, but documentation and options are inherited as +// follows: - If after comment and whitespace stripping, the +// documentation string of the redeclared method is empty, it will be +// inherited from the original method. - Each annotation belonging to +// the service config (http, visibility) which is not set in the +// redeclared method will be inherited. - If an http annotation is +// inherited, the path pattern will be modified as follows. Any version +// prefix will be replaced by the version of the including interface +// plus the root path if specified. Example of a simple mixin: package +// google.acl.v1; service AccessControl { // Get the underlying ACL +// object. rpc GetAcl(GetAclRequest) returns (Acl) { option +// (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package +// google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) +// returns (Acl); // Get a data record. rpc GetData(GetDataRequest) +// returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; +// } } Example of a mixin configuration: apis: - name: +// google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in `Storage`. +// A documentation generator or annotation processor will see the +// effective `Storage.GetAcl` method after inheriting documentation and +// annotations as follows: service Storage { // Get the underlying ACL +// object. rpc GetAcl(GetAclRequest) returns (Acl) { option +// (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how +// the version in the path pattern changed from `v1` to `v2`. If the +// `root` field in the mixin is specified, it should be a relative path +// under which inherited HTTP paths are placed. Example: apis: - name: +// google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl +// root: acls This implies the following inherited HTTP annotation: +// service Storage { // Get the underlying ACL object. rpc +// GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = +// "/v2/acls/{resource=**}:getAcl"; } ... } type Mixin struct { // Name: The fully qualified name of the interface which is included. Name string `json:"name,omitempty"` - // Root: If non-empty specifies a path under which inherited HTTP - // paths + // Root: If non-empty specifies a path under which inherited HTTP paths // are rooted. Root string `json:"root,omitempty"` @@ -4214,39 +3393,28 @@ func (s *Mixin) MarshalJSON() ([]byte, error) { } // MonitoredResourceDescriptor: An object that describes the schema of a -// MonitoredResource object using a -// type name and a set of labels. 
For example, the monitored -// resource -// descriptor for Google Compute Engine VM instances has a type -// of -// "gce_instance" and specifies the use of the labels "instance_id" -// and -// "zone" to identify particular VM instances. -// -// Different APIs can support different monitored resource types. APIs -// generally -// provide a `list` method that returns the monitored resource -// descriptors used -// by the API. +// MonitoredResource object using a type name and a set of labels. For +// example, the monitored resource descriptor for Google Compute Engine +// VM instances has a type of "gce_instance" and specifies the use of +// the labels "instance_id" and "zone" to identify particular VM +// instances. Different APIs can support different monitored resource +// types. APIs generally provide a `list` method that returns the +// monitored resource descriptors used by the API. type MonitoredResourceDescriptor struct { // Description: Optional. A detailed description of the monitored - // resource type that might - // be used in documentation. + // resource type that might be used in documentation. Description string `json:"description,omitempty"` // DisplayName: Optional. A concise name for the monitored resource type - // that might be - // displayed in user interfaces. It should be a Title Cased Noun - // Phrase, - // without any article or other determiners. For example, - // "Google Cloud SQL Database". + // that might be displayed in user interfaces. It should be a Title + // Cased Noun Phrase, without any article or other determiners. For + // example, "Google Cloud SQL Database". DisplayName string `json:"displayName,omitempty"` // Labels: Required. A set of labels used to describe instances of this - // monitored - // resource type. For example, an individual Google Cloud SQL database - // is - // identified by values for the labels "database_id" and "zone". + // monitored resource type. For example, an individual Google Cloud SQL + // database is identified by values for the labels "database_id" and + // "zone". Labels []*LabelDescriptor `json:"labels,omitempty"` // LaunchStage: Optional. The launch stage of the monitored resource @@ -4259,70 +3427,45 @@ type MonitoredResourceDescriptor struct { // "PRELAUNCH" - Prelaunch features are hidden from users and are only // visible internally. // "EARLY_ACCESS" - Early Access features are limited to a closed - // group of testers. To use - // these features, you must sign up in advance and sign a Trusted - // Tester - // agreement (which includes confidentiality provisions). These features - // may - // be unstable, changed in backward-incompatible ways, and are - // not - // guaranteed to be released. + // group of testers. To use these features, you must sign up in advance + // and sign a Trusted Tester agreement (which includes confidentiality + // provisions). These features may be unstable, changed in + // backward-incompatible ways, and are not guaranteed to be released. // "ALPHA" - Alpha is a limited availability test for releases before - // they are cleared - // for widespread use. By Alpha, all significant design issues are - // resolved - // and we are in the process of verifying functionality. Alpha - // customers - // need to apply for access, agree to applicable terms, and have - // their - // projects whitelisted. 
Alpha releases don’t have to be feature - // complete, - // no SLAs are provided, and there are no technical support obligations, - // but - // they will be far enough along that customers can actually use them - // in - // test environments or for limited-use tests -- just like they would - // in - // normal production cases. + // they are cleared for widespread use. By Alpha, all significant design + // issues are resolved and we are in the process of verifying + // functionality. Alpha customers need to apply for access, agree to + // applicable terms, and have their projects whitelisted. Alpha releases + // don’t have to be feature complete, no SLAs are provided, and there + // are no technical support obligations, but they will be far enough + // along that customers can actually use them in test environments or + // for limited-use tests -- just like they would in normal production + // cases. // "BETA" - Beta is the point at which we are ready to open a release - // for any - // customer to use. There are no SLA or technical support obligations in - // a - // Beta release. Products will be complete from a feature perspective, - // but - // may have some open outstanding issues. Beta releases are suitable - // for - // limited production use cases. + // for any customer to use. There are no SLA or technical support + // obligations in a Beta release. Products will be complete from a + // feature perspective, but may have some open outstanding issues. Beta + // releases are suitable for limited production use cases. // "GA" - GA features are open to all developers and are considered - // stable and - // fully qualified for production use. + // stable and fully qualified for production use. // "DEPRECATED" - Deprecated features are scheduled to be shut down - // and removed. For more - // information, see the “Deprecation Policy” section of our [Terms - // of - // Service](https://cloud.google.com/terms/) - // and the [Google Cloud Platform Subject to the - // Deprecation + // and removed. For more information, see the “Deprecation Policy” + // section of our [Terms of Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation // Policy](https://cloud.google.com/terms/deprecation) documentation. LaunchStage string `json:"launchStage,omitempty"` // Name: Optional. The resource name of the monitored resource // descriptor: - // "projects/{project_id}/monitoredResourceDescriptors/{type - // }" where - // {type} is the value of the `type` field in this object - // and - // {project_id} is a project ID that provides API-specific context - // for - // accessing the type. APIs that do not use project information can use - // the - // resource name format "monitoredResourceDescriptors/{type}". + // "projects/{project_id}/monitoredResourceDescriptors/{type}" where + // {type} is the value of the `type` field in this object and + // {project_id} is a project ID that provides API-specific context for + // accessing the type. APIs that do not use project information can use + // the resource name format "monitoredResourceDescriptors/{type}". Name string `json:"name,omitempty"` - // Type: Required. The monitored resource type. For example, the - // type + // Type: Required. The monitored resource type. For example, the type // "cloudsql_database" represents databases in Google Cloud SQL. - // The maximum length of this value is 256 characters. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Description") to @@ -4348,74 +3491,49 @@ func (s *MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Monitoring: Monitoring configuration of the service. -// -// The example below shows how to configure monitored resources and -// metrics -// for monitoring. In the example, a monitored resource and two metrics -// are +// Monitoring: Monitoring configuration of the service. The example +// below shows how to configure monitored resources and metrics for +// monitoring. In the example, a monitored resource and two metrics are // defined. The `library.googleapis.com/book/returned_count` metric is -// sent -// to both producer and consumer projects, whereas -// the -// `library.googleapis.com/book/overdue_count` metric is only sent to -// the -// consumer project. -// -// monitored_resources: -// - type: library.googleapis.com/branch -// labels: -// - key: /city -// description: The city where the library branch is located -// in. -// - key: /name -// description: The name of the branch. -// metrics: -// - name: library.googleapis.com/book/returned_count -// metric_kind: DELTA -// value_type: INT64 -// labels: -// - key: /customer_id -// - name: library.googleapis.com/book/overdue_count -// metric_kind: GAUGE -// value_type: INT64 -// labels: -// - key: /customer_id -// monitoring: -// producer_destinations: -// - monitored_resource: library.googleapis.com/branch -// metrics: -// - library.googleapis.com/book/returned_count -// consumer_destinations: -// - monitored_resource: library.googleapis.com/branch -// metrics: -// - library.googleapis.com/book/returned_count -// - library.googleapis.com/book/overdue_count +// sent to both producer and consumer projects, whereas the +// `library.googleapis.com/book/num_overdue` metric is only sent to the +// consumer project. monitored_resources: - type: +// library.googleapis.com/Branch display_name: "Library Branch" +// description: "A branch of a library." launch_stage: GA labels: - key: +// resource_container description: "The Cloud container (ie. project id) +// for the Branch." - key: location description: "The location of the +// library branch." - key: branch_id description: "The id of the +// branch." metrics: - name: library.googleapis.com/book/returned_count +// display_name: "Books Returned" description: "The count of books that +// have been returned." launch_stage: GA metric_kind: DELTA value_type: +// INT64 unit: "1" labels: - key: customer_id description: "The id of +// the customer." - name: library.googleapis.com/book/num_overdue +// display_name: "Books Overdue" description: "The current number of +// overdue books." launch_stage: GA metric_kind: GAUGE value_type: INT64 +// unit: "1" labels: - key: customer_id description: "The id of the +// customer." monitoring: producer_destinations: - monitored_resource: +// library.googleapis.com/Branch metrics: - +// library.googleapis.com/book/returned_count consumer_destinations: - +// monitored_resource: library.googleapis.com/Branch metrics: - +// library.googleapis.com/book/returned_count - +// library.googleapis.com/book/num_overdue type Monitoring struct { // ConsumerDestinations: Monitoring configurations for sending metrics - // to the consumer project. - // There can be multiple consumer destinations. 
A monitored resouce type - // may - // appear in multiple monitoring destinations if different aggregations - // are - // needed for different sets of metrics associated with that - // monitored - // resource type. A monitored resource and metric pair may only be used - // once - // in the Monitoring configuration. + // to the consumer project. There can be multiple consumer destinations. + // A monitored resource type may appear in multiple monitoring + // destinations if different aggregations are needed for different sets + // of metrics associated with that monitored resource type. A monitored + // resource and metric pair may only be used once in the Monitoring + // configuration. ConsumerDestinations []*MonitoringDestination `json:"consumerDestinations,omitempty"` // ProducerDestinations: Monitoring configurations for sending metrics - // to the producer project. - // There can be multiple producer destinations. A monitored resouce type - // may - // appear in multiple monitoring destinations if different aggregations - // are - // needed for different sets of metrics associated with that - // monitored - // resource type. A monitored resource and metric pair may only be used - // once - // in the Monitoring configuration. + // to the producer project. There can be multiple producer destinations. + // A monitored resource type may appear in multiple monitoring + // destinations if different aggregations are needed for different sets + // of metrics associated with that monitored resource type. A monitored + // resource and metric pair may only be used once in the Monitoring + // configuration. ProducerDestinations []*MonitoringDestination `json:"producerDestinations,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -4444,17 +3562,14 @@ func (s *Monitoring) MarshalJSON() ([]byte, error) { } // MonitoringDestination: Configuration of a specific monitoring -// destination (the producer project -// or the consumer project). +// destination (the producer project or the consumer project). type MonitoringDestination struct { // Metrics: Types of the metrics to report to this monitoring - // destination. - // Each type must be defined in Service.metrics section. + // destination. Each type must be defined in Service.metrics section. Metrics []string `json:"metrics,omitempty"` // MonitoredResource: The monitored resource type. The type must be - // defined in - // Service.monitored_resources section. + // defined in Service.monitored_resources section. MonitoredResource string `json:"monitoredResource,omitempty"` // ForceSendFields is a list of field names (e.g. "Metrics") to @@ -4481,43 +3596,26 @@ func (s *MonitoringDestination) MarshalJSON() ([]byte, error) { } // OAuthRequirements: OAuth scopes are a way to define data and -// permissions on data. For example, -// there are scopes defined for "Read-only access to Google Calendar" -// and -// "Access to Cloud Platform". Users can consent to a scope for an -// application, -// giving it permission to access that data on their behalf. -// -// OAuth scope specifications should be fairly coarse grained; a user -// will need -// to see and understand the text description of what your scope -// means. -// -// In most cases: use one or at most two OAuth scopes for an entire -// family of +// permissions on data. For example, there are scopes defined for +// "Read-only access to Google Calendar" and "Access to Cloud Platform". 
+// Users can consent to a scope for an application, giving it permission +// to access that data on their behalf. OAuth scope specifications +// should be fairly coarse grained; a user will need to see and +// understand the text description of what your scope means. In most +// cases: use one or at most two OAuth scopes for an entire family of // products. If your product has multiple APIs, you should probably be -// sharing -// the OAuth scope across all of those APIs. -// -// When you need finer grained OAuth consent screens: talk with your -// product -// management about how developers will use them in practice. -// -// Please note that even though each of the canonical scopes is enough -// for a -// request to be accepted and passed to the backend, a request can still -// fail -// due to the backend requiring additional scopes or permissions. +// sharing the OAuth scope across all of those APIs. When you need finer +// grained OAuth consent screens: talk with your product management +// about how developers will use them in practice. Please note that even +// though each of the canonical scopes is enough for a request to be +// accepted and passed to the backend, a request can still fail due to +// the backend requiring additional scopes or permissions. type OAuthRequirements struct { // CanonicalScopes: The list of publicly documented OAuth scopes that - // are allowed access. An - // OAuth token containing any of these scopes will be - // accepted. - // - // Example: - // - // canonical_scopes: https://www.googleapis.com/auth/calendar, - // https://www.googleapis.com/auth/calendar.read + // are allowed access. An OAuth token containing any of these scopes + // will be accepted. Example: canonical_scopes: + // https://www.googleapis.com/auth/calendar, + // https://www.googleapis.com/auth/calendar.read CanonicalScopes string `json:"canonicalScopes,omitempty"` // ForceSendFields is a list of field names (e.g. "CanonicalScopes") to @@ -4545,52 +3643,38 @@ func (s *OAuthRequirements) MarshalJSON() ([]byte, error) { } // Operation: This resource represents a long-running operation that is -// the result of a -// network API call. +// the result of a network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in - // progress. - // If `true`, the operation is completed, and either `error` or - // `response` is - // available. + // progress. If `true`, the operation is completed, and either `error` + // or `response` is available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. Error *Status `json:"error,omitempty"` - // Metadata: Service-specific metadata associated with the operation. - // It typically - // contains progress information and common metadata such as create - // time. - // Some services might not provide such metadata. Any method that - // returns a - // long-running operation should document the metadata type, if any. + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same - // service that - // originally returns it. 
If you use the default HTTP mapping, - // the - // `name` should be a resource name ending with + // service that originally returns it. If you use the default HTTP + // mapping, the `name` should be a resource name ending with // `operations/{unique_id}`. Name string `json:"name,omitempty"` - // Response: The normal response of the operation in case of success. - // If the original - // method returns no data on success, such as `Delete`, the response - // is - // `google.protobuf.Empty`. If the original method is - // standard - // `Get`/`Create`/`Update`, the response should be the resource. For - // other - // methods, the response should have the type `XxxResponse`, where - // `Xxx` - // is the original method name. For example, if the original method - // name - // is `TakeSnapshot()`, the inferred response type - // is - // `TakeSnapshotResponse`. + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as `Delete`, the + // response is `google.protobuf.Empty`. If the original method is + // standard `Get`/`Create`/`Update`, the response should be the + // resource. For other methods, the response should have the type + // `XxxResponse`, where `Xxx` is the original method name. For example, + // if the original method name is `TakeSnapshot()`, the inferred + // response type is `TakeSnapshotResponse`. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -4624,8 +3708,7 @@ func (s *Operation) MarshalJSON() ([]byte, error) { // services operation. type OperationMetadata struct { // ResourceNames: The full name of the resources that this operation is - // directly - // associated with. + // directly associated with. ResourceNames []string `json:"resourceNames,omitempty"` // ForceSendFields is a list of field names (e.g. "ResourceNames") to @@ -4652,25 +3735,19 @@ func (s *OperationMetadata) MarshalJSON() ([]byte, error) { } // Option: A protocol buffer option, which can be attached to a message, -// field, -// enumeration, etc. +// field, enumeration, etc. type Option struct { // Name: The option's name. For protobuf built-in options (options - // defined in - // descriptor.proto), this is the short name. For example, - // "map_entry". - // For custom options, it should be the fully-qualified name. For - // example, - // "google.api.http". + // defined in descriptor.proto), this is the short name. For example, + // "map_entry". For custom options, it should be the fully-qualified + // name. For example, "google.api.http". Name string `json:"name,omitempty"` // Value: The option's value packed in an Any message. If the value is a - // primitive, - // the corresponding wrapper type defined in - // google/protobuf/wrappers.proto - // should be used. If the value is an enum, it should be stored as an - // int32 - // value using the google.protobuf.Int32Value type. + // primitive, the corresponding wrapper type defined in + // google/protobuf/wrappers.proto should be used. If the value is an + // enum, it should be stored as an int32 value using the + // google.protobuf.Int32Value type. Value googleapi.RawMessage `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Name") to @@ -4696,40 +3773,56 @@ func (s *Option) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// OverrideInlineSource: Import data embedded in the request message +type OverrideInlineSource struct { + // Overrides: The overrides to create. Each override must have a value + // for 'metric' and 'unit', to specify which metric and which limit the + // override should be applied to. The 'name' field of the override does + // not need to be set; it is ignored. + Overrides []*QuotaOverride `json:"overrides,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Overrides") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Overrides") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OverrideInlineSource) MarshalJSON() ([]byte, error) { + type NoMethod OverrideInlineSource + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Page: Represents a documentation page. A page can contain subpages to -// represent -// nested documentation set structure. +// represent nested documentation set structure. type Page struct { - // Content: The Markdown content of the page. You can use (== - // include {path} - // ==) to include content from a Markdown file. + // Content: The Markdown content of the page. You can use (== include + // {path} ==) to include content from a Markdown file. Content string `json:"content,omitempty"` // Name: The name of the page. It will be used as an identity of the - // page to - // generate URI of the page, text of the link to this page in - // navigation, - // etc. The full page name (start from the root page name to this - // page - // concatenated with `.`) can be used as reference to the page in - // your - // documentation. For example: - //
pages:
-	// - name: Tutorial
-	//   content: (== include tutorial.md ==)
-	//   subpages:
-	//   - name: Java
-	//     content: (== include tutorial_java.md
-	// ==)
-	// 
- // You can reference `Java` page using Markdown reference link - // syntax: - // `Java`. + // page to generate URI of the page, text of the link to this page in + // navigation, etc. The full page name (start from the root page name to + // this page concatenated with `.`) can be used as reference to the page + // in your documentation. For example: pages: - name: Tutorial content: + // (== include tutorial.md ==) subpages: - name: Java content: (== + // include tutorial_java.md ==) You can reference `Java` page using + // Markdown reference link syntax: `Java`. Name string `json:"name,omitempty"` // Subpages: Subpages of this page. The order of subpages specified here - // will be - // honored in the generated docset. + // will be honored in the generated docset. Subpages []*Page `json:"subpages,omitempty"` // ForceSendFields is a list of field names (e.g. "Content") to @@ -4756,67 +3849,33 @@ func (s *Page) MarshalJSON() ([]byte, error) { } // Quota: Quota configuration helps to achieve fairness and budgeting in -// service -// usage. -// -// The metric based quota configuration works this way: -// - The service configuration defines a set of metrics. -// - For API calls, the quota.metric_rules maps methods to metrics with -// corresponding costs. -// - The quota.limits defines limits on the metrics, which will be used -// for -// quota checks at runtime. -// -// An example quota configuration in yaml format: -// -// quota: -// limits: -// -// - name: apiWriteQpsPerProject -// metric: library.googleapis.com/write_calls -// unit: "1/min/{project}" # rate limit for consumer projects -// values: -// STANDARD: 10000 -// -// -// # The metric rules bind all methods to the read_calls metric, -// # except for the UpdateBook and DeleteBook methods. These two -// methods -// # are mapped to the write_calls metric, with the UpdateBook -// method -// # consuming at twice rate as the DeleteBook method. -// metric_rules: -// - selector: "*" -// metric_costs: -// library.googleapis.com/read_calls: 1 -// - selector: google.example.library.v1.LibraryService.UpdateBook -// metric_costs: -// library.googleapis.com/write_calls: 2 -// - selector: google.example.library.v1.LibraryService.DeleteBook -// metric_costs: -// library.googleapis.com/write_calls: 1 -// -// Corresponding Metric definition: -// -// metrics: -// - name: library.googleapis.com/read_calls -// display_name: Read requests -// metric_kind: DELTA -// value_type: INT64 -// -// - name: library.googleapis.com/write_calls -// display_name: Write requests -// metric_kind: DELTA -// value_type: INT64 -// -// +// service usage. The metric based quota configuration works this way: - +// The service configuration defines a set of metrics. - For API calls, +// the quota.metric_rules maps methods to metrics with corresponding +// costs. - The quota.limits defines limits on the metrics, which will +// be used for quota checks at runtime. An example quota configuration +// in yaml format: quota: limits: - name: apiWriteQpsPerProject metric: +// library.googleapis.com/write_calls unit: "1/min/{project}" # rate +// limit for consumer projects values: STANDARD: 10000 # The metric +// rules bind all methods to the read_calls metric, # except for the +// UpdateBook and DeleteBook methods. These two methods # are mapped to +// the write_calls metric, with the UpdateBook method # consuming at +// twice rate as the DeleteBook method. 
metric_rules: - selector: "*" +// metric_costs: library.googleapis.com/read_calls: 1 - selector: +// google.example.library.v1.LibraryService.UpdateBook metric_costs: +// library.googleapis.com/write_calls: 2 - selector: +// google.example.library.v1.LibraryService.DeleteBook metric_costs: +// library.googleapis.com/write_calls: 1 Corresponding Metric +// definition: metrics: - name: library.googleapis.com/read_calls +// display_name: Read requests metric_kind: DELTA value_type: INT64 - +// name: library.googleapis.com/write_calls display_name: Write requests +// metric_kind: DELTA value_type: INT64 type Quota struct { // Limits: List of `QuotaLimit` definitions for the service. Limits []*QuotaLimit `json:"limits,omitempty"` // MetricRules: List of `MetricRule` definitions, each one mapping a - // selected method to one - // or more metrics. + // selected method to one or more metrics. MetricRules []*MetricRule `json:"metricRules,omitempty"` // ForceSendFields is a list of field names (e.g. "Limits") to @@ -4852,32 +3911,21 @@ type QuotaBucket struct { ConsumerOverride *QuotaOverride `json:"consumerOverride,omitempty"` // DefaultLimit: The default limit of this quota bucket, as specified by - // the service - // configuration. + // the service configuration. DefaultLimit int64 `json:"defaultLimit,omitempty,string"` - // Dimensions: The dimensions of this quota bucket. - // - // If this map is empty, this is the global bucket, which is the default - // quota - // value applied to all requests that do not have a more specific - // override. - // - // If this map is nonempty, the default limit, effective limit, and - // quota + // Dimensions: The dimensions of this quota bucket. If this map is + // empty, this is the global bucket, which is the default quota value + // applied to all requests that do not have a more specific override. If + // this map is nonempty, the default limit, effective limit, and quota // overrides apply only to requests that have the dimensions given in - // the map. - // - // For example, if the map has key "region" and value "us-east-1", then - // the - // specified effective limit is only effective in that region, and - // the - // specified overrides apply only in that region. + // the map. For example, if the map has key "region" and value + // "us-east-1", then the specified effective limit is only effective in + // that region, and the specified overrides apply only in that region. Dimensions map[string]string `json:"dimensions,omitempty"` // EffectiveLimit: The effective limit of this quota bucket. Equal to - // default_limit if there - // are no overrides. + // default_limit if there are no overrides. EffectiveLimit int64 `json:"effectiveLimit,omitempty,string"` // ProducerOverride: Producer override on this quota bucket. @@ -4907,116 +3955,75 @@ func (s *QuotaBucket) MarshalJSON() ([]byte, error) { } // QuotaLimit: `QuotaLimit` defines a specific limit that applies over a -// specified duration -// for a limit type. There can be at most one limit for a duration and -// limit -// type combination defined within a `QuotaGroup`. +// specified duration for a limit type. There can be at most one limit +// for a duration and limit type combination defined within a +// `QuotaGroup`. type QuotaLimit struct { // DefaultLimit: Default number of tokens that can be consumed during - // the specified - // duration. This is the number of tokens assigned when a - // client - // application developer activates the service for his/her - // project. 
- // - // Specifying a value of 0 will block all requests. This can be used if - // you - // are provisioning quota to selected consumers and blocking - // others. - // Similarly, a value of -1 will indicate an unlimited quota. No - // other - // negative values are allowed. - // - // Used by group-based quotas only. + // the specified duration. This is the number of tokens assigned when a + // client application developer activates the service for his/her + // project. Specifying a value of 0 will block all requests. This can be + // used if you are provisioning quota to selected consumers and blocking + // others. Similarly, a value of -1 will indicate an unlimited quota. No + // other negative values are allowed. Used by group-based quotas only. DefaultLimit int64 `json:"defaultLimit,omitempty,string"` // Description: Optional. User-visible, extended description for this - // quota limit. - // Should be used only when more context is needed to understand this - // limit - // than provided by the limit's display name (see: `display_name`). + // quota limit. Should be used only when more context is needed to + // understand this limit than provided by the limit's display name (see: + // `display_name`). Description string `json:"description,omitempty"` - // DisplayName: User-visible display name for this limit. - // Optional. If not set, the UI will provide a default display name - // based on - // the quota configuration. This field can be used to override the - // default + // DisplayName: User-visible display name for this limit. Optional. If + // not set, the UI will provide a default display name based on the + // quota configuration. This field can be used to override the default // display name generated from the configuration. DisplayName string `json:"displayName,omitempty"` // Duration: Duration of this limit in textual notation. Must be "100s" - // or "1d". - // - // Used by group-based quotas only. + // or "1d". Used by group-based quotas only. Duration string `json:"duration,omitempty"` // FreeTier: Free tier value displayed in the Developers Console for - // this limit. - // The free tier is the number of tokens that will be subtracted from - // the - // billed amount when billing is enabled. - // This field can only be set on a limit with duration "1d", in a - // billable - // group; it is invalid on any other limit. If this field is not set, - // it + // this limit. The free tier is the number of tokens that will be + // subtracted from the billed amount when billing is enabled. This field + // can only be set on a limit with duration "1d", in a billable group; + // it is invalid on any other limit. If this field is not set, it // defaults to 0, indicating that there is no free tier for this - // service. - // - // Used by group-based quotas only. + // service. Used by group-based quotas only. FreeTier int64 `json:"freeTier,omitempty,string"` // MaxLimit: Maximum number of tokens that can be consumed during the - // specified - // duration. Client application developers can override the default - // limit up - // to this maximum. If specified, this value cannot be set to a value - // less - // than the default limit. If not specified, it is set to the default - // limit. - // - // To allow clients to apply overrides with no upper bound, set this to - // -1, - // indicating unlimited maximum quota. - // - // Used by group-based quotas only. + // specified duration. Client application developers can override the + // default limit up to this maximum. 
If specified, this value cannot be + // set to a value less than the default limit. If not specified, it is + // set to the default limit. To allow clients to apply overrides with no + // upper bound, set this to -1, indicating unlimited maximum quota. Used + // by group-based quotas only. MaxLimit int64 `json:"maxLimit,omitempty,string"` // Metric: The name of the metric this quota limit applies to. The quota - // limits with - // the same metric will be checked together during runtime. The metric - // must be - // defined within the service config. + // limits with the same metric will be checked together during runtime. + // The metric must be defined within the service config. Metric string `json:"metric,omitempty"` - // Name: Name of the quota limit. - // - // The name must be provided, and it must be unique within the service. - // The - // name can only include alphanumeric characters as well as '-'. - // - // The maximum length of the limit name is 64 characters. + // Name: Name of the quota limit. The name must be provided, and it must + // be unique within the service. The name can only include alphanumeric + // characters as well as '-'. The maximum length of the limit name is 64 + // characters. Name string `json:"name,omitempty"` - // Unit: Specify the unit of the quota limit. It uses the same syntax - // as - // Metric.unit. The supported unit kinds are determined by the - // quota - // backend system. - // - // Here are some examples: - // * "1/min/{project}" for quota per minute per project. - // - // Note: the order of unit components is insignificant. - // The "1" at the beginning is required to follow the metric unit - // syntax. + // Unit: Specify the unit of the quota limit. It uses the same syntax as + // Metric.unit. The supported unit kinds are determined by the quota + // backend system. Here are some examples: * "1/min/{project}" for quota + // per minute per project. Note: the order of unit components is + // insignificant. The "1" at the beginning is required to follow the + // metric unit syntax. Unit string `json:"unit,omitempty"` // Values: Tiered limit values. You must specify this as a key:value - // pair, with an - // integer value that is the maximum number of requests allowed for - // the - // specified unit. Currently only STANDARD is supported. + // pair, with an integer value that is the maximum number of requests + // allowed for the specified unit. Currently only STANDARD is supported. Values map[string]string `json:"values,omitempty"` // ForceSendFields is a list of field names (e.g. "DefaultLimit") to @@ -5044,92 +4051,71 @@ func (s *QuotaLimit) MarshalJSON() ([]byte, error) { // QuotaOverride: A quota override type QuotaOverride struct { + // AdminOverrideAncestor: The resource name of the ancestor that + // requested the override. For example: "organizations/12345" or + // "folders/67890". Used by admin overrides only. + AdminOverrideAncestor string `json:"adminOverrideAncestor,omitempty"` + // Dimensions: If this map is nonempty, then this override applies only - // to specific values - // for dimensions defined in the limit unit. - // - // For example, an override on a limit with the unit - // 1/{project}/{region} + // to specific values for dimensions defined in the limit unit. For + // example, an override on a limit with the unit 1/{project}/{region} // could contain an entry with the key "region" and the value - // "us-east-1"; - // the override is only applied to quota consumed in that region. 
- // - // This map has the following restrictions: - // - // * Keys that are not defined in the limit's unit are not valid - // keys. - // Any string appearing in {brackets} in the unit (besides {project} - // or - // {user}) is a defined key. - // * "project" is not a valid key; the project is already specified - // in - // the parent resource name. - // * "user" is not a valid key; the API does not support quota - // overrides - // that apply only to a specific user. - // * If "region" appears as a key, its value must be a valid Cloud - // region. - // * If "zone" appears as a key, its value must be a valid Cloud - // zone. - // * If any valid key other than "region" or "zone" appears in the - // map, then - // all valid keys other than "region" or "zone" must also appear in - // the - // map. + // "us-east-1"; the override is only applied to quota consumed in that + // region. This map has the following restrictions: * Keys that are not + // defined in the limit's unit are not valid keys. Any string appearing + // in {brackets} in the unit (besides {project} or {user}) is a defined + // key. * "project" is not a valid key; the project is already specified + // in the parent resource name. * "user" is not a valid key; the API + // does not support quota overrides that apply only to a specific user. + // * If "region" appears as a key, its value must be a valid Cloud + // region. * If "zone" appears as a key, its value must be a valid Cloud + // zone. * If any valid key other than "region" or "zone" appears in the + // map, then all valid keys other than "region" or "zone" must also + // appear in the map. Dimensions map[string]string `json:"dimensions,omitempty"` - // Metric: The name of the metric to which this override applies. - // - // An example name would be: - // `compute.googleapis.com/cpus` + // Metric: The name of the metric to which this override applies. An + // example name would be: `compute.googleapis.com/cpus` Metric string `json:"metric,omitempty"` - // Name: The resource name of the override. - // This name is generated by the server when the override is - // created. - // - // Example names would - // be: - // `projects/123/services/compute.googleapis.com/consumerQuotaMetrics - // /compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminOverrid - // es/4a3f2c1d` - // `projects/123/services/compute.googleapis.com/consumerQuo - // taMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/con - // sumerOverrides/4a3f2c1d` - // - // The resource name is intended to be opaque and should not be parsed - // for - // its component strings, since its representation could change in the - // future. + // Name: The resource name of the override. This name is generated by + // the server when the override is created. Example names would be: + // `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/com + // pute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminOverrides/4 + // a3f2c1d` + // `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/com + // pute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/consumerOverride + // s/4a3f2c1d` The resource name is intended to be opaque and should not + // be parsed for its component strings, since its representation could + // change in the future. Name string `json:"name,omitempty"` - // OverrideValue: The overriding quota limit value. - // Can be any nonnegative integer, or -1 (unlimited quota). + // OverrideValue: The overriding quota limit value. 
Can be any + // nonnegative integer, or -1 (unlimited quota). OverrideValue int64 `json:"overrideValue,omitempty,string"` - // Unit: The limit unit of the limit to which this override applies. - // - // An example unit would be: - // `1/{project}/{region}` - // Note that `{project}` and `{region}` are not placeholders in this - // example; - // the literal characters `{` and `}` occur in the string. + // Unit: The limit unit of the limit to which this override applies. An + // example unit would be: `1/{project}/{region}` Note that `{project}` + // and `{region}` are not placeholders in this example; the literal + // characters `{` and `}` occur in the string. Unit string `json:"unit,omitempty"` - // ForceSendFields is a list of field names (e.g. "Dimensions") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "AdminOverrideAncestor") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Dimensions") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AdminOverrideAncestor") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -5141,24 +4127,18 @@ func (s *QuotaOverride) MarshalJSON() ([]byte, error) { // Service: A service that is available for use by the consumer. type Service struct { - // Config: The service configuration of the available service. - // Some fields may be filtered out of the configuration in responses - // to - // the `ListServices` method. These fields are present only in responses - // to + // Config: The service configuration of the available service. Some + // fields may be filtered out of the configuration in responses to the + // `ListServices` method. These fields are present only in responses to // the `GetService` method. Config *ServiceConfig `json:"config,omitempty"` - // Name: The resource name of the consumer and service. - // - // A valid name would be: - // - projects/123/services/serviceusage.googleapis.com + // Name: The resource name of the consumer and service. A valid name + // would be: - projects/123/services/serviceusage.googleapis.com Name string `json:"name,omitempty"` - // Parent: The resource name of the consumer. 
- // - // A valid name would be: - // - projects/123 + // Parent: The resource name of the consumer. A valid name would be: - + // projects/123 Parent string `json:"parent,omitempty"` // State: Whether or not the service has been enabled for use by the @@ -5166,14 +4146,11 @@ type Service struct { // // Possible values: // "STATE_UNSPECIFIED" - The default value, which indicates that the - // enabled state of the service - // is unspecified or not meaningful. Currently, all consumers other - // than - // projects (such as folders and organizations) are always in this - // state. + // enabled state of the service is unspecified or not meaningful. + // Currently, all consumers other than projects (such as folders and + // organizations) are always in this state. // "DISABLED" - The service cannot be used by this consumer. It has - // either been explicitly - // disabled, or has never been enabled. + // either been explicitly disabled, or has never been enabled. // "ENABLED" - The service has been explicitly enabled for use by this // consumer. State string `json:"state,omitempty"` @@ -5208,27 +4185,31 @@ func (s *Service) MarshalJSON() ([]byte, error) { // ServiceConfig: The configuration of the service. type ServiceConfig struct { // Apis: A list of API interfaces exported by this service. Contains - // only the names, - // versions, and method names of the interfaces. + // only the names, versions, and method names of the interfaces. Apis []*Api `json:"apis,omitempty"` // Authentication: Auth configuration. Contains only the OAuth rules. Authentication *Authentication `json:"authentication,omitempty"` // Documentation: Additional API documentation. Contains only the - // summary and the - // documentation URL. + // summary and the documentation URL. Documentation *Documentation `json:"documentation,omitempty"` // Endpoints: Configuration for network endpoints. Contains only the - // names and aliases - // of the endpoints. + // names and aliases of the endpoints. Endpoints []*Endpoint `json:"endpoints,omitempty"` - // Name: The DNS address at which this service is available. - // - // An example DNS address would be: - // `calendar.googleapis.com`. + // MonitoredResources: Defines the monitored resources used by this + // service. This is required by the Service.monitoring and + // Service.logging configurations. + MonitoredResources []*MonitoredResourceDescriptor `json:"monitoredResources,omitempty"` + + // Monitoring: Monitoring configuration. This should not include the + // 'producer_destinations' field. + Monitoring *Monitoring `json:"monitoring,omitempty"` + + // Name: The DNS address at which this service is available. An example + // DNS address would be: `calendar.googleapis.com`. Name string `json:"name,omitempty"` // Quota: Quota configuration. @@ -5264,18 +4245,14 @@ func (s *ServiceConfig) MarshalJSON() ([]byte, error) { } // ServiceIdentity: Service identity for a service. This is the identity -// that service producer -// should use to access consumer resources. +// that service producer should use to access consumer resources. type ServiceIdentity struct { // Email: The email address of the service account that a service - // producer would use - // to access consumer resources. + // producer would use to access consumer resources. Email string `json:"email,omitempty"` - // UniqueId: The unique and stable id of the service - // account. 
- // https://cloud.google.com/iam/reference/rest/v1/projects.servi - // ceAccounts#ServiceAccount + // UniqueId: The unique and stable id of the service account. + // https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts#ServiceAccount UniqueId string `json:"uniqueId,omitempty"` // ForceSendFields is a list of field names (e.g. "Email") to @@ -5302,12 +4279,10 @@ func (s *ServiceIdentity) MarshalJSON() ([]byte, error) { } // SourceContext: `SourceContext` represents information about the -// source of a -// protobuf element, like the file in which it is defined. +// source of a protobuf element, like the file in which it is defined. type SourceContext struct { // FileName: The path-qualified name of the .proto file that contained - // the associated - // protobuf element. For example: + // the associated protobuf element. For example: // "google/protobuf/source_context.proto". FileName string `json:"fileName,omitempty"` @@ -5363,32 +4338,24 @@ func (s *SourceInfo) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. -// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. -// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -5415,14 +4382,12 @@ func (s *Status) MarshalJSON() ([]byte, error) { } // SystemParameter: Define a parameter's name and location. The -// parameter may be passed as either -// an HTTP header or a URL query parameter, and if both are passed the -// behavior -// is implementation-dependent. +// parameter may be passed as either an HTTP header or a URL query +// parameter, and if both are passed the behavior is +// implementation-dependent. type SystemParameter struct { // HttpHeader: Define the HTTP header name to use for the parameter. It - // is case - // insensitive. + // is case insensitive. HttpHeader string `json:"httpHeader,omitempty"` // Name: Define the name of the parameter, such as "api_key" . 
It is @@ -5430,8 +4395,7 @@ type SystemParameter struct { Name string `json:"name,omitempty"` // UrlQueryParameter: Define the URL query parameter name to use for the - // parameter. It is case - // sensitive. + // parameter. It is case sensitive. UrlQueryParameter string `json:"urlQueryParameter,omitempty"` // ForceSendFields is a list of field names (e.g. "HttpHeader") to @@ -5458,24 +4422,18 @@ func (s *SystemParameter) MarshalJSON() ([]byte, error) { } // SystemParameterRule: Define a system parameter rule mapping system -// parameter definitions to -// methods. +// parameter definitions to methods. type SystemParameterRule struct { // Parameters: Define parameters. Multiple names may be defined for a - // parameter. - // For a given method call, only one of them should be used. If - // multiple - // names are used the behavior is implementation-dependent. - // If none of the specified names are present the behavior - // is + // parameter. For a given method call, only one of them should be used. + // If multiple names are used the behavior is implementation-dependent. + // If none of the specified names are present the behavior is // parameter-dependent. Parameters []*SystemParameter `json:"parameters,omitempty"` // Selector: Selects the methods to which this rule applies. Use '*' to - // indicate all - // methods in all APIs. - // - // Refer to selector for syntax details. + // indicate all methods in all APIs. Refer to selector for syntax + // details. Selector string `json:"selector,omitempty"` // ForceSendFields is a list of field names (e.g. "Parameters") to @@ -5501,49 +4459,23 @@ func (s *SystemParameterRule) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SystemParameters: ### System parameter configuration -// -// A system parameter is a special kind of parameter defined by the -// API -// system, not by an individual API. It is typically mapped to an HTTP -// header +// SystemParameters: ### System parameter configuration A system +// parameter is a special kind of parameter defined by the API system, +// not by an individual API. It is typically mapped to an HTTP header // and/or a URL query parameter. This configuration specifies which -// methods -// change the names of the system parameters. +// methods change the names of the system parameters. type SystemParameters struct { - // Rules: Define system parameters. - // - // The parameters defined here will override the default - // parameters - // implemented by the system. If this field is missing from the - // service - // config, default system parameters will be used. Default system - // parameters - // and names is implementation-dependent. - // - // Example: define api key for all methods - // - // system_parameters - // rules: - // - selector: "*" - // parameters: - // - name: api_key - // url_query_parameter: api_key - // - // - // Example: define 2 api key names for a specific method. - // - // system_parameters - // rules: - // - selector: "/ListShelves" - // parameters: - // - name: api_key - // http_header: Api-Key1 - // - name: api_key - // http_header: Api-Key2 - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. + // Rules: Define system parameters. The parameters defined here will + // override the default parameters implemented by the system. If this + // field is missing from the service config, default system parameters + // will be used. Default system parameters and names is + // implementation-dependent. 
Example: define api key for all methods + // system_parameters rules: - selector: "*" parameters: - name: api_key + // url_query_parameter: api_key Example: define 2 api key names for a + // specific method. system_parameters rules: - selector: "/ListShelves" + // parameters: - name: api_key http_header: Api-Key1 - name: api_key + // http_header: Api-Key2 **NOTE:** All service configuration rules + // follow "last one wins" order. Rules []*SystemParameterRule `json:"rules,omitempty"` // ForceSendFields is a list of field names (e.g. "Rules") to @@ -5620,29 +4552,20 @@ func (s *Type) MarshalJSON() ([]byte, error) { // Usage: Configuration controlling usage of a service. type Usage struct { // ProducerNotificationChannel: The full resource name of a channel used - // for sending notifications to the - // service producer. - // - // Google Service Management currently only supports - // [Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a - // notification - // channel. To use Google Cloud Pub/Sub as the channel, this must be the - // name - // of a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name - // format + // for sending notifications to the service producer. Google Service + // Management currently only supports [Google Cloud + // Pub/Sub](https://cloud.google.com/pubsub) as a notification channel. + // To use Google Cloud Pub/Sub as the channel, this must be the name of + // a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format // documented in https://cloud.google.com/pubsub/docs/overview. ProducerNotificationChannel string `json:"producerNotificationChannel,omitempty"` // Requirements: Requirements that must be satisfied before a consumer - // project can use the - // service. Each requirement is of the form - // /; - // for example 'serviceusage.googleapis.com/billing-enabled'. + // project can use the service. Each requirement is of the form /; for + // example 'serviceusage.googleapis.com/billing-enabled'. Requirements []string `json:"requirements,omitempty"` - // Rules: A list of usage rules that apply to individual API - // methods. - // + // Rules: A list of usage rules that apply to individual API methods. // **NOTE:** All service configuration rules follow "last one wins" // order. Rules []*UsageRule `json:"rules,omitempty"` @@ -5676,57 +4599,34 @@ func (s *Usage) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UsageRule: Usage configuration rules for the service. -// -// NOTE: Under development. -// -// -// Use this rule to configure unregistered calls for the service. -// Unregistered -// calls are calls that do not contain consumer project -// identity. -// (Example: calls that do not contain an API key). -// By default, API methods do not allow unregistered calls, and each -// method call -// must be identified by a consumer project identity. Use this rule -// to -// allow/disallow unregistered calls. -// -// Example of an API that wants to allow unregistered calls for entire -// service. -// -// usage: -// rules: -// - selector: "*" -// allow_unregistered_calls: true -// -// Example of a method that wants to allow unregistered calls. -// -// usage: -// rules: -// - selector: +// UsageRule: Usage configuration rules for the service. NOTE: Under +// development. Use this rule to configure unregistered calls for the +// service. Unregistered calls are calls that do not contain consumer +// project identity. (Example: calls that do not contain an API key). 
By +// default, API methods do not allow unregistered calls, and each method +// call must be identified by a consumer project identity. Use this rule +// to allow/disallow unregistered calls. Example of an API that wants to +// allow unregistered calls for entire service. usage: rules: - +// selector: "*" allow_unregistered_calls: true Example of a method that +// wants to allow unregistered calls. usage: rules: - selector: // "google.example.library.v1.LibraryService.CreateBook" -// allow_unregistered_calls: true +// allow_unregistered_calls: true type UsageRule struct { // AllowUnregisteredCalls: If true, the selected method allows - // unregistered calls, e.g. calls - // that don't identify any user or application. + // unregistered calls, e.g. calls that don't identify any user or + // application. AllowUnregisteredCalls bool `json:"allowUnregisteredCalls,omitempty"` // Selector: Selects the methods to which this rule applies. Use '*' to - // indicate all - // methods in all APIs. - // - // Refer to selector for syntax details. + // indicate all methods in all APIs. Refer to selector for syntax + // details. Selector string `json:"selector,omitempty"` // SkipServiceControl: If true, the selected method should skip service - // control and the control - // plane features, such as quota and billing, will not be - // available. - // This flag is used by Google Cloud Endpoints to bypass checks for - // internal - // methods, such as service health check methods. + // control and the control plane features, such as quota and billing, + // will not be available. This flag is used by Google Cloud Endpoints to + // bypass checks for internal methods, such as service health check + // methods. SkipServiceControl bool `json:"skipServiceControl,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -5765,11 +4665,9 @@ type OperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this -// method to poll the operation result at intervals as recommended by -// the API -// service. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. func (r *OperationsService) Get(name string) *OperationsGetCall { c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5813,7 +4711,7 @@ func (c *OperationsGetCall) Header() http.Header { func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5875,7 +4773,7 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", // "flatPath": "v1beta1/operations/{operationsId}", // "httpMethod": "GET", // "id": "serviceusage.operations.get", @@ -5914,22 +4812,15 @@ type OperationsListCall struct { } // List: Lists operations that match the specified filter in the -// request. If the -// server doesn't support this method, it returns -// `UNIMPLEMENTED`. -// -// NOTE: the `name` binding allows API services to override the -// binding -// to use different resource name schemes, such as `users/*/operations`. -// To -// override the binding, API services can add a binding such -// as -// "/v1/{name=users/*}/operations" to their service configuration. -// For backwards compatibility, the default name includes the -// operations -// collection id, however overriding users must ensure the name -// binding -// is the parent resource, without the operations collection id. +// request. If the server doesn't support this method, it returns +// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to +// override the binding to use different resource name schemes, such as +// `users/*/operations`. To override the binding, API services can add a +// binding such as "/v1/{name=users/*}/operations" to their service +// configuration. For backwards compatibility, the default name includes +// the operations collection id, however overriding users must ensure +// the name binding is the parent resource, without the operations +// collection id. func (r *OperationsService) List() *OperationsListCall { c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c @@ -6000,7 +4891,7 @@ func (c *OperationsListCall) Header() http.Header { func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6059,7 +4950,7 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsRe } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. 
For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", // "flatPath": "v1beta1/operations", // "httpMethod": "GET", // "id": "serviceusage.operations.list", @@ -6132,11 +5023,8 @@ type ServicesBatchEnableCall struct { } // BatchEnable: Enable multiple services on a project. The operation is -// atomic: if enabling -// any service fails, then the entire batch fails, and no state changes -// occur. -// -// Operation +// atomic: if enabling any service fails, then the entire batch fails, +// and no state changes occur. Operation func (r *ServicesService) BatchEnable(parent string, batchenableservicesrequest *BatchEnableServicesRequest) *ServicesBatchEnableCall { c := &ServicesBatchEnableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6171,7 +5059,7 @@ func (c *ServicesBatchEnableCall) Header() http.Header { func (c *ServicesBatchEnableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6235,7 +5123,7 @@ func (c *ServicesBatchEnableCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Enable multiple services on a project. The operation is atomic: if enabling\nany service fails, then the entire batch fails, and no state changes occur.\n\nOperation\u003cresponse: google.protobuf.Empty\u003e", + // "description": "Enable multiple services on a project. The operation is atomic: if enabling any service fails, then the entire batch fails, and no state changes occur. Operation", // "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services:batchEnable", // "httpMethod": "POST", // "id": "serviceusage.services.batchEnable", @@ -6244,7 +5132,7 @@ func (c *ServicesBatchEnableCall) Do(opts ...googleapi.CallOption) (*Operation, // ], // "parameters": { // "parent": { - // "description": "Parent to enable services on.\n\nAn example name would be:\n`projects/123`\nwhere `123` is the project number (not project ID).\n\nThe `BatchEnableServices` method currently only supports projects.", + // "description": "Parent to enable services on. An example name would be: `projects/123` where `123` is the project number (not project ID). The `BatchEnableServices` method currently only supports projects.", // "location": "path", // "pattern": "^[^/]+/[^/]+$", // "required": true, @@ -6278,18 +5166,11 @@ type ServicesDisableCall struct { } // Disable: Disable a service so that it can no longer be used with a -// project. -// This prevents unintended usage that may cause unexpected -// billing -// charges or security leaks. -// -// It is not valid to call the disable method on a service that is -// not -// currently enabled. Callers will receive a `FAILED_PRECONDITION` -// status if -// the target service is not currently enabled. -// -// Operation +// project. This prevents unintended usage that may cause unexpected +// billing charges or security leaks. It is not valid to call the +// disable method on a service that is not currently enabled. Callers +// will receive a `FAILED_PRECONDITION` status if the target service is +// not currently enabled. 
Operation func (r *ServicesService) Disable(name string, disableservicerequest *DisableServiceRequest) *ServicesDisableCall { c := &ServicesDisableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -6324,7 +5205,7 @@ func (c *ServicesDisableCall) Header() http.Header { func (c *ServicesDisableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6388,7 +5269,7 @@ func (c *ServicesDisableCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Disable a service so that it can no longer be used with a project.\nThis prevents unintended usage that may cause unexpected billing\ncharges or security leaks.\n\nIt is not valid to call the disable method on a service that is not\ncurrently enabled. Callers will receive a `FAILED_PRECONDITION` status if\nthe target service is not currently enabled.\n\nOperation\u003cresponse: google.protobuf.Empty\u003e", + // "description": "Disable a service so that it can no longer be used with a project. This prevents unintended usage that may cause unexpected billing charges or security leaks. It is not valid to call the disable method on a service that is not currently enabled. Callers will receive a `FAILED_PRECONDITION` status if the target service is not currently enabled. Operation", // "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services/{servicesId}:disable", // "httpMethod": "POST", // "id": "serviceusage.services.disable", @@ -6397,7 +5278,7 @@ func (c *ServicesDisableCall) Do(opts ...googleapi.CallOption) (*Operation, erro // ], // "parameters": { // "name": { - // "description": "Name of the consumer and service to disable the service on.\n\nThe enable and disable methods currently only support projects.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com`\nwhere `123` is the project number (not project ID).", + // "description": "Name of the consumer and service to disable the service on. The enable and disable methods currently only support projects. An example name would be: `projects/123/services/serviceusage.googleapis.com` where `123` is the project number (not project ID).", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+$", // "required": true, @@ -6430,10 +5311,8 @@ type ServicesEnableCall struct { header_ http.Header } -// Enable: Enable a service so that it can be used with a -// project. -// -// Operation +// Enable: Enable a service so that it can be used with a project. 
+// Operation func (r *ServicesService) Enable(name string, enableservicerequest *EnableServiceRequest) *ServicesEnableCall { c := &ServicesEnableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -6468,7 +5347,7 @@ func (c *ServicesEnableCall) Header() http.Header { func (c *ServicesEnableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6532,7 +5411,7 @@ func (c *ServicesEnableCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Enable a service so that it can be used with a project.\n\nOperation\u003cresponse: google.protobuf.Empty\u003e", + // "description": "Enable a service so that it can be used with a project. Operation", // "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services/{servicesId}:enable", // "httpMethod": "POST", // "id": "serviceusage.services.enable", @@ -6541,7 +5420,7 @@ func (c *ServicesEnableCall) Do(opts ...googleapi.CallOption) (*Operation, error // ], // "parameters": { // "name": { - // "description": "Name of the consumer and service to enable the service on.\n\nThe `EnableService` and `DisableService` methods currently only support\nprojects.\n\nEnabling a service requires that the service is public or is shared with\nthe user enabling the service.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com`\nwhere `123` is the project number (not project ID).", + // "description": "Name of the consumer and service to enable the service on. The `EnableService` and `DisableService` methods currently only support projects. Enabling a service requires that the service is public or is shared with the user enabling the service. An example name would be: `projects/123/services/serviceusage.googleapis.com` where `123` is the project number (not project ID).", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+$", // "required": true, @@ -6563,6 +5442,137 @@ func (c *ServicesEnableCall) Do(opts ...googleapi.CallOption) (*Operation, error } +// method id "serviceusage.services.generateServiceIdentity": + +type ServicesGenerateServiceIdentityCall struct { + s *APIService + parent string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GenerateServiceIdentity: Generate service identity for service. +func (r *ServicesService) GenerateServiceIdentity(parent string) *ServicesGenerateServiceIdentityCall { + c := &ServicesGenerateServiceIdentityCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ServicesGenerateServiceIdentityCall) Fields(s ...googleapi.Field) *ServicesGenerateServiceIdentityCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ServicesGenerateServiceIdentityCall) Context(ctx context.Context) *ServicesGenerateServiceIdentityCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ServicesGenerateServiceIdentityCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ServicesGenerateServiceIdentityCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+parent}:generateServiceIdentity") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "serviceusage.services.generateServiceIdentity" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ServicesGenerateServiceIdentityCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Generate service identity for service.", + // "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services/{servicesId}:generateServiceIdentity", + // "httpMethod": "POST", + // "id": "serviceusage.services.generateServiceIdentity", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Name of the consumer and service to generate an identity for. The `GenerateServiceIdentity` methods currently only support projects. 
An example name would be: `projects/123/services/example.googleapis.com` where `123` is the project number.", + // "location": "path", + // "pattern": "^[^/]+/[^/]+/services/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta1/{+parent}:generateServiceIdentity", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/service.management" + // ] + // } + +} + // method id "serviceusage.services.get": type ServicesGetCall struct { @@ -6619,7 +5629,7 @@ func (c *ServicesGetCall) Header() http.Header { func (c *ServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6690,7 +5700,7 @@ func (c *ServicesGetCall) Do(opts ...googleapi.CallOption) (*Service, error) { // ], // "parameters": { // "name": { - // "description": "Name of the consumer and service to get the `ConsumerState` for.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com`\nwhere `123` is the project number (not project ID).", + // "description": "Name of the consumer and service to get the `ConsumerState` for. An example name would be: `projects/123/services/serviceusage.googleapis.com` where `123` is the project number (not project ID).", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+$", // "required": true, @@ -6721,17 +5731,12 @@ type ServicesListCall struct { } // List: List all services available to the specified project, and the -// current -// state of those services with respect to the project. The list -// includes -// all public services, all services for which the calling user has -// the -// `servicemanagement.services.bind` permission, and all services that -// have -// already been enabled on the project. The list can be filtered to -// only include services in a specific state, for example to only -// include -// services enabled on the project. +// current state of those services with respect to the project. The list +// includes all public services, all services for which the calling user +// has the `servicemanagement.services.bind` permission, and all +// services that have already been enabled on the project. The list can +// be filtered to only include services in a specific state, for example +// to only include services enabled on the project. func (r *ServicesService) List(parent string) *ServicesListCall { c := &ServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6739,25 +5744,24 @@ func (r *ServicesService) List(parent string) *ServicesListCall { } // Filter sets the optional parameter "filter": Only list services that -// conform to the given filter. -// The allowed filter strings are `state:ENABLED` and `state:DISABLED`. +// conform to the given filter. The allowed filter strings are +// `state:ENABLED` and `state:DISABLED`. func (c *ServicesListCall) Filter(filter string) *ServicesListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": Requested size of -// the next page of data. -// Requested page size cannot exceed 200. -// If not set, the default page size is 50. +// the next page of data. Requested page size cannot exceed 200. 
If not +// set, the default page size is 50. func (c *ServicesListCall) PageSize(pageSize int64) *ServicesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Token identifying -// which result to start with, which is returned by a -// previous list call. +// which result to start with, which is returned by a previous list +// call. func (c *ServicesListCall) PageToken(pageToken string) *ServicesListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -6800,7 +5804,7 @@ func (c *ServicesListCall) Header() http.Header { func (c *ServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6862,7 +5866,7 @@ func (c *ServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesRespon } return ret, nil // { - // "description": "List all services available to the specified project, and the current\nstate of those services with respect to the project. The list includes\nall public services, all services for which the calling user has the\n`servicemanagement.services.bind` permission, and all services that have\nalready been enabled on the project. The list can be filtered to\nonly include services in a specific state, for example to only include\nservices enabled on the project.", + // "description": "List all services available to the specified project, and the current state of those services with respect to the project. The list includes all public services, all services for which the calling user has the `servicemanagement.services.bind` permission, and all services that have already been enabled on the project. The list can be filtered to only include services in a specific state, for example to only include services enabled on the project.", // "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services", // "httpMethod": "GET", // "id": "serviceusage.services.list", @@ -6871,23 +5875,23 @@ func (c *ServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesRespon // ], // "parameters": { // "filter": { - // "description": "Only list services that conform to the given filter.\nThe allowed filter strings are `state:ENABLED` and `state:DISABLED`.", + // "description": "Only list services that conform to the given filter. The allowed filter strings are `state:ENABLED` and `state:DISABLED`.", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Requested size of the next page of data.\nRequested page size cannot exceed 200.\n If not set, the default page size is 50.", + // "description": "Requested size of the next page of data. Requested page size cannot exceed 200. If not set, the default page size is 50.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Token identifying which result to start with, which is returned by a\nprevious list call.", + // "description": "Token identifying which result to start with, which is returned by a previous list call.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Parent to search for services on.\n\nAn example name would be:\n`projects/123`\nwhere `123` is the project number (not project ID).", + // "description": "Parent to search for services on. 
An example name would be: `projects/123` where `123` is the project number (not project ID).", // "location": "path", // "pattern": "^[^/]+/[^/]+$", // "required": true, @@ -6950,9 +5954,15 @@ func (r *ServicesConsumerQuotaMetricsService) Get(name string) *ServicesConsumer // detail for quota information in the response. // // Possible values: -// "QUOTA_VIEW_UNSPECIFIED" -// "BASIC" -// "FULL" +// "QUOTA_VIEW_UNSPECIFIED" - No quota view specified. Requests that +// do not specify a quota view will typically default to the BASIC view. +// "BASIC" - Only buckets with overrides are shown in the response. +// "FULL" - Include per-location buckets even if they do not have +// overrides. When the view is FULL, and a limit has regional or zonal +// quota, the limit will include buckets for all regions or zones that +// could support overrides, even if none are currently present. In some +// cases this will cause the response to become very large; callers that +// do not need this extra information should use the BASIC view instead. func (c *ServicesConsumerQuotaMetricsGetCall) View(view string) *ServicesConsumerQuotaMetricsGetCall { c.urlParams_.Set("view", view) return c @@ -6995,7 +6005,7 @@ func (c *ServicesConsumerQuotaMetricsGetCall) Header() http.Header { func (c *ServicesConsumerQuotaMetricsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7066,7 +6076,7 @@ func (c *ServicesConsumerQuotaMetricsGetCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "name": { - // "description": "The resource name of the quota limit.\n\nAn example name would be:\nprojects/123/services/serviceusage.googleapis.com/quotas/metrics/serviceusage.googleapis.com%2Fmutate_requests", + // "description": "The resource name of the quota limit. An example name would be: projects/123/services/serviceusage.googleapis.com/quotas/metrics/serviceusage.googleapis.com%2Fmutate_requests", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+$", // "required": true, @@ -7079,6 +6089,11 @@ func (c *ServicesConsumerQuotaMetricsGetCall) Do(opts ...googleapi.CallOption) ( // "BASIC", // "FULL" // ], + // "enumDescriptions": [ + // "No quota view specified. Requests that do not specify a quota view will typically default to the BASIC view.", + // "Only buckets with overrides are shown in the response.", + // "Include per-location buckets even if they do not have overrides. When the view is FULL, and a limit has regional or zonal quota, the limit will include buckets for all regions or zones that could support overrides, even if none are currently present. In some cases this will cause the response to become very large; callers that do not need this extra information should use the BASIC view instead." 
+ // ], // "location": "query", // "type": "string" // } @@ -7095,6 +6110,150 @@ func (c *ServicesConsumerQuotaMetricsGetCall) Do(opts ...googleapi.CallOption) ( } +// method id "serviceusage.services.consumerQuotaMetrics.importConsumerOverrides": + +type ServicesConsumerQuotaMetricsImportConsumerOverridesCall struct { + s *APIService + parent string + importconsumeroverridesrequest *ImportConsumerOverridesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ImportConsumerOverrides: Create or update multiple consumer overrides +// atomically, all on the same consumer, but on many different metrics +// or limits. The name field in the quota override message should not be +// set. +func (r *ServicesConsumerQuotaMetricsService) ImportConsumerOverrides(parent string, importconsumeroverridesrequest *ImportConsumerOverridesRequest) *ServicesConsumerQuotaMetricsImportConsumerOverridesCall { + c := &ServicesConsumerQuotaMetricsImportConsumerOverridesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.importconsumeroverridesrequest = importconsumeroverridesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ServicesConsumerQuotaMetricsImportConsumerOverridesCall) Fields(s ...googleapi.Field) *ServicesConsumerQuotaMetricsImportConsumerOverridesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ServicesConsumerQuotaMetricsImportConsumerOverridesCall) Context(ctx context.Context) *ServicesConsumerQuotaMetricsImportConsumerOverridesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ServicesConsumerQuotaMetricsImportConsumerOverridesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ServicesConsumerQuotaMetricsImportConsumerOverridesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.importconsumeroverridesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+parent}/consumerQuotaMetrics:importConsumerOverrides") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "serviceusage.services.consumerQuotaMetrics.importConsumerOverrides" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ServicesConsumerQuotaMetricsImportConsumerOverridesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Create or update multiple consumer overrides atomically, all on the same consumer, but on many different metrics or limits. The name field in the quota override message should not be set.", + // "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services/{servicesId}/consumerQuotaMetrics:importConsumerOverrides", + // "httpMethod": "POST", + // "id": "serviceusage.services.consumerQuotaMetrics.importConsumerOverrides", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The resource name of the consumer. An example name would be: `projects/123/services/compute.googleapis.com`", + // "location": "path", + // "pattern": "^[^/]+/[^/]+/services/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1beta1/{+parent}/consumerQuotaMetrics:importConsumerOverrides", + // "request": { + // "$ref": "ImportConsumerOverridesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/service.management" + // ] + // } + +} + // method id "serviceusage.services.consumerQuotaMetrics.list": type ServicesConsumerQuotaMetricsListCall struct { @@ -7107,14 +6266,11 @@ type ServicesConsumerQuotaMetricsListCall struct { } // List: Retrieves a summary of all quota information visible to the -// service -// consumer, organized by service metric. Each metric includes -// information -// about all of its defined limits. Each limit includes the -// limit -// configuration (quota unit, preciseness, default value), the -// current -// effective limit value, and all of the overrides applied to the limit. +// service consumer, organized by service metric. Each metric includes +// information about all of its defined limits. Each limit includes the +// limit configuration (quota unit, preciseness, default value), the +// current effective limit value, and all of the overrides applied to +// the limit. func (r *ServicesConsumerQuotaMetricsService) List(parent string) *ServicesConsumerQuotaMetricsListCall { c := &ServicesConsumerQuotaMetricsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -7129,8 +6285,7 @@ func (c *ServicesConsumerQuotaMetricsListCall) PageSize(pageSize int64) *Service } // PageToken sets the optional parameter "pageToken": Token identifying -// which result to start with; returned by a previous list -// call. +// which result to start with; returned by a previous list call. 
func (c *ServicesConsumerQuotaMetricsListCall) PageToken(pageToken string) *ServicesConsumerQuotaMetricsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -7140,9 +6295,15 @@ func (c *ServicesConsumerQuotaMetricsListCall) PageToken(pageToken string) *Serv // detail for quota information in the response. // // Possible values: -// "QUOTA_VIEW_UNSPECIFIED" -// "BASIC" -// "FULL" +// "QUOTA_VIEW_UNSPECIFIED" - No quota view specified. Requests that +// do not specify a quota view will typically default to the BASIC view. +// "BASIC" - Only buckets with overrides are shown in the response. +// "FULL" - Include per-location buckets even if they do not have +// overrides. When the view is FULL, and a limit has regional or zonal +// quota, the limit will include buckets for all regions or zones that +// could support overrides, even if none are currently present. In some +// cases this will cause the response to become very large; callers that +// do not need this extra information should use the BASIC view instead. func (c *ServicesConsumerQuotaMetricsListCall) View(view string) *ServicesConsumerQuotaMetricsListCall { c.urlParams_.Set("view", view) return c @@ -7185,7 +6346,7 @@ func (c *ServicesConsumerQuotaMetricsListCall) Header() http.Header { func (c *ServicesConsumerQuotaMetricsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7247,7 +6408,7 @@ func (c *ServicesConsumerQuotaMetricsListCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Retrieves a summary of all quota information visible to the service\nconsumer, organized by service metric. Each metric includes information\nabout all of its defined limits. Each limit includes the limit\nconfiguration (quota unit, preciseness, default value), the current\neffective limit value, and all of the overrides applied to the limit.", + // "description": "Retrieves a summary of all quota information visible to the service consumer, organized by service metric. Each metric includes information about all of its defined limits. Each limit includes the limit configuration (quota unit, preciseness, default value), the current effective limit value, and all of the overrides applied to the limit.", // "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services/{servicesId}/consumerQuotaMetrics", // "httpMethod": "GET", // "id": "serviceusage.services.consumerQuotaMetrics.list", @@ -7262,12 +6423,12 @@ func (c *ServicesConsumerQuotaMetricsListCall) Do(opts ...googleapi.CallOption) // "type": "integer" // }, // "pageToken": { - // "description": "Token identifying which result to start with; returned by a previous list\ncall.", + // "description": "Token identifying which result to start with; returned by a previous list call.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Parent of the quotas resource.\n\nSome example names would be:\nprojects/123/services/serviceconsumermanagement.googleapis.com\nfolders/345/services/serviceconsumermanagement.googleapis.com\norganizations/456/services/serviceconsumermanagement.googleapis.com", + // "description": "Parent of the quotas resource. 
Some example names would be: projects/123/services/serviceconsumermanagement.googleapis.com folders/345/services/serviceconsumermanagement.googleapis.com organizations/456/services/serviceconsumermanagement.googleapis.com", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+$", // "required": true, @@ -7280,6 +6441,11 @@ func (c *ServicesConsumerQuotaMetricsListCall) Do(opts ...googleapi.CallOption) // "BASIC", // "FULL" // ], + // "enumDescriptions": [ + // "No quota view specified. Requests that do not specify a quota view will typically default to the BASIC view.", + // "Only buckets with overrides are shown in the response.", + // "Include per-location buckets even if they do not have overrides. When the view is FULL, and a limit has regional or zonal quota, the limit will include buckets for all regions or zones that could support overrides, even if none are currently present. In some cases this will cause the response to become very large; callers that do not need this extra information should use the BASIC view instead." + // ], // "location": "query", // "type": "string" // } @@ -7340,9 +6506,15 @@ func (r *ServicesConsumerQuotaMetricsLimitsService) Get(name string) *ServicesCo // detail for quota information in the response. // // Possible values: -// "QUOTA_VIEW_UNSPECIFIED" -// "BASIC" -// "FULL" +// "QUOTA_VIEW_UNSPECIFIED" - No quota view specified. Requests that +// do not specify a quota view will typically default to the BASIC view. +// "BASIC" - Only buckets with overrides are shown in the response. +// "FULL" - Include per-location buckets even if they do not have +// overrides. When the view is FULL, and a limit has regional or zonal +// quota, the limit will include buckets for all regions or zones that +// could support overrides, even if none are currently present. In some +// cases this will cause the response to become very large; callers that +// do not need this extra information should use the BASIC view instead. func (c *ServicesConsumerQuotaMetricsLimitsGetCall) View(view string) *ServicesConsumerQuotaMetricsLimitsGetCall { c.urlParams_.Set("view", view) return c @@ -7385,7 +6557,7 @@ func (c *ServicesConsumerQuotaMetricsLimitsGetCall) Header() http.Header { func (c *ServicesConsumerQuotaMetricsLimitsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7456,7 +6628,7 @@ func (c *ServicesConsumerQuotaMetricsLimitsGetCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "name": { - // "description": "The resource name of the quota limit.\n\nUse the quota limit resource name returned by previous\nListConsumerQuotaMetrics and GetConsumerQuotaMetric API calls.", + // "description": "The resource name of the quota limit. Use the quota limit resource name returned by previous ListConsumerQuotaMetrics and GetConsumerQuotaMetric API calls.", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+$", // "required": true, @@ -7469,6 +6641,11 @@ func (c *ServicesConsumerQuotaMetricsLimitsGetCall) Do(opts ...googleapi.CallOpt // "BASIC", // "FULL" // ], + // "enumDescriptions": [ + // "No quota view specified. 
Requests that do not specify a quota view will typically default to the BASIC view.", + // "Only buckets with overrides are shown in the response.", + // "Include per-location buckets even if they do not have overrides. When the view is FULL, and a limit has regional or zonal quota, the limit will include buckets for all regions or zones that could support overrides, even if none are currently present. In some cases this will cause the response to become very large; callers that do not need this extra information should use the BASIC view instead." + // ], // "location": "query", // "type": "string" // } @@ -7496,16 +6673,11 @@ type ServicesConsumerQuotaMetricsLimitsAdminOverridesCreateCall struct { header_ http.Header } -// Create: Creates an admin override. -// An admin override is applied by an administrator of a parent folder -// or -// parent organization of the consumer receiving the override. An -// admin -// override is intended to limit the amount of quota the consumer can -// use out -// of the total quota pool allocated to all children of the folder -// or -// organization. +// Create: Creates an admin override. An admin override is applied by an +// administrator of a parent folder or parent organization of the +// consumer receiving the override. An admin override is intended to +// limit the amount of quota the consumer can use out of the total quota +// pool allocated to all children of the folder or organization. func (r *ServicesConsumerQuotaMetricsLimitsAdminOverridesService) Create(parent string, quotaoverride *QuotaOverride) *ServicesConsumerQuotaMetricsLimitsAdminOverridesCreateCall { c := &ServicesConsumerQuotaMetricsLimitsAdminOverridesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -7514,13 +6686,10 @@ func (r *ServicesConsumerQuotaMetricsLimitsAdminOverridesService) Create(parent } // Force sets the optional parameter "force": Whether to force the -// creation of the quota override. -// If creating an override would cause the effective quota for the -// consumer to -// decrease by more than 10 percent, the call is rejected, as a safety -// measure -// to avoid accidentally decreasing quota too quickly. Setting the -// force +// creation of the quota override. If creating an override would cause +// the effective quota for the consumer to decrease by more than 10 +// percent, the call is rejected, as a safety measure to avoid +// accidentally decreasing quota too quickly. Setting the force // parameter to true ignores this restriction. func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesCreateCall) Force(force bool) *ServicesConsumerQuotaMetricsLimitsAdminOverridesCreateCall { c.urlParams_.Set("force", fmt.Sprint(force)) @@ -7554,7 +6723,7 @@ func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesCreateCall) Header() ht func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7618,7 +6787,7 @@ func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesCreateCall) Do(opts ... } return ret, nil // { - // "description": "Creates an admin override.\nAn admin override is applied by an administrator of a parent folder or\nparent organization of the consumer receiving the override. 
An admin\noverride is intended to limit the amount of quota the consumer can use out\nof the total quota pool allocated to all children of the folder or\norganization.", + // "description": "Creates an admin override. An admin override is applied by an administrator of a parent folder or parent organization of the consumer receiving the override. An admin override is intended to limit the amount of quota the consumer can use out of the total quota pool allocated to all children of the folder or organization.", // "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services/{servicesId}/consumerQuotaMetrics/{consumerQuotaMetricsId}/limits/{limitsId}/adminOverrides", // "httpMethod": "POST", // "id": "serviceusage.services.consumerQuotaMetrics.limits.adminOverrides.create", @@ -7627,12 +6796,12 @@ func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesCreateCall) Do(opts ... // ], // "parameters": { // "force": { - // "description": "Whether to force the creation of the quota override.\nIf creating an override would cause the effective quota for the consumer to\ndecrease by more than 10 percent, the call is rejected, as a safety measure\nto avoid accidentally decreasing quota too quickly. Setting the force\nparameter to true ignores this restriction.", + // "description": "Whether to force the creation of the quota override. If creating an override would cause the effective quota for the consumer to decrease by more than 10 percent, the call is rejected, as a safety measure to avoid accidentally decreasing quota too quickly. Setting the force parameter to true ignores this restriction.", // "location": "query", // "type": "boolean" // }, // "parent": { - // "description": "The resource name of the parent quota limit, returned by a\nListConsumerQuotaMetrics or GetConsumerQuotaMetric call.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion`", + // "description": "The resource name of the parent quota limit, returned by a ListConsumerQuotaMetrics or GetConsumerQuotaMetric call. An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion`", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+$", // "required": true, @@ -7672,13 +6841,10 @@ func (r *ServicesConsumerQuotaMetricsLimitsAdminOverridesService) Delete(name st } // Force sets the optional parameter "force": Whether to force the -// deletion of the quota override. -// If deleting an override would cause the effective quota for the -// consumer to -// decrease by more than 10 percent, the call is rejected, as a safety -// measure -// to avoid accidentally decreasing quota too quickly. Setting the -// force +// deletion of the quota override. If deleting an override would cause +// the effective quota for the consumer to decrease by more than 10 +// percent, the call is rejected, as a safety measure to avoid +// accidentally decreasing quota too quickly. Setting the force // parameter to true ignores this restriction. 
func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesDeleteCall) Force(force bool) *ServicesConsumerQuotaMetricsLimitsAdminOverridesDeleteCall { c.urlParams_.Set("force", fmt.Sprint(force)) @@ -7712,7 +6878,7 @@ func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesDeleteCall) Header() ht func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7780,12 +6946,12 @@ func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesDeleteCall) Do(opts ... // ], // "parameters": { // "force": { - // "description": "Whether to force the deletion of the quota override.\nIf deleting an override would cause the effective quota for the consumer to\ndecrease by more than 10 percent, the call is rejected, as a safety measure\nto avoid accidentally decreasing quota too quickly. Setting the force\nparameter to true ignores this restriction.", + // "description": "Whether to force the deletion of the quota override. If deleting an override would cause the effective quota for the consumer to decrease by more than 10 percent, the call is rejected, as a safety measure to avoid accidentally decreasing quota too quickly. Setting the force parameter to true ignores this restriction.", // "location": "query", // "type": "boolean" // }, // "name": { - // "description": "The resource name of the override to delete.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminOverrides/4a3f2c1d`", + // "description": "The resource name of the override to delete. An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminOverrides/4a3f2c1d`", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+/adminOverrides/[^/]+$", // "required": true, @@ -7830,8 +6996,7 @@ func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesListCall) PageSize(page } // PageToken sets the optional parameter "pageToken": Token identifying -// which result to start with; returned by a previous list -// call. +// which result to start with; returned by a previous list call. 
func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesListCall) PageToken(pageToken string) *ServicesConsumerQuotaMetricsLimitsAdminOverridesListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -7874,7 +7039,7 @@ func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesListCall) Header() http func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7951,12 +7116,12 @@ func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesListCall) Do(opts ...go // "type": "integer" // }, // "pageToken": { - // "description": "Token identifying which result to start with; returned by a previous list\ncall.", + // "description": "Token identifying which result to start with; returned by a previous list call.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "The resource name of the parent quota limit, returned by a\nListConsumerQuotaMetrics or GetConsumerQuotaMetric call.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion`", + // "description": "The resource name of the parent quota limit, returned by a ListConsumerQuotaMetrics or GetConsumerQuotaMetric call. An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion`", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+$", // "required": true, @@ -8016,22 +7181,19 @@ func (r *ServicesConsumerQuotaMetricsLimitsAdminOverridesService) Patch(name str } // Force sets the optional parameter "force": Whether to force the -// update of the quota override. -// If updating an override would cause the effective quota for the -// consumer to -// decrease by more than 10 percent, the call is rejected, as a safety -// measure -// to avoid accidentally decreasing quota too quickly. Setting the -// force -// parameter to true ignores this restriction. +// update of the quota override. If updating an override would cause the +// effective quota for the consumer to decrease by more than 10 percent, +// the call is rejected, as a safety measure to avoid accidentally +// decreasing quota too quickly. Setting the force parameter to true +// ignores this restriction. func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesPatchCall) Force(force bool) *ServicesConsumerQuotaMetricsLimitsAdminOverridesPatchCall { c.urlParams_.Set("force", fmt.Sprint(force)) return c } // UpdateMask sets the optional parameter "updateMask": Update only the -// specified fields of the override. -// If unset, all fields will be updated. +// specified fields of the override. If unset, all fields will be +// updated. 
func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesPatchCall) UpdateMask(updateMask string) *ServicesConsumerQuotaMetricsLimitsAdminOverridesPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -8064,7 +7226,7 @@ func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesPatchCall) Header() htt func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8137,19 +7299,19 @@ func (c *ServicesConsumerQuotaMetricsLimitsAdminOverridesPatchCall) Do(opts ...g // ], // "parameters": { // "force": { - // "description": "Whether to force the update of the quota override.\nIf updating an override would cause the effective quota for the consumer to\ndecrease by more than 10 percent, the call is rejected, as a safety measure\nto avoid accidentally decreasing quota too quickly. Setting the force\nparameter to true ignores this restriction.", + // "description": "Whether to force the update of the quota override. If updating an override would cause the effective quota for the consumer to decrease by more than 10 percent, the call is rejected, as a safety measure to avoid accidentally decreasing quota too quickly. Setting the force parameter to true ignores this restriction.", // "location": "query", // "type": "boolean" // }, // "name": { - // "description": "The resource name of the override to update.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminOverrides/4a3f2c1d`", + // "description": "The resource name of the override to update. An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/adminOverrides/4a3f2c1d`", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+/adminOverrides/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "Update only the specified fields of the override.\nIf unset, all fields will be updated.", + // "description": "Update only the specified fields of the override. If unset, all fields will be updated.", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -8181,14 +7343,11 @@ type ServicesConsumerQuotaMetricsLimitsConsumerOverridesCreateCall struct { header_ http.Header } -// Create: Creates a consumer override. -// A consumer override is applied to the consumer on its own authority -// to -// limit its own quota usage. Consumer overrides cannot be used to grant -// more -// quota than would be allowed by admin overrides, producer overrides, -// or the -// default limit of the service. +// Create: Creates a consumer override. A consumer override is applied +// to the consumer on its own authority to limit its own quota usage. +// Consumer overrides cannot be used to grant more quota than would be +// allowed by admin overrides, producer overrides, or the default limit +// of the service. 
func (r *ServicesConsumerQuotaMetricsLimitsConsumerOverridesService) Create(parent string, quotaoverride *QuotaOverride) *ServicesConsumerQuotaMetricsLimitsConsumerOverridesCreateCall { c := &ServicesConsumerQuotaMetricsLimitsConsumerOverridesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -8197,13 +7356,10 @@ func (r *ServicesConsumerQuotaMetricsLimitsConsumerOverridesService) Create(pare } // Force sets the optional parameter "force": Whether to force the -// creation of the quota override. -// If creating an override would cause the effective quota for the -// consumer to -// decrease by more than 10 percent, the call is rejected, as a safety -// measure -// to avoid accidentally decreasing quota too quickly. Setting the -// force +// creation of the quota override. If creating an override would cause +// the effective quota for the consumer to decrease by more than 10 +// percent, the call is rejected, as a safety measure to avoid +// accidentally decreasing quota too quickly. Setting the force // parameter to true ignores this restriction. func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesCreateCall) Force(force bool) *ServicesConsumerQuotaMetricsLimitsConsumerOverridesCreateCall { c.urlParams_.Set("force", fmt.Sprint(force)) @@ -8237,7 +7393,7 @@ func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesCreateCall) Header() func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8301,7 +7457,7 @@ func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesCreateCall) Do(opts } return ret, nil // { - // "description": "Creates a consumer override.\nA consumer override is applied to the consumer on its own authority to\nlimit its own quota usage. Consumer overrides cannot be used to grant more\nquota than would be allowed by admin overrides, producer overrides, or the\ndefault limit of the service.", + // "description": "Creates a consumer override. A consumer override is applied to the consumer on its own authority to limit its own quota usage. Consumer overrides cannot be used to grant more quota than would be allowed by admin overrides, producer overrides, or the default limit of the service.", // "flatPath": "v1beta1/{v1beta1Id}/{v1beta1Id1}/services/{servicesId}/consumerQuotaMetrics/{consumerQuotaMetricsId}/limits/{limitsId}/consumerOverrides", // "httpMethod": "POST", // "id": "serviceusage.services.consumerQuotaMetrics.limits.consumerOverrides.create", @@ -8310,12 +7466,12 @@ func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesCreateCall) Do(opts // ], // "parameters": { // "force": { - // "description": "Whether to force the creation of the quota override.\nIf creating an override would cause the effective quota for the consumer to\ndecrease by more than 10 percent, the call is rejected, as a safety measure\nto avoid accidentally decreasing quota too quickly. Setting the force\nparameter to true ignores this restriction.", + // "description": "Whether to force the creation of the quota override. If creating an override would cause the effective quota for the consumer to decrease by more than 10 percent, the call is rejected, as a safety measure to avoid accidentally decreasing quota too quickly. 
Setting the force parameter to true ignores this restriction.", // "location": "query", // "type": "boolean" // }, // "parent": { - // "description": "The resource name of the parent quota limit, returned by a\nListConsumerQuotaMetrics or GetConsumerQuotaMetric call.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion`", + // "description": "The resource name of the parent quota limit, returned by a ListConsumerQuotaMetrics or GetConsumerQuotaMetric call. An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion`", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+$", // "required": true, @@ -8355,13 +7511,10 @@ func (r *ServicesConsumerQuotaMetricsLimitsConsumerOverridesService) Delete(name } // Force sets the optional parameter "force": Whether to force the -// deletion of the quota override. -// If deleting an override would cause the effective quota for the -// consumer to -// decrease by more than 10 percent, the call is rejected, as a safety -// measure -// to avoid accidentally decreasing quota too quickly. Setting the -// force +// deletion of the quota override. If deleting an override would cause +// the effective quota for the consumer to decrease by more than 10 +// percent, the call is rejected, as a safety measure to avoid +// accidentally decreasing quota too quickly. Setting the force // parameter to true ignores this restriction. func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesDeleteCall) Force(force bool) *ServicesConsumerQuotaMetricsLimitsConsumerOverridesDeleteCall { c.urlParams_.Set("force", fmt.Sprint(force)) @@ -8395,7 +7548,7 @@ func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesDeleteCall) Header() func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8463,12 +7616,12 @@ func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesDeleteCall) Do(opts // ], // "parameters": { // "force": { - // "description": "Whether to force the deletion of the quota override.\nIf deleting an override would cause the effective quota for the consumer to\ndecrease by more than 10 percent, the call is rejected, as a safety measure\nto avoid accidentally decreasing quota too quickly. Setting the force\nparameter to true ignores this restriction.", + // "description": "Whether to force the deletion of the quota override. If deleting an override would cause the effective quota for the consumer to decrease by more than 10 percent, the call is rejected, as a safety measure to avoid accidentally decreasing quota too quickly. Setting the force parameter to true ignores this restriction.", // "location": "query", // "type": "boolean" // }, // "name": { - // "description": "The resource name of the override to delete.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/consumerOverrides/4a3f2c1d`", + // "description": "The resource name of the override to delete. 
An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/consumerOverrides/4a3f2c1d`", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+/consumerOverrides/[^/]+$", // "required": true, @@ -8513,8 +7666,7 @@ func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesListCall) PageSize(p } // PageToken sets the optional parameter "pageToken": Token identifying -// which result to start with; returned by a previous list -// call. +// which result to start with; returned by a previous list call. func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesListCall) PageToken(pageToken string) *ServicesConsumerQuotaMetricsLimitsConsumerOverridesListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -8557,7 +7709,7 @@ func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesListCall) Header() h func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8634,12 +7786,12 @@ func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesListCall) Do(opts .. // "type": "integer" // }, // "pageToken": { - // "description": "Token identifying which result to start with; returned by a previous list\ncall.", + // "description": "Token identifying which result to start with; returned by a previous list call.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "The resource name of the parent quota limit, returned by a\nListConsumerQuotaMetrics or GetConsumerQuotaMetric call.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion`", + // "description": "The resource name of the parent quota limit, returned by a ListConsumerQuotaMetrics or GetConsumerQuotaMetric call. An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion`", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+$", // "required": true, @@ -8699,22 +7851,19 @@ func (r *ServicesConsumerQuotaMetricsLimitsConsumerOverridesService) Patch(name } // Force sets the optional parameter "force": Whether to force the -// update of the quota override. -// If updating an override would cause the effective quota for the -// consumer to -// decrease by more than 10 percent, the call is rejected, as a safety -// measure -// to avoid accidentally decreasing quota too quickly. Setting the -// force -// parameter to true ignores this restriction. +// update of the quota override. If updating an override would cause the +// effective quota for the consumer to decrease by more than 10 percent, +// the call is rejected, as a safety measure to avoid accidentally +// decreasing quota too quickly. Setting the force parameter to true +// ignores this restriction. 
func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesPatchCall) Force(force bool) *ServicesConsumerQuotaMetricsLimitsConsumerOverridesPatchCall { c.urlParams_.Set("force", fmt.Sprint(force)) return c } // UpdateMask sets the optional parameter "updateMask": Update only the -// specified fields of the override. -// If unset, all fields will be updated. +// specified fields of the override. If unset, all fields will be +// updated. func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesPatchCall) UpdateMask(updateMask string) *ServicesConsumerQuotaMetricsLimitsConsumerOverridesPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -8747,7 +7896,7 @@ func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesPatchCall) Header() func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8820,19 +7969,19 @@ func (c *ServicesConsumerQuotaMetricsLimitsConsumerOverridesPatchCall) Do(opts . // ], // "parameters": { // "force": { - // "description": "Whether to force the update of the quota override.\nIf updating an override would cause the effective quota for the consumer to\ndecrease by more than 10 percent, the call is rejected, as a safety measure\nto avoid accidentally decreasing quota too quickly. Setting the force\nparameter to true ignores this restriction.", + // "description": "Whether to force the update of the quota override. If updating an override would cause the effective quota for the consumer to decrease by more than 10 percent, the call is rejected, as a safety measure to avoid accidentally decreasing quota too quickly. Setting the force parameter to true ignores this restriction.", // "location": "query", // "type": "boolean" // }, // "name": { - // "description": "The resource name of the override to update.\n\nAn example name would be:\n`projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/consumerOverrides/4a3f2c1d`", + // "description": "The resource name of the override to update. An example name would be: `projects/123/services/compute.googleapis.com/consumerQuotaMetrics/compute.googleapis.com%2Fcpus/limits/%2Fproject%2Fregion/consumerOverrides/4a3f2c1d`", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+/consumerQuotaMetrics/[^/]+/limits/[^/]+/consumerOverrides/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "Update only the specified fields of the override.\nIf unset, all fields will be updated.", + // "description": "Update only the specified fields of the override. If unset, all fields will be updated.", // "format": "google-fieldmask", // "location": "query", // "type": "string" diff --git a/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-api.json b/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-api.json index 41b8aa950b6..d28ae7e5204 100644 --- a/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-api.json +++ b/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-api.json @@ -126,7 +126,7 @@ ], "parameters": { "name": { - "description": "The name of the requested project. Values are of the form\n`projects/\u003cproject\u003e`.", + "description": "The name of the requested project. 
Values are of the form `projects/`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -151,7 +151,7 @@ ], "parameters": { "name": { - "description": "The name of the requested project. Values are of the form\n`projects/\u003cproject\u003e`.", + "description": "The name of the requested project. Values are of the form `projects/`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -174,7 +174,7 @@ "repos": { "methods": { "create": { - "description": "Creates a repo in the given project with the given name.\n\nIf the named repository already exists, `CreateRepo` returns\n`ALREADY_EXISTS`.", + "description": "Creates a repo in the given project with the given name. If the named repository already exists, `CreateRepo` returns `ALREADY_EXISTS`.", "flatPath": "v1/projects/{projectsId}/repos", "httpMethod": "POST", "id": "sourcerepo.projects.repos.create", @@ -183,7 +183,7 @@ ], "parameters": { "parent": { - "description": "The project in which to create the repo. Values are of the form\n`projects/\u003cproject\u003e`.", + "description": "The project in which to create the repo. Values are of the form `projects/`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -212,7 +212,7 @@ ], "parameters": { "name": { - "description": "The name of the repo to delete. Values are of the form\n`projects/\u003cproject\u003e/repos/\u003crepo\u003e`.", + "description": "The name of the repo to delete. Values are of the form `projects//repos/`.", "location": "path", "pattern": "^projects/[^/]+/repos/.*$", "required": true, @@ -238,7 +238,7 @@ ], "parameters": { "name": { - "description": "The name of the requested repository. Values are of the form\n`projects/\u003cproject\u003e/repos/\u003crepo\u003e`.", + "description": "The name of the requested repository. Values are of the form `projects//repos/`.", "location": "path", "pattern": "^projects/[^/]+/repos/.*$", "required": true, @@ -257,7 +257,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "v1/projects/{projectsId}/repos/{reposId}:getIamPolicy", "httpMethod": "GET", "id": "sourcerepo.projects.repos.getIamPolicy", @@ -266,13 +266,13 @@ ], "parameters": { "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. 
To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "location": "query", "type": "integer" }, "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/repos/.*$", "required": true, @@ -291,7 +291,7 @@ ] }, "list": { - "description": "Returns all repos belonging to a project. The sizes of the repos are\nnot set by ListRepos. To get the size of a repo, use GetRepo.", + "description": "Returns all repos belonging to a project. The sizes of the repos are not set by ListRepos. To get the size of a repo, use GetRepo.", "flatPath": "v1/projects/{projectsId}/repos", "httpMethod": "GET", "id": "sourcerepo.projects.repos.list", @@ -300,20 +300,20 @@ ], "parameters": { "name": { - "description": "The project ID whose repos should be listed. Values are of the form\n`projects/\u003cproject\u003e`.", + "description": "The project ID whose repos should be listed. Values are of the form `projects/`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, "type": "string" }, "pageSize": { - "description": "Maximum number of repositories to return; between 1 and 500.\nIf not set or zero, defaults to 100 at the server.", + "description": "Maximum number of repositories to return; between 1 and 500. If not set or zero, defaults to 100 at the server.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Resume listing repositories where a prior ListReposResponse\nleft off. This is an opaque token that must be obtained from\na recent, prior ListReposResponse's next_page_token field.", + "description": "Resume listing repositories where a prior ListReposResponse left off. This is an opaque token that must be obtained from a recent, prior ListReposResponse's next_page_token field.", "location": "query", "type": "string" } @@ -339,7 +339,7 @@ ], "parameters": { "name": { - "description": "The name of the requested repository. Values are of the form\n`projects/\u003cproject\u003e/repos/\u003crepo\u003e`.", + "description": "The name of the requested repository. Values are of the form `projects//repos/`.", "location": "path", "pattern": "^projects/[^/]+/repos/.*$", "required": true, @@ -358,7 +358,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "flatPath": "v1/projects/{projectsId}/repos/{reposId}:setIamPolicy", "httpMethod": "POST", "id": "sourcerepo.projects.repos.setIamPolicy", @@ -367,7 +367,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. 
See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/repos/.*$", "required": true, @@ -387,7 +387,7 @@ ] }, "sync": { - "description": "Synchronize a connected repo.\n\nThe response contains SyncRepoMetadata in the metadata field.", + "description": "Synchronize a connected repo. The response contains SyncRepoMetadata in the metadata field.", "flatPath": "v1/projects/{projectsId}/repos/{reposId}:sync", "httpMethod": "POST", "id": "sourcerepo.projects.repos.sync", @@ -396,7 +396,7 @@ ], "parameters": { "name": { - "description": "The name of the repo to synchronize. Values are of the form\n`projects/\u003cproject\u003e/repos/\u003crepo\u003e`.", + "description": "The name of the repo to synchronize. Values are of the form `projects//repos/`.", "location": "path", "pattern": "^projects/[^/]+/repos/.*$", "required": true, @@ -415,7 +415,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.", "flatPath": "v1/projects/{projectsId}/repos/{reposId}:testIamPermissions", "httpMethod": "POST", "id": "sourcerepo.projects.repos.testIamPermissions", @@ -424,7 +424,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", "location": "path", "pattern": "^projects/[^/]+/repos/.*$", "required": true, @@ -450,11 +450,11 @@ } } }, - "revision": "20200415", + "revision": "20200914", "rootUrl": "https://sourcerepo.googleapis.com/", "schemas": { "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"sampleservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:aliya@example.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts jose@example.com from DATA_READ logging, and\naliya@example.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. 
An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -465,18 +465,18 @@ "type": "array" }, "service": { - "description": "Specifies a service that will be enabled for audit logging.\nFor example, `storage.googleapis.com`, `cloudsql.googleapis.com`.\n`allServices` is a special value that covers all services.", + "description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.", "type": "string" } }, "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\njose@example.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions. Example: { \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { - "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", + "description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.", "items": { "type": "string" }, @@ -507,57 +507,57 @@ "properties": { "condition": { "$ref": "Expr", - "description": "The condition that is associated with this binding.\nNOTE: An unsatisfied condition will not allow user access via current\nbinding. Different bindings, including their conditions, are examined\nindependently." + "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." 
}, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a user that has been recently deleted. For\n example, `alice@example.com?uid=123456789012345678901`. If the user is\n recovered, this value reverts to `user:{emailid}` and the recovered user\n retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus\n unique identifier) representing a service account that has been recently\n deleted. For example,\n `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.\n If the service account is undeleted, this value reverts to\n `serviceAccount:{emailid}` and the undeleted service account retains the\n role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a Google group that has been recently\n deleted. For example, `admins@example.com?uid=123456789012345678901`. If\n the group is recovered, this value reverts to `group:{emailid}` and the\n recovered group retains the role in the binding.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. 
* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", "items": { "type": "string" }, "type": "array" }, "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", "type": "string" } }, "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" }, "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL)\nsyntax. CEL is a C-like expression language. The syntax and semantics of CEL\nare documented at https://github.com/google/cel-spec.\n\nExample (Comparison):\n\n title: \"Summary size limit\"\n description: \"Determines if a summary is less than 100 chars\"\n expression: \"document.summary.size() \u003c 100\"\n\nExample (Equality):\n\n title: \"Requestor is owner\"\n description: \"Determines if requestor is the document owner\"\n expression: \"document.owner == request.auth.claims.email\"\n\nExample (Logic):\n\n title: \"Public documents\"\n description: \"Determine whether the document should be publicly visible\"\n expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\n\nExample (Data Manipulation):\n\n title: \"Notification string\"\n description: \"Create a notification string with a timestamp.\"\n expression: \"'New message received at ' + string(document.create_time)\"\n\nThe exact variables and functions that may be referenced within an expression\nare determined by the service that evaluates it. See the service\ndocumentation for additional information.", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. 
Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { - "description": "Optional. Description of the expression. This is a longer text which\ndescribes the expression, e.g. when hovered over it in a UI.", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, "expression": { - "description": "Textual representation of an expression in Common Expression Language\nsyntax.", + "description": "Textual representation of an expression in Common Expression Language syntax.", "type": "string" }, "location": { - "description": "Optional. String indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", "type": "string" }, "title": { - "description": "Optional. Title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", "type": "string" } }, "type": "object" }, "ListReposResponse": { - "description": "Response for ListRepos. The size is not set in the returned repositories.", + "description": "Response for ListRepos. The size is not set in the returned repositories.", "id": "ListReposResponse", "properties": { "nextPageToken": { - "description": "If non-empty, additional repositories exist within the project. These\ncan be retrieved by including this value in the next ListReposRequest's\npage_token field.", + "description": "If non-empty, additional repositories exist within the project. These can be retrieved by including this value in the next ListReposRequest's page_token field.", "type": "string" }, "repos": { @@ -571,11 +571,11 @@ "type": "object" }, "MirrorConfig": { - "description": "Configuration to automatically mirror a repository from another\nhosting service, for example GitHub or Bitbucket.", + "description": "Configuration to automatically mirror a repository from another hosting service, for example GitHub or Bitbucket.", "id": "MirrorConfig", "properties": { "deployKeyId": { - "description": "ID of the SSH deploy key at the other hosting service.\nRemoving this key from the other service would deauthorize\nGoogle Cloud Source Repositories from mirroring.", + "description": "ID of the SSH deploy key at the other hosting service. 
Removing this key from the other service would deauthorize Google Cloud Source Repositories from mirroring.", "type": "string" }, "url": { @@ -583,18 +583,18 @@ "type": "string" }, "webhookId": { - "description": "ID of the webhook listening to updates to trigger mirroring.\nRemoving this webhook from the other hosting service will stop\nGoogle Cloud Source Repositories from receiving notifications,\nand thereby disabling mirroring.", + "description": "ID of the webhook listening to updates to trigger mirroring. Removing this webhook from the other hosting service will stop Google Cloud Source Repositories from receiving notifications, and thereby disabling mirroring.", "type": "string" } }, "type": "object" }, "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", "properties": { "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", "type": "boolean" }, "error": { @@ -606,11 +606,11 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", "type": "object" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, "response": { @@ -618,14 +618,14 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. 
If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", "type": "object" } }, "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access\ncontrols for Google Cloud resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). A `role` is a named list of\npermissions; each `role` can be an IAM predefined role or a user-created\ncustom role.\n\nOptionally, a `binding` can specify a `condition`, which is a logical\nexpression that allows access to a resource only if the expression evaluates\nto `true`. A condition can add constraints based on attributes of the\nrequest, the resource, or both.\n\n**JSON example:**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\"user:eve@example.com\"],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n }\n\n**YAML example:**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n - etag: BwWWja0YfJA=\n - version: 3\n\nFor a description of IAM and its features, see the\n[IAM documentation](https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "auditConfigs": { @@ -636,19 +636,19 @@ "type": "array" }, "bindings": { - "description": "Associates a list of `members` to a `role`. Optionally, may specify a\n`condition` that determines how and when the `bindings` are applied. Each\nof the `bindings` must contain at least one member.", + "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", "format": "byte", "type": "string" }, "version": { - "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. 
Requests that specify an invalid value\nare rejected.\n\nAny operation that affects conditional role bindings must specify version\n`3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding\n* Adding a conditional role binding to a policy\n* Changing a conditional role binding in a policy\n* Removing any role binding, with or without a condition, from a policy\n that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may\nspecify any valid version or leave the field unset.", + "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -664,14 +664,14 @@ "type": "boolean" }, "name": { - "description": "The name of the project. Values are of the form `projects/\u003cproject\u003e`.", + "description": "The name of the project. Values are of the form `projects/`.", "type": "string" }, "pubsubConfigs": { "additionalProperties": { "$ref": "PubsubConfig" }, - "description": "How this project publishes a change in the repositories through Cloud\nPub/Sub. Keyed by the topic names.", + "description": "How this project publishes a change in the repositories through Cloud Pub/Sub. Keyed by the topic names.", "type": "object" } }, @@ -696,11 +696,11 @@ "type": "string" }, "serviceAccountEmail": { - "description": "Email address of the service account used for publishing Cloud Pub/Sub\nmessages. This service account needs to be in the same project as the\nPubsubConfig. When added, the caller needs to have\niam.serviceAccounts.actAs permission on this service account. If\nunspecified, it defaults to the compute engine default service account.", + "description": "Email address of the service account used for publishing Cloud Pub/Sub messages. This service account needs to be in the same project as the PubsubConfig. When added, the caller needs to have iam.serviceAccounts.actAs permission on this service account. If unspecified, it defaults to the compute engine default service account.", "type": "string" }, "topic": { - "description": "A topic of Cloud Pub/Sub. Values are of the form\n`projects/\u003cproject\u003e/topics/\u003ctopic\u003e`. 
The project needs to be the same\nproject as this config is in.", + "description": "A topic of Cloud Pub/Sub. Values are of the form `projects//topics/`. The project needs to be the same project as this config is in.", "type": "string" } }, @@ -712,26 +712,26 @@ "properties": { "mirrorConfig": { "$ref": "MirrorConfig", - "description": "How this repository mirrors a repository managed by another service.\nRead-only field." + "description": "How this repository mirrors a repository managed by another service. Read-only field." }, "name": { - "description": "Resource name of the repository, of the form\n`projects/\u003cproject\u003e/repos/\u003crepo\u003e`. The repo name may contain slashes.\neg, `projects/myproject/repos/name/with/slash`", + "description": "Resource name of the repository, of the form `projects//repos/`. The repo name may contain slashes. eg, `projects/myproject/repos/name/with/slash`", "type": "string" }, "pubsubConfigs": { "additionalProperties": { "$ref": "PubsubConfig" }, - "description": "How this repository publishes a change in the repository through Cloud\nPub/Sub. Keyed by the topic names.", + "description": "How this repository publishes a change in the repository through Cloud Pub/Sub. Keyed by the topic names.", "type": "object" }, "size": { - "description": "The disk usage of the repo, in bytes. Read-only field. Size is only\nreturned by GetRepo.", + "description": "The disk usage of the repo, in bytes. Read-only field. Size is only returned by GetRepo.", "format": "int64", "type": "string" }, "url": { - "description": "URL to clone the repository from Google Cloud Source Repositories.\nRead-only field.", + "description": "URL to clone the repository from Google Cloud Source Repositories. Read-only field.", "type": "string" } }, @@ -743,10 +743,10 @@ "properties": { "policy": { "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." }, "updateMask": { - "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. If no mask is provided, the\nfollowing default mask is used:\n\n`paths: \"bindings, etag\"`", + "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: \"bindings, etag\"`", "format": "google-fieldmask", "type": "string" } @@ -754,7 +754,7 @@ "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. 
It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -763,7 +763,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -774,18 +774,18 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, "type": "object" }, "SyncRepoMetadata": { - "description": "Metadata of SyncRepo.\n\nThis message is in the metadata field of Operation.", + "description": "Metadata of SyncRepo. This message is in the metadata field of Operation.", "id": "SyncRepoMetadata", "properties": { "name": { - "description": "The name of the repo being synchronized. Values are of the form\n`projects/\u003cproject\u003e/repos/\u003crepo\u003e`.", + "description": "The name of the repo being synchronized. Values are of the form `projects//repos/`.", "type": "string" }, "startTime": { @@ -816,7 +816,7 @@ "id": "TestIamPermissionsRequest", "properties": { "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "items": { "type": "string" }, @@ -830,7 +830,7 @@ "id": "TestIamPermissionsResponse", "properties": { "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { "type": "string" }, @@ -848,7 +848,7 @@ "description": "The new configuration for the project." }, "updateMask": { - "description": "A FieldMask specifying which fields of the project_config to modify. Only\nthe fields in the mask will be modified. If no mask is provided, this\nrequest is no-op.", + "description": "A FieldMask specifying which fields of the project_config to modify. Only the fields in the mask will be modified. If no mask is provided, this request is no-op.", "format": "google-fieldmask", "type": "string" } @@ -864,7 +864,7 @@ "description": "The new configuration for the repository." }, "updateMask": { - "description": "A FieldMask specifying which fields of the repo to modify. Only the fields\nin the mask will be modified. 
If no mask is provided, this request is\nno-op.", + "description": "A FieldMask specifying which fields of the repo to modify. Only the fields in the mask will be modified. If no mask is provided, this request is no-op.", "format": "google-fieldmask", "type": "string" } diff --git a/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-gen.go b/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-gen.go index ee022edd0b8..ed6e9333563 100644 --- a/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-gen.go +++ b/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-gen.go @@ -79,6 +79,7 @@ const apiId = "sourcerepo:v1" const apiName = "sourcerepo" const apiVersion = "v1" const basePath = "https://sourcerepo.googleapis.com/" +const mtlsBasePath = "https://sourcerepo.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -106,6 +107,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -170,72 +172,31 @@ type ProjectsReposService struct { s *Service } -// AuditConfig: Specifies the audit configuration for a service. -// The configuration determines which permission types are logged, and -// what -// identities, if any, are exempted from logging. -// An AuditConfig must have one or more AuditLogConfigs. -// -// If there are AuditConfigs for both `allServices` and a specific -// service, -// the union of the two AuditConfigs is used for that service: the -// log_types -// specified in each AuditConfig are enabled, and the exempted_members -// in each -// AuditLogConfig are exempted. -// -// Example Policy with multiple AuditConfigs: -// -// { -// "audit_configs": [ -// { -// "service": "allServices" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// }, -// { -// "log_type": "ADMIN_READ", -// } -// ] -// }, -// { -// "service": "sampleservice.googleapis.com" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// }, -// { -// "log_type": "DATA_WRITE", -// "exempted_members": [ -// "user:aliya@example.com" -// ] -// } -// ] -// } -// ] -// } -// -// For sampleservice, this policy enables DATA_READ, DATA_WRITE and -// ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, -// and -// aliya@example.com from DATA_WRITE logging. +// AuditConfig: Specifies the audit configuration for a service. The +// configuration determines which permission types are logged, and what +// identities, if any, are exempted from logging. An AuditConfig must +// have one or more AuditLogConfigs. If there are AuditConfigs for both +// `allServices` and a specific service, the union of the two +// AuditConfigs is used for that service: the log_types specified in +// each AuditConfig are enabled, and the exempted_members in each +// AuditLogConfig are exempted. 
Example Policy with multiple +// AuditConfigs: { "audit_configs": [ { "service": "allServices", +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": +// [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { +// "log_type": "ADMIN_READ" } ] }, { "service": +// "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": +// "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ +// "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy +// enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts +// jose@example.com from DATA_READ logging, and aliya@example.com from +// DATA_WRITE logging. type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of // permission. AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` - // Service: Specifies a service that will be enabled for audit - // logging. - // For example, `storage.googleapis.com`, - // `cloudsql.googleapis.com`. + // Service: Specifies a service that will be enabled for audit logging. + // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. // `allServices` is a special value that covers all services. Service string `json:"service,omitempty"` @@ -264,31 +225,15 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { } // AuditLogConfig: Provides the configuration for logging a type of -// permissions. -// Example: -// -// { -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// } -// ] -// } -// -// This enables 'DATA_READ' and 'DATA_WRITE' logging, while -// exempting -// jose@example.com from DATA_READ logging. +// permissions. Example: { "audit_log_configs": [ { "log_type": +// "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { +// "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and +// 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ +// logging. type AuditLogConfig struct { // ExemptedMembers: Specifies the identities that do not cause logging - // for this type of - // permission. - // Follows the same format of Binding.members. + // for this type of permission. Follows the same format of + // Binding.members. ExemptedMembers []string `json:"exemptedMembers,omitempty"` // LogType: The log type that this config enables. @@ -326,83 +271,53 @@ func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: The condition that is associated with this binding. - // NOTE: An unsatisfied condition will not allow user access via - // current - // binding. Different bindings, including their conditions, are - // examined - // independently. + // Condition: The condition that is associated with this binding. If the + // condition evaluates to `true`, then this binding applies to the + // current request. If the condition evaluates to `false`, then this + // binding does not apply to the current request. However, a different + // role binding might grant the same role to one or more of the members + // in this binding. To learn which resources support conditions in their + // IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud - // Platform resource. 
- // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@example.com` . - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. - // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a user that has been recently deleted. - // For - // example, `alice@example.com?uid=123456789012345678901`. If the - // user is - // recovered, this value reverts to `user:{emailid}` and the - // recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address - // (plus - // unique identifier) representing a service account that has been - // recently - // deleted. For example, - // + // Platform resource. `members` can have the following values: * + // `allUsers`: A special identifier that represents anyone who is on the + // internet; with or without a Google account. * + // `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. * + // `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . * + // `serviceAccount:{emailid}`: An email address that represents a + // service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An + // email address that represents a Google group. For example, + // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An + // email address (plus unique identifier) representing a user that has + // been recently deleted. For example, + // `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered + // user retains the role in the binding. * + // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account - // retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a Google group that has been recently - // deleted. For example, - // `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` - // and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. 
- // - // + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains + // the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: + // An email address (plus unique identifier) representing a Google group + // that has been recently deleted. For example, + // `admins@example.com?uid=123456789012345678901`. If the group is + // recovered, this value reverts to `group:{emailid}` and the recovered + // group retains the role in the binding. * `domain:{domain}`: The G + // Suite domain (primary) that represents all the users of that domain. + // For example, `google.com` or `example.com`. Members []string `json:"members,omitempty"` - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Role: Role that is assigned to `members`. For example, + // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` // ForceSendFields is a list of field names (e.g. "Condition") to @@ -429,17 +344,11 @@ func (s *Binding) MarshalJSON() ([]byte, error) { } // Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -447,65 +356,40 @@ type Empty struct { } // Expr: Represents a textual expression in the Common Expression -// Language (CEL) -// syntax. CEL is a C-like expression language. The syntax and semantics -// of CEL -// are documented at https://github.com/google/cel-spec. -// -// Example (Comparison): -// -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" -// -// Example (Equality): -// -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == -// request.auth.claims.email" -// -// Example (Logic): -// -// title: "Public documents" -// description: "Determine whether the document should be publicly -// visible" -// expression: "document.type != 'private' && document.type != -// 'internal'" -// -// Example (Data Manipulation): -// -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + -// string(document.create_time)" -// -// The exact variables and functions that may be referenced within an -// expression -// are determined by the service that evaluates it. See the -// service -// documentation for additional information. +// Language (CEL) syntax. CEL is a C-like expression language. The +// syntax and semantics of CEL are documented at +// https://github.com/google/cel-spec. 
Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" +// description: "Determine whether the document should be publicly +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. type Expr struct { // Description: Optional. Description of the expression. This is a - // longer text which - // describes the expression, e.g. when hovered over it in a UI. + // longer text which describes the expression, e.g. when hovered over it + // in a UI. Description string `json:"description,omitempty"` // Expression: Textual representation of an expression in Common - // Expression Language - // syntax. + // Expression Language syntax. Expression string `json:"expression,omitempty"` // Location: Optional. String indicating the location of the expression - // for error - // reporting, e.g. a file name and a position in the file. + // for error reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` // Title: Optional. Title for the expression, i.e. a short string - // describing - // its purpose. This can be used e.g. in UIs which allow to enter - // the - // expression. + // describing its purpose. This can be used e.g. in UIs which allow to + // enter the expression. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -531,14 +415,12 @@ func (s *Expr) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ListReposResponse: Response for ListRepos. The size is not set in -// the returned repositories. +// ListReposResponse: Response for ListRepos. The size is not set in the +// returned repositories. type ListReposResponse struct { // NextPageToken: If non-empty, additional repositories exist within the - // project. These - // can be retrieved by including this value in the next - // ListReposRequest's - // page_token field. + // project. These can be retrieved by including this value in the next + // ListReposRequest's page_token field. NextPageToken string `json:"nextPageToken,omitempty"` // Repos: The listed repos. @@ -572,22 +454,19 @@ func (s *ListReposResponse) MarshalJSON() ([]byte, error) { } // MirrorConfig: Configuration to automatically mirror a repository from -// another -// hosting service, for example GitHub or Bitbucket. +// another hosting service, for example GitHub or Bitbucket. type MirrorConfig struct { - // DeployKeyId: ID of the SSH deploy key at the other hosting - // service. - // Removing this key from the other service would deauthorize - // Google Cloud Source Repositories from mirroring. + // DeployKeyId: ID of the SSH deploy key at the other hosting service. + // Removing this key from the other service would deauthorize Google + // Cloud Source Repositories from mirroring. 
DeployKeyId string `json:"deployKeyId,omitempty"` // Url: URL of the main repository at the other hosting service. Url string `json:"url,omitempty"` // WebhookId: ID of the webhook listening to updates to trigger - // mirroring. - // Removing this webhook from the other hosting service will stop - // Google Cloud Source Repositories from receiving notifications, + // mirroring. Removing this webhook from the other hosting service will + // stop Google Cloud Source Repositories from receiving notifications, // and thereby disabling mirroring. WebhookId string `json:"webhookId,omitempty"` @@ -615,52 +494,38 @@ func (s *MirrorConfig) MarshalJSON() ([]byte, error) { } // Operation: This resource represents a long-running operation that is -// the result of a -// network API call. +// the result of a network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in - // progress. - // If `true`, the operation is completed, and either `error` or - // `response` is - // available. + // progress. If `true`, the operation is completed, and either `error` + // or `response` is available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. Error *Status `json:"error,omitempty"` - // Metadata: Service-specific metadata associated with the operation. - // It typically - // contains progress information and common metadata such as create - // time. - // Some services might not provide such metadata. Any method that - // returns a - // long-running operation should document the metadata type, if any. + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same - // service that - // originally returns it. If you use the default HTTP mapping, - // the - // `name` should be a resource name ending with + // service that originally returns it. If you use the default HTTP + // mapping, the `name` should be a resource name ending with // `operations/{unique_id}`. Name string `json:"name,omitempty"` - // Response: The normal response of the operation in case of success. - // If the original - // method returns no data on success, such as `Delete`, the response - // is - // `google.protobuf.Empty`. If the original method is - // standard - // `Get`/`Create`/`Update`, the response should be the resource. For - // other - // methods, the response should have the type `XxxResponse`, where - // `Xxx` - // is the original method name. For example, if the original method - // name - // is `TakeSnapshot()`, the inferred response type - // is - // `TakeSnapshotResponse`. + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as `Delete`, the + // response is `google.protobuf.Empty`. If the original method is + // standard `Get`/`Create`/`Update`, the response should be the + // resource. For other methods, the response should have the type + // `XxxResponse`, where `Xxx` is the original method name. For example, + // if the original method name is `TakeSnapshot()`, the inferred + // response type is `TakeSnapshotResponse`. 
Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -691,143 +556,79 @@ func (s *Operation) MarshalJSON() ([]byte, error) { } // Policy: An Identity and Access Management (IAM) policy, which -// specifies access -// controls for Google Cloud resources. -// -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or -// more -// `members` to a single `role`. Members can be user accounts, service -// accounts, +// specifies access controls for Google Cloud resources. A `Policy` is a +// collection of `bindings`. A `binding` binds one or more `members` to +// a single `role`. Members can be user accounts, service accounts, // Google groups, and domains (such as G Suite). A `role` is a named -// list of -// permissions; each `role` can be an IAM predefined role or a -// user-created -// custom role. -// -// Optionally, a `binding` can specify a `condition`, which is a -// logical +// list of permissions; each `role` can be an IAM predefined role or a +// user-created custom role. For some types of Google Cloud resources, a +// `binding` can also specify a `condition`, which is a logical // expression that allows access to a resource only if the expression -// evaluates -// to `true`. A condition can add constraints based on attributes of -// the -// request, the resource, or both. -// -// **JSON example:** -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": ["user:eve@example.com"], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// -// **YAML example:** -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// - etag: BwWWja0YfJA= -// - version: 3 -// -// For a description of IAM and its features, see the -// [IAM documentation](https://cloud.google.com/iam/docs/). +// evaluates to `true`. A condition can add constraints based on +// attributes of the request, the resource, or both. To learn which +// resources support conditions in their IAM policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). 
**JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: +// 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). type Policy struct { // AuditConfigs: Specifies cloud audit logging configuration for this // policy. AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` // Bindings: Associates a list of `members` to a `role`. Optionally, may - // specify a - // `condition` that determines how and when the `bindings` are applied. - // Each - // of the `bindings` must contain at least one member. + // specify a `condition` that determines how and when the `bindings` are + // applied. Each of the `bindings` must contain at least one member. Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. - // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. **Important:** If you use IAM + // Conditions, you must include the `etag` field whenever you call + // `setIamPolicy`. If you omit this field, then IAM allows you to + // overwrite a version `3` policy with a version `1` policy, and all of // the conditions in the version `3` policy are lost. Etag string `json:"etag,omitempty"` - // Version: Specifies the format of the policy. - // - // Valid values are `0`, `1`, and `3`. 
Requests that specify an invalid - // value - // are rejected. - // + // Version: Specifies the format of the policy. Valid values are `0`, + // `1`, and `3`. Requests that specify an invalid value are rejected. // Any operation that affects conditional role bindings must specify - // version - // `3`. This requirement applies to the following operations: - // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a - // policy - // that includes conditions - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of - // the conditions in the version `3` policy are lost. - // - // If a policy does not include any conditions, operations on that - // policy may - // specify any valid version or leave the field unset. + // version `3`. This requirement applies to the following operations: * + // Getting a policy that includes a conditional role binding * Adding a + // conditional role binding to a policy * Changing a conditional role + // binding in a policy * Removing any role binding, with or without a + // condition, from a policy that includes conditions **Important:** If + // you use IAM Conditions, you must include the `etag` field whenever + // you call `setIamPolicy`. If you omit this field, then IAM allows you + // to overwrite a version `3` policy with a version `1` policy, and all + // of the conditions in the version `3` policy are lost. If a policy + // does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. To learn which + // resources support conditions in their IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Version int64 `json:"version,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -862,13 +663,11 @@ type ProjectConfig struct { // EnablePrivateKeyCheck: Reject a Git push that contains a private key. EnablePrivateKeyCheck bool `json:"enablePrivateKeyCheck,omitempty"` - // Name: The name of the project. Values are of the form - // `projects/`. + // Name: The name of the project. Values are of the form `projects/`. Name string `json:"name,omitempty"` // PubsubConfigs: How this project publishes a change in the - // repositories through Cloud - // Pub/Sub. Keyed by the topic names. + // repositories through Cloud Pub/Sub. Keyed by the topic names. PubsubConfigs map[string]PubsubConfig `json:"pubsubConfigs,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -912,22 +711,16 @@ type PubsubConfig struct { MessageFormat string `json:"messageFormat,omitempty"` // ServiceAccountEmail: Email address of the service account used for - // publishing Cloud Pub/Sub - // messages. This service account needs to be in the same project as - // the - // PubsubConfig. When added, the caller needs to - // have - // iam.serviceAccounts.actAs permission on this service account. - // If - // unspecified, it defaults to the compute engine default service + // publishing Cloud Pub/Sub messages. This service account needs to be + // in the same project as the PubsubConfig. 
When added, the caller needs + // to have iam.serviceAccounts.actAs permission on this service account. + // If unspecified, it defaults to the compute engine default service // account. ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"` - // Topic: A topic of Cloud Pub/Sub. Values are of the - // form - // `projects//topics/`. The project needs to be the - // same - // project as this config is in. + // Topic: A topic of Cloud Pub/Sub. Values are of the form + // `projects//topics/`. The project needs to be the same project as this + // config is in. Topic string `json:"topic,omitempty"` // ForceSendFields is a list of field names (e.g. "MessageFormat") to @@ -957,30 +750,24 @@ func (s *PubsubConfig) MarshalJSON() ([]byte, error) { // source content. type Repo struct { // MirrorConfig: How this repository mirrors a repository managed by - // another service. - // Read-only field. + // another service. Read-only field. MirrorConfig *MirrorConfig `json:"mirrorConfig,omitempty"` - // Name: Resource name of the repository, of the - // form - // `projects//repos/`. The repo name may contain - // slashes. - // eg, `projects/myproject/repos/name/with/slash` + // Name: Resource name of the repository, of the form + // `projects//repos/`. The repo name may contain slashes. eg, + // `projects/myproject/repos/name/with/slash` Name string `json:"name,omitempty"` // PubsubConfigs: How this repository publishes a change in the - // repository through Cloud - // Pub/Sub. Keyed by the topic names. + // repository through Cloud Pub/Sub. Keyed by the topic names. PubsubConfigs map[string]PubsubConfig `json:"pubsubConfigs,omitempty"` // Size: The disk usage of the repo, in bytes. Read-only field. Size is - // only - // returned by GetRepo. + // only returned by GetRepo. Size int64 `json:"size,omitempty,string"` // Url: URL to clone the repository from Google Cloud Source - // Repositories. - // Read-only field. + // Repositories. Read-only field. Url string `json:"url,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1013,20 +800,15 @@ func (s *Repo) MarshalJSON() ([]byte, error) { // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. + // `resource`. The size of the policy is limited to a few 10s of KB. An + // empty policy is a valid policy but certain Cloud Platform services + // (such as Projects) might reject them. Policy *Policy `json:"policy,omitempty"` // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the - // policy to modify. Only - // the fields in the mask will be modified. If no mask is provided, - // the - // following default mask is used: - // - // `paths: "bindings, etag" + // policy to modify. Only the fields in the mask will be modified. If no + // mask is provided, the following default mask is used: `paths: + // "bindings, etag" UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. "Policy") to @@ -1053,32 +835,24 @@ func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. 
-// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. -// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -1104,13 +878,11 @@ func (s *Status) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SyncRepoMetadata: Metadata of SyncRepo. -// -// This message is in the metadata field of Operation. +// SyncRepoMetadata: Metadata of SyncRepo. This message is in the +// metadata field of Operation. type SyncRepoMetadata struct { - // Name: The name of the repo being synchronized. Values are of the - // form - // `projects//repos/`. + // Name: The name of the repo being synchronized. Values are of the form + // `projects//repos/`. Name string `json:"name,omitempty"` // StartTime: The time this operation is started. @@ -1153,11 +925,8 @@ type SyncRepoRequest struct { // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. - // Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For - // more - // information see - // [IAM + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. For more information see [IAM // Overview](https://cloud.google.com/iam/docs/overview#permissions). Permissions []string `json:"permissions,omitempty"` @@ -1188,8 +957,7 @@ func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. + // the caller is allowed. Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1225,10 +993,8 @@ type UpdateProjectConfigRequest struct { ProjectConfig *ProjectConfig `json:"projectConfig,omitempty"` // UpdateMask: A FieldMask specifying which fields of the project_config - // to modify. Only - // the fields in the mask will be modified. If no mask is provided, - // this - // request is no-op. 
+ // to modify. Only the fields in the mask will be modified. If no mask + // is provided, this request is no-op. UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. "ProjectConfig") to @@ -1260,10 +1026,8 @@ type UpdateRepoRequest struct { Repo *Repo `json:"repo,omitempty"` // UpdateMask: A FieldMask specifying which fields of the repo to - // modify. Only the fields - // in the mask will be modified. If no mask is provided, this request - // is - // no-op. + // modify. Only the fields in the mask will be modified. If no mask is + // provided, this request is no-op. UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. "Repo") to @@ -1345,7 +1109,7 @@ func (c *ProjectsGetConfigCall) Header() http.Header { func (c *ProjectsGetConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1416,7 +1180,7 @@ func (c *ProjectsGetConfigCall) Do(opts ...googleapi.CallOption) (*ProjectConfig // ], // "parameters": { // "name": { - // "description": "The name of the requested project. Values are of the form\n`projects/\u003cproject\u003e`.", + // "description": "The name of the requested project. Values are of the form `projects/`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -1481,7 +1245,7 @@ func (c *ProjectsUpdateConfigCall) Header() http.Header { func (c *ProjectsUpdateConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1554,7 +1318,7 @@ func (c *ProjectsUpdateConfigCall) Do(opts ...googleapi.CallOption) (*ProjectCon // ], // "parameters": { // "name": { - // "description": "The name of the requested project. Values are of the form\n`projects/\u003cproject\u003e`.", + // "description": "The name of the requested project. Values are of the form `projects/`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -1586,10 +1350,8 @@ type ProjectsReposCreateCall struct { header_ http.Header } -// Create: Creates a repo in the given project with the given name. -// -// If the named repository already exists, `CreateRepo` -// returns +// Create: Creates a repo in the given project with the given name. If +// the named repository already exists, `CreateRepo` returns // `ALREADY_EXISTS`. 
func (r *ProjectsReposService) Create(parent string, repo *Repo) *ProjectsReposCreateCall { c := &ProjectsReposCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -1625,7 +1387,7 @@ func (c *ProjectsReposCreateCall) Header() http.Header { func (c *ProjectsReposCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1689,7 +1451,7 @@ func (c *ProjectsReposCreateCall) Do(opts ...googleapi.CallOption) (*Repo, error } return ret, nil // { - // "description": "Creates a repo in the given project with the given name.\n\nIf the named repository already exists, `CreateRepo` returns\n`ALREADY_EXISTS`.", + // "description": "Creates a repo in the given project with the given name. If the named repository already exists, `CreateRepo` returns `ALREADY_EXISTS`.", // "flatPath": "v1/projects/{projectsId}/repos", // "httpMethod": "POST", // "id": "sourcerepo.projects.repos.create", @@ -1698,7 +1460,7 @@ func (c *ProjectsReposCreateCall) Do(opts ...googleapi.CallOption) (*Repo, error // ], // "parameters": { // "parent": { - // "description": "The project in which to create the repo. Values are of the form\n`projects/\u003cproject\u003e`.", + // "description": "The project in which to create the repo. Values are of the form `projects/`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -1764,7 +1526,7 @@ func (c *ProjectsReposDeleteCall) Header() http.Header { func (c *ProjectsReposDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1832,7 +1594,7 @@ func (c *ProjectsReposDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, erro // ], // "parameters": { // "name": { - // "description": "The name of the repo to delete. Values are of the form\n`projects/\u003cproject\u003e/repos/\u003crepo\u003e`.", + // "description": "The name of the repo to delete. Values are of the form `projects//repos/`.", // "location": "path", // "pattern": "^projects/[^/]+/repos/.*$", // "required": true, @@ -1906,7 +1668,7 @@ func (c *ProjectsReposGetCall) Header() http.Header { func (c *ProjectsReposGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1977,7 +1739,7 @@ func (c *ProjectsReposGetCall) Do(opts ...googleapi.CallOption) (*Repo, error) { // ], // "parameters": { // "name": { - // "description": "The name of the requested repository. Values are of the form\n`projects/\u003cproject\u003e/repos/\u003crepo\u003e`.", + // "description": "The name of the requested repository. Values are of the form `projects//repos/`.", // "location": "path", // "pattern": "^projects/[^/]+/repos/.*$", // "required": true, @@ -2009,9 +1771,8 @@ type ProjectsReposGetIamPolicyCall struct { header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. 
-// Returns an empty policy if the resource exists and does not have a -// policy +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy // set. func (r *ProjectsReposService) GetIamPolicy(resource string) *ProjectsReposGetIamPolicyCall { c := &ProjectsReposGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -2021,17 +1782,14 @@ func (r *ProjectsReposService) GetIamPolicy(resource string) *ProjectsReposGetIa // OptionsRequestedPolicyVersion sets the optional parameter // "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. -// -// Requests for policies with any conditional bindings must specify -// version 3. -// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. +// returned. Valid values are 0, 1, and 3. Requests specifying an +// invalid value will be rejected. Requests for policies with any +// conditional bindings must specify version 3. Policies without any +// conditional bindings may specify any valid value or leave the field +// unset. To learn which resources support conditions in their IAM +// policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). func (c *ProjectsReposGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsReposGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c @@ -2074,7 +1832,7 @@ func (c *ProjectsReposGetIamPolicyCall) Header() http.Header { func (c *ProjectsReposGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2136,7 +1894,7 @@ func (c *ProjectsReposGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + // "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", // "flatPath": "v1/projects/{projectsId}/repos/{reposId}:getIamPolicy", // "httpMethod": "GET", // "id": "sourcerepo.projects.repos.getIamPolicy", @@ -2145,13 +1903,13 @@ func (c *ProjectsReposGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // ], // "parameters": { // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.", + // "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. 
To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", // "format": "int32", // "location": "query", // "type": "integer" // }, // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/repos/.*$", // "required": true, @@ -2184,8 +1942,8 @@ type ProjectsReposListCall struct { } // List: Returns all repos belonging to a project. The sizes of the -// repos are -// not set by ListRepos. To get the size of a repo, use GetRepo. +// repos are not set by ListRepos. To get the size of a repo, use +// GetRepo. func (r *ProjectsReposService) List(name string) *ProjectsReposListCall { c := &ProjectsReposListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2193,17 +1951,17 @@ func (r *ProjectsReposService) List(name string) *ProjectsReposListCall { } // PageSize sets the optional parameter "pageSize": Maximum number of -// repositories to return; between 1 and 500. -// If not set or zero, defaults to 100 at the server. +// repositories to return; between 1 and 500. If not set or zero, +// defaults to 100 at the server. func (c *ProjectsReposListCall) PageSize(pageSize int64) *ProjectsReposListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Resume listing -// repositories where a prior ListReposResponse -// left off. This is an opaque token that must be obtained from -// a recent, prior ListReposResponse's next_page_token field. +// repositories where a prior ListReposResponse left off. This is an +// opaque token that must be obtained from a recent, prior +// ListReposResponse's next_page_token field. func (c *ProjectsReposListCall) PageToken(pageToken string) *ProjectsReposListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -2246,7 +2004,7 @@ func (c *ProjectsReposListCall) Header() http.Header { func (c *ProjectsReposListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2308,7 +2066,7 @@ func (c *ProjectsReposListCall) Do(opts ...googleapi.CallOption) (*ListReposResp } return ret, nil // { - // "description": "Returns all repos belonging to a project. The sizes of the repos are\nnot set by ListRepos. To get the size of a repo, use GetRepo.", + // "description": "Returns all repos belonging to a project. The sizes of the repos are not set by ListRepos. To get the size of a repo, use GetRepo.", // "flatPath": "v1/projects/{projectsId}/repos", // "httpMethod": "GET", // "id": "sourcerepo.projects.repos.list", @@ -2317,20 +2075,20 @@ func (c *ProjectsReposListCall) Do(opts ...googleapi.CallOption) (*ListReposResp // ], // "parameters": { // "name": { - // "description": "The project ID whose repos should be listed. Values are of the form\n`projects/\u003cproject\u003e`.", + // "description": "The project ID whose repos should be listed. 
Values are of the form `projects/`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" // }, // "pageSize": { - // "description": "Maximum number of repositories to return; between 1 and 500.\nIf not set or zero, defaults to 100 at the server.", + // "description": "Maximum number of repositories to return; between 1 and 500. If not set or zero, defaults to 100 at the server.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "Resume listing repositories where a prior ListReposResponse\nleft off. This is an opaque token that must be obtained from\na recent, prior ListReposResponse's next_page_token field.", + // "description": "Resume listing repositories where a prior ListReposResponse left off. This is an opaque token that must be obtained from a recent, prior ListReposResponse's next_page_token field.", // "location": "query", // "type": "string" // } @@ -2416,7 +2174,7 @@ func (c *ProjectsReposPatchCall) Header() http.Header { func (c *ProjectsReposPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2489,7 +2247,7 @@ func (c *ProjectsReposPatchCall) Do(opts ...googleapi.CallOption) (*Repo, error) // ], // "parameters": { // "name": { - // "description": "The name of the requested repository. Values are of the form\n`projects/\u003cproject\u003e/repos/\u003crepo\u003e`.", + // "description": "The name of the requested repository. Values are of the form `projects//repos/`.", // "location": "path", // "pattern": "^projects/[^/]+/repos/.*$", // "required": true, @@ -2522,8 +2280,7 @@ type ProjectsReposSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. +// resource. Replaces any existing policy. func (r *ProjectsReposService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsReposSetIamPolicyCall { c := &ProjectsReposSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2558,7 +2315,7 @@ func (c *ProjectsReposSetIamPolicyCall) Header() http.Header { func (c *ProjectsReposSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2622,7 +2379,7 @@ func (c *ProjectsReposSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", // "flatPath": "v1/projects/{projectsId}/repos/{reposId}:setIamPolicy", // "httpMethod": "POST", // "id": "sourcerepo.projects.repos.setIamPolicy", @@ -2631,7 +2388,7 @@ func (c *ProjectsReposSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/repos/.*$", // "required": true, @@ -2664,9 +2421,8 @@ type ProjectsReposSyncCall struct { header_ http.Header } -// Sync: Synchronize a connected repo. -// -// The response contains SyncRepoMetadata in the metadata field. +// Sync: Synchronize a connected repo. The response contains +// SyncRepoMetadata in the metadata field. func (r *ProjectsReposService) Sync(name string, syncreporequest *SyncRepoRequest) *ProjectsReposSyncCall { c := &ProjectsReposSyncCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2701,7 +2457,7 @@ func (c *ProjectsReposSyncCall) Header() http.Header { func (c *ProjectsReposSyncCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2765,7 +2521,7 @@ func (c *ProjectsReposSyncCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Synchronize a connected repo.\n\nThe response contains SyncRepoMetadata in the metadata field.", + // "description": "Synchronize a connected repo. The response contains SyncRepoMetadata in the metadata field.", // "flatPath": "v1/projects/{projectsId}/repos/{reposId}:sync", // "httpMethod": "POST", // "id": "sourcerepo.projects.repos.sync", @@ -2774,7 +2530,7 @@ func (c *ProjectsReposSyncCall) Do(opts ...googleapi.CallOption) (*Operation, er // ], // "parameters": { // "name": { - // "description": "The name of the repo to synchronize. Values are of the form\n`projects/\u003cproject\u003e/repos/\u003crepo\u003e`.", + // "description": "The name of the repo to synchronize. Values are of the form `projects//repos/`.", // "location": "path", // "pattern": "^projects/[^/]+/repos/.*$", // "required": true, @@ -2807,10 +2563,8 @@ type ProjectsReposTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a NOT_FOUND error. +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a NOT_FOUND error. 
func (r *ProjectsReposService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsReposTestIamPermissionsCall { c := &ProjectsReposTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2845,7 +2599,7 @@ func (c *ProjectsReposTestIamPermissionsCall) Header() http.Header { func (c *ProjectsReposTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2909,7 +2663,7 @@ func (c *ProjectsReposTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.", // "flatPath": "v1/projects/{projectsId}/repos/{reposId}:testIamPermissions", // "httpMethod": "POST", // "id": "sourcerepo.projects.repos.testIamPermissions", @@ -2918,7 +2672,7 @@ func (c *ProjectsReposTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/repos/.*$", // "required": true, diff --git a/vendor/google.golang.org/api/spanner/v1/spanner-api.json b/vendor/google.golang.org/api/spanner/v1/spanner-api.json index ed1f0cae421..2767b9227cb 100644 --- a/vendor/google.golang.org/api/spanner/v1/spanner-api.json +++ b/vendor/google.golang.org/api/spanner/v1/spanner-api.json @@ -125,7 +125,7 @@ ], "parameters": { "name": { - "description": "Required. The name of the requested instance configuration. Values are of\nthe form `projects/\u003cproject\u003e/instanceConfigs/\u003cconfig\u003e`.", + "description": "Required. The name of the requested instance configuration. Values are of the form `projects//instanceConfigs/`.", "location": "path", "pattern": "^projects/[^/]+/instanceConfigs/[^/]+$", "required": true, @@ -151,18 +151,18 @@ ], "parameters": { "pageSize": { - "description": "Number of instance configurations to be returned in the response. If 0 or\nless, defaults to the server's maximum allowed page size.", + "description": "Number of instance configurations to be returned in the response. If 0 or less, defaults to the server's maximum allowed page size.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "If non-empty, `page_token` should contain a\nnext_page_token\nfrom a previous ListInstanceConfigsResponse.", + "description": "If non-empty, `page_token` should contain a next_page_token from a previous ListInstanceConfigsResponse.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The name of the project for which a list of supported instance\nconfigurations is requested. 
Values are of the form\n`projects/\u003cproject\u003e`.", + "description": "Required. The name of the project for which a list of supported instance configurations is requested. Values are of the form `projects/`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -183,7 +183,7 @@ "instances": { "methods": { "create": { - "description": "Creates an instance and begins preparing it to begin serving. The\nreturned long-running operation\ncan be used to track the progress of preparing the new\ninstance. The instance name is assigned by the caller. If the\nnamed instance already exists, `CreateInstance` returns\n`ALREADY_EXISTS`.\n\nImmediately upon completion of this request:\n\n * The instance is readable via the API, with all requested attributes\n but no allocated resources. Its state is `CREATING`.\n\nUntil completion of the returned operation:\n\n * Cancelling the operation renders the instance immediately unreadable\n via the API.\n * The instance can be deleted.\n * All other attempts to modify the instance are rejected.\n\nUpon completion of the returned operation:\n\n * Billing for all successfully-allocated resources begins (some types\n may have lower than the requested levels).\n * Databases can be created in the instance.\n * The instance's allocated resource levels are readable via the API.\n * The instance's state becomes `READY`.\n\nThe returned long-running operation will\nhave a name of the format `\u003cinstance_name\u003e/operations/\u003coperation_id\u003e` and\ncan be used to track creation of the instance. The\nmetadata field type is\nCreateInstanceMetadata.\nThe response field type is\nInstance, if successful.", + "description": "Creates an instance and begins preparing it to begin serving. The returned long-running operation can be used to track the progress of preparing the new instance. The instance name is assigned by the caller. If the named instance already exists, `CreateInstance` returns `ALREADY_EXISTS`. Immediately upon completion of this request: * The instance is readable via the API, with all requested attributes but no allocated resources. Its state is `CREATING`. Until completion of the returned operation: * Cancelling the operation renders the instance immediately unreadable via the API. * The instance can be deleted. * All other attempts to modify the instance are rejected. Upon completion of the returned operation: * Billing for all successfully-allocated resources begins (some types may have lower than the requested levels). * Databases can be created in the instance. * The instance's allocated resource levels are readable via the API. * The instance's state becomes `READY`. The returned long-running operation will have a name of the format `/operations/` and can be used to track creation of the instance. The metadata field type is CreateInstanceMetadata. The response field type is Instance, if successful.", "flatPath": "v1/projects/{projectsId}/instances", "httpMethod": "POST", "id": "spanner.projects.instances.create", @@ -192,7 +192,7 @@ ], "parameters": { "parent": { - "description": "Required. The name of the project in which to create the instance. Values\nare of the form `projects/\u003cproject\u003e`.", + "description": "Required. The name of the project in which to create the instance. 
Values are of the form `projects/`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -212,7 +212,7 @@ ] }, "delete": { - "description": "Deletes an instance.\n\nImmediately upon completion of the request:\n\n * Billing ceases for all of the instance's reserved resources.\n\nSoon afterward:\n\n * The instance and *all of its databases* immediately and\n irrevocably disappear from the API. All data in the databases\n is permanently deleted.", + "description": "Deletes an instance. Immediately upon completion of the request: * Billing ceases for all of the instance's reserved resources. Soon afterward: * The instance and *all of its databases* immediately and irrevocably disappear from the API. All data in the databases is permanently deleted.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}", "httpMethod": "DELETE", "id": "spanner.projects.instances.delete", @@ -221,7 +221,7 @@ ], "parameters": { "name": { - "description": "Required. The name of the instance to be deleted. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e`", + "description": "Required. The name of the instance to be deleted. Values are of the form `projects//instances/`", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -247,13 +247,13 @@ ], "parameters": { "fieldMask": { - "description": "If field_mask is present, specifies the subset of Instance fields that\nshould be returned.\nIf absent, all Instance fields are returned.", + "description": "If field_mask is present, specifies the subset of Instance fields that should be returned. If absent, all Instance fields are returned.", "format": "google-fieldmask", "location": "query", "type": "string" }, "name": { - "description": "Required. The name of the requested instance. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + "description": "Required. The name of the requested instance. Values are of the form `projects//instances/`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -270,7 +270,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for an instance resource. Returns an empty\npolicy if an instance exists but does not have a policy set.\n\nAuthorization requires `spanner.instances.getIamPolicy` on\nresource.", + "description": "Gets the access control policy for an instance resource. Returns an empty policy if an instance exists but does not have a policy set. Authorization requires `spanner.instances.getIamPolicy` on resource.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}:getIamPolicy", "httpMethod": "POST", "id": "spanner.projects.instances.getIamPolicy", @@ -279,7 +279,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + "description": "REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. 
The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -308,23 +308,23 @@ ], "parameters": { "filter": { - "description": "An expression for filtering the results of the request. Filter rules are\ncase insensitive. The fields eligible for filtering are:\n\n * `name`\n * `display_name`\n * `labels.key` where key is the name of a label\n\nSome examples of using filters are:\n\n * `name:*` --\u003e The instance has a name.\n * `name:Howl` --\u003e The instance's name contains the string \"howl\".\n * `name:HOWL` --\u003e Equivalent to above.\n * `NAME:howl` --\u003e Equivalent to above.\n * `labels.env:*` --\u003e The instance has the label \"env\".\n * `labels.env:dev` --\u003e The instance has the label \"env\" and the value of\n the label contains the string \"dev\".\n * `name:howl labels.env:dev` --\u003e The instance's name contains \"howl\" and\n it has the label \"env\" with its value\n containing \"dev\".", + "description": "An expression for filtering the results of the request. Filter rules are case insensitive. The fields eligible for filtering are: * `name` * `display_name` * `labels.key` where key is the name of a label Some examples of using filters are: * `name:*` --\u003e The instance has a name. * `name:Howl` --\u003e The instance's name contains the string \"howl\". * `name:HOWL` --\u003e Equivalent to above. * `NAME:howl` --\u003e Equivalent to above. * `labels.env:*` --\u003e The instance has the label \"env\". * `labels.env:dev` --\u003e The instance has the label \"env\" and the value of the label contains the string \"dev\". * `name:howl labels.env:dev` --\u003e The instance's name contains \"howl\" and it has the label \"env\" with its value containing \"dev\".", "location": "query", "type": "string" }, "pageSize": { - "description": "Number of instances to be returned in the response. If 0 or less, defaults\nto the server's maximum allowed page size.", + "description": "Number of instances to be returned in the response. If 0 or less, defaults to the server's maximum allowed page size.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "If non-empty, `page_token` should contain a\nnext_page_token from a\nprevious ListInstancesResponse.", + "description": "If non-empty, `page_token` should contain a next_page_token from a previous ListInstancesResponse.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The name of the project for which a list of instances is\nrequested. Values are of the form `projects/\u003cproject\u003e`.", + "description": "Required. The name of the project for which a list of instances is requested. Values are of the form `projects/`.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -341,7 +341,7 @@ ] }, "patch": { - "description": "Updates an instance, and begins allocating or releasing resources\nas requested. The returned long-running\noperation can be used to track the\nprogress of updating the instance. 
If the named instance does not\nexist, returns `NOT_FOUND`.\n\nImmediately upon completion of this request:\n\n * For resource types for which a decrease in the instance's allocation\n has been requested, billing is based on the newly-requested level.\n\nUntil completion of the returned operation:\n\n * Cancelling the operation sets its metadata's\n cancel_time, and begins\n restoring resources to their pre-request values. The operation\n is guaranteed to succeed at undoing all resource changes,\n after which point it terminates with a `CANCELLED` status.\n * All other attempts to modify the instance are rejected.\n * Reading the instance via the API continues to give the pre-request\n resource levels.\n\nUpon completion of the returned operation:\n\n * Billing begins for all successfully-allocated resources (some types\n may have lower than the requested levels).\n * All newly-reserved resources are available for serving the instance's\n tables.\n * The instance's new resource levels are readable via the API.\n\nThe returned long-running operation will\nhave a name of the format `\u003cinstance_name\u003e/operations/\u003coperation_id\u003e` and\ncan be used to track the instance modification. The\nmetadata field type is\nUpdateInstanceMetadata.\nThe response field type is\nInstance, if successful.\n\nAuthorization requires `spanner.instances.update` permission on\nresource name.", + "description": "Updates an instance, and begins allocating or releasing resources as requested. The returned long-running operation can be used to track the progress of updating the instance. If the named instance does not exist, returns `NOT_FOUND`. Immediately upon completion of this request: * For resource types for which a decrease in the instance's allocation has been requested, billing is based on the newly-requested level. Until completion of the returned operation: * Cancelling the operation sets its metadata's cancel_time, and begins restoring resources to their pre-request values. The operation is guaranteed to succeed at undoing all resource changes, after which point it terminates with a `CANCELLED` status. * All other attempts to modify the instance are rejected. * Reading the instance via the API continues to give the pre-request resource levels. Upon completion of the returned operation: * Billing begins for all successfully-allocated resources (some types may have lower than the requested levels). * All newly-reserved resources are available for serving the instance's tables. * The instance's new resource levels are readable via the API. The returned long-running operation will have a name of the format `/operations/` and can be used to track the instance modification. The metadata field type is UpdateInstanceMetadata. The response field type is Instance, if successful. Authorization requires `spanner.instances.update` permission on resource name.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}", "httpMethod": "PATCH", "id": "spanner.projects.instances.patch", @@ -350,7 +350,7 @@ ], "parameters": { "name": { - "description": "Required. A unique identifier for the instance, which cannot be changed\nafter the instance is created. Values are of the form\n`projects/\u003cproject\u003e/instances/a-z*[a-z0-9]`. The final\nsegment of the name must be between 2 and 64 characters in length.", + "description": "Required. A unique identifier for the instance, which cannot be changed after the instance is created. Values are of the form `projects//instances/a-z*[a-z0-9]`. 
The final segment of the name must be between 2 and 64 characters in length.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -370,7 +370,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on an instance resource. Replaces any\nexisting policy.\n\nAuthorization requires `spanner.instances.setIamPolicy` on\nresource.", + "description": "Sets the access control policy on an instance resource. Replaces any existing policy. Authorization requires `spanner.instances.setIamPolicy` on resource.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}:setIamPolicy", "httpMethod": "POST", "id": "spanner.projects.instances.setIamPolicy", @@ -379,7 +379,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for databases resources.", + "description": "REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for databases resources.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -399,7 +399,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that the caller has on the specified instance resource.\n\nAttempting this RPC on a non-existent Cloud Spanner instance resource will\nresult in a NOT_FOUND error if the user has `spanner.instances.list`\npermission on the containing Google Cloud Project. Otherwise returns an\nempty set of permissions.", + "description": "Returns permissions that the caller has on the specified instance resource. Attempting this RPC on a non-existent Cloud Spanner instance resource will result in a NOT_FOUND error if the user has `spanner.instances.list` permission on the containing Google Cloud Project. Otherwise returns an empty set of permissions.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}:testIamPermissions", "httpMethod": "POST", "id": "spanner.projects.instances.testIamPermissions", @@ -408,7 +408,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + "description": "REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -432,7 +432,7 @@ "backupOperations": { "methods": { "list": { - "description": "Lists the backup long-running operations in\nthe given instance. A backup operation has a name of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/backups/\u003cbackup\u003e/operations/\u003coperation\u003e`.\nThe long-running operation\nmetadata field type\n`metadata.type_url` describes the type of the metadata. Operations returned\ninclude those that have completed/failed/canceled within the last 7 days,\nand pending operations. 
Operations returned are ordered by\n`operation.metadata.value.progress.start_time` in descending order starting\nfrom the most recently started operation.", + "description": "Lists the backup long-running operations in the given instance. A backup operation has a name of the form `projects//instances//backups//operations/`. The long-running operation metadata field type `metadata.type_url` describes the type of the metadata. Operations returned include those that have completed/failed/canceled within the last 7 days, and pending operations. Operations returned are ordered by `operation.metadata.value.progress.start_time` in descending order starting from the most recently started operation.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backupOperations", "httpMethod": "GET", "id": "spanner.projects.instances.backupOperations.list", @@ -441,23 +441,23 @@ ], "parameters": { "filter": { - "description": "An expression that filters the list of returned backup operations.\n\nA filter expression consists of a field name, a\ncomparison operator, and a value for filtering.\nThe value must be a string, a number, or a boolean. The comparison operator\nmust be one of: `\u003c`, `\u003e`, `\u003c=`, `\u003e=`, `!=`, `=`, or `:`.\nColon `:` is the contains operator. Filter rules are not case sensitive.\n\nThe following fields in the operation\nare eligible for filtering:\n\n * `name` - The name of the long-running operation\n * `done` - False if the operation is in progress, else true.\n * `metadata.@type` - the type of metadata. For example, the type string\n for CreateBackupMetadata is\n `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`.\n * `metadata.\u003cfield_name\u003e` - any field in metadata.value.\n * `error` - Error associated with the long-running operation.\n * `response.@type` - the type of response.\n * `response.\u003cfield_name\u003e` - any field in response.value.\n\nYou can combine multiple expressions by enclosing each expression in\nparentheses. By default, expressions are combined with AND logic, but\nyou can specify AND, OR, and NOT logic explicitly.\n\nHere are a few examples:\n\n * `done:true` - The operation is complete.\n * `metadata.database:prod` - The database the backup was taken from has\n a name containing the string \"prod\".\n * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \u003cbr/\u003e\n `(metadata.name:howl) AND` \u003cbr/\u003e\n `(metadata.progress.start_time \u003c \\\"2018-03-28T14:50:00Z\\\") AND` \u003cbr/\u003e\n `(error:*)` - Returns operations where:\n * The operation's metadata type is CreateBackupMetadata.\n * The backup name contains the string \"howl\".\n * The operation started before 2018-03-28T14:50:00Z.\n * The operation resulted in an error.", + "description": "An expression that filters the list of returned backup operations. A filter expression consists of a field name, a comparison operator, and a value for filtering. The value must be a string, a number, or a boolean. The comparison operator must be one of: `\u003c`, `\u003e`, `\u003c=`, `\u003e=`, `!=`, `=`, or `:`. Colon `:` is the contains operator. Filter rules are not case sensitive. The following fields in the operation are eligible for filtering: * `name` - The name of the long-running operation * `done` - False if the operation is in progress, else true. * `metadata.@type` - the type of metadata. 
For example, the type string for CreateBackupMetadata is `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`. * `metadata.` - any field in metadata.value. * `error` - Error associated with the long-running operation. * `response.@type` - the type of response. * `response.` - any field in response.value. You can combine multiple expressions by enclosing each expression in parentheses. By default, expressions are combined with AND logic, but you can specify AND, OR, and NOT logic explicitly. Here are a few examples: * `done:true` - The operation is complete. * `metadata.database:prod` - The database the backup was taken from has a name containing the string \"prod\". * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \\ `(metadata.name:howl) AND` \\ `(metadata.progress.start_time \u003c \\\"2018-03-28T14:50:00Z\\\") AND` \\ `(error:*)` - Returns operations where: * The operation's metadata type is CreateBackupMetadata. * The backup name contains the string \"howl\". * The operation started before 2018-03-28T14:50:00Z. * The operation resulted in an error.", "location": "query", "type": "string" }, "pageSize": { - "description": "Number of operations to be returned in the response. If 0 or\nless, defaults to the server's maximum allowed page size.", + "description": "Number of operations to be returned in the response. If 0 or less, defaults to the server's maximum allowed page size.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "If non-empty, `page_token` should contain a\nnext_page_token\nfrom a previous ListBackupOperationsResponse to the\nsame `parent` and with the same `filter`.", + "description": "If non-empty, `page_token` should contain a next_page_token from a previous ListBackupOperationsResponse to the same `parent` and with the same `filter`.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The instance of the backup operations. Values are of\nthe form `projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + "description": "Required. The instance of the backup operations. Values are of the form `projects//instances/`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -478,7 +478,7 @@ "backups": { "methods": { "create": { - "description": "Starts creating a new Cloud Spanner Backup.\nThe returned backup long-running operation\nwill have a name of the format\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/backups/\u003cbackup\u003e/operations/\u003coperation_id\u003e`\nand can be used to track creation of the backup. The\nmetadata field type is\nCreateBackupMetadata. The\nresponse field type is\nBackup, if successful. Cancelling the returned operation will stop the\ncreation and delete the backup.\nThere can be only one pending backup creation per database. Backup creation\nof different databases can run concurrently.", + "description": "Starts creating a new Cloud Spanner Backup. The returned backup long-running operation will have a name of the format `projects//instances//backups//operations/` and can be used to track creation of the backup. The metadata field type is CreateBackupMetadata. The response field type is Backup, if successful. Cancelling the returned operation will stop the creation and delete the backup. There can be only one pending backup creation per database. 
Backup creation of different databases can run concurrently.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups", "httpMethod": "POST", "id": "spanner.projects.instances.backups.create", @@ -487,12 +487,12 @@ ], "parameters": { "backupId": { - "description": "Required. The id of the backup to be created. The `backup_id` appended to\n`parent` forms the full backup name of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/backups/\u003cbackup_id\u003e`.", + "description": "Required. The id of the backup to be created. The `backup_id` appended to `parent` forms the full backup name of the form `projects//instances//backups/`.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The name of the instance in which the backup will be\ncreated. This must be the same instance that contains the database the\nbackup will be created from. The backup will be stored in the\nlocation(s) specified in the instance configuration of this\ninstance. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + "description": "Required. The name of the instance in which the backup will be created. This must be the same instance that contains the database the backup will be created from. The backup will be stored in the location(s) specified in the instance configuration of this instance. Values are of the form `projects//instances/`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -521,7 +521,7 @@ ], "parameters": { "name": { - "description": "Required. Name of the backup to delete.\nValues are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/backups/\u003cbackup\u003e`.", + "description": "Required. Name of the backup to delete. Values are of the form `projects//instances//backups/`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/backups/[^/]+$", "required": true, @@ -547,7 +547,7 @@ ], "parameters": { "name": { - "description": "Required. Name of the backup.\nValues are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/backups/\u003cbackup\u003e`.", + "description": "Required. Name of the backup. Values are of the form `projects//instances//backups/`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/backups/[^/]+$", "required": true, @@ -564,7 +564,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a database or backup resource.\nReturns an empty policy if a database or backup exists but does not have a\npolicy set.\n\nAuthorization requires `spanner.databases.getIamPolicy` permission on\nresource.\nFor backups, authorization requires `spanner.backups.getIamPolicy`\npermission on resource.", + "description": "Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups/{backupsId}:getIamPolicy", "httpMethod": "POST", "id": "spanner.projects.instances.backups.getIamPolicy", @@ -573,7 +573,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. 
The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + "description": "REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/backups/[^/]+$", "required": true, @@ -593,7 +593,7 @@ ] }, "list": { - "description": "Lists completed and pending backups.\nBackups returned are ordered by `create_time` in descending order,\nstarting from the most recent `create_time`.", + "description": "Lists completed and pending backups. Backups returned are ordered by `create_time` in descending order, starting from the most recent `create_time`.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups", "httpMethod": "GET", "id": "spanner.projects.instances.backups.list", @@ -602,23 +602,23 @@ ], "parameters": { "filter": { - "description": "An expression that filters the list of returned backups.\n\nA filter expression consists of a field name, a comparison operator, and a\nvalue for filtering.\nThe value must be a string, a number, or a boolean. The comparison operator\nmust be one of: `\u003c`, `\u003e`, `\u003c=`, `\u003e=`, `!=`, `=`, or `:`.\nColon `:` is the contains operator. Filter rules are not case sensitive.\n\nThe following fields in the Backup are eligible for filtering:\n\n * `name`\n * `database`\n * `state`\n * `create_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)\n * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)\n * `size_bytes`\n\nYou can combine multiple expressions by enclosing each expression in\nparentheses. By default, expressions are combined with AND logic, but\nyou can specify AND, OR, and NOT logic explicitly.\n\nHere are a few examples:\n\n * `name:Howl` - The backup's name contains the string \"howl\".\n * `database:prod`\n - The database's name contains the string \"prod\".\n * `state:CREATING` - The backup is pending creation.\n * `state:READY` - The backup is fully created and ready for use.\n * `(name:howl) AND (create_time \u003c \\\"2018-03-28T14:50:00Z\\\")`\n - The backup name contains the string \"howl\" and `create_time`\n of the backup is before 2018-03-28T14:50:00Z.\n * `expire_time \u003c \\\"2018-03-28T14:50:00Z\\\"`\n - The backup `expire_time` is before 2018-03-28T14:50:00Z.\n * `size_bytes \u003e 10000000000` - The backup's size is greater than 10GB", + "description": "An expression that filters the list of returned backups. A filter expression consists of a field name, a comparison operator, and a value for filtering. The value must be a string, a number, or a boolean. The comparison operator must be one of: `\u003c`, `\u003e`, `\u003c=`, `\u003e=`, `!=`, `=`, or `:`. Colon `:` is the contains operator. Filter rules are not case sensitive. The following fields in the Backup are eligible for filtering: * `name` * `database` * `state` * `create_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * `size_bytes` You can combine multiple expressions by enclosing each expression in parentheses. By default, expressions are combined with AND logic, but you can specify AND, OR, and NOT logic explicitly. 
Here are a few examples: * `name:Howl` - The backup's name contains the string \"howl\". * `database:prod` - The database's name contains the string \"prod\". * `state:CREATING` - The backup is pending creation. * `state:READY` - The backup is fully created and ready for use. * `(name:howl) AND (create_time \u003c \\\"2018-03-28T14:50:00Z\\\")` - The backup name contains the string \"howl\" and `create_time` of the backup is before 2018-03-28T14:50:00Z. * `expire_time \u003c \\\"2018-03-28T14:50:00Z\\\"` - The backup `expire_time` is before 2018-03-28T14:50:00Z. * `size_bytes \u003e 10000000000` - The backup's size is greater than 10GB", "location": "query", "type": "string" }, "pageSize": { - "description": "Number of backups to be returned in the response. If 0 or\nless, defaults to the server's maximum allowed page size.", + "description": "Number of backups to be returned in the response. If 0 or less, defaults to the server's maximum allowed page size.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "If non-empty, `page_token` should contain a\nnext_page_token from a\nprevious ListBackupsResponse to the same `parent` and with the same\n`filter`.", + "description": "If non-empty, `page_token` should contain a next_page_token from a previous ListBackupsResponse to the same `parent` and with the same `filter`.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The instance to list backups from. Values are of the\nform `projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + "description": "Required. The instance to list backups from. Values are of the form `projects//instances/`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -644,14 +644,14 @@ ], "parameters": { "name": { - "description": "Output only for the CreateBackup operation.\nRequired for the UpdateBackup operation.\n\nA globally unique identifier for the backup which cannot be\nchanged. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/backups/a-z*[a-z0-9]`\nThe final segment of the name must be between 2 and 60 characters\nin length.\n\nThe backup is stored in the location(s) specified in the instance\nconfiguration of the instance containing the backup, identified\nby the prefix of the backup name of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + "description": "Output only for the CreateBackup operation. Required for the UpdateBackup operation. A globally unique identifier for the backup which cannot be changed. Values are of the form `projects//instances//backups/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length. The backup is stored in the location(s) specified in the instance configuration of the instance containing the backup, identified by the prefix of the backup name of the form `projects//instances/`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/backups/[^/]+$", "required": true, "type": "string" }, "updateMask": { - "description": "Required. A mask specifying which fields (e.g. `expire_time`) in the\nBackup resource should be updated. This mask is relative to the Backup\nresource, not to the request message. The field mask must always be\nspecified; this prevents any future fields from being erased accidentally\nby clients that do not know about them.", + "description": "Required. A mask specifying which fields (e.g. 
`expire_time`) in the Backup resource should be updated. This mask is relative to the Backup resource, not to the request message. The field mask must always be specified; this prevents any future fields from being erased accidentally by clients that do not know about them.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -670,7 +670,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on a database or backup resource.\nReplaces any existing policy.\n\nAuthorization requires `spanner.databases.setIamPolicy`\npermission on resource.\nFor backups, authorization requires `spanner.backups.setIamPolicy`\npermission on resource.", + "description": "Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups/{backupsId}:setIamPolicy", "httpMethod": "POST", "id": "spanner.projects.instances.backups.setIamPolicy", @@ -679,7 +679,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for databases resources.", + "description": "REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for databases resources.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/backups/[^/]+$", "required": true, @@ -699,7 +699,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that the caller has on the specified database or backup\nresource.\n\nAttempting this RPC on a non-existent Cloud Spanner database will\nresult in a NOT_FOUND error if the user has\n`spanner.databases.list` permission on the containing Cloud\nSpanner instance. Otherwise returns an empty set of permissions.\nCalling this method on a backup that does not exist will\nresult in a NOT_FOUND error if the user has\n`spanner.backups.list` permission on the containing instance.", + "description": "Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups/{backupsId}:testIamPermissions", "httpMethod": "POST", "id": "spanner.projects.instances.backups.testIamPermissions", @@ -708,7 +708,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The Cloud Spanner resource for which permissions are being tested. 
The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + "description": "REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/backups/[^/]+$", "required": true, @@ -732,7 +732,7 @@ "operations": { "methods": { "cancel": { - "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups/{backupsId}/operations/{operationsId}:cancel", "httpMethod": "POST", "id": "spanner.projects.instances.backups.operations.cancel", @@ -758,7 +758,7 @@ ] }, "delete": { - "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups/{backupsId}/operations/{operationsId}", "httpMethod": "DELETE", "id": "spanner.projects.instances.backups.operations.delete", @@ -784,7 +784,7 @@ ] }, "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups/{backupsId}/operations/{operationsId}", "httpMethod": "GET", "id": "spanner.projects.instances.backups.operations.get", @@ -810,7 +810,7 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. 
If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups/{backupsId}/operations", "httpMethod": "GET", "id": "spanner.projects.instances.backups.operations.list", @@ -858,7 +858,7 @@ "databaseOperations": { "methods": { "list": { - "description": "Lists database longrunning-operations.\nA database operation has a name of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/databases/\u003cdatabase\u003e/operations/\u003coperation\u003e`.\nThe long-running operation\nmetadata field type\n`metadata.type_url` describes the type of the metadata. Operations returned\ninclude those that have completed/failed/canceled within the last 7 days,\nand pending operations.", + "description": "Lists database longrunning-operations. A database operation has a name of the form `projects//instances//databases//operations/`. The long-running operation metadata field type `metadata.type_url` describes the type of the metadata. Operations returned include those that have completed/failed/canceled within the last 7 days, and pending operations.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databaseOperations", "httpMethod": "GET", "id": "spanner.projects.instances.databaseOperations.list", @@ -867,23 +867,23 @@ ], "parameters": { "filter": { - "description": "An expression that filters the list of returned operations.\n\nA filter expression consists of a field name, a\ncomparison operator, and a value for filtering.\nThe value must be a string, a number, or a boolean. The comparison operator\nmust be one of: `\u003c`, `\u003e`, `\u003c=`, `\u003e=`, `!=`, `=`, or `:`.\nColon `:` is the contains operator. Filter rules are not case sensitive.\n\nThe following fields in the Operation\nare eligible for filtering:\n\n * `name` - The name of the long-running operation\n * `done` - False if the operation is in progress, else true.\n * `metadata.@type` - the type of metadata. 
For example, the type string\n for RestoreDatabaseMetadata is\n `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`.\n * `metadata.\u003cfield_name\u003e` - any field in metadata.value.\n * `error` - Error associated with the long-running operation.\n * `response.@type` - the type of response.\n * `response.\u003cfield_name\u003e` - any field in response.value.\n\nYou can combine multiple expressions by enclosing each expression in\nparentheses. By default, expressions are combined with AND logic. However,\nyou can specify AND, OR, and NOT logic explicitly.\n\nHere are a few examples:\n\n * `done:true` - The operation is complete.\n * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \u003cbr/\u003e\n `(metadata.source_type:BACKUP) AND` \u003cbr/\u003e\n `(metadata.backup_info.backup:backup_howl) AND` \u003cbr/\u003e\n `(metadata.name:restored_howl) AND` \u003cbr/\u003e\n `(metadata.progress.start_time \u003c \\\"2018-03-28T14:50:00Z\\\") AND` \u003cbr/\u003e\n `(error:*)` - Return operations where:\n * The operation's metadata type is RestoreDatabaseMetadata.\n * The database is restored from a backup.\n * The backup name contains \"backup_howl\".\n * The restored database's name contains \"restored_howl\".\n * The operation started before 2018-03-28T14:50:00Z.\n * The operation resulted in an error.", + "description": "An expression that filters the list of returned operations. A filter expression consists of a field name, a comparison operator, and a value for filtering. The value must be a string, a number, or a boolean. The comparison operator must be one of: `\u003c`, `\u003e`, `\u003c=`, `\u003e=`, `!=`, `=`, or `:`. Colon `:` is the contains operator. Filter rules are not case sensitive. The following fields in the Operation are eligible for filtering: * `name` - The name of the long-running operation * `done` - False if the operation is in progress, else true. * `metadata.@type` - the type of metadata. For example, the type string for RestoreDatabaseMetadata is `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. * `metadata.` - any field in metadata.value. * `error` - Error associated with the long-running operation. * `response.@type` - the type of response. * `response.` - any field in response.value. You can combine multiple expressions by enclosing each expression in parentheses. By default, expressions are combined with AND logic. However, you can specify AND, OR, and NOT logic explicitly. Here are a few examples: * `done:true` - The operation is complete. * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \\ `(metadata.source_type:BACKUP) AND` \\ `(metadata.backup_info.backup:backup_howl) AND` \\ `(metadata.name:restored_howl) AND` \\ `(metadata.progress.start_time \u003c \\\"2018-03-28T14:50:00Z\\\") AND` \\ `(error:*)` - Return operations where: * The operation's metadata type is RestoreDatabaseMetadata. * The database is restored from a backup. * The backup name contains \"backup_howl\". * The restored database's name contains \"restored_howl\". * The operation started before 2018-03-28T14:50:00Z. * The operation resulted in an error.", "location": "query", "type": "string" }, "pageSize": { - "description": "Number of operations to be returned in the response. If 0 or\nless, defaults to the server's maximum allowed page size.", + "description": "Number of operations to be returned in the response. 
If 0 or less, defaults to the server's maximum allowed page size.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "If non-empty, `page_token` should contain a\nnext_page_token\nfrom a previous ListDatabaseOperationsResponse to the\nsame `parent` and with the same `filter`.", + "description": "If non-empty, `page_token` should contain a next_page_token from a previous ListDatabaseOperationsResponse to the same `parent` and with the same `filter`.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The instance of the database operations.\nValues are of the form `projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + "description": "Required. The instance of the database operations. Values are of the form `projects//instances/`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -904,7 +904,7 @@ "databases": { "methods": { "create": { - "description": "Creates a new Cloud Spanner database and starts to prepare it for serving.\nThe returned long-running operation will\nhave a name of the format `\u003cdatabase_name\u003e/operations/\u003coperation_id\u003e` and\ncan be used to track preparation of the database. The\nmetadata field type is\nCreateDatabaseMetadata. The\nresponse field type is\nDatabase, if successful.", + "description": "Creates a new Cloud Spanner database and starts to prepare it for serving. The returned long-running operation will have a name of the format `/operations/` and can be used to track preparation of the database. The metadata field type is CreateDatabaseMetadata. The response field type is Database, if successful.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases", "httpMethod": "POST", "id": "spanner.projects.instances.databases.create", @@ -913,7 +913,7 @@ ], "parameters": { "parent": { - "description": "Required. The name of the instance that will serve the new database.\nValues are of the form `projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + "description": "Required. The name of the instance that will serve the new database. Values are of the form `projects//instances/`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -933,7 +933,7 @@ ] }, "dropDatabase": { - "description": "Drops (aka deletes) a Cloud Spanner database.\nCompleted backups for the database will be retained according to their\n`expire_time`.", + "description": "Drops (aka deletes) a Cloud Spanner database. Completed backups for the database will be retained according to their `expire_time`.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}", "httpMethod": "DELETE", "id": "spanner.projects.instances.databases.dropDatabase", @@ -968,7 +968,7 @@ ], "parameters": { "name": { - "description": "Required. The name of the requested database. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/databases/\u003cdatabase\u003e`.", + "description": "Required. The name of the requested database. Values are of the form `projects//instances//databases/`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", "required": true, @@ -985,7 +985,7 @@ ] }, "getDdl": { - "description": "Returns the schema of a Cloud Spanner database as a list of formatted\nDDL statements. 
This method does not show pending schema updates, those may\nbe queried using the Operations API.", + "description": "Returns the schema of a Cloud Spanner database as a list of formatted DDL statements. This method does not show pending schema updates, those may be queried using the Operations API.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/ddl", "httpMethod": "GET", "id": "spanner.projects.instances.databases.getDdl", @@ -994,7 +994,7 @@ ], "parameters": { "database": { - "description": "Required. The database whose schema we wish to get.", + "description": "Required. The database whose schema we wish to get. Values are of the form `projects//instances//databases/`", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", "required": true, @@ -1011,7 +1011,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a database or backup resource.\nReturns an empty policy if a database or backup exists but does not have a\npolicy set.\n\nAuthorization requires `spanner.databases.getIamPolicy` permission on\nresource.\nFor backups, authorization requires `spanner.backups.getIamPolicy`\npermission on resource.", + "description": "Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:getIamPolicy", "httpMethod": "POST", "id": "spanner.projects.instances.databases.getIamPolicy", @@ -1020,7 +1020,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + "description": "REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", "required": true, @@ -1049,18 +1049,18 @@ ], "parameters": { "pageSize": { - "description": "Number of databases to be returned in the response. If 0 or less,\ndefaults to the server's maximum allowed page size.", + "description": "Number of databases to be returned in the response. If 0 or less, defaults to the server's maximum allowed page size.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "If non-empty, `page_token` should contain a\nnext_page_token from a\nprevious ListDatabasesResponse.", + "description": "If non-empty, `page_token` should contain a next_page_token from a previous ListDatabasesResponse.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The instance whose databases should be listed.\nValues are of the form `projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + "description": "Required. The instance whose databases should be listed. 
Values are of the form `projects//instances/`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -1077,7 +1077,7 @@ ] }, "restore": { - "description": "Create a new database by restoring from a completed backup. The new\ndatabase must be in the same project and in an instance with the same\ninstance configuration as the instance containing\nthe backup. The returned database long-running\noperation has a name of the format\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/databases/\u003cdatabase\u003e/operations/\u003coperation_id\u003e`,\nand can be used to track the progress of the operation, and to cancel it.\nThe metadata field type is\nRestoreDatabaseMetadata.\nThe response type\nis Database, if\nsuccessful. Cancelling the returned operation will stop the restore and\ndelete the database.\nThere can be only one database being restored into an instance at a time.\nOnce the restore operation completes, a new restore operation can be\ninitiated, without waiting for the optimize operation associated with the\nfirst restore to complete.", + "description": "Create a new database by restoring from a completed backup. The new database must be in the same project and in an instance with the same instance configuration as the instance containing the backup. The returned database long-running operation has a name of the format `projects//instances//databases//operations/`, and can be used to track the progress of the operation, and to cancel it. The metadata field type is RestoreDatabaseMetadata. The response type is Database, if successful. Cancelling the returned operation will stop the restore and delete the database. There can be only one database being restored into an instance at a time. Once the restore operation completes, a new restore operation can be initiated, without waiting for the optimize operation associated with the first restore to complete.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases:restore", "httpMethod": "POST", "id": "spanner.projects.instances.databases.restore", @@ -1086,7 +1086,7 @@ ], "parameters": { "parent": { - "description": "Required. The name of the instance in which to create the\nrestored database. This instance must be in the same project and\nhave the same instance configuration as the instance containing\nthe source backup. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + "description": "Required. The name of the instance in which to create the restored database. This instance must be in the same project and have the same instance configuration as the instance containing the source backup. Values are of the form `projects//instances/`.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+$", "required": true, @@ -1106,7 +1106,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on a database or backup resource.\nReplaces any existing policy.\n\nAuthorization requires `spanner.databases.setIamPolicy`\npermission on resource.\nFor backups, authorization requires `spanner.backups.setIamPolicy`\npermission on resource.", + "description": "Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. 
For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:setIamPolicy", "httpMethod": "POST", "id": "spanner.projects.instances.databases.setIamPolicy", @@ -1115,7 +1115,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for databases resources.", + "description": "REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for databases resources.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", "required": true, @@ -1135,7 +1135,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that the caller has on the specified database or backup\nresource.\n\nAttempting this RPC on a non-existent Cloud Spanner database will\nresult in a NOT_FOUND error if the user has\n`spanner.databases.list` permission on the containing Cloud\nSpanner instance. Otherwise returns an empty set of permissions.\nCalling this method on a backup that does not exist will\nresult in a NOT_FOUND error if the user has\n`spanner.backups.list` permission on the containing instance.", + "description": "Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:testIamPermissions", "httpMethod": "POST", "id": "spanner.projects.instances.databases.testIamPermissions", @@ -1144,7 +1144,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + "description": "REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources.", "location": "path", "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", "required": true, @@ -1164,7 +1164,7 @@ ] }, "updateDdl": { - "description": "Updates the schema of a Cloud Spanner database by\ncreating/altering/dropping tables, columns, indexes, etc. The returned\nlong-running operation will have a name of\nthe format `\u003cdatabase_name\u003e/operations/\u003coperation_id\u003e` and can be used to\ntrack execution of the schema change(s). The\nmetadata field type is\nUpdateDatabaseDdlMetadata. The operation has no response.", + "description": "Updates the schema of a Cloud Spanner database by creating/altering/dropping tables, columns, indexes, etc. 
The returned long-running operation will have a name of the format `/operations/` and can be used to track execution of the schema change(s). The metadata field type is UpdateDatabaseDdlMetadata. The operation has no response.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/ddl", "httpMethod": "PATCH", "id": "spanner.projects.instances.databases.updateDdl", @@ -1197,7 +1197,7 @@ "operations": { "methods": { "cancel": { - "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations/{operationsId}:cancel", "httpMethod": "POST", "id": "spanner.projects.instances.databases.operations.cancel", @@ -1223,7 +1223,7 @@ ] }, "delete": { - "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations/{operationsId}", "httpMethod": "DELETE", "id": "spanner.projects.instances.databases.operations.delete", @@ -1249,7 +1249,7 @@ ] }, "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations/{operationsId}", "httpMethod": "GET", "id": "spanner.projects.instances.databases.operations.get", @@ -1275,7 +1275,7 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. 
If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations", "httpMethod": "GET", "id": "spanner.projects.instances.databases.operations.list", @@ -1321,7 +1321,7 @@ "sessions": { "methods": { "batchCreate": { - "description": "Creates multiple new sessions.\n\nThis API can be used to initialize a session cache on the clients.\nSee https://goo.gl/TgSFN2 for best practices on session cache management.", + "description": "Creates multiple new sessions. This API can be used to initialize a session cache on the clients. See https://goo.gl/TgSFN2 for best practices on session cache management.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions:batchCreate", "httpMethod": "POST", "id": "spanner.projects.instances.databases.sessions.batchCreate", @@ -1350,7 +1350,7 @@ ] }, "beginTransaction": { - "description": "Begins a new transaction. This step can often be skipped:\nRead, ExecuteSql and\nCommit can begin a new transaction as a\nside-effect.", + "description": "Begins a new transaction. This step can often be skipped: Read, ExecuteSql and Commit can begin a new transaction as a side-effect.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:beginTransaction", "httpMethod": "POST", "id": "spanner.projects.instances.databases.sessions.beginTransaction", @@ -1379,7 +1379,7 @@ ] }, "commit": { - "description": "Commits a transaction. The request includes the mutations to be\napplied to rows in the database.\n\n`Commit` might return an `ABORTED` error. This can occur at any time;\ncommonly, the cause is conflicts with concurrent\ntransactions. However, it can also happen for a variety of other\nreasons. If `Commit` returns `ABORTED`, the caller should re-attempt\nthe transaction from the beginning, re-using the same session.", + "description": "Commits a transaction. The request includes the mutations to be applied to rows in the database. `Commit` might return an `ABORTED` error. This can occur at any time; commonly, the cause is conflicts with concurrent transactions. However, it can also happen for a variety of other reasons. If `Commit` returns `ABORTED`, the caller should re-attempt the transaction from the beginning, re-using the same session. 
On very rare occasions, `Commit` might return `UNKNOWN`. This can happen, for example, if the client job experiences a 1+ hour networking failure. At that point, Cloud Spanner has lost track of the transaction outcome and we recommend that you perform another read from the database to see the state of things as they are now.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:commit", "httpMethod": "POST", "id": "spanner.projects.instances.databases.sessions.commit", @@ -1408,7 +1408,7 @@ ] }, "create": { - "description": "Creates a new session. A session can be used to perform\ntransactions that read and/or modify data in a Cloud Spanner database.\nSessions are meant to be reused for many consecutive\ntransactions.\n\nSessions can only execute one transaction at a time. To execute\nmultiple concurrent read-write/write-only transactions, create\nmultiple sessions. Note that standalone reads and queries use a\ntransaction internally, and count toward the one transaction\nlimit.\n\nActive sessions use additional server resources, so it is a good idea to\ndelete idle and unneeded sessions.\nAside from explicit deletes, Cloud Spanner may delete sessions for which no\noperations are sent for more than an hour. If a session is deleted,\nrequests to it return `NOT_FOUND`.\n\nIdle sessions can be kept alive by sending a trivial SQL query\nperiodically, e.g., `\"SELECT 1\"`.", + "description": "Creates a new session. A session can be used to perform transactions that read and/or modify data in a Cloud Spanner database. Sessions are meant to be reused for many consecutive transactions. Sessions can only execute one transaction at a time. To execute multiple concurrent read-write/write-only transactions, create multiple sessions. Note that standalone reads and queries use a transaction internally, and count toward the one transaction limit. Active sessions use additional server resources, so it is a good idea to delete idle and unneeded sessions. Aside from explicit deletes, Cloud Spanner may delete sessions for which no operations are sent for more than an hour. If a session is deleted, requests to it return `NOT_FOUND`. Idle sessions can be kept alive by sending a trivial SQL query periodically, e.g., `\"SELECT 1\"`.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions", "httpMethod": "POST", "id": "spanner.projects.instances.databases.sessions.create", @@ -1437,7 +1437,7 @@ ] }, "delete": { - "description": "Ends a session, releasing server resources associated with it. This will\nasynchronously trigger cancellation of any operations that are running with\nthis session.", + "description": "Ends a session, releasing server resources associated with it. This will asynchronously trigger cancellation of any operations that are running with this session.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}", "httpMethod": "DELETE", "id": "spanner.projects.instances.databases.sessions.delete", @@ -1463,7 +1463,7 @@ ] }, "executeBatchDml": { - "description": "Executes a batch of SQL DML statements. This method allows many statements\nto be run with lower latency than submitting them sequentially with\nExecuteSql.\n\nStatements are executed in sequential order. A request can succeed even if\na statement fails. The ExecuteBatchDmlResponse.status field in the\nresponse provides information about the statement that failed. 
Clients must\ninspect this field to determine whether an error occurred.\n\nExecution stops after the first failed statement; the remaining statements\nare not executed.", + "description": "Executes a batch of SQL DML statements. This method allows many statements to be run with lower latency than submitting them sequentially with ExecuteSql. Statements are executed in sequential order. A request can succeed even if a statement fails. The ExecuteBatchDmlResponse.status field in the response provides information about the statement that failed. Clients must inspect this field to determine whether an error occurred. Execution stops after the first failed statement; the remaining statements are not executed.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:executeBatchDml", "httpMethod": "POST", "id": "spanner.projects.instances.databases.sessions.executeBatchDml", @@ -1492,7 +1492,7 @@ ] }, "executeSql": { - "description": "Executes an SQL statement, returning all results in a single reply. This\nmethod cannot be used to return a result set larger than 10 MiB;\nif the query yields more data than that, the query fails with\na `FAILED_PRECONDITION` error.\n\nOperations inside read-write transactions might return `ABORTED`. If\nthis occurs, the application should restart the transaction from\nthe beginning. See Transaction for more details.\n\nLarger result sets can be fetched in streaming fashion by calling\nExecuteStreamingSql instead.", + "description": "Executes an SQL statement, returning all results in a single reply. This method cannot be used to return a result set larger than 10 MiB; if the query yields more data than that, the query fails with a `FAILED_PRECONDITION` error. Operations inside read-write transactions might return `ABORTED`. If this occurs, the application should restart the transaction from the beginning. See Transaction for more details. Larger result sets can be fetched in streaming fashion by calling ExecuteStreamingSql instead.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:executeSql", "httpMethod": "POST", "id": "spanner.projects.instances.databases.sessions.executeSql", @@ -1521,7 +1521,7 @@ ] }, "executeStreamingSql": { - "description": "Like ExecuteSql, except returns the result\nset as a stream. Unlike ExecuteSql, there\nis no limit on the size of the returned result set. However, no\nindividual row in the result set can exceed 100 MiB, and no\ncolumn value can exceed 10 MiB.", + "description": "Like ExecuteSql, except returns the result set as a stream. Unlike ExecuteSql, there is no limit on the size of the returned result set. However, no individual row in the result set can exceed 100 MiB, and no column value can exceed 10 MiB.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:executeStreamingSql", "httpMethod": "POST", "id": "spanner.projects.instances.databases.sessions.executeStreamingSql", @@ -1550,7 +1550,7 @@ ] }, "get": { - "description": "Gets a session. Returns `NOT_FOUND` if the session does not exist.\nThis is mainly useful for determining whether a session is still\nalive.", + "description": "Gets a session. Returns `NOT_FOUND` if the session does not exist. 
This is mainly useful for determining whether a session is still alive.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}", "httpMethod": "GET", "id": "spanner.projects.instances.databases.sessions.get", @@ -1592,18 +1592,18 @@ "type": "string" }, "filter": { - "description": "An expression for filtering the results of the request. Filter rules are\ncase insensitive. The fields eligible for filtering are:\n\n * `labels.key` where key is the name of a label\n\nSome examples of using filters are:\n\n * `labels.env:*` --\u003e The session has the label \"env\".\n * `labels.env:dev` --\u003e The session has the label \"env\" and the value of\n the label contains the string \"dev\".", + "description": "An expression for filtering the results of the request. Filter rules are case insensitive. The fields eligible for filtering are: * `labels.key` where key is the name of a label Some examples of using filters are: * `labels.env:*` --\u003e The session has the label \"env\". * `labels.env:dev` --\u003e The session has the label \"env\" and the value of the label contains the string \"dev\".", "location": "query", "type": "string" }, "pageSize": { - "description": "Number of sessions to be returned in the response. If 0 or less, defaults\nto the server's maximum allowed page size.", + "description": "Number of sessions to be returned in the response. If 0 or less, defaults to the server's maximum allowed page size.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "If non-empty, `page_token` should contain a\nnext_page_token from a previous\nListSessionsResponse.", + "description": "If non-empty, `page_token` should contain a next_page_token from a previous ListSessionsResponse.", "location": "query", "type": "string" } @@ -1618,7 +1618,7 @@ ] }, "partitionQuery": { - "description": "Creates a set of partition tokens that can be used to execute a query\noperation in parallel. Each of the returned partition tokens can be used\nby ExecuteStreamingSql to specify a subset\nof the query result to read. The same session and read-only transaction\nmust be used by the PartitionQueryRequest used to create the\npartition tokens and the ExecuteSqlRequests that use the partition tokens.\n\nPartition tokens become invalid when the session used to create them\nis deleted, is idle for too long, begins a new transaction, or becomes too\nold. When any of these happen, it is not possible to resume the query, and\nthe whole operation must be restarted from the beginning.", + "description": "Creates a set of partition tokens that can be used to execute a query operation in parallel. Each of the returned partition tokens can be used by ExecuteStreamingSql to specify a subset of the query result to read. The same session and read-only transaction must be used by the PartitionQueryRequest used to create the partition tokens and the ExecuteSqlRequests that use the partition tokens. Partition tokens become invalid when the session used to create them is deleted, is idle for too long, begins a new transaction, or becomes too old. 
When any of these happen, it is not possible to resume the query, and the whole operation must be restarted from the beginning.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:partitionQuery", "httpMethod": "POST", "id": "spanner.projects.instances.databases.sessions.partitionQuery", @@ -1647,7 +1647,7 @@ ] }, "partitionRead": { - "description": "Creates a set of partition tokens that can be used to execute a read\noperation in parallel. Each of the returned partition tokens can be used\nby StreamingRead to specify a subset of the read\nresult to read. The same session and read-only transaction must be used by\nthe PartitionReadRequest used to create the partition tokens and the\nReadRequests that use the partition tokens. There are no ordering\nguarantees on rows returned among the returned partition tokens, or even\nwithin each individual StreamingRead call issued with a partition_token.\n\nPartition tokens become invalid when the session used to create them\nis deleted, is idle for too long, begins a new transaction, or becomes too\nold. When any of these happen, it is not possible to resume the read, and\nthe whole operation must be restarted from the beginning.", + "description": "Creates a set of partition tokens that can be used to execute a read operation in parallel. Each of the returned partition tokens can be used by StreamingRead to specify a subset of the read result to read. The same session and read-only transaction must be used by the PartitionReadRequest used to create the partition tokens and the ReadRequests that use the partition tokens. There are no ordering guarantees on rows returned among the returned partition tokens, or even within each individual StreamingRead call issued with a partition_token. Partition tokens become invalid when the session used to create them is deleted, is idle for too long, begins a new transaction, or becomes too old. When any of these happen, it is not possible to resume the read, and the whole operation must be restarted from the beginning.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:partitionRead", "httpMethod": "POST", "id": "spanner.projects.instances.databases.sessions.partitionRead", @@ -1676,7 +1676,7 @@ ] }, "read": { - "description": "Reads rows from the database using key lookups and scans, as a\nsimple key/value style alternative to\nExecuteSql. This method cannot be used to\nreturn a result set larger than 10 MiB; if the read matches more\ndata than that, the read fails with a `FAILED_PRECONDITION`\nerror.\n\nReads inside read-write transactions might return `ABORTED`. If\nthis occurs, the application should restart the transaction from\nthe beginning. See Transaction for more details.\n\nLarger result sets can be yielded in streaming fashion by calling\nStreamingRead instead.", + "description": "Reads rows from the database using key lookups and scans, as a simple key/value style alternative to ExecuteSql. This method cannot be used to return a result set larger than 10 MiB; if the read matches more data than that, the read fails with a `FAILED_PRECONDITION` error. Reads inside read-write transactions might return `ABORTED`. If this occurs, the application should restart the transaction from the beginning. See Transaction for more details. 
Larger result sets can be yielded in streaming fashion by calling StreamingRead instead.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:read", "httpMethod": "POST", "id": "spanner.projects.instances.databases.sessions.read", @@ -1705,7 +1705,7 @@ ] }, "rollback": { - "description": "Rolls back a transaction, releasing any locks it holds. It is a good\nidea to call this for any transaction that includes one or more\nRead or ExecuteSql requests and\nultimately decides not to commit.\n\n`Rollback` returns `OK` if it successfully aborts the transaction, the\ntransaction was already aborted, or the transaction is not\nfound. `Rollback` never returns `ABORTED`.", + "description": "Rolls back a transaction, releasing any locks it holds. It is a good idea to call this for any transaction that includes one or more Read or ExecuteSql requests and ultimately decides not to commit. `Rollback` returns `OK` if it successfully aborts the transaction, the transaction was already aborted, or the transaction is not found. `Rollback` never returns `ABORTED`.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:rollback", "httpMethod": "POST", "id": "spanner.projects.instances.databases.sessions.rollback", @@ -1734,7 +1734,7 @@ ] }, "streamingRead": { - "description": "Like Read, except returns the result set as a\nstream. Unlike Read, there is no limit on the\nsize of the returned result set. However, no individual row in\nthe result set can exceed 100 MiB, and no column value can exceed\n10 MiB.", + "description": "Like Read, except returns the result set as a stream. Unlike Read, there is no limit on the size of the returned result set. However, no individual row in the result set can exceed 100 MiB, and no column value can exceed 10 MiB.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:streamingRead", "httpMethod": "POST", "id": "spanner.projects.instances.databases.sessions.streamingRead", @@ -1769,7 +1769,7 @@ "operations": { "methods": { "cancel": { - "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. 
On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/operations/{operationsId}:cancel", "httpMethod": "POST", "id": "spanner.projects.instances.operations.cancel", @@ -1795,7 +1795,7 @@ ] }, "delete": { - "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/operations/{operationsId}", "httpMethod": "DELETE", "id": "spanner.projects.instances.operations.delete", @@ -1821,7 +1821,7 @@ ] }, "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/operations/{operationsId}", "httpMethod": "GET", "id": "spanner.projects.instances.operations.get", @@ -1847,7 +1847,7 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/operations", "httpMethod": "GET", "id": "spanner.projects.instances.operations.list", @@ -1895,7 +1895,7 @@ } } }, - "revision": "20200508", + "revision": "20200905", "rootUrl": "https://spanner.googleapis.com/", "schemas": { "Backup": { @@ -1903,33 +1903,36 @@ "id": "Backup", "properties": { "createTime": { - "description": "Output only. The backup will contain an externally consistent\ncopy of the database at the timestamp specified by\n`create_time`. 
`create_time` is approximately the time the\nCreateBackup request is received.", + "description": "Output only. The backup will contain an externally consistent copy of the database at the timestamp specified by `create_time`. `create_time` is approximately the time the CreateBackup request is received.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "database": { - "description": "Required for the CreateBackup operation.\nName of the database from which this backup was\ncreated. This needs to be in the same instance as the backup.\nValues are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/databases/\u003cdatabase\u003e`.", + "description": "Required for the CreateBackup operation. Name of the database from which this backup was created. This needs to be in the same instance as the backup. Values are of the form `projects//instances//databases/`.", "type": "string" }, "expireTime": { - "description": "Required for the CreateBackup\noperation. The expiration time of the backup, with microseconds\ngranularity that must be at least 6 hours and at most 366 days\nfrom the time the CreateBackup request is processed. Once the `expire_time`\nhas passed, the backup is eligible to be automatically deleted by Cloud\nSpanner to free the resources used by the backup.", + "description": "Required for the CreateBackup operation. The expiration time of the backup, with microseconds granularity that must be at least 6 hours and at most 366 days from the time the CreateBackup request is processed. Once the `expire_time` has passed, the backup is eligible to be automatically deleted by Cloud Spanner to free the resources used by the backup.", "format": "google-datetime", "type": "string" }, "name": { - "description": "Output only for the CreateBackup operation.\nRequired for the UpdateBackup operation.\n\nA globally unique identifier for the backup which cannot be\nchanged. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/backups/a-z*[a-z0-9]`\nThe final segment of the name must be between 2 and 60 characters\nin length.\n\nThe backup is stored in the location(s) specified in the instance\nconfiguration of the instance containing the backup, identified\nby the prefix of the backup name of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + "description": "Output only for the CreateBackup operation. Required for the UpdateBackup operation. A globally unique identifier for the backup which cannot be changed. Values are of the form `projects//instances//backups/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length. The backup is stored in the location(s) specified in the instance configuration of the instance containing the backup, identified by the prefix of the backup name of the form `projects//instances/`.", "type": "string" }, "referencingDatabases": { - "description": "Output only. The names of the restored databases that reference the backup.\nThe database names are of\nthe form `projects/\u003cproject\u003e/instances/\u003cinstance\u003e/databases/\u003cdatabase\u003e`.\nReferencing databases may exist in different instances. The existence of\nany referencing database prevents the backup from being deleted. When a\nrestored database from the backup enters the `READY` state, the reference\nto the backup is removed.", + "description": "Output only. The names of the restored databases that reference the backup. 
The database names are of the form `projects//instances//databases/`. Referencing databases may exist in different instances. The existence of any referencing database prevents the backup from being deleted. When a restored database from the backup enters the `READY` state, the reference to the backup is removed.", "items": { "type": "string" }, + "readOnly": true, "type": "array" }, "sizeBytes": { "description": "Output only. Size of the backup in bytes.", "format": "int64", + "readOnly": true, "type": "string" }, "state": { @@ -1941,9 +1944,10 @@ ], "enumDescriptions": [ "Not specified.", - "The pending backup is still being created. Operations on the\nbackup may fail with `FAILED_PRECONDITION` in this state.", + "The pending backup is still being created. Operations on the backup may fail with `FAILED_PRECONDITION` in this state.", "The backup is complete and ready for use." ], + "readOnly": true, "type": "string" } }, @@ -1958,7 +1962,7 @@ "type": "string" }, "createTime": { - "description": "The backup contains an externally consistent copy of `source_database` at\nthe timestamp specified by `create_time`.", + "description": "The backup contains an externally consistent copy of `source_database` at the timestamp specified by `create_time`.", "format": "google-datetime", "type": "string" }, @@ -1974,7 +1978,7 @@ "id": "BatchCreateSessionsRequest", "properties": { "sessionCount": { - "description": "Required. The number of sessions to be created in this batch call.\nThe API may return fewer than the requested number of sessions. If a\nspecific number of sessions are desired, the client can make additional\ncalls to BatchCreateSessions (adjusting\nsession_count as necessary).", + "description": "Required. The number of sessions to be created in this batch call. The API may return fewer than the requested number of sessions. If a specific number of sessions are desired, the client can make additional calls to BatchCreateSessions (adjusting session_count as necessary).", "format": "int32", "type": "integer" }, @@ -2014,26 +2018,30 @@ "description": "Associates `members` with a `role`.", "id": "Binding", "properties": { + "bindingId": { + "description": "A client-specified ID for this binding. Expected to be globally unique to support the internal bindings-by-ID API.", + "type": "string" + }, "condition": { "$ref": "Expr", - "description": "The condition that is associated with this binding.\nNOTE: An unsatisfied condition will not allow user access via current\nbinding. Different bindings, including their conditions, are examined\nindependently." + "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." 
}, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a user that has been recently deleted. For\n example, `alice@example.com?uid=123456789012345678901`. If the user is\n recovered, this value reverts to `user:{emailid}` and the recovered user\n retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus\n unique identifier) representing a service account that has been recently\n deleted. For example,\n `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.\n If the service account is undeleted, this value reverts to\n `serviceAccount:{emailid}` and the undeleted service account retains the\n role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a Google group that has been recently\n deleted. For example, `admins@example.com?uid=123456789012345678901`. If\n the group is recovered, this value reverts to `group:{emailid}` and the\n recovered group retains the role in the binding.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. 
* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", "items": { "type": "string" }, "type": "array" }, "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", "type": "string" } }, "type": "object" }, "ChildLink": { - "description": "Metadata associated with a parent-child relationship appearing in a\nPlanNode.", + "description": "Metadata associated with a parent-child relationship appearing in a PlanNode.", "id": "ChildLink", "properties": { "childIndex": { @@ -2042,11 +2050,11 @@ "type": "integer" }, "type": { - "description": "The type of the link. For example, in Hash Joins this could be used to\ndistinguish between the build child and the probe child, or in the case\nof the child being an output variable, to represent the tag associated\nwith the output variable.", + "description": "The type of the link. For example, in Hash Joins this could be used to distinguish between the build child and the probe child, or in the case of the child being an output variable, to represent the tag associated with the output variable.", "type": "string" }, "variable": { - "description": "Only present if the child node is SCALAR and corresponds\nto an output variable of the parent node. The field carries the name of\nthe output variable.\nFor example, a `TableScan` operator that reads rows from a table will\nhave child links to the `SCALAR` nodes representing the output variables\ncreated for each column that is read by the operator. The corresponding\n`variable` fields will be set to the variable names assigned to the\ncolumns.", + "description": "Only present if the child node is SCALAR and corresponds to an output variable of the parent node. The field carries the name of the output variable. For example, a `TableScan` operator that reads rows from a table will have child links to the `SCALAR` nodes representing the output variables created for each column that is read by the operator. The corresponding `variable` fields will be set to the variable names assigned to the columns.", "type": "string" } }, @@ -2057,7 +2065,7 @@ "id": "CommitRequest", "properties": { "mutations": { - "description": "The mutations to be executed when this transaction commits. All\nmutations are applied atomically, in the order they appear in\nthis list.", + "description": "The mutations to be executed when this transaction commits. All mutations are applied atomically, in the order they appear in this list.", "items": { "$ref": "Mutation" }, @@ -2065,7 +2073,7 @@ }, "singleUseTransaction": { "$ref": "TransactionOptions", - "description": "Execute mutations in a temporary transaction. Note that unlike\ncommit of a previously-started transaction, commit with a\ntemporary transaction is non-idempotent. That is, if the\n`CommitRequest` is sent to Cloud Spanner more than once (for\ninstance, due to retries in the application, or in the\ntransport library), it is possible that the mutations are\nexecuted more than once. 
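(Editor's illustrative aside, not part of the vendored diff: a Binding assembled only from the member formats and the example role named in the descriptions above; all identities are placeholder values taken from the spec text.)

    {
      "role": "roles/viewer",
      "members": [
        "user:alice@example.com",
        "serviceAccount:my-other-app@appspot.gserviceaccount.com",
        "group:admins@example.com"
      ]
    }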
If this is undesirable, use\nBeginTransaction and\nCommit instead." + "description": "Execute mutations in a temporary transaction. Note that unlike commit of a previously-started transaction, commit with a temporary transaction is non-idempotent. That is, if the `CommitRequest` is sent to Cloud Spanner more than once (for instance, due to retries in the application, or in the transport library), it is possible that the mutations are executed more than once. If this is undesirable, use BeginTransaction and Commit instead." }, "transactionId": { "description": "Commit a previously-started transaction.", @@ -2088,11 +2096,11 @@ "type": "object" }, "CreateBackupMetadata": { - "description": "Metadata type for the operation returned by\nCreateBackup.", + "description": "Metadata type for the operation returned by CreateBackup.", "id": "CreateBackupMetadata", "properties": { "cancelTime": { - "description": "The time at which cancellation of this operation was received.\nOperations.CancelOperation\nstarts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not guaranteed.\nClients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + "description": "The time at which cancellation of this operation was received. Operations.CancelOperation starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", "format": "google-datetime", "type": "string" }, @@ -2106,13 +2114,13 @@ }, "progress": { "$ref": "OperationProgress", - "description": "The progress of the\nCreateBackup operation." + "description": "The progress of the CreateBackup operation." } }, "type": "object" }, "CreateDatabaseMetadata": { - "description": "Metadata type for the operation returned by\nCreateDatabase.", + "description": "Metadata type for the operation returned by CreateDatabase.", "id": "CreateDatabaseMetadata", "properties": { "database": { @@ -2127,11 +2135,11 @@ "id": "CreateDatabaseRequest", "properties": { "createStatement": { - "description": "Required. A `CREATE DATABASE` statement, which specifies the ID of the\nnew database. The database ID must conform to the regular expression\n`a-z*[a-z0-9]` and be between 2 and 30 characters in length.\nIf the database ID is a reserved word or if it contains a hyphen, the\ndatabase ID must be enclosed in backticks (`` ` ``).", + "description": "Required. A `CREATE DATABASE` statement, which specifies the ID of the new database. The database ID must conform to the regular expression `a-z*[a-z0-9]` and be between 2 and 30 characters in length. If the database ID is a reserved word or if it contains a hyphen, the database ID must be enclosed in backticks (`` ` ``).", "type": "string" }, "extraStatements": { - "description": "Optional. 
A list of DDL statements to run inside the newly created\ndatabase. Statements can create tables, indexes, etc. These\nstatements execute atomically with the creation of the database:\nif there is an error in any statement, the database is not created.", + "description": "Optional. A list of DDL statements to run inside the newly created database. Statements can create tables, indexes, etc. These statements execute atomically with the creation of the database: if there is an error in any statement, the database is not created.", "items": { "type": "string" }, @@ -2141,11 +2149,11 @@ "type": "object" }, "CreateInstanceMetadata": { - "description": "Metadata type for the operation returned by\nCreateInstance.", + "description": "Metadata type for the operation returned by CreateInstance.", "id": "CreateInstanceMetadata", "properties": { "cancelTime": { - "description": "The time at which this operation was cancelled. If set, this operation is\nin the process of undoing itself (which is guaranteed to succeed) and\ncannot be cancelled again.", + "description": "The time at which this operation was cancelled. If set, this operation is in the process of undoing itself (which is guaranteed to succeed) and cannot be cancelled again.", "format": "google-datetime", "type": "string" }, @@ -2159,7 +2167,7 @@ "description": "The instance being created." }, "startTime": { - "description": "The time at which the\nCreateInstance request was\nreceived.", + "description": "The time at which the CreateInstance request was received.", "format": "google-datetime", "type": "string" } @@ -2172,10 +2180,10 @@ "properties": { "instance": { "$ref": "Instance", - "description": "Required. The instance to create. The name may be omitted, but if\nspecified must be `\u003cparent\u003e/instances/\u003cinstance_id\u003e`." + "description": "Required. The instance to create. The name may be omitted, but if specified must be `/instances/`." }, "instanceId": { - "description": "Required. The ID of the instance to create. Valid identifiers are of the\nform `a-z*[a-z0-9]` and must be between 2 and 64 characters in\nlength.", + "description": "Required. The ID of the instance to create. Valid identifiers are of the form `a-z*[a-z0-9]` and must be between 2 and 64 characters in length.", "type": "string" } }, @@ -2187,7 +2195,7 @@ "properties": { "session": { "$ref": "Session", - "description": "The session to create." + "description": "Required. The session to create." } }, "type": "object" @@ -2199,15 +2207,17 @@ "createTime": { "description": "Output only. If exists, the time at which the database creation started.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "name": { - "description": "Required. The name of the database. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/databases/\u003cdatabase\u003e`,\nwhere `\u003cdatabase\u003e` is as specified in the `CREATE DATABASE`\nstatement. This name can be passed to other API methods to\nidentify the database.", + "description": "Required. The name of the database. Values are of the form `projects//instances//databases/`, where `` is as specified in the `CREATE DATABASE` statement. This name can be passed to other API methods to identify the database.", "type": "string" }, "restoreInfo": { "$ref": "RestoreInfo", - "description": "Output only. Applicable only for restored databases. Contains information\nabout the restore source." + "description": "Output only. Applicable only for restored databases. 
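(Editor's illustrative aside, not part of the vendored diff: a CreateDatabaseRequest sketch based on the createStatement and extraStatements descriptions above. The database ID and DDL are placeholders; the ID is wrapped in backticks because it contains a hyphen, as the description requires, and the table DDL reuses the UserEvents example that appears later in this document.)

    {
      "createStatement": "CREATE DATABASE `example-db`",
      "extraStatements": [
        "CREATE TABLE UserEvents ( UserName STRING(MAX), EventDate STRING(10) ) PRIMARY KEY(UserName, EventDate)"
      ]
    }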
Contains information about the restore source.", + "readOnly": true }, "state": { "description": "Output only. The current database state.", @@ -2219,10 +2229,11 @@ ], "enumDescriptions": [ "Not specified.", - "The database is still being created. Operations on the database may fail\nwith `FAILED_PRECONDITION` in this state.", + "The database is still being created. Operations on the database may fail with `FAILED_PRECONDITION` in this state.", "The database is fully created and ready for use.", - "The database is fully created and ready for use, but is still\nbeing optimized for performance and cannot handle full load.\n\nIn this state, the database still references the backup\nit was restore from, preventing the backup\nfrom being deleted. When optimizations are complete, the full performance\nof the database will be restored, and the database will transition to\n`READY` state." + "The database is fully created and ready for use, but is still being optimized for performance and cannot handle full load. In this state, the database still references the backup it was restore from, preventing the backup from being deleted. When optimizations are complete, the full performance of the database will be restored, and the database will transition to `READY` state." ], + "readOnly": true, "type": "string" } }, @@ -2234,7 +2245,7 @@ "properties": { "keySet": { "$ref": "KeySet", - "description": "Required. The primary keys of the rows within table to delete. The\nprimary keys must be specified in the order in which they appear in the\n`PRIMARY KEY()` clause of the table's equivalent DDL statement (the DDL\nstatement used to create the table).\nDelete is idempotent. The transaction will succeed even if some or all\nrows do not exist." + "description": "Required. The primary keys of the rows within table to delete. The primary keys must be specified in the order in which they appear in the `PRIMARY KEY()` clause of the table's equivalent DDL statement (the DDL statement used to create the table). Delete is idempotent. The transaction will succeed even if some or all rows do not exist." }, "table": { "description": "Required. The table whose rows will be deleted.", @@ -2244,7 +2255,7 @@ "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" @@ -2254,12 +2265,12 @@ "id": "ExecuteBatchDmlRequest", "properties": { "seqno": { - "description": "Required. A per-transaction sequence number used to identify this request. This field\nmakes each request idempotent such that if the request is received multiple\ntimes, at most one will succeed.\n\nThe sequence number must be monotonically increasing within the\ntransaction. If a request arrives for the first time with an out-of-order\nsequence number, the transaction may be aborted. 
Replays of previously\nhandled requests will yield the same response as the first execution.", + "description": "Required. A per-transaction sequence number used to identify this request. This field makes each request idempotent such that if the request is received multiple times, at most one will succeed. The sequence number must be monotonically increasing within the transaction. If a request arrives for the first time with an out-of-order sequence number, the transaction may be aborted. Replays of previously handled requests will yield the same response as the first execution.", "format": "int64", "type": "string" }, "statements": { - "description": "Required. The list of statements to execute in this batch. Statements are executed\nserially, such that the effects of statement `i` are visible to statement\n`i+1`. Each statement must be a DML statement. Execution stops at the\nfirst failed statement; the remaining statements are not executed.\n\nCallers must provide at least one statement.", + "description": "Required. The list of statements to execute in this batch. Statements are executed serially, such that the effects of statement `i` are visible to statement `i+1`. Each statement must be a DML statement. Execution stops at the first failed statement; the remaining statements are not executed. Callers must provide at least one statement.", "items": { "$ref": "Statement" }, @@ -2267,17 +2278,17 @@ }, "transaction": { "$ref": "TransactionSelector", - "description": "Required. The transaction to use. Must be a read-write transaction.\n\nTo protect against replays, single-use transactions are not supported. The\ncaller must either supply an existing transaction ID or begin a new\ntransaction." + "description": "Required. The transaction to use. Must be a read-write transaction. To protect against replays, single-use transactions are not supported. The caller must either supply an existing transaction ID or begin a new transaction." } }, "type": "object" }, "ExecuteBatchDmlResponse": { - "description": "The response for ExecuteBatchDml. Contains a list\nof ResultSet messages, one for each DML statement that has successfully\nexecuted, in the same order as the statements in the request. If a statement\nfails, the status in the response body identifies the cause of the failure.\n\nTo check for DML statements that failed, use the following approach:\n\n1. Check the status in the response message. The google.rpc.Code enum\n value `OK` indicates that all statements were executed successfully.\n2. If the status was not `OK`, check the number of result sets in the\n response. If the response contains `N` ResultSet messages, then\n statement `N+1` in the request failed.\n\nExample 1:\n\n* Request: 5 DML statements, all executed successfully.\n* Response: 5 ResultSet messages, with the status `OK`.\n\nExample 2:\n\n* Request: 5 DML statements. The third statement has a syntax error.\n* Response: 2 ResultSet messages, and a syntax error (`INVALID_ARGUMENT`)\n status. The number of ResultSet messages indicates that the third\n statement failed, and the fourth and fifth statements were not executed.", + "description": "The response for ExecuteBatchDml. Contains a list of ResultSet messages, one for each DML statement that has successfully executed, in the same order as the statements in the request. If a statement fails, the status in the response body identifies the cause of the failure. To check for DML statements that failed, use the following approach: 1. Check the status in the response message. 
The google.rpc.Code enum value `OK` indicates that all statements were executed successfully. 2. If the status was not `OK`, check the number of result sets in the response. If the response contains `N` ResultSet messages, then statement `N+1` in the request failed. Example 1: * Request: 5 DML statements, all executed successfully. * Response: 5 ResultSet messages, with the status `OK`. Example 2: * Request: 5 DML statements. The third statement has a syntax error. * Response: 2 ResultSet messages, and a syntax error (`INVALID_ARGUMENT`) status. The number of ResultSet messages indicates that the third statement failed, and the fourth and fifth statements were not executed.", "id": "ExecuteBatchDmlResponse", "properties": { "resultSets": { - "description": "One ResultSet for each statement in the request that ran successfully,\nin the same order as the statements in the request. Each ResultSet does\nnot contain any rows. The ResultSetStats in each ResultSet contain\nthe number of rows modified by the statement.\n\nOnly the first ResultSet in the response contains valid\nResultSetMetadata.", + "description": "One ResultSet for each statement in the request that ran successfully, in the same order as the statements in the request. Each ResultSet does not contain any rows. The ResultSetStats in each ResultSet contain the number of rows modified by the statement. Only the first ResultSet in the response contains valid ResultSetMetadata.", "items": { "$ref": "ResultSet" }, @@ -2285,20 +2296,20 @@ }, "status": { "$ref": "Status", - "description": "If all DML statements are executed successfully, the status is `OK`.\nOtherwise, the error status of the first failed statement." + "description": "If all DML statements are executed successfully, the status is `OK`. Otherwise, the error status of the first failed statement." } }, "type": "object" }, "ExecuteSqlRequest": { - "description": "The request for ExecuteSql and\nExecuteStreamingSql.", + "description": "The request for ExecuteSql and ExecuteStreamingSql.", "id": "ExecuteSqlRequest", "properties": { "paramTypes": { "additionalProperties": { "$ref": "Type" }, - "description": "It is not always possible for Cloud Spanner to infer the right SQL type\nfrom a JSON value. For example, values of type `BYTES` and values\nof type `STRING` both appear in params as JSON strings.\n\nIn these cases, `param_types` can be used to specify the exact\nSQL type for some or all of the SQL statement parameters. See the\ndefinition of Type for more information\nabout SQL types.", + "description": "It is not always possible for Cloud Spanner to infer the right SQL type from a JSON value. For example, values of type `BYTES` and values of type `STRING` both appear in params as JSON strings. In these cases, `param_types` can be used to specify the exact SQL type for some or all of the SQL statement parameters. See the definition of Type for more information about SQL types.", "type": "object" }, "params": { @@ -2306,16 +2317,16 @@ "description": "Properties of the object.", "type": "any" }, - "description": "Parameter names and values that bind to placeholders in the SQL string.\n\nA parameter placeholder consists of the `@` character followed by the\nparameter name (for example, `@firstName`). Parameter names can contain\nletters, numbers, and underscores.\n\nParameters can appear anywhere that a literal value is expected. 
The same\nparameter name can be used more than once, for example:\n\n`\"WHERE id \u003e @msg_id AND id \u003c @msg_id + 100\"`\n\nIt is an error to execute a SQL statement with unbound parameters.", + "description": "Parameter names and values that bind to placeholders in the SQL string. A parameter placeholder consists of the `@` character followed by the parameter name (for example, `@firstName`). Parameter names must conform to the naming requirements of identifiers as specified at https://cloud.google.com/spanner/docs/lexical#identifiers. Parameters can appear anywhere that a literal value is expected. The same parameter name can be used more than once, for example: `\"WHERE id \u003e @msg_id AND id \u003c @msg_id + 100\"` It is an error to execute a SQL statement with unbound parameters.", "type": "object" }, "partitionToken": { - "description": "If present, results will be restricted to the specified partition\npreviously created using PartitionQuery(). There must be an exact\nmatch for the values of fields common to this message and the\nPartitionQueryRequest message used to create this partition_token.", + "description": "If present, results will be restricted to the specified partition previously created using PartitionQuery(). There must be an exact match for the values of fields common to this message and the PartitionQueryRequest message used to create this partition_token.", "format": "byte", "type": "string" }, "queryMode": { - "description": "Used to control the amount of debugging information returned in\nResultSetStats. If partition_token is set, query_mode can only\nbe set to QueryMode.NORMAL.", + "description": "Used to control the amount of debugging information returned in ResultSetStats. If partition_token is set, query_mode can only be set to QueryMode.NORMAL.", "enum": [ "NORMAL", "PLAN", @@ -2323,8 +2334,8 @@ ], "enumDescriptions": [ "The default mode. Only the statement results are returned.", - "This mode returns only the query plan, without any results or\nexecution statistics information.", - "This mode returns both the query plan and the execution statistics along\nwith the results." + "This mode returns only the query plan, without any results or execution statistics information.", + "This mode returns both the query plan and the execution statistics along with the results." ], "type": "string" }, @@ -2333,12 +2344,12 @@ "description": "Query optimizer configuration to use for the given query." }, "resumeToken": { - "description": "If this request is resuming a previously interrupted SQL statement\nexecution, `resume_token` should be copied from the last\nPartialResultSet yielded before the interruption. Doing this\nenables the new SQL statement execution to resume where the last one left\noff. The rest of the request parameters must exactly match the\nrequest that yielded this token.", + "description": "If this request is resuming a previously interrupted SQL statement execution, `resume_token` should be copied from the last PartialResultSet yielded before the interruption. Doing this enables the new SQL statement execution to resume where the last one left off. The rest of the request parameters must exactly match the request that yielded this token.", "format": "byte", "type": "string" }, "seqno": { - "description": "A per-transaction sequence number used to identify this request. 
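(Editor's illustrative aside, not part of the vendored diff: query parameters bound as described above. The `sql` field is assumed here, since its description sits outside this hunk; the statement and the `msg_id` value are placeholders, and the 64-bit value is shown as a JSON string, which is how this API typically encodes INT64 parameters.)

    {
      "sql": "SELECT * FROM Messages WHERE id > @msg_id AND id < @msg_id + 100",
      "params": { "msg_id": "1000" }
    }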
This field\nmakes each request idempotent such that if the request is received multiple\ntimes, at most one will succeed.\n\nThe sequence number must be monotonically increasing within the\ntransaction. If a request arrives for the first time with an out-of-order\nsequence number, the transaction may be aborted. Replays of previously\nhandled requests will yield the same response as the first execution.\n\nRequired for DML statements. Ignored for queries.", + "description": "A per-transaction sequence number used to identify this request. This field makes each request idempotent such that if the request is received multiple times, at most one will succeed. The sequence number must be monotonically increasing within the transaction. If a request arrives for the first time with an out-of-order sequence number, the transaction may be aborted. Replays of previously handled requests will yield the same response as the first execution. Required for DML statements. Ignored for queries.", "format": "int64", "type": "string" }, @@ -2348,29 +2359,29 @@ }, "transaction": { "$ref": "TransactionSelector", - "description": "The transaction to use.\n\nFor queries, if none is provided, the default is a temporary read-only\ntransaction with strong concurrency.\n\nStandard DML statements require a read-write transaction. To protect\nagainst replays, single-use transactions are not supported. The caller\nmust either supply an existing transaction ID or begin a new transaction.\n\nPartitioned DML requires an existing Partitioned DML transaction ID." + "description": "The transaction to use. For queries, if none is provided, the default is a temporary read-only transaction with strong concurrency. Standard DML statements require a read-write transaction. To protect against replays, single-use transactions are not supported. The caller must either supply an existing transaction ID or begin a new transaction. Partitioned DML requires an existing Partitioned DML transaction ID." } }, "type": "object" }, "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL)\nsyntax. CEL is a C-like expression language. The syntax and semantics of CEL\nare documented at https://github.com/google/cel-spec.\n\nExample (Comparison):\n\n title: \"Summary size limit\"\n description: \"Determines if a summary is less than 100 chars\"\n expression: \"document.summary.size() \u003c 100\"\n\nExample (Equality):\n\n title: \"Requestor is owner\"\n description: \"Determines if requestor is the document owner\"\n expression: \"document.owner == request.auth.claims.email\"\n\nExample (Logic):\n\n title: \"Public documents\"\n description: \"Determine whether the document should be publicly visible\"\n expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\n\nExample (Data Manipulation):\n\n title: \"Notification string\"\n description: \"Create a notification string with a timestamp.\"\n expression: \"'New message received at ' + string(document.create_time)\"\n\nThe exact variables and functions that may be referenced within an expression\nare determined by the service that evaluates it. See the service\ndocumentation for additional information.", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. 
Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { - "description": "Optional. Description of the expression. This is a longer text which\ndescribes the expression, e.g. when hovered over it in a UI.", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, "expression": { - "description": "Textual representation of an expression in Common Expression Language\nsyntax.", + "description": "Textual representation of an expression in Common Expression Language syntax.", "type": "string" }, "location": { - "description": "Optional. String indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", "type": "string" }, "title": { - "description": "Optional. Title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", "type": "string" } }, @@ -2381,7 +2392,7 @@ "id": "Field", "properties": { "name": { - "description": "The name of the field. For reads, this is the column name. For\nSQL queries, it is the column alias (e.g., `\"Word\"` in the\nquery `\"SELECT 'hello' AS Word\"`), or the column name (e.g.,\n`\"ColName\"` in the query `\"SELECT ColName FROM Table\"`). Some\ncolumns might have an empty name (e.g., !\"SELECT\nUPPER(ColName)\"`). Note that a query result can contain\nmultiple fields with the same name.", + "description": "The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `\"Word\"` in the query `\"SELECT 'hello' AS Word\"`), or the column name (e.g., `\"ColName\"` in the query `\"SELECT ColName FROM Table\"`). Some columns might have an empty name (e.g., !\"SELECT UPPER(ColName)\"`). 
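(Editor's illustrative aside, not part of the vendored diff: an Expr populated with the comparison example given verbatim in the description above.)

    {
      "title": "Summary size limit",
      "description": "Determines if a summary is less than 100 chars",
      "expression": "document.summary.size() < 100"
    }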
Note that a query result can contain multiple fields with the same name.", "type": "string" }, "type": { @@ -2396,7 +2407,7 @@ "id": "GetDatabaseDdlResponse", "properties": { "statements": { - "description": "A list of formatted DDL statements defining the schema of the database\nspecified in the request.", + "description": "A list of formatted DDL statements defining the schema of the database specified in the request.", "items": { "type": "string" }, @@ -2411,7 +2422,7 @@ "properties": { "options": { "$ref": "GetPolicyOptions", - "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to\n`GetIamPolicy`." + "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to `GetIamPolicy`." } }, "type": "object" @@ -2421,7 +2432,7 @@ "id": "GetPolicyOptions", "properties": { "requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.", + "description": "Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -2433,11 +2444,11 @@ "id": "Instance", "properties": { "config": { - "description": "Required. The name of the instance's configuration. Values are of the form\n`projects/\u003cproject\u003e/instanceConfigs/\u003cconfiguration\u003e`. See\nalso InstanceConfig and\nListInstanceConfigs.", + "description": "Required. The name of the instance's configuration. Values are of the form `projects//instanceConfigs/`. See also InstanceConfig and ListInstanceConfigs.", "type": "string" }, "displayName": { - "description": "Required. The descriptive name for this instance as it appears in UIs.\nMust be unique per project and between 4 and 30 characters in length.", + "description": "Required. The descriptive name for this instance as it appears in UIs. Must be unique per project and between 4 and 30 characters in length.", "type": "string" }, "endpointUris": { @@ -2451,20 +2462,20 @@ "additionalProperties": { "type": "string" }, - "description": "Cloud Labels are a flexible and lightweight mechanism for organizing cloud\nresources into groups that reflect a customer's organizational needs and\ndeployment strategies. Cloud Labels can be used to filter collections of\nresources. They can be used to control how resource metrics are aggregated.\nAnd they can be used as arguments to policy management rules (e.g. 
route,\nfirewall, load balancing, etc.).\n\n * Label keys must be between 1 and 63 characters long and must conform to\n the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.\n * Label values must be between 0 and 63 characters long and must conform\n to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.\n * No more than 64 labels can be associated with a given resource.\n\nSee https://goo.gl/xmQnxf for more information on and examples of labels.\n\nIf you plan to use labels in your own code, please note that additional\ncharacters may be allowed in the future. And so you are advised to use an\ninternal label representation, such as JSON, which doesn't rely upon\nspecific characters being disallowed. For example, representing labels\nas the string: name + \"_\" + value would prove problematic if we were to\nallow \"_\" in a future release.", + "description": "Cloud Labels are a flexible and lightweight mechanism for organizing cloud resources into groups that reflect a customer's organizational needs and deployment strategies. Cloud Labels can be used to filter collections of resources. They can be used to control how resource metrics are aggregated. And they can be used as arguments to policy management rules (e.g. route, firewall, load balancing, etc.). * Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. * Label values must be between 0 and 63 characters long and must conform to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. * No more than 64 labels can be associated with a given resource. See https://goo.gl/xmQnxf for more information on and examples of labels. If you plan to use labels in your own code, please note that additional characters may be allowed in the future. And so you are advised to use an internal label representation, such as JSON, which doesn't rely upon specific characters being disallowed. For example, representing labels as the string: name + \"_\" + value would prove problematic if we were to allow \"_\" in a future release.", "type": "object" }, "name": { - "description": "Required. A unique identifier for the instance, which cannot be changed\nafter the instance is created. Values are of the form\n`projects/\u003cproject\u003e/instances/a-z*[a-z0-9]`. The final\nsegment of the name must be between 2 and 64 characters in length.", + "description": "Required. A unique identifier for the instance, which cannot be changed after the instance is created. Values are of the form `projects//instances/a-z*[a-z0-9]`. The final segment of the name must be between 2 and 64 characters in length.", "type": "string" }, "nodeCount": { - "description": "The number of nodes allocated to this instance. This\nmay be zero in API responses for instances that are not yet in state\n`READY`.\n\nSee [the\ndocumentation](https://cloud.google.com/spanner/docs/instances#node_count)\nfor more information about nodes.", + "description": "The number of nodes allocated to this instance. This may be zero in API responses for instances that are not yet in state `READY`. See [the documentation](https://cloud.google.com/spanner/docs/instances#node_count) for more information about nodes.", "format": "int32", "type": "integer" }, "state": { - "description": "Output only. The current instance state. For\nCreateInstance, the state must be\neither omitted or set to `CREATING`. For\nUpdateInstance, the state must be\neither omitted or set to `READY`.", + "description": "Output only. 
The current instance state. For CreateInstance, the state must be either omitted or set to `CREATING`. For UpdateInstance, the state must be either omitted or set to `READY`.", "enum": [ "STATE_UNSPECIFIED", "CREATING", @@ -2472,16 +2483,17 @@ ], "enumDescriptions": [ "Not specified.", - "The instance is still being created. Resources may not be\navailable yet, and operations such as database creation may not\nwork.", - "The instance is fully created and ready to do work such as\ncreating databases." + "The instance is still being created. Resources may not be available yet, and operations such as database creation may not work.", + "The instance is fully created and ready to do work such as creating databases." ], + "readOnly": true, "type": "string" } }, "type": "object" }, "InstanceConfig": { - "description": "A possible configuration for a Cloud Spanner instance. Configurations\ndefine the geographic placement of nodes and their replication.", + "description": "A possible configuration for a Cloud Spanner instance. Configurations define the geographic placement of nodes and their replication.", "id": "InstanceConfig", "properties": { "displayName": { @@ -2489,11 +2501,11 @@ "type": "string" }, "name": { - "description": "A unique identifier for the instance configuration. Values\nare of the form\n`projects/\u003cproject\u003e/instanceConfigs/a-z*`", + "description": "A unique identifier for the instance configuration. Values are of the form `projects//instanceConfigs/a-z*`", "type": "string" }, "replicas": { - "description": "The geographic placement of nodes in this instance configuration and their\nreplication properties.", + "description": "The geographic placement of nodes in this instance configuration and their replication properties.", "items": { "$ref": "ReplicaInfo" }, @@ -2503,32 +2515,32 @@ "type": "object" }, "KeyRange": { - "description": "KeyRange represents a range of rows in a table or index.\n\nA range has a start key and an end key. These keys can be open or\nclosed, indicating if the range includes rows with that key.\n\nKeys are represented by lists, where the ith value in the list\ncorresponds to the ith component of the table or index primary key.\nIndividual values are encoded as described\nhere.\n\nFor example, consider the following table definition:\n\n CREATE TABLE UserEvents (\n UserName STRING(MAX),\n EventDate STRING(10)\n ) PRIMARY KEY(UserName, EventDate);\n\nThe following keys name rows in this table:\n\n \"Bob\", \"2014-09-23\"\n\nSince the `UserEvents` table's `PRIMARY KEY` clause names two\ncolumns, each `UserEvents` key has two elements; the first is the\n`UserName`, and the second is the `EventDate`.\n\nKey ranges with multiple components are interpreted\nlexicographically by component using the table or index key's declared\nsort order. For example, the following range returns all events for\nuser `\"Bob\"` that occurred in the year 2015:\n\n \"start_closed\": [\"Bob\", \"2015-01-01\"]\n \"end_closed\": [\"Bob\", \"2015-12-31\"]\n\nStart and end keys can omit trailing key components. 
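(Editor's illustrative aside, not part of the vendored diff: a minimal Instance body combining the config, displayName, name, and nodeCount fields described above; the project, instance, and configuration names are placeholders.)

    {
      "name": "projects/my-project/instances/example-instance",
      "config": "projects/my-project/instanceConfigs/example-config",
      "displayName": "Example Instance",
      "nodeCount": 1
    }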
This affects the\ninclusion and exclusion of rows that exactly match the provided key\ncomponents: if the key is closed, then rows that exactly match the\nprovided components are included; if the key is open, then rows\nthat exactly match are not included.\n\nFor example, the following range includes all events for `\"Bob\"` that\noccurred during and after the year 2000:\n\n \"start_closed\": [\"Bob\", \"2000-01-01\"]\n \"end_closed\": [\"Bob\"]\n\nThe next example retrieves all events for `\"Bob\"`:\n\n \"start_closed\": [\"Bob\"]\n \"end_closed\": [\"Bob\"]\n\nTo retrieve events before the year 2000:\n\n \"start_closed\": [\"Bob\"]\n \"end_open\": [\"Bob\", \"2000-01-01\"]\n\nThe following range includes all rows in the table:\n\n \"start_closed\": []\n \"end_closed\": []\n\nThis range returns all users whose `UserName` begins with any\ncharacter from A to C:\n\n \"start_closed\": [\"A\"]\n \"end_open\": [\"D\"]\n\nThis range returns all users whose `UserName` begins with B:\n\n \"start_closed\": [\"B\"]\n \"end_open\": [\"C\"]\n\nKey ranges honor column sort order. For example, suppose a table is\ndefined as follows:\n\n CREATE TABLE DescendingSortedTable {\n Key INT64,\n ...\n ) PRIMARY KEY(Key DESC);\n\nThe following range retrieves all rows with key values between 1\nand 100 inclusive:\n\n \"start_closed\": [\"100\"]\n \"end_closed\": [\"1\"]\n\nNote that 100 is passed as the start, and 1 is passed as the end,\nbecause `Key` is a descending column in the schema.", + "description": "KeyRange represents a range of rows in a table or index. A range has a start key and an end key. These keys can be open or closed, indicating if the range includes rows with that key. Keys are represented by lists, where the ith value in the list corresponds to the ith component of the table or index primary key. Individual values are encoded as described here. For example, consider the following table definition: CREATE TABLE UserEvents ( UserName STRING(MAX), EventDate STRING(10) ) PRIMARY KEY(UserName, EventDate); The following keys name rows in this table: \"Bob\", \"2014-09-23\" Since the `UserEvents` table's `PRIMARY KEY` clause names two columns, each `UserEvents` key has two elements; the first is the `UserName`, and the second is the `EventDate`. Key ranges with multiple components are interpreted lexicographically by component using the table or index key's declared sort order. For example, the following range returns all events for user `\"Bob\"` that occurred in the year 2015: \"start_closed\": [\"Bob\", \"2015-01-01\"] \"end_closed\": [\"Bob\", \"2015-12-31\"] Start and end keys can omit trailing key components. This affects the inclusion and exclusion of rows that exactly match the provided key components: if the key is closed, then rows that exactly match the provided components are included; if the key is open, then rows that exactly match are not included. 
For example, the following range includes all events for `\"Bob\"` that occurred during and after the year 2000: \"start_closed\": [\"Bob\", \"2000-01-01\"] \"end_closed\": [\"Bob\"] The next example retrieves all events for `\"Bob\"`: \"start_closed\": [\"Bob\"] \"end_closed\": [\"Bob\"] To retrieve events before the year 2000: \"start_closed\": [\"Bob\"] \"end_open\": [\"Bob\", \"2000-01-01\"] The following range includes all rows in the table: \"start_closed\": [] \"end_closed\": [] This range returns all users whose `UserName` begins with any character from A to C: \"start_closed\": [\"A\"] \"end_open\": [\"D\"] This range returns all users whose `UserName` begins with B: \"start_closed\": [\"B\"] \"end_open\": [\"C\"] Key ranges honor column sort order. For example, suppose a table is defined as follows: CREATE TABLE DescendingSortedTable { Key INT64, ... ) PRIMARY KEY(Key DESC); The following range retrieves all rows with key values between 1 and 100 inclusive: \"start_closed\": [\"100\"] \"end_closed\": [\"1\"] Note that 100 is passed as the start, and 1 is passed as the end, because `Key` is a descending column in the schema.", "id": "KeyRange", "properties": { "endClosed": { - "description": "If the end is closed, then the range includes all rows whose\nfirst `len(end_closed)` key columns exactly match `end_closed`.", + "description": "If the end is closed, then the range includes all rows whose first `len(end_closed)` key columns exactly match `end_closed`.", "items": { "type": "any" }, "type": "array" }, "endOpen": { - "description": "If the end is open, then the range excludes rows whose first\n`len(end_open)` key columns exactly match `end_open`.", + "description": "If the end is open, then the range excludes rows whose first `len(end_open)` key columns exactly match `end_open`.", "items": { "type": "any" }, "type": "array" }, "startClosed": { - "description": "If the start is closed, then the range includes all rows whose\nfirst `len(start_closed)` key columns exactly match `start_closed`.", + "description": "If the start is closed, then the range includes all rows whose first `len(start_closed)` key columns exactly match `start_closed`.", "items": { "type": "any" }, "type": "array" }, "startOpen": { - "description": "If the start is open, then the range excludes rows whose first\n`len(start_open)` key columns exactly match `start_open`.", + "description": "If the start is open, then the range excludes rows whose first `len(start_open)` key columns exactly match `start_open`.", "items": { "type": "any" }, @@ -2538,15 +2550,15 @@ "type": "object" }, "KeySet": { - "description": "`KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All\nthe keys are expected to be in the same table or index. The keys need\nnot be sorted in any particular way.\n\nIf the same key is specified multiple times in the set (for example\nif two ranges, two keys, or a key and a range overlap), Cloud Spanner\nbehaves as if the key were only specified once.", + "description": "`KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All the keys are expected to be in the same table or index. The keys need not be sorted in any particular way. 
If the same key is specified multiple times in the set (for example if two ranges, two keys, or a key and a range overlap), Cloud Spanner behaves as if the key were only specified once.", "id": "KeySet", "properties": { "all": { - "description": "For convenience `all` can be set to `true` to indicate that this\n`KeySet` matches all keys in the table or index. Note that any keys\nspecified in `keys` or `ranges` are only yielded once.", + "description": "For convenience `all` can be set to `true` to indicate that this `KeySet` matches all keys in the table or index. Note that any keys specified in `keys` or `ranges` are only yielded once.", "type": "boolean" }, "keys": { - "description": "A list of specific keys. Entries in `keys` should have exactly as\nmany elements as there are columns in the primary or index key\nwith which this `KeySet` is used. Individual key values are\nencoded as described here.", + "description": "A list of specific keys. Entries in `keys` should have exactly as many elements as there are columns in the primary or index key with which this `KeySet` is used. Individual key values are encoded as described here.", "items": { "items": { "type": "any" @@ -2556,7 +2568,7 @@ "type": "array" }, "ranges": { - "description": "A list of key ranges. See KeyRange for more information about\nkey range specifications.", + "description": "A list of key ranges. See KeyRange for more information about key range specifications.", "items": { "$ref": "KeyRange" }, @@ -2566,15 +2578,15 @@ "type": "object" }, "ListBackupOperationsResponse": { - "description": "The response for\nListBackupOperations.", + "description": "The response for ListBackupOperations.", "id": "ListBackupOperationsResponse", "properties": { "nextPageToken": { - "description": "`next_page_token` can be sent in a subsequent\nListBackupOperations\ncall to fetch more of the matching metadata.", + "description": "`next_page_token` can be sent in a subsequent ListBackupOperations call to fetch more of the matching metadata.", "type": "string" }, "operations": { - "description": "The list of matching backup long-running\noperations. Each operation's name will be\nprefixed by the backup's name and the operation's\nmetadata will be of type\nCreateBackupMetadata. Operations returned include those that are\npending or have completed/failed/canceled within the last 7 days.\nOperations returned are ordered by\n`operation.metadata.value.progress.start_time` in descending order starting\nfrom the most recently started operation.", + "description": "The list of matching backup long-running operations. Each operation's name will be prefixed by the backup's name and the operation's metadata will be of type CreateBackupMetadata. Operations returned include those that are pending or have completed/failed/canceled within the last 7 days. Operations returned are ordered by `operation.metadata.value.progress.start_time` in descending order starting from the most recently started operation.", "items": { "$ref": "Operation" }, @@ -2588,29 +2600,29 @@ "id": "ListBackupsResponse", "properties": { "backups": { - "description": "The list of matching backups. Backups returned are ordered by `create_time`\nin descending order, starting from the most recent `create_time`.", + "description": "The list of matching backups. 
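(Editor's illustrative aside, not part of the vendored diff: a KeySet for the UserEvents table used in the KeyRange examples above, combining one exact key with the closed range for user "Bob" during 2015 quoted in the description.)

    {
      "keys": [
        ["Bob", "2014-09-23"]
      ],
      "ranges": [
        {
          "startClosed": ["Bob", "2015-01-01"],
          "endClosed": ["Bob", "2015-12-31"]
        }
      ]
    }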
Backups returned are ordered by `create_time` in descending order, starting from the most recent `create_time`.", "items": { "$ref": "Backup" }, "type": "array" }, "nextPageToken": { - "description": "`next_page_token` can be sent in a subsequent\nListBackups call to fetch more\nof the matching backups.", + "description": "`next_page_token` can be sent in a subsequent ListBackups call to fetch more of the matching backups.", "type": "string" } }, "type": "object" }, "ListDatabaseOperationsResponse": { - "description": "The response for\nListDatabaseOperations.", + "description": "The response for ListDatabaseOperations.", "id": "ListDatabaseOperationsResponse", "properties": { "nextPageToken": { - "description": "`next_page_token` can be sent in a subsequent\nListDatabaseOperations\ncall to fetch more of the matching metadata.", + "description": "`next_page_token` can be sent in a subsequent ListDatabaseOperations call to fetch more of the matching metadata.", "type": "string" }, "operations": { - "description": "The list of matching database long-running\noperations. Each operation's name will be\nprefixed by the database's name. The operation's\nmetadata field type\n`metadata.type_url` describes the type of the metadata.", + "description": "The list of matching database long-running operations. Each operation's name will be prefixed by the database's name. The operation's metadata field type `metadata.type_url` describes the type of the metadata.", "items": { "$ref": "Operation" }, @@ -2631,7 +2643,7 @@ "type": "array" }, "nextPageToken": { - "description": "`next_page_token` can be sent in a subsequent\nListDatabases call to fetch more\nof the matching databases.", + "description": "`next_page_token` can be sent in a subsequent ListDatabases call to fetch more of the matching databases.", "type": "string" } }, @@ -2649,7 +2661,7 @@ "type": "array" }, "nextPageToken": { - "description": "`next_page_token` can be sent in a subsequent\nListInstanceConfigs call to\nfetch more of the matching instance configurations.", + "description": "`next_page_token` can be sent in a subsequent ListInstanceConfigs call to fetch more of the matching instance configurations.", "type": "string" } }, @@ -2667,7 +2679,7 @@ "type": "array" }, "nextPageToken": { - "description": "`next_page_token` can be sent in a subsequent\nListInstances call to fetch more\nof the matching instances.", + "description": "`next_page_token` can be sent in a subsequent ListInstances call to fetch more of the matching instances.", "type": "string" } }, @@ -2696,7 +2708,7 @@ "id": "ListSessionsResponse", "properties": { "nextPageToken": { - "description": "`next_page_token` can be sent in a subsequent\nListSessions call to fetch more of the matching\nsessions.", + "description": "`next_page_token` can be sent in a subsequent ListSessions call to fetch more of the matching sessions.", "type": "string" }, "sessions": { @@ -2710,38 +2722,38 @@ "type": "object" }, "Mutation": { - "description": "A modification to one or more Cloud Spanner rows. Mutations can be\napplied to a Cloud Spanner database by sending them in a\nCommit call.", + "description": "A modification to one or more Cloud Spanner rows. Mutations can be applied to a Cloud Spanner database by sending them in a Commit call.", "id": "Mutation", "properties": { "delete": { "$ref": "Delete", - "description": "Delete rows from a table. Succeeds whether or not the named\nrows were present." + "description": "Delete rows from a table. 
Succeeds whether or not the named rows were present." }, "insert": { "$ref": "Write", - "description": "Insert new rows in a table. If any of the rows already exist,\nthe write or transaction fails with error `ALREADY_EXISTS`." + "description": "Insert new rows in a table. If any of the rows already exist, the write or transaction fails with error `ALREADY_EXISTS`." }, "insertOrUpdate": { "$ref": "Write", - "description": "Like insert, except that if the row already exists, then\nits column values are overwritten with the ones provided. Any\ncolumn values not explicitly written are preserved.\n\nWhen using insert_or_update, just as when using insert, all `NOT\nNULL` columns in the table must be given a value. This holds true\neven when the row already exists and will therefore actually be updated." + "description": "Like insert, except that if the row already exists, then its column values are overwritten with the ones provided. Any column values not explicitly written are preserved. When using insert_or_update, just as when using insert, all `NOT NULL` columns in the table must be given a value. This holds true even when the row already exists and will therefore actually be updated." }, "replace": { "$ref": "Write", - "description": "Like insert, except that if the row already exists, it is\ndeleted, and the column values provided are inserted\ninstead. Unlike insert_or_update, this means any values not\nexplicitly written become `NULL`.\n\nIn an interleaved table, if you create the child table with the\n`ON DELETE CASCADE` annotation, then replacing a parent row\nalso deletes the child rows. Otherwise, you must delete the\nchild rows before you replace the parent row." + "description": "Like insert, except that if the row already exists, it is deleted, and the column values provided are inserted instead. Unlike insert_or_update, this means any values not explicitly written become `NULL`. In an interleaved table, if you create the child table with the `ON DELETE CASCADE` annotation, then replacing a parent row also deletes the child rows. Otherwise, you must delete the child rows before you replace the parent row." }, "update": { "$ref": "Write", - "description": "Update existing rows in a table. If any of the rows does not\nalready exist, the transaction fails with error `NOT_FOUND`." + "description": "Update existing rows in a table. If any of the rows does not already exist, the transaction fails with error `NOT_FOUND`." } }, "type": "object" }, "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", "properties": { "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", "type": "boolean" }, "error": { @@ -2753,11 +2765,11 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. 
Any method that returns a\nlong-running operation should document the metadata type, if any.", + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", "type": "object" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, "response": { @@ -2765,23 +2777,23 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", "type": "object" } }, "type": "object" }, "OperationProgress": { - "description": "Encapsulates progress related information for a Cloud Spanner long\nrunning operation.", + "description": "Encapsulates progress related information for a Cloud Spanner long running operation.", "id": "OperationProgress", "properties": { "endTime": { - "description": "If set, the time at which this operation failed or was completed\nsuccessfully.", + "description": "If set, the time at which this operation failed or was completed successfully.", "format": "google-datetime", "type": "string" }, "progressPercent": { - "description": "Percent completion of the operation.\nValues are between 0 and 100 inclusive.", + "description": "Percent completion of the operation. Values are between 0 and 100 inclusive.", "format": "int32", "type": "integer" }, @@ -2794,7 +2806,7 @@ "type": "object" }, "OptimizeRestoredDatabaseMetadata": { - "description": "Metadata type for the long-running operation used to track the progress\nof optimizations performed on a newly restored database. This long-running\noperation is automatically created by the system after the successful\ncompletion of a database restore, and cannot be cancelled.", + "description": "Metadata type for the long-running operation used to track the progress of optimizations performed on a newly restored database. 
This long-running operation is automatically created by the system after the successful completion of a database restore, and cannot be cancelled.", "id": "OptimizeRestoredDatabaseMetadata", "properties": { "name": { @@ -2809,28 +2821,28 @@ "type": "object" }, "PartialResultSet": { - "description": "Partial results from a streaming read or SQL query. Streaming reads and\nSQL queries better tolerate large result sets, large rows, and large\nvalues, but are a little trickier to consume.", + "description": "Partial results from a streaming read or SQL query. Streaming reads and SQL queries better tolerate large result sets, large rows, and large values, but are a little trickier to consume.", "id": "PartialResultSet", "properties": { "chunkedValue": { - "description": "If true, then the final value in values is chunked, and must\nbe combined with more values from subsequent `PartialResultSet`s\nto obtain a complete field value.", + "description": "If true, then the final value in values is chunked, and must be combined with more values from subsequent `PartialResultSet`s to obtain a complete field value.", "type": "boolean" }, "metadata": { "$ref": "ResultSetMetadata", - "description": "Metadata about the result set, such as row type information.\nOnly present in the first response." + "description": "Metadata about the result set, such as row type information. Only present in the first response." }, "resumeToken": { - "description": "Streaming calls might be interrupted for a variety of reasons, such\nas TCP connection loss. If this occurs, the stream of results can\nbe resumed by re-sending the original request and including\n`resume_token`. Note that executing any other transaction in the\nsame session invalidates the token.", + "description": "Streaming calls might be interrupted for a variety of reasons, such as TCP connection loss. If this occurs, the stream of results can be resumed by re-sending the original request and including `resume_token`. Note that executing any other transaction in the same session invalidates the token.", "format": "byte", "type": "string" }, "stats": { "$ref": "ResultSetStats", - "description": "Query plan and execution statistics for the statement that produced this\nstreaming result set. These can be requested by setting\nExecuteSqlRequest.query_mode and are sent\nonly once with the last response in the stream.\nThis field will also be present in the last response for DML\nstatements." + "description": "Query plan and execution statistics for the statement that produced this streaming result set. These can be requested by setting ExecuteSqlRequest.query_mode and are sent only once with the last response in the stream. This field will also be present in the last response for DML statements." }, "values": { - "description": "A streamed result set consists of a stream of values, which might\nbe split into many `PartialResultSet` messages to accommodate\nlarge rows and/or large values. Every N complete values defines a\nrow, where N is equal to the number of entries in\nmetadata.row_type.fields.\n\nMost values are encoded based on type as described\nhere.\n\nIt is possible that the last value in values is \"chunked\",\nmeaning that the rest of the value is sent in subsequent\n`PartialResultSet`(s). This is denoted by the chunked_value\nfield. Two or more chunked values can be merged to form a\ncomplete value as follows:\n\n * `bool/number/null`: cannot be chunked\n * `string`: concatenate the strings\n * `list`: concatenate the lists. 
If the last element in a list is a\n `string`, `list`, or `object`, merge it with the first element in\n the next list by applying these rules recursively.\n * `object`: concatenate the (field name, field value) pairs. If a\n field name is duplicated, then apply these rules recursively\n to merge the field values.\n\nSome examples of merging:\n\n # Strings are concatenated.\n \"foo\", \"bar\" =\u003e \"foobar\"\n\n # Lists of non-strings are concatenated.\n [2, 3], [4] =\u003e [2, 3, 4]\n\n # Lists are concatenated, but the last and first elements are merged\n # because they are strings.\n [\"a\", \"b\"], [\"c\", \"d\"] =\u003e [\"a\", \"bc\", \"d\"]\n\n # Lists are concatenated, but the last and first elements are merged\n # because they are lists. Recursively, the last and first elements\n # of the inner lists are merged because they are strings.\n [\"a\", [\"b\", \"c\"]], [[\"d\"], \"e\"] =\u003e [\"a\", [\"b\", \"cd\"], \"e\"]\n\n # Non-overlapping object fields are combined.\n {\"a\": \"1\"}, {\"b\": \"2\"} =\u003e {\"a\": \"1\", \"b\": 2\"}\n\n # Overlapping object fields are merged.\n {\"a\": \"1\"}, {\"a\": \"2\"} =\u003e {\"a\": \"12\"}\n\n # Examples of merging objects containing lists of strings.\n {\"a\": [\"1\"]}, {\"a\": [\"2\"]} =\u003e {\"a\": [\"12\"]}\n\nFor a more complete example, suppose a streaming SQL query is\nyielding a result set whose rows contain a single string\nfield. The following `PartialResultSet`s might be yielded:\n\n {\n \"metadata\": { ... }\n \"values\": [\"Hello\", \"W\"]\n \"chunked_value\": true\n \"resume_token\": \"Af65...\"\n }\n {\n \"values\": [\"orl\"]\n \"chunked_value\": true\n \"resume_token\": \"Bqp2...\"\n }\n {\n \"values\": [\"d\"]\n \"resume_token\": \"Zx1B...\"\n }\n\nThis sequence of `PartialResultSet`s encodes two rows, one\ncontaining the field value `\"Hello\"`, and a second containing the\nfield value `\"World\" = \"W\" + \"orl\" + \"d\"`.", + "description": "A streamed result set consists of a stream of values, which might be split into many `PartialResultSet` messages to accommodate large rows and/or large values. Every N complete values defines a row, where N is equal to the number of entries in metadata.row_type.fields. Most values are encoded based on type as described here. It is possible that the last value in values is \"chunked\", meaning that the rest of the value is sent in subsequent `PartialResultSet`(s). This is denoted by the chunked_value field. Two or more chunked values can be merged to form a complete value as follows: * `bool/number/null`: cannot be chunked * `string`: concatenate the strings * `list`: concatenate the lists. If the last element in a list is a `string`, `list`, or `object`, merge it with the first element in the next list by applying these rules recursively. * `object`: concatenate the (field name, field value) pairs. If a field name is duplicated, then apply these rules recursively to merge the field values. Some examples of merging: # Strings are concatenated. \"foo\", \"bar\" =\u003e \"foobar\" # Lists of non-strings are concatenated. [2, 3], [4] =\u003e [2, 3, 4] # Lists are concatenated, but the last and first elements are merged # because they are strings. [\"a\", \"b\"], [\"c\", \"d\"] =\u003e [\"a\", \"bc\", \"d\"] # Lists are concatenated, but the last and first elements are merged # because they are lists. Recursively, the last and first elements # of the inner lists are merged because they are strings. 
[\"a\", [\"b\", \"c\"]], [[\"d\"], \"e\"] =\u003e [\"a\", [\"b\", \"cd\"], \"e\"] # Non-overlapping object fields are combined. {\"a\": \"1\"}, {\"b\": \"2\"} =\u003e {\"a\": \"1\", \"b\": 2\"} # Overlapping object fields are merged. {\"a\": \"1\"}, {\"a\": \"2\"} =\u003e {\"a\": \"12\"} # Examples of merging objects containing lists of strings. {\"a\": [\"1\"]}, {\"a\": [\"2\"]} =\u003e {\"a\": [\"12\"]} For a more complete example, suppose a streaming SQL query is yielding a result set whose rows contain a single string field. The following `PartialResultSet`s might be yielded: { \"metadata\": { ... } \"values\": [\"Hello\", \"W\"] \"chunked_value\": true \"resume_token\": \"Af65...\" } { \"values\": [\"orl\"] \"chunked_value\": true \"resume_token\": \"Bqp2...\" } { \"values\": [\"d\"] \"resume_token\": \"Zx1B...\" } This sequence of `PartialResultSet`s encodes two rows, one containing the field value `\"Hello\"`, and a second containing the field value `\"World\" = \"W\" + \"orl\" + \"d\"`.", "items": { "type": "any" }, @@ -2840,11 +2852,11 @@ "type": "object" }, "Partition": { - "description": "Information returned for each partition returned in a\nPartitionResponse.", + "description": "Information returned for each partition returned in a PartitionResponse.", "id": "Partition", "properties": { "partitionToken": { - "description": "This token can be passed to Read, StreamingRead, ExecuteSql, or\nExecuteStreamingSql requests to restrict the results to those identified by\nthis partition token.", + "description": "This token can be passed to Read, StreamingRead, ExecuteSql, or ExecuteStreamingSql requests to restrict the results to those identified by this partition token.", "format": "byte", "type": "string" } @@ -2852,16 +2864,16 @@ "type": "object" }, "PartitionOptions": { - "description": "Options for a PartitionQueryRequest and\nPartitionReadRequest.", + "description": "Options for a PartitionQueryRequest and PartitionReadRequest.", "id": "PartitionOptions", "properties": { "maxPartitions": { - "description": "**Note:** This hint is currently ignored by PartitionQuery and\nPartitionRead requests.\n\nThe desired maximum number of partitions to return. For example, this may\nbe set to the number of workers available. The default for this option\nis currently 10,000. The maximum value is currently 200,000. This is only\na hint. The actual number of partitions returned may be smaller or larger\nthan this maximum count request.", + "description": "**Note:** This hint is currently ignored by PartitionQuery and PartitionRead requests. The desired maximum number of partitions to return. For example, this may be set to the number of workers available. The default for this option is currently 10,000. The maximum value is currently 200,000. This is only a hint. The actual number of partitions returned may be smaller or larger than this maximum count request.", "format": "int64", "type": "string" }, "partitionSizeBytes": { - "description": "**Note:** This hint is currently ignored by PartitionQuery and\nPartitionRead requests.\n\nThe desired data size for each partition generated. The default for this\noption is currently 1 GiB. This is only a hint. The actual size of each\npartition may be smaller or larger than this size request.", + "description": "**Note:** This hint is currently ignored by PartitionQuery and PartitionRead requests. The desired data size for each partition generated. The default for this option is currently 1 GiB. This is only a hint. 
The actual size of each partition may be smaller or larger than this size request.", "format": "int64", "type": "string" } @@ -2876,7 +2888,7 @@ "additionalProperties": { "$ref": "Type" }, - "description": "It is not always possible for Cloud Spanner to infer the right SQL type\nfrom a JSON value. For example, values of type `BYTES` and values\nof type `STRING` both appear in params as JSON strings.\n\nIn these cases, `param_types` can be used to specify the exact\nSQL type for some or all of the SQL query parameters. See the\ndefinition of Type for more information\nabout SQL types.", + "description": "It is not always possible for Cloud Spanner to infer the right SQL type from a JSON value. For example, values of type `BYTES` and values of type `STRING` both appear in params as JSON strings. In these cases, `param_types` can be used to specify the exact SQL type for some or all of the SQL query parameters. See the definition of Type for more information about SQL types.", "type": "object" }, "params": { @@ -2884,7 +2896,7 @@ "description": "Properties of the object.", "type": "any" }, - "description": "Parameter names and values that bind to placeholders in the SQL string.\n\nA parameter placeholder consists of the `@` character followed by the\nparameter name (for example, `@firstName`). Parameter names can contain\nletters, numbers, and underscores.\n\nParameters can appear anywhere that a literal value is expected. The same\nparameter name can be used more than once, for example:\n\n`\"WHERE id \u003e @msg_id AND id \u003c @msg_id + 100\"`\n\nIt is an error to execute a SQL statement with unbound parameters.", + "description": "Parameter names and values that bind to placeholders in the SQL string. A parameter placeholder consists of the `@` character followed by the parameter name (for example, `@firstName`). Parameter names can contain letters, numbers, and underscores. Parameters can appear anywhere that a literal value is expected. The same parameter name can be used more than once, for example: `\"WHERE id \u003e @msg_id AND id \u003c @msg_id + 100\"` It is an error to execute a SQL statement with unbound parameters.", "type": "object" }, "partitionOptions": { @@ -2892,12 +2904,12 @@ "description": "Additional options that affect how many partitions are created." }, "sql": { - "description": "Required. The query request to generate partitions for. The request will fail if\nthe query is not root partitionable. The query plan of a root\npartitionable query has a single distributed union operator. A distributed\nunion operator conceptually divides one or more tables into multiple\nsplits, remotely evaluates a subquery independently on each split, and\nthen unions all results.\n\nThis must not contain DML commands, such as INSERT, UPDATE, or\nDELETE. Use ExecuteStreamingSql with a\nPartitionedDml transaction for large, partition-friendly DML operations.", + "description": "Required. The query request to generate partitions for. The request will fail if the query is not root partitionable. The query plan of a root partitionable query has a single distributed union operator. A distributed union operator conceptually divides one or more tables into multiple splits, remotely evaluates a subquery independently on each split, and then unions all results. This must not contain DML commands, such as INSERT, UPDATE, or DELETE. 
Use ExecuteStreamingSql with a PartitionedDml transaction for large, partition-friendly DML operations.", "type": "string" }, "transaction": { "$ref": "TransactionSelector", - "description": "Read only snapshot transactions are supported, read/write and single use\ntransactions are not." + "description": "Read only snapshot transactions are supported, read/write and single use transactions are not." } }, "type": "object" @@ -2907,19 +2919,19 @@ "id": "PartitionReadRequest", "properties": { "columns": { - "description": "The columns of table to be returned for each row matching\nthis request.", + "description": "The columns of table to be returned for each row matching this request.", "items": { "type": "string" }, "type": "array" }, "index": { - "description": "If non-empty, the name of an index on table. This index is\nused instead of the table primary key when interpreting key_set\nand sorting result rows. See key_set for further information.", + "description": "If non-empty, the name of an index on table. This index is used instead of the table primary key when interpreting key_set and sorting result rows. See key_set for further information.", "type": "string" }, "keySet": { "$ref": "KeySet", - "description": "Required. `key_set` identifies the rows to be yielded. `key_set` names the\nprimary keys of the rows in table to be yielded, unless index\nis present. If index is present, then key_set instead names\nindex keys in index.\n\nIt is not an error for the `key_set` to name rows that do not\nexist in the database. Read yields nothing for nonexistent rows." + "description": "Required. `key_set` identifies the rows to be yielded. `key_set` names the primary keys of the rows in table to be yielded, unless index is present. If index is present, then key_set instead names index keys in index. It is not an error for the `key_set` to name rows that do not exist in the database. Read yields nothing for nonexistent rows." }, "partitionOptions": { "$ref": "PartitionOptions", @@ -2931,13 +2943,13 @@ }, "transaction": { "$ref": "TransactionSelector", - "description": "Read only snapshot transactions are supported, read/write and single use\ntransactions are not." + "description": "Read only snapshot transactions are supported, read/write and single use transactions are not." } }, "type": "object" }, "PartitionResponse": { - "description": "The response for PartitionQuery\nor PartitionRead", + "description": "The response for PartitionQuery or PartitionRead", "id": "PartitionResponse", "properties": { "partitions": { @@ -2980,7 +2992,7 @@ "description": "Properties of the object.", "type": "any" }, - "description": "The execution statistics associated with the node, contained in a group of\nkey-value pairs. Only present if the plan was returned as a result of a\nprofile query. For example, number of executions, number of rows/time per\nexecution etc.", + "description": "The execution statistics associated with the node, contained in a group of key-value pairs. Only present if the plan was returned as a result of a profile query. For example, number of executions, number of rows/time per execution etc.", "type": "object" }, "index": { @@ -2989,7 +3001,7 @@ "type": "integer" }, "kind": { - "description": "Used to determine the type of node. May be needed for visualizing\ndifferent kinds of nodes differently. 
For example, If the node is a\nSCALAR node, it will have a condensed representation\nwhich can be used to directly embed a description of the node in its\nparent.", + "description": "Used to determine the type of node. May be needed for visualizing different kinds of nodes differently. For example, If the node is a SCALAR node, it will have a condensed representation which can be used to directly embed a description of the node in its parent.", "enum": [ "KIND_UNSPECIFIED", "RELATIONAL", @@ -2997,8 +3009,8 @@ ], "enumDescriptions": [ "Not specified.", - "Denotes a Relational operator node in the expression tree. Relational\noperators represent iterative processing of rows during query execution.\nFor example, a `TableScan` operation that reads rows from a table.", - "Denotes a Scalar node in the expression tree. Scalar nodes represent\nnon-iterable entities in the query plan. For example, constants or\narithmetic operators appearing inside predicate expressions or references\nto column names." + "Denotes a Relational operator node in the expression tree. Relational operators represent iterative processing of rows during query execution. For example, a `TableScan` operation that reads rows from a table.", + "Denotes a Scalar node in the expression tree. Scalar nodes represent non-iterable entities in the query plan. For example, constants or arithmetic operators appearing inside predicate expressions or references to column names." ], "type": "string" }, @@ -3007,7 +3019,7 @@ "description": "Properties of the object.", "type": "any" }, - "description": "Attributes relevant to the node contained in a group of key-value pairs.\nFor example, a Parameter Reference node could have the following\ninformation in its metadata:\n\n {\n \"parameter_reference\": \"param1\",\n \"parameter_type\": \"array\"\n }", + "description": "Attributes relevant to the node contained in a group of key-value pairs. For example, a Parameter Reference node could have the following information in its metadata: { \"parameter_reference\": \"param1\", \"parameter_type\": \"array\" }", "type": "object" }, "shortRepresentation": { @@ -3018,23 +3030,23 @@ "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access\ncontrols for Google Cloud resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). A `role` is a named list of\npermissions; each `role` can be an IAM predefined role or a user-created\ncustom role.\n\nOptionally, a `binding` can specify a `condition`, which is a logical\nexpression that allows access to a resource only if the expression evaluates\nto `true`. 
A condition can add constraints based on attributes of the\nrequest, the resource, or both.\n\n**JSON example:**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\"user:eve@example.com\"],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n }\n\n**YAML example:**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n - etag: BwWWja0YfJA=\n - version: 3\n\nFor a description of IAM and its features, see the\n[IAM documentation](https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "bindings": { - "description": "Associates a list of `members` to a `role`. 
Optionally, may specify a\n`condition` that determines how and when the `bindings` are applied. Each\nof the `bindings` must contain at least one member.", + "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", "format": "byte", "type": "string" }, "version": { - "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. Requests that specify an invalid value\nare rejected.\n\nAny operation that affects conditional role bindings must specify version\n`3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding\n* Adding a conditional role binding to a policy\n* Changing a conditional role binding in a policy\n* Removing any role binding, with or without a condition, from a policy\n that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may\nspecify any valid version or leave the field unset.", + "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. 
This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" } @@ -3046,7 +3058,7 @@ "id": "QueryOptions", "properties": { "optimizerVersion": { - "description": "An option to control the selection of optimizer version.\n\nThis parameter allows individual queries to pick different query\noptimizer versions.\n\nSpecifying \"latest\" as a value instructs Cloud Spanner to use the\nlatest supported query optimizer version. If not specified, Cloud Spanner\nuses optimizer version set at the database level options. Any other\npositive integer (from the list of supported optimizer versions)\noverrides the default optimizer version for query execution.\nThe list of supported optimizer versions can be queried from\nSPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. Executing a SQL statement\nwith an invalid optimizer version will fail with a syntax error\n(`INVALID_ARGUMENT`) status.\n\nThe `optimizer_version` statement hint has precedence over this setting.", + "description": "An option to control the selection of optimizer version. This parameter allows individual queries to pick different query optimizer versions. Specifying \"latest\" as a value instructs Cloud Spanner to use the latest supported query optimizer version. If not specified, Cloud Spanner uses optimizer version set at the database level options. Any other positive integer (from the list of supported optimizer versions) overrides the default optimizer version for query execution. The list of supported optimizer versions can be queried from SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. Executing a SQL statement with an invalid optimizer version will fail with a syntax error (`INVALID_ARGUMENT`) status. See https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer for more information on managing the query optimizer. The `optimizer_version` statement hint has precedence over this setting.", "type": "string" } }, @@ -3057,7 +3069,7 @@ "id": "QueryPlan", "properties": { "planNodes": { - "description": "The nodes in the query plan. Plan nodes are returned in pre-order starting\nwith the plan root. Each PlanNode's `id` corresponds to its index in\n`plan_nodes`.", + "description": "The nodes in the query plan. Plan nodes are returned in pre-order starting with the plan root. Each PlanNode's `id` corresponds to its index in `plan_nodes`.", "items": { "$ref": "PlanNode" }, @@ -3071,67 +3083,67 @@ "id": "ReadOnly", "properties": { "exactStaleness": { - "description": "Executes all reads at a timestamp that is `exact_staleness`\nold. The timestamp is chosen soon after the read is started.\n\nGuarantees that all writes that have committed more than the\nspecified number of seconds ago are visible. 
Because Cloud Spanner\nchooses the exact timestamp, this mode works even if the client's\nlocal clock is substantially skewed from Cloud Spanner commit\ntimestamps.\n\nUseful for reading at nearby replicas without the distributed\ntimestamp negotiation overhead of `max_staleness`.", + "description": "Executes all reads at a timestamp that is `exact_staleness` old. The timestamp is chosen soon after the read is started. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading at nearby replicas without the distributed timestamp negotiation overhead of `max_staleness`.", "format": "google-duration", "type": "string" }, "maxStaleness": { - "description": "Read data at a timestamp \u003e= `NOW - max_staleness`\nseconds. Guarantees that all writes that have committed more\nthan the specified number of seconds ago are visible. Because\nCloud Spanner chooses the exact timestamp, this mode works even if\nthe client's local clock is substantially skewed from Cloud Spanner\ncommit timestamps.\n\nUseful for reading the freshest data available at a nearby\nreplica, while bounding the possible staleness if the local\nreplica has fallen behind.\n\nNote that this option can only be used in single-use\ntransactions.", + "description": "Read data at a timestamp \u003e= `NOW - max_staleness` seconds. Guarantees that all writes that have committed more than the specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the client's local clock is substantially skewed from Cloud Spanner commit timestamps. Useful for reading the freshest data available at a nearby replica, while bounding the possible staleness if the local replica has fallen behind. Note that this option can only be used in single-use transactions.", "format": "google-duration", "type": "string" }, "minReadTimestamp": { - "description": "Executes all reads at a timestamp \u003e= `min_read_timestamp`.\n\nThis is useful for requesting fresher data than some previous\nread, or data that is fresh enough to observe the effects of some\npreviously committed transaction whose timestamp is known.\n\nNote that this option can only be used in single-use transactions.\n\nA timestamp in RFC3339 UTC \\\"Zulu\\\" format, accurate to nanoseconds.\nExample: `\"2014-10-02T15:01:23.045123456Z\"`.", + "description": "Executes all reads at a timestamp \u003e= `min_read_timestamp`. This is useful for requesting fresher data than some previous read, or data that is fresh enough to observe the effects of some previously committed transaction whose timestamp is known. Note that this option can only be used in single-use transactions. A timestamp in RFC3339 UTC \\\"Zulu\\\" format, accurate to nanoseconds. Example: `\"2014-10-02T15:01:23.045123456Z\"`.", "format": "google-datetime", "type": "string" }, "readTimestamp": { - "description": "Executes all reads at the given timestamp. Unlike other modes,\nreads at a specific timestamp are repeatable; the same read at\nthe same timestamp always returns the same data. 
If the\ntimestamp is in the future, the read will block until the\nspecified timestamp, modulo the read's deadline.\n\nUseful for large scale consistent reads such as mapreduces, or\nfor coordinating many reads against a consistent snapshot of the\ndata.\n\nA timestamp in RFC3339 UTC \\\"Zulu\\\" format, accurate to nanoseconds.\nExample: `\"2014-10-02T15:01:23.045123456Z\"`.", + "description": "Executes all reads at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline. Useful for large scale consistent reads such as mapreduces, or for coordinating many reads against a consistent snapshot of the data. A timestamp in RFC3339 UTC \\\"Zulu\\\" format, accurate to nanoseconds. Example: `\"2014-10-02T15:01:23.045123456Z\"`.", "format": "google-datetime", "type": "string" }, "returnReadTimestamp": { - "description": "If true, the Cloud Spanner-selected read timestamp is included in\nthe Transaction message that describes the transaction.", + "description": "If true, the Cloud Spanner-selected read timestamp is included in the Transaction message that describes the transaction.", "type": "boolean" }, "strong": { - "description": "Read at a timestamp where all previously committed transactions\nare visible.", + "description": "Read at a timestamp where all previously committed transactions are visible.", "type": "boolean" } }, "type": "object" }, "ReadRequest": { - "description": "The request for Read and\nStreamingRead.", + "description": "The request for Read and StreamingRead.", "id": "ReadRequest", "properties": { "columns": { - "description": "Required. The columns of table to be returned for each row matching\nthis request.", + "description": "Required. The columns of table to be returned for each row matching this request.", "items": { "type": "string" }, "type": "array" }, "index": { - "description": "If non-empty, the name of an index on table. This index is\nused instead of the table primary key when interpreting key_set\nand sorting result rows. See key_set for further information.", + "description": "If non-empty, the name of an index on table. This index is used instead of the table primary key when interpreting key_set and sorting result rows. See key_set for further information.", "type": "string" }, "keySet": { "$ref": "KeySet", - "description": "Required. `key_set` identifies the rows to be yielded. `key_set` names the\nprimary keys of the rows in table to be yielded, unless index\nis present. If index is present, then key_set instead names\nindex keys in index.\n\nIf the partition_token field is empty, rows are yielded\nin table primary key order (if index is empty) or index key order\n(if index is non-empty). If the partition_token field is not\nempty, rows will be yielded in an unspecified order.\n\nIt is not an error for the `key_set` to name rows that do not\nexist in the database. Read yields nothing for nonexistent rows." + "description": "Required. `key_set` identifies the rows to be yielded. `key_set` names the primary keys of the rows in table to be yielded, unless index is present. If index is present, then key_set instead names index keys in index. If the partition_token field is empty, rows are yielded in table primary key order (if index is empty) or index key order (if index is non-empty). 
If the partition_token field is not empty, rows will be yielded in an unspecified order. It is not an error for the `key_set` to name rows that do not exist in the database. Read yields nothing for nonexistent rows." }, "limit": { - "description": "If greater than zero, only the first `limit` rows are yielded. If `limit`\nis zero, the default is no limit. A limit cannot be specified if\n`partition_token` is set.", + "description": "If greater than zero, only the first `limit` rows are yielded. If `limit` is zero, the default is no limit. A limit cannot be specified if `partition_token` is set.", "format": "int64", "type": "string" }, "partitionToken": { - "description": "If present, results will be restricted to the specified partition\npreviously created using PartitionRead(). There must be an exact\nmatch for the values of fields common to this message and the\nPartitionReadRequest message used to create this partition_token.", + "description": "If present, results will be restricted to the specified partition previously created using PartitionRead(). There must be an exact match for the values of fields common to this message and the PartitionReadRequest message used to create this partition_token.", "format": "byte", "type": "string" }, "resumeToken": { - "description": "If this request is resuming a previously interrupted read,\n`resume_token` should be copied from the last\nPartialResultSet yielded before the interruption. Doing this\nenables the new read to resume where the last read left off. The\nrest of the request parameters must exactly match the request\nthat yielded this token.", + "description": "If this request is resuming a previously interrupted read, `resume_token` should be copied from the last PartialResultSet yielded before the interruption. Doing this enables the new read to resume where the last read left off. The rest of the request parameters must exactly match the request that yielded this token.", "format": "byte", "type": "string" }, @@ -3141,13 +3153,13 @@ }, "transaction": { "$ref": "TransactionSelector", - "description": "The transaction to use. If none is provided, the default is a\ntemporary read-only transaction with strong concurrency." + "description": "The transaction to use. If none is provided, the default is a temporary read-only transaction with strong concurrency." } }, "type": "object" }, "ReadWrite": { - "description": "Message type to initiate a read-write transaction. Currently this\ntransaction type has no options.", + "description": "Message type to initiate a read-write transaction. Currently this transaction type has no options.", "id": "ReadWrite", "properties": {}, "type": "object" @@ -3156,7 +3168,7 @@ "id": "ReplicaInfo", "properties": { "defaultLeaderLocation": { - "description": "If true, this location is designated as the default leader location where\nleader replicas are placed. See the [region types\ndocumentation](https://cloud.google.com/spanner/docs/instances#region_types)\nfor more details.", + "description": "If true, this location is designated as the default leader location where leader replicas are placed. See the [region types documentation](https://cloud.google.com/spanner/docs/instances#region_types) for more details.", "type": "boolean" }, "location": { @@ -3173,9 +3185,9 @@ ], "enumDescriptions": [ "Not specified.", - "Read-write replicas support both reads and writes. 
These replicas:\n\n* Maintain a full copy of your data.\n* Serve reads.\n* Can vote whether to commit a write.\n* Participate in leadership election.\n* Are eligible to become a leader.", - "Read-only replicas only support reads (not writes). Read-only replicas:\n\n* Maintain a full copy of your data.\n* Serve reads.\n* Do not participate in voting to commit writes.\n* Are not eligible to become a leader.", - "Witness replicas don't support reads but do participate in voting to\ncommit writes. Witness replicas:\n\n* Do not maintain a full copy of data.\n* Do not serve reads.\n* Vote whether to commit writes.\n* Participate in leader election but are not eligible to become leader." + "Read-write replicas support both reads and writes. These replicas: * Maintain a full copy of your data. * Serve reads. * Can vote whether to commit a write. * Participate in leadership election. * Are eligible to become a leader.", + "Read-only replicas only support reads (not writes). Read-only replicas: * Maintain a full copy of your data. * Serve reads. * Do not participate in voting to commit writes. * Are not eligible to become a leader.", + "Witness replicas don't support reads but do participate in voting to commit writes. Witness replicas: * Do not maintain a full copy of data. * Do not serve reads. * Vote whether to commit writes. * Participate in leader election but are not eligible to become leader." ], "type": "string" } @@ -3183,7 +3195,7 @@ "type": "object" }, "RestoreDatabaseMetadata": { - "description": "Metadata type for the long-running operation returned by\nRestoreDatabase.", + "description": "Metadata type for the long-running operation returned by RestoreDatabase.", "id": "RestoreDatabaseMetadata", "properties": { "backupInfo": { @@ -3191,7 +3203,7 @@ "description": "Information about the backup used to restore the database." }, "cancelTime": { - "description": "The time at which cancellation of this operation was received.\nOperations.CancelOperation\nstarts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not guaranteed.\nClients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a\ngoogle.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", + "description": "The time at which cancellation of this operation was received. Operations.CancelOperation starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. 
On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", "format": "google-datetime", "type": "string" }, @@ -3200,12 +3212,12 @@ "type": "string" }, "optimizeDatabaseOperationName": { - "description": "If exists, the name of the long-running operation that will be used to\ntrack the post-restore optimization process to optimize the performance of\nthe restored database, and remove the dependency on the restore source.\nThe name is of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/databases/\u003cdatabase\u003e/operations/\u003coperation\u003e`\nwhere the \u003cdatabase\u003e is the name of database being created and restored to.\nThe metadata type of the long-running operation is\nOptimizeRestoredDatabaseMetadata. This long-running operation will be\nautomatically created by the system after the RestoreDatabase long-running\noperation completes successfully. This operation will not be created if the\nrestore was not successful.", + "description": "If exists, the name of the long-running operation that will be used to track the post-restore optimization process to optimize the performance of the restored database, and remove the dependency on the restore source. The name is of the form `projects//instances//databases//operations/` where the is the name of database being created and restored to. The metadata type of the long-running operation is OptimizeRestoredDatabaseMetadata. This long-running operation will be automatically created by the system after the RestoreDatabase long-running operation completes successfully. This operation will not be created if the restore was not successful.", "type": "string" }, "progress": { "$ref": "OperationProgress", - "description": "The progress of the\nRestoreDatabase\noperation." + "description": "The progress of the RestoreDatabase operation." }, "sourceType": { "description": "The type of the restore source.", @@ -3223,15 +3235,15 @@ "type": "object" }, "RestoreDatabaseRequest": { - "description": "The request for\nRestoreDatabase.", + "description": "The request for RestoreDatabase.", "id": "RestoreDatabaseRequest", "properties": { "backup": { - "description": "Name of the backup from which to restore. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/backups/\u003cbackup\u003e`.", + "description": "Name of the backup from which to restore. Values are of the form `projects//instances//backups/`.", "type": "string" }, "databaseId": { - "description": "Required. The id of the database to create and restore to. This\ndatabase must not already exist. The `database_id` appended to\n`parent` forms the full database name of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/databases/\u003cdatabase_id\u003e`.", + "description": "Required. The id of the database to create and restore to. This database must not already exist. The `database_id` appended to `parent` forms the full database name of the form `projects//instances//databases/`.", "type": "string" } }, @@ -3243,7 +3255,7 @@ "properties": { "backupInfo": { "$ref": "BackupInfo", - "description": "Information about the backup used to restore the database. The backup\nmay no longer exist." + "description": "Information about the backup used to restore the database. The backup may no longer exist." 
}, "sourceType": { "description": "The type of the restore source.", @@ -3261,7 +3273,7 @@ "type": "object" }, "ResultSet": { - "description": "Results from Read or\nExecuteSql.", + "description": "Results from Read or ExecuteSql.", "id": "ResultSet", "properties": { "metadata": { @@ -3269,7 +3281,7 @@ "description": "Metadata about the result set, such as row type information." }, "rows": { - "description": "Each element in `rows` is a row whose format is defined by\nmetadata.row_type. The ith element\nin each row matches the ith field in\nmetadata.row_type. Elements are\nencoded based on type as described\nhere.", + "description": "Each element in `rows` is a row whose format is defined by metadata.row_type. The ith element in each row matches the ith field in metadata.row_type. Elements are encoded based on type as described here.", "items": { "items": { "type": "any" @@ -3280,7 +3292,7 @@ }, "stats": { "$ref": "ResultSetStats", - "description": "Query plan and execution statistics for the SQL statement that\nproduced this result set. These can be requested by setting\nExecuteSqlRequest.query_mode.\nDML statements always produce stats containing the number of rows\nmodified, unless executed using the\nExecuteSqlRequest.QueryMode.PLAN ExecuteSqlRequest.query_mode.\nOther fields may or may not be populated, based on the\nExecuteSqlRequest.query_mode." + "description": "Query plan and execution statistics for the SQL statement that produced this result set. These can be requested by setting ExecuteSqlRequest.query_mode. DML statements always produce stats containing the number of rows modified, unless executed using the ExecuteSqlRequest.QueryMode.PLAN ExecuteSqlRequest.query_mode. Other fields may or may not be populated, based on the ExecuteSqlRequest.query_mode." } }, "type": "object" @@ -3291,11 +3303,11 @@ "properties": { "rowType": { "$ref": "StructType", - "description": "Indicates the field names and types for the rows in the result\nset. For example, a SQL query like `\"SELECT UserId, UserName FROM\nUsers\"` could return a `row_type` value like:\n\n \"fields\": [\n { \"name\": \"UserId\", \"type\": { \"code\": \"INT64\" } },\n { \"name\": \"UserName\", \"type\": { \"code\": \"STRING\" } },\n ]" + "description": "Indicates the field names and types for the rows in the result set. For example, a SQL query like `\"SELECT UserId, UserName FROM Users\"` could return a `row_type` value like: \"fields\": [ { \"name\": \"UserId\", \"type\": { \"code\": \"INT64\" } }, { \"name\": \"UserName\", \"type\": { \"code\": \"STRING\" } }, ]" }, "transaction": { "$ref": "Transaction", - "description": "If the read or SQL query began a transaction as a side-effect, the\ninformation about the new transaction is yielded here." + "description": "If the read or SQL query began a transaction as a side-effect, the information about the new transaction is yielded here." } }, "type": "object" @@ -3313,7 +3325,7 @@ "description": "Properties of the object.", "type": "any" }, - "description": "Aggregated statistics from the execution of the query. Only present when\nthe query is profiled. For example, a query could return the statistics as\nfollows:\n\n {\n \"rows_returned\": \"3\",\n \"elapsed_time\": \"1.22 secs\",\n \"cpu_time\": \"1.19 secs\"\n }", + "description": "Aggregated statistics from the execution of the query. Only present when the query is profiled. 
For example, a query could return the statistics as follows: { \"rows_returned\": \"3\", \"elapsed_time\": \"1.22 secs\", \"cpu_time\": \"1.19 secs\" }", "type": "object" }, "rowCountExact": { @@ -3322,7 +3334,7 @@ "type": "string" }, "rowCountLowerBound": { - "description": "Partitioned DML does not offer exactly-once semantics, so it\nreturns a lower bound of the rows modified.", + "description": "Partitioned DML does not offer exactly-once semantics, so it returns a lower bound of the rows modified.", "format": "int64", "type": "string" } @@ -3346,24 +3358,27 @@ "id": "Session", "properties": { "approximateLastUseTime": { - "description": "Output only. The approximate timestamp when the session is last used. It is\ntypically earlier than the actual last use time.", + "description": "Output only. The approximate timestamp when the session is last used. It is typically earlier than the actual last use time.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "createTime": { "description": "Output only. The timestamp when the session is created.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "The labels for the session.\n\n * Label keys must be between 1 and 63 characters long and must conform to\n the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.\n * Label values must be between 0 and 63 characters long and must conform\n to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.\n * No more than 64 labels can be associated with a given session.\n\nSee https://goo.gl/xmQnxf for more information on and examples of labels.", + "description": "The labels for the session. * Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. * Label values must be between 0 and 63 characters long and must conform to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. * No more than 64 labels can be associated with a given session. See https://goo.gl/xmQnxf for more information on and examples of labels.", "type": "object" }, "name": { - "description": "The name of the session. This is always system-assigned; values provided\nwhen creating a session are ignored.", + "description": "Output only. The name of the session. This is always system-assigned.", + "readOnly": true, "type": "string" } }, @@ -3375,13 +3390,13 @@ "properties": { "policy": { "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." } }, "type": "object" }, "ShortRepresentation": { - "description": "Condensed representation of a node and its subtree. Only present for\n`SCALAR` PlanNode(s).", + "description": "Condensed representation of a node and its subtree. 
Only present for `SCALAR` PlanNode(s).", "id": "ShortRepresentation", "properties": { "description": { @@ -3393,7 +3408,7 @@ "format": "int32", "type": "integer" }, - "description": "A mapping of (subquery variable name) -\u003e (subquery node id) for cases\nwhere the `description` string of this node references a `SCALAR`\nsubquery contained in the expression subtree rooted at this node. The\nreferenced `SCALAR` subquery may not necessarily be a direct child of\nthis node.", + "description": "A mapping of (subquery variable name) -\u003e (subquery node id) for cases where the `description` string of this node references a `SCALAR` subquery contained in the expression subtree rooted at this node. The referenced `SCALAR` subquery may not necessarily be a direct child of this node.", "type": "object" } }, @@ -3407,7 +3422,7 @@ "additionalProperties": { "$ref": "Type" }, - "description": "It is not always possible for Cloud Spanner to infer the right SQL type\nfrom a JSON value. For example, values of type `BYTES` and values\nof type `STRING` both appear in params as JSON strings.\n\nIn these cases, `param_types` can be used to specify the exact\nSQL type for some or all of the SQL statement parameters. See the\ndefinition of Type for more information\nabout SQL types.", + "description": "It is not always possible for Cloud Spanner to infer the right SQL type from a JSON value. For example, values of type `BYTES` and values of type `STRING` both appear in params as JSON strings. In these cases, `param_types` can be used to specify the exact SQL type for some or all of the SQL statement parameters. See the definition of Type for more information about SQL types.", "type": "object" }, "params": { @@ -3415,7 +3430,7 @@ "description": "Properties of the object.", "type": "any" }, - "description": "Parameter names and values that bind to placeholders in the DML string.\n\nA parameter placeholder consists of the `@` character followed by the\nparameter name (for example, `@firstName`). Parameter names can contain\nletters, numbers, and underscores.\n\nParameters can appear anywhere that a literal value is expected. The\nsame parameter name can be used more than once, for example:\n\n`\"WHERE id \u003e @msg_id AND id \u003c @msg_id + 100\"`\n\nIt is an error to execute a SQL statement with unbound parameters.", + "description": "Parameter names and values that bind to placeholders in the DML string. A parameter placeholder consists of the `@` character followed by the parameter name (for example, `@firstName`). Parameter names can contain letters, numbers, and underscores. Parameters can appear anywhere that a literal value is expected. The same parameter name can be used more than once, for example: `\"WHERE id \u003e @msg_id AND id \u003c @msg_id + 100\"` It is an error to execute a SQL statement with unbound parameters.", "type": "object" }, "sql": { @@ -3426,7 +3441,7 @@ "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). 
Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -3435,7 +3450,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -3446,7 +3461,7 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, @@ -3457,7 +3472,7 @@ "id": "StructType", "properties": { "fields": { - "description": "The list of fields that make up this struct. Order is\nsignificant, because values of this struct type are represented as\nlists, where the order of field values matches the order of\nfields in the StructType. In turn, the order of fields\nmatches the order of columns in a read request, or the order of\nfields in the `SELECT` clause of a query.", + "description": "The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query.", "items": { "$ref": "Field" }, @@ -3471,7 +3486,7 @@ "id": "TestIamPermissionsRequest", "properties": { "permissions": { - "description": "REQUIRED: The set of permissions to check for 'resource'.\nPermissions with wildcards (such as '*', 'spanner.*', 'spanner.instances.*') are not allowed.", + "description": "REQUIRED: The set of permissions to check for 'resource'. 
Permissions with wildcards (such as '*', 'spanner.*', 'spanner.instances.*') are not allowed.", "items": { "type": "string" }, @@ -3485,7 +3500,7 @@ "id": "TestIamPermissionsResponse", "properties": { "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { "type": "string" }, @@ -3499,12 +3514,12 @@ "id": "Transaction", "properties": { "id": { - "description": "`id` may be used to identify the transaction in subsequent\nRead,\nExecuteSql,\nCommit, or\nRollback calls.\n\nSingle-use read-only transactions do not have IDs, because\nsingle-use transactions do not support multiple requests.", + "description": "`id` may be used to identify the transaction in subsequent Read, ExecuteSql, Commit, or Rollback calls. Single-use read-only transactions do not have IDs, because single-use transactions do not support multiple requests.", "format": "byte", "type": "string" }, "readTimestamp": { - "description": "For snapshot read-only transactions, the read timestamp chosen\nfor the transaction. Not returned by default: see\nTransactionOptions.ReadOnly.return_read_timestamp.\n\nA timestamp in RFC3339 UTC \\\"Zulu\\\" format, accurate to nanoseconds.\nExample: `\"2014-10-02T15:01:23.045123456Z\"`.", + "description": "For snapshot read-only transactions, the read timestamp chosen for the transaction. Not returned by default: see TransactionOptions.ReadOnly.return_read_timestamp. A timestamp in RFC3339 UTC \\\"Zulu\\\" format, accurate to nanoseconds. Example: `\"2014-10-02T15:01:23.045123456Z\"`.", "format": "google-datetime", "type": "string" } @@ -3512,31 +3527,31 @@ "type": "object" }, "TransactionOptions": { - "description": "# Transactions\n\n\nEach session can have at most one active transaction at a time. After the\nactive transaction is completed, the session can immediately be\nre-used for the next transaction. It is not necessary to create a\nnew session for each transaction.\n\n# Transaction Modes\n\nCloud Spanner supports three transaction modes:\n\n 1. Locking read-write. This type of transaction is the only way\n to write data into Cloud Spanner. These transactions rely on\n pessimistic locking and, if necessary, two-phase commit.\n Locking read-write transactions may abort, requiring the\n application to retry.\n\n 2. Snapshot read-only. This transaction type provides guaranteed\n consistency across several reads, but does not allow\n writes. Snapshot read-only transactions can be configured to\n read at timestamps in the past. Snapshot read-only\n transactions do not need to be committed.\n\n 3. Partitioned DML. This type of transaction is used to execute\n a single Partitioned DML statement. Partitioned DML partitions\n the key space and runs the DML statement over each partition\n in parallel using separate, internal transactions that commit\n independently. Partitioned DML transactions do not need to be\n committed.\n\nFor transactions that only read, snapshot read-only transactions\nprovide simpler semantics and are almost always faster. In\nparticular, read-only transactions do not take locks, so they do\nnot conflict with read-write transactions. As a consequence of not\ntaking locks, they also do not abort, so retry loops are not needed.\n\nTransactions may only read/write data in a single database. 
They\nmay, however, read/write data in different tables within that\ndatabase.\n\n## Locking Read-Write Transactions\n\nLocking transactions may be used to atomically read-modify-write\ndata anywhere in a database. This type of transaction is externally\nconsistent.\n\nClients should attempt to minimize the amount of time a transaction\nis active. Faster transactions commit with higher probability\nand cause less contention. Cloud Spanner attempts to keep read locks\nactive as long as the transaction continues to do reads, and the\ntransaction has not been terminated by\nCommit or\nRollback. Long periods of\ninactivity at the client may cause Cloud Spanner to release a\ntransaction's locks and abort it.\n\nConceptually, a read-write transaction consists of zero or more\nreads or SQL statements followed by\nCommit. At any time before\nCommit, the client can send a\nRollback request to abort the\ntransaction.\n\n### Semantics\n\nCloud Spanner can commit the transaction if all read locks it acquired\nare still valid at commit time, and it is able to acquire write\nlocks for all writes. Cloud Spanner can abort the transaction for any\nreason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees\nthat the transaction has not modified any user data in Cloud Spanner.\n\nUnless the transaction commits, Cloud Spanner makes no guarantees about\nhow long the transaction's locks were held for. It is an error to\nuse Cloud Spanner locks for any sort of mutual exclusion other than\nbetween Cloud Spanner transactions themselves.\n\n### Retrying Aborted Transactions\n\nWhen a transaction aborts, the application can choose to retry the\nwhole transaction again. To maximize the chances of successfully\ncommitting the retry, the client should execute the retry in the\nsame session as the original attempt. The original session's lock\npriority increases with each consecutive abort, meaning that each\nattempt has a slightly better chance of success than the previous.\n\nUnder some circumstances (e.g., many transactions attempting to\nmodify the same row(s)), a transaction can abort many times in a\nshort period before successfully committing. Thus, it is not a good\nidea to cap the number of retries a transaction can attempt;\ninstead, it is better to limit the total amount of wall time spent\nretrying.\n\n### Idle Transactions\n\nA transaction is considered idle if it has no outstanding reads or\nSQL queries and has not started a read or SQL query within the last 10\nseconds. Idle transactions can be aborted by Cloud Spanner so that they\ndon't hold on to locks indefinitely. In that case, the commit will\nfail with error `ABORTED`.\n\nIf this behavior is undesirable, periodically executing a simple\nSQL query in the transaction (e.g., `SELECT 1`) prevents the\ntransaction from becoming idle.\n\n## Snapshot Read-Only Transactions\n\nSnapshot read-only transactions provides a simpler method than\nlocking read-write transactions for doing several consistent\nreads. However, this type of transaction does not support writes.\n\nSnapshot transactions do not take locks. Instead, they work by\nchoosing a Cloud Spanner timestamp, then executing all reads at that\ntimestamp. Since they do not acquire locks, they do not block\nconcurrent read-write transactions.\n\nUnlike locking read-write transactions, snapshot read-only\ntransactions never abort. 
They can fail if the chosen read\ntimestamp is garbage collected; however, the default garbage\ncollection policy is generous enough that most applications do not\nneed to worry about this in practice.\n\nSnapshot read-only transactions do not need to call\nCommit or\nRollback (and in fact are not\npermitted to do so).\n\nTo execute a snapshot transaction, the client specifies a timestamp\nbound, which tells Cloud Spanner how to choose a read timestamp.\n\nThe types of timestamp bound are:\n\n - Strong (the default).\n - Bounded staleness.\n - Exact staleness.\n\nIf the Cloud Spanner database to be read is geographically distributed,\nstale read-only transactions can execute more quickly than strong\nor read-write transaction, because they are able to execute far\nfrom the leader replica.\n\nEach type of timestamp bound is discussed in detail below.\n\n### Strong\n\nStrong reads are guaranteed to see the effects of all transactions\nthat have committed before the start of the read. Furthermore, all\nrows yielded by a single read are consistent with each other -- if\nany part of the read observes a transaction, all parts of the read\nsee the transaction.\n\nStrong reads are not repeatable: two consecutive strong read-only\ntransactions might return inconsistent results if there are\nconcurrent writes. If consistency across reads is required, the\nreads should be executed within a transaction or at an exact read\ntimestamp.\n\nSee TransactionOptions.ReadOnly.strong.\n\n### Exact Staleness\n\nThese timestamp bounds execute reads at a user-specified\ntimestamp. Reads at a timestamp are guaranteed to see a consistent\nprefix of the global transaction history: they observe\nmodifications done by all transactions with a commit timestamp \u003c=\nthe read timestamp, and observe none of the modifications done by\ntransactions with a larger commit timestamp. They will block until\nall conflicting transactions that may be assigned commit timestamps\n\u003c= the read timestamp have finished.\n\nThe timestamp can either be expressed as an absolute Cloud Spanner commit\ntimestamp or a staleness relative to the current time.\n\nThese modes do not require a \"negotiation phase\" to pick a\ntimestamp. As a result, they execute slightly faster than the\nequivalent boundedly stale concurrency modes. On the other hand,\nboundedly stale reads usually return fresher results.\n\nSee TransactionOptions.ReadOnly.read_timestamp and\nTransactionOptions.ReadOnly.exact_staleness.\n\n### Bounded Staleness\n\nBounded staleness modes allow Cloud Spanner to pick the read timestamp,\nsubject to a user-provided staleness bound. Cloud Spanner chooses the\nnewest timestamp within the staleness bound that allows execution\nof the reads at the closest available replica without blocking.\n\nAll rows yielded are consistent with each other -- if any part of\nthe read observes a transaction, all parts of the read see the\ntransaction. Boundedly stale reads are not repeatable: two stale\nreads, even if they use the same staleness bound, can execute at\ndifferent timestamps and thus return inconsistent results.\n\nBoundedly stale reads execute in two phases: the first phase\nnegotiates a timestamp among all replicas needed to serve the\nread. In the second phase, reads are executed at the negotiated\ntimestamp.\n\nAs a result of the two phase execution, bounded staleness reads are\nusually a little slower than comparable exact staleness\nreads. 
However, they are typically able to return fresher\nresults, and are more likely to execute at the closest replica.\n\nBecause the timestamp negotiation requires up-front knowledge of\nwhich rows will be read, it can only be used with single-use\nread-only transactions.\n\nSee TransactionOptions.ReadOnly.max_staleness and\nTransactionOptions.ReadOnly.min_read_timestamp.\n\n### Old Read Timestamps and Garbage Collection\n\nCloud Spanner continuously garbage collects deleted and overwritten data\nin the background to reclaim storage space. This process is known\nas \"version GC\". By default, version GC reclaims versions after they\nare one hour old. Because of this, Cloud Spanner cannot perform reads\nat read timestamps more than one hour in the past. This\nrestriction also applies to in-progress reads and/or SQL queries whose\ntimestamp become too old while executing. Reads and SQL queries with\ntoo-old read timestamps fail with the error `FAILED_PRECONDITION`.\n\n## Partitioned DML Transactions\n\nPartitioned DML transactions are used to execute DML statements with a\ndifferent execution strategy that provides different, and often better,\nscalability properties for large, table-wide operations than DML in a\nReadWrite transaction. Smaller scoped statements, such as an OLTP workload,\nshould prefer using ReadWrite transactions.\n\nPartitioned DML partitions the keyspace and runs the DML statement on each\npartition in separate, internal transactions. These transactions commit\nautomatically when complete, and run independently from one another.\n\nTo reduce lock contention, this execution strategy only acquires read locks\non rows that match the WHERE clause of the statement. Additionally, the\nsmaller per-partition transactions hold locks for less time.\n\nThat said, Partitioned DML is not a drop-in replacement for standard DML used\nin ReadWrite transactions.\n\n - The DML statement must be fully-partitionable. Specifically, the statement\n must be expressible as the union of many statements which each access only\n a single row of the table.\n\n - The statement is not applied atomically to all rows of the table. Rather,\n the statement is applied atomically to partitions of the table, in\n independent transactions. Secondary index rows are updated atomically\n with the base table rows.\n\n - Partitioned DML does not guarantee exactly-once execution semantics\n against a partition. The statement will be applied at least once to each\n partition. It is strongly recommended that the DML statement should be\n idempotent to avoid unexpected results. For instance, it is potentially\n dangerous to run a statement such as\n `UPDATE table SET column = column + 1` as it could be run multiple times\n against some rows.\n\n - The partitions are committed automatically - there is no support for\n Commit or Rollback. If the call returns an error, or if the client issuing\n the ExecuteSql call dies, it is possible that some rows had the statement\n executed on them successfully. It is also possible that statement was\n never executed against other rows.\n\n - Partitioned DML transactions may only contain the execution of a single\n DML statement via ExecuteSql or ExecuteStreamingSql.\n\n - If any error is encountered during the execution of the partitioned DML\n operation (for instance, a UNIQUE INDEX violation, division by zero, or a\n value that cannot be stored due to schema constraints), then the\n operation is stopped at that point and an error is returned. 
It is\n possible that at this point, some partitions have been committed (or even\n committed multiple times), and other partitions have not been run at all.\n\nGiven the above, Partitioned DML is good fit for large, database-wide,\noperations that are idempotent, such as deleting old rows from a very large\ntable.", + "description": "# Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ### Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. 
### Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ### Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ### Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ### Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. 
Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp \u003c= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps \u003c= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a \"negotiation phase\" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ### Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ### Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as \"version GC\". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. 
Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table.", "id": "TransactionOptions", "properties": { "partitionedDml": { "$ref": "PartitionedDml", - "description": "Partitioned DML transaction.\n\nAuthorization to begin a Partitioned DML transaction requires\n`spanner.databases.beginPartitionedDmlTransaction` permission\non the `session` resource." + "description": "Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource." }, "readOnly": { "$ref": "ReadOnly", - "description": "Transaction will not write.\n\nAuthorization to begin a read-only transaction requires\n`spanner.databases.beginReadOnlyTransaction` permission\non the `session` resource." + "description": "Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource." }, "readWrite": { "$ref": "ReadWrite", - "description": "Transaction may write.\n\nAuthorization to begin a read-write transaction requires\n`spanner.databases.beginOrRollbackReadWriteTransaction` permission\non the `session` resource." + "description": "Transaction may write. Authorization to begin a read-write transaction requires `spanner.databases.beginOrRollbackReadWriteTransaction` permission on the `session` resource." 
} }, "type": "object" }, "TransactionSelector": { - "description": "This message is used to select the transaction in which a\nRead or\nExecuteSql call runs.\n\nSee TransactionOptions for more information about transactions.", + "description": "This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions.", "id": "TransactionSelector", "properties": { "begin": { "$ref": "TransactionOptions", - "description": "Begin a new transaction and execute this read or SQL query in\nit. The transaction ID of the new transaction is returned in\nResultSetMetadata.transaction, which is a Transaction." + "description": "Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction." }, "id": { "description": "Execute the read or SQL query in a previously-started transaction.", @@ -3545,18 +3560,18 @@ }, "singleUse": { "$ref": "TransactionOptions", - "description": "Execute the read or SQL query in a temporary transaction.\nThis is the most efficient way to execute a transaction that\nconsists of a single SQL query." + "description": "Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query." } }, "type": "object" }, "Type": { - "description": "`Type` indicates the type of a Cloud Spanner value, as might be stored in a\ntable cell or returned from an SQL query.", + "description": "`Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query.", "id": "Type", "properties": { "arrayElementType": { "$ref": "Type", - "description": "If code == ARRAY, then `array_element_type`\nis the type of the array elements." + "description": "If code == ARRAY, then `array_element_type` is the type of the array elements." }, "code": { "description": "Required. The TypeCode for this type.", @@ -3570,35 +3585,37 @@ "STRING", "BYTES", "ARRAY", - "STRUCT" + "STRUCT", + "NUMERIC" ], "enumDescriptions": [ "Not specified.", "Encoded as JSON `true` or `false`.", "Encoded as `string`, in decimal format.", - "Encoded as `number`, or the strings `\"NaN\"`, `\"Infinity\"`, or\n`\"-Infinity\"`.", - "Encoded as `string` in RFC 3339 timestamp format. The time zone\nmust be present, and must be `\"Z\"`.\n\nIf the schema has the column option\n`allow_commit_timestamp=true`, the placeholder string\n`\"spanner.commit_timestamp()\"` can be used to instruct the system\nto insert the commit timestamp associated with the transaction\ncommit.", + "Encoded as `number`, or the strings `\"NaN\"`, `\"Infinity\"`, or `\"-Infinity\"`.", + "Encoded as `string` in RFC 3339 timestamp format. The time zone must be present, and must be `\"Z\"`. If the schema has the column option `allow_commit_timestamp=true`, the placeholder string `\"spanner.commit_timestamp()\"` can be used to instruct the system to insert the commit timestamp associated with the transaction commit.", "Encoded as `string` in RFC 3339 date format.", "Encoded as `string`.", - "Encoded as a base64-encoded `string`, as described in RFC 4648,\nsection 4.", - "Encoded as `list`, where the list elements are represented\naccording to\narray_element_type.", - "Encoded as `list`, where list element `i` is represented according\nto [struct_type.fields[i]][google.spanner.v1.StructType.fields]." 
+ "Encoded as a base64-encoded `string`, as described in RFC 4648, section 4.", + "Encoded as `list`, where the list elements are represented according to array_element_type.", + "Encoded as `list`, where list element `i` is represented according to [struct_type.fields[i]][google.spanner.v1.StructType.fields].", + "Encoded as `string`, in decimal format or scientific notation format. Decimal format: `[+-]Digits[.[Digits]]` or `+-.Digits` Scientific notation: `[+-]Digits[.[Digits]][ExponentIndicator[+-]Digits]` or `+-.Digits[ExponentIndicator[+-]Digits]` (ExponentIndicator is `\"e\"` or `\"E\"`)" ], "type": "string" }, "structType": { "$ref": "StructType", - "description": "If code == STRUCT, then `struct_type`\nprovides type information for the struct's fields." + "description": "If code == STRUCT, then `struct_type` provides type information for the struct's fields." } }, "type": "object" }, "UpdateDatabaseDdlMetadata": { - "description": "Metadata type for the operation returned by\nUpdateDatabaseDdl.", + "description": "Metadata type for the operation returned by UpdateDatabaseDdl.", "id": "UpdateDatabaseDdlMetadata", "properties": { "commitTimestamps": { - "description": "Reports the commit timestamps of all statements that have\nsucceeded so far, where `commit_timestamps[i]` is the commit\ntimestamp for the statement `statements[i]`.", + "description": "Reports the commit timestamps of all statements that have succeeded so far, where `commit_timestamps[i]` is the commit timestamp for the statement `statements[i]`.", "items": { "format": "google-datetime", "type": "string" @@ -3610,7 +3627,7 @@ "type": "string" }, "statements": { - "description": "For an update this list contains all the statements. For an\nindividual statement, this list contains only that statement.", + "description": "For an update this list contains all the statements. For an individual statement, this list contains only that statement.", "items": { "type": "string" }, @@ -3620,11 +3637,11 @@ "type": "object" }, "UpdateDatabaseDdlRequest": { - "description": "Enqueues the given DDL statements to be applied, in order but not\nnecessarily all at once, to the database schema at some point (or\npoints) in the future. The server checks that the statements\nare executable (syntactically valid, name tables that exist, etc.)\nbefore enqueueing them, but they may still fail upon\nlater execution (e.g., if a statement from another batch of\nstatements is applied first and it conflicts in some way, or if\nthere is some data-related problem like a `NULL` value in a column to\nwhich `NOT NULL` would be added). If a statement fails, all\nsubsequent statements in the batch are automatically cancelled.\n\nEach batch of statements is assigned a name which can be used with\nthe Operations API to monitor\nprogress. See the\noperation_id field for more\ndetails.", + "description": "Enqueues the given DDL statements to be applied, in order but not necessarily all at once, to the database schema at some point (or points) in the future. The server checks that the statements are executable (syntactically valid, name tables that exist, etc.) before enqueueing them, but they may still fail upon later execution (e.g., if a statement from another batch of statements is applied first and it conflicts in some way, or if there is some data-related problem like a `NULL` value in a column to which `NOT NULL` would be added). If a statement fails, all subsequent statements in the batch are automatically cancelled. 
Each batch of statements is assigned a name which can be used with the Operations API to monitor progress. See the operation_id field for more details.", "id": "UpdateDatabaseDdlRequest", "properties": { "operationId": { - "description": "If empty, the new update request is assigned an\nautomatically-generated operation ID. Otherwise, `operation_id`\nis used to construct the name of the resulting\nOperation.\n\nSpecifying an explicit operation ID simplifies determining\nwhether the statements were executed in the event that the\nUpdateDatabaseDdl call is replayed,\nor the return value is otherwise lost: the database and\n`operation_id` fields can be combined to form the\nname of the resulting\nlongrunning.Operation: `\u003cdatabase\u003e/operations/\u003coperation_id\u003e`.\n\n`operation_id` should be unique within the database, and must be\na valid identifier: `a-z*`. Note that\nautomatically-generated operation IDs always begin with an\nunderscore. If the named operation already exists,\nUpdateDatabaseDdl returns\n`ALREADY_EXISTS`.", + "description": "If empty, the new update request is assigned an automatically-generated operation ID. Otherwise, `operation_id` is used to construct the name of the resulting Operation. Specifying an explicit operation ID simplifies determining whether the statements were executed in the event that the UpdateDatabaseDdl call is replayed, or the return value is otherwise lost: the database and `operation_id` fields can be combined to form the name of the resulting longrunning.Operation: `/operations/`. `operation_id` should be unique within the database, and must be a valid identifier: `a-z*`. Note that automatically-generated operation IDs always begin with an underscore. If the named operation already exists, UpdateDatabaseDdl returns `ALREADY_EXISTS`.", "type": "string" }, "statements": { @@ -3638,11 +3655,11 @@ "type": "object" }, "UpdateInstanceMetadata": { - "description": "Metadata type for the operation returned by\nUpdateInstance.", + "description": "Metadata type for the operation returned by UpdateInstance.", "id": "UpdateInstanceMetadata", "properties": { "cancelTime": { - "description": "The time at which this operation was cancelled. If set, this operation is\nin the process of undoing itself (which is guaranteed to succeed) and\ncannot be cancelled again.", + "description": "The time at which this operation was cancelled. If set, this operation is in the process of undoing itself (which is guaranteed to succeed) and cannot be cancelled again.", "format": "google-datetime", "type": "string" }, @@ -3656,7 +3673,7 @@ "description": "The desired end state of the update." }, "startTime": { - "description": "The time at which UpdateInstance\nrequest was received.", + "description": "The time at which UpdateInstance request was received.", "format": "google-datetime", "type": "string" } @@ -3668,23 +3685,23 @@ "id": "UpdateInstanceRequest", "properties": { "fieldMask": { - "description": "Required. A mask specifying which fields in Instance should be updated.\nThe field mask must always be specified; this prevents any future fields in\nInstance from being erased accidentally by clients that do not know\nabout them.", + "description": "Required. A mask specifying which fields in Instance should be updated. 
The field mask must always be specified; this prevents any future fields in Instance from being erased accidentally by clients that do not know about them.", "format": "google-fieldmask", "type": "string" }, "instance": { "$ref": "Instance", - "description": "Required. The instance to update, which must always include the instance\nname. Otherwise, only fields mentioned in field_mask need be included." + "description": "Required. The instance to update, which must always include the instance name. Otherwise, only fields mentioned in field_mask need be included." } }, "type": "object" }, "Write": { - "description": "Arguments to insert, update, insert_or_update, and\nreplace operations.", + "description": "Arguments to insert, update, insert_or_update, and replace operations.", "id": "Write", "properties": { "columns": { - "description": "The names of the columns in table to be written.\n\nThe list of columns must contain enough columns to allow\nCloud Spanner to derive values for all primary key columns in the\nrow(s) to be modified.", + "description": "The names of the columns in table to be written. The list of columns must contain enough columns to allow Cloud Spanner to derive values for all primary key columns in the row(s) to be modified.", "items": { "type": "string" }, @@ -3695,7 +3712,7 @@ "type": "string" }, "values": { - "description": "The values to be written. `values` can contain more than one\nlist of values. If it does, then multiple rows are written, one\nfor each entry in `values`. Each list in `values` must have\nexactly as many entries as there are entries in columns\nabove. Sending multiple lists is equivalent to sending multiple\n`Mutation`s, each containing one `values` entry and repeating\ntable and columns. Individual values in each list are\nencoded as described here.", + "description": "The values to be written. `values` can contain more than one list of values. If it does, then multiple rows are written, one for each entry in `values`. Each list in `values` must have exactly as many entries as there are entries in columns above. Sending multiple lists is equivalent to sending multiple `Mutation`s, each containing one `values` entry and repeating table and columns. Individual values in each list are encoded as described here.", "items": { "items": { "type": "any" diff --git a/vendor/google.golang.org/api/spanner/v1/spanner-gen.go b/vendor/google.golang.org/api/spanner/v1/spanner-gen.go index 926fd09eccb..9b2b60b7160 100644 --- a/vendor/google.golang.org/api/spanner/v1/spanner-gen.go +++ b/vendor/google.golang.org/api/spanner/v1/spanner-gen.go @@ -81,6 +81,7 @@ const apiId = "spanner:v1" const apiName = "spanner" const apiVersion = "v1" const basePath = "https://spanner.googleapis.com/" +const mtlsBasePath = "https://spanner.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -104,6 +105,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -279,64 +281,42 @@ type ProjectsInstancesOperationsService struct { // Backup: A backup of a Cloud Spanner database. type Backup struct { // CreateTime: Output only. 
The backup will contain an externally - // consistent - // copy of the database at the timestamp specified by - // `create_time`. `create_time` is approximately the time - // the + // consistent copy of the database at the timestamp specified by + // `create_time`. `create_time` is approximately the time the // CreateBackup request is received. CreateTime string `json:"createTime,omitempty"` - // Database: Required for the CreateBackup operation. - // Name of the database from which this backup was - // created. This needs to be in the same instance as the backup. - // Values are of the - // form - // `projects//instances//databases/`. + // Database: Required for the CreateBackup operation. Name of the + // database from which this backup was created. This needs to be in the + // same instance as the backup. Values are of the form + // `projects//instances//databases/`. Database string `json:"database,omitempty"` - // ExpireTime: Required for the CreateBackup - // operation. The expiration time of the backup, with - // microseconds - // granularity that must be at least 6 hours and at most 366 days - // from the time the CreateBackup request is processed. Once the - // `expire_time` - // has passed, the backup is eligible to be automatically deleted by - // Cloud - // Spanner to free the resources used by the backup. + // ExpireTime: Required for the CreateBackup operation. The expiration + // time of the backup, with microseconds granularity that must be at + // least 6 hours and at most 366 days from the time the CreateBackup + // request is processed. Once the `expire_time` has passed, the backup + // is eligible to be automatically deleted by Cloud Spanner to free the + // resources used by the backup. ExpireTime string `json:"expireTime,omitempty"` - // Name: Output only for the CreateBackup operation. - // Required for the UpdateBackup operation. - // - // A globally unique identifier for the backup which cannot be - // changed. Values are of the - // form - // `projects//instances//backups/a-z*[a-z0-9]` - // Th - // e final segment of the name must be between 2 and 60 characters - // in length. - // - // The backup is stored in the location(s) specified in the - // instance - // configuration of the instance containing the backup, identified - // by the prefix of the backup name of the - // form - // `projects//instances/`. + // Name: Output only for the CreateBackup operation. Required for the + // UpdateBackup operation. A globally unique identifier for the backup + // which cannot be changed. Values are of the form + // `projects//instances//backups/a-z*[a-z0-9]` The final segment of the + // name must be between 2 and 60 characters in length. The backup is + // stored in the location(s) specified in the instance configuration of + // the instance containing the backup, identified by the prefix of the + // backup name of the form `projects//instances/`. Name string `json:"name,omitempty"` // ReferencingDatabases: Output only. The names of the restored - // databases that reference the backup. - // The database names are of - // the form - // `projects//instances//databases/`. - // Refere - // ncing databases may exist in different instances. The existence - // of - // any referencing database prevents the backup from being deleted. When - // a - // restored database from the backup enters the `READY` state, the - // reference - // to the backup is removed. + // databases that reference the backup. The database names are of the + // form `projects//instances//databases/`. 
Referencing databases may + // exist in different instances. The existence of any referencing + // database prevents the backup from being deleted. When a restored + // database from the backup enters the `READY` state, the reference to + // the backup is removed. ReferencingDatabases []string `json:"referencingDatabases,omitempty"` // SizeBytes: Output only. Size of the backup in bytes. @@ -347,8 +327,7 @@ type Backup struct { // Possible values: // "STATE_UNSPECIFIED" - Not specified. // "CREATING" - The pending backup is still being created. Operations - // on the - // backup may fail with `FAILED_PRECONDITION` in this state. + // on the backup may fail with `FAILED_PRECONDITION` in this state. // "READY" - The backup is complete and ready for use. State string `json:"state,omitempty"` @@ -385,8 +364,7 @@ type BackupInfo struct { Backup string `json:"backup,omitempty"` // CreateTime: The backup contains an externally consistent copy of - // `source_database` at - // the timestamp specified by `create_time`. + // `source_database` at the timestamp specified by `create_time`. CreateTime string `json:"createTime,omitempty"` // SourceDatabase: Name of the database the backup was created from. @@ -418,12 +396,9 @@ func (s *BackupInfo) MarshalJSON() ([]byte, error) { // BatchCreateSessionsRequest: The request for BatchCreateSessions. type BatchCreateSessionsRequest struct { // SessionCount: Required. The number of sessions to be created in this - // batch call. - // The API may return fewer than the requested number of sessions. If - // a - // specific number of sessions are desired, the client can make - // additional - // calls to BatchCreateSessions (adjusting + // batch call. The API may return fewer than the requested number of + // sessions. If a specific number of sessions are desired, the client + // can make additional calls to BatchCreateSessions (adjusting // session_count as necessary). SessionCount int64 `json:"sessionCount,omitempty"` @@ -515,86 +490,60 @@ func (s *BeginTransactionRequest) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: The condition that is associated with this binding. - // NOTE: An unsatisfied condition will not allow user access via - // current - // binding. Different bindings, including their conditions, are - // examined - // independently. + // BindingId: A client-specified ID for this binding. Expected to be + // globally unique to support the internal bindings-by-ID API. + BindingId string `json:"bindingId,omitempty"` + + // Condition: The condition that is associated with this binding. If the + // condition evaluates to `true`, then this binding applies to the + // current request. If the condition evaluates to `false`, then this + // binding does not apply to the current request. However, a different + // role binding might grant the same role to one or more of the members + // in this binding. To learn which resources support conditions in their + // IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud - // Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. 
- // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@example.com` . - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. - // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a user that has been recently deleted. - // For - // example, `alice@example.com?uid=123456789012345678901`. If the - // user is - // recovered, this value reverts to `user:{emailid}` and the - // recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address - // (plus - // unique identifier) representing a service account that has been - // recently - // deleted. For example, - // + // Platform resource. `members` can have the following values: * + // `allUsers`: A special identifier that represents anyone who is on the + // internet; with or without a Google account. * + // `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. * + // `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . * + // `serviceAccount:{emailid}`: An email address that represents a + // service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An + // email address that represents a Google group. For example, + // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An + // email address (plus unique identifier) representing a user that has + // been recently deleted. For example, + // `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered + // user retains the role in the binding. * + // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account - // retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus - // unique - // identifier) representing a Google group that has been recently - // deleted. For example, - // `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` - // and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. - // - // + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains + // the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: + // An email address (plus unique identifier) representing a Google group + // that has been recently deleted. 
For example, + // `admins@example.com?uid=123456789012345678901`. If the group is + // recovered, this value reverts to `group:{emailid}` and the recovered + // group retains the role in the binding. * `domain:{domain}`: The G + // Suite domain (primary) that represents all the users of that domain. + // For example, `google.com` or `example.com`. Members []string `json:"members,omitempty"` - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Role: Role that is assigned to `members`. For example, + // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` - // ForceSendFields is a list of field names (e.g. "Condition") to + // ForceSendFields is a list of field names (e.g. "BindingId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -602,7 +551,7 @@ type Binding struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Condition") to include in + // NullFields is a list of field names (e.g. "BindingId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -618,34 +567,24 @@ func (s *Binding) MarshalJSON() ([]byte, error) { } // ChildLink: Metadata associated with a parent-child relationship -// appearing in a -// PlanNode. +// appearing in a PlanNode. type ChildLink struct { // ChildIndex: The node to which the link points. ChildIndex int64 `json:"childIndex,omitempty"` // Type: The type of the link. For example, in Hash Joins this could be - // used to - // distinguish between the build child and the probe child, or in the - // case - // of the child being an output variable, to represent the tag - // associated - // with the output variable. + // used to distinguish between the build child and the probe child, or + // in the case of the child being an output variable, to represent the + // tag associated with the output variable. Type string `json:"type,omitempty"` - // Variable: Only present if the child node is SCALAR and corresponds - // to an output variable of the parent node. The field carries the name - // of - // the output variable. - // For example, a `TableScan` operator that reads rows from a table - // will - // have child links to the `SCALAR` nodes representing the output - // variables - // created for each column that is read by the operator. The - // corresponding - // `variable` fields will be set to the variable names assigned to - // the - // columns. + // Variable: Only present if the child node is SCALAR and corresponds to + // an output variable of the parent node. The field carries the name of + // the output variable. For example, a `TableScan` operator that reads + // rows from a table will have child links to the `SCALAR` nodes + // representing the output variables created for each column that is + // read by the operator. The corresponding `variable` fields will be set + // to the variable names assigned to the columns. Variable string `json:"variable,omitempty"` // ForceSendFields is a list of field names (e.g. 
"ChildIndex") to @@ -674,23 +613,18 @@ func (s *ChildLink) MarshalJSON() ([]byte, error) { // CommitRequest: The request for Commit. type CommitRequest struct { // Mutations: The mutations to be executed when this transaction - // commits. All - // mutations are applied atomically, in the order they appear in - // this list. + // commits. All mutations are applied atomically, in the order they + // appear in this list. Mutations []*Mutation `json:"mutations,omitempty"` // SingleUseTransaction: Execute mutations in a temporary transaction. - // Note that unlike - // commit of a previously-started transaction, commit with a - // temporary transaction is non-idempotent. That is, if - // the - // `CommitRequest` is sent to Cloud Spanner more than once - // (for - // instance, due to retries in the application, or in the - // transport library), it is possible that the mutations are - // executed more than once. If this is undesirable, use - // BeginTransaction and - // Commit instead. + // Note that unlike commit of a previously-started transaction, commit + // with a temporary transaction is non-idempotent. That is, if the + // `CommitRequest` is sent to Cloud Spanner more than once (for + // instance, due to retries in the application, or in the transport + // library), it is possible that the mutations are executed more than + // once. If this is undesirable, use BeginTransaction and Commit + // instead. SingleUseTransaction *TransactionOptions `json:"singleUseTransaction,omitempty"` // TransactionId: Commit a previously-started transaction. @@ -753,28 +687,19 @@ func (s *CommitResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// CreateBackupMetadata: Metadata type for the operation returned -// by +// CreateBackupMetadata: Metadata type for the operation returned by // CreateBackup. type CreateBackupMetadata struct { // CancelTime: The time at which cancellation of this operation was - // received. - // Operations.CancelOperation - // starts asynchronous cancellation on a long-running operation. The - // server - // makes a best effort to cancel the operation, but success is not - // guaranteed. - // Clients can use - // Operations.GetOperation or - // other methods to check whether the cancellation succeeded or whether - // the - // operation completed despite cancellation. On successful - // cancellation, - // the operation is not deleted; instead, it becomes an operation - // with - // an Operation.error value with a google.rpc.Status.code of - // 1, - // corresponding to `Code.CANCELLED`. + // received. Operations.CancelOperation starts asynchronous cancellation + // on a long-running operation. The server makes a best effort to cancel + // the operation, but success is not guaranteed. Clients can use + // Operations.GetOperation or other methods to check whether the + // cancellation succeeded or whether the operation completed despite + // cancellation. On successful cancellation, the operation is not + // deleted; instead, it becomes an operation with an Operation.error + // value with a google.rpc.Status.code of 1, corresponding to + // `Code.CANCELLED`. CancelTime string `json:"cancelTime,omitempty"` // Database: The name of the database the backup is created from. @@ -783,8 +708,7 @@ type CreateBackupMetadata struct { // Name: The name of the backup being created. Name string `json:"name,omitempty"` - // Progress: The progress of the - // CreateBackup operation. 
+ // Progress: The progress of the CreateBackup operation. Progress *OperationProgress `json:"progress,omitempty"` // ForceSendFields is a list of field names (e.g. "CancelTime") to @@ -810,8 +734,7 @@ func (s *CreateBackupMetadata) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// CreateDatabaseMetadata: Metadata type for the operation returned -// by +// CreateDatabaseMetadata: Metadata type for the operation returned by // CreateDatabase. type CreateDatabaseMetadata struct { // Database: The database being created. @@ -843,21 +766,18 @@ func (s *CreateDatabaseMetadata) MarshalJSON() ([]byte, error) { // CreateDatabaseRequest: The request for CreateDatabase. type CreateDatabaseRequest struct { // CreateStatement: Required. A `CREATE DATABASE` statement, which - // specifies the ID of the - // new database. The database ID must conform to the regular - // expression - // `a-z*[a-z0-9]` and be between 2 and 30 characters in length. - // If the database ID is a reserved word or if it contains a hyphen, - // the - // database ID must be enclosed in backticks (`` ` ``). + // specifies the ID of the new database. The database ID must conform to + // the regular expression `a-z*[a-z0-9]` and be between 2 and 30 + // characters in length. If the database ID is a reserved word or if it + // contains a hyphen, the database ID must be enclosed in backticks (`` + // ` ``). CreateStatement string `json:"createStatement,omitempty"` // ExtraStatements: Optional. A list of DDL statements to run inside the - // newly created - // database. Statements can create tables, indexes, etc. - // These - // statements execute atomically with the creation of the database: - // if there is an error in any statement, the database is not created. + // newly created database. Statements can create tables, indexes, etc. + // These statements execute atomically with the creation of the + // database: if there is an error in any statement, the database is not + // created. ExtraStatements []string `json:"extraStatements,omitempty"` // ForceSendFields is a list of field names (e.g. "CreateStatement") to @@ -884,15 +804,12 @@ func (s *CreateDatabaseRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// CreateInstanceMetadata: Metadata type for the operation returned -// by +// CreateInstanceMetadata: Metadata type for the operation returned by // CreateInstance. type CreateInstanceMetadata struct { // CancelTime: The time at which this operation was cancelled. If set, - // this operation is - // in the process of undoing itself (which is guaranteed to succeed) - // and - // cannot be cancelled again. + // this operation is in the process of undoing itself (which is + // guaranteed to succeed) and cannot be cancelled again. CancelTime string `json:"cancelTime,omitempty"` // EndTime: The time at which this operation failed or was completed @@ -902,9 +819,7 @@ type CreateInstanceMetadata struct { // Instance: The instance being created. Instance *Instance `json:"instance,omitempty"` - // StartTime: The time at which the - // CreateInstance request was - // received. + // StartTime: The time at which the CreateInstance request was received. StartTime string `json:"startTime,omitempty"` // ForceSendFields is a list of field names (e.g. "CancelTime") to @@ -932,16 +847,13 @@ func (s *CreateInstanceMetadata) MarshalJSON() ([]byte, error) { // CreateInstanceRequest: The request for CreateInstance. 
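The CreateStatement rules for CreateDatabaseRequest above (IDs with a hyphen must be backtick-quoted, and ExtraStatements run atomically with creation) can be sketched as follows; the database name and schema are made up for the example:

```go
package main

import (
	"fmt"

	spanner "google.golang.org/api/spanner/v1"
)

func main() {
	req := &spanner.CreateDatabaseRequest{
		// The ID contains a hyphen, so it must be enclosed in backticks.
		CreateStatement: "CREATE DATABASE `orders-db`",
		// Executed atomically with database creation: if any statement
		// fails, the database is not created.
		ExtraStatements: []string{
			"CREATE TABLE Orders (OrderId INT64 NOT NULL, Total FLOAT64) PRIMARY KEY (OrderId)",
			"CREATE INDEX OrdersByTotal ON Orders (Total)",
		},
	}
	fmt.Printf("%+v\n", req)
}
```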
type CreateInstanceRequest struct { - // Instance: Required. The instance to create. The name may be omitted, - // but if - // specified must be `/instances/`. + // Instance: Required. The instance to create. The name may be omitted, + // but if specified must be `/instances/`. Instance *Instance `json:"instance,omitempty"` - // InstanceId: Required. The ID of the instance to create. Valid - // identifiers are of the - // form `a-z*[a-z0-9]` and must be between 2 and 64 characters - // in - // length. + // InstanceId: Required. The ID of the instance to create. Valid + // identifiers are of the form `a-z*[a-z0-9]` and must be between 2 and + // 64 characters in length. InstanceId string `json:"instanceId,omitempty"` // ForceSendFields is a list of field names (e.g. "Instance") to @@ -969,7 +881,7 @@ func (s *CreateInstanceRequest) MarshalJSON() ([]byte, error) { // CreateSessionRequest: The request for CreateSession. type CreateSessionRequest struct { - // Session: The session to create. + // Session: Required. The session to create. Session *Session `json:"session,omitempty"` // ForceSendFields is a list of field names (e.g. "Session") to @@ -1001,18 +913,14 @@ type Database struct { // creation started. CreateTime string `json:"createTime,omitempty"` - // Name: Required. The name of the database. Values are of the - // form - // `projects//instances//databases/`, - // w - // here `` is as specified in the `CREATE DATABASE` - // statement. This name can be passed to other API methods to - // identify the database. + // Name: Required. The name of the database. Values are of the form + // `projects//instances//databases/`, where `` is as specified in the + // `CREATE DATABASE` statement. This name can be passed to other API + // methods to identify the database. Name string `json:"name,omitempty"` // RestoreInfo: Output only. Applicable only for restored databases. - // Contains information - // about the restore source. + // Contains information about the restore source. RestoreInfo *RestoreInfo `json:"restoreInfo,omitempty"` // State: Output only. The current database state. @@ -1020,20 +928,14 @@ type Database struct { // Possible values: // "STATE_UNSPECIFIED" - Not specified. // "CREATING" - The database is still being created. Operations on the - // database may fail - // with `FAILED_PRECONDITION` in this state. + // database may fail with `FAILED_PRECONDITION` in this state. // "READY" - The database is fully created and ready for use. // "READY_OPTIMIZING" - The database is fully created and ready for - // use, but is still - // being optimized for performance and cannot handle full load. - // - // In this state, the database still references the backup - // it was restore from, preventing the backup - // from being deleted. When optimizations are complete, the full - // performance - // of the database will be restored, and the database will transition - // to - // `READY` state. + // use, but is still being optimized for performance and cannot handle + // full load. In this state, the database still references the backup it + // was restore from, preventing the backup from being deleted. When + // optimizations are complete, the full performance of the database will + // be restored, and the database will transition to `READY` state. State string `json:"state,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1066,15 +968,11 @@ func (s *Database) MarshalJSON() ([]byte, error) { // Delete: Arguments to delete operations. 
type Delete struct { // KeySet: Required. The primary keys of the rows within table to - // delete. The - // primary keys must be specified in the order in which they appear in - // the - // `PRIMARY KEY()` clause of the table's equivalent DDL statement (the - // DDL - // statement used to create the table). - // Delete is idempotent. The transaction will succeed even if some or - // all - // rows do not exist. + // delete. The primary keys must be specified in the order in which they + // appear in the `PRIMARY KEY()` clause of the table's equivalent DDL + // statement (the DDL statement used to create the table). Delete is + // idempotent. The transaction will succeed even if some or all rows do + // not exist. KeySet *KeySet `json:"keySet,omitempty"` // Table: Required. The table whose rows will be deleted. @@ -1104,17 +1002,11 @@ func (s *Delete) MarshalJSON() ([]byte, error) { } // Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -1124,40 +1016,27 @@ type Empty struct { // ExecuteBatchDmlRequest: The request for ExecuteBatchDml. type ExecuteBatchDmlRequest struct { // Seqno: Required. A per-transaction sequence number used to identify - // this request. This field - // makes each request idempotent such that if the request is received - // multiple - // times, at most one will succeed. - // - // The sequence number must be monotonically increasing within - // the + // this request. This field makes each request idempotent such that if + // the request is received multiple times, at most one will succeed. The + // sequence number must be monotonically increasing within the // transaction. If a request arrives for the first time with an - // out-of-order - // sequence number, the transaction may be aborted. Replays of - // previously - // handled requests will yield the same response as the first execution. + // out-of-order sequence number, the transaction may be aborted. Replays + // of previously handled requests will yield the same response as the + // first execution. Seqno int64 `json:"seqno,omitempty,string"` // Statements: Required. The list of statements to execute in this - // batch. Statements are executed - // serially, such that the effects of statement `i` are visible to - // statement - // `i+1`. Each statement must be a DML statement. Execution stops at - // the - // first failed statement; the remaining statements are not - // executed. - // - // Callers must provide at least one statement. + // batch. Statements are executed serially, such that the effects of + // statement `i` are visible to statement `i+1`. Each statement must be + // a DML statement. Execution stops at the first failed statement; the + // remaining statements are not executed. Callers must provide at least + // one statement. 
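The Delete arguments described above (a table plus a KeySet of primary keys, applied idempotently) look like this as a mutation; the table name and key values are assumptions for the example:

```go
package main

import (
	"fmt"

	spanner "google.golang.org/api/spanner/v1"
)

func main() {
	// Delete two specific Singers rows plus a half-open key range.
	// Delete is idempotent: keys that do not exist are simply skipped.
	m := &spanner.Mutation{
		Delete: &spanner.Delete{
			Table: "Singers",
			KeySet: &spanner.KeySet{
				Keys: [][]interface{}{{"1"}, {"2"}},
				Ranges: []*spanner.KeyRange{
					{StartClosed: []interface{}{"100"}, EndOpen: []interface{}{"200"}},
				},
			},
		},
	}
	fmt.Printf("%+v\n", m)
}
```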
Statements []*Statement `json:"statements,omitempty"` // Transaction: Required. The transaction to use. Must be a read-write - // transaction. - // - // To protect against replays, single-use transactions are not - // supported. The - // caller must either supply an existing transaction ID or begin a - // new - // transaction. + // transaction. To protect against replays, single-use transactions are + // not supported. The caller must either supply an existing transaction + // ID or begin a new transaction. Transaction *TransactionSelector `json:"transaction,omitempty"` // ForceSendFields is a list of field names (e.g. "Seqno") to @@ -1184,57 +1063,32 @@ func (s *ExecuteBatchDmlRequest) MarshalJSON() ([]byte, error) { } // ExecuteBatchDmlResponse: The response for ExecuteBatchDml. Contains a -// list -// of ResultSet messages, one for each DML statement that has -// successfully -// executed, in the same order as the statements in the request. If a -// statement -// fails, the status in the response body identifies the cause of the -// failure. -// -// To check for DML statements that failed, use the following -// approach: -// -// 1. Check the status in the response message. The google.rpc.Code -// enum -// value `OK` indicates that all statements were executed -// successfully. -// 2. If the status was not `OK`, check the number of result sets in -// the -// response. If the response contains `N` ResultSet messages, then -// statement `N+1` in the request failed. -// -// Example 1: -// -// * Request: 5 DML statements, all executed successfully. -// * Response: 5 ResultSet messages, with the status `OK`. -// -// Example 2: -// -// * Request: 5 DML statements. The third statement has a syntax -// error. -// * Response: 2 ResultSet messages, and a syntax error -// (`INVALID_ARGUMENT`) -// status. The number of ResultSet messages indicates that the third -// statement failed, and the fourth and fifth statements were not -// executed. +// list of ResultSet messages, one for each DML statement that has +// successfully executed, in the same order as the statements in the +// request. If a statement fails, the status in the response body +// identifies the cause of the failure. To check for DML statements that +// failed, use the following approach: 1. Check the status in the +// response message. The google.rpc.Code enum value `OK` indicates that +// all statements were executed successfully. 2. If the status was not +// `OK`, check the number of result sets in the response. If the +// response contains `N` ResultSet messages, then statement `N+1` in the +// request failed. Example 1: * Request: 5 DML statements, all executed +// successfully. * Response: 5 ResultSet messages, with the status `OK`. +// Example 2: * Request: 5 DML statements. The third statement has a +// syntax error. * Response: 2 ResultSet messages, and a syntax error +// (`INVALID_ARGUMENT`) status. The number of ResultSet messages +// indicates that the third statement failed, and the fourth and fifth +// statements were not executed. type ExecuteBatchDmlResponse struct { // ResultSets: One ResultSet for each statement in the request that ran - // successfully, - // in the same order as the statements in the request. Each ResultSet - // does - // not contain any rows. The ResultSetStats in each ResultSet - // contain - // the number of rows modified by the statement. - // - // Only the first ResultSet in the response contains - // valid - // ResultSetMetadata. 
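The failure-checking recipe in the ExecuteBatchDmlResponse comments (inspect the status, then count the result sets) amounts to the helper below. How the response is obtained is left out, the helper name is ours rather than part of the generated package, and the Status fields are assumed to be the usual generated google.rpc.Status shape:

```go
package main

import (
	"fmt"

	spanner "google.golang.org/api/spanner/v1"
)

// firstFailedStatement reports which DML statement in a batch failed, if any.
// With N statements and M < N result sets, statement M+1 (1-based) failed.
func firstFailedStatement(resp *spanner.ExecuteBatchDmlResponse, statements int) (int, bool) {
	if resp.Status == nil || resp.Status.Code == 0 { // google.rpc.Code OK
		return 0, false
	}
	if len(resp.ResultSets) < statements {
		return len(resp.ResultSets) + 1, true
	}
	return 0, false
}

func main() {
	// Simulated response: 5 statements sent, only 2 result sets returned.
	resp := &spanner.ExecuteBatchDmlResponse{
		ResultSets: make([]*spanner.ResultSet, 2),
		Status:     &spanner.Status{Code: 3, Message: "syntax error"}, // INVALID_ARGUMENT
	}
	if n, failed := firstFailedStatement(resp, 5); failed {
		fmt.Printf("statement %d failed: %s\n", n, resp.Status.Message)
	}
}
```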
+ // successfully, in the same order as the statements in the request. + // Each ResultSet does not contain any rows. The ResultSetStats in each + // ResultSet contain the number of rows modified by the statement. Only + // the first ResultSet in the response contains valid ResultSetMetadata. ResultSets []*ResultSet `json:"resultSets,omitempty"` // Status: If all DML statements are executed successfully, the status - // is `OK`. - // Otherwise, the error status of the first failed statement. + // is `OK`. Otherwise, the error status of the first failed statement. Status *Status `json:"status,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1264,63 +1118,47 @@ func (s *ExecuteBatchDmlResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ExecuteSqlRequest: The request for ExecuteSql -// and +// ExecuteSqlRequest: The request for ExecuteSql and // ExecuteStreamingSql. type ExecuteSqlRequest struct { // ParamTypes: It is not always possible for Cloud Spanner to infer the - // right SQL type - // from a JSON value. For example, values of type `BYTES` and values - // of type `STRING` both appear in params as JSON strings. - // - // In these cases, `param_types` can be used to specify the exact - // SQL type for some or all of the SQL statement parameters. See - // the - // definition of Type for more information - // about SQL types. + // right SQL type from a JSON value. For example, values of type `BYTES` + // and values of type `STRING` both appear in params as JSON strings. In + // these cases, `param_types` can be used to specify the exact SQL type + // for some or all of the SQL statement parameters. See the definition + // of Type for more information about SQL types. ParamTypes map[string]Type `json:"paramTypes,omitempty"` // Params: Parameter names and values that bind to placeholders in the - // SQL string. - // - // A parameter placeholder consists of the `@` character followed by - // the - // parameter name (for example, `@firstName`). Parameter names can - // contain - // letters, numbers, and underscores. - // - // Parameters can appear anywhere that a literal value is expected. The - // same - // parameter name can be used more than once, for example: - // - // "WHERE id > @msg_id AND id < @msg_id + 100" - // - // It is an error to execute a SQL statement with unbound parameters. + // SQL string. A parameter placeholder consists of the `@` character + // followed by the parameter name (for example, `@firstName`). Parameter + // names must conform to the naming requirements of identifiers as + // specified at + // https://cloud.google.com/spanner/docs/lexical#identifiers. Parameters + // can appear anywhere that a literal value is expected. The same + // parameter name can be used more than once, for example: "WHERE id > + // @msg_id AND id < @msg_id + 100" It is an error to execute a SQL + // statement with unbound parameters. Params googleapi.RawMessage `json:"params,omitempty"` // PartitionToken: If present, results will be restricted to the - // specified partition - // previously created using PartitionQuery(). There must be an - // exact - // match for the values of fields common to this message and - // the - // PartitionQueryRequest message used to create this partition_token. + // specified partition previously created using PartitionQuery(). 
There + // must be an exact match for the values of fields common to this + // message and the PartitionQueryRequest message used to create this + // partition_token. PartitionToken string `json:"partitionToken,omitempty"` // QueryMode: Used to control the amount of debugging information - // returned in - // ResultSetStats. If partition_token is set, query_mode can only - // be set to QueryMode.NORMAL. + // returned in ResultSetStats. If partition_token is set, query_mode can + // only be set to QueryMode.NORMAL. // // Possible values: // "NORMAL" - The default mode. Only the statement results are // returned. // "PLAN" - This mode returns only the query plan, without any results - // or - // execution statistics information. + // or execution statistics information. // "PROFILE" - This mode returns both the query plan and the execution - // statistics along - // with the results. + // statistics along with the results. QueryMode string `json:"queryMode,omitempty"` // QueryOptions: Query optimizer configuration to use for the given @@ -1328,52 +1166,33 @@ type ExecuteSqlRequest struct { QueryOptions *QueryOptions `json:"queryOptions,omitempty"` // ResumeToken: If this request is resuming a previously interrupted SQL - // statement - // execution, `resume_token` should be copied from the - // last - // PartialResultSet yielded before the interruption. Doing this - // enables the new SQL statement execution to resume where the last one - // left - // off. The rest of the request parameters must exactly match - // the + // statement execution, `resume_token` should be copied from the last + // PartialResultSet yielded before the interruption. Doing this enables + // the new SQL statement execution to resume where the last one left + // off. The rest of the request parameters must exactly match the // request that yielded this token. ResumeToken string `json:"resumeToken,omitempty"` // Seqno: A per-transaction sequence number used to identify this - // request. This field - // makes each request idempotent such that if the request is received - // multiple - // times, at most one will succeed. - // - // The sequence number must be monotonically increasing within - // the + // request. This field makes each request idempotent such that if the + // request is received multiple times, at most one will succeed. The + // sequence number must be monotonically increasing within the // transaction. If a request arrives for the first time with an - // out-of-order - // sequence number, the transaction may be aborted. Replays of - // previously - // handled requests will yield the same response as the first - // execution. - // - // Required for DML statements. Ignored for queries. + // out-of-order sequence number, the transaction may be aborted. Replays + // of previously handled requests will yield the same response as the + // first execution. Required for DML statements. Ignored for queries. Seqno int64 `json:"seqno,omitempty,string"` // Sql: Required. The SQL string. Sql string `json:"sql,omitempty"` - // Transaction: The transaction to use. - // - // For queries, if none is provided, the default is a temporary - // read-only - // transaction with strong concurrency. - // - // Standard DML statements require a read-write transaction. To - // protect - // against replays, single-use transactions are not supported. The - // caller - // must either supply an existing transaction ID or begin a new - // transaction. - // - // Partitioned DML requires an existing Partitioned DML transaction ID. 
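A hedged sketch of the parameter-binding rules above: an @-prefixed placeholder bound through params, with param_types available when the SQL type cannot be inferred from the JSON value. The query and values are invented for the example:

```go
package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
	spanner "google.golang.org/api/spanner/v1"
)

func main() {
	req := &spanner.ExecuteSqlRequest{
		// The same placeholder may be used more than once.
		Sql: "SELECT id, subject FROM Messages WHERE id > @msg_id AND id < @msg_id + 100",
		// Parameter names and values; INT64 values travel as JSON strings.
		Params: googleapi.RawMessage(`{"msg_id": "1000"}`),
		// ParamTypes could pin exact SQL types (e.g. BYTES vs STRING)
		// when they cannot be inferred from the JSON value.
		Seqno: 1, // required for DML statements, ignored for queries
	}
	fmt.Printf("%+v\n", req)
}
```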
+ // Transaction: The transaction to use. For queries, if none is + // provided, the default is a temporary read-only transaction with + // strong concurrency. Standard DML statements require a read-write + // transaction. To protect against replays, single-use transactions are + // not supported. The caller must either supply an existing transaction + // ID or begin a new transaction. Partitioned DML requires an existing + // Partitioned DML transaction ID. Transaction *TransactionSelector `json:"transaction,omitempty"` // ForceSendFields is a list of field names (e.g. "ParamTypes") to @@ -1400,65 +1219,40 @@ func (s *ExecuteSqlRequest) MarshalJSON() ([]byte, error) { } // Expr: Represents a textual expression in the Common Expression -// Language (CEL) -// syntax. CEL is a C-like expression language. The syntax and semantics -// of CEL -// are documented at https://github.com/google/cel-spec. -// -// Example (Comparison): -// -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" -// -// Example (Equality): -// -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == -// request.auth.claims.email" -// -// Example (Logic): -// -// title: "Public documents" -// description: "Determine whether the document should be publicly -// visible" -// expression: "document.type != 'private' && document.type != -// 'internal'" -// -// Example (Data Manipulation): -// -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + -// string(document.create_time)" -// -// The exact variables and functions that may be referenced within an -// expression -// are determined by the service that evaluates it. See the -// service -// documentation for additional information. +// Language (CEL) syntax. CEL is a C-like expression language. The +// syntax and semantics of CEL are documented at +// https://github.com/google/cel-spec. Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" +// description: "Determine whether the document should be publicly +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. type Expr struct { // Description: Optional. Description of the expression. This is a - // longer text which - // describes the expression, e.g. when hovered over it in a UI. + // longer text which describes the expression, e.g. when hovered over it + // in a UI. Description string `json:"description,omitempty"` // Expression: Textual representation of an expression in Common - // Expression Language - // syntax. + // Expression Language syntax. Expression string `json:"expression,omitempty"` // Location: Optional. 
String indicating the location of the expression - // for error - // reporting, e.g. a file name and a position in the file. + // for error reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` // Title: Optional. Title for the expression, i.e. a short string - // describing - // its purpose. This can be used e.g. in UIs which allow to enter - // the - // expression. + // describing its purpose. This can be used e.g. in UIs which allow to + // enter the expression. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -1486,15 +1280,12 @@ func (s *Expr) MarshalJSON() ([]byte, error) { // Field: Message representing a single field of a struct. type Field struct { - // Name: The name of the field. For reads, this is the column name. - // For - // SQL queries, it is the column alias (e.g., "Word" in the - // query "SELECT 'hello' AS Word"), or the column name - // (e.g., - // "ColName" in the query "SELECT ColName FROM Table"). Some - // columns might have an empty name (e.g., !"SELECT - // UPPER(ColName)"). Note that a query result can contain - // multiple fields with the same name. + // Name: The name of the field. For reads, this is the column name. For + // SQL queries, it is the column alias (e.g., "Word" in the query + // "SELECT 'hello' AS Word"), or the column name (e.g., "ColName" in + // the query "SELECT ColName FROM Table"). Some columns might have an + // empty name (e.g., !"SELECT UPPER(ColName)"). Note that a query + // result can contain multiple fields with the same name. Name string `json:"name,omitempty"` // Type: The type of the field. @@ -1526,8 +1317,7 @@ func (s *Field) MarshalJSON() ([]byte, error) { // GetDatabaseDdlResponse: The response for GetDatabaseDdl. type GetDatabaseDdlResponse struct { // Statements: A list of formatted DDL statements defining the schema of - // the database - // specified in the request. + // the database specified in the request. Statements []string `json:"statements,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1560,8 +1350,7 @@ func (s *GetDatabaseDdlResponse) MarshalJSON() ([]byte, error) { // GetIamPolicyRequest: Request message for `GetIamPolicy` method. type GetIamPolicyRequest struct { // Options: OPTIONAL: A `GetPolicyOptions` object for specifying options - // to - // `GetIamPolicy`. + // to `GetIamPolicy`. Options *GetPolicyOptions `json:"options,omitempty"` // ForceSendFields is a list of field names (e.g. "Options") to @@ -1590,17 +1379,14 @@ func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) { // GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. type GetPolicyOptions struct { // RequestedPolicyVersion: Optional. The policy format version to be - // returned. - // - // Valid values are 0, 1, and 3. Requests specifying an invalid value - // will be - // rejected. - // - // Requests for policies with any conditional bindings must specify - // version 3. - // Policies without any conditional bindings may specify any valid value - // or - // leave the field unset. + // returned. Valid values are 0, 1, and 3. Requests specifying an + // invalid value will be rejected. Requests for policies with any + // conditional bindings must specify version 3. Policies without any + // conditional bindings may specify any valid value or leave the field + // unset. 
To learn which resources support conditions in their IAM + // policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1632,92 +1418,63 @@ func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) { // databases can be hosted. type Instance struct { // Config: Required. The name of the instance's configuration. Values - // are of the form - // `projects//instanceConfigs/`. See - // also InstanceConfig and - // ListInstanceConfigs. + // are of the form `projects//instanceConfigs/`. See also InstanceConfig + // and ListInstanceConfigs. Config string `json:"config,omitempty"` // DisplayName: Required. The descriptive name for this instance as it - // appears in UIs. - // Must be unique per project and between 4 and 30 characters in length. + // appears in UIs. Must be unique per project and between 4 and 30 + // characters in length. DisplayName string `json:"displayName,omitempty"` // EndpointUris: Deprecated. This field is not populated. EndpointUris []string `json:"endpointUris,omitempty"` // Labels: Cloud Labels are a flexible and lightweight mechanism for - // organizing cloud - // resources into groups that reflect a customer's organizational needs - // and - // deployment strategies. Cloud Labels can be used to filter collections - // of - // resources. They can be used to control how resource metrics are - // aggregated. - // And they can be used as arguments to policy management rules (e.g. - // route, - // firewall, load balancing, etc.). - // - // * Label keys must be between 1 and 63 characters long and must - // conform to - // the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. - // * Label values must be between 0 and 63 characters long and must - // conform - // to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. - // * No more than 64 labels can be associated with a given - // resource. - // - // See https://goo.gl/xmQnxf for more information on and examples of - // labels. - // - // If you plan to use labels in your own code, please note that - // additional - // characters may be allowed in the future. And so you are advised to - // use an - // internal label representation, such as JSON, which doesn't rely - // upon - // specific characters being disallowed. For example, representing - // labels - // as the string: name + "_" + value would prove problematic if we - // were to + // organizing cloud resources into groups that reflect a customer's + // organizational needs and deployment strategies. Cloud Labels can be + // used to filter collections of resources. They can be used to control + // how resource metrics are aggregated. And they can be used as + // arguments to policy management rules (e.g. route, firewall, load + // balancing, etc.). * Label keys must be between 1 and 63 characters + // long and must conform to the following regular expression: + // `[a-z]([-a-z0-9]*[a-z0-9])?`. * Label values must be between 0 and 63 + // characters long and must conform to the regular expression + // `([a-z]([-a-z0-9]*[a-z0-9])?)?`. * No more than 64 labels can be + // associated with a given resource. See https://goo.gl/xmQnxf for more + // information on and examples of labels. If you plan to use labels in + // your own code, please note that additional characters may be allowed + // in the future. 
And so you are advised to use an internal label + // representation, such as JSON, which doesn't rely upon specific + // characters being disallowed. For example, representing labels as the + // string: name + "_" + value would prove problematic if we were to // allow "_" in a future release. Labels map[string]string `json:"labels,omitempty"` // Name: Required. A unique identifier for the instance, which cannot be - // changed - // after the instance is created. Values are of the - // form - // `projects//instances/a-z*[a-z0-9]`. The final - // segment of the name must be between 2 and 64 characters in length. + // changed after the instance is created. Values are of the form + // `projects//instances/a-z*[a-z0-9]`. The final segment of the name + // must be between 2 and 64 characters in length. Name string `json:"name,omitempty"` - // NodeCount: The number of nodes allocated to this instance. This - // may be zero in API responses for instances that are not yet in - // state - // `READY`. - // - // See - // [the - // documentation](https://cloud.google.com/spanner/docs/instances#no - // de_count) - // for more information about nodes. + // NodeCount: The number of nodes allocated to this instance. This may + // be zero in API responses for instances that are not yet in state + // `READY`. See [the + // documentation](https://cloud.google.com/spanner/docs/instances#node_co + // unt) for more information about nodes. NodeCount int64 `json:"nodeCount,omitempty"` - // State: Output only. The current instance state. For - // CreateInstance, the state must be - // either omitted or set to `CREATING`. For - // UpdateInstance, the state must be - // either omitted or set to `READY`. + // State: Output only. The current instance state. For CreateInstance, + // the state must be either omitted or set to `CREATING`. For + // UpdateInstance, the state must be either omitted or set to `READY`. // // Possible values: // "STATE_UNSPECIFIED" - Not specified. // "CREATING" - The instance is still being created. Resources may not - // be - // available yet, and operations such as database creation may not + // be available yet, and operations such as database creation may not // work. // "READY" - The instance is fully created and ready to do work such - // as - // creating databases. + // as creating databases. State string `json:"state,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1748,21 +1505,19 @@ func (s *Instance) MarshalJSON() ([]byte, error) { } // InstanceConfig: A possible configuration for a Cloud Spanner -// instance. Configurations -// define the geographic placement of nodes and their replication. +// instance. Configurations define the geographic placement of nodes and +// their replication. type InstanceConfig struct { // DisplayName: The name of this instance configuration as it appears in // UIs. DisplayName string `json:"displayName,omitempty"` - // Name: A unique identifier for the instance configuration. Values - // are of the form - // `projects//instanceConfigs/a-z*` + // Name: A unique identifier for the instance configuration. Values are + // of the form `projects//instanceConfigs/a-z*` Name string `json:"name,omitempty"` // Replicas: The geographic placement of nodes in this instance - // configuration and their - // replication properties. + // configuration and their replication properties. 
Replicas []*ReplicaInfo `json:"replicas,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1792,126 +1547,60 @@ func (s *InstanceConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// KeyRange: KeyRange represents a range of rows in a table or index. -// -// A range has a start key and an end key. These keys can be open -// or -// closed, indicating if the range includes rows with that key. -// -// Keys are represented by lists, where the ith value in the -// list -// corresponds to the ith component of the table or index primary -// key. -// Individual values are encoded as described -// here. -// -// For example, consider the following table definition: -// -// CREATE TABLE UserEvents ( -// UserName STRING(MAX), -// EventDate STRING(10) -// ) PRIMARY KEY(UserName, EventDate); -// -// The following keys name rows in this table: -// -// "Bob", "2014-09-23" -// -// Since the `UserEvents` table's `PRIMARY KEY` clause names -// two -// columns, each `UserEvents` key has two elements; the first is -// the -// `UserName`, and the second is the `EventDate`. -// -// Key ranges with multiple components are interpreted -// lexicographically by component using the table or index key's -// declared -// sort order. For example, the following range returns all events -// for -// user "Bob" that occurred in the year 2015: -// -// "start_closed": ["Bob", "2015-01-01"] -// "end_closed": ["Bob", "2015-12-31"] -// -// Start and end keys can omit trailing key components. This affects -// the -// inclusion and exclusion of rows that exactly match the provided -// key -// components: if the key is closed, then rows that exactly match -// the -// provided components are included; if the key is open, then rows -// that exactly match are not included. -// -// For example, the following range includes all events for "Bob" -// that -// occurred during and after the year 2000: -// -// "start_closed": ["Bob", "2000-01-01"] -// "end_closed": ["Bob"] -// -// The next example retrieves all events for "Bob": -// -// "start_closed": ["Bob"] -// "end_closed": ["Bob"] -// -// To retrieve events before the year 2000: -// -// "start_closed": ["Bob"] -// "end_open": ["Bob", "2000-01-01"] -// -// The following range includes all rows in the table: -// -// "start_closed": [] -// "end_closed": [] -// -// This range returns all users whose `UserName` begins with -// any -// character from A to C: -// -// "start_closed": ["A"] -// "end_open": ["D"] -// -// This range returns all users whose `UserName` begins with B: -// -// "start_closed": ["B"] -// "end_open": ["C"] -// -// Key ranges honor column sort order. For example, suppose a table -// is -// defined as follows: -// -// CREATE TABLE DescendingSortedTable { -// Key INT64, -// ... -// ) PRIMARY KEY(Key DESC); -// -// The following range retrieves all rows with key values between 1 -// and 100 inclusive: -// -// "start_closed": ["100"] -// "end_closed": ["1"] -// -// Note that 100 is passed as the start, and 1 is passed as the -// end, -// because `Key` is a descending column in the schema. +// KeyRange: KeyRange represents a range of rows in a table or index. A +// range has a start key and an end key. These keys can be open or +// closed, indicating if the range includes rows with that key. Keys are +// represented by lists, where the ith value in the list corresponds to +// the ith component of the table or index primary key. Individual +// values are encoded as described here. 
For example, consider the +// following table definition: CREATE TABLE UserEvents ( UserName +// STRING(MAX), EventDate STRING(10) ) PRIMARY KEY(UserName, EventDate); +// The following keys name rows in this table: "Bob", "2014-09-23" Since +// the `UserEvents` table's `PRIMARY KEY` clause names two columns, each +// `UserEvents` key has two elements; the first is the `UserName`, and +// the second is the `EventDate`. Key ranges with multiple components +// are interpreted lexicographically by component using the table or +// index key's declared sort order. For example, the following range +// returns all events for user "Bob" that occurred in the year 2015: +// "start_closed": ["Bob", "2015-01-01"] "end_closed": ["Bob", +// "2015-12-31"] Start and end keys can omit trailing key components. +// This affects the inclusion and exclusion of rows that exactly match +// the provided key components: if the key is closed, then rows that +// exactly match the provided components are included; if the key is +// open, then rows that exactly match are not included. For example, the +// following range includes all events for "Bob" that occurred during +// and after the year 2000: "start_closed": ["Bob", "2000-01-01"] +// "end_closed": ["Bob"] The next example retrieves all events for +// "Bob": "start_closed": ["Bob"] "end_closed": ["Bob"] To retrieve +// events before the year 2000: "start_closed": ["Bob"] "end_open": +// ["Bob", "2000-01-01"] The following range includes all rows in the +// table: "start_closed": [] "end_closed": [] This range returns all +// users whose `UserName` begins with any character from A to C: +// "start_closed": ["A"] "end_open": ["D"] This range returns all users +// whose `UserName` begins with B: "start_closed": ["B"] "end_open": +// ["C"] Key ranges honor column sort order. For example, suppose a +// table is defined as follows: CREATE TABLE DescendingSortedTable { Key +// INT64, ... ) PRIMARY KEY(Key DESC); The following range retrieves all +// rows with key values between 1 and 100 inclusive: "start_closed": +// ["100"] "end_closed": ["1"] Note that 100 is passed as the start, and +// 1 is passed as the end, because `Key` is a descending column in the +// schema. type KeyRange struct { // EndClosed: If the end is closed, then the range includes all rows - // whose - // first `len(end_closed)` key columns exactly match `end_closed`. + // whose first `len(end_closed)` key columns exactly match `end_closed`. EndClosed []interface{} `json:"endClosed,omitempty"` - // EndOpen: If the end is open, then the range excludes rows whose - // first + // EndOpen: If the end is open, then the range excludes rows whose first // `len(end_open)` key columns exactly match `end_open`. EndOpen []interface{} `json:"endOpen,omitempty"` // StartClosed: If the start is closed, then the range includes all rows - // whose - // first `len(start_closed)` key columns exactly match `start_closed`. + // whose first `len(start_closed)` key columns exactly match + // `start_closed`. StartClosed []interface{} `json:"startClosed,omitempty"` // StartOpen: If the start is open, then the range excludes rows whose - // first - // `len(start_open)` key columns exactly match `start_open`. + // first `len(start_open)` key columns exactly match `start_open`. StartOpen []interface{} `json:"startOpen,omitempty"` // ForceSendFields is a list of field names (e.g. 
"EndClosed") to @@ -1938,33 +1627,24 @@ func (s *KeyRange) MarshalJSON() ([]byte, error) { } // KeySet: `KeySet` defines a collection of Cloud Spanner keys and/or -// key ranges. All -// the keys are expected to be in the same table or index. The keys -// need -// not be sorted in any particular way. -// -// If the same key is specified multiple times in the set (for -// example -// if two ranges, two keys, or a key and a range overlap), Cloud -// Spanner +// key ranges. All the keys are expected to be in the same table or +// index. The keys need not be sorted in any particular way. If the same +// key is specified multiple times in the set (for example if two +// ranges, two keys, or a key and a range overlap), Cloud Spanner // behaves as if the key were only specified once. type KeySet struct { - // All: For convenience `all` can be set to `true` to indicate that - // this - // `KeySet` matches all keys in the table or index. Note that any - // keys + // All: For convenience `all` can be set to `true` to indicate that this + // `KeySet` matches all keys in the table or index. Note that any keys // specified in `keys` or `ranges` are only yielded once. All bool `json:"all,omitempty"` // Keys: A list of specific keys. Entries in `keys` should have exactly - // as - // many elements as there are columns in the primary or index key - // with which this `KeySet` is used. Individual key values are - // encoded as described here. + // as many elements as there are columns in the primary or index key + // with which this `KeySet` is used. Individual key values are encoded + // as described here. Keys [][]interface{} `json:"keys,omitempty"` - // Ranges: A list of key ranges. See KeyRange for more information - // about + // Ranges: A list of key ranges. See KeyRange for more information about // key range specifications. Ranges []*KeyRange `json:"ranges,omitempty"` @@ -1991,28 +1671,19 @@ func (s *KeySet) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ListBackupOperationsResponse: The response for -// ListBackupOperations. +// ListBackupOperationsResponse: The response for ListBackupOperations. type ListBackupOperationsResponse struct { - // NextPageToken: `next_page_token` can be sent in a - // subsequent - // ListBackupOperations - // call to fetch more of the matching metadata. + // NextPageToken: `next_page_token` can be sent in a subsequent + // ListBackupOperations call to fetch more of the matching metadata. NextPageToken string `json:"nextPageToken,omitempty"` - // Operations: The list of matching backup long-running - // operations. Each operation's name will be - // prefixed by the backup's name and the operation's - // metadata will be of type - // CreateBackupMetadata. Operations returned include those that - // are - // pending or have completed/failed/canceled within the last 7 - // days. - // Operations returned are ordered - // by - // `operation.metadata.value.progress.start_time` in descending order - // starting - // from the most recently started operation. + // Operations: The list of matching backup long-running operations. Each + // operation's name will be prefixed by the backup's name and the + // operation's metadata will be of type CreateBackupMetadata. Operations + // returned include those that are pending or have + // completed/failed/canceled within the last 7 days. 
Operations returned + // are ordered by `operation.metadata.value.progress.start_time` in + // descending order starting from the most recently started operation. Operations []*Operation `json:"operations,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2045,14 +1716,12 @@ func (s *ListBackupOperationsResponse) MarshalJSON() ([]byte, error) { // ListBackupsResponse: The response for ListBackups. type ListBackupsResponse struct { // Backups: The list of matching backups. Backups returned are ordered - // by `create_time` - // in descending order, starting from the most recent `create_time`. + // by `create_time` in descending order, starting from the most recent + // `create_time`. Backups []*Backup `json:"backups,omitempty"` - // NextPageToken: `next_page_token` can be sent in a - // subsequent - // ListBackups call to fetch more - // of the matching backups. + // NextPageToken: `next_page_token` can be sent in a subsequent + // ListBackups call to fetch more of the matching backups. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2082,21 +1751,17 @@ func (s *ListBackupsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ListDatabaseOperationsResponse: The response -// for +// ListDatabaseOperationsResponse: The response for // ListDatabaseOperations. type ListDatabaseOperationsResponse struct { - // NextPageToken: `next_page_token` can be sent in a - // subsequent - // ListDatabaseOperations - // call to fetch more of the matching metadata. + // NextPageToken: `next_page_token` can be sent in a subsequent + // ListDatabaseOperations call to fetch more of the matching metadata. NextPageToken string `json:"nextPageToken,omitempty"` - // Operations: The list of matching database long-running - // operations. Each operation's name will be - // prefixed by the database's name. The operation's - // metadata field type - // `metadata.type_url` describes the type of the metadata. + // Operations: The list of matching database long-running operations. + // Each operation's name will be prefixed by the database's name. The + // operation's metadata field type `metadata.type_url` describes the + // type of the metadata. Operations []*Operation `json:"operations,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2131,10 +1796,8 @@ type ListDatabasesResponse struct { // Databases: Databases that matched the request. Databases []*Database `json:"databases,omitempty"` - // NextPageToken: `next_page_token` can be sent in a - // subsequent - // ListDatabases call to fetch more - // of the matching databases. + // NextPageToken: `next_page_token` can be sent in a subsequent + // ListDatabases call to fetch more of the matching databases. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2169,10 +1832,9 @@ type ListInstanceConfigsResponse struct { // InstanceConfigs: The list of requested instance configurations. InstanceConfigs []*InstanceConfig `json:"instanceConfigs,omitempty"` - // NextPageToken: `next_page_token` can be sent in a - // subsequent - // ListInstanceConfigs call to - // fetch more of the matching instance configurations. + // NextPageToken: `next_page_token` can be sent in a subsequent + // ListInstanceConfigs call to fetch more of the matching instance + // configurations. 
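The next_page_token pattern repeated across the List* responses above reduces to a simple fetch loop. A sketch using only the ListDatabasesResponse type; fetchPage stands in for whatever issues the actual ListDatabases call and is not part of the generated package:

```go
package main

import (
	"fmt"

	spanner "google.golang.org/api/spanner/v1"
)

// listAllDatabases drains every page by resending the returned
// next_page_token until it comes back empty.
func listAllDatabases(fetchPage func(pageToken string) (*spanner.ListDatabasesResponse, error)) ([]*spanner.Database, error) {
	var all []*spanner.Database
	token := ""
	for {
		resp, err := fetchPage(token)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Databases...)
		if resp.NextPageToken == "" {
			return all, nil
		}
		token = resp.NextPageToken
	}
}

func main() {
	// Stubbed single-page fetch, just to make the sketch runnable.
	dbs, _ := listAllDatabases(func(string) (*spanner.ListDatabasesResponse, error) {
		return &spanner.ListDatabasesResponse{
			Databases: []*spanner.Database{{Name: "projects/p/instances/i/databases/d"}},
		}, nil
	})
	fmt.Println(len(dbs), "databases")
}
```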
NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2208,10 +1870,8 @@ type ListInstancesResponse struct { // Instances: The list of requested instances. Instances []*Instance `json:"instances,omitempty"` - // NextPageToken: `next_page_token` can be sent in a - // subsequent - // ListInstances call to fetch more - // of the matching instances. + // NextPageToken: `next_page_token` can be sent in a subsequent + // ListInstances call to fetch more of the matching instances. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2280,10 +1940,8 @@ func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { // ListSessionsResponse: The response for ListSessions. type ListSessionsResponse struct { - // NextPageToken: `next_page_token` can be sent in a - // subsequent - // ListSessions call to fetch more of the matching - // sessions. + // NextPageToken: `next_page_token` can be sent in a subsequent + // ListSessions call to fetch more of the matching sessions. NextPageToken string `json:"nextPageToken,omitempty"` // Sessions: The list of requested sessions. @@ -2316,49 +1974,36 @@ func (s *ListSessionsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Mutation: A modification to one or more Cloud Spanner rows. -// Mutations can be -// applied to a Cloud Spanner database by sending them in a +// Mutation: A modification to one or more Cloud Spanner rows. Mutations +// can be applied to a Cloud Spanner database by sending them in a // Commit call. type Mutation struct { - // Delete: Delete rows from a table. Succeeds whether or not the - // named + // Delete: Delete rows from a table. Succeeds whether or not the named // rows were present. Delete *Delete `json:"delete,omitempty"` - // Insert: Insert new rows in a table. If any of the rows already - // exist, + // Insert: Insert new rows in a table. If any of the rows already exist, // the write or transaction fails with error `ALREADY_EXISTS`. Insert *Write `json:"insert,omitempty"` // InsertOrUpdate: Like insert, except that if the row already exists, - // then - // its column values are overwritten with the ones provided. Any - // column values not explicitly written are preserved. - // - // When using insert_or_update, just as when using insert, all - // `NOT - // NULL` columns in the table must be given a value. This holds - // true - // even when the row already exists and will therefore actually be - // updated. + // then its column values are overwritten with the ones provided. Any + // column values not explicitly written are preserved. When using + // insert_or_update, just as when using insert, all `NOT NULL` columns + // in the table must be given a value. This holds true even when the row + // already exists and will therefore actually be updated. InsertOrUpdate *Write `json:"insertOrUpdate,omitempty"` - // Replace: Like insert, except that if the row already exists, it - // is - // deleted, and the column values provided are inserted - // instead. Unlike insert_or_update, this means any values - // not - // explicitly written become `NULL`. - // - // In an interleaved table, if you create the child table with the - // `ON DELETE CASCADE` annotation, then replacing a parent row - // also deletes the child rows. Otherwise, you must delete the - // child rows before you replace the parent row. 
+ // Replace: Like insert, except that if the row already exists, it is + // deleted, and the column values provided are inserted instead. Unlike + // insert_or_update, this means any values not explicitly written become + // `NULL`. In an interleaved table, if you create the child table with + // the `ON DELETE CASCADE` annotation, then replacing a parent row also + // deletes the child rows. Otherwise, you must delete the child rows + // before you replace the parent row. Replace *Write `json:"replace,omitempty"` - // Update: Update existing rows in a table. If any of the rows does - // not + // Update: Update existing rows in a table. If any of the rows does not // already exist, the transaction fails with error `NOT_FOUND`. Update *Write `json:"update,omitempty"` @@ -2386,52 +2031,38 @@ func (s *Mutation) MarshalJSON() ([]byte, error) { } // Operation: This resource represents a long-running operation that is -// the result of a -// network API call. +// the result of a network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in - // progress. - // If `true`, the operation is completed, and either `error` or - // `response` is - // available. + // progress. If `true`, the operation is completed, and either `error` + // or `response` is available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. Error *Status `json:"error,omitempty"` - // Metadata: Service-specific metadata associated with the operation. - // It typically - // contains progress information and common metadata such as create - // time. - // Some services might not provide such metadata. Any method that - // returns a - // long-running operation should document the metadata type, if any. + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same - // service that - // originally returns it. If you use the default HTTP mapping, - // the - // `name` should be a resource name ending with + // service that originally returns it. If you use the default HTTP + // mapping, the `name` should be a resource name ending with // `operations/{unique_id}`. Name string `json:"name,omitempty"` - // Response: The normal response of the operation in case of success. - // If the original - // method returns no data on success, such as `Delete`, the response - // is - // `google.protobuf.Empty`. If the original method is - // standard - // `Get`/`Create`/`Update`, the response should be the resource. For - // other - // methods, the response should have the type `XxxResponse`, where - // `Xxx` - // is the original method name. For example, if the original method - // name - // is `TakeSnapshot()`, the inferred response type - // is - // `TakeSnapshotResponse`. + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as `Delete`, the + // response is `google.protobuf.Empty`. If the original method is + // standard `Get`/`Create`/`Update`, the response should be the + // resource. 
For other methods, the response should have the type + // `XxxResponse`, where `Xxx` is the original method name. For example, + // if the original method name is `TakeSnapshot()`, the inferred + // response type is `TakeSnapshotResponse`. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2462,16 +2093,14 @@ func (s *Operation) MarshalJSON() ([]byte, error) { } // OperationProgress: Encapsulates progress related information for a -// Cloud Spanner long -// running operation. +// Cloud Spanner long running operation. type OperationProgress struct { // EndTime: If set, the time at which this operation failed or was - // completed - // successfully. + // completed successfully. EndTime string `json:"endTime,omitempty"` - // ProgressPercent: Percent completion of the operation. - // Values are between 0 and 100 inclusive. + // ProgressPercent: Percent completion of the operation. Values are + // between 0 and 100 inclusive. ProgressPercent int64 `json:"progressPercent,omitempty"` // StartTime: Time the request was received. @@ -2501,12 +2130,10 @@ func (s *OperationProgress) MarshalJSON() ([]byte, error) { } // OptimizeRestoredDatabaseMetadata: Metadata type for the long-running -// operation used to track the progress -// of optimizations performed on a newly restored database. This -// long-running -// operation is automatically created by the system after the -// successful -// completion of a database restore, and cannot be cancelled. +// operation used to track the progress of optimizations performed on a +// newly restored database. This long-running operation is automatically +// created by the system after the successful completion of a database +// restore, and cannot be cancelled. type OptimizeRestoredDatabaseMetadata struct { // Name: Name of the restored database being optimized. Name string `json:"name,omitempty"` @@ -2538,126 +2165,69 @@ func (s *OptimizeRestoredDatabaseMetadata) MarshalJSON() ([]byte, error) { } // PartialResultSet: Partial results from a streaming read or SQL query. -// Streaming reads and -// SQL queries better tolerate large result sets, large rows, and -// large -// values, but are a little trickier to consume. +// Streaming reads and SQL queries better tolerate large result sets, +// large rows, and large values, but are a little trickier to consume. type PartialResultSet struct { // ChunkedValue: If true, then the final value in values is chunked, and - // must - // be combined with more values from subsequent `PartialResultSet`s + // must be combined with more values from subsequent `PartialResultSet`s // to obtain a complete field value. ChunkedValue bool `json:"chunkedValue,omitempty"` // Metadata: Metadata about the result set, such as row type - // information. - // Only present in the first response. + // information. Only present in the first response. Metadata *ResultSetMetadata `json:"metadata,omitempty"` // ResumeToken: Streaming calls might be interrupted for a variety of - // reasons, such - // as TCP connection loss. If this occurs, the stream of results can - // be resumed by re-sending the original request and - // including - // `resume_token`. Note that executing any other transaction in the - // same session invalidates the token. + // reasons, such as TCP connection loss. If this occurs, the stream of + // results can be resumed by re-sending the original request and + // including `resume_token`. 
Note that executing any other transaction + // in the same session invalidates the token. ResumeToken string `json:"resumeToken,omitempty"` // Stats: Query plan and execution statistics for the statement that - // produced this - // streaming result set. These can be requested by - // setting - // ExecuteSqlRequest.query_mode and are sent - // only once with the last response in the stream. - // This field will also be present in the last response for - // DML - // statements. + // produced this streaming result set. These can be requested by setting + // ExecuteSqlRequest.query_mode and are sent only once with the last + // response in the stream. This field will also be present in the last + // response for DML statements. Stats *ResultSetStats `json:"stats,omitempty"` // Values: A streamed result set consists of a stream of values, which - // might - // be split into many `PartialResultSet` messages to accommodate - // large rows and/or large values. Every N complete values defines - // a - // row, where N is equal to the number of entries - // in - // metadata.row_type.fields. - // - // Most values are encoded based on type as described - // here. - // - // It is possible that the last value in values is "chunked", - // meaning that the rest of the value is sent in - // subsequent - // `PartialResultSet`(s). This is denoted by the chunked_value - // field. Two or more chunked values can be merged to form a - // complete value as follows: - // - // * `bool/number/null`: cannot be chunked - // * `string`: concatenate the strings - // * `list`: concatenate the lists. If the last element in a list is - // a - // `string`, `list`, or `object`, merge it with the first element - // in - // the next list by applying these rules recursively. - // * `object`: concatenate the (field name, field value) pairs. If a - // field name is duplicated, then apply these rules recursively - // to merge the field values. - // - // Some examples of merging: - // - // # Strings are concatenated. - // "foo", "bar" => "foobar" - // - // # Lists of non-strings are concatenated. - // [2, 3], [4] => [2, 3, 4] - // - // # Lists are concatenated, but the last and first elements are - // merged - // # because they are strings. - // ["a", "b"], ["c", "d"] => ["a", "bc", "d"] - // - // # Lists are concatenated, but the last and first elements are - // merged - // # because they are lists. Recursively, the last and first - // elements - // # of the inner lists are merged because they are strings. - // ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"] - // - // # Non-overlapping object fields are combined. - // {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"} - // - // # Overlapping object fields are merged. - // {"a": "1"}, {"a": "2"} => {"a": "12"} - // - // # Examples of merging objects containing lists of strings. - // {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]} - // - // For a more complete example, suppose a streaming SQL query - // is - // yielding a result set whose rows contain a single string - // field. The following `PartialResultSet`s might be yielded: - // - // { - // "metadata": { ... } - // "values": ["Hello", "W"] - // "chunked_value": true - // "resume_token": "Af65..." - // } - // { - // "values": ["orl"] - // "chunked_value": true - // "resume_token": "Bqp2..." - // } - // { - // "values": ["d"] - // "resume_token": "Zx1B..." 
- // } - // - // This sequence of `PartialResultSet`s encodes two rows, one - // containing the field value "Hello", and a second containing - // the - // field value "World" = "W" + "orl" + "d". + // might be split into many `PartialResultSet` messages to accommodate + // large rows and/or large values. Every N complete values defines a + // row, where N is equal to the number of entries in + // metadata.row_type.fields. Most values are encoded based on type as + // described here. It is possible that the last value in values is + // "chunked", meaning that the rest of the value is sent in subsequent + // `PartialResultSet`(s). This is denoted by the chunked_value field. + // Two or more chunked values can be merged to form a complete value as + // follows: * `bool/number/null`: cannot be chunked * `string`: + // concatenate the strings * `list`: concatenate the lists. If the last + // element in a list is a `string`, `list`, or `object`, merge it with + // the first element in the next list by applying these rules + // recursively. * `object`: concatenate the (field name, field value) + // pairs. If a field name is duplicated, then apply these rules + // recursively to merge the field values. Some examples of merging: # + // Strings are concatenated. "foo", "bar" => "foobar" # Lists of + // non-strings are concatenated. [2, 3], [4] => [2, 3, 4] # Lists are + // concatenated, but the last and first elements are merged # because + // they are strings. ["a", "b"], ["c", "d"] => ["a", "bc", "d"] # Lists + // are concatenated, but the last and first elements are merged # + // because they are lists. Recursively, the last and first elements # of + // the inner lists are merged because they are strings. ["a", ["b", + // "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"] # Non-overlapping + // object fields are combined. {"a": "1"}, {"b": "2"} => {"a": "1", "b": + // 2"} # Overlapping object fields are merged. {"a": "1"}, {"a": "2"} => + // {"a": "12"} # Examples of merging objects containing lists of + // strings. {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]} For a more + // complete example, suppose a streaming SQL query is yielding a result + // set whose rows contain a single string field. The following + // `PartialResultSet`s might be yielded: { "metadata": { ... } "values": + // ["Hello", "W"] "chunked_value": true "resume_token": "Af65..." } { + // "values": ["orl"] "chunked_value": true "resume_token": "Bqp2..." } { + // "values": ["d"] "resume_token": "Zx1B..." } This sequence of + // `PartialResultSet`s encodes two rows, one containing the field value + // "Hello", and a second containing the field value "World" = "W" + + // "orl" + "d". Values []interface{} `json:"values,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2687,15 +2257,12 @@ func (s *PartialResultSet) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Partition: Information returned for each partition returned in -// a +// Partition: Information returned for each partition returned in a // PartitionResponse. type Partition struct { // PartitionToken: This token can be passed to Read, StreamingRead, - // ExecuteSql, or - // ExecuteStreamingSql requests to restrict the results to those - // identified by - // this partition token. + // ExecuteSql, or ExecuteStreamingSql requests to restrict the results + // to those identified by this partition token. 
PartitionToken string `json:"partitionToken,omitempty"` // ForceSendFields is a list of field names (e.g. "PartitionToken") to @@ -2722,34 +2289,23 @@ func (s *Partition) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PartitionOptions: Options for a PartitionQueryRequest -// and +// PartitionOptions: Options for a PartitionQueryRequest and // PartitionReadRequest. type PartitionOptions struct { // MaxPartitions: **Note:** This hint is currently ignored by - // PartitionQuery and - // PartitionRead requests. - // - // The desired maximum number of partitions to return. For example, - // this may - // be set to the number of workers available. The default for this - // option - // is currently 10,000. The maximum value is currently 200,000. This is - // only - // a hint. The actual number of partitions returned may be smaller or - // larger + // PartitionQuery and PartitionRead requests. The desired maximum number + // of partitions to return. For example, this may be set to the number + // of workers available. The default for this option is currently + // 10,000. The maximum value is currently 200,000. This is only a hint. + // The actual number of partitions returned may be smaller or larger // than this maximum count request. MaxPartitions int64 `json:"maxPartitions,omitempty,string"` // PartitionSizeBytes: **Note:** This hint is currently ignored by - // PartitionQuery and - // PartitionRead requests. - // - // The desired data size for each partition generated. The default for - // this - // option is currently 1 GiB. This is only a hint. The actual size of - // each - // partition may be smaller or larger than this size request. + // PartitionQuery and PartitionRead requests. The desired data size for + // each partition generated. The default for this option is currently 1 + // GiB. This is only a hint. The actual size of each partition may be + // smaller or larger than this size request. PartitionSizeBytes int64 `json:"partitionSizeBytes,omitempty,string"` // ForceSendFields is a list of field names (e.g. "MaxPartitions") to @@ -2778,33 +2334,21 @@ func (s *PartitionOptions) MarshalJSON() ([]byte, error) { // PartitionQueryRequest: The request for PartitionQuery type PartitionQueryRequest struct { // ParamTypes: It is not always possible for Cloud Spanner to infer the - // right SQL type - // from a JSON value. For example, values of type `BYTES` and values - // of type `STRING` both appear in params as JSON strings. - // - // In these cases, `param_types` can be used to specify the exact - // SQL type for some or all of the SQL query parameters. See - // the - // definition of Type for more information - // about SQL types. + // right SQL type from a JSON value. For example, values of type `BYTES` + // and values of type `STRING` both appear in params as JSON strings. In + // these cases, `param_types` can be used to specify the exact SQL type + // for some or all of the SQL query parameters. See the definition of + // Type for more information about SQL types. ParamTypes map[string]Type `json:"paramTypes,omitempty"` // Params: Parameter names and values that bind to placeholders in the - // SQL string. - // - // A parameter placeholder consists of the `@` character followed by - // the - // parameter name (for example, `@firstName`). Parameter names can - // contain - // letters, numbers, and underscores. - // - // Parameters can appear anywhere that a literal value is expected. 
The - // same - // parameter name can be used more than once, for example: - // - // "WHERE id > @msg_id AND id < @msg_id + 100" - // - // It is an error to execute a SQL statement with unbound parameters. + // SQL string. A parameter placeholder consists of the `@` character + // followed by the parameter name (for example, `@firstName`). Parameter + // names can contain letters, numbers, and underscores. Parameters can + // appear anywhere that a literal value is expected. The same parameter + // name can be used more than once, for example: "WHERE id > @msg_id + // AND id < @msg_id + 100" It is an error to execute a SQL statement + // with unbound parameters. Params googleapi.RawMessage `json:"params,omitempty"` // PartitionOptions: Additional options that affect how many partitions @@ -2812,27 +2356,18 @@ type PartitionQueryRequest struct { PartitionOptions *PartitionOptions `json:"partitionOptions,omitempty"` // Sql: Required. The query request to generate partitions for. The - // request will fail if - // the query is not root partitionable. The query plan of a - // root - // partitionable query has a single distributed union operator. A - // distributed - // union operator conceptually divides one or more tables into - // multiple - // splits, remotely evaluates a subquery independently on each split, - // and - // then unions all results. - // - // This must not contain DML commands, such as INSERT, UPDATE, - // or - // DELETE. Use ExecuteStreamingSql with a - // PartitionedDml transaction for large, partition-friendly DML - // operations. + // request will fail if the query is not root partitionable. The query + // plan of a root partitionable query has a single distributed union + // operator. A distributed union operator conceptually divides one or + // more tables into multiple splits, remotely evaluates a subquery + // independently on each split, and then unions all results. This must + // not contain DML commands, such as INSERT, UPDATE, or DELETE. Use + // ExecuteStreamingSql with a PartitionedDml transaction for large, + // partition-friendly DML operations. Sql string `json:"sql,omitempty"` // Transaction: Read only snapshot transactions are supported, - // read/write and single use - // transactions are not. + // read/write and single use transactions are not. Transaction *TransactionSelector `json:"transaction,omitempty"` // ForceSendFields is a list of field names (e.g. "ParamTypes") to @@ -2860,25 +2395,21 @@ func (s *PartitionQueryRequest) MarshalJSON() ([]byte, error) { // PartitionReadRequest: The request for PartitionRead type PartitionReadRequest struct { - // Columns: The columns of table to be returned for each row - // matching + // Columns: The columns of table to be returned for each row matching // this request. Columns []string `json:"columns,omitempty"` - // Index: If non-empty, the name of an index on table. This index - // is - // used instead of the table primary key when interpreting key_set - // and sorting result rows. See key_set for further information. + // Index: If non-empty, the name of an index on table. This index is + // used instead of the table primary key when interpreting key_set and + // sorting result rows. See key_set for further information. Index string `json:"index,omitempty"` // KeySet: Required. `key_set` identifies the rows to be yielded. - // `key_set` names the - // primary keys of the rows in table to be yielded, unless index - // is present. If index is present, then key_set instead names - // index keys in index. 
- // - // It is not an error for the `key_set` to name rows that do not - // exist in the database. Read yields nothing for nonexistent rows. + // `key_set` names the primary keys of the rows in table to be yielded, + // unless index is present. If index is present, then key_set instead + // names index keys in index. It is not an error for the `key_set` to + // name rows that do not exist in the database. Read yields nothing for + // nonexistent rows. KeySet *KeySet `json:"keySet,omitempty"` // PartitionOptions: Additional options that affect how many partitions @@ -2889,8 +2420,7 @@ type PartitionReadRequest struct { Table string `json:"table,omitempty"` // Transaction: Read only snapshot transactions are supported, - // read/write and single use - // transactions are not. + // read/write and single use transactions are not. Transaction *TransactionSelector `json:"transaction,omitempty"` // ForceSendFields is a list of field names (e.g. "Columns") to @@ -2916,8 +2446,7 @@ func (s *PartitionReadRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PartitionResponse: The response for PartitionQuery -// or PartitionRead +// PartitionResponse: The response for PartitionQuery or PartitionRead type PartitionResponse struct { // Partitions: Partitions created by this request. Partitions []*Partition `json:"partitions,omitempty"` @@ -2968,52 +2497,36 @@ type PlanNode struct { DisplayName string `json:"displayName,omitempty"` // ExecutionStats: The execution statistics associated with the node, - // contained in a group of - // key-value pairs. Only present if the plan was returned as a result of - // a - // profile query. For example, number of executions, number of rows/time - // per - // execution etc. + // contained in a group of key-value pairs. Only present if the plan was + // returned as a result of a profile query. For example, number of + // executions, number of rows/time per execution etc. ExecutionStats googleapi.RawMessage `json:"executionStats,omitempty"` // Index: The `PlanNode`'s index in node list. Index int64 `json:"index,omitempty"` // Kind: Used to determine the type of node. May be needed for - // visualizing - // different kinds of nodes differently. For example, If the node is - // a - // SCALAR node, it will have a condensed representation - // which can be used to directly embed a description of the node in - // its + // visualizing different kinds of nodes differently. For example, If the + // node is a SCALAR node, it will have a condensed representation which + // can be used to directly embed a description of the node in its // parent. // // Possible values: // "KIND_UNSPECIFIED" - Not specified. // "RELATIONAL" - Denotes a Relational operator node in the expression - // tree. Relational - // operators represent iterative processing of rows during query - // execution. - // For example, a `TableScan` operation that reads rows from a table. + // tree. Relational operators represent iterative processing of rows + // during query execution. For example, a `TableScan` operation that + // reads rows from a table. // "SCALAR" - Denotes a Scalar node in the expression tree. Scalar - // nodes represent - // non-iterable entities in the query plan. For example, constants - // or - // arithmetic operators appearing inside predicate expressions or - // references - // to column names. + // nodes represent non-iterable entities in the query plan. 
For example, + // constants or arithmetic operators appearing inside predicate + // expressions or references to column names. Kind string `json:"kind,omitempty"` // Metadata: Attributes relevant to the node contained in a group of - // key-value pairs. - // For example, a Parameter Reference node could have the - // following - // information in its metadata: - // - // { - // "parameter_reference": "param1", - // "parameter_type": "array" - // } + // key-value pairs. For example, a Parameter Reference node could have + // the following information in its metadata: { "parameter_reference": + // "param1", "parameter_type": "array" } Metadata googleapi.RawMessage `json:"metadata,omitempty"` // ShortRepresentation: Condensed representation for SCALAR nodes. @@ -3043,139 +2556,75 @@ func (s *PlanNode) MarshalJSON() ([]byte, error) { } // Policy: An Identity and Access Management (IAM) policy, which -// specifies access -// controls for Google Cloud resources. -// -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or -// more -// `members` to a single `role`. Members can be user accounts, service -// accounts, +// specifies access controls for Google Cloud resources. A `Policy` is a +// collection of `bindings`. A `binding` binds one or more `members` to +// a single `role`. Members can be user accounts, service accounts, // Google groups, and domains (such as G Suite). A `role` is a named -// list of -// permissions; each `role` can be an IAM predefined role or a -// user-created -// custom role. -// -// Optionally, a `binding` can specify a `condition`, which is a -// logical +// list of permissions; each `role` can be an IAM predefined role or a +// user-created custom role. For some types of Google Cloud resources, a +// `binding` can also specify a `condition`, which is a logical // expression that allows access to a resource only if the expression -// evaluates -// to `true`. A condition can add constraints based on attributes of -// the -// request, the resource, or both. -// -// **JSON example:** -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": ["user:eve@example.com"], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// -// **YAML example:** -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// - etag: BwWWja0YfJA= -// - version: 3 -// -// For a description of IAM and its features, see the -// [IAM documentation](https://cloud.google.com/iam/docs/). +// evaluates to `true`. A condition can add constraints based on +// attributes of the request, the resource, or both. 
To learn which +// resources support conditions in their IAM policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-p +// olicies). **JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: +// 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). type Policy struct { // Bindings: Associates a list of `members` to a `role`. Optionally, may - // specify a - // `condition` that determines how and when the `bindings` are applied. - // Each - // of the `bindings` must contain at least one member. + // specify a `condition` that determines how and when the `bindings` are + // applied. Each of the `bindings` must contain at least one member. Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. - // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. **Important:** If you use IAM + // Conditions, you must include the `etag` field whenever you call + // `setIamPolicy`. If you omit this field, then IAM allows you to + // overwrite a version `3` policy with a version `1` policy, and all of // the conditions in the version `3` policy are lost. Etag string `json:"etag,omitempty"` - // Version: Specifies the format of the policy. 
- // - // Valid values are `0`, `1`, and `3`. Requests that specify an invalid - // value - // are rejected. - // + // Version: Specifies the format of the policy. Valid values are `0`, + // `1`, and `3`. Requests that specify an invalid value are rejected. // Any operation that affects conditional role bindings must specify - // version - // `3`. This requirement applies to the following operations: - // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a - // policy - // that includes conditions - // - // **Important:** If you use IAM Conditions, you must include the `etag` - // field - // whenever you call `setIamPolicy`. If you omit this field, then IAM - // allows - // you to overwrite a version `3` policy with a version `1` policy, and - // all of - // the conditions in the version `3` policy are lost. - // - // If a policy does not include any conditions, operations on that - // policy may - // specify any valid version or leave the field unset. + // version `3`. This requirement applies to the following operations: * + // Getting a policy that includes a conditional role binding * Adding a + // conditional role binding to a policy * Changing a conditional role + // binding in a policy * Removing any role binding, with or without a + // condition, from a policy that includes conditions **Important:** If + // you use IAM Conditions, you must include the `etag` field whenever + // you call `setIamPolicy`. If you omit this field, then IAM allows you + // to overwrite a version `3` policy with a version `1` policy, and all + // of the conditions in the version `3` policy are lost. If a policy + // does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. To learn which + // resources support conditions in their IAM policies, see the [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-p + // olicies). Version int64 `json:"version,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -3208,30 +2657,17 @@ func (s *Policy) MarshalJSON() ([]byte, error) { // QueryOptions: Query optimizer configuration. type QueryOptions struct { // OptimizerVersion: An option to control the selection of optimizer - // version. - // - // This parameter allows individual queries to pick different - // query - // optimizer versions. - // - // Specifying "latest" as a value instructs Cloud Spanner to use - // the - // latest supported query optimizer version. If not specified, Cloud - // Spanner - // uses optimizer version set at the database level options. Any - // other - // positive integer (from the list of supported optimizer - // versions) - // overrides the default optimizer version for query execution. - // The list of supported optimizer versions can be queried - // from - // SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. Executing a SQL - // statement - // with an invalid optimizer version will fail with a syntax - // error - // (`INVALID_ARGUMENT`) status. - // - // The `optimizer_version` statement hint has precedence over this + // version. This parameter allows individual queries to pick different + // query optimizer versions. Specifying "latest" as a value instructs + // Cloud Spanner to use the latest supported query optimizer version. 
If + // not specified, Cloud Spanner uses optimizer version set at the + // database level options. Any other positive integer (from the list of + // supported optimizer versions) overrides the default optimizer version + // for query execution. The list of supported optimizer versions can be + // queried from SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. Executing a + // SQL statement with an invalid optimizer version will fail with a + // syntax error (`INVALID_ARGUMENT`) status. See + // https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer for more information on managing the query optimizer. The `optimizer_version` statement hint has precedence over this // setting. OptimizerVersion string `json:"optimizerVersion,omitempty"` @@ -3263,10 +2699,8 @@ func (s *QueryOptions) MarshalJSON() ([]byte, error) { // plan. type QueryPlan struct { // PlanNodes: The nodes in the query plan. Plan nodes are returned in - // pre-order starting - // with the plan root. Each PlanNode's `id` corresponds to its index - // in - // `plan_nodes`. + // pre-order starting with the plan root. Each PlanNode's `id` + // corresponds to its index in `plan_nodes`. PlanNodes []*PlanNode `json:"planNodes,omitempty"` // ForceSendFields is a list of field names (e.g. "PlanNodes") to @@ -3295,83 +2729,53 @@ func (s *QueryPlan) MarshalJSON() ([]byte, error) { // ReadOnly: Message type to initiate a read-only transaction. type ReadOnly struct { // ExactStaleness: Executes all reads at a timestamp that is - // `exact_staleness` - // old. The timestamp is chosen soon after the read is - // started. - // - // Guarantees that all writes that have committed more than - // the - // specified number of seconds ago are visible. Because Cloud - // Spanner - // chooses the exact timestamp, this mode works even if the - // client's - // local clock is substantially skewed from Cloud Spanner - // commit - // timestamps. - // - // Useful for reading at nearby replicas without the - // distributed - // timestamp negotiation overhead of `max_staleness`. + // `exact_staleness` old. The timestamp is chosen soon after the read is + // started. Guarantees that all writes that have committed more than the + // specified number of seconds ago are visible. Because Cloud Spanner + // chooses the exact timestamp, this mode works even if the client's + // local clock is substantially skewed from Cloud Spanner commit + // timestamps. Useful for reading at nearby replicas without the + // distributed timestamp negotiation overhead of `max_staleness`. ExactStaleness string `json:"exactStaleness,omitempty"` - // MaxStaleness: Read data at a timestamp >= `NOW - - // max_staleness` - // seconds. Guarantees that all writes that have committed more - // than the specified number of seconds ago are visible. Because - // Cloud Spanner chooses the exact timestamp, this mode works even - // if - // the client's local clock is substantially skewed from Cloud - // Spanner - // commit timestamps. - // - // Useful for reading the freshest data available at a nearby - // replica, while bounding the possible staleness if the local - // replica has fallen behind. - // - // Note that this option can only be used in single-use - // transactions. + // MaxStaleness: Read data at a timestamp >= `NOW - max_staleness` + // seconds. Guarantees that all writes that have committed more than the + // specified number of seconds ago are visible. 
Because Cloud Spanner + // chooses the exact timestamp, this mode works even if the client's + // local clock is substantially skewed from Cloud Spanner commit + // timestamps. Useful for reading the freshest data available at a + // nearby replica, while bounding the possible staleness if the local + // replica has fallen behind. Note that this option can only be used in + // single-use transactions. MaxStaleness string `json:"maxStaleness,omitempty"` // MinReadTimestamp: Executes all reads at a timestamp >= - // `min_read_timestamp`. - // - // This is useful for requesting fresher data than some previous - // read, or data that is fresh enough to observe the effects of - // some - // previously committed transaction whose timestamp is known. - // - // Note that this option can only be used in single-use transactions. - // - // A timestamp in RFC3339 UTC \"Zulu\" format, accurate to - // nanoseconds. - // Example: "2014-10-02T15:01:23.045123456Z". + // `min_read_timestamp`. This is useful for requesting fresher data than + // some previous read, or data that is fresh enough to observe the + // effects of some previously committed transaction whose timestamp is + // known. Note that this option can only be used in single-use + // transactions. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to + // nanoseconds. Example: "2014-10-02T15:01:23.045123456Z". MinReadTimestamp string `json:"minReadTimestamp,omitempty"` // ReadTimestamp: Executes all reads at the given timestamp. Unlike - // other modes, - // reads at a specific timestamp are repeatable; the same read at - // the same timestamp always returns the same data. If the - // timestamp is in the future, the read will block until the - // specified timestamp, modulo the read's deadline. - // - // Useful for large scale consistent reads such as mapreduces, or - // for coordinating many reads against a consistent snapshot of - // the - // data. - // - // A timestamp in RFC3339 UTC \"Zulu\" format, accurate to - // nanoseconds. - // Example: "2014-10-02T15:01:23.045123456Z". + // other modes, reads at a specific timestamp are repeatable; the same + // read at the same timestamp always returns the same data. If the + // timestamp is in the future, the read will block until the specified + // timestamp, modulo the read's deadline. Useful for large scale + // consistent reads such as mapreduces, or for coordinating many reads + // against a consistent snapshot of the data. A timestamp in RFC3339 UTC + // \"Zulu\" format, accurate to nanoseconds. Example: + // "2014-10-02T15:01:23.045123456Z". ReadTimestamp string `json:"readTimestamp,omitempty"` // ReturnReadTimestamp: If true, the Cloud Spanner-selected read - // timestamp is included in - // the Transaction message that describes the transaction. + // timestamp is included in the Transaction message that describes the + // transaction. ReturnReadTimestamp bool `json:"returnReadTimestamp,omitempty"` // Strong: Read at a timestamp where all previously committed - // transactions - // are visible. + // transactions are visible. Strong bool `json:"strong,omitempty"` // ForceSendFields is a list of field names (e.g. "ExactStaleness") to @@ -3398,66 +2802,52 @@ func (s *ReadOnly) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ReadRequest: The request for Read and -// StreamingRead. +// ReadRequest: The request for Read and StreamingRead. type ReadRequest struct { // Columns: Required. 
The columns of table to be returned for each row - // matching - // this request. + // matching this request. Columns []string `json:"columns,omitempty"` - // Index: If non-empty, the name of an index on table. This index - // is - // used instead of the table primary key when interpreting key_set - // and sorting result rows. See key_set for further information. + // Index: If non-empty, the name of an index on table. This index is + // used instead of the table primary key when interpreting key_set and + // sorting result rows. See key_set for further information. Index string `json:"index,omitempty"` // KeySet: Required. `key_set` identifies the rows to be yielded. - // `key_set` names the - // primary keys of the rows in table to be yielded, unless index - // is present. If index is present, then key_set instead names - // index keys in index. - // - // If the partition_token field is empty, rows are yielded - // in table primary key order (if index is empty) or index key order - // (if index is non-empty). If the partition_token field is not - // empty, rows will be yielded in an unspecified order. - // - // It is not an error for the `key_set` to name rows that do not - // exist in the database. Read yields nothing for nonexistent rows. + // `key_set` names the primary keys of the rows in table to be yielded, + // unless index is present. If index is present, then key_set instead + // names index keys in index. If the partition_token field is empty, + // rows are yielded in table primary key order (if index is empty) or + // index key order (if index is non-empty). If the partition_token field + // is not empty, rows will be yielded in an unspecified order. It is not + // an error for the `key_set` to name rows that do not exist in the + // database. Read yields nothing for nonexistent rows. KeySet *KeySet `json:"keySet,omitempty"` // Limit: If greater than zero, only the first `limit` rows are yielded. - // If `limit` - // is zero, the default is no limit. A limit cannot be specified - // if - // `partition_token` is set. + // If `limit` is zero, the default is no limit. A limit cannot be + // specified if `partition_token` is set. Limit int64 `json:"limit,omitempty,string"` // PartitionToken: If present, results will be restricted to the - // specified partition - // previously created using PartitionRead(). There must be an - // exact - // match for the values of fields common to this message and - // the - // PartitionReadRequest message used to create this partition_token. + // specified partition previously created using PartitionRead(). There + // must be an exact match for the values of fields common to this + // message and the PartitionReadRequest message used to create this + // partition_token. PartitionToken string `json:"partitionToken,omitempty"` // ResumeToken: If this request is resuming a previously interrupted - // read, - // `resume_token` should be copied from the last - // PartialResultSet yielded before the interruption. Doing this - // enables the new read to resume where the last read left off. The - // rest of the request parameters must exactly match the request - // that yielded this token. + // read, `resume_token` should be copied from the last PartialResultSet + // yielded before the interruption. Doing this enables the new read to + // resume where the last read left off. The rest of the request + // parameters must exactly match the request that yielded this token. ResumeToken string `json:"resumeToken,omitempty"` // Table: Required. 
The name of the table in the database to be read. Table string `json:"table,omitempty"` // Transaction: The transaction to use. If none is provided, the default - // is a - // temporary read-only transaction with strong concurrency. + // is a temporary read-only transaction with strong concurrency. Transaction *TransactionSelector `json:"transaction,omitempty"` // ForceSendFields is a list of field names (e.g. "Columns") to @@ -3484,19 +2874,16 @@ func (s *ReadRequest) MarshalJSON() ([]byte, error) { } // ReadWrite: Message type to initiate a read-write transaction. -// Currently this -// transaction type has no options. +// Currently this transaction type has no options. type ReadWrite struct { } type ReplicaInfo struct { // DefaultLeaderLocation: If true, this location is designated as the - // default leader location where - // leader replicas are placed. See the [region - // types - // documentation](https://cloud.google.com/spanner/docs/instances#r - // egion_types) - // for more details. + // default leader location where leader replicas are placed. See the + // [region types + // documentation](https://cloud.google.com/spanner/docs/instances#region_ + // types) for more details. DefaultLeaderLocation bool `json:"defaultLeaderLocation,omitempty"` // Location: The location of the serving resources, e.g. "us-central1". @@ -3507,29 +2894,18 @@ type ReplicaInfo struct { // Possible values: // "TYPE_UNSPECIFIED" - Not specified. // "READ_WRITE" - Read-write replicas support both reads and writes. - // These replicas: - // - // * Maintain a full copy of your data. - // * Serve reads. - // * Can vote whether to commit a write. - // * Participate in leadership election. - // * Are eligible to become a leader. + // These replicas: * Maintain a full copy of your data. * Serve reads. * + // Can vote whether to commit a write. * Participate in leadership + // election. * Are eligible to become a leader. // "READ_ONLY" - Read-only replicas only support reads (not writes). - // Read-only replicas: - // - // * Maintain a full copy of your data. - // * Serve reads. - // * Do not participate in voting to commit writes. - // * Are not eligible to become a leader. + // Read-only replicas: * Maintain a full copy of your data. * Serve + // reads. * Do not participate in voting to commit writes. * Are not + // eligible to become a leader. // "WITNESS" - Witness replicas don't support reads but do participate - // in voting to - // commit writes. Witness replicas: - // - // * Do not maintain a full copy of data. - // * Do not serve reads. - // * Vote whether to commit writes. - // * Participate in leader election but are not eligible to become - // leader. + // in voting to commit writes. Witness replicas: * Do not maintain a + // full copy of data. * Do not serve reads. * Vote whether to commit + // writes. * Participate in leader election but are not eligible to + // become leader. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -3558,61 +2934,41 @@ func (s *ReplicaInfo) MarshalJSON() ([]byte, error) { } // RestoreDatabaseMetadata: Metadata type for the long-running operation -// returned by -// RestoreDatabase. +// returned by RestoreDatabase. type RestoreDatabaseMetadata struct { // BackupInfo: Information about the backup used to restore the // database. BackupInfo *BackupInfo `json:"backupInfo,omitempty"` // CancelTime: The time at which cancellation of this operation was - // received. 
- // Operations.CancelOperation - // starts asynchronous cancellation on a long-running operation. The - // server - // makes a best effort to cancel the operation, but success is not - // guaranteed. - // Clients can use - // Operations.GetOperation or - // other methods to check whether the cancellation succeeded or whether - // the - // operation completed despite cancellation. On successful - // cancellation, - // the operation is not deleted; instead, it becomes an operation - // with - // an Operation.error value with a - // google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`. + // received. Operations.CancelOperation starts asynchronous cancellation + // on a long-running operation. The server makes a best effort to cancel + // the operation, but success is not guaranteed. Clients can use + // Operations.GetOperation or other methods to check whether the + // cancellation succeeded or whether the operation completed despite + // cancellation. On successful cancellation, the operation is not + // deleted; instead, it becomes an operation with an Operation.error + // value with a google.rpc.Status.code of 1, corresponding to + // `Code.CANCELLED`. CancelTime string `json:"cancelTime,omitempty"` // Name: Name of the database being created and restored to. Name string `json:"name,omitempty"` // OptimizeDatabaseOperationName: If exists, the name of the - // long-running operation that will be used to - // track the post-restore optimization process to optimize the - // performance of - // the restored database, and remove the dependency on the restore - // source. - // The name is of the - // form - // `projects//instances//databases//ope - // rations/` - // where the is the name of database being created and - // restored to. - // The metadata type of the long-running operation - // is - // OptimizeRestoredDatabaseMetadata. This long-running operation will - // be + // long-running operation that will be used to track the post-restore + // optimization process to optimize the performance of the restored + // database, and remove the dependency on the restore source. The name + // is of the form `projects//instances//databases//operations/` where + // the is the name of database being created and restored to. The + // metadata type of the long-running operation is + // OptimizeRestoredDatabaseMetadata. This long-running operation will be // automatically created by the system after the RestoreDatabase - // long-running - // operation completes successfully. This operation will not be created - // if the - // restore was not successful. + // long-running operation completes successfully. This operation will + // not be created if the restore was not successful. OptimizeDatabaseOperationName string `json:"optimizeDatabaseOperationName,omitempty"` - // Progress: The progress of the - // RestoreDatabase - // operation. + // Progress: The progress of the RestoreDatabase operation. Progress *OperationProgress `json:"progress,omitempty"` // SourceType: The type of the restore source. @@ -3645,22 +3001,16 @@ func (s *RestoreDatabaseMetadata) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RestoreDatabaseRequest: The request for -// RestoreDatabase. +// RestoreDatabaseRequest: The request for RestoreDatabase. type RestoreDatabaseRequest struct { - // Backup: Name of the backup from which to restore. Values are of the - // form - // `projects//instances//backups/`. + // Backup: Name of the backup from which to restore. 
Values are of the + // form `projects//instances//backups/`. Backup string `json:"backup,omitempty"` // DatabaseId: Required. The id of the database to create and restore - // to. This - // database must not already exist. The `database_id` appended - // to - // `parent` forms the full database name of the - // form - // `projects//instances//databases/` - // . + // to. This database must not already exist. The `database_id` appended + // to `parent` forms the full database name of the form + // `projects//instances//databases/`. DatabaseId string `json:"databaseId,omitempty"` // ForceSendFields is a list of field names (e.g. "Backup") to @@ -3689,8 +3039,7 @@ func (s *RestoreDatabaseRequest) MarshalJSON() ([]byte, error) { // RestoreInfo: Information about the database restore. type RestoreInfo struct { // BackupInfo: Information about the backup used to restore the - // database. The backup - // may no longer exist. + // database. The backup may no longer exist. BackupInfo *BackupInfo `json:"backupInfo,omitempty"` // SourceType: The type of the restore source. @@ -3723,33 +3072,24 @@ func (s *RestoreInfo) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ResultSet: Results from Read or -// ExecuteSql. +// ResultSet: Results from Read or ExecuteSql. type ResultSet struct { // Metadata: Metadata about the result set, such as row type // information. Metadata *ResultSetMetadata `json:"metadata,omitempty"` - // Rows: Each element in `rows` is a row whose format is defined - // by - // metadata.row_type. The ith element - // in each row matches the ith field in - // metadata.row_type. Elements are - // encoded based on type as described + // Rows: Each element in `rows` is a row whose format is defined by + // metadata.row_type. The ith element in each row matches the ith field + // in metadata.row_type. Elements are encoded based on type as described // here. Rows [][]interface{} `json:"rows,omitempty"` - // Stats: Query plan and execution statistics for the SQL statement - // that - // produced this result set. These can be requested by - // setting - // ExecuteSqlRequest.query_mode. - // DML statements always produce stats containing the number of - // rows - // modified, unless executed using the - // ExecuteSqlRequest.QueryMode.PLAN ExecuteSqlRequest.query_mode. - // Other fields may or may not be populated, based on - // the + // Stats: Query plan and execution statistics for the SQL statement that + // produced this result set. These can be requested by setting + // ExecuteSqlRequest.query_mode. DML statements always produce stats + // containing the number of rows modified, unless executed using the + // ExecuteSqlRequest.QueryMode.PLAN ExecuteSqlRequest.query_mode. Other + // fields may or may not be populated, based on the // ExecuteSqlRequest.query_mode. Stats *ResultSetStats `json:"stats,omitempty"` @@ -3783,20 +3123,15 @@ func (s *ResultSet) MarshalJSON() ([]byte, error) { // ResultSetMetadata: Metadata about a ResultSet or PartialResultSet. type ResultSetMetadata struct { // RowType: Indicates the field names and types for the rows in the - // result - // set. For example, a SQL query like "SELECT UserId, UserName - // FROM - // Users" could return a `row_type` value like: - // - // "fields": [ - // { "name": "UserId", "type": { "code": "INT64" } }, - // { "name": "UserName", "type": { "code": "STRING" } }, - // ] + // result set. 
For example, a SQL query like "SELECT UserId, UserName + // FROM Users" could return a `row_type` value like: "fields": [ { + // "name": "UserId", "type": { "code": "INT64" } }, { "name": + // "UserName", "type": { "code": "STRING" } }, ] RowType *StructType `json:"rowType,omitempty"` // Transaction: If the read or SQL query began a transaction as a - // side-effect, the - // information about the new transaction is yielded here. + // side-effect, the information about the new transaction is yielded + // here. Transaction *Transaction `json:"transaction,omitempty"` // ForceSendFields is a list of field names (e.g. "RowType") to @@ -3829,16 +3164,9 @@ type ResultSetStats struct { QueryPlan *QueryPlan `json:"queryPlan,omitempty"` // QueryStats: Aggregated statistics from the execution of the query. - // Only present when - // the query is profiled. For example, a query could return the - // statistics as - // follows: - // - // { - // "rows_returned": "3", - // "elapsed_time": "1.22 secs", - // "cpu_time": "1.19 secs" - // } + // Only present when the query is profiled. For example, a query could + // return the statistics as follows: { "rows_returned": "3", + // "elapsed_time": "1.22 secs", "cpu_time": "1.19 secs" } QueryStats googleapi.RawMessage `json:"queryStats,omitempty"` // RowCountExact: Standard DML returns an exact count of rows that were @@ -3846,8 +3174,7 @@ type ResultSetStats struct { RowCountExact int64 `json:"rowCountExact,omitempty,string"` // RowCountLowerBound: Partitioned DML does not offer exactly-once - // semantics, so it - // returns a lower bound of the rows modified. + // semantics, so it returns a lower bound of the rows modified. RowCountLowerBound int64 `json:"rowCountLowerBound,omitempty,string"` // ForceSendFields is a list of field names (e.g. "QueryPlan") to @@ -3904,31 +3231,24 @@ func (s *RollbackRequest) MarshalJSON() ([]byte, error) { // Session: A session in the Cloud Spanner API. type Session struct { // ApproximateLastUseTime: Output only. The approximate timestamp when - // the session is last used. It is - // typically earlier than the actual last use time. + // the session is last used. It is typically earlier than the actual + // last use time. ApproximateLastUseTime string `json:"approximateLastUseTime,omitempty"` // CreateTime: Output only. The timestamp when the session is created. CreateTime string `json:"createTime,omitempty"` - // Labels: The labels for the session. - // - // * Label keys must be between 1 and 63 characters long and must - // conform to - // the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. - // * Label values must be between 0 and 63 characters long and must - // conform - // to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. - // * No more than 64 labels can be associated with a given - // session. - // - // See https://goo.gl/xmQnxf for more information on and examples of - // labels. + // Labels: The labels for the session. * Label keys must be between 1 + // and 63 characters long and must conform to the following regular + // expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. * Label values must be + // between 0 and 63 characters long and must conform to the regular + // expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. * No more than 64 labels + // can be associated with a given session. See https://goo.gl/xmQnxf for + // more information on and examples of labels. Labels map[string]string `json:"labels,omitempty"` - // Name: The name of the session. 
This is always system-assigned; values - // provided - // when creating a session are ignored. + // Name: Output only. The name of the session. This is always + // system-assigned. Name string `json:"name,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -3963,11 +3283,9 @@ func (s *Session) MarshalJSON() ([]byte, error) { // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. + // `resource`. The size of the policy is limited to a few 10s of KB. An + // empty policy is a valid policy but certain Cloud Platform services + // (such as Projects) might reject them. Policy *Policy `json:"policy,omitempty"` // ForceSendFields is a list of field names (e.g. "Policy") to @@ -3994,22 +3312,17 @@ func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { } // ShortRepresentation: Condensed representation of a node and its -// subtree. Only present for -// `SCALAR` PlanNode(s). +// subtree. Only present for `SCALAR` PlanNode(s). type ShortRepresentation struct { // Description: A string representation of the expression subtree rooted // at this node. Description string `json:"description,omitempty"` // Subqueries: A mapping of (subquery variable name) -> (subquery node - // id) for cases - // where the `description` string of this node references a - // `SCALAR` - // subquery contained in the expression subtree rooted at this node. - // The - // referenced `SCALAR` subquery may not necessarily be a direct child - // of - // this node. + // id) for cases where the `description` string of this node references + // a `SCALAR` subquery contained in the expression subtree rooted at + // this node. The referenced `SCALAR` subquery may not necessarily be a + // direct child of this node. Subqueries map[string]int64 `json:"subqueries,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -4038,33 +3351,21 @@ func (s *ShortRepresentation) MarshalJSON() ([]byte, error) { // Statement: A single DML statement. type Statement struct { // ParamTypes: It is not always possible for Cloud Spanner to infer the - // right SQL type - // from a JSON value. For example, values of type `BYTES` and values - // of type `STRING` both appear in params as JSON strings. - // - // In these cases, `param_types` can be used to specify the exact - // SQL type for some or all of the SQL statement parameters. See - // the - // definition of Type for more information - // about SQL types. + // right SQL type from a JSON value. For example, values of type `BYTES` + // and values of type `STRING` both appear in params as JSON strings. In + // these cases, `param_types` can be used to specify the exact SQL type + // for some or all of the SQL statement parameters. See the definition + // of Type for more information about SQL types. ParamTypes map[string]Type `json:"paramTypes,omitempty"` // Params: Parameter names and values that bind to placeholders in the - // DML string. - // - // A parameter placeholder consists of the `@` character followed by - // the - // parameter name (for example, `@firstName`). Parameter names can - // contain - // letters, numbers, and underscores. - // - // Parameters can appear anywhere that a literal value is expected. 
- // The - // same parameter name can be used more than once, for example: - // - // "WHERE id > @msg_id AND id < @msg_id + 100" - // - // It is an error to execute a SQL statement with unbound parameters. + // DML string. A parameter placeholder consists of the `@` character + // followed by the parameter name (for example, `@firstName`). Parameter + // names can contain letters, numbers, and underscores. Parameters can + // appear anywhere that a literal value is expected. The same parameter + // name can be used more than once, for example: "WHERE id > @msg_id + // AND id < @msg_id + 100" It is an error to execute a SQL statement + // with unbound parameters. Params googleapi.RawMessage `json:"params,omitempty"` // Sql: Required. The DML string. @@ -4094,32 +3395,24 @@ func (s *Statement) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. -// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. -// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -4147,15 +3440,12 @@ func (s *Status) MarshalJSON() ([]byte, error) { // StructType: `StructType` defines the fields of a STRUCT type. type StructType struct { - // Fields: The list of fields that make up this struct. Order - // is - // significant, because values of this struct type are represented - // as - // lists, where the order of field values matches the order of - // fields in the StructType. In turn, the order of fields - // matches the order of columns in a read request, or the order - // of - // fields in the `SELECT` clause of a query. + // Fields: The list of fields that make up this struct. Order is + // significant, because values of this struct type are represented as + // lists, where the order of field values matches the order of fields in + // the StructType. 
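For orientation only, here is a minimal sketch of how a caller might populate the Statement fields documented above using this generated package; the SQL text, parameter name, and values are illustrative and not taken from this change.

package spannersketch

import (
	"google.golang.org/api/googleapi"
	spanner "google.golang.org/api/spanner/v1"
)

// newStatement builds a parameterized statement. JSON cannot distinguish
// INT64 from STRING, so the parameter's SQL type is pinned via ParamTypes.
func newStatement() *spanner.Statement {
	return &spanner.Statement{
		Sql:    "SELECT Id, Body FROM Messages WHERE Id > @msg_id AND Id < @msg_id + 100",
		// Params is a raw JSON object mapping parameter names to values.
		Params: googleapi.RawMessage(`{"msg_id": "42"}`),
		ParamTypes: map[string]spanner.Type{
			"msg_id": {Code: "INT64"},
		},
	}
}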
In turn, the order of fields matches the order of + // columns in a read request, or the order of fields in the `SELECT` + // clause of a query. Fields []*Field `json:"fields,omitempty"` // ForceSendFields is a list of field names (e.g. "Fields") to @@ -4185,8 +3475,7 @@ func (s *StructType) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsRequest struct { // Permissions: REQUIRED: The set of permissions to check for - // 'resource'. - // Permissions with wildcards (such as '*', 'spanner.*', + // 'resource'. Permissions with wildcards (such as '*', 'spanner.*', // 'spanner.instances.*') are not allowed. Permissions []string `json:"permissions,omitempty"` @@ -4217,8 +3506,7 @@ func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. + // the caller is allowed. Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -4250,26 +3538,17 @@ func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { // Transaction: A transaction. type Transaction struct { - // Id: `id` may be used to identify the transaction in - // subsequent - // Read, - // ExecuteSql, - // Commit, or - // Rollback calls. - // - // Single-use read-only transactions do not have IDs, because - // single-use transactions do not support multiple requests. + // Id: `id` may be used to identify the transaction in subsequent Read, + // ExecuteSql, Commit, or Rollback calls. Single-use read-only + // transactions do not have IDs, because single-use transactions do not + // support multiple requests. Id string `json:"id,omitempty"` // ReadTimestamp: For snapshot read-only transactions, the read - // timestamp chosen - // for the transaction. Not returned by default: - // see - // TransactionOptions.ReadOnly.return_read_timestamp. - // - // A timestamp in RFC3339 UTC \"Zulu\" format, accurate to - // nanoseconds. - // Example: "2014-10-02T15:01:23.045123456Z". + // timestamp chosen for the transaction. Not returned by default: see + // TransactionOptions.ReadOnly.return_read_timestamp. A timestamp in + // RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: + // "2014-10-02T15:01:23.045123456Z". ReadTimestamp string `json:"readTimestamp,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -4299,413 +3578,206 @@ func (s *Transaction) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TransactionOptions: # Transactions -// -// -// Each session can have at most one active transaction at a time. After -// the -// active transaction is completed, the session can immediately -// be -// re-used for the next transaction. It is not necessary to create a -// new session for each transaction. -// -// # Transaction Modes -// -// Cloud Spanner supports three transaction modes: -// -// 1. Locking read-write. This type of transaction is the only way -// to write data into Cloud Spanner. These transactions rely on -// pessimistic locking and, if necessary, two-phase commit. -// Locking read-write transactions may abort, requiring the -// application to retry. -// -// 2. Snapshot read-only. This transaction type provides guaranteed -// consistency across several reads, but does not allow -// writes. Snapshot read-only transactions can be configured to -// read at timestamps in the past. 
Snapshot read-only -// transactions do not need to be committed. -// -// 3. Partitioned DML. This type of transaction is used to execute -// a single Partitioned DML statement. Partitioned DML partitions -// the key space and runs the DML statement over each partition -// in parallel using separate, internal transactions that commit -// independently. Partitioned DML transactions do not need to be -// committed. -// -// For transactions that only read, snapshot read-only -// transactions -// provide simpler semantics and are almost always faster. -// In -// particular, read-only transactions do not take locks, so they do -// not conflict with read-write transactions. As a consequence of -// not -// taking locks, they also do not abort, so retry loops are not -// needed. -// -// Transactions may only read/write data in a single database. They -// may, however, read/write data in different tables within -// that -// database. -// -// ## Locking Read-Write Transactions -// -// Locking transactions may be used to atomically read-modify-write -// data anywhere in a database. This type of transaction is -// externally -// consistent. -// -// Clients should attempt to minimize the amount of time a -// transaction -// is active. Faster transactions commit with higher probability -// and cause less contention. Cloud Spanner attempts to keep read -// locks -// active as long as the transaction continues to do reads, and -// the -// transaction has not been terminated by -// Commit or -// Rollback. Long periods of -// inactivity at the client may cause Cloud Spanner to release -// a -// transaction's locks and abort it. -// -// Conceptually, a read-write transaction consists of zero or more -// reads or SQL statements followed by -// Commit. At any time before -// Commit, the client can send a -// Rollback request to abort the -// transaction. -// -// ### Semantics -// +// TransactionOptions: # Transactions Each session can have at most one +// active transaction at a time (note that standalone reads and queries +// use a transaction internally and do count towards the one transaction +// limit). After the active transaction is completed, the session can +// immediately be re-used for the next transaction. It is not necessary +// to create a new session for each transaction. # Transaction Modes +// Cloud Spanner supports three transaction modes: 1. Locking +// read-write. This type of transaction is the only way to write data +// into Cloud Spanner. These transactions rely on pessimistic locking +// and, if necessary, two-phase commit. Locking read-write transactions +// may abort, requiring the application to retry. 2. Snapshot read-only. +// This transaction type provides guaranteed consistency across several +// reads, but does not allow writes. Snapshot read-only transactions can +// be configured to read at timestamps in the past. Snapshot read-only +// transactions do not need to be committed. 3. Partitioned DML. This +// type of transaction is used to execute a single Partitioned DML +// statement. Partitioned DML partitions the key space and runs the DML +// statement over each partition in parallel using separate, internal +// transactions that commit independently. Partitioned DML transactions +// do not need to be committed. For transactions that only read, +// snapshot read-only transactions provide simpler semantics and are +// almost always faster. In particular, read-only transactions do not +// take locks, so they do not conflict with read-write transactions. 
As +// a consequence of not taking locks, they also do not abort, so retry +// loops are not needed. Transactions may only read/write data in a +// single database. They may, however, read/write data in different +// tables within that database. ## Locking Read-Write Transactions +// Locking transactions may be used to atomically read-modify-write data +// anywhere in a database. This type of transaction is externally +// consistent. Clients should attempt to minimize the amount of time a +// transaction is active. Faster transactions commit with higher +// probability and cause less contention. Cloud Spanner attempts to keep +// read locks active as long as the transaction continues to do reads, +// and the transaction has not been terminated by Commit or Rollback. +// Long periods of inactivity at the client may cause Cloud Spanner to +// release a transaction's locks and abort it. Conceptually, a +// read-write transaction consists of zero or more reads or SQL +// statements followed by Commit. At any time before Commit, the client +// can send a Rollback request to abort the transaction. ### Semantics // Cloud Spanner can commit the transaction if all read locks it -// acquired -// are still valid at commit time, and it is able to acquire write -// locks for all writes. Cloud Spanner can abort the transaction for -// any -// reason. If a commit attempt returns `ABORTED`, Cloud Spanner -// guarantees -// that the transaction has not modified any user data in Cloud -// Spanner. -// -// Unless the transaction commits, Cloud Spanner makes no guarantees -// about -// how long the transaction's locks were held for. It is an error to -// use Cloud Spanner locks for any sort of mutual exclusion other -// than -// between Cloud Spanner transactions themselves. -// -// ### Retrying Aborted Transactions -// -// When a transaction aborts, the application can choose to retry -// the -// whole transaction again. To maximize the chances of -// successfully -// committing the retry, the client should execute the retry in the -// same session as the original attempt. The original session's -// lock -// priority increases with each consecutive abort, meaning that -// each -// attempt has a slightly better chance of success than the -// previous. -// -// Under some circumstances (e.g., many transactions attempting -// to -// modify the same row(s)), a transaction can abort many times in -// a -// short period before successfully committing. Thus, it is not a -// good -// idea to cap the number of retries a transaction can attempt; -// instead, it is better to limit the total amount of wall time -// spent -// retrying. -// -// ### Idle Transactions -// -// A transaction is considered idle if it has no outstanding reads -// or -// SQL queries and has not started a read or SQL query within the last -// 10 +// acquired are still valid at commit time, and it is able to acquire +// write locks for all writes. Cloud Spanner can abort the transaction +// for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner +// guarantees that the transaction has not modified any user data in +// Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no +// guarantees about how long the transaction's locks were held for. It +// is an error to use Cloud Spanner locks for any sort of mutual +// exclusion other than between Cloud Spanner transactions themselves. +// ### Retrying Aborted Transactions When a transaction aborts, the +// application can choose to retry the whole transaction again. 
To +// maximize the chances of successfully committing the retry, the client +// should execute the retry in the same session as the original attempt. +// The original session's lock priority increases with each consecutive +// abort, meaning that each attempt has a slightly better chance of +// success than the previous. Under some circumstances (e.g., many +// transactions attempting to modify the same row(s)), a transaction can +// abort many times in a short period before successfully committing. +// Thus, it is not a good idea to cap the number of retries a +// transaction can attempt; instead, it is better to limit the total +// amount of wall time spent retrying. ### Idle Transactions A +// transaction is considered idle if it has no outstanding reads or SQL +// queries and has not started a read or SQL query within the last 10 // seconds. Idle transactions can be aborted by Cloud Spanner so that -// they -// don't hold on to locks indefinitely. In that case, the commit -// will -// fail with error `ABORTED`. -// -// If this behavior is undesirable, periodically executing a simple -// SQL query in the transaction (e.g., `SELECT 1`) prevents -// the -// transaction from becoming idle. -// -// ## Snapshot Read-Only Transactions -// -// Snapshot read-only transactions provides a simpler method -// than -// locking read-write transactions for doing several consistent -// reads. However, this type of transaction does not support -// writes. -// -// Snapshot transactions do not take locks. Instead, they work -// by -// choosing a Cloud Spanner timestamp, then executing all reads at -// that -// timestamp. Since they do not acquire locks, they do not -// block -// concurrent read-write transactions. -// -// Unlike locking read-write transactions, snapshot -// read-only -// transactions never abort. They can fail if the chosen read -// timestamp is garbage collected; however, the default -// garbage -// collection policy is generous enough that most applications do -// not -// need to worry about this in practice. -// -// Snapshot read-only transactions do not need to call -// Commit or -// Rollback (and in fact are not -// permitted to do so). -// -// To execute a snapshot transaction, the client specifies a -// timestamp -// bound, which tells Cloud Spanner how to choose a read timestamp. -// -// The types of timestamp bound are: -// -// - Strong (the default). -// - Bounded staleness. -// - Exact staleness. -// -// If the Cloud Spanner database to be read is geographically -// distributed, -// stale read-only transactions can execute more quickly than strong -// or read-write transaction, because they are able to execute far -// from the leader replica. -// -// Each type of timestamp bound is discussed in detail below. -// -// ### Strong -// -// Strong reads are guaranteed to see the effects of all -// transactions -// that have committed before the start of the read. Furthermore, -// all -// rows yielded by a single read are consistent with each other -- -// if -// any part of the read observes a transaction, all parts of the -// read -// see the transaction. -// -// Strong reads are not repeatable: two consecutive strong -// read-only -// transactions might return inconsistent results if there -// are -// concurrent writes. If consistency across reads is required, the -// reads should be executed within a transaction or at an exact -// read -// timestamp. -// -// See TransactionOptions.ReadOnly.strong. 
-// -// ### Exact Staleness -// -// These timestamp bounds execute reads at a user-specified -// timestamp. Reads at a timestamp are guaranteed to see a -// consistent -// prefix of the global transaction history: they observe -// modifications done by all transactions with a commit timestamp <= -// the read timestamp, and observe none of the modifications done -// by -// transactions with a larger commit timestamp. They will block -// until -// all conflicting transactions that may be assigned commit -// timestamps -// <= the read timestamp have finished. -// -// The timestamp can either be expressed as an absolute Cloud Spanner -// commit -// timestamp or a staleness relative to the current time. -// -// These modes do not require a "negotiation phase" to pick a -// timestamp. As a result, they execute slightly faster than -// the -// equivalent boundedly stale concurrency modes. On the other -// hand, -// boundedly stale reads usually return fresher results. -// -// See TransactionOptions.ReadOnly.read_timestamp -// and -// TransactionOptions.ReadOnly.exact_staleness. -// -// ### Bounded Staleness -// +// they don't hold on to locks indefinitely. In that case, the commit +// will fail with error `ABORTED`. If this behavior is undesirable, +// periodically executing a simple SQL query in the transaction (e.g., +// `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot +// Read-Only Transactions Snapshot read-only transactions provides a +// simpler method than locking read-write transactions for doing several +// consistent reads. However, this type of transaction does not support +// writes. Snapshot transactions do not take locks. Instead, they work +// by choosing a Cloud Spanner timestamp, then executing all reads at +// that timestamp. Since they do not acquire locks, they do not block +// concurrent read-write transactions. Unlike locking read-write +// transactions, snapshot read-only transactions never abort. They can +// fail if the chosen read timestamp is garbage collected; however, the +// default garbage collection policy is generous enough that most +// applications do not need to worry about this in practice. Snapshot +// read-only transactions do not need to call Commit or Rollback (and in +// fact are not permitted to do so). To execute a snapshot transaction, +// the client specifies a timestamp bound, which tells Cloud Spanner how +// to choose a read timestamp. The types of timestamp bound are: - +// Strong (the default). - Bounded staleness. - Exact staleness. If the +// Cloud Spanner database to be read is geographically distributed, +// stale read-only transactions can execute more quickly than strong or +// read-write transaction, because they are able to execute far from the +// leader replica. Each type of timestamp bound is discussed in detail +// below. ### Strong Strong reads are guaranteed to see the effects of +// all transactions that have committed before the start of the read. +// Furthermore, all rows yielded by a single read are consistent with +// each other -- if any part of the read observes a transaction, all +// parts of the read see the transaction. Strong reads are not +// repeatable: two consecutive strong read-only transactions might +// return inconsistent results if there are concurrent writes. If +// consistency across reads is required, the reads should be executed +// within a transaction or at an exact read timestamp. See +// TransactionOptions.ReadOnly.strong. 
### Exact Staleness These +// timestamp bounds execute reads at a user-specified timestamp. Reads +// at a timestamp are guaranteed to see a consistent prefix of the +// global transaction history: they observe modifications done by all +// transactions with a commit timestamp <= the read timestamp, and +// observe none of the modifications done by transactions with a larger +// commit timestamp. They will block until all conflicting transactions +// that may be assigned commit timestamps <= the read timestamp have +// finished. The timestamp can either be expressed as an absolute Cloud +// Spanner commit timestamp or a staleness relative to the current time. +// These modes do not require a "negotiation phase" to pick a timestamp. +// As a result, they execute slightly faster than the equivalent +// boundedly stale concurrency modes. On the other hand, boundedly stale +// reads usually return fresher results. See +// TransactionOptions.ReadOnly.read_timestamp and +// TransactionOptions.ReadOnly.exact_staleness. ### Bounded Staleness // Bounded staleness modes allow Cloud Spanner to pick the read -// timestamp, -// subject to a user-provided staleness bound. Cloud Spanner chooses -// the -// newest timestamp within the staleness bound that allows execution -// of the reads at the closest available replica without blocking. -// -// All rows yielded are consistent with each other -- if any part of -// the read observes a transaction, all parts of the read see -// the -// transaction. Boundedly stale reads are not repeatable: two -// stale -// reads, even if they use the same staleness bound, can execute -// at -// different timestamps and thus return inconsistent results. -// -// Boundedly stale reads execute in two phases: the first -// phase -// negotiates a timestamp among all replicas needed to serve the -// read. In the second phase, reads are executed at the -// negotiated -// timestamp. -// -// As a result of the two phase execution, bounded staleness reads -// are -// usually a little slower than comparable exact staleness -// reads. However, they are typically able to return fresher -// results, and are more likely to execute at the closest -// replica. -// -// Because the timestamp negotiation requires up-front knowledge -// of -// which rows will be read, it can only be used with -// single-use -// read-only transactions. -// -// See TransactionOptions.ReadOnly.max_staleness -// and -// TransactionOptions.ReadOnly.min_read_timestamp. -// -// ### Old Read Timestamps and Garbage Collection -// -// Cloud Spanner continuously garbage collects deleted and overwritten -// data -// in the background to reclaim storage space. This process is known -// as "version GC". By default, version GC reclaims versions after -// they -// are one hour old. Because of this, Cloud Spanner cannot perform -// reads -// at read timestamps more than one hour in the past. This -// restriction also applies to in-progress reads and/or SQL queries -// whose -// timestamp become too old while executing. Reads and SQL queries -// with -// too-old read timestamps fail with the error -// `FAILED_PRECONDITION`. -// -// ## Partitioned DML Transactions -// +// timestamp, subject to a user-provided staleness bound. Cloud Spanner +// chooses the newest timestamp within the staleness bound that allows +// execution of the reads at the closest available replica without +// blocking. 
All rows yielded are consistent with each other -- if any +// part of the read observes a transaction, all parts of the read see +// the transaction. Boundedly stale reads are not repeatable: two stale +// reads, even if they use the same staleness bound, can execute at +// different timestamps and thus return inconsistent results. Boundedly +// stale reads execute in two phases: the first phase negotiates a +// timestamp among all replicas needed to serve the read. In the second +// phase, reads are executed at the negotiated timestamp. As a result of +// the two phase execution, bounded staleness reads are usually a little +// slower than comparable exact staleness reads. However, they are +// typically able to return fresher results, and are more likely to +// execute at the closest replica. Because the timestamp negotiation +// requires up-front knowledge of which rows will be read, it can only +// be used with single-use read-only transactions. See +// TransactionOptions.ReadOnly.max_staleness and +// TransactionOptions.ReadOnly.min_read_timestamp. ### Old Read +// Timestamps and Garbage Collection Cloud Spanner continuously garbage +// collects deleted and overwritten data in the background to reclaim +// storage space. This process is known as "version GC". By default, +// version GC reclaims versions after they are one hour old. Because of +// this, Cloud Spanner cannot perform reads at read timestamps more than +// one hour in the past. This restriction also applies to in-progress +// reads and/or SQL queries whose timestamp become too old while +// executing. Reads and SQL queries with too-old read timestamps fail +// with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions // Partitioned DML transactions are used to execute DML statements with -// a -// different execution strategy that provides different, and often -// better, -// scalability properties for large, table-wide operations than DML in -// a -// ReadWrite transaction. Smaller scoped statements, such as an OLTP -// workload, -// should prefer using ReadWrite transactions. -// +// a different execution strategy that provides different, and often +// better, scalability properties for large, table-wide operations than +// DML in a ReadWrite transaction. Smaller scoped statements, such as an +// OLTP workload, should prefer using ReadWrite transactions. // Partitioned DML partitions the keyspace and runs the DML statement on -// each -// partition in separate, internal transactions. These transactions -// commit -// automatically when complete, and run independently from one -// another. -// -// To reduce lock contention, this execution strategy only acquires read -// locks -// on rows that match the WHERE clause of the statement. Additionally, -// the -// smaller per-partition transactions hold locks for less time. -// -// That said, Partitioned DML is not a drop-in replacement for standard -// DML used -// in ReadWrite transactions. -// -// - The DML statement must be fully-partitionable. Specifically, the -// statement -// must be expressible as the union of many statements which each -// access only -// a single row of the table. -// -// - The statement is not applied atomically to all rows of the table. -// Rather, -// the statement is applied atomically to partitions of the table, -// in -// independent transactions. Secondary index rows are updated -// atomically -// with the base table rows. -// -// - Partitioned DML does not guarantee exactly-once execution -// semantics -// against a partition. 
The statement will be applied at least once -// to each -// partition. It is strongly recommended that the DML statement -// should be -// idempotent to avoid unexpected results. For instance, it is -// potentially -// dangerous to run a statement such as -// `UPDATE table SET column = column + 1` as it could be run multiple -// times -// against some rows. -// -// - The partitions are committed automatically - there is no support -// for -// Commit or Rollback. If the call returns an error, or if the client -// issuing -// the ExecuteSql call dies, it is possible that some rows had the -// statement -// executed on them successfully. It is also possible that statement -// was -// never executed against other rows. -// -// - Partitioned DML transactions may only contain the execution of a -// single -// DML statement via ExecuteSql or ExecuteStreamingSql. -// -// - If any error is encountered during the execution of the -// partitioned DML -// operation (for instance, a UNIQUE INDEX violation, division by -// zero, or a -// value that cannot be stored due to schema constraints), then the -// operation is stopped at that point and an error is returned. It -// is -// possible that at this point, some partitions have been committed -// (or even -// committed multiple times), and other partitions have not been run -// at all. -// -// Given the above, Partitioned DML is good fit for large, -// database-wide, +// each partition in separate, internal transactions. These transactions +// commit automatically when complete, and run independently from one +// another. To reduce lock contention, this execution strategy only +// acquires read locks on rows that match the WHERE clause of the +// statement. Additionally, the smaller per-partition transactions hold +// locks for less time. That said, Partitioned DML is not a drop-in +// replacement for standard DML used in ReadWrite transactions. - The +// DML statement must be fully-partitionable. Specifically, the +// statement must be expressible as the union of many statements which +// each access only a single row of the table. - The statement is not +// applied atomically to all rows of the table. Rather, the statement is +// applied atomically to partitions of the table, in independent +// transactions. Secondary index rows are updated atomically with the +// base table rows. - Partitioned DML does not guarantee exactly-once +// execution semantics against a partition. The statement will be +// applied at least once to each partition. It is strongly recommended +// that the DML statement should be idempotent to avoid unexpected +// results. For instance, it is potentially dangerous to run a statement +// such as `UPDATE table SET column = column + 1` as it could be run +// multiple times against some rows. - The partitions are committed +// automatically - there is no support for Commit or Rollback. If the +// call returns an error, or if the client issuing the ExecuteSql call +// dies, it is possible that some rows had the statement executed on +// them successfully. It is also possible that statement was never +// executed against other rows. - Partitioned DML transactions may only +// contain the execution of a single DML statement via ExecuteSql or +// ExecuteStreamingSql. 
- If any error is encountered during the +// execution of the partitioned DML operation (for instance, a UNIQUE +// INDEX violation, division by zero, or a value that cannot be stored +// due to schema constraints), then the operation is stopped at that +// point and an error is returned. It is possible that at this point, +// some partitions have been committed (or even committed multiple +// times), and other partitions have not been run at all. Given the +// above, Partitioned DML is good fit for large, database-wide, // operations that are idempotent, such as deleting old rows from a very -// large -// table. +// large table. type TransactionOptions struct { - // PartitionedDml: Partitioned DML transaction. - // - // Authorization to begin a Partitioned DML transaction - // requires - // `spanner.databases.beginPartitionedDmlTransaction` permission - // on the `session` resource. + // PartitionedDml: Partitioned DML transaction. Authorization to begin a + // Partitioned DML transaction requires + // `spanner.databases.beginPartitionedDmlTransaction` permission on the + // `session` resource. PartitionedDml *PartitionedDml `json:"partitionedDml,omitempty"` - // ReadOnly: Transaction will not write. - // - // Authorization to begin a read-only transaction - // requires - // `spanner.databases.beginReadOnlyTransaction` permission - // on the `session` resource. + // ReadOnly: Transaction will not write. Authorization to begin a + // read-only transaction requires + // `spanner.databases.beginReadOnlyTransaction` permission on the + // `session` resource. ReadOnly *ReadOnly `json:"readOnly,omitempty"` - // ReadWrite: Transaction may write. - // - // Authorization to begin a read-write transaction - // requires - // `spanner.databases.beginOrRollbackReadWriteTransaction` permission - // on the `session` resource. + // ReadWrite: Transaction may write. Authorization to begin a read-write + // transaction requires + // `spanner.databases.beginOrRollbackReadWriteTransaction` permission on + // the `session` resource. ReadWrite *ReadWrite `json:"readWrite,omitempty"` // ForceSendFields is a list of field names (e.g. "PartitionedDml") to @@ -4733,16 +3805,11 @@ func (s *TransactionOptions) MarshalJSON() ([]byte, error) { } // TransactionSelector: This message is used to select the transaction -// in which a -// Read or -// ExecuteSql call runs. -// -// See TransactionOptions for more information about transactions. +// in which a Read or ExecuteSql call runs. See TransactionOptions for +// more information about transactions. type TransactionSelector struct { - // Begin: Begin a new transaction and execute this read or SQL query - // in - // it. The transaction ID of the new transaction is returned - // in + // Begin: Begin a new transaction and execute this read or SQL query in + // it. The transaction ID of the new transaction is returned in // ResultSetMetadata.transaction, which is a Transaction. Begin *TransactionOptions `json:"begin,omitempty"` @@ -4750,10 +3817,9 @@ type TransactionSelector struct { // transaction. Id string `json:"id,omitempty"` - // SingleUse: Execute the read or SQL query in a temporary - // transaction. - // This is the most efficient way to execute a transaction that - // consists of a single SQL query. + // SingleUse: Execute the read or SQL query in a temporary transaction. + // This is the most efficient way to execute a transaction that consists + // of a single SQL query. 
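As a hedged sketch of how the transaction modes above map onto the generated structs: the selectors below assume ReadOnly exposes a Strong flag, per the API surface described here, and all field values are illustrative.

package spannersketch

import spanner "google.golang.org/api/spanner/v1"

// singleUseStrongRead selects a temporary single-use read-only transaction,
// the cheapest option when a request consists of a single query.
func singleUseStrongRead() *spanner.TransactionSelector {
	return &spanner.TransactionSelector{
		SingleUse: &spanner.TransactionOptions{
			ReadOnly: &spanner.ReadOnly{Strong: true},
		},
	}
}

// beginReadWrite begins a locking read-write transaction; the new
// transaction ID comes back in ResultSetMetadata.transaction.
func beginReadWrite() *spanner.TransactionSelector {
	return &spanner.TransactionSelector{
		Begin: &spanner.TransactionOptions{ReadWrite: &spanner.ReadWrite{}},
	}
}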
SingleUse *TransactionOptions `json:"singleUse,omitempty"` // ForceSendFields is a list of field names (e.g. "Begin") to @@ -4780,11 +3846,10 @@ func (s *TransactionSelector) MarshalJSON() ([]byte, error) { } // Type: `Type` indicates the type of a Cloud Spanner value, as might be -// stored in a -// table cell or returned from an SQL query. +// stored in a table cell or returned from an SQL query. type Type struct { - // ArrayElementType: If code == ARRAY, then `array_element_type` - // is the type of the array elements. + // ArrayElementType: If code == ARRAY, then `array_element_type` is the + // type of the array elements. ArrayElementType *Type `json:"arrayElementType,omitempty"` // Code: Required. The TypeCode for this type. @@ -4794,35 +3859,31 @@ type Type struct { // "BOOL" - Encoded as JSON `true` or `false`. // "INT64" - Encoded as `string`, in decimal format. // "FLOAT64" - Encoded as `number`, or the strings "NaN", - // "Infinity", or - // "-Infinity". + // "Infinity", or "-Infinity". // "TIMESTAMP" - Encoded as `string` in RFC 3339 timestamp format. The - // time zone - // must be present, and must be "Z". - // - // If the schema has the column option - // `allow_commit_timestamp=true`, the placeholder - // string - // "spanner.commit_timestamp()" can be used to instruct the system - // to insert the commit timestamp associated with the - // transaction - // commit. + // time zone must be present, and must be "Z". If the schema has the + // column option `allow_commit_timestamp=true`, the placeholder string + // "spanner.commit_timestamp()" can be used to instruct the system to + // insert the commit timestamp associated with the transaction commit. // "DATE" - Encoded as `string` in RFC 3339 date format. // "STRING" - Encoded as `string`. // "BYTES" - Encoded as a base64-encoded `string`, as described in RFC - // 4648, - // section 4. + // 4648, section 4. // "ARRAY" - Encoded as `list`, where the list elements are - // represented - // according to - // array_element_type. + // represented according to array_element_type. // "STRUCT" - Encoded as `list`, where list element `i` is represented - // according - // to [struct_type.fields[i]][google.spanner.v1.StructType.fields]. + // according to + // [struct_type.fields[i]][google.spanner.v1.StructType.fields]. + // "NUMERIC" - Encoded as `string`, in decimal format or scientific + // notation format. Decimal format: `[+-]Digits[.[Digits]]` or + // `+-.Digits` Scientific notation: + // `[+-]Digits[.[Digits]][ExponentIndicator[+-]Digits]` or + // `+-.Digits[ExponentIndicator[+-]Digits]` (ExponentIndicator is "e" + // or "E") Code string `json:"code,omitempty"` - // StructType: If code == STRUCT, then `struct_type` - // provides type information for the struct's fields. + // StructType: If code == STRUCT, then `struct_type` provides type + // information for the struct's fields. StructType *StructType `json:"structType,omitempty"` // ForceSendFields is a list of field names (e.g. "ArrayElementType") to @@ -4850,22 +3911,18 @@ func (s *Type) MarshalJSON() ([]byte, error) { } // UpdateDatabaseDdlMetadata: Metadata type for the operation returned -// by -// UpdateDatabaseDdl. +// by UpdateDatabaseDdl. type UpdateDatabaseDdlMetadata struct { // CommitTimestamps: Reports the commit timestamps of all statements - // that have - // succeeded so far, where `commit_timestamps[i]` is the - // commit - // timestamp for the statement `statements[i]`. 
+ // that have succeeded so far, where `commit_timestamps[i]` is the + // commit timestamp for the statement `statements[i]`. CommitTimestamps []string `json:"commitTimestamps,omitempty"` // Database: The database being modified. Database string `json:"database,omitempty"` // Statements: For an update this list contains all the statements. For - // an - // individual statement, this list contains only that statement. + // an individual statement, this list contains only that statement. Statements []string `json:"statements,omitempty"` // ForceSendFields is a list of field names (e.g. "CommitTimestamps") to @@ -4893,53 +3950,30 @@ func (s *UpdateDatabaseDdlMetadata) MarshalJSON() ([]byte, error) { } // UpdateDatabaseDdlRequest: Enqueues the given DDL statements to be -// applied, in order but not -// necessarily all at once, to the database schema at some point -// (or -// points) in the future. The server checks that the statements -// are executable (syntactically valid, name tables that exist, -// etc.) -// before enqueueing them, but they may still fail upon -// later execution (e.g., if a statement from another batch -// of -// statements is applied first and it conflicts in some way, or if -// there is some data-related problem like a `NULL` value in a column -// to -// which `NOT NULL` would be added). If a statement fails, -// all -// subsequent statements in the batch are automatically cancelled. -// -// Each batch of statements is assigned a name which can be used -// with -// the Operations API to monitor -// progress. See the -// operation_id field for more -// details. +// applied, in order but not necessarily all at once, to the database +// schema at some point (or points) in the future. The server checks +// that the statements are executable (syntactically valid, name tables +// that exist, etc.) before enqueueing them, but they may still fail +// upon later execution (e.g., if a statement from another batch of +// statements is applied first and it conflicts in some way, or if there +// is some data-related problem like a `NULL` value in a column to which +// `NOT NULL` would be added). If a statement fails, all subsequent +// statements in the batch are automatically cancelled. Each batch of +// statements is assigned a name which can be used with the Operations +// API to monitor progress. See the operation_id field for more details. type UpdateDatabaseDdlRequest struct { - // OperationId: If empty, the new update request is assigned - // an - // automatically-generated operation ID. Otherwise, `operation_id` - // is used to construct the name of the resulting - // Operation. - // - // Specifying an explicit operation ID simplifies determining - // whether the statements were executed in the event that - // the - // UpdateDatabaseDdl call is replayed, - // or the return value is otherwise lost: the database - // and - // `operation_id` fields can be combined to form the - // name of the resulting - // longrunning.Operation: - // `/operations/`. - // - // `operation_id` should be unique within the database, and must be - // a valid identifier: `a-z*`. Note that - // automatically-generated operation IDs always begin with - // an - // underscore. If the named operation already exists, - // UpdateDatabaseDdl returns - // `ALREADY_EXISTS`. + // OperationId: If empty, the new update request is assigned an + // automatically-generated operation ID. Otherwise, `operation_id` is + // used to construct the name of the resulting Operation. 
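A tentative sketch of issuing such a DDL batch through the generated service; the database name and DDL text are placeholders, and the Projects.Instances.Databases.UpdateDdl call chain is an assumption based on the generator's usual naming for the spanner.projects.instances.databases.updateDdl method.

package spannersketch

import (
	"context"

	spanner "google.golang.org/api/spanner/v1"
)

// addLastName enqueues a single DDL statement against db, a full
// "projects/.../instances/.../databases/..." resource name.
func addLastName(ctx context.Context, db string) error {
	svc, err := spanner.NewService(ctx)
	if err != nil {
		return err
	}
	req := &spanner.UpdateDatabaseDdlRequest{
		// A caller-chosen operation_id makes the request safe to replay.
		OperationId: "add_lastname_column",
		Statements:  []string{"ALTER TABLE Singers ADD COLUMN LastName STRING(1024)"},
	}
	// The returned long-running Operation can be polled for completion.
	_, err = svc.Projects.Instances.Databases.UpdateDdl(db, req).Context(ctx).Do()
	return err
}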
Specifying an + // explicit operation ID simplifies determining whether the statements + // were executed in the event that the UpdateDatabaseDdl call is + // replayed, or the return value is otherwise lost: the database and + // `operation_id` fields can be combined to form the name of the + // resulting longrunning.Operation: `/operations/`. `operation_id` + // should be unique within the database, and must be a valid identifier: + // `a-z*`. Note that automatically-generated operation IDs always begin + // with an underscore. If the named operation already exists, + // UpdateDatabaseDdl returns `ALREADY_EXISTS`. OperationId string `json:"operationId,omitempty"` // Statements: Required. DDL statements to be applied to the database. @@ -4968,15 +4002,12 @@ func (s *UpdateDatabaseDdlRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UpdateInstanceMetadata: Metadata type for the operation returned -// by +// UpdateInstanceMetadata: Metadata type for the operation returned by // UpdateInstance. type UpdateInstanceMetadata struct { // CancelTime: The time at which this operation was cancelled. If set, - // this operation is - // in the process of undoing itself (which is guaranteed to succeed) - // and - // cannot be cancelled again. + // this operation is in the process of undoing itself (which is + // guaranteed to succeed) and cannot be cancelled again. CancelTime string `json:"cancelTime,omitempty"` // EndTime: The time at which this operation failed or was completed @@ -4986,8 +4017,7 @@ type UpdateInstanceMetadata struct { // Instance: The desired end state of the update. Instance *Instance `json:"instance,omitempty"` - // StartTime: The time at which UpdateInstance - // request was received. + // StartTime: The time at which UpdateInstance request was received. StartTime string `json:"startTime,omitempty"` // ForceSendFields is a list of field names (e.g. "CancelTime") to @@ -5016,18 +4046,14 @@ func (s *UpdateInstanceMetadata) MarshalJSON() ([]byte, error) { // UpdateInstanceRequest: The request for UpdateInstance. type UpdateInstanceRequest struct { // FieldMask: Required. A mask specifying which fields in Instance - // should be updated. - // The field mask must always be specified; this prevents any future - // fields in - // Instance from being erased accidentally by clients that do not - // know - // about them. + // should be updated. The field mask must always be specified; this + // prevents any future fields in Instance from being erased accidentally + // by clients that do not know about them. FieldMask string `json:"fieldMask,omitempty"` // Instance: Required. The instance to update, which must always include - // the instance - // name. Otherwise, only fields mentioned in field_mask need be - // included. + // the instance name. Otherwise, only fields mentioned in field_mask + // need be included. Instance *Instance `json:"instance,omitempty"` // ForceSendFields is a list of field names (e.g. "FieldMask") to @@ -5053,30 +4079,24 @@ func (s *UpdateInstanceRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Write: Arguments to insert, update, insert_or_update, and -// replace operations. +// Write: Arguments to insert, update, insert_or_update, and replace +// operations. type Write struct { - // Columns: The names of the columns in table to be written. 
- // - // The list of columns must contain enough columns to allow - // Cloud Spanner to derive values for all primary key columns in - // the - // row(s) to be modified. + // Columns: The names of the columns in table to be written. The list of + // columns must contain enough columns to allow Cloud Spanner to derive + // values for all primary key columns in the row(s) to be modified. Columns []string `json:"columns,omitempty"` // Table: Required. The table whose rows will be written. Table string `json:"table,omitempty"` - // Values: The values to be written. `values` can contain more than - // one - // list of values. If it does, then multiple rows are written, one - // for each entry in `values`. Each list in `values` must have - // exactly as many entries as there are entries in columns - // above. Sending multiple lists is equivalent to sending - // multiple - // `Mutation`s, each containing one `values` entry and repeating - // table and columns. Individual values in each list are - // encoded as described here. + // Values: The values to be written. `values` can contain more than one + // list of values. If it does, then multiple rows are written, one for + // each entry in `values`. Each list in `values` must have exactly as + // many entries as there are entries in columns above. Sending multiple + // lists is equivalent to sending multiple `Mutation`s, each containing + // one `values` entry and repeating table and columns. Individual values + // in each list are encoded as described here. Values [][]interface{} `json:"values,omitempty"` // ForceSendFields is a list of field names (e.g. "Columns") to @@ -5157,7 +4177,7 @@ func (c *ProjectsInstanceConfigsGetCall) Header() http.Header { func (c *ProjectsInstanceConfigsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5228,7 +4248,7 @@ func (c *ProjectsInstanceConfigsGetCall) Do(opts ...googleapi.CallOption) (*Inst // ], // "parameters": { // "name": { - // "description": "Required. The name of the requested instance configuration. Values are of\nthe form `projects/\u003cproject\u003e/instanceConfigs/\u003cconfig\u003e`.", + // "description": "Required. The name of the requested instance configuration. Values are of the form `projects//instanceConfigs/`.", // "location": "path", // "pattern": "^projects/[^/]+/instanceConfigs/[^/]+$", // "required": true, @@ -5267,17 +4287,16 @@ func (r *ProjectsInstanceConfigsService) List(parent string) *ProjectsInstanceCo } // PageSize sets the optional parameter "pageSize": Number of instance -// configurations to be returned in the response. If 0 or -// less, defaults to the server's maximum allowed page size. +// configurations to be returned in the response. If 0 or less, defaults +// to the server's maximum allowed page size. func (c *ProjectsInstanceConfigsListCall) PageSize(pageSize int64) *ProjectsInstanceConfigsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": If non-empty, -// `page_token` should contain a -// next_page_token -// from a previous ListInstanceConfigsResponse. +// `page_token` should contain a next_page_token from a previous +// ListInstanceConfigsResponse. 
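A small sketch of driving the PageSize/PageToken options just documented from the generated list call; the project name is a placeholder, and the standard Pages helper is assumed to follow next_page_token on the caller's behalf.

package spannersketch

import (
	"context"
	"fmt"

	spanner "google.golang.org/api/spanner/v1"
)

// listConfigs pages through every instance configuration in a project
// ("projects/<project>") without handling page tokens by hand.
func listConfigs(ctx context.Context, project string) error {
	svc, err := spanner.NewService(ctx)
	if err != nil {
		return err
	}
	call := svc.Projects.InstanceConfigs.List(project).PageSize(50)
	return call.Pages(ctx, func(page *spanner.ListInstanceConfigsResponse) error {
		for _, cfg := range page.InstanceConfigs {
			fmt.Println(cfg.Name, cfg.DisplayName)
		}
		return nil
	})
}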
func (c *ProjectsInstanceConfigsListCall) PageToken(pageToken string) *ProjectsInstanceConfigsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -5320,7 +4339,7 @@ func (c *ProjectsInstanceConfigsListCall) Header() http.Header { func (c *ProjectsInstanceConfigsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5391,18 +4410,18 @@ func (c *ProjectsInstanceConfigsListCall) Do(opts ...googleapi.CallOption) (*Lis // ], // "parameters": { // "pageSize": { - // "description": "Number of instance configurations to be returned in the response. If 0 or\nless, defaults to the server's maximum allowed page size.", + // "description": "Number of instance configurations to be returned in the response. If 0 or less, defaults to the server's maximum allowed page size.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "If non-empty, `page_token` should contain a\nnext_page_token\nfrom a previous ListInstanceConfigsResponse.", + // "description": "If non-empty, `page_token` should contain a next_page_token from a previous ListInstanceConfigsResponse.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The name of the project for which a list of supported instance\nconfigurations is requested. Values are of the form\n`projects/\u003cproject\u003e`.", + // "description": "Required. The name of the project for which a list of supported instance configurations is requested. Values are of the form `projects/`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -5454,46 +4473,24 @@ type ProjectsInstancesCreateCall struct { } // Create: Creates an instance and begins preparing it to begin serving. -// The -// returned long-running operation -// can be used to track the progress of preparing the new -// instance. The instance name is assigned by the caller. If the -// named instance already exists, `CreateInstance` -// returns -// `ALREADY_EXISTS`. -// -// Immediately upon completion of this request: -// -// * The instance is readable via the API, with all requested -// attributes -// but no allocated resources. Its state is `CREATING`. -// -// Until completion of the returned operation: -// -// * Cancelling the operation renders the instance immediately -// unreadable -// via the API. -// * The instance can be deleted. -// * All other attempts to modify the instance are rejected. -// -// Upon completion of the returned operation: -// -// * Billing for all successfully-allocated resources begins (some -// types -// may have lower than the requested levels). -// * Databases can be created in the instance. -// * The instance's allocated resource levels are readable via the -// API. -// * The instance's state becomes `READY`. -// -// The returned long-running operation will -// have a name of the format `/operations/` -// and -// can be used to track creation of the instance. The -// metadata field type is -// CreateInstanceMetadata. -// The response field type is -// Instance, if successful. +// The returned long-running operation can be used to track the progress +// of preparing the new instance. The instance name is assigned by the +// caller. 
If the named instance already exists, `CreateInstance` +// returns `ALREADY_EXISTS`. Immediately upon completion of this +// request: * The instance is readable via the API, with all requested +// attributes but no allocated resources. Its state is `CREATING`. Until +// completion of the returned operation: * Cancelling the operation +// renders the instance immediately unreadable via the API. * The +// instance can be deleted. * All other attempts to modify the instance +// are rejected. Upon completion of the returned operation: * Billing +// for all successfully-allocated resources begins (some types may have +// lower than the requested levels). * Databases can be created in the +// instance. * The instance's allocated resource levels are readable via +// the API. * The instance's state becomes `READY`. The returned +// long-running operation will have a name of the format `/operations/` +// and can be used to track creation of the instance. The metadata field +// type is CreateInstanceMetadata. The response field type is Instance, +// if successful. func (r *ProjectsInstancesService) Create(parent string, createinstancerequest *CreateInstanceRequest) *ProjectsInstancesCreateCall { c := &ProjectsInstancesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -5528,7 +4525,7 @@ func (c *ProjectsInstancesCreateCall) Header() http.Header { func (c *ProjectsInstancesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5592,7 +4589,7 @@ func (c *ProjectsInstancesCreateCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates an instance and begins preparing it to begin serving. The\nreturned long-running operation\ncan be used to track the progress of preparing the new\ninstance. The instance name is assigned by the caller. If the\nnamed instance already exists, `CreateInstance` returns\n`ALREADY_EXISTS`.\n\nImmediately upon completion of this request:\n\n * The instance is readable via the API, with all requested attributes\n but no allocated resources. Its state is `CREATING`.\n\nUntil completion of the returned operation:\n\n * Cancelling the operation renders the instance immediately unreadable\n via the API.\n * The instance can be deleted.\n * All other attempts to modify the instance are rejected.\n\nUpon completion of the returned operation:\n\n * Billing for all successfully-allocated resources begins (some types\n may have lower than the requested levels).\n * Databases can be created in the instance.\n * The instance's allocated resource levels are readable via the API.\n * The instance's state becomes `READY`.\n\nThe returned long-running operation will\nhave a name of the format `\u003cinstance_name\u003e/operations/\u003coperation_id\u003e` and\ncan be used to track creation of the instance. The\nmetadata field type is\nCreateInstanceMetadata.\nThe response field type is\nInstance, if successful.", + // "description": "Creates an instance and begins preparing it to begin serving. The returned long-running operation can be used to track the progress of preparing the new instance. The instance name is assigned by the caller. If the named instance already exists, `CreateInstance` returns `ALREADY_EXISTS`. 
Immediately upon completion of this request: * The instance is readable via the API, with all requested attributes but no allocated resources. Its state is `CREATING`. Until completion of the returned operation: * Cancelling the operation renders the instance immediately unreadable via the API. * The instance can be deleted. * All other attempts to modify the instance are rejected. Upon completion of the returned operation: * Billing for all successfully-allocated resources begins (some types may have lower than the requested levels). * Databases can be created in the instance. * The instance's allocated resource levels are readable via the API. * The instance's state becomes `READY`. The returned long-running operation will have a name of the format `/operations/` and can be used to track creation of the instance. The metadata field type is CreateInstanceMetadata. The response field type is Instance, if successful.", // "flatPath": "v1/projects/{projectsId}/instances", // "httpMethod": "POST", // "id": "spanner.projects.instances.create", @@ -5601,7 +4598,7 @@ func (c *ProjectsInstancesCreateCall) Do(opts ...googleapi.CallOption) (*Operati // ], // "parameters": { // "parent": { - // "description": "Required. The name of the project in which to create the instance. Values\nare of the form `projects/\u003cproject\u003e`.", + // "description": "Required. The name of the project in which to create the instance. Values are of the form `projects/`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -5633,18 +4630,11 @@ type ProjectsInstancesDeleteCall struct { header_ http.Header } -// Delete: Deletes an instance. -// -// Immediately upon completion of the request: -// -// * Billing ceases for all of the instance's reserved -// resources. -// -// Soon afterward: -// -// * The instance and *all of its databases* immediately and -// irrevocably disappear from the API. All data in the databases -// is permanently deleted. +// Delete: Deletes an instance. Immediately upon completion of the +// request: * Billing ceases for all of the instance's reserved +// resources. Soon afterward: * The instance and *all of its databases* +// immediately and irrevocably disappear from the API. All data in the +// databases is permanently deleted. func (r *ProjectsInstancesService) Delete(name string) *ProjectsInstancesDeleteCall { c := &ProjectsInstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5678,7 +4668,7 @@ func (c *ProjectsInstancesDeleteCall) Header() http.Header { func (c *ProjectsInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5737,7 +4727,7 @@ func (c *ProjectsInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, } return ret, nil // { - // "description": "Deletes an instance.\n\nImmediately upon completion of the request:\n\n * Billing ceases for all of the instance's reserved resources.\n\nSoon afterward:\n\n * The instance and *all of its databases* immediately and\n irrevocably disappear from the API. All data in the databases\n is permanently deleted.", + // "description": "Deletes an instance. Immediately upon completion of the request: * Billing ceases for all of the instance's reserved resources. 
Soon afterward: * The instance and *all of its databases* immediately and irrevocably disappear from the API. All data in the databases is permanently deleted.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}", // "httpMethod": "DELETE", // "id": "spanner.projects.instances.delete", @@ -5746,7 +4736,7 @@ func (c *ProjectsInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, // ], // "parameters": { // "name": { - // "description": "Required. The name of the instance to be deleted. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e`", + // "description": "Required. The name of the instance to be deleted. Values are of the form `projects//instances/`", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -5784,9 +4774,8 @@ func (r *ProjectsInstancesService) Get(name string) *ProjectsInstancesGetCall { } // FieldMask sets the optional parameter "fieldMask": If field_mask is -// present, specifies the subset of Instance fields that -// should be returned. -// If absent, all Instance fields are returned. +// present, specifies the subset of Instance fields that should be +// returned. If absent, all Instance fields are returned. func (c *ProjectsInstancesGetCall) FieldMask(fieldMask string) *ProjectsInstancesGetCall { c.urlParams_.Set("fieldMask", fieldMask) return c @@ -5829,7 +4818,7 @@ func (c *ProjectsInstancesGetCall) Header() http.Header { func (c *ProjectsInstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5900,13 +4889,13 @@ func (c *ProjectsInstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, // ], // "parameters": { // "fieldMask": { - // "description": "If field_mask is present, specifies the subset of Instance fields that\nshould be returned.\nIf absent, all Instance fields are returned.", + // "description": "If field_mask is present, specifies the subset of Instance fields that should be returned. If absent, all Instance fields are returned.", // "format": "google-fieldmask", // "location": "query", // "type": "string" // }, // "name": { - // "description": "Required. The name of the requested instance. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + // "description": "Required. The name of the requested instance. Values are of the form `projects//instances/`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -5937,12 +4926,9 @@ type ProjectsInstancesGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for an instance -// resource. Returns an empty -// policy if an instance exists but does not have a policy -// set. -// -// Authorization requires `spanner.instances.getIamPolicy` on -// resource. +// resource. Returns an empty policy if an instance exists but does not +// have a policy set. Authorization requires +// `spanner.instances.getIamPolicy` on resource. 
func (r *ProjectsInstancesService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsInstancesGetIamPolicyCall { c := &ProjectsInstancesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5977,7 +4963,7 @@ func (c *ProjectsInstancesGetIamPolicyCall) Header() http.Header { func (c *ProjectsInstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6041,7 +5027,7 @@ func (c *ProjectsInstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Gets the access control policy for an instance resource. Returns an empty\npolicy if an instance exists but does not have a policy set.\n\nAuthorization requires `spanner.instances.getIamPolicy` on\nresource.", + // "description": "Gets the access control policy for an instance resource. Returns an empty policy if an instance exists but does not have a policy set. Authorization requires `spanner.instances.getIamPolicy` on resource.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}:getIamPolicy", // "httpMethod": "POST", // "id": "spanner.projects.instances.getIamPolicy", @@ -6050,7 +5036,7 @@ func (c *ProjectsInstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + // "description": "REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -6091,45 +5077,33 @@ func (r *ProjectsInstancesService) List(parent string) *ProjectsInstancesListCal } // Filter sets the optional parameter "filter": An expression for -// filtering the results of the request. Filter rules are -// case insensitive. The fields eligible for filtering are: -// -// * `name` -// * `display_name` -// * `labels.key` where key is the name of a label -// -// Some examples of using filters are: -// -// * `name:*` --> The instance has a name. -// * `name:Howl` --> The instance's name contains the string "howl". -// * `name:HOWL` --> Equivalent to above. -// * `NAME:howl` --> Equivalent to above. -// * `labels.env:*` --> The instance has the label "env". -// * `labels.env:dev` --> The instance has the label "env" and the -// value of -// the label contains the string "dev". -// * `name:howl labels.env:dev` --> The instance's name contains -// "howl" and -// it has the label "env" with its -// value -// containing "dev". +// filtering the results of the request. Filter rules are case +// insensitive. The fields eligible for filtering are: * `name` * +// `display_name` * `labels.key` where key is the name of a label Some +// examples of using filters are: * `name:*` --> The instance has a +// name. * `name:Howl` --> The instance's name contains the string +// "howl". 
* `name:HOWL` --> Equivalent to above. * `NAME:howl` --> +// Equivalent to above. * `labels.env:*` --> The instance has the label +// "env". * `labels.env:dev` --> The instance has the label "env" and +// the value of the label contains the string "dev". * `name:howl +// labels.env:dev` --> The instance's name contains "howl" and it has +// the label "env" with its value containing "dev". func (c *ProjectsInstancesListCall) Filter(filter string) *ProjectsInstancesListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": Number of instances -// to be returned in the response. If 0 or less, defaults -// to the server's maximum allowed page size. +// to be returned in the response. If 0 or less, defaults to the +// server's maximum allowed page size. func (c *ProjectsInstancesListCall) PageSize(pageSize int64) *ProjectsInstancesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": If non-empty, -// `page_token` should contain a -// next_page_token from a -// previous ListInstancesResponse. +// `page_token` should contain a next_page_token from a previous +// ListInstancesResponse. func (c *ProjectsInstancesListCall) PageToken(pageToken string) *ProjectsInstancesListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -6172,7 +5146,7 @@ func (c *ProjectsInstancesListCall) Header() http.Header { func (c *ProjectsInstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6243,23 +5217,23 @@ func (c *ProjectsInstancesListCall) Do(opts ...googleapi.CallOption) (*ListInsta // ], // "parameters": { // "filter": { - // "description": "An expression for filtering the results of the request. Filter rules are\ncase insensitive. The fields eligible for filtering are:\n\n * `name`\n * `display_name`\n * `labels.key` where key is the name of a label\n\nSome examples of using filters are:\n\n * `name:*` --\u003e The instance has a name.\n * `name:Howl` --\u003e The instance's name contains the string \"howl\".\n * `name:HOWL` --\u003e Equivalent to above.\n * `NAME:howl` --\u003e Equivalent to above.\n * `labels.env:*` --\u003e The instance has the label \"env\".\n * `labels.env:dev` --\u003e The instance has the label \"env\" and the value of\n the label contains the string \"dev\".\n * `name:howl labels.env:dev` --\u003e The instance's name contains \"howl\" and\n it has the label \"env\" with its value\n containing \"dev\".", + // "description": "An expression for filtering the results of the request. Filter rules are case insensitive. The fields eligible for filtering are: * `name` * `display_name` * `labels.key` where key is the name of a label Some examples of using filters are: * `name:*` --\u003e The instance has a name. * `name:Howl` --\u003e The instance's name contains the string \"howl\". * `name:HOWL` --\u003e Equivalent to above. * `NAME:howl` --\u003e Equivalent to above. * `labels.env:*` --\u003e The instance has the label \"env\". * `labels.env:dev` --\u003e The instance has the label \"env\" and the value of the label contains the string \"dev\". 
* `name:howl labels.env:dev` --\u003e The instance's name contains \"howl\" and it has the label \"env\" with its value containing \"dev\".", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Number of instances to be returned in the response. If 0 or less, defaults\nto the server's maximum allowed page size.", + // "description": "Number of instances to be returned in the response. If 0 or less, defaults to the server's maximum allowed page size.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "If non-empty, `page_token` should contain a\nnext_page_token from a\nprevious ListInstancesResponse.", + // "description": "If non-empty, `page_token` should contain a next_page_token from a previous ListInstancesResponse.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The name of the project for which a list of instances is\nrequested. Values are of the form `projects/\u003cproject\u003e`.", + // "description": "Required. The name of the project for which a list of instances is requested. Values are of the form `projects/`.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -6311,54 +5285,28 @@ type ProjectsInstancesPatchCall struct { } // Patch: Updates an instance, and begins allocating or releasing -// resources -// as requested. The returned long-running -// operation can be used to track the -// progress of updating the instance. If the named instance does -// not -// exist, returns `NOT_FOUND`. -// -// Immediately upon completion of this request: -// -// * For resource types for which a decrease in the instance's -// allocation -// has been requested, billing is based on the newly-requested -// level. -// -// Until completion of the returned operation: -// -// * Cancelling the operation sets its metadata's -// cancel_time, and begins -// restoring resources to their pre-request values. The operation -// is guaranteed to succeed at undoing all resource changes, -// after which point it terminates with a `CANCELLED` status. -// * All other attempts to modify the instance are rejected. -// * Reading the instance via the API continues to give the -// pre-request -// resource levels. -// -// Upon completion of the returned operation: -// -// * Billing begins for all successfully-allocated resources (some -// types -// may have lower than the requested levels). -// * All newly-reserved resources are available for serving the -// instance's -// tables. -// * The instance's new resource levels are readable via the API. -// -// The returned long-running operation will -// have a name of the format `/operations/` -// and -// can be used to track the instance modification. The -// metadata field type is -// UpdateInstanceMetadata. -// The response field type is -// Instance, if successful. -// -// Authorization requires `spanner.instances.update` permission -// on -// resource name. +// resources as requested. The returned long-running operation can be +// used to track the progress of updating the instance. If the named +// instance does not exist, returns `NOT_FOUND`. Immediately upon +// completion of this request: * For resource types for which a decrease +// in the instance's allocation has been requested, billing is based on +// the newly-requested level. 
Until completion of the returned +// operation: * Cancelling the operation sets its metadata's +// cancel_time, and begins restoring resources to their pre-request +// values. The operation is guaranteed to succeed at undoing all +// resource changes, after which point it terminates with a `CANCELLED` +// status. * All other attempts to modify the instance are rejected. * +// Reading the instance via the API continues to give the pre-request +// resource levels. Upon completion of the returned operation: * Billing +// begins for all successfully-allocated resources (some types may have +// lower than the requested levels). * All newly-reserved resources are +// available for serving the instance's tables. * The instance's new +// resource levels are readable via the API. The returned long-running +// operation will have a name of the format `/operations/` and can be +// used to track the instance modification. The metadata field type is +// UpdateInstanceMetadata. The response field type is Instance, if +// successful. Authorization requires `spanner.instances.update` +// permission on resource name. func (r *ProjectsInstancesService) Patch(nameid string, updateinstancerequest *UpdateInstanceRequest) *ProjectsInstancesPatchCall { c := &ProjectsInstancesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.nameid = nameid @@ -6393,7 +5341,7 @@ func (c *ProjectsInstancesPatchCall) Header() http.Header { func (c *ProjectsInstancesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6457,7 +5405,7 @@ func (c *ProjectsInstancesPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates an instance, and begins allocating or releasing resources\nas requested. The returned long-running\noperation can be used to track the\nprogress of updating the instance. If the named instance does not\nexist, returns `NOT_FOUND`.\n\nImmediately upon completion of this request:\n\n * For resource types for which a decrease in the instance's allocation\n has been requested, billing is based on the newly-requested level.\n\nUntil completion of the returned operation:\n\n * Cancelling the operation sets its metadata's\n cancel_time, and begins\n restoring resources to their pre-request values. The operation\n is guaranteed to succeed at undoing all resource changes,\n after which point it terminates with a `CANCELLED` status.\n * All other attempts to modify the instance are rejected.\n * Reading the instance via the API continues to give the pre-request\n resource levels.\n\nUpon completion of the returned operation:\n\n * Billing begins for all successfully-allocated resources (some types\n may have lower than the requested levels).\n * All newly-reserved resources are available for serving the instance's\n tables.\n * The instance's new resource levels are readable via the API.\n\nThe returned long-running operation will\nhave a name of the format `\u003cinstance_name\u003e/operations/\u003coperation_id\u003e` and\ncan be used to track the instance modification. 
The\nmetadata field type is\nUpdateInstanceMetadata.\nThe response field type is\nInstance, if successful.\n\nAuthorization requires `spanner.instances.update` permission on\nresource name.", + // "description": "Updates an instance, and begins allocating or releasing resources as requested. The returned long-running operation can be used to track the progress of updating the instance. If the named instance does not exist, returns `NOT_FOUND`. Immediately upon completion of this request: * For resource types for which a decrease in the instance's allocation has been requested, billing is based on the newly-requested level. Until completion of the returned operation: * Cancelling the operation sets its metadata's cancel_time, and begins restoring resources to their pre-request values. The operation is guaranteed to succeed at undoing all resource changes, after which point it terminates with a `CANCELLED` status. * All other attempts to modify the instance are rejected. * Reading the instance via the API continues to give the pre-request resource levels. Upon completion of the returned operation: * Billing begins for all successfully-allocated resources (some types may have lower than the requested levels). * All newly-reserved resources are available for serving the instance's tables. * The instance's new resource levels are readable via the API. The returned long-running operation will have a name of the format `/operations/` and can be used to track the instance modification. The metadata field type is UpdateInstanceMetadata. The response field type is Instance, if successful. Authorization requires `spanner.instances.update` permission on resource name.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}", // "httpMethod": "PATCH", // "id": "spanner.projects.instances.patch", @@ -6466,7 +5414,7 @@ func (c *ProjectsInstancesPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // ], // "parameters": { // "name": { - // "description": "Required. A unique identifier for the instance, which cannot be changed\nafter the instance is created. Values are of the form\n`projects/\u003cproject\u003e/instances/a-z*[a-z0-9]`. The final\nsegment of the name must be between 2 and 64 characters in length.", + // "description": "Required. A unique identifier for the instance, which cannot be changed after the instance is created. Values are of the form `projects//instances/a-z*[a-z0-9]`. The final segment of the name must be between 2 and 64 characters in length.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -6500,11 +5448,8 @@ type ProjectsInstancesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on an instance resource. -// Replaces any -// existing policy. -// -// Authorization requires `spanner.instances.setIamPolicy` on -// resource. +// Replaces any existing policy. Authorization requires +// `spanner.instances.setIamPolicy` on resource. 
func (r *ProjectsInstancesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsInstancesSetIamPolicyCall { c := &ProjectsInstancesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6539,7 +5484,7 @@ func (c *ProjectsInstancesSetIamPolicyCall) Header() http.Header { func (c *ProjectsInstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6603,7 +5548,7 @@ func (c *ProjectsInstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Sets the access control policy on an instance resource. Replaces any\nexisting policy.\n\nAuthorization requires `spanner.instances.setIamPolicy` on\nresource.", + // "description": "Sets the access control policy on an instance resource. Replaces any existing policy. Authorization requires `spanner.instances.setIamPolicy` on resource.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}:setIamPolicy", // "httpMethod": "POST", // "id": "spanner.projects.instances.setIamPolicy", @@ -6612,7 +5557,7 @@ func (c *ProjectsInstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for databases resources.", + // "description": "REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for databases resources.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -6646,15 +5591,10 @@ type ProjectsInstancesTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that the caller has on the -// specified instance resource. -// -// Attempting this RPC on a non-existent Cloud Spanner instance resource -// will -// result in a NOT_FOUND error if the user has -// `spanner.instances.list` -// permission on the containing Google Cloud Project. Otherwise returns -// an -// empty set of permissions. +// specified instance resource. Attempting this RPC on a non-existent +// Cloud Spanner instance resource will result in a NOT_FOUND error if +// the user has `spanner.instances.list` permission on the containing +// Google Cloud Project. Otherwise returns an empty set of permissions. 
func (r *ProjectsInstancesService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsInstancesTestIamPermissionsCall { c := &ProjectsInstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6689,7 +5629,7 @@ func (c *ProjectsInstancesTestIamPermissionsCall) Header() http.Header { func (c *ProjectsInstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6753,7 +5693,7 @@ func (c *ProjectsInstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns permissions that the caller has on the specified instance resource.\n\nAttempting this RPC on a non-existent Cloud Spanner instance resource will\nresult in a NOT_FOUND error if the user has `spanner.instances.list`\npermission on the containing Google Cloud Project. Otherwise returns an\nempty set of permissions.", + // "description": "Returns permissions that the caller has on the specified instance resource. Attempting this RPC on a non-existent Cloud Spanner instance resource will result in a NOT_FOUND error if the user has `spanner.instances.list` permission on the containing Google Cloud Project. Otherwise returns an empty set of permissions.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}:testIamPermissions", // "httpMethod": "POST", // "id": "spanner.projects.instances.testIamPermissions", @@ -6762,7 +5702,7 @@ func (c *ProjectsInstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + // "description": "REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -6795,22 +5735,15 @@ type ProjectsInstancesBackupOperationsListCall struct { header_ http.Header } -// List: Lists the backup long-running operations in -// the given instance. A backup operation has a name of the -// form -// `projects//instances//backups//operati -// ons/`. -// The long-running operation -// metadata field type -// `metadata.type_url` describes the type of the metadata. Operations -// returned -// include those that have completed/failed/canceled within the last 7 -// days, -// and pending operations. Operations returned are ordered -// by +// List: Lists the backup long-running operations in the given instance. +// A backup operation has a name of the form +// `projects//instances//backups//operations/`. The long-running +// operation metadata field type `metadata.type_url` describes the type +// of the metadata. Operations returned include those that have +// completed/failed/canceled within the last 7 days, and pending +// operations. 
Operations returned are ordered by // `operation.metadata.value.progress.start_time` in descending order -// starting -// from the most recently started operation. +// starting from the most recently started operation. func (r *ProjectsInstancesBackupOperationsService) List(parent string) *ProjectsInstancesBackupOperationsListCall { c := &ProjectsInstancesBackupOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6818,73 +5751,50 @@ func (r *ProjectsInstancesBackupOperationsService) List(parent string) *Projects } // Filter sets the optional parameter "filter": An expression that -// filters the list of returned backup operations. -// -// A filter expression consists of a field name, a -// comparison operator, and a value for filtering. -// The value must be a string, a number, or a boolean. The comparison -// operator -// must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. -// Colon `:` is the contains operator. Filter rules are not case -// sensitive. -// -// The following fields in the operation -// are eligible for filtering: -// -// * `name` - The name of the long-running operation -// * `done` - False if the operation is in progress, else true. -// * `metadata.@type` - the type of metadata. For example, the type -// string -// for CreateBackupMetadata is -// +// filters the list of returned backup operations. A filter expression +// consists of a field name, a comparison operator, and a value for +// filtering. The value must be a string, a number, or a boolean. The +// comparison operator must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, +// or `:`. Colon `:` is the contains operator. Filter rules are not case +// sensitive. The following fields in the operation are eligible for +// filtering: * `name` - The name of the long-running operation * `done` +// - False if the operation is in progress, else true. * +// `metadata.@type` - the type of metadata. For example, the type string +// for CreateBackupMetadata is // `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMeta -// data`. -// * `metadata.` - any field in metadata.value. -// * `error` - Error associated with the long-running operation. -// * `response.@type` - the type of response. -// * `response.` - any field in response.value. -// -// You can combine multiple expressions by enclosing each expression -// in -// parentheses. By default, expressions are combined with AND logic, -// but -// you can specify AND, OR, and NOT logic explicitly. -// -// Here are a few examples: -// -// * `done:true` - The operation is complete. -// * `metadata.database:prod` - The database the backup was taken from -// has -// a name containing the string "prod". -// * +// data`. * `metadata.` - any field in metadata.value. * `error` - Error +// associated with the long-running operation. * `response.@type` - the +// type of response. * `response.` - any field in response.value. You +// can combine multiple expressions by enclosing each expression in +// parentheses. By default, expressions are combined with AND logic, but +// you can specify AND, OR, and NOT logic explicitly. Here are a few +// examples: * `done:true` - The operation is complete. * +// `metadata.database:prod` - The database the backup was taken from has +// a name containing the string "prod". * // `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1. -// CreateBackupMetadata) AND`
-// `(metadata.name:howl) AND`
-// `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` -//
-// `(error:*)` - Returns operations where: -// * The operation's metadata type is CreateBackupMetadata. -// * The backup name contains the string "howl". -// * The operation started before 2018-03-28T14:50:00Z. -// * The operation resulted in an error. +// CreateBackupMetadata) AND` \ `(metadata.name:howl) AND` \ +// `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ +// `(error:*)` - Returns operations where: * The operation's metadata +// type is CreateBackupMetadata. * The backup name contains the string +// "howl". * The operation started before 2018-03-28T14:50:00Z. * The +// operation resulted in an error. func (c *ProjectsInstancesBackupOperationsListCall) Filter(filter string) *ProjectsInstancesBackupOperationsListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": Number of operations -// to be returned in the response. If 0 or -// less, defaults to the server's maximum allowed page size. +// to be returned in the response. If 0 or less, defaults to the +// server's maximum allowed page size. func (c *ProjectsInstancesBackupOperationsListCall) PageSize(pageSize int64) *ProjectsInstancesBackupOperationsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": If non-empty, -// `page_token` should contain a -// next_page_token -// from a previous ListBackupOperationsResponse to the -// same `parent` and with the same `filter`. +// `page_token` should contain a next_page_token from a previous +// ListBackupOperationsResponse to the same `parent` and with the same +// `filter`. func (c *ProjectsInstancesBackupOperationsListCall) PageToken(pageToken string) *ProjectsInstancesBackupOperationsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -6927,7 +5837,7 @@ func (c *ProjectsInstancesBackupOperationsListCall) Header() http.Header { func (c *ProjectsInstancesBackupOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6989,7 +5899,7 @@ func (c *ProjectsInstancesBackupOperationsListCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Lists the backup long-running operations in\nthe given instance. A backup operation has a name of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/backups/\u003cbackup\u003e/operations/\u003coperation\u003e`.\nThe long-running operation\nmetadata field type\n`metadata.type_url` describes the type of the metadata. Operations returned\ninclude those that have completed/failed/canceled within the last 7 days,\nand pending operations. Operations returned are ordered by\n`operation.metadata.value.progress.start_time` in descending order starting\nfrom the most recently started operation.", + // "description": "Lists the backup long-running operations in the given instance. A backup operation has a name of the form `projects//instances//backups//operations/`. The long-running operation metadata field type `metadata.type_url` describes the type of the metadata. Operations returned include those that have completed/failed/canceled within the last 7 days, and pending operations. 
Operations returned are ordered by `operation.metadata.value.progress.start_time` in descending order starting from the most recently started operation.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backupOperations", // "httpMethod": "GET", // "id": "spanner.projects.instances.backupOperations.list", @@ -6998,23 +5908,23 @@ func (c *ProjectsInstancesBackupOperationsListCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "filter": { - // "description": "An expression that filters the list of returned backup operations.\n\nA filter expression consists of a field name, a\ncomparison operator, and a value for filtering.\nThe value must be a string, a number, or a boolean. The comparison operator\nmust be one of: `\u003c`, `\u003e`, `\u003c=`, `\u003e=`, `!=`, `=`, or `:`.\nColon `:` is the contains operator. Filter rules are not case sensitive.\n\nThe following fields in the operation\nare eligible for filtering:\n\n * `name` - The name of the long-running operation\n * `done` - False if the operation is in progress, else true.\n * `metadata.@type` - the type of metadata. For example, the type string\n for CreateBackupMetadata is\n `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`.\n * `metadata.\u003cfield_name\u003e` - any field in metadata.value.\n * `error` - Error associated with the long-running operation.\n * `response.@type` - the type of response.\n * `response.\u003cfield_name\u003e` - any field in response.value.\n\nYou can combine multiple expressions by enclosing each expression in\nparentheses. By default, expressions are combined with AND logic, but\nyou can specify AND, OR, and NOT logic explicitly.\n\nHere are a few examples:\n\n * `done:true` - The operation is complete.\n * `metadata.database:prod` - The database the backup was taken from has\n a name containing the string \"prod\".\n * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \u003cbr/\u003e\n `(metadata.name:howl) AND` \u003cbr/\u003e\n `(metadata.progress.start_time \u003c \\\"2018-03-28T14:50:00Z\\\") AND` \u003cbr/\u003e\n `(error:*)` - Returns operations where:\n * The operation's metadata type is CreateBackupMetadata.\n * The backup name contains the string \"howl\".\n * The operation started before 2018-03-28T14:50:00Z.\n * The operation resulted in an error.", + // "description": "An expression that filters the list of returned backup operations. A filter expression consists of a field name, a comparison operator, and a value for filtering. The value must be a string, a number, or a boolean. The comparison operator must be one of: `\u003c`, `\u003e`, `\u003c=`, `\u003e=`, `!=`, `=`, or `:`. Colon `:` is the contains operator. Filter rules are not case sensitive. The following fields in the operation are eligible for filtering: * `name` - The name of the long-running operation * `done` - False if the operation is in progress, else true. * `metadata.@type` - the type of metadata. For example, the type string for CreateBackupMetadata is `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`. * `metadata.` - any field in metadata.value. * `error` - Error associated with the long-running operation. * `response.@type` - the type of response. * `response.` - any field in response.value. You can combine multiple expressions by enclosing each expression in parentheses. By default, expressions are combined with AND logic, but you can specify AND, OR, and NOT logic explicitly. 
Here are a few examples: * `done:true` - The operation is complete. * `metadata.database:prod` - The database the backup was taken from has a name containing the string \"prod\". * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \\ `(metadata.name:howl) AND` \\ `(metadata.progress.start_time \u003c \\\"2018-03-28T14:50:00Z\\\") AND` \\ `(error:*)` - Returns operations where: * The operation's metadata type is CreateBackupMetadata. * The backup name contains the string \"howl\". * The operation started before 2018-03-28T14:50:00Z. * The operation resulted in an error.", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Number of operations to be returned in the response. If 0 or\nless, defaults to the server's maximum allowed page size.", + // "description": "Number of operations to be returned in the response. If 0 or less, defaults to the server's maximum allowed page size.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "If non-empty, `page_token` should contain a\nnext_page_token\nfrom a previous ListBackupOperationsResponse to the\nsame `parent` and with the same `filter`.", + // "description": "If non-empty, `page_token` should contain a next_page_token from a previous ListBackupOperationsResponse to the same `parent` and with the same `filter`.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The instance of the backup operations. Values are of\nthe form `projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + // "description": "Required. The instance of the backup operations. Values are of the form `projects//instances/`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -7065,22 +5975,15 @@ type ProjectsInstancesBackupsCreateCall struct { header_ http.Header } -// Create: Starts creating a new Cloud Spanner Backup. -// The returned backup long-running operation -// will have a name of the -// format -// `projects//instances//backups//opera -// tions/` -// and can be used to track creation of the backup. The -// metadata field type is -// CreateBackupMetadata. The -// response field type is -// Backup, if successful. Cancelling the returned operation will stop -// the -// creation and delete the backup. -// There can be only one pending backup creation per database. Backup -// creation -// of different databases can run concurrently. +// Create: Starts creating a new Cloud Spanner Backup. The returned +// backup long-running operation will have a name of the format +// `projects//instances//backups//operations/` and can be used to track +// creation of the backup. The metadata field type is +// CreateBackupMetadata. The response field type is Backup, if +// successful. Cancelling the returned operation will stop the creation +// and delete the backup. There can be only one pending backup creation +// per database. Backup creation of different databases can run +// concurrently. func (r *ProjectsInstancesBackupsService) Create(parent string, backup *Backup) *ProjectsInstancesBackupsCreateCall { c := &ProjectsInstancesBackupsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -7089,10 +5992,8 @@ func (r *ProjectsInstancesBackupsService) Create(parent string, backup *Backup) } // BackupId sets the optional parameter "backupId": Required. The id of -// the backup to be created. 
The `backup_id` appended to -// `parent` forms the full backup name of the -// form -// `projects//instances//backups/`. +// the backup to be created. The `backup_id` appended to `parent` forms +// the full backup name of the form `projects//instances//backups/`. func (c *ProjectsInstancesBackupsCreateCall) BackupId(backupId string) *ProjectsInstancesBackupsCreateCall { c.urlParams_.Set("backupId", backupId) return c @@ -7125,7 +6026,7 @@ func (c *ProjectsInstancesBackupsCreateCall) Header() http.Header { func (c *ProjectsInstancesBackupsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7189,7 +6090,7 @@ func (c *ProjectsInstancesBackupsCreateCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Starts creating a new Cloud Spanner Backup.\nThe returned backup long-running operation\nwill have a name of the format\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/backups/\u003cbackup\u003e/operations/\u003coperation_id\u003e`\nand can be used to track creation of the backup. The\nmetadata field type is\nCreateBackupMetadata. The\nresponse field type is\nBackup, if successful. Cancelling the returned operation will stop the\ncreation and delete the backup.\nThere can be only one pending backup creation per database. Backup creation\nof different databases can run concurrently.", + // "description": "Starts creating a new Cloud Spanner Backup. The returned backup long-running operation will have a name of the format `projects//instances//backups//operations/` and can be used to track creation of the backup. The metadata field type is CreateBackupMetadata. The response field type is Backup, if successful. Cancelling the returned operation will stop the creation and delete the backup. There can be only one pending backup creation per database. Backup creation of different databases can run concurrently.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups", // "httpMethod": "POST", // "id": "spanner.projects.instances.backups.create", @@ -7198,12 +6099,12 @@ func (c *ProjectsInstancesBackupsCreateCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "backupId": { - // "description": "Required. The id of the backup to be created. The `backup_id` appended to\n`parent` forms the full backup name of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/backups/\u003cbackup_id\u003e`.", + // "description": "Required. The id of the backup to be created. The `backup_id` appended to `parent` forms the full backup name of the form `projects//instances//backups/`.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The name of the instance in which the backup will be\ncreated. This must be the same instance that contains the database the\nbackup will be created from. The backup will be stored in the\nlocation(s) specified in the instance configuration of this\ninstance. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + // "description": "Required. The name of the instance in which the backup will be created. This must be the same instance that contains the database the backup will be created from. 
The backup will be stored in the location(s) specified in the instance configuration of this instance. Values are of the form `projects//instances/`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -7269,7 +6170,7 @@ func (c *ProjectsInstancesBackupsDeleteCall) Header() http.Header { func (c *ProjectsInstancesBackupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7337,7 +6238,7 @@ func (c *ProjectsInstancesBackupsDeleteCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "name": { - // "description": "Required. Name of the backup to delete.\nValues are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/backups/\u003cbackup\u003e`.", + // "description": "Required. Name of the backup to delete. Values are of the form `projects//instances//backups/`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/backups/[^/]+$", // "required": true, @@ -7411,7 +6312,7 @@ func (c *ProjectsInstancesBackupsGetCall) Header() http.Header { func (c *ProjectsInstancesBackupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7482,7 +6383,7 @@ func (c *ProjectsInstancesBackupsGetCall) Do(opts ...googleapi.CallOption) (*Bac // ], // "parameters": { // "name": { - // "description": "Required. Name of the backup.\nValues are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/backups/\u003cbackup\u003e`.", + // "description": "Required. Name of the backup. Values are of the form `projects//instances//backups/`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/backups/[^/]+$", // "required": true, @@ -7513,17 +6414,11 @@ type ProjectsInstancesBackupsGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for a database or backup +// resource. Returns an empty policy if a database or backup exists but +// does not have a policy set. Authorization requires +// `spanner.databases.getIamPolicy` permission on resource. For backups, +// authorization requires `spanner.backups.getIamPolicy` permission on // resource. -// Returns an empty policy if a database or backup exists but does not -// have a -// policy set. -// -// Authorization requires `spanner.databases.getIamPolicy` permission -// on -// resource. -// For backups, authorization requires -// `spanner.backups.getIamPolicy` -// permission on resource. 
func (r *ProjectsInstancesBackupsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsInstancesBackupsGetIamPolicyCall { c := &ProjectsInstancesBackupsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7558,7 +6453,7 @@ func (c *ProjectsInstancesBackupsGetIamPolicyCall) Header() http.Header { func (c *ProjectsInstancesBackupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7622,7 +6517,7 @@ func (c *ProjectsInstancesBackupsGetIamPolicyCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Gets the access control policy for a database or backup resource.\nReturns an empty policy if a database or backup exists but does not have a\npolicy set.\n\nAuthorization requires `spanner.databases.getIamPolicy` permission on\nresource.\nFor backups, authorization requires `spanner.backups.getIamPolicy`\npermission on resource.", + // "description": "Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups/{backupsId}:getIamPolicy", // "httpMethod": "POST", // "id": "spanner.projects.instances.backups.getIamPolicy", @@ -7631,7 +6526,7 @@ func (c *ProjectsInstancesBackupsGetIamPolicyCall) Do(opts ...googleapi.CallOpti // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + // "description": "REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/backups/[^/]+$", // "required": true, @@ -7664,10 +6559,9 @@ type ProjectsInstancesBackupsListCall struct { header_ http.Header } -// List: Lists completed and pending backups. -// Backups returned are ordered by `create_time` in descending -// order, -// starting from the most recent `create_time`. +// List: Lists completed and pending backups. Backups returned are +// ordered by `create_time` in descending order, starting from the most +// recent `create_time`. func (r *ProjectsInstancesBackupsService) List(parent string) *ProjectsInstancesBackupsListCall { c := &ProjectsInstancesBackupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -7675,68 +6569,44 @@ func (r *ProjectsInstancesBackupsService) List(parent string) *ProjectsInstances } // Filter sets the optional parameter "filter": An expression that -// filters the list of returned backups. -// -// A filter expression consists of a field name, a comparison operator, -// and a -// value for filtering. 
-// The value must be a string, a number, or a boolean. The comparison -// operator -// must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. +// filters the list of returned backups. A filter expression consists of +// a field name, a comparison operator, and a value for filtering. The +// value must be a string, a number, or a boolean. The comparison +// operator must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. // Colon `:` is the contains operator. Filter rules are not case -// sensitive. -// -// The following fields in the Backup are eligible for filtering: -// -// * `name` -// * `database` -// * `state` -// * `create_time` (and values are of the format -// YYYY-MM-DDTHH:MM:SSZ) -// * `expire_time` (and values are of the format -// YYYY-MM-DDTHH:MM:SSZ) -// * `size_bytes` -// -// You can combine multiple expressions by enclosing each expression -// in -// parentheses. By default, expressions are combined with AND logic, -// but -// you can specify AND, OR, and NOT logic explicitly. -// -// Here are a few examples: -// -// * `name:Howl` - The backup's name contains the string "howl". -// * `database:prod` -// - The database's name contains the string "prod". -// * `state:CREATING` - The backup is pending creation. -// * `state:READY` - The backup is fully created and ready for use. -// * `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")` -// - The backup name contains the string "howl" and -// `create_time` -// of the backup is before 2018-03-28T14:50:00Z. -// * `expire_time < \"2018-03-28T14:50:00Z\" -// - The backup `expire_time` is before 2018-03-28T14:50:00Z. -// * `size_bytes > 10000000000` - The backup's size is greater than -// 10GB +// sensitive. The following fields in the Backup are eligible for +// filtering: * `name` * `database` * `state` * `create_time` (and +// values are of the format YYYY-MM-DDTHH:MM:SSZ) * `expire_time` (and +// values are of the format YYYY-MM-DDTHH:MM:SSZ) * `size_bytes` You can +// combine multiple expressions by enclosing each expression in +// parentheses. By default, expressions are combined with AND logic, but +// you can specify AND, OR, and NOT logic explicitly. Here are a few +// examples: * `name:Howl` - The backup's name contains the string +// "howl". * `database:prod` - The database's name contains the string +// "prod". * `state:CREATING` - The backup is pending creation. * +// `state:READY` - The backup is fully created and ready for use. * +// `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")` - The +// backup name contains the string "howl" and `create_time` of the +// backup is before 2018-03-28T14:50:00Z. * `expire_time < +// \"2018-03-28T14:50:00Z\" - The backup `expire_time` is before +// 2018-03-28T14:50:00Z. * `size_bytes > 10000000000` - The backup's +// size is greater than 10GB func (c *ProjectsInstancesBackupsListCall) Filter(filter string) *ProjectsInstancesBackupsListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": Number of backups to -// be returned in the response. If 0 or -// less, defaults to the server's maximum allowed page size. +// be returned in the response. If 0 or less, defaults to the server's +// maximum allowed page size. 
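(Aside for reviewers of this vendored bump, not part of the diff itself: the filter and paging knobs reflowed above are plain setters on the generated ListCall. A minimal sketch of driving them from the vendored google.golang.org/api/spanner/v1 package follows; the project and instance names are hypothetical, and credentials are assumed to come from Application Default Credentials.)

package main

import (
	"context"
	"fmt"
	"log"

	spanner "google.golang.org/api/spanner/v1"
)

func main() {
	ctx := context.Background()
	svc, err := spanner.NewService(ctx) // assumes Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	parent := "projects/example-project/instances/example-instance" // hypothetical
	// Ask only for READY backups; the server orders results by create_time,
	// newest first, as the comment above describes.
	resp, err := svc.Projects.Instances.Backups.List(parent).
		Filter("state:READY").
		PageSize(50).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range resp.Backups {
		fmt.Println(b.Name, b.ExpireTime)
	}
}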
func (c *ProjectsInstancesBackupsListCall) PageSize(pageSize int64) *ProjectsInstancesBackupsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": If non-empty, -// `page_token` should contain a -// next_page_token from a -// previous ListBackupsResponse to the same `parent` and with the -// same -// `filter`. +// `page_token` should contain a next_page_token from a previous +// ListBackupsResponse to the same `parent` and with the same `filter`. func (c *ProjectsInstancesBackupsListCall) PageToken(pageToken string) *ProjectsInstancesBackupsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -7779,7 +6649,7 @@ func (c *ProjectsInstancesBackupsListCall) Header() http.Header { func (c *ProjectsInstancesBackupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7841,7 +6711,7 @@ func (c *ProjectsInstancesBackupsListCall) Do(opts ...googleapi.CallOption) (*Li } return ret, nil // { - // "description": "Lists completed and pending backups.\nBackups returned are ordered by `create_time` in descending order,\nstarting from the most recent `create_time`.", + // "description": "Lists completed and pending backups. Backups returned are ordered by `create_time` in descending order, starting from the most recent `create_time`.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups", // "httpMethod": "GET", // "id": "spanner.projects.instances.backups.list", @@ -7850,23 +6720,23 @@ func (c *ProjectsInstancesBackupsListCall) Do(opts ...googleapi.CallOption) (*Li // ], // "parameters": { // "filter": { - // "description": "An expression that filters the list of returned backups.\n\nA filter expression consists of a field name, a comparison operator, and a\nvalue for filtering.\nThe value must be a string, a number, or a boolean. The comparison operator\nmust be one of: `\u003c`, `\u003e`, `\u003c=`, `\u003e=`, `!=`, `=`, or `:`.\nColon `:` is the contains operator. Filter rules are not case sensitive.\n\nThe following fields in the Backup are eligible for filtering:\n\n * `name`\n * `database`\n * `state`\n * `create_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)\n * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)\n * `size_bytes`\n\nYou can combine multiple expressions by enclosing each expression in\nparentheses. By default, expressions are combined with AND logic, but\nyou can specify AND, OR, and NOT logic explicitly.\n\nHere are a few examples:\n\n * `name:Howl` - The backup's name contains the string \"howl\".\n * `database:prod`\n - The database's name contains the string \"prod\".\n * `state:CREATING` - The backup is pending creation.\n * `state:READY` - The backup is fully created and ready for use.\n * `(name:howl) AND (create_time \u003c \\\"2018-03-28T14:50:00Z\\\")`\n - The backup name contains the string \"howl\" and `create_time`\n of the backup is before 2018-03-28T14:50:00Z.\n * `expire_time \u003c \\\"2018-03-28T14:50:00Z\\\"`\n - The backup `expire_time` is before 2018-03-28T14:50:00Z.\n * `size_bytes \u003e 10000000000` - The backup's size is greater than 10GB", + // "description": "An expression that filters the list of returned backups. 
A filter expression consists of a field name, a comparison operator, and a value for filtering. The value must be a string, a number, or a boolean. The comparison operator must be one of: `\u003c`, `\u003e`, `\u003c=`, `\u003e=`, `!=`, `=`, or `:`. Colon `:` is the contains operator. Filter rules are not case sensitive. The following fields in the Backup are eligible for filtering: * `name` * `database` * `state` * `create_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * `size_bytes` You can combine multiple expressions by enclosing each expression in parentheses. By default, expressions are combined with AND logic, but you can specify AND, OR, and NOT logic explicitly. Here are a few examples: * `name:Howl` - The backup's name contains the string \"howl\". * `database:prod` - The database's name contains the string \"prod\". * `state:CREATING` - The backup is pending creation. * `state:READY` - The backup is fully created and ready for use. * `(name:howl) AND (create_time \u003c \\\"2018-03-28T14:50:00Z\\\")` - The backup name contains the string \"howl\" and `create_time` of the backup is before 2018-03-28T14:50:00Z. * `expire_time \u003c \\\"2018-03-28T14:50:00Z\\\"` - The backup `expire_time` is before 2018-03-28T14:50:00Z. * `size_bytes \u003e 10000000000` - The backup's size is greater than 10GB", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Number of backups to be returned in the response. If 0 or\nless, defaults to the server's maximum allowed page size.", + // "description": "Number of backups to be returned in the response. If 0 or less, defaults to the server's maximum allowed page size.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "If non-empty, `page_token` should contain a\nnext_page_token from a\nprevious ListBackupsResponse to the same `parent` and with the same\n`filter`.", + // "description": "If non-empty, `page_token` should contain a next_page_token from a previous ListBackupsResponse to the same `parent` and with the same `filter`.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The instance to list backups from. Values are of the\nform `projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + // "description": "Required. The instance to list backups from. Values are of the form `projects//instances/`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -7926,14 +6796,11 @@ func (r *ProjectsInstancesBackupsService) Patch(nameid string, backup *Backup) * } // UpdateMask sets the optional parameter "updateMask": Required. A mask -// specifying which fields (e.g. `expire_time`) in the -// Backup resource should be updated. This mask is relative to the -// Backup -// resource, not to the request message. The field mask must always -// be -// specified; this prevents any future fields from being erased -// accidentally -// by clients that do not know about them. +// specifying which fields (e.g. `expire_time`) in the Backup resource +// should be updated. This mask is relative to the Backup resource, not +// to the request message. The field mask must always be specified; this +// prevents any future fields from being erased accidentally by clients +// that do not know about them. 
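(Illustrative only: the UpdateMask requirement described above, that the mask must always name the fields being changed, looks like this in use. The snippet reuses the svc handle and import from the earlier sketch; the backup name and timestamp are hypothetical.)

func extendBackupRetention(svc *spanner.Service) error {
	name := "projects/example-project/instances/example-instance/backups/example-backup" // hypothetical
	patch := &spanner.Backup{ExpireTime: "2021-12-31T00:00:00Z"}
	// The mask is relative to the Backup resource and must always be
	// specified, so expire_time is listed explicitly here.
	updated, err := svc.Projects.Instances.Backups.Patch(name, patch).
		UpdateMask("expire_time").
		Do()
	if err != nil {
		return err
	}
	fmt.Println("new expire_time:", updated.ExpireTime)
	return nil
}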
func (c *ProjectsInstancesBackupsPatchCall) UpdateMask(updateMask string) *ProjectsInstancesBackupsPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -7966,7 +6833,7 @@ func (c *ProjectsInstancesBackupsPatchCall) Header() http.Header { func (c *ProjectsInstancesBackupsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8039,14 +6906,14 @@ func (c *ProjectsInstancesBackupsPatchCall) Do(opts ...googleapi.CallOption) (*B // ], // "parameters": { // "name": { - // "description": "Output only for the CreateBackup operation.\nRequired for the UpdateBackup operation.\n\nA globally unique identifier for the backup which cannot be\nchanged. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/backups/a-z*[a-z0-9]`\nThe final segment of the name must be between 2 and 60 characters\nin length.\n\nThe backup is stored in the location(s) specified in the instance\nconfiguration of the instance containing the backup, identified\nby the prefix of the backup name of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + // "description": "Output only for the CreateBackup operation. Required for the UpdateBackup operation. A globally unique identifier for the backup which cannot be changed. Values are of the form `projects//instances//backups/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length. The backup is stored in the location(s) specified in the instance configuration of the instance containing the backup, identified by the prefix of the backup name of the form `projects//instances/`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/backups/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { - // "description": "Required. A mask specifying which fields (e.g. `expire_time`) in the\nBackup resource should be updated. This mask is relative to the Backup\nresource, not to the request message. The field mask must always be\nspecified; this prevents any future fields from being erased accidentally\nby clients that do not know about them.", + // "description": "Required. A mask specifying which fields (e.g. `expire_time`) in the Backup resource should be updated. This mask is relative to the Backup resource, not to the request message. The field mask must always be specified; this prevents any future fields from being erased accidentally by clients that do not know about them.", // "format": "google-fieldmask", // "location": "query", // "type": "string" @@ -8079,14 +6946,10 @@ type ProjectsInstancesBackupsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on a database or backup +// resource. Replaces any existing policy. Authorization requires +// `spanner.databases.setIamPolicy` permission on resource. For backups, +// authorization requires `spanner.backups.setIamPolicy` permission on // resource. -// Replaces any existing policy. -// -// Authorization requires `spanner.databases.setIamPolicy` -// permission on resource. -// For backups, authorization requires -// `spanner.backups.setIamPolicy` -// permission on resource. 
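(Sketch, not part of the change: the getIamPolicy/setIamPolicy pair in this hunk is typically used as a read-modify-write. The role and member below are illustrative examples, not a statement about which roles exist; svc is the handle from the first sketch.)

func grantBackupReader(svc *spanner.Service) error {
	resource := "projects/example-project/instances/example-instance/backups/example-backup" // hypothetical
	policy, err := svc.Projects.Instances.Backups.GetIamPolicy(resource, &spanner.GetIamPolicyRequest{}).Do()
	if err != nil {
		return err
	}
	policy.Bindings = append(policy.Bindings, &spanner.Binding{
		Role:    "roles/spanner.backupAdmin",          // illustrative role
		Members: []string{"user:someone@example.com"}, // illustrative member
	})
	// SetIamPolicy replaces the whole policy, so the read-modify-write above
	// carries the existing bindings (and etag) forward.
	_, err = svc.Projects.Instances.Backups.SetIamPolicy(resource, &spanner.SetIamPolicyRequest{Policy: policy}).Do()
	return err
}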
func (r *ProjectsInstancesBackupsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsInstancesBackupsSetIamPolicyCall { c := &ProjectsInstancesBackupsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -8121,7 +6984,7 @@ func (c *ProjectsInstancesBackupsSetIamPolicyCall) Header() http.Header { func (c *ProjectsInstancesBackupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8185,7 +7048,7 @@ func (c *ProjectsInstancesBackupsSetIamPolicyCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Sets the access control policy on a database or backup resource.\nReplaces any existing policy.\n\nAuthorization requires `spanner.databases.setIamPolicy`\npermission on resource.\nFor backups, authorization requires `spanner.backups.setIamPolicy`\npermission on resource.", + // "description": "Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups/{backupsId}:setIamPolicy", // "httpMethod": "POST", // "id": "spanner.projects.instances.backups.setIamPolicy", @@ -8194,7 +7057,7 @@ func (c *ProjectsInstancesBackupsSetIamPolicyCall) Do(opts ...googleapi.CallOpti // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for databases resources.", + // "description": "REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for databases resources.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/backups/[^/]+$", // "required": true, @@ -8228,18 +7091,13 @@ type ProjectsInstancesBackupsTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that the caller has on the -// specified database or backup -// resource. -// -// Attempting this RPC on a non-existent Cloud Spanner database -// will -// result in a NOT_FOUND error if the user has -// `spanner.databases.list` permission on the containing Cloud -// Spanner instance. Otherwise returns an empty set of -// permissions. -// Calling this method on a backup that does not exist will -// result in a NOT_FOUND error if the user has -// `spanner.backups.list` permission on the containing instance. +// specified database or backup resource. Attempting this RPC on a +// non-existent Cloud Spanner database will result in a NOT_FOUND error +// if the user has `spanner.databases.list` permission on the containing +// Cloud Spanner instance. Otherwise returns an empty set of +// permissions. Calling this method on a backup that does not exist will +// result in a NOT_FOUND error if the user has `spanner.backups.list` +// permission on the containing instance. 
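(Usage illustration of the permission check documented above; the permission strings are examples only, and the resource name is hypothetical.)

func checkBackupPermissions(svc *spanner.Service) error {
	resource := "projects/example-project/instances/example-instance/backups/example-backup" // hypothetical
	req := &spanner.TestIamPermissionsRequest{
		Permissions: []string{"spanner.backups.get", "spanner.backups.restoreDatabase"},
	}
	resp, err := svc.Projects.Instances.Backups.TestIamPermissions(resource, req).Do()
	if err != nil {
		return err
	}
	// resp.Permissions echoes back the subset the caller actually holds.
	fmt.Println("granted:", resp.Permissions)
	return nil
}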
func (r *ProjectsInstancesBackupsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsInstancesBackupsTestIamPermissionsCall { c := &ProjectsInstancesBackupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -8274,7 +7132,7 @@ func (c *ProjectsInstancesBackupsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsInstancesBackupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8338,7 +7196,7 @@ func (c *ProjectsInstancesBackupsTestIamPermissionsCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Returns permissions that the caller has on the specified database or backup\nresource.\n\nAttempting this RPC on a non-existent Cloud Spanner database will\nresult in a NOT_FOUND error if the user has\n`spanner.databases.list` permission on the containing Cloud\nSpanner instance. Otherwise returns an empty set of permissions.\nCalling this method on a backup that does not exist will\nresult in a NOT_FOUND error if the user has\n`spanner.backups.list` permission on the containing instance.", + // "description": "Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups/{backupsId}:testIamPermissions", // "httpMethod": "POST", // "id": "spanner.projects.instances.backups.testIamPermissions", @@ -8347,7 +7205,7 @@ func (c *ProjectsInstancesBackupsTestIamPermissionsCall) Do(opts ...googleapi.Ca // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + // "description": "REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/backups/[^/]+$", // "required": true, @@ -8380,23 +7238,15 @@ type ProjectsInstancesBackupsOperationsCancelCall struct { } // Cancel: Starts asynchronous cancellation on a long-running operation. -// The server -// makes a best effort to cancel the operation, but success is -// not -// guaranteed. If the server doesn't support this method, it -// returns -// `google.rpc.Code.UNIMPLEMENTED`. Clients can -// use -// Operations.GetOperation or -// other methods to check whether the cancellation succeeded or whether -// the -// operation completed despite cancellation. 
On successful -// cancellation, -// the operation is not deleted; instead, it becomes an operation -// with -// an Operation.error value with a google.rpc.Status.code of -// 1, -// corresponding to `Code.CANCELLED`. +// The server makes a best effort to cancel the operation, but success +// is not guaranteed. If the server doesn't support this method, it +// returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use +// Operations.GetOperation or other methods to check whether the +// cancellation succeeded or whether the operation completed despite +// cancellation. On successful cancellation, the operation is not +// deleted; instead, it becomes an operation with an Operation.error +// value with a google.rpc.Status.code of 1, corresponding to +// `Code.CANCELLED`. func (r *ProjectsInstancesBackupsOperationsService) Cancel(name string) *ProjectsInstancesBackupsOperationsCancelCall { c := &ProjectsInstancesBackupsOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -8430,7 +7280,7 @@ func (c *ProjectsInstancesBackupsOperationsCancelCall) Header() http.Header { func (c *ProjectsInstancesBackupsOperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8489,7 +7339,7 @@ func (c *ProjectsInstancesBackupsOperationsCancelCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups/{backupsId}/operations/{operationsId}:cancel", // "httpMethod": "POST", // "id": "spanner.projects.instances.backups.operations.cancel", @@ -8528,12 +7378,9 @@ type ProjectsInstancesBackupsOperationsDeleteCall struct { } // Delete: Deletes a long-running operation. This method indicates that -// the client is -// no longer interested in the operation result. It does not cancel -// the -// operation. If the server doesn't support this method, it -// returns -// `google.rpc.Code.UNIMPLEMENTED`. +// the client is no longer interested in the operation result. It does +// not cancel the operation. 
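(Sketch against the generated surface, exercising the cancellation semantics described just above: cancel is best effort, and a successfully cancelled operation ends up with an Operation.error whose code is 1, CANCELLED. The operation name is hypothetical.)

func cancelBackupOperation(svc *spanner.Service) error {
	opName := "projects/example-project/instances/example-instance/backups/example-backup/operations/example-op" // hypothetical
	if _, err := svc.Projects.Instances.Backups.Operations.Cancel(opName).Do(); err != nil {
		return err
	}
	// Poll once for brevity; a real caller would loop with backoff until Done.
	op, err := svc.Projects.Instances.Backups.Operations.Get(opName).Do()
	if err != nil {
		return err
	}
	if op.Done && op.Error != nil && op.Error.Code == 1 {
		fmt.Println("operation was cancelled")
	}
	return nil
}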
If the server doesn't support this method, +// it returns `google.rpc.Code.UNIMPLEMENTED`. func (r *ProjectsInstancesBackupsOperationsService) Delete(name string) *ProjectsInstancesBackupsOperationsDeleteCall { c := &ProjectsInstancesBackupsOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -8567,7 +7414,7 @@ func (c *ProjectsInstancesBackupsOperationsDeleteCall) Header() http.Header { func (c *ProjectsInstancesBackupsOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8626,7 +7473,7 @@ func (c *ProjectsInstancesBackupsOperationsDeleteCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + // "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups/{backupsId}/operations/{operationsId}", // "httpMethod": "DELETE", // "id": "spanner.projects.instances.backups.operations.delete", @@ -8665,11 +7512,9 @@ type ProjectsInstancesBackupsOperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this -// method to poll the operation result at intervals as recommended by -// the API -// service. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. func (r *ProjectsInstancesBackupsOperationsService) Get(name string) *ProjectsInstancesBackupsOperationsGetCall { c := &ProjectsInstancesBackupsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -8713,7 +7558,7 @@ func (c *ProjectsInstancesBackupsOperationsGetCall) Header() http.Header { func (c *ProjectsInstancesBackupsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8775,7 +7620,7 @@ func (c *ProjectsInstancesBackupsOperationsGetCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups/{backupsId}/operations/{operationsId}", // "httpMethod": "GET", // "id": "spanner.projects.instances.backups.operations.get", @@ -8815,22 +7660,15 @@ type ProjectsInstancesBackupsOperationsListCall struct { } // List: Lists operations that match the specified filter in the -// request. If the -// server doesn't support this method, it returns -// `UNIMPLEMENTED`. -// -// NOTE: the `name` binding allows API services to override the -// binding -// to use different resource name schemes, such as `users/*/operations`. -// To -// override the binding, API services can add a binding such -// as -// "/v1/{name=users/*}/operations" to their service configuration. -// For backwards compatibility, the default name includes the -// operations -// collection id, however overriding users must ensure the name -// binding -// is the parent resource, without the operations collection id. +// request. If the server doesn't support this method, it returns +// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to +// override the binding to use different resource name schemes, such as +// `users/*/operations`. To override the binding, API services can add a +// binding such as "/v1/{name=users/*}/operations" to their service +// configuration. For backwards compatibility, the default name includes +// the operations collection id, however overriding users must ensure +// the name binding is the parent resource, without the operations +// collection id. func (r *ProjectsInstancesBackupsOperationsService) List(name string) *ProjectsInstancesBackupsOperationsListCall { c := &ProjectsInstancesBackupsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -8895,7 +7733,7 @@ func (c *ProjectsInstancesBackupsOperationsListCall) Header() http.Header { func (c *ProjectsInstancesBackupsOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8957,7 +7795,7 @@ func (c *ProjectsInstancesBackupsOperationsListCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. 
For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/backups/{backupsId}/operations", // "httpMethod": "GET", // "id": "spanner.projects.instances.backups.operations.list", @@ -9033,18 +7871,12 @@ type ProjectsInstancesDatabaseOperationsListCall struct { header_ http.Header } -// List: Lists database longrunning-operations. -// A database operation has a name of the -// form -// `projects//instances//databases//ope -// rations/`. -// The long-running operation -// metadata field type -// `metadata.type_url` describes the type of the metadata. Operations -// returned -// include those that have completed/failed/canceled within the last 7 -// days, -// and pending operations. +// List: Lists database longrunning-operations. A database operation has +// a name of the form `projects//instances//databases//operations/`. The +// long-running operation metadata field type `metadata.type_url` +// describes the type of the metadata. Operations returned include those +// that have completed/failed/canceled within the last 7 days, and +// pending operations. func (r *ProjectsInstancesDatabaseOperationsService) List(parent string) *ProjectsInstancesDatabaseOperationsListCall { c := &ProjectsInstancesDatabaseOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -9052,74 +7884,51 @@ func (r *ProjectsInstancesDatabaseOperationsService) List(parent string) *Projec } // Filter sets the optional parameter "filter": An expression that -// filters the list of returned operations. -// -// A filter expression consists of a field name, a -// comparison operator, and a value for filtering. +// filters the list of returned operations. A filter expression consists +// of a field name, a comparison operator, and a value for filtering. // The value must be a string, a number, or a boolean. The comparison -// operator -// must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. +// operator must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. // Colon `:` is the contains operator. Filter rules are not case -// sensitive. -// -// The following fields in the Operation -// are eligible for filtering: -// -// * `name` - The name of the long-running operation -// * `done` - False if the operation is in progress, else true. -// * `metadata.@type` - the type of metadata. For example, the type -// string -// for RestoreDatabaseMetadata is -// +// sensitive. The following fields in the Operation are eligible for +// filtering: * `name` - The name of the long-running operation * `done` +// - False if the operation is in progress, else true. * +// `metadata.@type` - the type of metadata. For example, the type string +// for RestoreDatabaseMetadata is // `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseM -// etadata`. -// * `metadata.` - any field in metadata.value. -// * `error` - Error associated with the long-running operation. -// * `response.@type` - the type of response. -// * `response.` - any field in response.value. -// -// You can combine multiple expressions by enclosing each expression -// in +// etadata`. * `metadata.` - any field in metadata.value. * `error` - +// Error associated with the long-running operation. * `response.@type` +// - the type of response. * `response.` - any field in response.value. 
+// You can combine multiple expressions by enclosing each expression in // parentheses. By default, expressions are combined with AND logic. -// However, -// you can specify AND, OR, and NOT logic explicitly. -// -// Here are a few examples: -// -// * `done:true` - The operation is complete. -// * +// However, you can specify AND, OR, and NOT logic explicitly. Here are +// a few examples: * `done:true` - The operation is complete. * // `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1. -// RestoreDatabaseMetadata) AND`
-// `(metadata.source_type:BACKUP) AND`
-// `(metadata.backup_info.backup:backup_howl) AND`
-// `(metadata.name:restored_howl) AND`
-// `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` -//
-// `(error:*)` - Return operations where: -// * The operation's metadata type is RestoreDatabaseMetadata. -// * The database is restored from a backup. -// * The backup name contains "backup_howl". -// * The restored database's name contains "restored_howl". -// * The operation started before 2018-03-28T14:50:00Z. -// * The operation resulted in an error. +// RestoreDatabaseMetadata) AND` \ `(metadata.source_type:BACKUP) AND` \ +// `(metadata.backup_info.backup:backup_howl) AND` \ +// `(metadata.name:restored_howl) AND` \ `(metadata.progress.start_time +// < \"2018-03-28T14:50:00Z\") AND` \ `(error:*)` - Return operations +// where: * The operation's metadata type is RestoreDatabaseMetadata. * +// The database is restored from a backup. * The backup name contains +// "backup_howl". * The restored database's name contains +// "restored_howl". * The operation started before 2018-03-28T14:50:00Z. +// * The operation resulted in an error. func (c *ProjectsInstancesDatabaseOperationsListCall) Filter(filter string) *ProjectsInstancesDatabaseOperationsListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": Number of operations -// to be returned in the response. If 0 or -// less, defaults to the server's maximum allowed page size. +// to be returned in the response. If 0 or less, defaults to the +// server's maximum allowed page size. func (c *ProjectsInstancesDatabaseOperationsListCall) PageSize(pageSize int64) *ProjectsInstancesDatabaseOperationsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": If non-empty, -// `page_token` should contain a -// next_page_token -// from a previous ListDatabaseOperationsResponse to the -// same `parent` and with the same `filter`. +// `page_token` should contain a next_page_token from a previous +// ListDatabaseOperationsResponse to the same `parent` and with the same +// `filter`. func (c *ProjectsInstancesDatabaseOperationsListCall) PageToken(pageToken string) *ProjectsInstancesDatabaseOperationsListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -9162,7 +7971,7 @@ func (c *ProjectsInstancesDatabaseOperationsListCall) Header() http.Header { func (c *ProjectsInstancesDatabaseOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9224,7 +8033,7 @@ func (c *ProjectsInstancesDatabaseOperationsListCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Lists database longrunning-operations.\nA database operation has a name of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/databases/\u003cdatabase\u003e/operations/\u003coperation\u003e`.\nThe long-running operation\nmetadata field type\n`metadata.type_url` describes the type of the metadata. Operations returned\ninclude those that have completed/failed/canceled within the last 7 days,\nand pending operations.", + // "description": "Lists database longrunning-operations. A database operation has a name of the form `projects//instances//databases//operations/`. The long-running operation metadata field type `metadata.type_url` describes the type of the metadata. 
Operations returned include those that have completed/failed/canceled within the last 7 days, and pending operations.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databaseOperations", // "httpMethod": "GET", // "id": "spanner.projects.instances.databaseOperations.list", @@ -9233,23 +8042,23 @@ func (c *ProjectsInstancesDatabaseOperationsListCall) Do(opts ...googleapi.CallO // ], // "parameters": { // "filter": { - // "description": "An expression that filters the list of returned operations.\n\nA filter expression consists of a field name, a\ncomparison operator, and a value for filtering.\nThe value must be a string, a number, or a boolean. The comparison operator\nmust be one of: `\u003c`, `\u003e`, `\u003c=`, `\u003e=`, `!=`, `=`, or `:`.\nColon `:` is the contains operator. Filter rules are not case sensitive.\n\nThe following fields in the Operation\nare eligible for filtering:\n\n * `name` - The name of the long-running operation\n * `done` - False if the operation is in progress, else true.\n * `metadata.@type` - the type of metadata. For example, the type string\n for RestoreDatabaseMetadata is\n `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`.\n * `metadata.\u003cfield_name\u003e` - any field in metadata.value.\n * `error` - Error associated with the long-running operation.\n * `response.@type` - the type of response.\n * `response.\u003cfield_name\u003e` - any field in response.value.\n\nYou can combine multiple expressions by enclosing each expression in\nparentheses. By default, expressions are combined with AND logic. However,\nyou can specify AND, OR, and NOT logic explicitly.\n\nHere are a few examples:\n\n * `done:true` - The operation is complete.\n * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \u003cbr/\u003e\n `(metadata.source_type:BACKUP) AND` \u003cbr/\u003e\n `(metadata.backup_info.backup:backup_howl) AND` \u003cbr/\u003e\n `(metadata.name:restored_howl) AND` \u003cbr/\u003e\n `(metadata.progress.start_time \u003c \\\"2018-03-28T14:50:00Z\\\") AND` \u003cbr/\u003e\n `(error:*)` - Return operations where:\n * The operation's metadata type is RestoreDatabaseMetadata.\n * The database is restored from a backup.\n * The backup name contains \"backup_howl\".\n * The restored database's name contains \"restored_howl\".\n * The operation started before 2018-03-28T14:50:00Z.\n * The operation resulted in an error.", + // "description": "An expression that filters the list of returned operations. A filter expression consists of a field name, a comparison operator, and a value for filtering. The value must be a string, a number, or a boolean. The comparison operator must be one of: `\u003c`, `\u003e`, `\u003c=`, `\u003e=`, `!=`, `=`, or `:`. Colon `:` is the contains operator. Filter rules are not case sensitive. The following fields in the Operation are eligible for filtering: * `name` - The name of the long-running operation * `done` - False if the operation is in progress, else true. * `metadata.@type` - the type of metadata. For example, the type string for RestoreDatabaseMetadata is `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. * `metadata.` - any field in metadata.value. * `error` - Error associated with the long-running operation. * `response.@type` - the type of response. * `response.` - any field in response.value. You can combine multiple expressions by enclosing each expression in parentheses. 
By default, expressions are combined with AND logic. However, you can specify AND, OR, and NOT logic explicitly. Here are a few examples: * `done:true` - The operation is complete. * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \\ `(metadata.source_type:BACKUP) AND` \\ `(metadata.backup_info.backup:backup_howl) AND` \\ `(metadata.name:restored_howl) AND` \\ `(metadata.progress.start_time \u003c \\\"2018-03-28T14:50:00Z\\\") AND` \\ `(error:*)` - Return operations where: * The operation's metadata type is RestoreDatabaseMetadata. * The database is restored from a backup. * The backup name contains \"backup_howl\". * The restored database's name contains \"restored_howl\". * The operation started before 2018-03-28T14:50:00Z. * The operation resulted in an error.", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Number of operations to be returned in the response. If 0 or\nless, defaults to the server's maximum allowed page size.", + // "description": "Number of operations to be returned in the response. If 0 or less, defaults to the server's maximum allowed page size.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "If non-empty, `page_token` should contain a\nnext_page_token\nfrom a previous ListDatabaseOperationsResponse to the\nsame `parent` and with the same `filter`.", + // "description": "If non-empty, `page_token` should contain a next_page_token from a previous ListDatabaseOperationsResponse to the same `parent` and with the same `filter`.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The instance of the database operations.\nValues are of the form `projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + // "description": "Required. The instance of the database operations. Values are of the form `projects//instances/`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -9301,15 +8110,10 @@ type ProjectsInstancesDatabasesCreateCall struct { } // Create: Creates a new Cloud Spanner database and starts to prepare it -// for serving. -// The returned long-running operation will -// have a name of the format `/operations/` -// and -// can be used to track preparation of the database. The -// metadata field type is -// CreateDatabaseMetadata. The -// response field type is -// Database, if successful. +// for serving. The returned long-running operation will have a name of +// the format `/operations/` and can be used to track preparation of the +// database. The metadata field type is CreateDatabaseMetadata. The +// response field type is Database, if successful. 
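(To make the long-running shape of CreateDatabase concrete, a hedged sketch that issues the create and inspects the returned operation; names and DDL are hypothetical, and a real caller would poll the operation via the operations service until it completes.)

func createDatabase(svc *spanner.Service) error {
	parent := "projects/example-project/instances/example-instance" // hypothetical
	req := &spanner.CreateDatabaseRequest{
		CreateStatement: "CREATE DATABASE `example-db`", // illustrative DDL
	}
	op, err := svc.Projects.Instances.Databases.Create(parent, req).Do()
	if err != nil {
		return err
	}
	// op.Name identifies the preparation operation described above and can be
	// polled until op.Done reports true.
	fmt.Println("tracking operation:", op.Name, "done:", op.Done)
	return nil
}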
func (r *ProjectsInstancesDatabasesService) Create(parent string, createdatabaserequest *CreateDatabaseRequest) *ProjectsInstancesDatabasesCreateCall { c := &ProjectsInstancesDatabasesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -9344,7 +8148,7 @@ func (c *ProjectsInstancesDatabasesCreateCall) Header() http.Header { func (c *ProjectsInstancesDatabasesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9408,7 +8212,7 @@ func (c *ProjectsInstancesDatabasesCreateCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a new Cloud Spanner database and starts to prepare it for serving.\nThe returned long-running operation will\nhave a name of the format `\u003cdatabase_name\u003e/operations/\u003coperation_id\u003e` and\ncan be used to track preparation of the database. The\nmetadata field type is\nCreateDatabaseMetadata. The\nresponse field type is\nDatabase, if successful.", + // "description": "Creates a new Cloud Spanner database and starts to prepare it for serving. The returned long-running operation will have a name of the format `/operations/` and can be used to track preparation of the database. The metadata field type is CreateDatabaseMetadata. The response field type is Database, if successful.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.create", @@ -9417,7 +8221,7 @@ func (c *ProjectsInstancesDatabasesCreateCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "parent": { - // "description": "Required. The name of the instance that will serve the new database.\nValues are of the form `projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + // "description": "Required. The name of the instance that will serve the new database. Values are of the form `projects//instances/`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -9449,9 +8253,8 @@ type ProjectsInstancesDatabasesDropDatabaseCall struct { header_ http.Header } -// DropDatabase: Drops (aka deletes) a Cloud Spanner database. -// Completed backups for the database will be retained according to -// their +// DropDatabase: Drops (aka deletes) a Cloud Spanner database. Completed +// backups for the database will be retained according to their // `expire_time`. 
func (r *ProjectsInstancesDatabasesService) DropDatabase(database string) *ProjectsInstancesDatabasesDropDatabaseCall { c := &ProjectsInstancesDatabasesDropDatabaseCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -9486,7 +8289,7 @@ func (c *ProjectsInstancesDatabasesDropDatabaseCall) Header() http.Header { func (c *ProjectsInstancesDatabasesDropDatabaseCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9545,7 +8348,7 @@ func (c *ProjectsInstancesDatabasesDropDatabaseCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Drops (aka deletes) a Cloud Spanner database.\nCompleted backups for the database will be retained according to their\n`expire_time`.", + // "description": "Drops (aka deletes) a Cloud Spanner database. Completed backups for the database will be retained according to their `expire_time`.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}", // "httpMethod": "DELETE", // "id": "spanner.projects.instances.databases.dropDatabase", @@ -9628,7 +8431,7 @@ func (c *ProjectsInstancesDatabasesGetCall) Header() http.Header { func (c *ProjectsInstancesDatabasesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9699,7 +8502,7 @@ func (c *ProjectsInstancesDatabasesGetCall) Do(opts ...googleapi.CallOption) (*D // ], // "parameters": { // "name": { - // "description": "Required. The name of the requested database. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/databases/\u003cdatabase\u003e`.", + // "description": "Required. The name of the requested database. Values are of the form `projects//instances//databases/`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", // "required": true, @@ -9730,10 +8533,8 @@ type ProjectsInstancesDatabasesGetDdlCall struct { } // GetDdl: Returns the schema of a Cloud Spanner database as a list of -// formatted -// DDL statements. This method does not show pending schema updates, -// those may -// be queried using the Operations API. +// formatted DDL statements. This method does not show pending schema +// updates, those may be queried using the Operations API. 
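(Small illustrative sketch of the schema dump described above; database name hypothetical, svc as in the first sketch. Pending schema changes are not included in this response, per the comment.)

func dumpSchema(svc *spanner.Service) error {
	db := "projects/example-project/instances/example-instance/databases/example-db" // hypothetical
	ddl, err := svc.Projects.Instances.Databases.GetDdl(db).Do()
	if err != nil {
		return err
	}
	// Each entry is a formatted DDL statement for the current schema.
	for _, stmt := range ddl.Statements {
		fmt.Println(stmt + ";")
	}
	return nil
}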
func (r *ProjectsInstancesDatabasesService) GetDdl(database string) *ProjectsInstancesDatabasesGetDdlCall { c := &ProjectsInstancesDatabasesGetDdlCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.database = database @@ -9777,7 +8578,7 @@ func (c *ProjectsInstancesDatabasesGetDdlCall) Header() http.Header { func (c *ProjectsInstancesDatabasesGetDdlCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9839,7 +8640,7 @@ func (c *ProjectsInstancesDatabasesGetDdlCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Returns the schema of a Cloud Spanner database as a list of formatted\nDDL statements. This method does not show pending schema updates, those may\nbe queried using the Operations API.", + // "description": "Returns the schema of a Cloud Spanner database as a list of formatted DDL statements. This method does not show pending schema updates, those may be queried using the Operations API.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/ddl", // "httpMethod": "GET", // "id": "spanner.projects.instances.databases.getDdl", @@ -9848,7 +8649,7 @@ func (c *ProjectsInstancesDatabasesGetDdlCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "database": { - // "description": "Required. The database whose schema we wish to get.", + // "description": "Required. The database whose schema we wish to get. Values are of the form `projects//instances//databases/`", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", // "required": true, @@ -9879,17 +8680,11 @@ type ProjectsInstancesDatabasesGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for a database or backup +// resource. Returns an empty policy if a database or backup exists but +// does not have a policy set. Authorization requires +// `spanner.databases.getIamPolicy` permission on resource. For backups, +// authorization requires `spanner.backups.getIamPolicy` permission on // resource. -// Returns an empty policy if a database or backup exists but does not -// have a -// policy set. -// -// Authorization requires `spanner.databases.getIamPolicy` permission -// on -// resource. -// For backups, authorization requires -// `spanner.backups.getIamPolicy` -// permission on resource. 
func (r *ProjectsInstancesDatabasesService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsInstancesDatabasesGetIamPolicyCall { c := &ProjectsInstancesDatabasesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -9924,7 +8719,7 @@ func (c *ProjectsInstancesDatabasesGetIamPolicyCall) Header() http.Header { func (c *ProjectsInstancesDatabasesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9988,7 +8783,7 @@ func (c *ProjectsInstancesDatabasesGetIamPolicyCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Gets the access control policy for a database or backup resource.\nReturns an empty policy if a database or backup exists but does not have a\npolicy set.\n\nAuthorization requires `spanner.databases.getIamPolicy` permission on\nresource.\nFor backups, authorization requires `spanner.backups.getIamPolicy`\npermission on resource.", + // "description": "Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. Authorization requires `spanner.databases.getIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.getIamPolicy` permission on resource.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:getIamPolicy", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.getIamPolicy", @@ -9997,7 +8792,7 @@ func (c *ProjectsInstancesDatabasesGetIamPolicyCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + // "description": "REQUIRED: The Cloud Spanner resource for which the policy is being retrieved. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", // "required": true, @@ -10038,17 +8833,16 @@ func (r *ProjectsInstancesDatabasesService) List(parent string) *ProjectsInstanc } // PageSize sets the optional parameter "pageSize": Number of databases -// to be returned in the response. If 0 or less, -// defaults to the server's maximum allowed page size. +// to be returned in the response. If 0 or less, defaults to the +// server's maximum allowed page size. func (c *ProjectsInstancesDatabasesListCall) PageSize(pageSize int64) *ProjectsInstancesDatabasesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": If non-empty, -// `page_token` should contain a -// next_page_token from a -// previous ListDatabasesResponse. +// `page_token` should contain a next_page_token from a previous +// ListDatabasesResponse. 
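(The pageToken/next_page_token handshake described above is the usual list-pagination loop; sketched here under the same assumptions as the earlier snippets, with a hypothetical parent instance.)

func listAllDatabases(svc *spanner.Service) ([]*spanner.Database, error) {
	parent := "projects/example-project/instances/example-instance" // hypothetical
	var all []*spanner.Database
	token := ""
	for {
		call := svc.Projects.Instances.Databases.List(parent).PageSize(100)
		if token != "" {
			call = call.PageToken(token)
		}
		resp, err := call.Do()
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Databases...)
		if resp.NextPageToken == "" {
			return all, nil
		}
		token = resp.NextPageToken
	}
}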
func (c *ProjectsInstancesDatabasesListCall) PageToken(pageToken string) *ProjectsInstancesDatabasesListCall { c.urlParams_.Set("pageToken", pageToken) return c @@ -10091,7 +8885,7 @@ func (c *ProjectsInstancesDatabasesListCall) Header() http.Header { func (c *ProjectsInstancesDatabasesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10162,18 +8956,18 @@ func (c *ProjectsInstancesDatabasesListCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "pageSize": { - // "description": "Number of databases to be returned in the response. If 0 or less,\ndefaults to the server's maximum allowed page size.", + // "description": "Number of databases to be returned in the response. If 0 or less, defaults to the server's maximum allowed page size.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "If non-empty, `page_token` should contain a\nnext_page_token from a\nprevious ListDatabasesResponse.", + // "description": "If non-empty, `page_token` should contain a next_page_token from a previous ListDatabasesResponse.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The instance whose databases should be listed.\nValues are of the form `projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + // "description": "Required. The instance whose databases should be listed. Values are of the form `projects//instances/`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -10225,31 +9019,18 @@ type ProjectsInstancesDatabasesRestoreCall struct { } // Restore: Create a new database by restoring from a completed backup. -// The new -// database must be in the same project and in an instance with the -// same -// instance configuration as the instance containing -// the backup. The returned database long-running -// operation has a name of the -// format -// `projects//instances//databases//o -// perations/`, -// and can be used to track the progress of the operation, and to cancel -// it. -// The metadata field type is -// RestoreDatabaseMetadata. -// The response type -// is Database, if -// successful. Cancelling the returned operation will stop the restore -// and -// delete the database. -// There can be only one database being restored into an instance at a -// time. -// Once the restore operation completes, a new restore operation can -// be -// initiated, without waiting for the optimize operation associated with -// the -// first restore to complete. +// The new database must be in the same project and in an instance with +// the same instance configuration as the instance containing the +// backup. The returned database long-running operation has a name of +// the format `projects//instances//databases//operations/`, and can be +// used to track the progress of the operation, and to cancel it. The +// metadata field type is RestoreDatabaseMetadata. The response type is +// Database, if successful. Cancelling the returned operation will stop +// the restore and delete the database. There can be only one database +// being restored into an instance at a time. 
Once the restore operation +// completes, a new restore operation can be initiated, without waiting +// for the optimize operation associated with the first restore to +// complete. func (r *ProjectsInstancesDatabasesService) Restore(parent string, restoredatabaserequest *RestoreDatabaseRequest) *ProjectsInstancesDatabasesRestoreCall { c := &ProjectsInstancesDatabasesRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -10284,7 +9065,7 @@ func (c *ProjectsInstancesDatabasesRestoreCall) Header() http.Header { func (c *ProjectsInstancesDatabasesRestoreCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10348,7 +9129,7 @@ func (c *ProjectsInstancesDatabasesRestoreCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Create a new database by restoring from a completed backup. The new\ndatabase must be in the same project and in an instance with the same\ninstance configuration as the instance containing\nthe backup. The returned database long-running\noperation has a name of the format\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e/databases/\u003cdatabase\u003e/operations/\u003coperation_id\u003e`,\nand can be used to track the progress of the operation, and to cancel it.\nThe metadata field type is\nRestoreDatabaseMetadata.\nThe response type\nis Database, if\nsuccessful. Cancelling the returned operation will stop the restore and\ndelete the database.\nThere can be only one database being restored into an instance at a time.\nOnce the restore operation completes, a new restore operation can be\ninitiated, without waiting for the optimize operation associated with the\nfirst restore to complete.", + // "description": "Create a new database by restoring from a completed backup. The new database must be in the same project and in an instance with the same instance configuration as the instance containing the backup. The returned database long-running operation has a name of the format `projects//instances//databases//operations/`, and can be used to track the progress of the operation, and to cancel it. The metadata field type is RestoreDatabaseMetadata. The response type is Database, if successful. Cancelling the returned operation will stop the restore and delete the database. There can be only one database being restored into an instance at a time. Once the restore operation completes, a new restore operation can be initiated, without waiting for the optimize operation associated with the first restore to complete.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases:restore", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.restore", @@ -10357,7 +9138,7 @@ func (c *ProjectsInstancesDatabasesRestoreCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "parent": { - // "description": "Required. The name of the instance in which to create the\nrestored database. This instance must be in the same project and\nhave the same instance configuration as the instance containing\nthe source backup. Values are of the form\n`projects/\u003cproject\u003e/instances/\u003cinstance\u003e`.", + // "description": "Required. The name of the instance in which to create the restored database. 
This instance must be in the same project and have the same instance configuration as the instance containing the source backup. Values are of the form `projects//instances/`.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+$", // "required": true, @@ -10391,14 +9172,10 @@ type ProjectsInstancesDatabasesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on a database or backup +// resource. Replaces any existing policy. Authorization requires +// `spanner.databases.setIamPolicy` permission on resource. For backups, +// authorization requires `spanner.backups.setIamPolicy` permission on // resource. -// Replaces any existing policy. -// -// Authorization requires `spanner.databases.setIamPolicy` -// permission on resource. -// For backups, authorization requires -// `spanner.backups.setIamPolicy` -// permission on resource. func (r *ProjectsInstancesDatabasesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsInstancesDatabasesSetIamPolicyCall { c := &ProjectsInstancesDatabasesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -10433,7 +9210,7 @@ func (c *ProjectsInstancesDatabasesSetIamPolicyCall) Header() http.Header { func (c *ProjectsInstancesDatabasesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10497,7 +9274,7 @@ func (c *ProjectsInstancesDatabasesSetIamPolicyCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Sets the access control policy on a database or backup resource.\nReplaces any existing policy.\n\nAuthorization requires `spanner.databases.setIamPolicy`\npermission on resource.\nFor backups, authorization requires `spanner.backups.setIamPolicy`\npermission on resource.", + // "description": "Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on resource. For backups, authorization requires `spanner.backups.setIamPolicy` permission on resource.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:setIamPolicy", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.setIamPolicy", @@ -10506,7 +9283,7 @@ func (c *ProjectsInstancesDatabasesSetIamPolicyCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for databases resources.", + // "description": "REQUIRED: The Cloud Spanner resource for which the policy is being set. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for databases resources.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", // "required": true, @@ -10540,18 +9317,13 @@ type ProjectsInstancesDatabasesTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that the caller has on the -// specified database or backup -// resource. 
-// -// Attempting this RPC on a non-existent Cloud Spanner database -// will -// result in a NOT_FOUND error if the user has -// `spanner.databases.list` permission on the containing Cloud -// Spanner instance. Otherwise returns an empty set of -// permissions. -// Calling this method on a backup that does not exist will -// result in a NOT_FOUND error if the user has -// `spanner.backups.list` permission on the containing instance. +// specified database or backup resource. Attempting this RPC on a +// non-existent Cloud Spanner database will result in a NOT_FOUND error +// if the user has `spanner.databases.list` permission on the containing +// Cloud Spanner instance. Otherwise returns an empty set of +// permissions. Calling this method on a backup that does not exist will +// result in a NOT_FOUND error if the user has `spanner.backups.list` +// permission on the containing instance. func (r *ProjectsInstancesDatabasesService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsInstancesDatabasesTestIamPermissionsCall { c := &ProjectsInstancesDatabasesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -10586,7 +9358,7 @@ func (c *ProjectsInstancesDatabasesTestIamPermissionsCall) Header() http.Header func (c *ProjectsInstancesDatabasesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10650,7 +9422,7 @@ func (c *ProjectsInstancesDatabasesTestIamPermissionsCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Returns permissions that the caller has on the specified database or backup\nresource.\n\nAttempting this RPC on a non-existent Cloud Spanner database will\nresult in a NOT_FOUND error if the user has\n`spanner.databases.list` permission on the containing Cloud\nSpanner instance. Otherwise returns an empty set of permissions.\nCalling this method on a backup that does not exist will\nresult in a NOT_FOUND error if the user has\n`spanner.backups.list` permission on the containing instance.", + // "description": "Returns permissions that the caller has on the specified database or backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:testIamPermissions", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.testIamPermissions", @@ -10659,7 +9431,7 @@ func (c *ProjectsInstancesDatabasesTestIamPermissionsCall) Do(opts ...googleapi. // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The Cloud Spanner resource for which permissions are being tested. 
The format is `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e` for instance resources and `projects/\u003cproject ID\u003e/instances/\u003cinstance ID\u003e/databases/\u003cdatabase ID\u003e` for database resources.", + // "description": "REQUIRED: The Cloud Spanner resource for which permissions are being tested. The format is `projects//instances/` for instance resources and `projects//instances//databases/` for database resources.", // "location": "path", // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", // "required": true, @@ -10692,16 +9464,12 @@ type ProjectsInstancesDatabasesUpdateDdlCall struct { header_ http.Header } -// UpdateDdl: Updates the schema of a Cloud Spanner database -// by +// UpdateDdl: Updates the schema of a Cloud Spanner database by // creating/altering/dropping tables, columns, indexes, etc. The -// returned -// long-running operation will have a name of -// the format `/operations/` and can be -// used to -// track execution of the schema change(s). The -// metadata field type is -// UpdateDatabaseDdlMetadata. The operation has no response. +// returned long-running operation will have a name of the format +// `/operations/` and can be used to track execution of the schema +// change(s). The metadata field type is UpdateDatabaseDdlMetadata. The +// operation has no response. func (r *ProjectsInstancesDatabasesService) UpdateDdl(database string, updatedatabaseddlrequest *UpdateDatabaseDdlRequest) *ProjectsInstancesDatabasesUpdateDdlCall { c := &ProjectsInstancesDatabasesUpdateDdlCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.database = database @@ -10736,7 +9504,7 @@ func (c *ProjectsInstancesDatabasesUpdateDdlCall) Header() http.Header { func (c *ProjectsInstancesDatabasesUpdateDdlCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10800,7 +9568,7 @@ func (c *ProjectsInstancesDatabasesUpdateDdlCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Updates the schema of a Cloud Spanner database by\ncreating/altering/dropping tables, columns, indexes, etc. The returned\nlong-running operation will have a name of\nthe format `\u003cdatabase_name\u003e/operations/\u003coperation_id\u003e` and can be used to\ntrack execution of the schema change(s). The\nmetadata field type is\nUpdateDatabaseDdlMetadata. The operation has no response.", + // "description": "Updates the schema of a Cloud Spanner database by creating/altering/dropping tables, columns, indexes, etc. The returned long-running operation will have a name of the format `/operations/` and can be used to track execution of the schema change(s). The metadata field type is UpdateDatabaseDdlMetadata. The operation has no response.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/ddl", // "httpMethod": "PATCH", // "id": "spanner.projects.instances.databases.updateDdl", @@ -10842,23 +9610,15 @@ type ProjectsInstancesDatabasesOperationsCancelCall struct { } // Cancel: Starts asynchronous cancellation on a long-running operation. -// The server -// makes a best effort to cancel the operation, but success is -// not -// guaranteed. If the server doesn't support this method, it -// returns -// `google.rpc.Code.UNIMPLEMENTED`. 
Clients can -// use -// Operations.GetOperation or -// other methods to check whether the cancellation succeeded or whether -// the -// operation completed despite cancellation. On successful -// cancellation, -// the operation is not deleted; instead, it becomes an operation -// with -// an Operation.error value with a google.rpc.Status.code of -// 1, -// corresponding to `Code.CANCELLED`. +// The server makes a best effort to cancel the operation, but success +// is not guaranteed. If the server doesn't support this method, it +// returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use +// Operations.GetOperation or other methods to check whether the +// cancellation succeeded or whether the operation completed despite +// cancellation. On successful cancellation, the operation is not +// deleted; instead, it becomes an operation with an Operation.error +// value with a google.rpc.Status.code of 1, corresponding to +// `Code.CANCELLED`. func (r *ProjectsInstancesDatabasesOperationsService) Cancel(name string) *ProjectsInstancesDatabasesOperationsCancelCall { c := &ProjectsInstancesDatabasesOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -10892,7 +9652,7 @@ func (c *ProjectsInstancesDatabasesOperationsCancelCall) Header() http.Header { func (c *ProjectsInstancesDatabasesOperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10951,7 +9711,7 @@ func (c *ProjectsInstancesDatabasesOperationsCancelCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations/{operationsId}:cancel", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.operations.cancel", @@ -10990,12 +9750,9 @@ type ProjectsInstancesDatabasesOperationsDeleteCall struct { } // Delete: Deletes a long-running operation. This method indicates that -// the client is -// no longer interested in the operation result. It does not cancel -// the -// operation. 
If the server doesn't support this method, it -// returns -// `google.rpc.Code.UNIMPLEMENTED`. +// the client is no longer interested in the operation result. It does +// not cancel the operation. If the server doesn't support this method, +// it returns `google.rpc.Code.UNIMPLEMENTED`. func (r *ProjectsInstancesDatabasesOperationsService) Delete(name string) *ProjectsInstancesDatabasesOperationsDeleteCall { c := &ProjectsInstancesDatabasesOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -11029,7 +9786,7 @@ func (c *ProjectsInstancesDatabasesOperationsDeleteCall) Header() http.Header { func (c *ProjectsInstancesDatabasesOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11088,7 +9845,7 @@ func (c *ProjectsInstancesDatabasesOperationsDeleteCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + // "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations/{operationsId}", // "httpMethod": "DELETE", // "id": "spanner.projects.instances.databases.operations.delete", @@ -11127,11 +9884,9 @@ type ProjectsInstancesDatabasesOperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this -// method to poll the operation result at intervals as recommended by -// the API -// service. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. func (r *ProjectsInstancesDatabasesOperationsService) Get(name string) *ProjectsInstancesDatabasesOperationsGetCall { c := &ProjectsInstancesDatabasesOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -11175,7 +9930,7 @@ func (c *ProjectsInstancesDatabasesOperationsGetCall) Header() http.Header { func (c *ProjectsInstancesDatabasesOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11237,7 +9992,7 @@ func (c *ProjectsInstancesDatabasesOperationsGetCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations/{operationsId}", // "httpMethod": "GET", // "id": "spanner.projects.instances.databases.operations.get", @@ -11277,22 +10032,15 @@ type ProjectsInstancesDatabasesOperationsListCall struct { } // List: Lists operations that match the specified filter in the -// request. If the -// server doesn't support this method, it returns -// `UNIMPLEMENTED`. -// -// NOTE: the `name` binding allows API services to override the -// binding -// to use different resource name schemes, such as `users/*/operations`. -// To -// override the binding, API services can add a binding such -// as -// "/v1/{name=users/*}/operations" to their service configuration. -// For backwards compatibility, the default name includes the -// operations -// collection id, however overriding users must ensure the name -// binding -// is the parent resource, without the operations collection id. +// request. If the server doesn't support this method, it returns +// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to +// override the binding to use different resource name schemes, such as +// `users/*/operations`. To override the binding, API services can add a +// binding such as "/v1/{name=users/*}/operations" to their service +// configuration. For backwards compatibility, the default name includes +// the operations collection id, however overriding users must ensure +// the name binding is the parent resource, without the operations +// collection id. func (r *ProjectsInstancesDatabasesOperationsService) List(name string) *ProjectsInstancesDatabasesOperationsListCall { c := &ProjectsInstancesDatabasesOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -11357,7 +10105,7 @@ func (c *ProjectsInstancesDatabasesOperationsListCall) Header() http.Header { func (c *ProjectsInstancesDatabasesOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11419,7 +10167,7 @@ func (c *ProjectsInstancesDatabasesOperationsListCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. 
For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations", // "httpMethod": "GET", // "id": "spanner.projects.instances.databases.operations.list", @@ -11495,12 +10243,9 @@ type ProjectsInstancesDatabasesSessionsBatchCreateCall struct { header_ http.Header } -// BatchCreate: Creates multiple new sessions. -// -// This API can be used to initialize a session cache on the -// clients. -// See https://goo.gl/TgSFN2 for best practices on session cache -// management. +// BatchCreate: Creates multiple new sessions. This API can be used to +// initialize a session cache on the clients. See https://goo.gl/TgSFN2 +// for best practices on session cache management. func (r *ProjectsInstancesDatabasesSessionsService) BatchCreate(database string, batchcreatesessionsrequest *BatchCreateSessionsRequest) *ProjectsInstancesDatabasesSessionsBatchCreateCall { c := &ProjectsInstancesDatabasesSessionsBatchCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.database = database @@ -11535,7 +10280,7 @@ func (c *ProjectsInstancesDatabasesSessionsBatchCreateCall) Header() http.Header func (c *ProjectsInstancesDatabasesSessionsBatchCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11599,7 +10344,7 @@ func (c *ProjectsInstancesDatabasesSessionsBatchCreateCall) Do(opts ...googleapi } return ret, nil // { - // "description": "Creates multiple new sessions.\n\nThis API can be used to initialize a session cache on the clients.\nSee https://goo.gl/TgSFN2 for best practices on session cache management.", + // "description": "Creates multiple new sessions. This API can be used to initialize a session cache on the clients. See https://goo.gl/TgSFN2 for best practices on session cache management.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions:batchCreate", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.sessions.batchCreate", @@ -11642,9 +10387,7 @@ type ProjectsInstancesDatabasesSessionsBeginTransactionCall struct { } // BeginTransaction: Begins a new transaction. This step can often be -// skipped: -// Read, ExecuteSql and -// Commit can begin a new transaction as a +// skipped: Read, ExecuteSql and Commit can begin a new transaction as a // side-effect. 
func (r *ProjectsInstancesDatabasesSessionsService) BeginTransaction(session string, begintransactionrequest *BeginTransactionRequest) *ProjectsInstancesDatabasesSessionsBeginTransactionCall { c := &ProjectsInstancesDatabasesSessionsBeginTransactionCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -11680,7 +10423,7 @@ func (c *ProjectsInstancesDatabasesSessionsBeginTransactionCall) Header() http.H func (c *ProjectsInstancesDatabasesSessionsBeginTransactionCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11744,7 +10487,7 @@ func (c *ProjectsInstancesDatabasesSessionsBeginTransactionCall) Do(opts ...goog } return ret, nil // { - // "description": "Begins a new transaction. This step can often be skipped:\nRead, ExecuteSql and\nCommit can begin a new transaction as a\nside-effect.", + // "description": "Begins a new transaction. This step can often be skipped: Read, ExecuteSql and Commit can begin a new transaction as a side-effect.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:beginTransaction", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.sessions.beginTransaction", @@ -11787,17 +10530,17 @@ type ProjectsInstancesDatabasesSessionsCommitCall struct { } // Commit: Commits a transaction. The request includes the mutations to -// be -// applied to rows in the database. -// -// `Commit` might return an `ABORTED` error. This can occur at any -// time; -// commonly, the cause is conflicts with concurrent -// transactions. However, it can also happen for a variety of -// other -// reasons. If `Commit` returns `ABORTED`, the caller should -// re-attempt -// the transaction from the beginning, re-using the same session. +// be applied to rows in the database. `Commit` might return an +// `ABORTED` error. This can occur at any time; commonly, the cause is +// conflicts with concurrent transactions. However, it can also happen +// for a variety of other reasons. If `Commit` returns `ABORTED`, the +// caller should re-attempt the transaction from the beginning, re-using +// the same session. On very rare occasions, `Commit` might return +// `UNKNOWN`. This can happen, for example, if the client job +// experiences a 1+ hour networking failure. At that point, Cloud +// Spanner has lost track of the transaction outcome and we recommend +// that you perform another read from the database to see the state of +// things as they are now. 
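(Editorial aside, not part of the vendored diff: the reflowed Commit documentation above says an ABORTED commit should be retried from the beginning on the same session. The sketch below shows one way to drive that retry loop with this generated client; treating HTTP 409 as the REST mapping of ABORTED is an assumption, as are the helper name and the five-attempt cap.)

import (
	"context"
	"fmt"
	"net/http"

	"google.golang.org/api/googleapi"
	spanner "google.golang.org/api/spanner/v1"
)

// commitWithRetry re-runs the caller's transaction whenever Commit reports
// ABORTED. buildCommit is expected to begin a fresh transaction each time and
// return the mutations to apply.
func commitWithRetry(ctx context.Context, svc *spanner.Service, session string,
	buildCommit func() (*spanner.CommitRequest, error)) (*spanner.CommitResponse, error) {
	for attempt := 0; attempt < 5; attempt++ {
		req, err := buildCommit()
		if err != nil {
			return nil, err
		}
		resp, err := svc.Projects.Instances.Databases.Sessions.Commit(session, req).Context(ctx).Do()
		if err == nil {
			return resp, nil
		}
		// Assumption: this REST surface reports ABORTED as HTTP 409 (Conflict).
		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusConflict {
			continue // restart the transaction from the beginning, same session
		}
		return nil, err
	}
	return nil, fmt.Errorf("commit kept aborting; giving up")
}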
func (r *ProjectsInstancesDatabasesSessionsService) Commit(session string, commitrequest *CommitRequest) *ProjectsInstancesDatabasesSessionsCommitCall { c := &ProjectsInstancesDatabasesSessionsCommitCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.session = session @@ -11832,7 +10575,7 @@ func (c *ProjectsInstancesDatabasesSessionsCommitCall) Header() http.Header { func (c *ProjectsInstancesDatabasesSessionsCommitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11896,7 +10639,7 @@ func (c *ProjectsInstancesDatabasesSessionsCommitCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Commits a transaction. The request includes the mutations to be\napplied to rows in the database.\n\n`Commit` might return an `ABORTED` error. This can occur at any time;\ncommonly, the cause is conflicts with concurrent\ntransactions. However, it can also happen for a variety of other\nreasons. If `Commit` returns `ABORTED`, the caller should re-attempt\nthe transaction from the beginning, re-using the same session.", + // "description": "Commits a transaction. The request includes the mutations to be applied to rows in the database. `Commit` might return an `ABORTED` error. This can occur at any time; commonly, the cause is conflicts with concurrent transactions. However, it can also happen for a variety of other reasons. If `Commit` returns `ABORTED`, the caller should re-attempt the transaction from the beginning, re-using the same session. On very rare occasions, `Commit` might return `UNKNOWN`. This can happen, for example, if the client job experiences a 1+ hour networking failure. At that point, Cloud Spanner has lost track of the transaction outcome and we recommend that you perform another read from the database to see the state of things as they are now.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:commit", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.sessions.commit", @@ -11938,36 +10681,19 @@ type ProjectsInstancesDatabasesSessionsCreateCall struct { header_ http.Header } -// Create: Creates a new session. A session can be used to -// perform +// Create: Creates a new session. A session can be used to perform // transactions that read and/or modify data in a Cloud Spanner -// database. -// Sessions are meant to be reused for many -// consecutive -// transactions. -// -// Sessions can only execute one transaction at a time. To -// execute -// multiple concurrent read-write/write-only transactions, -// create -// multiple sessions. Note that standalone reads and queries use -// a -// transaction internally, and count toward the one -// transaction -// limit. -// +// database. Sessions are meant to be reused for many consecutive +// transactions. Sessions can only execute one transaction at a time. To +// execute multiple concurrent read-write/write-only transactions, +// create multiple sessions. Note that standalone reads and queries use +// a transaction internally, and count toward the one transaction limit. // Active sessions use additional server resources, so it is a good idea -// to -// delete idle and unneeded sessions. 
-// Aside from explicit deletes, Cloud Spanner may delete sessions for -// which no -// operations are sent for more than an hour. If a session is -// deleted, -// requests to it return `NOT_FOUND`. -// -// Idle sessions can be kept alive by sending a trivial SQL -// query -// periodically, e.g., "SELECT 1". +// to delete idle and unneeded sessions. Aside from explicit deletes, +// Cloud Spanner may delete sessions for which no operations are sent +// for more than an hour. If a session is deleted, requests to it return +// `NOT_FOUND`. Idle sessions can be kept alive by sending a trivial SQL +// query periodically, e.g., "SELECT 1". func (r *ProjectsInstancesDatabasesSessionsService) Create(database string, createsessionrequest *CreateSessionRequest) *ProjectsInstancesDatabasesSessionsCreateCall { c := &ProjectsInstancesDatabasesSessionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.database = database @@ -12002,7 +10728,7 @@ func (c *ProjectsInstancesDatabasesSessionsCreateCall) Header() http.Header { func (c *ProjectsInstancesDatabasesSessionsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12066,7 +10792,7 @@ func (c *ProjectsInstancesDatabasesSessionsCreateCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Creates a new session. A session can be used to perform\ntransactions that read and/or modify data in a Cloud Spanner database.\nSessions are meant to be reused for many consecutive\ntransactions.\n\nSessions can only execute one transaction at a time. To execute\nmultiple concurrent read-write/write-only transactions, create\nmultiple sessions. Note that standalone reads and queries use a\ntransaction internally, and count toward the one transaction\nlimit.\n\nActive sessions use additional server resources, so it is a good idea to\ndelete idle and unneeded sessions.\nAside from explicit deletes, Cloud Spanner may delete sessions for which no\noperations are sent for more than an hour. If a session is deleted,\nrequests to it return `NOT_FOUND`.\n\nIdle sessions can be kept alive by sending a trivial SQL query\nperiodically, e.g., `\"SELECT 1\"`.", + // "description": "Creates a new session. A session can be used to perform transactions that read and/or modify data in a Cloud Spanner database. Sessions are meant to be reused for many consecutive transactions. Sessions can only execute one transaction at a time. To execute multiple concurrent read-write/write-only transactions, create multiple sessions. Note that standalone reads and queries use a transaction internally, and count toward the one transaction limit. Active sessions use additional server resources, so it is a good idea to delete idle and unneeded sessions. Aside from explicit deletes, Cloud Spanner may delete sessions for which no operations are sent for more than an hour. If a session is deleted, requests to it return `NOT_FOUND`. 
Idle sessions can be kept alive by sending a trivial SQL query periodically, e.g., `\"SELECT 1\"`.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.sessions.create", @@ -12108,10 +10834,8 @@ type ProjectsInstancesDatabasesSessionsDeleteCall struct { } // Delete: Ends a session, releasing server resources associated with -// it. This will -// asynchronously trigger cancellation of any operations that are -// running with -// this session. +// it. This will asynchronously trigger cancellation of any operations +// that are running with this session. func (r *ProjectsInstancesDatabasesSessionsService) Delete(name string) *ProjectsInstancesDatabasesSessionsDeleteCall { c := &ProjectsInstancesDatabasesSessionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -12145,7 +10869,7 @@ func (c *ProjectsInstancesDatabasesSessionsDeleteCall) Header() http.Header { func (c *ProjectsInstancesDatabasesSessionsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12204,7 +10928,7 @@ func (c *ProjectsInstancesDatabasesSessionsDeleteCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Ends a session, releasing server resources associated with it. This will\nasynchronously trigger cancellation of any operations that are running with\nthis session.", + // "description": "Ends a session, releasing server resources associated with it. This will asynchronously trigger cancellation of any operations that are running with this session.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}", // "httpMethod": "DELETE", // "id": "spanner.projects.instances.databases.sessions.delete", @@ -12244,22 +10968,14 @@ type ProjectsInstancesDatabasesSessionsExecuteBatchDmlCall struct { } // ExecuteBatchDml: Executes a batch of SQL DML statements. This method -// allows many statements -// to be run with lower latency than submitting them sequentially -// with -// ExecuteSql. -// -// Statements are executed in sequential order. A request can succeed -// even if -// a statement fails. The ExecuteBatchDmlResponse.status field in -// the -// response provides information about the statement that failed. -// Clients must -// inspect this field to determine whether an error occurred. -// -// Execution stops after the first failed statement; the remaining -// statements -// are not executed. +// allows many statements to be run with lower latency than submitting +// them sequentially with ExecuteSql. Statements are executed in +// sequential order. A request can succeed even if a statement fails. +// The ExecuteBatchDmlResponse.status field in the response provides +// information about the statement that failed. Clients must inspect +// this field to determine whether an error occurred. Execution stops +// after the first failed statement; the remaining statements are not +// executed. 
func (r *ProjectsInstancesDatabasesSessionsService) ExecuteBatchDml(session string, executebatchdmlrequest *ExecuteBatchDmlRequest) *ProjectsInstancesDatabasesSessionsExecuteBatchDmlCall { c := &ProjectsInstancesDatabasesSessionsExecuteBatchDmlCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.session = session @@ -12294,7 +11010,7 @@ func (c *ProjectsInstancesDatabasesSessionsExecuteBatchDmlCall) Header() http.He func (c *ProjectsInstancesDatabasesSessionsExecuteBatchDmlCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12358,7 +11074,7 @@ func (c *ProjectsInstancesDatabasesSessionsExecuteBatchDmlCall) Do(opts ...googl } return ret, nil // { - // "description": "Executes a batch of SQL DML statements. This method allows many statements\nto be run with lower latency than submitting them sequentially with\nExecuteSql.\n\nStatements are executed in sequential order. A request can succeed even if\na statement fails. The ExecuteBatchDmlResponse.status field in the\nresponse provides information about the statement that failed. Clients must\ninspect this field to determine whether an error occurred.\n\nExecution stops after the first failed statement; the remaining statements\nare not executed.", + // "description": "Executes a batch of SQL DML statements. This method allows many statements to be run with lower latency than submitting them sequentially with ExecuteSql. Statements are executed in sequential order. A request can succeed even if a statement fails. The ExecuteBatchDmlResponse.status field in the response provides information about the statement that failed. Clients must inspect this field to determine whether an error occurred. Execution stops after the first failed statement; the remaining statements are not executed.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:executeBatchDml", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.sessions.executeBatchDml", @@ -12401,19 +11117,13 @@ type ProjectsInstancesDatabasesSessionsExecuteSqlCall struct { } // ExecuteSql: Executes an SQL statement, returning all results in a -// single reply. This -// method cannot be used to return a result set larger than 10 MiB; -// if the query yields more data than that, the query fails with -// a `FAILED_PRECONDITION` error. -// -// Operations inside read-write transactions might return `ABORTED`. -// If -// this occurs, the application should restart the transaction from -// the beginning. See Transaction for more details. -// -// Larger result sets can be fetched in streaming fashion by -// calling -// ExecuteStreamingSql instead. +// single reply. This method cannot be used to return a result set +// larger than 10 MiB; if the query yields more data than that, the +// query fails with a `FAILED_PRECONDITION` error. Operations inside +// read-write transactions might return `ABORTED`. If this occurs, the +// application should restart the transaction from the beginning. See +// Transaction for more details. Larger result sets can be fetched in +// streaming fashion by calling ExecuteStreamingSql instead. 
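(Editorial aside, not part of the vendored diff: the Create and ExecuteSql documentation reflowed above describes session reuse and the trivial "SELECT 1" keep-alive. A small hypothetical helper tying those calls together with this generated client:)

import (
	"context"

	spanner "google.golang.org/api/spanner/v1"
)

// pingSession creates a session on the named database, issues the "SELECT 1"
// keep-alive suggested by the Create documentation, then deletes the session.
// database is expected to look like "projects/<p>/instances/<i>/databases/<d>".
func pingSession(ctx context.Context, svc *spanner.Service, database string) error {
	sessions := svc.Projects.Instances.Databases.Sessions
	sess, err := sessions.Create(database, &spanner.CreateSessionRequest{}).Context(ctx).Do()
	if err != nil {
		return err
	}
	// Best-effort cleanup; errors from Delete are intentionally ignored here.
	defer sessions.Delete(sess.Name).Context(ctx).Do()

	_, err = sessions.ExecuteSql(sess.Name, &spanner.ExecuteSqlRequest{Sql: "SELECT 1"}).Context(ctx).Do()
	return err
}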
func (r *ProjectsInstancesDatabasesSessionsService) ExecuteSql(session string, executesqlrequest *ExecuteSqlRequest) *ProjectsInstancesDatabasesSessionsExecuteSqlCall { c := &ProjectsInstancesDatabasesSessionsExecuteSqlCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.session = session @@ -12448,7 +11158,7 @@ func (c *ProjectsInstancesDatabasesSessionsExecuteSqlCall) Header() http.Header func (c *ProjectsInstancesDatabasesSessionsExecuteSqlCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12512,7 +11222,7 @@ func (c *ProjectsInstancesDatabasesSessionsExecuteSqlCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Executes an SQL statement, returning all results in a single reply. This\nmethod cannot be used to return a result set larger than 10 MiB;\nif the query yields more data than that, the query fails with\na `FAILED_PRECONDITION` error.\n\nOperations inside read-write transactions might return `ABORTED`. If\nthis occurs, the application should restart the transaction from\nthe beginning. See Transaction for more details.\n\nLarger result sets can be fetched in streaming fashion by calling\nExecuteStreamingSql instead.", + // "description": "Executes an SQL statement, returning all results in a single reply. This method cannot be used to return a result set larger than 10 MiB; if the query yields more data than that, the query fails with a `FAILED_PRECONDITION` error. Operations inside read-write transactions might return `ABORTED`. If this occurs, the application should restart the transaction from the beginning. See Transaction for more details. Larger result sets can be fetched in streaming fashion by calling ExecuteStreamingSql instead.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:executeSql", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.sessions.executeSql", @@ -12554,12 +11264,10 @@ type ProjectsInstancesDatabasesSessionsExecuteStreamingSqlCall struct { header_ http.Header } -// ExecuteStreamingSql: Like ExecuteSql, except returns the result -// set as a stream. Unlike ExecuteSql, there -// is no limit on the size of the returned result set. However, -// no -// individual row in the result set can exceed 100 MiB, and no -// column value can exceed 10 MiB. +// ExecuteStreamingSql: Like ExecuteSql, except returns the result set +// as a stream. Unlike ExecuteSql, there is no limit on the size of the +// returned result set. However, no individual row in the result set can +// exceed 100 MiB, and no column value can exceed 10 MiB. 
func (r *ProjectsInstancesDatabasesSessionsService) ExecuteStreamingSql(session string, executesqlrequest *ExecuteSqlRequest) *ProjectsInstancesDatabasesSessionsExecuteStreamingSqlCall { c := &ProjectsInstancesDatabasesSessionsExecuteStreamingSqlCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.session = session @@ -12594,7 +11302,7 @@ func (c *ProjectsInstancesDatabasesSessionsExecuteStreamingSqlCall) Header() htt func (c *ProjectsInstancesDatabasesSessionsExecuteStreamingSqlCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12658,7 +11366,7 @@ func (c *ProjectsInstancesDatabasesSessionsExecuteStreamingSqlCall) Do(opts ...g } return ret, nil // { - // "description": "Like ExecuteSql, except returns the result\nset as a stream. Unlike ExecuteSql, there\nis no limit on the size of the returned result set. However, no\nindividual row in the result set can exceed 100 MiB, and no\ncolumn value can exceed 10 MiB.", + // "description": "Like ExecuteSql, except returns the result set as a stream. Unlike ExecuteSql, there is no limit on the size of the returned result set. However, no individual row in the result set can exceed 100 MiB, and no column value can exceed 10 MiB.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:executeStreamingSql", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.sessions.executeStreamingSql", @@ -12701,10 +11409,8 @@ type ProjectsInstancesDatabasesSessionsGetCall struct { } // Get: Gets a session. Returns `NOT_FOUND` if the session does not -// exist. -// This is mainly useful for determining whether a session is -// still -// alive. +// exist. This is mainly useful for determining whether a session is +// still alive. func (r *ProjectsInstancesDatabasesSessionsService) Get(name string) *ProjectsInstancesDatabasesSessionsGetCall { c := &ProjectsInstancesDatabasesSessionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -12748,7 +11454,7 @@ func (c *ProjectsInstancesDatabasesSessionsGetCall) Header() http.Header { func (c *ProjectsInstancesDatabasesSessionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12810,7 +11516,7 @@ func (c *ProjectsInstancesDatabasesSessionsGetCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Gets a session. Returns `NOT_FOUND` if the session does not exist.\nThis is mainly useful for determining whether a session is still\nalive.", + // "description": "Gets a session. Returns `NOT_FOUND` if the session does not exist. 
This is mainly useful for determining whether a session is still alive.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}", // "httpMethod": "GET", // "id": "spanner.projects.instances.databases.sessions.get", @@ -12857,33 +11563,27 @@ func (r *ProjectsInstancesDatabasesSessionsService) List(database string) *Proje } // Filter sets the optional parameter "filter": An expression for -// filtering the results of the request. Filter rules are -// case insensitive. The fields eligible for filtering are: -// -// * `labels.key` where key is the name of a label -// -// Some examples of using filters are: -// -// * `labels.env:*` --> The session has the label "env". -// * `labels.env:dev` --> The session has the label "env" and the -// value of -// the label contains the string "dev". +// filtering the results of the request. Filter rules are case +// insensitive. The fields eligible for filtering are: * `labels.key` +// where key is the name of a label Some examples of using filters are: +// * `labels.env:*` --> The session has the label "env". * +// `labels.env:dev` --> The session has the label "env" and the value of +// the label contains the string "dev". func (c *ProjectsInstancesDatabasesSessionsListCall) Filter(filter string) *ProjectsInstancesDatabasesSessionsListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": Number of sessions -// to be returned in the response. If 0 or less, defaults -// to the server's maximum allowed page size. +// to be returned in the response. If 0 or less, defaults to the +// server's maximum allowed page size. func (c *ProjectsInstancesDatabasesSessionsListCall) PageSize(pageSize int64) *ProjectsInstancesDatabasesSessionsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": If non-empty, -// `page_token` should contain a -// next_page_token from a previous +// `page_token` should contain a next_page_token from a previous // ListSessionsResponse. func (c *ProjectsInstancesDatabasesSessionsListCall) PageToken(pageToken string) *ProjectsInstancesDatabasesSessionsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -12927,7 +11627,7 @@ func (c *ProjectsInstancesDatabasesSessionsListCall) Header() http.Header { func (c *ProjectsInstancesDatabasesSessionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13005,18 +11705,18 @@ func (c *ProjectsInstancesDatabasesSessionsListCall) Do(opts ...googleapi.CallOp // "type": "string" // }, // "filter": { - // "description": "An expression for filtering the results of the request. Filter rules are\ncase insensitive. The fields eligible for filtering are:\n\n * `labels.key` where key is the name of a label\n\nSome examples of using filters are:\n\n * `labels.env:*` --\u003e The session has the label \"env\".\n * `labels.env:dev` --\u003e The session has the label \"env\" and the value of\n the label contains the string \"dev\".", + // "description": "An expression for filtering the results of the request. Filter rules are case insensitive. 
The fields eligible for filtering are: * `labels.key` where key is the name of a label Some examples of using filters are: * `labels.env:*` --\u003e The session has the label \"env\". * `labels.env:dev` --\u003e The session has the label \"env\" and the value of the label contains the string \"dev\".", // "location": "query", // "type": "string" // }, // "pageSize": { - // "description": "Number of sessions to be returned in the response. If 0 or less, defaults\nto the server's maximum allowed page size.", + // "description": "Number of sessions to be returned in the response. If 0 or less, defaults to the server's maximum allowed page size.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "If non-empty, `page_token` should contain a\nnext_page_token from a previous\nListSessionsResponse.", + // "description": "If non-empty, `page_token` should contain a next_page_token from a previous ListSessionsResponse.", // "location": "query", // "type": "string" // } @@ -13066,24 +11766,16 @@ type ProjectsInstancesDatabasesSessionsPartitionQueryCall struct { } // PartitionQuery: Creates a set of partition tokens that can be used to -// execute a query -// operation in parallel. Each of the returned partition tokens can be -// used -// by ExecuteStreamingSql to specify a subset -// of the query result to read. The same session and read-only -// transaction -// must be used by the PartitionQueryRequest used to create -// the -// partition tokens and the ExecuteSqlRequests that use the partition -// tokens. -// -// Partition tokens become invalid when the session used to create -// them +// execute a query operation in parallel. Each of the returned partition +// tokens can be used by ExecuteStreamingSql to specify a subset of the +// query result to read. The same session and read-only transaction must +// be used by the PartitionQueryRequest used to create the partition +// tokens and the ExecuteSqlRequests that use the partition tokens. +// Partition tokens become invalid when the session used to create them // is deleted, is idle for too long, begins a new transaction, or -// becomes too -// old. When any of these happen, it is not possible to resume the -// query, and -// the whole operation must be restarted from the beginning. +// becomes too old. When any of these happen, it is not possible to +// resume the query, and the whole operation must be restarted from the +// beginning. func (r *ProjectsInstancesDatabasesSessionsService) PartitionQuery(session string, partitionqueryrequest *PartitionQueryRequest) *ProjectsInstancesDatabasesSessionsPartitionQueryCall { c := &ProjectsInstancesDatabasesSessionsPartitionQueryCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.session = session @@ -13118,7 +11810,7 @@ func (c *ProjectsInstancesDatabasesSessionsPartitionQueryCall) Header() http.Hea func (c *ProjectsInstancesDatabasesSessionsPartitionQueryCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13182,7 +11874,7 @@ func (c *ProjectsInstancesDatabasesSessionsPartitionQueryCall) Do(opts ...google } return ret, nil // { - // "description": "Creates a set of partition tokens that can be used to execute a query\noperation in parallel. 
Each of the returned partition tokens can be used\nby ExecuteStreamingSql to specify a subset\nof the query result to read. The same session and read-only transaction\nmust be used by the PartitionQueryRequest used to create the\npartition tokens and the ExecuteSqlRequests that use the partition tokens.\n\nPartition tokens become invalid when the session used to create them\nis deleted, is idle for too long, begins a new transaction, or becomes too\nold. When any of these happen, it is not possible to resume the query, and\nthe whole operation must be restarted from the beginning.", + // "description": "Creates a set of partition tokens that can be used to execute a query operation in parallel. Each of the returned partition tokens can be used by ExecuteStreamingSql to specify a subset of the query result to read. The same session and read-only transaction must be used by the PartitionQueryRequest used to create the partition tokens and the ExecuteSqlRequests that use the partition tokens. Partition tokens become invalid when the session used to create them is deleted, is idle for too long, begins a new transaction, or becomes too old. When any of these happen, it is not possible to resume the query, and the whole operation must be restarted from the beginning.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:partitionQuery", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.sessions.partitionQuery", @@ -13225,28 +11917,18 @@ type ProjectsInstancesDatabasesSessionsPartitionReadCall struct { } // PartitionRead: Creates a set of partition tokens that can be used to -// execute a read -// operation in parallel. Each of the returned partition tokens can be -// used -// by StreamingRead to specify a subset of the read -// result to read. The same session and read-only transaction must be -// used by -// the PartitionReadRequest used to create the partition tokens and -// the -// ReadRequests that use the partition tokens. There are no -// ordering -// guarantees on rows returned among the returned partition tokens, or -// even -// within each individual StreamingRead call issued with a -// partition_token. -// -// Partition tokens become invalid when the session used to create -// them -// is deleted, is idle for too long, begins a new transaction, or -// becomes too -// old. When any of these happen, it is not possible to resume the -// read, and -// the whole operation must be restarted from the beginning. +// execute a read operation in parallel. Each of the returned partition +// tokens can be used by StreamingRead to specify a subset of the read +// result to read. The same session and read-only transaction must be +// used by the PartitionReadRequest used to create the partition tokens +// and the ReadRequests that use the partition tokens. There are no +// ordering guarantees on rows returned among the returned partition +// tokens, or even within each individual StreamingRead call issued with +// a partition_token. Partition tokens become invalid when the session +// used to create them is deleted, is idle for too long, begins a new +// transaction, or becomes too old. When any of these happen, it is not +// possible to resume the read, and the whole operation must be +// restarted from the beginning. 
func (r *ProjectsInstancesDatabasesSessionsService) PartitionRead(session string, partitionreadrequest *PartitionReadRequest) *ProjectsInstancesDatabasesSessionsPartitionReadCall { c := &ProjectsInstancesDatabasesSessionsPartitionReadCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.session = session @@ -13281,7 +11963,7 @@ func (c *ProjectsInstancesDatabasesSessionsPartitionReadCall) Header() http.Head func (c *ProjectsInstancesDatabasesSessionsPartitionReadCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13345,7 +12027,7 @@ func (c *ProjectsInstancesDatabasesSessionsPartitionReadCall) Do(opts ...googlea } return ret, nil // { - // "description": "Creates a set of partition tokens that can be used to execute a read\noperation in parallel. Each of the returned partition tokens can be used\nby StreamingRead to specify a subset of the read\nresult to read. The same session and read-only transaction must be used by\nthe PartitionReadRequest used to create the partition tokens and the\nReadRequests that use the partition tokens. There are no ordering\nguarantees on rows returned among the returned partition tokens, or even\nwithin each individual StreamingRead call issued with a partition_token.\n\nPartition tokens become invalid when the session used to create them\nis deleted, is idle for too long, begins a new transaction, or becomes too\nold. When any of these happen, it is not possible to resume the read, and\nthe whole operation must be restarted from the beginning.", + // "description": "Creates a set of partition tokens that can be used to execute a read operation in parallel. Each of the returned partition tokens can be used by StreamingRead to specify a subset of the read result to read. The same session and read-only transaction must be used by the PartitionReadRequest used to create the partition tokens and the ReadRequests that use the partition tokens. There are no ordering guarantees on rows returned among the returned partition tokens, or even within each individual StreamingRead call issued with a partition_token. Partition tokens become invalid when the session used to create them is deleted, is idle for too long, begins a new transaction, or becomes too old. When any of these happen, it is not possible to resume the read, and the whole operation must be restarted from the beginning.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:partitionRead", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.sessions.partitionRead", @@ -13387,22 +12069,15 @@ type ProjectsInstancesDatabasesSessionsReadCall struct { header_ http.Header } -// Read: Reads rows from the database using key lookups and scans, as -// a -// simple key/value style alternative to -// ExecuteSql. This method cannot be used to -// return a result set larger than 10 MiB; if the read matches more -// data than that, the read fails with a -// `FAILED_PRECONDITION` -// error. -// -// Reads inside read-write transactions might return `ABORTED`. If -// this occurs, the application should restart the transaction from -// the beginning. See Transaction for more details. -// -// Larger result sets can be yielded in streaming fashion by -// calling -// StreamingRead instead. 
+// Read: Reads rows from the database using key lookups and scans, as a +// simple key/value style alternative to ExecuteSql. This method cannot +// be used to return a result set larger than 10 MiB; if the read +// matches more data than that, the read fails with a +// `FAILED_PRECONDITION` error. Reads inside read-write transactions +// might return `ABORTED`. If this occurs, the application should +// restart the transaction from the beginning. See Transaction for more +// details. Larger result sets can be yielded in streaming fashion by +// calling StreamingRead instead. func (r *ProjectsInstancesDatabasesSessionsService) Read(session string, readrequest *ReadRequest) *ProjectsInstancesDatabasesSessionsReadCall { c := &ProjectsInstancesDatabasesSessionsReadCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.session = session @@ -13437,7 +12112,7 @@ func (c *ProjectsInstancesDatabasesSessionsReadCall) Header() http.Header { func (c *ProjectsInstancesDatabasesSessionsReadCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13501,7 +12176,7 @@ func (c *ProjectsInstancesDatabasesSessionsReadCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Reads rows from the database using key lookups and scans, as a\nsimple key/value style alternative to\nExecuteSql. This method cannot be used to\nreturn a result set larger than 10 MiB; if the read matches more\ndata than that, the read fails with a `FAILED_PRECONDITION`\nerror.\n\nReads inside read-write transactions might return `ABORTED`. If\nthis occurs, the application should restart the transaction from\nthe beginning. See Transaction for more details.\n\nLarger result sets can be yielded in streaming fashion by calling\nStreamingRead instead.", + // "description": "Reads rows from the database using key lookups and scans, as a simple key/value style alternative to ExecuteSql. This method cannot be used to return a result set larger than 10 MiB; if the read matches more data than that, the read fails with a `FAILED_PRECONDITION` error. Reads inside read-write transactions might return `ABORTED`. If this occurs, the application should restart the transaction from the beginning. See Transaction for more details. Larger result sets can be yielded in streaming fashion by calling StreamingRead instead.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:read", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.sessions.read", @@ -13544,15 +12219,11 @@ type ProjectsInstancesDatabasesSessionsRollbackCall struct { } // Rollback: Rolls back a transaction, releasing any locks it holds. It -// is a good -// idea to call this for any transaction that includes one or more -// Read or ExecuteSql requests and -// ultimately decides not to commit. -// -// `Rollback` returns `OK` if it successfully aborts the transaction, -// the -// transaction was already aborted, or the transaction is not -// found. `Rollback` never returns `ABORTED`. +// is a good idea to call this for any transaction that includes one or +// more Read or ExecuteSql requests and ultimately decides not to +// commit. 
`Rollback` returns `OK` if it successfully aborts the +// transaction, the transaction was already aborted, or the transaction +// is not found. `Rollback` never returns `ABORTED`. func (r *ProjectsInstancesDatabasesSessionsService) Rollback(session string, rollbackrequest *RollbackRequest) *ProjectsInstancesDatabasesSessionsRollbackCall { c := &ProjectsInstancesDatabasesSessionsRollbackCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.session = session @@ -13587,7 +12258,7 @@ func (c *ProjectsInstancesDatabasesSessionsRollbackCall) Header() http.Header { func (c *ProjectsInstancesDatabasesSessionsRollbackCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13651,7 +12322,7 @@ func (c *ProjectsInstancesDatabasesSessionsRollbackCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Rolls back a transaction, releasing any locks it holds. It is a good\nidea to call this for any transaction that includes one or more\nRead or ExecuteSql requests and\nultimately decides not to commit.\n\n`Rollback` returns `OK` if it successfully aborts the transaction, the\ntransaction was already aborted, or the transaction is not\nfound. `Rollback` never returns `ABORTED`.", + // "description": "Rolls back a transaction, releasing any locks it holds. It is a good idea to call this for any transaction that includes one or more Read or ExecuteSql requests and ultimately decides not to commit. `Rollback` returns `OK` if it successfully aborts the transaction, the transaction was already aborted, or the transaction is not found. `Rollback` never returns `ABORTED`.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:rollback", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.sessions.rollback", @@ -13693,11 +12364,10 @@ type ProjectsInstancesDatabasesSessionsStreamingReadCall struct { header_ http.Header } -// StreamingRead: Like Read, except returns the result set as a -// stream. Unlike Read, there is no limit on the -// size of the returned result set. However, no individual row in -// the result set can exceed 100 MiB, and no column value can exceed -// 10 MiB. +// StreamingRead: Like Read, except returns the result set as a stream. +// Unlike Read, there is no limit on the size of the returned result +// set. However, no individual row in the result set can exceed 100 MiB, +// and no column value can exceed 10 MiB. 
func (r *ProjectsInstancesDatabasesSessionsService) StreamingRead(session string, readrequest *ReadRequest) *ProjectsInstancesDatabasesSessionsStreamingReadCall { c := &ProjectsInstancesDatabasesSessionsStreamingReadCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.session = session @@ -13732,7 +12402,7 @@ func (c *ProjectsInstancesDatabasesSessionsStreamingReadCall) Header() http.Head func (c *ProjectsInstancesDatabasesSessionsStreamingReadCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13796,7 +12466,7 @@ func (c *ProjectsInstancesDatabasesSessionsStreamingReadCall) Do(opts ...googlea } return ret, nil // { - // "description": "Like Read, except returns the result set as a\nstream. Unlike Read, there is no limit on the\nsize of the returned result set. However, no individual row in\nthe result set can exceed 100 MiB, and no column value can exceed\n10 MiB.", + // "description": "Like Read, except returns the result set as a stream. Unlike Read, there is no limit on the size of the returned result set. However, no individual row in the result set can exceed 100 MiB, and no column value can exceed 10 MiB.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:streamingRead", // "httpMethod": "POST", // "id": "spanner.projects.instances.databases.sessions.streamingRead", @@ -13838,23 +12508,15 @@ type ProjectsInstancesOperationsCancelCall struct { } // Cancel: Starts asynchronous cancellation on a long-running operation. -// The server -// makes a best effort to cancel the operation, but success is -// not -// guaranteed. If the server doesn't support this method, it -// returns -// `google.rpc.Code.UNIMPLEMENTED`. Clients can -// use -// Operations.GetOperation or -// other methods to check whether the cancellation succeeded or whether -// the -// operation completed despite cancellation. On successful -// cancellation, -// the operation is not deleted; instead, it becomes an operation -// with -// an Operation.error value with a google.rpc.Status.code of -// 1, -// corresponding to `Code.CANCELLED`. +// The server makes a best effort to cancel the operation, but success +// is not guaranteed. If the server doesn't support this method, it +// returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use +// Operations.GetOperation or other methods to check whether the +// cancellation succeeded or whether the operation completed despite +// cancellation. On successful cancellation, the operation is not +// deleted; instead, it becomes an operation with an Operation.error +// value with a google.rpc.Status.code of 1, corresponding to +// `Code.CANCELLED`. 
func (r *ProjectsInstancesOperationsService) Cancel(name string) *ProjectsInstancesOperationsCancelCall { c := &ProjectsInstancesOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -13888,7 +12550,7 @@ func (c *ProjectsInstancesOperationsCancelCall) Header() http.Header { func (c *ProjectsInstancesOperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13947,7 +12609,7 @@ func (c *ProjectsInstancesOperationsCancelCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", + // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/operations/{operationsId}:cancel", // "httpMethod": "POST", // "id": "spanner.projects.instances.operations.cancel", @@ -13986,12 +12648,9 @@ type ProjectsInstancesOperationsDeleteCall struct { } // Delete: Deletes a long-running operation. This method indicates that -// the client is -// no longer interested in the operation result. It does not cancel -// the -// operation. If the server doesn't support this method, it -// returns -// `google.rpc.Code.UNIMPLEMENTED`. +// the client is no longer interested in the operation result. It does +// not cancel the operation. If the server doesn't support this method, +// it returns `google.rpc.Code.UNIMPLEMENTED`. 
func (r *ProjectsInstancesOperationsService) Delete(name string) *ProjectsInstancesOperationsDeleteCall { c := &ProjectsInstancesOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -14025,7 +12684,7 @@ func (c *ProjectsInstancesOperationsDeleteCall) Header() http.Header { func (c *ProjectsInstancesOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14084,7 +12743,7 @@ func (c *ProjectsInstancesOperationsDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", + // "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/operations/{operationsId}", // "httpMethod": "DELETE", // "id": "spanner.projects.instances.operations.delete", @@ -14123,11 +12782,9 @@ type ProjectsInstancesOperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this -// method to poll the operation result at intervals as recommended by -// the API -// service. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. func (r *ProjectsInstancesOperationsService) Get(name string) *ProjectsInstancesOperationsGetCall { c := &ProjectsInstancesOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -14171,7 +12828,7 @@ func (c *ProjectsInstancesOperationsGetCall) Header() http.Header { func (c *ProjectsInstancesOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14233,7 +12890,7 @@ func (c *ProjectsInstancesOperationsGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/operations/{operationsId}", // "httpMethod": "GET", // "id": "spanner.projects.instances.operations.get", @@ -14273,22 +12930,15 @@ type ProjectsInstancesOperationsListCall struct { } // List: Lists operations that match the specified filter in the -// request. If the -// server doesn't support this method, it returns -// `UNIMPLEMENTED`. 
-// -// NOTE: the `name` binding allows API services to override the -// binding -// to use different resource name schemes, such as `users/*/operations`. -// To -// override the binding, API services can add a binding such -// as -// "/v1/{name=users/*}/operations" to their service configuration. -// For backwards compatibility, the default name includes the -// operations -// collection id, however overriding users must ensure the name -// binding -// is the parent resource, without the operations collection id. +// request. If the server doesn't support this method, it returns +// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to +// override the binding to use different resource name schemes, such as +// `users/*/operations`. To override the binding, API services can add a +// binding such as "/v1/{name=users/*}/operations" to their service +// configuration. For backwards compatibility, the default name includes +// the operations collection id, however overriding users must ensure +// the name binding is the parent resource, without the operations +// collection id. func (r *ProjectsInstancesOperationsService) List(name string) *ProjectsInstancesOperationsListCall { c := &ProjectsInstancesOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -14353,7 +13003,7 @@ func (c *ProjectsInstancesOperationsListCall) Header() http.Header { func (c *ProjectsInstancesOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -14415,7 +13065,7 @@ func (c *ProjectsInstancesOperationsListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. 
For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/operations", // "httpMethod": "GET", // "id": "spanner.projects.instances.operations.list", diff --git a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json index 528b3da0096..e0576b037aa 100644 --- a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json +++ b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json @@ -22,10 +22,10 @@ "x16": "http://www.google.com/images/icons/product/search-16.gif", "x32": "http://www.google.com/images/icons/product/search-32.gif" }, - "id": "sql:v1beta4", + "id": "sqladmin:v1beta4", "kind": "discovery#restDescription", "mtlsRootUrl": "https://sqladmin.mtls.googleapis.com/", - "name": "sql", + "name": "sqladmin", "ownerDomain": "google.com", "ownerName": "Google", "parameters": { @@ -121,7 +121,7 @@ ], "parameters": { "id": { - "description": "The ID of the Backup Run to delete. To find a Backup Run ID, use the \u003ca\nhref=\"/sql/docs/db_path/admin-api/rest/v1beta4/backupRuns/list\"\u003elist\u003c/a\u003e\nmethod.", + "description": "The ID of the Backup Run to delete. To find a Backup Run ID, use the list method.", "format": "int64", "location": "path", "required": true, @@ -190,7 +190,7 @@ ] }, "insert": { - "description": "Creates a new backup run on demand. This method is applicable only to\nSecond Generation instances.", + "description": "Creates a new backup run on demand. This method is applicable only to Second Generation instances.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/backupRuns", "httpMethod": "POST", "id": "sql.backupRuns.insert", @@ -225,7 +225,7 @@ ] }, "list": { - "description": "Lists all backup runs associated with a given instance and configuration in\nthe reverse chronological order of the backup initiation time.", + "description": "Lists all backup runs associated with a given instance and configuration in the reverse chronological order of the backup initiation time.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/backupRuns", "httpMethod": "GET", "id": "sql.backupRuns.list", @@ -247,7 +247,7 @@ "type": "integer" }, "pageToken": { - "description": "A previously-returned page token representing part of the larger set of\nresults to view.", + "description": "A previously-returned page token representing part of the larger set of results to view.", "location": "query", "type": "string" }, @@ -311,7 +311,7 @@ ] }, "get": { - "description": "Retrieves a resource containing information about a database inside a Cloud\nSQL instance.", + "description": "Retrieves a resource containing information about a database inside a Cloud SQL instance.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/databases/{database}", "httpMethod": "GET", "id": "sql.databases.get", @@ -350,7 +350,7 @@ ] }, "insert": { - "description": "Inserts a resource containing information about a database inside a Cloud\nSQL instance.", + "description": "Inserts a resource containing information about a database inside a Cloud SQL instance.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/databases", "httpMethod": "POST", "id": "sql.databases.insert", @@ -417,7 +417,7 @@ ] }, "patch": { - "description": "Partially updates a resource 
containing information about a database inside\na Cloud SQL instance. This method supports patch semantics.", + "description": "Partially updates a resource containing information about a database inside a Cloud SQL instance. This method supports patch semantics.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/databases/{database}", "httpMethod": "PATCH", "id": "sql.databases.patch", @@ -459,7 +459,7 @@ ] }, "update": { - "description": "Updates a resource containing information about a database inside a Cloud\nSQL instance.", + "description": "Updates a resource containing information about a database inside a Cloud SQL instance.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/databases/{database}", "httpMethod": "PUT", "id": "sql.databases.update", @@ -512,7 +512,7 @@ "parameterOrder": [], "parameters": { "databaseVersion": { - "description": "Database type and version you want to retrieve flags for. By default, this\nmethod returns flags for all database types and versions.", + "description": "Database type and version you want to retrieve flags for. By default, this method returns flags for all database types and versions.", "location": "query", "type": "string" } @@ -531,7 +531,7 @@ "instances": { "methods": { "addServerCa": { - "description": "Add a new trusted Certificate Authority (CA) version for the specified\ninstance. Required to prepare for a certificate rotation. If a CA version\nwas previously added but never used in a certificate rotation, this\noperation replaces that version. There cannot be more than one CA version\nwaiting to be rotated in.", + "description": "Add a new trusted Certificate Authority (CA) version for the specified instance. Required to prepare for a certificate rotation. If a CA version was previously added but never used in a certificate rotation, this operation replaces that version. There cannot be more than one CA version waiting to be rotated in.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/addServerCa", "httpMethod": "POST", "id": "sql.instances.addServerCa", @@ -563,7 +563,7 @@ ] }, "clone": { - "description": "Creates a Cloud SQL instance as a clone of the source instance. Using this\noperation might cause your instance to restart.", + "description": "Creates a Cloud SQL instance as a clone of the source instance. Using this operation might cause your instance to restart.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/clone", "httpMethod": "POST", "id": "sql.instances.clone", @@ -573,7 +573,7 @@ ], "parameters": { "instance": { - "description": "The ID of the Cloud SQL instance to be cloned (source). This does not\ninclude the project ID.", + "description": "The ID of the Cloud SQL instance to be cloned (source). 
This does not include the project ID.", "location": "path", "required": true, "type": "string" @@ -630,7 +630,7 @@ ] }, "demoteMaster": { - "description": "Demotes the stand-alone instance to be a Cloud SQL read replica for an\nexternal database server.", + "description": "Demotes the stand-alone instance to be a Cloud SQL read replica for an external database server.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/demoteMaster", "httpMethod": "POST", "id": "sql.instances.demoteMaster", @@ -665,7 +665,7 @@ ] }, "export": { - "description": "Exports data from a Cloud SQL instance to a Cloud Storage bucket as a SQL\ndump or CSV file.", + "description": "Exports data from a Cloud SQL instance to a Cloud Storage bucket as a SQL dump or CSV file.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/export", "httpMethod": "POST", "id": "sql.instances.export", @@ -699,7 +699,7 @@ ] }, "failover": { - "description": "Failover the instance to its failover replica instance. Using this\noperation might cause your instance to restart.", + "description": "Failover the instance to its failover replica instance. Using this operation might cause your instance to restart.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/failover", "httpMethod": "POST", "id": "sql.instances.failover", @@ -766,7 +766,7 @@ ] }, "import": { - "description": "Imports data into a Cloud SQL instance from a SQL dump or CSV file in\nCloud Storage.", + "description": "Imports data into a Cloud SQL instance from a SQL dump or CSV file in Cloud Storage.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/import", "httpMethod": "POST", "id": "sql.instances.import", @@ -809,7 +809,7 @@ ], "parameters": { "project": { - "description": "Project ID of the project to which the newly created Cloud SQL instances\nshould belong.", + "description": "Project ID of the project to which the newly created Cloud SQL instances should belong.", "location": "path", "required": true, "type": "string" @@ -837,7 +837,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response.\nThe expression is in the form of field:value. For example,\n'instanceType:CLOUD_SQL_INSTANCE'. Fields can be nested as needed as per\ntheir JSON representation, such as 'settings.userLabels.auto_start:true'.\n\nMultiple filter queries are space-separated. For example.\n'state:RUNNABLE instanceType:CLOUD_SQL_INSTANCE'. By default, each\nexpression is an AND expression. However, you can include AND and OR\nexpressions explicitly.", + "description": "A filter expression that filters resources listed in the response. The expression is in the form of field:value. For example, 'instanceType:CLOUD_SQL_INSTANCE'. Fields can be nested as needed as per their JSON representation, such as 'settings.userLabels.auto_start:true'. Multiple filter queries are space-separated. For example. 'state:RUNNABLE instanceType:CLOUD_SQL_INSTANCE'. By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly.", "location": "query", "type": "string" }, @@ -848,7 +848,7 @@ "type": "integer" }, "pageToken": { - "description": "A previously-returned page token representing part of the larger set of\nresults to view.", + "description": "A previously-returned page token representing part of the larger set of results to view.", "location": "query", "type": "string" }, @@ -869,7 +869,7 @@ ] }, "listServerCas": { - "description": "Lists all of the trusted Certificate Authorities (CAs) for the specified\ninstance. There can be up to three CAs listed: the CA that was used to sign\nthe certificate that is currently in use, a CA that has been added but not\nyet used to sign a certificate, and a CA used to sign a certificate that\nhas previously rotated out.", + "description": "Lists all of the trusted Certificate Authorities (CAs) for the specified instance. There can be up to three CAs listed: the CA that was used to sign the certificate that is currently in use, a CA that has been added but not yet used to sign a certificate, and a CA used to sign a certificate that has previously rotated out.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/listServerCas", "httpMethod": "GET", "id": "sql.instances.listServerCas", @@ -901,7 +901,7 @@ ] }, "patch": { - "description": "Updates settings of a Cloud SQL instance.\nThis method supports patch semantics.", + "description": "Updates settings of a Cloud SQL instance. This method supports patch semantics.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}", "httpMethod": "PATCH", "id": "sql.instances.patch", @@ -936,7 +936,7 @@ ] }, "promoteReplica": { - "description": "Promotes the read replica instance to be a stand-alone Cloud SQL instance.\nUsing this operation might cause your instance to restart.", + "description": "Promotes the read replica instance to be a stand-alone Cloud SQL instance. Using this operation might cause your instance to restart.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/promoteReplica", "httpMethod": "POST", "id": "sql.instances.promoteReplica", @@ -968,7 +968,7 @@ ] }, "resetSslConfig": { - "description": "Deletes all client certificates and generates a new server SSL certificate\nfor the instance.", + "description": "Deletes all client certificates and generates a new server SSL certificate for the instance.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/resetSslConfig", "httpMethod": "POST", "id": "sql.instances.resetSslConfig", @@ -1032,7 +1032,7 @@ ] }, "restoreBackup": { - "description": "Restores a backup of a Cloud SQL instance. Using this operation might cause\nyour instance to restart.", + "description": "Restores a backup of a Cloud SQL instance. 
Using this operation might cause your instance to restart.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/restoreBackup", "httpMethod": "POST", "id": "sql.instances.restoreBackup", @@ -1067,7 +1067,7 @@ ] }, "rotateServerCa": { - "description": "Rotates the server certificate to one signed by the Certificate Authority\n(CA) version previously added with the addServerCA method.", + "description": "Rotates the server certificate to one signed by the Certificate Authority (CA) version previously added with the addServerCA method.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/rotateServerCa", "httpMethod": "POST", "id": "sql.instances.rotateServerCa", @@ -1201,7 +1201,7 @@ ] }, "update": { - "description": "Updates settings of a Cloud SQL instance. Using this operation might cause\nyour instance to restart.", + "description": "Updates settings of a Cloud SQL instance. Using this operation might cause your instance to restart.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}", "httpMethod": "PUT", "id": "sql.instances.update", @@ -1272,7 +1272,7 @@ ] }, "list": { - "description": "Lists all instance operations that have been performed on the given Cloud\nSQL instance in the reverse chronological order of the start time.", + "description": "Lists all instance operations that have been performed on the given Cloud SQL instance in the reverse chronological order of the start time.", "flatPath": "sql/v1beta4/projects/{project}/operations", "httpMethod": "GET", "id": "sql.operations.list", @@ -1292,7 +1292,7 @@ "type": "integer" }, "pageToken": { - "description": "A previously-returned page token representing part of the larger set of\nresults to view.", + "description": "A previously-returned page token representing part of the larger set of results to view.", "location": "query", "type": "string" }, @@ -1354,7 +1354,7 @@ ] }, "startExternalSync": { - "description": "Start External master migration.", + "description": "Start External primary instance migration.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/startExternalSync", "httpMethod": "POST", "id": "sql.projects.instances.startExternalSync", @@ -1370,7 +1370,7 @@ "type": "string" }, "project": { - "description": "ID of the project that contains the first generation instance.", + "description": "ID of the project that contains the instance.", "location": "path", "required": true, "type": "string" @@ -1382,6 +1382,11 @@ "ONLINE", "OFFLINE" ], + "enumDescriptions": [ + "Unknown external sync mode, will be defaulted to ONLINE mode", + "Online external sync will set up replication after initial data external sync", + "Offline external sync only dumps and loads a one-time snapshot of the primary instance's data" + ], "location": "query", "type": "string" } @@ -1396,7 +1401,7 @@ ] }, "verifyExternalSyncSettings": { - "description": "Verify External master external sync settings.", + "description": "Verify External primary instance external sync settings.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/verifyExternalSyncSettings", "httpMethod": "POST", "id": "sql.projects.instances.verifyExternalSyncSettings", @@ -1424,6 +1429,11 @@ "ONLINE", "OFFLINE" ], + "enumDescriptions": [ + "Unknown external sync mode, will be defaulted to ONLINE mode", + "Online external sync will set up replication after initial data external sync", + "Offline external sync only dumps and loads a one-time snapshot of the primary instance's data" + ], "location": "query", "type": 
"string" }, @@ -1449,7 +1459,7 @@ "sslCerts": { "methods": { "createEphemeral": { - "description": "Generates a short-lived X509 certificate containing the provided public key\nand signed by a private key specific to the target instance. Users may use\nthe certificate to authenticate as themselves when connecting to the\ndatabase.", + "description": "Generates a short-lived X509 certificate containing the provided public key and signed by a private key specific to the target instance. Users may use the certificate to authenticate as themselves when connecting to the database.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/createEphemeral", "httpMethod": "POST", "id": "sql.sslCerts.createEphemeral", @@ -1484,7 +1494,7 @@ ] }, "delete": { - "description": "Deletes the SSL certificate. For First Generation instances, the\ncertificate remains valid until the instance is restarted.", + "description": "Deletes the SSL certificate. For First Generation instances, the certificate remains valid until the instance is restarted.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/sslCerts/{sha1Fingerprint}", "httpMethod": "DELETE", "id": "sql.sslCerts.delete", @@ -1523,7 +1533,7 @@ ] }, "get": { - "description": "Retrieves a particular SSL certificate. Does not include the private key\n(required for usage). The private key must be saved from the response to\ninitial creation.", + "description": "Retrieves a particular SSL certificate. Does not include the private key (required for usage). The private key must be saved from the response to initial creation.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/sslCerts/{sha1Fingerprint}", "httpMethod": "GET", "id": "sql.sslCerts.get", @@ -1562,7 +1572,7 @@ ] }, "insert": { - "description": "Creates an SSL certificate and returns it along with the private key and\nserver certificate authority. The new certificate will not be usable until\nthe instance is restarted.", + "description": "Creates an SSL certificate and returns it along with the private key and server certificate authority. The new certificate will not be usable until the instance is restarted.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/sslCerts", "httpMethod": "POST", "id": "sql.sslCerts.insert", @@ -1633,7 +1643,7 @@ "tiers": { "methods": { "list": { - "description": "Lists all available machine types (tiers) for Cloud SQL, for example,\ndb-n1-standard-1. For related information, see \u003ca\nhref=\"/sql/pricing\"\u003ePricing\u003c/a\u003e.", + "description": "Lists all available machine types (tiers) for Cloud SQL, for example, db-n1-standard-1. 
For related information, see Pricing.", "flatPath": "sql/v1beta4/projects/{project}/tiers", "httpMethod": "GET", "id": "sql.tiers.list", @@ -1818,7 +1828,7 @@ } } }, - "revision": "20200331", + "revision": "20200930", "rootUrl": "https://sqladmin.googleapis.com/", "schemas": { "AclEntry": { @@ -1826,12 +1836,12 @@ "id": "AclEntry", "properties": { "expirationTime": { - "description": "The time when this access control entry expires in \u003ca\nhref=\"https://tools.ietf.org/html/rfc3339\"\u003eRFC 3339\u003c/a\u003e format, for example\n\u003ccode\u003e2012-11-15T16:19:00.094Z\u003c/code\u003e.", + "description": "The time when this access control entry expires in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.", "format": "google-datetime", "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#aclEntry\u003c/code\u003e.", + "description": "This is always *sql#aclEntry*.", "type": "string" }, "name": { @@ -1839,7 +1849,7 @@ "type": "string" }, "value": { - "description": "The whitelisted value for the access control list.", + "description": "The allowlisted value for the access control list.", "type": "string" } }, @@ -1857,7 +1867,7 @@ ], "enumDescriptions": [ "An unknown or unset warning type from Cloud SQL API.", - "Warning when one or more regions are not reachable. The returned result\nset may be incomplete." + "Warning when one or more regions are not reachable. The returned result set may be incomplete." ], "type": "string" }, @@ -1872,8 +1882,12 @@ "description": "Database instance backup configuration.", "id": "BackupConfiguration", "properties": { + "backupRetentionSettings": { + "$ref": "BackupRetentionSettings", + "description": "Backup retention settings." + }, "binaryLogEnabled": { - "description": "(MySQL only) Whether binary log is enabled. If backup configuration is\ndisabled, binarylog must be disabled as well.", + "description": "(MySQL only) Whether binary log is enabled. If backup configuration is disabled, binarylog must be disabled as well.", "type": "boolean" }, "enabled": { @@ -1881,7 +1895,7 @@ "type": "boolean" }, "kind": { - "description": "This is always \u003ccode\u003esql#backupConfiguration\u003c/code\u003e.", + "description": "This is always *sql#backupConfiguration*.", "type": "string" }, "location": { @@ -1897,7 +1911,36 @@ "type": "boolean" }, "startTime": { - "description": "Start time for the daily backup configuration in UTC timezone in the 24\nhour format - \u003ccode\u003eHH:MM\u003c/code\u003e.", + "description": "Start time for the daily backup configuration in UTC timezone in the 24 hour format - *HH:MM*.", + "type": "string" + }, + "transactionLogRetentionDays": { + "description": "The number of days of transaction logs we retain for point in time restore, from 1-7.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "BackupRetentionSettings": { + "description": "We currently only support backup retention by specifying the number of backups we will retain.", + "id": "BackupRetentionSettings", + "properties": { + "retainedBackups": { + "description": "Depending on the value of retention_unit, this is used to determine if a backup needs to be deleted. 
If retention_unit is 'COUNT', we will retain this many backups.", + "format": "int32", + "type": "integer" + }, + "retentionUnit": { + "description": "The unit that 'retained_backups' represents.", + "enum": [ + "RETENTION_UNIT_UNSPECIFIED", + "COUNT" + ], + "enumDescriptions": [ + "Backup retention unit is unspecified, will be treated as COUNT.", + "Retention will be by count, eg. \"retain the most recent 7 backups\"." + ], "type": "string" } }, @@ -1907,34 +1950,48 @@ "description": "A BackupRun resource.", "id": "BackupRun", "properties": { + "backupKind": { + "description": "Specifies the kind of backup, PHYSICAL or DEFAULT_SNAPSHOT.", + "enum": [ + "SQL_BACKUP_KIND_UNSPECIFIED", + "SNAPSHOT", + "PHYSICAL" + ], + "enumDescriptions": [ + "This is an unknown BackupKind.", + "The snapshot based backups", + "Physical backups" + ], + "type": "string" + }, "description": { "description": "The description of this run, only applicable to on-demand backups.", "type": "string" }, "diskEncryptionConfiguration": { "$ref": "DiskEncryptionConfiguration", - "description": "Encryption configuration specific to a backup.\nApplies only to Second Generation instances." + "description": "Encryption configuration specific to a backup. Applies only to Second Generation instances." }, "diskEncryptionStatus": { "$ref": "DiskEncryptionStatus", - "description": "Encryption status specific to a backup.\nApplies only to Second Generation instances." + "description": "Encryption status specific to a backup. Applies only to Second Generation instances." }, "endTime": { - "description": "The time the backup operation completed in UTC timezone in \u003ca\nhref=\"https://tools.ietf.org/html/rfc3339\"\u003eRFC 3339\u003c/a\u003e format, for example\n\u003ccode\u003e2012-11-15T16:19:00.094Z\u003c/code\u003e.", + "description": "The time the backup operation completed in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.", "format": "google-datetime", "type": "string" }, "enqueuedTime": { - "description": "The time the run was enqueued in UTC timezone in \u003ca\nhref=\"https://tools.ietf.org/html/rfc3339\"\u003eRFC 3339\u003c/a\u003e format, for example\n\u003ccode\u003e2012-11-15T16:19:00.094Z\u003c/code\u003e.", + "description": "The time the run was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.", "format": "google-datetime", "type": "string" }, "error": { "$ref": "OperationError", - "description": "Information about why the backup operation failed. This is only present if\nthe run has the FAILED status." + "description": "Information about why the backup operation failed. This is only present if the run has the FAILED status." }, "id": { - "description": "The identifier for this backup run. Unique only for a specific Cloud SQL\ninstance.", + "description": "The identifier for this backup run. 
Unique only for a specific Cloud SQL instance.", "format": "int64", "type": "string" }, @@ -1943,7 +2000,7 @@ "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#backupRun\u003c/code\u003e.", + "description": "This is always *sql#backupRun*.", "type": "string" }, "location": { @@ -1955,7 +2012,7 @@ "type": "string" }, "startTime": { - "description": "The time the backup operation actually started in UTC timezone in \u003ca\nhref=\"https://tools.ietf.org/html/rfc3339\"\u003eRFC 3339\u003c/a\u003e format, for example\n\u003ccode\u003e2012-11-15T16:19:00.094Z\u003c/code\u003e.", + "description": "The time the backup operation actually started in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.", "format": "google-datetime", "type": "string" }, @@ -1976,11 +2033,11 @@ "enumDescriptions": [ "The status of the run is unknown.", "The backup operation was enqueued.", - "The backup is overdue across a given backup window. Indicates a\nproblem. Example: Long-running operation in progress during\nthe whole window.", + "The backup is overdue across a given backup window. Indicates a problem. Example: Long-running operation in progress during the whole window.", "The backup is in progress.", "The backup failed.", "The backup was successful.", - "The backup was skipped (without problems) for a given backup\nwindow. Example: Instance was idle.", + "The backup was skipped (without problems) for a given backup window. Example: Instance was idle.", "The backup is about to be deleted.", "The backup deletion failed.", "The backup has been deleted." @@ -2002,7 +2059,7 @@ "type": "string" }, "windowStartTime": { - "description": "The start time of the backup window during which this the backup was\nattempted in \u003ca href=\"https://tools.ietf.org/html/rfc3339\"\u003eRFC 3339\u003c/a\u003e\nformat, for example \u003ccode\u003e2012-11-15T16:19:00.094Z\u003c/code\u003e.", + "description": "The start time of the backup window during which this the backup was attempted in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.", "format": "google-datetime", "type": "string" } @@ -2021,11 +2078,11 @@ "type": "array" }, "kind": { - "description": "This is always \u003ccode\u003esql#backupRunsList\u003c/code\u003e.", + "description": "This is always *sql#backupRunsList*.", "type": "string" }, "nextPageToken": { - "description": "The continuation token, used to page through large result sets. Provide\nthis value in a subsequent request to return the next page of results.", + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", "type": "string" } }, @@ -2045,7 +2102,7 @@ "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#binLogCoordinates\u003c/code\u003e.", + "description": "This is always *sql#binLogCoordinates*.", "type": "string" } }, @@ -2057,14 +2114,14 @@ "properties": { "binLogCoordinates": { "$ref": "BinLogCoordinates", - "description": "Binary log coordinates, if specified, identify the position up to which the\nsource instance should be cloned. If not specified, the source instance is\ncloned up to the most recent binary log coordinates." + "description": "Binary log coordinates, if specified, identify the position up to which the source instance is cloned. If not specified, the source instance is cloned up to the most recent binary log coordinates." 
}, "destinationInstanceName": { "description": "Name of the Cloud SQL instance to be created as a clone.", "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#cloneContext\u003c/code\u003e.", + "description": "This is always *sql#cloneContext*.", "type": "string" }, "pitrTimestampMs": { @@ -2085,15 +2142,15 @@ "id": "Database", "properties": { "charset": { - "description": "The MySQL charset value.", + "description": "The Cloud SQL charset value.", "type": "string" }, "collation": { - "description": "The MySQL collation value.", + "description": "The Cloud SQL collation value.", "type": "string" }, "etag": { - "description": "This field is deprecated and will be removed from a future version of the\nAPI.", + "description": "This field is deprecated and will be removed from a future version of the API.", "type": "string" }, "instance": { @@ -2101,15 +2158,15 @@ "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#database\u003c/code\u003e.", + "description": "This is always *sql#database*.", "type": "string" }, "name": { - "description": "The name of the database in the Cloud SQL instance. This does not include\nthe project ID or instance name.", + "description": "The name of the database in the Cloud SQL instance. This does not include the project ID or instance name.", "type": "string" }, "project": { - "description": "The project ID of the project containing the Cloud SQL database. The Google\napps domain is prefixed if applicable.", + "description": "The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable.", "type": "string" }, "selfLink": { @@ -2127,22 +2184,22 @@ "id": "DatabaseFlags", "properties": { "name": { - "description": "The name of the flag. These flags are passed at instance startup, so\ninclude both server options and system variables for MySQL. Flags should be\nspecified with underscores, not hyphens. For more information, see \u003ca\nhref=\"/sql/docs/mysql/flags\"\u003eConfiguring Database Flags\u003c/a\u003e in the Cloud\nSQL documentation.", + "description": "The name of the flag. These flags are passed at instance startup, so include both server options and system variables for MySQL. Flags are specified with underscores, not hyphens. For more information, see Configuring Database Flags in the Cloud SQL documentation.", "type": "string" }, "value": { - "description": "The value of the flag. Booleans should be set to \u003ccode\u003eon\u003c/code\u003e for true\nand \u003ccode\u003eoff\u003c/code\u003e for false. This field must be omitted if the flag\ndoesn't take a value.", + "description": "The value of the flag. Booleans are set to *on* for true and *off* for false. This field must be omitted if the flag doesn't take a value.", "type": "string" } }, "type": "object" }, "DatabaseInstance": { - "description": "A Cloud SQL instance resource.", + "description": "A Cloud SQL instance resource. Next field: 36", "id": "DatabaseInstance", "properties": { "backendType": { - "description": "\u003ccode\u003eFIRST_GEN\u003c/code\u003e: First Generation instance. MySQL only. \u003cbr\n/\u003e\u003ccode\u003eSECOND_GEN\u003c/code\u003e: Second Generation instance or PostgreSQL\ninstance. \u003cbr /\u003e\u003ccode\u003eEXTERNAL\u003c/code\u003e: A database server that is not\nmanaged by Google. 
\u003cbr\u003eThis property is read-only; use the\n\u003ccode\u003etier\u003c/code\u003e property in the \u003ccode\u003esettings\u003c/code\u003e object to determine\nthe database type and Second or First Generation.", + "description": " *SECOND_GEN*: Cloud SQL database instance. *EXTERNAL*: A database server that is not managed by Google. This property is read-only; use the *tier* property in the *settings* object to determine the database type.", "enum": [ "SQL_BACKEND_TYPE_UNSPECIFIED", "FIRST_GEN", @@ -2162,12 +2219,12 @@ "type": "string" }, "currentDiskSize": { - "description": "The current disk usage of the instance in bytes. This property has been\ndeprecated. Users should use the\n\"cloudsql.googleapis.com/database/disk/bytes_used\" metric in Cloud\nMonitoring API instead. Please see \u003ca\nhref=\"https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ\"\u003ethis\nannouncement\u003c/a\u003e for details.", + "description": "The current disk usage of the instance in bytes. This property has been deprecated. Use the \"cloudsql.googleapis.com/database/disk/bytes_used\" metric in Cloud Monitoring API instead. Please see this announcement for details.", "format": "int64", "type": "string" }, "databaseVersion": { - "description": "The database engine type and version. The \u003ccode\u003edatabaseVersion\u003c/code\u003e\nfield can not be changed after instance creation. MySQL Second Generation\ninstances: \u003ccode\u003eMYSQL_5_7\u003c/code\u003e (default) or \u003ccode\u003eMYSQL_5_6\u003c/code\u003e.\nPostgreSQL instances: \u003ccode\u003ePOSTGRES_9_6\u003c/code\u003e (default) or\n\u003ccode\u003ePOSTGRES_11 Beta\u003c/code\u003e MySQL First Generation\ninstances: \u003ccode\u003eMYSQL_5_6\u003c/code\u003e (default) or \u003ccode\u003eMYSQL_5_5\u003c/code\u003e", + "description": "The database engine type and version. The *databaseVersion* field cannot be changed after instance creation. MySQL instances: *MYSQL_8_0*, *MYSQL_5_7* (default), or *MYSQL_5_6*. PostgreSQL instances: *POSTGRES_9_6*, *POSTGRES_10*, *POSTGRES_11* or *POSTGRES_12* (default). SQL Server instances: *SQLSERVER_2017_STANDARD* (default), *SQLSERVER_2017_ENTERPRISE*, *SQLSERVER_2017_EXPRESS*, or *SQLSERVER_2017_WEB*.", "enum": [ "SQL_DATABASE_VERSION_UNSPECIFIED", "MYSQL_5_1", @@ -2181,7 +2238,9 @@ "SQLSERVER_2017_EXPRESS", "SQLSERVER_2017_WEB", "POSTGRES_10", - "POSTGRES_12" + "POSTGRES_12", + "MYSQL_8_0", + "POSTGRES_13" ], "enumDescriptions": [ "This is an unknown database version.", @@ -2196,42 +2255,44 @@ "The database version is SQL Server 2017 Express.", "The database version is SQL Server 2017 Web.", "The database version is PostgreSQL 10.", - "The database version is PostgreSQL 12." + "The database version is PostgreSQL 12.", + "The database version is MySQL 8.", + "The database version is PostgreSQL 13." ], "type": "string" }, "diskEncryptionConfiguration": { "$ref": "DiskEncryptionConfiguration", - "description": "Disk encryption configuration specific to an instance.\nApplies only to Second Generation instances." + "description": "Disk encryption configuration specific to an instance. Applies only to Second Generation instances." }, "diskEncryptionStatus": { "$ref": "DiskEncryptionStatus", - "description": "Disk encryption status specific to an instance.\nApplies only to Second Generation instances." + "description": "Disk encryption status specific to an instance. Applies only to Second Generation instances." 
}, "etag": { - "description": "This field is deprecated and will be removed from a future version of the\nAPI. Use the \u003ccode\u003esettings.settingsVersion\u003c/code\u003e field instead.", + "description": "This field is deprecated and will be removed from a future version of the API. Use the *settings.settingsVersion* field instead.", "type": "string" }, "failoverReplica": { - "description": "The name and status of the failover replica. This property is applicable\nonly to Second Generation instances.", + "description": "The name and status of the failover replica. This property is applicable only to Second Generation instances.", "properties": { "available": { - "description": "The availability status of the failover replica. A false status indicates\nthat the failover replica is out of sync. The master can only failover to\nthe failover replica when the status is true.", + "description": "The availability status of the failover replica. A false status indicates that the failover replica is out of sync. The primary instance can only failover to the failover replica when the status is true.", "type": "boolean" }, "name": { - "description": "The name of the failover replica. If specified at instance creation, a\nfailover replica is created for the instance. The name\ndoesn't include the project ID. This property is applicable only to\nSecond Generation instances.", + "description": "The name of the failover replica. If specified at instance creation, a failover replica is created for the instance. The name doesn't include the project ID. This property is applicable only to Second Generation instances.", "type": "string" } }, "type": "object" }, "gceZone": { - "description": "The Compute Engine zone that the instance is currently serving from. This\nvalue could be different from the zone that was specified when the instance\nwas created if the instance has failed over to its secondary zone.", + "description": "The Compute Engine zone that the instance is currently serving from. This value could be different from the zone that was specified when the instance was created if the instance has failed over to its secondary zone.", "type": "string" }, "instanceType": { - "description": "The instance type. This can be one of the following.\n\u003cbr\u003e\u003ccode\u003eCLOUD_SQL_INSTANCE\u003c/code\u003e: A Cloud SQL instance that is not\nreplicating from a master. \u003cbr\u003e\u003ccode\u003eON_PREMISES_INSTANCE\u003c/code\u003e: An\ninstance running on the\ncustomer's premises. \u003cbr\u003e\u003ccode\u003eREAD_REPLICA_INSTANCE\u003c/code\u003e: A Cloud SQL\ninstance configured as a read-replica.", + "description": "The instance type. This can be one of the following. *CLOUD_SQL_INSTANCE*: A Cloud SQL instance that is not replicating from a primary instance. *ON_PREMISES_INSTANCE*: An instance running on the customer's premises. *READ_REPLICA_INSTANCE*: A Cloud SQL instance configured as a read-replica.", "enum": [ "SQL_INSTANCE_TYPE_UNSPECIFIED", "CLOUD_SQL_INSTANCE", @@ -2241,7 +2302,7 @@ "enumDescriptions": [ "This is an unknown Cloud SQL instance type.", "A regular Cloud SQL instance.", - "An instance running on the customer's premises that is not managed by\nCloud SQL.", + "An instance running on the customer's premises that is not managed by Cloud SQL.", "A Cloud SQL instance acting as a read-replica." ], "type": "string" @@ -2254,15 +2315,15 @@ "type": "array" }, "ipv6Address": { - "description": "The IPv6 address assigned to the instance. 
This property is applicable only\nto First Generation instances.", + "description": "The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.", "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#instance\u003c/code\u003e.", + "description": "This is always *sql#instance*.", "type": "string" }, "masterInstanceName": { - "description": "The name of the instance which will act as master in the replication setup.", + "description": "The name of the instance which will act as primary in the replication setup.", "type": "string" }, "maxDiskSize": { @@ -2279,11 +2340,11 @@ "description": "Configuration specific to on-premises instances." }, "project": { - "description": "The project ID of the project containing the Cloud SQL instance. The Google\napps domain is prefixed if applicable.", + "description": "The project ID of the project containing the Cloud SQL instance. The Google apps domain is prefixed if applicable.", "type": "string" }, "region": { - "description": "The geographical region. Can be \u003ccode\u003eus-central\u003c/code\u003e\n(\u003ccode\u003eFIRST_GEN\u003c/code\u003e instances only), \u003ccode\u003eus-central1\u003c/code\u003e\n(\u003ccode\u003eSECOND_GEN\u003c/code\u003e instances only), \u003ccode\u003easia-east1\u003c/code\u003e or\n\u003ccode\u003eeurope-west1\u003c/code\u003e. Defaults to \u003ccode\u003eus-central\u003c/code\u003e or\n\u003ccode\u003eus-central1\u003c/code\u003e depending on the instance type (First Generation\nor Second Generation). The region can not be changed after instance\ncreation.", + "description": "The geographical region. Can be *us-central* (*FIRST_GEN* instances only) *us-central1* (*SECOND_GEN* instances only) *asia-east1* or *europe-west1*. Defaults to *us-central* or *us-central1* depending on the instance type. The region cannot be changed after instance creation.", "type": "string" }, "replicaConfiguration": { @@ -2314,7 +2375,7 @@ "description": "SSL configuration." }, "serviceAccountEmailAddress": { - "description": "The service account email address assigned to the instance. This property\nis applicable only to Second Generation instances.", + "description": "The service account email address assigned to the instance. This property is applicable only to Second Generation instances.", "type": "string" }, "settings": { @@ -2322,7 +2383,7 @@ "description": "The user settings." }, "state": { - "description": "The current serving state of the Cloud SQL instance. This can be one of the\nfollowing. \u003cbr\u003e\u003ccode\u003eRUNNABLE\u003c/code\u003e: The instance is running, or is ready\nto run when accessed. \u003cbr\u003e\u003ccode\u003eSUSPENDED\u003c/code\u003e: The instance is not\navailable, for example due to problems with billing.\n\u003cbr\u003e\u003ccode\u003ePENDING_CREATE\u003c/code\u003e: The instance is being created.\n\u003cbr\u003e\u003ccode\u003eMAINTENANCE\u003c/code\u003e: The instance is down for maintenance.\n\u003cbr\u003e\u003ccode\u003eFAILED\u003c/code\u003e: The instance creation failed.\n\u003cbr\u003e\u003ccode\u003eUNKNOWN_STATE\u003c/code\u003e: The state of the instance is unknown.", + "description": "The current serving state of the Cloud SQL instance. This can be one of the following. *SQL_INSTANCE_STATE_UNSPECIFIED*: The state of the instance is unknown. *RUNNABLE*: The instance has been stopped by owner. It is not currently running, but it's ready to be restarted. 
*SUSPENDED*: The instance is not available, for example due to problems with billing. *PENDING_DELETE*: The instance is being deleted. *PENDING_CREATE*: The instance is being created. *MAINTENANCE*: The instance is down for maintenance. *FAILED*: The instance creation failed.", "enum": [ "SQL_INSTANCE_STATE_UNSPECIFIED", "RUNNABLE", @@ -2334,24 +2395,17 @@ ], "enumDescriptions": [ "The state of the instance is unknown.", - "The instance is running.", - "The instance is currently offline, but it may run again in the future.", + "The instance has been stopped by owner. It is not currently running, but it's ready to be restarted.", + "The instance is not available, for example due to problems with billing.", "The instance is being deleted.", "The instance is being created.", "The instance is down for maintenance.", - "The instance failed to be created." + "The creation of the instance failed or a fatal error occurred during maintenance." ], "type": "string" }, "suspensionReason": { "description": "If the instance state is SUSPENDED, the reason for the suspension.", - "enumDescriptions": [ - "This is an unknown suspension reason.", - "The instance is suspended due to billing issues (e.g., GCP account issue)", - "The instance is suspended due to illegal content (e.g., child pornography,\ncopyrighted material, etc.).", - "The instance is causing operational issues (e.g., causing the database\nto crash).", - "The KMS key used by the instance is either revoked or denied access to" - ], "items": { "enum": [ "SQL_SUSPENSION_REASON_UNSPECIFIED", @@ -2360,6 +2414,13 @@ "OPERATIONAL_ISSUE", "KMS_KEY_ISSUE" ], + "enumDescriptions": [ + "This is an unknown suspension reason.", + "The instance is suspended due to billing issues (for example, GCP account issue)", + "The instance is suspended due to illegal content (for example, child pornography, copyrighted material, etc.).", + "The instance is causing operational issues (for example, causing the database to crash).", + "The KMS key used by the instance is either revoked or denied access to" + ], "type": "string" }, "type": "array" @@ -2379,45 +2440,45 @@ "type": "array" }, "kind": { - "description": "This is always \u003ccode\u003esql#databasesList\u003c/code\u003e.", + "description": "This is always *sql#databasesList*.", "type": "string" } }, "type": "object" }, "DemoteMasterConfiguration": { - "description": "Read-replica configuration for connecting to the on-premises master.", + "description": "Read-replica configuration for connecting to the on-premises primary instance.", "id": "DemoteMasterConfiguration", "properties": { "kind": { - "description": "This is always \u003ccode\u003esql#demoteMasterConfiguration\u003c/code\u003e.", + "description": "This is always *sql#demoteMasterConfiguration*.", "type": "string" }, "mysqlReplicaConfiguration": { "$ref": "DemoteMasterMySqlReplicaConfiguration", - "description": "MySQL specific configuration when replicating from a MySQL on-premises\nmaster. Replication configuration information such as the username,\npassword, certificates, and keys are not stored in the instance metadata.\nThe configuration information is used only to set up the replication\nconnection and is stored by MySQL in a file named \u003ccode\u003emaster.info\u003c/code\u003e\nin the data directory." + "description": "MySQL specific configuration when replicating from a MySQL on-premises primary instance. 
Replication configuration information such as the username, password, certificates, and keys are not stored in the instance metadata. The configuration information is used only to set up the replication connection and is stored by MySQL in a file named *master.info* in the data directory." } }, "type": "object" }, "DemoteMasterContext": { - "description": "Database instance demote master context.", + "description": "Database instance demote primary instance context.", "id": "DemoteMasterContext", "properties": { "kind": { - "description": "This is always \u003ccode\u003esql#demoteMasterContext\u003c/code\u003e.", + "description": "This is always *sql#demoteMasterContext*.", "type": "string" }, "masterInstanceName": { - "description": "The name of the instance which will act as on-premises master in the\nreplication setup.", + "description": "The name of the instance which will act as on-premises primary instance in the replication setup.", "type": "string" }, "replicaConfiguration": { "$ref": "DemoteMasterConfiguration", - "description": "Configuration specific to read-replicas replicating from the on-premises\nmaster." + "description": "Configuration specific to read-replicas replicating from the on-premises primary instance." }, "verifyGtidConsistency": { - "description": "Verify GTID consistency for demote operation. Default value:\n\u003ccode\u003eTrue\u003c/code\u003e. Second Generation instances only. Setting this flag to\nfalse enables you to bypass GTID consistency check between on-premises\nmaster and Cloud SQL instance during the demotion operation but also\nexposes you to the risk of future replication failures. Change the value\nonly if you know the reason for the GTID divergence and are confident that\ndoing so will not cause any replication issues.", + "description": "Verify GTID consistency for demote operation. Default value: *True*. Second Generation instances only. Setting this flag to false enables you to bypass GTID consistency check between on-premises primary instance and Cloud SQL instance during the demotion operation but also exposes you to the risk of future replication failures. Change the value only if you know the reason for the GTID divergence and are confident that doing so will not cause any replication issues.", "type": "boolean" } }, @@ -2432,15 +2493,15 @@ "type": "string" }, "clientCertificate": { - "description": "PEM representation of the slave's x509 certificate.", + "description": "PEM representation of the replica's x509 certificate.", "type": "string" }, "clientKey": { - "description": "PEM representation of the slave's private key. The corresponsing public key\nis encoded in the client's certificate. The format of the slave's private\nkey can be either PKCS #1 or PKCS #8.", + "description": "PEM representation of the replica's private key. The corresponding public key is encoded in the client's certificate. The format of the replica's private key can be either PKCS #1 or PKCS #8.", "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#demoteMasterMysqlReplicaConfiguration\u003c/code\u003e.", + "description": "This is always *sql#demoteMasterMysqlReplicaConfiguration*.", "type": "string" }, "password": { @@ -2454,12 +2515,31 @@ }, "type": "object" }, + "DenyMaintenancePeriod": { + "description": "Deny Maintenance Periods. This specifies a date range during when all CSA rollout will be denied.", + "id": "DenyMaintenancePeriod", + "properties": { + "endDate": { + "description": "\"deny maintenance period\" end date. 
If the year of the end date is empty, the year of the start date also must be empty. In this case, it means the deny maintenance period recurs every year. The date is in format yyyy-mm-dd i.e., 2020-11-01, or mm-dd, i.e., 11-01", + "type": "string" + }, + "startDate": { + "description": "\"deny maintenance period\" start date. If the year of the start date is empty, the year of the end date also must be empty. In this case, it means the deny maintenance period recurs every year. The date is in format yyyy-mm-dd i.e., 2020-11-01, or mm-dd, i.e., 11-01", + "type": "string" + }, + "time": { + "description": "Time in UTC when the \"deny maintenance period\" starts on start_date and ends on end_date. The time is in format: HH:mm:SS, i.e., 00:00:00", + "type": "string" + } + }, + "type": "object" + }, "DiskEncryptionConfiguration": { "description": "Disk encryption configuration for an instance.", "id": "DiskEncryptionConfiguration", "properties": { "kind": { - "description": "This is always \u003ccode\u003esql#diskEncryptionConfiguration\u003c/code\u003e.", + "description": "This is always *sql#diskEncryptionConfiguration*.", "type": "string" }, "kmsKeyName": { @@ -2474,7 +2554,7 @@ "id": "DiskEncryptionStatus", "properties": { "kind": { - "description": "This is always \u003ccode\u003esql#diskEncryptionStatus\u003c/code\u003e.", + "description": "This is always *sql#diskEncryptionStatus*.", "type": "string" }, "kmsKeyVersionName": { @@ -2499,14 +2579,14 @@ "type": "object" }, "databases": { - "description": "Databases to be exported. \u003cbr /\u003e \u003cb\u003eMySQL instances:\u003c/b\u003e If\n\u003ccode\u003efileType\u003c/code\u003e is \u003ccode\u003eSQL\u003c/code\u003e and no database is specified, all\ndatabases are exported, except for the \u003ccode\u003emysql\u003c/code\u003e system database.\nIf \u003ccode\u003efileType\u003c/code\u003e is \u003ccode\u003eCSV\u003c/code\u003e, you can specify one database,\neither by using this property or by using the\n\u003ccode\u003ecsvExportOptions.selectQuery\u003c/code\u003e property, which takes precedence\nover this property. \u003cbr /\u003e \u003cb\u003ePostgreSQL instances:\u003c/b\u003e You must specify\none database to be exported. If \u003ccode\u003efileType\u003c/code\u003e is \u003ccode\u003eCSV\u003c/code\u003e,\nthis database must match the one specified in the\n\u003ccode\u003ecsvExportOptions.selectQuery\u003c/code\u003e property.", + "description": "Databases to be exported. *MySQL instances:* If *fileType* is *SQL* and no database is specified, all databases are exported, except for the *mysql* system database. If *fileType* is *CSV*, you can specify one database, either by using this property or by using the *csvExportOptions.selectQuery* property, which takes precedence over this property. *PostgreSQL instances:* You must specify one database to be exported. If *fileType* is *CSV*, this database must match the one specified in the *csvExportOptions.selectQuery* property.", "items": { "type": "string" }, "type": "array" }, "fileType": { - "description": "The file type for the specified uri. \u003cbr\u003e\u003ccode\u003eSQL\u003c/code\u003e: The file\ncontains SQL statements. \u003cbr\u003e\u003ccode\u003eCSV\u003c/code\u003e: The file contains CSV data.", + "description": "The file type for the specified uri. *SQL*: The file contains SQL statements. 
*CSV*: The file contains CSV data.", "enum": [ "SQL_FILE_TYPE_UNSPECIFIED", "SQL", @@ -2522,9 +2602,13 @@ "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#exportContext\u003c/code\u003e.", + "description": "This is always *sql#exportContext*.", "type": "string" }, + "offload": { + "description": "Option for export offload.", + "type": "boolean" + }, "sqlExportOptions": { "description": "Options for exporting data as SQL statements.", "properties": { @@ -2532,7 +2616,7 @@ "description": "Options for exporting from MySQL.", "properties": { "masterData": { - "description": "Option to include SQL statement required to set up replication.\nIf set to \u003ccode\u003e1\u003c/code\u003e, the dump file includes\n a CHANGE MASTER TO statement with the binary log coordinates.\nIf set to \u003ccode\u003e2\u003c/code\u003e, the CHANGE MASTER TO statement is written as\n a SQL comment, and has no effect.\nAll other values are ignored.", + "description": "Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment, and has no effect. All other values are ignored.", "format": "int32", "type": "integer" } @@ -2544,7 +2628,7 @@ "type": "boolean" }, "tables": { - "description": "Tables to export, or that were exported, from the specified database. If\nyou specify tables, specify one and only one database. For PostgreSQL\ninstances, you can specify only one table.", + "description": "Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.", "items": { "type": "string" }, @@ -2554,7 +2638,7 @@ "type": "object" }, "uri": { - "description": "The path to the file in Google Cloud Storage where the export will be\nstored. The URI is in the form \u003ccode\u003egs:\n//bucketName/fileName\u003c/code\u003e. If the file already exists, the requests\n// succeeds, but the operation fails. If \u003ccode\u003efileType\u003c/code\u003e is\n// \u003ccode\u003eSQL\u003c/code\u003e and the filename ends with .gz, the contents are\n// compressed.", + "description": "The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs: //bucketName/fileName*. If the file already exists, the requests // succeeds, but the operation fails. If *fileType* is // *SQL* and the filename ends with .gz, the contents are // compressed.", "type": "string" } }, @@ -2565,11 +2649,11 @@ "id": "FailoverContext", "properties": { "kind": { - "description": "This is always \u003ccode\u003esql#failoverContext\u003c/code\u003e.", + "description": "This is always *sql#failoverContext*.", "type": "string" }, "settingsVersion": { - "description": "The current settings version of this instance. Request will be rejected if\nthis version doesn't match the current settings version.", + "description": "The current settings version of this instance. Request will be rejected if this version doesn't match the current settings version.", "format": "int64", "type": "string" } @@ -2581,7 +2665,7 @@ "id": "Flag", "properties": { "allowedIntValues": { - "description": "Use this field if only certain integers are accepted. Can be combined\nwith min_value and max_value to add additional values.", + "description": "Use this field if only certain integers are accepted. 
Can be combined with min_value and max_value to add additional values.", "items": { "format": "int64", "type": "string" @@ -2589,29 +2673,14 @@ "type": "array" }, "allowedStringValues": { - "description": "For \u003ccode\u003eSTRING\u003c/code\u003e flags, a list of strings that the value can be set\nto.", + "description": "For *STRING* flags, a list of strings that the value can be set to.", "items": { "type": "string" }, "type": "array" }, "appliesTo": { - "description": "The database version this flag applies to. Can be \u003ccode\u003eMYSQL_5_5\u003c/code\u003e,\n\u003ccode\u003eMYSQL_5_6\u003c/code\u003e, or \u003ccode\u003eMYSQL_5_7\u003c/code\u003e. \u003ccode\u003eMYSQL_5_7\u003c/code\u003e\nis applicable only to Second Generation instances.", - "enumDescriptions": [ - "This is an unknown database version.", - "The database version is MySQL 5.1.", - "The database version is MySQL 5.5.", - "The database version is MySQL 5.6.", - "The database version is MySQL 5.7.", - "The database version is PostgreSQL 9.6.", - "The database version is PostgreSQL 11.", - "The database version is SQL Server 2017 Standard.", - "The database version is SQL Server 2017 Enterprise.", - "The database version is SQL Server 2017 Express.", - "The database version is SQL Server 2017 Web.", - "The database version is PostgreSQL 10.", - "The database version is PostgreSQL 12." - ], + "description": "The database version this flag applies to. Can be *MYSQL_8_0*, *MYSQL_5_6*, or *MYSQL_5_7*.", "items": { "enum": [ "SQL_DATABASE_VERSION_UNSPECIFIED", @@ -2626,7 +2695,26 @@ "SQLSERVER_2017_EXPRESS", "SQLSERVER_2017_WEB", "POSTGRES_10", - "POSTGRES_12" + "POSTGRES_12", + "MYSQL_8_0", + "POSTGRES_13" + ], + "enumDescriptions": [ + "This is an unknown database version.", + "The database version is MySQL 5.1.", + "The database version is MySQL 5.5.", + "The database version is MySQL 5.6.", + "The database version is MySQL 5.7.", + "The database version is PostgreSQL 9.6.", + "The database version is PostgreSQL 11.", + "The database version is SQL Server 2017 Standard.", + "The database version is SQL Server 2017 Enterprise.", + "The database version is SQL Server 2017 Express.", + "The database version is SQL Server 2017 Web.", + "The database version is PostgreSQL 10.", + "The database version is PostgreSQL 12.", + "The database version is MySQL 8.", + "The database version is PostgreSQL 13." ], "type": "string" }, @@ -2637,29 +2725,29 @@ "type": "boolean" }, "kind": { - "description": "This is always \u003ccode\u003esql#flag\u003c/code\u003e.", + "description": "This is always *sql#flag*.", "type": "string" }, "maxValue": { - "description": "For \u003ccode\u003eINTEGER\u003c/code\u003e flags, the maximum allowed value.", + "description": "For *INTEGER* flags, the maximum allowed value.", "format": "int64", "type": "string" }, "minValue": { - "description": "For \u003ccode\u003eINTEGER\u003c/code\u003e flags, the minimum allowed value.", + "description": "For *INTEGER* flags, the minimum allowed value.", "format": "int64", "type": "string" }, "name": { - "description": "This is the name of the flag. Flag names always use underscores, not\nhyphens, e.g. \u003ccode\u003emax_allowed_packet\u003c/code\u003e", + "description": "This is the name of the flag. Flag names always use underscores, not hyphens, for example: *max_allowed_packet*", "type": "string" }, "requiresRestart": { - "description": "Indicates whether changing this flag will trigger a database restart. 
Only\napplicable to Second Generation instances.", + "description": "Indicates whether changing this flag will trigger a database restart. Only applicable to Second Generation instances.", "type": "boolean" }, "type": { - "description": "The type of the flag. Flags are typed to being \u003ccode\u003eBOOLEAN\u003c/code\u003e,\n\u003ccode\u003eSTRING\u003c/code\u003e, \u003ccode\u003eINTEGER\u003c/code\u003e or \u003ccode\u003eNONE\u003c/code\u003e.\n\u003ccode\u003eNONE\u003c/code\u003e is used for flags which do not take a value, such as\n\u003ccode\u003eskip_grant_tables\u003c/code\u003e.", + "description": "The type of the flag. Flags are typed to being *BOOLEAN*, *STRING*, *INTEGER* or *NONE*. *NONE* is used for flags which do not take a value, such as *skip_grant_tables*.", "enum": [ "SQL_FLAG_TYPE_UNSPECIFIED", "BOOLEAN", @@ -2676,7 +2764,7 @@ "String type flag.", "Integer type flag.", "Flag type used for a server startup option.", - "Type introduced specically for MySQL TimeZone offset. Accept a string value\nwith the format [-12:59, 13:00].", + "Type introduced specically for MySQL TimeZone offset. Accept a string value with the format [-12:59, 13:00].", "Float type flag.", "Comma-separated list of the strings in a SqlFlagType enum." ], @@ -2697,7 +2785,7 @@ "type": "array" }, "kind": { - "description": "This is always \u003ccode\u003esql#flagsList\u003c/code\u003e.", + "description": "This is always *sql#flagsList*.", "type": "string" } }, @@ -2713,7 +2801,7 @@ "encryptionOptions": { "properties": { "certPath": { - "description": "Path to the Certificate (.cer) in Cloud Storage, in the form\n\u003ccode\u003egs://bucketName/fileName\u003c/code\u003e. The instance must have\nwrite permissions to the bucket and read access to the file.", + "description": "Path to the Certificate (.cer) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.", "type": "string" }, "pvkPassword": { @@ -2721,7 +2809,7 @@ "type": "string" }, "pvkPath": { - "description": "Path to the Certificate Private Key (.pvk) in Cloud Storage, in the\nform \u003ccode\u003egs://bucketName/fileName\u003c/code\u003e. The instance must have\nwrite permissions to the bucket and read access to the file.", + "description": "Path to the Certificate Private Key (.pvk) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.", "type": "string" } }, @@ -2734,7 +2822,7 @@ "description": "Options for importing data as CSV.", "properties": { "columns": { - "description": "The columns to which CSV data is imported. If not specified, all columns\nof the database table are loaded with CSV data.", + "description": "The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.", "items": { "type": "string" }, @@ -2748,11 +2836,11 @@ "type": "object" }, "database": { - "description": "The target database for the import. If \u003ccode\u003efileType\u003c/code\u003e is\n\u003ccode\u003eSQL\u003c/code\u003e, this field is required only if the import file does not\nspecify a database, and is overridden by any database specification in the\nimport file. If \u003ccode\u003efileType\u003c/code\u003e is \u003ccode\u003eCSV\u003c/code\u003e, one database\nmust be specified.", + "description": "The target database for the import. 
If *fileType* is *SQL*, this field is required only if the import file does not specify a database, and is overridden by any database specification in the import file. If *fileType* is *CSV*, one database must be specified.", "type": "string" }, "fileType": { - "description": "The file type for the specified uri. \u003cbr\u003e\u003ccode\u003eSQL\u003c/code\u003e: The file\ncontains SQL statements. \u003cbr\u003e\u003ccode\u003eCSV\u003c/code\u003e: The file contains CSV data.", + "description": "The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.", "enum": [ "SQL_FILE_TYPE_UNSPECIFIED", "SQL", @@ -2772,11 +2860,11 @@ "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#importContext\u003c/code\u003e.", + "description": "This is always *sql#importContext*.", "type": "string" }, "uri": { - "description": "Path to the import file in Cloud Storage, in the form\n\u003ccode\u003egs:\n//bucketName/fileName\u003c/code\u003e. Compressed gzip files (.gz) are supported\n// when \u003ccode\u003efileType\u003c/code\u003e is \u003ccode\u003eSQL\u003c/code\u003e. The instance must have\n// write permissions to the bucket and read access to the file.", + "description": "Path to the import file in Cloud Storage, in the form *gs: //bucketName/fileName*. Compressed gzip files (.gz) are supported // when *fileType* is *SQL*. The instance must have // write permissions to the bucket and read access to the file.", "type": "string" } }, @@ -2794,7 +2882,7 @@ "type": "object" }, "InstancesDemoteMasterRequest": { - "description": "Database demote master request.", + "description": "Database demote primary instance request.", "id": "InstancesDemoteMasterRequest", "properties": { "demoteMasterContext": { @@ -2849,11 +2937,11 @@ "type": "array" }, "kind": { - "description": "This is always \u003ccode\u003esql#instancesList\u003c/code\u003e.", + "description": "This is always *sql#instancesList*.", "type": "string" }, "nextPageToken": { - "description": "The continuation token, used to page through large result sets. Provide\nthis value in a subsequent request to return the next page of results.", + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", "type": "string" }, "warnings": { @@ -2881,7 +2969,7 @@ "type": "array" }, "kind": { - "description": "This is always \u003ccode\u003esql#instancesListServerCas\u003c/code\u003e.", + "description": "This is always *sql#instancesListServerCas*.", "type": "string" } }, @@ -2925,22 +3013,22 @@ "id": "IpConfiguration", "properties": { "authorizedNetworks": { - "description": "The list of external networks that are allowed to connect to the instance\nusing the IP. In \u003ca\nhref=\"http://en.wikipedia.org/wiki/CIDR_notation#CIDR_notation\"\u003eCIDR\nnotation\u003c/a\u003e, also known as 'slash' notation (e.g.\n\u003ccode\u003e192.168.100.0/24\u003c/code\u003e).", + "description": "The list of external networks that are allowed to connect to the instance using the IP. 
In 'CIDR' notation, also known as 'slash' notation (for example: *192.168.100.0/24*).", "items": { "$ref": "AclEntry" }, "type": "array" }, "ipv4Enabled": { - "description": "Whether the instance should be assigned an IP address or not.", + "description": "Whether the instance is assigned a public IP address or not.", "type": "boolean" }, "privateNetwork": { - "description": "The resource link for the VPC network from which the Cloud SQL instance is\naccessible for private IP. For example,\n\u003ccode\u003e/projects/myProject/global/networks/default\u003c/code\u003e. This setting can\nbe updated, but it cannot be removed after it is set.", + "description": "The resource link for the VPC network from which the Cloud SQL instance is accessible for private IP. For example, */projects/myProject/global/networks/default*. This setting can be updated, but it cannot be removed after it is set.", "type": "string" }, "requireSsl": { - "description": "Whether SSL connections over IP should be enforced or not.", + "description": "Whether SSL connections over IP are enforced or not.", "type": "boolean" } }, @@ -2955,12 +3043,12 @@ "type": "string" }, "timeToRetire": { - "description": "The due time for this IP to be retired in \u003ca\nhref=\"https://tools.ietf.org/html/rfc3339\"\u003eRFC 3339\u003c/a\u003e format, for example\n\u003ccode\u003e2012-11-15T16:19:00.094Z\u003c/code\u003e. This field is only available when\nthe IP is scheduled to be retired.", + "description": "The due time for this IP to be retired in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*. This field is only available when the IP is scheduled to be retired.", "format": "google-datetime", "type": "string" }, "type": { - "description": "The type of this IP address. A \u003ccode\u003ePRIMARY\u003c/code\u003e address is a public\naddress that can accept incoming connections. A \u003ccode\u003ePRIVATE\u003c/code\u003e\naddress is a private address that can accept incoming connections. An\n\u003ccode\u003eOUTGOING\u003c/code\u003e address is the source address of connections\noriginating from the instance, if supported.", + "description": "The type of this IP address. A *PRIMARY* address is a public address that can accept incoming connections. A *PRIVATE* address is a private address that can accept incoming connections. An *OUTGOING* address is the source address of connections originating from the instance, if supported.", "enum": [ "SQL_IP_ADDRESS_TYPE_UNSPECIFIED", "PRIMARY", @@ -2970,10 +3058,10 @@ ], "enumDescriptions": [ "This is an unknown IP address type.", - "IP address the customer is supposed to connect to. Usually this is the\nload balancer's IP address", - "Source IP address of the connection a read replica establishes to its\nexternal master. This IP address can be whitelisted by the customer\nin case it has a firewall that filters incoming connection to its\non premises master.", + "IP address the customer is supposed to connect to. Usually this is the load balancer's IP address", + "Source IP address of the connection a read replica establishes to its external primary instance. This IP address can be allowlisted by the customer in case it has a firewall that filters incoming connection to its on premises primary instance.", "Private IP used when using private IPs and network peering.", - "V1 IP of a migrated instance. We want the user to\ndecommission this IP as soon as the migration is complete.\nNote: V1 instances with V1 ip addresses will be counted as PRIMARY." + "V1 IP of a migrated instance. 
We want the user to decommission this IP as soon as the migration is complete. Note: V1 instances with V1 ip addresses will be counted as PRIMARY." ], "type": "string" } @@ -2981,26 +3069,26 @@ "type": "object" }, "LocationPreference": { - "description": "Preferred location. This specifies where a Cloud SQL instance should\npreferably be located, either in a specific Compute Engine zone, or\nco-located with an App Engine application. Note that if the preferred\nlocation is not available, the instance will be located as close as possible\nwithin the region. Only one location may be specified.", + "description": "Preferred location. This specifies where a Cloud SQL instance is located, either in a specific Compute Engine zone, or co-located with an App Engine application. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified.", "id": "LocationPreference", "properties": { "followGaeApplication": { - "description": "The AppEngine application to follow, it must be in the same region as the\nCloud SQL instance.", + "description": "The App Engine application to follow, it must be in the same region as the Cloud SQL instance.", "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#locationPreference\u003c/code\u003e.", + "description": "This is always *sql#locationPreference*.", "type": "string" }, "zone": { - "description": "The preferred Compute Engine zone (e.g. us-central1-a, us-central1-b,\netc.).", + "description": "The preferred Compute Engine zone (for example: us-central1-a, us-central1-b, etc.).", "type": "string" } }, "type": "object" }, "MaintenanceWindow": { - "description": "Maintenance window. This specifies when a v2 Cloud SQL instance should\npreferably be restarted for system maintenance purposes.", + "description": "Maintenance window. This specifies when a Cloud SQL instance is restarted for system maintenance purposes.", "id": "MaintenanceWindow", "properties": { "day": { @@ -3014,11 +3102,11 @@ "type": "integer" }, "kind": { - "description": "This is always \u003ccode\u003esql#maintenanceWindow\u003c/code\u003e.", + "description": "This is always *sql#maintenanceWindow*.", "type": "string" }, "updateTrack": { - "description": "Maintenance timing setting: \u003ccode\u003ecanary\u003c/code\u003e (Earlier) or\n\u003ccode\u003estable\u003c/code\u003e (Later). \u003cbr /\u003e\u003ca\nhref=\"/sql/docs/db_path/instance-settings#maintenance-timing-2ndgen\"\u003e\nLearn more\u003c/a\u003e.", + "description": "Maintenance timing setting: *canary* (Earlier) or *stable* (Later). Learn more.", "enum": [ "SQL_UPDATE_TRACK_UNSPECIFIED", "canary", @@ -3026,8 +3114,8 @@ ], "enumDescriptions": [ "This is an unknown maintenance timing preference.", - "For instance update that requires a restart, this update track indicates\nyour instance prefer to restart for new version early in maintenance\nwindow.", - "For instance update that requires a restart, this update track indicates\nyour instance prefer to let Cloud SQL choose the timing of restart (within\nits Maintenance window, if applicable)." + "For instance update that requires a restart, this update track indicates your instance prefer to restart for new version early in maintenance window.", + "For instance update that requires a restart, this update track indicates your instance prefer to let Cloud SQL choose the timing of restart (within its Maintenance window, if applicable)." 
], "type": "string" } @@ -3043,11 +3131,11 @@ "type": "string" }, "clientCertificate": { - "description": "PEM representation of the slave's x509 certificate.", + "description": "PEM representation of the replica's x509 certificate.", "type": "string" }, "clientKey": { - "description": "PEM representation of the slave's private key. The corresponsing public key\nis encoded in the client's certificate.", + "description": "PEM representation of the replica's private key. The corresponsing public key is encoded in the client's certificate.", "type": "string" }, "connectRetryInterval": { @@ -3056,11 +3144,11 @@ "type": "integer" }, "dumpFilePath": { - "description": "Path to a SQL dump file in Google Cloud Storage from which the slave\ninstance is to be created. The URI is in the form gs:\n//bucketName/fileName. Compressed gzip files (.gz) are also supported.\n// Dumps should have the binlog co-ordinates from which replication should\n// begin. This can be accomplished by setting --master-data to 1 when using\n// mysqldump.", + "description": "Path to a SQL dump file in Google Cloud Storage from which the replica instance is to be created. The URI is in the form gs://bucketName/fileName. Compressed gzip files (.gz) are also supported. Dumps have the binlog co-ordinates from which replication begins. This can be accomplished by setting --master-data to 1 when using mysqldump.", "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#mysqlReplicaConfiguration\u003c/code\u003e.", + "description": "This is always *sql#mysqlReplicaConfiguration*.", "type": "string" }, "masterHeartbeatPeriod": { @@ -3081,7 +3169,7 @@ "type": "string" }, "verifyServerCertificate": { - "description": "Whether or not to check the master's Common Name value in the certificate\nthat it sends during the SSL handshake.", + "description": "Whether or not to check the primary instance's Common Name value in the certificate that it sends during the SSL handshake.", "type": "boolean" } }, @@ -3096,11 +3184,11 @@ "type": "string" }, "clientCertificate": { - "description": "PEM representation of the slave's x509 certificate.", + "description": "PEM representation of the replica's x509 certificate.", "type": "string" }, "clientKey": { - "description": "PEM representation of the slave's private key. The corresponsing public key\nis encoded in the client's certificate.", + "description": "PEM representation of the replica's private key. The corresponsing public key is encoded in the client's certificate.", "type": "string" }, "dumpFilePath": { @@ -3112,7 +3200,7 @@ "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#onPremisesConfiguration\u003c/code\u003e.", + "description": "This is always *sql#onPremisesConfiguration*.", "type": "string" }, "password": { @@ -3127,17 +3215,17 @@ "type": "object" }, "Operation": { - "description": "An Operation resource.\u0026nbsp;For successful operations that return an\nOperation resource, only the fields relevant to the operation are populated\nin the resource.", + "description": "An Operation resource. 
For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource.", "id": "Operation", "properties": { "endTime": { - "description": "The time this operation finished in UTC timezone in \u003ca\nhref=\"https://tools.ietf.org/html/rfc3339\"\u003eRFC 3339\u003c/a\u003e format, for example\n\u003ccode\u003e2012-11-15T16:19:00.094Z\u003c/code\u003e.", + "description": "The time this operation finished in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.", "format": "google-datetime", "type": "string" }, "error": { "$ref": "OperationErrors", - "description": "If errors occurred during processing of this operation, this field will be\npopulated." + "description": "If errors occurred during processing of this operation, this field will be populated." }, "exportContext": { "$ref": "ExportContext", @@ -3148,20 +3236,20 @@ "description": "The context for import operation, if applicable." }, "insertTime": { - "description": "The time this operation was enqueued in UTC timezone in \u003ca\nhref=\"https://tools.ietf.org/html/rfc3339\"\u003eRFC 3339\u003c/a\u003e format, for example\n\u003ccode\u003e2012-11-15T16:19:00.094Z\u003c/code\u003e.", + "description": "The time this operation was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.", "format": "google-datetime", "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#operation\u003c/code\u003e.", + "description": "This is always *sql#operation*.", "type": "string" }, "name": { - "description": "An identifier that uniquely identifies the operation. You can use this\nidentifier to retrieve the Operations resource that has information about\nthe operation.", + "description": "An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.", "type": "string" }, "operationType": { - "description": "The type of the operation. Valid values are \u003ccode\u003eCREATE\u003c/code\u003e,\n\u003ccode\u003eDELETE\u003c/code\u003e, \u003ccode\u003eUPDATE\u003c/code\u003e, \u003ccode\u003eRESTART\u003c/code\u003e,\n\u003ccode\u003eIMPORT\u003c/code\u003e, \u003ccode\u003eEXPORT\u003c/code\u003e, \u003ccode\u003eBACKUP_VOLUME\u003c/code\u003e,\n\u003ccode\u003eRESTORE_VOLUME\u003c/code\u003e, \u003ccode\u003eCREATE_USER\u003c/code\u003e,\n\u003ccode\u003eDELETE_USER\u003c/code\u003e, \u003ccode\u003eCREATE_DATABASE\u003c/code\u003e,\n\u003ccode\u003eDELETE_DATABASE\u003c/code\u003e .", + "description": "The type of the operation. 
Valid values are: *CREATE* *DELETE* *UPDATE* *RESTART* *IMPORT* *EXPORT* *BACKUP_VOLUME* *RESTORE_VOLUME* *CREATE_USER* *DELETE_USER* *CREATE_DATABASE* *DELETE_DATABASE*", "enum": [ "SQL_OPERATION_TYPE_UNSPECIFIED", "IMPORT", @@ -3202,7 +3290,7 @@ "enumDescriptions": [ "Unknown operation type.", "Imports data into a Cloud SQL instance.", - "Exports data from a Cloud SQL instance to a Cloud Storage\nbucket.", + "Exports data from a Cloud SQL instance to a Cloud Storage bucket.", "Creates a new Cloud SQL instance.", "Updates the settings of a Cloud SQL instance.", "Deletes a Cloud SQL instance.", @@ -3224,17 +3312,17 @@ "Creates a database in the Cloud SQL instance.", "Deletes a database in the Cloud SQL instance.", "Updates a database in the Cloud SQL instance.", - "Performs failover of an HA-enabled Cloud SQL\nfailover replica.", + "Performs failover of an HA-enabled Cloud SQL failover replica.", "Deletes the backup taken by a backup run.", "", "Truncates a general or slow log table in MySQL.", - "Demotes the stand-alone instance to be a Cloud SQL\nread replica for an external database server.", - "Indicates that the instance is currently in maintenance. Maintenance\ntypically causes the instance to be unavailable for 1-3 minutes.", + "Demotes the stand-alone instance to be a Cloud SQL read replica for an external database server.", + "Indicates that the instance is currently in maintenance. Maintenance typically causes the instance to be unavailable for 1-3 minutes.", "This field is deprecated, and will be removed in future version of API.", "", "Creates clone instance.", "Reschedule maintenance to another time.", - "Starts external sync of a Cloud SQL EM replica to an external master." + "Starts external sync of a Cloud SQL EM replica to an external primary instance." ], "type": "string" }, @@ -3243,12 +3331,12 @@ "type": "string" }, "startTime": { - "description": "The time this operation actually started in UTC timezone in \u003ca\nhref=\"https://tools.ietf.org/html/rfc3339\"\u003eRFC 3339\u003c/a\u003e format, for example\n\u003ccode\u003e2012-11-15T16:19:00.094Z\u003c/code\u003e.", + "description": "The time this operation actually started in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.", "format": "google-datetime", "type": "string" }, "status": { - "description": "The status of an operation. Valid values are \u003ccode\u003ePENDING\u003c/code\u003e,\n\u003ccode\u003eRUNNING\u003c/code\u003e, \u003ccode\u003eDONE\u003c/code\u003e,\n\u003ccode\u003eSQL_OPERATION_STATUS_UNSPECIFIED\u003c/code\u003e.", + "description": "The status of an operation. Valid values are: *PENDING* *RUNNING* *DONE* *SQL_OPERATION_STATUS_UNSPECIFIED*", "enum": [ "SQL_OPERATION_STATUS_UNSPECIFIED", "PENDING", @@ -3290,7 +3378,7 @@ "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#operationError\u003c/code\u003e.", + "description": "This is always *sql#operationError*.", "type": "string" }, "message": { @@ -3312,7 +3400,7 @@ "type": "array" }, "kind": { - "description": "This is always \u003ccode\u003esql#operationErrors\u003c/code\u003e.", + "description": "This is always *sql#operationErrors*.", "type": "string" } }, @@ -3330,31 +3418,31 @@ "type": "array" }, "kind": { - "description": "This is always \u003ccode\u003esql#operationsList\u003c/code\u003e.", + "description": "This is always *sql#operationsList*.", "type": "string" }, "nextPageToken": { - "description": "The continuation token, used to page through large result sets. 
Provide\nthis value in a subsequent request to return the next page of results.", + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", "type": "string" } }, "type": "object" }, "ReplicaConfiguration": { - "description": "Read-replica configuration for connecting to the master.", + "description": "Read-replica configuration for connecting to the primary instance.", "id": "ReplicaConfiguration", "properties": { "failoverTarget": { - "description": "Specifies if the replica is the failover target. If the field is set to\n\u003ccode\u003etrue\u003c/code\u003e the replica will be designated as a failover replica. In\ncase the master instance fails, the replica instance will be promoted as\nthe new master instance. \u003cp\u003eOnly one replica can be specified as failover\ntarget, and the replica has to be in different zone with the master\ninstance.", + "description": "Specifies if the replica is the failover target. If the field is set to *true* the replica will be designated as a failover replica. In case the primary instance fails, the replica instance will be promoted as the new primary instance. Only one replica can be specified as failover target, and the replica has to be in different zone with the primary instance.", "type": "boolean" }, "kind": { - "description": "This is always \u003ccode\u003esql#replicaConfiguration\u003c/code\u003e.", + "description": "This is always *sql#replicaConfiguration*.", "type": "string" }, "mysqlReplicaConfiguration": { "$ref": "MySqlReplicaConfiguration", - "description": "MySQL specific configuration when replicating from a MySQL on-premises\nmaster. Replication configuration information such as the username,\npassword, certificates, and keys are not stored in the instance metadata.\nThe configuration information is used only to set up the replication\nconnection and is stored by MySQL in a file named \u003ccode\u003emaster.info\u003c/code\u003e\nin the data directory." + "description": "MySQL specific configuration when replicating from a MySQL on-premises primary instance. Replication configuration information such as the username, password, certificates, and keys are not stored in the instance metadata. The configuration information is used only to set up the replication connection and is stored by MySQL in a file named *master.info* in the data directory." } }, "type": "object" @@ -3373,13 +3461,13 @@ "enumDescriptions": [ "", "If the user wants to schedule the maintenance to happen now.", - "If the user wants to use the existing maintenance policy to find the\nnext available window.", + "If the user wants to use the existing maintenance policy to find the next available window.", "If the user wants to reschedule the maintenance to a specific time." ], "type": "string" }, "scheduleTime": { - "description": "Optional. Timestamp when the maintenance shall be rescheduled to if\nreschedule_type=SPECIFIC_TIME, in \u003ca\nhref=\"https://tools.ietf.org/html/rfc3339\"\u003eRFC 3339\u003c/a\u003e format, for\nexample \u003ccode\u003e2012-11-15T16:19:00.094Z\u003c/code\u003e.", + "description": "Optional. 
Timestamp when the maintenance shall be rescheduled to if reschedule_type=SPECIFIC_TIME, in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.", "format": "google-datetime", "type": "string" } @@ -3387,7 +3475,7 @@ "type": "object" }, "RestoreBackupContext": { - "description": "Database instance restore from backup context.\nBackup context contains source instance id and project id.", + "description": "Database instance restore from backup context. Backup context contains source instance id and project id.", "id": "RestoreBackupContext", "properties": { "backupRunId": { @@ -3400,7 +3488,7 @@ "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#restoreBackupContext\u003c/code\u003e.", + "description": "This is always *sql#restoreBackupContext*.", "type": "string" }, "project": { @@ -3415,11 +3503,11 @@ "id": "RotateServerCaContext", "properties": { "kind": { - "description": "This is always \u003ccode\u003esql#rotateServerCaContext\u003c/code\u003e.", + "description": "This is always *sql#rotateServerCaContext*.", "type": "string" }, "nextVersion": { - "description": "The fingerprint of the next version to be rotated to. If left unspecified,\nwill be rotated to the most recently added server CA version.", + "description": "The fingerprint of the next version to be rotated to. If left unspecified, will be rotated to the most recently added server CA version.", "type": "string" } }, @@ -3430,7 +3518,7 @@ "id": "Settings", "properties": { "activationPolicy": { - "description": "The activation policy specifies when the instance is activated; it is\napplicable only when the instance state is \u003ccode\u003eRUNNABLE\u003c/code\u003e. Valid\nvalues: \u003cbr\u003e\u003ccode\u003eALWAYS\u003c/code\u003e: The instance is on, and remains so even in\nthe absence of connection requests. \u003cbr\u003e\u003ccode\u003eNEVER\u003c/code\u003e: The instance is\noff; it is not activated, even if a connection request arrives.\n\u003cbr\u003e\u003ccode\u003eON_DEMAND\u003c/code\u003e: First Generation instances only. The instance\nresponds to incoming requests, and turns itself off when not in use.\nInstances with \u003ccode\u003ePER_USE\u003c/code\u003e pricing turn off after 15 minutes of\ninactivity. Instances with \u003ccode\u003ePER_PACKAGE\u003c/code\u003e pricing turn off after\n12 hours of inactivity.", + "description": "The activation policy specifies when the instance is activated; it is applicable only when the instance state is RUNNABLE. Valid values: *ALWAYS*: The instance is on, and remains so even in the absence of connection requests. *NEVER*: The instance is off; it is not activated, even if a connection request arrives.", "enum": [ "SQL_ACTIVATION_POLICY_UNSPECIFIED", "ALWAYS", @@ -3440,20 +3528,24 @@ "enumDescriptions": [ "Unknown activation plan.", "The instance is always up and running.", - "The instance should never spin up.", - "The instance spins up upon receiving requests." + "The instance never starts.", + "The instance starts upon receiving requests." ], "type": "string" }, + "activeDirectoryConfig": { + "$ref": "SqlActiveDirectoryConfig", + "description": "Active Directory configuration, for now relevant only for SQL Server" + }, "authorizedGaeApplications": { - "description": "The App Engine app IDs that can access this instance. First Generation\ninstances only.", + "description": "The App Engine app IDs that can access this instance. 
(Deprecated) Applied to First Generation instances only.", "items": { "type": "string" }, "type": "array" }, "availabilityType": { - "description": "Availability type (PostgreSQL and MySQL instances only). Potential values:\n\u003cbr\u003e\u003ccode\u003eZONAL\u003c/code\u003e: The instance serves data from only one zone.\nOutages in that zone affect data accessibility. \u003cbr\u003e\u003ccode\u003eREGIONAL\u003c/code\u003e:\nThe instance can serve data from more than one zone in a region (it is\nhighly available). \u003cbr\u003eFor more information, see \u003ca\nhref=\"https://cloud.google.com/sql/docs/postgres/high-availability\"\u003eOverview\nof the High Availability Configuration\u003c/a\u003e.", + "description": "Availability type. Potential values: *ZONAL*: The instance serves data from only one zone. Outages in that zone affect data accessibility. *REGIONAL*: The instance can serve data from more than one zone in a region (it is highly available). For more information, see Overview of the High Availability Configuration.", "enum": [ "SQL_AVAILABILITY_TYPE_UNSPECIFIED", "ZONAL", @@ -3470,17 +3562,21 @@ "$ref": "BackupConfiguration", "description": "The daily backup configuration for the instance." }, + "collation": { + "description": "The name of server Instance collation.", + "type": "string" + }, "crashSafeReplicationEnabled": { - "description": "Configuration specific to read replica instances. Indicates whether\ndatabase flags for crash-safe replication are enabled. This property is\nonly applicable to First Generation instances.", + "description": "Configuration specific to read replica instances. Indicates whether database flags for crash-safe replication are enabled. This property was only applicable to First Generation instances.", "type": "boolean" }, "dataDiskSizeGb": { - "description": "The size of data disk, in GB. The data disk size minimum is 10GB. Not used\nfor First Generation instances.", + "description": "The size of data disk, in GB. The data disk size minimum is 10GB.", "format": "int64", "type": "string" }, "dataDiskType": { - "description": "The type of data disk: \u003ccode\u003ePD_SSD\u003c/code\u003e (default) or\n\u003ccode\u003ePD_HDD\u003c/code\u003e. Not used for First Generation instances.", + "description": "The type of data disk: PD_SSD (default) or PD_HDD. Not used for First Generation instances.", "enum": [ "SQL_DATA_DISK_TYPE_UNSPECIFIED", "PD_SSD", @@ -3491,7 +3587,7 @@ "This is an unknown data disk type.", "An SSD data disk.", "An HDD data disk.", - "This field is deprecated and will be removed from a future version of the\nAPI." + "This field is deprecated and will be removed from a future version of the API." ], "type": "string" }, @@ -3503,27 +3599,34 @@ "type": "array" }, "databaseReplicationEnabled": { - "description": "Configuration specific to read replica instances. Indicates whether\nreplication is enabled or not.", + "description": "Configuration specific to read replica instances. Indicates whether replication is enabled or not.", "type": "boolean" }, + "denyMaintenancePeriods": { + "description": "Deny maintenance periods", + "items": { + "$ref": "DenyMaintenancePeriod" + }, + "type": "array" + }, "ipConfiguration": { "$ref": "IpConfiguration", - "description": "The settings for IP Management. This allows to enable or disable the\ninstance IP and manage which external networks can connect to the instance.\nThe IPv4 address cannot be disabled for Second Generation instances." + "description": "The settings for IP Management. 
This allows to enable or disable the instance IP and manage which external networks can connect to the instance. The IPv4 address cannot be disabled for Second Generation instances." }, "kind": { - "description": "This is always \u003ccode\u003esql#settings\u003c/code\u003e.", + "description": "This is always *sql#settings*.", "type": "string" }, "locationPreference": { "$ref": "LocationPreference", - "description": "The location preference settings. This allows the instance to be located as\nnear as possible to either an App Engine app or Compute Engine zone for\nbetter performance. App Engine co-location is only applicable to First\nGeneration instances." + "description": "The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances." }, "maintenanceWindow": { "$ref": "MaintenanceWindow", - "description": "The maintenance window for this instance. This specifies when the instance\ncan be restarted for maintenance purposes. Not used for First Generation\ninstances." + "description": "The maintenance window for this instance. This specifies when the instance can be restarted for maintenance purposes." }, "pricingPlan": { - "description": "The pricing plan for this instance. This can be either \u003ccode\u003ePER_USE\u003c/code\u003e\nor \u003ccode\u003ePACKAGE\u003c/code\u003e. Only \u003ccode\u003ePER_USE\u003c/code\u003e is supported for Second\nGeneration instances.", + "description": "The pricing plan for this instance. This can be either *PER_USE* or *PACKAGE*. Only *PER_USE* is supported for Second Generation instances.", "enum": [ "SQL_PRICING_PLAN_UNSPECIFIED", "PACKAGE", @@ -3537,7 +3640,7 @@ "type": "string" }, "replicationType": { - "description": "The type of replication this instance uses. This can be either\n\u003ccode\u003eASYNCHRONOUS\u003c/code\u003e or \u003ccode\u003eSYNCHRONOUS\u003c/code\u003e. This property is\nonly applicable to First Generation instances.", + "description": "The type of replication this instance uses. This can be either *ASYNCHRONOUS* or *SYNCHRONOUS*. (Deprecated) This property was only applicable to First Generation instances.", "enum": [ "SQL_REPLICATION_TYPE_UNSPECIFIED", "SYNCHRONOUS", @@ -3545,41 +3648,56 @@ ], "enumDescriptions": [ "This is an unknown replication type for a Cloud SQL instance.", - "The synchronous replication mode for First Generation instances. It is the\ndefault value.", - "The asynchronous replication mode for First Generation instances. It\nprovides a slight performance gain, but if an outage occurs while this\noption is set to asynchronous, you can lose up to a few seconds of updates\nto your data." + "The synchronous replication mode for First Generation instances. It is the default value.", + "The asynchronous replication mode for First Generation instances. It provides a slight performance gain, but if an outage occurs while this option is set to asynchronous, you can lose up to a few seconds of updates to your data." ], "type": "string" }, "settingsVersion": { - "description": "The version of instance settings. 
This is a required field for update method to make sure concurrent updates are handled properly. During update, use the most recent settingsVersion value for this instance and do not try to update this value.", "format": "int64", "type": "string" }, "storageAutoResize": { - "description": "Configuration to increase storage size automatically. The default value is\ntrue. Not used for First Generation instances.", + "description": "Configuration to increase storage size automatically. The default value is true.", "type": "boolean" }, "storageAutoResizeLimit": { - "description": "The maximum size to which storage capacity can be automatically increased.\nThe default value is 0, which specifies that there is no limit. Not used\nfor First Generation instances.", + "description": "The maximum size to which storage capacity can be automatically increased. The default value is 0, which specifies that there is no limit.", "format": "int64", "type": "string" }, "tier": { - "description": "The tier (or machine type) for this instance, for example\n\u003ccode\u003edb-n1-standard-1\u003c/code\u003e (MySQL instances) or\n\u003ccode\u003edb-custom-1-3840\u003c/code\u003e (PostgreSQL instances). For MySQL instances,\nthis property determines whether the instance is First or Second\nGeneration. For more information, see \u003ca\nhref=\"/sql/docs/db_path/instance-settings\"\u003eInstance Settings\u003c/a\u003e.", + "description": "The tier (or machine type) for this instance, for example *db-n1-standard-1* (MySQL instances) or *db-custom-1-3840* (PostgreSQL instances).", "type": "string" }, "userLabels": { "additionalProperties": { "type": "string" }, - "description": "User-provided labels, represented as a dictionary where each label is a\nsingle key value pair.", + "description": "User-provided labels, represented as a dictionary where each label is a single key value pair.", "type": "object" } }, "type": "object" }, + "SqlActiveDirectoryConfig": { + "description": "Active Directory configuration, for now relevant only for SQL Server", + "id": "SqlActiveDirectoryConfig", + "properties": { + "domain": { + "description": "Domain name", + "type": "string" + }, + "kind": { + "description": "This will be always sql#activeDirectoryConfig.", + "type": "string" + } + }, + "type": "object" + }, "SqlExternalSyncSettingError": { - "description": "External master migration setting error.", + "description": "External primary instance migration setting error.", "id": "SqlExternalSyncSettingError", "properties": { "detail": { @@ -3587,7 +3705,7 @@ "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#migrationSettingError\u003c/code\u003e.", + "description": "This is always *sql#migrationSettingError*.", "type": "string" }, "type": { @@ -3601,7 +3719,18 @@ "INSUFFICIENT_PRIVILEGE", "UNSUPPORTED_MIGRATION_TYPE", "NO_PGLOGICAL_INSTALLED", - "PGLOGICAL_NODE_ALREADY_EXISTS" + "PGLOGICAL_NODE_ALREADY_EXISTS", + "INVALID_WAL_LEVEL", + "INVALID_SHARED_PRELOAD_LIBRARY", + "INSUFFICIENT_MAX_REPLICATION_SLOTS", + "INSUFFICIENT_MAX_WAL_SENDERS", + "INSUFFICIENT_MAX_WORKER_PROCESSES", + "UNSUPPORTED_EXTENSIONS", + "INVALID_RDS_LOGICAL_REPLICATION", + "INVALID_LOGGING_SETUP", + "INVALID_DB_PARAM", + "UNSUPPORTED_GTID_MODE", + "SQLSERVER_AGENT_NOT_RUNNING" ], "enumDescriptions": [ "", @@ -3612,7 +3741,18 @@ "", "Unsupported migration type.", "No pglogical extension installed on databases, applicable for postgres.", - "pglogical node already exists on databases, applicable for postgres." 
+ "pglogical node already exists on databases, applicable for postgres.", + "The value of parameter wal_level is not set to logical.", + "The value of parameter shared_preload_libraries does not include pglogical.", + "The value of parameter max_replication_slots is not sufficient.", + "The value of parameter max_wal_senders is not sufficient.", + "The value of parameter max_worker_processes is not sufficient.", + "Extensions installed are either not supported or having unsupported versions", + "The value of parameter rds.logical_replication is not set to 1.", + "The primary instance logging setup doesn't allow EM sync.", + "The primary instance database parameter setup doesn't allow EM sync.", + "The gtid_mode is not supported, applicable for MySQL.", + "SQL Server Agent is not running." ], "type": "string" } @@ -3642,7 +3782,7 @@ "type": "array" }, "kind": { - "description": "This is always \u003ccode\u003esql#migrationSettingErrorList\u003c/code\u003e.", + "description": "This is always *sql#migrationSettingErrorList*.", "type": "string" } }, @@ -3714,16 +3854,16 @@ "type": "string" }, "commonName": { - "description": "User supplied name. Constrained to [a-zA-Z.-_ ]+.", + "description": "User supplied name. Constrained to [a-zA-Z.-_ ]+.", "type": "string" }, "createTime": { - "description": "The time when the certificate was created in \u003ca\nhref=\"https://tools.ietf.org/html/rfc3339\"\u003eRFC 3339\u003c/a\u003e format, for example\n\u003ccode\u003e2012-11-15T16:19:00.094Z\u003c/code\u003e", + "description": "The time when the certificate was created in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*", "format": "google-datetime", "type": "string" }, "expirationTime": { - "description": "The time when the certificate expires in \u003ca\nhref=\"https://tools.ietf.org/html/rfc3339\"\u003eRFC 3339\u003c/a\u003e format, for example\n\u003ccode\u003e2012-11-15T16:19:00.094Z\u003c/code\u003e.", + "description": "The time when the certificate expires in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.", "format": "google-datetime", "type": "string" }, @@ -3732,7 +3872,7 @@ "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#sslCert\u003c/code\u003e.", + "description": "This is always *sql#sslCert*.", "type": "string" }, "selfLink": { @@ -3755,7 +3895,7 @@ "description": "The public information about the cert." }, "certPrivateKey": { - "description": "The private key for the client cert, in pem format. Keep private in order\nto protect your security.", + "description": "The private key for the client cert, in pem format. Keep private in order to protect your security.", "type": "string" } }, @@ -3777,7 +3917,7 @@ "id": "SslCertsInsertRequest", "properties": { "commonName": { - "description": "User supplied name. Must be a distinct name from the other certificates\nfor this instance.", + "description": "User supplied name. Must be a distinct name from the other certificates for this instance.", "type": "string" } }, @@ -3789,10 +3929,10 @@ "properties": { "clientCert": { "$ref": "SslCertDetail", - "description": "The new client certificate and private key. For First Generation\ninstances, the new certificate does not take effect until the instance is\nrestarted." + "description": "The new client certificate and private key." 
}, "kind": { - "description": "This is always \u003ccode\u003esql#sslCertsInsert\u003c/code\u003e.", + "description": "This is always *sql#sslCertsInsert*.", "type": "string" }, "operation": { @@ -3801,7 +3941,7 @@ }, "serverCaCert": { "$ref": "SslCert", - "description": "The server Certificate Authority's certificate. If this is missing you can\nforce a new one to be generated by calling resetSslConfig method on\ninstances resource." + "description": "The server Certificate Authority's certificate. If this is missing you can force a new one to be generated by calling resetSslConfig method on instances resource." } }, "type": "object" @@ -3818,7 +3958,7 @@ "type": "array" }, "kind": { - "description": "This is always \u003ccode\u003esql#sslCertsList\u003c/code\u003e.", + "description": "This is always *sql#sslCertsList*.", "type": "string" } }, @@ -3839,7 +3979,7 @@ "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#tier\u003c/code\u003e.", + "description": "This is always *sql#tier*.", "type": "string" }, "region": { @@ -3850,7 +3990,7 @@ "type": "array" }, "tier": { - "description": "An identifier for the machine type, for example, db-n1-standard-1. For\nrelated information, see \u003ca href=\"/sql/pricing\"\u003ePricing\u003c/a\u003e.", + "description": "An identifier for the machine type, for example, db-n1-standard-1. For related information, see Pricing.", "type": "string" } }, @@ -3868,7 +4008,7 @@ "type": "array" }, "kind": { - "description": "This is always \u003ccode\u003esql#tiersList\u003c/code\u003e.", + "description": "This is always *sql#tiersList*.", "type": "string" } }, @@ -3879,11 +4019,11 @@ "id": "TruncateLogContext", "properties": { "kind": { - "description": "This is always \u003ccode\u003esql#truncateLogContext\u003c/code\u003e.", + "description": "This is always *sql#truncateLogContext*.", "type": "string" }, "logType": { - "description": "The type of log to truncate. Valid values are\n\u003ccode\u003eMYSQL_GENERAL_TABLE\u003c/code\u003e and \u003ccode\u003eMYSQL_SLOW_TABLE\u003c/code\u003e.", + "description": "The type of log to truncate. Valid values are *MYSQL_GENERAL_TABLE* and *MYSQL_SLOW_TABLE*.", "type": "string" } }, @@ -3894,23 +4034,23 @@ "id": "User", "properties": { "etag": { - "description": "This field is deprecated and will be removed from a future version of the\nAPI.", + "description": "This field is deprecated and will be removed from a future version of the API.", "type": "string" }, "host": { - "description": "The host name from which the user can connect. For \u003ccode\u003einsert\u003c/code\u003e\noperations, host defaults to an empty string. For \u003ccode\u003eupdate\u003c/code\u003e\noperations, host is specified as part of the request URL. The host name\ncannot be updated after insertion.", + "description": "The host name from which the user can connect. For *insert* operations, host defaults to an empty string. For *update* operations, host is specified as part of the request URL. The host name cannot be updated after insertion.", "type": "string" }, "instance": { - "description": "The name of the Cloud SQL instance. This does not include the project ID.\nCan be omitted for \u003ccode\u003eupdate\u003c/code\u003e since it is already specified on the\nURL.", + "description": "The name of the Cloud SQL instance. This does not include the project ID. 
Can be omitted for *update* since it is already specified on the URL.", "type": "string" }, "kind": { - "description": "This is always \u003ccode\u003esql#user\u003c/code\u003e.", + "description": "This is always *sql#user*.", "type": "string" }, "name": { - "description": "The name of the user in the Cloud SQL instance. Can be omitted for\n\u003ccode\u003eupdate\u003c/code\u003e since it is already specified in the URL.", + "description": "The name of the user in the Cloud SQL instance. Can be omitted for *update* since it is already specified in the URL.", "type": "string" }, "password": { @@ -3918,11 +4058,25 @@ "type": "string" }, "project": { - "description": "The project ID of the project containing the Cloud SQL database. The Google\napps domain is prefixed if applicable. Can be omitted for\n\u003ccode\u003eupdate\u003c/code\u003e since it is already specified on the URL.", + "description": "The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable. Can be omitted for *update* since it is already specified on the URL.", "type": "string" }, "sqlserverUserDetails": { "$ref": "SqlServerUserDetails" + }, + "type": { + "description": "The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type.", + "enum": [ + "BUILT_IN", + "CLOUD_IAM_USER", + "CLOUD_IAM_SERVICE_ACCOUNT" + ], + "enumDescriptions": [ + "The database's built-in user type.", + "Cloud IAM user.", + "Cloud IAM service account." + ], + "type": "string" } }, "type": "object" @@ -3939,11 +4093,11 @@ "type": "array" }, "kind": { - "description": "This is always \u003ccode\u003esql#usersList\u003c/code\u003e.", + "description": "This is always *sql#usersList*.", "type": "string" }, "nextPageToken": { - "description": "An identifier that uniquely identifies the operation. You can use this\nidentifier to retrieve the Operations resource that has information about\nthe operation.", + "description": "An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.", "type": "string" } }, diff --git a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go index c239799126e..02ab793e5bf 100644 --- a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go +++ b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go @@ -75,10 +75,11 @@ var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint -const apiId = "sql:v1beta4" -const apiName = "sql" +const apiId = "sqladmin:v1beta4" +const apiName = "sqladmin" const apiVersion = "v1beta4" const basePath = "https://sqladmin.googleapis.com/" +const mtlsBasePath = "https://sqladmin.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -98,6 +99,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -261,19 +263,16 @@ type UsersService struct { // AclEntry: An entry for an Access Control list. 
type AclEntry struct { // ExpirationTime: The time when this access control entry expires in - // RFC 3339 format, for - // example - // 2012-11-15T16:19:00.094Z. + // RFC 3339 format, for example *2012-11-15T16:19:00.094Z*. ExpirationTime string `json:"expirationTime,omitempty"` - // Kind: This is always sql#aclEntry. + // Kind: This is always *sql#aclEntry*. Kind string `json:"kind,omitempty"` // Name: Optional. A label to identify this entry. Name string `json:"name,omitempty"` - // Value: The whitelisted value for the access control list. + // Value: The allowlisted value for the access control list. Value string `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "ExpirationTime") to @@ -308,8 +307,7 @@ type ApiWarning struct { // "SQL_API_WARNING_CODE_UNSPECIFIED" - An unknown or unset warning // type from Cloud SQL API. // "REGION_UNREACHABLE" - Warning when one or more regions are not - // reachable. The returned result - // set may be incomplete. + // reachable. The returned result set may be incomplete. Code string `json:"code,omitempty"` // Message: The warning message. @@ -340,15 +338,17 @@ func (s *ApiWarning) MarshalJSON() ([]byte, error) { // BackupConfiguration: Database instance backup configuration. type BackupConfiguration struct { + // BackupRetentionSettings: Backup retention settings. + BackupRetentionSettings *BackupRetentionSettings `json:"backupRetentionSettings,omitempty"` + // BinaryLogEnabled: (MySQL only) Whether binary log is enabled. If - // backup configuration is - // disabled, binarylog must be disabled as well. + // backup configuration is disabled, binarylog must be disabled as well. BinaryLogEnabled bool `json:"binaryLogEnabled,omitempty"` // Enabled: Whether this configuration is enabled. Enabled bool `json:"enabled,omitempty"` - // Kind: This is always sql#backupConfiguration. + // Kind: This is always *sql#backupConfiguration*. Kind string `json:"kind,omitempty"` // Location: Location of the backup @@ -361,11 +361,56 @@ type BackupConfiguration struct { ReplicationLogArchivingEnabled bool `json:"replicationLogArchivingEnabled,omitempty"` // StartTime: Start time for the daily backup configuration in UTC - // timezone in the 24 - // hour format - HH:MM. + // timezone in the 24 hour format - *HH:MM*. StartTime string `json:"startTime,omitempty"` - // ForceSendFields is a list of field names (e.g. "BinaryLogEnabled") to + // TransactionLogRetentionDays: The number of days of transaction logs + // we retain for point in time restore, from 1-7. + TransactionLogRetentionDays int64 `json:"transactionLogRetentionDays,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "BackupRetentionSettings") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackupRetentionSettings") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *BackupConfiguration) MarshalJSON() ([]byte, error) { + type NoMethod BackupConfiguration + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackupRetentionSettings: We currently only support backup retention +// by specifying the number of backups we will retain. +type BackupRetentionSettings struct { + // RetainedBackups: Depending on the value of retention_unit, this is + // used to determine if a backup needs to be deleted. If retention_unit + // is 'COUNT', we will retain this many backups. + RetainedBackups int64 `json:"retainedBackups,omitempty"` + + // RetentionUnit: The unit that 'retained_backups' represents. + // + // Possible values: + // "RETENTION_UNIT_UNSPECIFIED" - Backup retention unit is + // unspecified, will be treated as COUNT. + // "COUNT" - Retention will be by count, eg. "retain the most recent 7 + // backups". + RetentionUnit string `json:"retentionUnit,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RetainedBackups") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -373,7 +418,7 @@ type BackupConfiguration struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BinaryLogEnabled") to + // NullFields is a list of field names (e.g. "RetainedBackups") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -383,55 +428,55 @@ type BackupConfiguration struct { NullFields []string `json:"-"` } -func (s *BackupConfiguration) MarshalJSON() ([]byte, error) { - type NoMethod BackupConfiguration +func (s *BackupRetentionSettings) MarshalJSON() ([]byte, error) { + type NoMethod BackupRetentionSettings raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BackupRun: A BackupRun resource. type BackupRun struct { + // BackupKind: Specifies the kind of backup, PHYSICAL or + // DEFAULT_SNAPSHOT. + // + // Possible values: + // "SQL_BACKUP_KIND_UNSPECIFIED" - This is an unknown BackupKind. + // "SNAPSHOT" - The snapshot based backups + // "PHYSICAL" - Physical backups + BackupKind string `json:"backupKind,omitempty"` + // Description: The description of this run, only applicable to // on-demand backups. Description string `json:"description,omitempty"` // DiskEncryptionConfiguration: Encryption configuration specific to a - // backup. - // Applies only to Second Generation instances. + // backup. Applies only to Second Generation instances. DiskEncryptionConfiguration *DiskEncryptionConfiguration `json:"diskEncryptionConfiguration,omitempty"` - // DiskEncryptionStatus: Encryption status specific to a backup. - // Applies only to Second Generation instances. + // DiskEncryptionStatus: Encryption status specific to a backup. Applies + // only to Second Generation instances. DiskEncryptionStatus *DiskEncryptionStatus `json:"diskEncryptionStatus,omitempty"` // EndTime: The time the backup operation completed in UTC timezone in - // RFC 3339 format, for - // example - // 2012-11-15T16:19:00.094Z. + // RFC 3339 format, for example *2012-11-15T16:19:00.094Z*. 
EndTime string `json:"endTime,omitempty"` - // EnqueuedTime: The time the run was enqueued in UTC timezone in - // RFC 3339 format, for - // example - // 2012-11-15T16:19:00.094Z. + // EnqueuedTime: The time the run was enqueued in UTC timezone in RFC + // 3339 format, for example *2012-11-15T16:19:00.094Z*. EnqueuedTime string `json:"enqueuedTime,omitempty"` // Error: Information about why the backup operation failed. This is - // only present if - // the run has the FAILED status. + // only present if the run has the FAILED status. Error *OperationError `json:"error,omitempty"` // Id: The identifier for this backup run. Unique only for a specific - // Cloud SQL - // instance. + // Cloud SQL instance. Id int64 `json:"id,omitempty,string"` // Instance: Name of the database instance. Instance string `json:"instance,omitempty"` - // Kind: This is always sql#backupRun. + // Kind: This is always *sql#backupRun*. Kind string `json:"kind,omitempty"` // Location: Location of the backups. @@ -441,10 +486,7 @@ type BackupRun struct { SelfLink string `json:"selfLink,omitempty"` // StartTime: The time the backup operation actually started in UTC - // timezone in RFC 3339 format, for - // example - // 2012-11-15T16:19:00.094Z. + // timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*. StartTime string `json:"startTime,omitempty"` // Status: The status of this run. @@ -454,15 +496,13 @@ type BackupRun struct { // unknown. // "ENQUEUED" - The backup operation was enqueued. // "OVERDUE" - The backup is overdue across a given backup window. - // Indicates a - // problem. Example: Long-running operation in progress during - // the whole window. + // Indicates a problem. Example: Long-running operation in progress + // during the whole window. // "RUNNING" - The backup is in progress. // "FAILED" - The backup failed. // "SUCCESSFUL" - The backup was successful. // "SKIPPED" - The backup was skipped (without problems) for a given - // backup - // window. Example: Instance was idle. + // backup window. Example: Instance was idle. // "DELETION_PENDING" - The backup is about to be deleted. // "DELETION_FAILED" - The backup deletion failed. // "DELETED" - The backup has been deleted. @@ -478,17 +518,15 @@ type BackupRun struct { Type string `json:"type,omitempty"` // WindowStartTime: The start time of the backup window during which - // this the backup was - // attempted in RFC - // 3339 - // format, for example 2012-11-15T16:19:00.094Z. + // this the backup was attempted in RFC 3339 format, for example + // *2012-11-15T16:19:00.094Z*. WindowStartTime string `json:"windowStartTime,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Description") to + // ForceSendFields is a list of field names (e.g. "BackupKind") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -496,10 +534,10 @@ type BackupRun struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. 
However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "BackupKind") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -517,13 +555,12 @@ type BackupRunsListResponse struct { // enqueued time. Items []*BackupRun `json:"items,omitempty"` - // Kind: This is always sql#backupRunsList. + // Kind: This is always *sql#backupRunsList*. Kind string `json:"kind,omitempty"` // NextPageToken: The continuation token, used to page through large - // result sets. Provide - // this value in a subsequent request to return the next page of - // results. + // result sets. Provide this value in a subsequent request to return the + // next page of results. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -561,7 +598,7 @@ type BinLogCoordinates struct { // BinLogPosition: Position (offset) within the binary log file. BinLogPosition int64 `json:"binLogPosition,omitempty,string"` - // Kind: This is always sql#binLogCoordinates. + // Kind: This is always *sql#binLogCoordinates*. Kind string `json:"kind,omitempty"` // ForceSendFields is a list of field names (e.g. "BinLogFileName") to @@ -591,17 +628,16 @@ func (s *BinLogCoordinates) MarshalJSON() ([]byte, error) { // CloneContext: Database instance clone context. type CloneContext struct { // BinLogCoordinates: Binary log coordinates, if specified, identify the - // position up to which the - // source instance should be cloned. If not specified, the source - // instance is - // cloned up to the most recent binary log coordinates. + // position up to which the source instance is cloned. If not specified, + // the source instance is cloned up to the most recent binary log + // coordinates. BinLogCoordinates *BinLogCoordinates `json:"binLogCoordinates,omitempty"` // DestinationInstanceName: Name of the Cloud SQL instance to be created // as a clone. DestinationInstanceName string `json:"destinationInstanceName,omitempty"` - // Kind: This is always sql#cloneContext. + // Kind: This is always *sql#cloneContext*. Kind string `json:"kind,omitempty"` // PitrTimestampMs: Reserved for future use. @@ -636,32 +672,29 @@ func (s *CloneContext) MarshalJSON() ([]byte, error) { // Database: Represents a SQL database on the Cloud SQL instance. type Database struct { - // Charset: The MySQL charset value. + // Charset: The Cloud SQL charset value. Charset string `json:"charset,omitempty"` - // Collation: The MySQL collation value. + // Collation: The Cloud SQL collation value. Collation string `json:"collation,omitempty"` // Etag: This field is deprecated and will be removed from a future - // version of the - // API. + // version of the API. Etag string `json:"etag,omitempty"` // Instance: The name of the Cloud SQL instance. This does not include // the project ID. Instance string `json:"instance,omitempty"` - // Kind: This is always sql#database. + // Kind: This is always *sql#database*. Kind string `json:"kind,omitempty"` // Name: The name of the database in the Cloud SQL instance. This does - // not include - // the project ID or instance name. 
+ // not include the project ID or instance name. Name string `json:"name,omitempty"` // Project: The project ID of the project containing the Cloud SQL - // database. The Google - // apps domain is prefixed if applicable. + // database. The Google apps domain is prefixed if applicable. Project string `json:"project,omitempty"` // SelfLink: The URI of this resource. @@ -699,21 +732,15 @@ func (s *Database) MarshalJSON() ([]byte, error) { // DatabaseFlags: Database flags for Cloud SQL instances. type DatabaseFlags struct { // Name: The name of the flag. These flags are passed at instance - // startup, so - // include both server options and system variables for MySQL. Flags - // should be - // specified with underscores, not hyphens. For more information, see - // Configuring Database Flags in the - // Cloud - // SQL documentation. + // startup, so include both server options and system variables for + // MySQL. Flags are specified with underscores, not hyphens. For more + // information, see Configuring Database Flags in the Cloud SQL + // documentation. Name string `json:"name,omitempty"` - // Value: The value of the flag. Booleans should be set to - // on for true - // and off for false. This field must be omitted if the - // flag - // doesn't take a value. + // Value: The value of the flag. Booleans are set to *on* for true and + // *off* for false. This field must be omitted if the flag doesn't take + // a value. Value string `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "Name") to @@ -739,19 +766,12 @@ func (s *DatabaseFlags) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// DatabaseInstance: A Cloud SQL instance resource. +// DatabaseInstance: A Cloud SQL instance resource. Next field: 36 type DatabaseInstance struct { - // BackendType: FIRST_GEN: First Generation instance. MySQL - // only.
- // SECOND_GEN: Second Generation instance or - // PostgreSQL - // instance.
- // EXTERNAL: A database server that is - // not - // managed by Google.
This property is read-only; use - // the - // tier property in the settings object to - // determine - // the database type and Second or First Generation. + // BackendType: *SECOND_GEN*: Cloud SQL database instance. *EXTERNAL*: + // A database server that is not managed by Google. This property is + // read-only; use the *tier* property in the *settings* object to + // determine the database type. // // Possible values: // "SQL_BACKEND_TYPE_UNSPECIFIED" - This is an unknown backend type @@ -766,28 +786,18 @@ type DatabaseInstance struct { ConnectionName string `json:"connectionName,omitempty"` // CurrentDiskSize: The current disk usage of the instance in bytes. - // This property has been - // deprecated. Users should use - // the - // "cloudsql.googleapis.com/database/disk/bytes_used" metric in - // Cloud - // Monitoring API instead. Please see - // this - // announcement for details. + // This property has been deprecated. Use the + // "cloudsql.googleapis.com/database/disk/bytes_used" metric in Cloud + // Monitoring API instead. Please see this announcement for details. CurrentDiskSize int64 `json:"currentDiskSize,omitempty,string"` // DatabaseVersion: The database engine type and version. The - // databaseVersion - // field can not be changed after instance creation. MySQL Second - // Generation - // instances: MYSQL_5_7 (default) or - // MYSQL_5_6. - // PostgreSQL instances: POSTGRES_9_6 (default) - // or - // POSTGRES_11 Beta MySQL First Generation - // instances: MYSQL_5_6 (default) or MYSQL_5_5 + // *databaseVersion* field cannot be changed after instance creation. + // MySQL instances: *MYSQL_8_0*, *MYSQL_5_7* (default), or *MYSQL_5_6*. + // PostgreSQL instances: *POSTGRES_9_6*, *POSTGRES_10*, *POSTGRES_11* or + // *POSTGRES_12* (default). SQL Server instances: + // *SQLSERVER_2017_STANDARD* (default), *SQLSERVER_2017_ENTERPRISE*, + // *SQLSERVER_2017_EXPRESS*, or *SQLSERVER_2017_WEB*. // // Possible values: // "SQL_DATABASE_VERSION_UNSPECIFIED" - This is an unknown database @@ -807,44 +817,36 @@ type DatabaseInstance struct { // "SQLSERVER_2017_WEB" - The database version is SQL Server 2017 Web. // "POSTGRES_10" - The database version is PostgreSQL 10. // "POSTGRES_12" - The database version is PostgreSQL 12. + // "MYSQL_8_0" - The database version is MySQL 8. + // "POSTGRES_13" - The database version is PostgreSQL 13. DatabaseVersion string `json:"databaseVersion,omitempty"` // DiskEncryptionConfiguration: Disk encryption configuration specific - // to an instance. - // Applies only to Second Generation instances. + // to an instance. Applies only to Second Generation instances. DiskEncryptionConfiguration *DiskEncryptionConfiguration `json:"diskEncryptionConfiguration,omitempty"` - // DiskEncryptionStatus: Disk encryption status specific to an - // instance. + // DiskEncryptionStatus: Disk encryption status specific to an instance. // Applies only to Second Generation instances. DiskEncryptionStatus *DiskEncryptionStatus `json:"diskEncryptionStatus,omitempty"` // Etag: This field is deprecated and will be removed from a future - // version of the - // API. Use the settings.settingsVersion field instead. + // version of the API. Use the *settings.settingsVersion* field instead. Etag string `json:"etag,omitempty"` // FailoverReplica: The name and status of the failover replica. This - // property is applicable - // only to Second Generation instances. + // property is applicable only to Second Generation instances. 
FailoverReplica *DatabaseInstanceFailoverReplica `json:"failoverReplica,omitempty"` // GceZone: The Compute Engine zone that the instance is currently - // serving from. This - // value could be different from the zone that was specified when the - // instance - // was created if the instance has failed over to its secondary zone. + // serving from. This value could be different from the zone that was + // specified when the instance was created if the instance has failed + // over to its secondary zone. GceZone string `json:"gceZone,omitempty"` - // InstanceType: The instance type. This can be one of the - // following. - //
- // CLOUD_SQL_INSTANCE: A Cloud SQL instance that is - // not - // replicating from a master.
- // ON_PREMISES_INSTANCE: - An - // instance running on the - // customer's premises.
READ_REPLICA_INSTANCE: A Cloud - // SQL + // InstanceType: The instance type. This can be one of the following. + // *CLOUD_SQL_INSTANCE*: A Cloud SQL instance that is not replicating + // from a primary instance. *ON_PREMISES_INSTANCE*: An instance running + // on the customer's premises. *READ_REPLICA_INSTANCE*: A Cloud SQL // instance configured as a read-replica. // // Possible values: @@ -852,8 +854,7 @@ type DatabaseInstance struct { // instance type. // "CLOUD_SQL_INSTANCE" - A regular Cloud SQL instance. // "ON_PREMISES_INSTANCE" - An instance running on the customer's - // premises that is not managed by - // Cloud SQL. + // premises that is not managed by Cloud SQL. // "READ_REPLICA_INSTANCE" - A Cloud SQL instance acting as a // read-replica. InstanceType string `json:"instanceType,omitempty"` @@ -861,16 +862,15 @@ type DatabaseInstance struct { // IpAddresses: The assigned IP addresses for the instance. IpAddresses []*IpMapping `json:"ipAddresses,omitempty"` - // Ipv6Address: The IPv6 address assigned to the instance. This property - // is applicable only - // to First Generation instances. + // Ipv6Address: The IPv6 address assigned to the instance. (Deprecated) + // This property was applicable only to First Generation instances. Ipv6Address string `json:"ipv6Address,omitempty"` - // Kind: This is always sql#instance. + // Kind: This is always *sql#instance*. Kind string `json:"kind,omitempty"` - // MasterInstanceName: The name of the instance which will act as master - // in the replication setup. + // MasterInstanceName: The name of the instance which will act as + // primary in the replication setup. MasterInstanceName string `json:"masterInstanceName,omitempty"` // MaxDiskSize: The maximum disk size of the instance in bytes. @@ -885,23 +885,14 @@ type DatabaseInstance struct { OnPremisesConfiguration *OnPremisesConfiguration `json:"onPremisesConfiguration,omitempty"` // Project: The project ID of the project containing the Cloud SQL - // instance. The Google - // apps domain is prefixed if applicable. + // instance. The Google apps domain is prefixed if applicable. Project string `json:"project,omitempty"` - // Region: The geographical region. Can be - // us-central - // (FIRST_GEN instances only), - // us-central1 - // (SECOND_GEN instances only), asia-east1 - // or - // europe-west1. Defaults to us-central - // or - // us-central1 depending on the instance type (First - // Generation - // or Second Generation). The region can not be changed after - // instance - // creation. + // Region: The geographical region. Can be *us-central* (*FIRST_GEN* + // instances only) *us-central1* (*SECOND_GEN* instances only) + // *asia-east1* or *europe-west1*. Defaults to *us-central* or + // *us-central1* depending on the instance type. The region cannot be + // changed after instance creation. Region string `json:"region,omitempty"` // ReplicaConfiguration: Configuration specific to failover replicas and @@ -925,39 +916,36 @@ type DatabaseInstance struct { ServerCaCert *SslCert `json:"serverCaCert,omitempty"` // ServiceAccountEmailAddress: The service account email address - // assigned to the instance. This property - // is applicable only to Second Generation instances. + // assigned to the instance. This property is applicable only to Second + // Generation instances. ServiceAccountEmailAddress string `json:"serviceAccountEmailAddress,omitempty"` // Settings: The user settings. 
Settings *Settings `json:"settings,omitempty"` // State: The current serving state of the Cloud SQL instance. This can - // be one of the - // following.
- // RUNNABLE: The instance is running, or is - ready - // to run when accessed.
- // SUSPENDED: The instance is - not - // available, for example due to problems with - billing. -
- // PENDING_CREATE: The instance is being - created. -
- // MAINTENANCE: The instance is down for - maintenance. -
FAILED: The instance creation + // be one of the following. *SQL_INSTANCE_STATE_UNSPECIFIED*: The state + // of the instance is unknown. *RUNNABLE*: The instance has been stopped + // by owner. It is not currently running, but it's ready to be + // restarted. *SUSPENDED*: The instance is not available, for example + // due to problems with billing. for example due to problems with + // billing. *PENDING_DELETE*: The instance is being deleted. + // *PENDING_CREATE*: The instance is being created. *MAINTENANCE*: The + // instance is down for maintenance. *FAILED*: The instance creation // failed. - //
UNKNOWN_STATE: The state of the instance is unknown. // // Possible values: // "SQL_INSTANCE_STATE_UNSPECIFIED" - The state of the instance is // unknown. - // "RUNNABLE" - The instance is running. - // "SUSPENDED" - The instance is currently offline, but it may run - // again in the future. + // "RUNNABLE" - The instance has been stopped by owner. It is not + // currently running, but it's ready to be restarted. + // "SUSPENDED" - The instance is not available, for example due to + // problems with billing. // "PENDING_DELETE" - The instance is being deleted. // "PENDING_CREATE" - The instance is being created. // "MAINTENANCE" - The instance is down for maintenance. - // "FAILED" - The instance failed to be created. + // "FAILED" - The creation of the instance failed or a fatal error + // occurred during maintenance. State string `json:"state,omitempty"` // SuspensionReason: If the instance state is SUSPENDED, the reason for @@ -967,13 +955,11 @@ type DatabaseInstance struct { // "SQL_SUSPENSION_REASON_UNSPECIFIED" - This is an unknown suspension // reason. // "BILLING_ISSUE" - The instance is suspended due to billing issues - // (e.g., GCP account issue) + // (for example:, GCP account issue) // "LEGAL_ISSUE" - The instance is suspended due to illegal content - // (e.g., child pornography, - // copyrighted material, etc.). + // (for example:, child pornography, copyrighted material, etc.). // "OPERATIONAL_ISSUE" - The instance is causing operational issues - // (e.g., causing the database - // to crash). + // (for example:, causing the database to crash). // "KMS_KEY_ISSUE" - The KMS key used by the instance is either // revoked or denied access to SuspensionReason []string `json:"suspensionReason,omitempty"` @@ -1006,21 +992,18 @@ func (s *DatabaseInstance) MarshalJSON() ([]byte, error) { } // DatabaseInstanceFailoverReplica: The name and status of the failover -// replica. This property is applicable -// only to Second Generation instances. +// replica. This property is applicable only to Second Generation +// instances. type DatabaseInstanceFailoverReplica struct { // Available: The availability status of the failover replica. A false - // status indicates - // that the failover replica is out of sync. The master can only - // failover to - // the failover replica when the status is true. + // status indicates that the failover replica is out of sync. The + // primary instance can only failover to the failover replica when the + // status is true. Available bool `json:"available,omitempty"` // Name: The name of the failover replica. If specified at instance - // creation, a - // failover replica is created for the instance. The name - // doesn't include the project ID. This property is applicable only - // to + // creation, a failover replica is created for the instance. The name + // doesn't include the project ID. This property is applicable only to // Second Generation instances. Name string `json:"name,omitempty"` @@ -1052,7 +1035,7 @@ type DatabasesListResponse struct { // Items: List of database resources in the instance. Items []*Database `json:"items,omitempty"` - // Kind: This is always sql#databasesList. + // Kind: This is always *sql#databasesList*. Kind string `json:"kind,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1083,22 +1066,18 @@ func (s *DatabasesListResponse) MarshalJSON() ([]byte, error) { } // DemoteMasterConfiguration: Read-replica configuration for connecting -// to the on-premises master. 
+// to the on-premises primary instance. type DemoteMasterConfiguration struct { - // Kind: This is always sql#demoteMasterConfiguration. + // Kind: This is always *sql#demoteMasterConfiguration*. Kind string `json:"kind,omitempty"` // MysqlReplicaConfiguration: MySQL specific configuration when - // replicating from a MySQL on-premises - // master. Replication configuration information such as the - // username, - // password, certificates, and keys are not stored in the instance - // metadata. - // The configuration information is used only to set up the - // replication - // connection and is stored by MySQL in a file named - // master.info - // in the data directory. + // replicating from a MySQL on-premises primary instance. Replication + // configuration information such as the username, password, + // certificates, and keys are not stored in the instance metadata. The + // configuration information is used only to set up the replication + // connection and is stored by MySQL in a file named *master.info* in + // the data directory. MysqlReplicaConfiguration *DemoteMasterMySqlReplicaConfiguration `json:"mysqlReplicaConfiguration,omitempty"` // ForceSendFields is a list of field names (e.g. "Kind") to @@ -1124,34 +1103,28 @@ func (s *DemoteMasterConfiguration) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// DemoteMasterContext: Database instance demote master context. +// DemoteMasterContext: Database instance demote primary instance +// context. type DemoteMasterContext struct { - // Kind: This is always sql#demoteMasterContext. + // Kind: This is always *sql#demoteMasterContext*. Kind string `json:"kind,omitempty"` // MasterInstanceName: The name of the instance which will act as - // on-premises master in the - // replication setup. + // on-premises primary instance in the replication setup. MasterInstanceName string `json:"masterInstanceName,omitempty"` // ReplicaConfiguration: Configuration specific to read-replicas - // replicating from the on-premises - // master. + // replicating from the on-premises primary instance. ReplicaConfiguration *DemoteMasterConfiguration `json:"replicaConfiguration,omitempty"` // VerifyGtidConsistency: Verify GTID consistency for demote operation. - // Default value: - // True. Second Generation instances only. Setting this - // flag to - // false enables you to bypass GTID consistency check between - // on-premises - // master and Cloud SQL instance during the demotion operation but - // also - // exposes you to the risk of future replication failures. Change the - // value - // only if you know the reason for the GTID divergence and are confident - // that - // doing so will not cause any replication issues. + // Default value: *True*. Second Generation instances only. Setting this + // flag to false enables you to bypass GTID consistency check between + // on-premises primary instance and Cloud SQL instance during the + // demotion operation but also exposes you to the risk of future + // replication failures. Change the value only if you know the reason + // for the GTID divergence and are confident that doing so will not + // cause any replication issues. VerifyGtidConsistency bool `json:"verifyGtidConsistency,omitempty"` // ForceSendFields is a list of field names (e.g. "Kind") to @@ -1184,19 +1157,16 @@ type DemoteMasterMySqlReplicaConfiguration struct { // certificate. 
CaCertificate string `json:"caCertificate,omitempty"` - // ClientCertificate: PEM representation of the slave's x509 + // ClientCertificate: PEM representation of the replica's x509 // certificate. ClientCertificate string `json:"clientCertificate,omitempty"` - // ClientKey: PEM representation of the slave's private key. The - // corresponsing public key - // is encoded in the client's certificate. The format of the slave's - // private - // key can be either PKCS #1 or PKCS #8. + // ClientKey: PEM representation of the replica's private key. The + // corresponsing public key is encoded in the client's certificate. The + // format of the replica's private key can be either PKCS #1 or PKCS #8. ClientKey string `json:"clientKey,omitempty"` - // Kind: This is always - // sql#demoteMasterMysqlReplicaConfiguration. + // Kind: This is always *sql#demoteMasterMysqlReplicaConfiguration*. Kind string `json:"kind,omitempty"` // Password: The password for the replication connection. @@ -1228,10 +1198,54 @@ func (s *DemoteMasterMySqlReplicaConfiguration) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DenyMaintenancePeriod: Deny Maintenance Periods. This specifies a +// date range during when all CSA rollout will be denied. +type DenyMaintenancePeriod struct { + // EndDate: "deny maintenance period" end date. If the year of the end + // date is empty, the year of the start date also must be empty. In this + // case, it means the deny maintenance period recurs every year. The + // date is in format yyyy-mm-dd i.e., 2020-11-01, or mm-dd, i.e., 11-01 + EndDate string `json:"endDate,omitempty"` + + // StartDate: "deny maintenance period" start date. If the year of the + // start date is empty, the year of the end date also must be empty. In + // this case, it means the deny maintenance period recurs every year. + // The date is in format yyyy-mm-dd i.e., 2020-11-01, or mm-dd, i.e., + // 11-01 + StartDate string `json:"startDate,omitempty"` + + // Time: Time in UTC when the "deny maintenance period" starts on + // start_date and ends on end_date. The time is in format: HH:mm:SS, + // i.e., 00:00:00 + Time string `json:"time,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndDate") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndDate") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DenyMaintenancePeriod) MarshalJSON() ([]byte, error) { + type NoMethod DenyMaintenancePeriod + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DiskEncryptionConfiguration: Disk encryption configuration for an // instance. type DiskEncryptionConfiguration struct { - // Kind: This is always sql#diskEncryptionConfiguration. 
+ // Kind: This is always *sql#diskEncryptionConfiguration*. Kind string `json:"kind,omitempty"` // KmsKeyName: Resource name of KMS key for disk encryption @@ -1262,7 +1276,7 @@ func (s *DiskEncryptionConfiguration) MarshalJSON() ([]byte, error) { // DiskEncryptionStatus: Disk encryption status for an instance. type DiskEncryptionStatus struct { - // Kind: This is always sql#diskEncryptionStatus. + // Kind: This is always *sql#diskEncryptionStatus*. Kind string `json:"kind,omitempty"` // KmsKeyVersionName: KMS key version used to encrypt the Cloud SQL @@ -1297,31 +1311,19 @@ type ExportContext struct { // CsvExportOptions: Options for exporting data as CSV. CsvExportOptions *ExportContextCsvExportOptions `json:"csvExportOptions,omitempty"` - // Databases: Databases to be exported.
- // MySQL instances: - If - // fileType is SQL and no database is - // specified, all - // databases are exported, except for the mysql system - // database. - // If fileType is CSV, you can specify one - // database, - // either by using this property or by using - // the - // csvExportOptions.selectQuery property, which takes - // precedence - // over this property.
PostgreSQL instances: You must - // specify - // one database to be exported. If fileType is - // CSV, - // this database must match the one specified in - // the - // csvExportOptions.selectQuery property. + // Databases: Databases to be exported. *MySQL instances:* If *fileType* + // is *SQL* and no database is specified, all databases are exported, + // except for the *mysql* system database. If *fileType* is *CSV*, you + // can specify one database, either by using this property or by using + // the *csvExportOptions.selectQuery* property, which takes precedence + // over this property. *PostgreSQL instances:* You must specify one + // database to be exported. If *fileType* is *CSV*, this database must + // match the one specified in the *csvExportOptions.selectQuery* + // property. Databases []string `json:"databases,omitempty"` - // FileType: The file type for the specified uri.
SQL: - // The file - // contains SQL statements.
CSV: The file contains CSV - // data. + // FileType: The file type for the specified uri. *SQL*: The file + // contains SQL statements. *CSV*: The file contains CSV data. // // Possible values: // "SQL_FILE_TYPE_UNSPECIFIED" - Unknown file type. @@ -1330,22 +1332,20 @@ type ExportContext struct { // "BAK" FileType string `json:"fileType,omitempty"` - // Kind: This is always sql#exportContext. + // Kind: This is always *sql#exportContext*. Kind string `json:"kind,omitempty"` + // Offload: Option for export offload. + Offload bool `json:"offload,omitempty"` + // SqlExportOptions: Options for exporting data as SQL statements. SqlExportOptions *ExportContextSqlExportOptions `json:"sqlExportOptions,omitempty"` // Uri: The path to the file in Google Cloud Storage where the export - // will be - // stored. The URI is in the form - // gs: - // //bucketName/fileName. If the file already exists, the - // requests - // // succeeds, but the operation fails. If fileType is - // // SQL and the filename ends with .gz, the contents - // are - // // compressed. + // will be stored. The URI is in the form *gs: //bucketName/fileName*. + // If the file already exists, the requests // succeeds, but the + // operation fails. If *fileType* is // *SQL* and the filename ends with + // .gz, the contents are // compressed. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. "CsvExportOptions") to @@ -1410,10 +1410,8 @@ type ExportContextSqlExportOptions struct { SchemaOnly bool `json:"schemaOnly,omitempty"` // Tables: Tables to export, or that were exported, from the specified - // database. If - // you specify tables, specify one and only one database. For - // PostgreSQL - // instances, you can specify only one table. + // database. If you specify tables, specify one and only one database. + // For PostgreSQL instances, you can specify only one table. Tables []string `json:"tables,omitempty"` // ForceSendFields is a list of field names (e.g. "MysqlExportOptions") @@ -1444,12 +1442,9 @@ func (s *ExportContextSqlExportOptions) MarshalJSON() ([]byte, error) { // exporting from MySQL. type ExportContextSqlExportOptionsMysqlExportOptions struct { // MasterData: Option to include SQL statement required to set up - // replication. - // If set to 1, the dump file includes - // a CHANGE MASTER TO statement with the binary log coordinates. - // If set to 2, the CHANGE MASTER TO statement is written - // as - // a SQL comment, and has no effect. + // replication. If set to *1*, the dump file includes a CHANGE MASTER TO + // statement with the binary log coordinates. If set to *2*, the CHANGE + // MASTER TO statement is written as a SQL comment, and has no effect. // All other values are ignored. MasterData int64 `json:"masterData,omitempty"` @@ -1478,12 +1473,12 @@ func (s *ExportContextSqlExportOptionsMysqlExportOptions) MarshalJSON() ([]byte, // FailoverContext: Database instance failover context. type FailoverContext struct { - // Kind: This is always sql#failoverContext. + // Kind: This is always *sql#failoverContext*. Kind string `json:"kind,omitempty"` // SettingsVersion: The current settings version of this instance. - // Request will be rejected if - // this version doesn't match the current settings version. + // Request will be rejected if this version doesn't match the current + // settings version. SettingsVersion int64 `json:"settingsVersion,omitempty,string"` // ForceSendFields is a list of field names (e.g. 
"Kind") to @@ -1512,20 +1507,16 @@ func (s *FailoverContext) MarshalJSON() ([]byte, error) { // Flag: A flag resource. type Flag struct { // AllowedIntValues: Use this field if only certain integers are - // accepted. Can be combined - // with min_value and max_value to add additional values. + // accepted. Can be combined with min_value and max_value to add + // additional values. AllowedIntValues googleapi.Int64s `json:"allowedIntValues,omitempty"` - // AllowedStringValues: For STRING flags, a list of strings - // that the value can be set - // to. + // AllowedStringValues: For *STRING* flags, a list of strings that the + // value can be set to. AllowedStringValues []string `json:"allowedStringValues,omitempty"` // AppliesTo: The database version this flag applies to. Can be - // MYSQL_5_5, - // MYSQL_5_6, or MYSQL_5_7. - // MYSQL_5_7 - // is applicable only to Second Generation instances. + // *MYSQL_8_0*, *MYSQL_5_6*, or *MYSQL_5_7*. // // Possible values: // "SQL_DATABASE_VERSION_UNSPECIFIED" - This is an unknown database @@ -1545,37 +1536,33 @@ type Flag struct { // "SQLSERVER_2017_WEB" - The database version is SQL Server 2017 Web. // "POSTGRES_10" - The database version is PostgreSQL 10. // "POSTGRES_12" - The database version is PostgreSQL 12. + // "MYSQL_8_0" - The database version is MySQL 8. + // "POSTGRES_13" - The database version is PostgreSQL 13. AppliesTo []string `json:"appliesTo,omitempty"` // InBeta: Whether or not the flag is considered in beta. InBeta bool `json:"inBeta,omitempty"` - // Kind: This is always sql#flag. + // Kind: This is always *sql#flag*. Kind string `json:"kind,omitempty"` - // MaxValue: For INTEGER flags, the maximum allowed value. + // MaxValue: For *INTEGER* flags, the maximum allowed value. MaxValue int64 `json:"maxValue,omitempty,string"` - // MinValue: For INTEGER flags, the minimum allowed value. + // MinValue: For *INTEGER* flags, the minimum allowed value. MinValue int64 `json:"minValue,omitempty,string"` // Name: This is the name of the flag. Flag names always use - // underscores, not - // hyphens, e.g. max_allowed_packet + // underscores, not hyphens, for example: *max_allowed_packet* Name string `json:"name,omitempty"` // RequiresRestart: Indicates whether changing this flag will trigger a - // database restart. Only - // applicable to Second Generation instances. + // database restart. Only applicable to Second Generation instances. RequiresRestart bool `json:"requiresRestart,omitempty"` - // Type: The type of the flag. Flags are typed to being - // BOOLEAN, - // STRING, INTEGER or - // NONE. - // NONE is used for flags which do not take a value, such - // as - // skip_grant_tables. + // Type: The type of the flag. Flags are typed to being *BOOLEAN*, + // *STRING*, *INTEGER* or *NONE*. *NONE* is used for flags which do not + // take a value, such as *skip_grant_tables*. // // Possible values: // "SQL_FLAG_TYPE_UNSPECIFIED" - This is an unknown flag type. @@ -1584,8 +1571,8 @@ type Flag struct { // "INTEGER" - Integer type flag. // "NONE" - Flag type used for a server startup option. // "MYSQL_TIMEZONE_OFFSET" - Type introduced specically for MySQL - // TimeZone offset. Accept a string value - // with the format [-12:59, 13:00]. + // TimeZone offset. Accept a string value with the format [-12:59, + // 13:00]. // "FLOAT" - Float type flag. // "REPEATED_STRING" - Comma-separated list of the strings in a // SqlFlagType enum. @@ -1620,7 +1607,7 @@ type FlagsListResponse struct { // Items: List of flags. 
Items []*Flag `json:"items,omitempty"` - // Kind: This is always sql#flagsList. + // Kind: This is always *sql#flagsList*. Kind string `json:"kind,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1658,21 +1645,14 @@ type ImportContext struct { // CsvImportOptions: Options for importing data as CSV. CsvImportOptions *ImportContextCsvImportOptions `json:"csvImportOptions,omitempty"` - // Database: The target database for the import. If - // fileType is - // SQL, this field is required only if the import file does - // not - // specify a database, and is overridden by any database specification - // in the - // import file. If fileType is CSV, one - // database - // must be specified. + // Database: The target database for the import. If *fileType* is *SQL*, + // this field is required only if the import file does not specify a + // database, and is overridden by any database specification in the + // import file. If *fileType* is *CSV*, one database must be specified. Database string `json:"database,omitempty"` - // FileType: The file type for the specified uri.
SQL: - // The file - // contains SQL statements.
CSV: The file contains CSV - // data. + // FileType: The file type for the specified uri. *SQL*: The file + // contains SQL statements. *CSV*: The file contains CSV data. // // Possible values: // "SQL_FILE_TYPE_UNSPECIFIED" - Unknown file type. @@ -1685,17 +1665,13 @@ type ImportContext struct { // instances only. ImportUser string `json:"importUser,omitempty"` - // Kind: This is always sql#importContext. + // Kind: This is always *sql#importContext*. Kind string `json:"kind,omitempty"` - // Uri: Path to the import file in Cloud Storage, in the - // form - // gs: - // //bucketName/fileName. Compressed gzip files (.gz) are - // supported - // // when fileType is SQL. The instance must - // have - // // write permissions to the bucket and read access to the file. + // Uri: Path to the import file in Cloud Storage, in the form *gs: + // //bucketName/fileName*. Compressed gzip files (.gz) are supported // + // when *fileType* is *SQL*. The instance must have // write permissions + // to the bucket and read access to the file. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. "BakImportOptions") to @@ -1753,19 +1729,16 @@ func (s *ImportContextBakImportOptions) MarshalJSON() ([]byte, error) { type ImportContextBakImportOptionsEncryptionOptions struct { // CertPath: Path to the Certificate (.cer) in Cloud Storage, in the - // form - // gs://bucketName/fileName. The instance must have - // write permissions to the bucket and read access to the file. + // form *gs://bucketName/fileName*. The instance must have write + // permissions to the bucket and read access to the file. CertPath string `json:"certPath,omitempty"` // PvkPassword: Password that encrypts the private key PvkPassword string `json:"pvkPassword,omitempty"` - // PvkPath: Path to the Certificate Private Key (.pvk) in Cloud - // Storage, in the - // form gs://bucketName/fileName. The instance must - // have - // write permissions to the bucket and read access to the file. + // PvkPath: Path to the Certificate Private Key (.pvk) in Cloud Storage, + // in the form *gs://bucketName/fileName*. The instance must have write + // permissions to the bucket and read access to the file. PvkPath string `json:"pvkPath,omitempty"` // ForceSendFields is a list of field names (e.g. "CertPath") to @@ -1794,8 +1767,7 @@ func (s *ImportContextBakImportOptionsEncryptionOptions) MarshalJSON() ([]byte, // ImportContextCsvImportOptions: Options for importing data as CSV. type ImportContextCsvImportOptions struct { // Columns: The columns to which CSV data is imported. If not specified, - // all columns - // of the database table are loaded with CSV data. + // all columns of the database table are loaded with CSV data. Columns []string `json:"columns,omitempty"` // Table: The table to which CSV data is imported. @@ -1852,7 +1824,8 @@ func (s *InstancesCloneRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InstancesDemoteMasterRequest: Database demote master request. +// InstancesDemoteMasterRequest: Database demote primary instance +// request. type InstancesDemoteMasterRequest struct { // DemoteMasterContext: Contains details about the demoteMaster // operation. @@ -1972,13 +1945,12 @@ type InstancesListResponse struct { // Items: List of database instance resources. Items []*DatabaseInstance `json:"items,omitempty"` - // Kind: This is always sql#instancesList. + // Kind: This is always *sql#instancesList*. 
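Editor's note: to give the reworded ImportContext/ExportContext doc comments above some concrete context, here is a minimal, hypothetical sketch of driving this generated client for a SQL import. The import path (shown as sqladmin; the same surface also ships as package sql under sql/v1beta4), the use of Application Default Credentials, and the project/instance/bucket names are all assumptions, not part of the vendored file.

package main

import (
	"context"
	"log"

	sqladmin "google.golang.org/api/sqladmin/v1beta4" // assumed import path; adjust if this copy is vendored as sql/v1beta4
)

func main() {
	ctx := context.Background()

	// NewService falls back to Application Default Credentials when no options are passed.
	svc, err := sqladmin.NewService(ctx)
	if err != nil {
		log.Fatalf("creating Cloud SQL Admin service: %v", err)
	}

	// Import a gzip-compressed SQL dump from Cloud Storage, matching the
	// *gs://bucketName/fileName* form described for ImportContext.Uri above.
	op, err := svc.Instances.Import("my-project", "my-instance", &sqladmin.InstancesImportRequest{
		ImportContext: &sqladmin.ImportContext{
			FileType: "SQL",                        // *SQL* or *CSV*, per the FileType comment above
			Uri:      "gs://my-bucket/dump.sql.gz", // .gz is accepted when fileType is SQL
			Database: "mydb",                       // optional when the SQL dump already names a database
		},
	}).Context(ctx).Do()
	if err != nil {
		log.Fatalf("starting import: %v", err)
	}
	log.Printf("import operation %s is %s", op.Name, op.Status)
}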
Kind string `json:"kind,omitempty"` // NextPageToken: The continuation token, used to page through large - // result sets. Provide - // this value in a subsequent request to return the next page of - // results. + // result sets. Provide this value in a subsequent request to return the + // next page of results. NextPageToken string `json:"nextPageToken,omitempty"` // Warnings: List of warnings that occurred while handling the request. @@ -2018,7 +1990,7 @@ type InstancesListServerCasResponse struct { // Certs: List of server CA certificates for the instance. Certs []*SslCert `json:"certs,omitempty"` - // Kind: This is always sql#instancesListServerCas. + // Kind: This is always *sql#instancesListServerCas*. Kind string `json:"kind,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2144,31 +2116,21 @@ func (s *InstancesTruncateLogRequest) MarshalJSON() ([]byte, error) { // IpConfiguration: IP Management configuration. type IpConfiguration struct { // AuthorizedNetworks: The list of external networks that are allowed to - // connect to the instance - // using the IP. In - // CID - // R - // notation, also known as 'slash' notation - // (e.g. - // 192.168.100.0/24). + // connect to the instance using the IP. In 'CIDR' notation, also known + // as 'slash' notation (for example: *192.168.100.0/24*). AuthorizedNetworks []*AclEntry `json:"authorizedNetworks,omitempty"` - // Ipv4Enabled: Whether the instance should be assigned an IP address or + // Ipv4Enabled: Whether the instance is assigned a public IP address or // not. Ipv4Enabled bool `json:"ipv4Enabled,omitempty"` // PrivateNetwork: The resource link for the VPC network from which the - // Cloud SQL instance is - // accessible for private IP. For - // example, - // /projects/myProject/global/networks/default. This - // setting can - // be updated, but it cannot be removed after it is set. + // Cloud SQL instance is accessible for private IP. For example, + // */projects/myProject/global/networks/default*. This setting can be + // updated, but it cannot be removed after it is set. PrivateNetwork string `json:"privateNetwork,omitempty"` - // RequireSsl: Whether SSL connections over IP should be enforced or - // not. + // RequireSsl: Whether SSL connections over IP are enforced or not. RequireSsl bool `json:"requireSsl,omitempty"` // ForceSendFields is a list of field names (e.g. "AuthorizedNetworks") @@ -2200,43 +2162,31 @@ type IpMapping struct { // IpAddress: The IP address assigned. IpAddress string `json:"ipAddress,omitempty"` - // TimeToRetire: The due time for this IP to be retired in - // RFC 3339 format, for - // example - // 2012-11-15T16:19:00.094Z. This field is only available - // when - // the IP is scheduled to be retired. + // TimeToRetire: The due time for this IP to be retired in RFC 3339 + // format, for example *2012-11-15T16:19:00.094Z*. This field is only + // available when the IP is scheduled to be retired. TimeToRetire string `json:"timeToRetire,omitempty"` - // Type: The type of this IP address. A PRIMARY address is - // a public - // address that can accept incoming connections. A - // PRIVATE - // address is a private address that can accept incoming connections. - // An - // OUTGOING address is the source address of - // connections - // originating from the instance, if supported. + // Type: The type of this IP address. A *PRIMARY* address is a public + // address that can accept incoming connections. 
A *PRIVATE* address is + // a private address that can accept incoming connections. An *OUTGOING* + // address is the source address of connections originating from the + // instance, if supported. // // Possible values: // "SQL_IP_ADDRESS_TYPE_UNSPECIFIED" - This is an unknown IP address // type. // "PRIMARY" - IP address the customer is supposed to connect to. - // Usually this is the - // load balancer's IP address + // Usually this is the load balancer's IP address // "OUTGOING" - Source IP address of the connection a read replica - // establishes to its - // external master. This IP address can be whitelisted by the - // customer - // in case it has a firewall that filters incoming connection to its - // on premises master. + // establishes to its external primary instance. This IP address can be + // allowlisted by the customer in case it has a firewall that filters + // incoming connection to its on premises primary instance. // "PRIVATE" - Private IP used when using private IPs and network // peering. // "MIGRATED_1ST_GEN" - V1 IP of a migrated instance. We want the user - // to - // decommission this IP as soon as the migration is complete. - // Note: V1 instances with V1 ip addresses will be counted as PRIMARY. + // to decommission this IP as soon as the migration is complete. Note: + // V1 instances with V1 ip addresses will be counted as PRIMARY. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "IpAddress") to @@ -2263,26 +2213,20 @@ func (s *IpMapping) MarshalJSON() ([]byte, error) { } // LocationPreference: Preferred location. This specifies where a Cloud -// SQL instance should -// preferably be located, either in a specific Compute Engine zone, -// or -// co-located with an App Engine application. Note that if the -// preferred +// SQL instance is located, either in a specific Compute Engine zone, or +// co-located with an App Engine application. Note that if the preferred // location is not available, the instance will be located as close as -// possible -// within the region. Only one location may be specified. +// possible within the region. Only one location may be specified. type LocationPreference struct { - // FollowGaeApplication: The AppEngine application to follow, it must be - // in the same region as the - // Cloud SQL instance. + // FollowGaeApplication: The App Engine application to follow, it must + // be in the same region as the Cloud SQL instance. FollowGaeApplication string `json:"followGaeApplication,omitempty"` - // Kind: This is always sql#locationPreference. + // Kind: This is always *sql#locationPreference*. Kind string `json:"kind,omitempty"` - // Zone: The preferred Compute Engine zone (e.g. us-central1-a, - // us-central1-b, - // etc.). + // Zone: The preferred Compute Engine zone (for example: us-central1-a, + // us-central1-b, etc.). Zone string `json:"zone,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -2310,9 +2254,8 @@ func (s *LocationPreference) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// MaintenanceWindow: Maintenance window. This specifies when a v2 Cloud -// SQL instance should -// preferably be restarted for system maintenance purposes. +// MaintenanceWindow: Maintenance window. This specifies when a Cloud +// SQL instance is restarted for system maintenance purposes. type MaintenanceWindow struct { // Day: day of week (1-7), starting on Monday. 
Day int64 `json:"day,omitempty"` @@ -2320,30 +2263,21 @@ type MaintenanceWindow struct { // Hour: hour of day - 0 to 23. Hour int64 `json:"hour,omitempty"` - // Kind: This is always sql#maintenanceWindow. + // Kind: This is always *sql#maintenanceWindow*. Kind string `json:"kind,omitempty"` - // UpdateTrack: Maintenance timing setting: canary - // (Earlier) or - // stable (Later).
- // Learn more. + // UpdateTrack: Maintenance timing setting: *canary* (Earlier) or + // *stable* (Later). Learn more. // // Possible values: // "SQL_UPDATE_TRACK_UNSPECIFIED" - This is an unknown maintenance // timing preference. // "canary" - For instance update that requires a restart, this update - // track indicates - // your instance prefer to restart for new version early in - // maintenance - // window. + // track indicates your instance prefer to restart for new version early + // in maintenance window. // "stable" - For instance update that requires a restart, this update - // track indicates - // your instance prefer to let Cloud SQL choose the timing of restart - // (within - // its Maintenance window, if applicable). + // track indicates your instance prefer to let Cloud SQL choose the + // timing of restart (within its Maintenance window, if applicable). UpdateTrack string `json:"updateTrack,omitempty"` // ForceSendFields is a list of field names (e.g. "Day") to @@ -2376,13 +2310,12 @@ type MySqlReplicaConfiguration struct { // certificate. CaCertificate string `json:"caCertificate,omitempty"` - // ClientCertificate: PEM representation of the slave's x509 + // ClientCertificate: PEM representation of the replica's x509 // certificate. ClientCertificate string `json:"clientCertificate,omitempty"` - // ClientKey: PEM representation of the slave's private key. The - // corresponsing public key - // is encoded in the client's certificate. + // ClientKey: PEM representation of the replica's private key. The + // corresponsing public key is encoded in the client's certificate. ClientKey string `json:"clientKey,omitempty"` // ConnectRetryInterval: Seconds to wait between connect retries. @@ -2390,19 +2323,14 @@ type MySqlReplicaConfiguration struct { ConnectRetryInterval int64 `json:"connectRetryInterval,omitempty"` // DumpFilePath: Path to a SQL dump file in Google Cloud Storage from - // which the slave - // instance is to be created. The URI is in the form - // gs: - // //bucketName/fileName. Compressed gzip files (.gz) are also - // supported. - // // Dumps should have the binlog co-ordinates from which replication - // should - // // begin. This can be accomplished by setting --master-data to 1 when - // using - // // mysqldump. + // which the replica instance is to be created. The URI is in the form + // gs://bucketName/fileName. Compressed gzip files (.gz) are also + // supported. Dumps have the binlog co-ordinates from which replication + // begins. This can be accomplished by setting --master-data to 1 when + // using mysqldump. DumpFilePath string `json:"dumpFilePath,omitempty"` - // Kind: This is always sql#mysqlReplicaConfiguration. + // Kind: This is always *sql#mysqlReplicaConfiguration*. Kind string `json:"kind,omitempty"` // MasterHeartbeatPeriod: Interval in milliseconds between replication @@ -2418,9 +2346,9 @@ type MySqlReplicaConfiguration struct { // Username: The username for the replication connection. Username string `json:"username,omitempty"` - // VerifyServerCertificate: Whether or not to check the master's Common - // Name value in the certificate - // that it sends during the SSL handshake. + // VerifyServerCertificate: Whether or not to check the primary + // instance's Common Name value in the certificate that it sends during + // the SSL handshake. VerifyServerCertificate bool `json:"verifyServerCertificate,omitempty"` // ForceSendFields is a list of field names (e.g. 
"CaCertificate") to @@ -2452,13 +2380,12 @@ type OnPremisesConfiguration struct { // certificate. CaCertificate string `json:"caCertificate,omitempty"` - // ClientCertificate: PEM representation of the slave's x509 + // ClientCertificate: PEM representation of the replica's x509 // certificate. ClientCertificate string `json:"clientCertificate,omitempty"` - // ClientKey: PEM representation of the slave's private key. The - // corresponsing public key - // is encoded in the client's certificate. + // ClientKey: PEM representation of the replica's private key. The + // corresponsing public key is encoded in the client's certificate. ClientKey string `json:"clientKey,omitempty"` // DumpFilePath: The dump file to create the Cloud SQL replica. @@ -2468,7 +2395,7 @@ type OnPremisesConfiguration struct { // format HostPort string `json:"hostPort,omitempty"` - // Kind: This is always sql#onPremisesConfiguration. + // Kind: This is always *sql#onPremisesConfiguration*. Kind string `json:"kind,omitempty"` // Password: The password for connecting to on-premises instance. @@ -2500,22 +2427,16 @@ func (s *OnPremisesConfiguration) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Operation: An Operation resource. For successful operations that -// return an -// Operation resource, only the fields relevant to the operation are -// populated -// in the resource. +// Operation: An Operation resource. For successful operations that +// return an Operation resource, only the fields relevant to the +// operation are populated in the resource. type Operation struct { - // EndTime: The time this operation finished in UTC timezone in - // RFC 3339 format, for - // example - // 2012-11-15T16:19:00.094Z. + // EndTime: The time this operation finished in UTC timezone in RFC 3339 + // format, for example *2012-11-15T16:19:00.094Z*. EndTime string `json:"endTime,omitempty"` // Error: If errors occurred during processing of this operation, this - // field will be - // populated. + // field will be populated. Error *OperationErrors `json:"error,omitempty"` // ExportContext: The context for export operation, if applicable. @@ -2525,40 +2446,27 @@ type Operation struct { ImportContext *ImportContext `json:"importContext,omitempty"` // InsertTime: The time this operation was enqueued in UTC timezone in - // RFC 3339 format, for - // example - // 2012-11-15T16:19:00.094Z. + // RFC 3339 format, for example *2012-11-15T16:19:00.094Z*. InsertTime string `json:"insertTime,omitempty"` - // Kind: This is always sql#operation. + // Kind: This is always *sql#operation*. Kind string `json:"kind,omitempty"` // Name: An identifier that uniquely identifies the operation. You can - // use this - // identifier to retrieve the Operations resource that has information - // about - // the operation. + // use this identifier to retrieve the Operations resource that has + // information about the operation. Name string `json:"name,omitempty"` - // OperationType: The type of the operation. Valid values are - // CREATE, - // DELETE, UPDATE, - // RESTART, - // IMPORT, EXPORT, - // BACKUP_VOLUME, - // RESTORE_VOLUME, - // CREATE_USER, - // DELETE_USER, - // CREATE_DATABASE, - // DELETE_DATABASE . + // OperationType: The type of the operation. 
Valid values are: *CREATE* + // *DELETE* *UPDATE* *RESTART* *IMPORT* *EXPORT* *BACKUP_VOLUME* + // *RESTORE_VOLUME* *CREATE_USER* *DELETE_USER* *CREATE_DATABASE* + // *DELETE_DATABASE* // // Possible values: // "SQL_OPERATION_TYPE_UNSPECIFIED" - Unknown operation type. // "IMPORT" - Imports data into a Cloud SQL instance. // "EXPORT" - Exports data from a Cloud SQL instance to a Cloud - // Storage - // bucket. + // Storage bucket. // "CREATE" - Creates a new Cloud SQL instance. // "UPDATE" - Updates the settings of a Cloud SQL instance. // "DELETE" - Deletes a Cloud SQL instance. @@ -2583,41 +2491,34 @@ type Operation struct { // "CREATE_DATABASE" - Creates a database in the Cloud SQL instance. // "DELETE_DATABASE" - Deletes a database in the Cloud SQL instance. // "UPDATE_DATABASE" - Updates a database in the Cloud SQL instance. - // "FAILOVER" - Performs failover of an HA-enabled Cloud SQL - // failover replica. + // "FAILOVER" - Performs failover of an HA-enabled Cloud SQL failover + // replica. // "DELETE_BACKUP" - Deletes the backup taken by a backup run. // "RECREATE_REPLICA" // "TRUNCATE_LOG" - Truncates a general or slow log table in MySQL. // "DEMOTE_MASTER" - Demotes the stand-alone instance to be a Cloud - // SQL - // read replica for an external database server. + // SQL read replica for an external database server. // "MAINTENANCE" - Indicates that the instance is currently in - // maintenance. Maintenance - // typically causes the instance to be unavailable for 1-3 minutes. + // maintenance. Maintenance typically causes the instance to be + // unavailable for 1-3 minutes. // "ENABLE_PRIVATE_IP" - This field is deprecated, and will be removed // in future version of API. // "DEFER_MAINTENANCE" // "CREATE_CLONE" - Creates clone instance. // "RESCHEDULE_MAINTENANCE" - Reschedule maintenance to another time. // "START_EXTERNAL_SYNC" - Starts external sync of a Cloud SQL EM - // replica to an external master. + // replica to an external primary instance. OperationType string `json:"operationType,omitempty"` // SelfLink: The URI of this resource. SelfLink string `json:"selfLink,omitempty"` // StartTime: The time this operation actually started in UTC timezone - // in RFC 3339 format, for - // example - // 2012-11-15T16:19:00.094Z. + // in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*. StartTime string `json:"startTime,omitempty"` - // Status: The status of an operation. Valid values are - // PENDING, - // RUNNING, - // DONE, - // SQL_OPERATION_STATUS_UNSPECIFIED. + // Status: The status of an operation. Valid values are: *PENDING* + // *RUNNING* *DONE* *SQL_OPERATION_STATUS_UNSPECIFIED* // // Possible values: // "SQL_OPERATION_STATUS_UNSPECIFIED" - The state of the operation is @@ -2671,7 +2572,7 @@ type OperationError struct { // Code: Identifies the specific error that occurred. Code string `json:"code,omitempty"` - // Kind: This is always sql#operationError. + // Kind: This is always *sql#operationError*. Kind string `json:"kind,omitempty"` // Message: Additional information about the error encountered. @@ -2706,7 +2607,7 @@ type OperationErrors struct { // operation. Errors []*OperationError `json:"errors,omitempty"` - // Kind: This is always sql#operationErrors. + // Kind: This is always *sql#operationErrors*. Kind string `json:"kind,omitempty"` // ForceSendFields is a list of field names (e.g. "Errors") to @@ -2737,13 +2638,12 @@ type OperationsListResponse struct { // Items: List of operation resources. 
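Editor's note: since the Operation documentation above lists the asynchronous statuses (*PENDING*, *RUNNING*, *DONE*) and the Error field populated on failure, a short, hypothetical polling helper may help orient readers; svc is a *Service value built as in the earlier sketch, and the project and operation names are placeholders.

import (
	"context"
	"fmt"
	"time"

	sqladmin "google.golang.org/api/sqladmin/v1beta4" // assumed import path, as above
)

// waitForOperation polls Operations.Get until the operation reaches DONE or
// the context is cancelled. Operation.Error is populated only on failure.
func waitForOperation(ctx context.Context, svc *sqladmin.Service, project, opName string) error {
	for {
		op, err := svc.Operations.Get(project, opName).Context(ctx).Do()
		if err != nil {
			return fmt.Errorf("getting operation %s: %w", opName, err)
		}
		if op.Status == "DONE" {
			if op.Error != nil && len(op.Error.Errors) > 0 {
				return fmt.Errorf("operation %s failed: %s", opName, op.Error.Errors[0].Message)
			}
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(5 * time.Second):
			// Poll again after a short delay.
		}
	}
}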
Items []*Operation `json:"items,omitempty"` - // Kind: This is always sql#operationsList. + // Kind: This is always *sql#operationsList*. Kind string `json:"kind,omitempty"` // NextPageToken: The continuation token, used to page through large - // result sets. Provide - // this value in a subsequent request to return the next page of - // results. + // result sets. Provide this value in a subsequent request to return the + // next page of results. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2774,35 +2674,26 @@ func (s *OperationsListResponse) MarshalJSON() ([]byte, error) { } // ReplicaConfiguration: Read-replica configuration for connecting to -// the master. +// the primary instance. type ReplicaConfiguration struct { // FailoverTarget: Specifies if the replica is the failover target. If - // the field is set to - // true the replica will be designated as a failover - // replica. In - // case the master instance fails, the replica instance will be promoted - // as - // the new master instance.

Only one replica can be specified as - // failover - // target, and the replica has to be in different zone with the - // master - // instance. + // the field is set to *true* the replica will be designated as a + // failover replica. In case the primary instance fails, the replica + // instance will be promoted as the new primary instance. Only one + // replica can be specified as failover target, and the replica has to + // be in different zone with the primary instance. FailoverTarget bool `json:"failoverTarget,omitempty"` - // Kind: This is always sql#replicaConfiguration. + // Kind: This is always *sql#replicaConfiguration*. Kind string `json:"kind,omitempty"` // MysqlReplicaConfiguration: MySQL specific configuration when - // replicating from a MySQL on-premises - // master. Replication configuration information such as the - // username, - // password, certificates, and keys are not stored in the instance - // metadata. - // The configuration information is used only to set up the - // replication - // connection and is stored by MySQL in a file named - // master.info - // in the data directory. + // replicating from a MySQL on-premises primary instance. Replication + // configuration information such as the username, password, + // certificates, and keys are not stored in the instance metadata. The + // configuration information is used only to set up the replication + // connection and is stored by MySQL in a file named *master.info* in + // the data directory. MysqlReplicaConfiguration *MySqlReplicaConfiguration `json:"mysqlReplicaConfiguration,omitempty"` // ForceSendFields is a list of field names (e.g. "FailoverTarget") to @@ -2837,19 +2728,14 @@ type Reschedule struct { // "IMMEDIATE" - If the user wants to schedule the maintenance to // happen now. // "NEXT_AVAILABLE_WINDOW" - If the user wants to use the existing - // maintenance policy to find the - // next available window. + // maintenance policy to find the next available window. // "SPECIFIC_TIME" - If the user wants to reschedule the maintenance // to a specific time. RescheduleType string `json:"rescheduleType,omitempty"` // ScheduleTime: Optional. Timestamp when the maintenance shall be - // rescheduled to if - // reschedule_type=SPECIFIC_TIME, in - // RFC 3339 format, - // for - // example 2012-11-15T16:19:00.094Z. + // rescheduled to if reschedule_type=SPECIFIC_TIME, in RFC 3339 format, + // for example *2012-11-15T16:19:00.094Z*. ScheduleTime string `json:"scheduleTime,omitempty"` // ForceSendFields is a list of field names (e.g. "RescheduleType") to @@ -2876,8 +2762,7 @@ func (s *Reschedule) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RestoreBackupContext: Database instance restore from backup -// context. +// RestoreBackupContext: Database instance restore from backup context. // Backup context contains source instance id and project id. type RestoreBackupContext struct { // BackupRunId: The ID of the backup run to restore from. @@ -2886,7 +2771,7 @@ type RestoreBackupContext struct { // InstanceId: The ID of the instance that the backup was taken from. InstanceId string `json:"instanceId,omitempty"` - // Kind: This is always sql#restoreBackupContext. + // Kind: This is always *sql#restoreBackupContext*. Kind string `json:"kind,omitempty"` // Project: The full project ID of the source instance. @@ -2917,12 +2802,12 @@ func (s *RestoreBackupContext) MarshalJSON() ([]byte, error) { // RotateServerCaContext: Instance rotate server CA context. 
type RotateServerCaContext struct { - // Kind: This is always sql#rotateServerCaContext. + // Kind: This is always *sql#rotateServerCaContext*. Kind string `json:"kind,omitempty"` // NextVersion: The fingerprint of the next version to be rotated to. If - // left unspecified, - // will be rotated to the most recently added server CA version. + // left unspecified, will be rotated to the most recently added server + // CA version. NextVersion string `json:"nextVersion,omitempty"` // ForceSendFields is a list of field names (e.g. "Kind") to @@ -2951,50 +2836,32 @@ func (s *RotateServerCaContext) MarshalJSON() ([]byte, error) { // Settings: Database instance settings. type Settings struct { // ActivationPolicy: The activation policy specifies when the instance - // is activated; it is - // applicable only when the instance state is RUNNABLE. - // Valid - // values:
ALWAYS: The instance is on, and remains so - // even in - // the absence of connection requests.
NEVER: The - // instance is - // off; it is not activated, even if a connection request - // arrives. - //
ON_DEMAND: First Generation instances only. The - // instance - // responds to incoming requests, and turns itself off when not in - // use. - // Instances with PER_USE pricing turn off after 15 minutes - // of - // inactivity. Instances with PER_PACKAGE pricing turn off - // after - // 12 hours of inactivity. + // is activated; it is applicable only when the instance state is + // RUNNABLE. Valid values: *ALWAYS*: The instance is on, and remains so + // even in the absence of connection requests. *NEVER*: The instance is + // off; it is not activated, even if a connection request arrives. // // Possible values: // "SQL_ACTIVATION_POLICY_UNSPECIFIED" - Unknown activation plan. // "ALWAYS" - The instance is always up and running. - // "NEVER" - The instance should never spin up. - // "ON_DEMAND" - The instance spins up upon receiving requests. + // "NEVER" - The instance never starts. + // "ON_DEMAND" - The instance starts upon receiving requests. ActivationPolicy string `json:"activationPolicy,omitempty"` + // ActiveDirectoryConfig: Active Directory configuration, for now + // relevant only for SQL Server + ActiveDirectoryConfig *SqlActiveDirectoryConfig `json:"activeDirectoryConfig,omitempty"` + // AuthorizedGaeApplications: The App Engine app IDs that can access - // this instance. First Generation - // instances only. + // this instance. (Deprecated) Applied to First Generation instances + // only. AuthorizedGaeApplications []string `json:"authorizedGaeApplications,omitempty"` - // AvailabilityType: Availability type (PostgreSQL and MySQL instances - // only). Potential values: - //
ZONAL: The instance serves data from only one - // zone. - // Outages in that zone affect data accessibility. - //
REGIONAL: - // The instance can serve data from more than one zone in a region (it - // is - // highly available).
For more information, see - // Overview - // of the High Availability Configuration. + // AvailabilityType: Availability type. Potential values: *ZONAL*: The + // instance serves data from only one zone. Outages in that zone affect + // data accessibility. *REGIONAL*: The instance can serve data from more + // than one zone in a region (it is highly available). For more + // information, see Overview of the High Availability Configuration. // // Possible values: // "SQL_AVAILABILITY_TYPE_UNSPECIFIED" - This is an unknown @@ -3006,21 +2873,21 @@ type Settings struct { // BackupConfiguration: The daily backup configuration for the instance. BackupConfiguration *BackupConfiguration `json:"backupConfiguration,omitempty"` + // Collation: The name of server Instance collation. + Collation string `json:"collation,omitempty"` + // CrashSafeReplicationEnabled: Configuration specific to read replica - // instances. Indicates whether - // database flags for crash-safe replication are enabled. This property - // is - // only applicable to First Generation instances. + // instances. Indicates whether database flags for crash-safe + // replication are enabled. This property was only applicable to First + // Generation instances. CrashSafeReplicationEnabled bool `json:"crashSafeReplicationEnabled,omitempty"` // DataDiskSizeGb: The size of data disk, in GB. The data disk size - // minimum is 10GB. Not used - // for First Generation instances. + // minimum is 10GB. DataDiskSizeGb int64 `json:"dataDiskSizeGb,omitempty,string"` - // DataDiskType: The type of data disk: PD_SSD (default) - // or - // PD_HDD. Not used for First Generation instances. + // DataDiskType: The type of data disk: PD_SSD (default) or PD_HDD. Not + // used for First Generation instances. // // Possible values: // "SQL_DATA_DISK_TYPE_UNSPECIFIED" - This is an unknown data disk @@ -3028,48 +2895,41 @@ type Settings struct { // "PD_SSD" - An SSD data disk. // "PD_HDD" - An HDD data disk. // "OBSOLETE_LOCAL_SSD" - This field is deprecated and will be removed - // from a future version of the - // API. + // from a future version of the API. DataDiskType string `json:"dataDiskType,omitempty"` // DatabaseFlags: The database flags passed to the instance at startup. DatabaseFlags []*DatabaseFlags `json:"databaseFlags,omitempty"` // DatabaseReplicationEnabled: Configuration specific to read replica - // instances. Indicates whether - // replication is enabled or not. + // instances. Indicates whether replication is enabled or not. DatabaseReplicationEnabled bool `json:"databaseReplicationEnabled,omitempty"` + // DenyMaintenancePeriods: Deny maintenance periods + DenyMaintenancePeriods []*DenyMaintenancePeriod `json:"denyMaintenancePeriods,omitempty"` + // IpConfiguration: The settings for IP Management. This allows to - // enable or disable the - // instance IP and manage which external networks can connect to the - // instance. - // The IPv4 address cannot be disabled for Second Generation instances. + // enable or disable the instance IP and manage which external networks + // can connect to the instance. The IPv4 address cannot be disabled for + // Second Generation instances. IpConfiguration *IpConfiguration `json:"ipConfiguration,omitempty"` - // Kind: This is always sql#settings. + // Kind: This is always *sql#settings*. Kind string `json:"kind,omitempty"` // LocationPreference: The location preference settings. 
This allows the - // instance to be located as - // near as possible to either an App Engine app or Compute Engine zone - // for - // better performance. App Engine co-location is only applicable to - // First - // Generation instances. + // instance to be located as near as possible to either an App Engine + // app or Compute Engine zone for better performance. App Engine + // co-location was only applicable to First Generation instances. LocationPreference *LocationPreference `json:"locationPreference,omitempty"` // MaintenanceWindow: The maintenance window for this instance. This - // specifies when the instance - // can be restarted for maintenance purposes. Not used for First - // Generation - // instances. + // specifies when the instance can be restarted for maintenance + // purposes. MaintenanceWindow *MaintenanceWindow `json:"maintenanceWindow,omitempty"` // PricingPlan: The pricing plan for this instance. This can be either - // PER_USE - // or PACKAGE. Only PER_USE is supported for - // Second + // *PER_USE* or *PACKAGE*. Only *PER_USE* is supported for Second // Generation instances. // // Possible values: @@ -3080,63 +2940,42 @@ type Settings struct { PricingPlan string `json:"pricingPlan,omitempty"` // ReplicationType: The type of replication this instance uses. This can - // be either - // ASYNCHRONOUS or SYNCHRONOUS. This property - // is - // only applicable to First Generation instances. + // be either *ASYNCHRONOUS* or *SYNCHRONOUS*. (Deprecated_ This property + // was only applicable to First Generation instances. // // Possible values: // "SQL_REPLICATION_TYPE_UNSPECIFIED" - This is an unknown replication // type for a Cloud SQL instance. // "SYNCHRONOUS" - The synchronous replication mode for First - // Generation instances. It is the - // default value. + // Generation instances. It is the default value. // "ASYNCHRONOUS" - The asynchronous replication mode for First - // Generation instances. It - // provides a slight performance gain, but if an outage occurs while - // this - // option is set to asynchronous, you can lose up to a few seconds of - // updates - // to your data. + // Generation instances. It provides a slight performance gain, but if + // an outage occurs while this option is set to asynchronous, you can + // lose up to a few seconds of updates to your data. ReplicationType string `json:"replicationType,omitempty"` // SettingsVersion: The version of instance settings. This is a required - // field for update - // method to make sure concurrent updates are handled properly. During - // update, - // use the most recent settingsVersion value for this instance and do - // not try - // to update this value. + // field for update method to make sure concurrent updates are handled + // properly. During update, use the most recent settingsVersion value + // for this instance and do not try to update this value. SettingsVersion int64 `json:"settingsVersion,omitempty,string"` // StorageAutoResize: Configuration to increase storage size - // automatically. The default value is - // true. Not used for First Generation instances. + // automatically. The default value is true. StorageAutoResize *bool `json:"storageAutoResize,omitempty"` // StorageAutoResizeLimit: The maximum size to which storage capacity - // can be automatically increased. - // The default value is 0, which specifies that there is no limit. Not - // used - // for First Generation instances. + // can be automatically increased. The default value is 0, which + // specifies that there is no limit. 
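Editor's note: to connect the Settings fields rewrapped above (activation policy, IP configuration, maintenance window, storage auto-resize) to actual usage, here is a hypothetical Instances.Patch sketch. It reuses the assumed sqladmin import and placeholder names, and reads the current SettingsVersion first because, as documented above, updates are rejected when that version is stale.

import (
	"context"
	"fmt"

	sqladmin "google.golang.org/api/sqladmin/v1beta4" // assumed import path, as above
)

// configureInstance illustrates a settings-only patch against placeholder names;
// Instances.Patch merges the provided fields into the existing configuration.
func configureInstance(ctx context.Context, svc *sqladmin.Service) error {
	cur, err := svc.Instances.Get("my-project", "my-instance").Context(ctx).Do()
	if err != nil {
		return fmt.Errorf("getting instance: %w", err)
	}
	autoResize := true
	op, err := svc.Instances.Patch("my-project", "my-instance", &sqladmin.DatabaseInstance{
		Settings: &sqladmin.Settings{
			// Use the most recent settings version so concurrent updates are detected.
			SettingsVersion: cur.Settings.SettingsVersion,
			// Restart for maintenance on Sundays at 03:00, on the stable track.
			MaintenanceWindow: &sqladmin.MaintenanceWindow{Day: 7, Hour: 3, UpdateTrack: "stable"},
			// Keep the public IP, require SSL, and allow one external CIDR block.
			IpConfiguration: &sqladmin.IpConfiguration{
				Ipv4Enabled: true,
				RequireSsl:  true,
				AuthorizedNetworks: []*sqladmin.AclEntry{
					{Name: "office", Value: "192.168.100.0/24"},
				},
			},
			// Grow the data disk automatically, but cap it at 500 GB.
			StorageAutoResize:      &autoResize,
			StorageAutoResizeLimit: 500,
		},
	}).Context(ctx).Do()
	if err != nil {
		return fmt.Errorf("patching instance: %w", err)
	}
	fmt.Printf("patch operation: %s\n", op.Name)
	return nil
}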
StorageAutoResizeLimit int64 `json:"storageAutoResizeLimit,omitempty,string"` - // Tier: The tier (or machine type) for this instance, for - // example - // db-n1-standard-1 (MySQL instances) - // or - // db-custom-1-3840 (PostgreSQL instances). For MySQL - // instances, - // this property determines whether the instance is First or - // Second - // Generation. For more information, see - // Instance Settings. + // Tier: The tier (or machine type) for this instance, for example + // *db-n1-standard-1* (MySQL instances) or *db-custom-1-3840* + // (PostgreSQL instances). Tier string `json:"tier,omitempty"` // UserLabels: User-provided labels, represented as a dictionary where - // each label is a - // single key value pair. + // each label is a single key value pair. UserLabels map[string]string `json:"userLabels,omitempty"` // ForceSendFields is a list of field names (e.g. "ActivationPolicy") to @@ -3163,12 +3002,45 @@ func (s *Settings) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SqlExternalSyncSettingError: External master migration setting error. +// SqlActiveDirectoryConfig: Active Directory configuration, for now +// relevant only for SQL Server +type SqlActiveDirectoryConfig struct { + // Domain: Domain name + Domain string `json:"domain,omitempty"` + + // Kind: This will be always sql#activeDirectoryConfig. + Kind string `json:"kind,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Domain") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Domain") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SqlActiveDirectoryConfig) MarshalJSON() ([]byte, error) { + type NoMethod SqlActiveDirectoryConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SqlExternalSyncSettingError: External primary instance migration +// setting error. type SqlExternalSyncSettingError struct { // Detail: Additional information about the error encountered. Detail string `json:"detail,omitempty"` - // Kind: This is always sql#migrationSettingError. + // Kind: This is always *sql#migrationSettingError*. Kind string `json:"kind,omitempty"` // Type: Identifies the specific error that occurred. @@ -3185,6 +3057,27 @@ type SqlExternalSyncSettingError struct { // databases, applicable for postgres. // "PGLOGICAL_NODE_ALREADY_EXISTS" - pglogical node already exists on // databases, applicable for postgres. + // "INVALID_WAL_LEVEL" - The value of parameter wal_level is not set + // to logical. + // "INVALID_SHARED_PRELOAD_LIBRARY" - The value of parameter + // shared_preload_libraries does not include pglogical. + // "INSUFFICIENT_MAX_REPLICATION_SLOTS" - The value of parameter + // max_replication_slots is not sufficient. 
+ // "INSUFFICIENT_MAX_WAL_SENDERS" - The value of parameter + // max_wal_senders is not sufficient. + // "INSUFFICIENT_MAX_WORKER_PROCESSES" - The value of parameter + // max_worker_processes is not sufficient. + // "UNSUPPORTED_EXTENSIONS" - Extensions installed are either not + // supported or having unsupported versions + // "INVALID_RDS_LOGICAL_REPLICATION" - The value of parameter + // rds.logical_replication is not set to 1. + // "INVALID_LOGGING_SETUP" - The primary instance logging setup + // doesn't allow EM sync. + // "INVALID_DB_PARAM" - The primary instance database parameter setup + // doesn't allow EM sync. + // "UNSUPPORTED_GTID_MODE" - The gtid_mode is not supported, + // applicable for MySQL. + // "SQLSERVER_AGENT_NOT_RUNNING" - SQL Server Agent is not running. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Detail") to @@ -3245,7 +3138,7 @@ type SqlInstancesVerifyExternalSyncSettingsResponse struct { // Errors: List of migration violations. Errors []*SqlExternalSyncSettingError `json:"errors,omitempty"` - // Kind: This is always sql#migrationSettingErrorList. + // Kind: This is always *sql#migrationSettingErrorList*. Kind string `json:"kind,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -3384,27 +3277,21 @@ type SslCert struct { // CertSerialNumber: Serial number, as extracted from the certificate. CertSerialNumber string `json:"certSerialNumber,omitempty"` - // CommonName: User supplied name. Constrained to [a-zA-Z.-_ ]+. + // CommonName: User supplied name. Constrained to [a-zA-Z.-_ ]+. CommonName string `json:"commonName,omitempty"` - // CreateTime: The time when the certificate was created in - // RFC 3339 format, for - // example - // 2012-11-15T16:19:00.094Z + // CreateTime: The time when the certificate was created in RFC 3339 + // format, for example *2012-11-15T16:19:00.094Z* CreateTime string `json:"createTime,omitempty"` - // ExpirationTime: The time when the certificate expires in - // RFC 3339 format, for - // example - // 2012-11-15T16:19:00.094Z. + // ExpirationTime: The time when the certificate expires in RFC 3339 + // format, for example *2012-11-15T16:19:00.094Z*. ExpirationTime string `json:"expirationTime,omitempty"` // Instance: Name of the database instance. Instance string `json:"instance,omitempty"` - // Kind: This is always sql#sslCert. + // Kind: This is always *sql#sslCert*. Kind string `json:"kind,omitempty"` // SelfLink: The URI of this resource. @@ -3446,8 +3333,7 @@ type SslCertDetail struct { CertInfo *SslCert `json:"certInfo,omitempty"` // CertPrivateKey: The private key for the client cert, in pem format. - // Keep private in order - // to protect your security. + // Keep private in order to protect your security. CertPrivateKey string `json:"certPrivateKey,omitempty"` // ForceSendFields is a list of field names (e.g. "CertInfo") to @@ -3505,9 +3391,8 @@ func (s *SslCertsCreateEphemeralRequest) MarshalJSON() ([]byte, error) { // SslCertsInsertRequest: SslCerts insert request. type SslCertsInsertRequest struct { - // CommonName: User supplied name. Must be a distinct name from the - // other certificates - // for this instance. + // CommonName: User supplied name. Must be a distinct name from the + // other certificates for this instance. CommonName string `json:"commonName,omitempty"` // ForceSendFields is a list of field names (e.g. 
"CommonName") to @@ -3535,24 +3420,18 @@ func (s *SslCertsInsertRequest) MarshalJSON() ([]byte, error) { // SslCertsInsertResponse: SslCert insert response. type SslCertsInsertResponse struct { - // ClientCert: The new client certificate and private key. For First - // Generation - // instances, the new certificate does not take effect until the - // instance is - // restarted. + // ClientCert: The new client certificate and private key. ClientCert *SslCertDetail `json:"clientCert,omitempty"` - // Kind: This is always sql#sslCertsInsert. + // Kind: This is always *sql#sslCertsInsert*. Kind string `json:"kind,omitempty"` // Operation: The operation to track the ssl certs insert request. Operation *Operation `json:"operation,omitempty"` - // ServerCaCert: The server Certificate Authority's certificate. If - // this is missing you can - // force a new one to be generated by calling resetSslConfig method - // on - // instances resource. + // ServerCaCert: The server Certificate Authority's certificate. If this + // is missing you can force a new one to be generated by calling + // resetSslConfig method on instances resource. ServerCaCert *SslCert `json:"serverCaCert,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -3587,7 +3466,7 @@ type SslCertsListResponse struct { // Items: List of client certificates for the instance. Items []*SslCert `json:"items,omitempty"` - // Kind: This is always sql#sslCertsList. + // Kind: This is always *sql#sslCertsList*. Kind string `json:"kind,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -3625,15 +3504,14 @@ type Tier struct { // RAM: The maximum RAM usage of this tier in bytes. RAM int64 `json:"RAM,omitempty,string"` - // Kind: This is always sql#tier. + // Kind: This is always *sql#tier*. Kind string `json:"kind,omitempty"` // Region: The applicable regions for this tier. Region []string `json:"region,omitempty"` // Tier: An identifier for the machine type, for example, - // db-n1-standard-1. For - // related information, see Pricing. + // db-n1-standard-1. For related information, see Pricing. Tier string `json:"tier,omitempty"` // ForceSendFields is a list of field names (e.g. "DiskQuota") to @@ -3664,7 +3542,7 @@ type TiersListResponse struct { // Items: List of tiers. Items []*Tier `json:"items,omitempty"` - // Kind: This is always sql#tiersList. + // Kind: This is always *sql#tiersList*. Kind string `json:"kind,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -3696,12 +3574,11 @@ func (s *TiersListResponse) MarshalJSON() ([]byte, error) { // TruncateLogContext: Database Instance truncate log context. type TruncateLogContext struct { - // Kind: This is always sql#truncateLogContext. + // Kind: This is always *sql#truncateLogContext*. Kind string `json:"kind,omitempty"` - // LogType: The type of log to truncate. Valid values - // are - // MYSQL_GENERAL_TABLE and MYSQL_SLOW_TABLE. + // LogType: The type of log to truncate. Valid values are + // *MYSQL_GENERAL_TABLE* and *MYSQL_SLOW_TABLE*. LogType string `json:"logType,omitempty"` // ForceSendFields is a list of field names (e.g. "Kind") to @@ -3730,46 +3607,46 @@ func (s *TruncateLogContext) MarshalJSON() ([]byte, error) { // User: A Cloud SQL user resource. type User struct { // Etag: This field is deprecated and will be removed from a future - // version of the - // API. + // version of the API. Etag string `json:"etag,omitempty"` - // Host: The host name from which the user can connect. 
For - // insert - // operations, host defaults to an empty string. For - // update + // Host: The host name from which the user can connect. For *insert* + // operations, host defaults to an empty string. For *update* // operations, host is specified as part of the request URL. The host - // name - // cannot be updated after insertion. + // name cannot be updated after insertion. Host string `json:"host,omitempty"` // Instance: The name of the Cloud SQL instance. This does not include - // the project ID. - // Can be omitted for update since it is already specified - // on the - // URL. + // the project ID. Can be omitted for *update* since it is already + // specified on the URL. Instance string `json:"instance,omitempty"` - // Kind: This is always sql#user. + // Kind: This is always *sql#user*. Kind string `json:"kind,omitempty"` // Name: The name of the user in the Cloud SQL instance. Can be omitted - // for - // update since it is already specified in the URL. + // for *update* since it is already specified in the URL. Name string `json:"name,omitempty"` // Password: The password for the user. Password string `json:"password,omitempty"` // Project: The project ID of the project containing the Cloud SQL - // database. The Google - // apps domain is prefixed if applicable. Can be omitted - // for - // update since it is already specified on the URL. + // database. The Google apps domain is prefixed if applicable. Can be + // omitted for *update* since it is already specified on the URL. Project string `json:"project,omitempty"` SqlserverUserDetails *SqlServerUserDetails `json:"sqlserverUserDetails,omitempty"` + // Type: The user type. It determines the method to authenticate the + // user during login. The default is the database's built-in user type. + // + // Possible values: + // "BUILT_IN" - The database's built-in user type. + // "CLOUD_IAM_USER" - Cloud IAM user. + // "CLOUD_IAM_SERVICE_ACCOUNT" - Cloud IAM service account. + Type string `json:"type,omitempty"` + // ForceSendFields is a list of field names (e.g. "Etag") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -3798,14 +3675,12 @@ type UsersListResponse struct { // Items: List of user resources in the instance. Items []*User `json:"items,omitempty"` - // Kind: This is always sql#usersList. + // Kind: This is always *sql#usersList*. Kind string `json:"kind,omitempty"` // NextPageToken: An identifier that uniquely identifies the operation. - // You can use this - // identifier to retrieve the Operations resource that has information - // about - // the operation. + // You can use this identifier to retrieve the Operations resource that + // has information about the operation. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -3883,7 +3758,7 @@ func (c *BackupRunsDeleteCall) Header() http.Header { func (c *BackupRunsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3955,7 +3830,7 @@ func (c *BackupRunsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err // ], // "parameters": { // "id": { - // "description": "The ID of the Backup Run to delete. 
To find a Backup Run ID, use the \u003ca\nhref=\"/sql/docs/db_path/admin-api/rest/v1beta4/backupRuns/list\"\u003elist\u003c/a\u003e\nmethod.", + // "description": "The ID of the Backup Run to delete. To find a Backup Run ID, use the list method.", // "format": "int64", // "location": "path", // "required": true, @@ -4045,7 +3920,7 @@ func (c *BackupRunsGetCall) Header() http.Header { func (c *BackupRunsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4164,8 +4039,7 @@ type BackupRunsInsertCall struct { } // Insert: Creates a new backup run on demand. This method is applicable -// only to -// Second Generation instances. +// only to Second Generation instances. func (r *BackupRunsService) Insert(project string, instance string, backuprun *BackupRun) *BackupRunsInsertCall { c := &BackupRunsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -4201,7 +4075,7 @@ func (c *BackupRunsInsertCall) Header() http.Header { func (c *BackupRunsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4266,7 +4140,7 @@ func (c *BackupRunsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Creates a new backup run on demand. This method is applicable only to\nSecond Generation instances.", + // "description": "Creates a new backup run on demand. This method is applicable only to Second Generation instances.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/backupRuns", // "httpMethod": "POST", // "id": "sql.backupRuns.insert", @@ -4316,8 +4190,8 @@ type BackupRunsListCall struct { } // List: Lists all backup runs associated with a given instance and -// configuration in -// the reverse chronological order of the backup initiation time. +// configuration in the reverse chronological order of the backup +// initiation time. func (r *BackupRunsService) List(project string, instance string) *BackupRunsListCall { c := &BackupRunsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -4333,8 +4207,7 @@ func (c *BackupRunsListCall) MaxResults(maxResults int64) *BackupRunsListCall { } // PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set -// of +// previously-returned page token representing part of the larger set of // results to view. 
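Editor's note: the BackupRuns.List plumbing above (MaxResults, PageToken, NextPageToken) follows the usual generated-client pagination pattern. A small, hypothetical walk of all pages might look like this, again using the assumed sqladmin import; project and instance are caller-supplied placeholders.

import (
	"context"

	sqladmin "google.golang.org/api/sqladmin/v1beta4" // assumed import path, as above
)

// listAllBackupRuns pages through every backup run for an instance,
// returned in reverse chronological order of the backup initiation time.
func listAllBackupRuns(ctx context.Context, svc *sqladmin.Service, project, instance string) ([]*sqladmin.BackupRun, error) {
	var runs []*sqladmin.BackupRun
	pageToken := ""
	for {
		call := svc.BackupRuns.List(project, instance).MaxResults(50).Context(ctx)
		if pageToken != "" {
			// Resume from the previously returned page token.
			call = call.PageToken(pageToken)
		}
		resp, err := call.Do()
		if err != nil {
			return nil, err
		}
		runs = append(runs, resp.Items...)
		if resp.NextPageToken == "" {
			return runs, nil
		}
		pageToken = resp.NextPageToken
	}
}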
func (c *BackupRunsListCall) PageToken(pageToken string) *BackupRunsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -4378,7 +4251,7 @@ func (c *BackupRunsListCall) Header() http.Header { func (c *BackupRunsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4441,7 +4314,7 @@ func (c *BackupRunsListCall) Do(opts ...googleapi.CallOption) (*BackupRunsListRe } return ret, nil // { - // "description": "Lists all backup runs associated with a given instance and configuration in\nthe reverse chronological order of the backup initiation time.", + // "description": "Lists all backup runs associated with a given instance and configuration in the reverse chronological order of the backup initiation time.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/backupRuns", // "httpMethod": "GET", // "id": "sql.backupRuns.list", @@ -4463,7 +4336,7 @@ func (c *BackupRunsListCall) Do(opts ...googleapi.CallOption) (*BackupRunsListRe // "type": "integer" // }, // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of\nresults to view.", + // "description": "A previously-returned page token representing part of the larger set of results to view.", // "location": "query", // "type": "string" // }, @@ -4555,7 +4428,7 @@ func (c *DatabasesDeleteCall) Header() http.Header { func (c *DatabasesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4671,8 +4544,7 @@ type DatabasesGetCall struct { } // Get: Retrieves a resource containing information about a database -// inside a Cloud -// SQL instance. +// inside a Cloud SQL instance. func (r *DatabasesService) Get(project string, instance string, database string) *DatabasesGetCall { c := &DatabasesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -4718,7 +4590,7 @@ func (c *DatabasesGetCall) Header() http.Header { func (c *DatabasesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4782,7 +4654,7 @@ func (c *DatabasesGetCall) Do(opts ...googleapi.CallOption) (*Database, error) { } return ret, nil // { - // "description": "Retrieves a resource containing information about a database inside a Cloud\nSQL instance.", + // "description": "Retrieves a resource containing information about a database inside a Cloud SQL instance.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/databases/{database}", // "httpMethod": "GET", // "id": "sql.databases.get", @@ -4836,8 +4708,7 @@ type DatabasesInsertCall struct { } // Insert: Inserts a resource containing information about a database -// inside a Cloud -// SQL instance. +// inside a Cloud SQL instance. 
func (r *DatabasesService) Insert(project string, instance string, database *Database) *DatabasesInsertCall { c := &DatabasesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -4873,7 +4744,7 @@ func (c *DatabasesInsertCall) Header() http.Header { func (c *DatabasesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4938,7 +4809,7 @@ func (c *DatabasesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Inserts a resource containing information about a database inside a Cloud\nSQL instance.", + // "description": "Inserts a resource containing information about a database inside a Cloud SQL instance.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/databases", // "httpMethod": "POST", // "id": "sql.databases.insert", @@ -5032,7 +4903,7 @@ func (c *DatabasesListCall) Header() http.Header { func (c *DatabasesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5143,8 +5014,8 @@ type DatabasesPatchCall struct { } // Patch: Partially updates a resource containing information about a -// database inside -// a Cloud SQL instance. This method supports patch semantics. +// database inside a Cloud SQL instance. This method supports patch +// semantics. func (r *DatabasesService) Patch(project string, instance string, database string, database2 *Database) *DatabasesPatchCall { c := &DatabasesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -5181,7 +5052,7 @@ func (c *DatabasesPatchCall) Header() http.Header { func (c *DatabasesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5247,7 +5118,7 @@ func (c *DatabasesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Partially updates a resource containing information about a database inside\na Cloud SQL instance. This method supports patch semantics.", + // "description": "Partially updates a resource containing information about a database inside a Cloud SQL instance. This method supports patch semantics.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/databases/{database}", // "httpMethod": "PATCH", // "id": "sql.databases.patch", @@ -5305,8 +5176,7 @@ type DatabasesUpdateCall struct { } // Update: Updates a resource containing information about a database -// inside a Cloud -// SQL instance. +// inside a Cloud SQL instance. 
func (r *DatabasesService) Update(project string, instance string, database string, database2 *Database) *DatabasesUpdateCall { c := &DatabasesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -5343,7 +5213,7 @@ func (c *DatabasesUpdateCall) Header() http.Header { func (c *DatabasesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5409,7 +5279,7 @@ func (c *DatabasesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Updates a resource containing information about a database inside a Cloud\nSQL instance.", + // "description": "Updates a resource containing information about a database inside a Cloud SQL instance.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/databases/{database}", // "httpMethod": "PUT", // "id": "sql.databases.update", @@ -5471,8 +5341,7 @@ func (r *FlagsService) List() *FlagsListCall { // DatabaseVersion sets the optional parameter "databaseVersion": // Database type and version you want to retrieve flags for. By default, -// this -// method returns flags for all database types and versions. +// this method returns flags for all database types and versions. func (c *FlagsListCall) DatabaseVersion(databaseVersion string) *FlagsListCall { c.urlParams_.Set("databaseVersion", databaseVersion) return c @@ -5515,7 +5384,7 @@ func (c *FlagsListCall) Header() http.Header { func (c *FlagsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5581,7 +5450,7 @@ func (c *FlagsListCall) Do(opts ...googleapi.CallOption) (*FlagsListResponse, er // "parameterOrder": [], // "parameters": { // "databaseVersion": { - // "description": "Database type and version you want to retrieve flags for. By default, this\nmethod returns flags for all database types and versions.", + // "description": "Database type and version you want to retrieve flags for. By default, this method returns flags for all database types and versions.", // "location": "query", // "type": "string" // } @@ -5610,14 +5479,10 @@ type InstancesAddServerCaCall struct { } // AddServerCa: Add a new trusted Certificate Authority (CA) version for -// the specified -// instance. Required to prepare for a certificate rotation. If a CA -// version -// was previously added but never used in a certificate rotation, -// this -// operation replaces that version. There cannot be more than one CA -// version -// waiting to be rotated in. +// the specified instance. Required to prepare for a certificate +// rotation. If a CA version was previously added but never used in a +// certificate rotation, this operation replaces that version. There +// cannot be more than one CA version waiting to be rotated in. 
func (r *InstancesService) AddServerCa(project string, instance string) *InstancesAddServerCaCall { c := &InstancesAddServerCaCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -5652,7 +5517,7 @@ func (c *InstancesAddServerCaCall) Header() http.Header { func (c *InstancesAddServerCaCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5712,7 +5577,7 @@ func (c *InstancesAddServerCaCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Add a new trusted Certificate Authority (CA) version for the specified\ninstance. Required to prepare for a certificate rotation. If a CA version\nwas previously added but never used in a certificate rotation, this\noperation replaces that version. There cannot be more than one CA version\nwaiting to be rotated in.", + // "description": "Add a new trusted Certificate Authority (CA) version for the specified instance. Required to prepare for a certificate rotation. If a CA version was previously added but never used in a certificate rotation, this operation replaces that version. There cannot be more than one CA version waiting to be rotated in.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/addServerCa", // "httpMethod": "POST", // "id": "sql.instances.addServerCa", @@ -5759,8 +5624,7 @@ type InstancesCloneCall struct { } // Clone: Creates a Cloud SQL instance as a clone of the source -// instance. Using this -// operation might cause your instance to restart. +// instance. Using this operation might cause your instance to restart. func (r *InstancesService) Clone(project string, instance string, instancesclonerequest *InstancesCloneRequest) *InstancesCloneCall { c := &InstancesCloneCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -5796,7 +5660,7 @@ func (c *InstancesCloneCall) Header() http.Header { func (c *InstancesCloneCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5861,7 +5725,7 @@ func (c *InstancesCloneCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Creates a Cloud SQL instance as a clone of the source instance. Using this\noperation might cause your instance to restart.", + // "description": "Creates a Cloud SQL instance as a clone of the source instance. Using this operation might cause your instance to restart.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/clone", // "httpMethod": "POST", // "id": "sql.instances.clone", @@ -5871,7 +5735,7 @@ func (c *InstancesCloneCall) Do(opts ...googleapi.CallOption) (*Operation, error // ], // "parameters": { // "instance": { - // "description": "The ID of the Cloud SQL instance to be cloned (source). This does not\ninclude the project ID.", + // "description": "The ID of the Cloud SQL instance to be cloned (source). 
This does not include the project ID.", // "location": "path", // "required": true, // "type": "string" @@ -5944,7 +5808,7 @@ func (c *InstancesDeleteCall) Header() http.Header { func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6051,8 +5915,7 @@ type InstancesDemoteMasterCall struct { } // DemoteMaster: Demotes the stand-alone instance to be a Cloud SQL read -// replica for an -// external database server. +// replica for an external database server. func (r *InstancesService) DemoteMaster(project string, instance string, instancesdemotemasterrequest *InstancesDemoteMasterRequest) *InstancesDemoteMasterCall { c := &InstancesDemoteMasterCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -6088,7 +5951,7 @@ func (c *InstancesDemoteMasterCall) Header() http.Header { func (c *InstancesDemoteMasterCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6153,7 +6016,7 @@ func (c *InstancesDemoteMasterCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Demotes the stand-alone instance to be a Cloud SQL read replica for an\nexternal database server.", + // "description": "Demotes the stand-alone instance to be a Cloud SQL read replica for an external database server.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/demoteMaster", // "httpMethod": "POST", // "id": "sql.instances.demoteMaster", @@ -6203,8 +6066,7 @@ type InstancesExportCall struct { } // Export: Exports data from a Cloud SQL instance to a Cloud Storage -// bucket as a SQL -// dump or CSV file. +// bucket as a SQL dump or CSV file. func (r *InstancesService) Export(project string, instance string, instancesexportrequest *InstancesExportRequest) *InstancesExportCall { c := &InstancesExportCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -6240,7 +6102,7 @@ func (c *InstancesExportCall) Header() http.Header { func (c *InstancesExportCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6305,7 +6167,7 @@ func (c *InstancesExportCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Exports data from a Cloud SQL instance to a Cloud Storage bucket as a SQL\ndump or CSV file.", + // "description": "Exports data from a Cloud SQL instance to a Cloud Storage bucket as a SQL dump or CSV file.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/export", // "httpMethod": "POST", // "id": "sql.instances.export", @@ -6354,8 +6216,7 @@ type InstancesFailoverCall struct { } // Failover: Failover the instance to its failover replica instance. -// Using this -// operation might cause your instance to restart. +// Using this operation might cause your instance to restart. 
 func (r *InstancesService) Failover(project string, instance string, instancesfailoverrequest *InstancesFailoverRequest) *InstancesFailoverCall {
 	c := &InstancesFailoverCall{s: r.s, urlParams_: make(gensupport.URLParams)}
 	c.project = project
@@ -6391,7 +6252,7 @@ func (c *InstancesFailoverCall) Header() http.Header {
 func (c *InstancesFailoverCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6456,7 +6317,7 @@ func (c *InstancesFailoverCall) Do(opts ...googleapi.CallOption) (*Operation, er
 	}
 	return ret, nil
 	// {
-	// "description": "Failover the instance to its failover replica instance. Using this\noperation might cause your instance to restart.",
+	// "description": "Failover the instance to its failover replica instance. Using this operation might cause your instance to restart.",
 	// "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/failover",
 	// "httpMethod": "POST",
 	// "id": "sql.instances.failover",
@@ -6551,7 +6412,7 @@ func (c *InstancesGetCall) Header() http.Header {
 func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6660,9 +6521,8 @@ type InstancesImportCall struct {
 	header_ http.Header
 }

-// Import: Imports data into a Cloud SQL instance from a SQL dump or
-// CSV file in
-// Cloud Storage.
+// Import: Imports data into a Cloud SQL instance from a SQL dump or CSV
+// file in Cloud Storage.
func (r *InstancesService) Import(project string, instance string, instancesimportrequest *InstancesImportRequest) *InstancesImportCall { c := &InstancesImportCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -6698,7 +6558,7 @@ func (c *InstancesImportCall) Header() http.Header { func (c *InstancesImportCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6763,7 +6623,7 @@ func (c *InstancesImportCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Imports data into a Cloud SQL instance from a SQL dump or CSV file in\nCloud Storage.", + // "description": "Imports data into a Cloud SQL instance from a SQL dump or CSV file in Cloud Storage.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/import", // "httpMethod": "POST", // "id": "sql.instances.import", @@ -6845,7 +6705,7 @@ func (c *InstancesInsertCall) Header() http.Header { func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6918,7 +6778,7 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // ], // "parameters": { // "project": { - // "description": "Project ID of the project to which the newly created Cloud SQL instances\nshould belong.", + // "description": "Project ID of the project to which the newly created Cloud SQL instances should belong.", // "location": "path", // "required": true, // "type": "string" @@ -6958,21 +6818,14 @@ func (r *InstancesService) List(project string) *InstancesListCall { } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. -// The expression is in the form of field:value. For -// example, -// 'instanceType:CLOUD_SQL_INSTANCE'. Fields can be nested as needed as -// per -// their JSON representation, such as -// 'settings.userLabels.auto_start:true'. -// -// Multiple filter queries are space-separated. For -// example. -// 'state:RUNNABLE instanceType:CLOUD_SQL_INSTANCE'. By default, -// each -// expression is an AND expression. However, you can include AND and -// OR -// expressions explicitly. +// filters resources listed in the response. The expression is in the +// form of field:value. For example, 'instanceType:CLOUD_SQL_INSTANCE'. +// Fields can be nested as needed as per their JSON representation, such +// as 'settings.userLabels.auto_start:true'. Multiple filter queries are +// space-separated. For example. 'state:RUNNABLE +// instanceType:CLOUD_SQL_INSTANCE'. By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
func (c *InstancesListCall) Filter(filter string) *InstancesListCall { c.urlParams_.Set("filter", filter) return c @@ -6986,8 +6839,7 @@ func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { } // PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set -// of +// previously-returned page token representing part of the larger set of // results to view. func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { c.urlParams_.Set("pageToken", pageToken) @@ -7031,7 +6883,7 @@ func (c *InstancesListCall) Header() http.Header { func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7102,7 +6954,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstancesListResp // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response.\nThe expression is in the form of field:value. For example,\n'instanceType:CLOUD_SQL_INSTANCE'. Fields can be nested as needed as per\ntheir JSON representation, such as 'settings.userLabels.auto_start:true'.\n\nMultiple filter queries are space-separated. For example.\n'state:RUNNABLE instanceType:CLOUD_SQL_INSTANCE'. By default, each\nexpression is an AND expression. However, you can include AND and OR\nexpressions explicitly.", + // "description": "A filter expression that filters resources listed in the response. The expression is in the form of field:value. For example, 'instanceType:CLOUD_SQL_INSTANCE'. Fields can be nested as needed as per their JSON representation, such as 'settings.userLabels.auto_start:true'. Multiple filter queries are space-separated. For example. 'state:RUNNABLE instanceType:CLOUD_SQL_INSTANCE'. By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly.", // "location": "query", // "type": "string" // }, @@ -7113,7 +6965,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstancesListResp // "type": "integer" // }, // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of\nresults to view.", + // "description": "A previously-returned page token representing part of the larger set of results to view.", // "location": "query", // "type": "string" // }, @@ -7170,14 +7022,10 @@ type InstancesListServerCasCall struct { } // ListServerCas: Lists all of the trusted Certificate Authorities (CAs) -// for the specified -// instance. There can be up to three CAs listed: the CA that was used -// to sign -// the certificate that is currently in use, a CA that has been added -// but not -// yet used to sign a certificate, and a CA used to sign a certificate -// that -// has previously rotated out. +// for the specified instance. There can be up to three CAs listed: the +// CA that was used to sign the certificate that is currently in use, a +// CA that has been added but not yet used to sign a certificate, and a +// CA used to sign a certificate that has previously rotated out. 
func (r *InstancesService) ListServerCas(project string, instance string) *InstancesListServerCasCall { c := &InstancesListServerCasCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -7222,7 +7070,7 @@ func (c *InstancesListServerCasCall) Header() http.Header { func (c *InstancesListServerCasCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7285,7 +7133,7 @@ func (c *InstancesListServerCasCall) Do(opts ...googleapi.CallOption) (*Instance } return ret, nil // { - // "description": "Lists all of the trusted Certificate Authorities (CAs) for the specified\ninstance. There can be up to three CAs listed: the CA that was used to sign\nthe certificate that is currently in use, a CA that has been added but not\nyet used to sign a certificate, and a CA used to sign a certificate that\nhas previously rotated out.", + // "description": "Lists all of the trusted Certificate Authorities (CAs) for the specified instance. There can be up to three CAs listed: the CA that was used to sign the certificate that is currently in use, a CA that has been added but not yet used to sign a certificate, and a CA used to sign a certificate that has previously rotated out.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/listServerCas", // "httpMethod": "GET", // "id": "sql.instances.listServerCas", @@ -7331,8 +7179,8 @@ type InstancesPatchCall struct { header_ http.Header } -// Patch: Updates settings of a Cloud SQL instance. -// This method supports patch semantics. +// Patch: Updates settings of a Cloud SQL instance. This method supports +// patch semantics. func (r *InstancesService) Patch(project string, instance string, databaseinstance *DatabaseInstance) *InstancesPatchCall { c := &InstancesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -7368,7 +7216,7 @@ func (c *InstancesPatchCall) Header() http.Header { func (c *InstancesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7433,7 +7281,7 @@ func (c *InstancesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Updates settings of a Cloud SQL instance.\nThis method supports patch semantics.", + // "description": "Updates settings of a Cloud SQL instance. This method supports patch semantics.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}", // "httpMethod": "PATCH", // "id": "sql.instances.patch", @@ -7482,8 +7330,8 @@ type InstancesPromoteReplicaCall struct { } // PromoteReplica: Promotes the read replica instance to be a -// stand-alone Cloud SQL instance. -// Using this operation might cause your instance to restart. +// stand-alone Cloud SQL instance. Using this operation might cause your +// instance to restart. 
func (r *InstancesService) PromoteReplica(project string, instance string) *InstancesPromoteReplicaCall { c := &InstancesPromoteReplicaCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -7518,7 +7366,7 @@ func (c *InstancesPromoteReplicaCall) Header() http.Header { func (c *InstancesPromoteReplicaCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7578,7 +7426,7 @@ func (c *InstancesPromoteReplicaCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Promotes the read replica instance to be a stand-alone Cloud SQL instance.\nUsing this operation might cause your instance to restart.", + // "description": "Promotes the read replica instance to be a stand-alone Cloud SQL instance. Using this operation might cause your instance to restart.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/promoteReplica", // "httpMethod": "POST", // "id": "sql.instances.promoteReplica", @@ -7624,8 +7472,7 @@ type InstancesResetSslConfigCall struct { } // ResetSslConfig: Deletes all client certificates and generates a new -// server SSL certificate -// for the instance. +// server SSL certificate for the instance. func (r *InstancesService) ResetSslConfig(project string, instance string) *InstancesResetSslConfigCall { c := &InstancesResetSslConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -7660,7 +7507,7 @@ func (c *InstancesResetSslConfigCall) Header() http.Header { func (c *InstancesResetSslConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7720,7 +7567,7 @@ func (c *InstancesResetSslConfigCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes all client certificates and generates a new server SSL certificate\nfor the instance.", + // "description": "Deletes all client certificates and generates a new server SSL certificate for the instance.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/resetSslConfig", // "httpMethod": "POST", // "id": "sql.instances.resetSslConfig", @@ -7800,7 +7647,7 @@ func (c *InstancesRestartCall) Header() http.Header { func (c *InstancesRestartCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7907,8 +7754,7 @@ type InstancesRestoreBackupCall struct { } // RestoreBackup: Restores a backup of a Cloud SQL instance. Using this -// operation might cause -// your instance to restart. +// operation might cause your instance to restart. 
func (r *InstancesService) RestoreBackup(project string, instance string, instancesrestorebackuprequest *InstancesRestoreBackupRequest) *InstancesRestoreBackupCall { c := &InstancesRestoreBackupCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -7944,7 +7790,7 @@ func (c *InstancesRestoreBackupCall) Header() http.Header { func (c *InstancesRestoreBackupCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8009,7 +7855,7 @@ func (c *InstancesRestoreBackupCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Restores a backup of a Cloud SQL instance. Using this operation might cause\nyour instance to restart.", + // "description": "Restores a backup of a Cloud SQL instance. Using this operation might cause your instance to restart.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/restoreBackup", // "httpMethod": "POST", // "id": "sql.instances.restoreBackup", @@ -8059,8 +7905,8 @@ type InstancesRotateServerCaCall struct { } // RotateServerCa: Rotates the server certificate to one signed by the -// Certificate Authority -// (CA) version previously added with the addServerCA method. +// Certificate Authority (CA) version previously added with the +// addServerCA method. func (r *InstancesService) RotateServerCa(project string, instance string, instancesrotateservercarequest *InstancesRotateServerCaRequest) *InstancesRotateServerCaCall { c := &InstancesRotateServerCaCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -8096,7 +7942,7 @@ func (c *InstancesRotateServerCaCall) Header() http.Header { func (c *InstancesRotateServerCaCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8161,7 +8007,7 @@ func (c *InstancesRotateServerCaCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Rotates the server certificate to one signed by the Certificate Authority\n(CA) version previously added with the addServerCA method.", + // "description": "Rotates the server certificate to one signed by the Certificate Authority (CA) version previously added with the addServerCA method.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/rotateServerCa", // "httpMethod": "POST", // "id": "sql.instances.rotateServerCa", @@ -8244,7 +8090,7 @@ func (c *InstancesStartReplicaCall) Header() http.Header { func (c *InstancesStartReplicaCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8384,7 +8230,7 @@ func (c *InstancesStopReplicaCall) Header() http.Header { func (c *InstancesStopReplicaCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8526,7 +8372,7 @@ func (c *InstancesTruncateLogCall) Header() http.Header { func (c *InstancesTruncateLogCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8641,8 +8487,7 @@ type InstancesUpdateCall struct { } // Update: Updates settings of a Cloud SQL instance. Using this -// operation might cause -// your instance to restart. +// operation might cause your instance to restart. func (r *InstancesService) Update(project string, instance string, databaseinstance *DatabaseInstance) *InstancesUpdateCall { c := &InstancesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -8678,7 +8523,7 @@ func (c *InstancesUpdateCall) Header() http.Header { func (c *InstancesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8743,7 +8588,7 @@ func (c *InstancesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Updates settings of a Cloud SQL instance. Using this operation might cause\nyour instance to restart.", + // "description": "Updates settings of a Cloud SQL instance. Using this operation might cause your instance to restart.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}", // "httpMethod": "PUT", // "id": "sql.instances.update", @@ -8838,7 +8683,7 @@ func (c *OperationsGetCall) Header() http.Header { func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8947,8 +8792,8 @@ type OperationsListCall struct { } // List: Lists all instance operations that have been performed on the -// given Cloud -// SQL instance in the reverse chronological order of the start time. +// given Cloud SQL instance in the reverse chronological order of the +// start time. func (r *OperationsService) List(project string) *OperationsListCall { c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -8970,8 +8815,7 @@ func (c *OperationsListCall) MaxResults(maxResults int64) *OperationsListCall { } // PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set -// of +// previously-returned page token representing part of the larger set of // results to view. 
func (c *OperationsListCall) PageToken(pageToken string) *OperationsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -9015,7 +8859,7 @@ func (c *OperationsListCall) Header() http.Header { func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9077,7 +8921,7 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*OperationsListRe } return ret, nil // { - // "description": "Lists all instance operations that have been performed on the given Cloud\nSQL instance in the reverse chronological order of the start time.", + // "description": "Lists all instance operations that have been performed on the given Cloud SQL instance in the reverse chronological order of the start time.", // "flatPath": "sql/v1beta4/projects/{project}/operations", // "httpMethod": "GET", // "id": "sql.operations.list", @@ -9097,7 +8941,7 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*OperationsListRe // "type": "integer" // }, // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of\nresults to view.", + // "description": "A previously-returned page token representing part of the larger set of results to view.", // "location": "query", // "type": "string" // }, @@ -9190,7 +9034,7 @@ func (c *ProjectsInstancesRescheduleMaintenanceCall) Header() http.Header { func (c *ProjectsInstancesRescheduleMaintenanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9303,7 +9147,7 @@ type ProjectsInstancesStartExternalSyncCall struct { header_ http.Header } -// StartExternalSync: Start External master migration. +// StartExternalSync: Start External primary instance migration. 
 func (r *ProjectsInstancesService) StartExternalSync(project string, instance string) *ProjectsInstancesStartExternalSyncCall {
 	c := &ProjectsInstancesStartExternalSyncCall{s: r.s, urlParams_: make(gensupport.URLParams)}
 	c.project = project
@@ -9314,9 +9158,12 @@ func (r *ProjectsInstancesService) StartExternalSync(project string, instance st
 // SyncMode sets the optional parameter "syncMode": External sync mode
 //
 // Possible values:
-// "EXTERNAL_SYNC_MODE_UNSPECIFIED"
-// "ONLINE"
-// "OFFLINE"
+// "EXTERNAL_SYNC_MODE_UNSPECIFIED" - Unknown external sync mode, will
+// be defaulted to ONLINE mode
+// "ONLINE" - Online external sync will set up replication after
+// initial data external sync
+// "OFFLINE" - Offline external sync only dumps and loads a one-time
+// snapshot of the primary instance's data
 func (c *ProjectsInstancesStartExternalSyncCall) SyncMode(syncMode string) *ProjectsInstancesStartExternalSyncCall {
 	c.urlParams_.Set("syncMode", syncMode)
 	return c
@@ -9349,7 +9196,7 @@ func (c *ProjectsInstancesStartExternalSyncCall) Header() http.Header {
 func (c *ProjectsInstancesStartExternalSyncCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -9409,7 +9256,7 @@ func (c *ProjectsInstancesStartExternalSyncCall) Do(opts ...googleapi.CallOption
 	}
 	return ret, nil
 	// {
-	// "description": "Start External master migration.",
+	// "description": "Start External primary instance migration.",
 	// "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/startExternalSync",
 	// "httpMethod": "POST",
 	// "id": "sql.projects.instances.startExternalSync",
@@ -9425,7 +9272,7 @@ func (c *ProjectsInstancesStartExternalSyncCall) Do(opts ...googleapi.CallOption
 	// "type": "string"
 	// },
 	// "project": {
-	// "description": "ID of the project that contains the first generation instance.",
+	// "description": "ID of the project that contains the instance.",
 	// "location": "path",
 	// "required": true,
 	// "type": "string"
@@ -9437,6 +9284,11 @@ func (c *ProjectsInstancesStartExternalSyncCall) Do(opts ...googleapi.CallOption
 	// "ONLINE",
 	// "OFFLINE"
 	// ],
+	// "enumDescriptions": [
+	// "Unknown external sync mode, will be defaulted to ONLINE mode",
+	// "Online external sync will set up replication after initial data external sync",
+	// "Offline external sync only dumps and loads a one-time snapshot of the primary instance's data"
+	// ],
 	// "location": "query",
 	// "type": "string"
 	// }
@@ -9464,8 +9316,8 @@ type ProjectsInstancesVerifyExternalSyncSettingsCall struct {
 	header_ http.Header
 }

-// VerifyExternalSyncSettings: Verify External master external sync
-// settings.
+// VerifyExternalSyncSettings: Verify External primary instance external
+// sync settings.
func (r *ProjectsInstancesService) VerifyExternalSyncSettings(project string, instance string) *ProjectsInstancesVerifyExternalSyncSettingsCall { c := &ProjectsInstancesVerifyExternalSyncSettingsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -9476,9 +9328,12 @@ func (r *ProjectsInstancesService) VerifyExternalSyncSettings(project string, in // SyncMode sets the optional parameter "syncMode": External sync mode // // Possible values: -// "EXTERNAL_SYNC_MODE_UNSPECIFIED" -// "ONLINE" -// "OFFLINE" +// "EXTERNAL_SYNC_MODE_UNSPECIFIED" - Unknown external sync mode, will +// be defaulted to ONLINE mode +// "ONLINE" - Online external sync will set up replication after +// initial data external sync +// "OFFLINE" - Offline external sync only dumps and loads a one-time +// snapshot of the primary instance's data func (c *ProjectsInstancesVerifyExternalSyncSettingsCall) SyncMode(syncMode string) *ProjectsInstancesVerifyExternalSyncSettingsCall { c.urlParams_.Set("syncMode", syncMode) return c @@ -9518,7 +9373,7 @@ func (c *ProjectsInstancesVerifyExternalSyncSettingsCall) Header() http.Header { func (c *ProjectsInstancesVerifyExternalSyncSettingsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9580,7 +9435,7 @@ func (c *ProjectsInstancesVerifyExternalSyncSettingsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Verify External master external sync settings.", + // "description": "Verify External primary instance external sync settings.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/verifyExternalSyncSettings", // "httpMethod": "POST", // "id": "sql.projects.instances.verifyExternalSyncSettings", @@ -9608,6 +9463,11 @@ func (c *ProjectsInstancesVerifyExternalSyncSettingsCall) Do(opts ...googleapi.C // "ONLINE", // "OFFLINE" // ], + // "enumDescriptions": [ + // "Unknown external sync mode, will be defaulted to ONLINE mode", + // "Online external sync will set up replication after initial data external sync", + // "Offline external sync only dumps and loads a one-time snapshot of the primary instance's data" + // ], // "location": "query", // "type": "string" // }, @@ -9642,12 +9502,9 @@ type SslCertsCreateEphemeralCall struct { } // CreateEphemeral: Generates a short-lived X509 certificate containing -// the provided public key -// and signed by a private key specific to the target instance. Users -// may use -// the certificate to authenticate as themselves when connecting to -// the -// database. +// the provided public key and signed by a private key specific to the +// target instance. Users may use the certificate to authenticate as +// themselves when connecting to the database. 
func (r *SslCertsService) CreateEphemeral(project string, instance string, sslcertscreateephemeralrequest *SslCertsCreateEphemeralRequest) *SslCertsCreateEphemeralCall { c := &SslCertsCreateEphemeralCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -9683,7 +9540,7 @@ func (c *SslCertsCreateEphemeralCall) Header() http.Header { func (c *SslCertsCreateEphemeralCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9748,7 +9605,7 @@ func (c *SslCertsCreateEphemeralCall) Do(opts ...googleapi.CallOption) (*SslCert } return ret, nil // { - // "description": "Generates a short-lived X509 certificate containing the provided public key\nand signed by a private key specific to the target instance. Users may use\nthe certificate to authenticate as themselves when connecting to the\ndatabase.", + // "description": "Generates a short-lived X509 certificate containing the provided public key and signed by a private key specific to the target instance. Users may use the certificate to authenticate as themselves when connecting to the database.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/createEphemeral", // "httpMethod": "POST", // "id": "sql.sslCerts.createEphemeral", @@ -9798,8 +9655,7 @@ type SslCertsDeleteCall struct { } // Delete: Deletes the SSL certificate. For First Generation instances, -// the -// certificate remains valid until the instance is restarted. +// the certificate remains valid until the instance is restarted. func (r *SslCertsService) Delete(project string, instance string, sha1Fingerprint string) *SslCertsDeleteCall { c := &SslCertsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -9835,7 +9691,7 @@ func (c *SslCertsDeleteCall) Header() http.Header { func (c *SslCertsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9896,7 +9752,7 @@ func (c *SslCertsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Deletes the SSL certificate. For First Generation instances, the\ncertificate remains valid until the instance is restarted.", + // "description": "Deletes the SSL certificate. For First Generation instances, the certificate remains valid until the instance is restarted.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/sslCerts/{sha1Fingerprint}", // "httpMethod": "DELETE", // "id": "sql.sslCerts.delete", @@ -9950,11 +9806,9 @@ type SslCertsGetCall struct { header_ http.Header } -// Get: Retrieves a particular SSL certificate. Does not include the -// private key -// (required for usage). The private key must be saved from the -// response to -// initial creation. +// Get: Retrieves a particular SSL certificate. Does not include the +// private key (required for usage). The private key must be saved from +// the response to initial creation. 
func (r *SslCertsService) Get(project string, instance string, sha1Fingerprint string) *SslCertsGetCall { c := &SslCertsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -10000,7 +9854,7 @@ func (c *SslCertsGetCall) Header() http.Header { func (c *SslCertsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10064,7 +9918,7 @@ func (c *SslCertsGetCall) Do(opts ...googleapi.CallOption) (*SslCert, error) { } return ret, nil // { - // "description": "Retrieves a particular SSL certificate. Does not include the private key\n(required for usage). The private key must be saved from the response to\ninitial creation.", + // "description": "Retrieves a particular SSL certificate. Does not include the private key (required for usage). The private key must be saved from the response to initial creation.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/sslCerts/{sha1Fingerprint}", // "httpMethod": "GET", // "id": "sql.sslCerts.get", @@ -10118,10 +9972,8 @@ type SslCertsInsertCall struct { } // Insert: Creates an SSL certificate and returns it along with the -// private key and -// server certificate authority. The new certificate will not be usable -// until -// the instance is restarted. +// private key and server certificate authority. The new certificate +// will not be usable until the instance is restarted. func (r *SslCertsService) Insert(project string, instance string, sslcertsinsertrequest *SslCertsInsertRequest) *SslCertsInsertCall { c := &SslCertsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -10157,7 +10009,7 @@ func (c *SslCertsInsertCall) Header() http.Header { func (c *SslCertsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10222,7 +10074,7 @@ func (c *SslCertsInsertCall) Do(opts ...googleapi.CallOption) (*SslCertsInsertRe } return ret, nil // { - // "description": "Creates an SSL certificate and returns it along with the private key and\nserver certificate authority. The new certificate will not be usable until\nthe instance is restarted.", + // "description": "Creates an SSL certificate and returns it along with the private key and server certificate authority. The new certificate will not be usable until the instance is restarted.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/sslCerts", // "httpMethod": "POST", // "id": "sql.sslCerts.insert", @@ -10316,7 +10168,7 @@ func (c *SslCertsListCall) Header() http.Header { func (c *SslCertsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10425,10 +10277,7 @@ type TiersListCall struct { } // List: Lists all available machine types (tiers) for Cloud SQL, for -// example, -// db-n1-standard-1. For related information, see -// Pricing. 
+// example, db-n1-standard-1. For related information, see Pricing. func (r *TiersService) List(project string) *TiersListCall { c := &TiersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -10472,7 +10321,7 @@ func (c *TiersListCall) Header() http.Header { func (c *TiersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10534,7 +10383,7 @@ func (c *TiersListCall) Do(opts ...googleapi.CallOption) (*TiersListResponse, er } return ret, nil // { - // "description": "Lists all available machine types (tiers) for Cloud SQL, for example,\ndb-n1-standard-1. For related information, see \u003ca\nhref=\"/sql/pricing\"\u003ePricing\u003c/a\u003e.", + // "description": "Lists all available machine types (tiers) for Cloud SQL, for example, db-n1-standard-1. For related information, see Pricing.", // "flatPath": "sql/v1beta4/projects/{project}/tiers", // "httpMethod": "GET", // "id": "sql.tiers.list", @@ -10621,7 +10470,7 @@ func (c *UsersDeleteCall) Header() http.Header { func (c *UsersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10773,7 +10622,7 @@ func (c *UsersInsertCall) Header() http.Header { func (c *UsersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10932,7 +10781,7 @@ func (c *UsersListCall) Header() http.Header { func (c *UsersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11091,7 +10940,7 @@ func (c *UsersUpdateCall) Header() http.Header { func (c *UsersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 8bb2fa6db67..1e076ab66d4 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"u9GIe6H63LSGq-9_t39K2Zx_EAc/SMGMLKKvE-TZrla7d9TA_SDVTI0\"", + "etag": "\"3133373531323239383338313531333236393038\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": 
"https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -3229,7 +3229,7 @@ } } }, - "revision": "20200430", + "revision": "20200927", "rootUrl": "https://storage.googleapis.com/", "schemas": { "Bucket": { @@ -3406,8 +3406,8 @@ "type": "string" }, "customTimeBefore": { - "description": "A timestamp in RFC 3339 format. This condition is satisfied when the custom time on an object is before this timestamp.", - "format": "date-time", + "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when the custom time on an object is before this date in UTC.", + "format": "date", "type": "string" }, "daysSinceCustomTime": { @@ -3436,8 +3436,8 @@ "type": "array" }, "noncurrentTimeBefore": { - "description": "A timestamp in RFC 3339 format. This condition is satisfied when the noncurrent time on an object is before this timestamp. This condition is relevant only for versioned objects.", - "format": "date-time", + "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when the noncurrent time on an object is before this date in UTC. This condition is relevant only for versioned objects.", + "format": "date", "type": "string" }, "numNewerVersions": { @@ -3579,10 +3579,6 @@ "type": "string" }, "type": "array" - }, - "zoneSeparation": { - "description": "If set, objects placed in this bucket are required to be separated by disaster domain.", - "type": "boolean" } }, "type": "object" @@ -4096,7 +4092,7 @@ "type": "string" }, "kmsKeyName": { - "description": "Cloud KMS Key used to encrypt this object, if the object is encrypted by such a key.", + "description": "Not currently supported. Specifying the parameter causes the request to fail with status code 400 - Bad Request.", "type": "string" }, "md5Hash": { diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index e214ab5b37e..badf3b8e565 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -400,10 +400,6 @@ type Bucket struct { // requests will fail with a 400 Bad Request response. ZoneAffinity []string `json:"zoneAffinity,omitempty"` - // ZoneSeparation: If set, objects placed in this bucket are required to - // be separated by disaster domain. - ZoneSeparation bool `json:"zoneSeparation,omitempty"` - // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -756,8 +752,9 @@ type BucketLifecycleRuleCondition struct { // is created before midnight of the specified date in UTC. CreatedBefore string `json:"createdBefore,omitempty"` - // CustomTimeBefore: A timestamp in RFC 3339 format. This condition is - // satisfied when the custom time on an object is before this timestamp. + // CustomTimeBefore: A date in RFC 3339 format with only the date part + // (for instance, "2013-01-15"). This condition is satisfied when the + // custom time on an object is before this date in UTC. CustomTimeBefore string `json:"customTimeBefore,omitempty"` // DaysSinceCustomTime: Number of days elapsed since the user-specified @@ -793,9 +790,10 @@ type BucketLifecycleRuleCondition struct { // DURABLE_REDUCED_AVAILABILITY. MatchesStorageClass []string `json:"matchesStorageClass,omitempty"` - // NoncurrentTimeBefore: A timestamp in RFC 3339 format. 
This condition - // is satisfied when the noncurrent time on an object is before this - // timestamp. This condition is relevant only for versioned objects. + // NoncurrentTimeBefore: A date in RFC 3339 format with only the date + // part (for instance, "2013-01-15"). This condition is satisfied when + // the noncurrent time on an object is before this date in UTC. This + // condition is relevant only for versioned objects. NoncurrentTimeBefore string `json:"noncurrentTimeBefore,omitempty"` // NumNewerVersions: Relevant only for versioned objects. If the value @@ -1734,8 +1732,8 @@ type Object struct { // storage#object. Kind string `json:"kind,omitempty"` - // KmsKeyName: Cloud KMS Key used to encrypt this object, if the object - // is encrypted by such a key. + // KmsKeyName: Not currently supported. Specifying the parameter causes + // the request to fail with status code 400 - Bad Request. KmsKeyName string `json:"kmsKeyName,omitempty"` // Md5Hash: MD5 hash of the data; encoded using base64. For more @@ -2442,7 +2440,7 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header { func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2590,7 +2588,7 @@ func (c *BucketAccessControlsGetCall) Header() http.Header { func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2757,7 +2755,7 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header { func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2930,7 +2928,7 @@ func (c *BucketAccessControlsListCall) Header() http.Header { func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3091,7 +3089,7 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header { func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3265,7 +3263,7 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header { func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" 
gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3451,7 +3449,7 @@ func (c *BucketsDeleteCall) Header() http.Header { func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3630,7 +3628,7 @@ func (c *BucketsGetCall) Header() http.Header { func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3836,7 +3834,7 @@ func (c *BucketsGetIamPolicyCall) Header() http.Header { func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4053,7 +4051,7 @@ func (c *BucketsInsertCall) Header() http.Header { func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4310,7 +4308,7 @@ func (c *BucketsListCall) Header() http.Header { func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4520,7 +4518,7 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header { func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4755,7 +4753,7 @@ func (c *BucketsPatchCall) Header() http.Header { func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4984,7 +4982,7 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header { func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5159,7 +5157,7 @@ func (c *BucketsTestIamPermissionsCall) Header() http.Header { func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := 
make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5399,7 +5397,7 @@ func (c *BucketsUpdateCall) Header() http.Header { func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5611,7 +5609,7 @@ func (c *ChannelsStopCall) Header() http.Header { func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5728,7 +5726,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5876,7 +5874,7 @@ func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6044,7 +6042,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6234,7 +6232,7 @@ func (c *DefaultObjectAccessControlsListCall) Header() http.Header { func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6407,7 +6405,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6581,7 +6579,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6753,7 +6751,7 @@ func (c *NotificationsDeleteCall) Header() http.Header { func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6901,7 +6899,7 @@ func (c *NotificationsGetCall) Header() http.Header { func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7071,7 +7069,7 @@ func (c *NotificationsInsertCall) Header() http.Header { func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7246,7 +7244,7 @@ func (c *NotificationsListCall) Header() http.Header { func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7419,7 +7417,7 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header { func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7591,7 +7589,7 @@ func (c *ObjectAccessControlsGetCall) Header() http.Header { func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7782,7 +7780,7 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header { func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7979,7 +7977,7 @@ func (c *ObjectAccessControlsListCall) Header() http.Header { func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ 
{ reqHeaders[k] = v } @@ -8164,7 +8162,7 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header { func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8362,7 +8360,7 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header { func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8599,7 +8597,7 @@ func (c *ObjectsComposeCall) Header() http.Header { func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8942,7 +8940,7 @@ func (c *ObjectsCopyCall) Header() http.Header { func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9270,7 +9268,7 @@ func (c *ObjectsDeleteCall) Header() http.Header { func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9503,7 +9501,7 @@ func (c *ObjectsGetCall) Header() http.Header { func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9753,7 +9751,7 @@ func (c *ObjectsGetIamPolicyCall) Header() http.Header { func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10070,7 +10068,7 @@ func (c *ObjectsInsertCall) Header() http.Header { func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10104,7 +10102,7 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - return gensupport.SendRequest(c.ctx_, c.s.client, 
req) + return gensupport.SendRequestWithRetry(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.insert" call. @@ -10443,7 +10441,7 @@ func (c *ObjectsListCall) Header() http.Header { func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10760,7 +10758,7 @@ func (c *ObjectsPatchCall) Header() http.Header { func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11152,7 +11150,7 @@ func (c *ObjectsRewriteCall) Header() http.Header { func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11455,7 +11453,7 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header { func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11655,7 +11653,7 @@ func (c *ObjectsTestIamPermissionsCall) Header() http.Header { func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11916,7 +11914,7 @@ func (c *ObjectsUpdateCall) Header() http.Header { func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12234,7 +12232,7 @@ func (c *ObjectsWatchAllCall) Header() http.Header { func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12450,7 +12448,7 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header { func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12600,7 +12598,7 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { func (c 
*ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12735,7 +12733,7 @@ func (c *ProjectsHmacKeysGetCall) Header() http.Header { func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12935,7 +12933,7 @@ func (c *ProjectsHmacKeysListCall) Header() http.Header { func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13130,7 +13128,7 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13307,7 +13305,7 @@ func (c *ProjectsServiceAccountGetCall) Header() http.Header { func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-api.json b/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-api.json index 864313c4c27..b9849cd5ce3 100644 --- a/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-api.json +++ b/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-api.json @@ -107,7 +107,7 @@ "googleServiceAccounts": { "methods": { "get": { - "description": "Returns the Google service account that is used by Storage Transfer\nService to access buckets in the project where transfers\nrun or in other projects. Each Google service account is associated\nwith one Google Cloud Platform Console project. Users\nshould add this service account to the Google Cloud Storage bucket\nACLs to grant access to Storage Transfer Service. This service\naccount is created and owned by Storage Transfer Service and can\nonly be used by Storage Transfer Service.", + "description": "Returns the Google service account that is used by Storage Transfer Service to access buckets in the project where transfers run or in other projects. Each Google service account is associated with one Google Cloud Platform Console project. Users should add this service account to the Google Cloud Storage bucket ACLs to grant access to Storage Transfer Service. 
This service account is created and owned by Storage Transfer Service and can only be used by Storage Transfer Service.", "flatPath": "v1/googleServiceAccounts/{projectId}", "httpMethod": "GET", "id": "storagetransfer.googleServiceAccounts.get", @@ -116,7 +116,7 @@ ], "parameters": { "projectId": { - "description": "Required. The ID of the Google Cloud Platform Console project that the\nGoogle service account is associated with.", + "description": "Required. The ID of the Google Cloud Platform Console project that the Google service account is associated with.", "location": "path", "required": true, "type": "string" @@ -162,14 +162,14 @@ ], "parameters": { "jobName": { - "description": "Required. The job to get.", + "description": "Required. \" The job to get.", "location": "path", "pattern": "^transferJobs/.*$", "required": true, "type": "string" }, "projectId": { - "description": "Required. The ID of the Google Cloud Platform Console project that owns the\njob.", + "description": "Required. The ID of the Google Cloud Platform Console project that owns the job.", "location": "query", "type": "string" } @@ -190,7 +190,7 @@ "parameterOrder": [], "parameters": { "filter": { - "description": "Required. A list of query parameters specified as JSON text in the form of:\n{\"project\u003cspan\u003e_\u003c/span\u003eid\":\"my_project_id\",\n \"job_names\":[\"jobid1\",\"jobid2\",...],\n \"job_statuses\":[\"status1\",\"status2\",...]}.\nSince `job_names` and `job_statuses` support multiple values, their values\nmust be specified with array notation. `project`\u003cspan\u003e`_`\u003c/span\u003e`id` is\nrequired. `job_names` and `job_statuses` are optional. The valid values\nfor `job_statuses` are case-insensitive:\nENABLED,\nDISABLED, and\nDELETED.", + "description": "Required. A list of query parameters specified as JSON text in the form of: {\"project_id\":\"my_project_id\", \"job_names\":[\"jobid1\",\"jobid2\",...], \"job_statuses\":[\"status1\",\"status2\",...]}. Since `job_names` and `job_statuses` support multiple values, their values must be specified with array notation. `project``_``id` is required. `job_names` and `job_statuses` are optional. The valid values for `job_statuses` are case-insensitive: ENABLED, DISABLED, and DELETED.", "location": "query", "type": "string" }, @@ -215,7 +215,7 @@ ] }, "patch": { - "description": "Updates a transfer job. Updating a job's transfer spec does not affect\ntransfer operations that are running already. Updating a job's schedule\nis not allowed.\n\n**Note:** The job's status field can be modified\nusing this RPC (for example, to set a job's status to\nDELETED,\nDISABLED, or\nENABLED).", + "description": "Updates a transfer job. Updating a job's transfer spec does not affect transfer operations that are running already. Updating a job's schedule is not allowed. **Note:** The job's status field can be modified using this RPC (for example, to set a job's status to DELETED, DISABLED, or ENABLED).", "flatPath": "v1/transferJobs/{transferJobsId}", "httpMethod": "PATCH", "id": "storagetransfer.transferJobs.patch", @@ -247,7 +247,7 @@ "transferOperations": { "methods": { "cancel": { - "description": "Cancels a transfer. Use the get method to check whether the cancellation succeeded or whether the operation completed despite cancellation.", + "description": "Cancels a transfer. Use the transferOperations.get method to check if the cancellation succeeded or if the operation completed despite the `cancel` request. 
When you cancel an operation, the currently running transfer is interrupted. For recurring transfer jobs, the next instance of the transfer job will still run. For example, if your job is configured to run every day at 1pm and you cancel Monday's operation at 1:05pm, Monday's transfer will stop. However, a transfer job will still be attempted on Tuesday. This applies only to currently running operations. If an operation is not currently running, `cancel` does nothing. *Caution:* Canceling a transfer job can leave your data in an unknown state. We recommend that you restore the state at both the destination and the source after the `cancel` request completes so that your data is in a consistent state. When you cancel a job, the next job computes a delta of files and may repair any inconsistent state. For instance, if you run a job every day, and today's job found 10 new files and transferred five files before you canceled the job, tomorrow's transfer operation will compute a new delta with the five files that were not copied today plus any new files discovered tomorrow.", "flatPath": "v1/transferOperations/{transferOperationsId}:cancel", "httpMethod": "POST", "id": "storagetransfer.transferOperations.cancel", @@ -264,6 +264,9 @@ } }, "path": "v1/{+name}:cancel", + "request": { + "$ref": "CancelOperationRequest" + }, "response": { "$ref": "Empty" }, @@ -272,7 +275,7 @@ ] }, "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", "flatPath": "v1/transferOperations/{transferOperationsId}", "httpMethod": "GET", "id": "storagetransfer.transferOperations.get", @@ -306,7 +309,7 @@ ], "parameters": { "filter": { - "description": "Required. A list of query parameters specified as JSON text in the form of: {\"project\u003cspan\u003e_\u003c/span\u003eid\":\"my_project_id\",\n \"job_names\":[\"jobid1\",\"jobid2\",...],\n \"operation_names\":[\"opid1\",\"opid2\",...],\n \"transfer_statuses\":[\"status1\",\"status2\",...]}.\nSince `job_names`, `operation_names`, and `transfer_statuses` support multiple values, they must be specified with array notation. `project`\u003cspan\u003e`_`\u003c/span\u003e`id` is required. `job_names`, `operation_names`, and `transfer_statuses` are optional. The valid values for `transfer_statuses` are case-insensitive: IN_PROGRESS, PAUSED, SUCCESS, FAILED, and ABORTED.", + "description": "Required. A list of query parameters specified as JSON text in the form of: {\"project_id\":\"my_project_id\", \"job_names\":[\"jobid1\",\"jobid2\",...], \"operation_names\":[\"opid1\",\"opid2\",...], \"transfer_statuses\":[\"status1\",\"status2\",...]}. Since `job_names`, `operation_names`, and `transfer_statuses` support multiple values, they must be specified with array notation. `project``_``id` is required. `job_names`, `operation_names`, and `transfer_statuses` are optional. 
The valid values for `transfer_statuses` are case-insensitive: IN_PROGRESS, PAUSED, SUCCESS, FAILED, and ABORTED.", "location": "query", "type": "string" }, @@ -396,11 +399,11 @@ } } }, - "revision": "20200405", + "revision": "20201001", "rootUrl": "https://storagetransfer.googleapis.com/", "schemas": { "AwsAccessKey": { - "description": "AWS access key (see\n[AWS Security\nCredentials](https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html)).", + "description": "AWS access key (see [AWS Security Credentials](https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html)).", "id": "AwsAccessKey", "properties": { "accessKeyId": { @@ -408,34 +411,34 @@ "type": "string" }, "secretAccessKey": { - "description": "Required. AWS secret access key. This field is not returned in RPC\nresponses.", + "description": "Required. AWS secret access key. This field is not returned in RPC responses.", "type": "string" } }, "type": "object" }, "AwsS3Data": { - "description": "An AwsS3Data resource can be a data source, but not a data sink.\nIn an AwsS3Data resource, an object's name is the S3 object's key name.", + "description": "An AwsS3Data resource can be a data source, but not a data sink. In an AwsS3Data resource, an object's name is the S3 object's key name.", "id": "AwsS3Data", "properties": { "awsAccessKey": { "$ref": "AwsAccessKey", - "description": "Required. AWS access key used to sign the API requests to the AWS S3\nbucket. Permissions on the bucket must be granted to the access ID of the\nAWS access key." + "description": "Required. Input only. AWS access key used to sign the API requests to the AWS S3 bucket. Permissions on the bucket must be granted to the access ID of the AWS access key." }, "bucketName": { - "description": "Required. S3 Bucket name (see\n[Creating a\nbucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)).", + "description": "Required. S3 Bucket name (see [Creating a bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)).", "type": "string" } }, "type": "object" }, "AzureBlobStorageData": { - "description": "An AzureBlobStorageData resource can be a data source, but not a data sink.\nAn AzureBlobStorageData resource represents one Azure container. The storage\naccount determines the [Azure\nendpoint](https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account#storage-account-endpoints).\nIn an AzureBlobStorageData resource, a blobs's name is the [Azure Blob\nStorage blob's key\nname](https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#blob-names).", + "description": "An AzureBlobStorageData resource can be a data source, but not a data sink. An AzureBlobStorageData resource represents one Azure container. The storage account determines the [Azure endpoint](https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account#storage-account-endpoints). In an AzureBlobStorageData resource, a blobs's name is the [Azure Blob Storage blob's key name](https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#blob-names).", "id": "AzureBlobStorageData", "properties": { "azureCredentials": { "$ref": "AzureCredentials", - "description": "Required. Credentials used to authenticate API requests to Azure." + "description": "Required. Input only. Credentials used to authenticate API requests to Azure." 
}, "container": { "description": "Required. The container to transfer from the Azure Storage account.", @@ -453,28 +456,34 @@ "id": "AzureCredentials", "properties": { "sasToken": { - "description": "Required. Azure shared access signature. (see\n[Grant limited access to Azure Storage resources using shared access\nsignatures\n(SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview)).", + "description": "Required. Azure shared access signature. (see [Grant limited access to Azure Storage resources using shared access signatures (SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview)).", "type": "string" } }, "type": "object" }, + "CancelOperationRequest": { + "description": "The request message for Operations.CancelOperation.", + "id": "CancelOperationRequest", + "properties": {}, + "type": "object" + }, "Date": { - "description": "Represents a whole or partial calendar date, e.g. a birthday. The time of day\nand time zone are either specified elsewhere or are not significant. The date\nis relative to the Proleptic Gregorian Calendar. This can represent:\n\n* A full date, with non-zero year, month and day values\n* A month and day value, with a zero year, e.g. an anniversary\n* A year on its own, with zero month and day values\n* A year and month value, with a zero day, e.g. a credit card expiration date\n\nRelated types are google.type.TimeOfDay and `google.protobuf.Timestamp`.", + "description": "Represents a whole or partial calendar date, e.g. a birthday. The time of day and time zone are either specified elsewhere or are not significant. The date is relative to the Proleptic Gregorian Calendar. This can represent: * A full date, with non-zero year, month and day values * A month and day value, with a zero year, e.g. an anniversary * A year on its own, with zero month and day values * A year and month value, with a zero day, e.g. a credit card expiration date Related types are google.type.TimeOfDay and `google.protobuf.Timestamp`.", "id": "Date", "properties": { "day": { - "description": "Day of month. Must be from 1 to 31 and valid for the year and month, or 0\nif specifying a year by itself or a year and month where the day is not\nsignificant.", + "description": "Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a year by itself or a year and month where the day is not significant.", "format": "int32", "type": "integer" }, "month": { - "description": "Month of year. Must be from 1 to 12, or 0 if specifying a year without a\nmonth and day.", + "description": "Month of year. Must be from 1 to 12, or 0 if specifying a year without a month and day.", "format": "int32", "type": "integer" }, "year": { - "description": "Year of date. Must be from 1 to 9999, or 0 if specifying a date without\na year.", + "description": "Year of date. Must be from 1 to 9999, or 0 if specifying a date without a year.", "format": "int32", "type": "integer" } @@ -482,7 +491,7 @@ "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. 
A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" @@ -499,14 +508,14 @@ "type": "array" }, "url": { - "description": "Required. A URL that refers to the target (a data source, a data sink,\nor an object) with which the error is associated.", + "description": "Required. A URL that refers to the target (a data source, a data sink, or an object) with which the error is associated.", "type": "string" } }, "type": "object" }, "ErrorSummary": { - "description": "A summary of errors by error code, plus a count and sample error log\nentries.", + "description": "A summary of errors by error code, plus a count and sample error log entries.", "id": "ErrorSummary", "properties": { "errorCode": { @@ -531,23 +540,23 @@ "DATA_LOSS" ], "enumDescriptions": [ - "Not an error; returned on success\n\nHTTP Mapping: 200 OK", - "The operation was cancelled, typically by the caller.\n\nHTTP Mapping: 499 Client Closed Request", - "Unknown error. For example, this error may be returned when\na `Status` value received from another address space belongs to\nan error space that is not known in this address space. Also\nerrors raised by APIs that do not return enough error information\nmay be converted to this error.\n\nHTTP Mapping: 500 Internal Server Error", - "The client specified an invalid argument. Note that this differs\nfrom `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments\nthat are problematic regardless of the state of the system\n(e.g., a malformed file name).\n\nHTTP Mapping: 400 Bad Request", - "The deadline expired before the operation could complete. For operations\nthat change the state of the system, this error may be returned\neven if the operation has completed successfully. For example, a\nsuccessful response from a server could have been delayed long\nenough for the deadline to expire.\n\nHTTP Mapping: 504 Gateway Timeout", - "Some requested entity (e.g., file or directory) was not found.\n\nNote to server developers: if a request is denied for an entire class\nof users, such as gradual feature rollout or undocumented whitelist,\n`NOT_FOUND` may be used. If a request is denied for some users within\na class of users, such as user-based access control, `PERMISSION_DENIED`\nmust be used.\n\nHTTP Mapping: 404 Not Found", - "The entity that a client attempted to create (e.g., file or directory)\nalready exists.\n\nHTTP Mapping: 409 Conflict", - "The caller does not have permission to execute the specified\noperation. `PERMISSION_DENIED` must not be used for rejections\ncaused by exhausting some resource (use `RESOURCE_EXHAUSTED`\ninstead for those errors). `PERMISSION_DENIED` must not be\nused if the caller can not be identified (use `UNAUTHENTICATED`\ninstead for those errors). This error code does not imply the\nrequest is valid or the requested entity exists or satisfies\nother pre-conditions.\n\nHTTP Mapping: 403 Forbidden", - "The request does not have valid authentication credentials for the\noperation.\n\nHTTP Mapping: 401 Unauthorized", - "Some resource has been exhausted, perhaps a per-user quota, or\nperhaps the entire file system is out of space.\n\nHTTP Mapping: 429 Too Many Requests", - "The operation was rejected because the system is not in a state\nrequired for the operation's execution. 
For example, the directory\nto be deleted is non-empty, an rmdir operation is applied to\na non-directory, etc.\n\nService implementors can use the following guidelines to decide\nbetween `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:\n (a) Use `UNAVAILABLE` if the client can retry just the failing call.\n (b) Use `ABORTED` if the client should retry at a higher level\n (e.g., when a client-specified test-and-set fails, indicating the\n client should restart a read-modify-write sequence).\n (c) Use `FAILED_PRECONDITION` if the client should not retry until\n the system state has been explicitly fixed. E.g., if an \"rmdir\"\n fails because the directory is non-empty, `FAILED_PRECONDITION`\n should be returned since the client should not retry unless\n the files are deleted from the directory.\n\nHTTP Mapping: 400 Bad Request", - "The operation was aborted, typically due to a concurrency issue such as\na sequencer check failure or transaction abort.\n\nSee the guidelines above for deciding between `FAILED_PRECONDITION`,\n`ABORTED`, and `UNAVAILABLE`.\n\nHTTP Mapping: 409 Conflict", - "The operation was attempted past the valid range. E.g., seeking or\nreading past end-of-file.\n\nUnlike `INVALID_ARGUMENT`, this error indicates a problem that may\nbe fixed if the system state changes. For example, a 32-bit file\nsystem will generate `INVALID_ARGUMENT` if asked to read at an\noffset that is not in the range [0,2^32-1], but it will generate\n`OUT_OF_RANGE` if asked to read from an offset past the current\nfile size.\n\nThere is a fair bit of overlap between `FAILED_PRECONDITION` and\n`OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific\nerror) when it applies so that callers who are iterating through\na space can easily look for an `OUT_OF_RANGE` error to detect when\nthey are done.\n\nHTTP Mapping: 400 Bad Request", - "The operation is not implemented or is not supported/enabled in this\nservice.\n\nHTTP Mapping: 501 Not Implemented", - "Internal errors. This means that some invariants expected by the\nunderlying system have been broken. This error code is reserved\nfor serious errors.\n\nHTTP Mapping: 500 Internal Server Error", - "The service is currently unavailable. This is most likely a\ntransient condition, which can be corrected by retrying with\na backoff. Note that it is not always safe to retry\nnon-idempotent operations.\n\nSee the guidelines above for deciding between `FAILED_PRECONDITION`,\n`ABORTED`, and `UNAVAILABLE`.\n\nHTTP Mapping: 503 Service Unavailable", - "Unrecoverable data loss or corruption.\n\nHTTP Mapping: 500 Internal Server Error" + "Not an error; returned on success HTTP Mapping: 200 OK", + "The operation was cancelled, typically by the caller. HTTP Mapping: 499 Client Closed Request", + "Unknown error. For example, this error may be returned when a `Status` value received from another address space belongs to an error space that is not known in this address space. Also errors raised by APIs that do not return enough error information may be converted to this error. HTTP Mapping: 500 Internal Server Error", + "The client specified an invalid argument. Note that this differs from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments that are problematic regardless of the state of the system (e.g., a malformed file name). HTTP Mapping: 400 Bad Request", + "The deadline expired before the operation could complete. 
For operations that change the state of the system, this error may be returned even if the operation has completed successfully. For example, a successful response from a server could have been delayed long enough for the deadline to expire. HTTP Mapping: 504 Gateway Timeout", + "Some requested entity (e.g., file or directory) was not found. Note to server developers: if a request is denied for an entire class of users, such as gradual feature rollout or undocumented allowlist, `NOT_FOUND` may be used. If a request is denied for some users within a class of users, such as user-based access control, `PERMISSION_DENIED` must be used. HTTP Mapping: 404 Not Found", + "The entity that a client attempted to create (e.g., file or directory) already exists. HTTP Mapping: 409 Conflict", + "The caller does not have permission to execute the specified operation. `PERMISSION_DENIED` must not be used for rejections caused by exhausting some resource (use `RESOURCE_EXHAUSTED` instead for those errors). `PERMISSION_DENIED` must not be used if the caller can not be identified (use `UNAUTHENTICATED` instead for those errors). This error code does not imply the request is valid or the requested entity exists or satisfies other pre-conditions. HTTP Mapping: 403 Forbidden", + "The request does not have valid authentication credentials for the operation. HTTP Mapping: 401 Unauthorized", + "Some resource has been exhausted, perhaps a per-user quota, or perhaps the entire file system is out of space. HTTP Mapping: 429 Too Many Requests", + "The operation was rejected because the system is not in a state required for the operation's execution. For example, the directory to be deleted is non-empty, an rmdir operation is applied to a non-directory, etc. Service implementors can use the following guidelines to decide between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: (a) Use `UNAVAILABLE` if the client can retry just the failing call. (b) Use `ABORTED` if the client should retry at a higher level (e.g., when a client-specified test-and-set fails, indicating the client should restart a read-modify-write sequence). (c) Use `FAILED_PRECONDITION` if the client should not retry until the system state has been explicitly fixed. E.g., if an \"rmdir\" fails because the directory is non-empty, `FAILED_PRECONDITION` should be returned since the client should not retry unless the files are deleted from the directory. HTTP Mapping: 400 Bad Request", + "The operation was aborted, typically due to a concurrency issue such as a sequencer check failure or transaction abort. See the guidelines above for deciding between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. HTTP Mapping: 409 Conflict", + "The operation was attempted past the valid range. E.g., seeking or reading past end-of-file. Unlike `INVALID_ARGUMENT`, this error indicates a problem that may be fixed if the system state changes. For example, a 32-bit file system will generate `INVALID_ARGUMENT` if asked to read at an offset that is not in the range [0,2^32-1], but it will generate `OUT_OF_RANGE` if asked to read from an offset past the current file size. There is a fair bit of overlap between `FAILED_PRECONDITION` and `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific error) when it applies so that callers who are iterating through a space can easily look for an `OUT_OF_RANGE` error to detect when they are done. HTTP Mapping: 400 Bad Request", + "The operation is not implemented or is not supported/enabled in this service. 
HTTP Mapping: 501 Not Implemented", + "Internal errors. This means that some invariants expected by the underlying system have been broken. This error code is reserved for serious errors. HTTP Mapping: 500 Internal Server Error", + "The service is currently unavailable. This is most likely a transient condition, which can be corrected by retrying with a backoff. Note that it is not always safe to retry non-idempotent operations. See the guidelines above for deciding between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. HTTP Mapping: 503 Service Unavailable", + "Unrecoverable data loss or corruption. HTTP Mapping: 500 Internal Server Error" ], "type": "string" }, @@ -557,7 +566,7 @@ "type": "string" }, "errorLogEntries": { - "description": "Error samples.\n\nAt most 5 error log entries will be recorded for a given\nerror code for a single transfer operation.", + "description": "Error samples. At most 5 error log entries will be recorded for a given error code for a single transfer operation.", "items": { "$ref": "ErrorLogEntry" }, @@ -567,11 +576,11 @@ "type": "object" }, "GcsData": { - "description": "In a GcsData resource, an object's name is the Cloud Storage object's\nname and its \"last modification time\" refers to the object's `updated`\nproperty of Cloud Storage objects, which changes when the content or the\nmetadata of the object is updated.", + "description": "In a GcsData resource, an object's name is the Cloud Storage object's name and its \"last modification time\" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated.", "id": "GcsData", "properties": { "bucketName": { - "description": "Required. Cloud Storage bucket name (see\n[Bucket Name\nRequirements](https://cloud.google.com/storage/docs/naming#requirements)).", + "description": "Required. Cloud Storage bucket name (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/naming#requirements)).", "type": "string" } }, @@ -589,11 +598,11 @@ "type": "object" }, "HttpData": { - "description": "An HttpData resource specifies a list of objects on the web to be transferred\nover HTTP. The information of the objects to be transferred is contained in\na file referenced by a URL. The first line in the file must be\n`\"TsvHttpData-1.0\"`, which specifies the format of the file. Subsequent\nlines specify the information of the list of objects, one object per list\nentry. Each entry has the following tab-delimited fields:\n\n* **HTTP URL** — The location of the object.\n\n* **Length** — The size of the object in bytes.\n\n* **MD5** — The base64-encoded MD5 hash of the object.\n\nFor an example of a valid TSV file, see\n[Transferring data from\nURLs](https://cloud.google.com/storage-transfer/docs/create-url-list).\n\nWhen transferring data based on a URL list, keep the following in mind:\n\n* When an object located at `http(s)://hostname:port/\u003cURL-path\u003e` is\ntransferred to a data sink, the name of the object at the data sink is\n`\u003chostname\u003e/\u003cURL-path\u003e`.\n\n* If the specified size of an object does not match the actual size of the\nobject fetched, the object will not be transferred.\n\n* If the specified MD5 does not match the MD5 computed from the transferred\nbytes, the object transfer will fail. For more information, see\n[Generating MD5\nhashes](https://cloud.google.com/storage-transfer/docs/create-url-list#md5)\n\n* Ensure that each URL you specify is publicly accessible. 
For\nexample, in Cloud Storage you can\n[share an object publicly]\n(https://cloud.google.com/storage/docs/cloud-console#_sharingdata) and get\na link to it.\n\n* Storage Transfer Service obeys `robots.txt` rules and requires the source\nHTTP server to support `Range` requests and to return a `Content-Length`\nheader in each response.\n\n* ObjectConditions have no effect when filtering objects to transfer.", + "description": "An HttpData resource specifies a list of objects on the web to be transferred over HTTP. The information of the objects to be transferred is contained in a file referenced by a URL. The first line in the file must be `\"TsvHttpData-1.0\"`, which specifies the format of the file. Subsequent lines specify the information of the list of objects, one object per list entry. Each entry has the following tab-delimited fields: * **HTTP URL** — The location of the object. * **Length** — The size of the object in bytes. * **MD5** — The base64-encoded MD5 hash of the object. For an example of a valid TSV file, see [Transferring data from URLs](https://cloud.google.com/storage-transfer/docs/create-url-list). When transferring data based on a URL list, keep the following in mind: * When an object located at `http(s)://hostname:port/` is transferred to a data sink, the name of the object at the data sink is `/`. * If the specified size of an object does not match the actual size of the object fetched, the object will not be transferred. * If the specified MD5 does not match the MD5 computed from the transferred bytes, the object transfer will fail. For more information, see [Generating MD5 hashes](https://cloud.google.com/storage-transfer/docs/create-url-list#md5) * Ensure that each URL you specify is publicly accessible. For example, in Cloud Storage you can [share an object publicly] (https://cloud.google.com/storage/docs/cloud-console#_sharingdata) and get a link to it. * Storage Transfer Service obeys `robots.txt` rules and requires the source HTTP server to support `Range` requests and to return a `Content-Length` header in each response. * ObjectConditions have no effect when filtering objects to transfer.", "id": "HttpData", "properties": { "listUrl": { - "description": "Required. The URL that points to the file that stores the object list\nentries. This file must allow public access. Currently, only URLs with\nHTTP and HTTPS schemes are supported.", + "description": "Required. The URL that points to the file that stores the object list entries. This file must allow public access. Currently, only URLs with HTTP and HTTPS schemes are supported.", "type": "string" } }, @@ -636,17 +645,11 @@ "type": "object" }, "NotificationConfig": { - "description": "Specification to configure notifications published to Cloud Pub/Sub.\nNotifications will be published to the customer-provided topic using the\nfollowing `PubsubMessage.attributes`:\n\n* `\"eventType\"`: one of the EventType values\n* `\"payloadFormat\"`: one of the PayloadFormat values\n* `\"projectId\"`: the project_id of the\n`TransferOperation`\n* `\"transferJobName\"`: the\ntransfer_job_name of the\n`TransferOperation`\n* `\"transferOperationName\"`: the name of the\n`TransferOperation`\n\nThe `PubsubMessage.data` will contain a TransferOperation resource\nformatted according to the specified `PayloadFormat`.", + "description": "Specification to configure notifications published to Cloud Pub/Sub. 
Notifications will be published to the customer-provided topic using the following `PubsubMessage.attributes`: * `\"eventType\"`: one of the EventType values * `\"payloadFormat\"`: one of the PayloadFormat values * `\"projectId\"`: the project_id of the `TransferOperation` * `\"transferJobName\"`: the transfer_job_name of the `TransferOperation` * `\"transferOperationName\"`: the name of the `TransferOperation` The `PubsubMessage.data` will contain a TransferOperation resource formatted according to the specified `PayloadFormat`.", "id": "NotificationConfig", "properties": { "eventTypes": { - "description": "Event types for which a notification is desired. If empty, send\nnotifications for all event types.", - "enumDescriptions": [ - "Illegal value, to avoid allowing a default.", - "`TransferOperation` completed with status\nSUCCESS.", - "`TransferOperation` completed with status\nFAILED.", - "`TransferOperation` completed with status\nABORTED." - ], + "description": "Event types for which a notification is desired. If empty, send notifications for all event types.", "items": { "enum": [ "EVENT_TYPE_UNSPECIFIED", @@ -654,6 +657,12 @@ "TRANSFER_OPERATION_FAILED", "TRANSFER_OPERATION_ABORTED" ], + "enumDescriptions": [ + "Illegal value, to avoid allowing a default.", + "`TransferOperation` completed with status SUCCESS.", + "`TransferOperation` completed with status FAILED.", + "`TransferOperation` completed with status ABORTED." + ], "type": "string" }, "type": "array" @@ -668,52 +677,52 @@ "enumDescriptions": [ "Illegal value, to avoid allowing a default.", "No payload is included with the notification.", - "`TransferOperation` is [formatted as a JSON\nresponse](https://developers.google.com/protocol-buffers/docs/proto3#json),\nin application/json." + "`TransferOperation` is [formatted as a JSON response](https://developers.google.com/protocol-buffers/docs/proto3#json), in application/json." ], "type": "string" }, "pubsubTopic": { - "description": "Required. The `Topic.name` of the Cloud Pub/Sub topic to which to publish\nnotifications. Must be of the format: `projects/{project}/topics/{topic}`.\nNot matching this format will result in an\nINVALID_ARGUMENT error.", + "description": "Required. The `Topic.name` of the Cloud Pub/Sub topic to which to publish notifications. Must be of the format: `projects/{project}/topics/{topic}`. Not matching this format will result in an INVALID_ARGUMENT error.", "type": "string" } }, "type": "object" }, "ObjectConditions": { - "description": "Conditions that determine which objects will be transferred. Applies only\nto S3 and Cloud Storage objects.\n\nThe \"last modification time\" refers to the time of the\nlast change to the object's content or metadata — specifically, this is\nthe `updated` property of Cloud Storage objects and the `LastModified`\nfield of S3 objects.", + "description": "Conditions that determine which objects will be transferred. Applies only to Cloud Data Sources such as S3, Azure, and Cloud Storage. 
The \"last modification time\" refers to the time of the last change to the object's content or metadata — specifically, this is the `updated` property of Cloud Storage objects, the `LastModified` field of S3 objects, and the `Last-Modified` header of Azure blobs.", "id": "ObjectConditions", "properties": { "excludePrefixes": { - "description": "`exclude_prefixes` must follow the requirements described for\ninclude_prefixes.\n\nThe max size of `exclude_prefixes` is 1000.", + "description": "`exclude_prefixes` must follow the requirements described for include_prefixes. The max size of `exclude_prefixes` is 1000.", "items": { "type": "string" }, "type": "array" }, "includePrefixes": { - "description": "If `include_prefixes` is specified, objects that satisfy the object\nconditions must have names that start with one of the `include_prefixes`\nand that do not start with any of the exclude_prefixes. If\n`include_prefixes` is not specified, all objects except those that have\nnames starting with one of the `exclude_prefixes` must satisfy the object\nconditions.\n\nRequirements:\n\n * Each include-prefix and exclude-prefix can contain any sequence of\n Unicode characters, to a max length of 1024 bytes when UTF8-encoded,\n and must not contain Carriage Return or Line Feed characters. Wildcard\n matching and regular expression matching are not supported.\n\n * Each include-prefix and exclude-prefix must omit the leading slash.\n For example, to include the `requests.gz` object in a transfer from\n `s3://my-aws-bucket/logs/y=2015/requests.gz`, specify the include\n prefix as `logs/y=2015/requests.gz`.\n\n * None of the include-prefix or the exclude-prefix values can be empty,\n if specified.\n\n * Each include-prefix must include a distinct portion of the object\n namespace. No include-prefix may be a prefix of another\n include-prefix.\n\n * Each exclude-prefix must exclude a distinct portion of the object\n namespace. No exclude-prefix may be a prefix of another\n exclude-prefix.\n\n * If `include_prefixes` is specified, then each exclude-prefix must start\n with the value of a path explicitly included by `include_prefixes`.\n\nThe max size of `include_prefixes` is 1000.", + "description": "If `include_prefixes` is specified, objects that satisfy the object conditions must have names that start with one of the `include_prefixes` and that do not start with any of the exclude_prefixes. If `include_prefixes` is not specified, all objects except those that have names starting with one of the `exclude_prefixes` must satisfy the object conditions. Requirements: * Each include-prefix and exclude-prefix can contain any sequence of Unicode characters, to a max length of 1024 bytes when UTF8-encoded, and must not contain Carriage Return or Line Feed characters. Wildcard matching and regular expression matching are not supported. * Each include-prefix and exclude-prefix must omit the leading slash. For example, to include the `requests.gz` object in a transfer from `s3://my-aws-bucket/logs/y=2015/requests.gz`, specify the include prefix as `logs/y=2015/requests.gz`. * None of the include-prefix or the exclude-prefix values can be empty, if specified. * Each include-prefix must include a distinct portion of the object namespace. No include-prefix may be a prefix of another include-prefix. * Each exclude-prefix must exclude a distinct portion of the object namespace. No exclude-prefix may be a prefix of another exclude-prefix. 
* If `include_prefixes` is specified, then each exclude-prefix must start with the value of a path explicitly included by `include_prefixes`. The max size of `include_prefixes` is 1000.", "items": { "type": "string" }, "type": "array" }, "lastModifiedBefore": { - "description": "If specified, only objects with a \"last modification time\" before this\ntimestamp and objects that don't have a \"last modification time\" will be\ntransferred.", + "description": "If specified, only objects with a \"last modification time\" before this timestamp and objects that don't have a \"last modification time\" will be transferred.", "format": "google-datetime", "type": "string" }, "lastModifiedSince": { - "description": "If specified, only objects with a \"last modification time\" on or after\nthis timestamp and objects that don't have a \"last modification time\" are\ntransferred.\n\nThe `last_modified_since` and `last_modified_before` fields can be used\ntogether for chunked data processing. For example, consider a script that\nprocesses each day's worth of data at a time. For that you'd set each\nof the fields as follows:\n\n* `last_modified_since` to the start of the day\n\n* `last_modified_before` to the end of the day", + "description": "If specified, only objects with a \"last modification time\" on or after this timestamp and objects that don't have a \"last modification time\" are transferred. The `last_modified_since` and `last_modified_before` fields can be used together for chunked data processing. For example, consider a script that processes each day's worth of data at a time. For that you'd set each of the fields as follows: * `last_modified_since` to the start of the day * `last_modified_before` to the end of the day", "format": "google-datetime", "type": "string" }, "maxTimeElapsedSinceLastModification": { - "description": "If specified, only objects with a \"last modification time\" on or after\n`NOW` - `max_time_elapsed_since_last_modification` and objects that don't\nhave a \"last modification time\" are transferred.\n\nFor each TransferOperation started by this TransferJob,\n`NOW` refers to the start_time of the\n`TransferOperation`.", + "description": "If specified, only objects with a \"last modification time\" on or after `NOW` - `max_time_elapsed_since_last_modification` and objects that don't have a \"last modification time\" are transferred. For each TransferOperation started by this TransferJob, `NOW` refers to the start_time of the `TransferOperation`.", "format": "google-duration", "type": "string" }, "minTimeElapsedSinceLastModification": { - "description": "If specified, only objects with a \"last modification time\" before\n`NOW` - `min_time_elapsed_since_last_modification` and objects that don't\n have a \"last modification time\" are transferred.\n\nFor each TransferOperation started by this TransferJob, `NOW`\nrefers to the start_time of the\n`TransferOperation`.", + "description": "If specified, only objects with a \"last modification time\" before `NOW` - `min_time_elapsed_since_last_modification` and objects that don't have a \"last modification time\" are transferred. 
For each TransferOperation started by this TransferJob, `NOW` refers to the start_time of the `TransferOperation`.", "format": "google-duration", "type": "string" } @@ -721,11 +730,11 @@ "type": "object" }, "Operation": { - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", "properties": { "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", + "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", "type": "boolean" }, "error": { @@ -737,7 +746,7 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "Represents the transfer operation object.", + "description": "Represents the transfer operation object. To request a TransferOperation object, use transferOperations.get.", "type": "object" }, "name": { @@ -749,7 +758,7 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", "type": "object" } }, @@ -773,21 +782,21 @@ "properties": { "scheduleEndDate": { "$ref": "Date", - "description": "The last day a transfer runs. Date boundaries are determined relative to\nUTC time. A job will run once per 24 hours within the following guidelines:\n\n* If `schedule_end_date` and schedule_start_date are the same and in\n the future relative to UTC, the transfer is executed only one time.\n* If `schedule_end_date` is later than `schedule_start_date` and\n `schedule_end_date` is in the future relative to UTC, the job will\n run each day at start_time_of_day through `schedule_end_date`." + "description": "The last day a transfer runs. Date boundaries are determined relative to UTC time. A job will run once per 24 hours within the following guidelines: * If `schedule_end_date` and schedule_start_date are the same and in the future relative to UTC, the transfer is executed only one time. * If `schedule_end_date` is later than `schedule_start_date` and `schedule_end_date` is in the future relative to UTC, the job will run each day at start_time_of_day through `schedule_end_date`." }, "scheduleStartDate": { "$ref": "Date", - "description": "Required. 
The start date of a transfer. Date boundaries are determined\nrelative to UTC time. If `schedule_start_date` and start_time_of_day\nare in the past relative to the job's creation time, the transfer starts\nthe day after you schedule the transfer request.\n\n**Note:** When starting jobs at or near midnight UTC it is possible that\na job will start later than expected. For example, if you send an outbound\nrequest on June 1 one millisecond prior to midnight UTC and the Storage\nTransfer Service server receives the request on June 2, then it will create\na TransferJob with `schedule_start_date` set to June 2 and a\n`start_time_of_day` set to midnight UTC. The first scheduled\nTransferOperation will take place on June 3 at midnight UTC." + "description": "Required. The start date of a transfer. Date boundaries are determined relative to UTC time. If `schedule_start_date` and start_time_of_day are in the past relative to the job's creation time, the transfer starts the day after you schedule the transfer request. **Note:** When starting jobs at or near midnight UTC it is possible that a job will start later than expected. For example, if you send an outbound request on June 1 one millisecond prior to midnight UTC and the Storage Transfer Service server receives the request on June 2, then it will create a TransferJob with `schedule_start_date` set to June 2 and a `start_time_of_day` set to midnight UTC. The first scheduled TransferOperation will take place on June 3 at midnight UTC." }, "startTimeOfDay": { "$ref": "TimeOfDay", - "description": "The time in UTC that a transfer job is scheduled to run. Transfers may\nstart later than this time.\n\nIf `start_time_of_day` is not specified:\n\n* One-time transfers run immediately.\n* Recurring transfers run immediately, and each day at midnight UTC,\n through schedule_end_date.\n\nIf `start_time_of_day` is specified:\n\n* One-time transfers run at the specified time.\n* Recurring transfers run at the specified time each day, through\n `schedule_end_date`." + "description": "The time in UTC that a transfer job is scheduled to run. Transfers may start later than this time. If `start_time_of_day` is not specified: * One-time transfers run immediately. * Recurring transfers run immediately, and each day at midnight UTC, through schedule_end_date. If `start_time_of_day` is specified: * One-time transfers run at the specified time. * Recurring transfers run at the specified time each day, through `schedule_end_date`." } }, "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. 
You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { @@ -796,7 +805,7 @@ "type": "integer" }, "details": { - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -807,18 +816,18 @@ "type": "array" }, "message": { - "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, "type": "object" }, "TimeOfDay": { - "description": "Represents a time of day. The date and time zone are either not significant\nor are specified elsewhere. An API may choose to allow leap seconds. Related\ntypes are google.type.Date and `google.protobuf.Timestamp`.", + "description": "Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`.", "id": "TimeOfDay", "properties": { "hours": { - "description": "Hours of day in 24 hour format. Should be from 0 to 23. An API may choose\nto allow the value \"24:00:00\" for scenarios like business closing time.", + "description": "Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", "format": "int32", "type": "integer" }, @@ -833,7 +842,7 @@ "type": "integer" }, "seconds": { - "description": "Seconds of minutes of the time. Must normally be from 0 to 59. An API may\nallow the value 60 if it allows leap-seconds.", + "description": "Seconds of minutes of the time. Must normally be from 0 to 59. 
An API may allow the value 60 if it allows leap-seconds.", "format": "int32", "type": "integer" } @@ -865,7 +874,7 @@ "type": "string" }, "bytesFoundFromSource": { - "description": "Bytes found in the data source that are scheduled to be transferred,\nexcluding any that are filtered based on object conditions or skipped due\nto sync.", + "description": "Bytes found in the data source that are scheduled to be transferred, excluding any that are filtered based on object conditions or skipped due to sync.", "format": "int64", "type": "string" }, @@ -875,12 +884,12 @@ "type": "string" }, "bytesFromSourceFailed": { - "description": "Bytes in the data source that failed to be transferred or that failed to\nbe deleted after being transferred.", + "description": "Bytes in the data source that failed to be transferred or that failed to be deleted after being transferred.", "format": "int64", "type": "string" }, "bytesFromSourceSkippedBySync": { - "description": "Bytes in the data source that are not transferred because they already\nexist in the data sink.", + "description": "Bytes in the data source that are not transferred because they already exist in the data sink.", "format": "int64", "type": "string" }, @@ -905,7 +914,7 @@ "type": "string" }, "objectsFoundFromSource": { - "description": "Objects found in the data source that are scheduled to be transferred,\nexcluding any that are filtered based on object conditions or skipped due\nto sync.", + "description": "Objects found in the data source that are scheduled to be transferred, excluding any that are filtered based on object conditions or skipped due to sync.", "format": "int64", "type": "string" }, @@ -915,12 +924,12 @@ "type": "string" }, "objectsFromSourceFailed": { - "description": "Objects in the data source that failed to be transferred or that failed\nto be deleted after being transferred.", + "description": "Objects in the data source that failed to be transferred or that failed to be deleted after being transferred.", "format": "int64", "type": "string" }, "objectsFromSourceSkippedBySync": { - "description": "Objects in the data source that are not transferred because they already\nexist in the data sink.", + "description": "Objects in the data source that are not transferred because they already exist in the data sink.", "format": "int64", "type": "string" } @@ -928,30 +937,33 @@ "type": "object" }, "TransferJob": { - "description": "This resource represents the configuration of a transfer job that runs\nperiodically.", + "description": "This resource represents the configuration of a transfer job that runs periodically.", "id": "TransferJob", "properties": { "creationTime": { "description": "Output only. The time that the transfer job was created.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "deletionTime": { "description": "Output only. The time that the transfer job was deleted.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "description": { - "description": "A description provided by the user for the job. Its max length is 1024\nbytes when Unicode-encoded.", + "description": "A description provided by the user for the job. Its max length is 1024 bytes when Unicode-encoded.", "type": "string" }, "lastModificationTime": { "description": "Output only. The time that the transfer job was last modified.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "name": { - "description": "A unique name (within the transfer project) assigned when the job is\ncreated. 
If this field is empty in a CreateTransferJobRequest, Storage\nTransfer Service will assign a unique name. Otherwise, the specified name\nis used as the unique name for this job.\n\nIf the specified name is in use by a job, the creation request fails with\nan ALREADY_EXISTS error.\n\nThis name must start with `\"transferJobs/\"` prefix and end with a letter or\na number, and should be no more than 128 characters.\nExample: `\"transferJobs/[A-Za-z0-9-._~]*[A-Za-z0-9]$\"`\n\nInvalid job names will fail with an\nINVALID_ARGUMENT error.", + "description": "A unique name (within the transfer project) assigned when the job is created. If this field is empty in a CreateTransferJobRequest, Storage Transfer Service will assign a unique name. Otherwise, the specified name is used as the unique name for this job. If the specified name is in use by a job, the creation request fails with an ALREADY_EXISTS error. This name must start with `\"transferJobs/\"` prefix and end with a letter or a number, and should be no more than 128 characters. Example: `\"transferJobs/[A-Za-z0-9-._~]*[A-Za-z0-9]$\"` Invalid job names will fail with an INVALID_ARGUMENT error.", "type": "string" }, "notificationConfig": { @@ -967,7 +979,7 @@ "description": "Schedule specification." }, "status": { - "description": "Status of the job. This value MUST be specified for\n`CreateTransferJobRequests`.\n\n**Note:** The effect of the new job status takes place during a subsequent\njob run. For example, if you change the job status from\nENABLED to DISABLED, and an operation\nspawned by the transfer is running, the status change would not affect the\ncurrent operation.", + "description": "Status of the job. This value MUST be specified for `CreateTransferJobRequests`. **Note:** The effect of the new job status takes place during a subsequent job run. For example, if you change the job status from ENABLED to DISABLED, and an operation spawned by the transfer is running, the status change would not affect the current operation.", "enum": [ "STATUS_UNSPECIFIED", "ENABLED", @@ -978,7 +990,7 @@ "Zero is an illegal value.", "New transfers will be performed based on the schedule.", "New transfers will not be scheduled.", - "This is a soft delete state. After a transfer job is set to this\nstate, the job and all the transfer executions are subject to\ngarbage collection. Transfer jobs become eligible for garbage collection\n30 days after their status is set to `DELETED`." + "This is a soft delete state. After a transfer job is set to this state, the job and all the transfer executions are subject to garbage collection. Transfer jobs become eligible for garbage collection 30 days after their status is set to `DELETED`." ], "type": "string" }, @@ -1060,15 +1072,15 @@ "type": "object" }, "TransferOptions": { - "description": "TransferOptions uses three boolean parameters to define the actions\nto be performed on objects in a transfer.", + "description": "TransferOptions define the actions to be performed on objects in a transfer.", "id": "TransferOptions", "properties": { "deleteObjectsFromSourceAfterTransfer": { - "description": "Whether objects should be deleted from the source after they are\ntransferred to the sink.\n\n**Note:** This option and delete_objects_unique_in_sink are mutually\nexclusive.", + "description": "Whether objects should be deleted from the source after they are transferred to the sink. 
**Note:** This option and delete_objects_unique_in_sink are mutually exclusive.", "type": "boolean" }, "deleteObjectsUniqueInSink": { - "description": "Whether objects that exist only in the sink should be deleted.\n\n**Note:** This option and delete_objects_from_source_after_transfer are\nmutually exclusive.", + "description": "Whether objects that exist only in the sink should be deleted. **Note:** This option and delete_objects_from_source_after_transfer are mutually exclusive.", "type": "boolean" }, "overwriteObjectsAlreadyExistingInSink": { @@ -1104,11 +1116,11 @@ }, "objectConditions": { "$ref": "ObjectConditions", - "description": "Only objects that satisfy these object conditions are included in the set\nof data source and data sink objects. Object conditions based on\nobjects' \"last modification time\" do not exclude objects in a data sink." + "description": "Only objects that satisfy these object conditions are included in the set of data source and data sink objects. Object conditions based on objects' \"last modification time\" do not exclude objects in a data sink." }, "transferOptions": { "$ref": "TransferOptions", - "description": "If the option\ndelete_objects_unique_in_sink\nis `true`, object conditions based on objects' \"last modification time\" are\nignored and do not exclude objects in a data source or a data sink." + "description": "If the option delete_objects_unique_in_sink is `true` and time-based object conditions such as 'last modification time' are specified, the request fails with an INVALID_ARGUMENT error." } }, "type": "object" @@ -1118,15 +1130,15 @@ "id": "UpdateTransferJobRequest", "properties": { "projectId": { - "description": "Required. The ID of the Google Cloud Platform Console project that owns the\njob.", + "description": "Required. The ID of the Google Cloud Platform Console project that owns the job.", "type": "string" }, "transferJob": { "$ref": "TransferJob", - "description": "Required. The job to update. `transferJob` is expected to specify only\nfour fields: description,\ntransfer_spec,\nnotification_config, and\nstatus. An `UpdateTransferJobRequest` that specifies\nother fields will be rejected with the error\nINVALID_ARGUMENT." + "description": "Required. The job to update. `transferJob` is expected to specify only four fields: description, transfer_spec, notification_config, and status. An `UpdateTransferJobRequest` that specifies other fields will be rejected with the error INVALID_ARGUMENT. Updating a job satus to DELETED requires `storagetransfer.jobs.delete` permissions." }, "updateTransferJobFieldMask": { - "description": "The field mask of the fields in `transferJob` that are to be updated in\nthis request. Fields in `transferJob` that can be updated are:\ndescription,\ntransfer_spec,\nnotification_config, and\nstatus. To update the `transfer_spec` of the job, a\ncomplete transfer specification must be provided. An incomplete\nspecification missing any required fields will be rejected with the error\nINVALID_ARGUMENT.", + "description": "The field mask of the fields in `transferJob` that are to be updated in this request. Fields in `transferJob` that can be updated are: description, transfer_spec, notification_config, and status. To update the `transfer_spec` of the job, a complete transfer specification must be provided. 
An incomplete specification missing any required fields will be rejected with the error INVALID_ARGUMENT.", "format": "google-fieldmask", "type": "string" } diff --git a/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-gen.go b/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-gen.go index 226360af9c1..41abc1af518 100644 --- a/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-gen.go +++ b/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-gen.go @@ -75,6 +75,7 @@ const apiId = "storagetransfer:v1" const apiName = "storagetransfer" const apiVersion = "v1" const basePath = "https://storagetransfer.googleapis.com/" +const mtlsBasePath = "https://storagetransfer.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -90,6 +91,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -166,18 +168,15 @@ type TransferOperationsService struct { s *Service } -// AwsAccessKey: AWS access key (see -// [AWS -// Security -// Credentials](https://docs.aws.amazon.com/general/latest/gr/aw -// s-security-credentials.html)). +// AwsAccessKey: AWS access key (see [AWS Security +// Credentials](https://docs.aws.amazon.com/general/latest/gr/aws-securit +// y-credentials.html)). type AwsAccessKey struct { // AccessKeyId: Required. AWS access key ID. AccessKeyId string `json:"accessKeyId,omitempty"` // SecretAccessKey: Required. AWS secret access key. This field is not - // returned in RPC - // responses. + // returned in RPC responses. SecretAccessKey string `json:"secretAccessKey,omitempty"` // ForceSendFields is a list of field names (e.g. "AccessKeyId") to @@ -204,22 +203,17 @@ func (s *AwsAccessKey) MarshalJSON() ([]byte, error) { } // AwsS3Data: An AwsS3Data resource can be a data source, but not a data -// sink. -// In an AwsS3Data resource, an object's name is the S3 object's key -// name. +// sink. In an AwsS3Data resource, an object's name is the S3 object's +// key name. type AwsS3Data struct { - // AwsAccessKey: Required. AWS access key used to sign the API requests - // to the AWS S3 - // bucket. Permissions on the bucket must be granted to the access ID of - // the - // AWS access key. + // AwsAccessKey: Required. Input only. AWS access key used to sign the + // API requests to the AWS S3 bucket. Permissions on the bucket must be + // granted to the access ID of the AWS access key. AwsAccessKey *AwsAccessKey `json:"awsAccessKey,omitempty"` - // BucketName: Required. S3 Bucket name (see - // [Creating - // a - // bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucke - // t-get-location-example.html)). + // BucketName: Required. S3 Bucket name (see [Creating a + // bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket- + // get-location-example.html)). BucketName string `json:"bucketName,omitempty"` // ForceSendFields is a list of field names (e.g. "AwsAccessKey") to @@ -246,22 +240,18 @@ func (s *AwsS3Data) MarshalJSON() ([]byte, error) { } // AzureBlobStorageData: An AzureBlobStorageData resource can be a data -// source, but not a data sink. -// An AzureBlobStorageData resource represents one Azure container. 
The -// storage -// account determines the +// source, but not a data sink. An AzureBlobStorageData resource +// represents one Azure container. The storage account determines the // [Azure -// endpoint](https://docs.microsoft.com/en-us/azure/storage/common -// /storage-create-storage-account#storage-account-endpoints). -// In an AzureBlobStorageData resource, a blobs's name is the [Azure -// Blob -// Storage blob's -// key -// name](https://docs.microsoft.com/en-us/rest/api/storageservices/na -// ming-and-referencing-containers--blobs--and-metadata#blob-names). +// endpoint](https://docs.microsoft.com/en-us/azure/storage/common/storag +// e-create-storage-account#storage-account-endpoints). In an +// AzureBlobStorageData resource, a blobs's name is the [Azure Blob +// Storage blob's key +// name](https://docs.microsoft.com/en-us/rest/api/storageservices/naming +// -and-referencing-containers--blobs--and-metadata#blob-names). type AzureBlobStorageData struct { - // AzureCredentials: Required. Credentials used to authenticate API - // requests to Azure. + // AzureCredentials: Required. Input only. Credentials used to + // authenticate API requests to Azure. AzureCredentials *AzureCredentials `json:"azureCredentials,omitempty"` // Container: Required. The container to transfer from the Azure Storage @@ -297,12 +287,11 @@ func (s *AzureBlobStorageData) MarshalJSON() ([]byte, error) { // AzureCredentials: Azure credentials type AzureCredentials struct { - // SasToken: Required. Azure shared access signature. (see - // [Grant limited access to Azure Storage resources using shared - // access + // SasToken: Required. Azure shared access signature. (see [Grant + // limited access to Azure Storage resources using shared access // signatures - // (SAS)](https://docs.microsoft.com/en-us/azure/storag - // e/common/storage-sas-overview)). + // (SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-s + // as-overview)). SasToken string `json:"sasToken,omitempty"` // ForceSendFields is a list of field names (e.g. "SasToken") to @@ -328,37 +317,32 @@ func (s *AzureCredentials) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// CancelOperationRequest: The request message for +// Operations.CancelOperation. +type CancelOperationRequest struct { +} + // Date: Represents a whole or partial calendar date, e.g. a birthday. -// The time of day -// and time zone are either specified elsewhere or are not significant. -// The date -// is relative to the Proleptic Gregorian Calendar. This can -// represent: -// -// * A full date, with non-zero year, month and day values -// * A month and day value, with a zero year, e.g. an anniversary -// * A year on its own, with zero month and day values -// * A year and month value, with a zero day, e.g. a credit card -// expiration date -// -// Related types are google.type.TimeOfDay and +// The time of day and time zone are either specified elsewhere or are +// not significant. The date is relative to the Proleptic Gregorian +// Calendar. This can represent: * A full date, with non-zero year, +// month and day values * A month and day value, with a zero year, e.g. +// an anniversary * A year on its own, with zero month and day values * +// A year and month value, with a zero day, e.g. a credit card +// expiration date Related types are google.type.TimeOfDay and // `google.protobuf.Timestamp`. type Date struct { // Day: Day of month. 
Must be from 1 to 31 and valid for the year and - // month, or 0 - // if specifying a year by itself or a year and month where the day is - // not - // significant. + // month, or 0 if specifying a year by itself or a year and month where + // the day is not significant. Day int64 `json:"day,omitempty"` // Month: Month of year. Must be from 1 to 12, or 0 if specifying a year - // without a - // month and day. + // without a month and day. Month int64 `json:"month,omitempty"` // Year: Year of date. Must be from 1 to 9999, or 0 if specifying a date - // without - // a year. + // without a year. Year int64 `json:"year,omitempty"` // ForceSendFields is a list of field names (e.g. "Day") to @@ -385,17 +369,11 @@ func (s *Date) MarshalJSON() ([]byte, error) { } // Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } The JSON representation for `Empty` is +// empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -408,8 +386,7 @@ type ErrorLogEntry struct { ErrorDetails []string `json:"errorDetails,omitempty"` // Url: Required. A URL that refers to the target (a data source, a data - // sink, - // or an object) with which the error is associated. + // sink, or an object) with which the error is associated. Url string `json:"url,omitempty"` // ForceSendFields is a list of field names (e.g. "ErrorDetails") to @@ -436,175 +413,105 @@ func (s *ErrorLogEntry) MarshalJSON() ([]byte, error) { } // ErrorSummary: A summary of errors by error code, plus a count and -// sample error log -// entries. +// sample error log entries. type ErrorSummary struct { // ErrorCode: Required. // // Possible values: - // "OK" - Not an error; returned on success - // - // HTTP Mapping: 200 OK - // "CANCELLED" - The operation was cancelled, typically by the - // caller. - // + // "OK" - Not an error; returned on success HTTP Mapping: 200 OK + // "CANCELLED" - The operation was cancelled, typically by the caller. // HTTP Mapping: 499 Client Closed Request - // "UNKNOWN" - Unknown error. For example, this error may be returned - // when - // a `Status` value received from another address space belongs to - // an error space that is not known in this address space. Also - // errors raised by APIs that do not return enough error information - // may be converted to this error. - // - // HTTP Mapping: 500 Internal Server Error - // "INVALID_ARGUMENT" - The client specified an invalid argument. - // Note that this differs - // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates - // arguments - // that are problematic regardless of the state of the system - // (e.g., a malformed file name). - // - // HTTP Mapping: 400 Bad Request + // "UNKNOWN" - Unknown error. For example, this error may be returned + // when a `Status` value received from another address space belongs to + // an error space that is not known in this address space. 
Also errors + // raised by APIs that do not return enough error information may be + // converted to this error. HTTP Mapping: 500 Internal Server Error + // "INVALID_ARGUMENT" - The client specified an invalid argument. Note + // that this differs from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` + // indicates arguments that are problematic regardless of the state of + // the system (e.g., a malformed file name). HTTP Mapping: 400 Bad + // Request // "DEADLINE_EXCEEDED" - The deadline expired before the operation - // could complete. For operations - // that change the state of the system, this error may be returned - // even if the operation has completed successfully. For example, - // a - // successful response from a server could have been delayed long - // enough for the deadline to expire. - // - // HTTP Mapping: 504 Gateway Timeout + // could complete. For operations that change the state of the system, + // this error may be returned even if the operation has completed + // successfully. For example, a successful response from a server could + // have been delayed long enough for the deadline to expire. HTTP + // Mapping: 504 Gateway Timeout // "NOT_FOUND" - Some requested entity (e.g., file or directory) was - // not found. - // - // Note to server developers: if a request is denied for an entire - // class - // of users, such as gradual feature rollout or undocumented - // whitelist, - // `NOT_FOUND` may be used. If a request is denied for some users - // within - // a class of users, such as user-based access control, - // `PERMISSION_DENIED` - // must be used. - // - // HTTP Mapping: 404 Not Found + // not found. Note to server developers: if a request is denied for an + // entire class of users, such as gradual feature rollout or + // undocumented allowlist, `NOT_FOUND` may be used. If a request is + // denied for some users within a class of users, such as user-based + // access control, `PERMISSION_DENIED` must be used. HTTP Mapping: 404 + // Not Found // "ALREADY_EXISTS" - The entity that a client attempted to create - // (e.g., file or directory) - // already exists. - // - // HTTP Mapping: 409 Conflict + // (e.g., file or directory) already exists. HTTP Mapping: 409 Conflict // "PERMISSION_DENIED" - The caller does not have permission to - // execute the specified - // operation. `PERMISSION_DENIED` must not be used for rejections - // caused by exhausting some resource (use `RESOURCE_EXHAUSTED` - // instead for those errors). `PERMISSION_DENIED` must not be - // used if the caller can not be identified (use - // `UNAUTHENTICATED` - // instead for those errors). This error code does not imply the - // request is valid or the requested entity exists or satisfies - // other pre-conditions. - // - // HTTP Mapping: 403 Forbidden + // execute the specified operation. `PERMISSION_DENIED` must not be used + // for rejections caused by exhausting some resource (use + // `RESOURCE_EXHAUSTED` instead for those errors). `PERMISSION_DENIED` + // must not be used if the caller can not be identified (use + // `UNAUTHENTICATED` instead for those errors). This error code does not + // imply the request is valid or the requested entity exists or + // satisfies other pre-conditions. HTTP Mapping: 403 Forbidden // "UNAUTHENTICATED" - The request does not have valid authentication - // credentials for the - // operation. - // - // HTTP Mapping: 401 Unauthorized + // credentials for the operation. 
HTTP Mapping: 401 Unauthorized // "RESOURCE_EXHAUSTED" - Some resource has been exhausted, perhaps a - // per-user quota, or - // perhaps the entire file system is out of space. - // + // per-user quota, or perhaps the entire file system is out of space. // HTTP Mapping: 429 Too Many Requests // "FAILED_PRECONDITION" - The operation was rejected because the - // system is not in a state - // required for the operation's execution. For example, the - // directory - // to be deleted is non-empty, an rmdir operation is applied to - // a non-directory, etc. - // - // Service implementors can use the following guidelines to - // decide - // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: - // (a) Use `UNAVAILABLE` if the client can retry just the failing - // call. - // (b) Use `ABORTED` if the client should retry at a higher level - // (e.g., when a client-specified test-and-set fails, indicating - // the - // client should restart a read-modify-write sequence). - // (c) Use `FAILED_PRECONDITION` if the client should not retry until - // the system state has been explicitly fixed. E.g., if an - // "rmdir" - // fails because the directory is non-empty, `FAILED_PRECONDITION` - // should be returned since the client should not retry unless - // the files are deleted from the directory. - // - // HTTP Mapping: 400 Bad Request + // system is not in a state required for the operation's execution. For + // example, the directory to be deleted is non-empty, an rmdir operation + // is applied to a non-directory, etc. Service implementors can use the + // following guidelines to decide between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`: (a) Use `UNAVAILABLE` if the client can + // retry just the failing call. (b) Use `ABORTED` if the client should + // retry at a higher level (e.g., when a client-specified test-and-set + // fails, indicating the client should restart a read-modify-write + // sequence). (c) Use `FAILED_PRECONDITION` if the client should not + // retry until the system state has been explicitly fixed. E.g., if an + // "rmdir" fails because the directory is non-empty, + // `FAILED_PRECONDITION` should be returned since the client should not + // retry unless the files are deleted from the directory. HTTP Mapping: + // 400 Bad Request // "ABORTED" - The operation was aborted, typically due to a - // concurrency issue such as - // a sequencer check failure or transaction abort. - // - // See the guidelines above for deciding between - // `FAILED_PRECONDITION`, - // `ABORTED`, and `UNAVAILABLE`. - // - // HTTP Mapping: 409 Conflict + // concurrency issue such as a sequencer check failure or transaction + // abort. See the guidelines above for deciding between + // `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. HTTP Mapping: + // 409 Conflict // "OUT_OF_RANGE" - The operation was attempted past the valid range. - // E.g., seeking or - // reading past end-of-file. - // - // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may - // be fixed if the system state changes. For example, a 32-bit - // file - // system will generate `INVALID_ARGUMENT` if asked to read at an - // offset that is not in the range [0,2^32-1], but it will - // generate - // `OUT_OF_RANGE` if asked to read from an offset past the current - // file size. - // - // There is a fair bit of overlap between `FAILED_PRECONDITION` - // and - // `OUT_OF_RANGE`. 
We recommend using `OUT_OF_RANGE` (the more - // specific - // error) when it applies so that callers who are iterating through - // a space can easily look for an `OUT_OF_RANGE` error to detect - // when - // they are done. - // - // HTTP Mapping: 400 Bad Request + // E.g., seeking or reading past end-of-file. Unlike `INVALID_ARGUMENT`, + // this error indicates a problem that may be fixed if the system state + // changes. For example, a 32-bit file system will generate + // `INVALID_ARGUMENT` if asked to read at an offset that is not in the + // range [0,2^32-1], but it will generate `OUT_OF_RANGE` if asked to + // read from an offset past the current file size. There is a fair bit + // of overlap between `FAILED_PRECONDITION` and `OUT_OF_RANGE`. We + // recommend using `OUT_OF_RANGE` (the more specific error) when it + // applies so that callers who are iterating through a space can easily + // look for an `OUT_OF_RANGE` error to detect when they are done. HTTP + // Mapping: 400 Bad Request // "UNIMPLEMENTED" - The operation is not implemented or is not - // supported/enabled in this - // service. - // - // HTTP Mapping: 501 Not Implemented - // "INTERNAL" - Internal errors. This means that some invariants - // expected by the - // underlying system have been broken. This error code is reserved - // for serious errors. - // - // HTTP Mapping: 500 Internal Server Error - // "UNAVAILABLE" - The service is currently unavailable. This is most - // likely a - // transient condition, which can be corrected by retrying with - // a backoff. Note that it is not always safe to retry - // non-idempotent operations. - // - // See the guidelines above for deciding between - // `FAILED_PRECONDITION`, - // `ABORTED`, and `UNAVAILABLE`. - // - // HTTP Mapping: 503 Service Unavailable - // "DATA_LOSS" - Unrecoverable data loss or corruption. - // - // HTTP Mapping: 500 Internal Server Error + // supported/enabled in this service. HTTP Mapping: 501 Not Implemented + // "INTERNAL" - Internal errors. This means that some invariants + // expected by the underlying system have been broken. This error code + // is reserved for serious errors. HTTP Mapping: 500 Internal Server + // Error + // "UNAVAILABLE" - The service is currently unavailable. This is most + // likely a transient condition, which can be corrected by retrying with + // a backoff. Note that it is not always safe to retry non-idempotent + // operations. See the guidelines above for deciding between + // `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. HTTP Mapping: + // 503 Service Unavailable + // "DATA_LOSS" - Unrecoverable data loss or corruption. HTTP Mapping: + // 500 Internal Server Error ErrorCode string `json:"errorCode,omitempty"` // ErrorCount: Required. Count of this type of error. ErrorCount int64 `json:"errorCount,omitempty,string"` - // ErrorLogEntries: Error samples. - // - // At most 5 error log entries will be recorded for a given - // error code for a single transfer operation. + // ErrorLogEntries: Error samples. At most 5 error log entries will be + // recorded for a given error code for a single transfer operation. ErrorLogEntries []*ErrorLogEntry `json:"errorLogEntries,omitempty"` // ForceSendFields is a list of field names (e.g. 
"ErrorCode") to @@ -631,18 +538,13 @@ func (s *ErrorSummary) MarshalJSON() ([]byte, error) { } // GcsData: In a GcsData resource, an object's name is the Cloud Storage -// object's -// name and its "last modification time" refers to the object's -// `updated` -// property of Cloud Storage objects, which changes when the content or -// the -// metadata of the object is updated. +// object's name and its "last modification time" refers to the object's +// `updated` property of Cloud Storage objects, which changes when the +// content or the metadata of the object is updated. type GcsData struct { - // BucketName: Required. Cloud Storage bucket name (see - // [Bucket - // Name - // Requirements](https://cloud.google.com/storage/docs/naming#requir - // ements)). + // BucketName: Required. Cloud Storage bucket name (see [Bucket Name + // Requirements](https://cloud.google.com/storage/docs/naming#requirement + // s)). BucketName string `json:"bucketName,omitempty"` // ForceSendFields is a list of field names (e.g. "BucketName") to @@ -701,73 +603,36 @@ func (s *GoogleServiceAccount) MarshalJSON() ([]byte, error) { } // HttpData: An HttpData resource specifies a list of objects on the web -// to be transferred -// over HTTP. The information of the objects to be transferred is -// contained in -// a file referenced by a URL. The first line in the file must -// be -// "TsvHttpData-1.0", which specifies the format of the file. -// Subsequent -// lines specify the information of the list of objects, one object per -// list -// entry. Each entry has the following tab-delimited fields: -// -// * **HTTP URL** — The location of the object. -// -// * **Length** — The size of the object in bytes. -// -// * **MD5** — The base64-encoded MD5 hash of the object. -// -// For an example of a valid TSV file, see -// [Transferring data -// from -// URLs](https://cloud.google.com/storage-transfer/docs/create-url-l -// ist). -// -// When transferring data based on a URL list, keep the following in -// mind: -// -// * When an object located at `http(s)://hostname:port/` -// is +// to be transferred over HTTP. The information of the objects to be +// transferred is contained in a file referenced by a URL. The first +// line in the file must be "TsvHttpData-1.0", which specifies the +// format of the file. Subsequent lines specify the information of the +// list of objects, one object per list entry. Each entry has the +// following tab-delimited fields: * **HTTP URL** — The location of +// the object. * **Length** — The size of the object in bytes. * +// **MD5** — The base64-encoded MD5 hash of the object. For an example +// of a valid TSV file, see [Transferring data from +// URLs](https://cloud.google.com/storage-transfer/docs/create-url-list). +// When transferring data based on a URL list, keep the following in +// mind: * When an object located at `http(s)://hostname:port/` is // transferred to a data sink, the name of the object at the data sink -// is -// `/`. -// -// * If the specified size of an object does not match the actual size -// of the -// object fetched, the object will not be transferred. -// -// * If the specified MD5 does not match the MD5 computed from the -// transferred -// bytes, the object transfer will fail. For more information, -// see -// [Generating -// MD5 -// hashes](https://cloud.google.com/storage-transfer/docs/create-url- -// list#md5) -// -// * Ensure that each URL you specify is publicly accessible. 
-// For -// example, in Cloud Storage you can -// [share an object -// publicly] -// (https://cloud.google.com/storage/docs/cloud-console#_sharin -// gdata) and get -// a link to it. -// -// * Storage Transfer Service obeys `robots.txt` rules and requires the -// source -// HTTP server to support `Range` requests and to return a -// `Content-Length` -// header in each response. -// -// * ObjectConditions have no effect when filtering objects to transfer. +// is `/`. * If the specified size of an object does not match the +// actual size of the object fetched, the object will not be +// transferred. * If the specified MD5 does not match the MD5 computed +// from the transferred bytes, the object transfer will fail. For more +// information, see [Generating MD5 +// hashes](https://cloud.google.com/storage-transfer/docs/create-url-list +// #md5) * Ensure that each URL you specify is publicly accessible. For +// example, in Cloud Storage you can [share an object publicly] +// (https://cloud.google.com/storage/docs/cloud-console#_sharingdata) +// and get a link to it. * Storage Transfer Service obeys `robots.txt` +// rules and requires the source HTTP server to support `Range` requests +// and to return a `Content-Length` header in each response. * +// ObjectConditions have no effect when filtering objects to transfer. type HttpData struct { // ListUrl: Required. The URL that points to the file that stores the - // object list - // entries. This file must allow public access. Currently, only URLs - // with - // HTTP and HTTPS schemes are supported. + // object list entries. This file must allow public access. Currently, + // only URLs with HTTP and HTTPS schemes are supported. ListUrl string `json:"listUrl,omitempty"` // ForceSendFields is a list of field names (e.g. "ListUrl") to @@ -866,41 +731,28 @@ func (s *ListTransferJobsResponse) MarshalJSON() ([]byte, error) { } // NotificationConfig: Specification to configure notifications -// published to Cloud Pub/Sub. -// Notifications will be published to the customer-provided topic using -// the -// following `PubsubMessage.attributes`: -// -// * "eventType": one of the EventType values -// * "payloadFormat": one of the PayloadFormat values -// * "projectId": the project_id of the -// `TransferOperation` -// * "transferJobName": the -// transfer_job_name of the -// `TransferOperation` -// * "transferOperationName": the name of the -// `TransferOperation` -// -// The `PubsubMessage.data` will contain a TransferOperation -// resource +// published to Cloud Pub/Sub. Notifications will be published to the +// customer-provided topic using the following +// `PubsubMessage.attributes`: * "eventType": one of the EventType +// values * "payloadFormat": one of the PayloadFormat values * +// "projectId": the project_id of the `TransferOperation` * +// "transferJobName": the transfer_job_name of the `TransferOperation` +// * "transferOperationName": the name of the `TransferOperation` The +// `PubsubMessage.data` will contain a TransferOperation resource // formatted according to the specified `PayloadFormat`. type NotificationConfig struct { // EventTypes: Event types for which a notification is desired. If - // empty, send - // notifications for all event types. + // empty, send notifications for all event types. // // Possible values: // "EVENT_TYPE_UNSPECIFIED" - Illegal value, to avoid allowing a // default. // "TRANSFER_OPERATION_SUCCESS" - `TransferOperation` completed with - // status - // SUCCESS. + // status SUCCESS. 
// "TRANSFER_OPERATION_FAILED" - `TransferOperation` completed with - // status - // FAILED. + // status FAILED. // "TRANSFER_OPERATION_ABORTED" - `TransferOperation` completed with - // status - // ABORTED. + // status ABORTED. EventTypes []string `json:"eventTypes,omitempty"` // PayloadFormat: Required. The desired format of the notification @@ -910,19 +762,15 @@ type NotificationConfig struct { // "PAYLOAD_FORMAT_UNSPECIFIED" - Illegal value, to avoid allowing a // default. // "NONE" - No payload is included with the notification. - // "JSON" - `TransferOperation` is [formatted as a - // JSON - // response](https://developers.google.com/protocol-buffers/docs/pro - // to3#json), - // in application/json. + // "JSON" - `TransferOperation` is [formatted as a JSON + // response](https://developers.google.com/protocol-buffers/docs/proto3#j + // son), in application/json. PayloadFormat string `json:"payloadFormat,omitempty"` // PubsubTopic: Required. The `Topic.name` of the Cloud Pub/Sub topic to - // which to publish - // notifications. Must be of the format: - // `projects/{project}/topics/{topic}`. - // Not matching this format will result in an - // INVALID_ARGUMENT error. + // which to publish notifications. Must be of the format: + // `projects/{project}/topics/{topic}`. Not matching this format will + // result in an INVALID_ARGUMENT error. PubsubTopic string `json:"pubsubTopic,omitempty"` // ForceSendFields is a list of field names (e.g. "EventTypes") to @@ -949,121 +797,72 @@ func (s *NotificationConfig) MarshalJSON() ([]byte, error) { } // ObjectConditions: Conditions that determine which objects will be -// transferred. Applies only -// to S3 and Cloud Storage objects. -// -// The "last modification time" refers to the time of the -// last change to the object's content or metadata — specifically, -// this is -// the `updated` property of Cloud Storage objects and the -// `LastModified` -// field of S3 objects. +// transferred. Applies only to Cloud Data Sources such as S3, Azure, +// and Cloud Storage. The "last modification time" refers to the time of +// the last change to the object's content or metadata — specifically, +// this is the `updated` property of Cloud Storage objects, the +// `LastModified` field of S3 objects, and the `Last-Modified` header of +// Azure blobs. type ObjectConditions struct { // ExcludePrefixes: `exclude_prefixes` must follow the requirements - // described for - // include_prefixes. - // - // The max size of `exclude_prefixes` is 1000. + // described for include_prefixes. The max size of `exclude_prefixes` is + // 1000. ExcludePrefixes []string `json:"excludePrefixes,omitempty"` // IncludePrefixes: If `include_prefixes` is specified, objects that - // satisfy the object - // conditions must have names that start with one of the - // `include_prefixes` - // and that do not start with any of the exclude_prefixes. - // If - // `include_prefixes` is not specified, all objects except those that - // have - // names starting with one of the `exclude_prefixes` must satisfy the - // object - // conditions. - // - // Requirements: - // - // * Each include-prefix and exclude-prefix can contain any sequence - // of - // Unicode characters, to a max length of 1024 bytes when - // UTF8-encoded, - // and must not contain Carriage Return or Line Feed characters. - // Wildcard - // matching and regular expression matching are not supported. - // - // * Each include-prefix and exclude-prefix must omit the leading - // slash. 
- // For example, to include the `requests.gz` object in a transfer - // from - // `s3://my-aws-bucket/logs/y=2015/requests.gz`, specify the - // include - // prefix as `logs/y=2015/requests.gz`. - // - // * None of the include-prefix or the exclude-prefix values can be - // empty, - // if specified. - // - // * Each include-prefix must include a distinct portion of the - // object - // namespace. No include-prefix may be a prefix of another - // include-prefix. - // - // * Each exclude-prefix must exclude a distinct portion of the - // object - // namespace. No exclude-prefix may be a prefix of another - // exclude-prefix. - // - // * If `include_prefixes` is specified, then each exclude-prefix must - // start - // with the value of a path explicitly included by - // `include_prefixes`. - // - // The max size of `include_prefixes` is 1000. + // satisfy the object conditions must have names that start with one of + // the `include_prefixes` and that do not start with any of the + // exclude_prefixes. If `include_prefixes` is not specified, all objects + // except those that have names starting with one of the + // `exclude_prefixes` must satisfy the object conditions. Requirements: + // * Each include-prefix and exclude-prefix can contain any sequence of + // Unicode characters, to a max length of 1024 bytes when UTF8-encoded, + // and must not contain Carriage Return or Line Feed characters. + // Wildcard matching and regular expression matching are not supported. + // * Each include-prefix and exclude-prefix must omit the leading slash. + // For example, to include the `requests.gz` object in a transfer from + // `s3://my-aws-bucket/logs/y=2015/requests.gz`, specify the include + // prefix as `logs/y=2015/requests.gz`. * None of the include-prefix or + // the exclude-prefix values can be empty, if specified. * Each + // include-prefix must include a distinct portion of the object + // namespace. No include-prefix may be a prefix of another + // include-prefix. * Each exclude-prefix must exclude a distinct portion + // of the object namespace. No exclude-prefix may be a prefix of another + // exclude-prefix. * If `include_prefixes` is specified, then each + // exclude-prefix must start with the value of a path explicitly + // included by `include_prefixes`. The max size of `include_prefixes` is + // 1000. IncludePrefixes []string `json:"includePrefixes,omitempty"` // LastModifiedBefore: If specified, only objects with a "last - // modification time" before this - // timestamp and objects that don't have a "last modification time" will - // be - // transferred. + // modification time" before this timestamp and objects that don't have + // a "last modification time" will be transferred. LastModifiedBefore string `json:"lastModifiedBefore,omitempty"` // LastModifiedSince: If specified, only objects with a "last - // modification time" on or after - // this timestamp and objects that don't have a "last modification time" - // are - // transferred. - // - // The `last_modified_since` and `last_modified_before` fields can be - // used + // modification time" on or after this timestamp and objects that don't + // have a "last modification time" are transferred. The + // `last_modified_since` and `last_modified_before` fields can be used // together for chunked data processing. For example, consider a script - // that - // processes each day's worth of data at a time. 
For that you'd set - // each - // of the fields as follows: - // - // * `last_modified_since` to the start of the day - // - // * `last_modified_before` to the end of the day + // that processes each day's worth of data at a time. For that you'd set + // each of the fields as follows: * `last_modified_since` to the start + // of the day * `last_modified_before` to the end of the day LastModifiedSince string `json:"lastModifiedSince,omitempty"` // MaxTimeElapsedSinceLastModification: If specified, only objects with - // a "last modification time" on or after - // `NOW` - `max_time_elapsed_since_last_modification` and objects that - // don't - // have a "last modification time" are transferred. - // - // For each TransferOperation started by this TransferJob, - // `NOW` refers to the start_time of the - // `TransferOperation`. + // a "last modification time" on or after `NOW` - + // `max_time_elapsed_since_last_modification` and objects that don't + // have a "last modification time" are transferred. For each + // TransferOperation started by this TransferJob, `NOW` refers to the + // start_time of the `TransferOperation`. MaxTimeElapsedSinceLastModification string `json:"maxTimeElapsedSinceLastModification,omitempty"` // MinTimeElapsedSinceLastModification: If specified, only objects with - // a "last modification time" before - // `NOW` - `min_time_elapsed_since_last_modification` and objects that - // don't - // have a "last modification time" are transferred. - // - // For each TransferOperation started by this TransferJob, `NOW` - // refers to the start_time of the - // `TransferOperation`. + // a "last modification time" before `NOW` - + // `min_time_elapsed_since_last_modification` and objects that don't + // have a "last modification time" are transferred. For each + // TransferOperation started by this TransferJob, `NOW` refers to the + // start_time of the `TransferOperation`. MinTimeElapsedSinceLastModification string `json:"minTimeElapsedSinceLastModification,omitempty"` // ForceSendFields is a list of field names (e.g. "ExcludePrefixes") to @@ -1091,21 +890,19 @@ func (s *ObjectConditions) MarshalJSON() ([]byte, error) { } // Operation: This resource represents a long-running operation that is -// the result of a -// network API call. +// the result of a network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in - // progress. - // If `true`, the operation is completed, and either `error` or - // `response` is - // available. + // progress. If `true`, the operation is completed, and either `error` + // or `response` is available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. Error *Status `json:"error,omitempty"` - // Metadata: Represents the transfer operation object. + // Metadata: Represents the transfer operation object. To request a + // TransferOperation object, use transferOperations.get. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same @@ -1114,21 +911,14 @@ type Operation struct { // `transferOperations/some/unique/name`. Name string `json:"name,omitempty"` - // Response: The normal response of the operation in case of success. - // If the original - // method returns no data on success, such as `Delete`, the response - // is - // `google.protobuf.Empty`. If the original method is - // standard - // `Get`/`Create`/`Update`, the response should be the resource. 
For - // other - // methods, the response should have the type `XxxResponse`, where - // `Xxx` - // is the original method name. For example, if the original method - // name - // is `TakeSnapshot()`, the inferred response type - // is - // `TakeSnapshotResponse`. + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as `Delete`, the + // response is `google.protobuf.Empty`. If the original method is + // standard `Get`/`Create`/`Update`, the response should be the + // resource. For other methods, the response should have the type + // `XxxResponse`, where `Xxx` is the original method name. For example, + // if the original method name is `TakeSnapshot()`, the inferred + // response type is `TakeSnapshotResponse`. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1171,59 +961,36 @@ type ResumeTransferOperationRequest struct { // Schedule: Transfers can be scheduled to recur or to run just once. type Schedule struct { // ScheduleEndDate: The last day a transfer runs. Date boundaries are - // determined relative to - // UTC time. A job will run once per 24 hours within the following - // guidelines: - // - // * If `schedule_end_date` and schedule_start_date are the same and - // in - // the future relative to UTC, the transfer is executed only one - // time. - // * If `schedule_end_date` is later than `schedule_start_date` and - // `schedule_end_date` is in the future relative to UTC, the job - // will - // run each day at start_time_of_day through `schedule_end_date`. + // determined relative to UTC time. A job will run once per 24 hours + // within the following guidelines: * If `schedule_end_date` and + // schedule_start_date are the same and in the future relative to UTC, + // the transfer is executed only one time. * If `schedule_end_date` is + // later than `schedule_start_date` and `schedule_end_date` is in the + // future relative to UTC, the job will run each day at + // start_time_of_day through `schedule_end_date`. ScheduleEndDate *Date `json:"scheduleEndDate,omitempty"` // ScheduleStartDate: Required. The start date of a transfer. Date - // boundaries are determined - // relative to UTC time. If `schedule_start_date` and - // start_time_of_day - // are in the past relative to the job's creation time, the transfer - // starts - // the day after you schedule the transfer request. - // - // **Note:** When starting jobs at or near midnight UTC it is possible - // that - // a job will start later than expected. For example, if you send an - // outbound - // request on June 1 one millisecond prior to midnight UTC and the - // Storage - // Transfer Service server receives the request on June 2, then it will - // create - // a TransferJob with `schedule_start_date` set to June 2 and - // a - // `start_time_of_day` set to midnight UTC. The first - // scheduled + // boundaries are determined relative to UTC time. If + // `schedule_start_date` and start_time_of_day are in the past relative + // to the job's creation time, the transfer starts the day after you + // schedule the transfer request. **Note:** When starting jobs at or + // near midnight UTC it is possible that a job will start later than + // expected. 
For example, if you send an outbound request on June 1 one + // millisecond prior to midnight UTC and the Storage Transfer Service + // server receives the request on June 2, then it will create a + // TransferJob with `schedule_start_date` set to June 2 and a + // `start_time_of_day` set to midnight UTC. The first scheduled // TransferOperation will take place on June 3 at midnight UTC. ScheduleStartDate *Date `json:"scheduleStartDate,omitempty"` // StartTimeOfDay: The time in UTC that a transfer job is scheduled to - // run. Transfers may - // start later than this time. - // - // If `start_time_of_day` is not specified: - // - // * One-time transfers run immediately. - // * Recurring transfers run immediately, and each day at midnight - // UTC, - // through schedule_end_date. - // - // If `start_time_of_day` is specified: - // - // * One-time transfers run at the specified time. - // * Recurring transfers run at the specified time each day, through - // `schedule_end_date`. + // run. Transfers may start later than this time. If `start_time_of_day` + // is not specified: * One-time transfers run immediately. * Recurring + // transfers run immediately, and each day at midnight UTC, through + // schedule_end_date. If `start_time_of_day` is specified: * One-time + // transfers run at the specified time. * Recurring transfers run at the + // specified time each day, through `schedule_end_date`. StartTimeOfDay *TimeOfDay `json:"startTimeOfDay,omitempty"` // ForceSendFields is a list of field names (e.g. "ScheduleEndDate") to @@ -1251,32 +1018,24 @@ func (s *Schedule) MarshalJSON() ([]byte, error) { } // Status: The `Status` type defines a logical error model that is -// suitable for -// different programming environments, including REST APIs and RPC APIs. -// It is -// used by [gRPC](https://github.com/grpc). Each `Status` message -// contains -// three pieces of data: error code, error message, and error -// details. -// -// You can find out more about this error model and how to work with it -// in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the [API Design +// Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` - // Details: A list of messages that carry the error details. There is a - // common set of - // message types for APIs to use. + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in - // English. Any - // user-facing error message should be localized and sent in - // the - // google.rpc.Status.details field, or localized by the client. + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to @@ -1303,15 +1062,13 @@ func (s *Status) MarshalJSON() ([]byte, error) { } // TimeOfDay: Represents a time of day. 
The date and time zone are -// either not significant -// or are specified elsewhere. An API may choose to allow leap seconds. -// Related -// types are google.type.Date and `google.protobuf.Timestamp`. +// either not significant or are specified elsewhere. An API may choose +// to allow leap seconds. Related types are google.type.Date and +// `google.protobuf.Timestamp`. type TimeOfDay struct { // Hours: Hours of day in 24 hour format. Should be from 0 to 23. An API - // may choose - // to allow the value "24:00:00" for scenarios like business closing - // time. + // may choose to allow the value "24:00:00" for scenarios like business + // closing time. Hours int64 `json:"hours,omitempty"` // Minutes: Minutes of hour of day. Must be from 0 to 59. @@ -1322,8 +1079,7 @@ type TimeOfDay struct { Nanos int64 `json:"nanos,omitempty"` // Seconds: Seconds of minutes of the time. Must normally be from 0 to - // 59. An API may - // allow the value 60 if it allows leap-seconds. + // 59. An API may allow the value 60 if it allows leap-seconds. Seconds int64 `json:"seconds,omitempty"` // ForceSendFields is a list of field names (e.g. "Hours") to @@ -1366,10 +1122,8 @@ type TransferCounters struct { BytesFailedToDeleteFromSink int64 `json:"bytesFailedToDeleteFromSink,omitempty,string"` // BytesFoundFromSource: Bytes found in the data source that are - // scheduled to be transferred, - // excluding any that are filtered based on object conditions or skipped - // due - // to sync. + // scheduled to be transferred, excluding any that are filtered based on + // object conditions or skipped due to sync. BytesFoundFromSource int64 `json:"bytesFoundFromSource,omitempty,string"` // BytesFoundOnlyFromSink: Bytes found only in the data sink that are @@ -1377,13 +1131,11 @@ type TransferCounters struct { BytesFoundOnlyFromSink int64 `json:"bytesFoundOnlyFromSink,omitempty,string"` // BytesFromSourceFailed: Bytes in the data source that failed to be - // transferred or that failed to - // be deleted after being transferred. + // transferred or that failed to be deleted after being transferred. BytesFromSourceFailed int64 `json:"bytesFromSourceFailed,omitempty,string"` // BytesFromSourceSkippedBySync: Bytes in the data source that are not - // transferred because they already - // exist in the data sink. + // transferred because they already exist in the data sink. BytesFromSourceSkippedBySync int64 `json:"bytesFromSourceSkippedBySync,omitempty,string"` // ObjectsCopiedToSink: Objects that are copied to the data sink. @@ -1401,10 +1153,8 @@ type TransferCounters struct { ObjectsFailedToDeleteFromSink int64 `json:"objectsFailedToDeleteFromSink,omitempty,string"` // ObjectsFoundFromSource: Objects found in the data source that are - // scheduled to be transferred, - // excluding any that are filtered based on object conditions or skipped - // due - // to sync. + // scheduled to be transferred, excluding any that are filtered based on + // object conditions or skipped due to sync. ObjectsFoundFromSource int64 `json:"objectsFoundFromSource,omitempty,string"` // ObjectsFoundOnlyFromSink: Objects found only in the data sink that @@ -1412,13 +1162,11 @@ type TransferCounters struct { ObjectsFoundOnlyFromSink int64 `json:"objectsFoundOnlyFromSink,omitempty,string"` // ObjectsFromSourceFailed: Objects in the data source that failed to be - // transferred or that failed - // to be deleted after being transferred. + // transferred or that failed to be deleted after being transferred. 
ObjectsFromSourceFailed int64 `json:"objectsFromSourceFailed,omitempty,string"` // ObjectsFromSourceSkippedBySync: Objects in the data source that are - // not transferred because they already - // exist in the data sink. + // not transferred because they already exist in the data sink. ObjectsFromSourceSkippedBySync int64 `json:"objectsFromSourceSkippedBySync,omitempty,string"` // ForceSendFields is a list of field names (e.g. "BytesCopiedToSink") @@ -1446,8 +1194,7 @@ func (s *TransferCounters) MarshalJSON() ([]byte, error) { } // TransferJob: This resource represents the configuration of a transfer -// job that runs -// periodically. +// job that runs periodically. type TransferJob struct { // CreationTime: Output only. The time that the transfer job was // created. @@ -1458,8 +1205,7 @@ type TransferJob struct { DeletionTime string `json:"deletionTime,omitempty"` // Description: A description provided by the user for the job. Its max - // length is 1024 - // bytes when Unicode-encoded. + // length is 1024 bytes when Unicode-encoded. Description string `json:"description,omitempty"` // LastModificationTime: Output only. The time that the transfer job was @@ -1467,24 +1213,15 @@ type TransferJob struct { LastModificationTime string `json:"lastModificationTime,omitempty"` // Name: A unique name (within the transfer project) assigned when the - // job is - // created. If this field is empty in a CreateTransferJobRequest, - // Storage - // Transfer Service will assign a unique name. Otherwise, the specified - // name - // is used as the unique name for this job. - // - // If the specified name is in use by a job, the creation request fails - // with - // an ALREADY_EXISTS error. - // - // This name must start with "transferJobs/" prefix and end with a - // letter or - // a number, and should be no more than 128 characters. - // Example: "transferJobs/[A-Za-z0-9-._~]*[A-Za-z0-9]$" - // - // Invalid job names will fail with an - // INVALID_ARGUMENT error. + // job is created. If this field is empty in a CreateTransferJobRequest, + // Storage Transfer Service will assign a unique name. Otherwise, the + // specified name is used as the unique name for this job. If the + // specified name is in use by a job, the creation request fails with an + // ALREADY_EXISTS error. This name must start with "transferJobs/" + // prefix and end with a letter or a number, and should be no more than + // 128 characters. Example: + // "transferJobs/[A-Za-z0-9-._~]*[A-Za-z0-9]$" Invalid job names will + // fail with an INVALID_ARGUMENT error. Name string `json:"name,omitempty"` // NotificationConfig: Notification configuration. @@ -1497,28 +1234,21 @@ type TransferJob struct { // Schedule: Schedule specification. Schedule *Schedule `json:"schedule,omitempty"` - // Status: Status of the job. This value MUST be specified - // for - // `CreateTransferJobRequests`. - // - // **Note:** The effect of the new job status takes place during a - // subsequent - // job run. For example, if you change the job status from - // ENABLED to DISABLED, and an operation + // Status: Status of the job. This value MUST be specified for + // `CreateTransferJobRequests`. **Note:** The effect of the new job + // status takes place during a subsequent job run. For example, if you + // change the job status from ENABLED to DISABLED, and an operation // spawned by the transfer is running, the status change would not - // affect the - // current operation. + // affect the current operation. 
// // Possible values: // "STATUS_UNSPECIFIED" - Zero is an illegal value. // "ENABLED" - New transfers will be performed based on the schedule. // "DISABLED" - New transfers will not be scheduled. // "DELETED" - This is a soft delete state. After a transfer job is - // set to this - // state, the job and all the transfer executions are subject to - // garbage collection. Transfer jobs become eligible for garbage - // collection - // 30 days after their status is set to `DELETED`. + // set to this state, the job and all the transfer executions are + // subject to garbage collection. Transfer jobs become eligible for + // garbage collection 30 days after their status is set to `DELETED`. Status string `json:"status,omitempty"` // TransferSpec: Transfer specification. @@ -1619,25 +1349,18 @@ func (s *TransferOperation) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TransferOptions: TransferOptions uses three boolean parameters to -// define the actions -// to be performed on objects in a transfer. +// TransferOptions: TransferOptions define the actions to be performed +// on objects in a transfer. type TransferOptions struct { // DeleteObjectsFromSourceAfterTransfer: Whether objects should be - // deleted from the source after they are - // transferred to the sink. - // - // **Note:** This option and delete_objects_unique_in_sink are - // mutually + // deleted from the source after they are transferred to the sink. + // **Note:** This option and delete_objects_unique_in_sink are mutually // exclusive. DeleteObjectsFromSourceAfterTransfer bool `json:"deleteObjectsFromSourceAfterTransfer,omitempty"` // DeleteObjectsUniqueInSink: Whether objects that exist only in the - // sink should be deleted. - // - // **Note:** This option and delete_objects_from_source_after_transfer - // are - // mutually exclusive. + // sink should be deleted. **Note:** This option and + // delete_objects_from_source_after_transfer are mutually exclusive. DeleteObjectsUniqueInSink bool `json:"deleteObjectsUniqueInSink,omitempty"` // OverwriteObjectsAlreadyExistingInSink: Whether overwriting objects @@ -1687,18 +1410,15 @@ type TransferSpec struct { HttpDataSource *HttpData `json:"httpDataSource,omitempty"` // ObjectConditions: Only objects that satisfy these object conditions - // are included in the set - // of data source and data sink objects. Object conditions based - // on - // objects' "last modification time" do not exclude objects in a data - // sink. + // are included in the set of data source and data sink objects. Object + // conditions based on objects' "last modification time" do not exclude + // objects in a data sink. ObjectConditions *ObjectConditions `json:"objectConditions,omitempty"` - // TransferOptions: If the option - // delete_objects_unique_in_sink - // is `true`, object conditions based on objects' "last modification - // time" are - // ignored and do not exclude objects in a data source or a data sink. + // TransferOptions: If the option delete_objects_unique_in_sink is + // `true` and time-based object conditions such as 'last modification + // time' are specified, the request fails with an INVALID_ARGUMENT + // error. TransferOptions *TransferOptions `json:"transferOptions,omitempty"` // ForceSendFields is a list of field names (e.g. "AwsS3DataSource") to @@ -1728,33 +1448,24 @@ func (s *TransferSpec) MarshalJSON() ([]byte, error) { // UpdateTransferJobRequest: Request passed to UpdateTransferJob. 
type UpdateTransferJobRequest struct { // ProjectId: Required. The ID of the Google Cloud Platform Console - // project that owns the - // job. + // project that owns the job. ProjectId string `json:"projectId,omitempty"` // TransferJob: Required. The job to update. `transferJob` is expected - // to specify only - // four fields: description, - // transfer_spec, - // notification_config, and - // status. An `UpdateTransferJobRequest` that specifies - // other fields will be rejected with the error - // INVALID_ARGUMENT. + // to specify only four fields: description, transfer_spec, + // notification_config, and status. An `UpdateTransferJobRequest` that + // specifies other fields will be rejected with the error + // INVALID_ARGUMENT. Updating a job satus to DELETED requires + // `storagetransfer.jobs.delete` permissions. TransferJob *TransferJob `json:"transferJob,omitempty"` // UpdateTransferJobFieldMask: The field mask of the fields in - // `transferJob` that are to be updated in - // this request. Fields in `transferJob` that can be updated - // are: - // description, - // transfer_spec, - // notification_config, and - // status. To update the `transfer_spec` of the job, a - // complete transfer specification must be provided. An - // incomplete - // specification missing any required fields will be rejected with the - // error - // INVALID_ARGUMENT. + // `transferJob` that are to be updated in this request. Fields in + // `transferJob` that can be updated are: description, transfer_spec, + // notification_config, and status. To update the `transfer_spec` of the + // job, a complete transfer specification must be provided. An + // incomplete specification missing any required fields will be rejected + // with the error INVALID_ARGUMENT. UpdateTransferJobFieldMask string `json:"updateTransferJobFieldMask,omitempty"` // ForceSendFields is a list of field names (e.g. "ProjectId") to @@ -1792,17 +1503,13 @@ type GoogleServiceAccountsGetCall struct { } // Get: Returns the Google service account that is used by Storage -// Transfer -// Service to access buckets in the project where transfers -// run or in other projects. Each Google service account is -// associated -// with one Google Cloud Platform Console project. Users -// should add this service account to the Google Cloud Storage -// bucket -// ACLs to grant access to Storage Transfer Service. This -// service -// account is created and owned by Storage Transfer Service and can -// only be used by Storage Transfer Service. +// Transfer Service to access buckets in the project where transfers run +// or in other projects. Each Google service account is associated with +// one Google Cloud Platform Console project. Users should add this +// service account to the Google Cloud Storage bucket ACLs to grant +// access to Storage Transfer Service. This service account is created +// and owned by Storage Transfer Service and can only be used by Storage +// Transfer Service. 
func (r *GoogleServiceAccountsService) Get(projectId string) *GoogleServiceAccountsGetCall { c := &GoogleServiceAccountsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -1846,7 +1553,7 @@ func (c *GoogleServiceAccountsGetCall) Header() http.Header { func (c *GoogleServiceAccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1908,7 +1615,7 @@ func (c *GoogleServiceAccountsGetCall) Do(opts ...googleapi.CallOption) (*Google } return ret, nil // { - // "description": "Returns the Google service account that is used by Storage Transfer\nService to access buckets in the project where transfers\nrun or in other projects. Each Google service account is associated\nwith one Google Cloud Platform Console project. Users\nshould add this service account to the Google Cloud Storage bucket\nACLs to grant access to Storage Transfer Service. This service\naccount is created and owned by Storage Transfer Service and can\nonly be used by Storage Transfer Service.", + // "description": "Returns the Google service account that is used by Storage Transfer Service to access buckets in the project where transfers run or in other projects. Each Google service account is associated with one Google Cloud Platform Console project. Users should add this service account to the Google Cloud Storage bucket ACLs to grant access to Storage Transfer Service. This service account is created and owned by Storage Transfer Service and can only be used by Storage Transfer Service.", // "flatPath": "v1/googleServiceAccounts/{projectId}", // "httpMethod": "GET", // "id": "storagetransfer.googleServiceAccounts.get", @@ -1917,7 +1624,7 @@ func (c *GoogleServiceAccountsGetCall) Do(opts ...googleapi.CallOption) (*Google // ], // "parameters": { // "projectId": { - // "description": "Required. The ID of the Google Cloud Platform Console project that the\nGoogle service account is associated with.", + // "description": "Required. The ID of the Google Cloud Platform Console project that the Google service account is associated with.", // "location": "path", // "required": true, // "type": "string" @@ -1978,7 +1685,7 @@ func (c *TransferJobsCreateCall) Header() http.Header { func (c *TransferJobsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2078,8 +1785,7 @@ func (r *TransferJobsService) Get(jobName string) *TransferJobsGetCall { } // ProjectId sets the optional parameter "projectId": Required. The ID -// of the Google Cloud Platform Console project that owns the -// job. +// of the Google Cloud Platform Console project that owns the job. 
func (c *TransferJobsGetCall) ProjectId(projectId string) *TransferJobsGetCall { c.urlParams_.Set("projectId", projectId) return c @@ -2122,7 +1828,7 @@ func (c *TransferJobsGetCall) Header() http.Header { func (c *TransferJobsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2193,14 +1899,14 @@ func (c *TransferJobsGetCall) Do(opts ...googleapi.CallOption) (*TransferJob, er // ], // "parameters": { // "jobName": { - // "description": "Required. The job to get.", + // "description": "Required. \" The job to get.", // "location": "path", // "pattern": "^transferJobs/.*$", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "Required. The ID of the Google Cloud Platform Console project that owns the\njob.", + // "description": "Required. The ID of the Google Cloud Platform Console project that owns the job.", // "location": "query", // "type": "string" // } @@ -2233,21 +1939,13 @@ func (r *TransferJobsService) List() *TransferJobsListCall { } // Filter sets the optional parameter "filter": Required. A list of -// query parameters specified as JSON text in the form -// of: -// {"project_id":"my_project_id", -// "job_names":["jobid1","jobid2",...], -// "job_statuses":["status1","status2",...]}. -// Since `job_names` and `job_statuses` support multiple values, their -// values -// must be specified with array notation. `project``_``id` -// is -// required. `job_names` and `job_statuses` are optional. The valid -// values -// for `job_statuses` are case-insensitive: -// ENABLED, -// DISABLED, and -// DELETED. +// query parameters specified as JSON text in the form of: +// {"project_id":"my_project_id", "job_names":["jobid1","jobid2",...], +// "job_statuses":["status1","status2",...]}. Since `job_names` and +// `job_statuses` support multiple values, their values must be +// specified with array notation. `project``_``id` is required. +// `job_names` and `job_statuses` are optional. The valid values for +// `job_statuses` are case-insensitive: ENABLED, DISABLED, and DELETED. func (c *TransferJobsListCall) Filter(filter string) *TransferJobsListCall { c.urlParams_.Set("filter", filter) return c @@ -2304,7 +2002,7 @@ func (c *TransferJobsListCall) Header() http.Header { func (c *TransferJobsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2370,7 +2068,7 @@ func (c *TransferJobsListCall) Do(opts ...googleapi.CallOption) (*ListTransferJo // "parameterOrder": [], // "parameters": { // "filter": { - // "description": "Required. A list of query parameters specified as JSON text in the form of:\n{\"project\u003cspan\u003e_\u003c/span\u003eid\":\"my_project_id\",\n \"job_names\":[\"jobid1\",\"jobid2\",...],\n \"job_statuses\":[\"status1\",\"status2\",...]}.\nSince `job_names` and `job_statuses` support multiple values, their values\nmust be specified with array notation. `project`\u003cspan\u003e`_`\u003c/span\u003e`id` is\nrequired. `job_names` and `job_statuses` are optional. 
The valid values\nfor `job_statuses` are case-insensitive:\nENABLED,\nDISABLED, and\nDELETED.", + // "description": "Required. A list of query parameters specified as JSON text in the form of: {\"project_id\":\"my_project_id\", \"job_names\":[\"jobid1\",\"jobid2\",...], \"job_statuses\":[\"status1\",\"status2\",...]}. Since `job_names` and `job_statuses` support multiple values, their values must be specified with array notation. `project``_``id` is required. `job_names` and `job_statuses` are optional. The valid values for `job_statuses` are case-insensitive: ENABLED, DISABLED, and DELETED.", // "location": "query", // "type": "string" // }, @@ -2430,17 +2128,10 @@ type TransferJobsPatchCall struct { } // Patch: Updates a transfer job. Updating a job's transfer spec does -// not affect -// transfer operations that are running already. Updating a job's -// schedule -// is not allowed. -// -// **Note:** The job's status field can be modified -// using this RPC (for example, to set a job's status -// to -// DELETED, -// DISABLED, or -// ENABLED). +// not affect transfer operations that are running already. Updating a +// job's schedule is not allowed. **Note:** The job's status field can +// be modified using this RPC (for example, to set a job's status to +// DELETED, DISABLED, or ENABLED). func (r *TransferJobsService) Patch(jobName string, updatetransferjobrequest *UpdateTransferJobRequest) *TransferJobsPatchCall { c := &TransferJobsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.jobName = jobName @@ -2475,7 +2166,7 @@ func (c *TransferJobsPatchCall) Header() http.Header { func (c *TransferJobsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2539,7 +2230,7 @@ func (c *TransferJobsPatchCall) Do(opts ...googleapi.CallOption) (*TransferJob, } return ret, nil // { - // "description": "Updates a transfer job. Updating a job's transfer spec does not affect\ntransfer operations that are running already. Updating a job's schedule\nis not allowed.\n\n**Note:** The job's status field can be modified\nusing this RPC (for example, to set a job's status to\nDELETED,\nDISABLED, or\nENABLED).", + // "description": "Updates a transfer job. Updating a job's transfer spec does not affect transfer operations that are running already. Updating a job's schedule is not allowed. **Note:** The job's status field can be modified using this RPC (for example, to set a job's status to DELETED, DISABLED, or ENABLED).", // "flatPath": "v1/transferJobs/{transferJobsId}", // "httpMethod": "PATCH", // "id": "storagetransfer.transferJobs.patch", @@ -2572,19 +2263,37 @@ func (c *TransferJobsPatchCall) Do(opts ...googleapi.CallOption) (*TransferJob, // method id "storagetransfer.transferOperations.cancel": type TransferOperationsCancelCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Cancel: Cancels a transfer. Use the get method to check whether the -// cancellation succeeded or whether the operation completed despite -// cancellation. 
-func (r *TransferOperationsService) Cancel(name string) *TransferOperationsCancelCall { + s *Service + name string + canceloperationrequest *CancelOperationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Cancel: Cancels a transfer. Use the transferOperations.get method to +// check if the cancellation succeeded or if the operation completed +// despite the `cancel` request. When you cancel an operation, the +// currently running transfer is interrupted. For recurring transfer +// jobs, the next instance of the transfer job will still run. For +// example, if your job is configured to run every day at 1pm and you +// cancel Monday's operation at 1:05pm, Monday's transfer will stop. +// However, a transfer job will still be attempted on Tuesday. This +// applies only to currently running operations. If an operation is not +// currently running, `cancel` does nothing. *Caution:* Canceling a +// transfer job can leave your data in an unknown state. We recommend +// that you restore the state at both the destination and the source +// after the `cancel` request completes so that your data is in a +// consistent state. When you cancel a job, the next job computes a +// delta of files and may repair any inconsistent state. For instance, +// if you run a job every day, and today's job found 10 new files and +// transferred five files before you canceled the job, tomorrow's +// transfer operation will compute a new delta with the five files that +// were not copied today plus any new files discovered tomorrow. +func (r *TransferOperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *TransferOperationsCancelCall { c := &TransferOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name + c.canceloperationrequest = canceloperationrequest return c } @@ -2615,12 +2324,17 @@ func (c *TransferOperationsCancelCall) Header() http.Header { func (c *TransferOperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceloperationrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:cancel") @@ -2674,7 +2388,7 @@ func (c *TransferOperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, } return ret, nil // { - // "description": "Cancels a transfer. Use the get method to check whether the cancellation succeeded or whether the operation completed despite cancellation.", + // "description": "Cancels a transfer. Use the transferOperations.get method to check if the cancellation succeeded or if the operation completed despite the `cancel` request. When you cancel an operation, the currently running transfer is interrupted. For recurring transfer jobs, the next instance of the transfer job will still run. For example, if your job is configured to run every day at 1pm and you cancel Monday's operation at 1:05pm, Monday's transfer will stop. However, a transfer job will still be attempted on Tuesday. 
This applies only to currently running operations. If an operation is not currently running, `cancel` does nothing. *Caution:* Canceling a transfer job can leave your data in an unknown state. We recommend that you restore the state at both the destination and the source after the `cancel` request completes so that your data is in a consistent state. When you cancel a job, the next job computes a delta of files and may repair any inconsistent state. For instance, if you run a job every day, and today's job found 10 new files and transferred five files before you canceled the job, tomorrow's transfer operation will compute a new delta with the five files that were not copied today plus any new files discovered tomorrow.", // "flatPath": "v1/transferOperations/{transferOperationsId}:cancel", // "httpMethod": "POST", // "id": "storagetransfer.transferOperations.cancel", @@ -2691,6 +2405,9 @@ func (c *TransferOperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, // } // }, // "path": "v1/{+name}:cancel", + // "request": { + // "$ref": "CancelOperationRequest" + // }, // "response": { // "$ref": "Empty" // }, @@ -2712,11 +2429,9 @@ type TransferOperationsGetCall struct { header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this -// method to poll the operation result at intervals as recommended by -// the API -// service. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. func (r *TransferOperationsService) Get(name string) *TransferOperationsGetCall { c := &TransferOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -2760,7 +2475,7 @@ func (c *TransferOperationsGetCall) Header() http.Header { func (c *TransferOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2822,7 +2537,7 @@ func (c *TransferOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", // "flatPath": "v1/transferOperations/{transferOperationsId}", // "httpMethod": "GET", // "id": "storagetransfer.transferOperations.get", @@ -2869,16 +2584,14 @@ func (r *TransferOperationsService) List(name string) *TransferOperationsListCal // Filter sets the optional parameter "filter": Required. A list of // query parameters specified as JSON text in the form of: -// {"project_id":"my_project_id", -// "job_names":["jobid1","jobid2",...], -// "operation_names":["opid1","opid2",...], -// "transfer_statuses":["status1","status2",...]}. -// Since `job_names`, `operation_names`, and `transfer_statuses` support -// multiple values, they must be specified with array notation. -// `project``_``id` is required. `job_names`, -// `operation_names`, and `transfer_statuses` are optional. 
The valid -// values for `transfer_statuses` are case-insensitive: IN_PROGRESS, -// PAUSED, SUCCESS, FAILED, and ABORTED. +// {"project_id":"my_project_id", "job_names":["jobid1","jobid2",...], +// "operation_names":["opid1","opid2",...], +// "transfer_statuses":["status1","status2",...]}. Since `job_names`, +// `operation_names`, and `transfer_statuses` support multiple values, +// they must be specified with array notation. `project``_``id` is +// required. `job_names`, `operation_names`, and `transfer_statuses` are +// optional. The valid values for `transfer_statuses` are +// case-insensitive: IN_PROGRESS, PAUSED, SUCCESS, FAILED, and ABORTED. func (c *TransferOperationsListCall) Filter(filter string) *TransferOperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -2935,7 +2648,7 @@ func (c *TransferOperationsListCall) Header() http.Header { func (c *TransferOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3006,7 +2719,7 @@ func (c *TransferOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOper // ], // "parameters": { // "filter": { - // "description": "Required. A list of query parameters specified as JSON text in the form of: {\"project\u003cspan\u003e_\u003c/span\u003eid\":\"my_project_id\",\n \"job_names\":[\"jobid1\",\"jobid2\",...],\n \"operation_names\":[\"opid1\",\"opid2\",...],\n \"transfer_statuses\":[\"status1\",\"status2\",...]}.\nSince `job_names`, `operation_names`, and `transfer_statuses` support multiple values, they must be specified with array notation. `project`\u003cspan\u003e`_`\u003c/span\u003e`id` is required. `job_names`, `operation_names`, and `transfer_statuses` are optional. The valid values for `transfer_statuses` are case-insensitive: IN_PROGRESS, PAUSED, SUCCESS, FAILED, and ABORTED.", + // "description": "Required. A list of query parameters specified as JSON text in the form of: {\"project_id\":\"my_project_id\", \"job_names\":[\"jobid1\",\"jobid2\",...], \"operation_names\":[\"opid1\",\"opid2\",...], \"transfer_statuses\":[\"status1\",\"status2\",...]}. Since `job_names`, `operation_names`, and `transfer_statuses` support multiple values, they must be specified with array notation. `project``_``id` is required. `job_names`, `operation_names`, and `transfer_statuses` are optional. 
The valid values for `transfer_statuses` are case-insensitive: IN_PROGRESS, PAUSED, SUCCESS, FAILED, and ABORTED.", // "location": "query", // "type": "string" // }, @@ -3107,7 +2820,7 @@ func (c *TransferOperationsPauseCall) Header() http.Header { func (c *TransferOperationsPauseCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3247,7 +2960,7 @@ func (c *TransferOperationsResumeCall) Header() http.Header { func (c *TransferOperationsResumeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201009") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index 0131a2cfaf0..55c04a5af06 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -9,6 +9,7 @@ package grpc import ( "context" + "crypto/tls" "errors" "log" "os" @@ -18,6 +19,7 @@ import ( "golang.org/x/oauth2" "google.golang.org/api/internal" "google.golang.org/api/option" + "google.golang.org/api/transport/internal/dca" "google.golang.org/grpc" "google.golang.org/grpc/credentials" grpcgoogle "google.golang.org/grpc/credentials/google" @@ -112,6 +114,10 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if o.GRPCConn != nil { return o.GRPCConn, nil } + clientCertSource, endpoint, err := dca.GetClientCertificateSourceAndEndpoint(o) + if err != nil { + return nil, err + } var grpcOpts []grpc.DialOption if insecure { grpcOpts = []grpc.DialOption{grpc.WithInsecure()} @@ -134,9 +140,9 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C // service account. // * Opted in via GOOGLE_CLOUD_ENABLE_DIRECT_PATH environment variable. // For example, GOOGLE_CLOUD_ENABLE_DIRECT_PATH=spanner,pubsub - if isDirectPathEnabled(o.Endpoint) && isTokenSourceDirectPathCompatible(creds.TokenSource) { - if !strings.HasPrefix(o.Endpoint, "dns:///") { - o.Endpoint = "dns:///" + o.Endpoint + if isDirectPathEnabled(endpoint) && isTokenSourceDirectPathCompatible(creds.TokenSource) { + if !strings.HasPrefix(endpoint, "dns:///") { + endpoint = "dns:///" + endpoint } grpcOpts = []grpc.DialOption{ grpc.WithCredentialsBundle( @@ -150,13 +156,16 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C } // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor. 
} else { + tlsConfig := &tls.Config{ + GetClientCertificate: clientCertSource, + } grpcOpts = []grpc.DialOption{ grpc.WithPerRPCCredentials(grpcTokenSource{ TokenSource: oauth.TokenSource{creds.TokenSource}, quotaProject: o.QuotaProject, requestReason: o.RequestReason, }), - grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), + grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)), } } } @@ -180,11 +189,11 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C // point when isDirectPathEnabled will default to true, we guard it by // the Directpath env var for now once we can introspect user defined // dialer (https://github.com/grpc/grpc-go/issues/2795). - if timeoutDialerOption != nil && isDirectPathEnabled(o.Endpoint) { + if timeoutDialerOption != nil && isDirectPathEnabled(endpoint) { grpcOpts = append(grpcOpts, timeoutDialerOption) } - return grpc.DialContext(ctx, o.Endpoint, grpcOpts...) + return grpc.DialContext(ctx, endpoint, grpcOpts...) } func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { @@ -253,8 +262,8 @@ func isDirectPathEnabled(endpoint string) bool { } // Only try direct path if the user has opted in via the environment variable. - whitelist := strings.Split(os.Getenv("GOOGLE_CLOUD_ENABLE_DIRECT_PATH"), ",") - for _, api := range whitelist { + directPathAPIs := strings.Split(os.Getenv("GOOGLE_CLOUD_ENABLE_DIRECT_PATH"), ",") + for _, api := range directPathAPIs { // Ignore empty string since an empty env variable splits into [""] if api != "" && strings.Contains(endpoint, api) { return true @@ -271,6 +280,7 @@ func processAndValidateOpts(opts []option.ClientOption) (*internal.DialSettings, if err := o.Validate(); err != nil { return nil, err } + return &o, nil } diff --git a/vendor/google.golang.org/api/transport/grpc/pool.go b/vendor/google.golang.org/api/transport/grpc/pool.go index 32c02934b70..4cf94a2771e 100644 --- a/vendor/google.golang.org/api/transport/grpc/pool.go +++ b/vendor/google.golang.org/api/transport/grpc/pool.go @@ -63,7 +63,7 @@ func (p *roundRobinConnPool) NewStream(ctx context.Context, desc *grpc.StreamDes return p.Conn().NewStream(ctx, desc, method, opts...) } -// multiError represents errors from mulitple conns in the group. +// multiError represents errors from multiple conns in the group. // // TODO: figure out how and whether this is useful to export. End users should // not be depending on the transport/grpc package directly, so there might need diff --git a/vendor/google.golang.org/api/transport/http/default_transport_go113.go b/vendor/google.golang.org/api/transport/http/default_transport_go113.go new file mode 100644 index 00000000000..924f2704d1a --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/default_transport_go113.go @@ -0,0 +1,20 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.13 + +package http + +import "net/http" + +// clonedTransport returns the given RoundTripper as a cloned *http.Transport. +// It returns nil if the RoundTripper can't be cloned or coerced to +// *http.Transport. 
+func clonedTransport(rt http.RoundTripper) *http.Transport { + t, ok := rt.(*http.Transport) + if !ok { + return nil + } + return t.Clone() +} diff --git a/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go b/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go new file mode 100644 index 00000000000..3cb16c6cb6c --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go @@ -0,0 +1,15 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.13 + +package http + +import "net/http" + +// clonedTransport returns the given RoundTripper as a cloned *http.Transport. +// For versions of Go <1.13, this is not supported, so return nil. +func clonedTransport(rt http.RoundTripper) *http.Transport { + return nil +} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index c8d79b87676..8578cac9ef2 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -11,10 +11,9 @@ import ( "context" "crypto/tls" "errors" + "net" "net/http" - "net/url" - "os" - "strings" + "time" "go.opencensus.io/plugin/ochttp" "golang.org/x/oauth2" @@ -23,12 +22,7 @@ import ( "google.golang.org/api/option" "google.golang.org/api/transport/cert" "google.golang.org/api/transport/http/internal/propagation" -) - -const ( - mTLSModeAlways = "always" - mTLSModeNever = "never" - mTLSModeAuto = "auto" + "google.golang.org/api/transport/internal/dca" ) // NewClient returns an HTTP client for use communicating with a Google cloud @@ -39,11 +33,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, if err != nil { return nil, "", err } - clientCertSource, err := getClientCertificateSource(settings) - if err != nil { - return nil, "", err - } - endpoint, err := getEndpoint(settings, clientCertSource) + clientCertSource, endpoint, err := dca.GetClientCertificateSourceAndEndpoint(settings) if err != nil { return nil, "", err } @@ -162,23 +152,49 @@ var appengineUrlfetchHook func(context.Context) http.RoundTripper // defaultBaseTransport returns the base HTTP transport. // On App Engine, this is urlfetch.Transport. -// If TLSCertificate is available, return a custom Transport with TLSClientConfig. -// Otherwise, return http.DefaultTransport. +// Otherwise, use a default transport, taking most defaults from +// http.DefaultTransport. +// If TLSCertificate is available, set TLSClientConfig as well. func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) http.RoundTripper { if appengineUrlfetchHook != nil { return appengineUrlfetchHook(ctx) } + // Copy http.DefaultTransport except for MaxIdleConnsPerHost setting, + // which is increased due to reported performance issues under load in the GCS + // client. Transport.Clone is only available in Go 1.13 and up. 
+ trans := clonedTransport(http.DefaultTransport) + if trans == nil { + trans = fallbackBaseTransport() + } + trans.MaxIdleConnsPerHost = 100 + if clientCertSource != nil { - // TODO (cbro): copy default transport settings from http.DefaultTransport - return &http.Transport{ - TLSClientConfig: &tls.Config{ - GetClientCertificate: clientCertSource, - }, + trans.TLSClientConfig = &tls.Config{ + GetClientCertificate: clientCertSource, } } - return http.DefaultTransport + return trans +} + +// fallbackBaseTransport is used in google.protobuf.MethodOptions diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index c4d7afd8fb6..e4324641d6c 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,11 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/api/client.proto package annotations @@ -25,9 +24,9 @@ import ( reflect "reflect" proto "github.com/golang/protobuf/proto" - descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" ) const ( @@ -43,7 +42,7 @@ const _ = proto.ProtoPackageIsVersion4 var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ { - ExtendedType: (*descriptor.MethodOptions)(nil), + ExtendedType: (*descriptorpb.MethodOptions)(nil), ExtensionType: ([]string)(nil), Field: 1051, Name: "google.api.method_signature", @@ -51,7 +50,7 @@ var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ Filename: "google/api/client.proto", }, { - ExtendedType: (*descriptor.ServiceOptions)(nil), + ExtendedType: (*descriptorpb.ServiceOptions)(nil), ExtensionType: (*string)(nil), Field: 1049, Name: "google.api.default_host", @@ -59,7 +58,7 @@ var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ Filename: "google/api/client.proto", }, { - ExtendedType: (*descriptor.ServiceOptions)(nil), + ExtendedType: (*descriptorpb.ServiceOptions)(nil), ExtensionType: (*string)(nil), Field: 1050, Name: "google.api.oauth_scopes", @@ -68,7 +67,7 @@ var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ }, } -// Extension fields to descriptor.MethodOptions. +// Extension fields to descriptorpb.MethodOptions. var ( // A definition of a client library method signature. // @@ -110,7 +109,7 @@ var ( E_MethodSignature = &file_google_api_client_proto_extTypes[0] ) -// Extension fields to descriptor.ServiceOptions. +// Extension fields to descriptorpb.ServiceOptions. var ( // The hostname for this service. // This should be specified with no prefix or protocol. 
@@ -180,8 +179,8 @@ var file_google_api_client_proto_rawDesc = []byte{ } var file_google_api_client_proto_goTypes = []interface{}{ - (*descriptor.MethodOptions)(nil), // 0: google.protobuf.MethodOptions - (*descriptor.ServiceOptions)(nil), // 1: google.protobuf.ServiceOptions + (*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 1: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ 0, // 0: google.api.method_signature:extendee -> google.protobuf.MethodOptions diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go index 10db82e7324..b6b9094c9a4 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,11 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/api/field_behavior.proto package annotations @@ -26,9 +25,9 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" ) const ( @@ -125,7 +124,7 @@ func (FieldBehavior) EnumDescriptor() ([]byte, []int) { var file_google_api_field_behavior_proto_extTypes = []protoimpl.ExtensionInfo{ { - ExtendedType: (*descriptor.FieldOptions)(nil), + ExtendedType: (*descriptorpb.FieldOptions)(nil), ExtensionType: ([]FieldBehavior)(nil), Field: 1052, Name: "google.api.field_behavior", @@ -134,7 +133,7 @@ var file_google_api_field_behavior_proto_extTypes = []protoimpl.ExtensionInfo{ }, } -// Extension fields to descriptor.FieldOptions. +// Extension fields to descriptorpb.FieldOptions. var ( // A designation of a specific field behavior (required, output only, etc.) // in protobuf messages. 
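As a hedged illustration of what the regenerated annotations look like from a consumer's side (a standalone sketch, not code from this repository): the extensions are now typed against google.golang.org/protobuf/types/descriptorpb instead of the legacy protoc-gen-go/descriptor alias, but they are still read and written through the proto package's extension helpers.

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// A MethodOptions message carrying the google.api.method_signature
	// extension, built by hand purely for illustration.
	opts := &descriptorpb.MethodOptions{}
	proto.SetExtension(opts, annotations.E_MethodSignature, []string{"name,update_mask"})

	// Reading it back uses the same extension descriptor; only the options
	// type moved from protoc-gen-go/descriptor to descriptorpb.
	sigs := proto.GetExtension(opts, annotations.E_MethodSignature).([]string)
	fmt.Println(sigs)
}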
@@ -199,8 +198,8 @@ func file_google_api_field_behavior_proto_rawDescGZIP() []byte { var file_google_api_field_behavior_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_api_field_behavior_proto_goTypes = []interface{}{ - (FieldBehavior)(0), // 0: google.api.FieldBehavior - (*descriptor.FieldOptions)(nil), // 1: google.protobuf.FieldOptions + (FieldBehavior)(0), // 0: google.api.FieldBehavior + (*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions } var file_google_api_field_behavior_proto_depIdxs = []int32{ 1, // 0: google.api.field_behavior:extendee -> google.protobuf.FieldOptions diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 4bc7350d6cf..f36d981ced0 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,11 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/api/http.proto package annotations diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 8da6ec2915f..d3e36efee6d 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,11 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/api/resource.proto package annotations @@ -26,9 +25,9 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" ) const ( @@ -262,10 +261,14 @@ type ResourceDescriptor struct { // }; // } History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` - // The plural name used in the resource name, such as 'projects' for - // the name of 'projects/{project}'. It is the same concept of the `plural` - // field in k8s CRD spec + // The plural name used in the resource name and permission names, such as + // 'projects' for the resource name of 'projects/{project}' and the permission + // name of 'cloudresourcemanager.googleapis.com/projects.get'. 
It is the same + // concept of the `plural` field in k8s CRD spec // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ + // + // Note: The plural form is required even for singleton resources. See + // https://aip.dev/156 Plural string `protobuf:"bytes,5,opt,name=plural,proto3" json:"plural,omitempty"` // The same concept of the `singular` field in k8s CRD spec // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ @@ -363,6 +366,17 @@ type ResourceReference struct { // type: "pubsub.googleapis.com/Topic" // }]; // } + // + // Occasionally, a field may reference an arbitrary resource. In this case, + // APIs use the special value * in their resource reference. + // + // Example: + // + // message GetIamPolicyRequest { + // string resource = 2 [(google.api.resource_reference) = { + // type: "*" + // }]; + // } Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // The resource type of a child collection that the annotated field // references. This is useful for annotating the `parent` field that @@ -426,7 +440,7 @@ func (x *ResourceReference) GetChildType() string { var file_google_api_resource_proto_extTypes = []protoimpl.ExtensionInfo{ { - ExtendedType: (*descriptor.FieldOptions)(nil), + ExtendedType: (*descriptorpb.FieldOptions)(nil), ExtensionType: (*ResourceReference)(nil), Field: 1055, Name: "google.api.resource_reference", @@ -434,7 +448,7 @@ var file_google_api_resource_proto_extTypes = []protoimpl.ExtensionInfo{ Filename: "google/api/resource.proto", }, { - ExtendedType: (*descriptor.FileOptions)(nil), + ExtendedType: (*descriptorpb.FileOptions)(nil), ExtensionType: ([]*ResourceDescriptor)(nil), Field: 1053, Name: "google.api.resource_definition", @@ -442,7 +456,7 @@ var file_google_api_resource_proto_extTypes = []protoimpl.ExtensionInfo{ Filename: "google/api/resource.proto", }, { - ExtendedType: (*descriptor.MessageOptions)(nil), + ExtendedType: (*descriptorpb.MessageOptions)(nil), ExtensionType: (*ResourceDescriptor)(nil), Field: 1053, Name: "google.api.resource", @@ -451,7 +465,7 @@ var file_google_api_resource_proto_extTypes = []protoimpl.ExtensionInfo{ }, } -// Extension fields to descriptor.FieldOptions. +// Extension fields to descriptorpb.FieldOptions. var ( // An annotation that describes a resource reference, see // [ResourceReference][]. @@ -460,7 +474,7 @@ var ( E_ResourceReference = &file_google_api_resource_proto_extTypes[0] ) -// Extension fields to descriptor.FileOptions. +// Extension fields to descriptorpb.FileOptions. var ( // An annotation that describes a resource definition without a corresponding // message; see [ResourceDescriptor][]. @@ -469,7 +483,7 @@ var ( E_ResourceDefinition = &file_google_api_resource_proto_extTypes[1] ) -// Extension fields to descriptor.MessageOptions. +// Extension fields to descriptorpb.MessageOptions. var ( // An annotation that describes a resource definition, see // [ResourceDescriptor][]. 
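The remaining vendored files below (distribution, metric, monitored_resource, and the Bigtable admin client) make the parallel switch from the legacy github.com/golang/protobuf/ptypes/* packages to google.golang.org/protobuf/types/known/*. A small sketch of the new-style constructors, independent of this repository and included only for orientation:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// timestamppb and durationpb replace ptypes/timestamp and ptypes/duration;
	// they ship constructors instead of the old ptypes helper functions.
	ts := timestamppb.Now()
	d := durationpb.New(90 * time.Second)

	// anypb.New packs any proto message, replacing ptypes.MarshalAny.
	packed, err := anypb.New(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.AsTime(), d.AsDuration(), packed.TypeUrl)
}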
@@ -554,12 +568,12 @@ func file_google_api_resource_proto_rawDescGZIP() []byte { var file_google_api_resource_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_api_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_google_api_resource_proto_goTypes = []interface{}{ - (ResourceDescriptor_History)(0), // 0: google.api.ResourceDescriptor.History - (*ResourceDescriptor)(nil), // 1: google.api.ResourceDescriptor - (*ResourceReference)(nil), // 2: google.api.ResourceReference - (*descriptor.FieldOptions)(nil), // 3: google.protobuf.FieldOptions - (*descriptor.FileOptions)(nil), // 4: google.protobuf.FileOptions - (*descriptor.MessageOptions)(nil), // 5: google.protobuf.MessageOptions + (ResourceDescriptor_History)(0), // 0: google.api.ResourceDescriptor.History + (*ResourceDescriptor)(nil), // 1: google.api.ResourceDescriptor + (*ResourceReference)(nil), // 2: google.api.ResourceReference + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions + (*descriptorpb.FileOptions)(nil), // 4: google.protobuf.FileOptions + (*descriptorpb.MessageOptions)(nil), // 5: google.protobuf.MessageOptions } var file_google_api_resource_proto_depIdxs = []int32{ 0, // 0: google.api.ResourceDescriptor.history:type_name -> google.api.ResourceDescriptor.History diff --git a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go index 96f4aadb20a..cc9be566b99 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,11 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/api/distribution.proto package distribution @@ -26,10 +25,10 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" - timestamp "github.com/golang/protobuf/ptypes/timestamp" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -372,7 +371,7 @@ type Distribution_Exemplar struct { // exemplar belongs. Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` // The observation (sampling) time of the above value. - Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // Contextual information about the example value. Examples are: // // Trace: type.googleapis.com/google.monitoring.v3.SpanContext @@ -384,7 +383,7 @@ type Distribution_Exemplar struct { // // There may be only a single attachment of any given message type in a // single exemplar, and this is enforced by the system. 
- Attachments []*any.Any `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty"` + Attachments []*anypb.Any `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty"` } func (x *Distribution_Exemplar) Reset() { @@ -426,14 +425,14 @@ func (x *Distribution_Exemplar) GetValue() float64 { return 0 } -func (x *Distribution_Exemplar) GetTimestamp() *timestamp.Timestamp { +func (x *Distribution_Exemplar) GetTimestamp() *timestamppb.Timestamp { if x != nil { return x.Timestamp } return nil } -func (x *Distribution_Exemplar) GetAttachments() []*any.Any { +func (x *Distribution_Exemplar) GetAttachments() []*anypb.Any { if x != nil { return x.Attachments } @@ -759,8 +758,8 @@ var file_google_api_distribution_proto_goTypes = []interface{}{ (*Distribution_BucketOptions_Linear)(nil), // 4: google.api.Distribution.BucketOptions.Linear (*Distribution_BucketOptions_Exponential)(nil), // 5: google.api.Distribution.BucketOptions.Exponential (*Distribution_BucketOptions_Explicit)(nil), // 6: google.api.Distribution.BucketOptions.Explicit - (*timestamp.Timestamp)(nil), // 7: google.protobuf.Timestamp - (*any.Any)(nil), // 8: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp + (*anypb.Any)(nil), // 8: google.protobuf.Any } var file_google_api_distribution_proto_depIdxs = []int32{ 1, // 0: google.api.Distribution.range:type_name -> google.api.Distribution.Range diff --git a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go index 2b112e721bf..619b46b3128 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,11 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/api/label.proto package label diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go index f82bb5ae38b..081a40af4ef 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,11 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/api/launch_stage.proto package api @@ -48,6 +47,10 @@ type LaunchStage int32 const ( // Do not use this default value. LaunchStage_LAUNCH_STAGE_UNSPECIFIED LaunchStage = 0 + // The feature is not yet implemented. Users can not use it. 
+ LaunchStage_UNIMPLEMENTED LaunchStage = 6 + // Prelaunch features are hidden from users and are only visible internally. + LaunchStage_PRELAUNCH LaunchStage = 7 // Early Access features are limited to a closed group of testers. To use // these features, you must sign up in advance and sign a Trusted Tester // agreement (which includes confidentiality provisions). These features may @@ -85,6 +88,8 @@ const ( var ( LaunchStage_name = map[int32]string{ 0: "LAUNCH_STAGE_UNSPECIFIED", + 6: "UNIMPLEMENTED", + 7: "PRELAUNCH", 1: "EARLY_ACCESS", 2: "ALPHA", 3: "BETA", @@ -93,6 +98,8 @@ var ( } LaunchStage_value = map[string]int32{ "LAUNCH_STAGE_UNSPECIFIED": 0, + "UNIMPLEMENTED": 6, + "PRELAUNCH": 7, "EARLY_ACCESS": 1, "ALPHA": 2, "BETA": 3, @@ -133,20 +140,22 @@ var File_google_api_launch_stage_proto protoreflect.FileDescriptor var file_google_api_launch_stage_proto_rawDesc = []byte{ 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2a, 0x6a, 0x0a, 0x0b, 0x4c, - 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x4c, 0x41, - 0x55, 0x4e, 0x43, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x41, 0x52, 0x4c, - 0x59, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, - 0x50, 0x48, 0x41, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x45, 0x54, 0x41, 0x10, 0x03, 0x12, - 0x06, 0x0a, 0x02, 0x47, 0x41, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, 0x50, 0x52, 0x45, - 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x05, 0x42, 0x5a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x10, 0x4c, 0x61, 0x75, 0x6e, 0x63, - 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2d, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, - 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x61, 0x70, 0x69, 0xa2, 0x02, 0x04, 0x47, - 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2a, 0x8c, 0x01, 0x0a, 0x0b, + 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x4c, + 0x41, 0x55, 0x4e, 0x43, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, + 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, + 0x50, 0x52, 0x45, 0x4c, 0x41, 0x55, 0x4e, 0x43, 0x48, 0x10, 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x45, + 0x41, 0x52, 0x4c, 0x59, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x09, 0x0a, + 0x05, 0x41, 0x4c, 0x50, 0x48, 0x41, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x45, 0x54, 0x41, + 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x41, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, + 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x05, 0x42, 0x5a, 0x0a, 0x0e, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x10, 0x4c, 0x61, + 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 
0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x61, 0x70, 0x69, 0xa2, + 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go index a60c2864d98..5ff44c9d328 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,11 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/api/metric.proto package metric @@ -26,11 +25,11 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - duration "github.com/golang/protobuf/ptypes/duration" api "google.golang.org/genproto/googleapis/api" label "google.golang.org/genproto/googleapis/api/label" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" ) const ( @@ -179,6 +178,18 @@ func (MetricDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { // Defines a metric type and its schema. Once a metric descriptor is created, // deleting or altering it stops data collection and makes the metric type's // existing data unusable. +// +// The following are specific rules for service defined Monitoring metric +// descriptors: +// +// * `type`, `metric_kind`, `value_type`, `description`, `display_name`, +// `launch_stage` fields are all required. The `unit` field must be specified +// if the `value_type` is any of DOUBLE, INT64, DISTRIBUTION. +// * Maximum of default 500 metric descriptors per service is allowed. +// * Maximum of default 10 labels per metric descriptor is allowed. +// +// The default maximum limit can be overridden. Please follow +// https://cloud.google.com/monitoring/quotas type MetricDescriptor struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -187,16 +198,39 @@ type MetricDescriptor struct { // The resource name of the metric descriptor. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The metric type, including its DNS name prefix. The type is not - // URL-encoded. All user-defined metric types have the DNS name - // `custom.googleapis.com` or `external.googleapis.com`. Metric types should - // use a natural hierarchical grouping. For example: + // URL-encoded. + // + // All service defined metrics must be prefixed with the service name, in the + // format of `{service name}/{relative metric name}`, such as + // `cloudsql.googleapis.com/database/cpu/utilization`. 
The relative metric + // name must follow: + // + // * Only upper and lower-case letters, digits, '/' and underscores '_' are + // allowed. + // * The maximum number of characters allowed for the relative_metric_name is + // 100. + // + // All user-defined metric types have the DNS name + // `custom.googleapis.com`, `external.googleapis.com`, or + // `logging.googleapis.com/user/`. + // + // Metric types should use a natural hierarchical grouping. For example: // // "custom.googleapis.com/invoice/paid/amount" // "external.googleapis.com/prometheus/up" // "appengine.googleapis.com/http/server/response_latencies" Type string `protobuf:"bytes,8,opt,name=type,proto3" json:"type,omitempty"` // The set of labels that can be used to describe a specific - // instance of this metric type. For example, the + // instance of this metric type. + // + // The label key name must follow: + // + // * Only upper and lower-case letters, digits and underscores (_) are + // allowed. + // * Label name must start with a letter or digit. + // * The maximum length of a label name is 100 characters. + // + // For example, the // `appengine.googleapis.com/http/server/response_latencies` metric // type has a label for the HTTP response code, `response_code`, so // you can look at latencies for successful responses or just @@ -223,7 +257,7 @@ type MetricDescriptor struct { // `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 // CPU-seconds, then the value is written as `12005`. // - // Alternatively, if you want a custome metric to record data in a more + // Alternatively, if you want a custom metric to record data in a more // granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is // `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), // or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). @@ -239,6 +273,7 @@ type MetricDescriptor struct { // * `min` minute // * `h` hour // * `d` day + // * `1` dimensionless // // **Prefixes (PREFIX)** // @@ -309,7 +344,6 @@ type MetricDescriptor struct { // * `10^2.%` indicates a metric contains a ratio, typically in the range // 0..1, that will be multiplied by 100 and displayed as a percentage // (so a metric value `0.03` means "3 percent"). - // Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` // A detailed description of the metric, which can be used in documentation. Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` @@ -322,6 +356,12 @@ type MetricDescriptor struct { Metadata *MetricDescriptor_MetricDescriptorMetadata `protobuf:"bytes,10,opt,name=metadata,proto3" json:"metadata,omitempty"` // Optional. The launch stage of the metric definition. LaunchStage api.LaunchStage `protobuf:"varint,12,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + // Read-only. If present, then a [time + // series][google.monitoring.v3.TimeSeries], which is identified partially by + // a metric type and a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor], that is associated + // with this metric type can only be associated with one of the monitored + // resource types listed here. 
+ MonitoredResourceTypes []string `protobuf:"bytes,13,rep,name=monitored_resource_types,json=monitoredResourceTypes,proto3" json:"monitored_resource_types,omitempty"` } func (x *MetricDescriptor) Reset() { @@ -426,6 +466,13 @@ func (x *MetricDescriptor) GetLaunchStage() api.LaunchStage { return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED } +func (x *MetricDescriptor) GetMonitoredResourceTypes() []string { + if x != nil { + return x.MonitoredResourceTypes + } + return nil +} + // A specific metric, identified by specifying values for all of the // labels of a [`MetricDescriptor`][google.api.MetricDescriptor]. type Metric struct { @@ -493,8 +540,7 @@ type MetricDescriptor_MetricDescriptorMetadata struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Deprecated. Please use the MetricDescriptor.launch_stage instead. - // The launch stage of the metric definition. + // Deprecated. Must use the [MetricDescriptor.launch_stage][google.api.MetricDescriptor.launch_stage] instead. // // Deprecated: Do not use. LaunchStage api.LaunchStage `protobuf:"varint,1,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` @@ -502,11 +548,11 @@ type MetricDescriptor_MetricDescriptorMetadata struct { // periodically, consecutive data points are stored at this time interval, // excluding data loss due to errors. Metrics with a higher granularity have // a smaller sampling period. - SamplePeriod *duration.Duration `protobuf:"bytes,2,opt,name=sample_period,json=samplePeriod,proto3" json:"sample_period,omitempty"` + SamplePeriod *durationpb.Duration `protobuf:"bytes,2,opt,name=sample_period,json=samplePeriod,proto3" json:"sample_period,omitempty"` // The delay of data points caused by ingestion. Data points older than this // age are guaranteed to be ingested and available to be read, excluding // data loss due to errors. 
- IngestDelay *duration.Duration `protobuf:"bytes,3,opt,name=ingest_delay,json=ingestDelay,proto3" json:"ingest_delay,omitempty"` + IngestDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=ingest_delay,json=ingestDelay,proto3" json:"ingest_delay,omitempty"` } func (x *MetricDescriptor_MetricDescriptorMetadata) Reset() { @@ -549,14 +595,14 @@ func (x *MetricDescriptor_MetricDescriptorMetadata) GetLaunchStage() api.LaunchS return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED } -func (x *MetricDescriptor_MetricDescriptorMetadata) GetSamplePeriod() *duration.Duration { +func (x *MetricDescriptor_MetricDescriptorMetadata) GetSamplePeriod() *durationpb.Duration { if x != nil { return x.SamplePeriod } return nil } -func (x *MetricDescriptor_MetricDescriptorMetadata) GetIngestDelay() *duration.Duration { +func (x *MetricDescriptor_MetricDescriptorMetadata) GetIngestDelay() *durationpb.Duration { if x != nil { return x.IngestDelay } @@ -573,7 +619,7 @@ var file_google_api_metric_proto_rawDesc = []byte{ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x07, 0x0a, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x07, 0x0a, 0x10, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, @@ -604,49 +650,52 @@ var file_google_api_metric_proto_rawDesc = []byte{ 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, - 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x1a, 0xd8, 0x01, 0x0a, 0x18, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x3e, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, - 0x74, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, - 0x61, 0x67, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, - 0x74, 0x61, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x0d, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x70, - 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x50, 0x65, - 0x72, 0x69, 0x6f, 0x64, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x5f, 0x64, - 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x44, 0x65, 0x6c, - 0x61, 0x79, 0x22, 0x4f, 0x0a, 0x0a, 
0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, - 0x12, 0x1b, 0x0a, 0x17, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, - 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x4c, 0x54, - 0x41, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x55, 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, - 0x45, 0x10, 0x03, 0x22, 0x71, 0x0a, 0x09, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x1a, 0x0a, 0x16, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, - 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, - 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x0a, 0x0a, - 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x49, 0x53, - 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x4d, - 0x4f, 0x4e, 0x45, 0x59, 0x10, 0x06, 0x22, 0x8f, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x36, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, - 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x5f, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x3b, 0x6d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x38, 0x0a, 0x18, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x73, 0x1a, 0xd8, 0x01, 0x0a, 0x18, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3e, + 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 
0x74, 0x61, 0x67, 0x65, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x3e, + 0x0a, 0x0d, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x3c, + 0x0a, 0x0c, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0b, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x22, 0x4f, 0x0a, 0x0a, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1b, 0x0a, 0x17, 0x4d, 0x45, + 0x54, 0x52, 0x49, 0x43, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, + 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x02, 0x12, 0x0e, 0x0a, + 0x0a, 0x43, 0x55, 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, 0x45, 0x10, 0x03, 0x22, 0x71, 0x0a, + 0x09, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x56, 0x41, + 0x4c, 0x55, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, + 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x44, + 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, + 0x47, 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x49, 0x53, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, + 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x45, 0x59, 0x10, 0x06, + 0x22, 0x8f, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x36, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x42, 0x5f, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x3b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0xa2, 
0x02, 0x04, 0x47, + 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -672,7 +721,7 @@ var file_google_api_metric_proto_goTypes = []interface{}{ nil, // 5: google.api.Metric.LabelsEntry (*label.LabelDescriptor)(nil), // 6: google.api.LabelDescriptor (api.LaunchStage)(0), // 7: google.api.LaunchStage - (*duration.Duration)(nil), // 8: google.protobuf.Duration + (*durationpb.Duration)(nil), // 8: google.protobuf.Duration } var file_google_api_metric_proto_depIdxs = []int32{ 6, // 0: google.api.MetricDescriptor.labels:type_name -> google.api.LabelDescriptor diff --git a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go index 99efd4f3279..6e52cc3626d 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,11 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/api/monitored_resource.proto package monitoredres @@ -26,11 +25,11 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - _struct "github.com/golang/protobuf/ptypes/struct" api "google.golang.org/genproto/googleapis/api" label "google.golang.org/genproto/googleapis/api/label" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" ) const ( @@ -50,9 +49,24 @@ const _ = proto.ProtoPackageIsVersion4 // `"gce_instance"` and specifies the use of the labels `"instance_id"` and // `"zone"` to identify particular VM instances. // -// Different APIs can support different monitored resource types. APIs generally -// provide a `list` method that returns the monitored resource descriptors used -// by the API. +// Different services can support different monitored resource types. +// +// The following are specific rules to service defined monitored resources for +// Monitoring and Logging: +// +// * The `type`, `display_name`, `description`, `labels` and `launch_stage` +// fields are all required. +// * The first label of the monitored resource descriptor must be +// `resource_container`. There are legacy monitored resource descritptors +// start with `project_id`. +// * It must include a `location` label. +// * Maximum of default 5 service defined monitored resource descriptors +// is allowed per service. +// * Maximum of default 10 labels per monitored resource is allowed. +// +// The default maximum limit can be overridden. Please follow +// https://cloud.google.com/monitoring/quotas +// type MonitoredResourceDescriptor struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -66,8 +80,19 @@ type MonitoredResourceDescriptor struct { // resource name format `"monitoredResourceDescriptors/{type}"`. Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` // Required. 
The monitored resource type. For example, the type - // `"cloudsql_database"` represents databases in Google Cloud SQL. - // The maximum length of this value is 256 characters. + // `cloudsql_database` represents databases in Google Cloud SQL. + // + // All service defined monitored resource types must be prefixed with the + // service name, in the format of `{service name}/{relative resource name}`. + // The relative resource name must follow: + // + // * Only upper and lower-case letters and digits are allowed. + // * It must start with upper case character and is recommended to use Upper + // Camel Case style. + // * The maximum number of characters allowed for the relative_resource_name + // is 100. + // + // Note there are legacy service monitored resources not following this rule. Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Optional. A concise name for the monitored resource type that might be // displayed in user interfaces. It should be a Title Cased Noun Phrase, @@ -78,8 +103,16 @@ type MonitoredResourceDescriptor struct { // be used in documentation. Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` // Required. A set of labels used to describe instances of this monitored - // resource type. For example, an individual Google Cloud SQL database is - // identified by values for the labels `"database_id"` and `"zone"`. + // resource type. + // The label key name must follow: + // + // * Only upper and lower-case letters, digits and underscores (_) are + // allowed. + // * Label name must start with a letter or digit. + // * The maximum length of a label name is 100 characters. + // + // For example, an individual Google Cloud SQL database is + // identified by values for the labels `database_id` and `location`. Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"` // Optional. The launch stage of the monitored resource definition. LaunchStage api.LaunchStage `protobuf:"varint,7,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` @@ -254,7 +287,7 @@ type MonitoredResourceMetadata struct { // { "name": "my-test-instance", // "security_group": ["a", "b", "c"], // "spot_instance": false } - SystemLabels *_struct.Struct `protobuf:"bytes,1,opt,name=system_labels,json=systemLabels,proto3" json:"system_labels,omitempty"` + SystemLabels *structpb.Struct `protobuf:"bytes,1,opt,name=system_labels,json=systemLabels,proto3" json:"system_labels,omitempty"` // Output only. A map of user-defined metadata labels. 
UserLabels map[string]string `protobuf:"bytes,2,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -291,7 +324,7 @@ func (*MonitoredResourceMetadata) Descriptor() ([]byte, []int) { return file_google_api_monitored_resource_proto_rawDescGZIP(), []int{2} } -func (x *MonitoredResourceMetadata) GetSystemLabels() *_struct.Struct { +func (x *MonitoredResourceMetadata) GetSystemLabels() *structpb.Struct { if x != nil { return x.SystemLabels } @@ -390,7 +423,7 @@ var file_google_api_monitored_resource_proto_goTypes = []interface{}{ nil, // 4: google.api.MonitoredResourceMetadata.UserLabelsEntry (*label.LabelDescriptor)(nil), // 5: google.api.LabelDescriptor (api.LaunchStage)(0), // 6: google.api.LaunchStage - (*_struct.Struct)(nil), // 7: google.protobuf.Struct + (*structpb.Struct)(nil), // 7: google.protobuf.Struct } var file_google_api_monitored_resource_proto_depIdxs = []int32{ 5, // 0: google.api.MonitoredResourceDescriptor.labels:type_name -> google.api.LabelDescriptor diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go index 1320ad06bf2..5cedf042c32 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go @@ -15,8 +15,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/bigtable/admin/v2/bigtable_instance_admin.proto package admin @@ -27,17 +27,17 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - empty "github.com/golang/protobuf/ptypes/empty" - timestamp "github.com/golang/protobuf/ptypes/timestamp" _ "google.golang.org/genproto/googleapis/api/annotations" v1 "google.golang.org/genproto/googleapis/iam/v1" longrunning "google.golang.org/genproto/googleapis/longrunning" - field_mask "google.golang.org/genproto/protobuf/field_mask" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -326,7 +326,7 @@ type PartialUpdateInstanceRequest struct { Instance *Instance `protobuf:"bytes,1,opt,name=instance,proto3" json:"instance,omitempty"` // Required. The subset of Instance fields which should be replaced. // Must be explicitly set. 
- UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } func (x *PartialUpdateInstanceRequest) Reset() { @@ -368,7 +368,7 @@ func (x *PartialUpdateInstanceRequest) GetInstance() *Instance { return nil } -func (x *PartialUpdateInstanceRequest) GetUpdateMask() *field_mask.FieldMask { +func (x *PartialUpdateInstanceRequest) GetUpdateMask() *fieldmaskpb.FieldMask { if x != nil { return x.UpdateMask } @@ -738,9 +738,9 @@ type CreateInstanceMetadata struct { // The request that prompted the initiation of this CreateInstance operation. OriginalRequest *CreateInstanceRequest `protobuf:"bytes,1,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` // The time at which the original request was received. - RequestTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"` + RequestTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"` // The time at which the operation failed or was completed successfully. - FinishTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime,proto3" json:"finish_time,omitempty"` + FinishTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime,proto3" json:"finish_time,omitempty"` } func (x *CreateInstanceMetadata) Reset() { @@ -782,14 +782,14 @@ func (x *CreateInstanceMetadata) GetOriginalRequest() *CreateInstanceRequest { return nil } -func (x *CreateInstanceMetadata) GetRequestTime() *timestamp.Timestamp { +func (x *CreateInstanceMetadata) GetRequestTime() *timestamppb.Timestamp { if x != nil { return x.RequestTime } return nil } -func (x *CreateInstanceMetadata) GetFinishTime() *timestamp.Timestamp { +func (x *CreateInstanceMetadata) GetFinishTime() *timestamppb.Timestamp { if x != nil { return x.FinishTime } @@ -805,9 +805,9 @@ type UpdateInstanceMetadata struct { // The request that prompted the initiation of this UpdateInstance operation. OriginalRequest *PartialUpdateInstanceRequest `protobuf:"bytes,1,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` // The time at which the original request was received. - RequestTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"` + RequestTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"` // The time at which the operation failed or was completed successfully. 
- FinishTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime,proto3" json:"finish_time,omitempty"` + FinishTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime,proto3" json:"finish_time,omitempty"` } func (x *UpdateInstanceMetadata) Reset() { @@ -849,14 +849,14 @@ func (x *UpdateInstanceMetadata) GetOriginalRequest() *PartialUpdateInstanceRequ return nil } -func (x *UpdateInstanceMetadata) GetRequestTime() *timestamp.Timestamp { +func (x *UpdateInstanceMetadata) GetRequestTime() *timestamppb.Timestamp { if x != nil { return x.RequestTime } return nil } -func (x *UpdateInstanceMetadata) GetFinishTime() *timestamp.Timestamp { +func (x *UpdateInstanceMetadata) GetFinishTime() *timestamppb.Timestamp { if x != nil { return x.FinishTime } @@ -872,9 +872,9 @@ type CreateClusterMetadata struct { // The request that prompted the initiation of this CreateCluster operation. OriginalRequest *CreateClusterRequest `protobuf:"bytes,1,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` // The time at which the original request was received. - RequestTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"` + RequestTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"` // The time at which the operation failed or was completed successfully. - FinishTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime,proto3" json:"finish_time,omitempty"` + FinishTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime,proto3" json:"finish_time,omitempty"` } func (x *CreateClusterMetadata) Reset() { @@ -916,14 +916,14 @@ func (x *CreateClusterMetadata) GetOriginalRequest() *CreateClusterRequest { return nil } -func (x *CreateClusterMetadata) GetRequestTime() *timestamp.Timestamp { +func (x *CreateClusterMetadata) GetRequestTime() *timestamppb.Timestamp { if x != nil { return x.RequestTime } return nil } -func (x *CreateClusterMetadata) GetFinishTime() *timestamp.Timestamp { +func (x *CreateClusterMetadata) GetFinishTime() *timestamppb.Timestamp { if x != nil { return x.FinishTime } @@ -939,9 +939,9 @@ type UpdateClusterMetadata struct { // The request that prompted the initiation of this UpdateCluster operation. OriginalRequest *Cluster `protobuf:"bytes,1,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` // The time at which the original request was received. - RequestTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"` + RequestTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"` // The time at which the operation failed or was completed successfully. 
- FinishTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime,proto3" json:"finish_time,omitempty"` + FinishTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime,proto3" json:"finish_time,omitempty"` } func (x *UpdateClusterMetadata) Reset() { @@ -983,14 +983,14 @@ func (x *UpdateClusterMetadata) GetOriginalRequest() *Cluster { return nil } -func (x *UpdateClusterMetadata) GetRequestTime() *timestamp.Timestamp { +func (x *UpdateClusterMetadata) GetRequestTime() *timestamppb.Timestamp { if x != nil { return x.RequestTime } return nil } -func (x *UpdateClusterMetadata) GetFinishTime() *timestamp.Timestamp { +func (x *UpdateClusterMetadata) GetFinishTime() *timestamppb.Timestamp { if x != nil { return x.FinishTime } @@ -1289,7 +1289,7 @@ type UpdateAppProfileRequest struct { AppProfile *AppProfile `protobuf:"bytes,1,opt,name=app_profile,json=appProfile,proto3" json:"app_profile,omitempty"` // Required. The subset of app profile fields which should be replaced. // If unset, all fields will be replaced. - UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` // If true, ignore safety checks when updating the app profile. IgnoreWarnings bool `protobuf:"varint,3,opt,name=ignore_warnings,json=ignoreWarnings,proto3" json:"ignore_warnings,omitempty"` } @@ -1333,7 +1333,7 @@ func (x *UpdateAppProfileRequest) GetAppProfile() *AppProfile { return nil } -func (x *UpdateAppProfileRequest) GetUpdateMask() *field_mask.FieldMask { +func (x *UpdateAppProfileRequest) GetUpdateMask() *fieldmaskpb.FieldMask { if x != nil { return x.UpdateMask } @@ -1948,7 +1948,7 @@ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc = []byte 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, - 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xbd, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, + 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xe2, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x1a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, @@ -1960,7 +1960,10 @@ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc = []byte 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, - 0x69, 0x6e, 0x5c, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, + 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -2001,15 +2004,15 @@ var 
file_google_bigtable_admin_v2_bigtable_instance_admin_proto_goTypes = []inte (*UpdateAppProfileMetadata)(nil), // 21: google.bigtable.admin.v2.UpdateAppProfileMetadata nil, // 22: google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry (*Instance)(nil), // 23: google.bigtable.admin.v2.Instance - (*field_mask.FieldMask)(nil), // 24: google.protobuf.FieldMask + (*fieldmaskpb.FieldMask)(nil), // 24: google.protobuf.FieldMask (*Cluster)(nil), // 25: google.bigtable.admin.v2.Cluster - (*timestamp.Timestamp)(nil), // 26: google.protobuf.Timestamp + (*timestamppb.Timestamp)(nil), // 26: google.protobuf.Timestamp (*AppProfile)(nil), // 27: google.bigtable.admin.v2.AppProfile (*v1.GetIamPolicyRequest)(nil), // 28: google.iam.v1.GetIamPolicyRequest (*v1.SetIamPolicyRequest)(nil), // 29: google.iam.v1.SetIamPolicyRequest (*v1.TestIamPermissionsRequest)(nil), // 30: google.iam.v1.TestIamPermissionsRequest (*longrunning.Operation)(nil), // 31: google.longrunning.Operation - (*empty.Empty)(nil), // 32: google.protobuf.Empty + (*emptypb.Empty)(nil), // 32: google.protobuf.Empty (*v1.Policy)(nil), // 33: google.iam.v1.Policy (*v1.TestIamPermissionsResponse)(nil), // 34: google.iam.v1.TestIamPermissionsResponse } @@ -2401,7 +2404,7 @@ type BigtableInstanceAdminClient interface { // fields of an Instance and is the preferred way to update an Instance. PartialUpdateInstance(ctx context.Context, in *PartialUpdateInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) // Delete an instance from a project. - DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Creates a cluster within an instance. CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) // Gets information about a cluster. @@ -2411,7 +2414,7 @@ type BigtableInstanceAdminClient interface { // Updates a cluster within an instance. UpdateCluster(ctx context.Context, in *Cluster, opts ...grpc.CallOption) (*longrunning.Operation, error) // Deletes a cluster from an instance. - DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Creates an app profile within an instance. CreateAppProfile(ctx context.Context, in *CreateAppProfileRequest, opts ...grpc.CallOption) (*AppProfile, error) // Gets information about an app profile. @@ -2421,7 +2424,7 @@ type BigtableInstanceAdminClient interface { // Updates an app profile within an instance. UpdateAppProfile(ctx context.Context, in *UpdateAppProfileRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) // Deletes an app profile from an instance. - DeleteAppProfile(ctx context.Context, in *DeleteAppProfileRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteAppProfile(ctx context.Context, in *DeleteAppProfileRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Gets the access control policy for an instance resource. Returns an empty // policy if an instance exists but does not have a policy set. 
GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) @@ -2485,8 +2488,8 @@ func (c *bigtableInstanceAdminClient) PartialUpdateInstance(ctx context.Context, return out, nil } -func (c *bigtableInstanceAdminClient) DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *bigtableInstanceAdminClient) DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", in, out, opts...) if err != nil { return nil, err @@ -2530,8 +2533,8 @@ func (c *bigtableInstanceAdminClient) UpdateCluster(ctx context.Context, in *Clu return out, nil } -func (c *bigtableInstanceAdminClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *bigtableInstanceAdminClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", in, out, opts...) if err != nil { return nil, err @@ -2575,8 +2578,8 @@ func (c *bigtableInstanceAdminClient) UpdateAppProfile(ctx context.Context, in * return out, nil } -func (c *bigtableInstanceAdminClient) DeleteAppProfile(ctx context.Context, in *DeleteAppProfileRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *bigtableInstanceAdminClient) DeleteAppProfile(ctx context.Context, in *DeleteAppProfileRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", in, out, opts...) if err != nil { return nil, err @@ -2627,7 +2630,7 @@ type BigtableInstanceAdminServer interface { // fields of an Instance and is the preferred way to update an Instance. PartialUpdateInstance(context.Context, *PartialUpdateInstanceRequest) (*longrunning.Operation, error) // Delete an instance from a project. - DeleteInstance(context.Context, *DeleteInstanceRequest) (*empty.Empty, error) + DeleteInstance(context.Context, *DeleteInstanceRequest) (*emptypb.Empty, error) // Creates a cluster within an instance. CreateCluster(context.Context, *CreateClusterRequest) (*longrunning.Operation, error) // Gets information about a cluster. @@ -2637,7 +2640,7 @@ type BigtableInstanceAdminServer interface { // Updates a cluster within an instance. UpdateCluster(context.Context, *Cluster) (*longrunning.Operation, error) // Deletes a cluster from an instance. - DeleteCluster(context.Context, *DeleteClusterRequest) (*empty.Empty, error) + DeleteCluster(context.Context, *DeleteClusterRequest) (*emptypb.Empty, error) // Creates an app profile within an instance. CreateAppProfile(context.Context, *CreateAppProfileRequest) (*AppProfile, error) // Gets information about an app profile. @@ -2647,7 +2650,7 @@ type BigtableInstanceAdminServer interface { // Updates an app profile within an instance. UpdateAppProfile(context.Context, *UpdateAppProfileRequest) (*longrunning.Operation, error) // Deletes an app profile from an instance. 
- DeleteAppProfile(context.Context, *DeleteAppProfileRequest) (*empty.Empty, error) + DeleteAppProfile(context.Context, *DeleteAppProfileRequest) (*emptypb.Empty, error) // Gets the access control policy for an instance resource. Returns an empty // policy if an instance exists but does not have a policy set. GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) @@ -2677,7 +2680,7 @@ func (*UnimplementedBigtableInstanceAdminServer) UpdateInstance(context.Context, func (*UnimplementedBigtableInstanceAdminServer) PartialUpdateInstance(context.Context, *PartialUpdateInstanceRequest) (*longrunning.Operation, error) { return nil, status.Errorf(codes.Unimplemented, "method PartialUpdateInstance not implemented") } -func (*UnimplementedBigtableInstanceAdminServer) DeleteInstance(context.Context, *DeleteInstanceRequest) (*empty.Empty, error) { +func (*UnimplementedBigtableInstanceAdminServer) DeleteInstance(context.Context, *DeleteInstanceRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteInstance not implemented") } func (*UnimplementedBigtableInstanceAdminServer) CreateCluster(context.Context, *CreateClusterRequest) (*longrunning.Operation, error) { @@ -2692,7 +2695,7 @@ func (*UnimplementedBigtableInstanceAdminServer) ListClusters(context.Context, * func (*UnimplementedBigtableInstanceAdminServer) UpdateCluster(context.Context, *Cluster) (*longrunning.Operation, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateCluster not implemented") } -func (*UnimplementedBigtableInstanceAdminServer) DeleteCluster(context.Context, *DeleteClusterRequest) (*empty.Empty, error) { +func (*UnimplementedBigtableInstanceAdminServer) DeleteCluster(context.Context, *DeleteClusterRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteCluster not implemented") } func (*UnimplementedBigtableInstanceAdminServer) CreateAppProfile(context.Context, *CreateAppProfileRequest) (*AppProfile, error) { @@ -2707,7 +2710,7 @@ func (*UnimplementedBigtableInstanceAdminServer) ListAppProfiles(context.Context func (*UnimplementedBigtableInstanceAdminServer) UpdateAppProfile(context.Context, *UpdateAppProfileRequest) (*longrunning.Operation, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateAppProfile not implemented") } -func (*UnimplementedBigtableInstanceAdminServer) DeleteAppProfile(context.Context, *DeleteAppProfileRequest) (*empty.Empty, error) { +func (*UnimplementedBigtableInstanceAdminServer) DeleteAppProfile(context.Context, *DeleteAppProfileRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteAppProfile not implemented") } func (*UnimplementedBigtableInstanceAdminServer) GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) { diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go index 3ef3eba2f12..f4986711024 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/bigtable/admin/v2/bigtable_table_admin.proto package admin @@ -26,18 +26,18 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - duration "github.com/golang/protobuf/ptypes/duration" - empty "github.com/golang/protobuf/ptypes/empty" - timestamp "github.com/golang/protobuf/ptypes/timestamp" _ "google.golang.org/genproto/googleapis/api/annotations" v1 "google.golang.org/genproto/googleapis/iam/v1" longrunning "google.golang.org/genproto/googleapis/longrunning" - field_mask "google.golang.org/genproto/protobuf/field_mask" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -891,7 +891,7 @@ type SnapshotTableRequest struct { // created. Once 'ttl' expires, the snapshot will get deleted. The maximum // amount of time a snapshot can stay active is 7 days. If 'ttl' is not // specified, the default value of 24 hours will be used. - Ttl *duration.Duration `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` + Ttl *durationpb.Duration `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` // Description of the snapshot. Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` } @@ -949,7 +949,7 @@ func (x *SnapshotTableRequest) GetSnapshotId() string { return "" } -func (x *SnapshotTableRequest) GetTtl() *duration.Duration { +func (x *SnapshotTableRequest) GetTtl() *durationpb.Duration { if x != nil { return x.Ttl } @@ -1235,9 +1235,9 @@ type SnapshotTableMetadata struct { // The request that prompted the initiation of this SnapshotTable operation. OriginalRequest *SnapshotTableRequest `protobuf:"bytes,1,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` // The time at which the original request was received. - RequestTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"` + RequestTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"` // The time at which the operation failed or was completed successfully. - FinishTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime,proto3" json:"finish_time,omitempty"` + FinishTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime,proto3" json:"finish_time,omitempty"` } func (x *SnapshotTableMetadata) Reset() { @@ -1279,14 +1279,14 @@ func (x *SnapshotTableMetadata) GetOriginalRequest() *SnapshotTableRequest { return nil } -func (x *SnapshotTableMetadata) GetRequestTime() *timestamp.Timestamp { +func (x *SnapshotTableMetadata) GetRequestTime() *timestamppb.Timestamp { if x != nil { return x.RequestTime } return nil } -func (x *SnapshotTableMetadata) GetFinishTime() *timestamp.Timestamp { +func (x *SnapshotTableMetadata) GetFinishTime() *timestamppb.Timestamp { if x != nil { return x.FinishTime } @@ -1308,9 +1308,9 @@ type CreateTableFromSnapshotMetadata struct { // operation. 
OriginalRequest *CreateTableFromSnapshotRequest `protobuf:"bytes,1,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` // The time at which the original request was received. - RequestTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"` + RequestTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"` // The time at which the operation failed or was completed successfully. - FinishTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime,proto3" json:"finish_time,omitempty"` + FinishTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime,proto3" json:"finish_time,omitempty"` } func (x *CreateTableFromSnapshotMetadata) Reset() { @@ -1352,14 +1352,14 @@ func (x *CreateTableFromSnapshotMetadata) GetOriginalRequest() *CreateTableFromS return nil } -func (x *CreateTableFromSnapshotMetadata) GetRequestTime() *timestamp.Timestamp { +func (x *CreateTableFromSnapshotMetadata) GetRequestTime() *timestamppb.Timestamp { if x != nil { return x.RequestTime } return nil } -func (x *CreateTableFromSnapshotMetadata) GetFinishTime() *timestamp.Timestamp { +func (x *CreateTableFromSnapshotMetadata) GetFinishTime() *timestamppb.Timestamp { if x != nil { return x.FinishTime } @@ -1453,9 +1453,9 @@ type CreateBackupMetadata struct { // The name of the table the backup is created from. SourceTable string `protobuf:"bytes,2,opt,name=source_table,json=sourceTable,proto3" json:"source_table,omitempty"` // The time at which this operation started. - StartTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + StartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` // If set, the time at which this operation finished or was cancelled. - EndTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + EndTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` } func (x *CreateBackupMetadata) Reset() { @@ -1504,14 +1504,14 @@ func (x *CreateBackupMetadata) GetSourceTable() string { return "" } -func (x *CreateBackupMetadata) GetStartTime() *timestamp.Timestamp { +func (x *CreateBackupMetadata) GetStartTime() *timestamppb.Timestamp { if x != nil { return x.StartTime } return nil } -func (x *CreateBackupMetadata) GetEndTime() *timestamp.Timestamp { +func (x *CreateBackupMetadata) GetEndTime() *timestamppb.Timestamp { if x != nil { return x.EndTime } @@ -1587,7 +1587,7 @@ type UpdateBackupRequest struct { // resource, not to the request message. The field mask must always be // specified; this prevents any future fields from being erased accidentally // by clients that do not know about them. 
- UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } func (x *UpdateBackupRequest) Reset() { @@ -1629,7 +1629,7 @@ func (x *UpdateBackupRequest) GetBackup() *Backup { return nil } -func (x *UpdateBackupRequest) GetUpdateMask() *field_mask.FieldMask { +func (x *UpdateBackupRequest) GetUpdateMask() *fieldmaskpb.FieldMask { if x != nil { return x.UpdateMask } @@ -2961,7 +2961,7 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, - 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xba, 0x01, 0x0a, + 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xdf, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x17, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x64, 0x6d, 0x69, @@ -2973,8 +2973,10 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3024,11 +3026,11 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_goTypes = []interfa (*ModifyColumnFamiliesRequest_Modification)(nil), // 30: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification (*Table)(nil), // 31: google.bigtable.admin.v2.Table (Table_View)(0), // 32: google.bigtable.admin.v2.Table.View - (*duration.Duration)(nil), // 33: google.protobuf.Duration + (*durationpb.Duration)(nil), // 33: google.protobuf.Duration (*Snapshot)(nil), // 34: google.bigtable.admin.v2.Snapshot - (*timestamp.Timestamp)(nil), // 35: google.protobuf.Timestamp + (*timestamppb.Timestamp)(nil), // 35: google.protobuf.Timestamp (*Backup)(nil), // 36: google.bigtable.admin.v2.Backup - (*field_mask.FieldMask)(nil), // 37: google.protobuf.FieldMask + (*fieldmaskpb.FieldMask)(nil), // 37: google.protobuf.FieldMask (RestoreSourceType)(0), // 38: google.bigtable.admin.v2.RestoreSourceType (*BackupInfo)(nil), // 39: google.bigtable.admin.v2.BackupInfo (*OperationProgress)(nil), // 40: google.bigtable.admin.v2.OperationProgress @@ -3037,7 +3039,7 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_goTypes = []interfa (*v1.SetIamPolicyRequest)(nil), // 43: google.iam.v1.SetIamPolicyRequest (*v1.TestIamPermissionsRequest)(nil), // 44: 
google.iam.v1.TestIamPermissionsRequest (*longrunning.Operation)(nil), // 45: google.longrunning.Operation - (*empty.Empty)(nil), // 46: google.protobuf.Empty + (*emptypb.Empty)(nil), // 46: google.protobuf.Empty (*v1.Policy)(nil), // 47: google.iam.v1.Policy (*v1.TestIamPermissionsResponse)(nil), // 48: google.iam.v1.TestIamPermissionsResponse } @@ -3565,7 +3567,7 @@ type BigtableTableAdminClient interface { // Gets metadata information about the specified table. GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*Table, error) // Permanently deletes a specified table and all of its data. - DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Performs a series of column family modifications on the specified table. // Either all or none of the modifications will occur before this method // returns, but data requests received prior to that point may see a table @@ -3574,7 +3576,7 @@ type BigtableTableAdminClient interface { // Permanently drop/delete a row range from a specified table. The request can // specify whether to delete all rows in a table, or only those that match a // particular prefix. - DropRowRange(ctx context.Context, in *DropRowRangeRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DropRowRange(ctx context.Context, in *DropRowRangeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Generates a consistency token for a Table, which can be used in // CheckConsistency to check whether mutations to the table that finished // before this call started have been replicated. The tokens will be available @@ -3616,7 +3618,7 @@ type BigtableTableAdminClient interface { // feature might be changed in backward-incompatible ways and is not // recommended for production use. It is not subject to any SLA or deprecation // policy. - DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Starts creating a new Cloud Bigtable Backup. The returned backup // [long-running operation][google.longrunning.Operation] can be used to // track creation of the backup. The @@ -3631,7 +3633,7 @@ type BigtableTableAdminClient interface { // Updates a pending or completed Cloud Bigtable Backup. UpdateBackup(ctx context.Context, in *UpdateBackupRequest, opts ...grpc.CallOption) (*Backup, error) // Deletes a pending or completed Cloud Bigtable backup. - DeleteBackup(ctx context.Context, in *DeleteBackupRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteBackup(ctx context.Context, in *DeleteBackupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Lists Cloud Bigtable backups. Returns both completed and pending // backups. 
ListBackups(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*ListBackupsResponse, error) @@ -3699,8 +3701,8 @@ func (c *bigtableTableAdminClient) GetTable(ctx context.Context, in *GetTableReq return out, nil } -func (c *bigtableTableAdminClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *bigtableTableAdminClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", in, out, opts...) if err != nil { return nil, err @@ -3717,8 +3719,8 @@ func (c *bigtableTableAdminClient) ModifyColumnFamilies(ctx context.Context, in return out, nil } -func (c *bigtableTableAdminClient) DropRowRange(ctx context.Context, in *DropRowRangeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *bigtableTableAdminClient) DropRowRange(ctx context.Context, in *DropRowRangeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", in, out, opts...) if err != nil { return nil, err @@ -3771,8 +3773,8 @@ func (c *bigtableTableAdminClient) ListSnapshots(ctx context.Context, in *ListSn return out, nil } -func (c *bigtableTableAdminClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *bigtableTableAdminClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", in, out, opts...) if err != nil { return nil, err @@ -3807,8 +3809,8 @@ func (c *bigtableTableAdminClient) UpdateBackup(ctx context.Context, in *UpdateB return out, nil } -func (c *bigtableTableAdminClient) DeleteBackup(ctx context.Context, in *DeleteBackupRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *bigtableTableAdminClient) DeleteBackup(ctx context.Context, in *DeleteBackupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", in, out, opts...) if err != nil { return nil, err @@ -3881,7 +3883,7 @@ type BigtableTableAdminServer interface { // Gets metadata information about the specified table. GetTable(context.Context, *GetTableRequest) (*Table, error) // Permanently deletes a specified table and all of its data. - DeleteTable(context.Context, *DeleteTableRequest) (*empty.Empty, error) + DeleteTable(context.Context, *DeleteTableRequest) (*emptypb.Empty, error) // Performs a series of column family modifications on the specified table. // Either all or none of the modifications will occur before this method // returns, but data requests received prior to that point may see a table @@ -3890,7 +3892,7 @@ type BigtableTableAdminServer interface { // Permanently drop/delete a row range from a specified table. The request can // specify whether to delete all rows in a table, or only those that match a // particular prefix. 
- DropRowRange(context.Context, *DropRowRangeRequest) (*empty.Empty, error) + DropRowRange(context.Context, *DropRowRangeRequest) (*emptypb.Empty, error) // Generates a consistency token for a Table, which can be used in // CheckConsistency to check whether mutations to the table that finished // before this call started have been replicated. The tokens will be available @@ -3932,7 +3934,7 @@ type BigtableTableAdminServer interface { // feature might be changed in backward-incompatible ways and is not // recommended for production use. It is not subject to any SLA or deprecation // policy. - DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*empty.Empty, error) + DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*emptypb.Empty, error) // Starts creating a new Cloud Bigtable Backup. The returned backup // [long-running operation][google.longrunning.Operation] can be used to // track creation of the backup. The @@ -3947,7 +3949,7 @@ type BigtableTableAdminServer interface { // Updates a pending or completed Cloud Bigtable Backup. UpdateBackup(context.Context, *UpdateBackupRequest) (*Backup, error) // Deletes a pending or completed Cloud Bigtable backup. - DeleteBackup(context.Context, *DeleteBackupRequest) (*empty.Empty, error) + DeleteBackup(context.Context, *DeleteBackupRequest) (*emptypb.Empty, error) // Lists Cloud Bigtable backups. Returns both completed and pending // backups. ListBackups(context.Context, *ListBackupsRequest) (*ListBackupsResponse, error) @@ -3987,13 +3989,13 @@ func (*UnimplementedBigtableTableAdminServer) ListTables(context.Context, *ListT func (*UnimplementedBigtableTableAdminServer) GetTable(context.Context, *GetTableRequest) (*Table, error) { return nil, status.Errorf(codes.Unimplemented, "method GetTable not implemented") } -func (*UnimplementedBigtableTableAdminServer) DeleteTable(context.Context, *DeleteTableRequest) (*empty.Empty, error) { +func (*UnimplementedBigtableTableAdminServer) DeleteTable(context.Context, *DeleteTableRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteTable not implemented") } func (*UnimplementedBigtableTableAdminServer) ModifyColumnFamilies(context.Context, *ModifyColumnFamiliesRequest) (*Table, error) { return nil, status.Errorf(codes.Unimplemented, "method ModifyColumnFamilies not implemented") } -func (*UnimplementedBigtableTableAdminServer) DropRowRange(context.Context, *DropRowRangeRequest) (*empty.Empty, error) { +func (*UnimplementedBigtableTableAdminServer) DropRowRange(context.Context, *DropRowRangeRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DropRowRange not implemented") } func (*UnimplementedBigtableTableAdminServer) GenerateConsistencyToken(context.Context, *GenerateConsistencyTokenRequest) (*GenerateConsistencyTokenResponse, error) { @@ -4011,7 +4013,7 @@ func (*UnimplementedBigtableTableAdminServer) GetSnapshot(context.Context, *GetS func (*UnimplementedBigtableTableAdminServer) ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") } -func (*UnimplementedBigtableTableAdminServer) DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*empty.Empty, error) { +func (*UnimplementedBigtableTableAdminServer) DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteSnapshot not implemented") } func 
(*UnimplementedBigtableTableAdminServer) CreateBackup(context.Context, *CreateBackupRequest) (*longrunning.Operation, error) { @@ -4023,7 +4025,7 @@ func (*UnimplementedBigtableTableAdminServer) GetBackup(context.Context, *GetBac func (*UnimplementedBigtableTableAdminServer) UpdateBackup(context.Context, *UpdateBackupRequest) (*Backup, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateBackup not implemented") } -func (*UnimplementedBigtableTableAdminServer) DeleteBackup(context.Context, *DeleteBackupRequest) (*empty.Empty, error) { +func (*UnimplementedBigtableTableAdminServer) DeleteBackup(context.Context, *DeleteBackupRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteBackup not implemented") } func (*UnimplementedBigtableTableAdminServer) ListBackups(context.Context, *ListBackupsRequest) (*ListBackupsResponse, error) { diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go index 0c9c4f2d286..9910cc5a660 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/bigtable/admin/v2/common.proto package admin @@ -25,9 +25,9 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -105,10 +105,10 @@ type OperationProgress struct { // Values are between 0 and 100 inclusive. ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"` // Time the request was received. - StartTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` // If set, the time at which this operation failed or was completed // successfully. 
- EndTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + EndTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` } func (x *OperationProgress) Reset() { @@ -150,14 +150,14 @@ func (x *OperationProgress) GetProgressPercent() int32 { return 0 } -func (x *OperationProgress) GetStartTime() *timestamp.Timestamp { +func (x *OperationProgress) GetStartTime() *timestamppb.Timestamp { if x != nil { return x.StartTime } return nil } -func (x *OperationProgress) GetEndTime() *timestamp.Timestamp { +func (x *OperationProgress) GetEndTime() *timestamppb.Timestamp { if x != nil { return x.EndTime } @@ -188,7 +188,7 @@ var file_google_bigtable_admin_v2_common_proto_rawDesc = []byte{ 0x54, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x53, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x48, - 0x44, 0x44, 0x10, 0x02, 0x42, 0xae, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, + 0x44, 0x44, 0x10, 0x02, 0x42, 0xd3, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, @@ -199,7 +199,10 @@ var file_google_bigtable_admin_v2_common_proto_rawDesc = []byte{ 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, - 0x69, 0x6e, 0x5c, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, + 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -217,9 +220,9 @@ func file_google_bigtable_admin_v2_common_proto_rawDescGZIP() []byte { var file_google_bigtable_admin_v2_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_bigtable_admin_v2_common_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_google_bigtable_admin_v2_common_proto_goTypes = []interface{}{ - (StorageType)(0), // 0: google.bigtable.admin.v2.StorageType - (*OperationProgress)(nil), // 1: google.bigtable.admin.v2.OperationProgress - (*timestamp.Timestamp)(nil), // 2: google.protobuf.Timestamp + (StorageType)(0), // 0: google.bigtable.admin.v2.StorageType + (*OperationProgress)(nil), // 1: google.bigtable.admin.v2.OperationProgress + (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp } var file_google_bigtable_admin_v2_common_proto_depIdxs = []int32{ 2, // 0: google.bigtable.admin.v2.OperationProgress.start_time:type_name -> google.protobuf.Timestamp diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go index 8e9452bade2..b56aef84322 100644 --- 
a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go @@ -15,8 +15,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/bigtable/admin/v2/instance.proto package admin @@ -766,7 +766,7 @@ var file_google_bigtable_admin_v2_instance_proto_rawDesc = []byte{ 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x7d, 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x6f, - 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0xb0, 0x01, 0x0a, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0xd5, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0d, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, @@ -777,8 +777,10 @@ var file_google_bigtable_admin_v2_instance_proto_rawDesc = []byte{ 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, + 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, + 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, + 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go index 9578baa200e..ca815e0d4c3 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/bigtable/admin/v2/table.proto package admin @@ -25,11 +25,11 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - duration "github.com/golang/protobuf/ptypes/duration" - timestamp "github.com/golang/protobuf/ptypes/timestamp" _ "google.golang.org/genproto/googleapis/api/annotations" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -682,7 +682,7 @@ func (x *GcRule) GetMaxNumVersions() int32 { return 0 } -func (x *GcRule) GetMaxAge() *duration.Duration { +func (x *GcRule) GetMaxAge() *durationpb.Duration { if x, ok := x.GetRule().(*GcRule_MaxAge); ok { return x.MaxAge } @@ -716,7 +716,7 @@ type GcRule_MaxAge struct { // Delete cells in a column older than the given age. // Values must be at least one millisecond, and will be truncated to // microsecond granularity. - MaxAge *duration.Duration `protobuf:"bytes,2,opt,name=max_age,json=maxAge,proto3,oneof"` + MaxAge *durationpb.Duration `protobuf:"bytes,2,opt,name=max_age,json=maxAge,proto3,oneof"` } type GcRule_Intersection_ struct { @@ -761,11 +761,11 @@ type Snapshot struct { // in the meantime. DataSizeBytes int64 `protobuf:"varint,3,opt,name=data_size_bytes,json=dataSizeBytes,proto3" json:"data_size_bytes,omitempty"` // Output only. The time when the snapshot is created. - CreateTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + CreateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // Output only. The time when the snapshot will be deleted. The maximum amount // of time a snapshot can stay active is 365 days. If 'ttl' is not specified, // the default maximum of 365 days will be used. - DeleteTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` + DeleteTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` // Output only. The current state of the snapshot. State Snapshot_State `protobuf:"varint,6,opt,name=state,proto3,enum=google.bigtable.admin.v2.Snapshot_State" json:"state,omitempty"` // Output only. Description of the snapshot. @@ -825,14 +825,14 @@ func (x *Snapshot) GetDataSizeBytes() int64 { return 0 } -func (x *Snapshot) GetCreateTime() *timestamp.Timestamp { +func (x *Snapshot) GetCreateTime() *timestamppb.Timestamp { if x != nil { return x.CreateTime } return nil } -func (x *Snapshot) GetDeleteTime() *timestamp.Timestamp { +func (x *Snapshot) GetDeleteTime() *timestamppb.Timestamp { if x != nil { return x.DeleteTime } @@ -879,16 +879,16 @@ type Backup struct { // from the time the request is received. Once the `expire_time` // has passed, Cloud Bigtable will delete the backup and free the // resources used by the backup. - ExpireTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` + ExpireTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` // Output only. `start_time` is the time that the backup was started // (i.e. 
approximately the time the // [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup] // request is received). The row data in this backup will be no older than // this timestamp. - StartTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + StartTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` // Output only. `end_time` is the time that the backup was finished. The row // data in the backup will be no newer than this timestamp. - EndTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + EndTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` // Output only. Size of the backup in bytes. SizeBytes int64 `protobuf:"varint,6,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"` // Output only. The current state of the backup. @@ -941,21 +941,21 @@ func (x *Backup) GetSourceTable() string { return "" } -func (x *Backup) GetExpireTime() *timestamp.Timestamp { +func (x *Backup) GetExpireTime() *timestamppb.Timestamp { if x != nil { return x.ExpireTime } return nil } -func (x *Backup) GetStartTime() *timestamp.Timestamp { +func (x *Backup) GetStartTime() *timestamppb.Timestamp { if x != nil { return x.StartTime } return nil } -func (x *Backup) GetEndTime() *timestamp.Timestamp { +func (x *Backup) GetEndTime() *timestamppb.Timestamp { if x != nil { return x.EndTime } @@ -986,10 +986,10 @@ type BackupInfo struct { Backup string `protobuf:"bytes,1,opt,name=backup,proto3" json:"backup,omitempty"` // Output only. The time that the backup was started. Row data in the backup // will be no older than this timestamp. - StartTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` // Output only. This time that the backup was finished. Row data in the // backup will be no newer than this timestamp. - EndTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + EndTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` // Output only. Name of the table the backup was created from. 
SourceTable string `protobuf:"bytes,4,opt,name=source_table,json=sourceTable,proto3" json:"source_table,omitempty"` } @@ -1033,14 +1033,14 @@ func (x *BackupInfo) GetBackup() string { return "" } -func (x *BackupInfo) GetStartTime() *timestamp.Timestamp { +func (x *BackupInfo) GetStartTime() *timestamppb.Timestamp { if x != nil { return x.StartTime } return nil } -func (x *BackupInfo) GetEndTime() *timestamp.Timestamp { +func (x *BackupInfo) GetEndTime() *timestamppb.Timestamp { if x != nil { return x.EndTime } @@ -1412,7 +1412,7 @@ var file_google_bigtable_admin_v2_table_proto_rawDesc = []byte{ 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x42, 0x41, 0x43, - 0x4b, 0x55, 0x50, 0x10, 0x01, 0x42, 0xad, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, + 0x4b, 0x55, 0x50, 0x10, 0x01, 0x42, 0xd2, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, @@ -1423,7 +1423,10 @@ var file_google_bigtable_admin_v2_table_proto_rawDesc = []byte{ 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, - 0x69, 0x6e, 0x5c, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, + 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -1459,8 +1462,8 @@ var file_google_bigtable_admin_v2_table_proto_goTypes = []interface{}{ nil, // 15: google.bigtable.admin.v2.Table.ColumnFamiliesEntry (*GcRule_Intersection)(nil), // 16: google.bigtable.admin.v2.GcRule.Intersection (*GcRule_Union)(nil), // 17: google.bigtable.admin.v2.GcRule.Union - (*duration.Duration)(nil), // 18: google.protobuf.Duration - (*timestamp.Timestamp)(nil), // 19: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 18: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 19: google.protobuf.Timestamp } var file_google_bigtable_admin_v2_table_proto_depIdxs = []int32{ 0, // 0: google.bigtable.admin.v2.RestoreInfo.source_type:type_name -> google.bigtable.admin.v2.RestoreSourceType diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go index 7d39e3ca08b..a2bb439178c 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/bigtable/v2/bigtable.proto package bigtable @@ -26,7 +26,6 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - wrappers "github.com/golang/protobuf/ptypes/wrappers" _ "google.golang.org/genproto/googleapis/api/annotations" status "google.golang.org/genproto/googleapis/rpc/status" grpc "google.golang.org/grpc" @@ -34,6 +33,7 @@ import ( status1 "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" ) const ( @@ -878,13 +878,13 @@ type ReadRowsResponse_CellChunk struct { // column family name in a response so clients must check // explicitly for the presence of this message, not just for // `family_name.value` being non-empty. - FamilyName *wrappers.StringValue `protobuf:"bytes,2,opt,name=family_name,json=familyName,proto3" json:"family_name,omitempty"` + FamilyName *wrapperspb.StringValue `protobuf:"bytes,2,opt,name=family_name,json=familyName,proto3" json:"family_name,omitempty"` // The column qualifier for this chunk of data. If this message // is not present, this CellChunk is a continuation of the same column // as the previous CellChunk. Column qualifiers may be empty so // clients must check for the presence of this message, not just // for `qualifier.value` being non-empty. - Qualifier *wrappers.BytesValue `protobuf:"bytes,3,opt,name=qualifier,proto3" json:"qualifier,omitempty"` + Qualifier *wrapperspb.BytesValue `protobuf:"bytes,3,opt,name=qualifier,proto3" json:"qualifier,omitempty"` // The cell's stored timestamp, which also uniquely identifies it // within its column. 
Values are always expressed in // microseconds, but individual tables may set a coarser @@ -956,14 +956,14 @@ func (x *ReadRowsResponse_CellChunk) GetRowKey() []byte { return nil } -func (x *ReadRowsResponse_CellChunk) GetFamilyName() *wrappers.StringValue { +func (x *ReadRowsResponse_CellChunk) GetFamilyName() *wrapperspb.StringValue { if x != nil { return x.FamilyName } return nil } -func (x *ReadRowsResponse_CellChunk) GetQualifier() *wrappers.BytesValue { +func (x *ReadRowsResponse_CellChunk) GetQualifier() *wrapperspb.BytesValue { if x != nil { return x.Qualifier } @@ -1451,7 +1451,7 @@ var file_google_bigtable_v2_bigtable_proto_rawDesc = []byte{ 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, - 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xf5, 0x01, 0x0a, 0x16, 0x63, + 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0x93, 0x02, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0d, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, @@ -1461,13 +1461,15 @@ var file_google_bigtable_v2_bigtable_proto_rawDesc = []byte{ 0x6c, 0x65, 0xaa, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, 0xea, 0x41, 0x57, 0x0a, 0x1d, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x36, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0xea, 0x41, 0x57, 0x0a, 0x1d, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x36, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x7d, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1504,8 +1506,8 @@ var file_google_bigtable_v2_bigtable_proto_goTypes = []interface{}{ (*Mutation)(nil), // 17: google.bigtable.v2.Mutation (*ReadModifyWriteRule)(nil), // 18: google.bigtable.v2.ReadModifyWriteRule (*Row)(nil), // 19: 
google.bigtable.v2.Row - (*wrappers.StringValue)(nil), // 20: google.protobuf.StringValue - (*wrappers.BytesValue)(nil), // 21: google.protobuf.BytesValue + (*wrapperspb.StringValue)(nil), // 20: google.protobuf.StringValue + (*wrapperspb.BytesValue)(nil), // 21: google.protobuf.BytesValue (*status.Status)(nil), // 22: google.rpc.Status } var file_google_bigtable_v2_bigtable_proto_depIdxs = []int32{ diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/data.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/data.pb.go index 2a0b6436085..30974400446 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/data.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/data.pb.go @@ -15,8 +15,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/bigtable/v2/data.proto package bigtable @@ -2218,7 +2218,7 @@ var file_google_bigtable_v2_data_proto_rawDesc = []byte{ 0x10, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x41, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x06, 0x0a, 0x04, 0x72, 0x75, - 0x6c, 0x65, 0x42, 0x97, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x6c, 0x65, 0x42, 0xb5, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x09, 0x44, 0x61, 0x74, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, @@ -2227,8 +2227,10 @@ var file_google_bigtable_v2_data_proto_rawDesc = []byte{ 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0xaa, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, - 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x1b, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go index fd9e2a15982..8a0cca603e4 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go @@ -15,8 +15,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/iam/v1/iam_policy.proto package iam diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go index 36d8f5305ca..f8f9fb0e92f 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go @@ -15,8 +15,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/iam/v1/options.proto package iam diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go index 81cbb5d6003..78fa9008621 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go @@ -15,8 +15,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/iam/v1/policy.proto package iam diff --git a/vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go b/vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go index 9181a9523ff..b0ec6540ee5 100644 --- a/vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/longrunning/operations.proto package longrunning @@ -26,10 +26,6 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" - any "github.com/golang/protobuf/ptypes/any" - duration "github.com/golang/protobuf/ptypes/duration" - empty "github.com/golang/protobuf/ptypes/empty" _ "google.golang.org/genproto/googleapis/api/annotations" status "google.golang.org/genproto/googleapis/rpc/status" grpc "google.golang.org/grpc" @@ -37,6 +33,10 @@ import ( status1 "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + emptypb "google.golang.org/protobuf/types/known/emptypb" ) const ( @@ -65,7 +65,7 @@ type Operation struct { // contains progress information and common metadata such as create time. // Some services might not provide such metadata. Any method that returns a // long-running operation should document the metadata type, if any. - Metadata *any.Any `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + Metadata *anypb.Any `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` // If the value is `false`, it means the operation is still in progress. // If `true`, the operation is completed, and either `error` or `response` is // available. 
@@ -119,7 +119,7 @@ func (x *Operation) GetName() string { return "" } -func (x *Operation) GetMetadata() *any.Any { +func (x *Operation) GetMetadata() *anypb.Any { if x != nil { return x.Metadata } @@ -147,7 +147,7 @@ func (x *Operation) GetError() *status.Status { return nil } -func (x *Operation) GetResponse() *any.Any { +func (x *Operation) GetResponse() *anypb.Any { if x, ok := x.GetResult().(*Operation_Response); ok { return x.Response } @@ -172,7 +172,7 @@ type Operation_Response struct { // is the original method name. For example, if the original method name // is `TakeSnapshot()`, the inferred response type is // `TakeSnapshotResponse`. - Response *any.Any `protobuf:"bytes,5,opt,name=response,proto3,oneof"` + Response *anypb.Any `protobuf:"bytes,5,opt,name=response,proto3,oneof"` } func (*Operation_Error) isOperation_Result() {} @@ -471,7 +471,7 @@ type WaitOperationRequest struct { // The maximum duration to wait before timing out. If left blank, the wait // will be at most the time permitted by the underlying HTTP/RPC protocol. // If RPC context deadline is also specified, the shorter one will be used. - Timeout *duration.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"` + Timeout *durationpb.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"` } func (x *WaitOperationRequest) Reset() { @@ -513,7 +513,7 @@ func (x *WaitOperationRequest) GetName() string { return "" } -func (x *WaitOperationRequest) GetTimeout() *duration.Duration { +func (x *WaitOperationRequest) GetTimeout() *durationpb.Duration { if x != nil { return x.Timeout } @@ -603,7 +603,7 @@ func (x *OperationInfo) GetMetadataType() string { var file_google_longrunning_operations_proto_extTypes = []protoimpl.ExtensionInfo{ { - ExtendedType: (*descriptor.MethodOptions)(nil), + ExtendedType: (*descriptorpb.MethodOptions)(nil), ExtensionType: (*OperationInfo)(nil), Field: 1049, Name: "google.longrunning.operation_info", @@ -612,7 +612,7 @@ var file_google_longrunning_operations_proto_extTypes = []protoimpl.ExtensionInf }, } -// Extension fields to descriptor.MethodOptions. +// Extension fields to descriptorpb.MethodOptions. var ( // Additional information regarding long-running operations. 
// In particular, this specifies the types that are returned from @@ -770,19 +770,19 @@ func file_google_longrunning_operations_proto_rawDescGZIP() []byte { var file_google_longrunning_operations_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_google_longrunning_operations_proto_goTypes = []interface{}{ - (*Operation)(nil), // 0: google.longrunning.Operation - (*GetOperationRequest)(nil), // 1: google.longrunning.GetOperationRequest - (*ListOperationsRequest)(nil), // 2: google.longrunning.ListOperationsRequest - (*ListOperationsResponse)(nil), // 3: google.longrunning.ListOperationsResponse - (*CancelOperationRequest)(nil), // 4: google.longrunning.CancelOperationRequest - (*DeleteOperationRequest)(nil), // 5: google.longrunning.DeleteOperationRequest - (*WaitOperationRequest)(nil), // 6: google.longrunning.WaitOperationRequest - (*OperationInfo)(nil), // 7: google.longrunning.OperationInfo - (*any.Any)(nil), // 8: google.protobuf.Any - (*status.Status)(nil), // 9: google.rpc.Status - (*duration.Duration)(nil), // 10: google.protobuf.Duration - (*descriptor.MethodOptions)(nil), // 11: google.protobuf.MethodOptions - (*empty.Empty)(nil), // 12: google.protobuf.Empty + (*Operation)(nil), // 0: google.longrunning.Operation + (*GetOperationRequest)(nil), // 1: google.longrunning.GetOperationRequest + (*ListOperationsRequest)(nil), // 2: google.longrunning.ListOperationsRequest + (*ListOperationsResponse)(nil), // 3: google.longrunning.ListOperationsResponse + (*CancelOperationRequest)(nil), // 4: google.longrunning.CancelOperationRequest + (*DeleteOperationRequest)(nil), // 5: google.longrunning.DeleteOperationRequest + (*WaitOperationRequest)(nil), // 6: google.longrunning.WaitOperationRequest + (*OperationInfo)(nil), // 7: google.longrunning.OperationInfo + (*anypb.Any)(nil), // 8: google.protobuf.Any + (*status.Status)(nil), // 9: google.rpc.Status + (*durationpb.Duration)(nil), // 10: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 11: google.protobuf.MethodOptions + (*emptypb.Empty)(nil), // 12: google.protobuf.Empty } var file_google_longrunning_operations_proto_depIdxs = []int32{ 8, // 0: google.longrunning.Operation.metadata:type_name -> google.protobuf.Any @@ -968,7 +968,7 @@ type OperationsClient interface { // no longer interested in the operation result. It does not cancel the // operation. If the server doesn't support this method, it returns // `google.rpc.Code.UNIMPLEMENTED`. - DeleteOperation(ctx context.Context, in *DeleteOperationRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteOperation(ctx context.Context, in *DeleteOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Starts asynchronous cancellation on a long-running operation. The server // makes a best effort to cancel the operation, but success is not // guaranteed. If the server doesn't support this method, it returns @@ -979,7 +979,7 @@ type OperationsClient interface { // the operation is not deleted; instead, it becomes an operation with // an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, // corresponding to `Code.CANCELLED`. 
- CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*empty.Empty, error) + CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Waits for the specified long-running operation until it is done or reaches // at most a specified timeout, returning the latest state. If the operation // is already done, the latest state is immediately returned. If the timeout @@ -1018,8 +1018,8 @@ func (c *operationsClient) GetOperation(ctx context.Context, in *GetOperationReq return out, nil } -func (c *operationsClient) DeleteOperation(ctx context.Context, in *DeleteOperationRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *operationsClient) DeleteOperation(ctx context.Context, in *DeleteOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.longrunning.Operations/DeleteOperation", in, out, opts...) if err != nil { return nil, err @@ -1027,8 +1027,8 @@ func (c *operationsClient) DeleteOperation(ctx context.Context, in *DeleteOperat return out, nil } -func (c *operationsClient) CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *operationsClient) CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.longrunning.Operations/CancelOperation", in, out, opts...) if err != nil { return nil, err @@ -1066,7 +1066,7 @@ type OperationsServer interface { // no longer interested in the operation result. It does not cancel the // operation. If the server doesn't support this method, it returns // `google.rpc.Code.UNIMPLEMENTED`. - DeleteOperation(context.Context, *DeleteOperationRequest) (*empty.Empty, error) + DeleteOperation(context.Context, *DeleteOperationRequest) (*emptypb.Empty, error) // Starts asynchronous cancellation on a long-running operation. The server // makes a best effort to cancel the operation, but success is not // guaranteed. If the server doesn't support this method, it returns @@ -1077,7 +1077,7 @@ type OperationsServer interface { // the operation is not deleted; instead, it becomes an operation with // an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, // corresponding to `Code.CANCELLED`. - CancelOperation(context.Context, *CancelOperationRequest) (*empty.Empty, error) + CancelOperation(context.Context, *CancelOperationRequest) (*emptypb.Empty, error) // Waits for the specified long-running operation until it is done or reaches // at most a specified timeout, returning the latest state. If the operation // is already done, the latest state is immediately returned. 
If the timeout @@ -1100,10 +1100,10 @@ func (*UnimplementedOperationsServer) ListOperations(context.Context, *ListOpera func (*UnimplementedOperationsServer) GetOperation(context.Context, *GetOperationRequest) (*Operation, error) { return nil, status1.Errorf(codes.Unimplemented, "method GetOperation not implemented") } -func (*UnimplementedOperationsServer) DeleteOperation(context.Context, *DeleteOperationRequest) (*empty.Empty, error) { +func (*UnimplementedOperationsServer) DeleteOperation(context.Context, *DeleteOperationRequest) (*emptypb.Empty, error) { return nil, status1.Errorf(codes.Unimplemented, "method DeleteOperation not implemented") } -func (*UnimplementedOperationsServer) CancelOperation(context.Context, *CancelOperationRequest) (*empty.Empty, error) { +func (*UnimplementedOperationsServer) CancelOperation(context.Context, *CancelOperationRequest) (*emptypb.Empty, error) { return nil, status1.Errorf(codes.Unimplemented, "method CancelOperation not implemented") } func (*UnimplementedOperationsServer) WaitOperation(context.Context, *WaitOperationRequest) (*Operation, error) { diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go index cdbba83c279..87315aaae08 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/monitoring/v3/alert.proto package monitoring @@ -25,12 +25,12 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - duration "github.com/golang/protobuf/ptypes/duration" - wrappers "github.com/golang/protobuf/ptypes/wrappers" _ "google.golang.org/genproto/googleapis/api/annotations" status "google.golang.org/genproto/googleapis/rpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" ) const ( @@ -163,7 +163,7 @@ type AlertPolicy struct { // any assumption about the state if it has not been populated. The // field should always be populated on List and Get operations, unless // a field projection has been specified that strips it out. - Enabled *wrappers.BoolValue `protobuf:"bytes,17,opt,name=enabled,proto3" json:"enabled,omitempty"` + Enabled *wrapperspb.BoolValue `protobuf:"bytes,17,opt,name=enabled,proto3" json:"enabled,omitempty"` // Read-only description of how the alert policy is invalid. OK if the alert // policy is valid. If not OK, the alert policy will not generate incidents. Validity *status.Status `protobuf:"bytes,18,opt,name=validity,proto3" json:"validity,omitempty"` @@ -260,7 +260,7 @@ func (x *AlertPolicy) GetCombiner() AlertPolicy_ConditionCombinerType { return AlertPolicy_COMBINE_UNSPECIFIED } -func (x *AlertPolicy) GetEnabled() *wrappers.BoolValue { +func (x *AlertPolicy) GetEnabled() *wrapperspb.BoolValue { if x != nil { return x.Enabled } @@ -649,7 +649,7 @@ type AlertPolicy_Condition_MetricThreshold struct { // `aggregations` field); a good duration is long enough so that a single // outlier does not generate spurious alerts, but short enough that // unhealthy states are detected and alerted on quickly. 
- Duration *duration.Duration `protobuf:"bytes,6,opt,name=duration,proto3" json:"duration,omitempty"` + Duration *durationpb.Duration `protobuf:"bytes,6,opt,name=duration,proto3" json:"duration,omitempty"` // The number/percent of time series for which the comparison must hold // in order for the condition to trigger. If unspecified, then the // condition will trigger if the comparison is true for any of the @@ -733,7 +733,7 @@ func (x *AlertPolicy_Condition_MetricThreshold) GetThresholdValue() float64 { return 0 } -func (x *AlertPolicy_Condition_MetricThreshold) GetDuration() *duration.Duration { +func (x *AlertPolicy_Condition_MetricThreshold) GetDuration() *durationpb.Duration { if x != nil { return x.Duration } @@ -786,7 +786,7 @@ type AlertPolicy_Condition_MetricAbsence struct { // seconds--are supported. If an invalid value is given, an // error will be returned. The `Duration.nanos` field is // ignored. - Duration *duration.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` + Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` // The number/percent of time series for which the comparison must hold // in order for the condition to trigger. If unspecified, then the // condition will trigger if the comparison is true for any of the @@ -840,7 +840,7 @@ func (x *AlertPolicy_Condition_MetricAbsence) GetAggregations() []*Aggregation { return nil } -func (x *AlertPolicy_Condition_MetricAbsence) GetDuration() *duration.Duration { +func (x *AlertPolicy_Condition_MetricAbsence) GetDuration() *durationpb.Duration { if x != nil { return x.Duration } @@ -1071,12 +1071,12 @@ var file_google_monitoring_v3_alert_proto_goTypes = []interface{}{ (*AlertPolicy_Condition_Trigger)(nil), // 5: google.monitoring.v3.AlertPolicy.Condition.Trigger (*AlertPolicy_Condition_MetricThreshold)(nil), // 6: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold (*AlertPolicy_Condition_MetricAbsence)(nil), // 7: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence - (*wrappers.BoolValue)(nil), // 8: google.protobuf.BoolValue + (*wrapperspb.BoolValue)(nil), // 8: google.protobuf.BoolValue (*status.Status)(nil), // 9: google.rpc.Status (*MutationRecord)(nil), // 10: google.monitoring.v3.MutationRecord (*Aggregation)(nil), // 11: google.monitoring.v3.Aggregation (ComparisonType)(0), // 12: google.monitoring.v3.ComparisonType - (*duration.Duration)(nil), // 13: google.protobuf.Duration + (*durationpb.Duration)(nil), // 13: google.protobuf.Duration } var file_google_monitoring_v3_alert_proto_depIdxs = []int32{ 2, // 0: google.monitoring.v3.AlertPolicy.documentation:type_name -> google.monitoring.v3.AlertPolicy.Documentation diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go index 057459dd858..c1704c4b724 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/monitoring/v3/alert_service.proto package monitoring @@ -26,14 +26,14 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - empty "github.com/golang/protobuf/ptypes/empty" _ "google.golang.org/genproto/googleapis/api/annotations" - field_mask "google.golang.org/genproto/protobuf/field_mask" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" ) const ( @@ -358,7 +358,7 @@ type UpdateAlertPolicyRequest struct { // the supplied condition includes the `name` field with that // `[CONDITION_ID]`. If the supplied condition omits the `name` field, // then a new `[CONDITION_ID]` is created. - UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` // Required. The updated alerting policy or the updated values for the // fields listed in `update_mask`. // If `update_mask` is not empty, any fields in this policy that are @@ -398,7 +398,7 @@ func (*UpdateAlertPolicyRequest) Descriptor() ([]byte, []int) { return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{4} } -func (x *UpdateAlertPolicyRequest) GetUpdateMask() *field_mask.FieldMask { +func (x *UpdateAlertPolicyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { if x != nil { return x.UpdateMask } @@ -643,8 +643,8 @@ var file_google_monitoring_v3_alert_service_proto_goTypes = []interface{}{ (*UpdateAlertPolicyRequest)(nil), // 4: google.monitoring.v3.UpdateAlertPolicyRequest (*DeleteAlertPolicyRequest)(nil), // 5: google.monitoring.v3.DeleteAlertPolicyRequest (*AlertPolicy)(nil), // 6: google.monitoring.v3.AlertPolicy - (*field_mask.FieldMask)(nil), // 7: google.protobuf.FieldMask - (*empty.Empty)(nil), // 8: google.protobuf.Empty + (*fieldmaskpb.FieldMask)(nil), // 7: google.protobuf.FieldMask + (*emptypb.Empty)(nil), // 8: google.protobuf.Empty } var file_google_monitoring_v3_alert_service_proto_depIdxs = []int32{ 6, // 0: google.monitoring.v3.CreateAlertPolicyRequest.alert_policy:type_name -> google.monitoring.v3.AlertPolicy @@ -787,7 +787,7 @@ type AlertPolicyServiceClient interface { // Creates a new alerting policy. CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) // Deletes an alerting policy. - DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Updates an alerting policy. You can either replace the entire policy with // a new one or replace only certain fields in the current alerting policy by // specifying the fields to be updated via `updateMask`. 
Returns the @@ -830,8 +830,8 @@ func (c *alertPolicyServiceClient) CreateAlertPolicy(ctx context.Context, in *Cr return out, nil } -func (c *alertPolicyServiceClient) DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *alertPolicyServiceClient) DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", in, out, opts...) if err != nil { return nil, err @@ -857,7 +857,7 @@ type AlertPolicyServiceServer interface { // Creates a new alerting policy. CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error) // Deletes an alerting policy. - DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*empty.Empty, error) + DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*emptypb.Empty, error) // Updates an alerting policy. You can either replace the entire policy with // a new one or replace only certain fields in the current alerting policy by // specifying the fields to be updated via `updateMask`. Returns the @@ -878,7 +878,7 @@ func (*UnimplementedAlertPolicyServiceServer) GetAlertPolicy(context.Context, *G func (*UnimplementedAlertPolicyServiceServer) CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateAlertPolicy not implemented") } -func (*UnimplementedAlertPolicyServiceServer) DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*empty.Empty, error) { +func (*UnimplementedAlertPolicyServiceServer) DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteAlertPolicy not implemented") } func (*UnimplementedAlertPolicyServiceServer) UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error) { diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go index 26e7a619536..87b735ae38f 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/monitoring/v3/common.proto package monitoring @@ -25,11 +25,11 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - duration "github.com/golang/protobuf/ptypes/duration" - timestamp "github.com/golang/protobuf/ptypes/timestamp" distribution "google.golang.org/genproto/googleapis/api/distribution" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -675,7 +675,11 @@ func (*TypedValue_StringValue) isTypedValue_Value() {} func (*TypedValue_DistributionValue) isTypedValue_Value() {} -// A closed time interval. It extends from the start time to the end time, and includes both: `[startTime, endTime]`. 
Valid time intervals depend on the [`MetricKind`](/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind) of the metric value. In no case can the end time be earlier than the start time. +// A closed time interval. It extends from the start time to the end time, and +// includes both: `[startTime, endTime]`. Valid time intervals depend on the +// [`MetricKind`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind) +// of the metric value. In no case can the end time be earlier than the start +// time. // // * For a `GAUGE` metric, the `startTime` value is technically optional; if // no value is specified, the start time defaults to the value of the @@ -699,11 +703,11 @@ type TimeInterval struct { unknownFields protoimpl.UnknownFields // Required. The end of the time interval. - EndTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + EndTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` // Optional. The beginning of the time interval. The default value // for the start time is the end time. The start time must not be // later than the end time. - StartTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + StartTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` } func (x *TimeInterval) Reset() { @@ -738,14 +742,14 @@ func (*TimeInterval) Descriptor() ([]byte, []int) { return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{1} } -func (x *TimeInterval) GetEndTime() *timestamp.Timestamp { +func (x *TimeInterval) GetEndTime() *timestamppb.Timestamp { if x != nil { return x.EndTime } return nil } -func (x *TimeInterval) GetStartTime() *timestamp.Timestamp { +func (x *TimeInterval) GetStartTime() *timestamppb.Timestamp { if x != nil { return x.StartTime } @@ -793,7 +797,7 @@ type Aggregation struct { // `ALIGN_NONE` is specified, this field is required or an error is returned. // If no per-series aligner is specified, or the aligner `ALIGN_NONE` is // specified, then this field is ignored. - AlignmentPeriod *duration.Duration `protobuf:"bytes,1,opt,name=alignment_period,json=alignmentPeriod,proto3" json:"alignment_period,omitempty"` + AlignmentPeriod *durationpb.Duration `protobuf:"bytes,1,opt,name=alignment_period,json=alignmentPeriod,proto3" json:"alignment_period,omitempty"` // An `Aligner` describes how to bring the data points in a single // time series into temporal alignment. 
Except for `ALIGN_NONE`, all // alignments cause all the data points in an `alignment_period` to be @@ -875,7 +879,7 @@ func (*Aggregation) Descriptor() ([]byte, []int) { return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2} } -func (x *Aggregation) GetAlignmentPeriod() *duration.Duration { +func (x *Aggregation) GetAlignmentPeriod() *durationpb.Duration { if x != nil { return x.AlignmentPeriod } @@ -1057,8 +1061,8 @@ var file_google_monitoring_v3_common_proto_goTypes = []interface{}{ (*TimeInterval)(nil), // 5: google.monitoring.v3.TimeInterval (*Aggregation)(nil), // 6: google.monitoring.v3.Aggregation (*distribution.Distribution)(nil), // 7: google.api.Distribution - (*timestamp.Timestamp)(nil), // 8: google.protobuf.Timestamp - (*duration.Duration)(nil), // 9: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 9: google.protobuf.Duration } var file_google_monitoring_v3_common_proto_depIdxs = []int32{ 7, // 0: google.monitoring.v3.TypedValue.distribution_value:type_name -> google.api.Distribution diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go index 7b6309767c8..38e5e5b4e7c 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/monitoring/v3/dropped_labels.proto package monitoring diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go index 5568724d168..3e9fabc0d1c 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/monitoring/v3/group.proto package monitoring diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go index 9979f3dd32f..cc81ca4ed66 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/monitoring/v3/group_service.proto package monitoring @@ -26,7 +26,6 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - empty "github.com/golang/protobuf/ptypes/empty" _ "google.golang.org/genproto/googleapis/api/annotations" monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" grpc "google.golang.org/grpc" @@ -34,6 +33,7 @@ import ( status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" ) const ( @@ -886,7 +886,7 @@ var file_google_monitoring_v3_group_service_proto_goTypes = []interface{}{ (*Group)(nil), // 8: google.monitoring.v3.Group (*TimeInterval)(nil), // 9: google.monitoring.v3.TimeInterval (*monitoredres.MonitoredResource)(nil), // 10: google.api.MonitoredResource - (*empty.Empty)(nil), // 11: google.protobuf.Empty + (*emptypb.Empty)(nil), // 11: google.protobuf.Empty } var file_google_monitoring_v3_group_service_proto_depIdxs = []int32{ 8, // 0: google.monitoring.v3.ListGroupsResponse.group:type_name -> google.monitoring.v3.Group @@ -1065,7 +1065,7 @@ type GroupServiceClient interface { // You can change any group attributes except `name`. UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) // Deletes an existing group. - DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Lists the monitored resources that are members of a group. ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) } @@ -1114,8 +1114,8 @@ func (c *groupServiceClient) UpdateGroup(ctx context.Context, in *UpdateGroupReq return out, nil } -func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/DeleteGroup", in, out, opts...) if err != nil { return nil, err @@ -1144,7 +1144,7 @@ type GroupServiceServer interface { // You can change any group attributes except `name`. UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error) // Deletes an existing group. - DeleteGroup(context.Context, *DeleteGroupRequest) (*empty.Empty, error) + DeleteGroup(context.Context, *DeleteGroupRequest) (*emptypb.Empty, error) // Lists the monitored resources that are members of a group. 
ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error) } @@ -1165,7 +1165,7 @@ func (*UnimplementedGroupServiceServer) CreateGroup(context.Context, *CreateGrou func (*UnimplementedGroupServiceServer) UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateGroup not implemented") } -func (*UnimplementedGroupServiceServer) DeleteGroup(context.Context, *DeleteGroupRequest) (*empty.Empty, error) { +func (*UnimplementedGroupServiceServer) DeleteGroup(context.Context, *DeleteGroupRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteGroup not implemented") } func (*UnimplementedGroupServiceServer) ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error) { diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go index 82abbb84326..ad976bbfe97 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/monitoring/v3/metric.proto package monitoring @@ -25,13 +25,13 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - _ "github.com/golang/protobuf/ptypes/duration" _ "google.golang.org/genproto/googleapis/api/distribution" label "google.golang.org/genproto/googleapis/api/label" metric "google.golang.org/genproto/googleapis/api/metric" monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + _ "google.golang.org/protobuf/types/known/durationpb" ) const ( diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go index 9a99c8e1f9a..015d8e2ba8e 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/monitoring/v3/metric_service.proto package monitoring @@ -26,8 +26,6 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - _ "github.com/golang/protobuf/ptypes/duration" - empty "github.com/golang/protobuf/ptypes/empty" _ "google.golang.org/genproto/googleapis/api/annotations" metric "google.golang.org/genproto/googleapis/api/metric" monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" @@ -37,6 +35,8 @@ import ( status1 "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + _ "google.golang.org/protobuf/types/known/durationpb" + emptypb "google.golang.org/protobuf/types/known/emptypb" ) const ( @@ -1719,7 +1719,7 @@ var file_google_monitoring_v3_metric_service_proto_goTypes = []interface{}{ (*TimeSeriesDescriptor)(nil), // 24: google.monitoring.v3.TimeSeriesDescriptor (*TimeSeriesData)(nil), // 25: google.monitoring.v3.TimeSeriesData (*QueryError)(nil), // 26: google.monitoring.v3.QueryError - (*empty.Empty)(nil), // 27: google.protobuf.Empty + (*emptypb.Empty)(nil), // 27: google.protobuf.Empty } var file_google_monitoring_v3_metric_service_proto_depIdxs = []int32{ 18, // 0: google.monitoring.v3.ListMonitoredResourceDescriptorsResponse.resource_descriptors:type_name -> google.api.MonitoredResourceDescriptor @@ -2024,14 +2024,14 @@ type MetricServiceClient interface { // Deletes a metric descriptor. Only user-created // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be // deleted. - DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Lists time series that match a filter. This method does not require a Workspace. ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) // Creates or adds data to one or more time series. // The response is empty if all time series in the request were written. // If any time series could not be written, a corresponding failure message is // included in the error response. - CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*empty.Empty, error) + CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) } type metricServiceClient struct { @@ -2087,8 +2087,8 @@ func (c *metricServiceClient) CreateMetricDescriptor(ctx context.Context, in *Cr return out, nil } -func (c *metricServiceClient) DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *metricServiceClient) DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", in, out, opts...) 
if err != nil { return nil, err @@ -2105,8 +2105,8 @@ func (c *metricServiceClient) ListTimeSeries(ctx context.Context, in *ListTimeSe return out, nil } -func (c *metricServiceClient) CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *metricServiceClient) CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateTimeSeries", in, out, opts...) if err != nil { return nil, err @@ -2131,14 +2131,14 @@ type MetricServiceServer interface { // Deletes a metric descriptor. Only user-created // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be // deleted. - DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*empty.Empty, error) + DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*emptypb.Empty, error) // Lists time series that match a filter. This method does not require a Workspace. ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) // Creates or adds data to one or more time series. // The response is empty if all time series in the request were written. // If any time series could not be written, a corresponding failure message is // included in the error response. - CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*empty.Empty, error) + CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) } // UnimplementedMetricServiceServer can be embedded to have forward compatible implementations. @@ -2160,13 +2160,13 @@ func (*UnimplementedMetricServiceServer) GetMetricDescriptor(context.Context, *G func (*UnimplementedMetricServiceServer) CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { return nil, status1.Errorf(codes.Unimplemented, "method CreateMetricDescriptor not implemented") } -func (*UnimplementedMetricServiceServer) DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*empty.Empty, error) { +func (*UnimplementedMetricServiceServer) DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*emptypb.Empty, error) { return nil, status1.Errorf(codes.Unimplemented, "method DeleteMetricDescriptor not implemented") } func (*UnimplementedMetricServiceServer) ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) { return nil, status1.Errorf(codes.Unimplemented, "method ListTimeSeries not implemented") } -func (*UnimplementedMetricServiceServer) CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*empty.Empty, error) { +func (*UnimplementedMetricServiceServer) CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) { return nil, status1.Errorf(codes.Unimplemented, "method CreateTimeSeries not implemented") } diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go index cb7a982e50f..e933e824089 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/monitoring/v3/mutation_record.proto package monitoring @@ -25,9 +25,9 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -48,7 +48,7 @@ type MutationRecord struct { unknownFields protoimpl.UnknownFields // When the change occurred. - MutateTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=mutate_time,json=mutateTime,proto3" json:"mutate_time,omitempty"` + MutateTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=mutate_time,json=mutateTime,proto3" json:"mutate_time,omitempty"` // The email address of the user making the change. MutatedBy string `protobuf:"bytes,2,opt,name=mutated_by,json=mutatedBy,proto3" json:"mutated_by,omitempty"` } @@ -85,7 +85,7 @@ func (*MutationRecord) Descriptor() ([]byte, []int) { return file_google_monitoring_v3_mutation_record_proto_rawDescGZIP(), []int{0} } -func (x *MutationRecord) GetMutateTime() *timestamp.Timestamp { +func (x *MutationRecord) GetMutateTime() *timestamppb.Timestamp { if x != nil { return x.MutateTime } @@ -145,8 +145,8 @@ func file_google_monitoring_v3_mutation_record_proto_rawDescGZIP() []byte { var file_google_monitoring_v3_mutation_record_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_google_monitoring_v3_mutation_record_proto_goTypes = []interface{}{ - (*MutationRecord)(nil), // 0: google.monitoring.v3.MutationRecord - (*timestamp.Timestamp)(nil), // 1: google.protobuf.Timestamp + (*MutationRecord)(nil), // 0: google.monitoring.v3.MutationRecord + (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp } var file_google_monitoring_v3_mutation_record_proto_depIdxs = []int32{ 1, // 0: google.monitoring.v3.MutationRecord.mutate_time:type_name -> google.protobuf.Timestamp diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go index 9bd785e53e0..6105f945ac4 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/monitoring/v3/notification.proto package monitoring @@ -25,12 +25,12 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - wrappers "github.com/golang/protobuf/ptypes/wrappers" api "google.golang.org/genproto/googleapis/api" _ "google.golang.org/genproto/googleapis/api/annotations" label "google.golang.org/genproto/googleapis/api/label" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" ) const ( @@ -297,7 +297,7 @@ type NotificationChannel struct { // the channel. This is a more convenient approach when the change is // temporary and you want to receive notifications from the same set // of alerting policies on the channel at some point in the future. 
- Enabled *wrappers.BoolValue `protobuf:"bytes,11,opt,name=enabled,proto3" json:"enabled,omitempty"` + Enabled *wrapperspb.BoolValue `protobuf:"bytes,11,opt,name=enabled,proto3" json:"enabled,omitempty"` } func (x *NotificationChannel) Reset() { @@ -381,7 +381,7 @@ func (x *NotificationChannel) GetVerificationStatus() NotificationChannel_Verifi return NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED } -func (x *NotificationChannel) GetEnabled() *wrappers.BoolValue { +func (x *NotificationChannel) GetEnabled() *wrapperspb.BoolValue { if x != nil { return x.Enabled } @@ -541,7 +541,7 @@ var file_google_monitoring_v3_notification_proto_goTypes = []interface{}{ (*label.LabelDescriptor)(nil), // 5: google.api.LabelDescriptor (ServiceTier)(0), // 6: google.monitoring.v3.ServiceTier (api.LaunchStage)(0), // 7: google.api.LaunchStage - (*wrappers.BoolValue)(nil), // 8: google.protobuf.BoolValue + (*wrapperspb.BoolValue)(nil), // 8: google.protobuf.BoolValue } var file_google_monitoring_v3_notification_proto_depIdxs = []int32{ 5, // 0: google.monitoring.v3.NotificationChannelDescriptor.labels:type_name -> google.api.LabelDescriptor diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go index 8f0a48cb1a3..b8fc414d070 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/monitoring/v3/notification_service.proto package monitoring @@ -26,16 +26,16 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - empty "github.com/golang/protobuf/ptypes/empty" - _ "github.com/golang/protobuf/ptypes/struct" - timestamp "github.com/golang/protobuf/ptypes/timestamp" _ "google.golang.org/genproto/googleapis/api/annotations" - field_mask "google.golang.org/genproto/protobuf/field_mask" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + _ "google.golang.org/protobuf/types/known/structpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -532,7 +532,7 @@ type UpdateNotificationChannelRequest struct { unknownFields protoimpl.UnknownFields // The fields to update. - UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` // Required. A description of the changes to be applied to the specified // notification channel. 
The description must provide a definition for // fields to be updated; the names of these fields should also be @@ -572,7 +572,7 @@ func (*UpdateNotificationChannelRequest) Descriptor() ([]byte, []int) { return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{7} } -func (x *UpdateNotificationChannelRequest) GetUpdateMask() *field_mask.FieldMask { +func (x *UpdateNotificationChannelRequest) GetUpdateMask() *fieldmaskpb.FieldMask { if x != nil { return x.UpdateMask } @@ -717,7 +717,7 @@ type GetNotificationChannelVerificationCodeRequest struct { // permissible expiration (so specifying an expiration may extend the // code's lifetime over omitting an expiration, even though the API does // impose an upper limit on the maximum expiration that is permitted). - ExpireTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` + ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` } func (x *GetNotificationChannelVerificationCodeRequest) Reset() { @@ -759,7 +759,7 @@ func (x *GetNotificationChannelVerificationCodeRequest) GetName() string { return "" } -func (x *GetNotificationChannelVerificationCodeRequest) GetExpireTime() *timestamp.Timestamp { +func (x *GetNotificationChannelVerificationCodeRequest) GetExpireTime() *timestamppb.Timestamp { if x != nil { return x.ExpireTime } @@ -780,7 +780,7 @@ type GetNotificationChannelVerificationCodeResponse struct { // The expiration time associated with the code that was returned. If // an expiration was provided in the request, this is the minimum of the // requested expiration in the request and the max permitted expiration. - ExpireTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` + ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` } func (x *GetNotificationChannelVerificationCodeResponse) Reset() { @@ -822,7 +822,7 @@ func (x *GetNotificationChannelVerificationCodeResponse) GetCode() string { return "" } -func (x *GetNotificationChannelVerificationCodeResponse) GetExpireTime() *timestamp.Timestamp { +func (x *GetNotificationChannelVerificationCodeResponse) GetExpireTime() *timestamppb.Timestamp { if x != nil { return x.ExpireTime } @@ -1247,9 +1247,9 @@ var file_google_monitoring_v3_notification_service_proto_goTypes = []interface{} (*VerifyNotificationChannelRequest)(nil), // 12: google.monitoring.v3.VerifyNotificationChannelRequest (*NotificationChannelDescriptor)(nil), // 13: google.monitoring.v3.NotificationChannelDescriptor (*NotificationChannel)(nil), // 14: google.monitoring.v3.NotificationChannel - (*field_mask.FieldMask)(nil), // 15: google.protobuf.FieldMask - (*timestamp.Timestamp)(nil), // 16: google.protobuf.Timestamp - (*empty.Empty)(nil), // 17: google.protobuf.Empty + (*fieldmaskpb.FieldMask)(nil), // 15: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 16: google.protobuf.Timestamp + (*emptypb.Empty)(nil), // 17: google.protobuf.Empty } var file_google_monitoring_v3_notification_service_proto_depIdxs = []int32{ 13, // 0: google.monitoring.v3.ListNotificationChannelDescriptorsResponse.channel_descriptors:type_name -> google.monitoring.v3.NotificationChannelDescriptor @@ -1503,10 +1503,10 @@ type NotificationChannelServiceClient interface { // remain unchanged. 
UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) // Deletes a notification channel. - DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Causes a verification code to be delivered to the channel. The code // can then be supplied in `VerifyNotificationChannel` to verify the channel. - SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) + SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Requests a verification code for an already verified channel that can then // be used in a call to VerifyNotificationChannel() on a different channel // with an equivalent identity in the same or in a different project. This @@ -1597,8 +1597,8 @@ func (c *notificationChannelServiceClient) UpdateNotificationChannel(ctx context return out, nil } -func (c *notificationChannelServiceClient) DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *notificationChannelServiceClient) DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", in, out, opts...) if err != nil { return nil, err @@ -1606,8 +1606,8 @@ func (c *notificationChannelServiceClient) DeleteNotificationChannel(ctx context return out, nil } -func (c *notificationChannelServiceClient) SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *notificationChannelServiceClient) SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", in, out, opts...) if err != nil { return nil, err @@ -1656,10 +1656,10 @@ type NotificationChannelServiceServer interface { // remain unchanged. UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error) // Deletes a notification channel. - DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*empty.Empty, error) + DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*emptypb.Empty, error) // Causes a verification code to be delivered to the channel. The code // can then be supplied in `VerifyNotificationChannel` to verify the channel. 
- SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*empty.Empty, error) + SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*emptypb.Empty, error) // Requests a verification code for an already verified channel that can then // be used in a call to VerifyNotificationChannel() on a different channel // with an equivalent identity in the same or in a different project. This @@ -1710,10 +1710,10 @@ func (*UnimplementedNotificationChannelServiceServer) CreateNotificationChannel( func (*UnimplementedNotificationChannelServiceServer) UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateNotificationChannel not implemented") } -func (*UnimplementedNotificationChannelServiceServer) DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*empty.Empty, error) { +func (*UnimplementedNotificationChannelServiceServer) DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationChannel not implemented") } -func (*UnimplementedNotificationChannelServiceServer) SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*empty.Empty, error) { +func (*UnimplementedNotificationChannelServiceServer) SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method SendNotificationChannelVerificationCode not implemented") } func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error) { diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service.pb.go index 09e9a24f389..d67808ff104 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/monitoring/v3/service.proto package monitoring @@ -25,13 +25,13 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - duration "github.com/golang/protobuf/ptypes/duration" - _ "github.com/golang/protobuf/ptypes/timestamp" _ "google.golang.org/genproto/googleapis/api/annotations" _ "google.golang.org/genproto/googleapis/api/monitoredres" calendarperiod "google.golang.org/genproto/googleapis/type/calendarperiod" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + _ "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -370,7 +370,7 @@ func (m *ServiceLevelObjective) GetPeriod() isServiceLevelObjective_Period { return nil } -func (x *ServiceLevelObjective) GetRollingPeriod() *duration.Duration { +func (x *ServiceLevelObjective) GetRollingPeriod() *durationpb.Duration { if x, ok := x.GetPeriod().(*ServiceLevelObjective_RollingPeriod); ok { return x.RollingPeriod } @@ -391,7 +391,7 @@ type isServiceLevelObjective_Period interface { type ServiceLevelObjective_RollingPeriod struct { // A rolling time period, semantically "in the past ``". // Must be an integer multiple of 1 day no larger than 30 days. - RollingPeriod *duration.Duration `protobuf:"bytes,5,opt,name=rolling_period,json=rollingPeriod,proto3,oneof"` + RollingPeriod *durationpb.Duration `protobuf:"bytes,5,opt,name=rolling_period,json=rollingPeriod,proto3,oneof"` } type ServiceLevelObjective_CalendarPeriod struct { @@ -965,7 +965,7 @@ type WindowsBasedSli struct { WindowCriterion isWindowsBasedSli_WindowCriterion `protobuf_oneof:"window_criterion"` // Duration over which window quality is evaluated. Must be an integer // fraction of a day and at least `60s`. - WindowPeriod *duration.Duration `protobuf:"bytes,4,opt,name=window_period,json=windowPeriod,proto3" json:"window_period,omitempty"` + WindowPeriod *durationpb.Duration `protobuf:"bytes,4,opt,name=window_period,json=windowPeriod,proto3" json:"window_period,omitempty"` } func (x *WindowsBasedSli) Reset() { @@ -1035,7 +1035,7 @@ func (x *WindowsBasedSli) GetMetricSumInRange() *WindowsBasedSli_MetricRange { return nil } -func (x *WindowsBasedSli) GetWindowPeriod() *duration.Duration { +func (x *WindowsBasedSli) GetWindowPeriod() *durationpb.Duration { if x != nil { return x.WindowPeriod } @@ -1472,7 +1472,7 @@ type BasicSli_LatencyCriteria struct { // Good service is defined to be the count of requests made to this service // that return in no more than `threshold`. 
- Threshold *duration.Duration `protobuf:"bytes,3,opt,name=threshold,proto3" json:"threshold,omitempty"` + Threshold *durationpb.Duration `protobuf:"bytes,3,opt,name=threshold,proto3" json:"threshold,omitempty"` } func (x *BasicSli_LatencyCriteria) Reset() { @@ -1507,7 +1507,7 @@ func (*BasicSli_LatencyCriteria) Descriptor() ([]byte, []int) { return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3, 1} } -func (x *BasicSli_LatencyCriteria) GetThreshold() *duration.Duration { +func (x *BasicSli_LatencyCriteria) GetThreshold() *durationpb.Duration { if x != nil { return x.Threshold } @@ -1983,7 +1983,7 @@ var file_google_monitoring_v3_service_proto_goTypes = []interface{}{ (*BasicSli_LatencyCriteria)(nil), // 17: google.monitoring.v3.BasicSli.LatencyCriteria (*WindowsBasedSli_PerformanceThreshold)(nil), // 18: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold (*WindowsBasedSli_MetricRange)(nil), // 19: google.monitoring.v3.WindowsBasedSli.MetricRange - (*duration.Duration)(nil), // 20: google.protobuf.Duration + (*durationpb.Duration)(nil), // 20: google.protobuf.Duration (calendarperiod.CalendarPeriod)(0), // 21: google.type.CalendarPeriod } var file_google_monitoring_v3_service_proto_depIdxs = []int32{ diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service_service.pb.go index 8d12622263c..e05d0b40c0d 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service_service.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service_service.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/monitoring/v3/service_service.proto package monitoring @@ -26,14 +26,14 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - empty "github.com/golang/protobuf/ptypes/empty" _ "google.golang.org/genproto/googleapis/api/annotations" - field_mask "google.golang.org/genproto/protobuf/field_mask" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" ) const ( @@ -335,7 +335,7 @@ type UpdateServiceRequest struct { // The given `name` specifies the resource to update. Service *Service `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` // A set of field paths defining which fields to use for the update. - UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } func (x *UpdateServiceRequest) Reset() { @@ -377,7 +377,7 @@ func (x *UpdateServiceRequest) GetService() *Service { return nil } -func (x *UpdateServiceRequest) GetUpdateMask() *field_mask.FieldMask { +func (x *UpdateServiceRequest) GetUpdateMask() *fieldmaskpb.FieldMask { if x != nil { return x.UpdateMask } @@ -736,7 +736,7 @@ type UpdateServiceLevelObjectiveRequest struct { // The given `name` specifies the resource to update. 
ServiceLevelObjective *ServiceLevelObjective `protobuf:"bytes,1,opt,name=service_level_objective,json=serviceLevelObjective,proto3" json:"service_level_objective,omitempty"` // A set of field paths defining which fields to use for the update. - UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } func (x *UpdateServiceLevelObjectiveRequest) Reset() { @@ -778,7 +778,7 @@ func (x *UpdateServiceLevelObjectiveRequest) GetServiceLevelObjective() *Service return nil } -func (x *UpdateServiceLevelObjectiveRequest) GetUpdateMask() *field_mask.FieldMask { +func (x *UpdateServiceLevelObjectiveRequest) GetUpdateMask() *fieldmaskpb.FieldMask { if x != nil { return x.UpdateMask } @@ -1153,10 +1153,10 @@ var file_google_monitoring_v3_service_service_proto_goTypes = []interface{}{ (*UpdateServiceLevelObjectiveRequest)(nil), // 10: google.monitoring.v3.UpdateServiceLevelObjectiveRequest (*DeleteServiceLevelObjectiveRequest)(nil), // 11: google.monitoring.v3.DeleteServiceLevelObjectiveRequest (*Service)(nil), // 12: google.monitoring.v3.Service - (*field_mask.FieldMask)(nil), // 13: google.protobuf.FieldMask + (*fieldmaskpb.FieldMask)(nil), // 13: google.protobuf.FieldMask (*ServiceLevelObjective)(nil), // 14: google.monitoring.v3.ServiceLevelObjective (ServiceLevelObjective_View)(0), // 15: google.monitoring.v3.ServiceLevelObjective.View - (*empty.Empty)(nil), // 16: google.protobuf.Empty + (*emptypb.Empty)(nil), // 16: google.protobuf.Empty } var file_google_monitoring_v3_service_service_proto_depIdxs = []int32{ 12, // 0: google.monitoring.v3.CreateServiceRequest.service:type_name -> google.monitoring.v3.Service @@ -1389,7 +1389,7 @@ type ServiceMonitoringServiceClient interface { // Update this `Service`. UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*Service, error) // Soft delete this `Service`. - DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Create a `ServiceLevelObjective` for the given `Service`. CreateServiceLevelObjective(ctx context.Context, in *CreateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) // Get a `ServiceLevelObjective` by name. @@ -1399,7 +1399,7 @@ type ServiceMonitoringServiceClient interface { // Update the given `ServiceLevelObjective`. UpdateServiceLevelObjective(ctx context.Context, in *UpdateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) // Delete the given `ServiceLevelObjective`. 
- DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) } type serviceMonitoringServiceClient struct { @@ -1446,8 +1446,8 @@ func (c *serviceMonitoringServiceClient) UpdateService(ctx context.Context, in * return out, nil } -func (c *serviceMonitoringServiceClient) DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *serviceMonitoringServiceClient) DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/DeleteService", in, out, opts...) if err != nil { return nil, err @@ -1491,8 +1491,8 @@ func (c *serviceMonitoringServiceClient) UpdateServiceLevelObjective(ctx context return out, nil } -func (c *serviceMonitoringServiceClient) DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *serviceMonitoringServiceClient) DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective", in, out, opts...) if err != nil { return nil, err @@ -1511,7 +1511,7 @@ type ServiceMonitoringServiceServer interface { // Update this `Service`. UpdateService(context.Context, *UpdateServiceRequest) (*Service, error) // Soft delete this `Service`. - DeleteService(context.Context, *DeleteServiceRequest) (*empty.Empty, error) + DeleteService(context.Context, *DeleteServiceRequest) (*emptypb.Empty, error) // Create a `ServiceLevelObjective` for the given `Service`. CreateServiceLevelObjective(context.Context, *CreateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) // Get a `ServiceLevelObjective` by name. @@ -1521,7 +1521,7 @@ type ServiceMonitoringServiceServer interface { // Update the given `ServiceLevelObjective`. UpdateServiceLevelObjective(context.Context, *UpdateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) // Delete the given `ServiceLevelObjective`. - DeleteServiceLevelObjective(context.Context, *DeleteServiceLevelObjectiveRequest) (*empty.Empty, error) + DeleteServiceLevelObjective(context.Context, *DeleteServiceLevelObjectiveRequest) (*emptypb.Empty, error) } // UnimplementedServiceMonitoringServiceServer can be embedded to have forward compatible implementations. 
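Throughout these regenerated stubs, the legacy `github.com/golang/protobuf/ptypes/*` and `google.golang.org/genproto/protobuf/field_mask` imports are swapped for the well-known-type packages under `google.golang.org/protobuf/types/known/`. As a minimal caller-side sketch of that migration: the request and config types below come from the files in this diff, but the concrete values and the `display_name` mask path are placeholders, not taken from this change.

```go
// Illustrative only: shows the new well-known-type packages that the
// regenerated monitoring stubs expect. Field values are placeholders.
package main

import (
	"fmt"

	monitoring "google.golang.org/genproto/googleapis/monitoring/v3"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	// field_mask.FieldMask is now fieldmaskpb.FieldMask.
	upd := &monitoring.UpdateServiceRequest{
		Service:    &monitoring.Service{Name: "projects/example/services/example"},
		UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{"display_name"}},
	}

	// duration.Duration is now durationpb.Duration (here: 5 minutes / 10 seconds).
	check := &monitoring.UptimeCheckConfig{
		Period:  &durationpb.Duration{Seconds: 300},
		Timeout: &durationpb.Duration{Seconds: 10},
	}

	fmt.Println(upd.GetUpdateMask().GetPaths(), check.GetPeriod().GetSeconds())
}
```

Delete RPCs that previously returned `*empty.Empty` now return `*emptypb.Empty`; only the Go import path changes, the underlying `google.protobuf.Empty` message is the same on the wire.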
@@ -1540,7 +1540,7 @@ func (*UnimplementedServiceMonitoringServiceServer) ListServices(context.Context func (*UnimplementedServiceMonitoringServiceServer) UpdateService(context.Context, *UpdateServiceRequest) (*Service, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateService not implemented") } -func (*UnimplementedServiceMonitoringServiceServer) DeleteService(context.Context, *DeleteServiceRequest) (*empty.Empty, error) { +func (*UnimplementedServiceMonitoringServiceServer) DeleteService(context.Context, *DeleteServiceRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteService not implemented") } func (*UnimplementedServiceMonitoringServiceServer) CreateServiceLevelObjective(context.Context, *CreateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) { @@ -1555,7 +1555,7 @@ func (*UnimplementedServiceMonitoringServiceServer) ListServiceLevelObjectives(c func (*UnimplementedServiceMonitoringServiceServer) UpdateServiceLevelObjective(context.Context, *UpdateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateServiceLevelObjective not implemented") } -func (*UnimplementedServiceMonitoringServiceServer) DeleteServiceLevelObjective(context.Context, *DeleteServiceLevelObjectiveRequest) (*empty.Empty, error) { +func (*UnimplementedServiceMonitoringServiceServer) DeleteServiceLevelObjective(context.Context, *DeleteServiceLevelObjectiveRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteServiceLevelObjective not implemented") } diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go index 52b119b4424..0b3c3c30203 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/monitoring/v3/span_context.proto package monitoring diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go index 9101103365e..d09c926e0b8 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/monitoring/v3/uptime.proto package monitoring @@ -25,11 +25,11 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - duration "github.com/golang/protobuf/ptypes/duration" _ "google.golang.org/genproto/googleapis/api/annotations" monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" ) const ( @@ -546,10 +546,10 @@ type UptimeCheckConfig struct { // Currently, the only supported values are `60s` (1 minute), `300s` // (5 minutes), `600s` (10 minutes), and `900s` (15 minutes). Optional, // defaults to `60s`. 
- Period *duration.Duration `protobuf:"bytes,7,opt,name=period,proto3" json:"period,omitempty"` + Period *durationpb.Duration `protobuf:"bytes,7,opt,name=period,proto3" json:"period,omitempty"` // The maximum amount of time to wait for the request to complete (must be // between 1 and 60 seconds). Required. - Timeout *duration.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"` + Timeout *durationpb.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"` // The content that is expected to appear in the data returned by the target // server against which the check is run. Currently, only the first entry // in the `content_matchers` list is supported, and additional entries will @@ -666,14 +666,14 @@ func (x *UptimeCheckConfig) GetTcpCheck() *UptimeCheckConfig_TcpCheck { return nil } -func (x *UptimeCheckConfig) GetPeriod() *duration.Duration { +func (x *UptimeCheckConfig) GetPeriod() *durationpb.Duration { if x != nil { return x.Period } return nil } -func (x *UptimeCheckConfig) GetTimeout() *duration.Duration { +func (x *UptimeCheckConfig) GetTimeout() *durationpb.Duration { if x != nil { return x.Timeout } @@ -1469,7 +1469,7 @@ var file_google_monitoring_v3_uptime_proto_goTypes = []interface{}{ (*UptimeCheckConfig_HttpCheck_BasicAuthentication)(nil), // 13: google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication nil, // 14: google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry (*monitoredres.MonitoredResource)(nil), // 15: google.api.MonitoredResource - (*duration.Duration)(nil), // 16: google.protobuf.Duration + (*durationpb.Duration)(nil), // 16: google.protobuf.Duration } var file_google_monitoring_v3_uptime_proto_depIdxs = []int32{ 2, // 0: google.monitoring.v3.InternalChecker.state:type_name -> google.monitoring.v3.InternalChecker.State diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go index eac384ea018..a828a127e30 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/monitoring/v3/uptime_service.proto package monitoring @@ -26,15 +26,15 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - _ "github.com/golang/protobuf/ptypes/duration" - empty "github.com/golang/protobuf/ptypes/empty" _ "google.golang.org/genproto/googleapis/api/annotations" - field_mask "google.golang.org/genproto/protobuf/field_mask" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + _ "google.golang.org/protobuf/types/known/durationpb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" ) const ( @@ -315,7 +315,7 @@ type UpdateUptimeCheckConfigRequest struct { // configuration are updated with values from the new configuration. If this // field is empty, then the current configuration is completely replaced with // the new configuration. 
- UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` // Required. If an `updateMask` has been specified, this field gives // the values for the set of fields mentioned in the `updateMask`. If an // `updateMask` has not been given, this Uptime check configuration replaces @@ -362,7 +362,7 @@ func (*UpdateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{4} } -func (x *UpdateUptimeCheckConfigRequest) GetUpdateMask() *field_mask.FieldMask { +func (x *UpdateUptimeCheckConfigRequest) GetUpdateMask() *fieldmaskpb.FieldMask { if x != nil { return x.UpdateMask } @@ -777,9 +777,9 @@ var file_google_monitoring_v3_uptime_service_proto_goTypes = []interface{}{ (*ListUptimeCheckIpsRequest)(nil), // 6: google.monitoring.v3.ListUptimeCheckIpsRequest (*ListUptimeCheckIpsResponse)(nil), // 7: google.monitoring.v3.ListUptimeCheckIpsResponse (*UptimeCheckConfig)(nil), // 8: google.monitoring.v3.UptimeCheckConfig - (*field_mask.FieldMask)(nil), // 9: google.protobuf.FieldMask + (*fieldmaskpb.FieldMask)(nil), // 9: google.protobuf.FieldMask (*UptimeCheckIp)(nil), // 10: google.monitoring.v3.UptimeCheckIp - (*empty.Empty)(nil), // 11: google.protobuf.Empty + (*emptypb.Empty)(nil), // 11: google.protobuf.Empty } var file_google_monitoring_v3_uptime_service_proto_depIdxs = []int32{ 8, // 0: google.monitoring.v3.ListUptimeCheckConfigsResponse.uptime_check_configs:type_name -> google.monitoring.v3.UptimeCheckConfig @@ -957,7 +957,7 @@ type UptimeCheckServiceClient interface { // Deletes an Uptime check configuration. Note that this method will fail // if the Uptime check configuration is referenced by an alert policy or // other dependent configs that would be rendered invalid by the deletion. - DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Returns the list of IP addresses that checkers run from ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) } @@ -1006,8 +1006,8 @@ func (c *uptimeCheckServiceClient) UpdateUptimeCheckConfig(ctx context.Context, return out, nil } -func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", in, out, opts...) if err != nil { return nil, err @@ -1041,7 +1041,7 @@ type UptimeCheckServiceServer interface { // Deletes an Uptime check configuration. Note that this method will fail // if the Uptime check configuration is referenced by an alert policy or // other dependent configs that would be rendered invalid by the deletion. 
- DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*empty.Empty, error) + DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*emptypb.Empty, error) // Returns the list of IP addresses that checkers run from ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) } @@ -1062,7 +1062,7 @@ func (*UnimplementedUptimeCheckServiceServer) CreateUptimeCheckConfig(context.Co func (*UnimplementedUptimeCheckServiceServer) UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateUptimeCheckConfig not implemented") } -func (*UnimplementedUptimeCheckServiceServer) DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*empty.Empty, error) { +func (*UnimplementedUptimeCheckServiceServer) DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteUptimeCheckConfig not implemented") } func (*UnimplementedUptimeCheckServiceServer) ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) { diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go index 2bf62796a15..386fd7b13c4 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/rpc/code.proto package code diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 063d724cb56..e79a5388465 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/rpc/status.proto package status @@ -25,9 +25,9 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" ) const ( @@ -61,7 +61,7 @@ type Status struct { Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. 
- Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` + Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` } func (x *Status) Reset() { @@ -110,7 +110,7 @@ func (x *Status) GetMessage() string { return "" } -func (x *Status) GetDetails() []*any.Any { +func (x *Status) GetDetails() []*anypb.Any { if x != nil { return x.Details } @@ -154,8 +154,8 @@ func file_google_rpc_status_proto_rawDescGZIP() []byte { var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_google_rpc_status_proto_goTypes = []interface{}{ - (*Status)(nil), // 0: google.rpc.Status - (*any.Any)(nil), // 1: google.protobuf.Any + (*Status)(nil), // 0: google.rpc.Status + (*anypb.Any)(nil), // 1: google.protobuf.Any } var file_google_rpc_status_proto_depIdxs = []int32{ 1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any diff --git a/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go b/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go index 7af75cb2073..c21bc43fd89 100644 --- a/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go @@ -15,8 +15,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/type/calendar_period.proto package calendarperiod diff --git a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go index 6f0566eda6a..1cc50df7366 100644 --- a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go @@ -15,8 +15,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/type/expr.proto package expr diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml index a11e8cbca66..0e24e59f056 100644 --- a/vendor/google.golang.org/grpc/.travis.yml +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -2,19 +2,19 @@ language: go matrix: include: - - go: 1.13.x + - go: 1.14.x env: VET=1 GO111MODULE=on - - go: 1.13.x + - go: 1.14.x env: RACE=1 GO111MODULE=on - - go: 1.13.x + - go: 1.14.x env: RUN386=1 - - go: 1.13.x + - go: 1.14.x env: GRPC_GO_RETRY=on - - go: 1.13.x + - go: 1.14.x env: TESTEXTRAS=1 - - go: 1.12.x + - go: 1.13.x env: GO111MODULE=on - - go: 1.11.x + - go: 1.12.x env: GO111MODULE=on - go: 1.9.x env: GAE=1 diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 800e7bd4c90..fef78e4a1df 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,64 +1,53 @@ # gRPC-Go [![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) -[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) +[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) -The Go implementation of [gRPC](https://grpc.io/): A high performance, open -source, general RPC framework that puts mobile and HTTP/2 first. For more -information see the [gRPC Quick Start: -Go](https://grpc.io/docs/quickstart/go.html) guide. +The [Go][] implementation of [gRPC][]: A high performance, open source, general +RPC framework that puts mobile and HTTP/2 first. For more information see the +[Go gRPC docs][], or jump directly into the [quick start][]. -Installation ------------- +## Prerequisites -To install this package, you need to install Go and setup your Go workspace on -your computer. The simplest way to install the library is to run: +- **[Go][]**: any one of the **three latest major** [releases][go-releases]. -``` -$ go get -u google.golang.org/grpc +## Installation + +With [Go module][] support (Go 1.11+), simply add the following import + +```go +import "google.golang.org/grpc" ``` -With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in -your source code and `go [build|run|test]` will automatically download the -necessary dependencies ([Go modules -ref](https://github.com/golang/go/wiki/Modules)). +to your code, and then `go [build|run|test]` will automatically fetch the +necessary dependencies. -If you are trying to access grpc-go from within China, please see the -[FAQ](#FAQ) below. +Otherwise, to install the `grpc-go` package, run the following command: -Prerequisites -------------- -gRPC-Go requires Go 1.9 or later. +```console +$ go get -u google.golang.org/grpc +``` -Documentation -------------- -- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API - descriptions. -- Documentation on specific topics can be found in the [Documentation - directory](Documentation/). -- Examples can be found in the [examples directory](examples/). +> **Note:** If you are trying to access `grpc-go` from **China**, see the +> [FAQ](#FAQ) below. 
-Performance ------------ -Performance benchmark data for grpc-go and other languages is maintained in -[this -dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). +## Learn more -Status ------- -General Availability [Google Cloud Platform Launch -Stages](https://cloud.google.com/terms/launch-stages). +- [Go gRPC docs][], which include a [quick start][] and [API + reference][API] among other resources +- [Low-level technical docs](Documentation) from this repository +- [Performance benchmark][] +- [Examples](examples) -FAQ ---- +## FAQ -#### I/O Timeout Errors +### I/O Timeout Errors -The `golang.org` domain may be blocked from some countries. `go get` usually +The `golang.org` domain may be blocked from some countries. `go get` usually produces an error like the following when this happens: -``` +```console $ go get -u google.golang.org/grpc package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) ``` @@ -69,7 +58,7 @@ To build Go code, there are several options: - Without Go module support: `git clone` the repo manually: - ``` + ```sh git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc ``` @@ -79,7 +68,7 @@ To build Go code, there are several options: - With Go module support: it is possible to use the `replace` feature of `go mod` to create aliases for golang.org packages. In your project's directory: - ``` + ```sh go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest go mod tidy go mod vendor @@ -87,19 +76,17 @@ To build Go code, there are several options: ``` Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. Please refer to [this - issue](https://github.com/golang/go/issues/28652) in the golang repo regarding - this concern. + golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). -#### Compiling error, undefined: grpc.SupportPackageIsVersion +### Compiling error, undefined: grpc.SupportPackageIsVersion -##### If you are using Go modules: +#### If you are using Go modules: -Please ensure your gRPC-Go version is `require`d at the appropriate version in +Ensure your gRPC-Go version is `require`d at the appropriate version in the same module containing the generated `.pb.go` files. For example, `SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: -``` +```go module require ( @@ -107,23 +94,27 @@ require ( ) ``` -##### If you are *not* using Go modules: +#### If you are *not* using Go modules: -Please update proto package, gRPC package and rebuild the proto files: - - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` - - `go get -u google.golang.org/grpc` - - `protoc --go_out=plugins=grpc:. *.proto` +Update the `proto` package, gRPC package, and rebuild the `.proto` files: -#### How to turn on logging +```sh +go get -u github.com/golang/protobuf/{proto,protoc-gen-go} +go get -u google.golang.org/grpc +protoc --go_out=plugins=grpc:. *.proto +``` -The default logger is controlled by the environment variables. Turn everything -on by setting: +### How to turn on logging -``` -GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info +The default logger is controlled by environment variables. 
Turn everything on +like this: + +```console +$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99 +$ export GRPC_GO_LOG_SEVERITY_LEVEL=info ``` -#### The RPC failed with error `"code = Unavailable desc = transport is closing"` +### The RPC failed with error `"code = Unavailable desc = transport is closing"` This error means the connection the RPC is using was closed, and there are many possible reasons, including: @@ -139,3 +130,12 @@ It can be tricky to debug this because the error happens on the client side but the root cause of the connection being closed is on the server side. Turn on logging on __both client and server__, and see if there are any transport errors. + +[API]: https://grpc.io/docs/languages/go/api +[Go]: https://golang.org +[Go module]: https://github.com/golang/go/wiki/Modules +[gRPC]: https://grpc.io +[Go gRPC docs]: https://grpc.io/docs/languages/go +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696 +[quick start]: https://grpc.io/docs/languages/go/quickstart +[go-releases]: https://golang.org/doc/devel/release.html diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 68ffc620137..ee5c51e6cdb 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -50,6 +50,9 @@ func New(kvs ...interface{}) *Attributes { // times, the last value overwrites all previous values for that key. To // remove an existing key, use a nil value. func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { + if a == nil { + return New(kvs...) + } if len(kvs)%2 != 0 { panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) } @@ -66,5 +69,8 @@ func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { // Value returns the value associated with these attributes for key, or nil if // no value is associated with key. func (a *Attributes) Value(key interface{}) interface{} { + if a == nil { + return nil + } return a.m[key] } diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go deleted file mode 100644 index a8eb0f47609..00000000000 --- a/vendor/google.golang.org/grpc/balancer.go +++ /dev/null @@ -1,391 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "net" - "sync" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/naming" - "google.golang.org/grpc/status" -) - -// Address represents a server the client connects to. -// -// Deprecated: please use package balancer. -type Address struct { - // Addr is the server address on which a connection will be established. - Addr string - // Metadata is the information associated with Addr, which may be used - // to make load balancing decision. 
- Metadata interface{} -} - -// BalancerConfig specifies the configurations for Balancer. -// -// Deprecated: please use package balancer. May be removed in a future 1.x release. -type BalancerConfig struct { - // DialCreds is the transport credential the Balancer implementation can - // use to dial to a remote load balancer server. The Balancer implementations - // can ignore this if it does not need to talk to another party securely. - DialCreds credentials.TransportCredentials - // Dialer is the custom dialer the Balancer implementation can use to dial - // to a remote load balancer server. The Balancer implementations - // can ignore this if it doesn't need to talk to remote balancer. - Dialer func(context.Context, string) (net.Conn, error) -} - -// BalancerGetOptions configures a Get call. -// -// Deprecated: please use package balancer. May be removed in a future 1.x release. -type BalancerGetOptions struct { - // BlockingWait specifies whether Get should block when there is no - // connected address. - BlockingWait bool -} - -// Balancer chooses network addresses for RPCs. -// -// Deprecated: please use package balancer. May be removed in a future 1.x release. -type Balancer interface { - // Start does the initialization work to bootstrap a Balancer. For example, - // this function may start the name resolution and watch the updates. It will - // be called when dialing. - Start(target string, config BalancerConfig) error - // Up informs the Balancer that gRPC has a connection to the server at - // addr. It returns down which is called once the connection to addr gets - // lost or closed. - // TODO: It is not clear how to construct and take advantage of the meaningful error - // parameter for down. Need realistic demands to guide. - Up(addr Address) (down func(error)) - // Get gets the address of a server for the RPC corresponding to ctx. - // i) If it returns a connected address, gRPC internals issues the RPC on the - // connection to this address; - // ii) If it returns an address on which the connection is under construction - // (initiated by Notify(...)) but not connected, gRPC internals - // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or - // Shutdown state; - // or - // * issues RPC on the connection otherwise. - // iii) If it returns an address on which the connection does not exist, gRPC - // internals treats it as an error and will fail the corresponding RPC. - // - // Therefore, the following is the recommended rule when writing a custom Balancer. - // If opts.BlockingWait is true, it should return a connected address or - // block if there is no connected address. It should respect the timeout or - // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast - // RPCs), it should return an address it has notified via Notify(...) immediately - // instead of blocking. - // - // The function returns put which is called once the rpc has completed or failed. - // put can collect and report RPC stats to a remote load balancer. - // - // This function should only return the errors Balancer cannot recover by itself. - // gRPC internals will fail the RPC if an error is returned. - Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) - // Notify returns a channel that is used by gRPC internals to watch the addresses - // gRPC needs to connect. The addresses might be from a name resolver or remote - // load balancer. gRPC internals will compare it with the existing connected - // addresses. 
If the address Balancer notified is not in the existing connected - // addresses, gRPC starts to connect the address. If an address in the existing - // connected addresses is not in the notification list, the corresponding connection - // is shutdown gracefully. Otherwise, there are no operations to take. Note that - // the Address slice must be the full list of the Addresses which should be connected. - // It is NOT delta. - Notify() <-chan []Address - // Close shuts down the balancer. - Close() error -} - -// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch -// the name resolution updates and updates the addresses available correspondingly. -// -// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release. -func RoundRobin(r naming.Resolver) Balancer { - return &roundRobin{r: r} -} - -type addrInfo struct { - addr Address - connected bool -} - -type roundRobin struct { - r naming.Resolver - w naming.Watcher - addrs []*addrInfo // all the addresses the client should potentially connect - mu sync.Mutex - addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. - next int // index of the next address to return for Get() - waitCh chan struct{} // the channel to block when there is no connected address available - done bool // The Balancer is closed. -} - -func (rr *roundRobin) watchAddrUpdates() error { - updates, err := rr.w.Next() - if err != nil { - grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) - return err - } - rr.mu.Lock() - defer rr.mu.Unlock() - for _, update := range updates { - addr := Address{ - Addr: update.Addr, - Metadata: update.Metadata, - } - switch update.Op { - case naming.Add: - var exist bool - for _, v := range rr.addrs { - if addr == v.addr { - exist = true - grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) - break - } - } - if exist { - continue - } - rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) - case naming.Delete: - for i, v := range rr.addrs { - if addr == v.addr { - copy(rr.addrs[i:], rr.addrs[i+1:]) - rr.addrs = rr.addrs[:len(rr.addrs)-1] - break - } - } - default: - grpclog.Errorln("Unknown update.Op ", update.Op) - } - } - // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. - open := make([]Address, len(rr.addrs)) - for i, v := range rr.addrs { - open[i] = v.addr - } - if rr.done { - return ErrClientConnClosing - } - select { - case <-rr.addrCh: - default: - } - rr.addrCh <- open - return nil -} - -func (rr *roundRobin) Start(target string, config BalancerConfig) error { - rr.mu.Lock() - defer rr.mu.Unlock() - if rr.done { - return ErrClientConnClosing - } - if rr.r == nil { - // If there is no name resolver installed, it is not needed to - // do name resolution. In this case, target is added into rr.addrs - // as the only address available and rr.addrCh stays nil. - rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) - return nil - } - w, err := rr.r.Resolve(target) - if err != nil { - return err - } - rr.w = w - rr.addrCh = make(chan []Address, 1) - go func() { - for { - if err := rr.watchAddrUpdates(); err != nil { - return - } - } - }() - return nil -} - -// Up sets the connected state of addr and sends notification if there are pending -// Get() calls. 
-func (rr *roundRobin) Up(addr Address) func(error) { - rr.mu.Lock() - defer rr.mu.Unlock() - var cnt int - for _, a := range rr.addrs { - if a.addr == addr { - if a.connected { - return nil - } - a.connected = true - } - if a.connected { - cnt++ - } - } - // addr is only one which is connected. Notify the Get() callers who are blocking. - if cnt == 1 && rr.waitCh != nil { - close(rr.waitCh) - rr.waitCh = nil - } - return func(err error) { - rr.down(addr, err) - } -} - -// down unsets the connected state of addr. -func (rr *roundRobin) down(addr Address, err error) { - rr.mu.Lock() - defer rr.mu.Unlock() - for _, a := range rr.addrs { - if addr == a.addr { - a.connected = false - break - } - } -} - -// Get returns the next addr in the rotation. -func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { - var ch chan struct{} - rr.mu.Lock() - if rr.done { - rr.mu.Unlock() - err = ErrClientConnClosing - return - } - - if len(rr.addrs) > 0 { - if rr.next >= len(rr.addrs) { - rr.next = 0 - } - next := rr.next - for { - a := rr.addrs[next] - next = (next + 1) % len(rr.addrs) - if a.connected { - addr = a.addr - rr.next = next - rr.mu.Unlock() - return - } - if next == rr.next { - // Has iterated all the possible address but none is connected. - break - } - } - } - if !opts.BlockingWait { - if len(rr.addrs) == 0 { - rr.mu.Unlock() - err = status.Errorf(codes.Unavailable, "there is no address available") - return - } - // Returns the next addr on rr.addrs for failfast RPCs. - addr = rr.addrs[rr.next].addr - rr.next++ - rr.mu.Unlock() - return - } - // Wait on rr.waitCh for non-failfast RPCs. - if rr.waitCh == nil { - ch = make(chan struct{}) - rr.waitCh = ch - } else { - ch = rr.waitCh - } - rr.mu.Unlock() - for { - select { - case <-ctx.Done(): - err = ctx.Err() - return - case <-ch: - rr.mu.Lock() - if rr.done { - rr.mu.Unlock() - err = ErrClientConnClosing - return - } - - if len(rr.addrs) > 0 { - if rr.next >= len(rr.addrs) { - rr.next = 0 - } - next := rr.next - for { - a := rr.addrs[next] - next = (next + 1) % len(rr.addrs) - if a.connected { - addr = a.addr - rr.next = next - rr.mu.Unlock() - return - } - if next == rr.next { - // Has iterated all the possible address but none is connected. - break - } - } - } - // The newly added addr got removed by Down() again. - if rr.waitCh == nil { - ch = make(chan struct{}) - rr.waitCh = ch - } else { - ch = rr.waitCh - } - rr.mu.Unlock() - } - } -} - -func (rr *roundRobin) Notify() <-chan []Address { - return rr.addrCh -} - -func (rr *roundRobin) Close() error { - rr.mu.Lock() - defer rr.mu.Unlock() - if rr.done { - return errBalancerClosed - } - rr.done = true - if rr.w != nil { - rr.w.Close() - } - if rr.waitCh != nil { - close(rr.waitCh) - rr.waitCh = nil - } - if rr.addrCh != nil { - close(rr.addrCh) - } - return nil -} - -// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. -// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() -// returns the only address Up by resetTransport(). 
-type pickFirst struct { - *roundRobin -} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 9258858ed75..8bf359dbfda 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -111,6 +111,9 @@ type NewSubConnOptions struct { // CredsBundle is the credentials bundle that will be used in the created // SubConn. If it's nil, the original creds from grpc DialOptions will be // used. + // + // Deprecated: Use the Attributes field in resolver.Address to pass + // arbitrary data to the credential handshaker. CredsBundle credentials.Bundle // HealthCheckEnabled indicates whether health check service should be // enabled on this SubConn @@ -123,7 +126,7 @@ type State struct { // determine the state of the ClientConn. ConnectivityState connectivity.State // Picker is used to choose connections (SubConns) for RPCs. - Picker V2Picker + Picker Picker } // ClientConn represents a gRPC ClientConn. @@ -141,20 +144,11 @@ type ClientConn interface { // The SubConn will be shutdown. RemoveSubConn(SubConn) - // UpdateBalancerState is called by balancer to notify gRPC that some internal - // state in balancer has changed. - // - // gRPC will update the connectivity state of the ClientConn, and will call pick - // on the new picker to pick new SubConn. - // - // Deprecated: use UpdateState instead - UpdateBalancerState(s connectivity.State, p Picker) - // UpdateState notifies gRPC that the balancer's internal state has // changed. // - // gRPC will update the connectivity state of the ClientConn, and will call pick - // on the new picker to pick new SubConns. + // gRPC will update the connectivity state of the ClientConn, and will call + // Pick on the new Picker to pick new SubConns. UpdateState(State) // ResolveNow is called by balancer to notify gRPC to do a name resolving. @@ -232,55 +226,16 @@ type DoneInfo struct { var ( // ErrNoSubConnAvailable indicates no SubConn is available for pick(). - // gRPC will block the RPC until a new picker is available via UpdateBalancerState(). + // gRPC will block the RPC until a new picker is available via UpdateState(). ErrNoSubConnAvailable = errors.New("no SubConn is available") // ErrTransientFailure indicates all SubConns are in TransientFailure. // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. - ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure")) -) - -// Picker is used by gRPC to pick a SubConn to send an RPC. -// Balancer is expected to generate a new picker from its snapshot every time its -// internal state has changed. -// -// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). -// -// Deprecated: use V2Picker instead -type Picker interface { - // Pick returns the SubConn to be used to send the RPC. - // The returned SubConn must be one returned by NewSubConn(). - // - // This functions is expected to return: - // - a SubConn that is known to be READY; - // - ErrNoSubConnAvailable if no SubConn is available, but progress is being - // made (for example, some SubConn is in CONNECTING mode); - // - other errors if no active connecting is happening (for example, all SubConn - // are in TRANSIENT_FAILURE mode). 
- // - // If a SubConn is returned: - // - If it is READY, gRPC will send the RPC on it; - // - If it is not ready, or becomes not ready after it's returned, gRPC will - // block until UpdateBalancerState() is called and will call pick on the - // new picker. The done function returned from Pick(), if not nil, will be - // called with nil error, no bytes sent and no bytes received. // - // If the returned error is not nil: - // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() - // - If the error is ErrTransientFailure or implements IsTransientFailure() - // bool, returning true: - // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() - // is called to pick again; - // - Otherwise, RPC will fail with unavailable error. - // - Else (error is other non-nil error): - // - The RPC will fail with the error's status code, or Unknown if it is - // not a status error. - // - // The returned done() function will be called once the rpc has finished, - // with the final status of that RPC. If the SubConn returned is not a - // valid SubConn type, done may not be called. done may be nil if balancer - // doesn't care about the RPC status. - Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error) -} + // Deprecated: return an appropriate error based on the last resolution or + // connection attempt instead. The behavior is the same for any non-gRPC + // status error. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) // PickResult contains information related to a connection chosen for an RPC. type PickResult struct { @@ -297,24 +252,19 @@ type PickResult struct { Done func(DoneInfo) } -type transientFailureError struct { - error -} - -func (e *transientFailureError) IsTransientFailure() bool { return true } - -// TransientFailureError wraps err in an error implementing -// IsTransientFailure() bool, returning true. -func TransientFailureError(err error) error { - return &transientFailureError{error: err} -} +// TransientFailureError returns e. It exists for backward compatibility and +// will be deleted soon. +// +// Deprecated: no longer necessary, picker errors are treated this way by +// default. +func TransientFailureError(e error) error { return e } -// V2Picker is used by gRPC to pick a SubConn to send an RPC. +// Picker is used by gRPC to pick a SubConn to send an RPC. // Balancer is expected to generate a new picker from its snapshot every time its // internal state has changed. // -// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). -type V2Picker interface { +// The pickers used by gRPC can be updated by ClientConn.UpdateState(). +type Picker interface { // Pick returns the connection to use for this RPC and related information. // // Pick should not block. If the balancer needs to do I/O or any blocking @@ -327,14 +277,13 @@ type V2Picker interface { // - If the error is ErrNoSubConnAvailable, gRPC will block until a new // Picker is provided by the balancer (using ClientConn.UpdateState). // - // - If the error implements IsTransientFailure() bool, returning true, - // wait for ready RPCs will wait, but non-wait for ready RPCs will be - // terminated with this error's Error() string and status code - // Unavailable. + // - If the error is a status error (implemented by the grpc/status + // package), gRPC will terminate the RPC with the code and message + // provided. 
// - // - Any other errors terminate all RPCs with the code and message - // provided. If the error is not a status error, it will be converted by - // gRPC to a status error with code Unknown. + // - For all other errors, wait for ready RPCs will wait, but non-wait for + // ready RPCs will be terminated with this error's Error() string and + // status code Unavailable. Pick(info PickInfo) (PickResult, error) } @@ -343,29 +292,21 @@ type V2Picker interface { // // It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. // -// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed -// to be called synchronously from the same goroutine. -// There's no guarantee on picker.Pick, it may be called anytime. +// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are +// guaranteed to be called synchronously from the same goroutine. There's no +// guarantee on picker.Pick, it may be called anytime. type Balancer interface { - // HandleSubConnStateChange is called by gRPC when the connectivity state - // of sc has changed. - // Balancer is expected to aggregate all the state of SubConn and report - // that back to gRPC. - // Balancer should also generate and update Pickers when its internal state has - // been changed by the new state. - // - // Deprecated: if V2Balancer is implemented by the Balancer, - // UpdateSubConnState will be called instead. - HandleSubConnStateChange(sc SubConn, state connectivity.State) - // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to - // balancers. - // Balancer can create new SubConn or remove SubConn with the addresses. - // An empty address slice and a non-nil error will be passed if the resolver returns - // non-nil error to gRPC. - // - // Deprecated: if V2Balancer is implemented by the Balancer, - // UpdateClientConnState will be called instead. - HandleResolvedAddrs([]resolver.Address, error) + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. If the error returned is ErrBadResolverState, the ClientConn + // will begin calling ResolveNow on the active name resolver with + // exponential backoff until a subsequent call to UpdateClientConnState + // returns a nil error. Any other errors are currently ignored. + UpdateClientConnState(ClientConnState) error + // ResolverError is called by gRPC when the name resolver reports an error. + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. + UpdateSubConnState(SubConn, SubConnState) // Close closes the balancer. The balancer is not required to call // ClientConn.RemoveSubConn for its existing SubConns. Close() @@ -393,27 +334,6 @@ type ClientConnState struct { // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") -// V2Balancer is defined for documentation purposes. If a Balancer also -// implements V2Balancer, its UpdateClientConnState method will be called -// instead of HandleResolvedAddrs and its UpdateSubConnState will be called -// instead of HandleSubConnStateChange. -type V2Balancer interface { - // UpdateClientConnState is called by gRPC when the state of the ClientConn - // changes. If the error returned is ErrBadResolverState, the ClientConn - // will begin calling ResolveNow on the active name resolver with - // exponential backoff until a subsequent call to UpdateClientConnState - // returns a nil error. Any other errors are currently ignored. 
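With `V2Picker` folded back into `Picker`, a custom picker implements the single `Pick(PickInfo) (PickResult, error)` method described above. A hedged sketch follows; the struct name and how its SubConn gets populated are assumptions, while the signature, `ErrNoSubConnAvailable`, and the `Done` callback are taken from the interface shown here.

```go
package examplebalancer

import "google.golang.org/grpc/balancer"

// onePicker is a hypothetical picker holding a single READY SubConn that the
// owning balancer previously obtained from ClientConn.NewSubConn.
type onePicker struct {
	sc balancer.SubConn
}

func (p *onePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
	if p.sc == nil {
		// gRPC queues the RPC until the balancer supplies a new picker
		// through ClientConn.UpdateState.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	return balancer.PickResult{
		SubConn: p.sc,
		Done: func(di balancer.DoneInfo) {
			// Called when the RPC finishes; a real picker could record
			// per-address stats here.
		},
	}, nil
}
```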
- UpdateClientConnState(ClientConnState) error - // ResolverError is called by gRPC when the name resolver reports an error. - ResolverError(error) - // UpdateSubConnState is called by gRPC when the state of a SubConn - // changes. - UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. - Close() -} - // ConnectivityStateEvaluator takes the connectivity states of multiple SubConns // and returns one aggregated connectivity state. // diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 80559b80ace..32d782f1cf5 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -19,7 +19,6 @@ package base import ( - "context" "errors" "fmt" @@ -29,18 +28,18 @@ import ( "google.golang.org/grpc/resolver" ) +var logger = grpclog.Component("balancer") + type baseBuilder struct { - name string - pickerBuilder PickerBuilder - v2PickerBuilder V2PickerBuilder - config Config + name string + pickerBuilder PickerBuilder + config Config } func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { bal := &baseBalancer{ - cc: cc, - pickerBuilder: bb.pickerBuilder, - v2PickerBuilder: bb.v2PickerBuilder, + cc: cc, + pickerBuilder: bb.pickerBuilder, subConns: make(map[resolver.Address]balancer.SubConn), scStates: make(map[balancer.SubConn]connectivity.State), @@ -50,11 +49,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) // Initialize picker to a picker that always returns // ErrNoSubConnAvailable, because when state of a SubConn changes, we // may call UpdateState with this picker. - if bb.pickerBuilder != nil { - bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) - } else { - bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable) - } + bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) return bal } @@ -62,12 +57,9 @@ func (bb *baseBuilder) Name() string { return bb.name } -var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer - type baseBalancer struct { - cc balancer.ClientConn - pickerBuilder PickerBuilder - v2PickerBuilder V2PickerBuilder + cc balancer.ClientConn + pickerBuilder PickerBuilder csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State @@ -75,43 +67,34 @@ type baseBalancer struct { subConns map[resolver.Address]balancer.SubConn scStates map[balancer.SubConn]connectivity.State picker balancer.Picker - v2Picker balancer.V2Picker config Config resolverErr error // the last error reported by the resolver; cleared on successful resolution connErr error // the last connection error; cleared upon leaving TransientFailure } -func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { - panic("not implemented") -} - func (b *baseBalancer) ResolverError(err error) { b.resolverErr = err if len(b.subConns) == 0 { b.state = connectivity.TransientFailure } + if b.state != connectivity.TransientFailure { // The picker will not change since the balancer does not currently // report an error. 
return } b.regeneratePicker() - if b.picker != nil { - b.cc.UpdateBalancerState(b.state, b.picker) - } else { - b.cc.UpdateState(balancer.State{ - ConnectivityState: b.state, - Picker: b.v2Picker, - }) - } + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.picker, + }) } func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - // TODO: handle s.ResolverState.Err (log if not nil) once implemented. // TODO: handle s.ResolverState.ServiceConfig? - if grpclog.V(2) { - grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s) + if logger.V(2) { + logger.Info("base.baseBalancer: got new ClientConn state: ", s) } // Successful resolution; clear resolver error and ensure we return nil. b.resolverErr = nil @@ -123,7 +106,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // a is a new address (not existing in b.subConns). sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) if err != nil { - grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } b.subConns[a] = sc @@ -137,7 +120,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { b.cc.RemoveSubConn(sc) delete(b.subConns, a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in HandleSubConnStateChange. + // The entry will be deleted in UpdateSubConnState. } } // If resolver state contains no addresses, return an error so ClientConn @@ -171,49 +154,29 @@ func (b *baseBalancer) mergeErrors() error { // - built by the pickerBuilder with all READY SubConns otherwise. func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { - if b.pickerBuilder != nil { - b.picker = NewErrPicker(balancer.ErrTransientFailure) - } else { - b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors())) - } + b.picker = NewErrPicker(b.mergeErrors()) return } - if b.pickerBuilder != nil { - readySCs := make(map[resolver.Address]balancer.SubConn) - - // Filter out all ready SCs from full subConn map. - for addr, sc := range b.subConns { - if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { - readySCs[addr] = sc - } - } - b.picker = b.pickerBuilder.Build(readySCs) - } else { - readySCs := make(map[balancer.SubConn]SubConnInfo) + readySCs := make(map[balancer.SubConn]SubConnInfo) - // Filter out all ready SCs from full subConn map. - for addr, sc := range b.subConns { - if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { - readySCs[sc] = SubConnInfo{Address: addr} - } + // Filter out all ready SCs from full subConn map. 
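For context, a hypothetical picker builder against the merged base API that regeneratePicker (shown here) now feeds with PickerBuildInfo; Build returns a balancer.Picker rather than a V2Picker. The firstReady* names and the "first_ready" policy name are illustrative, not part of this patch:

package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

type firstReadyPickerBuilder struct{}

// Build receives only READY SubConns via PickerBuildInfo.ReadySCs.
func (*firstReadyPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	if len(info.ReadySCs) == 0 {
		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
	}
	// Pick any ready SubConn; map iteration order is unspecified.
	for sc := range info.ReadySCs {
		return &firstReadyPicker{sc: sc}
	}
	return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
}

type firstReadyPicker struct {
	sc balancer.SubConn
}

func (p *firstReadyPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}

func init() {
	// NewBalancerBuilder now takes the Config directly; the former
	// NewBalancerBuilderWithConfig and NewBalancerBuilderV2 variants are gone.
	balancer.Register(base.NewBalancerBuilder("first_ready", &firstReadyPickerBuilder{}, base.Config{HealthCheck: true}))
}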
+ for addr, sc := range b.subConns { + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} } - b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } -} - -func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - panic("not implemented") + b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState - if grpclog.V(2) { - grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) } oldS, ok := b.scStates[sc] if !ok { - if grpclog.V(2) { - grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) } return } @@ -247,11 +210,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su b.regeneratePicker() } - if b.picker != nil { - b.cc.UpdateBalancerState(b.state, b.picker) - } else { - b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker}) - } + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } // Close is a nop because base balancer doesn't have internal state to clean up, @@ -259,28 +218,20 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su func (b *baseBalancer) Close() { } -// NewErrPicker returns a picker that always returns err on Pick(). +// NewErrPicker returns a Picker that always returns err on Pick(). func NewErrPicker(err error) balancer.Picker { return &errPicker{err: err} } -type errPicker struct { - err error // Pick() always returns this err. -} +// NewErrPickerV2 is temporarily defined for backward compatibility reasons. +// +// Deprecated: use NewErrPicker instead. +var NewErrPickerV2 = NewErrPicker -func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { - return nil, nil, p.err -} - -// NewErrPickerV2 returns a V2Picker that always returns err on Pick(). -func NewErrPickerV2(err error) balancer.V2Picker { - return &errPickerV2{err: err} -} - -type errPickerV2 struct { +type errPicker struct { err error // Pick() always returns this err. } -func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{}, p.err } diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go index 4192918b9e2..e31d76e338a 100644 --- a/vendor/google.golang.org/grpc/balancer/base/base.go +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -37,15 +37,8 @@ import ( // PickerBuilder creates balancer.Picker. type PickerBuilder interface { - // Build takes a slice of ready SubConns, and returns a picker that will be - // used by gRPC to pick a SubConn. - Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker -} - -// V2PickerBuilder creates balancer.V2Picker. -type V2PickerBuilder interface { // Build returns a picker that will be used by gRPC to pick a SubConn. 
- Build(info PickerBuildInfo) balancer.V2Picker + Build(info PickerBuildInfo) balancer.Picker } // PickerBuildInfo contains information needed by the picker builder to @@ -62,32 +55,17 @@ type SubConnInfo struct { Address resolver.Address // the address used to create this SubConn } -// NewBalancerBuilder returns a balancer builder. The balancers -// built by this builder will use the picker builder to build pickers. -func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder { - return NewBalancerBuilderWithConfig(name, pb, Config{}) -} - // Config contains the config info about the base balancer builder. type Config struct { // HealthCheck indicates whether health checking should be enabled for this specific balancer. HealthCheck bool } -// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config. -func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder { +// NewBalancerBuilder returns a base balancer builder configured by the provided config. +func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder { return &baseBuilder{ name: name, pickerBuilder: pb, config: config, } } - -// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config. -func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder { - return &baseBuilder{ - name: name, - v2PickerBuilder: pb, - config: config, - } -} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index b59191ac582..7381dfc1ae4 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -4,14 +4,10 @@ package grpc_lb_v1 import ( - context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" duration "github.com/golang/protobuf/ptypes/duration" timestamp "github.com/golang/protobuf/ptypes/timestamp" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" math "math" ) @@ -642,117 +638,3 @@ var fileDescriptor_7cd3f6d792743fdf = []byte{ 0x6d, 0xe1, 0xbe, 0xfb, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x3f, 0x47, 0x55, 0xac, 0xab, 0x06, 0x00, 0x00, } - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// LoadBalancerClient is the client API for LoadBalancer service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type LoadBalancerClient interface { - // Bidirectional rpc to get a list of servers. - BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) -} - -type loadBalancerClient struct { - cc grpc.ClientConnInterface -} - -func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { - return &loadBalancerClient{cc} -} - -func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { - stream, err := c.cc.NewStream(ctx, &_LoadBalancer_serviceDesc.Streams[0], "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) 
- if err != nil { - return nil, err - } - x := &loadBalancerBalanceLoadClient{stream} - return x, nil -} - -type LoadBalancer_BalanceLoadClient interface { - Send(*LoadBalanceRequest) error - Recv() (*LoadBalanceResponse, error) - grpc.ClientStream -} - -type loadBalancerBalanceLoadClient struct { - grpc.ClientStream -} - -func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) { - m := new(LoadBalanceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// LoadBalancerServer is the server API for LoadBalancer service. -type LoadBalancerServer interface { - // Bidirectional rpc to get a list of servers. - BalanceLoad(LoadBalancer_BalanceLoadServer) error -} - -// UnimplementedLoadBalancerServer can be embedded to have forward compatible implementations. -type UnimplementedLoadBalancerServer struct { -} - -func (*UnimplementedLoadBalancerServer) BalanceLoad(srv LoadBalancer_BalanceLoadServer) error { - return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented") -} - -func RegisterLoadBalancerServer(s *grpc.Server, srv LoadBalancerServer) { - s.RegisterService(&_LoadBalancer_serviceDesc, srv) -} - -func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) -} - -type LoadBalancer_BalanceLoadServer interface { - Send(*LoadBalanceResponse) error - Recv() (*LoadBalanceRequest, error) - grpc.ServerStream -} - -type loadBalancerBalanceLoadServer struct { - grpc.ServerStream -} - -func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) { - m := new(LoadBalanceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _LoadBalancer_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.lb.v1.LoadBalancer", - HandlerType: (*LoadBalancerServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "BalanceLoad", - Handler: _LoadBalancer_BalanceLoad_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "grpc/lb/v1/load_balancer.proto", -} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go new file mode 100644 index 00000000000..5a3a2ec5764 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -0,0 +1,122 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package grpc_lb_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// LoadBalancerClient is the client API for LoadBalancer service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type LoadBalancerClient interface { + // Bidirectional rpc to get a list of servers. 
+ BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) +} + +type loadBalancerClient struct { + cc grpc.ClientConnInterface +} + +func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { + return &loadBalancerClient{cc} +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { + stream, err := c.cc.NewStream(ctx, &_LoadBalancer_serviceDesc.Streams[0], "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) + if err != nil { + return nil, err + } + x := &loadBalancerBalanceLoadClient{stream} + return x, nil +} + +type LoadBalancer_BalanceLoadClient interface { + Send(*LoadBalanceRequest) error + Recv() (*LoadBalanceResponse, error) + grpc.ClientStream +} + +type loadBalancerBalanceLoadClient struct { + grpc.ClientStream +} + +func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) { + m := new(LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// LoadBalancerServer is the server API for LoadBalancer service. +// All implementations should embed UnimplementedLoadBalancerServer +// for forward compatibility +type LoadBalancerServer interface { + // Bidirectional rpc to get a list of servers. + BalanceLoad(LoadBalancer_BalanceLoadServer) error +} + +// UnimplementedLoadBalancerServer should be embedded to have forward compatible implementations. +type UnimplementedLoadBalancerServer struct { +} + +func (*UnimplementedLoadBalancerServer) BalanceLoad(LoadBalancer_BalanceLoadServer) error { + return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented") +} + +func RegisterLoadBalancerServer(s *grpc.Server, srv LoadBalancerServer) { + s.RegisterService(&_LoadBalancer_serviceDesc, srv) +} + +func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) +} + +type LoadBalancer_BalanceLoadServer interface { + Send(*LoadBalanceResponse) error + Recv() (*LoadBalanceRequest, error) + grpc.ServerStream +} + +type loadBalancerBalanceLoadServer struct { + grpc.ServerStream +} + +func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) { + m := new(LoadBalanceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _LoadBalancer_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.lb.v1.LoadBalancer", + HandlerType: (*LoadBalancerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "BalanceLoad", + Handler: _LoadBalancer_BalanceLoad_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/lb/v1/load_balancer.proto", +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index 219ca7235b4..a7424cf8d2d 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -16,8 +16,6 @@ * */ -//go:generate ./regenerate.sh - // Package grpclb defines a grpclb balancer. 
// // To install grpclb balancer, import this package as: @@ -30,10 +28,9 @@ import ( "sync" "time" - durationpb "github.com/golang/protobuf/ptypes/duration" "google.golang.org/grpc" "google.golang.org/grpc/balancer" - lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" @@ -41,6 +38,9 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/resolver/dns" "google.golang.org/grpc/resolver" + + durationpb "github.com/golang/protobuf/ptypes/duration" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" ) const ( @@ -50,6 +50,7 @@ const ( ) var errServerTerminatedConnection = errors.New("grpclb: failed to recv server list: server terminated connection") +var logger = grpclog.Component("grpclb") func convertDuration(d *durationpb.Duration) time.Duration { if d == nil { @@ -150,19 +151,17 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal if opt.CredsBundle != nil { lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer) if err != nil { - grpclog.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err) + logger.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err) } lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer) if err != nil { - grpclog.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err) + logger.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err) } } return lb } -var _ balancer.V2Balancer = (*lbBalancer)(nil) // Assert that we implement V2Balancer - type lbBalancer struct { cc *lbCacheClientConn target string @@ -212,7 +211,7 @@ type lbBalancer struct { state connectivity.State subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn. scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns. - picker balancer.V2Picker + picker balancer.Picker // Support fallback to resolved backend addresses if there's no response // from remote balancer within fallbackTimeout. remoteBalancerConnected bool @@ -310,22 +309,18 @@ func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { return connectivity.TransientFailure } -func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - panic("not used") -} - func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { s := scs.ConnectivityState - if grpclog.V(2) { - grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) } lb.mu.Lock() defer lb.mu.Unlock() oldS, ok := lb.scStates[sc] if !ok { - if grpclog.V(2) { - grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) } return } @@ -391,13 +386,6 @@ func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) { lb.mu.Unlock() } -// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB -// clientConn. The remoteLB clientConn will handle creating/removing remoteLB -// connections. 
-func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { - panic("not used") -} - func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { lb.mu.Lock() defer lb.mu.Unlock() @@ -406,8 +394,8 @@ func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { if lb.usePickFirst == newUsePickFirst { return } - if grpclog.V(2) { - grpclog.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst) + if logger.V(2) { + logger.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst) } lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst) } @@ -418,18 +406,13 @@ func (lb *lbBalancer) ResolverError(error) { } func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { - if grpclog.V(2) { - grpclog.Infof("lbBalancer: UpdateClientConnState: %+v", ccs) + if logger.V(2) { + logger.Infof("lbBalancer: UpdateClientConnState: %+v", ccs) } gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig) lb.handleServiceConfig(gc) addrs := ccs.ResolverState.Addresses - if len(addrs) == 0 { - // There should be at least one address, either grpclb server or - // fallback. Empty address is not valid. - return balancer.ErrBadResolverState - } var remoteBalancerAddrs, backendAddrs []resolver.Address for _, a := range addrs { @@ -440,6 +423,17 @@ func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error backendAddrs = append(backendAddrs, a) } } + if sd := grpclbstate.Get(ccs.ResolverState); sd != nil { + // Override any balancer addresses provided via + // ccs.ResolverState.Addresses. + remoteBalancerAddrs = sd.BalancerAddresses + } + + if len(backendAddrs)+len(remoteBalancerAddrs) == 0 { + // There should be at least one address, either grpclb server or + // fallback. Empty address is not valid. + return balancer.ErrBadResolverState + } if len(remoteBalancerAddrs) == 0 { if lb.ccRemoteLB != nil { diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go index c6d555e4d8c..8eb45be28e3 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -33,7 +33,6 @@ import ( "google.golang.org/grpc/balancer" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" @@ -44,8 +43,8 @@ import ( // processServerList updates balancer's internal state, create/remove SubConns // and regenerates picker using the received serverList. func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { - if grpclog.V(2) { - grpclog.Infof("lbBalancer: processing server list: %+v", l) + if logger.V(2) { + logger.Infof("lbBalancer: processing server list: %+v", l) } lb.mu.Lock() defer lb.mu.Unlock() @@ -56,8 +55,8 @@ func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { // If the new server list == old server list, do nothing. 
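As an aside, the logging changes running through these grpclb files all follow one pattern: a package-level component logger from grpclog.Component replaces direct grpclog.Infof/Warningf calls, with verbose output still gated on V(2). A hedged sketch under an assumed component and function name:

package example

import "google.golang.org/grpc/grpclog"

// "examplecomp" is an illustrative component name.
var logger = grpclog.Component("examplecomp")

func logServerListUpdate(n int) {
	// Verbose details are only formatted when V(2) logging is enabled.
	if logger.V(2) {
		logger.Infof("example: processing server list with %d entries", n)
	}
}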
if cmp.Equal(lb.fullServerList, l.Servers, cmp.Comparer(proto.Equal)) { - if grpclog.V(2) { - grpclog.Infof("lbBalancer: new serverlist same as the previous one, ignoring") + if logger.V(2) { + logger.Infof("lbBalancer: new serverlist same as the previous one, ignoring") } return } @@ -81,8 +80,8 @@ func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { Addr: fmt.Sprintf("%s:%d", ipStr, s.Port), Metadata: &md, } - if grpclog.V(2) { - grpclog.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|", + if logger.V(2) { + logger.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|", i, ipStr, s.Port, s.LoadBalanceToken) } backendAddrs = append(backendAddrs, addr) @@ -150,7 +149,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback // This bypasses the cc wrapper with SubConn cache. sc, err := lb.cc.cc.NewSubConn(backendAddrs, opts) if err != nil { - grpclog.Warningf("grpclb: failed to create new SubConn: %v", err) + logger.Warningf("grpclb: failed to create new SubConn: %v", err) return } sc.Connect() @@ -173,7 +172,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback // Use addrWithMD to create the SubConn. sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts) if err != nil { - grpclog.Warningf("grpclb: failed to create new SubConn: %v", err) + logger.Warningf("grpclb: failed to create new SubConn: %v", err) continue } lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map. @@ -192,7 +191,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback lb.cc.RemoveSubConn(sc) delete(lb.subConns, a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in HandleSubConnStateChange. + // The entry will be deleted in UpdateSubConnState. } } @@ -245,7 +244,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { // receive ServerName as authority. cc, err := grpc.DialContext(context.Background(), lb.manualResolver.Scheme()+":///grpclb.subClientConn", dopts...) if err != nil { - grpclog.Fatalf("failed to dial: %v", err) + logger.Fatalf("failed to dial: %v", err) } ccw := &remoteBalancerCCWrapper{ cc: cc, @@ -373,9 +372,9 @@ func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() { default: if err != nil { if err == errServerTerminatedConnection { - grpclog.Info(err) + logger.Info(err) } else { - grpclog.Warning(err) + logger.Warning(err) } } } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/regenerate.sh b/vendor/google.golang.org/grpc/balancer/grpclb/regenerate.sh deleted file mode 100644 index b8978e11b38..00000000000 --- a/vendor/google.golang.org/grpc/balancer/grpclb/regenerate.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# Copyright 2018 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -eux -o pipefail - -TMP=$(mktemp -d) - -function finish { - rm -rf "$TMP" -} -trap finish EXIT - -pushd "$TMP" -mkdir -p grpc/lb/v1 -curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/lb/v1/load_balancer.proto > grpc/lb/v1/load_balancer.proto - -protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/lb/v1/*.proto -popd -rm -f grpc_lb_v1/*.pb.go -cp "$TMP"/grpc/lb/v1/*.pb.go grpc_lb_v1/ - diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go new file mode 100644 index 00000000000..a24264a34f5 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package state declares grpclb types to be set by resolvers wishing to pass +// information to grpclb via resolver.State Attributes. +package state + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.grpclb.state") + +// State contains gRPCLB-relevant data passed from the name resolver. +type State struct { + // BalancerAddresses contains the remote load balancer address(es). If + // set, overrides any resolver-provided addresses with Type of GRPCLB. + BalancerAddresses []resolver.Address +} + +// Set returns a copy of the provided state with attributes containing s. s's +// data should not be mutated after calling Set. +func Set(state resolver.State, s *State) resolver.State { + state.Attributes = state.Attributes.WithValues(key, s) + return state +} + +// Get returns the grpclb State in the resolver.State, or nil if not present. +// The returned data should not be mutated. +func Get(state resolver.State) *State { + s, _ := state.Attributes.Value(key).(*State) + return s +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index d4d645501c1..43c2a15373a 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -33,9 +33,11 @@ import ( // Name is the name of round_robin balancer. const Name = "round_robin" +var logger = grpclog.Component("roundrobin") + // newBuilder creates a new roundrobin balancer builder. 
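The new balancer/grpclb/state package above gives name resolvers a typed way to hand remote balancer addresses to grpclb through resolver.State attributes. A hypothetical resolver-side usage sketch; the function name and address literal are illustrative only:

package example

import (
	grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
	"google.golang.org/grpc/resolver"
)

// resolverStateWithBalancer attaches a grpclb balancer address to the
// resolver state alongside the ordinary backend addresses.
func resolverStateWithBalancer(backends []resolver.Address) resolver.State {
	s := resolver.State{Addresses: backends}
	// grpclb reads this via grpclbstate.Get and prefers it over any
	// balancer addresses carried in s.Addresses.
	return grpclbstate.Set(s, &grpclbstate.State{
		BalancerAddresses: []resolver.Address{{Addr: "10.0.0.1:443"}},
	})
}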
func newBuilder() balancer.Builder { - return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) + return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) } func init() { @@ -44,10 +46,10 @@ func init() { type rrPickerBuilder struct{} -func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker { - grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info) +func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { + logger.Infof("roundrobinPicker: newPicker called with info: %v", info) if len(info.ReadySCs) == 0 { - return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable) + return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } var scs []balancer.SubConn for sc := range info.ReadySCs { diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index f8667a23f2c..11e592aabb0 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -74,11 +74,7 @@ func (ccb *ccBalancerWrapper) watcher() { } ccb.balancerMu.Lock() su := t.(*scStateUpdate) - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) - } else { - ccb.balancer.HandleSubConnStateChange(su.sc, su.state) - } + ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) ccb.balancerMu.Unlock() case <-ccb.done.Done(): } @@ -123,19 +119,13 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { ccb.balancerMu.Lock() defer ccb.balancerMu.Unlock() - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - return ub.UpdateClientConnState(*ccs) - } - ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil) - return nil + return ccb.balancer.UpdateClientConnState(*ccs) } func (ccb *ccBalancerWrapper) resolverError(err error) { - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - ccb.balancerMu.Lock() - ub.ResolverError(err) - ccb.balancerMu.Unlock() - } + ccb.balancerMu.Lock() + ccb.balancer.ResolverError(err) + ccb.balancerMu.Unlock() } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { @@ -173,21 +163,6 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } -func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } - // Update picker before updating state. Even though the ordering here does - // not matter, it can lead to multiple calls of Pick in the common start-up - // case where we wait for ready and then perform an RPC. If the picker is - // updated later, we could call the "connecting" picker when the state is - // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePicker(p) - ccb.cc.csMgr.updateState(s) -} - func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { ccb.mu.Lock() defer ccb.mu.Unlock() @@ -199,7 +174,7 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { // case where we wait for ready and then perform an RPC. 
If the picker is // updated later, we could call the "connecting" picker when the state is // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePickerV2(s.Picker) + ccb.cc.blockingpicker.updatePicker(s.Picker) ccb.cc.csMgr.updateState(s.ConnectivityState) } @@ -245,7 +220,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { ac, err := cc.newAddrConn(addrs, opts) if err != nil { - channelz.Warningf(acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) return } acbw.ac = ac diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go deleted file mode 100644 index db04b08b842..00000000000 --- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go +++ /dev/null @@ -1,334 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -type balancerWrapperBuilder struct { - b Balancer // The v1 balancer. -} - -func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - bwb.b.Start(opts.Target.Endpoint, BalancerConfig{ - DialCreds: opts.DialCreds, - Dialer: opts.Dialer, - }) - _, pickfirst := bwb.b.(*pickFirst) - bw := &balancerWrapper{ - balancer: bwb.b, - pickfirst: pickfirst, - cc: cc, - targetAddr: opts.Target.Endpoint, - startCh: make(chan struct{}), - conns: make(map[resolver.Address]balancer.SubConn), - connSt: make(map[balancer.SubConn]*scState), - csEvltr: &balancer.ConnectivityStateEvaluator{}, - state: connectivity.Idle, - } - cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw}) - go bw.lbWatcher() - return bw -} - -func (bwb *balancerWrapperBuilder) Name() string { - return "wrapper" -} - -type scState struct { - addr Address // The v1 address type. - s connectivity.State - down func(error) -} - -type balancerWrapper struct { - balancer Balancer // The v1 balancer. - pickfirst bool - - cc balancer.ClientConn - targetAddr string // Target without the scheme. - - mu sync.Mutex - conns map[resolver.Address]balancer.SubConn - connSt map[balancer.SubConn]*scState - // This channel is closed when handling the first resolver result. - // lbWatcher blocks until this is closed, to avoid race between - // - NewSubConn is created, cc wants to notify balancer of state changes; - // - Build hasn't return, cc doesn't have access to balancer. - startCh chan struct{} - - // To aggregate the connectivity state. - csEvltr *balancer.ConnectivityStateEvaluator - state connectivity.State -} - -// lbWatcher watches the Notify channel of the balancer and manages -// connections accordingly. 
-func (bw *balancerWrapper) lbWatcher() { - <-bw.startCh - notifyCh := bw.balancer.Notify() - if notifyCh == nil { - // There's no resolver in the balancer. Connect directly. - a := resolver.Address{ - Addr: bw.targetAddr, - Type: resolver.Backend, - } - sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) - } else { - bw.mu.Lock() - bw.conns[a] = sc - bw.connSt[sc] = &scState{ - addr: Address{Addr: bw.targetAddr}, - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - return - } - - for addrs := range notifyCh { - grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs) - if bw.pickfirst { - var ( - oldA resolver.Address - oldSC balancer.SubConn - ) - bw.mu.Lock() - for oldA, oldSC = range bw.conns { - break - } - bw.mu.Unlock() - if len(addrs) <= 0 { - if oldSC != nil { - // Teardown old sc. - bw.mu.Lock() - delete(bw.conns, oldA) - delete(bw.connSt, oldSC) - bw.mu.Unlock() - bw.cc.RemoveSubConn(oldSC) - } - continue - } - - var newAddrs []resolver.Address - for _, a := range addrs { - newAddr := resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, // All addresses from balancer are all backends. - ServerName: "", - Metadata: a.Metadata, - } - newAddrs = append(newAddrs, newAddr) - } - if oldSC == nil { - // Create new sc. - sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err) - } else { - bw.mu.Lock() - // For pickfirst, there should be only one SubConn, so the - // address doesn't matter. All states updating (up and down) - // and picking should all happen on that only SubConn. - bw.conns[resolver.Address{}] = sc - bw.connSt[sc] = &scState{ - addr: addrs[0], // Use the first address. - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - } else { - bw.mu.Lock() - bw.connSt[oldSC].addr = addrs[0] - bw.mu.Unlock() - oldSC.UpdateAddresses(newAddrs) - } - } else { - var ( - add []resolver.Address // Addresses need to setup connections. - del []balancer.SubConn // Connections need to tear down. - ) - resAddrs := make(map[resolver.Address]Address) - for _, a := range addrs { - resAddrs[resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, // All addresses from balancer are all backends. - ServerName: "", - Metadata: a.Metadata, - }] = a - } - bw.mu.Lock() - for a := range resAddrs { - if _, ok := bw.conns[a]; !ok { - add = append(add, a) - } - } - for a, c := range bw.conns { - if _, ok := resAddrs[a]; !ok { - del = append(del, c) - delete(bw.conns, a) - // Keep the state of this sc in bw.connSt until its state becomes Shutdown. - } - } - bw.mu.Unlock() - for _, a := range add { - sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. 
Err: %v", a, err) - } else { - bw.mu.Lock() - bw.conns[a] = sc - bw.connSt[sc] = &scState{ - addr: resAddrs[a], - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - } - for _, c := range del { - bw.cc.RemoveSubConn(c) - } - } - } -} - -func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - bw.mu.Lock() - defer bw.mu.Unlock() - scSt, ok := bw.connSt[sc] - if !ok { - return - } - if s == connectivity.Idle { - sc.Connect() - } - oldS := scSt.s - scSt.s = s - if oldS != connectivity.Ready && s == connectivity.Ready { - scSt.down = bw.balancer.Up(scSt.addr) - } else if oldS == connectivity.Ready && s != connectivity.Ready { - if scSt.down != nil { - scSt.down(errConnClosing) - } - } - sa := bw.csEvltr.RecordTransition(oldS, s) - if bw.state != sa { - bw.state = sa - } - bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw}) - if s == connectivity.Shutdown { - // Remove state for this sc. - delete(bw.connSt, sc) - } -} - -func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { - bw.mu.Lock() - defer bw.mu.Unlock() - select { - case <-bw.startCh: - default: - close(bw.startCh) - } - // There should be a resolver inside the balancer. - // All updates here, if any, are ignored. -} - -func (bw *balancerWrapper) Close() { - bw.mu.Lock() - defer bw.mu.Unlock() - select { - case <-bw.startCh: - default: - close(bw.startCh) - } - bw.balancer.Close() -} - -// The picker is the balancerWrapper itself. -// It either blocks or returns error, consistent with v1 balancer Get(). -func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) { - failfast := true // Default failfast is true. - if ss, ok := rpcInfoFromContext(info.Ctx); ok { - failfast = ss.failfast - } - a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast}) - if err != nil { - return balancer.PickResult{}, toRPCErr(err) - } - if p != nil { - result.Done = func(balancer.DoneInfo) { p() } - defer func() { - if err != nil { - p() - } - }() - } - - bw.mu.Lock() - defer bw.mu.Unlock() - if bw.pickfirst { - // Get the first sc in conns. - for _, result.SubConn = range bw.conns { - return result, nil - } - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable - } - var ok1 bool - result.SubConn, ok1 = bw.conns[resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, - ServerName: "", - Metadata: a.Metadata, - }] - s, ok2 := bw.connSt[result.SubConn] - if !ok1 || !ok2 { - // This can only happen due to a race where Get() returned an address - // that was subsequently removed by Notify. In this case we should - // retry always. - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable - } - switch s.s { - case connectivity.Ready, connectivity.Idle: - return result, nil - case connectivity.Shutdown, connectivity.TransientFailure: - // If the returned sc has been shut down or is in transient failure, - // return error, and this RPC will fail or wait for another picker (if - // non-failfast). - return balancer.PickResult{}, balancer.ErrTransientFailure - default: - // For other states (connecting or unknown), the v1 balancer would - // traditionally wait until ready and then issue the RPC. Returning - // ErrNoSubConnAvailable will be a slight improvement in that it will - // allow the balancer to choose another address in case others are - // connected. 
- return balancer.PickResult{}, balancer.ErrNoSubConnAvailable - } -} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index f393bb66187..f826ec76984 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -1,13 +1,15 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto +// source: grpc/binlog/v1/binarylog.proto -package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1" +package grpc_binarylog_v1 -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import duration "github.com/golang/protobuf/ptypes/duration" -import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -18,7 +20,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // Enumerates the type of event // Note the terminology is different from the RPC semantics @@ -64,6 +66,7 @@ var GrpcLogEntry_EventType_name = map[int32]string{ 6: "EVENT_TYPE_SERVER_TRAILER", 7: "EVENT_TYPE_CANCEL", } + var GrpcLogEntry_EventType_value = map[string]int32{ "EVENT_TYPE_UNKNOWN": 0, "EVENT_TYPE_CLIENT_HEADER": 1, @@ -78,8 +81,9 @@ var GrpcLogEntry_EventType_value = map[string]int32{ func (x GrpcLogEntry_EventType) String() string { return proto.EnumName(GrpcLogEntry_EventType_name, int32(x)) } + func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0} + return fileDescriptor_b7972e58de45083a, []int{0, 0} } // Enumerates the entity that generates the log entry @@ -96,6 +100,7 @@ var GrpcLogEntry_Logger_name = map[int32]string{ 1: "LOGGER_CLIENT", 2: "LOGGER_SERVER", } + var GrpcLogEntry_Logger_value = map[string]int32{ "LOGGER_UNKNOWN": 0, "LOGGER_CLIENT": 1, @@ -105,8 +110,9 @@ var GrpcLogEntry_Logger_value = map[string]int32{ func (x GrpcLogEntry_Logger) String() string { return proto.EnumName(GrpcLogEntry_Logger_name, int32(x)) } + func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1} + return fileDescriptor_b7972e58de45083a, []int{0, 1} } type Address_Type int32 @@ -128,6 +134,7 @@ var Address_Type_name = map[int32]string{ 2: "TYPE_IPV6", 3: "TYPE_UNIX", } + var Address_Type_value = map[string]int32{ "TYPE_UNKNOWN": 0, "TYPE_IPV4": 1, @@ -138,8 +145,9 @@ var Address_Type_value = map[string]int32{ func (x Address_Type) String() string { return proto.EnumName(Address_Type_name, int32(x)) } + func (Address_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0} + return fileDescriptor_b7972e58de45083a, []int{7, 0} } // Log entry we store in binary logs @@ -185,16 +193,17 @@ func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} } func 
(m *GrpcLogEntry) String() string { return proto.CompactTextString(m) } func (*GrpcLogEntry) ProtoMessage() {} func (*GrpcLogEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0} + return fileDescriptor_b7972e58de45083a, []int{0} } + func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b) } func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic) } -func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_GrpcLogEntry.Merge(dst, src) +func (m *GrpcLogEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_GrpcLogEntry.Merge(m, src) } func (m *GrpcLogEntry) XXX_Size() int { return xxx_messageInfo_GrpcLogEntry.Size(m) @@ -317,9 +326,9 @@ func (m *GrpcLogEntry) GetPeer() *Address { return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. +func (*GrpcLogEntry) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), (*GrpcLogEntry_Message)(nil), @@ -327,108 +336,6 @@ func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) } } -func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*GrpcLogEntry) - // payload - switch x := m.Payload.(type) { - case *GrpcLogEntry_ClientHeader: - b.EncodeVarint(6<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ClientHeader); err != nil { - return err - } - case *GrpcLogEntry_ServerHeader: - b.EncodeVarint(7<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ServerHeader); err != nil { - return err - } - case *GrpcLogEntry_Message: - b.EncodeVarint(8<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Message); err != nil { - return err - } - case *GrpcLogEntry_Trailer: - b.EncodeVarint(9<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Trailer); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x) - } - return nil -} - -func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*GrpcLogEntry) - switch tag { - case 6: // payload.client_header - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ClientHeader) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_ClientHeader{msg} - return true, err - case 7: // payload.server_header - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ServerHeader) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_ServerHeader{msg} - return true, err - case 8: // payload.message - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Message) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_Message{msg} - return true, err - case 9: // payload.trailer - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Trailer) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_Trailer{msg} - return true, 
err - default: - return false, nil - } -} - -func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) { - m := msg.(*GrpcLogEntry) - // payload - switch x := m.Payload.(type) { - case *GrpcLogEntry_ClientHeader: - s := proto.Size(x.ClientHeader) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_ServerHeader: - s := proto.Size(x.ServerHeader) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_Message: - s := proto.Size(x.Message) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_Trailer: - s := proto.Size(x.Trailer) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - type ClientHeader struct { // This contains only the metadata from the application. Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` @@ -453,16 +360,17 @@ func (m *ClientHeader) Reset() { *m = ClientHeader{} } func (m *ClientHeader) String() string { return proto.CompactTextString(m) } func (*ClientHeader) ProtoMessage() {} func (*ClientHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{1} + return fileDescriptor_b7972e58de45083a, []int{1} } + func (m *ClientHeader) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ClientHeader.Unmarshal(m, b) } func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic) } -func (dst *ClientHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientHeader.Merge(dst, src) +func (m *ClientHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientHeader.Merge(m, src) } func (m *ClientHeader) XXX_Size() int { return xxx_messageInfo_ClientHeader.Size(m) @@ -513,16 +421,17 @@ func (m *ServerHeader) Reset() { *m = ServerHeader{} } func (m *ServerHeader) String() string { return proto.CompactTextString(m) } func (*ServerHeader) ProtoMessage() {} func (*ServerHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{2} + return fileDescriptor_b7972e58de45083a, []int{2} } + func (m *ServerHeader) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServerHeader.Unmarshal(m, b) } func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic) } -func (dst *ServerHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServerHeader.Merge(dst, src) +func (m *ServerHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerHeader.Merge(m, src) } func (m *ServerHeader) XXX_Size() int { return xxx_messageInfo_ServerHeader.Size(m) @@ -560,16 +469,17 @@ func (m *Trailer) Reset() { *m = Trailer{} } func (m *Trailer) String() string { return proto.CompactTextString(m) } func (*Trailer) ProtoMessage() {} func (*Trailer) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{3} + return fileDescriptor_b7972e58de45083a, []int{3} } + func (m *Trailer) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Trailer.Unmarshal(m, b) } func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Trailer.Marshal(b, m, deterministic) } -func (dst *Trailer) XXX_Merge(src proto.Message) { - xxx_messageInfo_Trailer.Merge(dst, src) +func (m *Trailer) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Trailer.Merge(m, src) } func (m *Trailer) XXX_Size() int { return xxx_messageInfo_Trailer.Size(m) @@ -624,16 +534,17 @@ func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{4} + return fileDescriptor_b7972e58de45083a, []int{4} } + func (m *Message) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Message.Unmarshal(m, b) } func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Message.Marshal(b, m, deterministic) } -func (dst *Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message.Merge(dst, src) +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) } func (m *Message) XXX_Size() int { return xxx_messageInfo_Message.Size(m) @@ -690,16 +601,17 @@ func (m *Metadata) Reset() { *m = Metadata{} } func (m *Metadata) String() string { return proto.CompactTextString(m) } func (*Metadata) ProtoMessage() {} func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{5} + return fileDescriptor_b7972e58de45083a, []int{5} } + func (m *Metadata) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Metadata.Unmarshal(m, b) } func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) } -func (dst *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(dst, src) +func (m *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(m, src) } func (m *Metadata) XXX_Size() int { return xxx_messageInfo_Metadata.Size(m) @@ -730,16 +642,17 @@ func (m *MetadataEntry) Reset() { *m = MetadataEntry{} } func (m *MetadataEntry) String() string { return proto.CompactTextString(m) } func (*MetadataEntry) ProtoMessage() {} func (*MetadataEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{6} + return fileDescriptor_b7972e58de45083a, []int{6} } + func (m *MetadataEntry) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MetadataEntry.Unmarshal(m, b) } func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic) } -func (dst *MetadataEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetadataEntry.Merge(dst, src) +func (m *MetadataEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetadataEntry.Merge(m, src) } func (m *MetadataEntry) XXX_Size() int { return xxx_messageInfo_MetadataEntry.Size(m) @@ -779,16 +692,17 @@ func (m *Address) Reset() { *m = Address{} } func (m *Address) String() string { return proto.CompactTextString(m) } func (*Address) ProtoMessage() {} func (*Address) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{7} + return fileDescriptor_b7972e58de45083a, []int{7} } + func (m *Address) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Address.Unmarshal(m, b) } func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Address.Marshal(b, m, deterministic) } -func (dst *Address) XXX_Merge(src proto.Message) { - xxx_messageInfo_Address.Merge(dst, src) +func (m *Address) XXX_Merge(src proto.Message) { + xxx_messageInfo_Address.Merge(m, src) } func (m *Address) XXX_Size() int { return 
xxx_messageInfo_Address.Size(m) @@ -821,6 +735,9 @@ func (m *Address) GetIpPort() uint32 { } func init() { + proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value) + proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value) + proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value) proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry") proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader") proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader") @@ -829,72 +746,67 @@ func init() { proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata") proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry") proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address") - proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value) - proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value) - proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value) } -func init() { - proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911) -} +func init() { proto.RegisterFile("grpc/binlog/v1/binarylog.proto", fileDescriptor_b7972e58de45083a) } -var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{ - // 900 bytes of a gzipped FileDescriptorProto +var fileDescriptor_b7972e58de45083a = []byte{ + // 904 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44, - 0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04, - 0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d, - 0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c, - 0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf, - 0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2, - 0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09, - 0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e, - 0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef, - 0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36, - 0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5, - 0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46, - 0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84, - 0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72, - 0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa, - 0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb, - 0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84, - 0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1, - 0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c, - 0x3d, 
0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24, - 0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba, - 0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8, - 0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5, - 0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1, - 0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94, - 0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f, - 0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec, - 0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b, - 0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1, - 0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5, - 0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b, - 0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d, - 0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42, - 0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4, - 0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd, - 0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51, - 0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01, - 0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58, - 0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5, - 0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff, - 0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26, - 0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23, - 0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44, - 0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46, - 0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf, - 0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab, - 0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32, - 0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49, - 0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb, - 0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c, - 0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0, - 0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed, - 0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f, - 0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, - 0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e, - 0xea, 0xbb, 0x8a, 0xec, 0xf2, 
0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50, - 0xd4, 0x07, 0x00, 0x00, + 0x10, 0xae, 0xdb, 0x34, 0x6e, 0x26, 0x49, 0xe5, 0xae, 0xca, 0x9d, 0xaf, 0x94, 0x6b, 0x64, 0x09, + 0x14, 0x84, 0xe4, 0xa8, 0x29, 0xd7, 0xe3, 0x05, 0xa4, 0x24, 0xf5, 0xa5, 0x11, 0xb9, 0x34, 0xda, + 0xe4, 0x7a, 0x80, 0x90, 0xac, 0x6d, 0xbc, 0x38, 0x16, 0x8e, 0xd7, 0xac, 0x37, 0x41, 0xf9, 0x59, + 0xbc, 0x21, 0xdd, 0xef, 0xe2, 0x1d, 0x79, 0xd7, 0x4e, 0x4d, 0xd3, 0x82, 0xc4, 0xbd, 0xed, 0x7c, + 0xf3, 0xcd, 0x37, 0xbb, 0xe3, 0x99, 0x31, 0xbc, 0xf4, 0x79, 0x3c, 0x6b, 0xdd, 0x05, 0x51, 0xc8, + 0xfc, 0xd6, 0xea, 0x3c, 0x3d, 0x11, 0xbe, 0x0e, 0x99, 0x6f, 0xc7, 0x9c, 0x09, 0x86, 0x8e, 0x52, + 0xbf, 0x7d, 0x8f, 0xae, 0xce, 0x4f, 0x5e, 0xfa, 0x8c, 0xf9, 0x21, 0x6d, 0x49, 0xc2, 0xdd, 0xf2, + 0x97, 0x96, 0xb7, 0xe4, 0x44, 0x04, 0x2c, 0x52, 0x21, 0x27, 0x67, 0x0f, 0xfd, 0x22, 0x58, 0xd0, + 0x44, 0x90, 0x45, 0xac, 0x08, 0xd6, 0x07, 0x1d, 0x6a, 0x7d, 0x1e, 0xcf, 0x86, 0xcc, 0x77, 0x22, + 0xc1, 0xd7, 0xe8, 0x1b, 0xa8, 0x6c, 0x38, 0xa6, 0xd6, 0xd0, 0x9a, 0xd5, 0xf6, 0x89, 0xad, 0x54, + 0xec, 0x5c, 0xc5, 0x9e, 0xe6, 0x0c, 0x7c, 0x4f, 0x46, 0xcf, 0x41, 0x9f, 0x91, 0x30, 0x74, 0x03, + 0xcf, 0xdc, 0x6d, 0x68, 0xcd, 0x12, 0x2e, 0xa7, 0xe6, 0xc0, 0x43, 0xaf, 0xe0, 0x79, 0x42, 0x7f, + 0x5b, 0xd2, 0x68, 0x46, 0xdd, 0xc0, 0x73, 0x7f, 0x0f, 0xc4, 0x3c, 0x88, 0xdc, 0xd4, 0x69, 0xee, + 0x49, 0xe2, 0x71, 0xee, 0x1e, 0x78, 0xef, 0xa5, 0xb3, 0x47, 0xc2, 0x10, 0x7d, 0x0b, 0x25, 0xb1, + 0x8e, 0xa9, 0x59, 0x6a, 0x68, 0xcd, 0xc3, 0xf6, 0x97, 0xf6, 0xd6, 0xeb, 0xed, 0xe2, 0xc5, 0x6d, + 0x67, 0x45, 0x23, 0x31, 0x5d, 0xc7, 0x14, 0xcb, 0x30, 0xf4, 0x1d, 0x94, 0x43, 0xe6, 0xfb, 0x94, + 0x9b, 0xfb, 0x52, 0xe0, 0x8b, 0xff, 0x12, 0x18, 0x4a, 0x36, 0xce, 0xa2, 0xd0, 0x1b, 0xa8, 0xcf, + 0xc2, 0x80, 0x46, 0xc2, 0x9d, 0x53, 0xe2, 0x51, 0x6e, 0x96, 0x65, 0x31, 0xce, 0x1e, 0x91, 0xe9, + 0x49, 0xde, 0xb5, 0xa4, 0x5d, 0xef, 0xe0, 0xda, 0xac, 0x60, 0xa7, 0x3a, 0x09, 0xe5, 0x2b, 0xca, + 0x73, 0x1d, 0xfd, 0x49, 0x9d, 0x89, 0xe4, 0xdd, 0xeb, 0x24, 0x05, 0x1b, 0x5d, 0x82, 0xbe, 0xa0, + 0x49, 0x42, 0x7c, 0x6a, 0x1e, 0xe4, 0x9f, 0x65, 0x4b, 0xe1, 0xad, 0x62, 0x5c, 0xef, 0xe0, 0x9c, + 0x9c, 0xc6, 0x09, 0x4e, 0x82, 0x90, 0x72, 0xb3, 0xf2, 0x64, 0xdc, 0x54, 0x31, 0xd2, 0xb8, 0x8c, + 0x8c, 0xbe, 0x82, 0xa3, 0x98, 0xac, 0x43, 0x46, 0x3c, 0x57, 0xf0, 0x65, 0x34, 0x23, 0x82, 0x7a, + 0x26, 0x34, 0xb4, 0xe6, 0x01, 0x36, 0x32, 0xc7, 0x34, 0xc7, 0x91, 0x0d, 0xa5, 0x98, 0x52, 0x6e, + 0x56, 0x9f, 0xcc, 0xd0, 0xf1, 0x3c, 0x4e, 0x93, 0x04, 0x4b, 0x9e, 0xf5, 0x97, 0x06, 0x95, 0xcd, + 0x07, 0x43, 0xcf, 0x00, 0x39, 0xb7, 0xce, 0x68, 0xea, 0x4e, 0x7f, 0x1c, 0x3b, 0xee, 0xbb, 0xd1, + 0xf7, 0xa3, 0x9b, 0xf7, 0x23, 0x63, 0x07, 0x9d, 0x82, 0x59, 0xc0, 0x7b, 0xc3, 0x41, 0x7a, 0xbe, + 0x76, 0x3a, 0x57, 0x0e, 0x36, 0xb4, 0x07, 0xde, 0x89, 0x83, 0x6f, 0x1d, 0x9c, 0x7b, 0x77, 0xd1, + 0x67, 0xf0, 0x62, 0x3b, 0xf6, 0xad, 0x33, 0x99, 0x74, 0xfa, 0x8e, 0xb1, 0xf7, 0xc0, 0x9d, 0x05, + 0xe7, 0xee, 0x12, 0x6a, 0xc0, 0xe9, 0x23, 0x99, 0x3b, 0xc3, 0x37, 0x6e, 0x6f, 0x78, 0x33, 0x71, + 0x8c, 0xfd, 0xc7, 0x05, 0xa6, 0xb8, 0x33, 0x18, 0x3a, 0xd8, 0x28, 0xa3, 0x4f, 0xe0, 0xa8, 0x28, + 0xd0, 0x19, 0xf5, 0x9c, 0xa1, 0xa1, 0x5b, 0x5d, 0x28, 0xab, 0x36, 0x43, 0x08, 0x0e, 0x87, 0x37, + 0xfd, 0xbe, 0x83, 0x0b, 0xef, 0x3d, 0x82, 0x7a, 0x86, 0xa9, 0x8c, 0x86, 0x56, 0x80, 0x54, 0x0a, + 0x63, 0xb7, 0x5b, 0x01, 0x3d, 0xab, 0xbf, 0xf5, 0x41, 0x83, 0x5a, 0xb1, 0xf9, 0xd0, 0x6b, 0x38, + 0x58, 0x50, 0x41, 0x3c, 0x22, 0x48, 0x36, 0xbc, 0x9f, 0x3e, 0xda, 0x25, 0x8a, 0x82, 0x37, 0x64, + 0x74, 0x06, 0xd5, 0x05, 0x15, 
0x73, 0xe6, 0xb9, 0x11, 0x59, 0x50, 0x39, 0xc0, 0x15, 0x0c, 0x0a, + 0x1a, 0x91, 0x05, 0x45, 0xa7, 0x50, 0x21, 0x4b, 0x31, 0x67, 0x3c, 0x10, 0x6b, 0x39, 0xb6, 0x15, + 0x7c, 0x0f, 0xa0, 0x0b, 0xd0, 0xd3, 0x45, 0xc0, 0x96, 0x42, 0x8e, 0x6b, 0xb5, 0xfd, 0x62, 0x6b, + 0x67, 0x5c, 0x65, 0x9b, 0x09, 0xe7, 0x4c, 0xab, 0x0f, 0xb5, 0x62, 0xc7, 0xff, 0xef, 0xcb, 0x5b, + 0x7f, 0x68, 0xa0, 0x67, 0x1d, 0xfc, 0x51, 0x15, 0x48, 0x04, 0x11, 0xcb, 0xc4, 0x9d, 0x31, 0x4f, + 0x55, 0xa0, 0x8e, 0x41, 0x41, 0x3d, 0xe6, 0x51, 0xf4, 0x39, 0x1c, 0x66, 0x84, 0x7c, 0x0e, 0x55, + 0x19, 0xea, 0x0a, 0xcd, 0x46, 0xaf, 0x40, 0xf3, 0xa8, 0x20, 0x41, 0x98, 0xc8, 0x8a, 0xd4, 0x72, + 0xda, 0x95, 0x02, 0xad, 0x57, 0xa0, 0xe7, 0x11, 0xcf, 0xa0, 0x1c, 0xd2, 0xc8, 0x17, 0x73, 0x79, + 0xe1, 0x3a, 0xce, 0x2c, 0x84, 0xa0, 0x24, 0x9f, 0xb1, 0x2b, 0xe3, 0xe5, 0xd9, 0xea, 0xc2, 0x41, + 0x7e, 0x77, 0x74, 0x09, 0xfb, 0x34, 0xdd, 0x5c, 0xa6, 0xd6, 0xd8, 0x6b, 0x56, 0xdb, 0x8d, 0x7f, + 0x79, 0xa7, 0xdc, 0x70, 0x58, 0xd1, 0xad, 0xd7, 0x50, 0xff, 0x07, 0x8e, 0x0c, 0xd8, 0xfb, 0x95, + 0xae, 0x65, 0xf6, 0x0a, 0x4e, 0x8f, 0xe8, 0x18, 0xf6, 0x57, 0x24, 0x5c, 0xd2, 0x2c, 0xb7, 0x32, + 0xac, 0x3f, 0x35, 0xd0, 0xb3, 0x39, 0x46, 0x17, 0xd9, 0x76, 0xd6, 0xe4, 0x72, 0x3d, 0x7b, 0x7a, + 0xe2, 0xed, 0xc2, 0x4e, 0x36, 0x41, 0x27, 0x0a, 0xcd, 0x3a, 0x2c, 0x37, 0xd3, 0x9f, 0x47, 0x10, + 0xbb, 0x31, 0xe3, 0x42, 0x56, 0xb5, 0x8e, 0xcb, 0x41, 0x3c, 0x66, 0x5c, 0x58, 0x0e, 0x94, 0xe4, + 0x8e, 0x30, 0xa0, 0xf6, 0x60, 0x3b, 0xd4, 0xa1, 0x22, 0x91, 0xc1, 0xf8, 0xf6, 0x6b, 0x43, 0x2b, + 0x9a, 0x97, 0xc6, 0xee, 0xc6, 0x7c, 0x37, 0x1a, 0xfc, 0x60, 0xec, 0x75, 0x7f, 0x86, 0xe3, 0x80, + 0x6d, 0x5f, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, 0xda, 0x4f, 0xed, + 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0xf7, 0x5b, 0xf9, 0x7f, 0x59, 0x85, 0x49, 0xd3, + 0xdd, 0x98, 0xee, 0xea, 0xfc, 0xae, 0x2c, 0xbb, 0xfc, 0xe2, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x10, 0x93, 0x68, 0x41, 0xc2, 0x07, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 0740693b75b..ae5ce4947e2 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -68,8 +68,6 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") - // errBalancerClosed indicates that the balancer is closed. - errBalancerClosed = errors.New("grpc: balancer is closed") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. 
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" @@ -151,7 +149,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if channelz.IsOn() { if cc.dopts.channelzParentID != 0 { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(cc.channelzID, 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -161,7 +159,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * }) } else { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.Info(cc.channelzID, "Channel Created") + channelz.Info(logger, cc.channelzID, "Channel Created") } cc.csMgr.channelzID = cc.channelzID } @@ -217,7 +215,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * defer func() { select { case <-ctx.Done(): - conn, err = nil, ctx.Err() + switch { + case ctx.Err() == err: + conn = nil + case err == nil || !cc.dopts.returnLastError: + conn, err = nil, ctx.Err() + default: + conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err) + } default: } }() @@ -240,13 +245,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // Determine the resolver to use. cc.parsedTarget = grpcutil.ParseTarget(cc.target) - channelz.Infof(cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) + unixScheme := strings.HasPrefix(cc.target, "unix:") + channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) if resolverBuilder == nil { // If resolver builder is still nil, the parsed target's scheme is // not registered. Fallback to default resolver and set Endpoint to // the original target. - channelz.Infof(cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) + channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) cc.parsedTarget = resolver.Target{ Scheme: resolver.GetDefaultScheme(), Endpoint: target, @@ -262,6 +268,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * cc.authority = creds.Info().ServerName } else if cc.dopts.insecure && cc.dopts.authority != "" { cc.authority = cc.dopts.authority + } else if unixScheme { + cc.authority = "localhost" } else { // Use endpoint from "scheme://authority/endpoint" as the default // authority for ClientConn. @@ -311,7 +319,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if s == connectivity.Ready { break } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.blockingpicker.connectionError(); err != nil { + if err = cc.connectionError(); err != nil { terr, ok := err.(interface { Temporary() bool }) @@ -322,6 +330,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } if !cc.WaitForStateChange(ctx, s) { // ctx got timeout or canceled. 
+ if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } return nil, ctx.Err() } } @@ -414,7 +425,7 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state - channelz.Infof(csm.channelzID, "Channel Connectivity change to %v", state) + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. close(csm.notifyChan) @@ -490,6 +501,9 @@ type ClientConn struct { channelzID int64 // channelz unique identification number czData *channelzData + + lceMu sync.Mutex // protects lastConnectionError + lastConnectionError error } // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or @@ -664,9 +678,9 @@ func (cc *ClientConn) switchBalancer(name string) { return } - channelz.Infof(cc.channelzID, "ClientConn switching balancer to %q", name) + channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) if cc.dopts.balancerBuilder != nil { - channelz.Info(cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") + channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") return } if cc.balancerWrapper != nil { @@ -675,11 +689,11 @@ func (cc *ClientConn) switchBalancer(name string) { builder := balancer.Get(name) if builder == nil { - channelz.Warningf(cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) - channelz.Infof(cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) + channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) + channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) builder = newPickfirstBuilder() } else { - channelz.Infof(cc.channelzID, "Channel switches to new LB policy %q", name) + channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) } cc.curBalancerName = builder.Name() @@ -720,7 +734,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub } if channelz.IsOn() { ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -818,7 +832,7 @@ func (ac *addrConn) connect() error { func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ac.mu.Lock() defer ac.mu.Unlock() - channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { @@ -838,7 +852,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { break } } - channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) if curAddrFound { ac.addrs = addrs } @@ -849,9 +863,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { // 
GetMethodConfig gets the method config of the input method. // If there's an exact match for input method (i.e. /service/method), we return // the corresponding MethodConfig. -// If there isn't an exact match for the input method, we look for the default config -// under the service (i.e /service/). If there is a default MethodConfig for -// the service, we return it. +// If there isn't an exact match for the input method, we look for the service's default +// config under the service (i.e /service/) and then for the default for all services (empty string). +// +// If there is a default MethodConfig for the service, we return it. // Otherwise, we return an empty MethodConfig. func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { // TODO: Avoid the locking here. @@ -860,12 +875,14 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { if cc.sc == nil { return MethodConfig{} } - m, ok := cc.sc.Methods[method] - if !ok { - i := strings.LastIndex(method, "/") - m = cc.sc.Methods[method[:i+1]] + if m, ok := cc.sc.Methods[method]; ok { + return m + } + i := strings.LastIndex(method, "/") + if m, ok := cc.sc.Methods[method[:i+1]]; ok { + return m } - return m + return cc.sc.Methods[""] } func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { @@ -1009,7 +1026,7 @@ func (cc *ClientConn) Close() error { Severity: channelz.CtINFO, } } - channelz.AddTraceEvent(cc.channelzID, 0, ted) + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to // the entity being deleted, and thus prevent it from being deleted right away. channelz.RemoveEntry(cc.channelzID) @@ -1053,7 +1070,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) return } ac.state = s - channelz.Infof(ac.channelzID, "Subchannel Connectivity change to %v", s) + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) } @@ -1190,7 +1207,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T } ac.mu.Unlock() - channelz.Infof(ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) + channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) if err == nil { @@ -1199,7 +1216,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T if firstConnErr == nil { firstConnErr = err } - ac.cc.blockingpicker.updateConnectionError(err) + ac.cc.updateConnectionError(err) } // Couldn't connect to any address. @@ -1214,16 +1231,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne onCloseCalled := make(chan struct{}) reconnect := grpcsync.NewEvent() - authority := ac.cc.authority // addr.ServerName takes precedent over ClientConn authority, if present. 
- if addr.ServerName != "" { - authority = addr.ServerName - } - - target := transport.TargetInfo{ - Addr: addr.Addr, - Metadata: addr.Metadata, - Authority: authority, + if addr.ServerName == "" { + addr.ServerName = ac.cc.authority } once := sync.Once{} @@ -1269,10 +1279,10 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne copts.ChannelzParentID = ac.channelzID } - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) return nil, nil, err } @@ -1280,7 +1290,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne case <-time.After(time.Until(connectDeadline)): // We didn't get the preface in time. newTr.Close() - channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) return nil, nil, errors.New("timed out waiting for server handshake") case <-prefaceReceived: // We got the preface - huzzah! things are good. @@ -1297,7 +1307,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // // LB channel health checking is enabled when all requirements below are met: // 1. it is not disabled by the user with the WithDisableHealthCheck DialOption -// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package +// 2. internal.HealthCheckFunc is set by importing the grpc/health package // 3. a service config with non-empty healthCheckConfig field is provided // 4. the load balancer requests it // @@ -1327,7 +1337,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // The health package is not imported to set health check function. // // TODO: add a link to the health check doc in the error message. 
- channelz.Error(ac.channelzID, "Health check is requested but health check function is not set.") + channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.") return } @@ -1357,9 +1367,9 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { - channelz.Error(ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") + channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - channelz.Errorf(ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) + channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) } } }() @@ -1424,7 +1434,7 @@ func (ac *addrConn) tearDown(err error) { ac.mu.Lock() } if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel Deleted", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -1532,3 +1542,15 @@ func (cc *ClientConn) getResolver(scheme string) resolver.Builder { } return resolver.Get(scheme) } + +func (cc *ClientConn) updateConnectionError(err error) { + cc.lceMu.Lock() + cc.lastConnectionError = err + cc.lceMu.Unlock() +} + +func (cc *ClientConn) connectionError() error { + cc.lceMu.Lock() + defer cc.lceMu.Unlock() + return cc.lastConnectionError +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 02738839dd9..11b106182db 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -33,6 +33,9 @@ const ( OK Code = 0 // Canceled indicates the operation was canceled (typically by the caller). + // + // The gRPC framework will generate this error code when cancellation + // is requested. Canceled Code = 1 // Unknown error. An example of where this error may be returned is @@ -40,12 +43,17 @@ const ( // an error-space that is not known in this address space. Also // errors raised by APIs that do not return enough error information // may be converted to this error. + // + // The gRPC framework will generate this error code in the above two + // mentioned cases. Unknown Code = 2 // InvalidArgument indicates client specified an invalid argument. // Note that this differs from FailedPrecondition. It indicates arguments // that are problematic regardless of the state of the system // (e.g., a malformed file name). + // + // This error code will not be generated by the gRPC framework. InvalidArgument Code = 3 // DeadlineExceeded means operation expired before completion. @@ -53,14 +61,21 @@ const ( // returned even if the operation has completed successfully. For // example, a successful response from a server could have been delayed // long enough for the deadline to expire. + // + // The gRPC framework will generate this error code when the deadline is + // exceeded. DeadlineExceeded Code = 4 // NotFound means some requested entity (e.g., file or directory) was // not found. + // + // This error code will not be generated by the gRPC framework. NotFound Code = 5 // AlreadyExists means an attempt to create an entity failed because one // already exists. + // + // This error code will not be generated by the gRPC framework. 
AlreadyExists Code = 6 // PermissionDenied indicates the caller does not have permission to @@ -69,10 +84,17 @@ const ( // instead for those errors). It must not be // used if the caller cannot be identified (use Unauthenticated // instead for those errors). + // + // This error code will not be generated by the gRPC core framework, + // but expect authentication middleware to use it. PermissionDenied Code = 7 // ResourceExhausted indicates some resource has been exhausted, perhaps // a per-user quota, or perhaps the entire file system is out of space. + // + // This error code will be generated by the gRPC framework in + // out-of-memory and server overload situations, or when a message is + // larger than the configured maximum size. ResourceExhausted Code = 8 // FailedPrecondition indicates operation was rejected because the @@ -94,6 +116,8 @@ const ( // REST Get/Update/Delete on a resource and the resource on the // server does not match the condition. E.g., conflicting // read-modify-write on the same resource. + // + // This error code will not be generated by the gRPC framework. FailedPrecondition Code = 9 // Aborted indicates the operation was aborted, typically due to a @@ -102,6 +126,8 @@ const ( // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. + // + // This error code will not be generated by the gRPC framework. Aborted Code = 10 // OutOfRange means operation was attempted past the valid range. @@ -119,15 +145,26 @@ const ( // error) when it applies so that callers who are iterating through // a space can easily look for an OutOfRange error to detect when // they are done. + // + // This error code will not be generated by the gRPC framework. OutOfRange Code = 11 // Unimplemented indicates operation is not implemented or not // supported/enabled in this service. + // + // This error code will be generated by the gRPC framework. Most + // commonly, you will see this error code when a method implementation + // is missing on the server. It can also be generated for unknown + // compression algorithms or a disagreement as to whether an RPC should + // be streaming. Unimplemented Code = 12 // Internal errors. Means some invariants expected by underlying // system has been broken. If you see one of these errors, // something is very broken. + // + // This error code will be generated by the gRPC framework in several + // internal error conditions. Internal Code = 13 // Unavailable indicates the service is currently unavailable. @@ -137,13 +174,22 @@ const ( // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. + // + // This error code will be generated by the gRPC framework during + // abrupt shutdown of a server process or network connection. Unavailable Code = 14 // DataLoss indicates unrecoverable data loss or corruption. + // + // This error code will not be generated by the gRPC framework. DataLoss Code = 15 // Unauthenticated indicates the request does not have valid // authentication credentials for the operation. + // + // The gRPC framework will generate this error code when the + // authentication metadata is invalid or a Credentials callback fails, + // but also expect authentication middleware to generate it. 
Unauthenticated Code = 16 _maxCode = 17 diff --git a/vendor/google.golang.org/grpc/credentials/alts/alts.go b/vendor/google.golang.org/grpc/credentials/alts/alts.go index 5c9e8b1c471..729c4b43b5f 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/alts.go +++ b/vendor/google.golang.org/grpc/credentials/alts/alts.go @@ -67,6 +67,7 @@ var ( // ServerHandshake is running on a platform where the trustworthiness of // the handshaker service is not guaranteed. ErrUntrustedPlatform = errors.New("ALTS: untrusted platform. ALTS is only supported on GCP") + logger = grpclog.Component("alts") ) // AuthInfo exposes security information from the ALTS handshake to the @@ -307,7 +308,7 @@ func compareRPCVersions(v1, v2 *altspb.RpcProtocolVersions_Version) int { // agreed on. func checkRPCVersions(local, peer *altspb.RpcProtocolVersions) (bool, *altspb.RpcProtocolVersions_Version) { if local == nil || peer == nil { - grpclog.Error("invalid checkRPCVersions argument, either local or peer is nil.") + logger.Error("invalid checkRPCVersions argument, either local or peer is nil.") return false, nil } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go b/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go index 9c53d6b53fb..ebea57da1de 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go @@ -48,6 +48,7 @@ func newAuthInfo(result *altspb.HandshakerResult) *altsAuthInfo { PeerServiceAccount: result.GetPeerIdentity().GetServiceAccount(), LocalServiceAccount: result.GetLocalIdentity().GetServiceAccount(), PeerRpcVersions: result.GetPeerRpcVersions(), + PeerAttributes: result.GetPeerIdentity().GetAttributes(), }, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}, } @@ -87,3 +88,8 @@ func (s *altsAuthInfo) LocalServiceAccount() string { func (s *altsAuthInfo) PeerRPCVersions() *altspb.RpcProtocolVersions { return s.p.GetPeerRpcVersions() } + +// PeerAttributes returns the context's peer attributes. +func (s *altsAuthInfo) PeerAttributes() map[string]string { + return s.p.GetPeerAttributes() +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/common.go b/vendor/google.golang.org/grpc/credentials/alts/internal/common.go index 33fba81239a..3896e8cf2b5 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/common.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/common.go @@ -16,8 +16,6 @@ * */ -//go:generate ./regenerate.sh - // Package internal contains common core functionality for ALTS. package internal diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go index fd5a53d9a7a..0d64fb37a12 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go @@ -32,7 +32,7 @@ import ( // ALTSRecordCrypto is the interface for gRPC ALTS record protocol. type ALTSRecordCrypto interface { // Encrypt encrypts the plaintext and computes the tag (if any) of dst - // and plaintext, dst and plaintext do not overlap. + // and plaintext. dst and plaintext may fully overlap or not at all. Encrypt(dst, plaintext []byte) ([]byte, error) // EncryptionOverhead returns the tag size (if any) in bytes. 
EncryptionOverhead() int @@ -111,6 +111,7 @@ func NewConn(c net.Conn, side core.Side, recordProtocol string, key []byte, prot } overhead := MsgLenFieldSize + msgTypeFieldSize + crypto.EncryptionOverhead() payloadLengthLimit := altsRecordDefaultLength - overhead + var protectedBuf []byte if protected == nil { // We pre-allocate protected to be of size // 2*altsRecordDefaultLength-1 during initialization. We only @@ -120,16 +121,19 @@ func NewConn(c net.Conn, side core.Side, recordProtocol string, key []byte, prot // altsRecordDefaultLength (bytes) data into protected at one // time. Therefore, 2*altsRecordDefaultLength-1 is large enough // to buffer data read from the network. - protected = make([]byte, 0, 2*altsRecordDefaultLength-1) + protectedBuf = make([]byte, 0, 2*altsRecordDefaultLength-1) + } else { + protectedBuf = make([]byte, len(protected)) + copy(protectedBuf, protected) } altsConn := &conn{ Conn: c, crypto: crypto, payloadLengthLimit: payloadLengthLimit, - protected: protected, + protected: protectedBuf, writeBuf: make([]byte, altsWriteBufferInitialSize), - nextFrame: protected, + nextFrame: protectedBuf, overhead: overhead, } return altsConn, nil diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go index 0c7b568354d..77d759cd956 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go @@ -27,9 +27,12 @@ import ( ) var ( - // hsConn represents a connection to hypervisor handshaker service. - hsConn *grpc.ClientConn - mu sync.Mutex + // mu guards hsConnMap and hsDialer. + mu sync.Mutex + // hsConn represents a mapping from a hypervisor handshaker service address + // to a corresponding connection to a hypervisor handshaker service + // instance. + hsConnMap = make(map[string]*grpc.ClientConn) // hsDialer will be reassigned in tests. hsDialer = grpc.Dial ) @@ -41,7 +44,8 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) { mu.Lock() defer mu.Unlock() - if hsConn == nil { + hsConn, ok := hsConnMap[hsAddress] + if !ok { // Create a new connection to the handshaker service. Note that // this connection stays open until the application is closed. var err error @@ -49,6 +53,7 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) { if err != nil { return nil, err } + hsConnMap[hsAddress] = hsConn } return hsConn, nil } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index a2060de402b..6d9c304e798 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -4,12 +4,8 @@ package grpc_gcp import ( - context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" math "math" ) @@ -979,127 +975,3 @@ var fileDescriptor_54c074f40c7c7e99 = []byte{ 0x5f, 0xef, 0xa8, 0xf5, 0x83, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc1, 0xf9, 0x9d, 0xf2, 0xd9, 0x0b, 0x00, 0x00, } - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// HandshakerServiceClient is the client API for HandshakerService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type HandshakerServiceClient interface { - // Handshaker service accepts a stream of handshaker request, returning a - // stream of handshaker response. Client is expected to send exactly one - // message with either client_start or server_start followed by one or more - // messages with next. Each time client sends a request, the handshaker - // service expects to respond. Client does not have to wait for service's - // response before sending next request. - DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) -} - -type handshakerServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewHandshakerServiceClient(cc grpc.ClientConnInterface) HandshakerServiceClient { - return &handshakerServiceClient{cc} -} - -func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) { - stream, err := c.cc.NewStream(ctx, &_HandshakerService_serviceDesc.Streams[0], "/grpc.gcp.HandshakerService/DoHandshake", opts...) - if err != nil { - return nil, err - } - x := &handshakerServiceDoHandshakeClient{stream} - return x, nil -} - -type HandshakerService_DoHandshakeClient interface { - Send(*HandshakerReq) error - Recv() (*HandshakerResp, error) - grpc.ClientStream -} - -type handshakerServiceDoHandshakeClient struct { - grpc.ClientStream -} - -func (x *handshakerServiceDoHandshakeClient) Send(m *HandshakerReq) error { - return x.ClientStream.SendMsg(m) -} - -func (x *handshakerServiceDoHandshakeClient) Recv() (*HandshakerResp, error) { - m := new(HandshakerResp) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// HandshakerServiceServer is the server API for HandshakerService service. -type HandshakerServiceServer interface { - // Handshaker service accepts a stream of handshaker request, returning a - // stream of handshaker response. Client is expected to send exactly one - // message with either client_start or server_start followed by one or more - // messages with next. Each time client sends a request, the handshaker - // service expects to respond. Client does not have to wait for service's - // response before sending next request. - DoHandshake(HandshakerService_DoHandshakeServer) error -} - -// UnimplementedHandshakerServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedHandshakerServiceServer struct { -} - -func (*UnimplementedHandshakerServiceServer) DoHandshake(srv HandshakerService_DoHandshakeServer) error { - return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented") -} - -func RegisterHandshakerServiceServer(s *grpc.Server, srv HandshakerServiceServer) { - s.RegisterService(&_HandshakerService_serviceDesc, srv) -} - -func _HandshakerService_DoHandshake_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(HandshakerServiceServer).DoHandshake(&handshakerServiceDoHandshakeServer{stream}) -} - -type HandshakerService_DoHandshakeServer interface { - Send(*HandshakerResp) error - Recv() (*HandshakerReq, error) - grpc.ServerStream -} - -type handshakerServiceDoHandshakeServer struct { - grpc.ServerStream -} - -func (x *handshakerServiceDoHandshakeServer) Send(m *HandshakerResp) error { - return x.ServerStream.SendMsg(m) -} - -func (x *handshakerServiceDoHandshakeServer) Recv() (*HandshakerReq, error) { - m := new(HandshakerReq) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _HandshakerService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.gcp.HandshakerService", - HandlerType: (*HandshakerServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "DoHandshake", - Handler: _HandshakerService_DoHandshake_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "grpc/gcp/handshaker.proto", -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go new file mode 100644 index 00000000000..0e973b8250e --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -0,0 +1,132 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package grpc_gcp + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// HandshakerServiceClient is the client API for HandshakerService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type HandshakerServiceClient interface { + // Handshaker service accepts a stream of handshaker request, returning a + // stream of handshaker response. Client is expected to send exactly one + // message with either client_start or server_start followed by one or more + // messages with next. Each time client sends a request, the handshaker + // service expects to respond. Client does not have to wait for service's + // response before sending next request. 
+ DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) +} + +type handshakerServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewHandshakerServiceClient(cc grpc.ClientConnInterface) HandshakerServiceClient { + return &handshakerServiceClient{cc} +} + +func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) { + stream, err := c.cc.NewStream(ctx, &_HandshakerService_serviceDesc.Streams[0], "/grpc.gcp.HandshakerService/DoHandshake", opts...) + if err != nil { + return nil, err + } + x := &handshakerServiceDoHandshakeClient{stream} + return x, nil +} + +type HandshakerService_DoHandshakeClient interface { + Send(*HandshakerReq) error + Recv() (*HandshakerResp, error) + grpc.ClientStream +} + +type handshakerServiceDoHandshakeClient struct { + grpc.ClientStream +} + +func (x *handshakerServiceDoHandshakeClient) Send(m *HandshakerReq) error { + return x.ClientStream.SendMsg(m) +} + +func (x *handshakerServiceDoHandshakeClient) Recv() (*HandshakerResp, error) { + m := new(HandshakerResp) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// HandshakerServiceServer is the server API for HandshakerService service. +// All implementations should embed UnimplementedHandshakerServiceServer +// for forward compatibility +type HandshakerServiceServer interface { + // Handshaker service accepts a stream of handshaker request, returning a + // stream of handshaker response. Client is expected to send exactly one + // message with either client_start or server_start followed by one or more + // messages with next. Each time client sends a request, the handshaker + // service expects to respond. Client does not have to wait for service's + // response before sending next request. + DoHandshake(HandshakerService_DoHandshakeServer) error +} + +// UnimplementedHandshakerServiceServer should be embedded to have forward compatible implementations. 
+type UnimplementedHandshakerServiceServer struct { +} + +func (*UnimplementedHandshakerServiceServer) DoHandshake(HandshakerService_DoHandshakeServer) error { + return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented") +} + +func RegisterHandshakerServiceServer(s *grpc.Server, srv HandshakerServiceServer) { + s.RegisterService(&_HandshakerService_serviceDesc, srv) +} + +func _HandshakerService_DoHandshake_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(HandshakerServiceServer).DoHandshake(&handshakerServiceDoHandshakeServer{stream}) +} + +type HandshakerService_DoHandshakeServer interface { + Send(*HandshakerResp) error + Recv() (*HandshakerReq, error) + grpc.ServerStream +} + +type handshakerServiceDoHandshakeServer struct { + grpc.ServerStream +} + +func (x *handshakerServiceDoHandshakeServer) Send(m *HandshakerResp) error { + return x.ServerStream.SendMsg(m) +} + +func (x *handshakerServiceDoHandshakeServer) Recv() (*HandshakerReq, error) { + m := new(HandshakerReq) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _HandshakerService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.gcp.HandshakerService", + HandlerType: (*HandshakerServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "DoHandshake", + Handler: _HandshakerService_DoHandshake_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/gcp/handshaker.proto", +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/regenerate.sh b/vendor/google.golang.org/grpc/credentials/alts/internal/regenerate.sh deleted file mode 100644 index a79c4201b36..00000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/regenerate.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash -# Copyright 2018 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eux -o pipefail - -TMP=$(mktemp -d) - -function finish { - rm -rf "$TMP" -} -trap finish EXIT - -pushd "$TMP" -mkdir -p grpc/gcp -curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/gcp/altscontext.proto > grpc/gcp/altscontext.proto -curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/gcp/handshaker.proto > grpc/gcp/handshaker.proto -curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/gcp/transport_security_common.proto > grpc/gcp/transport_security_common.proto - -protoc --go_out=plugins=grpc,paths=source_relative:. -I. 
grpc/gcp/*.proto -popd -rm -f proto/grpc_gcp/*.pb.go -cp "$TMP"/grpc/gcp/*.pb.go proto/grpc_gcp/ - diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index e438fda226f..02766443ae7 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -29,6 +29,7 @@ import ( "net" "github.com/golang/protobuf/proto" + "google.golang.org/grpc/attributes" "google.golang.org/grpc/internal" ) @@ -57,9 +58,11 @@ type PerRPCCredentials interface { type SecurityLevel int const ( - // NoSecurity indicates a connection is insecure. + // Invalid indicates an invalid security level. // The zero SecurityLevel value is invalid for backward compatibility. - NoSecurity SecurityLevel = iota + 1 + Invalid SecurityLevel = iota + // NoSecurity indicates a connection is insecure. + NoSecurity // IntegrityOnly indicates a connection only provides integrity protection. IntegrityOnly // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. @@ -124,15 +127,18 @@ var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gR // TransportCredentials defines the common interface for all the live gRPC wire // protocols and supported transport security protocols (e.g., TLS, SSL). type TransportCredentials interface { - // ClientHandshake does the authentication handshake specified by the corresponding - // authentication protocol on rawConn for clients. It returns the authenticated - // connection and the corresponding auth information about the connection. - // The auth information should embed CommonAuthInfo to return additional information about - // the credentials. Implementations must use the provided context to implement timely cancellation. - // gRPC will try to reconnect if the error returned is a temporary error - // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). - // If the returned error is a wrapper error, implementations should make sure that + // ClientHandshake does the authentication handshake specified by the + // corresponding authentication protocol on rawConn for clients. It returns + // the authenticated connection and the corresponding auth information + // about the connection. The auth information should embed CommonAuthInfo + // to return additional information about the credentials. Implementations + // must use the provided context to implement timely cancellation. gRPC + // will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the + // returned error is a wrapper error, implementations should make sure that // the error implements Temporary() to have the correct retry behaviors. + // Additionally, ClientHandshakeInfo data will be available via the context + // passed to this call. // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) @@ -193,6 +199,31 @@ func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { return } +// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes +// it possible to pass arbitrary data to the handshaker from gRPC, resolver, +// balancer etc. Individual credential implementations control the actual +// format of the data that they are willing to receive. +// +// This API is experimental. 
+type ClientHandshakeInfo struct { + // Attributes contains the attributes for the address. It could be provided + // by the gRPC, resolver, balancer etc. + Attributes *attributes.Attributes +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored +// in ctx. +// +// This API is experimental. +func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { + chi, _ := ctx.Value(clientHandshakeInfoKey{}).(ClientHandshakeInfo) + return chi +} + // CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. // It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method // or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. @@ -208,7 +239,7 @@ func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error { } if ci, ok := ri.AuthInfo.(internalInfo); ok { // CommonAuthInfo.SecurityLevel has an invalid value. - if ci.GetCommonAuthInfo().SecurityLevel == 0 { + if ci.GetCommonAuthInfo().SecurityLevel == Invalid { return nil } if ci.GetCommonAuthInfo().SecurityLevel < level { @@ -223,6 +254,9 @@ func init() { internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context { return context.WithValue(ctx, requestInfoKey{}, ri) } + internal.NewClientHandshakeInfoContext = func(ctx context.Context, chi ClientHandshakeInfo) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) + } } // ChannelzSecurityInfo defines the interface that security protocols should implement diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go index 899e3372ce3..6657055d660 100644 --- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -74,6 +74,8 @@ func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) } func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + // TODO: the returned TokenSource is reusable. Store it in a sync.Map, with + // uri as the key, to avoid recreating for every RPC. ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0]) if err != nil { return nil, err @@ -177,9 +179,43 @@ func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.Per // NewApplicationDefault returns "Application Default Credentials". For more // detail, see https://developers.google.com/accounts/docs/application-default-credentials. func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) { - t, err := google.DefaultTokenSource(ctx, scope...) + creds, err := google.FindDefaultCredentials(ctx, scope...) if err != nil { return nil, err } - return TokenSource{t}, nil + + // If JSON is nil, the authentication is provided by the environment and not + // with a credentials file, e.g. when code is running on Google Cloud + // Platform. Use the returned token source. + if creds.JSON == nil { + return TokenSource{creds.TokenSource}, nil + } + + // If auth is provided by env variable or creds file, the behavior will be + // different based on whether scope is set. 
Because the returned + // creds.TokenSource does oauth with jwt by default, and it requires scope. + // We can only use it if scope is not empty, otherwise it will fail with + // missing scope error. + // + // If scope is set, use it, it should just work. + // + // If scope is not set, we try to use jwt directly without oauth (this only + // works if it's a service account). + + if len(scope) != 0 { + return TokenSource{creds.TokenSource}, nil + } + + // Try to convert JSON to a jwt config without setting the optional scope + // parameter to check if it's a service account (the function errors if it's + // not). This is necessary because the returned config doesn't show the type + // of the account. + if _, err := google.JWTConfigFromJSON(creds.JSON); err != nil { + // If this fails, it's not a service account, return the original + // TokenSource from above. + return TokenSource{creds.TokenSource}, nil + } + + // If it's a service account, create a JWT only access with the key. + return NewJWTAccessFromKey(creds.JSON) } diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 86e956bc8b7..1ba6f3a6b8f 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -25,8 +25,10 @@ import ( "fmt" "io/ioutil" "net" + "net/url" "google.golang.org/grpc/credentials/internal" + credinternal "google.golang.org/grpc/internal/credentials" ) // TLSInfo contains the auth information for a TLS authenticated connection. @@ -34,6 +36,8 @@ import ( type TLSInfo struct { State tls.ConnectionState CommonAuthInfo + // This API is experimental. + SPIFFEID *url.URL } // AuthType returns the type of TLSInfo as a string. @@ -94,7 +98,17 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon conn.Close() return nil, nil, ctx.Err() } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return internal.WrapSyscallConn(rawConn, conn), tlsInfo, nil } func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { @@ -103,7 +117,17 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) conn.Close() return nil, nil, err } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return internal.WrapSyscallConn(rawConn, conn), tlsInfo, nil } func (c *tlsCreds) Clone() TransportCredentials { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 35bde1033a7..decb4c5ee89 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -27,7 +27,6 @@ import ( "google.golang.org/grpc/backoff" "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" 
"google.golang.org/grpc/internal/envconfig" @@ -46,18 +45,18 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by v1 balancer dial option WithBalancer to support v1 - // balancer, and also by WithBalancerName dial option. + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + // This is used by WithBalancerName dial option. balancerBuilder balancer.Builder channelzParentID int64 disableServiceConfig bool @@ -199,19 +198,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancer returns a DialOption which sets a load balancer with the v1 API. -// Name resolver will be ignored if this DialOption is specified. -// -// Deprecated: use the new balancer APIs in balancer package and -// WithBalancerName. Will be removed in a future 1.x release. -func WithBalancer(b Balancer) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = &balancerWrapperBuilder{ - b: b, - } - }) -} - // WithBalancerName sets the balancer that the ClientConn will be initialized // with. Balancer registered with balancerName will be used. This function // panics if no balancer was registered by balancerName. @@ -299,6 +285,19 @@ func WithBlock() DialOption { }) } +// WithReturnConnectionError returns a DialOption which makes the client connection +// return a string containing both the last connection error that occurred and +// the context.DeadlineExceeded error. +// Implies WithBlock() +// +// This API is EXPERIMENTAL. +func WithReturnConnectionError() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + o.returnLastError = true + }) +} + // WithInsecure returns a DialOption which disables transport security for this // ClientConn. Note that transport security is required unless WithInsecure is // set. @@ -423,7 +422,7 @@ func WithUserAgent(s string) DialOption { // for the client transport. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { - grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) + logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) kp.Time = internal.KeepaliveMinPingTime } return newFuncDialOption(func(o *dialOptions) { @@ -459,7 +458,7 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption { } // WithChainStreamInterceptor returns a DialOption that specifies the chained -// interceptor for unary RPCs. The first interceptor will be the outer most, +// interceptor for streaming RPCs. The first interceptor will be the outer most, // while the last interceptor will be the inner most wrapper around the real call. // All interceptors added by this method will be chained, and the interceptor // defined by WithStreamInterceptor will always be prepended to the chain. 
diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index 187adbb117f..0022859ad74 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -16,6 +16,8 @@ * */ +//go:generate ./regenerate.sh + /* Package grpc implements an RPC system called gRPC. diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod index ecef1ab0cd5..31f2b01f64e 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -6,9 +6,8 @@ require ( github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f github.com/envoyproxy/go-control-plane v0.9.4 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/mock v1.1.1 github.com/golang/protobuf v1.3.3 - github.com/google/go-cmp v0.2.0 + github.com/google/go-cmp v0.4.0 golang.org/x/net v0.0.0-20190311183353-d8887717615a golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum index 0bf9f0747bd..be8078eace2 100644 --- a/vendor/google.golang.org/grpc/go.sum +++ b/vendor/google.golang.org/grpc/go.sum @@ -23,6 +23,8 @@ github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -50,6 +52,8 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go new file mode 100644 index 00000000000..b513281a34c --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -0,0 +1,117 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "fmt" + + "google.golang.org/grpc/internal/grpclog" +) + +// componentData records the settings for a component. +type componentData struct { + name string +} + +var cache = map[string]*componentData{} + +func (c *componentData) InfoDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.InfoDepth(depth+1, args...) +} + +func (c *componentData) WarningDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.WarningDepth(depth+1, args...) +} + +func (c *componentData) ErrorDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.ErrorDepth(depth+1, args...) +} + +func (c *componentData) FatalDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.FatalDepth(depth+1, args...) +} + +func (c *componentData) Info(args ...interface{}) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warning(args ...interface{}) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Error(args ...interface{}) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatal(args ...interface{}) { + c.FatalDepth(1, args...) +} + +func (c *componentData) Infof(format string, args ...interface{}) { + c.InfoDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Warningf(format string, args ...interface{}) { + c.WarningDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Errorf(format string, args ...interface{}) { + c.ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Fatalf(format string, args ...interface{}) { + c.FatalDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Infoln(args ...interface{}) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warningln(args ...interface{}) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Errorln(args ...interface{}) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatalln(args ...interface{}) { + c.FatalDepth(1, args...) +} + +func (c *componentData) V(l int) bool { + return grpclog.Logger.V(l) +} + +// Component creates a new component and returns it for logging. If a component +// with the name already exists, nothing will be created and it will be +// returned. SetLoggerV2 will panic if it is called with a logger created by +// Component. +func Component(componentName string) DepthLoggerV2 { + if cData, ok := cache[componentName]; ok { + return cData + } + c := &componentData{componentName} + cache[componentName] = c + return c +} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 23612b7c41b..8eba2d0e0ef 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -67,6 +67,9 @@ type LoggerV2 interface { // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. 
func SetLoggerV2(l LoggerV2) { + if _, ok := l.(*componentData); ok { + panic("cannot use component logger as grpclog logger") + } grpclog.Logger = l grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) } @@ -203,6 +206,7 @@ func (g *loggerT) V(l int) bool { // // This API is EXPERIMENTAL. type DepthLoggerV2 interface { + LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. InfoDepth(depth int, args ...interface{}) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 4c2a527ec59..e9919c0073b 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -4,12 +4,8 @@ package grpc_health_v1 import ( - context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" math "math" ) @@ -163,181 +159,3 @@ var fileDescriptor_e265fd9d4e077217 = []byte{ 0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00, } - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// HealthClient is the client API for Health service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type HealthClient interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. - Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) - // Performs a watch for the serving status of the requested service. - // The server will immediately send back a message indicating the current - // serving status. It will then subsequently send a new message whenever - // the service's serving status changes. - // - // If the requested service is unknown when the call is received, the - // server will send a message setting the serving status to - // SERVICE_UNKNOWN but will *not* terminate the call. If at some - // future point, the serving status of the service becomes known, the - // server will send a new message with the service's serving status. - // - // If the call terminates with status UNIMPLEMENTED, then clients - // should assume this method is not supported and should not retry the - // call. If the call terminates with any other status (including OK), - // clients should retry the call with appropriate exponential backoff. - Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) -} - -type healthClient struct { - cc grpc.ClientConnInterface -} - -func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { - return &healthClient{cc} -} - -func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { - out := new(HealthCheckResponse) - err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &_Health_serviceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) - if err != nil { - return nil, err - } - x := &healthWatchClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Health_WatchClient interface { - Recv() (*HealthCheckResponse, error) - grpc.ClientStream -} - -type healthWatchClient struct { - grpc.ClientStream -} - -func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { - m := new(HealthCheckResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// HealthServer is the server API for Health service. -type HealthServer interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. - Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) - // Performs a watch for the serving status of the requested service. - // The server will immediately send back a message indicating the current - // serving status. It will then subsequently send a new message whenever - // the service's serving status changes. - // - // If the requested service is unknown when the call is received, the - // server will send a message setting the serving status to - // SERVICE_UNKNOWN but will *not* terminate the call. If at some - // future point, the serving status of the service becomes known, the - // server will send a new message with the service's serving status. - // - // If the call terminates with status UNIMPLEMENTED, then clients - // should assume this method is not supported and should not retry the - // call. If the call terminates with any other status (including OK), - // clients should retry the call with appropriate exponential backoff. - Watch(*HealthCheckRequest, Health_WatchServer) error -} - -// UnimplementedHealthServer can be embedded to have forward compatible implementations. 
-type UnimplementedHealthServer struct { -} - -func (*UnimplementedHealthServer) Check(ctx context.Context, req *HealthCheckRequest) (*HealthCheckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") -} -func (*UnimplementedHealthServer) Watch(req *HealthCheckRequest, srv Health_WatchServer) error { - return status.Errorf(codes.Unimplemented, "method Watch not implemented") -} - -func RegisterHealthServer(s *grpc.Server, srv HealthServer) { - s.RegisterService(&_Health_serviceDesc, srv) -} - -func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HealthCheckRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(HealthServer).Check(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.health.v1.Health/Check", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(HealthCheckRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(HealthServer).Watch(m, &healthWatchServer{stream}) -} - -type Health_WatchServer interface { - Send(*HealthCheckResponse) error - grpc.ServerStream -} - -type healthWatchServer struct { - grpc.ServerStream -} - -func (x *healthWatchServer) Send(m *HealthCheckResponse) error { - return x.ServerStream.SendMsg(m) -} - -var _Health_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.health.v1.Health", - HandlerType: (*HealthServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Check", - Handler: _Health_Check_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Watch", - Handler: _Health_Watch_Handler, - ServerStreams: true, - }, - }, - Metadata: "grpc/health/v1/health.proto", -} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go new file mode 100644 index 00000000000..f87e3c92adb --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -0,0 +1,186 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package grpc_health_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// HealthClient is the client API for Health service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type HealthClient interface { + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. 
+ // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) +} + +type healthClient struct { + cc grpc.ClientConnInterface +} + +func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { + stream, err := c.cc.NewStream(ctx, &_Health_serviceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) + if err != nil { + return nil, err + } + x := &healthWatchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Health_WatchClient interface { + Recv() (*HealthCheckResponse, error) + grpc.ClientStream +} + +type healthWatchClient struct { + grpc.ClientStream +} + +func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { + m := new(HealthCheckResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// HealthServer is the server API for Health service. +// All implementations should embed UnimplementedHealthServer +// for forward compatibility +type HealthServer interface { + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + Watch(*HealthCheckRequest, Health_WatchServer) error +} + +// UnimplementedHealthServer should be embedded to have forward compatible implementations. 
+type UnimplementedHealthServer struct { +} + +func (*UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") +} +func (*UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error { + return status.Errorf(codes.Unimplemented, "method Watch not implemented") +} + +func RegisterHealthServer(s *grpc.Server, srv HealthServer) { + s.RegisterService(&_Health_serviceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.health.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(HealthCheckRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(HealthServer).Watch(m, &healthWatchServer{stream}) +} + +type Health_WatchServer interface { + Send(*HealthCheckResponse) error + grpc.ServerStream +} + +type healthWatchServer struct { + grpc.ServerStream +} + +func (x *healthWatchServer) Send(m *HealthCheckResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _Health_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Health_Watch_Handler, + ServerStreams: true, + }, + }, + Metadata: "grpc/health/v1/health.proto", +} diff --git a/vendor/google.golang.org/grpc/health/regenerate.sh b/vendor/google.golang.org/grpc/health/regenerate.sh deleted file mode 100644 index b11eccb295b..00000000000 --- a/vendor/google.golang.org/grpc/health/regenerate.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# Copyright 2018 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eux -o pipefail - -TMP=$(mktemp -d) - -function finish { - rm -rf "$TMP" -} -trap finish EXIT - -pushd "$TMP" -mkdir -p grpc/health/v1 -curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto > grpc/health/v1/health.proto - -protoc --go_out=plugins=grpc,paths=source_relative:. -I. 
grpc/health/v1/*.proto -popd -rm -f grpc_health_v1/*.pb.go -cp "$TMP"/grpc/health/v1/*.pb.go grpc_health_v1/ - diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go index 2262607f882..ed2b3df70f4 100644 --- a/vendor/google.golang.org/grpc/health/server.go +++ b/vendor/google.golang.org/grpc/health/server.go @@ -16,8 +16,6 @@ * */ -//go:generate ./regenerate.sh - // Package health provides a service that exposes server's health and it must be // imported to enable support for client-side health checks. package health @@ -35,6 +33,7 @@ import ( // Server implements `service Health`. type Server struct { + healthgrpc.UnimplementedHealthServer mu sync.RWMutex // If shutdown is true, it's expected all serving status is NOT_SERVING, and // will stay in NOT_SERVING. diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 8b105167491..5cc3aeddb21 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -25,6 +25,7 @@ import ( "os" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcutil" ) // Logger is the global binary logger. It can be used to get binary logger for @@ -39,6 +40,8 @@ type Logger interface { // It is used to get a methodLogger for each individual method. var binLogger Logger +var grpclogLogger = grpclog.Component("binarylog") + // SetLogger sets the binarg logger. // // Only call this at init time. @@ -146,9 +149,9 @@ func (l *logger) setBlacklist(method string) error { // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. func (l *logger) getMethodLogger(methodName string) *MethodLogger { - s, m, err := parseMethodName(methodName) + s, m, err := grpcutil.ParseMethod(methodName) if err != nil { - grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err) + grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } if ml, ok := l.methods[s+"/"+m]; ok { diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index be30d0e65e7..d8f4e7602fd 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -24,8 +24,6 @@ import ( "regexp" "strconv" "strings" - - "google.golang.org/grpc/grpclog" ) // NewLoggerFromConfigString reads the string and build a logger. 
It can be used @@ -52,7 +50,7 @@ func NewLoggerFromConfigString(s string) Logger { methods := strings.Split(s, ",") for _, method := range methods { if err := l.fillMethodLoggerWithConfigString(method); err != nil { - grpclog.Warningf("failed to parse binary log config: %v", err) + grpclogLogger.Warningf("failed to parse binary log config: %v", err) return nil } } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 160f6e8616f..5e1083539b4 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -27,7 +27,6 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -219,12 +218,12 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry { if m, ok := c.Message.(proto.Message); ok { data, err = proto.Marshal(m) if err != nil { - grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) } } else if b, ok := c.Message.([]byte); ok { data = b } else { - grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } ret := &pb.GrpcLogEntry{ Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, @@ -259,12 +258,12 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry { if m, ok := c.Message.(proto.Message); ok { data, err = proto.Marshal(m) if err != nil { - grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) } } else if b, ok := c.Message.([]byte); ok { data = b } else { - grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } ret := &pb.GrpcLogEntry{ Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, @@ -315,7 +314,7 @@ type ServerTrailer struct { func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { st, ok := status.FromError(c.Err) if !ok { - grpclog.Info("binarylogging: error in trailer is not a status error") + grpclogLogger.Info("binarylogging: error in trailer is not a status error") } var ( detailsBytes []byte @@ -325,7 +324,7 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { if stProto != nil && len(stProto.Details) != 0 { detailsBytes, err = proto.Marshal(stProto) if err != nil { - grpclog.Infof("binarylogging: failed to marshal status proto: %v", err) + grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) } } ret := &pb.GrpcLogEntry{ diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh deleted file mode 100644 index 113d40cbe16..00000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# Copyright 2018 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eux -o pipefail - -TMP=$(mktemp -d) - -function finish { - rm -rf "$TMP" -} -trap finish EXIT - -pushd "$TMP" -mkdir -p grpc/binarylog/grpc_binarylog_v1 -curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto - -protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto -popd -rm -f ./grpc_binarylog_v1/*.pb.go -cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/ - diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index a2e7c346dd0..835f51040cb 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -29,7 +29,6 @@ import ( "github.com/golang/protobuf/proto" pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - "google.golang.org/grpc/grpclog" ) var ( @@ -78,7 +77,7 @@ type writerSink struct { func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { - grpclog.Infof("binary logging: failed to marshal proto message: %v", err) + grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err) } hdr := make([]byte, 4) binary.BigEndian.PutUint32(hdr, uint32(len(b))) diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go deleted file mode 100644 index 15dc7803d8b..00000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/util.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package binarylog - -import ( - "errors" - "strings" -) - -// parseMethodName splits service and method from the input. It expects format -// "/service/method". -// -// TODO: move to internal/grpcutil. 
-func parseMethodName(methodName string) (service, method string, _ error) { - if !strings.HasPrefix(methodName, "/") { - return "", "", errors.New("invalid method name: should start with /") - } - methodName = methodName[1:] - - pos := strings.LastIndex(methodName, "/") - if pos < 0 { - return "", "", errors.New("invalid method name: suffix /method is missing") - } - return methodName[:pos], methodName[pos+1:], nil -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index e4252e5be9f..81d3dd33e62 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -30,7 +30,7 @@ import ( "sync/atomic" "time" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog" ) const ( @@ -216,7 +216,7 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 { // by pid). It returns the unique channelz tracking id assigned to this subchannel. func RegisterSubChannel(c Channel, pid int64, ref string) int64 { if pid == 0 { - grpclog.ErrorDepth(0, "a SubChannel's parent id cannot be 0") + logger.Error("a SubChannel's parent id cannot be 0") return 0 } id := idGen.genID() @@ -253,7 +253,7 @@ func RegisterServer(s Server, ref string) int64 { // this listen socket. func RegisterListenSocket(s Socket, pid int64, ref string) int64 { if pid == 0 { - grpclog.ErrorDepth(0, "a ListenSocket's parent id cannot be 0") + logger.Error("a ListenSocket's parent id cannot be 0") return 0 } id := idGen.genID() @@ -268,7 +268,7 @@ func RegisterListenSocket(s Socket, pid int64, ref string) int64 { // this normal socket. func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { if pid == 0 { - grpclog.ErrorDepth(0, "a NormalSocket's parent id cannot be 0") + logger.Error("a NormalSocket's parent id cannot be 0") return 0 } id := idGen.genID() @@ -294,17 +294,17 @@ type TraceEventDesc struct { } // AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(id int64, depth int, desc *TraceEventDesc) { +func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { for d := desc; d != nil; d = d.Parent { switch d.Severity { case CtUNKNOWN: - grpclog.InfoDepth(depth+1, d.Desc) + l.InfoDepth(depth+1, d.Desc) case CtINFO: - grpclog.InfoDepth(depth+1, d.Desc) + l.InfoDepth(depth+1, d.Desc) case CtWarning: - grpclog.WarningDepth(depth+1, d.Desc) + l.WarningDepth(depth+1, d.Desc) case CtError: - grpclog.ErrorDepth(depth+1, d.Desc) + l.ErrorDepth(depth+1, d.Desc) } } if getMaxTraceEntry() == 0 { diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index 59c7bedecd9..e94039ee20b 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -21,80 +21,82 @@ package channelz import ( "fmt" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog" ) -// Info logs through grpclog.Info and adds a trace event if channelz is on. -func Info(id int64, args ...interface{}) { +var logger = grpclog.Component("channelz") + +// Info logs and adds a trace event if channelz is on. 
+func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { if IsOn() { - AddTraceEvent(id, 1, &TraceEventDesc{ + AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtINFO, }) } else { - grpclog.InfoDepth(1, args...) + l.InfoDepth(1, args...) } } -// Infof logs through grpclog.Infof and adds a trace event if channelz is on. -func Infof(id int64, format string, args ...interface{}) { +// Infof logs and adds a trace event if channelz is on. +func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) if IsOn() { - AddTraceEvent(id, 1, &TraceEventDesc{ + AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: msg, Severity: CtINFO, }) } else { - grpclog.InfoDepth(1, msg) + l.InfoDepth(1, msg) } } -// Warning logs through grpclog.Warning and adds a trace event if channelz is on. -func Warning(id int64, args ...interface{}) { +// Warning logs and adds a trace event if channelz is on. +func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { if IsOn() { - AddTraceEvent(id, 1, &TraceEventDesc{ + AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtWarning, }) } else { - grpclog.WarningDepth(1, args...) + l.WarningDepth(1, args...) } } -// Warningf logs through grpclog.Warningf and adds a trace event if channelz is on. -func Warningf(id int64, format string, args ...interface{}) { +// Warningf logs and adds a trace event if channelz is on. +func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) if IsOn() { - AddTraceEvent(id, 1, &TraceEventDesc{ + AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: msg, Severity: CtWarning, }) } else { - grpclog.WarningDepth(1, msg) + l.WarningDepth(1, msg) } } -// Error logs through grpclog.Error and adds a trace event if channelz is on. -func Error(id int64, args ...interface{}) { +// Error logs and adds a trace event if channelz is on. +func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { if IsOn() { - AddTraceEvent(id, 1, &TraceEventDesc{ + AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtError, }) } else { - grpclog.ErrorDepth(1, args...) + l.ErrorDepth(1, args...) } } -// Errorf logs through grpclog.Errorf and adds a trace event if channelz is on. -func Errorf(id int64, format string, args ...interface{}) { +// Errorf logs and adds a trace event if channelz is on. +func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) if IsOn() { - AddTraceEvent(id, 1, &TraceEventDesc{ + AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: msg, Severity: CtError, }) } else { - grpclog.ErrorDepth(1, msg) + l.ErrorDepth(1, msg) } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 17c2274cb3d..075dc7d1671 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -26,7 +26,6 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" ) // entry represents a node in the channelz database. @@ -60,17 +59,17 @@ func (d *dummyEntry) addChild(id int64, e entry) { // the addrConn will create a new transport. 
And when registering the new transport in // channelz, its parent addrConn could have already been torn down and deleted // from channelz tracking, and thus reach the code here. - grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) + logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) } func (d *dummyEntry) deleteChild(id int64) { // It is possible for a normal program to reach here under race condition. // Refer to the example described in addChild(). - grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) + logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) } func (d *dummyEntry) triggerDelete() { - grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) + logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) } func (*dummyEntry) deleteSelfIfReady() { @@ -215,7 +214,7 @@ func (c *channel) addChild(id int64, e entry) { case *channel: c.nestedChans[id] = v.refName default: - grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) } } @@ -326,7 +325,7 @@ func (sc *subChannel) addChild(id int64, e entry) { if v, ok := e.(*normalSocket); ok { sc.sockets[id] = v.refName } else { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) } } @@ -493,11 +492,11 @@ type listenSocket struct { } func (ls *listenSocket) addChild(id int64, e entry) { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) } func (ls *listenSocket) deleteChild(id int64) { - grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id) + logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) } func (ls *listenSocket) triggerDelete() { @@ -506,7 +505,7 @@ func (ls *listenSocket) triggerDelete() { } func (ls *listenSocket) deleteSelfIfReady() { - grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket") + logger.Errorf("cannot call deleteSelfIfReady on a listen socket") } func (ls *listenSocket) getParentID() int64 { @@ -522,11 +521,11 @@ type normalSocket struct { } func (ns *normalSocket) addChild(id int64, e entry) { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) } func (ns *normalSocket) deleteChild(id int64) { - grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id) + logger.Errorf("cannot delete a child (id = %d) from a normal socket", id) } func (ns *normalSocket) triggerDelete() { @@ -535,7 +534,7 @@ func (ns *normalSocket) triggerDelete() { } func (ns *normalSocket) deleteSelfIfReady() { - grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket") + logger.Errorf("cannot call deleteSelfIfReady on a normal socket") } func (ns *normalSocket) getParentID() int64 { @@ -594,7 +593,7 @@ func (s *server) addChild(id int64, e entry) { case *listenSocket: s.listenSockets[id] = v.refName default: - grpclog.Errorf("cannot add a child (id = %d) of 
type %T to a server", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go index 79edbefc433..19c2fc521dc 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -22,8 +22,6 @@ package channelz import ( "sync" - - "google.golang.org/grpc/grpclog" ) var once sync.Once @@ -39,6 +37,6 @@ type SocketOptionData struct { // Windows OS doesn't support Socket Option func (s *SocketOptionData) Getsockopt(fd uintptr) { once.Do(func() { - grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.") + logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.") }) } diff --git a/vendor/google.golang.org/grpc/internal/credentials/go110.go b/vendor/google.golang.org/grpc/internal/credentials/go110.go new file mode 100644 index 00000000000..d55b5203626 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/go110.go @@ -0,0 +1,65 @@ +// +build go1.10 + +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials defines APIs for parsing SPIFFE ID. +// +// All APIs in this package are experimental. +package credentials + +import ( + "crypto/tls" + "net/url" + + "google.golang.org/grpc/grpclog" +) + +// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format +// is invalid, return nil with warning. +func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { + if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 { + return nil + } + var spiffeID *url.URL + for _, uri := range state.PeerCertificates[0].URIs { + if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") { + continue + } + // From this point, we assume the uri is intended for a SPIFFE ID. + if len(uri.String()) > 2048 { + grpclog.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes") + return nil + } + if len(uri.Host) == 0 || len(uri.RawPath) == 0 || len(uri.Path) == 0 { + grpclog.Warning("invalid SPIFFE ID: domain or workload ID is empty") + return nil + } + if len(uri.Host) > 255 { + grpclog.Warning("invalid SPIFFE ID: domain length larger than 255 characters") + return nil + } + // A valid SPIFFE certificate can only have exactly one URI SAN field. 
+ if len(state.PeerCertificates[0].URIs) > 1 { + grpclog.Warning("invalid SPIFFE ID: multiple URI SANs") + return nil + } + spiffeID = uri + } + return spiffeID +} diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/internal/credentials/gobefore110.go similarity index 50% rename from vendor/google.golang.org/grpc/internal/transport/log.go rename to vendor/google.golang.org/grpc/internal/credentials/gobefore110.go index 879df80c4de..743713e19f8 100644 --- a/vendor/google.golang.org/grpc/internal/transport/log.go +++ b/vendor/google.golang.org/grpc/internal/credentials/gobefore110.go @@ -1,6 +1,8 @@ +// +build !go1.10 + /* * - * Copyright 2017 gRPC authors. + * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,29 +18,14 @@ * */ -// This file contains wrappers for grpclog functions. -// The transport package only logs to verbose level 2 by default. - -package transport - -import "google.golang.org/grpc/grpclog" - -const logLevel = 2 +package credentials -func infof(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Infof(format, args...) - } -} - -func warningf(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Warningf(format, args...) - } -} +import ( + "crypto/tls" + "net/url" +) -func errorf(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Errorf(format, args...) - } +//TODO(ZhenLian): delete this file when we remove Go 1.9 tests. +func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { + return nil } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index ae6c8972fd7..73931a94bca 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -34,5 +34,5 @@ var ( // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". Retry = strings.EqualFold(os.Getenv(retryStr), "on") // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). - TXTErrIgnore = !strings.EqualFold(os.Getenv(retryStr), "false") + TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index 8c8e19fce1d..745a166f02c 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -19,6 +19,10 @@ // Package grpclog (internal) defines depth logging for grpc. package grpclog +import ( + "os" +) + // Logger is the logger used for the non-depth log functions. var Logger LoggerV2 @@ -30,7 +34,7 @@ func InfoDepth(depth int, args ...interface{}) { if DepthLogger != nil { DepthLogger.InfoDepth(depth, args...) } else { - Logger.Info(args...) + Logger.Infoln(args...) } } @@ -39,7 +43,7 @@ func WarningDepth(depth int, args ...interface{}) { if DepthLogger != nil { DepthLogger.WarningDepth(depth, args...) } else { - Logger.Warning(args...) + Logger.Warningln(args...) } } @@ -48,7 +52,7 @@ func ErrorDepth(depth int, args ...interface{}) { if DepthLogger != nil { DepthLogger.ErrorDepth(depth, args...) } else { - Logger.Error(args...) + Logger.Errorln(args...) 
} } @@ -57,8 +61,9 @@ func FatalDepth(depth int, args ...interface{}) { if DepthLogger != nil { DepthLogger.FatalDepth(depth, args...) } else { - Logger.Fatal(args...) + Logger.Fatalln(args...) } + os.Exit(1) } // LoggerV2 does underlying logging work for grpclog. diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index f6e0dc1da8d..82af70e96f1 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -18,10 +18,15 @@ package grpclog +import ( + "fmt" +) + // PrefixLogger does logging with a prefix. // // Logging method on a nil logs without any prefix. type PrefixLogger struct { + logger DepthLoggerV2 prefix string } @@ -30,34 +35,47 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) { if pl != nil { // Handle nil, so the tests can pass in a nil logger. format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return } - Logger.Infof(format, args...) + InfoDepth(1, fmt.Sprintf(format, args...)) } // Warningf does warning logging. func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { if pl != nil { format = pl.prefix + format + pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) + return } - Logger.Warningf(format, args...) + WarningDepth(1, fmt.Sprintf(format, args...)) } // Errorf does error logging. func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { if pl != nil { format = pl.prefix + format + pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) + return } - Logger.Errorf(format, args...) + ErrorDepth(1, fmt.Sprintf(format, args...)) } // Debugf does info logging at verbose level 2. func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { - if Logger.V(2) { - pl.Infof(format, args...) + if !Logger.V(2) { + return + } + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return } + InfoDepth(1, fmt.Sprintf(format, args...)) } // NewPrefixLogger creates a prefix logger with the given prefix. -func NewPrefixLogger(prefix string) *PrefixLogger { - return &PrefixLogger{prefix: prefix} +func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { + return &PrefixLogger{logger: logger, prefix: prefix} } diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go new file mode 100644 index 00000000000..b25b0baec3c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strconv" + "time" +) + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. 
Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if d%r > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// EncodeDuration encodes the duration to the format grpc-timeout header +// accepts. +// +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +func EncodeDuration(t time.Duration) string { + // TODO: This is simplistic and not bandwidth efficient. Improve it. + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go new file mode 100644 index 00000000000..6f22bd89115 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +type mdExtraKey struct{} + +// WithExtraMetadata creates a new context with incoming md attached. +func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context { + return context.WithValue(ctx, mdExtraKey{}, md) +} + +// ExtraMetadata returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) { + md, ok = ctx.Value(mdExtraKey{}).(metadata.MD) + return +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go new file mode 100644 index 00000000000..4e7475060c1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -0,0 +1,84 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "errors" + "strings" +) + +// ParseMethod splits service and method from the input. It expects format +// "/service/method". +// +func ParseMethod(methodName string) (service, method string, _ error) { + if !strings.HasPrefix(methodName, "/") { + return "", "", errors.New("invalid method name: should start with /") + } + methodName = methodName[1:] + + pos := strings.LastIndex(methodName, "/") + if pos < 0 { + return "", "", errors.New("invalid method name: suffix /method is missing") + } + return methodName[:pos], methodName[pos+1:], nil +} + +const baseContentType = "application/grpc" + +// ContentSubtype returns the content-subtype for the given content-type. The +// given content-type must be a valid content-type that starts with +// "application/grpc". A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. +// +// contentType is assumed to be lowercase already. +func ContentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// ContentType builds full content type with the given sub-type. +// +// contentSubtype is assumed to be lowercase +func ContentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index c6fbe8bb1b2..818ca857998 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -25,6 +25,7 @@ import ( "time" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/serviceconfig" ) var ( @@ -40,9 +41,17 @@ var ( // NewRequestInfoContext creates a new context based on the argument context attaching // the passed in RequestInfo to the new context. NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context + // NewClientHandshakeInfoContext returns a copy of the input context with + // the passed in ClientHandshakeInfo struct added to it. + NewClientHandshakeInfoContext interface{} // func(context.Context, credentials.ClientHandshakeInfo) context.Context // ParseServiceConfigForTesting is for creating a fake // ClientConn for resolver testing only ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // EqualServiceConfigForTesting is for testing service config generation and + // parsing. 
Both a and b should be returned by ParseServiceConfigForTesting. + // This function compares the config without rawJSON stripped, in case the + // there's difference in white space. + EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index c368db62ea1..30423556658 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -32,6 +32,7 @@ import ( "sync" "time" + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" @@ -43,6 +44,8 @@ import ( // addresses from SRV records. Must not be changed after init time. var EnableSRVLookups = false +var logger = grpclog.Component("dns") + func init() { resolver.Register(NewBuilder()) } @@ -251,7 +254,7 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) } addr := ip + ":" + strconv.Itoa(int(s.Port)) - newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) } } return newAddrs, nil @@ -271,7 +274,7 @@ func handleDNSError(err error, lookupType string) error { err = filterError(err) if err != nil { err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) - grpclog.Infoln(err) + logger.Info(err) } return err } @@ -294,7 +297,7 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { // TXT record must have "grpc_config=" attribute in order to be used as service config. if !strings.HasPrefix(res, txtAttribute) { - grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) + logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) // This is not an error; it is the equivalent of not having a service config. return nil } @@ -326,13 +329,15 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { if hostErr != nil && (srvErr != nil || len(srv) == 0) { return nil, hostErr } - state := &resolver.State{ - Addresses: append(addrs, srv...), + + state := resolver.State{Addresses: addrs} + if len(srv) > 0 { + state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) } if !d.disableServiceConfig { state.ServiceConfig = d.lookupTXT() } - return state, nil + return &state, nil } // formatIP returns ok = false if addr is not a valid textual representation of an IP address. 
@@ -418,12 +423,12 @@ func canaryingSC(js string) string { var rcs []rawChoice err := json.Unmarshal([]byte(js), &rcs) if err != nil { - grpclog.Warningf("dns: error parsing service config json: %v", err) + logger.Warningf("dns: error parsing service config json: %v", err) return "" } cliHostname, err := os.Hostname() if err != nil { - grpclog.Warningf("dns: error getting client hostname: %v", err) + logger.Warningf("dns: error getting client hostname: %v", err) return "" } var sc string diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go new file mode 100644 index 00000000000..9b26414d40e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -0,0 +1,90 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig contains utility functions to parse service config. +package serviceconfig + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/grpclog" + externalserviceconfig "google.golang.org/grpc/serviceconfig" +) + +// BalancerConfig is the balancer config part that service config's +// loadBalancingConfig fields can be unmarshalled to. It's a json unmarshaller. +// +// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247 +type BalancerConfig struct { + Name string + Config externalserviceconfig.LoadBalancingConfig +} + +type intermediateBalancerConfig []map[string]json.RawMessage + +// UnmarshalJSON implements json unmarshaller. +func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { + var ir intermediateBalancerConfig + err := json.Unmarshal(b, &ir) + if err != nil { + return err + } + + for i, lbcfg := range ir { + if len(lbcfg) != 1 { + return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + } + var ( + name string + jsonCfg json.RawMessage + ) + // Get the key:value pair from the map. + for name, jsonCfg = range lbcfg { + } + builder := balancer.Get(name) + if builder == nil { + // If the balancer is not registered, move on to the next config. + // This is not an error. + continue + } + bc.Name = name + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + if string(jsonCfg) != "{}" { + grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + // Stop at this, though the builder doesn't support parsing config. + return nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) + } + bc.Config = cfg + return nil + } + // This is reached when the for loop iterates over all entries, but didn't + // return. This means we had a loadBalancingConfig slice but did not + // encounter a registered policy. 
The config is considered invalid in this + // case. + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found") +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 681260692e3..710223b8ded 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -97,7 +97,7 @@ func (s *Status) Err() error { if s.Code() == codes.OK { return nil } - return (*Error)(s.Proto()) + return &Error{e: s.Proto()} } // WithDetails returns a new status with the provided details messages appended to the status. @@ -136,26 +136,27 @@ func (s *Status) Details() []interface{} { return details } -// Error is an alias of a status proto. It implements error and Status, -// and a nil Error should never be returned by this package. -type Error spb.Status +// Error wraps a pointer of a status proto. It implements error and Status, +// and a nil *Error should never be returned by this package. +type Error struct { + e *spb.Status +} -func (se *Error) Error() string { - p := (*spb.Status)(se) - return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) +func (e *Error) Error() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(e.e.GetCode()), e.e.GetMessage()) } // GRPCStatus returns the Status represented by se. -func (se *Error) GRPCStatus() *Status { - return FromProto((*spb.Status)(se)) +func (e *Error) GRPCStatus() *Status { + return FromProto(e.e) } // Is implements future error.Is functionality. // A Error is equivalent if the code and message are identical. -func (se *Error) Is(target error) bool { +func (e *Error) Is(target error) bool { tse, ok := target.(*Error) if !ok { return false } - return proto.Equal((*spb.Status)(se), (*spb.Status)(tse)) + return proto.Equal(e.e, tse.e) } diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go index 43281a3e078..c50468a0fc8 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -32,11 +32,13 @@ import ( "google.golang.org/grpc/grpclog" ) +var logger = grpclog.Component("core") + // GetCPUTime returns the how much CPU time has passed since the start of this process. func GetCPUTime() int64 { var ts unix.Timespec if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { - grpclog.Fatal(err) + logger.Fatal(err) } return ts.Nano() } diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index d3fd9dab333..adae60d6518 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -18,6 +18,8 @@ * */ +// Package syscall provides functionalities that grpc uses to get low-level +// operating system stats/info. 
package syscall import ( @@ -29,10 +31,11 @@ import ( ) var once sync.Once +var logger = grpclog.Component("core") func log() { once.Do(func() { - grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.") + logger.Info("CPU time info is unavailable on non-linux or appengine environment.") }) } diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index ddee20b6bef..40ef23923fd 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -505,7 +505,9 @@ func (l *loopyWriter) run() (err error) { // 1. When the connection is closed by some other known issue. // 2. User closed the connection. // 3. A graceful close of connection. - infof("transport: loopyWriter.run returning. %v", err) + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter.run returning. %v", err) + } err = nil } }() @@ -605,7 +607,9 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { if l.side == serverSide { str, ok := l.estdStreams[h.streamID] if !ok { - warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + if logger.V(logLevel) { + logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + } return nil } // Case 1.A: Server is responding back with headers. @@ -658,7 +662,9 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He l.hBuf.Reset() for _, f := range hf { if err := l.hEnc.WriteField(f); err != nil { - warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err) + if logger.V(logLevel) { + logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) + } } } var ( @@ -857,38 +863,45 @@ func (l *loopyWriter) processData() (bool, error) { return false, nil } var ( - idx int buf []byte ) - if len(dataItem.h) != 0 { // data header has not been written out yet. - buf = dataItem.h - } else { - idx = 1 - buf = dataItem.d - } - size := http2MaxFrameLen - if len(buf) < size { - size = len(buf) - } + // Figure out the maximum size we can send + maxSize := http2MaxFrameLen if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. str.state = waitingOnStreamQuota return false, nil - } else if strQuota < size { - size = strQuota + } else if maxSize > strQuota { + maxSize = strQuota + } + if maxSize > int(l.sendQuota) { // connection-level flow control. + maxSize = int(l.sendQuota) + } + // Compute how much of the header and data we can send within quota and max frame length + hSize := min(maxSize, len(dataItem.h)) + dSize := min(maxSize-hSize, len(dataItem.d)) + if hSize != 0 { + if dSize == 0 { + buf = dataItem.h + } else { + // We can add some data to grpc message header to distribute bytes more equally across frames. + // Copy on the stack to avoid generating garbage + var localBuf [http2MaxFrameLen]byte + copy(localBuf[:hSize], dataItem.h) + copy(localBuf[hSize:], dataItem.d[:dSize]) + buf = localBuf[:hSize+dSize] + } + } else { + buf = dataItem.d } - if l.sendQuota < uint32(size) { // connection-level flow control. - size = int(l.sendQuota) - } + size := hSize + dSize + // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. 
- if dataItem.endStream && size == len(buf) { - // buf contains either data or it contains header but data is empty. - if idx == 1 || len(dataItem.d) == 0 { - endStream = true - } + if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + endStream = true } if dataItem.onEachWrite != nil { dataItem.onEachWrite() @@ -896,14 +909,10 @@ func (l *loopyWriter) processData() (bool, error) { if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { return false, err } - buf = buf[size:] str.bytesOutStanding += size l.sendQuota -= uint32(size) - if idx == 0 { - dataItem.h = buf - } else { - dataItem.d = buf - } + dataItem.h = dataItem.h[hSize:] + dataItem.d = dataItem.d[dSize:] if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. str.itl.dequeue() @@ -924,3 +933,10 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index fc44e976195..05d3871e628 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -39,6 +39,7 @@ import ( "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -57,7 +58,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? we did before - contentSubtype, validContentType := contentSubtype(contentType) + contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { return nil, errors.New("invalid gRPC request content-type") } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 1cc586f73e7..e7f2321131e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -32,6 +32,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -41,6 +42,7 @@ import ( "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) @@ -161,7 +163,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. 
-func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -214,12 +216,20 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne } } if transportCreds != nil { - scheme = "https" - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn) + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the credential handshaker. This makes it possible for + // address specific arbitrary data to reach the credential handshaker. + contextWithHandshakeInfo := internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context) + connectCtx = contextWithHandshakeInfo(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } isSecure = true + if transportCreds.Info().SecurityProtocol == "tls" { + scheme = "https" + } } dynamicWindow := true icwz := int32(initialWindowSize) @@ -345,7 +355,9 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) err := t.loopy.run() if err != nil { - errorf("transport: loopyWriter.run returning. Err: %v", err) + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + } } // If it's a connection error, let reader goroutine handle it // since there might be data in the buffers. @@ -425,7 +437,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)}) headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) if callHdr.PreviousAttempts > 0 { @@ -440,7 +452,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) // Send out timeout regardless its value. The server can detect timeout context by itself. // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. 
timeout := time.Until(dl) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)}) } for k, v := range authData { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) @@ -554,13 +566,26 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call return callAuthData, nil } +// PerformedIOError wraps an error to indicate IO may have been performed +// before the error occurred. +type PerformedIOError struct { + Err error +} + +// Error implements error. +func (p PerformedIOError) Error() string { + return p.Err.Error() +} + // NewStream creates a stream and registers it into the transport as "active" // streams. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - return nil, err + // We may have performed I/O in the per-RPC creds callback, so do not + // allow transparent retry. + return nil, PerformedIOError{err} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -854,18 +879,10 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e df := &dataFrame{ streamID: s.id, endStream: opts.Last, + h: hdr, + d: data, } - if hdr != nil || data != nil { // If it's not an empty data frame. - // Add some data to grpc message header so that we can equally - // distribute bytes across frames. - emptyLen := http2MaxFrameLen - len(hdr) - if emptyLen > len(data) { - emptyLen = len(data) - } - hdr = append(hdr, data[:emptyLen]...) - data = data[emptyLen:] - df.h, df.d = hdr, data - // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler. + if hdr != nil || data != nil { // If it's not an empty data frame, check quota. 
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { return err } @@ -999,7 +1016,9 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { - warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + if logger.V(logLevel) { + logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + } statusCode = codes.Unknown } if statusCode == codes.Canceled { @@ -1081,7 +1100,9 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { return } if f.ErrCode == http2.ErrCodeEnhanceYourCalm { - infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + if logger.V(logLevel) { + logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } } id := f.LastStreamID if id > 0 && id%2 != 1 { @@ -1311,7 +1332,9 @@ func (t *http2Client) reader() { case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: - errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + if logger.V(logLevel) { + logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + } } } } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index fa33ffb1885..04cbedf7945 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -34,10 +34,10 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/keepalive" @@ -289,7 +289,9 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler if err := t.loopy.run(); err != nil { - errorf("transport: loopyWriter.run returning. Err: %v", err) + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + } } t.conn.Close() close(t.writerDone) @@ -360,7 +362,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } s.ctx, err = t.inTapHandle(s.ctx, info) if err != nil { - warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + } t.controlBuf.put(&cleanupStream{ streamID: s.id, rst: true, @@ -391,7 +395,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if streamID%2 != 1 || streamID <= t.maxStreamID { t.mu.Unlock() // illegal gRPC stream id. - errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + } s.cancel() return true } @@ -454,7 +460,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. 
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { - warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + } t.mu.Lock() s := t.activeStreams[se.StreamID] t.mu.Unlock() @@ -474,7 +482,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. t.Close() return } - warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + } t.Close() return } @@ -497,7 +507,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. default: - errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } } } } @@ -719,7 +731,9 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. - errorf("transport: Got too many pings from the client, closing the connection.") + if logger.V(logLevel) { + logger.Errorf("transport: Got too many pings from the client, closing the connection.") + } t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) } } @@ -752,7 +766,9 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { var sz int64 for _, f := range hdrFrame.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + if logger.V(logLevel) { + logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + } return false } } @@ -789,7 +805,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) if s.sendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } @@ -839,7 +855,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { } } else { // Send a trailer only response. 
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) } } headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) @@ -849,7 +865,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. - grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) } else { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } @@ -909,13 +925,6 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e return ContextErr(s.ctx.Err()) } } - // Add some data to header frame so that we can equally distribute bytes across frames. - emptyLen := http2MaxFrameLen - len(hdr) - if emptyLen > len(data) { - emptyLen = len(data) - } - hdr = append(hdr, data[:emptyLen]...) - data = data[emptyLen:] df := &dataFrame{ streamID: s.id, h: hdr, @@ -987,7 +996,9 @@ func (t *http2Server) keepalive() { select { case <-ageTimer.C: // Close the connection after grace period. - infof("transport: closing server transport due to maximum connection age.") + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to maximum connection age.") + } t.Close() case <-t.done: } @@ -1004,7 +1015,9 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - infof("transport: closing server transport due to idleness.") + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to idleness.") + } t.Close() return } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 8f5f3349d90..5e1e7a65da2 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -37,6 +37,8 @@ import ( "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) @@ -50,7 +52,7 @@ const ( // "proto" as a suffix after "+" or ";". See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. - baseContentType = "application/grpc" + ) var ( @@ -97,6 +99,7 @@ var ( // 504 Gateway timeout - UNAVAILABLE. http.StatusGatewayTimeout: codes.Unavailable, } + logger = grpclog.Component("transport") ) type parsedHeaderData struct { @@ -182,46 +185,6 @@ func isWhitelistedHeader(hdr string) bool { } } -// contentSubtype returns the content-subtype for the given content-type. The -// given content-type must be a valid content-type that starts with -// "application/grpc". A content-subtype will follow "application/grpc" after a -// "+" or ";". See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -// -// If contentType is not a valid content-type for gRPC, the boolean -// will be false, otherwise true. 
If content-type == "application/grpc", -// "application/grpc+", or "application/grpc;", the boolean will be true, -// but no content-subtype will be returned. -// -// contentType is assumed to be lowercase already. -func contentSubtype(contentType string) (string, bool) { - if contentType == baseContentType { - return "", true - } - if !strings.HasPrefix(contentType, baseContentType) { - return "", false - } - // guaranteed since != baseContentType and has baseContentType prefix - switch contentType[len(baseContentType)] { - case '+', ';': - // this will return true for "application/grpc+" or "application/grpc;" - // which the previous validContentType function tested to be valid, so we - // just say that no content-subtype is specified in this case - return contentType[len(baseContentType)+1:], true - default: - return "", false - } -} - -// contentSubtype is assumed to be lowercase -func contentType(contentSubtype string) string { - if contentSubtype == "" { - return baseContentType - } - return baseContentType + "+" + contentSubtype -} - func (d *decodeState) status() *status.Status { if d.data.statusGen == nil { // No status-details were provided; generate status using code/msg. @@ -340,7 +303,7 @@ func (d *decodeState) addMetadata(k, v string) { func (d *decodeState) processHeaderField(f hpack.HeaderField) { switch f.Name { case "content-type": - contentSubtype, validContentType := contentSubtype(f.Value) + contentSubtype, validContentType := grpcutil.ContentSubtype(f.Value) if !validContentType { d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) return @@ -412,7 +375,9 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) { } v, err := decodeMetadataHeader(f.Name, f.Value) if err != nil { - errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + if logger.V(logLevel) { + logger.Errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + } return } d.addMetadata(f.Name, v) @@ -449,41 +414,6 @@ func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { return } -const maxTimeoutValue int64 = 100000000 - 1 - -// div does integer division and round-up the result. Note that this is -// equivalent to (d+r-1)/r but has less chance to overflow. -func div(d, r time.Duration) int64 { - if m := d % r; m > 0 { - return int64(d/r + 1) - } - return int64(d / r) -} - -// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. -func encodeTimeout(t time.Duration) string { - if t <= 0 { - return "0n" - } - if d := div(t, time.Nanosecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "n" - } - if d := div(t, time.Microsecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "u" - } - if d := div(t, time.Millisecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "m" - } - if d := div(t, time.Second); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "S" - } - if d := div(t, time.Minute); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "M" - } - // Note that maxTimeoutValue * time.Hour > MaxInt64. 
- return strconv.FormatInt(div(t, time.Hour), 10) + "H" -} - func decodeTimeout(s string) (time.Duration, error) { size := len(s) if size < 2 { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index a30da9eb324..b74030a9687 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -35,11 +35,14 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" ) +const logLevel = 2 + type bufferPool struct { pool sync.Pool } @@ -568,17 +571,10 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 } -// TargetInfo contains the information of the target such as network address and metadata. -type TargetInfo struct { - Addr string - Metadata interface{} - Authority string -} - // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) } // Options provides additional hints and information for message diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go deleted file mode 100644 index c9f79dc5336..00000000000 --- a/vendor/google.golang.org/grpc/naming/dns_resolver.go +++ /dev/null @@ -1,293 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package naming - -import ( - "context" - "errors" - "fmt" - "net" - "strconv" - "time" - - "google.golang.org/grpc/grpclog" -) - -const ( - defaultPort = "443" - defaultFreq = time.Minute * 30 -) - -var ( - errMissingAddr = errors.New("missing address") - errWatcherClose = errors.New("watcher has been closed") - - lookupHost = net.DefaultResolver.LookupHost - lookupSRV = net.DefaultResolver.LookupSRV -) - -// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and -// create watchers that poll the DNS server using the frequency set by freq. -func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { - return &dnsResolver{freq: freq}, nil -} - -// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create -// watchers that poll the DNS server using the default frequency defined by defaultFreq. 
-func NewDNSResolver() (Resolver, error) { - return NewDNSResolverWithFreq(defaultFreq) -} - -// dnsResolver handles name resolution for names following the DNS scheme -type dnsResolver struct { - // frequency of polling the DNS server that the watchers created by this resolver will use. - freq time.Duration -} - -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. -func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false - } - if ip.To4() != nil { - return addr, true - } - return "[" + addr + "]", true -} - -// parseTarget takes the user input target string, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. -// examples: -// target: "www.google.com" returns host: "www.google.com", port: "443" -// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" -// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" -// target: ":80" returns host: "localhost", port: "80" -// target: ":" returns host: "localhost", port: "443" -func parseTarget(target string) (host, port string, err error) { - if target == "" { - return "", "", errMissingAddr - } - - if ip := net.ParseIP(target); ip != nil { - // target is an IPv4 or IPv6(without brackets) address - return target, defaultPort, nil - } - if host, port, err := net.SplitHostPort(target); err == nil { - // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port - if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. - host = "localhost" - } - if port == "" { - // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. - port = defaultPort - } - return host, port, nil - } - if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { - // target doesn't have port - return host, port, nil - } - return "", "", fmt.Errorf("invalid target address %v", target) -} - -// Resolve creates a watcher that watches the name resolution of the target. -func (r *dnsResolver) Resolve(target string) (Watcher, error) { - host, port, err := parseTarget(target) - if err != nil { - return nil, err - } - - if net.ParseIP(host) != nil { - ipWatcher := &ipWatcher{ - updateChan: make(chan *Update, 1), - } - host, _ = formatIP(host) - ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} - return ipWatcher, nil - } - - ctx, cancel := context.WithCancel(context.Background()) - return &dnsWatcher{ - r: r, - host: host, - port: port, - ctx: ctx, - cancel: cancel, - t: time.NewTimer(0), - }, nil -} - -// dnsWatcher watches for the name resolution update for a specific target -type dnsWatcher struct { - r *dnsResolver - host string - port string - // The latest resolved address set - curAddrs map[string]*Update - ctx context.Context - cancel context.CancelFunc - t *time.Timer -} - -// ipWatcher watches for the name resolution update for an IP address. -type ipWatcher struct { - updateChan chan *Update -} - -// Next returns the address resolution Update for the target. For IP address, -// the resolution is itself, thus polling name server is unnecessary. 
Therefore, -// Next() will return an Update the first time it is called, and will be blocked -// for all following calls as no Update exists until watcher is closed. -func (i *ipWatcher) Next() ([]*Update, error) { - u, ok := <-i.updateChan - if !ok { - return nil, errWatcherClose - } - return []*Update{u}, nil -} - -// Close closes the ipWatcher. -func (i *ipWatcher) Close() { - close(i.updateChan) -} - -// AddressType indicates the address type returned by name resolution. -type AddressType uint8 - -const ( - // Backend indicates the server is a backend server. - Backend AddressType = iota - // GRPCLB indicates the server is a grpclb load balancer. - GRPCLB -) - -// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The -// name resolver used by the grpclb balancer is required to provide this type of metadata in -// its address updates. -type AddrMetadataGRPCLB struct { - // AddrType is the type of server (grpc load balancer or backend). - AddrType AddressType - // ServerName is the name of the grpc load balancer. Used for authentication. - ServerName string -} - -// compileUpdate compares the old resolved addresses and newly resolved addresses, -// and generates an update list -func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { - var res []*Update - for a, u := range w.curAddrs { - if _, ok := newAddrs[a]; !ok { - u.Op = Delete - res = append(res, u) - } - } - for a, u := range newAddrs { - if _, ok := w.curAddrs[a]; !ok { - res = append(res, u) - } - } - return res -} - -func (w *dnsWatcher) lookupSRV() map[string]*Update { - newAddrs := make(map[string]*Update) - _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) - if err != nil { - grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) - return nil - } - for _, s := range srvs { - lbAddrs, err := lookupHost(w.ctx, s.Target) - if err != nil { - grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err) - continue - } - for _, a := range lbAddrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + strconv.Itoa(int(s.Port)) - newAddrs[addr] = &Update{Addr: addr, - Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} - } - } - return newAddrs -} - -func (w *dnsWatcher) lookupHost() map[string]*Update { - newAddrs := make(map[string]*Update) - addrs, err := lookupHost(w.ctx, w.host) - if err != nil { - grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) - return nil - } - for _, a := range addrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + w.port - newAddrs[addr] = &Update{Addr: addr} - } - return newAddrs -} - -func (w *dnsWatcher) lookup() []*Update { - newAddrs := w.lookupSRV() - if newAddrs == nil { - // If failed to get any balancer address (either no corresponding SRV for the - // target, or caused by failure during resolution/parsing of the balancer target), - // return any A record info available. - newAddrs = w.lookupHost() - } - result := w.compileUpdate(newAddrs) - w.curAddrs = newAddrs - return result -} - -// Next returns the resolved address update(delta) for the target. If there's no -// change, it will sleep for 30 mins and try to resolve again after that. 
-func (w *dnsWatcher) Next() ([]*Update, error) { - for { - select { - case <-w.ctx.Done(): - return nil, errWatcherClose - case <-w.t.C: - } - result := w.lookup() - // Next lookup should happen after an interval defined by w.r.freq. - w.t.Reset(w.r.freq) - if len(result) > 0 { - return result, nil - } - } -} - -func (w *dnsWatcher) Close() { - w.cancel() -} diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go deleted file mode 100644 index f4c1c8b6894..00000000000 --- a/vendor/google.golang.org/grpc/naming/naming.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package naming defines the naming API and related data structures for gRPC. -// -// This package is deprecated: please use package resolver instead. -package naming - -// Operation defines the corresponding operations for a name resolution change. -// -// Deprecated: please use package resolver. -type Operation uint8 - -const ( - // Add indicates a new address is added. - Add Operation = iota - // Delete indicates an existing address is deleted. - Delete -) - -// Update defines a name resolution update. Notice that it is not valid having both -// empty string Addr and nil Metadata in an Update. -// -// Deprecated: please use package resolver. -type Update struct { - // Op indicates the operation of the update. - Op Operation - // Addr is the updated address. It is empty string if there is no address update. - Addr string - // Metadata is the updated metadata. It is nil if there is no metadata update. - // Metadata is not required for a custom naming implementation. - Metadata interface{} -} - -// Resolver creates a Watcher for a target to track its resolution changes. -// -// Deprecated: please use package resolver. -type Resolver interface { - // Resolve creates a Watcher for target. - Resolve(target string) (Watcher, error) -} - -// Watcher watches for the updates on the specified target. -// -// Deprecated: please use package resolver. -type Watcher interface { - // Next blocks until an update or error happens. It may return one or more - // updates. The first call should get the full set of the results. It should - // return an error if and only if Watcher cannot recover. - Next() ([]*Update, error) - // Close closes the Watcher. - Close() -} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 00447894f07..a58174b6f43 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -20,80 +20,31 @@ package grpc import ( "context" - "fmt" "io" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) -// v2PickerWrapper wraps a balancer.Picker while providing the -// balancer.V2Picker API. 
It requires a pickerWrapper to generate errors -// including the latest connectionError. To be deleted when balancer.Picker is -// updated to the balancer.V2Picker API. -type v2PickerWrapper struct { - picker balancer.Picker - connErr *connErr -} - -func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - sc, done, err := v.picker.Pick(info.Ctx, info) - if err != nil { - if err == balancer.ErrTransientFailure { - return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError())) - } - return balancer.PickResult{}, err - } - return balancer.PickResult{SubConn: sc, Done: done}, nil -} - // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { mu sync.Mutex done bool blockingCh chan struct{} - picker balancer.V2Picker - - // The latest connection error. TODO: remove when V1 picker is deprecated; - // balancer should be responsible for providing the error. - *connErr -} - -type connErr struct { - mu sync.Mutex - err error -} - -func (c *connErr) updateConnectionError(err error) { - c.mu.Lock() - c.err = err - c.mu.Unlock() -} - -func (c *connErr) connectionError() error { - c.mu.Lock() - err := c.err - c.mu.Unlock() - return err + picker balancer.Picker } func newPickerWrapper() *pickerWrapper { - return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}} + return &pickerWrapper{blockingCh: make(chan struct{})} } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { - pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr}) -} - -// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. -func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) { pw.mu.Lock() if pw.done { pw.mu.Unlock() @@ -154,8 +105,6 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var errStr string if lastPickErr != nil { errStr = "latest balancer error: " + lastPickErr.Error() - } else if connectionErr := pw.connectionError(); connectionErr != nil { - errStr = "latest connection error: " + connectionErr.Error() } else { errStr = ctx.Err().Error() } @@ -180,23 +129,22 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if err == balancer.ErrNoSubConnAvailable { continue } - if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() { - if !failfast { - lastPickErr = err - continue - } - return nil, nil, status.Error(codes.Unavailable, err.Error()) - } if _, ok := status.FromError(err); ok { + // Status error: end the RPC unconditionally with this status. return nil, nil, err } - // err is some other error. - return nil, nil, status.Error(codes.Unknown, err.Error()) + // For all other errors, wait for ready RPCs should block and other + // RPCs should fail with unavailable. + if !failfast { + lastPickErr = err + continue + } + return nil, nil, status.Error(codes.Unavailable, err.Error()) } acw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { - grpclog.Error("subconn returned from pick is not *acBalancerWrapper") + logger.Error("subconn returned from pick is not *acBalancerWrapper") continue } if t, ok := acw.getAddrConn().getReadyTransport(); ok { @@ -210,7 +158,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
// DoneInfo with default value works. pickResult.Done(balancer.DoneInfo{}) } - grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") // If ok == false, ac.state is not READY. // A valid picker always returns READY subConn. This means the state of ac // just changed, and picker will be updated shortly. diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index c43dac9ad84..56e33f6c76b 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -20,13 +20,10 @@ package grpc import ( "errors" + "fmt" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/status" ) // PickFirstBalancerName is the name of the pick_first balancer. @@ -52,30 +49,16 @@ type pickfirstBalancer struct { sc balancer.SubConn } -var _ balancer.V2Balancer = &pickfirstBalancer{} // Assert we implement v2 - -func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { - if err != nil { - b.ResolverError(err) - return - } - b.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) // Ignore error -} - -func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - b.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s}) -} - func (b *pickfirstBalancer) ResolverError(err error) { switch b.state { case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: // Set a failing picker if we don't have a good picker. 
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: status.Errorf(codes.Unavailable, "name resolver error: %v", err)}}, - ) + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } - if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err) + if logger.V(2) { + logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) } } @@ -88,13 +71,13 @@ func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) e var err error b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) if err != nil { - if grpclog.V(2) { - grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } b.state = connectivity.TransientFailure b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: status.Errorf(codes.Unavailable, "error creating connection: %v", err)}}, - ) + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) return balancer.ErrBadResolverState } b.state = connectivity.Idle @@ -108,12 +91,12 @@ func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) e } func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { - if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) } if b.sc != sc { - if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + if logger.V(2) { + logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") } return } @@ -129,15 +112,9 @@ func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.S case connectivity.Connecting: b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) case connectivity.TransientFailure: - err := balancer.ErrTransientFailure - // TODO: this can be unconditional after the V1 API is removed, as - // SubConnState will always contain a connection error. - if s.ConnectionError != nil { - err = balancer.TransientFailureError(s.ConnectionError) - } b.cc.UpdateState(balancer.State{ ConnectivityState: s.ConnectivityState, - Picker: &picker{err: err}, + Picker: &picker{err: s.ConnectionError}, }) } } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index 89b89622447..382612d520d 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -1,15 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: grpc_reflection_v1alpha/reflection.proto +// source: reflection/grpc_reflection_v1alpha/reflection.proto package grpc_reflection_v1alpha import ( - context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" math "math" ) @@ -47,7 +43,7 @@ func (m *ServerReflectionRequest) Reset() { *m = ServerReflectionRequest func (m *ServerReflectionRequest) String() string { return proto.CompactTextString(m) } func (*ServerReflectionRequest) ProtoMessage() {} func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_42a8ac412db3cb03, []int{0} + return fileDescriptor_e8cf9f2921ad6c95, []int{0} } func (m *ServerReflectionRequest) XXX_Unmarshal(b []byte) error { @@ -105,7 +101,8 @@ func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_M func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} -func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {} +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { +} func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} @@ -177,7 +174,7 @@ func (m *ExtensionRequest) Reset() { *m = ExtensionRequest{} } func (m *ExtensionRequest) String() string { return proto.CompactTextString(m) } func (*ExtensionRequest) ProtoMessage() {} func (*ExtensionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_42a8ac412db3cb03, []int{1} + return fileDescriptor_e8cf9f2921ad6c95, []int{1} } func (m *ExtensionRequest) XXX_Unmarshal(b []byte) error { @@ -234,7 +231,7 @@ func (m *ServerReflectionResponse) Reset() { *m = ServerReflectionRespon func (m *ServerReflectionResponse) String() string { return proto.CompactTextString(m) } func (*ServerReflectionResponse) ProtoMessage() {} func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_42a8ac412db3cb03, []int{2} + return fileDescriptor_e8cf9f2921ad6c95, []int{2} } func (m *ServerReflectionResponse) XXX_Unmarshal(b []byte) error { @@ -289,7 +286,8 @@ type ServerReflectionResponse_ErrorResponse struct { ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` } -func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() {} +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { } @@ -360,7 +358,7 @@ func (m *FileDescriptorResponse) Reset() { *m = FileDescriptorResponse{} func (m *FileDescriptorResponse) String() string { return proto.CompactTextString(m) } func (*FileDescriptorResponse) ProtoMessage() {} func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_42a8ac412db3cb03, []int{3} + return fileDescriptor_e8cf9f2921ad6c95, []int{3} } func (m *FileDescriptorResponse) XXX_Unmarshal(b []byte) error { @@ -404,7 +402,7 @@ func (m *ExtensionNumberResponse) Reset() { *m = ExtensionNumberResponse func (m *ExtensionNumberResponse) String() string { return proto.CompactTextString(m) } func (*ExtensionNumberResponse) ProtoMessage() {} func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_42a8ac412db3cb03, []int{4} + return 
fileDescriptor_e8cf9f2921ad6c95, []int{4} } func (m *ExtensionNumberResponse) XXX_Unmarshal(b []byte) error { @@ -453,7 +451,7 @@ func (m *ListServiceResponse) Reset() { *m = ListServiceResponse{} } func (m *ListServiceResponse) String() string { return proto.CompactTextString(m) } func (*ListServiceResponse) ProtoMessage() {} func (*ListServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_42a8ac412db3cb03, []int{5} + return fileDescriptor_e8cf9f2921ad6c95, []int{5} } func (m *ListServiceResponse) XXX_Unmarshal(b []byte) error { @@ -496,7 +494,7 @@ func (m *ServiceResponse) Reset() { *m = ServiceResponse{} } func (m *ServiceResponse) String() string { return proto.CompactTextString(m) } func (*ServiceResponse) ProtoMessage() {} func (*ServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_42a8ac412db3cb03, []int{6} + return fileDescriptor_e8cf9f2921ad6c95, []int{6} } func (m *ServiceResponse) XXX_Unmarshal(b []byte) error { @@ -538,7 +536,7 @@ func (m *ErrorResponse) Reset() { *m = ErrorResponse{} } func (m *ErrorResponse) String() string { return proto.CompactTextString(m) } func (*ErrorResponse) ProtoMessage() {} func (*ErrorResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_42a8ac412db3cb03, []int{7} + return fileDescriptor_e8cf9f2921ad6c95, []int{7} } func (m *ErrorResponse) XXX_Unmarshal(b []byte) error { @@ -585,166 +583,52 @@ func init() { } func init() { - proto.RegisterFile("grpc_reflection_v1alpha/reflection.proto", fileDescriptor_42a8ac412db3cb03) -} - -var fileDescriptor_42a8ac412db3cb03 = []byte{ - // 656 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x51, 0x73, 0xd2, 0x40, - 0x10, 0x6e, 0x5a, 0x68, 0x87, 0x85, 0x02, 0x5e, 0x2b, 0xa4, 0x3a, 0x75, 0x98, 0x68, 0x35, 0x75, - 0x1c, 0xda, 0xe2, 0x8c, 0x3f, 0x80, 0xaa, 0x83, 0x33, 0xb5, 0x75, 0x0e, 0x5f, 0x1c, 0x1f, 0x6e, - 0x02, 0x2c, 0x34, 0x1a, 0x72, 0xf1, 0x2e, 0x45, 0x79, 0xf2, 0x47, 0xf8, 0xa3, 0xfc, 0x4b, 0x3e, - 0x3a, 0x77, 0x09, 0x21, 0xa4, 0x44, 0xa7, 0x4f, 0x30, 0xdf, 0xee, 0xde, 0xb7, 0xbb, 0xdf, 0xb7, - 0x01, 0x7b, 0x22, 0x82, 0x21, 0x13, 0x38, 0xf6, 0x70, 0x18, 0xba, 0xdc, 0x67, 0xb3, 0x33, 0xc7, - 0x0b, 0xae, 0x9d, 0x93, 0x25, 0xd4, 0x0e, 0x04, 0x0f, 0x39, 0x69, 0xaa, 0xcc, 0x76, 0x0a, 0x8e, - 0x33, 0xad, 0x3f, 0x9b, 0xd0, 0xec, 0xa3, 0x98, 0xa1, 0xa0, 0x49, 0x90, 0xe2, 0xb7, 0x1b, 0x94, - 0x21, 0x21, 0x50, 0xb8, 0xe6, 0x32, 0x34, 0x8d, 0x96, 0x61, 0x97, 0xa8, 0xfe, 0x4f, 0x9e, 0x43, - 0x7d, 0xec, 0x7a, 0xc8, 0x06, 0x73, 0xa6, 0x7e, 0x7d, 0x67, 0x8a, 0xe6, 0x96, 0x8a, 0xf7, 0x36, - 0x68, 0x55, 0x21, 0xdd, 0xf9, 0xdb, 0x18, 0x27, 0xaf, 0xa0, 0xa1, 0x73, 0x87, 0xdc, 0x0f, 0x1d, - 0xd7, 0x77, 0xfd, 0x09, 0x93, 0xf3, 0xe9, 0x80, 0x7b, 0x66, 0x21, 0xae, 0xd8, 0x57, 0xf1, 0xf3, - 0x24, 0xdc, 0xd7, 0x51, 0x32, 0x81, 0x83, 0x6c, 0x1d, 0xfe, 0x08, 0xd1, 0x97, 0x2e, 0xf7, 0xcd, - 0x62, 0xcb, 0xb0, 0xcb, 0x9d, 0xe3, 0x76, 0xce, 0x40, 0xed, 0x37, 0x8b, 0xcc, 0x78, 0x8a, 0xde, - 0x06, 0x6d, 0xae, 0xb2, 0x24, 0x19, 0xa4, 0x0b, 0x87, 0x8e, 0xe7, 0x2d, 0x1f, 0x67, 0xfe, 0xcd, - 0x74, 0x80, 0x42, 0x32, 0x3e, 0x66, 0xe1, 0x3c, 0x40, 0x73, 0x3b, 0xee, 0xf3, 0xc0, 0xf1, 0xbc, - 0xa4, 0xec, 0x32, 0x4a, 0xba, 0x1a, 0x7f, 0x9c, 0x07, 0x48, 0x8e, 0x60, 0xd7, 0x73, 0x65, 0xc8, - 0x24, 0x8a, 0x99, 0x3b, 0x44, 0x69, 0xee, 0xc4, 0x35, 0x15, 0x05, 0xf7, 0x63, 0xb4, 0x7b, 0x0f, - 0x6a, 0x53, 0x94, 0xd2, 0x99, 0x20, 0x13, 0x51, 0x63, 0xd6, 0x18, 0xea, 0xd9, 0x66, 0xc9, 0x33, - 0xa8, 0xa5, 0xa6, 0xd6, 0x3d, 0x44, 0xdb, 0xaf, 0x2e, 
0x61, 0x4d, 0x7b, 0x0c, 0xf5, 0x6c, 0xdb, - 0xe6, 0x66, 0xcb, 0xb0, 0x8b, 0xb4, 0x86, 0xab, 0x8d, 0x5a, 0xbf, 0x0b, 0x60, 0xde, 0x96, 0x58, - 0x06, 0xdc, 0x97, 0x48, 0x0e, 0x01, 0x66, 0x8e, 0xe7, 0x8e, 0x58, 0x4a, 0xe9, 0x92, 0x46, 0x7a, - 0x4a, 0xee, 0xcf, 0x50, 0xe7, 0xc2, 0x9d, 0xb8, 0xbe, 0xe3, 0x2d, 0xfa, 0xd6, 0x34, 0xe5, 0xce, - 0x69, 0xae, 0x02, 0x39, 0x76, 0xa2, 0xb5, 0xc5, 0x4b, 0x8b, 0x61, 0xbf, 0x82, 0xa9, 0x75, 0x1e, - 0xa1, 0x1c, 0x0a, 0x37, 0x08, 0xb9, 0x60, 0x22, 0xee, 0x4b, 0x3b, 0xa4, 0xdc, 0x39, 0xc9, 0x25, - 0x51, 0x26, 0x7b, 0x9d, 0xd4, 0x2d, 0xc6, 0xe9, 0x6d, 0x50, 0x6d, 0xb9, 0xdb, 0x11, 0xf2, 0x1d, - 0x1e, 0xad, 0xd7, 0x3a, 0xa1, 0x2c, 0xfe, 0x67, 0xae, 0x8c, 0x01, 0x52, 0x9c, 0x0f, 0xd7, 0xd8, - 0x23, 0x21, 0x1e, 0x41, 0x63, 0xc5, 0x20, 0x4b, 0xc2, 0x6d, 0x4d, 0xf8, 0x22, 0x97, 0xf0, 0x62, - 0x69, 0xa0, 0x14, 0xd9, 0x7e, 0xda, 0x57, 0x09, 0xcb, 0x15, 0x54, 0x51, 0x88, 0xf4, 0x06, 0x77, - 0xf4, 0xeb, 0x4f, 0xf3, 0xc7, 0x51, 0xe9, 0xa9, 0x77, 0x77, 0x31, 0x0d, 0x74, 0x09, 0xd4, 0x97, - 0x86, 0x8d, 0x30, 0xeb, 0x02, 0x1a, 0xeb, 0xf7, 0x4e, 0x3a, 0x70, 0x3f, 0x2b, 0xa5, 0xfe, 0xf0, - 0x98, 0x46, 0x6b, 0xcb, 0xae, 0xd0, 0xbd, 0x55, 0x51, 0x3e, 0xa8, 0x90, 0xf5, 0x05, 0x9a, 0x39, - 0x2b, 0x25, 0x4f, 0xa0, 0x3a, 0x70, 0x24, 0xea, 0x03, 0x60, 0xfa, 0x1b, 0x13, 0x39, 0xb3, 0xa2, - 0x50, 0xe5, 0xff, 0x4b, 0xf5, 0x7d, 0x59, 0x7f, 0x03, 0x5b, 0xeb, 0x6e, 0xe0, 0x13, 0xec, 0xad, - 0xd9, 0x26, 0xe9, 0xc2, 0x4e, 0x2c, 0x8b, 0x6e, 0xb4, 0xdc, 0xb1, 0xff, 0xe9, 0xea, 0x54, 0x29, - 0x5d, 0x14, 0x5a, 0x47, 0x50, 0xcb, 0x3e, 0x4b, 0xa0, 0x90, 0x6a, 0x5a, 0xff, 0xb7, 0xfa, 0xb0, - 0xbb, 0xb2, 0x71, 0x75, 0x79, 0x91, 0x62, 0x43, 0x3e, 0x8a, 0x52, 0x8b, 0xb4, 0xa4, 0x91, 0x73, - 0x3e, 0x42, 0xf2, 0x18, 0x22, 0x41, 0x58, 0xac, 0x82, 0x3e, 0xbb, 0x12, 0xad, 0x68, 0xf0, 0x7d, - 0x84, 0x75, 0x7e, 0x19, 0x50, 0xcf, 0x9e, 0x1b, 0xf9, 0x09, 0xfb, 0x59, 0xec, 0x9d, 0x3f, 0xe6, - 0xe4, 0xce, 0x17, 0xfb, 0xe0, 0xec, 0x0e, 0x15, 0xd1, 0x54, 0xb6, 0x71, 0x6a, 0x0c, 0xb6, 0xb5, - 0xf4, 0x2f, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x85, 0x02, 0x09, 0x9d, 0x9f, 0x06, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// ServerReflectionClient is the client API for ServerReflection service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ServerReflectionClient interface { - // The reflection service is structured as a bidirectional stream, ensuring - // all related requests go to a single server. - ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) -} - -type serverReflectionClient struct { - cc grpc.ClientConnInterface -} - -func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { - return &serverReflectionClient{cc} -} - -func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { - stream, err := c.cc.NewStream(ctx, &_ServerReflection_serviceDesc.Streams[0], "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...) 
- if err != nil { - return nil, err - } - x := &serverReflectionServerReflectionInfoClient{stream} - return x, nil -} - -type ServerReflection_ServerReflectionInfoClient interface { - Send(*ServerReflectionRequest) error - Recv() (*ServerReflectionResponse, error) - grpc.ClientStream -} - -type serverReflectionServerReflectionInfoClient struct { - grpc.ClientStream -} - -func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { - m := new(ServerReflectionResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// ServerReflectionServer is the server API for ServerReflection service. -type ServerReflectionServer interface { - // The reflection service is structured as a bidirectional stream, ensuring - // all related requests go to a single server. - ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error -} - -// UnimplementedServerReflectionServer can be embedded to have forward compatible implementations. -type UnimplementedServerReflectionServer struct { -} - -func (*UnimplementedServerReflectionServer) ServerReflectionInfo(srv ServerReflection_ServerReflectionInfoServer) error { - return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") -} - -func RegisterServerReflectionServer(s *grpc.Server, srv ServerReflectionServer) { - s.RegisterService(&_ServerReflection_serviceDesc, srv) -} - -func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) -} - -type ServerReflection_ServerReflectionInfoServer interface { - Send(*ServerReflectionResponse) error - Recv() (*ServerReflectionRequest, error) - grpc.ServerStream -} - -type serverReflectionServerReflectionInfoServer struct { - grpc.ServerStream -} - -func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { - m := new(ServerReflectionRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _ServerReflection_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.reflection.v1alpha.ServerReflection", - HandlerType: (*ServerReflectionServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "ServerReflectionInfo", - Handler: _ServerReflection_ServerReflectionInfo_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "grpc_reflection_v1alpha/reflection.proto", + proto.RegisterFile("reflection/grpc_reflection_v1alpha/reflection.proto", fileDescriptor_e8cf9f2921ad6c95) +} + +var fileDescriptor_e8cf9f2921ad6c95 = []byte{ + // 686 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x41, 0x6f, 0xd3, 0x4c, + 0x10, 0xad, 0xdb, 0xa4, 0x55, 0x26, 0x69, 0x92, 0x6f, 0xdb, 0xaf, 0x71, 0x41, 0x45, 0x91, 0xa1, + 0x90, 0x22, 0x94, 0xb4, 0xa9, 0x84, 0x84, 0xb8, 0xa5, 0x80, 0x82, 0x54, 0x5a, 0xe4, 0x70, 0x01, + 0x0e, 0x2b, 0x27, 0x99, 0xb8, 0x06, 0xc7, 0x6b, 0x76, 0xdd, 0x40, 0x4e, 0xfc, 0x08, 0x7e, 0x14, + 0x7f, 0x89, 0x23, 0xda, 0xb5, 0x63, 0x3b, 0x6e, 0x4c, 0xd5, 0x53, 0x9c, 0x37, 0x33, 0xfb, 0x66, + 
0xf6, 0xbd, 0xb1, 0xe1, 0x94, 0xe3, 0xc4, 0xc5, 0x51, 0xe0, 0x30, 0xaf, 0x63, 0x73, 0x7f, 0x44, + 0x93, 0xff, 0x74, 0x76, 0x62, 0xb9, 0xfe, 0x95, 0xd5, 0x49, 0xa0, 0xb6, 0xcf, 0x59, 0xc0, 0x48, + 0x43, 0x66, 0xb6, 0x53, 0x70, 0x94, 0x69, 0xfc, 0x59, 0x87, 0xc6, 0x00, 0xf9, 0x0c, 0xb9, 0x19, + 0x07, 0x4d, 0xfc, 0x76, 0x8d, 0x22, 0x20, 0x04, 0x0a, 0x57, 0x4c, 0x04, 0xba, 0xd6, 0xd4, 0x5a, + 0x25, 0x53, 0x3d, 0x93, 0xa7, 0x50, 0x9f, 0x38, 0x2e, 0xd2, 0xe1, 0x9c, 0xca, 0x5f, 0xcf, 0x9a, + 0xa2, 0xbe, 0x21, 0xe3, 0xfd, 0x35, 0xb3, 0x2a, 0x91, 0xde, 0xfc, 0x4d, 0x84, 0x93, 0xe7, 0xb0, + 0xa7, 0x72, 0x47, 0xcc, 0x0b, 0x2c, 0xc7, 0x73, 0x3c, 0x9b, 0x8a, 0xf9, 0x74, 0xc8, 0x5c, 0xbd, + 0x10, 0x55, 0xec, 0xca, 0xf8, 0x59, 0x1c, 0x1e, 0xa8, 0x28, 0xb1, 0x61, 0x3f, 0x5b, 0x87, 0x3f, + 0x02, 0xf4, 0x84, 0xc3, 0x3c, 0xbd, 0xd8, 0xd4, 0x5a, 0xe5, 0xee, 0x51, 0x3b, 0x67, 0xa0, 0xf6, + 0xeb, 0x45, 0x66, 0x34, 0x45, 0x7f, 0xcd, 0x6c, 0x2c, 0xb3, 0xc4, 0x19, 0xa4, 0x07, 0x07, 0x96, + 0xeb, 0x26, 0x87, 0x53, 0xef, 0x7a, 0x3a, 0x44, 0x2e, 0x28, 0x9b, 0xd0, 0x60, 0xee, 0xa3, 0xbe, + 0x19, 0xf5, 0xb9, 0x6f, 0xb9, 0x6e, 0x5c, 0x76, 0x11, 0x26, 0x5d, 0x4e, 0x3e, 0xcc, 0x7d, 0x24, + 0x87, 0xb0, 0xed, 0x3a, 0x22, 0xa0, 0x02, 0xf9, 0xcc, 0x19, 0xa1, 0xd0, 0xb7, 0xa2, 0x9a, 0x8a, + 0x84, 0x07, 0x11, 0xda, 0xfb, 0x0f, 0x6a, 0x53, 0x14, 0xc2, 0xb2, 0x91, 0xf2, 0xb0, 0x31, 0x63, + 0x02, 0xf5, 0x6c, 0xb3, 0xe4, 0x09, 0xd4, 0x52, 0x53, 0xab, 0x1e, 0xc2, 0xdb, 0xaf, 0x26, 0xb0, + 0xa2, 0x3d, 0x82, 0x7a, 0xb6, 0x6d, 0x7d, 0xbd, 0xa9, 0xb5, 0x8a, 0x66, 0x0d, 0x97, 0x1b, 0x35, + 0x7e, 0x17, 0x40, 0xbf, 0x29, 0xb1, 0xf0, 0x99, 0x27, 0x90, 0x1c, 0x00, 0xcc, 0x2c, 0xd7, 0x19, + 0xd3, 0x94, 0xd2, 0x25, 0x85, 0xf4, 0xa5, 0xdc, 0x9f, 0xa1, 0xce, 0xb8, 0x63, 0x3b, 0x9e, 0xe5, + 0x2e, 0xfa, 0x56, 0x34, 0xe5, 0xee, 0x71, 0xae, 0x02, 0x39, 0x76, 0x32, 0x6b, 0x8b, 0x93, 0x16, + 0xc3, 0x7e, 0x05, 0x5d, 0xe9, 0x3c, 0x46, 0x31, 0xe2, 0x8e, 0x1f, 0x30, 0x4e, 0x79, 0xd4, 0x97, + 0x72, 0x48, 0xb9, 0xdb, 0xc9, 0x25, 0x91, 0x26, 0x7b, 0x15, 0xd7, 0x2d, 0xc6, 0xe9, 0xaf, 0x99, + 0xca, 0x72, 0x37, 0x23, 0xe4, 0x3b, 0x3c, 0x58, 0xad, 0x75, 0x4c, 0x59, 0xbc, 0x65, 0xae, 0x8c, + 0x01, 0x52, 0x9c, 0xf7, 0x57, 0xd8, 0x23, 0x26, 0x1e, 0xc3, 0xde, 0x92, 0x41, 0x12, 0xc2, 0x4d, + 0x45, 0xf8, 0x2c, 0x97, 0xf0, 0x3c, 0x31, 0x50, 0x8a, 0x6c, 0x37, 0xed, 0xab, 0x98, 0xe5, 0x12, + 0xaa, 0xc8, 0x79, 0xfa, 0x06, 0xb7, 0xd4, 0xe9, 0x8f, 0xf3, 0xc7, 0x91, 0xe9, 0xa9, 0x73, 0xb7, + 0x31, 0x0d, 0xf4, 0x08, 0xd4, 0x13, 0xc3, 0x86, 0x98, 0x71, 0x0e, 0x7b, 0xab, 0xef, 0x9d, 0x74, + 0xe1, 0xff, 0xac, 0x94, 0xea, 0xc5, 0xa3, 0x6b, 0xcd, 0x8d, 0x56, 0xc5, 0xdc, 0x59, 0x16, 0xe5, + 0xbd, 0x0c, 0x19, 0x5f, 0xa0, 0x91, 0x73, 0xa5, 0xe4, 0x11, 0x54, 0x87, 0x96, 0x40, 0xb5, 0x00, + 0x54, 0xbd, 0x63, 0x42, 0x67, 0x56, 0x24, 0x2a, 0xfd, 0x7f, 0x21, 0xdf, 0x2f, 0xab, 0x77, 0x60, + 0x63, 0xd5, 0x0e, 0x7c, 0x84, 0x9d, 0x15, 0xb7, 0x49, 0x7a, 0xb0, 0x15, 0xc9, 0xa2, 0x1a, 0x2d, + 0x77, 0x5b, 0xff, 0x74, 0x75, 0xaa, 0xd4, 0x5c, 0x14, 0x1a, 0x87, 0x50, 0xcb, 0x1e, 0x4b, 0xa0, + 0x90, 0x6a, 0x5a, 0x3d, 0x1b, 0x03, 0xd8, 0x5e, 0xba, 0x71, 0xb9, 0x79, 0xa1, 0x62, 0x23, 0x36, + 0x0e, 0x53, 0x8b, 0x66, 0x49, 0x21, 0x67, 0x6c, 0x8c, 0xe4, 0x21, 0x84, 0x82, 0xd0, 0x48, 0x05, + 0xb5, 0x76, 0x25, 0xb3, 0xa2, 0xc0, 0x77, 0x21, 0xd6, 0xfd, 0xa5, 0x41, 0x3d, 0xbb, 0x6e, 0xe4, + 0x27, 0xec, 0x66, 0xb1, 0xb7, 0xde, 0x84, 0x91, 0x3b, 0x6f, 0xec, 0xbd, 0x93, 0x3b, 0x54, 0x84, + 0x53, 0xb5, 0xb4, 0x63, 0xad, 0xf7, 0xf2, 0xd3, 0x0b, 0x9b, 0x31, 0xdb, 0xc5, 0xb6, 0xcd, 0x5c, + 0xcb, 0xb3, 0xdb, 0x8c, 
0xdb, 0xea, 0x53, 0xd5, 0xb9, 0xfd, 0xd3, 0x35, 0xdc, 0x54, 0xbe, 0x39, + 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x74, 0x3a, 0x67, 0xe7, 0x06, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto index 99b00df0a29..ee2b82c0a5b 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto @@ -16,6 +16,8 @@ syntax = "proto3"; +option go_package = "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"; + package grpc.reflection.v1alpha; service ServerReflection { diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go new file mode 100644 index 00000000000..2294b2c6c9e --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -0,0 +1,124 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package grpc_reflection_v1alpha + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// ServerReflectionClient is the client API for ServerReflection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := c.cc.NewStream(ctx, &_ServerReflection_serviceDesc.Streams[0], "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...) + if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. 
+// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. +type UnimplementedServerReflectionServer struct { +} + +func (*UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} + +func RegisterServerReflectionServer(s *grpc.Server, srv ServerReflectionServer) { + s.RegisterService(&_ServerReflection_serviceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _ServerReflection_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1alpha.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "reflection/grpc_reflection_v1alpha/reflection.proto", +} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go index dd22a2da784..7b6dd414a27 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -16,8 +16,6 @@ * */ -//go:generate protoc --go_out=plugins=grpc:. grpc_reflection_v1alpha/reflection.proto - /* Package reflection implements server reflection service. @@ -57,6 +55,7 @@ import ( ) type serverReflectionServer struct { + rpb.UnimplementedServerReflectionServer s *grpc.Server initSymbols sync.Once diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh new file mode 100644 index 00000000000..987bc20251d --- /dev/null +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -0,0 +1,79 @@ +#!/bin/bash +# Copyright 2020 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu -o pipefail + +WORKDIR=$(mktemp -d) + +function finish { + rm -rf "$WORKDIR" +} +trap finish EXIT + +export GOBIN=${WORKDIR}/bin +export PATH=${GOBIN}:${PATH} +mkdir -p ${GOBIN} + +echo "go install github.com/golang/protobuf/protoc-gen-go" +(cd test/tools && go install github.com/golang/protobuf/protoc-gen-go) + +echo "go install cmd/protoc-gen-go-grpc" +(cd cmd/protoc-gen-go-grpc && go install .) + +echo "git clone https://github.com/grpc/grpc-proto" +git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto + +mkdir -p ${WORKDIR}/googleapis/google/rpc +echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" +curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto + +mkdir -p ${WORKDIR}/out + +SOURCES=( + ${WORKDIR}/googleapis/google/rpc/code.proto + ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto + ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto + ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto + ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto + ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto + ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto + ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto + ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto + $(git ls-files --exclude-standard --cached --others "*.proto") +) +OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config +for src in ${SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},requireUnimplementedServers=false:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + ${src} +done + +# The go_package option in grpc/lookup/v1/rls.proto doesn't match the +# current location. Move it into the right place. +mkdir -p ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 + +# grpc_testingv3/testv3.pb.go is not re-generated because it was +# intentionally generated by an older version of protoc-gen-go. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go + +# grpc/service_config/service_config.proto does not have a go_package option. +cp ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config + +cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index fe14b2fb982..379275a2d9b 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -85,7 +85,10 @@ const ( Backend AddressType = iota // GRPCLB indicates the address is for a grpclb load balancer. // - // Deprecated: use Attributes in Address instead. + // Deprecated: to select the GRPCLB load balancing policy, use a service + // config with a corresponding loadBalancingConfig. To supply balancer + // addresses to the GRPCLB load balancing policy, set State.Attributes + // using balancer/grpclb/state.Set. 
GRPCLB ) diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index edfda866c00..265002a75e0 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -140,7 +140,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { if ccr.done.HasFired() { return } - channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) if channelz.IsOn() { ccr.addChannelzTraceEvent(s) } @@ -152,7 +152,7 @@ func (ccr *ccResolverWrapper) ReportError(err error) { if ccr.done.HasFired() { return } - channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) } @@ -161,7 +161,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { if ccr.done.HasFired() { return } - channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) if channelz.IsOn() { ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) } @@ -175,14 +175,14 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { if ccr.done.HasFired() { return } - channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) if ccr.cc.dopts.disableServiceConfig { - channelz.Info(ccr.cc.channelzID, "Service config lookups disabled; ignoring config") + channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return } scpr := parseServiceConfig(sc) if scpr.Err != nil { - channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) ccr.poll(balancer.ErrBadResolverState) return } @@ -215,7 +215,7 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), Severity: channelz.CtINFO, }) diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index cf9dbe7fd97..8644b8a7d0d 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -155,7 +155,6 @@ func (d *gzipDecompressor) Type() string { type callInfo struct { compressorType string failFast bool - stream ClientStream maxReceiveMessageSize *int maxSendMessageSize *int creds credentials.PerRPCCredentials @@ -180,7 +179,7 @@ type CallOption interface { // after is called after the call has completed. after cannot return an // error, so any failures should be reported via output parameters. 
- after(*callInfo) + after(*callInfo, *csAttempt) } // EmptyCallOption does not alter the Call configuration. @@ -188,8 +187,8 @@ type CallOption interface { // by interceptors. type EmptyCallOption struct{} -func (EmptyCallOption) before(*callInfo) error { return nil } -func (EmptyCallOption) after(*callInfo) {} +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo, *csAttempt) {} // Header returns a CallOptions that retrieves the header metadata // for a unary RPC. @@ -205,10 +204,8 @@ type HeaderCallOption struct { } func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo) { - if c.stream != nil { - *o.HeaderAddr, _ = c.stream.Header() - } +func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { + *o.HeaderAddr, _ = attempt.s.Header() } // Trailer returns a CallOptions that retrieves the trailer metadata @@ -225,10 +222,8 @@ type TrailerCallOption struct { } func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo) { - if c.stream != nil { - *o.TrailerAddr = c.stream.Trailer() - } +func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { + *o.TrailerAddr = attempt.s.Trailer() } // Peer returns a CallOption that retrieves peer information for a unary RPC. @@ -245,11 +240,9 @@ type PeerCallOption struct { } func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo) { - if c.stream != nil { - if x, ok := peer.FromContext(c.stream.Context()); ok { - *o.PeerAddr = *x - } +func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { + if x, ok := peer.FromContext(attempt.s.Context()); ok { + *o.PeerAddr = *x } } @@ -285,7 +278,7 @@ func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } -func (o FailFastCallOption) after(c *callInfo) {} +func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size // in bytes the client can receive. @@ -304,7 +297,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } -func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. @@ -323,7 +316,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } -func (o MaxSendMsgSizeCallOption) after(c *callInfo) {} +func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. @@ -342,7 +335,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } -func (o PerRPCCredsCallOption) after(c *callInfo) {} +func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. 
If WithCompressor is also set, UseCompressor has @@ -363,7 +356,7 @@ func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } -func (o CompressorCallOption) after(c *callInfo) {} +func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over @@ -396,7 +389,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } -func (o ContentSubtypeCallOption) after(c *callInfo) {} +func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} // ForceCodec returns a CallOption that will set the given Codec to be // used for all request and response messages for a call. The result of calling @@ -428,7 +421,7 @@ func (o ForceCodecCallOption) before(c *callInfo) error { c.codec = o.Codec return nil } -func (o ForceCodecCallOption) after(c *callInfo) {} +func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. @@ -450,7 +443,7 @@ func (o CustomCodecCallOption) before(c *callInfo) error { c.codec = o.Codec return nil } -func (o CustomCodecCallOption) after(c *callInfo) {} +func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. @@ -471,7 +464,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {} +func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // The format of the payload: compressed or not? type payloadFormat uint8 diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index edfcdcaee9e..1b56cc2d11f 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -42,6 +42,7 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -58,6 +59,7 @@ const ( ) var statusOK = status.New(codes.OK, "") +var logger = grpclog.Component("core") type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) @@ -87,6 +89,12 @@ type service struct { mdata interface{} } +type serverWorkerData struct { + st transport.ServerTransport + wg *sync.WaitGroup + stream *transport.Stream +} + // Server is a gRPC server to serve RPC requests. 
type Server struct { opts serverOptions @@ -107,6 +115,8 @@ type Server struct { channelzID int64 // channelz unique identification number czData *channelzData + + serverWorkerChannels []chan *serverWorkerData } type serverOptions struct { @@ -133,6 +143,7 @@ type serverOptions struct { connectionTimeout time.Duration maxHeaderListSize *uint32 headerTableSize *uint32 + numServerWorkers uint32 } var defaultServerOptions = serverOptions{ @@ -213,7 +224,7 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { if kp.Time > 0 && kp.Time < time.Second { - grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s") + logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") kp.Time = time.Second } @@ -232,6 +243,12 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. // // This will override any lookups by content-subtype for Codecs registered with RegisterCodec. +// +// Deprecated: register codecs using encoding.RegisterCodec. The server will +// automatically use registered codecs based on the incoming requests' headers. +// See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. func CustomCodec(codec Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.codec = codec @@ -244,7 +261,8 @@ func CustomCodec(codec Codec) ServerOption { // default, server messages will be sent using the same compressor with which // request messages were sent. // -// Deprecated: use encoding.RegisterCompressor instead. +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. func RPCCompressor(cp Compressor) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.cp = cp @@ -255,7 +273,8 @@ func RPCCompressor(cp Compressor) ServerOption { // messages. It has higher priority than decompressors registered via // encoding.RegisterCompressor. // -// Deprecated: use encoding.RegisterCompressor instead. +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. func RPCDecompressor(dc Decompressor) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.dc = dc @@ -265,7 +284,7 @@ func RPCDecompressor(dc Decompressor) ServerOption { // MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. // If this is not set, gRPC uses the default limit. // -// Deprecated: use MaxRecvMsgSize instead. +// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x. func MaxMsgSize(m int) ServerOption { return MaxRecvMsgSize(m) } @@ -335,7 +354,7 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption { } // ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor -// for stream RPCs. The first interceptor will be the outer most, +// for streaming RPCs. The first interceptor will be the outer most, // while the last interceptor will be the inner most wrapper around the real call. // All stream interceptors added by this method will be chained. 
func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption { @@ -410,6 +429,66 @@ func HeaderTableSize(s uint32) ServerOption { }) } +// NumStreamWorkers returns a ServerOption that sets the number of worker +// goroutines that should be used to process incoming streams. Setting this to +// zero (default) will disable workers and spawn a new goroutine for each +// stream. +// +// This API is EXPERIMENTAL. +func NumStreamWorkers(numServerWorkers uint32) ServerOption { + // TODO: If/when this API gets stabilized (i.e. stream workers become the + // only way streams are processed), change the behavior of the zero value to + // a sane default. Preliminary experiments suggest that a value equal to the + // number of CPUs available is most performant; requires thorough testing. + return newFuncServerOption(func(o *serverOptions) { + o.numServerWorkers = numServerWorkers + }) +} + +// serverWorkerResetThreshold defines how often the stack must be reset. Every +// N requests, by spawning a new goroutine in its place, a worker can reset its +// stack so that large stacks don't live in memory forever. 2^16 should allow +// each goroutine stack to live for at least a few seconds in a typical +// workload (assuming a QPS of a few thousand requests/sec). +const serverWorkerResetThreshold = 1 << 16 + +// serverWorkers blocks on a *transport.Stream channel forever and waits for +// data to be fed by serveStreams. This allows different requests to be +// processed by the same goroutine, removing the need for expensive stack +// re-allocations (see the runtime.morestack problem [1]). +// +// [1] https://github.com/golang/go/issues/18138 +func (s *Server) serverWorker(ch chan *serverWorkerData) { + // To make sure all server workers don't reset at the same time, choose a + // random number of iterations before resetting. + threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) + for completed := 0; completed < threshold; completed++ { + data, ok := <-ch + if !ok { + return + } + s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) + data.wg.Done() + } + go s.serverWorker(ch) +} + +// initServerWorkers creates worker goroutines and channels to process incoming +// connections to reduce the time spent overall on runtime.morestack. +func (s *Server) initServerWorkers() { + s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + s.serverWorkerChannels[i] = make(chan *serverWorkerData) + go s.serverWorker(s.serverWorkerChannels[i]) + } +} + +func (s *Server) stopServerWorkers() { + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + close(s.serverWorkerChannels[i]) + } +} + // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. 
func NewServer(opt ...ServerOption) *Server { @@ -434,6 +513,10 @@ func NewServer(opt ...ServerOption) *Server { s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) } + if s.opts.numServerWorkers > 0 { + s.initServerWorkers() + } + if channelz.IsOn() { s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") } @@ -463,7 +546,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { ht := reflect.TypeOf(sd.HandlerType).Elem() st := reflect.TypeOf(ss) if !st.Implements(ht) { - grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) } s.register(sd, ss) } @@ -473,10 +556,10 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) { defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) if s.serve { - grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) } if _, ok := s.m[sd.ServiceName]; ok { - grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) } srv := &service{ server: ss, @@ -682,7 +765,7 @@ func (s *Server) handleRawConn(rawConn net.Conn) { s.mu.Lock() s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) s.mu.Unlock() - channelz.Warningf(s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) rawConn.Close() } rawConn.SetDeadline(time.Time{}) @@ -729,7 +812,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() c.Close() - channelz.Warning(s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) return nil } @@ -739,12 +822,27 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr func (s *Server) serveStreams(st transport.ServerTransport) { defer st.Close() var wg sync.WaitGroup + + var roundRobinCounter uint32 st.HandleStreams(func(stream *transport.Stream) { wg.Add(1) - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() + if s.opts.numServerWorkers > 0 { + data := &serverWorkerData{st: st, wg: &wg, stream: stream} + select { + case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: + default: + // If all stream workers are busy, fallback to the default code path. 
+ go func() { + s.handleStream(st, stream, s.traceInfo(st, stream)) + wg.Done() + }() + } + } else { + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + } }, func(ctx context.Context, method string) context.Context { if !EnableTracing { return ctx @@ -868,12 +966,12 @@ func (s *Server) incrCallsFailed() { func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { - channelz.Error(s.channelzID, "grpc: server failed to encode response: ", err) + channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) return err } compData, err := compress(data, cp, comp) if err != nil { - channelz.Error(s.channelzID, "grpc: server failed to compress response: ", err) + channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err) return err } hdr, payload := msgHeader(data, compData) @@ -1047,7 +1145,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err != nil { if st, ok := status.FromError(err); ok { if e := t.WriteStatus(stream, st); e != nil { - channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) } } return err @@ -1092,7 +1190,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.SetError() } if e := t.WriteStatus(stream, appStatus); e != nil { - channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if binlog != nil { if h, _ := stream.Header(); h.Len() > 0 { @@ -1121,7 +1219,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
} if sts, ok := status.FromError(err); ok { if e := t.WriteStatus(stream, sts); e != nil { - channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { switch st := err.(type) { @@ -1389,7 +1487,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -1430,7 +1528,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -1507,6 +1605,9 @@ func (s *Server) Stop() { for c := range st { c.Close() } + if s.opts.numServerWorkers > 0 { + s.stopServerWorkers() + } s.mu.Lock() if s.events != nil { diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 5a80a575a5e..5e434ca7f35 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -20,15 +20,16 @@ package grpc import ( "encoding/json" + "errors" "fmt" + "reflect" "strconv" "strings" "time" - "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" ) @@ -79,7 +80,7 @@ type ServiceConfig struct { serviceconfig.Config // LB is the load balancer the service providers recommends. The balancer - // specified via grpc.WithBalancer will override this. This is deprecated; + // specified via grpc.WithBalancerName will override this. This is deprecated; // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig // will be used. LB *string @@ -224,19 +225,27 @@ func parseDuration(s *string) (*time.Duration, error) { } type jsonName struct { - Service *string - Method *string + Service string + Method string } -func (j jsonName) generatePath() (string, bool) { - if j.Service == nil { - return "", false +var ( + errDuplicatedName = errors.New("duplicated name") + errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'") +) + +func (j jsonName) generatePath() (string, error) { + if j.Service == "" { + if j.Method != "" { + return "", errEmptyServiceNonEmptyMethod + } + return "", nil } - res := "/" + *j.Service + "/" - if j.Method != nil { - res += *j.Method + res := "/" + j.Service + "/" + if j.Method != "" { + res += j.Method } - return res, true + return res, nil } // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. @@ -249,12 +258,10 @@ type jsonMC struct { RetryPolicy *jsonRetryPolicy } -type loadBalancingConfig map[string]json.RawMessage - // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
type jsonSC struct { LoadBalancingPolicy *string - LoadBalancingConfig *[]loadBalancingConfig + LoadBalancingConfig *internalserviceconfig.BalancerConfig MethodConfig *[]jsonMC RetryThrottling *retryThrottlingPolicy HealthCheckConfig *healthCheckConfig @@ -270,7 +277,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ @@ -280,53 +287,25 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { healthCheckConfig: rsc.HealthCheckConfig, rawJSONString: js, } - if rsc.LoadBalancingConfig != nil { - for i, lbcfg := range *rsc.LoadBalancingConfig { - if len(lbcfg) != 1 { - err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) - grpclog.Warningf(err.Error()) - return &serviceconfig.ParseResult{Err: err} - } - var name string - var jsonCfg json.RawMessage - for name, jsonCfg = range lbcfg { - } - builder := balancer.Get(name) - if builder == nil { - continue - } - sc.lbConfig = &lbConfig{name: name} - if parser, ok := builder.(balancer.ConfigParser); ok { - var err error - sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg) - if err != nil { - return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)} - } - } else if string(jsonCfg) != "{}" { - grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) - } - break - } - if sc.lbConfig == nil { - // We had a loadBalancingConfig field but did not encounter a - // supported policy. The config is considered invalid in this - // case. 
- err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found") - grpclog.Warningf(err.Error()) - return &serviceconfig.ParseResult{Err: err} + if c := rsc.LoadBalancingConfig; c != nil { + sc.lbConfig = &lbConfig{ + name: c.Name, + cfg: c.Config, } } if rsc.MethodConfig == nil { return &serviceconfig.ParseResult{Config: &sc} } + + paths := map[string]struct{}{} for _, m := range *rsc.MethodConfig { if m.Name == nil { continue } d, err := parseDuration(m.Timeout) if err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } @@ -335,7 +314,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { Timeout: d, } if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -352,10 +331,20 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) } } - for _, n := range *m.Name { - if path, valid := n.generatePath(); valid { - sc.Methods[path] = mc + for i, n := range *m.Name { + path, err := n.generatePath() + if err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + + if _, ok := paths[path]; ok { + err = errDuplicatedName + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} } + paths[path] = struct{}{} + sc.Methods[path] = mc } } @@ -388,7 +377,7 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { *mb <= 0 || jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { - grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) + logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) return nil, nil } @@ -432,3 +421,34 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { func newInt(b int) *int { return &b } + +func init() { + internal.EqualServiceConfigForTesting = equalServiceConfig +} + +// equalServiceConfig compares two configs. The rawJSONString field is ignored, +// because they may diff in white spaces. +// +// If any of them is NOT *ServiceConfig, return false. +func equalServiceConfig(a, b serviceconfig.Config) bool { + aa, ok := a.(*ServiceConfig) + if !ok { + return false + } + bb, ok := b.(*ServiceConfig) + if !ok { + return false + } + aaRaw := aa.rawJSONString + aa.rawJSONString = "" + bbRaw := bb.rawJSONString + bb.rawJSONString = "" + defer func() { + aa.rawJSONString = aaRaw + bb.rawJSONString = bbRaw + }() + // Using reflect.DeepEqual instead of cmp.Equal because many balancer + // configs are unexported, and cmp.Equal cannot compare unexported fields + // from unexported structs. + return reflect.DeepEqual(aa, bb) +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index a7970c79abe..63e476ee7ff 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -16,8 +16,6 @@ * */ -//go:generate protoc --go_out=plugins=grpc:. 
grpc_testing/test.proto - // Package stats is for collecting and reporting various network and RPC stats. // This package is for monitoring purpose only. All fields are read-only. // All APIs are experimental. diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 934ef68321c..fbc3fb11cb4 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -35,6 +35,7 @@ import ( "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -277,7 +278,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } cs.binlog = binarylog.GetMethodLogger(method) - cs.callInfo.stream = cs // Only this initial attempt has stats/tracing. // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. if err := cs.newAttemptLocked(sh, trInfo); err != nil { @@ -347,7 +347,16 @@ func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (r if err := cs.ctx.Err(); err != nil { return toRPCErr(err) } - t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method) + + ctx := cs.ctx + if cs.cc.parsedTarget.Scheme == "xds" { + // Add extra metadata (metadata that will be added by transport) to context + // so the balancer can see them. + ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs( + "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), + )) + } + t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { return err } @@ -365,6 +374,11 @@ func (a *csAttempt) newStream() error { cs.callHdr.PreviousAttempts = cs.numRetries s, err := a.t.NewStream(cs.ctx, cs.callHdr) if err != nil { + if _, ok := err.(transport.PerformedIOError); ok { + // Return without converting to an RPC error so retry code can + // inspect. + return err + } return toRPCErr(err) } cs.attempt.s = s @@ -460,11 +474,21 @@ func (cs *clientStream) commitAttempt() { // shouldRetry returns nil if the RPC should be retried; otherwise it returns // the error that should be returned by the operation. func (cs *clientStream) shouldRetry(err error) error { - if cs.attempt.s == nil && !cs.callInfo.failFast { - // In the event of any error from NewStream (attempt.s == nil), we - // never attempted to write anything to the wire, so we can retry - // indefinitely for non-fail-fast RPCs. - return nil + unprocessed := false + if cs.attempt.s == nil { + pioErr, ok := err.(transport.PerformedIOError) + if ok { + // Unwrap error. + err = toRPCErr(pioErr.Err) + } else { + unprocessed = true + } + if !ok && !cs.callInfo.failFast { + // In the event of a non-IO operation error from NewStream, we + // never attempted to write anything to the wire, so we can retry + // indefinitely for non-fail-fast RPCs. + return nil + } } if cs.finished || cs.committed { // RPC is finished or committed; cannot retry. @@ -473,13 +497,12 @@ func (cs *clientStream) shouldRetry(err error) error { // Wait for the trailers. if cs.attempt.s != nil { <-cs.attempt.s.Done() + unprocessed = cs.attempt.s.Unprocessed() } - if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { + if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. 
- cs.firstAttempt = false return nil } - cs.firstAttempt = false if cs.cc.dopts.disableRetry { return err } @@ -497,13 +520,13 @@ func (cs *clientStream) shouldRetry(err error) error { if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { - channelz.Infof(cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) + channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. return err } hasPushback = true } else if len(sps) > 1 { - channelz.Warningf(cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) + channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. return err } @@ -565,6 +588,7 @@ func (cs *clientStream) retryLocked(lastErr error) error { cs.commitAttemptLocked() return err } + cs.firstAttempt = false if err := cs.newAttemptLocked(nil, nil); err != nil { return err } @@ -799,6 +823,15 @@ func (cs *clientStream) finish(err error) { } cs.finished = true cs.commitAttemptLocked() + if cs.attempt != nil { + cs.attempt.finish(err) + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo, cs.attempt) + } + } + } cs.mu.Unlock() // For binary logging. only log cancel in finish (could be caused by RPC ctx // canceled or ClientConn closed). Trailer will be logged in RecvMsg. @@ -820,15 +853,6 @@ func (cs *clientStream) finish(err error) { cs.cc.incrCallsSucceeded() } } - if cs.attempt != nil { - cs.attempt.finish(err) - // after functions all rely upon having a stream. - if cs.attempt.s != nil { - for _, o := range cs.opts { - o.after(cs.callInfo) - } - } - } cs.cancel() } @@ -1066,7 +1090,6 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin t: t, } - as.callInfo.stream = as s, err := as.t.NewStream(as.ctx, as.callHdr) if err != nil { err = toRPCErr(err) diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index ca5d55fddf1..de387f7b86a 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.29.1" +const Version = "1.31.1" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index e12024fb863..8b7dff19adb 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -39,8 +39,7 @@ if [[ "$1" = "-install" ]]; then golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell \ - github.com/golang/protobuf/protoc-gen-go + github.com/client9/misspell/cmd/misspell popd else # Ye olde `go get` incantation. @@ -50,8 +49,7 @@ if [[ "$1" = "-install" ]]; then golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell \ - github.com/golang/protobuf/protoc-gen-go + github.com/client9/misspell/cmd/misspell fi if [[ -z "${VET_SKIP_PROTO}" ]]; then if [[ "${TRAVIS}" = "true" ]]; then @@ -88,6 +86,9 @@ git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpc # - Ensure all ptypes proto packages are renamed when importing. 
not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" +# - Ensure all xds proto imports are renamed to *pb or *grpc. +git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' | not grep -v 'pb "\|grpc "' + # - Check imports that are illegal in appengine (until Go 1.11). # TODO: Remove when we drop Go 1.10 support go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go @@ -107,10 +108,10 @@ if [[ -z "${VET_SKIP_PROTO}" ]]; then (git status; git --no-pager diff; exit 1) fi -# - Check that our module is tidy. +# - Check that our modules are tidy. if go help mod >& /dev/null; then - go mod tidy && \ - git status --porcelain 2>&1 | fail_on_output || \ + find . -name 'go.mod' | xargs -IXXX bash -c 'cd $(dirname XXX); go mod tidy' + git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) fi @@ -123,18 +124,18 @@ staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true # Error if anything other than deprecation warnings are printed. not grep -v "is deprecated:.*SA1019" "${SC_OUT}" # Only ignore the following deprecated types/fields/functions. -not grep -Fv '.HandleResolvedAddrs -.HandleSubConnStateChange +not grep -Fv '.CredsBundle .HeaderMap +.Metadata is deprecated: use Attributes .NewAddress .NewServiceConfig -.Metadata is deprecated: use Attributes .Type is deprecated: use Attributes -.UpdateBalancerState +balancer.ErrTransientFailure balancer.Picker grpc.CallCustomCodec grpc.Code grpc.Compressor +grpc.CustomCodec grpc.Decompressor grpc.MaxMsgSize grpc.MethodConfig @@ -142,9 +143,7 @@ grpc.NewGZIPCompressor grpc.NewGZIPDecompressor grpc.RPCCompressor grpc.RPCDecompressor -grpc.RoundRobin grpc.ServiceConfig -grpc.WithBalancer grpc.WithBalancerName grpc.WithCompressor grpc.WithDecompressor @@ -154,10 +153,38 @@ grpc.WithServiceConfig grpc.WithTimeout http.CloseNotifier info.SecurityVersion -naming.Resolver -naming.Update -naming.Watcher resolver.Backend resolver.GRPCLB' "${SC_OUT}" +# - special golint on package comments. +lint_package_comment_per_package() { + # Number of files in this go package. + fileCount=$(go list -f '{{len .GoFiles}}' $1) + if [ ${fileCount} -eq 0 ]; then + return 0 + fi + # Number of package errors generated by golint. + lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") + # golint complains about every file that's missing the package comment. If the + # number of files for this package is greater than the number of errors, there's + # at least one file with package comment, good. Otherwise, fail. 
+ if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then + echo "Package $1 (with ${fileCount} files) is missing package comment" + return 1 + fi +} +lint_package_comment() { + set +ex + + count=0 + for i in $(go list ./...); do + lint_package_comment_per_package "$i" + ((count += $?)) + done + + set -ex + return $count +} +lint_package_comment + echo SUCCESS diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go index d8f86c05d8e..b2e3930fb0a 100644 --- a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go @@ -19,8 +19,7 @@ import ( "google.golang.org/protobuf/compiler/protogen" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/encoding/tag" - "google.golang.org/protobuf/internal/fieldnum" - "google.golang.org/protobuf/internal/genname" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/version" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoimpl" @@ -37,9 +36,14 @@ var GenerateVersionMarkers = true // Standard library dependencies. const ( + base64Package = protogen.GoImportPath("encoding/base64") mathPackage = protogen.GoImportPath("math") reflectPackage = protogen.GoImportPath("reflect") + sortPackage = protogen.GoImportPath("sort") + stringsPackage = protogen.GoImportPath("strings") syncPackage = protogen.GoImportPath("sync") + timePackage = protogen.GoImportPath("time") + utf8Package = protogen.GoImportPath("unicode/utf8") ) // Protobuf library dependencies. @@ -48,11 +52,13 @@ const ( // patched to support unique build environments that impose restrictions // on the dependencies of generated source code. 
var ( - protoPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/proto") - protoifacePackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoiface") - protoimplPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoimpl") - protoreflectPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoreflect") - protoV1Package goImportPath = protogen.GoImportPath("github.com/golang/protobuf/proto") + protoPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/proto") + protoifacePackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoiface") + protoimplPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoimpl") + protojsonPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/encoding/protojson") + protoreflectPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoreflect") + protoregistryPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoregistry") + protoV1Package goImportPath = protogen.GoImportPath("github.com/golang/protobuf/proto") ) type goImportPath interface { @@ -66,10 +72,12 @@ func GenerateFile(gen *protogen.Plugin, file *protogen.File) *protogen.Generated g := gen.NewGeneratedFile(filename, file.GoImportPath) f := newFileInfo(file) - genStandaloneComments(g, f, fieldnum.FileDescriptorProto_Syntax) + genStandaloneComments(g, f, int32(genid.FileDescriptorProto_Syntax_field_number)) genGeneratedHeader(gen, g, f) - genStandaloneComments(g, f, fieldnum.FileDescriptorProto_Package) - g.P("package ", f.GoPackageName) + genStandaloneComments(g, f, int32(genid.FileDescriptorProto_Package_field_number)) + + packageDoc := genPackageKnownComment(f) + g.P(packageDoc, "package ", f.GoPackageName) g.P() // Emit a static check that enforces a minimum version of the proto package. 
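The hunks above and below wire genPackageKnownComment and genMessageKnownFunctions (both defined in the new well_known_types.go further down in this diff) into GenerateFile and genMessage, so the generated packages for the well-known types carry package documentation and hand-written helper functions. A minimal sketch of how application code typically calls those generated helpers, assuming the standard google.golang.org/protobuf/types/known/* packages this generator emits; the field values are illustrative only:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/structpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Timestamp helpers emitted by genMessageKnownFunctions: Now, CheckValid, AsTime.
	ts := timestamppb.Now()
	if err := ts.CheckValid(); err != nil {
		panic(err)
	}
	fmt.Println(ts.AsTime().UTC())

	// Duration helpers: New converts from a time.Duration, AsDuration converts back
	// (saturating on overflow, per the generated doc comment).
	d := durationpb.New(90 * time.Second)
	fmt.Println(d.AsDuration())

	// Struct helpers: NewStruct builds a Struct from a Go map (validating UTF-8 keys);
	// AsMap converts it back to a map[string]interface{}.
	s, err := structpb.NewStruct(map[string]interface{}{
		"name":  "bootstrap", // illustrative values only
		"ready": true,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(s.AsMap())
}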
@@ -329,6 +337,7 @@ func genMessage(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { g.P("}") g.P() + genMessageKnownFunctions(g, f, m) genMessageDefaultDecls(g, f, m) genMessageMethods(g, f, m) genMessageOneofWrapperTypes(g, f, m) @@ -343,19 +352,19 @@ func genMessageFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { } func genMessageInternalFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, sf *structFields) { - g.P(genname.State, " ", protoimplPackage.Ident("MessageState")) - sf.append(genname.State) - g.P(genname.SizeCache, " ", protoimplPackage.Ident("SizeCache")) - sf.append(genname.SizeCache) + g.P(genid.State_goname, " ", protoimplPackage.Ident("MessageState")) + sf.append(genid.State_goname) + g.P(genid.SizeCache_goname, " ", protoimplPackage.Ident("SizeCache")) + sf.append(genid.SizeCache_goname) if m.hasWeak { - g.P(genname.WeakFields, " ", protoimplPackage.Ident("WeakFields")) - sf.append(genname.WeakFields) + g.P(genid.WeakFields_goname, " ", protoimplPackage.Ident("WeakFields")) + sf.append(genid.WeakFields_goname) } - g.P(genname.UnknownFields, " ", protoimplPackage.Ident("UnknownFields")) - sf.append(genname.UnknownFields) + g.P(genid.UnknownFields_goname, " ", protoimplPackage.Ident("UnknownFields")) + sf.append(genid.UnknownFields_goname) if m.Desc.ExtensionRanges().Len() > 0 { - g.P(genname.ExtensionFields, " ", protoimplPackage.Ident("ExtensionFields")) - sf.append(genname.ExtensionFields) + g.P(genid.ExtensionFields_goname, " ", protoimplPackage.Ident("ExtensionFields")) + sf.append(genid.ExtensionFields_goname) } if sf.count > 0 { g.P() @@ -416,7 +425,7 @@ func genMessageField(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, fie name := field.GoName if field.Desc.IsWeak() { - name = genname.WeakFieldPrefix + name + name = genid.WeakFieldPrefix_goname + name } g.Annotate(m.GoIdent.GoName+"."+name, field.Location) leadingComments := appendDeprecationSuffix(field.Comments.Leading, @@ -577,9 +586,9 @@ func genMessageGetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageI g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", protoPackage.Ident("Message"), "{") g.P("var w ", protoimplPackage.Ident("WeakFields")) g.P("if x != nil {") - g.P("w = x.", genname.WeakFields) + g.P("w = x.", genid.WeakFields_goname) if m.isTracked { - g.P("_ = x.", genname.WeakFieldPrefix+field.GoName) + g.P("_ = x.", genid.WeakFieldPrefix_goname+field.GoName) } g.P("}") g.P("return ", protoimplPackage.Ident("X"), ".GetWeak(w, ", field.Desc.Number(), ", ", strconv.Quote(string(field.Message.Desc.FullName())), ")") @@ -625,9 +634,9 @@ func genMessageSetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageI g.P(leadingComments, "func (x *", m.GoIdent, ") Set", field.GoName, "(v ", protoPackage.Ident("Message"), ") {") g.P("var w *", protoimplPackage.Ident("WeakFields")) g.P("if x != nil {") - g.P("w = &x.", genname.WeakFields) + g.P("w = &x.", genid.WeakFields_goname) if m.isTracked { - g.P("_ = x.", genname.WeakFieldPrefix+field.GoName) + g.P("_ = x.", genid.WeakFieldPrefix_goname+field.GoName) } g.P("}") g.P(protoimplPackage.Ident("X"), ".SetWeak(w, ", field.Desc.Number(), ", ", strconv.Quote(string(field.Message.Desc.FullName())), ", v)") diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go new file mode 100644 index 00000000000..9a1b7bdfa3f --- /dev/null +++ 
b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go @@ -0,0 +1,1077 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal_gengo + +import ( + "strings" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/internal/genid" +) + +// Specialized support for well-known types are hard-coded into the generator +// as opposed to being injected in adjacent .go sources in the generated package +// in order to support specialized build systems like Bazel that always generate +// dynamically from the source .proto files. + +func genPackageKnownComment(f *fileInfo) protogen.Comments { + switch f.Desc.Path() { + case genid.File_google_protobuf_any_proto: + return ` Package anypb contains generated types for ` + genid.File_google_protobuf_any_proto + `. + + The Any message is a dynamic representation of any other message value. + It is functionally a tuple of the full name of the remote message type and + the serialized bytes of the remote message value. + + + Constructing an Any + + An Any message containing another message value is constructed using New: + + any, err := anypb.New(m) + if err != nil { + ... // handle error + } + ... // make use of any + + + Unmarshaling an Any + + With a populated Any message, the underlying message can be serialized into + a remote concrete message value in a few ways. + + If the exact concrete type is known, then a new (or pre-existing) instance + of that message can be passed to the UnmarshalTo method: + + m := new(foopb.MyMessage) + if err := any.UnmarshalTo(m); err != nil { + ... // handle error + } + ... // make use of m + + If the exact concrete type is not known, then the UnmarshalNew method can be + used to unmarshal the contents into a new instance of the remote message type: + + m, err := any.UnmarshalNew() + if err != nil { + ... // handle error + } + ... // make use of m + + UnmarshalNew uses the global type registry to resolve the message type and + construct a new instance of that message to unmarshal into. In order for a + message type to appear in the global registry, the Go type representing that + protobuf message type must be linked into the Go binary. For messages + generated by protoc-gen-go, this is achieved through an import of the + generated Go package representing a .proto file. + + A common pattern with UnmarshalNew is to use a type switch with the resulting + proto.Message value: + + switch m := m.(type) { + case *foopb.MyMessage: + ... // make use of m as a *foopb.MyMessage + case *barpb.OtherMessage: + ... // make use of m as a *barpb.OtherMessage + case *bazpb.SomeMessage: + ... // make use of m as a *bazpb.SomeMessage + } + + This pattern ensures that the generated packages containing the message types + listed in the case clauses are linked into the Go binary and therefore also + registered in the global registry. + + + Type checking an Any + + In order to type check whether an Any message represents some other message, + then use the MessageIs method: + + if any.MessageIs((*foopb.MyMessage)(nil)) { + ... // make use of any, knowing that it contains a foopb.MyMessage + } + + The MessageIs method can also be used with an allocated instance of the target + message type if the intention is to unmarshal into it if the type matches: + + m := new(foopb.MyMessage) + if any.MessageIs(m) { + if err := any.UnmarshalTo(m); err != nil { + ... 
// handle error + } + ... // make use of m + } + +` + case genid.File_google_protobuf_timestamp_proto: + return ` Package timestamppb contains generated types for ` + genid.File_google_protobuf_timestamp_proto + `. + + The Timestamp message represents a timestamp, + an instant in time since the Unix epoch (January 1st, 1970). + + + Conversion to a Go Time + + The AsTime method can be used to convert a Timestamp message to a + standard Go time.Time value in UTC: + + t := ts.AsTime() + ... // make use of t as a time.Time + + Converting to a time.Time is a common operation so that the extensive + set of time-based operations provided by the time package can be leveraged. + See https://golang.org/pkg/time for more information. + + The AsTime method performs the conversion on a best-effort basis. Timestamps + with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive) + are normalized during the conversion to a time.Time. To manually check for + invalid Timestamps per the documented limitations in timestamp.proto, + additionally call the CheckValid method: + + if err := ts.CheckValid(); err != nil { + ... // handle error + } + + + Conversion from a Go Time + + The timestamppb.New function can be used to construct a Timestamp message + from a standard Go time.Time value: + + ts := timestamppb.New(t) + ... // make use of ts as a *timestamppb.Timestamp + + In order to construct a Timestamp representing the current time, use Now: + + ts := timestamppb.Now() + ... // make use of ts as a *timestamppb.Timestamp + +` + case genid.File_google_protobuf_duration_proto: + return ` Package durationpb contains generated types for ` + genid.File_google_protobuf_duration_proto + `. + + The Duration message represents a signed span of time. + + + Conversion to a Go Duration + + The AsDuration method can be used to convert a Duration message to a + standard Go time.Duration value: + + d := dur.AsDuration() + ... // make use of d as a time.Duration + + Converting to a time.Duration is a common operation so that the extensive + set of time-based operations provided by the time package can be leveraged. + See https://golang.org/pkg/time for more information. + + The AsDuration method performs the conversion on a best-effort basis. + Durations with denormal values (e.g., nanoseconds beyond -99999999 and + +99999999, inclusive; or seconds and nanoseconds with opposite signs) + are normalized during the conversion to a time.Duration. To manually check for + invalid Duration per the documented limitations in duration.proto, + additionally call the CheckValid method: + + if err := dur.CheckValid(); err != nil { + ... // handle error + } + + Note that the documented limitations in duration.proto does not protect a + Duration from overflowing the representable range of a time.Duration in Go. + The AsDuration method uses saturation arithmetic such that an overflow clamps + the resulting value to the closest representable value (e.g., math.MaxInt64 + for positive overflow and math.MinInt64 for negative overflow). + + + Conversion from a Go Duration + + The durationpb.New function can be used to construct a Duration message + from a standard Go time.Duration value: + + dur := durationpb.New(d) + ... // make use of d as a *durationpb.Duration + +` + case genid.File_google_protobuf_struct_proto: + return ` Package structpb contains generated types for ` + genid.File_google_protobuf_struct_proto + `. 
+ + The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are + used to represent arbitrary JSON. The Value message represents a JSON value, + the Struct message represents a JSON object, and the ListValue message + represents a JSON array. See https://json.org for more information. + + The Value, Struct, and ListValue types have generated MarshalJSON and + UnmarshalJSON methods such that they serialize JSON equivalent to what the + messages themselves represent. Use of these types with the + "google.golang.org/protobuf/encoding/protojson" package + ensures that they will be serialized as their JSON equivalent. + + + Conversion to and from a Go interface + + The standard Go "encoding/json" package has functionality to serialize + arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and + ListValue.AsSlice methods can convert the protobuf message representation into + a form represented by interface{}, map[string]interface{}, and []interface{}. + This form can be used with other packages that operate on such data structures + and also directly with the standard json package. + + In order to convert the interface{}, map[string]interface{}, and []interface{} + forms back as Value, Struct, and ListValue messages, use the NewStruct, + NewList, and NewValue constructor functions. + + + Example usage + + Consider the following example JSON object: + + { + "firstName": "John", + "lastName": "Smith", + "isAlive": true, + "age": 27, + "address": { + "streetAddress": "21 2nd Street", + "city": "New York", + "state": "NY", + "postalCode": "10021-3100" + }, + "phoneNumbers": [ + { + "type": "home", + "number": "212 555-1234" + }, + { + "type": "office", + "number": "646 555-4567" + } + ], + "children": [], + "spouse": null + } + + To construct a Value message representing the above JSON object: + + m, err := structpb.NewValue(map[string]interface{}{ + "firstName": "John", + "lastName": "Smith", + "isAlive": true, + "age": 27, + "address": map[string]interface{}{ + "streetAddress": "21 2nd Street", + "city": "New York", + "state": "NY", + "postalCode": "10021-3100", + }, + "phoneNumbers": []interface{}{ + map[string]interface{}{ + "type": "home", + "number": "212 555-1234", + }, + map[string]interface{}{ + "type": "office", + "number": "646 555-4567", + }, + }, + "children": []interface{}{}, + "spouse": nil, + }) + if err != nil { + ... // handle error + } + ... // make use of m as a *structpb.Value + +` + case genid.File_google_protobuf_field_mask_proto: + return ` Package fieldmaskpb contains generated types for ` + genid.File_google_protobuf_field_mask_proto + `. + + The FieldMask message represents a set of symbolic field paths. + The paths are specific to some target message type, + which is not stored within the FieldMask message itself. + + + Constructing a FieldMask + + The New function is used construct a FieldMask: + + var messageType *descriptorpb.DescriptorProto + fm, err := fieldmaskpb.New(messageType, "field.name", "field.number") + if err != nil { + ... // handle error + } + ... // make use of fm + + The "field.name" and "field.number" paths are valid paths according to the + google.protobuf.DescriptorProto message. Use of a path that does not correlate + to valid fields reachable from DescriptorProto would result in an error. 
+ + Once a FieldMask message has been constructed, + the Append method can be used to insert additional paths to the path set: + + var messageType *descriptorpb.DescriptorProto + if err := fm.Append(messageType, "options"); err != nil { + ... // handle error + } + + + Type checking a FieldMask + + In order to verify that a FieldMask represents a set of fields that are + reachable from some target message type, use the IsValid method: + + var messageType *descriptorpb.DescriptorProto + if fm.IsValid(messageType) { + ... // make use of fm + } + + IsValid needs to be passed the target message type as an input since the + FieldMask message itself does not store the message type that the set of paths + are for. +` + default: + return "" + } +} + +func genMessageKnownFunctions(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + switch m.Desc.FullName() { + case genid.Any_message_fullname: + g.P("// New marshals src into a new Any instance.") + g.P("func New(src ", protoPackage.Ident("Message"), ") (*Any, error) {") + g.P(" dst := new(Any)") + g.P(" if err := dst.MarshalFrom(src); err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" return dst, nil") + g.P("}") + g.P() + + g.P("// MarshalFrom marshals src into dst as the underlying message") + g.P("// using the provided marshal options.") + g.P("//") + g.P("// If no options are specified, call dst.MarshalFrom instead.") + g.P("func MarshalFrom(dst *Any, src ", protoPackage.Ident("Message"), ", opts ", protoPackage.Ident("MarshalOptions"), ") error {") + g.P(" const urlPrefix = \"type.googleapis.com/\"") + g.P(" if src == nil {") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil source message\")") + g.P(" }") + g.P(" b, err := opts.Marshal(src)") + g.P(" if err != nil {") + g.P(" return err") + g.P(" }") + g.P(" dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName())") + g.P(" dst.Value = b") + g.P(" return nil") + g.P("}") + g.P() + + g.P("// UnmarshalTo unmarshals the underlying message from src into dst") + g.P("// using the provided unmarshal options.") + g.P("// It reports an error if dst is not of the right message type.") + g.P("//") + g.P("// If no options are specified, call src.UnmarshalTo instead.") + g.P("func UnmarshalTo(src *Any, dst ", protoPackage.Ident("Message"), ", opts ", protoPackage.Ident("UnmarshalOptions"), ") error {") + g.P(" if src == nil {") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil source message\")") + g.P(" }") + g.P(" if !src.MessageIs(dst) {") + g.P(" got := dst.ProtoReflect().Descriptor().FullName()") + g.P(" want := src.MessageName()") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"mismatched message type: got %q, want %q\", got, want)") + g.P(" }") + g.P(" return opts.Unmarshal(src.GetValue(), dst)") + g.P("}") + g.P() + + g.P("// UnmarshalNew unmarshals the underlying message from src into dst,") + g.P("// which is newly created message using a type resolved from the type URL.") + g.P("// The message type is resolved according to opt.Resolver,") + g.P("// which should implement protoregistry.MessageTypeResolver.") + g.P("// It reports an error if the underlying message type could not be resolved.") + g.P("//") + g.P("// If no options are specified, call src.UnmarshalNew instead.") + g.P("func UnmarshalNew(src *Any, opts ", protoPackage.Ident("UnmarshalOptions"), ") (dst ", protoPackage.Ident("Message"), ", err error) {") + g.P(" if src.GetTypeUrl() == \"\" {") + g.P(" return nil, ", 
protoimplPackage.Ident("X"), ".NewError(\"invalid empty type URL\")") + g.P(" }") + g.P(" if opts.Resolver == nil {") + g.P(" opts.Resolver = ", protoregistryPackage.Ident("GlobalTypes")) + g.P(" }") + g.P(" r, ok := opts.Resolver.(", protoregistryPackage.Ident("MessageTypeResolver"), ")") + g.P(" if !ok {") + g.P(" return nil, ", protoregistryPackage.Ident("NotFound")) + g.P(" }") + g.P(" mt, err := r.FindMessageByURL(src.GetTypeUrl())") + g.P(" if err != nil {") + g.P(" if err == ", protoregistryPackage.Ident("NotFound"), " {") + g.P(" return nil, err") + g.P(" }") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"could not resolve %q: %v\", src.GetTypeUrl(), err)") + g.P(" }") + g.P(" dst = mt.New().Interface()") + g.P(" return dst, opts.Unmarshal(src.GetValue(), dst)") + g.P("}") + g.P() + + g.P("// MessageIs reports whether the underlying message is of the same type as m.") + g.P("func (x *Any) MessageIs(m ", protoPackage.Ident("Message"), ") bool {") + g.P(" if m == nil {") + g.P(" return false") + g.P(" }") + g.P(" url := x.GetTypeUrl()") + g.P(" name := string(m.ProtoReflect().Descriptor().FullName())") + g.P(" if !", stringsPackage.Ident("HasSuffix"), "(url, name) {") + g.P(" return false") + g.P(" }") + g.P(" return len(url) == len(name) || url[len(url)-len(name)-1] == '/'") + g.P("}") + g.P() + + g.P("// MessageName reports the full name of the underlying message,") + g.P("// returning an empty string if invalid.") + g.P("func (x *Any) MessageName() ", protoreflectPackage.Ident("FullName"), " {") + g.P(" url := x.GetTypeUrl()") + g.P(" name := ", protoreflectPackage.Ident("FullName"), "(url)") + g.P(" if i := ", stringsPackage.Ident("LastIndexByte"), "(url, '/'); i >= 0 {") + g.P(" name = name[i+len(\"/\"):]") + g.P(" }") + g.P(" if !name.IsValid() {") + g.P(" return \"\"") + g.P(" }") + g.P(" return name") + g.P("}") + g.P() + + g.P("// MarshalFrom marshals m into x as the underlying message.") + g.P("func (x *Any) MarshalFrom(m ", protoPackage.Ident("Message"), ") error {") + g.P(" return MarshalFrom(x, m, ", protoPackage.Ident("MarshalOptions"), "{})") + g.P("}") + g.P() + + g.P("// UnmarshalTo unmarshals the contents of the underlying message of x into m.") + g.P("// It resets m before performing the unmarshal operation.") + g.P("// It reports an error if m is not of the right message type.") + g.P("func (x *Any) UnmarshalTo(m ", protoPackage.Ident("Message"), ") error {") + g.P(" return UnmarshalTo(x, m, ", protoPackage.Ident("UnmarshalOptions"), "{})") + g.P("}") + g.P() + + g.P("// UnmarshalNew unmarshals the contents of the underlying message of x into") + g.P("// a newly allocated message of the specified type.") + g.P("// It reports an error if the underlying message type could not be resolved.") + g.P("func (x *Any) UnmarshalNew() (", protoPackage.Ident("Message"), ", error) {") + g.P(" return UnmarshalNew(x, ", protoPackage.Ident("UnmarshalOptions"), "{})") + g.P("}") + g.P() + + case genid.Timestamp_message_fullname: + g.P("// Now constructs a new Timestamp from the current time.") + g.P("func Now() *Timestamp {") + g.P(" return New(", timePackage.Ident("Now"), "())") + g.P("}") + g.P() + + g.P("// New constructs a new Timestamp from the provided time.Time.") + g.P("func New(t ", timePackage.Ident("Time"), ") *Timestamp {") + g.P(" return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())}") + g.P("}") + g.P() + + g.P("// AsTime converts x to a time.Time.") + g.P("func (x *Timestamp) AsTime() ", timePackage.Ident("Time"), " {") + g.P(" 
return ", timePackage.Ident("Unix"), "(int64(x.GetSeconds()), int64(x.GetNanos())).UTC()") + g.P("}") + g.P() + + g.P("// IsValid reports whether the timestamp is valid.") + g.P("// It is equivalent to CheckValid == nil.") + g.P("func (x *Timestamp) IsValid() bool {") + g.P(" return x.check() == 0") + g.P("}") + g.P() + + g.P("// CheckValid returns an error if the timestamp is invalid.") + g.P("// In particular, it checks whether the value represents a date that is") + g.P("// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.") + g.P("// An error is reported for a nil Timestamp.") + g.P("func (x *Timestamp) CheckValid() error {") + g.P(" switch x.check() {") + g.P(" case invalidNil:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil Timestamp\")") + g.P(" case invalidUnderflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) before 0001-01-01\", x)") + g.P(" case invalidOverflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) after 9999-12-31\", x)") + g.P(" case invalidNanos:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) has out-of-range nanos\", x)") + g.P(" default:") + g.P(" return nil") + g.P(" }") + g.P("}") + g.P() + + g.P("const (") + g.P(" _ = iota") + g.P(" invalidNil") + g.P(" invalidUnderflow") + g.P(" invalidOverflow") + g.P(" invalidNanos") + g.P(")") + g.P() + + g.P("func (x *Timestamp) check() uint {") + g.P(" const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive") + g.P(" const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" switch {") + g.P(" case x == nil:") + g.P(" return invalidNil") + g.P(" case secs < minTimestamp:") + g.P(" return invalidUnderflow") + g.P(" case secs > maxTimestamp:") + g.P(" return invalidOverflow") + g.P(" case nanos < 0 || nanos >= 1e9:") + g.P(" return invalidNanos") + g.P(" default:") + g.P(" return 0") + g.P(" }") + g.P("}") + g.P() + + case genid.Duration_message_fullname: + g.P("// New constructs a new Duration from the provided time.Duration.") + g.P("func New(d ", timePackage.Ident("Duration"), ") *Duration {") + g.P(" nanos := d.Nanoseconds()") + g.P(" secs := nanos / 1e9") + g.P(" nanos -= secs * 1e9") + g.P(" return &Duration{Seconds: int64(secs), Nanos: int32(nanos)}") + g.P("}") + g.P() + + g.P("// AsDuration converts x to a time.Duration,") + g.P("// returning the closest duration value in the event of overflow.") + g.P("func (x *Duration) AsDuration() ", timePackage.Ident("Duration"), " {") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" d := ", timePackage.Ident("Duration"), "(secs) * ", timePackage.Ident("Second")) + g.P(" overflow := d/", timePackage.Ident("Second"), " != ", timePackage.Ident("Duration"), "(secs)") + g.P(" d += ", timePackage.Ident("Duration"), "(nanos) * ", timePackage.Ident("Nanosecond")) + g.P(" overflow = overflow || (secs < 0 && nanos < 0 && d > 0)") + g.P(" overflow = overflow || (secs > 0 && nanos > 0 && d < 0)") + g.P(" if overflow {") + g.P(" switch {") + g.P(" case secs < 0:") + g.P(" return ", timePackage.Ident("Duration"), "(", mathPackage.Ident("MinInt64"), ")") + g.P(" case secs > 0:") + g.P(" return ", timePackage.Ident("Duration"), "(", mathPackage.Ident("MaxInt64"), ")") + g.P(" }") + g.P(" }") + g.P(" return d") + g.P("}") + g.P() + + g.P("// IsValid 
reports whether the duration is valid.") + g.P("// It is equivalent to CheckValid == nil.") + g.P("func (x *Duration) IsValid() bool {") + g.P(" return x.check() == 0") + g.P("}") + g.P() + + g.P("// CheckValid returns an error if the duration is invalid.") + g.P("// In particular, it checks whether the value is within the range of") + g.P("// -10000 years to +10000 years inclusive.") + g.P("// An error is reported for a nil Duration.") + g.P("func (x *Duration) CheckValid() error {") + g.P(" switch x.check() {") + g.P(" case invalidNil:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil Duration\")") + g.P(" case invalidUnderflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) exceeds -10000 years\", x)") + g.P(" case invalidOverflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) exceeds +10000 years\", x)") + g.P(" case invalidNanosRange:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) has out-of-range nanos\", x)") + g.P(" case invalidNanosSign:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) has seconds and nanos with different signs\", x)") + g.P(" default:") + g.P(" return nil") + g.P(" }") + g.P("}") + g.P() + + g.P("const (") + g.P(" _ = iota") + g.P(" invalidNil") + g.P(" invalidUnderflow") + g.P(" invalidOverflow") + g.P(" invalidNanosRange") + g.P(" invalidNanosSign") + g.P(")") + g.P() + + g.P("func (x *Duration) check() uint {") + g.P(" const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" switch {") + g.P(" case x == nil:") + g.P(" return invalidNil") + g.P(" case secs < -absDuration:") + g.P(" return invalidUnderflow") + g.P(" case secs > +absDuration:") + g.P(" return invalidOverflow") + g.P(" case nanos <= -1e9 || nanos >= +1e9:") + g.P(" return invalidNanosRange") + g.P(" case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0):") + g.P(" return invalidNanosSign") + g.P(" default:") + g.P(" return 0") + g.P(" }") + g.P("}") + g.P() + + case genid.Struct_message_fullname: + g.P("// NewStruct constructs a Struct from a general-purpose Go map.") + g.P("// The map keys must be valid UTF-8.") + g.P("// The map values are converted using NewValue.") + g.P("func NewStruct(v map[string]interface{}) (*Struct, error) {") + g.P(" x := &Struct{Fields: make(map[string]*Value, len(v))}") + g.P(" for k, v := range v {") + g.P(" if !", utf8Package.Ident("ValidString"), "(k) {") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid UTF-8 in string: %q\", k)") + g.P(" }") + g.P(" var err error") + g.P(" x.Fields[k], err = NewValue(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" }") + g.P(" return x, nil") + g.P("}") + g.P() + + g.P("// AsMap converts x to a general-purpose Go map.") + g.P("// The map values are converted by calling Value.AsInterface.") + g.P("func (x *Struct) AsMap() map[string]interface{} {") + g.P(" vs := make(map[string]interface{})") + g.P(" for k, v := range x.GetFields() {") + g.P(" vs[k] = v.AsInterface()") + g.P(" }") + g.P(" return vs") + g.P("}") + g.P() + + g.P("func (x *Struct) MarshalJSON() ([]byte, error) {") + g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *Struct) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case 
genid.ListValue_message_fullname: + g.P("// NewList constructs a ListValue from a general-purpose Go slice.") + g.P("// The slice elements are converted using NewValue.") + g.P("func NewList(v []interface{}) (*ListValue, error) {") + g.P(" x := &ListValue{Values: make([]*Value, len(v))}") + g.P(" for i, v := range v {") + g.P(" var err error") + g.P(" x.Values[i], err = NewValue(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" }") + g.P(" return x, nil") + g.P("}") + g.P() + + g.P("// AsSlice converts x to a general-purpose Go slice.") + g.P("// The slice elements are converted by calling Value.AsInterface.") + g.P("func (x *ListValue) AsSlice() []interface{} {") + g.P(" vs := make([]interface{}, len(x.GetValues()))") + g.P(" for i, v := range x.GetValues() {") + g.P(" vs[i] = v.AsInterface()") + g.P(" }") + g.P(" return vs") + g.P("}") + g.P() + + g.P("func (x *ListValue) MarshalJSON() ([]byte, error) {") + g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *ListValue) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case genid.Value_message_fullname: + g.P("// NewValue constructs a Value from a general-purpose Go interface.") + g.P("//") + g.P("// ╔════════════════════════╤════════════════════════════════════════════╗") + g.P("// ║ Go type │ Conversion ║") + g.P("// ╠════════════════════════╪════════════════════════════════════════════╣") + g.P("// ║ nil │ stored as NullValue ║") + g.P("// ║ bool │ stored as BoolValue ║") + g.P("// ║ int, int32, int64 │ stored as NumberValue ║") + g.P("// ║ uint, uint32, uint64 │ stored as NumberValue ║") + g.P("// ║ float32, float64 │ stored as NumberValue ║") + g.P("// ║ string │ stored as StringValue; must be valid UTF-8 ║") + g.P("// ║ []byte │ stored as StringValue; base64-encoded ║") + g.P("// ║ map[string]interface{} │ stored as StructValue ║") + g.P("// ║ []interface{} │ stored as ListValue ║") + g.P("// ╚════════════════════════╧════════════════════════════════════════════╝") + g.P("//") + g.P("// When converting an int64 or uint64 to a NumberValue, numeric precision loss") + g.P("// is possible since they are stored as a float64.") + g.P("func NewValue(v interface{}) (*Value, error) {") + g.P(" switch v := v.(type) {") + g.P(" case nil:") + g.P(" return NewNullValue(), nil") + g.P(" case bool:") + g.P(" return NewBoolValue(v), nil") + g.P(" case int:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case int32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case int64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case float32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case float64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case string:") + g.P(" if !", utf8Package.Ident("ValidString"), "(v) {") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid UTF-8 in string: %q\", v)") + g.P(" }") + g.P(" return NewStringValue(v), nil") + g.P(" case []byte:") + g.P(" s := ", base64Package.Ident("StdEncoding"), ".EncodeToString(v)") + g.P(" return NewStringValue(s), nil") + g.P(" case map[string]interface{}:") + g.P(" v2, err := NewStruct(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" 
return NewStructValue(v2), nil") + g.P(" case []interface{}:") + g.P(" v2, err := NewList(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" return NewListValue(v2), nil") + g.P(" default:") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid type: %T\", v)") + g.P(" }") + g.P("}") + g.P() + + g.P("// NewNullValue constructs a new null Value.") + g.P("func NewNullValue() *Value {") + g.P(" return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}}") + g.P("}") + g.P() + + g.P("// NewBoolValue constructs a new boolean Value.") + g.P("func NewBoolValue(v bool) *Value {") + g.P(" return &Value{Kind: &Value_BoolValue{BoolValue: v}}") + g.P("}") + g.P() + + g.P("// NewNumberValue constructs a new number Value.") + g.P("func NewNumberValue(v float64) *Value {") + g.P(" return &Value{Kind: &Value_NumberValue{NumberValue: v}}") + g.P("}") + g.P() + + g.P("// NewStringValue constructs a new string Value.") + g.P("func NewStringValue(v string) *Value {") + g.P(" return &Value{Kind: &Value_StringValue{StringValue: v}}") + g.P("}") + g.P() + + g.P("// NewStructValue constructs a new struct Value.") + g.P("func NewStructValue(v *Struct) *Value {") + g.P(" return &Value{Kind: &Value_StructValue{StructValue: v}}") + g.P("}") + g.P() + + g.P("// NewListValue constructs a new list Value.") + g.P("func NewListValue(v *ListValue) *Value {") + g.P(" return &Value{Kind: &Value_ListValue{ListValue: v}}") + g.P("}") + g.P() + + g.P("// AsInterface converts x to a general-purpose Go interface.") + g.P("//") + g.P("// Calling Value.MarshalJSON and \"encoding/json\".Marshal on this output produce") + g.P("// semantically equivalent JSON (assuming no errors occur).") + g.P("//") + g.P("// Floating-point values (i.e., \"NaN\", \"Infinity\", and \"-Infinity\") are") + g.P("// converted as strings to remain compatible with MarshalJSON.") + g.P("func (x *Value) AsInterface() interface{} {") + g.P(" switch v := x.GetKind().(type) {") + g.P(" case *Value_NumberValue:") + g.P(" if v != nil {") + g.P(" switch {") + g.P(" case ", mathPackage.Ident("IsNaN"), "(v.NumberValue):") + g.P(" return \"NaN\"") + g.P(" case ", mathPackage.Ident("IsInf"), "(v.NumberValue, +1):") + g.P(" return \"Infinity\"") + g.P(" case ", mathPackage.Ident("IsInf"), "(v.NumberValue, -1):") + g.P(" return \"-Infinity\"") + g.P(" default:") + g.P(" return v.NumberValue") + g.P(" }") + g.P(" }") + g.P(" case *Value_StringValue:") + g.P(" if v != nil {") + g.P(" return v.StringValue") + g.P(" }") + g.P(" case *Value_BoolValue:") + g.P(" if v != nil {") + g.P(" return v.BoolValue") + g.P(" }") + g.P(" case *Value_StructValue:") + g.P(" if v != nil {") + g.P(" return v.StructValue.AsMap()") + g.P(" }") + g.P(" case *Value_ListValue:") + g.P(" if v != nil {") + g.P(" return v.ListValue.AsSlice()") + g.P(" }") + g.P(" }") + g.P(" return nil") + g.P("}") + g.P() + + g.P("func (x *Value) MarshalJSON() ([]byte, error) {") + g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *Value) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case genid.FieldMask_message_fullname: + g.P("// New constructs a field mask from a list of paths and verifies that") + g.P("// each one is valid according to the specified message type.") + g.P("func New(m ", protoPackage.Ident("Message"), ", paths ...string) (*FieldMask, error) {") + g.P(" x := new(FieldMask)") + g.P(" return x, x.Append(m, paths...)") + 
g.P("}") + g.P() + + g.P("// Union returns the union of all the paths in the input field masks.") + g.P("func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {") + g.P(" var out []string") + g.P(" out = append(out, mx.GetPaths()...)") + g.P(" out = append(out, my.GetPaths()...)") + g.P(" for _, m := range ms {") + g.P(" out = append(out, m.GetPaths()...)") + g.P(" }") + g.P(" return &FieldMask{Paths: normalizePaths(out)}") + g.P("}") + g.P() + + g.P("// Intersect returns the intersection of all the paths in the input field masks.") + g.P("func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {") + g.P(" var ss1, ss2 []string // reused buffers for performance") + g.P(" intersect := func(out, in []string) []string {") + g.P(" ss1 = normalizePaths(append(ss1[:0], in...))") + g.P(" ss2 = normalizePaths(append(ss2[:0], out...))") + g.P(" out = out[:0]") + g.P(" for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); {") + g.P(" switch s1, s2 := ss1[i1], ss2[i2]; {") + g.P(" case hasPathPrefix(s1, s2):") + g.P(" out = append(out, s1)") + g.P(" i1++") + g.P(" case hasPathPrefix(s2, s1):") + g.P(" out = append(out, s2)") + g.P(" i2++") + g.P(" case lessPath(s1, s2):") + g.P(" i1++") + g.P(" case lessPath(s2, s1):") + g.P(" i2++") + g.P(" }") + g.P(" }") + g.P(" return out") + g.P(" }") + g.P() + g.P(" out := Union(mx, my, ms...).GetPaths()") + g.P(" out = intersect(out, mx.GetPaths())") + g.P(" out = intersect(out, my.GetPaths())") + g.P(" for _, m := range ms {") + g.P(" out = intersect(out, m.GetPaths())") + g.P(" }") + g.P(" return &FieldMask{Paths: normalizePaths(out)}") + g.P("}") + g.P() + + g.P("// IsValid reports whether all the paths are syntactically valid and") + g.P("// refer to known fields in the specified message type.") + g.P("// It reports false for a nil FieldMask.") + g.P("func (x *FieldMask) IsValid(m ", protoPackage.Ident("Message"), ") bool {") + g.P(" paths := x.GetPaths()") + g.P(" return x != nil && numValidPaths(m, paths) == len(paths)") + g.P("}") + g.P() + + g.P("// Append appends a list of paths to the mask and verifies that each one") + g.P("// is valid according to the specified message type.") + g.P("// An invalid path is not appended and breaks insertion of subsequent paths.") + g.P("func (x *FieldMask) Append(m ", protoPackage.Ident("Message"), ", paths ...string) error {") + g.P(" numValid := numValidPaths(m, paths)") + g.P(" x.Paths = append(x.Paths, paths[:numValid]...)") + g.P(" paths = paths[numValid:]") + g.P(" if len(paths) > 0 {") + g.P(" name := m.ProtoReflect().Descriptor().FullName()") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid path %q for message %q\", paths[0], name)") + g.P(" }") + g.P(" return nil") + g.P("}") + g.P() + + g.P("func numValidPaths(m ", protoPackage.Ident("Message"), ", paths []string) int {") + g.P(" md0 := m.ProtoReflect().Descriptor()") + g.P(" for i, path := range paths {") + g.P(" md := md0") + g.P(" if !rangeFields(path, func(field string) bool {") + g.P(" // Search the field within the message.") + g.P(" if md == nil {") + g.P(" return false // not within a message") + g.P(" }") + g.P(" fd := md.Fields().ByName(", protoreflectPackage.Ident("Name"), "(field))") + g.P(" // The real field name of a group is the message name.") + g.P(" if fd == nil {") + g.P(" gd := md.Fields().ByName(", protoreflectPackage.Ident("Name"), "(", stringsPackage.Ident("ToLower"), "(field)))") + g.P(" if gd != nil && gd.Kind() == ", protoreflectPackage.Ident("GroupKind"), " && 
string(gd.Message().Name()) == field {") + g.P(" fd = gd") + g.P(" }") + g.P(" } else if fd.Kind() == ", protoreflectPackage.Ident("GroupKind"), " && string(fd.Message().Name()) != field {") + g.P(" fd = nil") + g.P(" }") + g.P(" if fd == nil {") + g.P(" return false // message does not have this field") + g.P(" }") + g.P() + g.P(" // Identify the next message to search within.") + g.P(" md = fd.Message() // may be nil") + g.P(" if fd.IsMap() {") + g.P(" md = fd.MapValue().Message() // may be nil") + g.P(" }") + g.P(" return true") + g.P(" }) {") + g.P(" return i") + g.P(" }") + g.P(" }") + g.P(" return len(paths)") + g.P("}") + g.P() + + g.P("// Normalize converts the mask to its canonical form where all paths are sorted") + g.P("// and redundant paths are removed.") + g.P("func (x *FieldMask) Normalize() {") + g.P(" x.Paths = normalizePaths(x.Paths)") + g.P("}") + g.P() + g.P("func normalizePaths(paths []string) []string {") + g.P(" ", sortPackage.Ident("Slice"), "(paths, func(i, j int) bool {") + g.P(" return lessPath(paths[i], paths[j])") + g.P(" })") + g.P() + g.P(" // Elide any path that is a prefix match on the previous.") + g.P(" out := paths[:0]") + g.P(" for _, path := range paths {") + g.P(" if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) {") + g.P(" continue") + g.P(" }") + g.P(" out = append(out, path)") + g.P(" }") + g.P(" return out") + g.P("}") + g.P() + + g.P("// hasPathPrefix is like strings.HasPrefix, but further checks for either") + g.P("// an exact match or that the prefix is delimited by a dot.") + g.P("func hasPathPrefix(path, prefix string) bool {") + g.P(" return ", stringsPackage.Ident("HasPrefix"), "(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')") + g.P("}") + g.P() + + g.P("// lessPath is a lexicographical comparison where dot is specially treated") + g.P("// as the smallest symbol.") + g.P("func lessPath(x, y string) bool {") + g.P(" for i := 0; i < len(x) && i < len(y); i++ {") + g.P(" if x[i] != y[i] {") + g.P(" return (x[i] - '.') < (y[i] - '.')") + g.P(" }") + g.P(" }") + g.P(" return len(x) < len(y)") + g.P("}") + g.P() + + g.P("// rangeFields is like strings.Split(path, \".\"), but avoids allocations by") + g.P("// iterating over each field in place and calling an iterator function.") + g.P("func rangeFields(path string, f func(field string) bool) bool {") + g.P(" for {") + g.P(" var field string") + g.P(" if i := ", stringsPackage.Ident("IndexByte"), "(path, '.'); i >= 0 {") + g.P(" field, path = path[:i], path[i:]") + g.P(" } else {") + g.P(" field, path = path, \"\"") + g.P(" }") + g.P() + g.P(" if !f(field) {") + g.P(" return false") + g.P(" }") + g.P() + g.P(" if len(path) == 0 {") + g.P(" return true") + g.P(" }") + g.P(" path = ", stringsPackage.Ident("TrimPrefix"), "(path, \".\")") + g.P(" }") + g.P("}") + g.P() + + case genid.BoolValue_message_fullname, + genid.Int32Value_message_fullname, + genid.Int64Value_message_fullname, + genid.UInt32Value_message_fullname, + genid.UInt64Value_message_fullname, + genid.FloatValue_message_fullname, + genid.DoubleValue_message_fullname, + genid.StringValue_message_fullname, + genid.BytesValue_message_fullname: + funcName := strings.TrimSuffix(m.GoIdent.GoName, "Value") + typeName := strings.ToLower(funcName) + switch typeName { + case "float": + typeName = "float32" + case "double": + typeName = "float64" + case "bytes": + typeName = "[]byte" + } + + g.P("// ", funcName, " stores v in a new ", m.GoIdent, " and returns a pointer to it.") + g.P("func ", funcName, "(v ",
typeName, ") *", m.GoIdent, " {") + g.P(" return &", m.GoIdent, "{Value: v}") + g.P("}") + g.P() + } +} diff --git a/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go index b3f7a6ee1b0..3892d0583eb 100644 --- a/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go +++ b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go @@ -30,7 +30,7 @@ import ( "strings" "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protodesc" @@ -625,9 +625,9 @@ type Enum struct { func newEnum(gen *Plugin, f *File, parent *Message, desc protoreflect.EnumDescriptor) *Enum { var loc Location if parent != nil { - loc = parent.Location.appendPath(fieldnum.DescriptorProto_EnumType, int32(desc.Index())) + loc = parent.Location.appendPath(int32(genid.DescriptorProto_EnumType_field_number), int32(desc.Index())) } else { - loc = f.location(fieldnum.FileDescriptorProto_EnumType, int32(desc.Index())) + loc = f.location(int32(genid.FileDescriptorProto_EnumType_field_number), int32(desc.Index())) } enum := &Enum{ Desc: desc, @@ -664,7 +664,7 @@ func newEnumValue(gen *Plugin, f *File, message *Message, enum *Enum, desc proto parentIdent = message.GoIdent } name := parentIdent.GoName + "_" + string(desc.Name()) - loc := enum.Location.appendPath(fieldnum.EnumDescriptorProto_Value, int32(desc.Index())) + loc := enum.Location.appendPath(int32(genid.EnumDescriptorProto_Value_field_number), int32(desc.Index())) return &EnumValue{ Desc: desc, GoIdent: f.GoImportPath.Ident(name), @@ -694,9 +694,9 @@ type Message struct { func newMessage(gen *Plugin, f *File, parent *Message, desc protoreflect.MessageDescriptor) *Message { var loc Location if parent != nil { - loc = parent.Location.appendPath(fieldnum.DescriptorProto_NestedType, int32(desc.Index())) + loc = parent.Location.appendPath(int32(genid.DescriptorProto_NestedType_field_number), int32(desc.Index())) } else { - loc = f.location(fieldnum.FileDescriptorProto_MessageType, int32(desc.Index())) + loc = f.location(int32(genid.FileDescriptorProto_MessageType_field_number), int32(desc.Index())) } message := &Message{ Desc: desc, @@ -852,11 +852,11 @@ func newField(gen *Plugin, f *File, message *Message, desc protoreflect.FieldDes var loc Location switch { case desc.IsExtension() && message == nil: - loc = f.location(fieldnum.FileDescriptorProto_Extension, int32(desc.Index())) + loc = f.location(int32(genid.FileDescriptorProto_Extension_field_number), int32(desc.Index())) case desc.IsExtension() && message != nil: - loc = message.Location.appendPath(fieldnum.DescriptorProto_Extension, int32(desc.Index())) + loc = message.Location.appendPath(int32(genid.DescriptorProto_Extension_field_number), int32(desc.Index())) default: - loc = message.Location.appendPath(fieldnum.DescriptorProto_Field, int32(desc.Index())) + loc = message.Location.appendPath(int32(genid.DescriptorProto_Field_field_number), int32(desc.Index())) } camelCased := strs.GoCamelCase(string(desc.Name())) var parentPrefix string @@ -927,7 +927,7 @@ type Oneof struct { } func newOneof(gen *Plugin, f *File, message *Message, desc protoreflect.OneofDescriptor) *Oneof { - loc := message.Location.appendPath(fieldnum.DescriptorProto_OneofDecl, int32(desc.Index())) + loc := 
message.Location.appendPath(int32(genid.DescriptorProto_OneofDecl_field_number), int32(desc.Index())) camelCased := strs.GoCamelCase(string(desc.Name())) parentPrefix := message.GoIdent.GoName + "_" return &Oneof{ @@ -959,7 +959,7 @@ type Service struct { } func newService(gen *Plugin, f *File, desc protoreflect.ServiceDescriptor) *Service { - loc := f.location(fieldnum.FileDescriptorProto_Service, int32(desc.Index())) + loc := f.location(int32(genid.FileDescriptorProto_Service_field_number), int32(desc.Index())) service := &Service{ Desc: desc, GoName: strs.GoCamelCase(string(desc.Name())), @@ -988,7 +988,7 @@ type Method struct { } func newMethod(gen *Plugin, f *File, service *Service, desc protoreflect.MethodDescriptor) *Method { - loc := service.Location.appendPath(fieldnum.ServiceDescriptorProto_Method, int32(desc.Index())) + loc := service.Location.appendPath(int32(genid.ServiceDescriptorProto_Method_field_number), int32(desc.Index())) method := &Method{ Desc: desc, GoName: strs.GoCamelCase(string(desc.Name())), diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go new file mode 100644 index 00000000000..9bf4e8c1763 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -0,0 +1,691 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + "math" + "strconv" + "strings" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given proto.Message. +func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable JSON format parser. +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // If AllowPartial is set, input for messages that will result in missing + // required fields will not return an error. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } +} + +// Unmarshal reads the given []byte and populates the given proto.Message using +// options in UnmarshalOptions object. It will clear the message first before +// setting the fields. If it returns an error, the given message may be +// partially set. +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + return o.unmarshal(b, m) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. 
+func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + dec := decoder{json.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + + // Check for EOF. + tok, err := dec.Read() + if err != nil { + return err + } + if tok.Kind() != json.EOF { + return dec.unexpectedTokenError(tok) + } + + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *json.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. +func (d decoder) unexpectedTokenError(tok json.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. +func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unmarshalMessage unmarshals a message into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error { + if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { + return unmarshal(d, m) + } + + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + if err := d.unmarshalFields(m, skipTypeURL); err != nil { + return err + } + + return nil +} + +// unmarshalFields unmarshals the fields into the given protoreflect.Message. +func (d decoder) unmarshalFields(m pref.Message, skipTypeURL bool) error { + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + return nil + case json.Name: + // Continue below. + } + + name := tok.Name() + // Unmarshaling a non-custom embedded message in Any will contain the + // JSON field "@type" which should be skipped because it is not a field + // of the embedded message, but simply an artifact of the Any format. + if skipTypeURL && name == "@type" { + d.Read() + continue + } + + // Get the FieldDescriptor. + var fd pref.FieldDescriptor + if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { + // Only extension names are in [name] format. 
+ extName := pref.FullName(name[1 : len(name)-1]) + extType, err := d.findExtension(extName) + if err != nil && err != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) + } + if extType != nil { + fd = extType.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } + } else { + // The name can either be the JSON name or the proto field name. + fd = fieldDescs.ByJSONName(name) + if fd == nil { + fd = fieldDescs.ByName(pref.Name(name)) + if fd == nil { + // The proto name of a group field is in all lowercase, + // while the textual field name is the group message name. + gd := fieldDescs.ByName(pref.Name(strings.ToLower(name))) + if gd != nil && gd.Kind() == pref.GroupKind && gd.Message().Name() == pref.Name(name) { + fd = gd + } + } else if fd.Kind() == pref.GroupKind && fd.Message().Name() != pref.Name(name) { + fd = nil // reset since field name is actually the message name + } + } + } + if flags.ProtoLegacy { + if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { + fd = nil // reset since the weak reference is not linked in + } + } + + if fd == nil { + // Field is unknown. + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + + // Do not allow duplicate fields. + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "duplicate field %v", tok.RawString()) + } + seenNums.Set(num) + + // No need to set values for JSON null unless the field type is + // google.protobuf.Value or google.protobuf.NullValue. + if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) { + d.Read() + continue + } + + switch { + case fd.IsList(): + list := m.Mutable(fd).List() + if err := d.unmarshalList(list, fd); err != nil { + return err + } + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(mmap, fd); err != nil { + return err + } + default: + // If field is a oneof, check if it has already been set. + if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + // Required or optional fields. + if err := d.unmarshalSingular(m, fd); err != nil { + return err + } + } + } +} + +// findExtension returns protoreflect.ExtensionType from the resolver if found. +func (d decoder) findExtension(xtName pref.FullName) (pref.ExtensionType, error) { + xt, err := d.opts.Resolver.FindExtensionByName(xtName) + if err == nil { + return xt, nil + } + return messageset.FindMessageSetExtension(d.opts.Resolver, xtName) +} + +func isKnownValue(fd pref.FieldDescriptor) bool { + md := fd.Message() + return md != nil && md.FullName() == genid.Value_message_fullname +} + +func isNullValue(fd pref.FieldDescriptor) bool { + ed := fd.Enum() + return ed != nil && ed.FullName() == genid.NullValue_enum_fullname +} + +// unmarshalSingular unmarshals to the non-repeated field specified +// by the given FieldDescriptor. 
+func (d decoder) unmarshalSingular(m pref.Message, fd pref.FieldDescriptor) error { + var val pref.Value + var err error + switch fd.Kind() { + case pref.MessageKind, pref.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), false) + default: + val, err = d.unmarshalScalar(fd) + } + + if err != nil { + return err + } + m.Set(fd, val) + return nil +} + +// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by +// the given FieldDescriptor. +func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { + const b32 int = 32 + const b64 int = 64 + + tok, err := d.Read() + if err != nil { + return pref.Value{}, err + } + + kind := fd.Kind() + switch kind { + case pref.BoolKind: + if tok.Kind() == json.Bool { + return pref.ValueOfBool(tok.Bool()), nil + } + + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if v, ok := unmarshalInt(tok, b32); ok { + return v, nil + } + + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if v, ok := unmarshalInt(tok, b64); ok { + return v, nil + } + + case pref.Uint32Kind, pref.Fixed32Kind: + if v, ok := unmarshalUint(tok, b32); ok { + return v, nil + } + + case pref.Uint64Kind, pref.Fixed64Kind: + if v, ok := unmarshalUint(tok, b64); ok { + return v, nil + } + + case pref.FloatKind: + if v, ok := unmarshalFloat(tok, b32); ok { + return v, nil + } + + case pref.DoubleKind: + if v, ok := unmarshalFloat(tok, b64); ok { + return v, nil + } + + case pref.StringKind: + if tok.Kind() == json.String { + return pref.ValueOfString(tok.ParsedString()), nil + } + + case pref.BytesKind: + if v, ok := unmarshalBytes(tok); ok { + return v, nil + } + + case pref.EnumKind: + if v, ok := unmarshalEnum(tok, fd); ok { + return v, nil + } + + default: + panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) + } + + return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) +} + +func unmarshalInt(tok json.Token, bitSize int) (pref.Value, bool) { + switch tok.Kind() { + case json.Number: + return getInt(tok, bitSize) + + case json.String: + // Decode number from string. + s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return pref.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return pref.Value{}, false + } + return getInt(tok, bitSize) + } + return pref.Value{}, false +} + +func getInt(tok json.Token, bitSize int) (pref.Value, bool) { + n, ok := tok.Int(bitSize) + if !ok { + return pref.Value{}, false + } + if bitSize == 32 { + return pref.ValueOfInt32(int32(n)), true + } + return pref.ValueOfInt64(n), true +} + +func unmarshalUint(tok json.Token, bitSize int) (pref.Value, bool) { + switch tok.Kind() { + case json.Number: + return getUint(tok, bitSize) + + case json.String: + // Decode number from string. 
+ s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return pref.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return pref.Value{}, false + } + return getUint(tok, bitSize) + } + return pref.Value{}, false +} + +func getUint(tok json.Token, bitSize int) (pref.Value, bool) { + n, ok := tok.Uint(bitSize) + if !ok { + return pref.Value{}, false + } + if bitSize == 32 { + return pref.ValueOfUint32(uint32(n)), true + } + return pref.ValueOfUint64(n), true +} + +func unmarshalFloat(tok json.Token, bitSize int) (pref.Value, bool) { + switch tok.Kind() { + case json.Number: + return getFloat(tok, bitSize) + + case json.String: + s := tok.ParsedString() + switch s { + case "NaN": + if bitSize == 32 { + return pref.ValueOfFloat32(float32(math.NaN())), true + } + return pref.ValueOfFloat64(math.NaN()), true + case "Infinity": + if bitSize == 32 { + return pref.ValueOfFloat32(float32(math.Inf(+1))), true + } + return pref.ValueOfFloat64(math.Inf(+1)), true + case "-Infinity": + if bitSize == 32 { + return pref.ValueOfFloat32(float32(math.Inf(-1))), true + } + return pref.ValueOfFloat64(math.Inf(-1)), true + } + + // Decode number from string. + if len(s) != len(strings.TrimSpace(s)) { + return pref.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return pref.Value{}, false + } + return getFloat(tok, bitSize) + } + return pref.Value{}, false +} + +func getFloat(tok json.Token, bitSize int) (pref.Value, bool) { + n, ok := tok.Float(bitSize) + if !ok { + return pref.Value{}, false + } + if bitSize == 32 { + return pref.ValueOfFloat32(float32(n)), true + } + return pref.ValueOfFloat64(n), true +} + +func unmarshalBytes(tok json.Token) (pref.Value, bool) { + if tok.Kind() != json.String { + return pref.Value{}, false + } + + s := tok.ParsedString() + enc := base64.StdEncoding + if strings.ContainsAny(s, "-_") { + enc = base64.URLEncoding + } + if len(s)%4 != 0 { + enc = enc.WithPadding(base64.NoPadding) + } + b, err := enc.DecodeString(s) + if err != nil { + return pref.Value{}, false + } + return pref.ValueOfBytes(b), true +} + +func unmarshalEnum(tok json.Token, fd pref.FieldDescriptor) (pref.Value, bool) { + switch tok.Kind() { + case json.String: + // Lookup EnumNumber based on name. + s := tok.ParsedString() + if enumVal := fd.Enum().Values().ByName(pref.Name(s)); enumVal != nil { + return pref.ValueOfEnum(enumVal.Number()), true + } + + case json.Number: + if n, ok := tok.Int(32); ok { + return pref.ValueOfEnum(pref.EnumNumber(n)), true + } + + case json.Null: + // This is only valid for google.protobuf.NullValue. 
+ if isNullValue(fd) { + return pref.ValueOfEnum(0), true + } + } + + return pref.Value{}, false +} + +func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ArrayOpen { + return d.unexpectedTokenError(tok) + } + + switch fd.Kind() { + case pref.MessageKind, pref.GroupKind: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val := list.NewElement() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return err + } + list.Append(val) + } + default: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(val) + } + } + + return nil +} + +func (d decoder) unmarshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside the for loop + // below. + var unmarshalMapValue func() (pref.Value, error) + switch fd.MapValue().Kind() { + case pref.MessageKind, pref.GroupKind: + unmarshalMapValue = func() (pref.Value, error) { + val := mmap.NewValue() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return pref.Value{}, err + } + return val, nil + } + default: + unmarshalMapValue = func() (pref.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + break Loop + case json.Name: + // Continue. + } + + // Unmarshal field name. + pkey, err := d.unmarshalMapKey(tok, fd.MapKey()) + if err != nil { + return err + } + + // Check for duplicate field name. + if mmap.Has(pkey) { + return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString()) + } + + // Read and unmarshal field value. + pval, err := unmarshalMapValue() + if err != nil { + return err + } + + mmap.Set(pkey, pval) + } + + return nil +} + +// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey. +// A map key type is any integral or string type. 
+func (d decoder) unmarshalMapKey(tok json.Token, fd pref.FieldDescriptor) (pref.MapKey, error) { + const b32 = 32 + const b64 = 64 + const base10 = 10 + + name := tok.Name() + kind := fd.Kind() + switch kind { + case pref.StringKind: + return pref.ValueOfString(name).MapKey(), nil + + case pref.BoolKind: + switch name { + case "true": + return pref.ValueOfBool(true).MapKey(), nil + case "false": + return pref.ValueOfBool(false).MapKey(), nil + } + + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if n, err := strconv.ParseInt(name, base10, b32); err == nil { + return pref.ValueOfInt32(int32(n)).MapKey(), nil + } + + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if n, err := strconv.ParseInt(name, base10, b64); err == nil { + return pref.ValueOfInt64(int64(n)).MapKey(), nil + } + + case pref.Uint32Kind, pref.Fixed32Kind: + if n, err := strconv.ParseUint(name, base10, b32); err == nil { + return pref.ValueOfUint32(uint32(n)).MapKey(), nil + } + + case pref.Uint64Kind, pref.Fixed64Kind: + if n, err := strconv.ParseUint(name, base10, b64); err == nil { + return pref.ValueOfUint64(uint64(n)).MapKey(), nil + } + + default: + panic(fmt.Sprintf("invalid kind for map key: %v", kind)) + } + + return pref.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go new file mode 100644 index 00000000000..00ea2fecfb7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protojson marshals and unmarshals protocol buffer messages as JSON +// format. It follows the guide at +// https://developers.google.com/protocol-buffers/docs/proto3#json. +// +// This package produces a different output than the standard "encoding/json" +// package, which does not operate correctly on protocol buffer messages. +package protojson diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go new file mode 100644 index 00000000000..7d619330081 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -0,0 +1,402 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + "sort" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. +// This function is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. 
+func Format(m proto.Message) string { + return MarshalOptions{Multiline: true}.Format(m) +} + +// Marshal writes the given proto.Message in JSON format using default options. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func Marshal(m proto.Message) ([]byte, error) { + return MarshalOptions{}.Marshal(m) +} + +// MarshalOptions is a configurable JSON format marshaler. +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Multiline specifies whether the marshaler should format the output in + // indented-form with every textual element on a new line. + // If Indent is an empty string, then an arbitrary indent is chosen. + Multiline bool + + // Indent specifies the set of indentation characters to use in a multiline + // formatted output such that every entry is preceded by Indent and + // terminated by a newline. If non-empty, then Multiline is treated as true. + // Indent can only be composed of space or tab characters. + Indent string + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return error if there are any missing required fields. + AllowPartial bool + + // UseProtoNames uses proto field name instead of lowerCamelCase name in JSON + // field names. + UseProtoNames bool + + // UseEnumNumbers emits enum values as numbers. + UseEnumNumbers bool + + // EmitUnpopulated specifies whether to emit unpopulated fields. It does not + // emit unpopulated oneof fields or unpopulated extension fields. + // The JSON value emitted for unpopulated fields are as follows: + // ╔═══════╤════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════╣ + // ║ false │ proto3 boolean fields ║ + // ║ 0 │ proto3 numeric fields ║ + // ║ "" │ proto3 string/bytes fields ║ + // ║ null │ proto2 scalar fields ║ + // ║ null │ message fields ║ + // ║ [] │ list fields ║ + // ║ {} │ map fields ║ + // ╚═══════╧════════════════════════════╝ + EmitUnpopulated bool + + // Resolver is used for looking up types when expanding google.protobuf.Any + // messages. If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.ExtensionTypeResolver + protoregistry.MessageTypeResolver + } +} + +// Format formats the message as a string. +// This method is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func (o MarshalOptions) Format(m proto.Message) string { + if m == nil || !m.ProtoReflect().IsValid() { + return "" // invalid syntax, but okay since this is for debugging + } + o.AllowPartial = true + b, _ := o.Marshal(m) + return string(b) +} + +// Marshal marshals the given proto.Message in the JSON format using options in +// MarshalOptions. Do not depend on the output being stable. It may change over +// time across different versions of the program. +func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { + return o.marshal(m) +} + +// marshal is a centralized function that all marshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. 
+func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { + if o.Multiline && o.Indent == "" { + o.Indent = defaultIndent + } + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + internalEnc, err := json.NewEncoder(o.Indent) + if err != nil { + return nil, err + } + + // Treat nil message interface as an empty message, + // in which case the output in an empty JSON object. + if m == nil { + return []byte("{}"), nil + } + + enc := encoder{internalEnc, o} + if err := enc.marshalMessage(m.ProtoReflect()); err != nil { + return nil, err + } + if o.AllowPartial { + return enc.Bytes(), nil + } + return enc.Bytes(), proto.CheckInitialized(m) +} + +type encoder struct { + *json.Encoder + opts MarshalOptions +} + +// marshalMessage marshals the given protoreflect.Message. +func (e encoder) marshalMessage(m pref.Message) error { + if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil { + return marshal(e, m) + } + + e.StartObject() + defer e.EndObject() + if err := e.marshalFields(m); err != nil { + return err + } + + return nil +} + +// marshalFields marshals the fields in the given protoreflect.Message. +func (e encoder) marshalFields(m pref.Message) error { + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + // Marshal out known fields. + fieldDescs := messageDesc.Fields() + for i := 0; i < fieldDescs.Len(); { + fd := fieldDescs.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + if fd == nil { + continue // unpopulated oneofs are not affected by EmitUnpopulated + } + } else { + i++ + } + + val := m.Get(fd) + if !m.Has(fd) { + if !e.opts.EmitUnpopulated { + continue + } + isProto2Scalar := fd.Syntax() == pref.Proto2 && fd.Default().IsValid() + isSingularMessage := fd.Cardinality() != pref.Repeated && fd.Message() != nil + if isProto2Scalar || isSingularMessage { + // Use invalid value to emit null. + val = pref.Value{} + } + } + + name := fd.JSONName() + if e.opts.UseProtoNames { + name = string(fd.Name()) + // Use type name for group field name. + if fd.Kind() == pref.GroupKind { + name = string(fd.Message().Name()) + } + } + if err := e.WriteName(name); err != nil { + return err + } + if err := e.marshalValue(val, fd); err != nil { + return err + } + } + + // Marshal out extensions. + if err := e.marshalExtensions(m); err != nil { + return err + } + return nil +} + +// marshalValue marshals the given protoreflect.Value. +func (e encoder) marshalValue(val pref.Value, fd pref.FieldDescriptor) error { + switch { + case fd.IsList(): + return e.marshalList(val.List(), fd) + case fd.IsMap(): + return e.marshalMap(val.Map(), fd) + default: + return e.marshalSingular(val, fd) + } +} + +// marshalSingular marshals the given non-repeated field value. This includes +// all scalar types, enums, messages, and groups. 
+func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { + if !val.IsValid() { + e.WriteNull() + return nil + } + + switch kind := fd.Kind(); kind { + case pref.BoolKind: + e.WriteBool(val.Bool()) + + case pref.StringKind: + if e.WriteString(val.String()) != nil { + return errors.InvalidUTF8(string(fd.FullName())) + } + + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + e.WriteInt(val.Int()) + + case pref.Uint32Kind, pref.Fixed32Kind: + e.WriteUint(val.Uint()) + + case pref.Int64Kind, pref.Sint64Kind, pref.Uint64Kind, + pref.Sfixed64Kind, pref.Fixed64Kind: + // 64-bit integers are written out as JSON string. + e.WriteString(val.String()) + + case pref.FloatKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 32) + + case pref.DoubleKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 64) + + case pref.BytesKind: + e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes())) + + case pref.EnumKind: + if fd.Enum().FullName() == genid.NullValue_enum_fullname { + e.WriteNull() + } else { + desc := fd.Enum().Values().ByNumber(val.Enum()) + if e.opts.UseEnumNumbers || desc == nil { + e.WriteInt(int64(val.Enum())) + } else { + e.WriteString(string(desc.Name())) + } + } + + case pref.MessageKind, pref.GroupKind: + if err := e.marshalMessage(val.Message()); err != nil { + return err + } + + default: + panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) + } + return nil +} + +// marshalList marshals the given protoreflect.List. +func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error { + e.StartArray() + defer e.EndArray() + + for i := 0; i < list.Len(); i++ { + item := list.Get(i) + if err := e.marshalSingular(item, fd); err != nil { + return err + } + } + return nil +} + +type mapEntry struct { + key pref.MapKey + value pref.Value +} + +// marshalMap marshals given protoreflect.Map. +func (e encoder) marshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { + e.StartObject() + defer e.EndObject() + + // Get a sorted list based on keyType first. + entries := make([]mapEntry, 0, mmap.Len()) + mmap.Range(func(key pref.MapKey, val pref.Value) bool { + entries = append(entries, mapEntry{key: key, value: val}) + return true + }) + sortMap(fd.MapKey().Kind(), entries) + + // Write out sorted list. + for _, entry := range entries { + if err := e.WriteName(entry.key.String()); err != nil { + return err + } + if err := e.marshalSingular(entry.value, fd.MapValue()); err != nil { + return err + } + } + return nil +} + +// sortMap orders list based on value of key field for deterministic ordering. +func sortMap(keyKind pref.Kind, values []mapEntry) { + sort.Slice(values, func(i, j int) bool { + switch keyKind { + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, + pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + return values[i].key.Int() < values[j].key.Int() + + case pref.Uint32Kind, pref.Fixed32Kind, + pref.Uint64Kind, pref.Fixed64Kind: + return values[i].key.Uint() < values[j].key.Uint() + } + return values[i].key.String() < values[j].key.String() + }) +} + +// marshalExtensions marshals extension fields. +func (e encoder) marshalExtensions(m pref.Message) error { + type entry struct { + key string + value pref.Value + desc pref.FieldDescriptor + } + + // Get a sorted list based on field key first. 
+ var entries []entry + m.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { + if !fd.IsExtension() { + return true + } + + // For MessageSet extensions, the name used is the parent message. + name := fd.FullName() + if messageset.IsMessageSetExtension(fd) { + name = name.Parent() + } + + // Use [name] format for JSON field name. + entries = append(entries, entry{ + key: string(name), + value: v, + desc: fd, + }) + return true + }) + + // Sort extensions lexicographically. + sort.Slice(entries, func(i, j int) bool { + return entries[i].key < entries[j].key + }) + + // Write out sorted list. + for _, entry := range entries { + // JSON field name is the proto field name enclosed in [], similar to + // textproto. This is consistent with Go v1 lib. C++ lib v3.7.0 does not + // marshal out extension fields. + if err := e.WriteName("[" + entry.key + "]"); err != nil { + return err + } + if err := e.marshalValue(entry.value, entry.desc); err != nil { + return err + } + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go new file mode 100644 index 00000000000..def7377c78b --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -0,0 +1,885 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "time" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type marshalFunc func(encoder, pref.Message) error + +// wellKnownTypeMarshaler returns a marshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. +func wellKnownTypeMarshaler(name pref.FullName) marshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return encoder.marshalAny + case genid.Timestamp_message_name: + return encoder.marshalTimestamp + case genid.Duration_message_name: + return encoder.marshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return encoder.marshalWrapperType + case genid.Struct_message_name: + return encoder.marshalStruct + case genid.ListValue_message_name: + return encoder.marshalListValue + case genid.Value_message_name: + return encoder.marshalKnownValue + case genid.FieldMask_message_name: + return encoder.marshalFieldMask + case genid.Empty_message_name: + return encoder.marshalEmpty + } + } + return nil +} + +type unmarshalFunc func(decoder, pref.Message) error + +// wellKnownTypeUnmarshaler returns a unmarshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. 
+func wellKnownTypeUnmarshaler(name pref.FullName) unmarshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return decoder.unmarshalAny + case genid.Timestamp_message_name: + return decoder.unmarshalTimestamp + case genid.Duration_message_name: + return decoder.unmarshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return decoder.unmarshalWrapperType + case genid.Struct_message_name: + return decoder.unmarshalStruct + case genid.ListValue_message_name: + return decoder.unmarshalListValue + case genid.Value_message_name: + return decoder.unmarshalKnownValue + case genid.FieldMask_message_name: + return decoder.unmarshalFieldMask + case genid.Empty_message_name: + return decoder.unmarshalEmpty + } + } + return nil +} + +// The JSON representation of an Any message uses the regular representation of +// the deserialized, embedded message, with an additional field `@type` which +// contains the type URL. If the embedded message type is well-known and has a +// custom JSON representation, that representation will be embedded adding a +// field `value` which holds the custom JSON in addition to the `@type` field. + +func (e encoder) marshalAny(m pref.Message) error { + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + // Start writing the JSON object. + e.StartObject() + defer e.EndObject() + + if !m.Has(fdType) { + if !m.Has(fdValue) { + // If message is empty, marshal out empty JSON object. + return nil + } else { + // Return error if type_url field is not set, but value is set. + return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name) + } + } + + typeVal := m.Get(fdType) + valueVal := m.Get(fdValue) + + // Marshal out @type field. + typeURL := typeVal.String() + e.WriteName("@type") + if err := e.WriteString(typeURL); err != nil { + return err + } + + // Resolve the type in order to unmarshal value field. + emt, err := e.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err) + } + + em := emt.New() + err = proto.UnmarshalOptions{ + AllowPartial: true, // never check required fields inside an Any + Resolver: e.opts.Resolver, + }.Unmarshal(valueVal.Bytes(), em.Interface()) + if err != nil { + return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err) + } + + // If type of value has custom JSON encoding, marshal out a field "value" + // with corresponding custom JSON encoding of the embedded message as a + // field. + if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil { + e.WriteName("value") + return marshal(e, em) + } + + // Else, marshal out the embedded message's fields in this Any object. + if err := e.marshalFields(em); err != nil { + return err + } + + return nil +} + +func (d decoder) unmarshalAny(m pref.Message) error { + // Peek to check for json.ObjectOpen to avoid advancing a read. 
+ start, err := d.Peek() + if err != nil { + return err + } + if start.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(start) + } + + // Use another decoder to parse the unread bytes for @type field. This + // avoids advancing a read from current decoder because the current JSON + // object may contain the fields of the embedded type. + dec := decoder{d.Clone(), UnmarshalOptions{}} + tok, err := findTypeURL(dec) + switch err { + case errEmptyObject: + // An empty JSON object translates to an empty Any message. + d.Read() // Read json.ObjectOpen. + d.Read() // Read json.ObjectClose. + return nil + + case errMissingType: + if d.opts.DiscardUnknown { + // Treat all fields as unknowns, similar to an empty object. + return d.skipJSONValue() + } + // Use start.Pos() for line position. + return d.newError(start.Pos(), err.Error()) + + default: + if err != nil { + return err + } + } + + typeURL := tok.ParsedString() + emt, err := d.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err) + } + + // Create new message for the embedded message type and unmarshal into it. + em := emt.New() + if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil { + // If embedded message is a custom type, + // unmarshal the JSON "value" field into it. + if err := d.unmarshalAnyValue(unmarshal, em); err != nil { + return err + } + } else { + // Else unmarshal the current JSON object into it. + if err := d.unmarshalMessage(em, true); err != nil { + return err + } + } + // Serialize the embedded message and assign the resulting bytes to the + // proto value field. + b, err := proto.MarshalOptions{ + AllowPartial: true, // No need to check required fields inside an Any. + Deterministic: true, + }.Marshal(em.Interface()) + if err != nil { + return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err) + } + + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + m.Set(fdType, pref.ValueOfString(typeURL)) + m.Set(fdValue, pref.ValueOfBytes(b)) + return nil +} + +var errEmptyObject = fmt.Errorf(`empty object`) +var errMissingType = fmt.Errorf(`missing "@type" field`) + +// findTypeURL returns the token for the "@type" field value from the given +// JSON bytes. It is expected that the given bytes start with json.ObjectOpen. +// It returns errEmptyObject if the JSON object is empty or errMissingType if +// @type field does not exist. It returns other error if the @type field is not +// valid or other decoding issues. +func findTypeURL(d decoder) (json.Token, error) { + var typeURL string + var typeTok json.Token + numFields := 0 + // Skip start object. + d.Read() + +Loop: + for { + tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + + switch tok.Kind() { + case json.ObjectClose: + if typeURL == "" { + // Did not find @type field. + if numFields > 0 { + return json.Token{}, errMissingType + } + return json.Token{}, errEmptyObject + } + break Loop + + case json.Name: + numFields++ + if tok.Name() != "@type" { + // Skip value. + if err := d.skipJSONValue(); err != nil { + return json.Token{}, err + } + continue + } + + // Return error if this was previously set already. + if typeURL != "" { + return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`) + } + // Read field value. 
+ tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + if tok.Kind() != json.String { + return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString()) + } + typeURL = tok.ParsedString() + if typeURL == "" { + return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`) + } + typeTok = tok + } + } + + return typeTok, nil +} + +// skipJSONValue parses a JSON value (null, boolean, string, number, object and +// array) in order to advance the read to the next JSON value. It relies on +// the decoder returning an error if the types are not in valid sequence. +func (d decoder) skipJSONValue() error { + tok, err := d.Read() + if err != nil { + return err + } + // Only need to continue reading for objects and arrays. + switch tok.Kind() { + case json.ObjectOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + return nil + case json.Name: + // Skip object field value. + if err := d.skipJSONValue(); err != nil { + return err + } + } + } + + case json.ArrayOpen: + for { + tok, err := d.Peek() + if err != nil { + return err + } + switch tok.Kind() { + case json.ArrayClose: + d.Read() + return nil + default: + // Skip array item. + if err := d.skipJSONValue(); err != nil { + return err + } + } + } + } + return nil +} + +// unmarshalAnyValue unmarshals the given custom-type message from the JSON +// object's "value" field. +func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m pref.Message) error { + // Skip ObjectOpen, and start reading the fields. + d.Read() + + var found bool // Used for detecting duplicate "value". + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + if !found { + return d.newError(tok.Pos(), `missing "value" field`) + } + return nil + + case json.Name: + switch tok.Name() { + case "@type": + // Skip the value as this was previously parsed already. + d.Read() + + case "value": + if found { + return d.newError(tok.Pos(), `duplicate "value" field`) + } + // Unmarshal the field value into the given message. + if err := unmarshal(d, m); err != nil { + return err + } + found = true + + default: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + } + } +} + +// Wrapper types are encoded as JSON primitives like string, number or boolean. + +func (e encoder) marshalWrapperType(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val := m.Get(fd) + return e.marshalSingular(val, fd) +} + +func (d decoder) unmarshalWrapperType(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + m.Set(fd, val) + return nil +} + +// The JSON representation for Empty is an empty JSON object. 
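A minimal sketch of the wrapper-type and Empty encodings described above (not part of the patch; it assumes the generated wrapperspb and emptypb packages that ship alongside protojson in this protobuf release):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/emptypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Wrapper types marshal to bare JSON primitives rather than objects.
	b, _ := protojson.Marshal(wrapperspb.Int32(42))
	fmt.Println(string(b)) // 42

	// 64-bit integer wrappers follow the proto3 JSON rule and become strings.
	b, _ = protojson.Marshal(wrapperspb.Int64(42))
	fmt.Println(string(b)) // "42"

	// Empty marshals to an empty JSON object.
	b, _ = protojson.Marshal(&emptypb.Empty{})
	fmt.Println(string(b)) // {}
}
```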
+ +func (e encoder) marshalEmpty(pref.Message) error { + e.StartObject() + e.EndObject() + return nil +} + +func (d decoder) unmarshalEmpty(pref.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + return nil + + case json.Name: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + + default: + return d.unexpectedTokenError(tok) + } + } +} + +// The JSON representation for Struct is a JSON object that contains the encoded +// Struct.fields map and follows the serialization rules for a map. + +func (e encoder) marshalStruct(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return e.marshalMap(m.Get(fd).Map(), fd) +} + +func (d decoder) unmarshalStruct(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return d.unmarshalMap(m.Mutable(fd).Map(), fd) +} + +// The JSON representation for ListValue is JSON array that contains the encoded +// ListValue.values repeated field and follows the serialization rules for a +// repeated field. + +func (e encoder) marshalListValue(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return e.marshalList(m.Get(fd).List(), fd) +} + +func (d decoder) unmarshalListValue(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return d.unmarshalList(m.Mutable(fd).List(), fd) +} + +// The JSON representation for a Value is dependent on the oneof field that is +// set. Each of the field in the oneof has its own custom serialization rule. A +// Value message needs to be a oneof field set, else it is an error. + +func (e encoder) marshalKnownValue(m pref.Message) error { + od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname) + } + return e.marshalSingular(m.Get(fd), fd) +} + +func (d decoder) unmarshalKnownValue(m pref.Message) error { + tok, err := d.Peek() + if err != nil { + return err + } + + var fd pref.FieldDescriptor + var val pref.Value + switch tok.Kind() { + case json.Null: + d.Read() + fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number) + val = pref.ValueOfEnum(0) + + case json.Bool: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number) + val = pref.ValueOfBool(tok.Bool()) + + case json.Number: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number) + var ok bool + val, ok = unmarshalFloat(tok, 64) + if !ok { + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + case json.String: + // A JSON string may have been encoded from the number_value field, + // e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows + // for it to be in JSON string form. 
Given this custom encoding spec, + // however, there is no way to identify that and hence a JSON string is + // always assigned to the string_value field, which means that certain + // encoding cannot be parsed back to the same field. + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number) + val = pref.ValueOfString(tok.ParsedString()) + + case json.ObjectOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalStruct(val.Message()); err != nil { + return err + } + + case json.ArrayOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalListValue(val.Message()); err != nil { + return err + } + + default: + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + m.Set(fd, val) + return nil +} + +// The JSON representation for a Duration is a JSON string that ends in the +// suffix "s" (indicating seconds) and is preceded by the number of seconds, +// with nanoseconds expressed as fractional seconds. +// +// Durations less than one second are represented with a 0 seconds field and a +// positive or negative nanos field. For durations of one second or more, a +// non-zero value for the nanos field must be of the same sign as the seconds +// field. +// +// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive. +// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive. + +const ( + secondsInNanos = 999999999 + maxSecondsInDuration = 315576000000 +) + +func (e encoder) marshalDuration(m pref.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs) + } + if nanos < -secondsInNanos || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos) + } + if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) { + return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname) + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". + f := "%d.%09d" + if nanos < 0 { + nanos = -nanos + if secs == 0 { + f = "-%d.%09d" + } + } + x := fmt.Sprintf(f, secs, nanos) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "s") + return nil +} + +func (d decoder) unmarshalDuration(m pref.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + secs, nanos, ok := parseDuration(tok.ParsedString()) + if !ok { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString()) + } + // Validate seconds. No need to validate nanos because parseDuration would + // have covered that already. 
+ if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + m.Set(fdSeconds, pref.ValueOfInt64(secs)) + m.Set(fdNanos, pref.ValueOfInt32(nanos)) + return nil +} + +// parseDuration parses the given input string for seconds and nanoseconds value +// for the Duration JSON format. The format is a decimal number with a suffix +// 's'. It can have optional plus/minus sign. There needs to be at least an +// integer or fractional part. Fractional part is limited to 9 digits only for +// nanoseconds precision, regardless of whether there are trailing zero digits. +// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s. +func parseDuration(input string) (int64, int32, bool) { + b := []byte(input) + size := len(b) + if size < 2 { + return 0, 0, false + } + if b[size-1] != 's' { + return 0, 0, false + } + b = b[:size-1] + + // Read optional plus/minus symbol. + var neg bool + switch b[0] { + case '-': + neg = true + b = b[1:] + case '+': + b = b[1:] + } + if len(b) == 0 { + return 0, 0, false + } + + // Read the integer part. + var intp []byte + switch { + case b[0] == '0': + b = b[1:] + + case '1' <= b[0] && b[0] <= '9': + intp = b[0:] + b = b[1:] + n := 1 + for len(b) > 0 && '0' <= b[0] && b[0] <= '9' { + n++ + b = b[1:] + } + intp = intp[:n] + + case b[0] == '.': + // Continue below. + + default: + return 0, 0, false + } + + hasFrac := false + var frac [9]byte + if len(b) > 0 { + if b[0] != '.' { + return 0, 0, false + } + // Read the fractional part. + b = b[1:] + n := 0 + for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' { + frac[n] = b[0] + n++ + b = b[1:] + } + // It is not valid if there are more bytes left. + if len(b) > 0 { + return 0, 0, false + } + // Pad fractional part with 0s. + for i := n; i < 9; i++ { + frac[i] = '0' + } + hasFrac = true + } + + var secs int64 + if len(intp) > 0 { + var err error + secs, err = strconv.ParseInt(string(intp), 10, 64) + if err != nil { + return 0, 0, false + } + } + + var nanos int64 + if hasFrac { + nanob := bytes.TrimLeft(frac[:], "0") + if len(nanob) > 0 { + var err error + nanos, err = strconv.ParseInt(string(nanob), 10, 32) + if err != nil { + return 0, 0, false + } + } + } + + if neg { + if secs > 0 { + secs = -secs + } + if nanos > 0 { + nanos = -nanos + } + } + return secs, int32(nanos), true +} + +// The JSON representation for a Timestamp is a JSON string in the RFC 3339 +// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where +// {year} is always expressed using four digits while {month}, {day}, {hour}, +// {min}, and {sec} are zero-padded to two digits each. The fractional seconds, +// which can go up to 9 digits, up to 1 nanosecond resolution, is optional. The +// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding +// should always use UTC (as indicated by "Z") and a decoder should be able to +// accept both UTC and other timezones (as indicated by an offset). +// +// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z +// inclusive. +// Timestamp.nanos must be from 0 to 999,999,999 inclusive. 
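To make the Duration and Timestamp encodings described in the comments above concrete, here is a minimal sketch against the public protojson API (not part of this patch; it assumes the standard durationpb and timestamppb packages are available):

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/encoding/protojson"
		"google.golang.org/protobuf/types/known/durationpb"
		"google.golang.org/protobuf/types/known/timestamppb"
	)

	func main() {
		// Duration: decimal seconds with an "s" suffix and 0, 3, 6, or 9
		// fractional digits.
		d := &durationpb.Duration{Seconds: 3, Nanos: 500000000}
		b, _ := protojson.Marshal(d)
		fmt.Println(string(b)) // "3.500s"

		// Timestamp: RFC 3339, Z-normalized, fractional digits trimmed in
		// groups of three.
		ts := &timestamppb.Timestamp{Seconds: 1136214245}
		b, _ = protojson.Marshal(ts)
		fmt.Println(string(b)) // "2006-01-02T15:04:05Z"
	}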
+ +const ( + maxTimestampSeconds = 253402300799 + minTimestampSeconds = -62135596800 +) + +func (e encoder) marshalTimestamp(m pref.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs) + } + if nanos < 0 || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos) + } + // Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3, + // 6 or 9 fractional digits. + t := time.Unix(secs, nanos).UTC() + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "Z") + return nil +} + +func (d decoder) unmarshalTimestamp(m pref.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + t, err := time.Parse(time.RFC3339Nano, tok.ParsedString()) + if err != nil { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate seconds. No need to validate nanos because time.Parse would have + // covered that already. + secs := t.Unix() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + m.Set(fdSeconds, pref.ValueOfInt64(secs)) + m.Set(fdNanos, pref.ValueOfInt32(int32(t.Nanosecond()))) + return nil +} + +// The JSON representation for a FieldMask is a JSON string where paths are +// separated by a comma. Fields name in each path are converted to/from +// lower-camel naming conventions. Encoding should fail if the path name would +// end up differently after a round-trip. + +func (e encoder) marshalFieldMask(m pref.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Get(fd).List() + paths := make([]string, 0, list.Len()) + + for i := 0; i < list.Len(); i++ { + s := list.Get(i).String() + if !pref.FullName(s).IsValid() { + return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s) + } + // Return error if conversion to camelCase is not reversible. 
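		// For example, a path such as "foo_3_bar" camel-cases to "foo3Bar",
		// which snake-cases back to "foo3_bar" rather than the original, so
		// it is reported as irreversible below.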
+ cc := strs.JSONCamelCase(s) + if s != strs.JSONSnakeCase(cc) { + return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s) + } + paths = append(paths, cc) + } + + e.WriteString(strings.Join(paths, ",")) + return nil +} + +func (d decoder) unmarshalFieldMask(m pref.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + str := strings.TrimSpace(tok.ParsedString()) + if str == "" { + return nil + } + paths := strings.Split(str, ",") + + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Mutable(fd).List() + + for _, s0 := range paths { + s := strs.JSONSnakeCase(s0) + if strings.Contains(s0, "_") || !pref.FullName(s).IsValid() { + return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0) + } + list.Append(pref.ValueOfString(s)) + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index c2f8f28f2cb..cab95a42735 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -12,8 +12,8 @@ import ( "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/encoding/text" "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/fieldnum" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/set" "google.golang.org/protobuf/internal/strs" @@ -108,7 +108,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { return errors.New("no support for proto1 MessageSets") } - if messageDesc.FullName() == "google.protobuf.Any" { + if messageDesc.FullName() == genid.Any_message_fullname { return d.unmarshalAny(m, checkDelims) } @@ -538,14 +538,13 @@ Loop: return d.unexpectedTokenError(tok) } - name := tok.IdentName() - switch name { - case "key": + switch name := pref.Name(tok.IdentName()); name { + case genid.MapEntry_Key_field_name: if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } if key.IsValid() { - return d.newError(tok.Pos(), `map entry "key" cannot be repeated`) + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) } val, err := d.unmarshalScalar(fd.MapKey()) if err != nil { @@ -553,14 +552,14 @@ Loop: } key = val.MapKey() - case "value": + case genid.MapEntry_Value_field_name: if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) { if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } } if pval.IsValid() { - return d.newError(tok.Pos(), `map entry "value" cannot be repeated`) + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) } pval, err = unmarshalMapValue() if err != nil { @@ -597,13 +596,9 @@ Loop: func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error { var typeURL string var bValue []byte - - // hasFields tracks which valid fields have been seen in the loop below in - // order to flag an error if there are duplicates or conflicts. It may - // contain the strings "type_url", "value" and "expanded". The literal - // "expanded" is used to indicate that the expanded form has been - // encountered already. 
- hasFields := map[string]bool{} + var seenTypeUrl bool + var seenValue bool + var isExpanded bool if checkDelims { tok, err := d.Read() @@ -642,12 +637,12 @@ Loop: return d.syntaxError(tok.Pos(), "missing field separator :") } - switch tok.IdentName() { - case "type_url": - if hasFields["type_url"] { - return d.newError(tok.Pos(), "duplicate Any type_url field") + switch name := pref.Name(tok.IdentName()); name { + case genid.Any_TypeUrl_field_name: + if seenTypeUrl { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname) } - if hasFields["expanded"] { + if isExpanded { return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) } tok, err := d.Read() @@ -657,15 +652,15 @@ Loop: var ok bool typeURL, ok = tok.String() if !ok { - return d.newError(tok.Pos(), "invalid Any type_url: %v", tok.RawString()) + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_TypeUrl_field_fullname, tok.RawString()) } - hasFields["type_url"] = true + seenTypeUrl = true - case "value": - if hasFields["value"] { - return d.newError(tok.Pos(), "duplicate Any value field") + case genid.Any_Value_field_name: + if seenValue { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_Value_field_fullname) } - if hasFields["expanded"] { + if isExpanded { return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) } tok, err := d.Read() @@ -674,22 +669,22 @@ Loop: } s, ok := tok.String() if !ok { - return d.newError(tok.Pos(), "invalid Any value: %v", tok.RawString()) + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_Value_field_fullname, tok.RawString()) } bValue = []byte(s) - hasFields["value"] = true + seenValue = true default: if !d.opts.DiscardUnknown { - return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString()) + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) } } case text.TypeName: - if hasFields["expanded"] { + if isExpanded { return d.newError(tok.Pos(), "cannot have more than one type") } - if hasFields["type_url"] { + if seenTypeUrl { return d.newError(tok.Pos(), "conflict with type_url field") } typeURL = tok.TypeName() @@ -698,21 +693,21 @@ Loop: if err != nil { return err } - hasFields["expanded"] = true + isExpanded = true default: if !d.opts.DiscardUnknown { - return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString()) + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) } } } fds := m.Descriptor().Fields() if len(typeURL) > 0 { - m.Set(fds.ByNumber(fieldnum.Any_TypeUrl), pref.ValueOfString(typeURL)) + m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL)) } if len(bValue) > 0 { - m.Set(fds.ByNumber(fieldnum.Any_Value), pref.ValueOfBytes(bValue)) + m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue)) } return nil } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 41e5c773c0a..0877d71c519 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -14,8 +14,8 @@ import ( "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/encoding/text" "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/fieldnum" 
"google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/mapsort" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" @@ -162,7 +162,7 @@ func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { } // Handle Any expansion. - if messageDesc.FullName() == "google.protobuf.Any" { + if messageDesc.FullName() == genid.Any_message_fullname { if e.marshalAny(m) { return nil } @@ -295,13 +295,13 @@ func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) e.StartMessage() defer e.EndMessage() - e.WriteName("key") + e.WriteName(string(genid.MapEntry_Key_field_name)) err = e.marshalSingular(key.Value(), fd.MapKey()) if err != nil { return false } - e.WriteName("value") + e.WriteName(string(genid.MapEntry_Value_field_name)) err = e.marshalSingular(val, fd.MapValue()) if err != nil { return false @@ -399,7 +399,7 @@ func (e encoder) marshalUnknown(b []byte) { func (e encoder) marshalAny(any pref.Message) bool { // Construct the embedded message. fds := any.Descriptor().Fields() - fdType := fds.ByNumber(fieldnum.Any_TypeUrl) + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) typeURL := any.Get(fdType).String() mt, err := e.opts.Resolver.FindMessageByURL(typeURL) if err != nil { @@ -408,7 +408,7 @@ func (e encoder) marshalAny(any pref.Message) bool { m := mt.New().Interface() // Unmarshal bytes into embedded message. - fdValue := fds.ByNumber(fieldnum.Any_Value) + fdValue := fds.ByNumber(genid.Any_Value_field_number) value := any.Get(fdValue) err = proto.UnmarshalOptions{ AllowPartial: true, diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go new file mode 100644 index 00000000000..b13fd29e81e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -0,0 +1,340 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "io" + "regexp" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +const unexpectedFmt = "unexpected token %s" + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// Decoder is a token-based JSON decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. + lastErr error + + // openStack is a stack containing ObjectOpen and ArrayOpen values. The + // top of stack represents the object or the array the current value is + // directly located in. + openStack []Kind + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. + in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// Peek looks ahead and returns the next token kind without advancing a read. 
+func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next JSON token. +// It will return an error if there is no valid token. +func (d *Decoder) Read() (Token, error) { + const scalar = Null | Bool | Number | String + + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext() + if err != nil { + return Token{}, err + } + + switch tok.kind { + case EOF: + if len(d.openStack) != 0 || + d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 { + return Token{}, ErrUnexpectedEOF + } + + case Null: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case Bool, Number: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case String: + if d.isValueNext() { + break + } + // This string token should only be for a field name. + if d.lastToken.kind&(ObjectOpen|comma) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + if len(d.in) == 0 { + return Token{}, ErrUnexpectedEOF + } + if c := d.in[0]; c != ':' { + return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c)) + } + tok.kind = Name + d.consume(1) + + case ObjectOpen, ArrayOpen: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = append(d.openStack, tok.kind) + + case ObjectClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ObjectOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case ArrayClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ArrayOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case comma: + if len(d.openStack) == 0 || + d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + } + + // Update d.lastToken only after validating token to be in the right sequence. + d.lastToken = tok + + if d.lastToken.kind == comma { + return d.Read() + } + return tok, nil +} + +// Any sequence that looks like a non-delimiter (for error reporting). +var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`) + +// parseNext parses for the next JSON token. It returns a Token object for +// different types, except for Name. It does not handle whether the next token +// is in a valid sequence or not. +func (d *Decoder) parseNext() (Token, error) { + // Trim leading spaces. 
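	// consume(0) advances past zero input bytes and then skips any leading
	// whitespace, so parsing below always starts at a significant character.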
+ d.consume(0) + + in := d.in + if len(in) == 0 { + return d.consumeToken(EOF, 0), nil + } + + switch in[0] { + case 'n': + if n := matchWithDelim("null", in); n != 0 { + return d.consumeToken(Null, n), nil + } + + case 't': + if n := matchWithDelim("true", in); n != 0 { + return d.consumeBoolToken(true, n), nil + } + + case 'f': + if n := matchWithDelim("false", in); n != 0 { + return d.consumeBoolToken(false, n), nil + } + + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + if n, ok := parseNumber(in); ok { + return d.consumeToken(Number, n), nil + } + + case '"': + s, n, err := d.parseString(in) + if err != nil { + return Token{}, err + } + return d.consumeStringToken(s, n), nil + + case '{': + return d.consumeToken(ObjectOpen, 1), nil + + case '}': + return d.consumeToken(ObjectClose, 1), nil + + case '[': + return d.consumeToken(ArrayOpen, 1), nil + + case ']': + return d.consumeToken(ArrayClose, 1), nil + + case ',': + return d.consumeToken(comma, 1), nil + } + return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in)) +} + +// newSyntaxError returns an error with line and column information useful for +// syntax errors. +func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error { + e := errors.New(f, x...) + line, column := d.Position(pos) + return errors.New("syntax error (line %d:%d): %v", line, column, e) +} + +// Position returns line and column number of given index of the original input. +// It will panic if index is out of range. +func (d *Decoder) Position(idx int) (line int, column int) { + b := d.orig[:idx] + line = bytes.Count(b, []byte("\n")) + 1 + if i := bytes.LastIndexByte(b, '\n'); i >= 0 { + b = b[i+1:] + } + column = utf8.RuneCount(b) + 1 // ignore multi-rune characters + return line, column +} + +// currPos returns the current index position of d.in from d.orig. +func (d *Decoder) currPos() int { + return len(d.orig) - len(d.in) +} + +// matchWithDelim matches s with the input b and verifies that the match +// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]"). +// As a special case, EOF is considered a delimiter. It returns the length of s +// if there is a match, else 0. +func matchWithDelim(s string, b []byte) int { + if !bytes.HasPrefix(b, []byte(s)) { + return 0 + } + + n := len(s) + if n < len(b) && isNotDelim(b[n]) { + return 0 + } + return n +} + +// isNotDelim returns true if given byte is a not delimiter character. +func isNotDelim(c byte) bool { + return (c == '-' || c == '+' || c == '.' || c == '_' || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + ('0' <= c && c <= '9')) +} + +// consume consumes n bytes of input and any subsequent whitespace. +func (d *Decoder) consume(n int) { + d.in = d.in[n:] + for len(d.in) > 0 { + switch d.in[0] { + case ' ', '\n', '\r', '\t': + d.in = d.in[1:] + default: + return + } + } +} + +// isValueNext returns true if next type should be a JSON value: Null, +// Number, String or Bool. 
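// Concretely, a value is expected at the very start of the input, after a
// Name inside an object, or after '[' or ',' inside an array.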
+func (d *Decoder) isValueNext() bool { + if len(d.openStack) == 0 { + return d.lastToken.kind == 0 + } + + start := d.openStack[len(d.openStack)-1] + switch start { + case ObjectOpen: + return d.lastToken.kind&Name != 0 + case ArrayOpen: + return d.lastToken.kind&(ArrayOpen|comma) != 0 + } + panic(fmt.Sprintf( + "unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v", + d.lastToken.kind, start)) +} + +// consumeToken constructs a Token for given Kind with raw value derived from +// current d.in and given size, and consumes the given size-lenght of it. +func (d *Decoder) consumeToken(kind Kind, size int) Token { + tok := Token{ + kind: kind, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + } + d.consume(size) + return tok +} + +// consumeBoolToken constructs a Token for a Bool kind with raw value derived from +// current d.in and given size. +func (d *Decoder) consumeBoolToken(b bool, size int) Token { + tok := Token{ + kind: Bool, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + boo: b, + } + d.consume(size) + return tok +} + +// consumeStringToken constructs a Token for a String kind with raw value derived +// from current d.in and given size. +func (d *Decoder) consumeStringToken(s string, size int) Token { + tok := Token{ + kind: String, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + str: s, + } + d.consume(size) + return tok +} + +// Clone returns a copy of the Decoder for use in reading ahead the next JSON +// object, array or other values without affecting current Decoder. +func (d *Decoder) Clone() *Decoder { + ret := *d + ret.openStack = append([]Kind(nil), ret.openStack...) + return &ret +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go new file mode 100644 index 00000000000..2999d713320 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go @@ -0,0 +1,254 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "strconv" +) + +// parseNumber reads the given []byte for a valid JSON number. If it is valid, +// it returns the number of bytes. Parsing logic follows the definition in +// https://tools.ietf.org/html/rfc7159#section-6, and is based off +// encoding/json.isValidNumber function. +func parseNumber(input []byte) (int, bool) { + var n int + + s := input + if len(s) == 0 { + return 0, false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + + // Digits + switch { + case s[0] == '0': + s = s[1:] + n++ + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + n++ + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + + default: + return 0, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + n += 2 + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + n++ + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // Check that next byte is a delimiter or it is at the end. 
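	// For example, input such as "123abc" is rejected here because the
	// trailing letter is not a delimiter.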
+ if n < len(input) && isNotDelim(input[n]) { + return 0, false + } + + return n, true +} + +// numberParts is the result of parsing out a valid JSON number. It contains +// the parts of a number. The parts are used for integer conversion. +type numberParts struct { + neg bool + intp []byte + frac []byte + exp []byte +} + +// parseNumber constructs numberParts from given []byte. The logic here is +// similar to consumeNumber above with the difference of having to construct +// numberParts. The slice fields in numberParts are subslices of the input. +func parseNumberParts(input []byte) (numberParts, bool) { + var neg bool + var intp []byte + var frac []byte + var exp []byte + + s := input + if len(s) == 0 { + return numberParts{}, false + } + + // Optional - + if s[0] == '-' { + neg = true + s = s[1:] + if len(s) == 0 { + return numberParts{}, false + } + } + + // Digits + switch { + case s[0] == '0': + // Skip first 0 and no need to store. + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + intp = s + n := 1 + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + intp = intp[:n] + + default: + return numberParts{}, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + frac = s[1:] + n := 1 + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + frac = frac[:n] + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + exp = s + n := 0 + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return numberParts{}, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + exp = exp[:n] + } + + return numberParts{ + neg: neg, + intp: intp, + frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right. + exp: exp, + }, true +} + +// normalizeToIntString returns an integer string in normal form without the +// E-notation for given numberParts. It will return false if it is not an +// integer or if the exponent exceeds than max/min int value. +func normalizeToIntString(n numberParts) (string, bool) { + intpSize := len(n.intp) + fracSize := len(n.frac) + + if intpSize == 0 && fracSize == 0 { + return "0", true + } + + var exp int + if len(n.exp) > 0 { + i, err := strconv.ParseInt(string(n.exp), 10, 32) + if err != nil { + return "", false + } + exp = int(i) + } + + var num []byte + if exp >= 0 { + // For positive E, shift fraction digits into integer part and also pad + // with zeroes as needed. + + // If there are more digits in fraction than the E value, then the + // number is not an integer. + if fracSize > exp { + return "", false + } + + // Make sure resulting digits are within max value limit to avoid + // unnecessarily constructing a large byte slice that may simply fail + // later on. + const maxDigits = 20 // Max uint64 value has 20 decimal digits. + if intpSize+exp > maxDigits { + return "", false + } + + // Set cap to make a copy of integer part when appended. + num = n.intp[:len(n.intp):len(n.intp)] + num = append(num, n.frac...) + for i := 0; i < exp-fracSize; i++ { + num = append(num, '0') + } + } else { + // For negative E, shift digits in integer part out. + + // If there are fractions, then the number is not an integer. + if fracSize > 0 { + return "", false + } + + // index is where the decimal point will be after adjusting for negative + // exponent. 
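	// For example, "1230e-1" has integer part "1230" and exponent -1, so
	// index is 3 and only the trailing '0' is shifted out, yielding "123";
	// "123e-1" fails below because the shifted-out digit '3' is non-zero.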
+ index := intpSize + exp + if index < 0 { + return "", false + } + + num = n.intp + // If any of the digits being shifted to the right of the decimal point + // is non-zero, then the number is not an integer. + for i := index; i < intpSize; i++ { + if num[i] != '0' { + return "", false + } + } + num = num[:index] + } + + if n.neg { + return "-" + string(num), true + } + return string(num), true +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go new file mode 100644 index 00000000000..f7fea7d8dd4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go @@ -0,0 +1,91 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +func (d *Decoder) parseString(in []byte) (string, int, error) { + in0 := in + if len(in) == 0 { + return "", 0, ErrUnexpectedEOF + } + if in[0] != '"' { + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0]) + } + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string") + case r < ' ': + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r) + case r == '"': + in = in[1:] + n := len(in0) - len(in) + return string(out), n, nil + case r == '\\': + if len(in) < 2 { + return "", 0, ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\\', '/': + in, out = in[2:], append(out, r) + case 'b': + in, out = in[2:], append(out, '\b') + case 'f': + in, out = in[2:], append(out, '\f') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'u': + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + if err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + + r := rune(v) + if utf16.IsSurrogate(r) { + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + r = utf16.DecodeRune(r, rune(v)) + if in[0] != '\\' || in[1] != 'u' || + r == unicode.ReplacementChar || err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + } + out = append(out, string(r)...) + default: + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2]) + } + default: + i := indexNeedEscapeInBytes(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + return "", 0, ErrUnexpectedEOF +} + +// indexNeedEscapeInBytes returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. 
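// For example, for the bytes `ab"c` it returns 2, the index of the '"' that
// requires escape handling.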
+func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go new file mode 100644 index 00000000000..2eb7023b2fc --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go @@ -0,0 +1,193 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "strconv" +) + +// Kind represents a token kind expressible in the JSON format. +type Kind uint16 + +const ( + Invalid Kind = (1 << iota) / 2 + EOF + Null + Bool + Number + String + Name + ObjectOpen + ObjectClose + ArrayOpen + ArrayClose + + // comma is only for parsing in between tokens and + // does not need to be exported. + comma +) + +func (k Kind) String() string { + switch k { + case EOF: + return "eof" + case Null: + return "null" + case Bool: + return "bool" + case Number: + return "number" + case String: + return "string" + case ObjectOpen: + return "{" + case ObjectClose: + return "}" + case Name: + return "name" + case ArrayOpen: + return "[" + case ArrayClose: + return "]" + case comma: + return "," + } + return "" +} + +// Token provides a parsed token kind and value. +// +// Values are provided by the difference accessor methods. The accessor methods +// Name, Bool, and ParsedString will panic if called on the wrong kind. There +// are different accessor methods for the Number kind for converting to the +// appropriate Go numeric type and those methods have the ok return value. +type Token struct { + // Token kind. + kind Kind + // pos provides the position of the token in the original input. + pos int + // raw bytes of the serialized token. + // This is a subslice into the original input. + raw []byte + // boo is parsed boolean value. + boo bool + // str is parsed string value. + str string +} + +// Kind returns the token kind. +func (t Token) Kind() Kind { + return t.kind +} + +// RawString returns the read value in string. +func (t Token) RawString() string { + return string(t.raw) +} + +// Pos returns the token position from the input. +func (t Token) Pos() int { + return t.pos +} + +// Name returns the object name if token is Name, else it will return an error. +func (t Token) Name() string { + if t.kind == Name { + return t.str + } + panic(fmt.Sprintf("Token is not a Name: %v", t.RawString())) +} + +// Bool returns the bool value if token kind is Bool, else it panics. +func (t Token) Bool() bool { + if t.kind == Bool { + return t.boo + } + panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString())) +} + +// ParsedString returns the string value for a JSON string token or the read +// value in string if token is not a string. +func (t Token) ParsedString() string { + if t.kind == String { + return t.str + } + panic(fmt.Sprintf("Token is not a String: %v", t.RawString())) +} + +// Float returns the floating-point number if token kind is Number. +// +// The floating-point precision is specified by the bitSize parameter: 32 for +// float32 or 64 for float64. If bitSize=32, the result still has type float64, +// but it will be convertible to float32 without changing its value. It will +// return false if the number exceeds the floating point limits for given +// bitSize. 
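// For example, Float(32) reports false for a Number token such as 3.4e39,
// which overflows float32, while Float(64) parses it successfully.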
+func (t Token) Float(bitSize int) (float64, bool) { + if t.kind != Number { + return 0, false + } + f, err := strconv.ParseFloat(t.RawString(), bitSize) + if err != nil { + return 0, false + } + return f, true +} + +// Int returns the signed integer number if token is Number. +// +// The given bitSize specifies the integer type that the result must fit into. +// It returns false if the number is not an integer value or if the result +// exceeds the limits for given bitSize. +func (t Token) Int(bitSize int) (int64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseInt(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +// Uint returns the signed integer number if token is Number, else it will +// return an error. +// +// The given bitSize specifies the unsigned integer type that the result must +// fit into. It returns false if the number is not an unsigned integer value +// or if the result exceeds the limits for given bitSize. +func (t Token) Uint(bitSize int) (uint64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseUint(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +func (t Token) getIntStr() (string, bool) { + if t.kind != Number { + return "", false + } + parts, ok := parseNumberParts(t.raw) + if !ok { + return "", false + } + return normalizeToIntString(parts) +} + +// TokenEquals returns true if given Tokens are equal, else false. +func TokenEquals(x, y Token) bool { + return x.kind == y.kind && + x.pos == y.pos && + bytes.Equal(x.raw, y.raw) && + x.boo == y.boo && + x.str == y.str +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go new file mode 100644 index 00000000000..fbdf3487342 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go @@ -0,0 +1,276 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "math" + "math/bits" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/errors" +) + +// kind represents an encoding type. +type kind uint8 + +const ( + _ kind = (1 << iota) / 2 + name + scalar + objectOpen + objectClose + arrayOpen + arrayClose +) + +// Encoder provides methods to write out JSON constructs and values. The user is +// responsible for producing valid sequences of JSON constructs and values. +type Encoder struct { + indent string + lastKind kind + indents []byte + out []byte +} + +// NewEncoder returns an Encoder. +// +// If indent is a non-empty string, it causes every entry for an Array or Object +// to be preceded by the indent and trailed by a newline. +func NewEncoder(indent string) (*Encoder, error) { + e := &Encoder{} + if len(indent) > 0 { + if strings.Trim(indent, " \t") != "" { + return nil, errors.New("indent may only be composed of space or tab characters") + } + e.indent = indent + } + return e, nil +} + +// Bytes returns the content of the written bytes. +func (e *Encoder) Bytes() []byte { + return e.out +} + +// WriteNull writes out the null value. +func (e *Encoder) WriteNull() { + e.prepareNext(scalar) + e.out = append(e.out, "null"...) +} + +// WriteBool writes out the given boolean value. 
+func (e *Encoder) WriteBool(b bool) { + e.prepareNext(scalar) + if b { + e.out = append(e.out, "true"...) + } else { + e.out = append(e.out, "false"...) + } +} + +// WriteString writes out the given string in JSON string value. Returns error +// if input string contains invalid UTF-8. +func (e *Encoder) WriteString(s string) error { + e.prepareNext(scalar) + var err error + if e.out, err = appendString(e.out, s); err != nil { + return err + } + return nil +} + +// Sentinel error used for indicating invalid UTF-8. +var errInvalidUTF8 = errors.New("invalid UTF-8") + +func appendString(out []byte, in string) ([]byte, error) { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) + for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + return out, errInvalidUTF8 + case r < ' ' || r == '"' || r == '\\': + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\b': + out = append(out, 'b') + case '\f': + out = append(out, 'f') + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out, nil +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i, r := range s { + if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float and bitSize in JSON number value. +func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +// appendFloat formats given float in bitSize, and appends to the given []byte. +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, `"NaN"`...) + case math.IsInf(n, +1): + return append(out, `"Infinity"`...) + case math.IsInf(n, -1): + return append(out, `"-Infinity"`...) + } + + // JSON number formatting logic based on encoding/json. + // See floatEncoder.encode for reference. + fmt := byte('f') + if abs := math.Abs(n); abs != 0 { + if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) || + bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + out = strconv.AppendFloat(out, n, fmt, -1, bitSize) + if fmt == 'e' { + n := len(out) + if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' { + out[n-2] = out[n-1] + out = out[:n-1] + } + } + return out +} + +// WriteInt writes out the given signed integer in JSON number value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatInt(n, 10)...) +} + +// WriteUint writes out the given unsigned integer in JSON number value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatUint(n, 10)...) +} + +// StartObject writes out the '{' symbol. +func (e *Encoder) StartObject() { + e.prepareNext(objectOpen) + e.out = append(e.out, '{') +} + +// EndObject writes out the '}' symbol. 
+func (e *Encoder) EndObject() { + e.prepareNext(objectClose) + e.out = append(e.out, '}') +} + +// WriteName writes out the given string in JSON string value and the name +// separator ':'. Returns error if input string contains invalid UTF-8, which +// should not be likely as protobuf field names should be valid. +func (e *Encoder) WriteName(s string) error { + e.prepareNext(name) + var err error + // Append to output regardless of error. + e.out, err = appendString(e.out, s) + e.out = append(e.out, ':') + return err +} + +// StartArray writes out the '[' symbol. +func (e *Encoder) StartArray() { + e.prepareNext(arrayOpen) + e.out = append(e.out, '[') +} + +// EndArray writes out the ']' symbol. +func (e *Encoder) EndArray() { + e.prepareNext(arrayClose) + e.out = append(e.out, ']') +} + +// prepareNext adds possible comma and indentation for the next value based +// on last type and indent option. It also updates lastKind to next. +func (e *Encoder) prepareNext(next kind) { + defer func() { + // Set lastKind to next. + e.lastKind = next + }() + + if len(e.indent) == 0 { + // Need to add comma on the following condition. + if e.lastKind&(scalar|objectClose|arrayClose) != 0 && + next&(name|scalar|objectOpen|arrayOpen) != 0 { + e.out = append(e.out, ',') + // For single-line output, add a random extra space after each + // comma to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + switch { + case e.lastKind&(objectOpen|arrayOpen) != 0: + // If next type is NOT closing, add indent and newline. + if next&(objectClose|arrayClose) == 0 { + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } + + case e.lastKind&(scalar|objectClose|arrayClose) != 0: + switch { + // If next type is either a value or name, add comma and newline. + case next&(name|scalar|objectOpen|arrayOpen) != 0: + e.out = append(e.out, ',', '\n') + + // If next type is a closing object or array, adjust indentation. + case next&(objectClose|arrayClose) != 0: + e.indents = e.indents[:len(e.indents)-len(e.indent)] + e.out = append(e.out, '\n') + } + e.out = append(e.out, e.indents...) + + case e.lastKind&name != 0: + e.out = append(e.out, ' ') + // For multi-line output, add a random extra space after key: to make + // output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go deleted file mode 100644 index 74c5fef2405..00000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Any. -const ( - Any_TypeUrl = 1 // optional string - Any_Value = 2 // optional bytes -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go deleted file mode 100644 index 9a6b5f29b57..00000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Api. -const ( - Api_Name = 1 // optional string - Api_Methods = 2 // repeated google.protobuf.Method - Api_Options = 3 // repeated google.protobuf.Option - Api_Version = 4 // optional string - Api_SourceContext = 5 // optional google.protobuf.SourceContext - Api_Mixins = 6 // repeated google.protobuf.Mixin - Api_Syntax = 7 // optional google.protobuf.Syntax -) - -// Field numbers for google.protobuf.Method. -const ( - Method_Name = 1 // optional string - Method_RequestTypeUrl = 2 // optional string - Method_RequestStreaming = 3 // optional bool - Method_ResponseTypeUrl = 4 // optional string - Method_ResponseStreaming = 5 // optional bool - Method_Options = 6 // repeated google.protobuf.Option - Method_Syntax = 7 // optional google.protobuf.Syntax -) - -// Field numbers for google.protobuf.Mixin. -const ( - Mixin_Name = 1 // optional string - Mixin_Root = 2 // optional string -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go deleted file mode 100644 index 6e37b59e922..00000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.FileDescriptorSet. -const ( - FileDescriptorSet_File = 1 // repeated google.protobuf.FileDescriptorProto -) - -// Field numbers for google.protobuf.FileDescriptorProto. -const ( - FileDescriptorProto_Name = 1 // optional string - FileDescriptorProto_Package = 2 // optional string - FileDescriptorProto_Dependency = 3 // repeated string - FileDescriptorProto_PublicDependency = 10 // repeated int32 - FileDescriptorProto_WeakDependency = 11 // repeated int32 - FileDescriptorProto_MessageType = 4 // repeated google.protobuf.DescriptorProto - FileDescriptorProto_EnumType = 5 // repeated google.protobuf.EnumDescriptorProto - FileDescriptorProto_Service = 6 // repeated google.protobuf.ServiceDescriptorProto - FileDescriptorProto_Extension = 7 // repeated google.protobuf.FieldDescriptorProto - FileDescriptorProto_Options = 8 // optional google.protobuf.FileOptions - FileDescriptorProto_SourceCodeInfo = 9 // optional google.protobuf.SourceCodeInfo - FileDescriptorProto_Syntax = 12 // optional string -) - -// Field numbers for google.protobuf.DescriptorProto. -const ( - DescriptorProto_Name = 1 // optional string - DescriptorProto_Field = 2 // repeated google.protobuf.FieldDescriptorProto - DescriptorProto_Extension = 6 // repeated google.protobuf.FieldDescriptorProto - DescriptorProto_NestedType = 3 // repeated google.protobuf.DescriptorProto - DescriptorProto_EnumType = 4 // repeated google.protobuf.EnumDescriptorProto - DescriptorProto_ExtensionRange = 5 // repeated google.protobuf.DescriptorProto.ExtensionRange - DescriptorProto_OneofDecl = 8 // repeated google.protobuf.OneofDescriptorProto - DescriptorProto_Options = 7 // optional google.protobuf.MessageOptions - DescriptorProto_ReservedRange = 9 // repeated google.protobuf.DescriptorProto.ReservedRange - DescriptorProto_ReservedName = 10 // repeated string -) - -// Field numbers for google.protobuf.DescriptorProto.ExtensionRange. 
-const ( - DescriptorProto_ExtensionRange_Start = 1 // optional int32 - DescriptorProto_ExtensionRange_End = 2 // optional int32 - DescriptorProto_ExtensionRange_Options = 3 // optional google.protobuf.ExtensionRangeOptions -) - -// Field numbers for google.protobuf.DescriptorProto.ReservedRange. -const ( - DescriptorProto_ReservedRange_Start = 1 // optional int32 - DescriptorProto_ReservedRange_End = 2 // optional int32 -) - -// Field numbers for google.protobuf.ExtensionRangeOptions. -const ( - ExtensionRangeOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.FieldDescriptorProto. -const ( - FieldDescriptorProto_Name = 1 // optional string - FieldDescriptorProto_Number = 3 // optional int32 - FieldDescriptorProto_Label = 4 // optional google.protobuf.FieldDescriptorProto.Label - FieldDescriptorProto_Type = 5 // optional google.protobuf.FieldDescriptorProto.Type - FieldDescriptorProto_TypeName = 6 // optional string - FieldDescriptorProto_Extendee = 2 // optional string - FieldDescriptorProto_DefaultValue = 7 // optional string - FieldDescriptorProto_OneofIndex = 9 // optional int32 - FieldDescriptorProto_JsonName = 10 // optional string - FieldDescriptorProto_Options = 8 // optional google.protobuf.FieldOptions - FieldDescriptorProto_Proto3Optional = 17 // optional bool -) - -// Field numbers for google.protobuf.OneofDescriptorProto. -const ( - OneofDescriptorProto_Name = 1 // optional string - OneofDescriptorProto_Options = 2 // optional google.protobuf.OneofOptions -) - -// Field numbers for google.protobuf.EnumDescriptorProto. -const ( - EnumDescriptorProto_Name = 1 // optional string - EnumDescriptorProto_Value = 2 // repeated google.protobuf.EnumValueDescriptorProto - EnumDescriptorProto_Options = 3 // optional google.protobuf.EnumOptions - EnumDescriptorProto_ReservedRange = 4 // repeated google.protobuf.EnumDescriptorProto.EnumReservedRange - EnumDescriptorProto_ReservedName = 5 // repeated string -) - -// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange. -const ( - EnumDescriptorProto_EnumReservedRange_Start = 1 // optional int32 - EnumDescriptorProto_EnumReservedRange_End = 2 // optional int32 -) - -// Field numbers for google.protobuf.EnumValueDescriptorProto. -const ( - EnumValueDescriptorProto_Name = 1 // optional string - EnumValueDescriptorProto_Number = 2 // optional int32 - EnumValueDescriptorProto_Options = 3 // optional google.protobuf.EnumValueOptions -) - -// Field numbers for google.protobuf.ServiceDescriptorProto. -const ( - ServiceDescriptorProto_Name = 1 // optional string - ServiceDescriptorProto_Method = 2 // repeated google.protobuf.MethodDescriptorProto - ServiceDescriptorProto_Options = 3 // optional google.protobuf.ServiceOptions -) - -// Field numbers for google.protobuf.MethodDescriptorProto. -const ( - MethodDescriptorProto_Name = 1 // optional string - MethodDescriptorProto_InputType = 2 // optional string - MethodDescriptorProto_OutputType = 3 // optional string - MethodDescriptorProto_Options = 4 // optional google.protobuf.MethodOptions - MethodDescriptorProto_ClientStreaming = 5 // optional bool - MethodDescriptorProto_ServerStreaming = 6 // optional bool -) - -// Field numbers for google.protobuf.FileOptions. 
-const ( - FileOptions_JavaPackage = 1 // optional string - FileOptions_JavaOuterClassname = 8 // optional string - FileOptions_JavaMultipleFiles = 10 // optional bool - FileOptions_JavaGenerateEqualsAndHash = 20 // optional bool - FileOptions_JavaStringCheckUtf8 = 27 // optional bool - FileOptions_OptimizeFor = 9 // optional google.protobuf.FileOptions.OptimizeMode - FileOptions_GoPackage = 11 // optional string - FileOptions_CcGenericServices = 16 // optional bool - FileOptions_JavaGenericServices = 17 // optional bool - FileOptions_PyGenericServices = 18 // optional bool - FileOptions_PhpGenericServices = 42 // optional bool - FileOptions_Deprecated = 23 // optional bool - FileOptions_CcEnableArenas = 31 // optional bool - FileOptions_ObjcClassPrefix = 36 // optional string - FileOptions_CsharpNamespace = 37 // optional string - FileOptions_SwiftPrefix = 39 // optional string - FileOptions_PhpClassPrefix = 40 // optional string - FileOptions_PhpNamespace = 41 // optional string - FileOptions_PhpMetadataNamespace = 44 // optional string - FileOptions_RubyPackage = 45 // optional string - FileOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.MessageOptions. -const ( - MessageOptions_MessageSetWireFormat = 1 // optional bool - MessageOptions_NoStandardDescriptorAccessor = 2 // optional bool - MessageOptions_Deprecated = 3 // optional bool - MessageOptions_MapEntry = 7 // optional bool - MessageOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.FieldOptions. -const ( - FieldOptions_Ctype = 1 // optional google.protobuf.FieldOptions.CType - FieldOptions_Packed = 2 // optional bool - FieldOptions_Jstype = 6 // optional google.protobuf.FieldOptions.JSType - FieldOptions_Lazy = 5 // optional bool - FieldOptions_Deprecated = 3 // optional bool - FieldOptions_Weak = 10 // optional bool - FieldOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.OneofOptions. -const ( - OneofOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.EnumOptions. -const ( - EnumOptions_AllowAlias = 2 // optional bool - EnumOptions_Deprecated = 3 // optional bool - EnumOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.EnumValueOptions. -const ( - EnumValueOptions_Deprecated = 1 // optional bool - EnumValueOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.ServiceOptions. -const ( - ServiceOptions_Deprecated = 33 // optional bool - ServiceOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.MethodOptions. -const ( - MethodOptions_Deprecated = 33 // optional bool - MethodOptions_IdempotencyLevel = 34 // optional google.protobuf.MethodOptions.IdempotencyLevel - MethodOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.UninterpretedOption. 
-const ( - UninterpretedOption_Name = 2 // repeated google.protobuf.UninterpretedOption.NamePart - UninterpretedOption_IdentifierValue = 3 // optional string - UninterpretedOption_PositiveIntValue = 4 // optional uint64 - UninterpretedOption_NegativeIntValue = 5 // optional int64 - UninterpretedOption_DoubleValue = 6 // optional double - UninterpretedOption_StringValue = 7 // optional bytes - UninterpretedOption_AggregateValue = 8 // optional string -) - -// Field numbers for google.protobuf.UninterpretedOption.NamePart. -const ( - UninterpretedOption_NamePart_NamePart = 1 // required string - UninterpretedOption_NamePart_IsExtension = 2 // required bool -) - -// Field numbers for google.protobuf.SourceCodeInfo. -const ( - SourceCodeInfo_Location = 1 // repeated google.protobuf.SourceCodeInfo.Location -) - -// Field numbers for google.protobuf.SourceCodeInfo.Location. -const ( - SourceCodeInfo_Location_Path = 1 // repeated int32 - SourceCodeInfo_Location_Span = 2 // repeated int32 - SourceCodeInfo_Location_LeadingComments = 3 // optional string - SourceCodeInfo_Location_TrailingComments = 4 // optional string - SourceCodeInfo_Location_LeadingDetachedComments = 6 // repeated string -) - -// Field numbers for google.protobuf.GeneratedCodeInfo. -const ( - GeneratedCodeInfo_Annotation = 1 // repeated google.protobuf.GeneratedCodeInfo.Annotation -) - -// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. -const ( - GeneratedCodeInfo_Annotation_Path = 1 // repeated int32 - GeneratedCodeInfo_Annotation_SourceFile = 2 // optional string - GeneratedCodeInfo_Annotation_Begin = 3 // optional int32 - GeneratedCodeInfo_Annotation_End = 4 // optional int32 -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go b/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go deleted file mode 100644 index e5978859980..00000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fieldnum contains constants for field numbers of fields in messages -// declared in descriptor.proto and any of the well-known types. -package fieldnum diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go deleted file mode 100644 index 8816c7358da..00000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Duration. -const ( - Duration_Seconds = 1 // optional int64 - Duration_Nanos = 2 // optional int32 -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go deleted file mode 100644 index 7e3bfa27bbc..00000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. 
- -package fieldnum - -// Field numbers for google.protobuf.FieldMask. -const ( - FieldMask_Paths = 1 // repeated string -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go deleted file mode 100644 index 241972b1f78..00000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.SourceContext. -const ( - SourceContext_FileName = 1 // optional string -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go deleted file mode 100644 index c460aab44a7..00000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Struct. -const ( - Struct_Fields = 1 // repeated google.protobuf.Struct.FieldsEntry -) - -// Field numbers for google.protobuf.Struct.FieldsEntry. -const ( - Struct_FieldsEntry_Key = 1 // optional string - Struct_FieldsEntry_Value = 2 // optional google.protobuf.Value -) - -// Field numbers for google.protobuf.Value. -const ( - Value_NullValue = 1 // optional google.protobuf.NullValue - Value_NumberValue = 2 // optional double - Value_StringValue = 3 // optional string - Value_BoolValue = 4 // optional bool - Value_StructValue = 5 // optional google.protobuf.Struct - Value_ListValue = 6 // optional google.protobuf.ListValue -) - -// Field numbers for google.protobuf.ListValue. -const ( - ListValue_Values = 1 // repeated google.protobuf.Value -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go deleted file mode 100644 index b4346fba547..00000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Timestamp. -const ( - Timestamp_Seconds = 1 // optional int64 - Timestamp_Nanos = 2 // optional int32 -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go deleted file mode 100644 index b392e95981a..00000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Type. 
-const ( - Type_Name = 1 // optional string - Type_Fields = 2 // repeated google.protobuf.Field - Type_Oneofs = 3 // repeated string - Type_Options = 4 // repeated google.protobuf.Option - Type_SourceContext = 5 // optional google.protobuf.SourceContext - Type_Syntax = 6 // optional google.protobuf.Syntax -) - -// Field numbers for google.protobuf.Field. -const ( - Field_Kind = 1 // optional google.protobuf.Field.Kind - Field_Cardinality = 2 // optional google.protobuf.Field.Cardinality - Field_Number = 3 // optional int32 - Field_Name = 4 // optional string - Field_TypeUrl = 6 // optional string - Field_OneofIndex = 7 // optional int32 - Field_Packed = 8 // optional bool - Field_Options = 9 // repeated google.protobuf.Option - Field_JsonName = 10 // optional string - Field_DefaultValue = 11 // optional string -) - -// Field numbers for google.protobuf.Enum. -const ( - Enum_Name = 1 // optional string - Enum_Enumvalue = 2 // repeated google.protobuf.EnumValue - Enum_Options = 3 // repeated google.protobuf.Option - Enum_SourceContext = 4 // optional google.protobuf.SourceContext - Enum_Syntax = 5 // optional google.protobuf.Syntax -) - -// Field numbers for google.protobuf.EnumValue. -const ( - EnumValue_Name = 1 // optional string - EnumValue_Number = 2 // optional int32 - EnumValue_Options = 3 // repeated google.protobuf.Option -) - -// Field numbers for google.protobuf.Option. -const ( - Option_Name = 1 // optional string - Option_Value = 2 // optional google.protobuf.Any -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go deleted file mode 100644 index 42f846a9f71..00000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.DoubleValue. -const ( - DoubleValue_Value = 1 // optional double -) - -// Field numbers for google.protobuf.FloatValue. -const ( - FloatValue_Value = 1 // optional float -) - -// Field numbers for google.protobuf.Int64Value. -const ( - Int64Value_Value = 1 // optional int64 -) - -// Field numbers for google.protobuf.UInt64Value. -const ( - UInt64Value_Value = 1 // optional uint64 -) - -// Field numbers for google.protobuf.Int32Value. -const ( - Int32Value_Value = 1 // optional int32 -) - -// Field numbers for google.protobuf.UInt32Value. -const ( - UInt32Value_Value = 1 // optional uint32 -) - -// Field numbers for google.protobuf.BoolValue. -const ( - BoolValue_Value = 1 // optional bool -) - -// Field numbers for google.protobuf.StringValue. -const ( - StringValue_Value = 1 // optional string -) - -// Field numbers for google.protobuf.BytesValue. 
-const ( - BytesValue_Value = 1 // optional bytes -) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go index 462d384e9f7..d02d770c984 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/build.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -7,7 +7,7 @@ package filedesc import ( "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" preg "google.golang.org/protobuf/reflect/protoregistry" @@ -126,24 +126,24 @@ func (db *Builder) unmarshalCounts(b []byte, isFile bool) { b = b[m:] if isFile { switch num { - case fieldnum.FileDescriptorProto_EnumType: + case genid.FileDescriptorProto_EnumType_field_number: db.NumEnums++ - case fieldnum.FileDescriptorProto_MessageType: + case genid.FileDescriptorProto_MessageType_field_number: db.unmarshalCounts(v, false) db.NumMessages++ - case fieldnum.FileDescriptorProto_Extension: + case genid.FileDescriptorProto_Extension_field_number: db.NumExtensions++ - case fieldnum.FileDescriptorProto_Service: + case genid.FileDescriptorProto_Service_field_number: db.NumServices++ } } else { switch num { - case fieldnum.DescriptorProto_EnumType: + case genid.DescriptorProto_EnumType_field_number: db.NumEnums++ - case fieldnum.DescriptorProto_NestedType: + case genid.DescriptorProto_NestedType_field_number: db.unmarshalCounts(v, false) db.NumMessages++ - case fieldnum.DescriptorProto_Extension: + case genid.DescriptorProto_Extension_field_number: db.NumExtensions++ } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 2540befd645..9385126fba6 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -13,6 +13,7 @@ import ( "google.golang.org/protobuf/internal/descfmt" "google.golang.org/protobuf/internal/descopts" "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" pref "google.golang.org/protobuf/reflect/protoreflect" @@ -302,13 +303,13 @@ func (fd *Field) MapKey() pref.FieldDescriptor { if !fd.IsMap() { return nil } - return fd.Message().Fields().ByNumber(1) + return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number) } func (fd *Field) MapValue() pref.FieldDescriptor { if !fd.IsMap() { return nil } - return fd.Message().Fields().ByNumber(2) + return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number) } func (fd *Field) HasDefault() bool { return fd.L1.Default.has } func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index c0cddf86a46..66e1fee5224 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -8,7 +8,7 @@ import ( "sync" "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" pref "google.golang.org/protobuf/reflect/protoreflect" ) 
@@ -107,7 +107,7 @@ func (fd *File) unmarshalSeed(b []byte) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FileDescriptorProto_Syntax: + case genid.FileDescriptorProto_Syntax_field_number: switch string(v) { case "proto2": fd.L1.Syntax = pref.Proto2 @@ -116,36 +116,36 @@ func (fd *File) unmarshalSeed(b []byte) { default: panic("invalid syntax") } - case fieldnum.FileDescriptorProto_Name: + case genid.FileDescriptorProto_Name_field_number: fd.L1.Path = sb.MakeString(v) - case fieldnum.FileDescriptorProto_Package: + case genid.FileDescriptorProto_Package_field_number: fd.L1.Package = pref.FullName(sb.MakeString(v)) - case fieldnum.FileDescriptorProto_EnumType: - if prevField != fieldnum.FileDescriptorProto_EnumType { + case genid.FileDescriptorProto_EnumType_field_number: + if prevField != genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { panic("non-contiguous repeated field") } posEnums = len(b0) - len(b) - n - m } numEnums++ - case fieldnum.FileDescriptorProto_MessageType: - if prevField != fieldnum.FileDescriptorProto_MessageType { + case genid.FileDescriptorProto_MessageType_field_number: + if prevField != genid.FileDescriptorProto_MessageType_field_number { if numMessages > 0 { panic("non-contiguous repeated field") } posMessages = len(b0) - len(b) - n - m } numMessages++ - case fieldnum.FileDescriptorProto_Extension: - if prevField != fieldnum.FileDescriptorProto_Extension { + case genid.FileDescriptorProto_Extension_field_number: + if prevField != genid.FileDescriptorProto_Extension_field_number { if numExtensions > 0 { panic("non-contiguous repeated field") } posExtensions = len(b0) - len(b) - n - m } numExtensions++ - case fieldnum.FileDescriptorProto_Service: - if prevField != fieldnum.FileDescriptorProto_Service { + case genid.FileDescriptorProto_Service_field_number: + if prevField != genid.FileDescriptorProto_Service_field_number { if numServices > 0 { panic("non-contiguous repeated field") } @@ -233,9 +233,9 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Desc v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.EnumDescriptorProto_Name: + case genid.EnumDescriptorProto_Name_field_number: ed.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.EnumDescriptorProto_Value: + case genid.EnumDescriptorProto_Value_field_number: numValues++ } default: @@ -260,7 +260,7 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Desc v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.EnumDescriptorProto_Value: + case genid.EnumDescriptorProto_Value_field_number: ed.L2.Values.List[i].unmarshalFull(v, sb, pf, ed, i) i++ } @@ -288,33 +288,33 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.D v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_Name: + case genid.DescriptorProto_Name_field_number: md.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.DescriptorProto_EnumType: - if prevField != fieldnum.DescriptorProto_EnumType { + case genid.DescriptorProto_EnumType_field_number: + if prevField != genid.DescriptorProto_EnumType_field_number { if numEnums > 0 { panic("non-contiguous repeated field") } posEnums = len(b0) - len(b) - n - m } numEnums++ - case fieldnum.DescriptorProto_NestedType: - if prevField != fieldnum.DescriptorProto_NestedType { + case genid.DescriptorProto_NestedType_field_number: + if prevField != 
genid.DescriptorProto_NestedType_field_number { if numMessages > 0 { panic("non-contiguous repeated field") } posMessages = len(b0) - len(b) - n - m } numMessages++ - case fieldnum.DescriptorProto_Extension: - if prevField != fieldnum.DescriptorProto_Extension { + case genid.DescriptorProto_Extension_field_number: + if prevField != genid.DescriptorProto_Extension_field_number { if numExtensions > 0 { panic("non-contiguous repeated field") } posExtensions = len(b0) - len(b) - n - m } numExtensions++ - case fieldnum.DescriptorProto_Options: + case genid.DescriptorProto_Options_field_number: md.unmarshalSeedOptions(v) } prevField = num @@ -375,9 +375,9 @@ func (md *Message) unmarshalSeedOptions(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.MessageOptions_MapEntry: + case genid.MessageOptions_MapEntry_field_number: md.L1.IsMapEntry = protowire.DecodeBool(v) - case fieldnum.MessageOptions_MessageSetWireFormat: + case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } default: @@ -400,20 +400,20 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Number: + case genid.FieldDescriptorProto_Number_field_number: xd.L1.Number = pref.FieldNumber(v) - case fieldnum.FieldDescriptorProto_Label: + case genid.FieldDescriptorProto_Label_field_number: xd.L1.Cardinality = pref.Cardinality(v) - case fieldnum.FieldDescriptorProto_Type: + case genid.FieldDescriptorProto_Type_field_number: xd.L1.Kind = pref.Kind(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Name: + case genid.FieldDescriptorProto_Name_field_number: xd.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.FieldDescriptorProto_Extendee: + case genid.FieldDescriptorProto_Extendee_field_number: xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) } default: @@ -436,7 +436,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.D v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.ServiceDescriptorProto_Name: + case genid.ServiceDescriptorProto_Name_field_number: sd.L0.FullName = appendFullName(sb, pd.FullName(), v) } default: diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index bc215944a32..e672233e77e 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/descopts" - "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" pref "google.golang.org/protobuf/reflect/protoreflect" @@ -143,35 +143,35 @@ func (fd *File) unmarshalFull(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FileDescriptorProto_PublicDependency: + case genid.FileDescriptorProto_PublicDependency_field_number: fd.L2.Imports[v].IsPublic = true - case fieldnum.FileDescriptorProto_WeakDependency: + case genid.FileDescriptorProto_WeakDependency_field_number: fd.L2.Imports[v].IsWeak = true } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case 
fieldnum.FileDescriptorProto_Dependency: + case genid.FileDescriptorProto_Dependency_field_number: path := sb.MakeString(v) imp, _ := fd.builder.FileRegistry.FindFileByPath(path) if imp == nil { imp = PlaceholderFile(path) } fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp}) - case fieldnum.FileDescriptorProto_EnumType: + case genid.FileDescriptorProto_EnumType_field_number: fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) enumIdx++ - case fieldnum.FileDescriptorProto_MessageType: + case genid.FileDescriptorProto_MessageType_field_number: fd.L1.Messages.List[messageIdx].unmarshalFull(v, sb) messageIdx++ - case fieldnum.FileDescriptorProto_Extension: + case genid.FileDescriptorProto_Extension_field_number: fd.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) extensionIdx++ - case fieldnum.FileDescriptorProto_Service: + case genid.FileDescriptorProto_Service_field_number: fd.L1.Services.List[serviceIdx].unmarshalFull(v, sb) serviceIdx++ - case fieldnum.FileDescriptorProto_Options: + case genid.FileDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -196,13 +196,13 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.EnumDescriptorProto_Value: + case genid.EnumDescriptorProto_Value_field_number: rawValues = append(rawValues, v) - case fieldnum.EnumDescriptorProto_ReservedName: + case genid.EnumDescriptorProto_ReservedName_field_number: ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) - case fieldnum.EnumDescriptorProto_ReservedRange: + case genid.EnumDescriptorProto_ReservedRange_field_number: ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v)) - case fieldnum.EnumDescriptorProto_Options: + case genid.EnumDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -228,9 +228,9 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.EnumDescriptorProto_EnumReservedRange_Start: + case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number: r[0] = pref.EnumNumber(v) - case fieldnum.EnumDescriptorProto_EnumReservedRange_End: + case genid.EnumDescriptorProto_EnumReservedRange_End_field_number: r[1] = pref.EnumNumber(v) } default: @@ -255,17 +255,17 @@ func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.EnumValueDescriptorProto_Number: + case genid.EnumValueDescriptorProto_Number_field_number: vd.L1.Number = pref.EnumNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.EnumValueDescriptorProto_Name: + case genid.EnumValueDescriptorProto_Name_field_number: // NOTE: Enum values are in the same scope as the enum parent. 
vd.L0.FullName = appendFullName(sb, pd.Parent().FullName(), v) - case fieldnum.EnumValueDescriptorProto_Options: + case genid.EnumValueDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -289,29 +289,29 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_Field: + case genid.DescriptorProto_Field_field_number: rawFields = append(rawFields, v) - case fieldnum.DescriptorProto_OneofDecl: + case genid.DescriptorProto_OneofDecl_field_number: rawOneofs = append(rawOneofs, v) - case fieldnum.DescriptorProto_ReservedName: + case genid.DescriptorProto_ReservedName_field_number: md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) - case fieldnum.DescriptorProto_ReservedRange: + case genid.DescriptorProto_ReservedRange_field_number: md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v)) - case fieldnum.DescriptorProto_ExtensionRange: + case genid.DescriptorProto_ExtensionRange_field_number: r, rawOptions := unmarshalMessageExtensionRange(v) opts := md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.ExtensionRange, rawOptions) md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, r) md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, opts) - case fieldnum.DescriptorProto_EnumType: + case genid.DescriptorProto_EnumType_field_number: md.L1.Enums.List[enumIdx].unmarshalFull(v, sb) enumIdx++ - case fieldnum.DescriptorProto_NestedType: + case genid.DescriptorProto_NestedType_field_number: md.L1.Messages.List[messageIdx].unmarshalFull(v, sb) messageIdx++ - case fieldnum.DescriptorProto_Extension: + case genid.DescriptorProto_Extension_field_number: md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) extensionIdx++ - case fieldnum.DescriptorProto_Options: + case genid.DescriptorProto_Options_field_number: md.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } @@ -347,9 +347,9 @@ func (md *Message) unmarshalOptions(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.MessageOptions_MapEntry: + case genid.MessageOptions_MapEntry_field_number: md.L1.IsMapEntry = protowire.DecodeBool(v) - case fieldnum.MessageOptions_MessageSetWireFormat: + case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } default: @@ -368,9 +368,9 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_ReservedRange_Start: + case genid.DescriptorProto_ReservedRange_Start_field_number: r[0] = pref.FieldNumber(v) - case fieldnum.DescriptorProto_ReservedRange_End: + case genid.DescriptorProto_ReservedRange_End_field_number: r[1] = pref.FieldNumber(v) } default: @@ -390,16 +390,16 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_ExtensionRange_Start: + case genid.DescriptorProto_ExtensionRange_Start_field_number: r[0] = pref.FieldNumber(v) - case fieldnum.DescriptorProto_ExtensionRange_End: + case genid.DescriptorProto_ExtensionRange_End_field_number: r[1] = pref.FieldNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_ExtensionRange_Options: + case 
genid.DescriptorProto_ExtensionRange_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -425,13 +425,13 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Number: + case genid.FieldDescriptorProto_Number_field_number: fd.L1.Number = pref.FieldNumber(v) - case fieldnum.FieldDescriptorProto_Label: + case genid.FieldDescriptorProto_Label_field_number: fd.L1.Cardinality = pref.Cardinality(v) - case fieldnum.FieldDescriptorProto_Type: + case genid.FieldDescriptorProto_Type_field_number: fd.L1.Kind = pref.Kind(v) - case fieldnum.FieldDescriptorProto_OneofIndex: + case genid.FieldDescriptorProto_OneofIndex_field_number: // In Message.unmarshalFull, we allocate slices for both // the field and oneof descriptors before unmarshaling either // of them. This ensures pointers to slice elements are stable. @@ -441,22 +441,22 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des panic("oneof type already set") } fd.L1.ContainingOneof = od - case fieldnum.FieldDescriptorProto_Proto3Optional: + case genid.FieldDescriptorProto_Proto3Optional_field_number: fd.L1.IsProto3Optional = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Name: + case genid.FieldDescriptorProto_Name_field_number: fd.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.FieldDescriptorProto_JsonName: + case genid.FieldDescriptorProto_JsonName_field_number: fd.L1.JSONName.Init(sb.MakeString(v)) - case fieldnum.FieldDescriptorProto_DefaultValue: + case genid.FieldDescriptorProto_DefaultValue_field_number: fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages - case fieldnum.FieldDescriptorProto_TypeName: + case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v - case fieldnum.FieldDescriptorProto_Options: + case genid.FieldDescriptorProto_Options_field_number: fd.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } @@ -488,10 +488,10 @@ func (fd *Field) unmarshalOptions(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldOptions_Packed: + case genid.FieldOptions_Packed_field_number: fd.L1.HasPacked = true fd.L1.IsPacked = protowire.DecodeBool(v) - case fieldnum.FieldOptions_Weak: + case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: fd.L1.HasEnforceUTF8 = true @@ -518,9 +518,9 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.OneofDescriptorProto_Name: + case genid.OneofDescriptorProto_Name_field_number: od.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.OneofDescriptorProto_Options: + case genid.OneofDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -543,20 +543,20 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Proto3Optional: + case genid.FieldDescriptorProto_Proto3Optional_field_number: xd.L2.IsProto3Optional = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_JsonName: + case 
genid.FieldDescriptorProto_JsonName_field_number: xd.L2.JSONName.Init(sb.MakeString(v)) - case fieldnum.FieldDescriptorProto_DefaultValue: + case genid.FieldDescriptorProto_DefaultValue_field_number: xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions - case fieldnum.FieldDescriptorProto_TypeName: + case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v - case fieldnum.FieldDescriptorProto_Options: + case genid.FieldDescriptorProto_Options_field_number: xd.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } @@ -586,7 +586,7 @@ func (xd *Extension) unmarshalOptions(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldOptions_Packed: + case genid.FieldOptions_Packed_field_number: xd.L2.IsPacked = protowire.DecodeBool(v) } default: @@ -608,9 +608,9 @@ func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.ServiceDescriptorProto_Method: + case genid.ServiceDescriptorProto_Method_field_number: rawMethods = append(rawMethods, v) - case fieldnum.ServiceDescriptorProto_Options: + case genid.ServiceDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -641,22 +641,22 @@ func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.De v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.MethodDescriptorProto_ClientStreaming: + case genid.MethodDescriptorProto_ClientStreaming_field_number: md.L1.IsStreamingClient = protowire.DecodeBool(v) - case fieldnum.MethodDescriptorProto_ServerStreaming: + case genid.MethodDescriptorProto_ServerStreaming_field_number: md.L1.IsStreamingServer = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.MethodDescriptorProto_Name: + case genid.MethodDescriptorProto_Name_field_number: md.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.MethodDescriptorProto_InputType: + case genid.MethodDescriptorProto_InputType_field_number: md.L1.Input = PlaceholderMessage(makeFullName(sb, v)) - case fieldnum.MethodDescriptorProto_OutputType: + case genid.MethodDescriptorProto_OutputType_field_number: md.L1.Output = PlaceholderMessage(makeFullName(sb, v)) - case fieldnum.MethodDescriptorProto_Options: + case genid.MethodDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go index 1b7089b6434..c876cd34d70 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -6,7 +6,6 @@ package filedesc import ( "fmt" - "math" "sort" "sync" @@ -185,10 +184,7 @@ func (p *FieldRanges) CheckValid(isMessageSet bool) error { // Unlike the FieldNumber.IsValid method, it allows ranges that cover the // reserved number range. func isValidFieldNumber(n protoreflect.FieldNumber, isMessageSet bool) bool { - if isMessageSet { - return protowire.MinValidNumber <= n && n <= math.MaxInt32 - } - return protowire.MinValidNumber <= n && n <= protowire.MaxValidNumber + return protowire.MinValidNumber <= n && (n <= protowire.MaxValidNumber || isMessageSet) } // CheckOverlap reports an error if p and q overlap. 
diff --git a/vendor/google.golang.org/protobuf/internal/genid/any_gen.go b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go new file mode 100644 index 00000000000..e6f7d47ab6d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_any_proto = "google/protobuf/any.proto" + +// Names for google.protobuf.Any. +const ( + Any_message_name protoreflect.Name = "Any" + Any_message_fullname protoreflect.FullName = "google.protobuf.Any" +) + +// Field names for google.protobuf.Any. +const ( + Any_TypeUrl_field_name protoreflect.Name = "type_url" + Any_Value_field_name protoreflect.Name = "value" + + Any_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Any.type_url" + Any_Value_field_fullname protoreflect.FullName = "google.protobuf.Any.value" +) + +// Field numbers for google.protobuf.Any. +const ( + Any_TypeUrl_field_number protoreflect.FieldNumber = 1 + Any_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go new file mode 100644 index 00000000000..df8f9185013 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go @@ -0,0 +1,106 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_api_proto = "google/protobuf/api.proto" + +// Names for google.protobuf.Api. +const ( + Api_message_name protoreflect.Name = "Api" + Api_message_fullname protoreflect.FullName = "google.protobuf.Api" +) + +// Field names for google.protobuf.Api. +const ( + Api_Name_field_name protoreflect.Name = "name" + Api_Methods_field_name protoreflect.Name = "methods" + Api_Options_field_name protoreflect.Name = "options" + Api_Version_field_name protoreflect.Name = "version" + Api_SourceContext_field_name protoreflect.Name = "source_context" + Api_Mixins_field_name protoreflect.Name = "mixins" + Api_Syntax_field_name protoreflect.Name = "syntax" + + Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" + Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" + Api_Options_field_fullname protoreflect.FullName = "google.protobuf.Api.options" + Api_Version_field_fullname protoreflect.FullName = "google.protobuf.Api.version" + Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" + Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" + Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" +) + +// Field numbers for google.protobuf.Api. 
+const ( + Api_Name_field_number protoreflect.FieldNumber = 1 + Api_Methods_field_number protoreflect.FieldNumber = 2 + Api_Options_field_number protoreflect.FieldNumber = 3 + Api_Version_field_number protoreflect.FieldNumber = 4 + Api_SourceContext_field_number protoreflect.FieldNumber = 5 + Api_Mixins_field_number protoreflect.FieldNumber = 6 + Api_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Method. +const ( + Method_message_name protoreflect.Name = "Method" + Method_message_fullname protoreflect.FullName = "google.protobuf.Method" +) + +// Field names for google.protobuf.Method. +const ( + Method_Name_field_name protoreflect.Name = "name" + Method_RequestTypeUrl_field_name protoreflect.Name = "request_type_url" + Method_RequestStreaming_field_name protoreflect.Name = "request_streaming" + Method_ResponseTypeUrl_field_name protoreflect.Name = "response_type_url" + Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" + Method_Options_field_name protoreflect.Name = "options" + Method_Syntax_field_name protoreflect.Name = "syntax" + + Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" + Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" + Method_RequestStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.request_streaming" + Method_ResponseTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.response_type_url" + Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" + Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" + Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" +) + +// Field numbers for google.protobuf.Method. +const ( + Method_Name_field_number protoreflect.FieldNumber = 1 + Method_RequestTypeUrl_field_number protoreflect.FieldNumber = 2 + Method_RequestStreaming_field_number protoreflect.FieldNumber = 3 + Method_ResponseTypeUrl_field_number protoreflect.FieldNumber = 4 + Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 + Method_Options_field_number protoreflect.FieldNumber = 6 + Method_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Mixin. +const ( + Mixin_message_name protoreflect.Name = "Mixin" + Mixin_message_fullname protoreflect.FullName = "google.protobuf.Mixin" +) + +// Field names for google.protobuf.Mixin. +const ( + Mixin_Name_field_name protoreflect.Name = "name" + Mixin_Root_field_name protoreflect.Name = "root" + + Mixin_Name_field_fullname protoreflect.FullName = "google.protobuf.Mixin.name" + Mixin_Root_field_fullname protoreflect.FullName = "google.protobuf.Mixin.root" +) + +// Field numbers for google.protobuf.Mixin. +const ( + Mixin_Name_field_number protoreflect.FieldNumber = 1 + Mixin_Root_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go new file mode 100644 index 00000000000..e3cdf1c2059 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -0,0 +1,829 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
+ +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" + +// Names for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" + FileDescriptorSet_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet" +) + +// Field names for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File_field_name protoreflect.Name = "file" + + FileDescriptorSet_File_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet.file" +) + +// Field numbers for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_message_name protoreflect.Name = "FileDescriptorProto" + FileDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto" +) + +// Field names for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_Name_field_name protoreflect.Name = "name" + FileDescriptorProto_Package_field_name protoreflect.Name = "package" + FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency" + FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency" + FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency" + FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type" + FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + FileDescriptorProto_Service_field_name protoreflect.Name = "service" + FileDescriptorProto_Extension_field_name protoreflect.Name = "extension" + FileDescriptorProto_Options_field_name protoreflect.Name = "options" + FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info" + FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax" + + FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name" + FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package" + FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency" + FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency" + FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency" + FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type" + FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type" + FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service" + FileDescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.extension" + FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options" + FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info" + FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax" +) + +// Field numbers for google.protobuf.FileDescriptorProto. 
+const ( + FileDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FileDescriptorProto_Package_field_number protoreflect.FieldNumber = 2 + FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3 + FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10 + FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11 + FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4 + FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5 + FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6 + FileDescriptorProto_Extension_field_number protoreflect.FieldNumber = 7 + FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 + FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 +) + +// Names for google.protobuf.DescriptorProto. +const ( + DescriptorProto_message_name protoreflect.Name = "DescriptorProto" + DescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto" +) + +// Field names for google.protobuf.DescriptorProto. +const ( + DescriptorProto_Name_field_name protoreflect.Name = "name" + DescriptorProto_Field_field_name protoreflect.Name = "field" + DescriptorProto_Extension_field_name protoreflect.Name = "extension" + DescriptorProto_NestedType_field_name protoreflect.Name = "nested_type" + DescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + DescriptorProto_ExtensionRange_field_name protoreflect.Name = "extension_range" + DescriptorProto_OneofDecl_field_name protoreflect.Name = "oneof_decl" + DescriptorProto_Options_field_name protoreflect.Name = "options" + DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name" + DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field" + DescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension" + DescriptorProto_NestedType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.nested_type" + DescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.enum_type" + DescriptorProto_ExtensionRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension_range" + DescriptorProto_OneofDecl_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.oneof_decl" + DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options" + DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range" + DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.DescriptorProto. 
+const ( + DescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + DescriptorProto_Field_field_number protoreflect.FieldNumber = 2 + DescriptorProto_Extension_field_number protoreflect.FieldNumber = 6 + DescriptorProto_NestedType_field_number protoreflect.FieldNumber = 3 + DescriptorProto_EnumType_field_number protoreflect.FieldNumber = 4 + DescriptorProto_ExtensionRange_field_number protoreflect.FieldNumber = 5 + DescriptorProto_OneofDecl_field_number protoreflect.FieldNumber = 8 + DescriptorProto_Options_field_number protoreflect.FieldNumber = 7 + DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9 + DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10 +) + +// Names for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_message_name protoreflect.Name = "ExtensionRange" + DescriptorProto_ExtensionRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange" +) + +// Field names for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ExtensionRange_End_field_name protoreflect.Name = "end" + DescriptorProto_ExtensionRange_Options_field_name protoreflect.Name = "options" + + DescriptorProto_ExtensionRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.start" + DescriptorProto_ExtensionRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.end" + DescriptorProto_ExtensionRange_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.options" +) + +// Field numbers for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ExtensionRange_End_field_number protoreflect.FieldNumber = 2 + DescriptorProto_ExtensionRange_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_message_name protoreflect.Name = "ReservedRange" + DescriptorProto_ReservedRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange" +) + +// Field names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ReservedRange_End_field_name protoreflect.Name = "end" + + DescriptorProto_ReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.start" + DescriptorProto_ReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.end" +) + +// Field numbers for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_message_name protoreflect.Name = "ExtensionRangeOptions" + ExtensionRangeOptions_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions" +) + +// Field names for google.protobuf.ExtensionRangeOptions. 
+const ( + ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_message_name protoreflect.Name = "FieldDescriptorProto" + FieldDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto" +) + +// Field names for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_Name_field_name protoreflect.Name = "name" + FieldDescriptorProto_Number_field_name protoreflect.Name = "number" + FieldDescriptorProto_Label_field_name protoreflect.Name = "label" + FieldDescriptorProto_Type_field_name protoreflect.Name = "type" + FieldDescriptorProto_TypeName_field_name protoreflect.Name = "type_name" + FieldDescriptorProto_Extendee_field_name protoreflect.Name = "extendee" + FieldDescriptorProto_DefaultValue_field_name protoreflect.Name = "default_value" + FieldDescriptorProto_OneofIndex_field_name protoreflect.Name = "oneof_index" + FieldDescriptorProto_JsonName_field_name protoreflect.Name = "json_name" + FieldDescriptorProto_Options_field_name protoreflect.Name = "options" + FieldDescriptorProto_Proto3Optional_field_name protoreflect.Name = "proto3_optional" + + FieldDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.name" + FieldDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.number" + FieldDescriptorProto_Label_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.label" + FieldDescriptorProto_Type_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type" + FieldDescriptorProto_TypeName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type_name" + FieldDescriptorProto_Extendee_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.extendee" + FieldDescriptorProto_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.default_value" + FieldDescriptorProto_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.oneof_index" + FieldDescriptorProto_JsonName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.json_name" + FieldDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.options" + FieldDescriptorProto_Proto3Optional_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.proto3_optional" +) + +// Field numbers for google.protobuf.FieldDescriptorProto. 
+const ( + FieldDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FieldDescriptorProto_Number_field_number protoreflect.FieldNumber = 3 + FieldDescriptorProto_Label_field_number protoreflect.FieldNumber = 4 + FieldDescriptorProto_Type_field_number protoreflect.FieldNumber = 5 + FieldDescriptorProto_TypeName_field_number protoreflect.FieldNumber = 6 + FieldDescriptorProto_Extendee_field_number protoreflect.FieldNumber = 2 + FieldDescriptorProto_DefaultValue_field_number protoreflect.FieldNumber = 7 + FieldDescriptorProto_OneofIndex_field_number protoreflect.FieldNumber = 9 + FieldDescriptorProto_JsonName_field_number protoreflect.FieldNumber = 10 + FieldDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FieldDescriptorProto_Proto3Optional_field_number protoreflect.FieldNumber = 17 +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Type. +const ( + FieldDescriptorProto_Type_enum_fullname = "google.protobuf.FieldDescriptorProto.Type" + FieldDescriptorProto_Type_enum_name = "Type" +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" + FieldDescriptorProto_Label_enum_name = "Label" +) + +// Names for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" + OneofDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto" +) + +// Field names for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_name protoreflect.Name = "name" + OneofDescriptorProto_Options_field_name protoreflect.Name = "options" + + OneofDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.name" + OneofDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.options" +) + +// Field numbers for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + OneofDescriptorProto_Options_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_message_name protoreflect.Name = "EnumDescriptorProto" + EnumDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto" +) + +// Field names for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumDescriptorProto_Value_field_name protoreflect.Name = "value" + EnumDescriptorProto_Options_field_name protoreflect.Name = "options" + EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name" + EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value" + EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options" + EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range" + EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.EnumDescriptorProto. 
+const ( + EnumDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_Value_field_number protoreflect.FieldNumber = 2 + EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 + EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4 + EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_message_name protoreflect.Name = "EnumReservedRange" + EnumDescriptorProto_EnumReservedRange_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange" +) + +// Field names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_Start_field_name protoreflect.Name = "start" + EnumDescriptorProto_EnumReservedRange_End_field_name protoreflect.Name = "end" + + EnumDescriptorProto_EnumReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.start" + EnumDescriptorProto_EnumReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.end" +) + +// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_Start_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_EnumReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_message_name protoreflect.Name = "EnumValueDescriptorProto" + EnumValueDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto" +) + +// Field names for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumValueDescriptorProto_Number_field_name protoreflect.Name = "number" + EnumValueDescriptorProto_Options_field_name protoreflect.Name = "options" + + EnumValueDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.name" + EnumValueDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.number" + EnumValueDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.options" +) + +// Field numbers for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumValueDescriptorProto_Number_field_number protoreflect.FieldNumber = 2 + EnumValueDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_message_name protoreflect.Name = "ServiceDescriptorProto" + ServiceDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto" +) + +// Field names for google.protobuf.ServiceDescriptorProto. 
+const ( + ServiceDescriptorProto_Name_field_name protoreflect.Name = "name" + ServiceDescriptorProto_Method_field_name protoreflect.Name = "method" + ServiceDescriptorProto_Options_field_name protoreflect.Name = "options" + + ServiceDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.name" + ServiceDescriptorProto_Method_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.method" + ServiceDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.options" +) + +// Field numbers for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + ServiceDescriptorProto_Method_field_number protoreflect.FieldNumber = 2 + ServiceDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_message_name protoreflect.Name = "MethodDescriptorProto" + MethodDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto" +) + +// Field names for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_Name_field_name protoreflect.Name = "name" + MethodDescriptorProto_InputType_field_name protoreflect.Name = "input_type" + MethodDescriptorProto_OutputType_field_name protoreflect.Name = "output_type" + MethodDescriptorProto_Options_field_name protoreflect.Name = "options" + MethodDescriptorProto_ClientStreaming_field_name protoreflect.Name = "client_streaming" + MethodDescriptorProto_ServerStreaming_field_name protoreflect.Name = "server_streaming" + + MethodDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.name" + MethodDescriptorProto_InputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.input_type" + MethodDescriptorProto_OutputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.output_type" + MethodDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.options" + MethodDescriptorProto_ClientStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.client_streaming" + MethodDescriptorProto_ServerStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.server_streaming" +) + +// Field numbers for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + MethodDescriptorProto_InputType_field_number protoreflect.FieldNumber = 2 + MethodDescriptorProto_OutputType_field_number protoreflect.FieldNumber = 3 + MethodDescriptorProto_Options_field_number protoreflect.FieldNumber = 4 + MethodDescriptorProto_ClientStreaming_field_number protoreflect.FieldNumber = 5 + MethodDescriptorProto_ServerStreaming_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.FileOptions. +const ( + FileOptions_message_name protoreflect.Name = "FileOptions" + FileOptions_message_fullname protoreflect.FullName = "google.protobuf.FileOptions" +) + +// Field names for google.protobuf.FileOptions. 
+const ( + FileOptions_JavaPackage_field_name protoreflect.Name = "java_package" + FileOptions_JavaOuterClassname_field_name protoreflect.Name = "java_outer_classname" + FileOptions_JavaMultipleFiles_field_name protoreflect.Name = "java_multiple_files" + FileOptions_JavaGenerateEqualsAndHash_field_name protoreflect.Name = "java_generate_equals_and_hash" + FileOptions_JavaStringCheckUtf8_field_name protoreflect.Name = "java_string_check_utf8" + FileOptions_OptimizeFor_field_name protoreflect.Name = "optimize_for" + FileOptions_GoPackage_field_name protoreflect.Name = "go_package" + FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" + FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" + FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" + FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" + FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" + FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" + FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" + FileOptions_CsharpNamespace_field_name protoreflect.Name = "csharp_namespace" + FileOptions_SwiftPrefix_field_name protoreflect.Name = "swift_prefix" + FileOptions_PhpClassPrefix_field_name protoreflect.Name = "php_class_prefix" + FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" + FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" + FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" + FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" + FileOptions_JavaOuterClassname_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_outer_classname" + FileOptions_JavaMultipleFiles_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_multiple_files" + FileOptions_JavaGenerateEqualsAndHash_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generate_equals_and_hash" + FileOptions_JavaStringCheckUtf8_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_string_check_utf8" + FileOptions_OptimizeFor_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.optimize_for" + FileOptions_GoPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.go_package" + FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" + FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" + FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" + FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" + FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" + FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" + FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" + FileOptions_CsharpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.csharp_namespace" + FileOptions_SwiftPrefix_field_fullname protoreflect.FullName 
= "google.protobuf.FileOptions.swift_prefix" + FileOptions_PhpClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_class_prefix" + FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" + FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" + FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" + FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.FileOptions. +const ( + FileOptions_JavaPackage_field_number protoreflect.FieldNumber = 1 + FileOptions_JavaOuterClassname_field_number protoreflect.FieldNumber = 8 + FileOptions_JavaMultipleFiles_field_number protoreflect.FieldNumber = 10 + FileOptions_JavaGenerateEqualsAndHash_field_number protoreflect.FieldNumber = 20 + FileOptions_JavaStringCheckUtf8_field_number protoreflect.FieldNumber = 27 + FileOptions_OptimizeFor_field_number protoreflect.FieldNumber = 9 + FileOptions_GoPackage_field_number protoreflect.FieldNumber = 11 + FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 + FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 + FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 + FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 + FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 + FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 + FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 + FileOptions_CsharpNamespace_field_number protoreflect.FieldNumber = 37 + FileOptions_SwiftPrefix_field_number protoreflect.FieldNumber = 39 + FileOptions_PhpClassPrefix_field_number protoreflect.FieldNumber = 40 + FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 + FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 + FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 + FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.FileOptions.OptimizeMode. +const ( + FileOptions_OptimizeMode_enum_fullname = "google.protobuf.FileOptions.OptimizeMode" + FileOptions_OptimizeMode_enum_name = "OptimizeMode" +) + +// Names for google.protobuf.MessageOptions. +const ( + MessageOptions_message_name protoreflect.Name = "MessageOptions" + MessageOptions_message_fullname protoreflect.FullName = "google.protobuf.MessageOptions" +) + +// Field names for google.protobuf.MessageOptions. 
+const ( + MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" + MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" + MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" + MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MessageOptions. +const ( + MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 + MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 + MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 + MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.FieldOptions. +const ( + FieldOptions_message_name protoreflect.Name = "FieldOptions" + FieldOptions_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions" +) + +// Field names for google.protobuf.FieldOptions. +const ( + FieldOptions_Ctype_field_name protoreflect.Name = "ctype" + FieldOptions_Packed_field_name protoreflect.Name = "packed" + FieldOptions_Jstype_field_name protoreflect.Name = "jstype" + FieldOptions_Lazy_field_name protoreflect.Name = "lazy" + FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated" + FieldOptions_Weak_field_name protoreflect.Name = "weak" + FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" + FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed" + FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype" + FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy" + FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated" + FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" + FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.FieldOptions. 
+const ( + FieldOptions_Ctype_field_number protoreflect.FieldNumber = 1 + FieldOptions_Packed_field_number protoreflect.FieldNumber = 2 + FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6 + FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5 + FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 + FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_CType_enum_fullname = "google.protobuf.FieldOptions.CType" + FieldOptions_CType_enum_name = "CType" +) + +// Full and short names for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" + FieldOptions_JSType_enum_name = "JSType" +) + +// Names for google.protobuf.OneofOptions. +const ( + OneofOptions_message_name protoreflect.Name = "OneofOptions" + OneofOptions_message_fullname protoreflect.FullName = "google.protobuf.OneofOptions" +) + +// Field names for google.protobuf.OneofOptions. +const ( + OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.OneofOptions. +const ( + OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumOptions. +const ( + EnumOptions_message_name protoreflect.Name = "EnumOptions" + EnumOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumOptions" +) + +// Field names for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" + EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" + EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" + EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 + EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_message_name protoreflect.Name = "EnumValueOptions" + EnumValueOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions" +) + +// Field names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumValueOptions. 
+const ( + EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.ServiceOptions. +const ( + ServiceOptions_message_name protoreflect.Name = "ServiceOptions" + ServiceOptions_message_fullname protoreflect.FullName = "google.protobuf.ServiceOptions" +) + +// Field names for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" + ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" + ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.MethodOptions. +const ( + MethodOptions_message_name protoreflect.Name = "MethodOptions" + MethodOptions_message_fullname protoreflect.FullName = "google.protobuf.MethodOptions" +) + +// Field names for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" + MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IdempotencyLevel_enum_fullname = "google.protobuf.MethodOptions.IdempotencyLevel" + MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" +) + +// Names for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" + UninterpretedOption_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption" +) + +// Field names for google.protobuf.UninterpretedOption. 
+const ( + UninterpretedOption_Name_field_name protoreflect.Name = "name" + UninterpretedOption_IdentifierValue_field_name protoreflect.Name = "identifier_value" + UninterpretedOption_PositiveIntValue_field_name protoreflect.Name = "positive_int_value" + UninterpretedOption_NegativeIntValue_field_name protoreflect.Name = "negative_int_value" + UninterpretedOption_DoubleValue_field_name protoreflect.Name = "double_value" + UninterpretedOption_StringValue_field_name protoreflect.Name = "string_value" + UninterpretedOption_AggregateValue_field_name protoreflect.Name = "aggregate_value" + + UninterpretedOption_Name_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.name" + UninterpretedOption_IdentifierValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.identifier_value" + UninterpretedOption_PositiveIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.positive_int_value" + UninterpretedOption_NegativeIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.negative_int_value" + UninterpretedOption_DoubleValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.double_value" + UninterpretedOption_StringValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.string_value" + UninterpretedOption_AggregateValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.aggregate_value" +) + +// Field numbers for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_Name_field_number protoreflect.FieldNumber = 2 + UninterpretedOption_IdentifierValue_field_number protoreflect.FieldNumber = 3 + UninterpretedOption_PositiveIntValue_field_number protoreflect.FieldNumber = 4 + UninterpretedOption_NegativeIntValue_field_number protoreflect.FieldNumber = 5 + UninterpretedOption_DoubleValue_field_number protoreflect.FieldNumber = 6 + UninterpretedOption_StringValue_field_number protoreflect.FieldNumber = 7 + UninterpretedOption_AggregateValue_field_number protoreflect.FieldNumber = 8 +) + +// Names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_message_name protoreflect.Name = "NamePart" + UninterpretedOption_NamePart_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart" +) + +// Field names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_name protoreflect.Name = "name_part" + UninterpretedOption_NamePart_IsExtension_field_name protoreflect.Name = "is_extension" + + UninterpretedOption_NamePart_NamePart_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.name_part" + UninterpretedOption_NamePart_IsExtension_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.is_extension" +) + +// Field numbers for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_number protoreflect.FieldNumber = 1 + UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" + SourceCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo" +) + +// Field names for google.protobuf.SourceCodeInfo. 
+const ( + SourceCodeInfo_Location_field_name protoreflect.Name = "location" + + SourceCodeInfo_Location_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.location" +) + +// Field numbers for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_Location_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_message_name protoreflect.Name = "Location" + SourceCodeInfo_Location_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location" +) + +// Field names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_name protoreflect.Name = "path" + SourceCodeInfo_Location_Span_field_name protoreflect.Name = "span" + SourceCodeInfo_Location_LeadingComments_field_name protoreflect.Name = "leading_comments" + SourceCodeInfo_Location_TrailingComments_field_name protoreflect.Name = "trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_name protoreflect.Name = "leading_detached_comments" + + SourceCodeInfo_Location_Path_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.path" + SourceCodeInfo_Location_Span_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.span" + SourceCodeInfo_Location_LeadingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_comments" + SourceCodeInfo_Location_TrailingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_detached_comments" +) + +// Field numbers for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_number protoreflect.FieldNumber = 1 + SourceCodeInfo_Location_Span_field_number protoreflect.FieldNumber = 2 + SourceCodeInfo_Location_LeadingComments_field_number protoreflect.FieldNumber = 3 + SourceCodeInfo_Location_TrailingComments_field_number protoreflect.FieldNumber = 4 + SourceCodeInfo_Location_LeadingDetachedComments_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_message_name protoreflect.Name = "GeneratedCodeInfo" + GeneratedCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo" +) + +// Field names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_name protoreflect.Name = "annotation" + + GeneratedCodeInfo_Annotation_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.annotation" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_message_name protoreflect.Name = "Annotation" + GeneratedCodeInfo_Annotation_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation" +) + +// Field names for google.protobuf.GeneratedCodeInfo.Annotation. 
+const ( + GeneratedCodeInfo_Annotation_Path_field_name protoreflect.Name = "path" + GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file" + GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin" + GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end" + + GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path" + GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file" + GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin" + GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_Path_field_number protoreflect.FieldNumber = 1 + GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2 + GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3 + GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go new file mode 100644 index 00000000000..45ccd01211c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package genid contains constants for declarations in descriptor.proto +// and the well-known types. +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go new file mode 100644 index 00000000000..b070ef4fd69 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_duration_proto = "google/protobuf/duration.proto" + +// Names for google.protobuf.Duration. +const ( + Duration_message_name protoreflect.Name = "Duration" + Duration_message_fullname protoreflect.FullName = "google.protobuf.Duration" +) + +// Field names for google.protobuf.Duration. +const ( + Duration_Seconds_field_name protoreflect.Name = "seconds" + Duration_Nanos_field_name protoreflect.Name = "nanos" + + Duration_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Duration.seconds" + Duration_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Duration.nanos" +) + +// Field numbers for google.protobuf.Duration. 
+const ( + Duration_Seconds_field_number protoreflect.FieldNumber = 1 + Duration_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go new file mode 100644 index 00000000000..762abb34a4e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_empty_proto = "google/protobuf/empty.proto" + +// Names for google.protobuf.Empty. +const ( + Empty_message_name protoreflect.Name = "Empty" + Empty_message_fullname protoreflect.FullName = "google.protobuf.Empty" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go new file mode 100644 index 00000000000..70bed453fee --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_field_mask_proto = "google/protobuf/field_mask.proto" + +// Names for google.protobuf.FieldMask. +const ( + FieldMask_message_name protoreflect.Name = "FieldMask" + FieldMask_message_fullname protoreflect.FullName = "google.protobuf.FieldMask" +) + +// Field names for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_name protoreflect.Name = "paths" + + FieldMask_Paths_field_fullname protoreflect.FullName = "google.protobuf.FieldMask.paths" +) + +// Field numbers for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go new file mode 100644 index 00000000000..693d2e9e1fe --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go @@ -0,0 +1,25 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +// Go names of implementation-specific struct fields in generated messages. +const ( + State_goname = "state" + + SizeCache_goname = "sizeCache" + SizeCacheA_goname = "XXX_sizecache" + + WeakFields_goname = "weakFields" + WeakFieldsA_goname = "XXX_weak" + + UnknownFields_goname = "unknownFields" + UnknownFieldsA_goname = "XXX_unrecognized" + + ExtensionFields_goname = "extensionFields" + ExtensionFieldsA_goname = "XXX_InternalExtensions" + ExtensionFieldsB_goname = "XXX_extensions" + + WeakFieldPrefix_goname = "XXX_weak_" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go new file mode 100644 index 00000000000..8f9ea02ff2a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field names and numbers for synthetic map entry messages. +const ( + MapEntry_Key_field_name protoreflect.Name = "key" + MapEntry_Value_field_name protoreflect.Name = "value" + + MapEntry_Key_field_number protoreflect.FieldNumber = 1 + MapEntry_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go new file mode 100644 index 00000000000..3e99ae16c84 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_source_context_proto = "google/protobuf/source_context.proto" + +// Names for google.protobuf.SourceContext. +const ( + SourceContext_message_name protoreflect.Name = "SourceContext" + SourceContext_message_fullname protoreflect.FullName = "google.protobuf.SourceContext" +) + +// Field names for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_name protoreflect.Name = "file_name" + + SourceContext_FileName_field_fullname protoreflect.FullName = "google.protobuf.SourceContext.file_name" +) + +// Field numbers for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go new file mode 100644 index 00000000000..1a38944b26e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_struct_proto = "google/protobuf/struct.proto" + +// Full and short names for google.protobuf.NullValue. +const ( + NullValue_enum_fullname = "google.protobuf.NullValue" + NullValue_enum_name = "NullValue" +) + +// Names for google.protobuf.Struct. +const ( + Struct_message_name protoreflect.Name = "Struct" + Struct_message_fullname protoreflect.FullName = "google.protobuf.Struct" +) + +// Field names for google.protobuf.Struct. +const ( + Struct_Fields_field_name protoreflect.Name = "fields" + + Struct_Fields_field_fullname protoreflect.FullName = "google.protobuf.Struct.fields" +) + +// Field numbers for google.protobuf.Struct. +const ( + Struct_Fields_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_message_name protoreflect.Name = "FieldsEntry" + Struct_FieldsEntry_message_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry" +) + +// Field names for google.protobuf.Struct.FieldsEntry. 
+const ( + Struct_FieldsEntry_Key_field_name protoreflect.Name = "key" + Struct_FieldsEntry_Value_field_name protoreflect.Name = "value" + + Struct_FieldsEntry_Key_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.key" + Struct_FieldsEntry_Value_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.value" +) + +// Field numbers for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_Key_field_number protoreflect.FieldNumber = 1 + Struct_FieldsEntry_Value_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.Value. +const ( + Value_message_name protoreflect.Name = "Value" + Value_message_fullname protoreflect.FullName = "google.protobuf.Value" +) + +// Field names for google.protobuf.Value. +const ( + Value_NullValue_field_name protoreflect.Name = "null_value" + Value_NumberValue_field_name protoreflect.Name = "number_value" + Value_StringValue_field_name protoreflect.Name = "string_value" + Value_BoolValue_field_name protoreflect.Name = "bool_value" + Value_StructValue_field_name protoreflect.Name = "struct_value" + Value_ListValue_field_name protoreflect.Name = "list_value" + + Value_NullValue_field_fullname protoreflect.FullName = "google.protobuf.Value.null_value" + Value_NumberValue_field_fullname protoreflect.FullName = "google.protobuf.Value.number_value" + Value_StringValue_field_fullname protoreflect.FullName = "google.protobuf.Value.string_value" + Value_BoolValue_field_fullname protoreflect.FullName = "google.protobuf.Value.bool_value" + Value_StructValue_field_fullname protoreflect.FullName = "google.protobuf.Value.struct_value" + Value_ListValue_field_fullname protoreflect.FullName = "google.protobuf.Value.list_value" +) + +// Field numbers for google.protobuf.Value. +const ( + Value_NullValue_field_number protoreflect.FieldNumber = 1 + Value_NumberValue_field_number protoreflect.FieldNumber = 2 + Value_StringValue_field_number protoreflect.FieldNumber = 3 + Value_BoolValue_field_number protoreflect.FieldNumber = 4 + Value_StructValue_field_number protoreflect.FieldNumber = 5 + Value_ListValue_field_number protoreflect.FieldNumber = 6 +) + +// Oneof names for google.protobuf.Value. +const ( + Value_Kind_oneof_name protoreflect.Name = "kind" + + Value_Kind_oneof_fullname protoreflect.FullName = "google.protobuf.Value.kind" +) + +// Names for google.protobuf.ListValue. +const ( + ListValue_message_name protoreflect.Name = "ListValue" + ListValue_message_fullname protoreflect.FullName = "google.protobuf.ListValue" +) + +// Field names for google.protobuf.ListValue. +const ( + ListValue_Values_field_name protoreflect.Name = "values" + + ListValue_Values_field_fullname protoreflect.FullName = "google.protobuf.ListValue.values" +) + +// Field numbers for google.protobuf.ListValue. +const ( + ListValue_Values_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go new file mode 100644 index 00000000000..f5cd5634c2e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
+ +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_timestamp_proto = "google/protobuf/timestamp.proto" + +// Names for google.protobuf.Timestamp. +const ( + Timestamp_message_name protoreflect.Name = "Timestamp" + Timestamp_message_fullname protoreflect.FullName = "google.protobuf.Timestamp" +) + +// Field names for google.protobuf.Timestamp. +const ( + Timestamp_Seconds_field_name protoreflect.Name = "seconds" + Timestamp_Nanos_field_name protoreflect.Name = "nanos" + + Timestamp_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.seconds" + Timestamp_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.nanos" +) + +// Field numbers for google.protobuf.Timestamp. +const ( + Timestamp_Seconds_field_number protoreflect.FieldNumber = 1 + Timestamp_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go new file mode 100644 index 00000000000..3bc710138ad --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -0,0 +1,184 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_type_proto = "google/protobuf/type.proto" + +// Full and short names for google.protobuf.Syntax. +const ( + Syntax_enum_fullname = "google.protobuf.Syntax" + Syntax_enum_name = "Syntax" +) + +// Names for google.protobuf.Type. +const ( + Type_message_name protoreflect.Name = "Type" + Type_message_fullname protoreflect.FullName = "google.protobuf.Type" +) + +// Field names for google.protobuf.Type. +const ( + Type_Name_field_name protoreflect.Name = "name" + Type_Fields_field_name protoreflect.Name = "fields" + Type_Oneofs_field_name protoreflect.Name = "oneofs" + Type_Options_field_name protoreflect.Name = "options" + Type_SourceContext_field_name protoreflect.Name = "source_context" + Type_Syntax_field_name protoreflect.Name = "syntax" + + Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name" + Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields" + Type_Oneofs_field_fullname protoreflect.FullName = "google.protobuf.Type.oneofs" + Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options" + Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context" + Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax" +) + +// Field numbers for google.protobuf.Type. +const ( + Type_Name_field_number protoreflect.FieldNumber = 1 + Type_Fields_field_number protoreflect.FieldNumber = 2 + Type_Oneofs_field_number protoreflect.FieldNumber = 3 + Type_Options_field_number protoreflect.FieldNumber = 4 + Type_SourceContext_field_number protoreflect.FieldNumber = 5 + Type_Syntax_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.Field. +const ( + Field_message_name protoreflect.Name = "Field" + Field_message_fullname protoreflect.FullName = "google.protobuf.Field" +) + +// Field names for google.protobuf.Field. 
+const ( + Field_Kind_field_name protoreflect.Name = "kind" + Field_Cardinality_field_name protoreflect.Name = "cardinality" + Field_Number_field_name protoreflect.Name = "number" + Field_Name_field_name protoreflect.Name = "name" + Field_TypeUrl_field_name protoreflect.Name = "type_url" + Field_OneofIndex_field_name protoreflect.Name = "oneof_index" + Field_Packed_field_name protoreflect.Name = "packed" + Field_Options_field_name protoreflect.Name = "options" + Field_JsonName_field_name protoreflect.Name = "json_name" + Field_DefaultValue_field_name protoreflect.Name = "default_value" + + Field_Kind_field_fullname protoreflect.FullName = "google.protobuf.Field.kind" + Field_Cardinality_field_fullname protoreflect.FullName = "google.protobuf.Field.cardinality" + Field_Number_field_fullname protoreflect.FullName = "google.protobuf.Field.number" + Field_Name_field_fullname protoreflect.FullName = "google.protobuf.Field.name" + Field_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Field.type_url" + Field_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.Field.oneof_index" + Field_Packed_field_fullname protoreflect.FullName = "google.protobuf.Field.packed" + Field_Options_field_fullname protoreflect.FullName = "google.protobuf.Field.options" + Field_JsonName_field_fullname protoreflect.FullName = "google.protobuf.Field.json_name" + Field_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.Field.default_value" +) + +// Field numbers for google.protobuf.Field. +const ( + Field_Kind_field_number protoreflect.FieldNumber = 1 + Field_Cardinality_field_number protoreflect.FieldNumber = 2 + Field_Number_field_number protoreflect.FieldNumber = 3 + Field_Name_field_number protoreflect.FieldNumber = 4 + Field_TypeUrl_field_number protoreflect.FieldNumber = 6 + Field_OneofIndex_field_number protoreflect.FieldNumber = 7 + Field_Packed_field_number protoreflect.FieldNumber = 8 + Field_Options_field_number protoreflect.FieldNumber = 9 + Field_JsonName_field_number protoreflect.FieldNumber = 10 + Field_DefaultValue_field_number protoreflect.FieldNumber = 11 +) + +// Full and short names for google.protobuf.Field.Kind. +const ( + Field_Kind_enum_fullname = "google.protobuf.Field.Kind" + Field_Kind_enum_name = "Kind" +) + +// Full and short names for google.protobuf.Field.Cardinality. +const ( + Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" + Field_Cardinality_enum_name = "Cardinality" +) + +// Names for google.protobuf.Enum. +const ( + Enum_message_name protoreflect.Name = "Enum" + Enum_message_fullname protoreflect.FullName = "google.protobuf.Enum" +) + +// Field names for google.protobuf.Enum. +const ( + Enum_Name_field_name protoreflect.Name = "name" + Enum_Enumvalue_field_name protoreflect.Name = "enumvalue" + Enum_Options_field_name protoreflect.Name = "options" + Enum_SourceContext_field_name protoreflect.Name = "source_context" + Enum_Syntax_field_name protoreflect.Name = "syntax" + + Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name" + Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue" + Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options" + Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context" + Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax" +) + +// Field numbers for google.protobuf.Enum. 
+const ( + Enum_Name_field_number protoreflect.FieldNumber = 1 + Enum_Enumvalue_field_number protoreflect.FieldNumber = 2 + Enum_Options_field_number protoreflect.FieldNumber = 3 + Enum_SourceContext_field_number protoreflect.FieldNumber = 4 + Enum_Syntax_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.EnumValue. +const ( + EnumValue_message_name protoreflect.Name = "EnumValue" + EnumValue_message_fullname protoreflect.FullName = "google.protobuf.EnumValue" +) + +// Field names for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_name protoreflect.Name = "name" + EnumValue_Number_field_name protoreflect.Name = "number" + EnumValue_Options_field_name protoreflect.Name = "options" + + EnumValue_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.name" + EnumValue_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.number" + EnumValue_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.options" +) + +// Field numbers for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_number protoreflect.FieldNumber = 1 + EnumValue_Number_field_number protoreflect.FieldNumber = 2 + EnumValue_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.Option. +const ( + Option_message_name protoreflect.Name = "Option" + Option_message_fullname protoreflect.FullName = "google.protobuf.Option" +) + +// Field names for google.protobuf.Option. +const ( + Option_Name_field_name protoreflect.Name = "name" + Option_Value_field_name protoreflect.Name = "value" + + Option_Name_field_fullname protoreflect.FullName = "google.protobuf.Option.name" + Option_Value_field_fullname protoreflect.FullName = "google.protobuf.Option.value" +) + +// Field numbers for google.protobuf.Option. +const ( + Option_Name_field_number protoreflect.FieldNumber = 1 + Option_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go new file mode 100644 index 00000000000..429384b85b0 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -0,0 +1,13 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field name and number for messages in wrappers.proto. +const ( + WrapperValue_Value_field_name protoreflect.Name = "value" + WrapperValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go new file mode 100644 index 00000000000..72527d2ab03 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go @@ -0,0 +1,175 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_wrappers_proto = "google/protobuf/wrappers.proto" + +// Names for google.protobuf.DoubleValue. 
+const ( + DoubleValue_message_name protoreflect.Name = "DoubleValue" + DoubleValue_message_fullname protoreflect.FullName = "google.protobuf.DoubleValue" +) + +// Field names for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_name protoreflect.Name = "value" + + DoubleValue_Value_field_fullname protoreflect.FullName = "google.protobuf.DoubleValue.value" +) + +// Field numbers for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FloatValue. +const ( + FloatValue_message_name protoreflect.Name = "FloatValue" + FloatValue_message_fullname protoreflect.FullName = "google.protobuf.FloatValue" +) + +// Field names for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_name protoreflect.Name = "value" + + FloatValue_Value_field_fullname protoreflect.FullName = "google.protobuf.FloatValue.value" +) + +// Field numbers for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int64Value. +const ( + Int64Value_message_name protoreflect.Name = "Int64Value" + Int64Value_message_fullname protoreflect.FullName = "google.protobuf.Int64Value" +) + +// Field names for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_name protoreflect.Name = "value" + + Int64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int64Value.value" +) + +// Field numbers for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt64Value. +const ( + UInt64Value_message_name protoreflect.Name = "UInt64Value" + UInt64Value_message_fullname protoreflect.FullName = "google.protobuf.UInt64Value" +) + +// Field names for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_name protoreflect.Name = "value" + + UInt64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt64Value.value" +) + +// Field numbers for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int32Value. +const ( + Int32Value_message_name protoreflect.Name = "Int32Value" + Int32Value_message_fullname protoreflect.FullName = "google.protobuf.Int32Value" +) + +// Field names for google.protobuf.Int32Value. +const ( + Int32Value_Value_field_name protoreflect.Name = "value" + + Int32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int32Value.value" +) + +// Field numbers for google.protobuf.Int32Value. +const ( + Int32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt32Value. +const ( + UInt32Value_message_name protoreflect.Name = "UInt32Value" + UInt32Value_message_fullname protoreflect.FullName = "google.protobuf.UInt32Value" +) + +// Field names for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_name protoreflect.Name = "value" + + UInt32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt32Value.value" +) + +// Field numbers for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BoolValue. +const ( + BoolValue_message_name protoreflect.Name = "BoolValue" + BoolValue_message_fullname protoreflect.FullName = "google.protobuf.BoolValue" +) + +// Field names for google.protobuf.BoolValue. 
+const ( + BoolValue_Value_field_name protoreflect.Name = "value" + + BoolValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BoolValue.value" +) + +// Field numbers for google.protobuf.BoolValue. +const ( + BoolValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.StringValue. +const ( + StringValue_message_name protoreflect.Name = "StringValue" + StringValue_message_fullname protoreflect.FullName = "google.protobuf.StringValue" +) + +// Field names for google.protobuf.StringValue. +const ( + StringValue_Value_field_name protoreflect.Name = "value" + + StringValue_Value_field_fullname protoreflect.FullName = "google.protobuf.StringValue.value" +) + +// Field numbers for google.protobuf.StringValue. +const ( + StringValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BytesValue. +const ( + BytesValue_message_name protoreflect.Name = "BytesValue" + BytesValue_message_fullname protoreflect.FullName = "google.protobuf.BytesValue" +) + +// Field names for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_name protoreflect.Name = "value" + + BytesValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BytesValue.value" +) + +// Field numbers for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genname/name.go b/vendor/google.golang.org/protobuf/internal/genname/name.go deleted file mode 100644 index f45509fbdf3..00000000000 --- a/vendor/google.golang.org/protobuf/internal/genname/name.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package genname contains constants for generated names. -package genname - -const ( - State = "state" - - SizeCache = "sizeCache" - SizeCacheA = "XXX_sizecache" - - WeakFields = "weakFields" - WeakFieldsA = "XXX_weak" - - UnknownFields = "unknownFields" - UnknownFieldsA = "XXX_unrecognized" - - ExtensionFields = "extensionFields" - ExtensionFieldsA = "XXX_InternalExtensions" - ExtensionFieldsB = "XXX_extensions" - - WeakFieldPrefix = "XXX_weak_" -) diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index 4d22c960429..b5974528db6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -10,6 +10,7 @@ import ( "strconv" "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" pref "google.golang.org/protobuf/reflect/protoreflect" piface "google.golang.org/protobuf/runtime/protoiface" @@ -19,6 +20,12 @@ import ( // functions that we do not want to appear in godoc. type Export struct{} +// NewError formats a string according to the format specifier and arguments and +// returns an error that has a "proto" prefix. +func (Export) NewError(f string, x ...interface{}) error { + return errors.New(f, x...) +} + // enum is any enum type generated by protoc-gen-go // and must be a named int32 type. 
type enum = interface{} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index 35a67c25bfc..44885a761f6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -10,6 +10,7 @@ import ( "sort" "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" pref "google.golang.org/protobuf/reflect/protoreflect" ) @@ -134,7 +135,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo b = b[n:] err := errUnknown switch num { - case 1: + case genid.MapEntry_Key_field_number: var v pref.Value var o unmarshalOutput v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) @@ -143,7 +144,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo } key = v n = o.n - case 2: + case genid.MapEntry_Value_field_number: var v pref.Value var o unmarshalOutput v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts) diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 7dd994bd95d..c026a98180d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -12,7 +12,7 @@ import ( "sync" "sync/atomic" - "google.golang.org/protobuf/internal/genname" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" ) @@ -148,19 +148,19 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { fieldLoop: for i := 0; i < t.NumField(); i++ { switch f := t.Field(i); f.Name { - case genname.SizeCache, genname.SizeCacheA: + case genid.SizeCache_goname, genid.SizeCacheA_goname: if f.Type == sizecacheType { si.sizecacheOffset = offsetOf(f, mi.Exporter) } - case genname.WeakFields, genname.WeakFieldsA: + case genid.WeakFields_goname, genid.WeakFieldsA_goname: if f.Type == weakFieldsType { si.weakOffset = offsetOf(f, mi.Exporter) } - case genname.UnknownFields, genname.UnknownFieldsA: + case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: if f.Type == unknownFieldsType { si.unknownOffset = offsetOf(f, mi.Exporter) } - case genname.ExtensionFields, genname.ExtensionFieldsA, genname.ExtensionFieldsB: + case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: if f.Type == extensionFieldsType { si.extensionOffset = offsetOf(f, mi.Exporter) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go index 57de9cc85b1..08cfb6054b4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -14,6 +14,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" pref "google.golang.org/protobuf/reflect/protoreflect" preg "google.golang.org/protobuf/reflect/protoregistry" @@ -282,9 +283,9 @@ State: switch { case st.typ == validationTypeMap: switch num { - case 1: + case genid.MapEntry_Key_field_number: vi.typ = st.keyType - case 2: + case genid.MapEntry_Value_field_number: vi.typ = st.valType vi.mi = st.mi 
vi.requiredBit = 1 diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 6b3001c66af..72cf770b427 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 24 + Minor = 25 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 4974b16d544..42fc5195e87 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -9,6 +9,7 @@ import ( "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" @@ -220,13 +221,13 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto b = b[n:] err = errUnknown switch num { - case 1: + case genid.MapEntry_Key_field_number: key, n, err = o.unmarshalScalar(b, wtyp, keyField) if err != nil { break } haveKey = true - case 2: + case genid.MapEntry_Value_field_number: var v protoreflect.Value v, n, err = o.unmarshalScalar(b, wtyp, valField) if err != nil { diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index cf9641ab922..2d5fa9936ba 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -12,6 +12,7 @@ import ( "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" @@ -348,9 +349,9 @@ func checkValidMap(fd protoreflect.FieldDescriptor) error { kf := md.Fields().Get(0) vf := md.Fields().Get(1) switch { - case kf.Name() != "key" || kf.Number() != 1 || kf.Cardinality() != protoreflect.Optional || kf.ContainingOneof() != nil || kf.HasDefault(): + case kf.Name() != genid.MapEntry_Key_field_name || kf.Number() != genid.MapEntry_Key_field_number || kf.Cardinality() != protoreflect.Optional || kf.ContainingOneof() != nil || kf.HasDefault(): return errors.New("invalid key field") - case vf.Name() != "value" || vf.Number() != 2 || vf.Cardinality() != protoreflect.Optional || vf.ContainingOneof() != nil || vf.HasDefault(): + case vf.Name() != genid.MapEntry_Value_field_name || vf.Number() != genid.MapEntry_Value_field_number || vf.Cardinality() != protoreflect.Optional || vf.ContainingOneof() != nil || vf.HasDefault(): return errors.New("invalid value field") } switch kf.Kind() { diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index b669a4e7619..dd85915bd4b 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -128,7 +128,6 @@ package protoreflect import ( "fmt" - "regexp" "strings" "google.golang.org/protobuf/encoding/protowire" @@ 
-408,19 +407,14 @@ type EnumRanges interface { doNotImplement } -var ( - regexName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*$`) - regexFullName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*(\.[_a-zA-Z][_a-zA-Z0-9]*)*$`) -) - // Name is the short name for a proto declaration. This is not the name // as used in Go source code, which might not be identical to the proto name. type Name string // e.g., "Kind" -// IsValid reports whether n is a syntactically valid name. +// IsValid reports whether s is a syntactically valid name. // An empty name is invalid. -func (n Name) IsValid() bool { - return regexName.MatchString(string(n)) +func (s Name) IsValid() bool { + return consumeIdent(string(s)) == len(s) } // Names represent a list of names. @@ -443,10 +437,42 @@ type Names interface { // This should not have any leading or trailing dots. type FullName string // e.g., "google.protobuf.Field.Kind" -// IsValid reports whether n is a syntactically valid full name. +// IsValid reports whether s is a syntactically valid full name. // An empty full name is invalid. -func (n FullName) IsValid() bool { - return regexFullName.MatchString(string(n)) +func (s FullName) IsValid() bool { + i := consumeIdent(string(s)) + if i < 0 { + return false + } + for len(s) > i { + if s[i] != '.' { + return false + } + i++ + n := consumeIdent(string(s[i:])) + if n < 0 { + return false + } + i += n + } + return true +} + +func consumeIdent(s string) (i int) { + if len(s) == 0 || !isLetter(s[i]) { + return -1 + } + i++ + for len(s) > i && isLetterDigit(s[i]) { + i++ + } + return i +} +func isLetter(c byte) bool { + return c == '_' || ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') +} +func isLetterDigit(c byte) bool { + return isLetter(c) || ('0' <= c && c <= '9') } // Name returns the short name, which is the last identifier segment. diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 5f9498e4e44..82a473e2652 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -31,12 +31,100 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/any.proto +// Package anypb contains generated types for google/protobuf/any.proto. +// +// The Any message is a dynamic representation of any other message value. +// It is functionally a tuple of the full name of the remote message type and +// the serialized bytes of the remote message value. +// +// +// Constructing an Any +// +// An Any message containing another message value is constructed using New: +// +// any, err := anypb.New(m) +// if err != nil { +// ... // handle error +// } +// ... // make use of any +// +// +// Unmarshaling an Any +// +// With a populated Any message, the underlying message can be serialized into +// a remote concrete message value in a few ways. +// +// If the exact concrete type is known, then a new (or pre-existing) instance +// of that message can be passed to the UnmarshalTo method: +// +// m := new(foopb.MyMessage) +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... // make use of m +// +// If the exact concrete type is not known, then the UnmarshalNew method can be +// used to unmarshal the contents into a new instance of the remote message type: +// +// m, err := any.UnmarshalNew() +// if err != nil { +// ... // handle error +// } +// ... 
// make use of m +// +// UnmarshalNew uses the global type registry to resolve the message type and +// construct a new instance of that message to unmarshal into. In order for a +// message type to appear in the global registry, the Go type representing that +// protobuf message type must be linked into the Go binary. For messages +// generated by protoc-gen-go, this is achieved through an import of the +// generated Go package representing a .proto file. +// +// A common pattern with UnmarshalNew is to use a type switch with the resulting +// proto.Message value: +// +// switch m := m.(type) { +// case *foopb.MyMessage: +// ... // make use of m as a *foopb.MyMessage +// case *barpb.OtherMessage: +// ... // make use of m as a *barpb.OtherMessage +// case *bazpb.SomeMessage: +// ... // make use of m as a *bazpb.SomeMessage +// } +// +// This pattern ensures that the generated packages containing the message types +// listed in the case clauses are linked into the Go binary and therefore also +// registered in the global registry. +// +// +// Type checking an Any +// +// In order to type check whether an Any message represents some other message, +// then use the MessageIs method: +// +// if any.MessageIs((*foopb.MyMessage)(nil)) { +// ... // make use of any, knowing that it contains a foopb.MyMessage +// } +// +// The MessageIs method can also be used with an allocated instance of the target +// message type if the intention is to unmarshal into it if the type matches: +// +// m := new(foopb.MyMessage) +// if any.MessageIs(m) { +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... // make use of m +// } +// package anypb import ( + proto "google.golang.org/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoregistry "google.golang.org/protobuf/reflect/protoregistry" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" + strings "strings" sync "sync" ) @@ -158,6 +246,125 @@ type Any struct { Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } +// New marshals src into a new Any instance. +func New(src proto.Message) (*Any, error) { + dst := new(Any) + if err := dst.MarshalFrom(src); err != nil { + return nil, err + } + return dst, nil +} + +// MarshalFrom marshals src into dst as the underlying message +// using the provided marshal options. +// +// If no options are specified, call dst.MarshalFrom instead. +func MarshalFrom(dst *Any, src proto.Message, opts proto.MarshalOptions) error { + const urlPrefix = "type.googleapis.com/" + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + b, err := opts.Marshal(src) + if err != nil { + return err + } + dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName()) + dst.Value = b + return nil +} + +// UnmarshalTo unmarshals the underlying message from src into dst +// using the provided unmarshal options. +// It reports an error if dst is not of the right message type. +// +// If no options are specified, call src.UnmarshalTo instead. 
+func UnmarshalTo(src *Any, dst proto.Message, opts proto.UnmarshalOptions) error { + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + if !src.MessageIs(dst) { + got := dst.ProtoReflect().Descriptor().FullName() + want := src.MessageName() + return protoimpl.X.NewError("mismatched message type: got %q, want %q", got, want) + } + return opts.Unmarshal(src.GetValue(), dst) +} + +// UnmarshalNew unmarshals the underlying message from src into dst, +// which is newly created message using a type resolved from the type URL. +// The message type is resolved according to opt.Resolver, +// which should implement protoregistry.MessageTypeResolver. +// It reports an error if the underlying message type could not be resolved. +// +// If no options are specified, call src.UnmarshalNew instead. +func UnmarshalNew(src *Any, opts proto.UnmarshalOptions) (dst proto.Message, err error) { + if src.GetTypeUrl() == "" { + return nil, protoimpl.X.NewError("invalid empty type URL") + } + if opts.Resolver == nil { + opts.Resolver = protoregistry.GlobalTypes + } + r, ok := opts.Resolver.(protoregistry.MessageTypeResolver) + if !ok { + return nil, protoregistry.NotFound + } + mt, err := r.FindMessageByURL(src.GetTypeUrl()) + if err != nil { + if err == protoregistry.NotFound { + return nil, err + } + return nil, protoimpl.X.NewError("could not resolve %q: %v", src.GetTypeUrl(), err) + } + dst = mt.New().Interface() + return dst, opts.Unmarshal(src.GetValue(), dst) +} + +// MessageIs reports whether the underlying message is of the same type as m. +func (x *Any) MessageIs(m proto.Message) bool { + if m == nil { + return false + } + url := x.GetTypeUrl() + name := string(m.ProtoReflect().Descriptor().FullName()) + if !strings.HasSuffix(url, name) { + return false + } + return len(url) == len(name) || url[len(url)-len(name)-1] == '/' +} + +// MessageName reports the full name of the underlying message, +// returning an empty string if invalid. +func (x *Any) MessageName() protoreflect.FullName { + url := x.GetTypeUrl() + name := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { + return "" + } + return name +} + +// MarshalFrom marshals m into x as the underlying message. +func (x *Any) MarshalFrom(m proto.Message) error { + return MarshalFrom(x, m, proto.MarshalOptions{}) +} + +// UnmarshalTo unmarshals the contents of the underlying message of x into m. +// It resets m before performing the unmarshal operation. +// It reports an error if m is not of the right message type. +func (x *Any) UnmarshalTo(m proto.Message) error { + return UnmarshalTo(x, m, proto.UnmarshalOptions{}) +} + +// UnmarshalNew unmarshals the contents of the underlying message of x into +// a newly allocated message of the specified type. +// It reports an error if the underlying message type could not be resolved. +func (x *Any) UnmarshalNew() (proto.Message, error) { + return UnmarshalNew(x, proto.UnmarshalOptions{}) +} + func (x *Any) Reset() { *x = Any{} if protoimpl.UnsafeEnabled { diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 3997c604f41..f7a11099404 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -31,13 +31,58 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
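For orientation, a minimal sketch (not part of the vendored change itself) of how the anypb helpers introduced above (New, MessageIs, UnmarshalNew) might be exercised; it packs a durationpb.Duration only to keep the example self-contained, and the values are illustrative:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Pack a concrete message into an Any.
	any, err := anypb.New(durationpb.New(5 * time.Second))
	if err != nil {
		fmt.Println("pack failed:", err)
		return
	}
	// Type check without unpacking.
	fmt.Println(any.MessageIs((*durationpb.Duration)(nil))) // true

	// Unpack via the global type registry.
	m, err := any.UnmarshalNew()
	if err != nil {
		fmt.Println("unpack failed:", err)
		return
	}
	if d, ok := m.(*durationpb.Duration); ok {
		fmt.Println(d.AsDuration()) // 5s
	}
}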
// source: google/protobuf/duration.proto +// Package durationpb contains generated types for google/protobuf/duration.proto. +// +// The Duration message represents a signed span of time. +// +// +// Conversion to a Go Duration +// +// The AsDuration method can be used to convert a Duration message to a +// standard Go time.Duration value: +// +// d := dur.AsDuration() +// ... // make use of d as a time.Duration +// +// Converting to a time.Duration is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsDuration method performs the conversion on a best-effort basis. +// Durations with denormal values (e.g., nanoseconds beyond -99999999 and +// +99999999, inclusive; or seconds and nanoseconds with opposite signs) +// are normalized during the conversion to a time.Duration. To manually check for +// invalid Duration per the documented limitations in duration.proto, +// additionally call the CheckValid method: +// +// if err := dur.CheckValid(); err != nil { +// ... // handle error +// } +// +// Note that the documented limitations in duration.proto does not protect a +// Duration from overflowing the representable range of a time.Duration in Go. +// The AsDuration method uses saturation arithmetic such that an overflow clamps +// the resulting value to the closest representable value (e.g., math.MaxInt64 +// for positive overflow and math.MinInt64 for negative overflow). +// +// +// Conversion from a Go Duration +// +// The durationpb.New function can be used to construct a Duration message +// from a standard Go time.Duration value: +// +// dur := durationpb.New(d) +// ... // make use of d as a *durationpb.Duration +// package durationpb import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" reflect "reflect" sync "sync" + time "time" ) // A Duration represents a signed, fixed-length span of time represented @@ -118,6 +163,91 @@ type Duration struct { Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` } +// New constructs a new Duration from the provided time.Duration. +func New(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{Seconds: int64(secs), Nanos: int32(nanos)} +} + +// AsDuration converts x to a time.Duration, +// returning the closest duration value in the event of overflow. +func (x *Duration) AsDuration() time.Duration { + secs := x.GetSeconds() + nanos := x.GetNanos() + d := time.Duration(secs) * time.Second + overflow := d/time.Second != time.Duration(secs) + d += time.Duration(nanos) * time.Nanosecond + overflow = overflow || (secs < 0 && nanos < 0 && d > 0) + overflow = overflow || (secs > 0 && nanos > 0 && d < 0) + if overflow { + switch { + case secs < 0: + return time.Duration(math.MinInt64) + case secs > 0: + return time.Duration(math.MaxInt64) + } + } + return d +} + +// IsValid reports whether the duration is valid. +// It is equivalent to CheckValid == nil. +func (x *Duration) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the duration is invalid. +// In particular, it checks whether the value is within the range of +// -10000 years to +10000 years inclusive. +// An error is reported for a nil Duration. 
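A similar hedged sketch, not from this diff, of the durationpb helpers added above (New, CheckValid, AsDuration); the duration value is arbitrary:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	dur := durationpb.New(90 * time.Second) // *durationpb.Duration
	if err := dur.CheckValid(); err != nil {
		fmt.Println("invalid duration:", err)
		return
	}
	fmt.Println(dur.AsDuration()) // 1m30s as a time.Duration
}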
+func (x *Duration) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Duration") + case invalidUnderflow: + return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x) + case invalidOverflow: + return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x) + case invalidNanosRange: + return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x) + case invalidNanosSign: + return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanosRange + invalidNanosSign +) + +func (x *Duration) check() uint { + const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < -absDuration: + return invalidUnderflow + case secs > +absDuration: + return invalidOverflow + case nanos <= -1e9 || nanos >= +1e9: + return invalidNanosRange + case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0): + return invalidNanosSign + default: + return 0 + } +} + func (x *Duration) Reset() { *x = Duration{} if protoimpl.UnsafeEnabled { diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index 2ccf95abd04..6a8d872c085 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -31,12 +31,59 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/field_mask.proto +// Package fieldmaskpb contains generated types for google/protobuf/field_mask.proto. +// +// The FieldMask message represents a set of symbolic field paths. +// The paths are specific to some target message type, +// which is not stored within the FieldMask message itself. +// +// +// Constructing a FieldMask +// +// The New function is used to construct a FieldMask: +// +// var messageType *descriptorpb.DescriptorProto +// fm, err := fieldmaskpb.New(messageType, "field.name", "field.number") +// if err != nil { +// ... // handle error +// } +// ... // make use of fm +// +// The "field.name" and "field.number" paths are valid paths according to the +// google.protobuf.DescriptorProto message. Use of a path that does not correlate +// to valid fields reachable from DescriptorProto would result in an error. +// +// Once a FieldMask message has been constructed, +// the Append method can be used to insert additional paths to the path set: +// +// var messageType *descriptorpb.DescriptorProto +// if err := fm.Append(messageType, "options"); err != nil { +// ... // handle error +// } +// +// +// Type checking a FieldMask +// +// In order to verify that a FieldMask represents a set of fields that are +// reachable from some target message type, use the IsValid method: +// +// var messageType *descriptorpb.DescriptorProto +// if fm.IsValid(messageType) { +// ... // make use of fm +// } +// +// IsValid needs to be passed the target message type as an input since the +// FieldMask message itself does not store the message type that the set of paths +// are for.
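A brief sketch of the fieldmaskpb helpers documented above, mirroring the descriptorpb.DescriptorProto example paths from the package comment; it is an illustration rather than code from this change:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	var msg *descriptorpb.DescriptorProto // nil is fine; only the message type is used
	fm, err := fieldmaskpb.New(msg, "field.name", "field.number")
	if err != nil {
		fmt.Println("invalid path:", err)
		return
	}
	// Append validates further paths against the same message type.
	if err := fm.Append(msg, "options"); err != nil {
		fmt.Println("invalid path:", err)
		return
	}
	fm.Normalize() // sort paths and drop redundant ones
	fmt.Println(fm.GetPaths(), fm.IsValid(msg))
}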
package fieldmaskpb import ( + proto "google.golang.org/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" + sort "sort" + strings "strings" sync "sync" ) @@ -248,6 +295,176 @@ type FieldMask struct { Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` } +// New constructs a field mask from a list of paths and verifies that +// each one is valid according to the specified message type. +func New(m proto.Message, paths ...string) (*FieldMask, error) { + x := new(FieldMask) + return x, x.Append(m, paths...) +} + +// Union returns the union of all the paths in the input field masks. +func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask { + var out []string + out = append(out, mx.GetPaths()...) + out = append(out, my.GetPaths()...) + for _, m := range ms { + out = append(out, m.GetPaths()...) + } + return &FieldMask{Paths: normalizePaths(out)} +} + +// Intersect returns the intersection of all the paths in the input field masks. +func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask { + var ss1, ss2 []string // reused buffers for performance + intersect := func(out, in []string) []string { + ss1 = normalizePaths(append(ss1[:0], in...)) + ss2 = normalizePaths(append(ss2[:0], out...)) + out = out[:0] + for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); { + switch s1, s2 := ss1[i1], ss2[i2]; { + case hasPathPrefix(s1, s2): + out = append(out, s1) + i1++ + case hasPathPrefix(s2, s1): + out = append(out, s2) + i2++ + case lessPath(s1, s2): + i1++ + case lessPath(s2, s1): + i2++ + } + } + return out + } + + out := Union(mx, my, ms...).GetPaths() + out = intersect(out, mx.GetPaths()) + out = intersect(out, my.GetPaths()) + for _, m := range ms { + out = intersect(out, m.GetPaths()) + } + return &FieldMask{Paths: normalizePaths(out)} +} + +// IsValid reports whether all the paths are syntactically valid and +// refer to known fields in the specified message type. +// It reports false for a nil FieldMask. +func (x *FieldMask) IsValid(m proto.Message) bool { + paths := x.GetPaths() + return x != nil && numValidPaths(m, paths) == len(paths) +} + +// Append appends a list of paths to the mask and verifies that each one +// is valid according to the specified message type. +// An invalid path is not appended and breaks insertion of subsequent paths. +func (x *FieldMask) Append(m proto.Message, paths ...string) error { + numValid := numValidPaths(m, paths) + x.Paths = append(x.Paths, paths[:numValid]...) + paths = paths[numValid:] + if len(paths) > 0 { + name := m.ProtoReflect().Descriptor().FullName() + return protoimpl.X.NewError("invalid path %q for message %q", paths[0], name) + } + return nil +} + +func numValidPaths(m proto.Message, paths []string) int { + md0 := m.ProtoReflect().Descriptor() + for i, path := range paths { + md := md0 + if !rangeFields(path, func(field string) bool { + // Search the field within the message. + if md == nil { + return false // not within a message + } + fd := md.Fields().ByName(protoreflect.Name(field)) + // The real field name of a group is the message name. 
+ if fd == nil { + gd := md.Fields().ByName(protoreflect.Name(strings.ToLower(field))) + if gd != nil && gd.Kind() == protoreflect.GroupKind && string(gd.Message().Name()) == field { + fd = gd + } + } else if fd.Kind() == protoreflect.GroupKind && string(fd.Message().Name()) != field { + fd = nil + } + if fd == nil { + return false // message does not have this field + } + + // Identify the next message to search within. + md = fd.Message() // may be nil + if fd.IsMap() { + md = fd.MapValue().Message() // may be nil + } + return true + }) { + return i + } + } + return len(paths) +} + +// Normalize converts the mask to its canonical form where all paths are sorted +// and redundant paths are removed. +func (x *FieldMask) Normalize() { + x.Paths = normalizePaths(x.Paths) +} + +func normalizePaths(paths []string) []string { + sort.Slice(paths, func(i, j int) bool { + return lessPath(paths[i], paths[j]) + }) + + // Elide any path that is a prefix match on the previous. + out := paths[:0] + for _, path := range paths { + if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) { + continue + } + out = append(out, path) + } + return out +} + +// hasPathPrefix is like strings.HasPrefix, but further checks for either +// an exact match or that the prefix is delimited by a dot. +func hasPathPrefix(path, prefix string) bool { + return strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.') +} + +// lessPath is a lexicographical comparison where dot is specially treated +// as the smallest symbol. +func lessPath(x, y string) bool { + for i := 0; i < len(x) && i < len(y); i++ { + if x[i] != y[i] { + return (x[i] - '.') < (y[i] - '.') + } + } + return len(x) < len(y) +} + +// rangeFields is like strings.Split(path, "."), but avoids allocations by +// iterating over each field in place and calling an iterator function. +func rangeFields(path string, f func(field string) bool) bool { + for { + var field string + if i := strings.IndexByte(path, '.'); i >= 0 { + field, path = path[:i], path[i:] + } else { + field, path = path, "" + } + + if !f(field) { + return false + } + + if len(path) == 0 { + return true + } + path = strings.TrimPrefix(path, ".") + } +} + func (x *FieldMask) Reset() { *x = FieldMask{} if protoimpl.UnsafeEnabled { diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index df098137b63..7433a4c41c1 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -31,13 +31,105 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/struct.proto +// Package structpb contains generated types for google/protobuf/struct.proto. +// +// The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are +// used to represent arbitrary JSON. The Value message represents a JSON value, +// the Struct message represents a JSON object, and the ListValue message +// represents a JSON array. See https://json.org for more information. +// +// The Value, Struct, and ListValue types have generated MarshalJSON and +// UnmarshalJSON methods such that they serialize JSON equivalent to what the +// messages themselves represent. Use of these types with the +// "google.golang.org/protobuf/encoding/protojson" package +// ensures that they will be serialized as their JSON equivalent.
+// +// +// Conversion to and from a Go interface +// +// The standard Go "encoding/json" package has functionality to serialize +// arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and +// ListValue.AsSlice methods can convert the protobuf message representation into +// a form represented by interface{}, map[string]interface{}, and []interface{}. +// This form can be used with other packages that operate on such data structures +// and also directly with the standard json package. +// +// In order to convert the interface{}, map[string]interface{}, and []interface{} +// forms back as Value, Struct, and ListValue messages, use the NewStruct, +// NewList, and NewValue constructor functions. +// +// +// Example usage +// +// Consider the following example JSON object: +// +// { +// "firstName": "John", +// "lastName": "Smith", +// "isAlive": true, +// "age": 27, +// "address": { +// "streetAddress": "21 2nd Street", +// "city": "New York", +// "state": "NY", +// "postalCode": "10021-3100" +// }, +// "phoneNumbers": [ +// { +// "type": "home", +// "number": "212 555-1234" +// }, +// { +// "type": "office", +// "number": "646 555-4567" +// } +// ], +// "children": [], +// "spouse": null +// } +// +// To construct a Value message representing the above JSON object: +// +// m, err := structpb.NewValue(map[string]interface{}{ +// "firstName": "John", +// "lastName": "Smith", +// "isAlive": true, +// "age": 27, +// "address": map[string]interface{}{ +// "streetAddress": "21 2nd Street", +// "city": "New York", +// "state": "NY", +// "postalCode": "10021-3100", +// }, +// "phoneNumbers": []interface{}{ +// map[string]interface{}{ +// "type": "home", +// "number": "212 555-1234", +// }, +// map[string]interface{}{ +// "type": "office", +// "number": "646 555-4567", +// }, +// }, +// "children": []interface{}{}, +// "spouse": nil, +// }) +// if err != nil { +// ... // handle error +// } +// ... // make use of m as a *structpb.Value +// package structpb import ( + base64 "encoding/base64" + protojson "google.golang.org/protobuf/encoding/protojson" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" reflect "reflect" sync "sync" + utf8 "unicode/utf8" ) // `NullValue` is a singleton enumeration to represent the null value for the @@ -105,6 +197,42 @@ type Struct struct { Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } +// NewStruct constructs a Struct from a general-purpose Go map. +// The map keys must be valid UTF-8. +// The map values are converted using NewValue. +func NewStruct(v map[string]interface{}) (*Struct, error) { + x := &Struct{Fields: make(map[string]*Value, len(v))} + for k, v := range v { + if !utf8.ValidString(k) { + return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", k) + } + var err error + x.Fields[k], err = NewValue(v) + if err != nil { + return nil, err + } + } + return x, nil +} + +// AsMap converts x to a general-purpose Go map. +// The map values are converted by calling Value.AsInterface. 
+func (x *Struct) AsMap() map[string]interface{} { + vs := make(map[string]interface{}) + for k, v := range x.GetFields() { + vs[k] = v.AsInterface() + } + return vs +} + +func (x *Struct) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *Struct) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + func (x *Struct) Reset() { *x = Struct{} if protoimpl.UnsafeEnabled { @@ -167,6 +295,151 @@ type Value struct { Kind isValue_Kind `protobuf_oneof:"kind"` } +// NewValue constructs a Value from a general-purpose Go interface. +// +// ╔════════════════════════╤════════════════════════════════════════════╗ +// ║ Go type │ Conversion ║ +// ╠════════════════════════╪════════════════════════════════════════════╣ +// ║ nil │ stored as NullValue ║ +// ║ bool │ stored as BoolValue ║ +// ║ int, int32, int64 │ stored as NumberValue ║ +// ║ uint, uint32, uint64 │ stored as NumberValue ║ +// ║ float32, float64 │ stored as NumberValue ║ +// ║ string │ stored as StringValue; must be valid UTF-8 ║ +// ║ []byte │ stored as StringValue; base64-encoded ║ +// ║ map[string]interface{} │ stored as StructValue ║ +// ║ []interface{} │ stored as ListValue ║ +// ╚════════════════════════╧════════════════════════════════════════════╝ +// +// When converting an int64 or uint64 to a NumberValue, numeric precision loss +// is possible since they are stored as a float64. +func NewValue(v interface{}) (*Value, error) { + switch v := v.(type) { + case nil: + return NewNullValue(), nil + case bool: + return NewBoolValue(v), nil + case int: + return NewNumberValue(float64(v)), nil + case int32: + return NewNumberValue(float64(v)), nil + case int64: + return NewNumberValue(float64(v)), nil + case uint: + return NewNumberValue(float64(v)), nil + case uint32: + return NewNumberValue(float64(v)), nil + case uint64: + return NewNumberValue(float64(v)), nil + case float32: + return NewNumberValue(float64(v)), nil + case float64: + return NewNumberValue(float64(v)), nil + case string: + if !utf8.ValidString(v) { + return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v) + } + return NewStringValue(v), nil + case []byte: + s := base64.StdEncoding.EncodeToString(v) + return NewStringValue(s), nil + case map[string]interface{}: + v2, err := NewStruct(v) + if err != nil { + return nil, err + } + return NewStructValue(v2), nil + case []interface{}: + v2, err := NewList(v) + if err != nil { + return nil, err + } + return NewListValue(v2), nil + default: + return nil, protoimpl.X.NewError("invalid type: %T", v) + } +} + +// NewNullValue constructs a new null Value. +func NewNullValue() *Value { + return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}} +} + +// NewBoolValue constructs a new boolean Value. +func NewBoolValue(v bool) *Value { + return &Value{Kind: &Value_BoolValue{BoolValue: v}} +} + +// NewNumberValue constructs a new number Value. +func NewNumberValue(v float64) *Value { + return &Value{Kind: &Value_NumberValue{NumberValue: v}} +} + +// NewStringValue constructs a new string Value. +func NewStringValue(v string) *Value { + return &Value{Kind: &Value_StringValue{StringValue: v}} +} + +// NewStructValue constructs a new struct Value. +func NewStructValue(v *Struct) *Value { + return &Value{Kind: &Value_StructValue{StructValue: v}} +} + +// NewListValue constructs a new list Value. +func NewListValue(v *ListValue) *Value { + return &Value{Kind: &Value_ListValue{ListValue: v}} +} + +// AsInterface converts x to a general-purpose Go interface. 
+// +// Calling Value.MarshalJSON and "encoding/json".Marshal on this output produce +// semantically equivalent JSON (assuming no errors occur). +// +// Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are +// converted as strings to remain compatible with MarshalJSON. +func (x *Value) AsInterface() interface{} { + switch v := x.GetKind().(type) { + case *Value_NumberValue: + if v != nil { + switch { + case math.IsNaN(v.NumberValue): + return "NaN" + case math.IsInf(v.NumberValue, +1): + return "Infinity" + case math.IsInf(v.NumberValue, -1): + return "-Infinity" + default: + return v.NumberValue + } + } + case *Value_StringValue: + if v != nil { + return v.StringValue + } + case *Value_BoolValue: + if v != nil { + return v.BoolValue + } + case *Value_StructValue: + if v != nil { + return v.StructValue.AsMap() + } + case *Value_ListValue: + if v != nil { + return v.ListValue.AsSlice() + } + } + return nil +} + +func (x *Value) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *Value) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + func (x *Value) Reset() { *x = Value{} if protoimpl.UnsafeEnabled { @@ -306,6 +579,38 @@ type ListValue struct { Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` } +// NewList constructs a ListValue from a general-purpose Go slice. +// The slice elements are converted using NewValue. +func NewList(v []interface{}) (*ListValue, error) { + x := &ListValue{Values: make([]*Value, len(v))} + for i, v := range v { + var err error + x.Values[i], err = NewValue(v) + if err != nil { + return nil, err + } + } + return x, nil +} + +// AsSlice converts x to a general-purpose Go slice. +// The slice elements are converted by calling Value.AsInterface. +func (x *ListValue) AsSlice() []interface{} { + vs := make([]interface{}, len(x.GetValues())) + for i, v := range x.GetValues() { + vs[i] = v.AsInterface() + } + return vs +} + +func (x *ListValue) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *ListValue) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + func (x *ListValue) Reset() { *x = ListValue{} if protoimpl.UnsafeEnabled { diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 6fe6d42f17a..c25e4bd7d0d 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -31,6 +31,48 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/timestamp.proto +// Package timestamppb contains generated types for google/protobuf/timestamp.proto. +// +// The Timestamp message represents a timestamp, +// an instant in time since the Unix epoch (January 1st, 1970). +// +// +// Conversion to a Go Time +// +// The AsTime method can be used to convert a Timestamp message to a +// standard Go time.Time value in UTC: +// +// t := ts.AsTime() +// ... // make use of t as a time.Time +// +// Converting to a time.Time is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsTime method performs the conversion on a best-effort basis. 
Timestamps +// with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive) +// are normalized during the conversion to a time.Time. To manually check for +// invalid Timestamps per the documented limitations in timestamp.proto, +// additionally call the CheckValid method: +// +// if err := ts.CheckValid(); err != nil { +// ... // handle error +// } +// +// +// Conversion from a Go Time +// +// The timestamppb.New function can be used to construct a Timestamp message +// from a standard Go time.Time value: +// +// ts := timestamppb.New(t) +// ... // make use of ts as a *timestamppb.Timestamp +// +// In order to construct a Timestamp representing the current time, use Now: +// +// ts := timestamppb.Now() +// ... // make use of ts as a *timestamppb.Timestamp +// package timestamppb import ( @@ -38,6 +80,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + time "time" ) // A Timestamp represents a point in time independent of any time zone or local @@ -140,6 +183,73 @@ type Timestamp struct { Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` } +// Now constructs a new Timestamp from the current time. +func Now() *Timestamp { + return New(time.Now()) +} + +// New constructs a new Timestamp from the provided time.Time. +func New(t time.Time) *Timestamp { + return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())} +} + +// AsTime converts x to a time.Time. +func (x *Timestamp) AsTime() time.Time { + return time.Unix(int64(x.GetSeconds()), int64(x.GetNanos())).UTC() +} + +// IsValid reports whether the timestamp is valid. +// It is equivalent to CheckValid == nil. +func (x *Timestamp) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the timestamp is invalid. +// In particular, it checks whether the value represents a date that is +// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive. +// An error is reported for a nil Timestamp. 
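A short illustrative sketch, not taken from this diff, of the timestamppb helpers added above (Now, New, AsTime, CheckValid); the values are arbitrary:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	ts := timestamppb.Now()                   // current time as *timestamppb.Timestamp
	epoch := timestamppb.New(time.Unix(0, 0)) // from an arbitrary time.Time
	if err := ts.CheckValid(); err != nil {
		fmt.Println("invalid timestamp:", err)
		return
	}
	fmt.Println(ts.AsTime().Sub(epoch.AsTime())) // elapsed time since the Unix epoch
}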
+func (x *Timestamp) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Timestamp") + case invalidUnderflow: + return protoimpl.X.NewError("timestamp (%v) before 0001-01-01", x) + case invalidOverflow: + return protoimpl.X.NewError("timestamp (%v) after 9999-12-31", x) + case invalidNanos: + return protoimpl.X.NewError("timestamp (%v) has out-of-range nanos", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanos +) + +func (x *Timestamp) check() uint { + const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive + const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < minTimestamp: + return invalidUnderflow + case secs > maxTimestamp: + return invalidOverflow + case nanos < 0 || nanos >= 1e9: + return invalidNanos + default: + return 0 + } +} + func (x *Timestamp) Reset() { *x = Timestamp{} if protoimpl.UnsafeEnabled { diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index 5c5ec2f1021..2355adf428e 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -62,6 +62,11 @@ type DoubleValue struct { Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` } +// Double stores v in a new DoubleValue and returns a pointer to it. +func Double(v float64) *DoubleValue { + return &DoubleValue{Value: v} +} + func (x *DoubleValue) Reset() { *x = DoubleValue{} if protoimpl.UnsafeEnabled { @@ -113,6 +118,11 @@ type FloatValue struct { Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` } +// Float stores v in a new FloatValue and returns a pointer to it. +func Float(v float32) *FloatValue { + return &FloatValue{Value: v} +} + func (x *FloatValue) Reset() { *x = FloatValue{} if protoimpl.UnsafeEnabled { @@ -164,6 +174,11 @@ type Int64Value struct { Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` } +// Int64 stores v in a new Int64Value and returns a pointer to it. +func Int64(v int64) *Int64Value { + return &Int64Value{Value: v} +} + func (x *Int64Value) Reset() { *x = Int64Value{} if protoimpl.UnsafeEnabled { @@ -215,6 +230,11 @@ type UInt64Value struct { Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` } +// UInt64 stores v in a new UInt64Value and returns a pointer to it. +func UInt64(v uint64) *UInt64Value { + return &UInt64Value{Value: v} +} + func (x *UInt64Value) Reset() { *x = UInt64Value{} if protoimpl.UnsafeEnabled { @@ -266,6 +286,11 @@ type Int32Value struct { Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` } +// Int32 stores v in a new Int32Value and returns a pointer to it. +func Int32(v int32) *Int32Value { + return &Int32Value{Value: v} +} + func (x *Int32Value) Reset() { *x = Int32Value{} if protoimpl.UnsafeEnabled { @@ -317,6 +342,11 @@ type UInt32Value struct { Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` } +// UInt32 stores v in a new UInt32Value and returns a pointer to it. 
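Finally, a minimal sketch of the new wrapperspb constructors; the wrapped values (replicas, hostname, enabled) are made up purely for illustration and do not come from this change:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	replicas := wrapperspb.Int32(3)       // *wrapperspb.Int32Value
	hostname := wrapperspb.String("node") // *wrapperspb.StringValue
	enabled := wrapperspb.Bool(true)      // *wrapperspb.BoolValue
	fmt.Println(replicas.GetValue(), hostname.GetValue(), enabled.GetValue())
}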
+func UInt32(v uint32) *UInt32Value { + return &UInt32Value{Value: v} +} + func (x *UInt32Value) Reset() { *x = UInt32Value{} if protoimpl.UnsafeEnabled { @@ -368,6 +398,11 @@ type BoolValue struct { Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` } +// Bool stores v in a new BoolValue and returns a pointer to it. +func Bool(v bool) *BoolValue { + return &BoolValue{Value: v} +} + func (x *BoolValue) Reset() { *x = BoolValue{} if protoimpl.UnsafeEnabled { @@ -419,6 +454,11 @@ type StringValue struct { Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` } +// String stores v in a new StringValue and returns a pointer to it. +func String(v string) *StringValue { + return &StringValue{Value: v} +} + func (x *StringValue) Reset() { *x = StringValue{} if protoimpl.UnsafeEnabled { @@ -470,6 +510,11 @@ type BytesValue struct { Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` } +// Bytes stores v in a new BytesValue and returns a pointer to it. +func Bytes(v []byte) *BytesValue { + return &BytesValue{Value: v} +} + func (x *BytesValue) Reset() { *x = BytesValue{} if protoimpl.UnsafeEnabled { diff --git a/vendor/honnef.co/go/tools/LICENSE b/vendor/honnef.co/go/tools/LICENSE deleted file mode 100644 index dfd03145460..00000000000 --- a/vendor/honnef.co/go/tools/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2016 Dominik Honnef - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY b/vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY deleted file mode 100644 index 623d85e85b7..00000000000 --- a/vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY +++ /dev/null @@ -1,284 +0,0 @@ -Staticcheck and its related tools make use of third party projects, -either by reusing their code, or by statically linking them into -resulting binaries. These projects are: - -* The Go Programming Language - https://golang.org/ - - Copyright (c) 2009 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. 
nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -* github.com/BurntSushi/toml - https://github.com/BurntSushi/toml - - The MIT License (MIT) - - Copyright (c) 2013 TOML authors - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. - - -* github.com/google/renameio - https://github.com/google/renameio - - Copyright 2018 Google Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - -* github.com/kisielk/gotool - https://github.com/kisielk/gotool - - Copyright (c) 2013 Kamil Kisiel - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. 
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - All the files in this distribution are covered under either the MIT - license (see the file LICENSE) except some files mentioned below. - - match.go, match_test.go: - - Copyright (c) 2009 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -* github.com/rogpeppe/go-internal - https://github.com/rogpeppe/go-internal - - Copyright (c) 2018 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -* golang.org/x/mod/module - https://github.com/golang/mod - - Copyright (c) 2009 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -* golang.org/x/tools/go/analysis - https://github.com/golang/tools - - Copyright (c) 2009 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -* gogrep - https://github.com/mvdan/gogrep - - Copyright (c) 2017, Daniel Martí. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -* gosmith - https://github.com/dvyukov/gosmith - - Copyright (c) 2014 Dmitry Vyukov. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * The name of Dmitry Vyukov may be used to endorse or promote - products derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/honnef.co/go/tools/arg/arg.go b/vendor/honnef.co/go/tools/arg/arg.go deleted file mode 100644 index 1e7f30db42d..00000000000 --- a/vendor/honnef.co/go/tools/arg/arg.go +++ /dev/null @@ -1,48 +0,0 @@ -package arg - -var args = map[string]int{ - "(*encoding/json.Decoder).Decode.v": 0, - "(*encoding/json.Encoder).Encode.v": 0, - "(*encoding/xml.Decoder).Decode.v": 0, - "(*encoding/xml.Encoder).Encode.v": 0, - "(*sync.Pool).Put.x": 0, - "(*text/template.Template).Parse.text": 0, - "(io.Seeker).Seek.offset": 0, - "(time.Time).Sub.u": 0, - "append.elems": 1, - "append.slice": 0, - "bytes.Equal.a": 0, - "bytes.Equal.b": 1, - "encoding/binary.Write.data": 2, - "errors.New.text": 0, - "fmt.Fprintf.format": 1, - "fmt.Printf.format": 0, - "fmt.Sprintf.a[0]": 1, - "fmt.Sprintf.format": 0, - "json.Marshal.v": 0, - "json.Unmarshal.v": 1, - "len.v": 0, - "make.size[0]": 1, - "make.size[1]": 2, - "make.t": 0, - "net/url.Parse.rawurl": 0, - "os.OpenFile.flag": 1, - "os/exec.Command.name": 0, - "os/signal.Notify.c": 0, - "regexp.Compile.expr": 0, - "runtime.SetFinalizer.finalizer": 1, - "runtime.SetFinalizer.obj": 0, - "sort.Sort.data": 0, - "time.Parse.layout": 0, - "time.Sleep.d": 0, - "xml.Marshal.v": 0, - "xml.Unmarshal.v": 1, -} - -func Arg(name string) int { - n, ok := args[name] - if !ok { - panic("unknown argument " + name) - } - return n -} diff --git a/vendor/honnef.co/go/tools/cmd/staticcheck/README.md b/vendor/honnef.co/go/tools/cmd/staticcheck/README.md deleted file mode 100644 index 4d14577fdf7..00000000000 --- a/vendor/honnef.co/go/tools/cmd/staticcheck/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# staticcheck - -_staticcheck_ offers extensive analysis of Go code, covering a myriad -of categories. It will detect bugs, suggest code simplifications, -point out dead code, and more. - -## Installation - -See [the main README](https://github.com/dominikh/go-tools#installation) for installation instructions. - -## Documentation - -Detailed documentation can be found on -[staticcheck.io](https://staticcheck.io/docs/). - diff --git a/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go b/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go deleted file mode 100644 index 4f504dc39db..00000000000 --- a/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go +++ /dev/null @@ -1,44 +0,0 @@ -// staticcheck analyses Go code and makes it better. 
-package main // import "honnef.co/go/tools/cmd/staticcheck" - -import ( - "log" - "os" - - "golang.org/x/tools/go/analysis" - "honnef.co/go/tools/lint" - "honnef.co/go/tools/lint/lintutil" - "honnef.co/go/tools/simple" - "honnef.co/go/tools/staticcheck" - "honnef.co/go/tools/stylecheck" - "honnef.co/go/tools/unused" -) - -func main() { - fs := lintutil.FlagSet("staticcheck") - wholeProgram := fs.Bool("unused.whole-program", false, "Run unused in whole program mode") - debug := fs.String("debug.unused-graph", "", "Write unused's object graph to `file`") - fs.Parse(os.Args[1:]) - - var cs []*analysis.Analyzer - for _, v := range simple.Analyzers { - cs = append(cs, v) - } - for _, v := range staticcheck.Analyzers { - cs = append(cs, v) - } - for _, v := range stylecheck.Analyzers { - cs = append(cs, v) - } - - u := unused.NewChecker(*wholeProgram) - if *debug != "" { - f, err := os.OpenFile(*debug, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) - if err != nil { - log.Fatal(err) - } - u.Debug = f - } - cums := []lint.CumulativeChecker{u} - lintutil.ProcessFlagSet(cs, cums, fs) -} diff --git a/vendor/honnef.co/go/tools/code/code.go b/vendor/honnef.co/go/tools/code/code.go deleted file mode 100644 index 6f4df8b9aa6..00000000000 --- a/vendor/honnef.co/go/tools/code/code.go +++ /dev/null @@ -1,481 +0,0 @@ -// Package code answers structural and type questions about Go code. -package code - -import ( - "flag" - "fmt" - "go/ast" - "go/constant" - "go/token" - "go/types" - "strings" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/ast/inspector" - "honnef.co/go/tools/facts" - "honnef.co/go/tools/go/types/typeutil" - "honnef.co/go/tools/ir" - "honnef.co/go/tools/lint" -) - -type Positioner interface { - Pos() token.Pos -} - -func CallName(call *ir.CallCommon) string { - if call.IsInvoke() { - return "" - } - switch v := call.Value.(type) { - case *ir.Function: - fn, ok := v.Object().(*types.Func) - if !ok { - return "" - } - return lint.FuncName(fn) - case *ir.Builtin: - return v.Name() - } - return "" -} - -func IsCallTo(call *ir.CallCommon, name string) bool { return CallName(call) == name } - -func IsCallToAny(call *ir.CallCommon, names ...string) bool { - q := CallName(call) - for _, name := range names { - if q == name { - return true - } - } - return false -} - -func IsType(T types.Type, name string) bool { return types.TypeString(T, nil) == name } - -func FilterDebug(instr []ir.Instruction) []ir.Instruction { - var out []ir.Instruction - for _, ins := range instr { - if _, ok := ins.(*ir.DebugRef); !ok { - out = append(out, ins) - } - } - return out -} - -func IsExample(fn *ir.Function) bool { - if !strings.HasPrefix(fn.Name(), "Example") { - return false - } - f := fn.Prog.Fset.File(fn.Pos()) - if f == nil { - return false - } - return strings.HasSuffix(f.Name(), "_test.go") -} - -func IsPointerLike(T types.Type) bool { - switch T := T.Underlying().(type) { - case *types.Interface, *types.Chan, *types.Map, *types.Signature, *types.Pointer: - return true - case *types.Basic: - return T.Kind() == types.UnsafePointer - } - return false -} - -func IsIdent(expr ast.Expr, ident string) bool { - id, ok := expr.(*ast.Ident) - return ok && id.Name == ident -} - -// isBlank returns whether id is the blank identifier "_". -// If id == nil, the answer is false. 
-func IsBlank(id ast.Expr) bool { - ident, _ := id.(*ast.Ident) - return ident != nil && ident.Name == "_" -} - -func IsIntLiteral(expr ast.Expr, literal string) bool { - lit, ok := expr.(*ast.BasicLit) - return ok && lit.Kind == token.INT && lit.Value == literal -} - -// Deprecated: use IsIntLiteral instead -func IsZero(expr ast.Expr) bool { - return IsIntLiteral(expr, "0") -} - -func IsOfType(pass *analysis.Pass, expr ast.Expr, name string) bool { - return IsType(pass.TypesInfo.TypeOf(expr), name) -} - -func IsInTest(pass *analysis.Pass, node Positioner) bool { - // FIXME(dh): this doesn't work for global variables with - // initializers - f := pass.Fset.File(node.Pos()) - return f != nil && strings.HasSuffix(f.Name(), "_test.go") -} - -// IsMain reports whether the package being processed is a package -// main. -func IsMain(pass *analysis.Pass) bool { - return pass.Pkg.Name() == "main" -} - -// IsMainLike reports whether the package being processed is a -// main-like package. A main-like package is a package that is -// package main, or that is intended to be used by a tool framework -// such as cobra to implement a command. -// -// Note that this function errs on the side of false positives; it may -// return true for packages that aren't main-like. IsMainLike is -// intended for analyses that wish to suppress diagnostics for -// main-like packages to avoid false positives. -func IsMainLike(pass *analysis.Pass) bool { - if pass.Pkg.Name() == "main" { - return true - } - for _, imp := range pass.Pkg.Imports() { - if imp.Path() == "github.com/spf13/cobra" { - return true - } - } - return false -} - -func SelectorName(pass *analysis.Pass, expr *ast.SelectorExpr) string { - info := pass.TypesInfo - sel := info.Selections[expr] - if sel == nil { - if x, ok := expr.X.(*ast.Ident); ok { - pkg, ok := info.ObjectOf(x).(*types.PkgName) - if !ok { - // This shouldn't happen - return fmt.Sprintf("%s.%s", x.Name, expr.Sel.Name) - } - return fmt.Sprintf("%s.%s", pkg.Imported().Path(), expr.Sel.Name) - } - panic(fmt.Sprintf("unsupported selector: %v", expr)) - } - return fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name()) -} - -func IsNil(pass *analysis.Pass, expr ast.Expr) bool { - return pass.TypesInfo.Types[expr].IsNil() -} - -func BoolConst(pass *analysis.Pass, expr ast.Expr) bool { - val := pass.TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val() - return constant.BoolVal(val) -} - -func IsBoolConst(pass *analysis.Pass, expr ast.Expr) bool { - // We explicitly don't support typed bools because more often than - // not, custom bool types are used as binary enums and the - // explicit comparison is desired. 
- - ident, ok := expr.(*ast.Ident) - if !ok { - return false - } - obj := pass.TypesInfo.ObjectOf(ident) - c, ok := obj.(*types.Const) - if !ok { - return false - } - basic, ok := c.Type().(*types.Basic) - if !ok { - return false - } - if basic.Kind() != types.UntypedBool && basic.Kind() != types.Bool { - return false - } - return true -} - -func ExprToInt(pass *analysis.Pass, expr ast.Expr) (int64, bool) { - tv := pass.TypesInfo.Types[expr] - if tv.Value == nil { - return 0, false - } - if tv.Value.Kind() != constant.Int { - return 0, false - } - return constant.Int64Val(tv.Value) -} - -func ExprToString(pass *analysis.Pass, expr ast.Expr) (string, bool) { - val := pass.TypesInfo.Types[expr].Value - if val == nil { - return "", false - } - if val.Kind() != constant.String { - return "", false - } - return constant.StringVal(val), true -} - -// Dereference returns a pointer's element type; otherwise it returns -// T. -func Dereference(T types.Type) types.Type { - if p, ok := T.Underlying().(*types.Pointer); ok { - return p.Elem() - } - return T -} - -// DereferenceR returns a pointer's element type; otherwise it returns -// T. If the element type is itself a pointer, DereferenceR will be -// applied recursively. -func DereferenceR(T types.Type) types.Type { - if p, ok := T.Underlying().(*types.Pointer); ok { - return DereferenceR(p.Elem()) - } - return T -} - -func CallNameAST(pass *analysis.Pass, call *ast.CallExpr) string { - switch fun := astutil.Unparen(call.Fun).(type) { - case *ast.SelectorExpr: - fn, ok := pass.TypesInfo.ObjectOf(fun.Sel).(*types.Func) - if !ok { - return "" - } - return lint.FuncName(fn) - case *ast.Ident: - obj := pass.TypesInfo.ObjectOf(fun) - switch obj := obj.(type) { - case *types.Func: - return lint.FuncName(obj) - case *types.Builtin: - return obj.Name() - default: - return "" - } - default: - return "" - } -} - -func IsCallToAST(pass *analysis.Pass, node ast.Node, name string) bool { - call, ok := node.(*ast.CallExpr) - if !ok { - return false - } - return CallNameAST(pass, call) == name -} - -func IsCallToAnyAST(pass *analysis.Pass, node ast.Node, names ...string) bool { - call, ok := node.(*ast.CallExpr) - if !ok { - return false - } - q := CallNameAST(pass, call) - for _, name := range names { - if q == name { - return true - } - } - return false -} - -func Preamble(f *ast.File) string { - cutoff := f.Package - if f.Doc != nil { - cutoff = f.Doc.Pos() - } - var out []string - for _, cmt := range f.Comments { - if cmt.Pos() >= cutoff { - break - } - out = append(out, cmt.Text()) - } - return strings.Join(out, "\n") -} - -func GroupSpecs(fset *token.FileSet, specs []ast.Spec) [][]ast.Spec { - if len(specs) == 0 { - return nil - } - groups := make([][]ast.Spec, 1) - groups[0] = append(groups[0], specs[0]) - - for _, spec := range specs[1:] { - g := groups[len(groups)-1] - if fset.PositionFor(spec.Pos(), false).Line-1 != - fset.PositionFor(g[len(g)-1].End(), false).Line { - - groups = append(groups, nil) - } - - groups[len(groups)-1] = append(groups[len(groups)-1], spec) - } - - return groups -} - -func IsObject(obj types.Object, name string) bool { - var path string - if pkg := obj.Pkg(); pkg != nil { - path = pkg.Path() + "." - } - return path+obj.Name() == name -} - -type Field struct { - Var *types.Var - Tag string - Path []int -} - -// FlattenFields recursively flattens T and embedded structs, -// returning a list of fields. If multiple fields with the same name -// exist, all will be returned. 
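FlattenFields, documented just above, collects a struct's fields while descending into embedded structs. A minimal standalone sketch of that flattening idea using only the standard go/types package (the sample source `p`, the `flatten` helper, and the name prefixes are made up for illustration):

    package main

    import (
    	"fmt"
    	"go/ast"
    	"go/parser"
    	"go/token"
    	"go/types"
    )

    const src = `package p
    type Base struct{ ID int }
    type User struct {
    	Base
    	Name string
    }`

    func main() {
    	fset := token.NewFileSet()
    	file, err := parser.ParseFile(fset, "p.go", src, 0)
    	if err != nil {
    		panic(err)
    	}
    	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{file}, nil)
    	if err != nil {
    		panic(err)
    	}
    	user := pkg.Scope().Lookup("User").Type().Underlying().(*types.Struct)

    	// flatten prints every field, descending into embedded structs.
    	var flatten func(s *types.Struct, prefix string)
    	flatten = func(s *types.Struct, prefix string) {
    		for i := 0; i < s.NumFields(); i++ {
    			f := s.Field(i)
    			if f.Anonymous() {
    				if emb, ok := f.Type().Underlying().(*types.Struct); ok {
    					flatten(emb, prefix+f.Name()+".")
    					continue
    				}
    			}
    			fmt.Println(prefix + f.Name())
    		}
    	}
    	flatten(user, "") // prints Base.ID and Name
    }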
-func FlattenFields(T *types.Struct) []Field { - return flattenFields(T, nil, nil) -} - -func flattenFields(T *types.Struct, path []int, seen map[types.Type]bool) []Field { - if seen == nil { - seen = map[types.Type]bool{} - } - if seen[T] { - return nil - } - seen[T] = true - var out []Field - for i := 0; i < T.NumFields(); i++ { - field := T.Field(i) - tag := T.Tag(i) - np := append(path[:len(path):len(path)], i) - if field.Anonymous() { - if s, ok := Dereference(field.Type()).Underlying().(*types.Struct); ok { - out = append(out, flattenFields(s, np, seen)...) - } - } else { - out = append(out, Field{field, tag, np}) - } - } - return out -} - -func File(pass *analysis.Pass, node Positioner) *ast.File { - m := pass.ResultOf[facts.TokenFile].(map[*token.File]*ast.File) - return m[pass.Fset.File(node.Pos())] -} - -// IsGenerated reports whether pos is in a generated file, It ignores -// //line directives. -func IsGenerated(pass *analysis.Pass, pos token.Pos) bool { - _, ok := Generator(pass, pos) - return ok -} - -// Generator returns the generator that generated the file containing -// pos. It ignores //line directives. -func Generator(pass *analysis.Pass, pos token.Pos) (facts.Generator, bool) { - file := pass.Fset.PositionFor(pos, false).Filename - m := pass.ResultOf[facts.Generated].(map[string]facts.Generator) - g, ok := m[file] - return g, ok -} - -// MayHaveSideEffects reports whether expr may have side effects. If -// the purity argument is nil, this function implements a purely -// syntactic check, meaning that any function call may have side -// effects, regardless of the called function's body. Otherwise, -// purity will be consulted to determine the purity of function calls. -func MayHaveSideEffects(pass *analysis.Pass, expr ast.Expr, purity facts.PurityResult) bool { - switch expr := expr.(type) { - case *ast.BadExpr: - return true - case *ast.Ellipsis: - return MayHaveSideEffects(pass, expr.Elt, purity) - case *ast.FuncLit: - // the literal itself cannot have side ffects, only calling it - // might, which is handled by CallExpr. 
- return false - case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType: - // types cannot have side effects - return false - case *ast.BasicLit: - return false - case *ast.BinaryExpr: - return MayHaveSideEffects(pass, expr.X, purity) || MayHaveSideEffects(pass, expr.Y, purity) - case *ast.CallExpr: - if purity == nil { - return true - } - switch obj := typeutil.Callee(pass.TypesInfo, expr).(type) { - case *types.Func: - if _, ok := purity[obj]; !ok { - return true - } - case *types.Builtin: - switch obj.Name() { - case "len", "cap": - default: - return true - } - default: - return true - } - for _, arg := range expr.Args { - if MayHaveSideEffects(pass, arg, purity) { - return true - } - } - return false - case *ast.CompositeLit: - if MayHaveSideEffects(pass, expr.Type, purity) { - return true - } - for _, elt := range expr.Elts { - if MayHaveSideEffects(pass, elt, purity) { - return true - } - } - return false - case *ast.Ident: - return false - case *ast.IndexExpr: - return MayHaveSideEffects(pass, expr.X, purity) || MayHaveSideEffects(pass, expr.Index, purity) - case *ast.KeyValueExpr: - return MayHaveSideEffects(pass, expr.Key, purity) || MayHaveSideEffects(pass, expr.Value, purity) - case *ast.SelectorExpr: - return MayHaveSideEffects(pass, expr.X, purity) - case *ast.SliceExpr: - return MayHaveSideEffects(pass, expr.X, purity) || - MayHaveSideEffects(pass, expr.Low, purity) || - MayHaveSideEffects(pass, expr.High, purity) || - MayHaveSideEffects(pass, expr.Max, purity) - case *ast.StarExpr: - return MayHaveSideEffects(pass, expr.X, purity) - case *ast.TypeAssertExpr: - return MayHaveSideEffects(pass, expr.X, purity) - case *ast.UnaryExpr: - if MayHaveSideEffects(pass, expr.X, purity) { - return true - } - return expr.Op == token.ARROW - case *ast.ParenExpr: - return MayHaveSideEffects(pass, expr.X, purity) - case nil: - return false - default: - panic(fmt.Sprintf("internal error: unhandled type %T", expr)) - } -} - -func IsGoVersion(pass *analysis.Pass, minor int) bool { - version := pass.Analyzer.Flags.Lookup("go").Value.(flag.Getter).Get().(int) - return version >= minor -} - -func Preorder(pass *analysis.Pass, fn func(ast.Node), types ...ast.Node) { - pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder(types, fn) -} diff --git a/vendor/honnef.co/go/tools/config/config.go b/vendor/honnef.co/go/tools/config/config.go deleted file mode 100644 index 55115371b9f..00000000000 --- a/vendor/honnef.co/go/tools/config/config.go +++ /dev/null @@ -1,245 +0,0 @@ -package config - -import ( - "bytes" - "fmt" - "go/ast" - "go/token" - "os" - "path/filepath" - "reflect" - "strings" - - "github.com/BurntSushi/toml" - "golang.org/x/tools/go/analysis" -) - -// Dir looks at a list of absolute file names, which should make up a -// single package, and returns the path of the directory that may -// contain a staticcheck.conf file. It returns the empty string if no -// such directory could be determined, for example because all files -// were located in Go's build cache. -func Dir(files []string) string { - if len(files) == 0 { - return "" - } - cache, err := os.UserCacheDir() - if err != nil { - cache = "" - } - var path string - for _, p := range files { - // FIXME(dh): using strings.HasPrefix isn't technically - // correct, but it should be good enough for now. 
- if cache != "" && strings.HasPrefix(p, cache) { - // File in the build cache of the standard Go build system - continue - } - path = p - break - } - - if path == "" { - // The package only consists of generated files. - return "" - } - - dir := filepath.Dir(path) - return dir -} - -func dirAST(files []*ast.File, fset *token.FileSet) string { - names := make([]string, len(files)) - for i, f := range files { - names[i] = fset.PositionFor(f.Pos(), true).Filename - } - return Dir(names) -} - -var Analyzer = &analysis.Analyzer{ - Name: "config", - Doc: "loads configuration for the current package tree", - Run: func(pass *analysis.Pass) (interface{}, error) { - dir := dirAST(pass.Files, pass.Fset) - if dir == "" { - cfg := DefaultConfig - return &cfg, nil - } - cfg, err := Load(dir) - if err != nil { - return nil, fmt.Errorf("error loading staticcheck.conf: %s", err) - } - return &cfg, nil - }, - RunDespiteErrors: true, - ResultType: reflect.TypeOf((*Config)(nil)), -} - -func For(pass *analysis.Pass) *Config { - return pass.ResultOf[Analyzer].(*Config) -} - -func mergeLists(a, b []string) []string { - out := make([]string, 0, len(a)+len(b)) - for _, el := range b { - if el == "inherit" { - out = append(out, a...) - } else { - out = append(out, el) - } - } - - return out -} - -func normalizeList(list []string) []string { - if len(list) > 1 { - nlist := make([]string, 0, len(list)) - nlist = append(nlist, list[0]) - for i, el := range list[1:] { - if el != list[i] { - nlist = append(nlist, el) - } - } - list = nlist - } - - for _, el := range list { - if el == "inherit" { - // This should never happen, because the default config - // should not use "inherit" - panic(`unresolved "inherit"`) - } - } - - return list -} - -func (cfg Config) Merge(ocfg Config) Config { - if ocfg.Checks != nil { - cfg.Checks = mergeLists(cfg.Checks, ocfg.Checks) - } - if ocfg.Initialisms != nil { - cfg.Initialisms = mergeLists(cfg.Initialisms, ocfg.Initialisms) - } - if ocfg.DotImportWhitelist != nil { - cfg.DotImportWhitelist = mergeLists(cfg.DotImportWhitelist, ocfg.DotImportWhitelist) - } - if ocfg.HTTPStatusCodeWhitelist != nil { - cfg.HTTPStatusCodeWhitelist = mergeLists(cfg.HTTPStatusCodeWhitelist, ocfg.HTTPStatusCodeWhitelist) - } - return cfg -} - -type Config struct { - // TODO(dh): this implementation makes it impossible for external - // clients to add their own checkers with configuration. At the - // moment, we don't really care about that; we don't encourage - // that people use this package. In the future, we may. The - // obvious solution would be using map[string]interface{}, but - // that's obviously subpar. 
- - Checks []string `toml:"checks"` - Initialisms []string `toml:"initialisms"` - DotImportWhitelist []string `toml:"dot_import_whitelist"` - HTTPStatusCodeWhitelist []string `toml:"http_status_code_whitelist"` -} - -func (c Config) String() string { - buf := &bytes.Buffer{} - - fmt.Fprintf(buf, "Checks: %#v\n", c.Checks) - fmt.Fprintf(buf, "Initialisms: %#v\n", c.Initialisms) - fmt.Fprintf(buf, "DotImportWhitelist: %#v\n", c.DotImportWhitelist) - fmt.Fprintf(buf, "HTTPStatusCodeWhitelist: %#v", c.HTTPStatusCodeWhitelist) - - return buf.String() -} - -var DefaultConfig = Config{ - Checks: []string{"all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022"}, - Initialisms: []string{ - "ACL", "API", "ASCII", "CPU", "CSS", "DNS", - "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", - "IP", "JSON", "QPS", "RAM", "RPC", "SLA", - "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", - "UDP", "UI", "GID", "UID", "UUID", "URI", - "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", - "XSS", "SIP", "RTP", "AMQP", "DB", "TS", - }, - DotImportWhitelist: []string{}, - HTTPStatusCodeWhitelist: []string{"200", "400", "404", "500"}, -} - -const ConfigName = "staticcheck.conf" - -func parseConfigs(dir string) ([]Config, error) { - var out []Config - - // TODO(dh): consider stopping at the GOPATH/module boundary - for dir != "" { - f, err := os.Open(filepath.Join(dir, ConfigName)) - if os.IsNotExist(err) { - ndir := filepath.Dir(dir) - if ndir == dir { - break - } - dir = ndir - continue - } - if err != nil { - return nil, err - } - var cfg Config - _, err = toml.DecodeReader(f, &cfg) - f.Close() - if err != nil { - return nil, err - } - out = append(out, cfg) - ndir := filepath.Dir(dir) - if ndir == dir { - break - } - dir = ndir - } - out = append(out, DefaultConfig) - if len(out) < 2 { - return out, nil - } - for i := 0; i < len(out)/2; i++ { - out[i], out[len(out)-1-i] = out[len(out)-1-i], out[i] - } - return out, nil -} - -func mergeConfigs(confs []Config) Config { - if len(confs) == 0 { - // This shouldn't happen because we always have at least a - // default config. 
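The per-directory configs gathered by parseConfigs are folded together by Merge/mergeLists above, where the literal entry "inherit" in a child list expands to the parent list. A small self-contained demo of that expansion rule (the surrounding main and the example check lists are illustrative):

    package main

    import "fmt"

    // mergeLists expands the literal entry "inherit" in the child list
    // to the parent list and keeps every other entry as-is, mirroring
    // the merge rule above.
    func mergeLists(parent, child []string) []string {
    	out := make([]string, 0, len(parent)+len(child))
    	for _, el := range child {
    		if el == "inherit" {
    			out = append(out, parent...)
    		} else {
    			out = append(out, el)
    		}
    	}
    	return out
    }

    func main() {
    	parent := []string{"all", "-ST1000"}
    	child := []string{"inherit", "-ST1003"}
    	fmt.Println(mergeLists(parent, child)) // [all -ST1000 -ST1003]
    }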
- panic("trying to merge zero configs") - } - if len(confs) == 1 { - return confs[0] - } - conf := confs[0] - for _, oconf := range confs[1:] { - conf = conf.Merge(oconf) - } - return conf -} - -func Load(dir string) (Config, error) { - confs, err := parseConfigs(dir) - if err != nil { - return Config{}, err - } - conf := mergeConfigs(confs) - - conf.Checks = normalizeList(conf.Checks) - conf.Initialisms = normalizeList(conf.Initialisms) - conf.DotImportWhitelist = normalizeList(conf.DotImportWhitelist) - conf.HTTPStatusCodeWhitelist = normalizeList(conf.HTTPStatusCodeWhitelist) - - return conf, nil -} diff --git a/vendor/honnef.co/go/tools/config/example.conf b/vendor/honnef.co/go/tools/config/example.conf deleted file mode 100644 index a715a24d4fc..00000000000 --- a/vendor/honnef.co/go/tools/config/example.conf +++ /dev/null @@ -1,10 +0,0 @@ -checks = ["all", "-ST1003", "-ST1014"] -initialisms = ["ACL", "API", "ASCII", "CPU", "CSS", "DNS", - "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", - "IP", "JSON", "QPS", "RAM", "RPC", "SLA", - "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", - "UDP", "UI", "GID", "UID", "UUID", "URI", - "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", - "XSS", "SIP", "RTP"] -dot_import_whitelist = [] -http_status_code_whitelist = ["200", "400", "404", "500"] diff --git a/vendor/honnef.co/go/tools/deprecated/stdlib.go b/vendor/honnef.co/go/tools/deprecated/stdlib.go deleted file mode 100644 index cabb8500a25..00000000000 --- a/vendor/honnef.co/go/tools/deprecated/stdlib.go +++ /dev/null @@ -1,119 +0,0 @@ -package deprecated - -type Deprecation struct { - DeprecatedSince int - AlternativeAvailableSince int -} - -var Stdlib = map[string]Deprecation{ - // FIXME(dh): AllowBinary isn't being detected as deprecated - // because the comment has a newline right after "Deprecated:" - "go/build.AllowBinary": {7, 7}, - "(archive/zip.FileHeader).CompressedSize": {1, 1}, - "(archive/zip.FileHeader).UncompressedSize": {1, 1}, - "(archive/zip.FileHeader).ModifiedTime": {10, 10}, - "(archive/zip.FileHeader).ModifiedDate": {10, 10}, - "(*archive/zip.FileHeader).ModTime": {10, 10}, - "(*archive/zip.FileHeader).SetModTime": {10, 10}, - "(go/doc.Package).Bugs": {1, 1}, - "os.SEEK_SET": {7, 7}, - "os.SEEK_CUR": {7, 7}, - "os.SEEK_END": {7, 7}, - "(net.Dialer).Cancel": {7, 7}, - "runtime.CPUProfile": {9, 0}, - "compress/flate.ReadError": {6, 6}, - "compress/flate.WriteError": {6, 6}, - "path/filepath.HasPrefix": {0, 0}, - "(net/http.Transport).Dial": {7, 7}, - "(*net/http.Transport).CancelRequest": {6, 5}, - "net/http.ErrWriteAfterFlush": {7, 0}, - "net/http.ErrHeaderTooLong": {8, 0}, - "net/http.ErrShortBody": {8, 0}, - "net/http.ErrMissingContentLength": {8, 0}, - "net/http/httputil.ErrPersistEOF": {0, 0}, - "net/http/httputil.ErrClosed": {0, 0}, - "net/http/httputil.ErrPipeline": {0, 0}, - "net/http/httputil.ServerConn": {0, 0}, - "net/http/httputil.NewServerConn": {0, 0}, - "net/http/httputil.ClientConn": {0, 0}, - "net/http/httputil.NewClientConn": {0, 0}, - "net/http/httputil.NewProxyClientConn": {0, 0}, - "(net/http.Request).Cancel": {7, 7}, - "(text/template/parse.PipeNode).Line": {1, 1}, - "(text/template/parse.ActionNode).Line": {1, 1}, - "(text/template/parse.BranchNode).Line": {1, 1}, - "(text/template/parse.TemplateNode).Line": {1, 1}, - "database/sql/driver.ColumnConverter": {9, 9}, - "database/sql/driver.Execer": {8, 8}, - "database/sql/driver.Queryer": {8, 8}, - "(database/sql/driver.Conn).Begin": {8, 8}, - "(database/sql/driver.Stmt).Exec": {8, 8}, - 
"(database/sql/driver.Stmt).Query": {8, 8}, - "syscall.StringByteSlice": {1, 1}, - "syscall.StringBytePtr": {1, 1}, - "syscall.StringSlicePtr": {1, 1}, - "syscall.StringToUTF16": {1, 1}, - "syscall.StringToUTF16Ptr": {1, 1}, - "(*regexp.Regexp).Copy": {12, 12}, - "(archive/tar.Header).Xattrs": {10, 10}, - "archive/tar.TypeRegA": {11, 1}, - "go/types.NewInterface": {11, 11}, - "(*go/types.Interface).Embedded": {11, 11}, - "go/importer.For": {12, 12}, - "encoding/json.InvalidUTF8Error": {2, 2}, - "encoding/json.UnmarshalFieldError": {2, 2}, - "encoding/csv.ErrTrailingComma": {2, 2}, - "(encoding/csv.Reader).TrailingComma": {2, 2}, - "(net.Dialer).DualStack": {12, 12}, - "net/http.ErrUnexpectedTrailer": {12, 12}, - "net/http.CloseNotifier": {11, 7}, - "net/http.ProtocolError": {8, 8}, - "(crypto/x509.CertificateRequest).Attributes": {5, 3}, - // This function has no alternative, but also no purpose. - "(*crypto/rc4.Cipher).Reset": {12, 0}, - "(net/http/httptest.ResponseRecorder).HeaderMap": {11, 7}, - "image.ZP": {13, 0}, - "image.ZR": {13, 0}, - "(*debug/gosym.LineTable).LineToPC": {2, 2}, - "(*debug/gosym.LineTable).PCToLine": {2, 2}, - "crypto/tls.VersionSSL30": {13, 0}, - "(crypto/tls.Config).NameToCertificate": {14, 14}, - "(*crypto/tls.Config).BuildNameToCertificate": {14, 14}, - "image/jpeg.Reader": {4, 0}, - - // All of these have been deprecated in favour of external libraries - "syscall.AttachLsf": {7, 0}, - "syscall.DetachLsf": {7, 0}, - "syscall.LsfSocket": {7, 0}, - "syscall.SetLsfPromisc": {7, 0}, - "syscall.LsfJump": {7, 0}, - "syscall.LsfStmt": {7, 0}, - "syscall.BpfStmt": {7, 0}, - "syscall.BpfJump": {7, 0}, - "syscall.BpfBuflen": {7, 0}, - "syscall.SetBpfBuflen": {7, 0}, - "syscall.BpfDatalink": {7, 0}, - "syscall.SetBpfDatalink": {7, 0}, - "syscall.SetBpfPromisc": {7, 0}, - "syscall.FlushBpf": {7, 0}, - "syscall.BpfInterface": {7, 0}, - "syscall.SetBpfInterface": {7, 0}, - "syscall.BpfTimeout": {7, 0}, - "syscall.SetBpfTimeout": {7, 0}, - "syscall.BpfStats": {7, 0}, - "syscall.SetBpfImmediate": {7, 0}, - "syscall.SetBpf": {7, 0}, - "syscall.CheckBpfVersion": {7, 0}, - "syscall.BpfHeadercmpl": {7, 0}, - "syscall.SetBpfHeadercmpl": {7, 0}, - "syscall.RouteRIB": {8, 0}, - "syscall.RoutingMessage": {8, 0}, - "syscall.RouteMessage": {8, 0}, - "syscall.InterfaceMessage": {8, 0}, - "syscall.InterfaceAddrMessage": {8, 0}, - "syscall.ParseRoutingMessage": {8, 0}, - "syscall.ParseRoutingSockaddr": {8, 0}, - "syscall.InterfaceAnnounceMessage": {7, 0}, - "syscall.InterfaceMulticastAddrMessage": {7, 0}, - "syscall.FormatMessage": {5, 0}, -} diff --git a/vendor/honnef.co/go/tools/edit/edit.go b/vendor/honnef.co/go/tools/edit/edit.go deleted file mode 100644 index f4cfba2347c..00000000000 --- a/vendor/honnef.co/go/tools/edit/edit.go +++ /dev/null @@ -1,67 +0,0 @@ -package edit - -import ( - "bytes" - "go/ast" - "go/format" - "go/token" - - "golang.org/x/tools/go/analysis" - "honnef.co/go/tools/pattern" -) - -type Ranger interface { - Pos() token.Pos - End() token.Pos -} - -type Range [2]token.Pos - -func (r Range) Pos() token.Pos { return r[0] } -func (r Range) End() token.Pos { return r[1] } - -func ReplaceWithString(fset *token.FileSet, old Ranger, new string) analysis.TextEdit { - return analysis.TextEdit{ - Pos: old.Pos(), - End: old.End(), - NewText: []byte(new), - } -} - -func ReplaceWithNode(fset *token.FileSet, old Ranger, new ast.Node) analysis.TextEdit { - buf := &bytes.Buffer{} - if err := format.Node(buf, fset, new); err != nil { - panic("internal error: " + err.Error()) - } - 
return analysis.TextEdit{ - Pos: old.Pos(), - End: old.End(), - NewText: buf.Bytes(), - } -} - -func ReplaceWithPattern(pass *analysis.Pass, after pattern.Pattern, state pattern.State, node Ranger) analysis.TextEdit { - r := pattern.NodeToAST(after.Root, state) - buf := &bytes.Buffer{} - format.Node(buf, pass.Fset, r) - return analysis.TextEdit{ - Pos: node.Pos(), - End: node.End(), - NewText: buf.Bytes(), - } -} - -func Delete(old Ranger) analysis.TextEdit { - return analysis.TextEdit{ - Pos: old.Pos(), - End: old.End(), - NewText: nil, - } -} - -func Fix(msg string, edits ...analysis.TextEdit) analysis.SuggestedFix { - return analysis.SuggestedFix{ - Message: msg, - TextEdits: edits, - } -} diff --git a/vendor/honnef.co/go/tools/facts/deprecated.go b/vendor/honnef.co/go/tools/facts/deprecated.go deleted file mode 100644 index 8587b0e0eae..00000000000 --- a/vendor/honnef.co/go/tools/facts/deprecated.go +++ /dev/null @@ -1,144 +0,0 @@ -package facts - -import ( - "go/ast" - "go/token" - "go/types" - "reflect" - "strings" - - "golang.org/x/tools/go/analysis" -) - -type IsDeprecated struct{ Msg string } - -func (*IsDeprecated) AFact() {} -func (d *IsDeprecated) String() string { return "Deprecated: " + d.Msg } - -type DeprecatedResult struct { - Objects map[types.Object]*IsDeprecated - Packages map[*types.Package]*IsDeprecated -} - -var Deprecated = &analysis.Analyzer{ - Name: "fact_deprecated", - Doc: "Mark deprecated objects", - Run: deprecated, - FactTypes: []analysis.Fact{(*IsDeprecated)(nil)}, - ResultType: reflect.TypeOf(DeprecatedResult{}), -} - -func deprecated(pass *analysis.Pass) (interface{}, error) { - var names []*ast.Ident - - extractDeprecatedMessage := func(docs []*ast.CommentGroup) string { - for _, doc := range docs { - if doc == nil { - continue - } - parts := strings.Split(doc.Text(), "\n\n") - last := parts[len(parts)-1] - if !strings.HasPrefix(last, "Deprecated: ") { - continue - } - alt := last[len("Deprecated: "):] - alt = strings.Replace(alt, "\n", " ", -1) - return alt - } - return "" - } - doDocs := func(names []*ast.Ident, docs []*ast.CommentGroup) { - alt := extractDeprecatedMessage(docs) - if alt == "" { - return - } - - for _, name := range names { - obj := pass.TypesInfo.ObjectOf(name) - pass.ExportObjectFact(obj, &IsDeprecated{alt}) - } - } - - var docs []*ast.CommentGroup - for _, f := range pass.Files { - docs = append(docs, f.Doc) - } - if alt := extractDeprecatedMessage(docs); alt != "" { - // Don't mark package syscall as deprecated, even though - // it is. A lot of people still use it for simple - // constants like SIGKILL, and I am not comfortable - // telling them to use x/sys for that. 
- if pass.Pkg.Path() != "syscall" { - pass.ExportPackageFact(&IsDeprecated{alt}) - } - } - - docs = docs[:0] - for _, f := range pass.Files { - fn := func(node ast.Node) bool { - if node == nil { - return true - } - var ret bool - switch node := node.(type) { - case *ast.GenDecl: - switch node.Tok { - case token.TYPE, token.CONST, token.VAR: - docs = append(docs, node.Doc) - return true - default: - return false - } - case *ast.FuncDecl: - docs = append(docs, node.Doc) - names = []*ast.Ident{node.Name} - ret = false - case *ast.TypeSpec: - docs = append(docs, node.Doc) - names = []*ast.Ident{node.Name} - ret = true - case *ast.ValueSpec: - docs = append(docs, node.Doc) - names = node.Names - ret = false - case *ast.File: - return true - case *ast.StructType: - for _, field := range node.Fields.List { - doDocs(field.Names, []*ast.CommentGroup{field.Doc}) - } - return false - case *ast.InterfaceType: - for _, field := range node.Methods.List { - doDocs(field.Names, []*ast.CommentGroup{field.Doc}) - } - return false - default: - return false - } - if len(names) == 0 || len(docs) == 0 { - return ret - } - doDocs(names, docs) - - docs = docs[:0] - names = nil - return ret - } - ast.Inspect(f, fn) - } - - out := DeprecatedResult{ - Objects: map[types.Object]*IsDeprecated{}, - Packages: map[*types.Package]*IsDeprecated{}, - } - - for _, fact := range pass.AllObjectFacts() { - out.Objects[fact.Object] = fact.Fact.(*IsDeprecated) - } - for _, fact := range pass.AllPackageFacts() { - out.Packages[fact.Package] = fact.Fact.(*IsDeprecated) - } - - return out, nil -} diff --git a/vendor/honnef.co/go/tools/facts/generated.go b/vendor/honnef.co/go/tools/facts/generated.go deleted file mode 100644 index 3e7aef11076..00000000000 --- a/vendor/honnef.co/go/tools/facts/generated.go +++ /dev/null @@ -1,90 +0,0 @@ -package facts - -import ( - "bufio" - "bytes" - "io" - "os" - "reflect" - "strings" - - "golang.org/x/tools/go/analysis" -) - -type Generator int - -// A list of known generators we can detect -const ( - Unknown Generator = iota - Goyacc - Cgo - Stringer -) - -var ( - // used by cgo before Go 1.11 - oldCgo = []byte("// Created by cgo - DO NOT EDIT") - prefix = []byte("// Code generated ") - suffix = []byte(" DO NOT EDIT.") - nl = []byte("\n") - crnl = []byte("\r\n") -) - -func isGenerated(path string) (Generator, bool) { - f, err := os.Open(path) - if err != nil { - return 0, false - } - defer f.Close() - br := bufio.NewReader(f) - for { - s, err := br.ReadBytes('\n') - if err != nil && err != io.EOF { - return 0, false - } - s = bytes.TrimSuffix(s, crnl) - s = bytes.TrimSuffix(s, nl) - if bytes.HasPrefix(s, prefix) && bytes.HasSuffix(s, suffix) { - text := string(s[len(prefix) : len(s)-len(suffix)]) - switch text { - case "by goyacc.": - return Goyacc, true - case "by cmd/cgo;": - return Cgo, true - } - if strings.HasPrefix(text, `by "stringer `) { - return Stringer, true - } - if strings.HasPrefix(text, `by goyacc `) { - return Goyacc, true - } - - return Unknown, true - } - if bytes.Equal(s, oldCgo) { - return Cgo, true - } - if err == io.EOF { - break - } - } - return 0, false -} - -var Generated = &analysis.Analyzer{ - Name: "isgenerated", - Doc: "annotate file names that have been code generated", - Run: func(pass *analysis.Pass) (interface{}, error) { - m := map[string]Generator{} - for _, f := range pass.Files { - path := pass.Fset.PositionFor(f.Pos(), false).Filename - g, ok := isGenerated(path) - if ok { - m[path] = g - } - } - return m, nil - }, - RunDespiteErrors: true, - ResultType: 
reflect.TypeOf(map[string]Generator{}), -} diff --git a/vendor/honnef.co/go/tools/facts/purity.go b/vendor/honnef.co/go/tools/facts/purity.go deleted file mode 100644 index 099ee23e3b8..00000000000 --- a/vendor/honnef.co/go/tools/facts/purity.go +++ /dev/null @@ -1,177 +0,0 @@ -package facts - -import ( - "go/types" - "reflect" - - "golang.org/x/tools/go/analysis" - "honnef.co/go/tools/functions" - "honnef.co/go/tools/internal/passes/buildir" - "honnef.co/go/tools/ir" -) - -type IsPure struct{} - -func (*IsPure) AFact() {} -func (d *IsPure) String() string { return "is pure" } - -type PurityResult map[*types.Func]*IsPure - -var Purity = &analysis.Analyzer{ - Name: "fact_purity", - Doc: "Mark pure functions", - Run: purity, - Requires: []*analysis.Analyzer{buildir.Analyzer}, - FactTypes: []analysis.Fact{(*IsPure)(nil)}, - ResultType: reflect.TypeOf(PurityResult{}), -} - -var pureStdlib = map[string]struct{}{ - "errors.New": {}, - "fmt.Errorf": {}, - "fmt.Sprintf": {}, - "fmt.Sprint": {}, - "sort.Reverse": {}, - "strings.Map": {}, - "strings.Repeat": {}, - "strings.Replace": {}, - "strings.Title": {}, - "strings.ToLower": {}, - "strings.ToLowerSpecial": {}, - "strings.ToTitle": {}, - "strings.ToTitleSpecial": {}, - "strings.ToUpper": {}, - "strings.ToUpperSpecial": {}, - "strings.Trim": {}, - "strings.TrimFunc": {}, - "strings.TrimLeft": {}, - "strings.TrimLeftFunc": {}, - "strings.TrimPrefix": {}, - "strings.TrimRight": {}, - "strings.TrimRightFunc": {}, - "strings.TrimSpace": {}, - "strings.TrimSuffix": {}, - "(*net/http.Request).WithContext": {}, -} - -func purity(pass *analysis.Pass) (interface{}, error) { - seen := map[*ir.Function]struct{}{} - irpkg := pass.ResultOf[buildir.Analyzer].(*buildir.IR).Pkg - var check func(fn *ir.Function) (ret bool) - check = func(fn *ir.Function) (ret bool) { - if fn.Object() == nil { - // TODO(dh): support closures - return false - } - if pass.ImportObjectFact(fn.Object(), new(IsPure)) { - return true - } - if fn.Pkg != irpkg { - // Function is in another package but wasn't marked as - // pure, ergo it isn't pure - return false - } - // Break recursion - if _, ok := seen[fn]; ok { - return false - } - - seen[fn] = struct{}{} - defer func() { - if ret { - pass.ExportObjectFact(fn.Object(), &IsPure{}) - } - }() - - if functions.IsStub(fn) { - return false - } - - if _, ok := pureStdlib[fn.Object().(*types.Func).FullName()]; ok { - return true - } - - if fn.Signature.Results().Len() == 0 { - // A function with no return values is empty or is doing some - // work we cannot see (for example because of build tags); - // don't consider it pure. - return false - } - - for _, param := range fn.Params { - // TODO(dh): this may not be strictly correct. pure code - // can, to an extent, operate on non-basic types. - if _, ok := param.Type().Underlying().(*types.Basic); !ok { - return false - } - } - - // Don't consider external functions pure. 
- if fn.Blocks == nil { - return false - } - checkCall := func(common *ir.CallCommon) bool { - if common.IsInvoke() { - return false - } - builtin, ok := common.Value.(*ir.Builtin) - if !ok { - if common.StaticCallee() != fn { - if common.StaticCallee() == nil { - return false - } - if !check(common.StaticCallee()) { - return false - } - } - } else { - switch builtin.Name() { - case "len", "cap": - default: - return false - } - } - return true - } - for _, b := range fn.Blocks { - for _, ins := range b.Instrs { - switch ins := ins.(type) { - case *ir.Call: - if !checkCall(ins.Common()) { - return false - } - case *ir.Defer: - if !checkCall(&ins.Call) { - return false - } - case *ir.Select: - return false - case *ir.Send: - return false - case *ir.Go: - return false - case *ir.Panic: - return false - case *ir.Store: - return false - case *ir.FieldAddr: - return false - case *ir.Alloc: - return false - case *ir.Load: - return false - } - } - } - return true - } - for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs { - check(fn) - } - - out := PurityResult{} - for _, fact := range pass.AllObjectFacts() { - out[fact.Object.(*types.Func)] = fact.Fact.(*IsPure) - } - return out, nil -} diff --git a/vendor/honnef.co/go/tools/facts/token.go b/vendor/honnef.co/go/tools/facts/token.go deleted file mode 100644 index 26e76ff73d5..00000000000 --- a/vendor/honnef.co/go/tools/facts/token.go +++ /dev/null @@ -1,24 +0,0 @@ -package facts - -import ( - "go/ast" - "go/token" - "reflect" - - "golang.org/x/tools/go/analysis" -) - -var TokenFile = &analysis.Analyzer{ - Name: "tokenfileanalyzer", - Doc: "creates a mapping of *token.File to *ast.File", - Run: func(pass *analysis.Pass) (interface{}, error) { - m := map[*token.File]*ast.File{} - for _, af := range pass.Files { - tf := pass.Fset.File(af.Pos()) - m[tf] = af - } - return m, nil - }, - RunDespiteErrors: true, - ResultType: reflect.TypeOf(map[*token.File]*ast.File{}), -} diff --git a/vendor/honnef.co/go/tools/functions/loops.go b/vendor/honnef.co/go/tools/functions/loops.go deleted file mode 100644 index a8af7010084..00000000000 --- a/vendor/honnef.co/go/tools/functions/loops.go +++ /dev/null @@ -1,54 +0,0 @@ -package functions - -import "honnef.co/go/tools/ir" - -type Loop struct{ *ir.BlockSet } - -func FindLoops(fn *ir.Function) []Loop { - if fn.Blocks == nil { - return nil - } - tree := fn.DomPreorder() - var sets []Loop - for _, h := range tree { - for _, n := range h.Preds { - if !h.Dominates(n) { - continue - } - // n is a back-edge to h - // h is the loop header - if n == h { - set := Loop{ir.NewBlockSet(len(fn.Blocks))} - set.Add(n) - sets = append(sets, set) - continue - } - set := Loop{ir.NewBlockSet(len(fn.Blocks))} - set.Add(h) - set.Add(n) - for _, b := range allPredsBut(n, h, nil) { - set.Add(b) - } - sets = append(sets, set) - } - } - return sets -} - -func allPredsBut(b, but *ir.BasicBlock, list []*ir.BasicBlock) []*ir.BasicBlock { -outer: - for _, pred := range b.Preds { - if pred == but { - continue - } - for _, p := range list { - // TODO improve big-o complexity of this function - if pred == p { - continue outer - } - } - list = append(list, pred) - list = allPredsBut(pred, but, list) - } - return list -} diff --git a/vendor/honnef.co/go/tools/functions/stub.go b/vendor/honnef.co/go/tools/functions/stub.go deleted file mode 100644 index 4d5de10b85c..00000000000 --- a/vendor/honnef.co/go/tools/functions/stub.go +++ /dev/null @@ -1,32 +0,0 @@ -package functions - -import ( - "honnef.co/go/tools/ir" -) - -// IsStub 
reports whether a function is a stub. A function is -// considered a stub if it has no instructions or if all it does is -// return a constant value. -func IsStub(fn *ir.Function) bool { - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - switch instr.(type) { - case *ir.Const: - // const naturally has no side-effects - case *ir.Panic: - // panic is a stub if it only uses constants - case *ir.Return: - // return is a stub if it only uses constants - case *ir.DebugRef: - case *ir.Jump: - // if there are no disallowed instructions, then we're - // only jumping to the exit block (or possibly - // somewhere else that's stubby?) - default: - // all other instructions are assumed to do actual work - return false - } - } - } - return true -} diff --git a/vendor/honnef.co/go/tools/functions/terminates.go b/vendor/honnef.co/go/tools/functions/terminates.go deleted file mode 100644 index c4984673f64..00000000000 --- a/vendor/honnef.co/go/tools/functions/terminates.go +++ /dev/null @@ -1,70 +0,0 @@ -package functions - -import ( - "go/types" - - "honnef.co/go/tools/ir" -) - -// Terminates reports whether fn is supposed to return, that is if it -// has at least one theoretic path that returns from the function. -// Explicit panics do not count as terminating. -func Terminates(fn *ir.Function) bool { - if fn.Blocks == nil { - // assuming that a function terminates is the conservative - // choice - return true - } - - for _, block := range fn.Blocks { - if _, ok := block.Control().(*ir.Return); ok { - if len(block.Preds) == 0 { - return true - } - for _, pred := range block.Preds { - switch ctrl := pred.Control().(type) { - case *ir.Panic: - // explicit panics do not count as terminating - case *ir.If: - // Check if we got here by receiving from a closed - // time.Tick channel – this cannot happen at - // runtime and thus doesn't constitute termination - iff := ctrl - if !ok { - return true - } - ex, ok := iff.Cond.(*ir.Extract) - if !ok { - return true - } - if ex.Index != 1 { - return true - } - recv, ok := ex.Tuple.(*ir.Recv) - if !ok { - return true - } - call, ok := recv.Chan.(*ir.Call) - if !ok { - return true - } - fn, ok := call.Common().Value.(*ir.Function) - if !ok { - return true - } - fn2, ok := fn.Object().(*types.Func) - if !ok { - return true - } - if fn2.FullName() != "time.Tick" { - return true - } - default: - // we've reached the exit block - return true - } - } - } - } - return false -} diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/callee.go b/vendor/honnef.co/go/tools/go/types/typeutil/callee.go deleted file mode 100644 index 38f596daf9e..00000000000 --- a/vendor/honnef.co/go/tools/go/types/typeutil/callee.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeutil - -import ( - "go/ast" - "go/types" - - "golang.org/x/tools/go/ast/astutil" -) - -// Callee returns the named target of a function call, if any: -// a function, method, builtin, or variable. -func Callee(info *types.Info, call *ast.CallExpr) types.Object { - var obj types.Object - switch fun := astutil.Unparen(call.Fun).(type) { - case *ast.Ident: - obj = info.Uses[fun] // type, var, builtin, or declared func - case *ast.SelectorExpr: - if sel, ok := info.Selections[fun]; ok { - obj = sel.Obj() // method or field - } else { - obj = info.Uses[fun.Sel] // qualified identifier? 
- } - } - if _, ok := obj.(*types.TypeName); ok { - return nil // T(x) is a conversion, not a call - } - return obj -} - -// StaticCallee returns the target (function or method) of a static -// function call, if any. It returns nil for calls to builtins. -func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { - if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { - return f - } - return nil -} - -func interfaceMethod(f *types.Func) bool { - recv := f.Type().(*types.Signature).Recv() - return recv != nil && types.IsInterface(recv.Type()) -} diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/identical.go b/vendor/honnef.co/go/tools/go/types/typeutil/identical.go deleted file mode 100644 index c0ca441c327..00000000000 --- a/vendor/honnef.co/go/tools/go/types/typeutil/identical.go +++ /dev/null @@ -1,75 +0,0 @@ -package typeutil - -import ( - "go/types" -) - -// Identical reports whether x and y are identical types. -// Unlike types.Identical, receivers of Signature types are not ignored. -// Unlike types.Identical, interfaces are compared via pointer equality (except for the empty interface, which gets deduplicated). -// Unlike types.Identical, structs are compared via pointer equality. -func Identical(x, y types.Type) (ret bool) { - if !types.Identical(x, y) { - return false - } - - switch x := x.(type) { - case *types.Struct: - y, ok := y.(*types.Struct) - if !ok { - // should be impossible - return true - } - return x == y - case *types.Interface: - // The issue with interfaces, typeutil.Map and types.Identical - // - // types.Identical, when comparing two interfaces, only looks at the set - // of all methods, not differentiating between implicit (embedded) and - // explicit methods. - // - // When we see the following two types, in source order - // - // type I1 interface { foo() } - // type I2 interface { I1 } - // - // then we will first correctly process I1 and its underlying type. When - // we get to I2, we will see that its underlying type is identical to - // that of I1 and not process it again. This, however, means that we will - // not record the fact that I2 embeds I1. If only I2 is reachable via the - // graph root, then I1 will not be considered used. - // - // We choose to be lazy and compare interfaces by their - // pointers. This will obviously miss identical interfaces, - // but this only has a runtime cost, it doesn't affect - // correctness. - y, ok := y.(*types.Interface) - if !ok { - // should be impossible - return true - } - if x.NumEmbeddeds() == 0 && - y.NumEmbeddeds() == 0 && - x.NumMethods() == 0 && - y.NumMethods() == 0 { - // all truly empty interfaces are the same - return true - } - return x == y - case *types.Signature: - y, ok := y.(*types.Signature) - if !ok { - // should be impossible - return true - } - if x.Recv() == y.Recv() { - return true - } - if x.Recv() == nil || y.Recv() == nil { - return false - } - return Identical(x.Recv().Type(), y.Recv().Type()) - default: - return true - } -} diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/imports.go b/vendor/honnef.co/go/tools/go/types/typeutil/imports.go deleted file mode 100644 index 9c441dba9c0..00000000000 --- a/vendor/honnef.co/go/tools/go/types/typeutil/imports.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package typeutil - -import "go/types" - -// Dependencies returns all dependencies of the specified packages. -// -// Dependent packages appear in topological order: if package P imports -// package Q, Q appears earlier than P in the result. -// The algorithm follows import statements in the order they -// appear in the source code, so the result is a total order. -// -func Dependencies(pkgs ...*types.Package) []*types.Package { - var result []*types.Package - seen := make(map[*types.Package]bool) - var visit func(pkgs []*types.Package) - visit = func(pkgs []*types.Package) { - for _, p := range pkgs { - if !seen[p] { - seen[p] = true - visit(p.Imports()) - result = append(result, p) - } - } - } - visit(pkgs) - return result -} diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/map.go b/vendor/honnef.co/go/tools/go/types/typeutil/map.go deleted file mode 100644 index f929353ccbd..00000000000 --- a/vendor/honnef.co/go/tools/go/types/typeutil/map.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package typeutil defines various utilities for types, such as Map, -// a mapping from types.Type to interface{} values. -package typeutil - -import ( - "bytes" - "fmt" - "go/types" - "reflect" -) - -// Map is a hash-table-based mapping from types (types.Type) to -// arbitrary interface{} values. The concrete types that implement -// the Type interface are pointers. Since they are not canonicalized, -// == cannot be used to check for equivalence, and thus we cannot -// simply use a Go map. -// -// Just as with map[K]V, a nil *Map is a valid empty map. -// -// Not thread-safe. -// -// This fork handles Signatures correctly, respecting method -// receivers. Furthermore, it doesn't deduplicate interfaces or -// structs. Interfaces aren't deduplicated as not to conflate implicit -// and explicit methods. Structs aren't deduplicated because we track -// fields of each type separately. -// -type Map struct { - hasher Hasher // shared by many Maps - table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused - length int // number of map entries -} - -// entry is an entry (key/value association) in a hash bucket. -type entry struct { - key types.Type - value interface{} -} - -// SetHasher sets the hasher used by Map. -// -// All Hashers are functionally equivalent but contain internal state -// used to cache the results of hashing previously seen types. -// -// A single Hasher created by MakeHasher() may be shared among many -// Maps. This is recommended if the instances have many keys in -// common, as it will amortize the cost of hash computation. -// -// A Hasher may grow without bound as new types are seen. Even when a -// type is deleted from the map, the Hasher never shrinks, since other -// types in the map may reference the deleted type indirectly. -// -// Hashers are not thread-safe, and read-only operations such as -// Map.Lookup require updates to the hasher, so a full Mutex lock (not a -// read-lock) is require around all Map operations if a shared -// hasher is accessed from multiple threads. -// -// If SetHasher is not called, the Map will create a private hasher at -// the first call to Insert. -// -func (m *Map) SetHasher(hasher Hasher) { - m.hasher = hasher -} - -// Delete removes the entry with the given key, if any. -// It returns true if the entry was found. 
-// -func (m *Map) Delete(key types.Type) bool { - if m != nil && m.table != nil { - hash := m.hasher.Hash(key) - bucket := m.table[hash] - for i, e := range bucket { - if e.key != nil && Identical(key, e.key) { - // We can't compact the bucket as it - // would disturb iterators. - bucket[i] = entry{} - m.length-- - return true - } - } - } - return false -} - -// At returns the map entry for the given key. -// The result is nil if the entry is not present. -// -func (m *Map) At(key types.Type) interface{} { - if m != nil && m.table != nil { - for _, e := range m.table[m.hasher.Hash(key)] { - if e.key != nil && Identical(key, e.key) { - return e.value - } - } - } - return nil -} - -// Set sets the map entry for key to val, -// and returns the previous entry, if any. -func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) { - if m.table != nil { - hash := m.hasher.Hash(key) - bucket := m.table[hash] - var hole *entry - for i, e := range bucket { - if e.key == nil { - hole = &bucket[i] - } else if Identical(key, e.key) { - prev = e.value - bucket[i].value = value - return - } - } - - if hole != nil { - *hole = entry{key, value} // overwrite deleted entry - } else { - m.table[hash] = append(bucket, entry{key, value}) - } - } else { - if m.hasher.memo == nil { - m.hasher = MakeHasher() - } - hash := m.hasher.Hash(key) - m.table = map[uint32][]entry{hash: {entry{key, value}}} - } - - m.length++ - return -} - -// Len returns the number of map entries. -func (m *Map) Len() int { - if m != nil { - return m.length - } - return 0 -} - -// Iterate calls function f on each entry in the map in unspecified order. -// -// If f should mutate the map, Iterate provides the same guarantees as -// Go maps: if f deletes a map entry that Iterate has not yet reached, -// f will not be invoked for it, but if f inserts a map entry that -// Iterate has not yet reached, whether or not f will be invoked for -// it is unspecified. -// -func (m *Map) Iterate(f func(key types.Type, value interface{})) { - if m != nil { - for _, bucket := range m.table { - for _, e := range bucket { - if e.key != nil { - f(e.key, e.value) - } - } - } - } -} - -// Keys returns a new slice containing the set of map keys. -// The order is unspecified. -func (m *Map) Keys() []types.Type { - keys := make([]types.Type, 0, m.Len()) - m.Iterate(func(key types.Type, _ interface{}) { - keys = append(keys, key) - }) - return keys -} - -func (m *Map) toString(values bool) string { - if m == nil { - return "{}" - } - var buf bytes.Buffer - fmt.Fprint(&buf, "{") - sep := "" - m.Iterate(func(key types.Type, value interface{}) { - fmt.Fprint(&buf, sep) - sep = ", " - fmt.Fprint(&buf, key) - if values { - fmt.Fprintf(&buf, ": %q", value) - } - }) - fmt.Fprint(&buf, "}") - return buf.String() -} - -// String returns a string representation of the map's entries. -// Values are printed using fmt.Sprintf("%v", v). -// Order is unspecified. -// -func (m *Map) String() string { - return m.toString(true) -} - -// KeysString returns a string representation of the map's key set. -// Order is unspecified. -// -func (m *Map) KeysString() string { - return m.toString(false) -} - -//////////////////////////////////////////////////////////////////////// -// Hasher - -// A Hasher maps each type to its hash value. -// For efficiency, a hasher uses memoization; thus its memory -// footprint grows monotonically over time. -// Hashers are not thread-safe. -// Hashers have reference semantics. -// Call MakeHasher to create a Hasher. 
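The Map above exists because types.Type values are pointers that are not canonicalized, so structurally identical types can compare unequal with ==. A quick standard-library demonstration of that gap (illustrative only):

    package main

    import (
    	"fmt"
    	"go/types"
    )

    func main() {
    	intT := types.Typ[types.Int]
    	a := types.NewSlice(intT)
    	b := types.NewSlice(intT)
    	fmt.Println(a == b)                // false: two distinct pointer values
    	fmt.Println(types.Identical(a, b)) // true: structurally identical
    	// A plain map[types.Type]V would treat a and b as different keys,
    	// which is why this Map hashes types structurally instead.
    }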
-type Hasher struct { - memo map[types.Type]uint32 -} - -// MakeHasher returns a new Hasher instance. -func MakeHasher() Hasher { - return Hasher{make(map[types.Type]uint32)} -} - -// Hash computes a hash value for the given type t such that -// Identical(t, t') => Hash(t) == Hash(t'). -func (h Hasher) Hash(t types.Type) uint32 { - hash, ok := h.memo[t] - if !ok { - hash = h.hashFor(t) - h.memo[t] = hash - } - return hash -} - -// hashString computes the Fowler–Noll–Vo hash of s. -func hashString(s string) uint32 { - var h uint32 - for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -// hashFor computes the hash of t. -func (h Hasher) hashFor(t types.Type) uint32 { - // See Identical for rationale. - switch t := t.(type) { - case *types.Basic: - return uint32(t.Kind()) - - case *types.Array: - return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) - - case *types.Slice: - return 9049 + 2*h.Hash(t.Elem()) - - case *types.Struct: - var hash uint32 = 9059 - for i, n := 0, t.NumFields(); i < n; i++ { - f := t.Field(i) - if f.Anonymous() { - hash += 8861 - } - hash += hashString(t.Tag(i)) - hash += hashString(f.Name()) // (ignore f.Pkg) - hash += h.Hash(f.Type()) - } - return hash - - case *types.Pointer: - return 9067 + 2*h.Hash(t.Elem()) - - case *types.Signature: - var hash uint32 = 9091 - if t.Variadic() { - hash *= 8863 - } - return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) - - case *types.Interface: - var hash uint32 = 9103 - for i, n := 0, t.NumMethods(); i < n; i++ { - // See go/types.identicalMethods for rationale. - // Method order is not significant. - // Ignore m.Pkg(). - m := t.Method(i) - hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type()) - } - return hash - - case *types.Map: - return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) - - case *types.Chan: - return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) - - case *types.Named: - // Not safe with a copying GC; objects may move. - return uint32(reflect.ValueOf(t.Obj()).Pointer()) - - case *types.Tuple: - return h.hashTuple(t) - } - panic(t) -} - -func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { - // See go/types.identicalTypes for rationale. - n := tuple.Len() - var hash uint32 = 9137 + 2*uint32(n) - for i := 0; i < n; i++ { - hash += 3 * h.Hash(tuple.At(i).Type()) - } - return hash -} diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go b/vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go deleted file mode 100644 index 32084610f49..00000000000 --- a/vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file implements a cache of method sets. - -package typeutil - -import ( - "go/types" - "sync" -) - -// A MethodSetCache records the method set of each type T for which -// MethodSet(T) is called so that repeat queries are fast. -// The zero value is a ready-to-use cache instance. -type MethodSetCache struct { - mu sync.Mutex - named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N - others map[types.Type]*types.MethodSet // all other types -} - -// MethodSet returns the method set of type T. It is thread-safe. -// -// If cache is nil, this function is equivalent to types.NewMethodSet(T). 
-// Utility functions can thus expose an optional *MethodSetCache -// parameter to clients that care about performance. -// -func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { - if cache == nil { - return types.NewMethodSet(T) - } - cache.mu.Lock() - defer cache.mu.Unlock() - - switch T := T.(type) { - case *types.Named: - return cache.lookupNamed(T).value - - case *types.Pointer: - if N, ok := T.Elem().(*types.Named); ok { - return cache.lookupNamed(N).pointer - } - } - - // all other types - // (The map uses pointer equivalence, not type identity.) - mset := cache.others[T] - if mset == nil { - mset = types.NewMethodSet(T) - if cache.others == nil { - cache.others = make(map[types.Type]*types.MethodSet) - } - cache.others[T] = mset - } - return mset -} - -func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { - if cache.named == nil { - cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) - } - // Avoid recomputing mset(*T) for each distinct Pointer - // instance whose underlying type is a named type. - msets, ok := cache.named[named] - if !ok { - msets.value = types.NewMethodSet(named) - msets.pointer = types.NewMethodSet(types.NewPointer(named)) - cache.named[named] = msets - } - return msets -} diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/ui.go b/vendor/honnef.co/go/tools/go/types/typeutil/ui.go deleted file mode 100644 index 9849c24cef3..00000000000 --- a/vendor/honnef.co/go/tools/go/types/typeutil/ui.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeutil - -// This file defines utilities for user interfaces that display types. - -import "go/types" - -// IntuitiveMethodSet returns the intuitive method set of a type T, -// which is the set of methods you can call on an addressable value of -// that type. -// -// The result always contains MethodSet(T), and is exactly MethodSet(T) -// for interface types and for pointer-to-concrete types. -// For all other concrete types T, the result additionally -// contains each method belonging to *T if there is no identically -// named method on T itself. -// -// This corresponds to user intuition about method sets; -// this function is intended only for user interfaces. -// -// The order of the result is as for types.MethodSet(T). -// -func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { - isPointerToConcrete := func(T types.Type) bool { - ptr, ok := T.(*types.Pointer) - return ok && !types.IsInterface(ptr.Elem()) - } - - var result []*types.Selection - mset := msets.MethodSet(T) - if types.IsInterface(T) || isPointerToConcrete(T) { - for i, n := 0, mset.Len(); i < n; i++ { - result = append(result, mset.At(i)) - } - } else { - // T is some other concrete type. - // Report methods of T and *T, preferring those of T. 
- pmset := msets.MethodSet(types.NewPointer(T)) - for i, n := 0, pmset.Len(); i < n; i++ { - meth := pmset.At(i) - if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { - meth = m - } - result = append(result, meth) - } - - } - return result -} diff --git a/vendor/honnef.co/go/tools/internal/cache/cache.go b/vendor/honnef.co/go/tools/internal/cache/cache.go deleted file mode 100644 index 6b41811cf25..00000000000 --- a/vendor/honnef.co/go/tools/internal/cache/cache.go +++ /dev/null @@ -1,496 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cache implements a build artifact cache. -// -// This package is a slightly modified fork of Go's -// cmd/go/internal/cache package. -package cache - -import ( - "bytes" - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "honnef.co/go/tools/internal/renameio" -) - -// An ActionID is a cache action key, the hash of a complete description of a -// repeatable computation (command line, environment variables, -// input file contents, executable contents). -type ActionID [HashSize]byte - -// An OutputID is a cache output key, the hash of an output of a computation. -type OutputID [HashSize]byte - -// A Cache is a package cache, backed by a file system directory tree. -type Cache struct { - dir string - now func() time.Time -} - -// Open opens and returns the cache in the given directory. -// -// It is safe for multiple processes on a single machine to use the -// same cache directory in a local file system simultaneously. -// They will coordinate using operating system file locks and may -// duplicate effort but will not corrupt the cache. -// -// However, it is NOT safe for multiple processes on different machines -// to share a cache directory (for example, if the directory were stored -// in a network file system). File locking is notoriously unreliable in -// network file systems and may not suffice to protect the cache. -// -func Open(dir string) (*Cache, error) { - info, err := os.Stat(dir) - if err != nil { - return nil, err - } - if !info.IsDir() { - return nil, &os.PathError{Op: "open", Path: dir, Err: fmt.Errorf("not a directory")} - } - for i := 0; i < 256; i++ { - name := filepath.Join(dir, fmt.Sprintf("%02x", i)) - if err := os.MkdirAll(name, 0777); err != nil { - return nil, err - } - } - c := &Cache{ - dir: dir, - now: time.Now, - } - return c, nil -} - -// fileName returns the name of the file corresponding to the given id. -func (c *Cache) fileName(id [HashSize]byte, key string) string { - return filepath.Join(c.dir, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+"-"+key) -} - -var errMissing = errors.New("cache entry not found") - -const ( - // action entry file is "v1 \n" - hexSize = HashSize * 2 - entrySize = 2 + 1 + hexSize + 1 + hexSize + 1 + 20 + 1 + 20 + 1 -) - -// verify controls whether to run the cache in verify mode. -// In verify mode, the cache always returns errMissing from Get -// but then double-checks in Put that the data being written -// exactly matches any existing entry. This provides an easy -// way to detect program behavior that would have been different -// had the cache entry been returned from Get. -// -// verify is enabled by setting the environment variable -// GODEBUG=gocacheverify=1. -var verify = false - -// DebugTest is set when GODEBUG=gocachetest=1 is in the environment. 
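Note: the entrySize arithmetic above describes a fixed-width, single-line index record, and putIndexEntry later in this diff writes it with fmt.Sprintf("v1 %x %x %20d %20d\n", ...). A small standalone sketch of encoding and decoding that layout (illustrative only, not the vendored API):

// Sketch of the fixed-width cache index record: action ID, output ID,
// size and timestamp, with the integers right-aligned in 20-byte fields.
package main

import (
	"crypto/sha256"
	"fmt"
	"strconv"
	"strings"
	"time"
)

func main() {
	id := sha256.Sum256([]byte("action"))
	out := sha256.Sum256([]byte("output"))
	size := int64(1234)

	// Encode in the same shape as the removed putIndexEntry.
	rec := fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano())
	fmt.Println(len(rec)) // 2 + 1 + 64 + 1 + 64 + 1 + 20 + 1 + 20 + 1 = 175 bytes

	// Decode the size field back out of the whitespace-padded layout.
	fields := strings.Fields(strings.TrimSpace(rec))
	gotSize, _ := strconv.ParseInt(fields[3], 10, 64)
	fmt.Println(gotSize == size) // true
}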
-var DebugTest = false - -func init() { initEnv() } - -func initEnv() { - verify = false - debugHash = false - debug := strings.Split(os.Getenv("GODEBUG"), ",") - for _, f := range debug { - if f == "gocacheverify=1" { - verify = true - } - if f == "gocachehash=1" { - debugHash = true - } - if f == "gocachetest=1" { - DebugTest = true - } - } -} - -// Get looks up the action ID in the cache, -// returning the corresponding output ID and file size, if any. -// Note that finding an output ID does not guarantee that the -// saved file for that output ID is still available. -func (c *Cache) Get(id ActionID) (Entry, error) { - if verify { - return Entry{}, errMissing - } - return c.get(id) -} - -type Entry struct { - OutputID OutputID - Size int64 - Time time.Time -} - -// get is Get but does not respect verify mode, so that Put can use it. -func (c *Cache) get(id ActionID) (Entry, error) { - missing := func() (Entry, error) { - return Entry{}, errMissing - } - f, err := os.Open(c.fileName(id, "a")) - if err != nil { - return missing() - } - defer f.Close() - entry := make([]byte, entrySize+1) // +1 to detect whether f is too long - if n, err := io.ReadFull(f, entry); n != entrySize || err != io.ErrUnexpectedEOF { - return missing() - } - if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' { - return missing() - } - eid, entry := entry[3:3+hexSize], entry[3+hexSize:] - eout, entry := entry[1:1+hexSize], entry[1+hexSize:] - esize, entry := entry[1:1+20], entry[1+20:] - //lint:ignore SA4006 See https://github.com/dominikh/go-tools/issues/465 - etime, entry := entry[1:1+20], entry[1+20:] - var buf [HashSize]byte - if _, err := hex.Decode(buf[:], eid); err != nil || buf != id { - return missing() - } - if _, err := hex.Decode(buf[:], eout); err != nil { - return missing() - } - i := 0 - for i < len(esize) && esize[i] == ' ' { - i++ - } - size, err := strconv.ParseInt(string(esize[i:]), 10, 64) - if err != nil || size < 0 { - return missing() - } - i = 0 - for i < len(etime) && etime[i] == ' ' { - i++ - } - tm, err := strconv.ParseInt(string(etime[i:]), 10, 64) - if err != nil || tm < 0 { - return missing() - } - - c.used(c.fileName(id, "a")) - - return Entry{buf, size, time.Unix(0, tm)}, nil -} - -// GetFile looks up the action ID in the cache and returns -// the name of the corresponding data file. -func (c *Cache) GetFile(id ActionID) (file string, entry Entry, err error) { - entry, err = c.Get(id) - if err != nil { - return "", Entry{}, err - } - file = c.OutputFile(entry.OutputID) - info, err := os.Stat(file) - if err != nil || info.Size() != entry.Size { - return "", Entry{}, errMissing - } - return file, entry, nil -} - -// GetBytes looks up the action ID in the cache and returns -// the corresponding output bytes. -// GetBytes should only be used for data that can be expected to fit in memory. -func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) { - entry, err := c.Get(id) - if err != nil { - return nil, entry, err - } - data, _ := ioutil.ReadFile(c.OutputFile(entry.OutputID)) - if sha256.Sum256(data) != entry.OutputID { - return nil, entry, errMissing - } - return data, entry, nil -} - -// OutputFile returns the name of the cache file storing output with the given OutputID. -func (c *Cache) OutputFile(out OutputID) string { - file := c.fileName(out, "d") - c.used(file) - return file -} - -// Time constants for cache expiration. 
-// -// We set the mtime on a cache file on each use, but at most one per mtimeInterval (1 hour), -// to avoid causing many unnecessary inode updates. The mtimes therefore -// roughly reflect "time of last use" but may in fact be older by at most an hour. -// -// We scan the cache for entries to delete at most once per trimInterval (1 day). -// -// When we do scan the cache, we delete entries that have not been used for -// at least trimLimit (5 days). Statistics gathered from a month of usage by -// Go developers found that essentially all reuse of cached entries happened -// within 5 days of the previous reuse. See golang.org/issue/22990. -const ( - mtimeInterval = 1 * time.Hour - trimInterval = 24 * time.Hour - trimLimit = 5 * 24 * time.Hour -) - -// used makes a best-effort attempt to update mtime on file, -// so that mtime reflects cache access time. -// -// Because the reflection only needs to be approximate, -// and to reduce the amount of disk activity caused by using -// cache entries, used only updates the mtime if the current -// mtime is more than an hour old. This heuristic eliminates -// nearly all of the mtime updates that would otherwise happen, -// while still keeping the mtimes useful for cache trimming. -func (c *Cache) used(file string) { - info, err := os.Stat(file) - if err == nil && c.now().Sub(info.ModTime()) < mtimeInterval { - return - } - os.Chtimes(file, c.now(), c.now()) -} - -// Trim removes old cache entries that are likely not to be reused. -func (c *Cache) Trim() { - now := c.now() - - // We maintain in dir/trim.txt the time of the last completed cache trim. - // If the cache has been trimmed recently enough, do nothing. - // This is the common case. - data, _ := renameio.ReadFile(filepath.Join(c.dir, "trim.txt")) - t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) - if err == nil && now.Sub(time.Unix(t, 0)) < trimInterval { - return - } - - // Trim each of the 256 subdirectories. - // We subtract an additional mtimeInterval - // to account for the imprecision of our "last used" mtimes. - cutoff := now.Add(-trimLimit - mtimeInterval) - for i := 0; i < 256; i++ { - subdir := filepath.Join(c.dir, fmt.Sprintf("%02x", i)) - c.trimSubdir(subdir, cutoff) - } - - // Ignore errors from here: if we don't write the complete timestamp, the - // cache will appear older than it is, and we'll trim it again next time. - renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())), 0666) -} - -// trimSubdir trims a single cache subdirectory. -func (c *Cache) trimSubdir(subdir string, cutoff time.Time) { - // Read all directory entries from subdir before removing - // any files, in case removing files invalidates the file offset - // in the directory scan. Also, ignore error from f.Readdirnames, - // because we don't care about reporting the error and we still - // want to process any entries found before the error. - f, err := os.Open(subdir) - if err != nil { - return - } - names, _ := f.Readdirnames(-1) - f.Close() - - for _, name := range names { - // Remove only cache entries (xxxx-a and xxxx-d). - if !strings.HasSuffix(name, "-a") && !strings.HasSuffix(name, "-d") { - continue - } - entry := filepath.Join(subdir, name) - info, err := os.Stat(entry) - if err == nil && info.ModTime().Before(cutoff) { - os.Remove(entry) - } - } -} - -// putIndexEntry adds an entry to the cache recording that executing the action -// with the given id produces an output with the given output id (hash) and size. 
-func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify bool) error { - // Note: We expect that for one reason or another it may happen - // that repeating an action produces a different output hash - // (for example, if the output contains a time stamp or temp dir name). - // While not ideal, this is also not a correctness problem, so we - // don't make a big deal about it. In particular, we leave the action - // cache entries writable specifically so that they can be overwritten. - // - // Setting GODEBUG=gocacheverify=1 does make a big deal: - // in verify mode we are double-checking that the cache entries - // are entirely reproducible. As just noted, this may be unrealistic - // in some cases but the check is also useful for shaking out real bugs. - entry := fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano()) - - if verify && allowVerify { - old, err := c.get(id) - if err == nil && (old.OutputID != out || old.Size != size) { - // panic to show stack trace, so we can see what code is generating this cache entry. - msg := fmt.Sprintf("go: internal cache error: cache verify failed: id=%x changed:<<<\n%s\n>>>\nold: %x %d\nnew: %x %d", id, reverseHash(id), out, size, old.OutputID, old.Size) - panic(msg) - } - } - file := c.fileName(id, "a") - - // Copy file to cache directory. - mode := os.O_WRONLY | os.O_CREATE - f, err := os.OpenFile(file, mode, 0666) - if err != nil { - return err - } - _, err = f.WriteString(entry) - if err == nil { - // Truncate the file only *after* writing it. - // (This should be a no-op, but truncate just in case of previous corruption.) - // - // This differs from ioutil.WriteFile, which truncates to 0 *before* writing - // via os.O_TRUNC. Truncating only after writing ensures that a second write - // of the same content to the same file is idempotent, and does not — even - // temporarily! — undo the effect of the first write. - err = f.Truncate(int64(len(entry))) - } - if closeErr := f.Close(); err == nil { - err = closeErr - } - if err != nil { - // TODO(bcmills): This Remove potentially races with another go command writing to file. - // Can we eliminate it? - os.Remove(file) - return err - } - os.Chtimes(file, c.now(), c.now()) // mainly for tests - - return nil -} - -// Put stores the given output in the cache as the output for the action ID. -// It may read file twice. The content of file must not change between the two passes. -func (c *Cache) Put(id ActionID, file io.ReadSeeker) (OutputID, int64, error) { - return c.put(id, file, true) -} - -// PutNoVerify is like Put but disables the verify check -// when GODEBUG=goverifycache=1 is set. -// It is meant for data that is OK to cache but that we expect to vary slightly from run to run, -// like test output containing times and the like. -func (c *Cache) PutNoVerify(id ActionID, file io.ReadSeeker) (OutputID, int64, error) { - return c.put(id, file, false) -} - -func (c *Cache) put(id ActionID, file io.ReadSeeker, allowVerify bool) (OutputID, int64, error) { - // Compute output ID. - h := sha256.New() - if _, err := file.Seek(0, 0); err != nil { - return OutputID{}, 0, err - } - size, err := io.Copy(h, file) - if err != nil { - return OutputID{}, 0, err - } - var out OutputID - h.Sum(out[:0]) - - // Copy to cached output file (if not already present). - if err := c.copyFile(file, out, size); err != nil { - return out, size, err - } - - // Add to cache index. 
- return out, size, c.putIndexEntry(id, out, size, allowVerify) -} - -// PutBytes stores the given bytes in the cache as the output for the action ID. -func (c *Cache) PutBytes(id ActionID, data []byte) error { - _, _, err := c.Put(id, bytes.NewReader(data)) - return err -} - -// copyFile copies file into the cache, expecting it to have the given -// output ID and size, if that file is not present already. -func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error { - name := c.fileName(out, "d") - info, err := os.Stat(name) - if err == nil && info.Size() == size { - // Check hash. - if f, err := os.Open(name); err == nil { - h := sha256.New() - io.Copy(h, f) - f.Close() - var out2 OutputID - h.Sum(out2[:0]) - if out == out2 { - return nil - } - } - // Hash did not match. Fall through and rewrite file. - } - - // Copy file to cache directory. - mode := os.O_RDWR | os.O_CREATE - if err == nil && info.Size() > size { // shouldn't happen but fix in case - mode |= os.O_TRUNC - } - f, err := os.OpenFile(name, mode, 0666) - if err != nil { - return err - } - defer f.Close() - if size == 0 { - // File now exists with correct size. - // Only one possible zero-length file, so contents are OK too. - // Early return here makes sure there's a "last byte" for code below. - return nil - } - - // From here on, if any of the I/O writing the file fails, - // we make a best-effort attempt to truncate the file f - // before returning, to avoid leaving bad bytes in the file. - - // Copy file to f, but also into h to double-check hash. - if _, err := file.Seek(0, 0); err != nil { - f.Truncate(0) - return err - } - h := sha256.New() - w := io.MultiWriter(f, h) - if _, err := io.CopyN(w, file, size-1); err != nil { - f.Truncate(0) - return err - } - // Check last byte before writing it; writing it will make the size match - // what other processes expect to find and might cause them to start - // using the file. - buf := make([]byte, 1) - if _, err := file.Read(buf); err != nil { - f.Truncate(0) - return err - } - h.Write(buf) - sum := h.Sum(nil) - if !bytes.Equal(sum, out[:]) { - f.Truncate(0) - return fmt.Errorf("file content changed underfoot") - } - - // Commit cache file entry. - if _, err := f.Write(buf); err != nil { - f.Truncate(0) - return err - } - if err := f.Close(); err != nil { - // Data might not have been written, - // but file may look like it is the right size. - // To be extra careful, remove cached file. - os.Remove(name) - return err - } - os.Chtimes(name, c.now(), c.now()) // mainly for tests - - return nil -} diff --git a/vendor/honnef.co/go/tools/internal/cache/default.go b/vendor/honnef.co/go/tools/internal/cache/default.go deleted file mode 100644 index 3034f76a538..00000000000 --- a/vendor/honnef.co/go/tools/internal/cache/default.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - "sync" -) - -// Default returns the default cache to use. -func Default() (*Cache, error) { - defaultOnce.Do(initDefaultCache) - return defaultCache, defaultDirErr -} - -var ( - defaultOnce sync.Once - defaultCache *Cache -) - -// cacheREADME is a message stored in a README in the cache directory. -// Because the cache lives outside the normal Go trees, we leave the -// README as a courtesy to explain where it came from. 
-const cacheREADME = `This directory holds cached build artifacts from staticcheck. -` - -// initDefaultCache does the work of finding the default cache -// the first time Default is called. -func initDefaultCache() { - dir := DefaultDir() - if err := os.MkdirAll(dir, 0777); err != nil { - log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err) - } - if _, err := os.Stat(filepath.Join(dir, "README")); err != nil { - // Best effort. - ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666) - } - - c, err := Open(dir) - if err != nil { - log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err) - } - defaultCache = c -} - -var ( - defaultDirOnce sync.Once - defaultDir string - defaultDirErr error -) - -// DefaultDir returns the effective STATICCHECK_CACHE setting. -func DefaultDir() string { - // Save the result of the first call to DefaultDir for later use in - // initDefaultCache. cmd/go/main.go explicitly sets GOCACHE so that - // subprocesses will inherit it, but that means initDefaultCache can't - // otherwise distinguish between an explicit "off" and a UserCacheDir error. - - defaultDirOnce.Do(func() { - defaultDir = os.Getenv("STATICCHECK_CACHE") - if filepath.IsAbs(defaultDir) { - return - } - if defaultDir != "" { - defaultDirErr = fmt.Errorf("STATICCHECK_CACHE is not an absolute path") - return - } - - // Compute default location. - dir, err := os.UserCacheDir() - if err != nil { - defaultDirErr = fmt.Errorf("STATICCHECK_CACHE is not defined and %v", err) - return - } - defaultDir = filepath.Join(dir, "staticcheck") - }) - - return defaultDir -} diff --git a/vendor/honnef.co/go/tools/internal/cache/hash.go b/vendor/honnef.co/go/tools/internal/cache/hash.go deleted file mode 100644 index a53543ec501..00000000000 --- a/vendor/honnef.co/go/tools/internal/cache/hash.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "bytes" - "crypto/sha256" - "fmt" - "hash" - "io" - "os" - "sync" -) - -var debugHash = false // set when GODEBUG=gocachehash=1 - -// HashSize is the number of bytes in a hash. -const HashSize = 32 - -// A Hash provides access to the canonical hash function used to index the cache. -// The current implementation uses salted SHA256, but clients must not assume this. -type Hash struct { - h hash.Hash - name string // for debugging - buf *bytes.Buffer // for verify -} - -// hashSalt is a salt string added to the beginning of every hash -// created by NewHash. Using the Staticcheck version makes sure that different -// versions of the command do not address the same cache -// entries, so that a bug in one version does not affect the execution -// of other versions. This salt will result in additional ActionID files -// in the cache, but not additional copies of the large output files, -// which are still addressed by unsalted SHA256. -var hashSalt []byte - -func SetSalt(b []byte) { - hashSalt = b -} - -// Subkey returns an action ID corresponding to mixing a parent -// action ID with a string description of the subkey. 
-func Subkey(parent ActionID, desc string) ActionID { - h := sha256.New() - h.Write([]byte("subkey:")) - h.Write(parent[:]) - h.Write([]byte(desc)) - var out ActionID - h.Sum(out[:0]) - if debugHash { - fmt.Fprintf(os.Stderr, "HASH subkey %x %q = %x\n", parent, desc, out) - } - if verify { - hashDebug.Lock() - hashDebug.m[out] = fmt.Sprintf("subkey %x %q", parent, desc) - hashDebug.Unlock() - } - return out -} - -// NewHash returns a new Hash. -// The caller is expected to Write data to it and then call Sum. -func NewHash(name string) *Hash { - h := &Hash{h: sha256.New(), name: name} - if debugHash { - fmt.Fprintf(os.Stderr, "HASH[%s]\n", h.name) - } - h.Write(hashSalt) - if verify { - h.buf = new(bytes.Buffer) - } - return h -} - -// Write writes data to the running hash. -func (h *Hash) Write(b []byte) (int, error) { - if debugHash { - fmt.Fprintf(os.Stderr, "HASH[%s]: %q\n", h.name, b) - } - if h.buf != nil { - h.buf.Write(b) - } - return h.h.Write(b) -} - -// Sum returns the hash of the data written previously. -func (h *Hash) Sum() [HashSize]byte { - var out [HashSize]byte - h.h.Sum(out[:0]) - if debugHash { - fmt.Fprintf(os.Stderr, "HASH[%s]: %x\n", h.name, out) - } - if h.buf != nil { - hashDebug.Lock() - if hashDebug.m == nil { - hashDebug.m = make(map[[HashSize]byte]string) - } - hashDebug.m[out] = h.buf.String() - hashDebug.Unlock() - } - return out -} - -// In GODEBUG=gocacheverify=1 mode, -// hashDebug holds the input to every computed hash ID, -// so that we can work backward from the ID involved in a -// cache entry mismatch to a description of what should be there. -var hashDebug struct { - sync.Mutex - m map[[HashSize]byte]string -} - -// reverseHash returns the input used to compute the hash id. -func reverseHash(id [HashSize]byte) string { - hashDebug.Lock() - s := hashDebug.m[id] - hashDebug.Unlock() - return s -} - -var hashFileCache struct { - sync.Mutex - m map[string][HashSize]byte -} - -// FileHash returns the hash of the named file. -// It caches repeated lookups for a given file, -// and the cache entry for a file can be initialized -// using SetFileHash. -// The hash used by FileHash is not the same as -// the hash used by NewHash. -func FileHash(file string) ([HashSize]byte, error) { - hashFileCache.Lock() - out, ok := hashFileCache.m[file] - hashFileCache.Unlock() - - if ok { - return out, nil - } - - h := sha256.New() - f, err := os.Open(file) - if err != nil { - if debugHash { - fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err) - } - return [HashSize]byte{}, err - } - _, err = io.Copy(h, f) - f.Close() - if err != nil { - if debugHash { - fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err) - } - return [HashSize]byte{}, err - } - h.Sum(out[:0]) - if debugHash { - fmt.Fprintf(os.Stderr, "HASH %s: %x\n", file, out) - } - - SetFileHash(file, out) - return out, nil -} - -// SetFileHash sets the hash returned by FileHash for file. -func SetFileHash(file string, sum [HashSize]byte) { - hashFileCache.Lock() - if hashFileCache.m == nil { - hashFileCache.m = make(map[string][HashSize]byte) - } - hashFileCache.m[file] = sum - hashFileCache.Unlock() -} diff --git a/vendor/honnef.co/go/tools/internal/passes/buildir/buildir.go b/vendor/honnef.co/go/tools/internal/passes/buildir/buildir.go deleted file mode 100644 index 39469770248..00000000000 --- a/vendor/honnef.co/go/tools/internal/passes/buildir/buildir.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package buildir defines an Analyzer that constructs the IR -// of an error-free package and returns the set of all -// functions within it. It does not report any diagnostics itself but -// may be used as an input to other analyzers. -// -// THIS INTERFACE IS EXPERIMENTAL AND MAY BE SUBJECT TO INCOMPATIBLE CHANGE. -package buildir - -import ( - "go/ast" - "go/types" - "reflect" - - "golang.org/x/tools/go/analysis" - "honnef.co/go/tools/ir" -) - -type willExit struct{} -type willUnwind struct{} - -func (*willExit) AFact() {} -func (*willUnwind) AFact() {} - -var Analyzer = &analysis.Analyzer{ - Name: "buildir", - Doc: "build IR for later passes", - Run: run, - ResultType: reflect.TypeOf(new(IR)), - FactTypes: []analysis.Fact{new(willExit), new(willUnwind)}, -} - -// IR provides intermediate representation for all the -// non-blank source functions in the current package. -type IR struct { - Pkg *ir.Package - SrcFuncs []*ir.Function -} - -func run(pass *analysis.Pass) (interface{}, error) { - // Plundered from ssautil.BuildPackage. - - // We must create a new Program for each Package because the - // analysis API provides no place to hang a Program shared by - // all Packages. Consequently, IR Packages and Functions do not - // have a canonical representation across an analysis session of - // multiple packages. This is unlikely to be a problem in - // practice because the analysis API essentially forces all - // packages to be analysed independently, so any given call to - // Analysis.Run on a package will see only IR objects belonging - // to a single Program. - - mode := ir.GlobalDebug - - prog := ir.NewProgram(pass.Fset, mode) - - // Create IR packages for all imports. - // Order is not significant. - created := make(map[*types.Package]bool) - var createAll func(pkgs []*types.Package) - createAll = func(pkgs []*types.Package) { - for _, p := range pkgs { - if !created[p] { - created[p] = true - irpkg := prog.CreatePackage(p, nil, nil, true) - for _, fn := range irpkg.Functions { - if ast.IsExported(fn.Name()) { - var exit willExit - var unwind willUnwind - if pass.ImportObjectFact(fn.Object(), &exit) { - fn.WillExit = true - } - if pass.ImportObjectFact(fn.Object(), &unwind) { - fn.WillUnwind = true - } - } - } - createAll(p.Imports()) - } - } - } - createAll(pass.Pkg.Imports()) - - // Create and build the primary package. - irpkg := prog.CreatePackage(pass.Pkg, pass.Files, pass.TypesInfo, false) - irpkg.Build() - - // Compute list of source functions, including literals, - // in source order. - var addAnons func(f *ir.Function) - funcs := make([]*ir.Function, len(irpkg.Functions)) - copy(funcs, irpkg.Functions) - addAnons = func(f *ir.Function) { - for _, anon := range f.AnonFuncs { - funcs = append(funcs, anon) - addAnons(anon) - } - } - for _, fn := range irpkg.Functions { - addAnons(fn) - if fn.WillExit { - pass.ExportObjectFact(fn.Object(), new(willExit)) - } - if fn.WillUnwind { - pass.ExportObjectFact(fn.Object(), new(willUnwind)) - } - } - - return &IR{Pkg: irpkg, SrcFuncs: funcs}, nil -} diff --git a/vendor/honnef.co/go/tools/internal/renameio/renameio.go b/vendor/honnef.co/go/tools/internal/renameio/renameio.go deleted file mode 100644 index a279d1a1eba..00000000000 --- a/vendor/honnef.co/go/tools/internal/renameio/renameio.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
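Note: the removed buildir pass follows the standard golang.org/x/tools/go/analysis shape: a named Analyzer with a Run function and a typed result that dependent passes consume via pass.ResultOf. A minimal sketch of that shape with a hypothetical "funccount" pass (not part of this repository):

// Minimal sketch of an analysis.Analyzer with a typed result.
package funccount

import (
	"go/ast"
	"reflect"

	"golang.org/x/tools/go/analysis"
)

// Result is what dependent analyzers receive via pass.ResultOf[Analyzer].
type Result struct {
	Funcs []*ast.FuncDecl
}

var Analyzer = &analysis.Analyzer{
	Name:       "funccount",
	Doc:        "collect all function declarations in a package",
	Run:        run,
	ResultType: reflect.TypeOf(new(Result)),
}

func run(pass *analysis.Pass) (interface{}, error) {
	res := new(Result)
	for _, f := range pass.Files {
		for _, decl := range f.Decls {
			if fd, ok := decl.(*ast.FuncDecl); ok {
				res.Funcs = append(res.Funcs, fd)
			}
		}
	}
	return res, nil
}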
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package renameio writes files atomically by renaming temporary files. -package renameio - -import ( - "bytes" - "io" - "math/rand" - "os" - "path/filepath" - "strconv" - - "honnef.co/go/tools/internal/robustio" -) - -const patternSuffix = ".tmp" - -// Pattern returns a glob pattern that matches the unrenamed temporary files -// created when writing to filename. -func Pattern(filename string) string { - return filepath.Join(filepath.Dir(filename), filepath.Base(filename)+patternSuffix) -} - -// WriteFile is like ioutil.WriteFile, but first writes data to an arbitrary -// file in the same directory as filename, then renames it atomically to the -// final name. -// -// That ensures that the final location, if it exists, is always a complete file. -func WriteFile(filename string, data []byte, perm os.FileMode) (err error) { - return WriteToFile(filename, bytes.NewReader(data), perm) -} - -// WriteToFile is a variant of WriteFile that accepts the data as an io.Reader -// instead of a slice. -func WriteToFile(filename string, data io.Reader, perm os.FileMode) (err error) { - f, err := tempFile(filepath.Dir(filename), filepath.Base(filename), perm) - if err != nil { - return err - } - defer func() { - // Only call os.Remove on f.Name() if we failed to rename it: otherwise, - // some other process may have created a new file with the same name after - // that. - if err != nil { - f.Close() - os.Remove(f.Name()) - } - }() - - if _, err := io.Copy(f, data); err != nil { - return err - } - // Sync the file before renaming it: otherwise, after a crash the reader may - // observe a 0-length file instead of the actual contents. - // See https://golang.org/issue/22397#issuecomment-380831736. - if err := f.Sync(); err != nil { - return err - } - if err := f.Close(); err != nil { - return err - } - - return robustio.Rename(f.Name(), filename) -} - -// tempFile creates a new temporary file with given permission bits. -func tempFile(dir, prefix string, perm os.FileMode) (f *os.File, err error) { - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+strconv.Itoa(rand.Intn(1000000000))+patternSuffix) - f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm) - if os.IsExist(err) { - continue - } - break - } - return -} - -// ReadFile is like ioutil.ReadFile, but on Windows retries spurious errors that -// may occur if the file is concurrently replaced. -// -// Errors are classified heuristically and retries are bounded, so even this -// function may occasionally return a spurious error on Windows. -// If so, the error will likely wrap one of: -// - syscall.ERROR_ACCESS_DENIED -// - syscall.ERROR_FILE_NOT_FOUND -// - internal/syscall/windows.ERROR_SHARING_VIOLATION -func ReadFile(filename string) ([]byte, error) { - return robustio.ReadFile(filename) -} diff --git a/vendor/honnef.co/go/tools/internal/robustio/robustio.go b/vendor/honnef.co/go/tools/internal/robustio/robustio.go deleted file mode 100644 index 76e47ad1ffa..00000000000 --- a/vendor/honnef.co/go/tools/internal/robustio/robustio.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package robustio wraps I/O functions that are prone to failure on Windows, -// transparently retrying errors up to an arbitrary timeout. 
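Note: the removed renameio package writes to a temporary file in the target directory, syncs it, and only then renames it into place, so readers never observe a partially written file. A condensed standalone sketch of that pattern, using a hypothetical writeAtomic helper rather than the vendored API:

// Write-temp-then-rename: the rename is the commit point.
package main

import (
	"log"
	"os"
	"path/filepath"
)

func writeAtomic(filename string, data []byte, perm os.FileMode) error {
	tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)+".tmp")
	if err != nil {
		return err
	}
	// Best-effort cleanup if we bail out before the rename; harmless afterwards.
	defer os.Remove(tmp.Name())

	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	// Sync before renaming so a crash never exposes a truncated file.
	if err := tmp.Sync(); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	if err := os.Chmod(tmp.Name(), perm); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), filename)
}

func main() {
	if err := writeAtomic("trim.txt", []byte("1700000000\n"), 0666); err != nil {
		log.Fatal(err)
	}
}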
-// -// Errors are classified heuristically and retries are bounded, so the functions -// in this package do not completely eliminate spurious errors. However, they do -// significantly reduce the rate of failure in practice. -// -// If so, the error will likely wrap one of: -// The functions in this package do not completely eliminate spurious errors, -// but substantially reduce their rate of occurrence in practice. -package robustio - -// Rename is like os.Rename, but on Windows retries errors that may occur if the -// file is concurrently read or overwritten. -// -// (See golang.org/issue/31247 and golang.org/issue/32188.) -func Rename(oldpath, newpath string) error { - return rename(oldpath, newpath) -} - -// ReadFile is like ioutil.ReadFile, but on Windows retries errors that may -// occur if the file is concurrently replaced. -// -// (See golang.org/issue/31247 and golang.org/issue/32188.) -func ReadFile(filename string) ([]byte, error) { - return readFile(filename) -} - -// RemoveAll is like os.RemoveAll, but on Windows retries errors that may occur -// if an executable file in the directory has recently been executed. -// -// (See golang.org/issue/19491.) -func RemoveAll(path string) error { - return removeAll(path) -} - -// IsEphemeralError reports whether err is one of the errors that the functions -// in this package attempt to mitigate. -// -// Errors considered ephemeral include: -// - syscall.ERROR_ACCESS_DENIED -// - syscall.ERROR_FILE_NOT_FOUND -// - internal/syscall/windows.ERROR_SHARING_VIOLATION -// -// This set may be expanded in the future; programs must not rely on the -// non-ephemerality of any given error. -func IsEphemeralError(err error) bool { - return isEphemeralError(err) -} diff --git a/vendor/honnef.co/go/tools/internal/robustio/robustio_darwin.go b/vendor/honnef.co/go/tools/internal/robustio/robustio_darwin.go deleted file mode 100644 index 1ac0d10d7f1..00000000000 --- a/vendor/honnef.co/go/tools/internal/robustio/robustio_darwin.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package robustio - -import ( - "os" - "syscall" -) - -const errFileNotFound = syscall.ENOENT - -// isEphemeralError returns true if err may be resolved by waiting. -func isEphemeralError(err error) bool { - switch werr := err.(type) { - case *os.PathError: - err = werr.Err - case *os.LinkError: - err = werr.Err - case *os.SyscallError: - err = werr.Err - - } - if errno, ok := err.(syscall.Errno); ok { - return errno == errFileNotFound - } - return false -} diff --git a/vendor/honnef.co/go/tools/internal/robustio/robustio_flaky.go b/vendor/honnef.co/go/tools/internal/robustio/robustio_flaky.go deleted file mode 100644 index e0bf5b9b3b9..00000000000 --- a/vendor/honnef.co/go/tools/internal/robustio/robustio_flaky.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows darwin - -package robustio - -import ( - "io/ioutil" - "math/rand" - "os" - "syscall" - "time" -) - -const arbitraryTimeout = 500 * time.Millisecond - -const ERROR_SHARING_VIOLATION = 32 - -// retry retries ephemeral errors from f up to an arbitrary timeout -// to work around filesystem flakiness on Windows and Darwin. 
-func retry(f func() (err error, mayRetry bool)) error { - var ( - bestErr error - lowestErrno syscall.Errno - start time.Time - nextSleep time.Duration = 1 * time.Millisecond - ) - for { - err, mayRetry := f() - if err == nil || !mayRetry { - return err - } - - if errno, ok := err.(syscall.Errno); ok && (lowestErrno == 0 || errno < lowestErrno) { - bestErr = err - lowestErrno = errno - } else if bestErr == nil { - bestErr = err - } - - if start.IsZero() { - start = time.Now() - } else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout { - break - } - time.Sleep(nextSleep) - nextSleep += time.Duration(rand.Int63n(int64(nextSleep))) - } - - return bestErr -} - -// rename is like os.Rename, but retries ephemeral errors. -// -// On windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with -// MOVEFILE_REPLACE_EXISTING. -// -// Windows also provides a different system call, ReplaceFile, -// that provides similar semantics, but perhaps preserves more metadata. (The -// documentation on the differences between the two is very sparse.) -// -// Empirical error rates with MoveFileEx are lower under modest concurrency, so -// for now we're sticking with what the os package already provides. -func rename(oldpath, newpath string) (err error) { - return retry(func() (err error, mayRetry bool) { - err = os.Rename(oldpath, newpath) - return err, isEphemeralError(err) - }) -} - -// readFile is like ioutil.ReadFile, but retries ephemeral errors. -func readFile(filename string) ([]byte, error) { - var b []byte - err := retry(func() (err error, mayRetry bool) { - b, err = ioutil.ReadFile(filename) - - // Unlike in rename, we do not retry errFileNotFound here: it can occur - // as a spurious error, but the file may also genuinely not exist, so the - // increase in robustness is probably not worth the extra latency. - - return err, isEphemeralError(err) && err != errFileNotFound - }) - return b, err -} - -func removeAll(path string) error { - return retry(func() (err error, mayRetry bool) { - err = os.RemoveAll(path) - return err, isEphemeralError(err) - }) -} diff --git a/vendor/honnef.co/go/tools/internal/robustio/robustio_other.go b/vendor/honnef.co/go/tools/internal/robustio/robustio_other.go deleted file mode 100644 index a2428856f2e..00000000000 --- a/vendor/honnef.co/go/tools/internal/robustio/robustio_other.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//+build !windows,!darwin - -package robustio - -import ( - "io/ioutil" - "os" -) - -func rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} - -func readFile(filename string) ([]byte, error) { - return ioutil.ReadFile(filename) -} - -func removeAll(path string) error { - return os.RemoveAll(path) -} - -func isEphemeralError(err error) bool { - return false -} diff --git a/vendor/honnef.co/go/tools/internal/robustio/robustio_windows.go b/vendor/honnef.co/go/tools/internal/robustio/robustio_windows.go deleted file mode 100644 index a35237d44ae..00000000000 --- a/vendor/honnef.co/go/tools/internal/robustio/robustio_windows.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
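Note: the retry loop above keeps re-running an operation while the error looks transient, sleeping a little longer each round until a fixed deadline. A simplified standalone sketch of the same bounded-retry idea (the vendored version additionally adds random jitter and prefers the lowest errno seen):

// Bounded retry with a growing sleep between attempts.
package main

import (
	"errors"
	"fmt"
	"time"
)

func retry(timeout time.Duration, f func() (err error, mayRetry bool)) error {
	start := time.Now()
	sleep := 1 * time.Millisecond
	for {
		err, mayRetry := f()
		if err == nil || !mayRetry {
			return err
		}
		if time.Since(start)+sleep >= timeout {
			return err // deadline reached: report the last error
		}
		time.Sleep(sleep)
		sleep *= 2
	}
}

func main() {
	attempts := 0
	err := retry(500*time.Millisecond, func() (error, bool) {
		attempts++
		if attempts < 3 {
			return errors.New("transient"), true
		}
		return nil, false
	})
	fmt.Println(err, attempts) // <nil> 3
}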
- -package robustio - -import ( - "os" - "syscall" -) - -const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND - -// isEphemeralError returns true if err may be resolved by waiting. -func isEphemeralError(err error) bool { - switch werr := err.(type) { - case *os.PathError: - err = werr.Err - case *os.LinkError: - err = werr.Err - case *os.SyscallError: - err = werr.Err - } - if errno, ok := err.(syscall.Errno); ok { - switch errno { - case syscall.ERROR_ACCESS_DENIED, - syscall.ERROR_FILE_NOT_FOUND, - ERROR_SHARING_VIOLATION: - return true - } - } - return false -} diff --git a/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go b/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go deleted file mode 100644 index e9abf0d893e..00000000000 --- a/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go +++ /dev/null @@ -1,71 +0,0 @@ -package sharedcheck - -import ( - "go/ast" - "go/types" - - "golang.org/x/tools/go/analysis" - "honnef.co/go/tools/code" - "honnef.co/go/tools/internal/passes/buildir" - "honnef.co/go/tools/ir" - . "honnef.co/go/tools/lint/lintdsl" -) - -func CheckRangeStringRunes(pass *analysis.Pass) (interface{}, error) { - for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs { - cb := func(node ast.Node) bool { - rng, ok := node.(*ast.RangeStmt) - if !ok || !code.IsBlank(rng.Key) { - return true - } - - v, _ := fn.ValueForExpr(rng.X) - - // Check that we're converting from string to []rune - val, _ := v.(*ir.Convert) - if val == nil { - return true - } - Tsrc, ok := val.X.Type().(*types.Basic) - if !ok || Tsrc.Kind() != types.String { - return true - } - Tdst, ok := val.Type().(*types.Slice) - if !ok { - return true - } - TdstElem, ok := Tdst.Elem().(*types.Basic) - if !ok || TdstElem.Kind() != types.Int32 { - return true - } - - // Check that the result of the conversion is only used to - // range over - refs := val.Referrers() - if refs == nil { - return true - } - - // Expect two refs: one for obtaining the length of the slice, - // one for accessing the elements - if len(code.FilterDebug(*refs)) != 2 { - // TODO(dh): right now, we check that only one place - // refers to our slice. This will miss cases such as - // ranging over the slice twice. Ideally, we'd ensure that - // the slice is only used for ranging over (without - // accessing the key), but that is harder to do because in - // IR form, ranging over a slice looks like an ordinary - // loop with index increments and slice accesses. We'd - // have to look at the associated AST node to check that - // it's a range statement. - return true - } - - pass.Reportf(rng.Pos(), "should range over string, not []rune(string)") - - return true - } - Inspect(fn.Source(), cb) - } - return nil, nil -} diff --git a/vendor/honnef.co/go/tools/ir/blockopt.go b/vendor/honnef.co/go/tools/ir/blockopt.go deleted file mode 100644 index d7a0e35676a..00000000000 --- a/vendor/honnef.co/go/tools/ir/blockopt.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ir - -// Simple block optimizations to simplify the control flow graph. - -// TODO(adonovan): opt: instead of creating several "unreachable" blocks -// per function in the Builder, reuse a single one (e.g. at Blocks[1]) -// to reduce garbage. - -import ( - "fmt" - "os" -) - -// If true, perform sanity checking and show progress at each -// successive iteration of optimizeBlocks. Very verbose. 
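Note: the removed CheckRangeStringRunes pass reports "should range over string, not []rune(string)" when the range key is blank, because ranging over a string already yields runes and the conversion only allocates an extra slice (the indices differ, which is why the check fires only when the key is unused). A small example of the flagged and preferred forms:

// Flagged vs. preferred way to iterate runes when the index is unused.
package main

import "fmt"

func main() {
	s := "héllo"

	// Flagged form: []rune(s) allocates a new slice just to iterate it.
	for _, r := range []rune(s) {
		fmt.Printf("%c", r)
	}
	fmt.Println()

	// Preferred form: ranging over the string decodes runes in place.
	for _, r := range s {
		fmt.Printf("%c", r)
	}
	fmt.Println()
}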
-const debugBlockOpt = false - -// markReachable sets Index=-1 for all blocks reachable from b. -func markReachable(b *BasicBlock) { - b.gaps = -1 - for _, succ := range b.Succs { - if succ.gaps == 0 { - markReachable(succ) - } - } -} - -// deleteUnreachableBlocks marks all reachable blocks of f and -// eliminates (nils) all others, including possibly cyclic subgraphs. -// -func deleteUnreachableBlocks(f *Function) { - const white, black = 0, -1 - // We borrow b.gaps temporarily as the mark bit. - for _, b := range f.Blocks { - b.gaps = white - } - markReachable(f.Blocks[0]) - // In SSI form, we need the exit to be reachable for correct - // post-dominance information. In original form, however, we - // cannot unconditionally mark it reachable because we won't - // be adding fake edges, and this breaks the calculation of - // dominance information. - markReachable(f.Exit) - for i, b := range f.Blocks { - if b.gaps == white { - for _, c := range b.Succs { - if c.gaps == black { - c.removePred(b) // delete white->black edge - } - } - if debugBlockOpt { - fmt.Fprintln(os.Stderr, "unreachable", b) - } - f.Blocks[i] = nil // delete b - } - } - f.removeNilBlocks() -} - -// jumpThreading attempts to apply simple jump-threading to block b, -// in which a->b->c become a->c if b is just a Jump. -// The result is true if the optimization was applied. -// -func jumpThreading(f *Function, b *BasicBlock) bool { - if b.Index == 0 { - return false // don't apply to entry block - } - if b.Instrs == nil { - return false - } - for _, pred := range b.Preds { - switch pred.Control().(type) { - case *ConstantSwitch: - // don't optimize away the head blocks of switch statements - return false - } - } - if _, ok := b.Instrs[0].(*Jump); !ok { - return false // not just a jump - } - c := b.Succs[0] - if c == b { - return false // don't apply to degenerate jump-to-self. - } - if c.hasPhi() { - return false // not sound without more effort - } - for j, a := range b.Preds { - a.replaceSucc(b, c) - - // If a now has two edges to c, replace its degenerate If by Jump. - if len(a.Succs) == 2 && a.Succs[0] == c && a.Succs[1] == c { - jump := new(Jump) - jump.setBlock(a) - a.Instrs[len(a.Instrs)-1] = jump - a.Succs = a.Succs[:1] - c.removePred(b) - } else { - if j == 0 { - c.replacePred(b, a) - } else { - c.Preds = append(c.Preds, a) - } - } - - if debugBlockOpt { - fmt.Fprintln(os.Stderr, "jumpThreading", a, b, c) - } - } - f.Blocks[b.Index] = nil // delete b - return true -} - -// fuseBlocks attempts to apply the block fusion optimization to block -// a, in which a->b becomes ab if len(a.Succs)==len(b.Preds)==1. -// The result is true if the optimization was applied. -// -func fuseBlocks(f *Function, a *BasicBlock) bool { - if len(a.Succs) != 1 { - return false - } - if a.Succs[0] == f.Exit { - return false - } - b := a.Succs[0] - if len(b.Preds) != 1 { - return false - } - if _, ok := a.Instrs[len(a.Instrs)-1].(*Panic); ok { - // panics aren't simple jumps, they have side effects. - return false - } - - // Degenerate &&/|| ops may result in a straight-line CFG - // containing φ-nodes. (Ideally we'd replace such them with - // their sole operand but that requires Referrers, built later.) - if b.hasPhi() { - return false // not sound without further effort - } - - // Eliminate jump at end of A, then copy all of B across. - a.Instrs = append(a.Instrs[:len(a.Instrs)-1], b.Instrs...) - for _, instr := range b.Instrs { - instr.setBlock(a) - } - - // A inherits B's successors - a.Succs = append(a.succs2[:0], b.Succs...) 
- - // Fix up Preds links of all successors of B. - for _, c := range b.Succs { - c.replacePred(b, a) - } - - if debugBlockOpt { - fmt.Fprintln(os.Stderr, "fuseBlocks", a, b) - } - - f.Blocks[b.Index] = nil // delete b - return true -} - -// optimizeBlocks() performs some simple block optimizations on a -// completed function: dead block elimination, block fusion, jump -// threading. -// -func optimizeBlocks(f *Function) { - if debugBlockOpt { - f.WriteTo(os.Stderr) - mustSanityCheck(f, nil) - } - - deleteUnreachableBlocks(f) - - // Loop until no further progress. - changed := true - for changed { - changed = false - - if debugBlockOpt { - f.WriteTo(os.Stderr) - mustSanityCheck(f, nil) - } - - for _, b := range f.Blocks { - // f.Blocks will temporarily contain nils to indicate - // deleted blocks; we remove them at the end. - if b == nil { - continue - } - - // Fuse blocks. b->c becomes bc. - if fuseBlocks(f, b) { - changed = true - } - - // a->b->c becomes a->c if b contains only a Jump. - if jumpThreading(f, b) { - changed = true - continue // (b was disconnected) - } - } - } - f.removeNilBlocks() -} diff --git a/vendor/honnef.co/go/tools/ir/builder.go b/vendor/honnef.co/go/tools/ir/builder.go deleted file mode 100644 index fdf4cb1a91a..00000000000 --- a/vendor/honnef.co/go/tools/ir/builder.go +++ /dev/null @@ -1,2474 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ir - -// This file implements the BUILD phase of IR construction. -// -// IR construction has two phases, CREATE and BUILD. In the CREATE phase -// (create.go), all packages are constructed and type-checked and -// definitions of all package members are created, method-sets are -// computed, and wrapper methods are synthesized. -// ir.Packages are created in arbitrary order. -// -// In the BUILD phase (builder.go), the builder traverses the AST of -// each Go source function and generates IR instructions for the -// function body. Initializer expressions for package-level variables -// are emitted to the package's init() function in the order specified -// by go/types.Info.InitOrder, then code for each function in the -// package is generated in lexical order. -// -// The builder's and Program's indices (maps) are populated and -// mutated during the CREATE phase, but during the BUILD phase they -// remain constant. The sole exception is Prog.methodSets and its -// related maps, which are protected by a dedicated mutex. - -import ( - "fmt" - "go/ast" - "go/constant" - "go/token" - "go/types" - "os" -) - -type opaqueType struct { - types.Type - name string -} - -func (t *opaqueType) String() string { return t.name } - -var ( - varOk = newVar("ok", tBool) - varIndex = newVar("index", tInt) - - // Type constants. - tBool = types.Typ[types.Bool] - tByte = types.Typ[types.Byte] - tInt = types.Typ[types.Int] - tInvalid = types.Typ[types.Invalid] - tString = types.Typ[types.String] - tUntypedNil = types.Typ[types.UntypedNil] - tRangeIter = &opaqueType{nil, "iter"} // the type of all "range" iterators - tEface = types.NewInterfaceType(nil, nil).Complete() -) - -// builder holds state associated with the package currently being built. -// Its methods contain all the logic for AST-to-IR conversion. 
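Note: jumpThreading above rewires a->b->c into a->c whenever block b contains nothing but an unconditional jump. A toy standalone illustration of that rewiring on a made-up block type (not the vendored ir.BasicBlock, which also has to preserve phi nodes and predecessor lists):

// Toy jump threading over a simplified CFG.
package main

import "fmt"

type block struct {
	name     string
	succs    []*block
	jumpOnly bool // true if the block contains only an unconditional jump
}

// threadJumps redirects any edge into a pure-jump block straight to its
// single successor, skipping degenerate jumps-to-self.
func threadJumps(blocks []*block) {
	for _, a := range blocks {
		for i, s := range a.succs {
			if s.jumpOnly && len(s.succs) == 1 && s.succs[0] != s {
				a.succs[i] = s.succs[0]
			}
		}
	}
}

func main() {
	c := &block{name: "c"}
	b := &block{name: "b", succs: []*block{c}, jumpOnly: true}
	a := &block{name: "a", succs: []*block{b}}

	threadJumps([]*block{a, b, c})
	fmt.Println(a.succs[0].name) // c
}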
-type builder struct { - printFunc string - - blocksets [5]BlockSet -} - -// cond emits to fn code to evaluate boolean condition e and jump -// to t or f depending on its value, performing various simplifications. -// -// Postcondition: fn.currentBlock is nil. -// -func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) *If { - switch e := e.(type) { - case *ast.ParenExpr: - return b.cond(fn, e.X, t, f) - - case *ast.BinaryExpr: - switch e.Op { - case token.LAND: - ltrue := fn.newBasicBlock("cond.true") - b.cond(fn, e.X, ltrue, f) - fn.currentBlock = ltrue - return b.cond(fn, e.Y, t, f) - - case token.LOR: - lfalse := fn.newBasicBlock("cond.false") - b.cond(fn, e.X, t, lfalse) - fn.currentBlock = lfalse - return b.cond(fn, e.Y, t, f) - } - - case *ast.UnaryExpr: - if e.Op == token.NOT { - return b.cond(fn, e.X, f, t) - } - } - - // A traditional compiler would simplify "if false" (etc) here - // but we do not, for better fidelity to the source code. - // - // The value of a constant condition may be platform-specific, - // and may cause blocks that are reachable in some configuration - // to be hidden from subsequent analyses such as bug-finding tools. - return emitIf(fn, b.expr(fn, e), t, f, e) -} - -// logicalBinop emits code to fn to evaluate e, a &&- or -// ||-expression whose reified boolean value is wanted. -// The value is returned. -// -func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value { - rhs := fn.newBasicBlock("binop.rhs") - done := fn.newBasicBlock("binop.done") - - // T(e) = T(e.X) = T(e.Y) after untyped constants have been - // eliminated. - // TODO(adonovan): not true; MyBool==MyBool yields UntypedBool. - t := fn.Pkg.typeOf(e) - - var short Value // value of the short-circuit path - switch e.Op { - case token.LAND: - b.cond(fn, e.X, rhs, done) - short = emitConst(fn, NewConst(constant.MakeBool(false), t)) - - case token.LOR: - b.cond(fn, e.X, done, rhs) - short = emitConst(fn, NewConst(constant.MakeBool(true), t)) - } - - // Is rhs unreachable? - if rhs.Preds == nil { - // Simplify false&&y to false, true||y to true. - fn.currentBlock = done - return short - } - - // Is done unreachable? - if done.Preds == nil { - // Simplify true&&y (or false||y) to y. - fn.currentBlock = rhs - return b.expr(fn, e.Y) - } - - // All edges from e.X to done carry the short-circuit value. - var edges []Value - for range done.Preds { - edges = append(edges, short) - } - - // The edge from e.Y to done carries the value of e.Y. - fn.currentBlock = rhs - edges = append(edges, b.expr(fn, e.Y)) - emitJump(fn, done, e) - fn.currentBlock = done - - phi := &Phi{Edges: edges} - phi.typ = t - return done.emit(phi, e) -} - -// exprN lowers a multi-result expression e to IR form, emitting code -// to fn and returning a single Value whose type is a *types.Tuple. -// The caller must access the components via Extract. -// -// Multi-result expressions include CallExprs in a multi-value -// assignment or return statement, and "value,ok" uses of -// TypeAssertExpr, IndexExpr (when X is a map), and Recv. -// -func (b *builder) exprN(fn *Function, e ast.Expr) Value { - typ := fn.Pkg.typeOf(e).(*types.Tuple) - switch e := e.(type) { - case *ast.ParenExpr: - return b.exprN(fn, e.X) - - case *ast.CallExpr: - // Currently, no built-in function nor type conversion - // has multiple results, so we can avoid some of the - // cases for single-valued CallExpr. 
- var c Call - b.setCall(fn, e, &c.Call) - c.typ = typ - return fn.emit(&c, e) - - case *ast.IndexExpr: - mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map) - lookup := &MapLookup{ - X: b.expr(fn, e.X), - Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key(), e), - CommaOk: true, - } - lookup.setType(typ) - return fn.emit(lookup, e) - - case *ast.TypeAssertExpr: - return emitTypeTest(fn, b.expr(fn, e.X), typ.At(0).Type(), e) - - case *ast.UnaryExpr: // must be receive <- - return emitRecv(fn, b.expr(fn, e.X), true, typ, e) - } - panic(fmt.Sprintf("exprN(%T) in %s", e, fn)) -} - -// builtin emits to fn IR instructions to implement a call to the -// built-in function obj with the specified arguments -// and return type. It returns the value defined by the result. -// -// The result is nil if no special handling was required; in this case -// the caller should treat this like an ordinary library function -// call. -// -func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ types.Type, source ast.Node) Value { - switch obj.Name() { - case "make": - switch typ.Underlying().(type) { - case *types.Slice: - n := b.expr(fn, args[1]) - m := n - if len(args) == 3 { - m = b.expr(fn, args[2]) - } - if m, ok := m.(*Const); ok { - // treat make([]T, n, m) as new([m]T)[:n] - cap := m.Int64() - at := types.NewArray(typ.Underlying().(*types.Slice).Elem(), cap) - alloc := emitNew(fn, at, source) - v := &Slice{ - X: alloc, - High: n, - } - v.setType(typ) - return fn.emit(v, source) - } - v := &MakeSlice{ - Len: n, - Cap: m, - } - v.setType(typ) - return fn.emit(v, source) - - case *types.Map: - var res Value - if len(args) == 2 { - res = b.expr(fn, args[1]) - } - v := &MakeMap{Reserve: res} - v.setType(typ) - return fn.emit(v, source) - - case *types.Chan: - var sz Value = emitConst(fn, intConst(0)) - if len(args) == 2 { - sz = b.expr(fn, args[1]) - } - v := &MakeChan{Size: sz} - v.setType(typ) - return fn.emit(v, source) - } - - case "new": - alloc := emitNew(fn, deref(typ), source) - return alloc - - case "len", "cap": - // Special case: len or cap of an array or *array is - // based on the type, not the value which may be nil. - // We must still evaluate the value, though. (If it - // was side-effect free, the whole call would have - // been constant-folded.) - t := deref(fn.Pkg.typeOf(args[0])).Underlying() - if at, ok := t.(*types.Array); ok { - b.expr(fn, args[0]) // for effects only - return emitConst(fn, intConst(at.Len())) - } - // Otherwise treat as normal. - - case "panic": - fn.emit(&Panic{ - X: emitConv(fn, b.expr(fn, args[0]), tEface, source), - }, source) - addEdge(fn.currentBlock, fn.Exit) - fn.currentBlock = fn.newBasicBlock("unreachable") - return emitConst(fn, NewConst(constant.MakeBool(true), tBool)) // any non-nil Value will do - } - return nil // treat all others as a regular function call -} - -// addr lowers a single-result addressable expression e to IR form, -// emitting code to fn and returning the location (an lvalue) defined -// by the expression. -// -// If escaping is true, addr marks the base variable of the -// addressable expression e as being a potentially escaping pointer -// value. For example, in this code: -// -// a := A{ -// b: [1]B{B{c: 1}} -// } -// return &a.b[0].c -// -// the application of & causes a.b[0].c to have its address taken, -// which means that ultimately the local variable a must be -// heap-allocated. This is a simple but very conservative escape -// analysis. 
-// -// Operations forming potentially escaping pointers include: -// - &x, including when implicit in method call or composite literals. -// - a[:] iff a is an array (not *array) -// - references to variables in lexically enclosing functions. -// -func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue { - switch e := e.(type) { - case *ast.Ident: - if isBlankIdent(e) { - return blank{} - } - obj := fn.Pkg.objectOf(e) - v := fn.Prog.packageLevelValue(obj) // var (address) - if v == nil { - v = fn.lookup(obj, escaping) - } - return &address{addr: v, expr: e} - - case *ast.CompositeLit: - t := deref(fn.Pkg.typeOf(e)) - var v *Alloc - if escaping { - v = emitNew(fn, t, e) - } else { - v = fn.addLocal(t, e) - } - var sb storebuf - b.compLit(fn, v, e, true, &sb) - sb.emit(fn) - return &address{addr: v, expr: e} - - case *ast.ParenExpr: - return b.addr(fn, e.X, escaping) - - case *ast.SelectorExpr: - sel, ok := fn.Pkg.info.Selections[e] - if !ok { - // qualified identifier - return b.addr(fn, e.Sel, escaping) - } - if sel.Kind() != types.FieldVal { - panic(sel) - } - wantAddr := true - v := b.receiver(fn, e.X, wantAddr, escaping, sel, e) - last := len(sel.Index()) - 1 - return &address{ - addr: emitFieldSelection(fn, v, sel.Index()[last], true, e.Sel), - expr: e.Sel, - } - - case *ast.IndexExpr: - var x Value - var et types.Type - switch t := fn.Pkg.typeOf(e.X).Underlying().(type) { - case *types.Array: - x = b.addr(fn, e.X, escaping).address(fn) - et = types.NewPointer(t.Elem()) - case *types.Pointer: // *array - x = b.expr(fn, e.X) - et = types.NewPointer(t.Elem().Underlying().(*types.Array).Elem()) - case *types.Slice: - x = b.expr(fn, e.X) - et = types.NewPointer(t.Elem()) - case *types.Map: - return &element{ - m: b.expr(fn, e.X), - k: emitConv(fn, b.expr(fn, e.Index), t.Key(), e.Index), - t: t.Elem(), - } - default: - panic("unexpected container type in IndexExpr: " + t.String()) - } - v := &IndexAddr{ - X: x, - Index: emitConv(fn, b.expr(fn, e.Index), tInt, e.Index), - } - v.setType(et) - return &address{addr: fn.emit(v, e), expr: e} - - case *ast.StarExpr: - return &address{addr: b.expr(fn, e.X), expr: e} - } - - panic(fmt.Sprintf("unexpected address expression: %T", e)) -} - -type store struct { - lhs lvalue - rhs Value - source ast.Node -} - -type storebuf struct{ stores []store } - -func (sb *storebuf) store(lhs lvalue, rhs Value, source ast.Node) { - sb.stores = append(sb.stores, store{lhs, rhs, source}) -} - -func (sb *storebuf) emit(fn *Function) { - for _, s := range sb.stores { - s.lhs.store(fn, s.rhs, s.source) - } -} - -// assign emits to fn code to initialize the lvalue loc with the value -// of expression e. If isZero is true, assign assumes that loc holds -// the zero value for its type. -// -// This is equivalent to loc.store(fn, b.expr(fn, e)), but may generate -// better code in some cases, e.g., for composite literals in an -// addressable location. -// -// If sb is not nil, assign generates code to evaluate expression e, but -// not to update loc. Instead, the necessary stores are appended to the -// storebuf sb so that they can be executed later. This allows correct -// in-place update of existing variables when the RHS is a composite -// literal that may reference parts of the LHS. -// -func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *storebuf, source ast.Node) { - // Can we initialize it in place? 
- if e, ok := unparen(e).(*ast.CompositeLit); ok { - // A CompositeLit never evaluates to a pointer, - // so if the type of the location is a pointer, - // an &-operation is implied. - if _, ok := loc.(blank); !ok { // avoid calling blank.typ() - if isPointer(loc.typ()) { - ptr := b.addr(fn, e, true).address(fn) - // copy address - if sb != nil { - sb.store(loc, ptr, source) - } else { - loc.store(fn, ptr, source) - } - return - } - } - - if _, ok := loc.(*address); ok { - if isInterface(loc.typ()) { - // e.g. var x interface{} = T{...} - // Can't in-place initialize an interface value. - // Fall back to copying. - } else { - // x = T{...} or x := T{...} - addr := loc.address(fn) - if sb != nil { - b.compLit(fn, addr, e, isZero, sb) - } else { - var sb storebuf - b.compLit(fn, addr, e, isZero, &sb) - sb.emit(fn) - } - - // Subtle: emit debug ref for aggregate types only; - // slice and map are handled by store ops in compLit. - switch loc.typ().Underlying().(type) { - case *types.Struct, *types.Array: - emitDebugRef(fn, e, addr, true) - } - - return - } - } - } - - // simple case: just copy - rhs := b.expr(fn, e) - if sb != nil { - sb.store(loc, rhs, source) - } else { - loc.store(fn, rhs, source) - } -} - -// expr lowers a single-result expression e to IR form, emitting code -// to fn and returning the Value defined by the expression. -// -func (b *builder) expr(fn *Function, e ast.Expr) Value { - e = unparen(e) - - tv := fn.Pkg.info.Types[e] - - // Is expression a constant? - if tv.Value != nil { - return emitConst(fn, NewConst(tv.Value, tv.Type)) - } - - var v Value - if tv.Addressable() { - // Prefer pointer arithmetic ({Index,Field}Addr) followed - // by Load over subelement extraction (e.g. Index, Field), - // to avoid large copies. - v = b.addr(fn, e, false).load(fn, e) - } else { - v = b.expr0(fn, e, tv) - } - if fn.debugInfo() { - emitDebugRef(fn, e, v, false) - } - return v -} - -func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { - switch e := e.(type) { - case *ast.BasicLit: - panic("non-constant BasicLit") // unreachable - - case *ast.FuncLit: - fn2 := &Function{ - name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)), - Signature: fn.Pkg.typeOf(e.Type).Underlying().(*types.Signature), - parent: fn, - Pkg: fn.Pkg, - Prog: fn.Prog, - functionBody: new(functionBody), - } - fn2.source = e - fn.AnonFuncs = append(fn.AnonFuncs, fn2) - fn2.initHTML(b.printFunc) - b.buildFunction(fn2) - if fn2.FreeVars == nil { - return fn2 - } - v := &MakeClosure{Fn: fn2} - v.setType(tv.Type) - for _, fv := range fn2.FreeVars { - v.Bindings = append(v.Bindings, fv.outer) - fv.outer = nil - } - return fn.emit(v, e) - - case *ast.TypeAssertExpr: // single-result form only - return emitTypeAssert(fn, b.expr(fn, e.X), tv.Type, e) - - case *ast.CallExpr: - if fn.Pkg.info.Types[e.Fun].IsType() { - // Explicit type conversion, e.g. string(x) or big.Int(x) - x := b.expr(fn, e.Args[0]) - y := emitConv(fn, x, tv.Type, e) - return y - } - // Call to "intrinsic" built-ins, e.g. new, make, panic. - if id, ok := unparen(e.Fun).(*ast.Ident); ok { - if obj, ok := fn.Pkg.info.Uses[id].(*types.Builtin); ok { - if v := b.builtin(fn, obj, e.Args, tv.Type, e); v != nil { - return v - } - } - } - // Regular function call. - var v Call - b.setCall(fn, e, &v.Call) - v.setType(tv.Type) - return fn.emit(&v, e) - - case *ast.UnaryExpr: - switch e.Op { - case token.AND: // &X --- potentially escaping. 
- addr := b.addr(fn, e.X, true) - if _, ok := unparen(e.X).(*ast.StarExpr); ok { - // &*p must panic if p is nil (http://golang.org/s/go12nil). - // For simplicity, we'll just (suboptimally) rely - // on the side effects of a load. - // TODO(adonovan): emit dedicated nilcheck. - addr.load(fn, e) - } - return addr.address(fn) - case token.ADD: - return b.expr(fn, e.X) - case token.NOT, token.SUB, token.XOR: // ! <- - ^ - v := &UnOp{ - Op: e.Op, - X: b.expr(fn, e.X), - } - v.setType(tv.Type) - return fn.emit(v, e) - case token.ARROW: - return emitRecv(fn, b.expr(fn, e.X), false, tv.Type, e) - default: - panic(e.Op) - } - - case *ast.BinaryExpr: - switch e.Op { - case token.LAND, token.LOR: - return b.logicalBinop(fn, e) - case token.SHL, token.SHR: - fallthrough - case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT: - return emitArith(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), tv.Type, e) - - case token.EQL, token.NEQ, token.GTR, token.LSS, token.LEQ, token.GEQ: - cmp := emitCompare(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), e) - // The type of x==y may be UntypedBool. - return emitConv(fn, cmp, types.Default(tv.Type), e) - default: - panic("illegal op in BinaryExpr: " + e.Op.String()) - } - - case *ast.SliceExpr: - var low, high, max Value - var x Value - switch fn.Pkg.typeOf(e.X).Underlying().(type) { - case *types.Array: - // Potentially escaping. - x = b.addr(fn, e.X, true).address(fn) - case *types.Basic, *types.Slice, *types.Pointer: // *array - x = b.expr(fn, e.X) - default: - panic("unreachable") - } - if e.High != nil { - high = b.expr(fn, e.High) - } - if e.Low != nil { - low = b.expr(fn, e.Low) - } - if e.Slice3 { - max = b.expr(fn, e.Max) - } - v := &Slice{ - X: x, - Low: low, - High: high, - Max: max, - } - v.setType(tv.Type) - return fn.emit(v, e) - - case *ast.Ident: - obj := fn.Pkg.info.Uses[e] - // Universal built-in or nil? - switch obj := obj.(type) { - case *types.Builtin: - return &Builtin{name: obj.Name(), sig: tv.Type.(*types.Signature)} - case *types.Nil: - return emitConst(fn, nilConst(tv.Type)) - } - // Package-level func or var? - if v := fn.Prog.packageLevelValue(obj); v != nil { - if _, ok := obj.(*types.Var); ok { - return emitLoad(fn, v, e) // var (address) - } - return v // (func) - } - // Local var. - return emitLoad(fn, fn.lookup(obj, false), e) // var (address) - - case *ast.SelectorExpr: - sel, ok := fn.Pkg.info.Selections[e] - if !ok { - // qualified identifier - return b.expr(fn, e.Sel) - } - switch sel.Kind() { - case types.MethodExpr: - // (*T).f or T.f, the method f from the method-set of type T. - // The result is a "thunk". - return emitConv(fn, makeThunk(fn.Prog, sel), tv.Type, e) - - case types.MethodVal: - // e.f where e is an expression and f is a method. - // The result is a "bound". - obj := sel.Obj().(*types.Func) - rt := recvType(obj) - wantAddr := isPointer(rt) - escaping := true - v := b.receiver(fn, e.X, wantAddr, escaping, sel, e) - if isInterface(rt) { - // If v has interface type I, - // we must emit a check that v is non-nil. - // We use: typeassert v.(I). 
- emitTypeAssert(fn, v, rt, e) - } - c := &MakeClosure{ - Fn: makeBound(fn.Prog, obj), - Bindings: []Value{v}, - } - c.source = e.Sel - c.setType(tv.Type) - return fn.emit(c, e) - - case types.FieldVal: - indices := sel.Index() - last := len(indices) - 1 - v := b.expr(fn, e.X) - v = emitImplicitSelections(fn, v, indices[:last], e) - v = emitFieldSelection(fn, v, indices[last], false, e.Sel) - return v - } - - panic("unexpected expression-relative selector") - - case *ast.IndexExpr: - switch t := fn.Pkg.typeOf(e.X).Underlying().(type) { - case *types.Array: - // Non-addressable array (in a register). - v := &Index{ - X: b.expr(fn, e.X), - Index: emitConv(fn, b.expr(fn, e.Index), tInt, e.Index), - } - v.setType(t.Elem()) - return fn.emit(v, e) - - case *types.Map: - // Maps are not addressable. - mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map) - v := &MapLookup{ - X: b.expr(fn, e.X), - Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key(), e.Index), - } - v.setType(mapt.Elem()) - return fn.emit(v, e) - - case *types.Basic: // => string - // Strings are not addressable. - v := &StringLookup{ - X: b.expr(fn, e.X), - Index: b.expr(fn, e.Index), - } - v.setType(tByte) - return fn.emit(v, e) - - case *types.Slice, *types.Pointer: // *array - // Addressable slice/array; use IndexAddr and Load. - return b.addr(fn, e, false).load(fn, e) - - default: - panic("unexpected container type in IndexExpr: " + t.String()) - } - - case *ast.CompositeLit, *ast.StarExpr: - // Addressable types (lvalues) - return b.addr(fn, e, false).load(fn, e) - } - - panic(fmt.Sprintf("unexpected expr: %T", e)) -} - -// stmtList emits to fn code for all statements in list. -func (b *builder) stmtList(fn *Function, list []ast.Stmt) { - for _, s := range list { - b.stmt(fn, s) - } -} - -// receiver emits to fn code for expression e in the "receiver" -// position of selection e.f (where f may be a field or a method) and -// returns the effective receiver after applying the implicit field -// selections of sel. -// -// wantAddr requests that the result is an an address. If -// !sel.Indirect(), this may require that e be built in addr() mode; it -// must thus be addressable. -// -// escaping is defined as per builder.addr(). -// -func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *types.Selection, source ast.Node) Value { - var v Value - if wantAddr && !sel.Indirect() && !isPointer(fn.Pkg.typeOf(e)) { - v = b.addr(fn, e, escaping).address(fn) - } else { - v = b.expr(fn, e) - } - - last := len(sel.Index()) - 1 - v = emitImplicitSelections(fn, v, sel.Index()[:last], source) - if !wantAddr && isPointer(v.Type()) { - v = emitLoad(fn, v, e) - } - return v -} - -// setCallFunc populates the function parts of a CallCommon structure -// (Func, Method, Recv, Args[0]) based on the kind of invocation -// occurring in e. -// -func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { - // Is this a method call? - if selector, ok := unparen(e.Fun).(*ast.SelectorExpr); ok { - sel, ok := fn.Pkg.info.Selections[selector] - if ok && sel.Kind() == types.MethodVal { - obj := sel.Obj().(*types.Func) - recv := recvType(obj) - wantAddr := isPointer(recv) - escaping := true - v := b.receiver(fn, selector.X, wantAddr, escaping, sel, selector) - if isInterface(recv) { - // Invoke-mode call. - c.Value = v - c.Method = obj - } else { - // "Call"-mode call. 
- c.Value = fn.Prog.declaredFunc(obj) - c.Args = append(c.Args, v) - } - return - } - - // sel.Kind()==MethodExpr indicates T.f() or (*T).f(): - // a statically dispatched call to the method f in the - // method-set of T or *T. T may be an interface. - // - // e.Fun would evaluate to a concrete method, interface - // wrapper function, or promotion wrapper. - // - // For now, we evaluate it in the usual way. - // - // TODO(adonovan): opt: inline expr() here, to make the - // call static and to avoid generation of wrappers. - // It's somewhat tricky as it may consume the first - // actual parameter if the call is "invoke" mode. - // - // Examples: - // type T struct{}; func (T) f() {} // "call" mode - // type T interface { f() } // "invoke" mode - // - // type S struct{ T } - // - // var s S - // S.f(s) - // (*S).f(&s) - // - // Suggested approach: - // - consume the first actual parameter expression - // and build it with b.expr(). - // - apply implicit field selections. - // - use MethodVal logic to populate fields of c. - } - - // Evaluate the function operand in the usual way. - c.Value = b.expr(fn, e.Fun) -} - -// emitCallArgs emits to f code for the actual parameters of call e to -// a (possibly built-in) function of effective type sig. -// The argument values are appended to args, which is then returned. -// -func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallExpr, args []Value) []Value { - // f(x, y, z...): pass slice z straight through. - if e.Ellipsis != 0 { - for i, arg := range e.Args { - v := emitConv(fn, b.expr(fn, arg), sig.Params().At(i).Type(), arg) - args = append(args, v) - } - return args - } - - offset := len(args) // 1 if call has receiver, 0 otherwise - - // Evaluate actual parameter expressions. - // - // If this is a chained call of the form f(g()) where g has - // multiple return values (MRV), they are flattened out into - // args; a suffix of them may end up in a varargs slice. - for _, arg := range e.Args { - v := b.expr(fn, arg) - if ttuple, ok := v.Type().(*types.Tuple); ok { // MRV chain - for i, n := 0, ttuple.Len(); i < n; i++ { - args = append(args, emitExtract(fn, v, i, arg)) - } - } else { - args = append(args, v) - } - } - - // Actual->formal assignability conversions for normal parameters. - np := sig.Params().Len() // number of normal parameters - if sig.Variadic() { - np-- - } - for i := 0; i < np; i++ { - args[offset+i] = emitConv(fn, args[offset+i], sig.Params().At(i).Type(), args[offset+i].Source()) - } - - // Actual->formal assignability conversions for variadic parameter, - // and construction of slice. - if sig.Variadic() { - varargs := args[offset+np:] - st := sig.Params().At(np).Type().(*types.Slice) - vt := st.Elem() - if len(varargs) == 0 { - args = append(args, emitConst(fn, nilConst(st))) - } else { - // Replace a suffix of args with a slice containing it. - at := types.NewArray(vt, int64(len(varargs))) - a := emitNew(fn, at, e) - a.source = e - for i, arg := range varargs { - iaddr := &IndexAddr{ - X: a, - Index: emitConst(fn, intConst(int64(i))), - } - iaddr.setType(types.NewPointer(vt)) - fn.emit(iaddr, e) - emitStore(fn, iaddr, arg, arg.Source()) - } - s := &Slice{X: a} - s.setType(st) - args[offset+np] = fn.emit(s, args[offset+np].Source()) - args = args[:offset+np+1] - } - } - return args -} - -// setCall emits to fn code to evaluate all the parameters of a function -// call e, and populates *c with those values. 
-// -func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) { - // First deal with the f(...) part and optional receiver. - b.setCallFunc(fn, e, c) - - // Then append the other actual parameters. - sig, _ := fn.Pkg.typeOf(e.Fun).Underlying().(*types.Signature) - if sig == nil { - panic(fmt.Sprintf("no signature for call of %s", e.Fun)) - } - c.Args = b.emitCallArgs(fn, sig, e, c.Args) -} - -// assignOp emits to fn code to perform loc = val. -func (b *builder) assignOp(fn *Function, loc lvalue, val Value, op token.Token, source ast.Node) { - oldv := loc.load(fn, source) - loc.store(fn, emitArith(fn, op, oldv, emitConv(fn, val, oldv.Type(), source), loc.typ(), source), source) -} - -// localValueSpec emits to fn code to define all of the vars in the -// function-local ValueSpec, spec. -// -func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { - switch { - case len(spec.Values) == len(spec.Names): - // e.g. var x, y = 0, 1 - // 1:1 assignment - for i, id := range spec.Names { - if !isBlankIdent(id) { - fn.addLocalForIdent(id) - } - lval := b.addr(fn, id, false) // non-escaping - b.assign(fn, lval, spec.Values[i], true, nil, spec) - } - - case len(spec.Values) == 0: - // e.g. var x, y int - // Locals are implicitly zero-initialized. - for _, id := range spec.Names { - if !isBlankIdent(id) { - lhs := fn.addLocalForIdent(id) - if fn.debugInfo() { - emitDebugRef(fn, id, lhs, true) - } - } - } - - default: - // e.g. var x, y = pos() - tuple := b.exprN(fn, spec.Values[0]) - for i, id := range spec.Names { - if !isBlankIdent(id) { - fn.addLocalForIdent(id) - lhs := b.addr(fn, id, false) // non-escaping - lhs.store(fn, emitExtract(fn, tuple, i, id), id) - } - } - } -} - -// assignStmt emits code to fn for a parallel assignment of rhss to lhss. -// isDef is true if this is a short variable declaration (:=). -// -// Note the similarity with localValueSpec. -// -func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool, source ast.Node) { - // Side effects of all LHSs and RHSs must occur in left-to-right order. - lvals := make([]lvalue, len(lhss)) - isZero := make([]bool, len(lhss)) - for i, lhs := range lhss { - var lval lvalue = blank{} - if !isBlankIdent(lhs) { - if isDef { - if obj := fn.Pkg.info.Defs[lhs.(*ast.Ident)]; obj != nil { - fn.addNamedLocal(obj, lhs) - isZero[i] = true - } - } - lval = b.addr(fn, lhs, false) // non-escaping - } - lvals[i] = lval - } - if len(lhss) == len(rhss) { - // Simple assignment: x = f() (!isDef) - // Parallel assignment: x, y = f(), g() (!isDef) - // or short var decl: x, y := f(), g() (isDef) - // - // In all cases, the RHSs may refer to the LHSs, - // so we need a storebuf. - var sb storebuf - for i := range rhss { - b.assign(fn, lvals[i], rhss[i], isZero[i], &sb, source) - } - sb.emit(fn) - } else { - // e.g. x, y = pos() - tuple := b.exprN(fn, rhss[0]) - emitDebugRef(fn, rhss[0], tuple, false) - for i, lval := range lvals { - lval.store(fn, emitExtract(fn, tuple, i, source), source) - } - } -} - -// arrayLen returns the length of the array whose composite literal elements are elts. -func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 { - var max int64 = -1 - var i int64 = -1 - for _, e := range elts { - if kv, ok := e.(*ast.KeyValueExpr); ok { - i = b.expr(fn, kv.Key).(*Const).Int64() - } else { - i++ - } - if i > max { - max = i - } - } - return max + 1 -} - -// compLit emits to fn code to initialize a composite literal e at -// address addr with type typ. 
-// -// Nested composite literals are recursively initialized in place -// where possible. If isZero is true, compLit assumes that addr -// holds the zero value for typ. -// -// Because the elements of a composite literal may refer to the -// variables being updated, as in the second line below, -// x := T{a: 1} -// x = T{a: x.a} -// all the reads must occur before all the writes. Thus all stores to -// loc are emitted to the storebuf sb for later execution. -// -// A CompositeLit may have pointer type only in the recursive (nested) -// case when the type name is implicit. e.g. in []*T{{}}, the inner -// literal has type *T behaves like &T{}. -// In that case, addr must hold a T, not a *T. -// -func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) { - typ := deref(fn.Pkg.typeOf(e)) - switch t := typ.Underlying().(type) { - case *types.Struct: - if !isZero && len(e.Elts) != t.NumFields() { - // memclear - sb.store(&address{addr, nil}, zeroValue(fn, deref(addr.Type()), e), e) - isZero = true - } - for i, e := range e.Elts { - fieldIndex := i - if kv, ok := e.(*ast.KeyValueExpr); ok { - fname := kv.Key.(*ast.Ident).Name - for i, n := 0, t.NumFields(); i < n; i++ { - sf := t.Field(i) - if sf.Name() == fname { - fieldIndex = i - e = kv.Value - break - } - } - } - sf := t.Field(fieldIndex) - faddr := &FieldAddr{ - X: addr, - Field: fieldIndex, - } - faddr.setType(types.NewPointer(sf.Type())) - fn.emit(faddr, e) - b.assign(fn, &address{addr: faddr, expr: e}, e, isZero, sb, e) - } - - case *types.Array, *types.Slice: - var at *types.Array - var array Value - switch t := t.(type) { - case *types.Slice: - at = types.NewArray(t.Elem(), b.arrayLen(fn, e.Elts)) - alloc := emitNew(fn, at, e) - array = alloc - case *types.Array: - at = t - array = addr - - if !isZero && int64(len(e.Elts)) != at.Len() { - // memclear - sb.store(&address{array, nil}, zeroValue(fn, deref(array.Type()), e), e) - } - } - - var idx *Const - for _, e := range e.Elts { - if kv, ok := e.(*ast.KeyValueExpr); ok { - idx = b.expr(fn, kv.Key).(*Const) - e = kv.Value - } else { - var idxval int64 - if idx != nil { - idxval = idx.Int64() + 1 - } - idx = emitConst(fn, intConst(idxval)) - } - iaddr := &IndexAddr{ - X: array, - Index: idx, - } - iaddr.setType(types.NewPointer(at.Elem())) - fn.emit(iaddr, e) - if t != at { // slice - // backing array is unaliased => storebuf not needed. - b.assign(fn, &address{addr: iaddr, expr: e}, e, true, nil, e) - } else { - b.assign(fn, &address{addr: iaddr, expr: e}, e, true, sb, e) - } - } - - if t != at { // slice - s := &Slice{X: array} - s.setType(typ) - sb.store(&address{addr: addr, expr: e}, fn.emit(s, e), e) - } - - case *types.Map: - m := &MakeMap{Reserve: emitConst(fn, intConst(int64(len(e.Elts))))} - m.setType(typ) - fn.emit(m, e) - for _, e := range e.Elts { - e := e.(*ast.KeyValueExpr) - - // If a key expression in a map literal is itself a - // composite literal, the type may be omitted. - // For example: - // map[*struct{}]bool{{}: true} - // An &-operation may be implied: - // map[*struct{}]bool{&struct{}{}: true} - var key Value - if _, ok := unparen(e.Key).(*ast.CompositeLit); ok && isPointer(t.Key()) { - // A CompositeLit never evaluates to a pointer, - // so if the type of the location is a pointer, - // an &-operation is implied. 
- key = b.addr(fn, e.Key, true).address(fn) - } else { - key = b.expr(fn, e.Key) - } - - loc := element{ - m: m, - k: emitConv(fn, key, t.Key(), e), - t: t.Elem(), - } - - // We call assign() only because it takes care - // of any &-operation required in the recursive - // case, e.g., - // map[int]*struct{}{0: {}} implies &struct{}{}. - // In-place update is of course impossible, - // and no storebuf is needed. - b.assign(fn, &loc, e.Value, true, nil, e) - } - sb.store(&address{addr: addr, expr: e}, m, e) - - default: - panic("unexpected CompositeLit type: " + t.String()) - } -} - -func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) { - if s.Tag == nil { - b.switchStmtDynamic(fn, s, label) - return - } - dynamic := false - for _, iclause := range s.Body.List { - clause := iclause.(*ast.CaseClause) - for _, cond := range clause.List { - if fn.Pkg.info.Types[unparen(cond)].Value == nil { - dynamic = true - break - } - } - } - - if dynamic { - b.switchStmtDynamic(fn, s, label) - return - } - - if s.Init != nil { - b.stmt(fn, s.Init) - } - - entry := fn.currentBlock - tag := b.expr(fn, s.Tag) - - heads := make([]*BasicBlock, 0, len(s.Body.List)) - bodies := make([]*BasicBlock, len(s.Body.List)) - conds := make([]Value, 0, len(s.Body.List)) - - hasDefault := false - done := fn.newBasicBlock(fmt.Sprintf("switch.done")) - if label != nil { - label._break = done - } - for i, stmt := range s.Body.List { - body := fn.newBasicBlock(fmt.Sprintf("switch.body.%d", i)) - bodies[i] = body - cas := stmt.(*ast.CaseClause) - if cas.List == nil { - // default branch - hasDefault = true - head := fn.newBasicBlock(fmt.Sprintf("switch.head.%d", i)) - conds = append(conds, nil) - heads = append(heads, head) - fn.currentBlock = head - emitJump(fn, body, cas) - } - for j, cond := range stmt.(*ast.CaseClause).List { - fn.currentBlock = entry - head := fn.newBasicBlock(fmt.Sprintf("switch.head.%d.%d", i, j)) - conds = append(conds, b.expr(fn, cond)) - heads = append(heads, head) - fn.currentBlock = head - emitJump(fn, body, cond) - } - } - - for i, stmt := range s.Body.List { - clause := stmt.(*ast.CaseClause) - body := bodies[i] - fn.currentBlock = body - fallthru := done - if i+1 < len(bodies) { - fallthru = bodies[i+1] - } - fn.targets = &targets{ - tail: fn.targets, - _break: done, - _fallthrough: fallthru, - } - b.stmtList(fn, clause.Body) - fn.targets = fn.targets.tail - emitJump(fn, done, stmt) - } - - if !hasDefault { - head := fn.newBasicBlock(fmt.Sprintf("switch.head.implicit-default")) - body := fn.newBasicBlock("switch.body.implicit-default") - fn.currentBlock = head - emitJump(fn, body, s) - fn.currentBlock = body - emitJump(fn, done, s) - heads = append(heads, head) - conds = append(conds, nil) - } - - if len(heads) != len(conds) { - panic(fmt.Sprintf("internal error: %d heads for %d conds", len(heads), len(conds))) - } - for _, head := range heads { - addEdge(entry, head) - } - fn.currentBlock = entry - entry.emit(&ConstantSwitch{ - Tag: tag, - Conds: conds, - }, s) - fn.currentBlock = done -} - -// switchStmt emits to fn code for the switch statement s, optionally -// labelled by label. -// -func (b *builder) switchStmtDynamic(fn *Function, s *ast.SwitchStmt, label *lblock) { - // We treat SwitchStmt like a sequential if-else chain. - // Multiway dispatch can be recovered later by irutil.Switches() - // to those cases that are free of side effects. 
- if s.Init != nil { - b.stmt(fn, s.Init) - } - kTrue := emitConst(fn, NewConst(constant.MakeBool(true), tBool)) - - var tagv Value = kTrue - var tagSource ast.Node = s - if s.Tag != nil { - tagv = b.expr(fn, s.Tag) - tagSource = s.Tag - } - // lifting only considers loads and stores, but we want different - // sigma nodes for the different comparisons. use a temporary and - // load it in every branch. - tag := fn.addLocal(tagv.Type(), tagSource) - emitStore(fn, tag, tagv, tagSource) - - done := fn.newBasicBlock("switch.done") - if label != nil { - label._break = done - } - // We pull the default case (if present) down to the end. - // But each fallthrough label must point to the next - // body block in source order, so we preallocate a - // body block (fallthru) for the next case. - // Unfortunately this makes for a confusing block order. - var dfltBody *[]ast.Stmt - var dfltFallthrough *BasicBlock - var fallthru, dfltBlock *BasicBlock - ncases := len(s.Body.List) - for i, clause := range s.Body.List { - body := fallthru - if body == nil { - body = fn.newBasicBlock("switch.body") // first case only - } - - // Preallocate body block for the next case. - fallthru = done - if i+1 < ncases { - fallthru = fn.newBasicBlock("switch.body") - } - - cc := clause.(*ast.CaseClause) - if cc.List == nil { - // Default case. - dfltBody = &cc.Body - dfltFallthrough = fallthru - dfltBlock = body - continue - } - - var nextCond *BasicBlock - for _, cond := range cc.List { - nextCond = fn.newBasicBlock("switch.next") - if tagv == kTrue { - // emit a proper if/else chain instead of a comparison - // of a value against true. - // - // NOTE(dh): adonovan had a todo saying "don't forget - // conversions though". As far as I can tell, there - // aren't any conversions that we need to take care of - // here. `case bool(a) && bool(b)` as well as `case - // bool(a && b)` are being taken care of by b.cond, - // and `case a` where a is not of type bool is - // invalid. - b.cond(fn, cond, body, nextCond) - } else { - cond := emitCompare(fn, token.EQL, emitLoad(fn, tag, cond), b.expr(fn, cond), cond) - emitIf(fn, cond, body, nextCond, cond.Source()) - } - - fn.currentBlock = nextCond - } - fn.currentBlock = body - fn.targets = &targets{ - tail: fn.targets, - _break: done, - _fallthrough: fallthru, - } - b.stmtList(fn, cc.Body) - fn.targets = fn.targets.tail - emitJump(fn, done, s) - fn.currentBlock = nextCond - } - if dfltBlock != nil { - // The lack of a Source for the jump doesn't matter, block - // fusing will get rid of the jump later. 
- - emitJump(fn, dfltBlock, s) - fn.currentBlock = dfltBlock - fn.targets = &targets{ - tail: fn.targets, - _break: done, - _fallthrough: dfltFallthrough, - } - b.stmtList(fn, *dfltBody) - fn.targets = fn.targets.tail - } - emitJump(fn, done, s) - fn.currentBlock = done -} - -func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lblock) { - if s.Init != nil { - b.stmt(fn, s.Init) - } - - var tag Value - switch e := s.Assign.(type) { - case *ast.ExprStmt: // x.(type) - tag = b.expr(fn, unparen(e.X).(*ast.TypeAssertExpr).X) - case *ast.AssignStmt: // y := x.(type) - tag = b.expr(fn, unparen(e.Rhs[0]).(*ast.TypeAssertExpr).X) - default: - panic("unreachable") - } - tagPtr := fn.addLocal(tag.Type(), tag.Source()) - emitStore(fn, tagPtr, tag, tag.Source()) - - // +1 in case there's no explicit default case - heads := make([]*BasicBlock, 0, len(s.Body.List)+1) - - entry := fn.currentBlock - done := fn.newBasicBlock("done") - if label != nil { - label._break = done - } - - // set up type switch and constant switch, populate their conditions - tswtch := &TypeSwitch{ - Tag: emitLoad(fn, tagPtr, tag.Source()), - Conds: make([]types.Type, 0, len(s.Body.List)+1), - } - cswtch := &ConstantSwitch{ - Conds: make([]Value, 0, len(s.Body.List)+1), - } - - rets := make([]types.Type, 0, len(s.Body.List)+1) - index := 0 - var default_ *ast.CaseClause - for _, clause := range s.Body.List { - cc := clause.(*ast.CaseClause) - if obj := fn.Pkg.info.Implicits[cc]; obj != nil { - fn.addNamedLocal(obj, cc) - } - if cc.List == nil { - // default case - default_ = cc - } else { - for _, expr := range cc.List { - tswtch.Conds = append(tswtch.Conds, fn.Pkg.typeOf(expr)) - cswtch.Conds = append(cswtch.Conds, emitConst(fn, intConst(int64(index)))) - index++ - } - if len(cc.List) == 1 { - rets = append(rets, fn.Pkg.typeOf(cc.List[0])) - } else { - for range cc.List { - rets = append(rets, tag.Type()) - } - } - } - } - - // default branch - rets = append(rets, tag.Type()) - - var vars []*types.Var - vars = append(vars, varIndex) - for _, typ := range rets { - vars = append(vars, anonVar(typ)) - } - tswtch.setType(types.NewTuple(vars...)) - // default branch - fn.currentBlock = entry - fn.emit(tswtch, s) - cswtch.Conds = append(cswtch.Conds, emitConst(fn, intConst(int64(-1)))) - // in theory we should add a local and stores/loads for tswtch, to - // generate sigma nodes in the branches. however, there isn't any - // useful information we could possibly attach to it. - cswtch.Tag = emitExtract(fn, tswtch, 0, s) - fn.emit(cswtch, s) - - // build heads and bodies - index = 0 - for _, clause := range s.Body.List { - cc := clause.(*ast.CaseClause) - if cc.List == nil { - continue - } - - body := fn.newBasicBlock("typeswitch.body") - for _, expr := range cc.List { - head := fn.newBasicBlock("typeswitch.head") - heads = append(heads, head) - fn.currentBlock = head - - if obj := fn.Pkg.info.Implicits[cc]; obj != nil { - // In a switch y := x.(type), each case clause - // implicitly declares a distinct object y. - // In a single-type case, y has that type. - // In multi-type cases, 'case nil' and default, - // y has the same type as the interface operand. 
- - l := fn.objects[obj] - if rets[index] == tUntypedNil { - emitStore(fn, l, emitConst(fn, nilConst(tswtch.Tag.Type())), s.Assign) - } else { - x := emitExtract(fn, tswtch, index+1, s.Assign) - emitStore(fn, l, x, nil) - } - } - - emitJump(fn, body, expr) - index++ - } - fn.currentBlock = body - fn.targets = &targets{ - tail: fn.targets, - _break: done, - } - b.stmtList(fn, cc.Body) - fn.targets = fn.targets.tail - emitJump(fn, done, clause) - } - - if default_ == nil { - // implicit default - heads = append(heads, done) - } else { - body := fn.newBasicBlock("typeswitch.default") - heads = append(heads, body) - fn.currentBlock = body - fn.targets = &targets{ - tail: fn.targets, - _break: done, - } - if obj := fn.Pkg.info.Implicits[default_]; obj != nil { - l := fn.objects[obj] - x := emitExtract(fn, tswtch, index+1, s.Assign) - emitStore(fn, l, x, s) - } - b.stmtList(fn, default_.Body) - fn.targets = fn.targets.tail - emitJump(fn, done, s) - } - - fn.currentBlock = entry - for _, head := range heads { - addEdge(entry, head) - } - fn.currentBlock = done -} - -// selectStmt emits to fn code for the select statement s, optionally -// labelled by label. -// -func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) (noreturn bool) { - if len(s.Body.List) == 0 { - instr := &Select{Blocking: true} - instr.setType(types.NewTuple(varIndex, varOk)) - fn.emit(instr, s) - fn.emit(new(Unreachable), s) - addEdge(fn.currentBlock, fn.Exit) - return true - } - - // A blocking select of a single case degenerates to a - // simple send or receive. - // TODO(adonovan): opt: is this optimization worth its weight? - if len(s.Body.List) == 1 { - clause := s.Body.List[0].(*ast.CommClause) - if clause.Comm != nil { - b.stmt(fn, clause.Comm) - done := fn.newBasicBlock("select.done") - if label != nil { - label._break = done - } - fn.targets = &targets{ - tail: fn.targets, - _break: done, - } - b.stmtList(fn, clause.Body) - fn.targets = fn.targets.tail - emitJump(fn, done, clause) - fn.currentBlock = done - return false - } - } - - // First evaluate all channels in all cases, and find - // the directions of each state. - var states []*SelectState - blocking := true - debugInfo := fn.debugInfo() - for _, clause := range s.Body.List { - var st *SelectState - switch comm := clause.(*ast.CommClause).Comm.(type) { - case nil: // default case - blocking = false - continue - - case *ast.SendStmt: // ch<- i - ch := b.expr(fn, comm.Chan) - st = &SelectState{ - Dir: types.SendOnly, - Chan: ch, - Send: emitConv(fn, b.expr(fn, comm.Value), - ch.Type().Underlying().(*types.Chan).Elem(), comm), - Pos: comm.Arrow, - } - if debugInfo { - st.DebugNode = comm - } - - case *ast.AssignStmt: // x := <-ch - recv := unparen(comm.Rhs[0]).(*ast.UnaryExpr) - st = &SelectState{ - Dir: types.RecvOnly, - Chan: b.expr(fn, recv.X), - Pos: recv.OpPos, - } - if debugInfo { - st.DebugNode = recv - } - - case *ast.ExprStmt: // <-ch - recv := unparen(comm.X).(*ast.UnaryExpr) - st = &SelectState{ - Dir: types.RecvOnly, - Chan: b.expr(fn, recv.X), - Pos: recv.OpPos, - } - if debugInfo { - st.DebugNode = recv - } - } - states = append(states, st) - } - - // We dispatch on the (fair) result of Select using a - // switch on the returned index. 
- sel := &Select{ - States: states, - Blocking: blocking, - } - sel.source = s - var vars []*types.Var - vars = append(vars, varIndex, varOk) - for _, st := range states { - if st.Dir == types.RecvOnly { - tElem := st.Chan.Type().Underlying().(*types.Chan).Elem() - vars = append(vars, anonVar(tElem)) - } - } - sel.setType(types.NewTuple(vars...)) - fn.emit(sel, s) - idx := emitExtract(fn, sel, 0, s) - - done := fn.newBasicBlock("select.done") - if label != nil { - label._break = done - } - - entry := fn.currentBlock - swtch := &ConstantSwitch{ - Tag: idx, - // one condition per case - Conds: make([]Value, 0, len(s.Body.List)+1), - } - // note that we don't need heads; a select case can only have a single condition - var bodies []*BasicBlock - - state := 0 - r := 2 // index in 'sel' tuple of value; increments if st.Dir==RECV - for _, cc := range s.Body.List { - clause := cc.(*ast.CommClause) - if clause.Comm == nil { - body := fn.newBasicBlock("select.default") - fn.currentBlock = body - bodies = append(bodies, body) - fn.targets = &targets{ - tail: fn.targets, - _break: done, - } - b.stmtList(fn, clause.Body) - emitJump(fn, done, s) - fn.targets = fn.targets.tail - swtch.Conds = append(swtch.Conds, emitConst(fn, intConst(-1))) - continue - } - swtch.Conds = append(swtch.Conds, emitConst(fn, intConst(int64(state)))) - body := fn.newBasicBlock("select.body") - fn.currentBlock = body - bodies = append(bodies, body) - fn.targets = &targets{ - tail: fn.targets, - _break: done, - } - switch comm := clause.Comm.(type) { - case *ast.ExprStmt: // <-ch - if debugInfo { - v := emitExtract(fn, sel, r, comm) - emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false) - } - r++ - - case *ast.AssignStmt: // x := <-states[state].Chan - if comm.Tok == token.DEFINE { - fn.addLocalForIdent(comm.Lhs[0].(*ast.Ident)) - } - x := b.addr(fn, comm.Lhs[0], false) // non-escaping - v := emitExtract(fn, sel, r, comm) - if debugInfo { - emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false) - } - x.store(fn, v, comm) - - if len(comm.Lhs) == 2 { // x, ok := ... - if comm.Tok == token.DEFINE { - fn.addLocalForIdent(comm.Lhs[1].(*ast.Ident)) - } - ok := b.addr(fn, comm.Lhs[1], false) // non-escaping - ok.store(fn, emitExtract(fn, sel, 1, comm), comm) - } - r++ - } - b.stmtList(fn, clause.Body) - fn.targets = fn.targets.tail - emitJump(fn, done, s) - state++ - } - fn.currentBlock = entry - fn.emit(swtch, s) - for _, body := range bodies { - addEdge(entry, body) - } - fn.currentBlock = done - return false -} - -// forStmt emits to fn code for the for statement s, optionally -// labelled by label. -// -func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { - // ...init... - // jump loop - // loop: - // if cond goto body else done - // body: - // ...body... - // jump post - // post: (target of continue) - // ...post... 
- // jump loop - // done: (target of break) - if s.Init != nil { - b.stmt(fn, s.Init) - } - body := fn.newBasicBlock("for.body") - done := fn.newBasicBlock("for.done") // target of 'break' - loop := body // target of back-edge - if s.Cond != nil { - loop = fn.newBasicBlock("for.loop") - } - cont := loop // target of 'continue' - if s.Post != nil { - cont = fn.newBasicBlock("for.post") - } - if label != nil { - label._break = done - label._continue = cont - } - emitJump(fn, loop, s) - fn.currentBlock = loop - if loop != body { - b.cond(fn, s.Cond, body, done) - fn.currentBlock = body - } - fn.targets = &targets{ - tail: fn.targets, - _break: done, - _continue: cont, - } - b.stmt(fn, s.Body) - fn.targets = fn.targets.tail - emitJump(fn, cont, s) - - if s.Post != nil { - fn.currentBlock = cont - b.stmt(fn, s.Post) - emitJump(fn, loop, s) // back-edge - } - fn.currentBlock = done -} - -// rangeIndexed emits to fn the header for an integer-indexed loop -// over array, *array or slice value x. -// The v result is defined only if tv is non-nil. -// forPos is the position of the "for" token. -// -func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, source ast.Node) (k, v Value, loop, done *BasicBlock) { - // - // length = len(x) - // index = -1 - // loop: (target of continue) - // index++ - // if index < length goto body else done - // body: - // k = index - // v = x[index] - // ...body... - // jump loop - // done: (target of break) - - // Determine number of iterations. - var length Value - if arr, ok := deref(x.Type()).Underlying().(*types.Array); ok { - // For array or *array, the number of iterations is - // known statically thanks to the type. We avoid a - // data dependence upon x, permitting later dead-code - // elimination if x is pure, static unrolling, etc. - // Ranging over a nil *array may have >0 iterations. - // We still generate code for x, in case it has effects. - length = emitConst(fn, intConst(arr.Len())) - } else { - // length = len(x). - var c Call - c.Call.Value = makeLen(x.Type()) - c.Call.Args = []Value{x} - c.setType(tInt) - length = fn.emit(&c, source) - } - - index := fn.addLocal(tInt, source) - emitStore(fn, index, emitConst(fn, intConst(-1)), source) - - loop = fn.newBasicBlock("rangeindex.loop") - emitJump(fn, loop, source) - fn.currentBlock = loop - - incr := &BinOp{ - Op: token.ADD, - X: emitLoad(fn, index, source), - Y: emitConst(fn, intConst(1)), - } - incr.setType(tInt) - emitStore(fn, index, fn.emit(incr, source), source) - - body := fn.newBasicBlock("rangeindex.body") - done = fn.newBasicBlock("rangeindex.done") - emitIf(fn, emitCompare(fn, token.LSS, incr, length, source), body, done, source) - fn.currentBlock = body - - k = emitLoad(fn, index, source) - if tv != nil { - switch t := x.Type().Underlying().(type) { - case *types.Array: - instr := &Index{ - X: x, - Index: k, - } - instr.setType(t.Elem()) - v = fn.emit(instr, source) - - case *types.Pointer: // *array - instr := &IndexAddr{ - X: x, - Index: k, - } - instr.setType(types.NewPointer(t.Elem().Underlying().(*types.Array).Elem())) - v = emitLoad(fn, fn.emit(instr, source), source) - - case *types.Slice: - instr := &IndexAddr{ - X: x, - Index: k, - } - instr.setType(types.NewPointer(t.Elem())) - v = emitLoad(fn, fn.emit(instr, source), source) - - default: - panic("rangeIndexed x:" + t.String()) - } - } - return -} - -// rangeIter emits to fn the header for a loop using -// Range/Next/Extract to iterate over map or string value x. 
-// tk and tv are the types of the key/value results k and v, or nil -// if the respective component is not wanted. -// -func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, source ast.Node) (k, v Value, loop, done *BasicBlock) { - // - // it = range x - // loop: (target of continue) - // okv = next it (ok, key, value) - // ok = extract okv #0 - // if ok goto body else done - // body: - // k = extract okv #1 - // v = extract okv #2 - // ...body... - // jump loop - // done: (target of break) - // - - if tk == nil { - tk = tInvalid - } - if tv == nil { - tv = tInvalid - } - - rng := &Range{X: x} - rng.setType(tRangeIter) - it := fn.emit(rng, source) - - loop = fn.newBasicBlock("rangeiter.loop") - emitJump(fn, loop, source) - fn.currentBlock = loop - - _, isString := x.Type().Underlying().(*types.Basic) - - okv := &Next{ - Iter: it, - IsString: isString, - } - okv.setType(types.NewTuple( - varOk, - newVar("k", tk), - newVar("v", tv), - )) - fn.emit(okv, source) - - body := fn.newBasicBlock("rangeiter.body") - done = fn.newBasicBlock("rangeiter.done") - emitIf(fn, emitExtract(fn, okv, 0, source), body, done, source) - fn.currentBlock = body - - if tk != tInvalid { - k = emitExtract(fn, okv, 1, source) - } - if tv != tInvalid { - v = emitExtract(fn, okv, 2, source) - } - return -} - -// rangeChan emits to fn the header for a loop that receives from -// channel x until it fails. -// tk is the channel's element type, or nil if the k result is -// not wanted -// pos is the position of the '=' or ':=' token. -// -func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, source ast.Node) (k Value, loop, done *BasicBlock) { - // - // loop: (target of continue) - // ko = <-x (key, ok) - // ok = extract ko #1 - // if ok goto body else done - // body: - // k = extract ko #0 - // ... - // goto loop - // done: (target of break) - - loop = fn.newBasicBlock("rangechan.loop") - emitJump(fn, loop, source) - fn.currentBlock = loop - retv := emitRecv(fn, x, true, types.NewTuple(newVar("k", x.Type().Underlying().(*types.Chan).Elem()), varOk), source) - body := fn.newBasicBlock("rangechan.body") - done = fn.newBasicBlock("rangechan.done") - emitIf(fn, emitExtract(fn, retv, 1, source), body, done, source) - fn.currentBlock = body - if tk != nil { - k = emitExtract(fn, retv, 0, source) - } - return -} - -// rangeStmt emits to fn code for the range statement s, optionally -// labelled by label. -// -func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock, source ast.Node) { - var tk, tv types.Type - if s.Key != nil && !isBlankIdent(s.Key) { - tk = fn.Pkg.typeOf(s.Key) - } - if s.Value != nil && !isBlankIdent(s.Value) { - tv = fn.Pkg.typeOf(s.Value) - } - - // If iteration variables are defined (:=), this - // occurs once outside the loop. - // - // Unlike a short variable declaration, a RangeStmt - // using := never redeclares an existing variable; it - // always creates a new one. 
- if s.Tok == token.DEFINE { - if tk != nil { - fn.addLocalForIdent(s.Key.(*ast.Ident)) - } - if tv != nil { - fn.addLocalForIdent(s.Value.(*ast.Ident)) - } - } - - x := b.expr(fn, s.X) - - var k, v Value - var loop, done *BasicBlock - switch rt := x.Type().Underlying().(type) { - case *types.Slice, *types.Array, *types.Pointer: // *array - k, v, loop, done = b.rangeIndexed(fn, x, tv, source) - - case *types.Chan: - k, loop, done = b.rangeChan(fn, x, tk, source) - - case *types.Map, *types.Basic: // string - k, v, loop, done = b.rangeIter(fn, x, tk, tv, source) - - default: - panic("Cannot range over: " + rt.String()) - } - - // Evaluate both LHS expressions before we update either. - var kl, vl lvalue - if tk != nil { - kl = b.addr(fn, s.Key, false) // non-escaping - } - if tv != nil { - vl = b.addr(fn, s.Value, false) // non-escaping - } - if tk != nil { - kl.store(fn, k, s) - } - if tv != nil { - vl.store(fn, v, s) - } - - if label != nil { - label._break = done - label._continue = loop - } - - fn.targets = &targets{ - tail: fn.targets, - _break: done, - _continue: loop, - } - b.stmt(fn, s.Body) - fn.targets = fn.targets.tail - emitJump(fn, loop, source) // back-edge - fn.currentBlock = done -} - -// stmt lowers statement s to IR form, emitting code to fn. -func (b *builder) stmt(fn *Function, _s ast.Stmt) { - // The label of the current statement. If non-nil, its _goto - // target is always set; its _break and _continue are set only - // within the body of switch/typeswitch/select/for/range. - // It is effectively an additional default-nil parameter of stmt(). - var label *lblock -start: - switch s := _s.(type) { - case *ast.EmptyStmt: - // ignore. (Usually removed by gofmt.) - - case *ast.DeclStmt: // Con, Var or Typ - d := s.Decl.(*ast.GenDecl) - if d.Tok == token.VAR { - for _, spec := range d.Specs { - if vs, ok := spec.(*ast.ValueSpec); ok { - b.localValueSpec(fn, vs) - } - } - } - - case *ast.LabeledStmt: - label = fn.labelledBlock(s.Label) - emitJump(fn, label._goto, s) - fn.currentBlock = label._goto - _s = s.Stmt - goto start // effectively: tailcall stmt(fn, s.Stmt, label) - - case *ast.ExprStmt: - b.expr(fn, s.X) - - case *ast.SendStmt: - instr := &Send{ - Chan: b.expr(fn, s.Chan), - X: emitConv(fn, b.expr(fn, s.Value), - fn.Pkg.typeOf(s.Chan).Underlying().(*types.Chan).Elem(), s), - } - fn.emit(instr, s) - - case *ast.IncDecStmt: - op := token.ADD - if s.Tok == token.DEC { - op = token.SUB - } - loc := b.addr(fn, s.X, false) - b.assignOp(fn, loc, emitConst(fn, NewConst(constant.MakeInt64(1), loc.typ())), op, s) - - case *ast.AssignStmt: - switch s.Tok { - case token.ASSIGN, token.DEFINE: - b.assignStmt(fn, s.Lhs, s.Rhs, s.Tok == token.DEFINE, _s) - - default: // +=, etc. - op := s.Tok + token.ADD - token.ADD_ASSIGN - b.assignOp(fn, b.addr(fn, s.Lhs[0], false), b.expr(fn, s.Rhs[0]), op, s) - } - - case *ast.GoStmt: - // The "intrinsics" new/make/len/cap are forbidden here. - // panic is treated like an ordinary function call. - v := Go{} - b.setCall(fn, s.Call, &v.Call) - fn.emit(&v, s) - - case *ast.DeferStmt: - // The "intrinsics" new/make/len/cap are forbidden here. - // panic is treated like an ordinary function call. - v := Defer{} - b.setCall(fn, s.Call, &v.Call) - fn.hasDefer = true - fn.emit(&v, s) - - case *ast.ReturnStmt: - // TODO(dh): we could emit tigher position information by - // using the ith returned expression - - var results []Value - if len(s.Results) == 1 && fn.Signature.Results().Len() > 1 { - // Return of one expression in a multi-valued function. 
- tuple := b.exprN(fn, s.Results[0]) - ttuple := tuple.Type().(*types.Tuple) - for i, n := 0, ttuple.Len(); i < n; i++ { - results = append(results, - emitConv(fn, emitExtract(fn, tuple, i, s), - fn.Signature.Results().At(i).Type(), s)) - } - } else { - // 1:1 return, or no-arg return in non-void function. - for i, r := range s.Results { - v := emitConv(fn, b.expr(fn, r), fn.Signature.Results().At(i).Type(), s) - results = append(results, v) - } - } - - ret := fn.results() - for i, r := range results { - emitStore(fn, ret[i], r, s) - } - - emitJump(fn, fn.Exit, s) - fn.currentBlock = fn.newBasicBlock("unreachable") - - case *ast.BranchStmt: - var block *BasicBlock - switch s.Tok { - case token.BREAK: - if s.Label != nil { - block = fn.labelledBlock(s.Label)._break - } else { - for t := fn.targets; t != nil && block == nil; t = t.tail { - block = t._break - } - } - - case token.CONTINUE: - if s.Label != nil { - block = fn.labelledBlock(s.Label)._continue - } else { - for t := fn.targets; t != nil && block == nil; t = t.tail { - block = t._continue - } - } - - case token.FALLTHROUGH: - for t := fn.targets; t != nil && block == nil; t = t.tail { - block = t._fallthrough - } - - case token.GOTO: - block = fn.labelledBlock(s.Label)._goto - } - j := emitJump(fn, block, s) - j.Comment = s.Tok.String() - fn.currentBlock = fn.newBasicBlock("unreachable") - - case *ast.BlockStmt: - b.stmtList(fn, s.List) - - case *ast.IfStmt: - if s.Init != nil { - b.stmt(fn, s.Init) - } - then := fn.newBasicBlock("if.then") - done := fn.newBasicBlock("if.done") - els := done - if s.Else != nil { - els = fn.newBasicBlock("if.else") - } - instr := b.cond(fn, s.Cond, then, els) - instr.source = s - fn.currentBlock = then - b.stmt(fn, s.Body) - emitJump(fn, done, s) - - if s.Else != nil { - fn.currentBlock = els - b.stmt(fn, s.Else) - emitJump(fn, done, s) - } - - fn.currentBlock = done - - case *ast.SwitchStmt: - b.switchStmt(fn, s, label) - - case *ast.TypeSwitchStmt: - b.typeSwitchStmt(fn, s, label) - - case *ast.SelectStmt: - if b.selectStmt(fn, s, label) { - // the select has no cases, it blocks forever - fn.currentBlock = fn.newBasicBlock("unreachable") - } - - case *ast.ForStmt: - b.forStmt(fn, s, label) - - case *ast.RangeStmt: - b.rangeStmt(fn, s, label, s) - - default: - panic(fmt.Sprintf("unexpected statement kind: %T", s)) - } -} - -// buildFunction builds IR code for the body of function fn. Idempotent. -func (b *builder) buildFunction(fn *Function) { - if fn.Blocks != nil { - return // building already started - } - - var recvField *ast.FieldList - var body *ast.BlockStmt - var functype *ast.FuncType - switch n := fn.source.(type) { - case nil: - return // not a Go source function. (Synthetic, or from object file.) - case *ast.FuncDecl: - functype = n.Type - recvField = n.Recv - body = n.Body - case *ast.FuncLit: - functype = n.Type - body = n.Body - default: - panic(n) - } - - if fn.Package().Pkg.Path() == "syscall" && fn.Name() == "Exit" { - // syscall.Exit is a stub and the way os.Exit terminates the - // process. Note that there are other functions in the runtime - // that also terminate or unwind that we cannot analyze. - // However, they aren't stubs, so buildExits ends up getting - // called on them, so that's where we handle those special - // cases. - fn.WillExit = true - } - - if body == nil { - // External function. - if fn.Params == nil { - // This condition ensures we add a non-empty - // params list once only, but we may attempt - // the degenerate empty case repeatedly. 
- // TODO(adonovan): opt: don't do that. - - // We set Function.Params even though there is no body - // code to reference them. This simplifies clients. - if recv := fn.Signature.Recv(); recv != nil { - // XXX synthesize an ast.Node - fn.addParamObj(recv, nil) - } - params := fn.Signature.Params() - for i, n := 0, params.Len(); i < n; i++ { - // XXX synthesize an ast.Node - fn.addParamObj(params.At(i), nil) - } - } - return - } - if fn.Prog.mode&LogSource != 0 { - defer logStack("build function %s @ %s", fn, fn.Prog.Fset.Position(fn.Pos()))() - } - fn.blocksets = b.blocksets - fn.startBody() - fn.createSyntacticParams(recvField, functype) - fn.exitBlock() - b.stmt(fn, body) - if cb := fn.currentBlock; cb != nil && (cb == fn.Blocks[0] || cb.Preds != nil) { - // Control fell off the end of the function's body block. - // - // Block optimizations eliminate the current block, if - // unreachable. It is a builder invariant that - // if this no-arg return is ill-typed for - // fn.Signature.Results, this block must be - // unreachable. The sanity checker checks this. - // fn.emit(new(RunDefers)) - // fn.emit(new(Return)) - emitJump(fn, fn.Exit, nil) - } - optimizeBlocks(fn) - buildFakeExits(fn) - b.buildExits(fn) - b.addUnreachables(fn) - fn.finishBody() - b.blocksets = fn.blocksets - fn.functionBody = nil -} - -// buildFuncDecl builds IR code for the function or method declared -// by decl in package pkg. -// -func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) { - id := decl.Name - if isBlankIdent(id) { - return // discard - } - fn := pkg.values[pkg.info.Defs[id]].(*Function) - if decl.Recv == nil && id.Name == "init" { - var v Call - v.Call.Value = fn - v.setType(types.NewTuple()) - pkg.init.emit(&v, decl) - } - fn.source = decl - b.buildFunction(fn) -} - -// Build calls Package.Build for each package in prog. -// -// Build is intended for whole-program analysis; a typical compiler -// need only build a single package. -// -// Build is idempotent and thread-safe. -// -func (prog *Program) Build() { - for _, p := range prog.packages { - p.Build() - } -} - -// Build builds IR code for all functions and vars in package p. -// -// Precondition: CreatePackage must have been called for all of p's -// direct imports (and hence its direct imports must have been -// error-free). -// -// Build is idempotent and thread-safe. -// -func (p *Package) Build() { p.buildOnce.Do(p.build) } - -func (p *Package) build() { - if p.info == nil { - return // synthetic package, e.g. "testmain" - } - - // Ensure we have runtime type info for all exported members. - // TODO(adonovan): ideally belongs in memberFromObject, but - // that would require package creation in topological order. - for name, mem := range p.Members { - if ast.IsExported(name) { - p.Prog.needMethodsOf(mem.Type()) - } - } - if p.Prog.mode&LogSource != 0 { - defer logStack("build %s", p)() - } - init := p.init - init.startBody() - init.exitBlock() - - var done *BasicBlock - - // Make init() skip if package is already initialized. - initguard := p.Var("init$guard") - doinit := init.newBasicBlock("init.start") - done = init.Exit - emitIf(init, emitLoad(init, initguard, nil), done, doinit, nil) - init.currentBlock = doinit - emitStore(init, initguard, emitConst(init, NewConst(constant.MakeBool(true), tBool)), nil) - - // Call the init() function of each package we import. 
- for _, pkg := range p.Pkg.Imports() { - prereq := p.Prog.packages[pkg] - if prereq == nil { - panic(fmt.Sprintf("Package(%q).Build(): unsatisfied import: Program.CreatePackage(%q) was not called", p.Pkg.Path(), pkg.Path())) - } - var v Call - v.Call.Value = prereq.init - v.setType(types.NewTuple()) - init.emit(&v, nil) - } - - b := builder{ - printFunc: p.printFunc, - } - - // Initialize package-level vars in correct order. - for _, varinit := range p.info.InitOrder { - if init.Prog.mode&LogSource != 0 { - fmt.Fprintf(os.Stderr, "build global initializer %v @ %s\n", - varinit.Lhs, p.Prog.Fset.Position(varinit.Rhs.Pos())) - } - if len(varinit.Lhs) == 1 { - // 1:1 initialization: var x, y = a(), b() - var lval lvalue - if v := varinit.Lhs[0]; v.Name() != "_" { - lval = &address{addr: p.values[v].(*Global)} - } else { - lval = blank{} - } - // TODO(dh): do emit position information - b.assign(init, lval, varinit.Rhs, true, nil, nil) - } else { - // n:1 initialization: var x, y := f() - tuple := b.exprN(init, varinit.Rhs) - for i, v := range varinit.Lhs { - if v.Name() == "_" { - continue - } - emitStore(init, p.values[v].(*Global), emitExtract(init, tuple, i, nil), nil) - } - } - } - - // Build all package-level functions, init functions - // and methods, including unreachable/blank ones. - // We build them in source order, but it's not significant. - for _, file := range p.files { - for _, decl := range file.Decls { - if decl, ok := decl.(*ast.FuncDecl); ok { - b.buildFuncDecl(p, decl) - } - } - } - - // Finish up init(). - emitJump(init, done, nil) - init.finishBody() - - p.info = nil // We no longer need ASTs or go/types deductions. - - if p.Prog.mode&SanityCheckFunctions != 0 { - sanityCheckPackage(p) - } -} - -// Like ObjectOf, but panics instead of returning nil. -// Only valid during p's create and build phases. -func (p *Package) objectOf(id *ast.Ident) types.Object { - if o := p.info.ObjectOf(id); o != nil { - return o - } - panic(fmt.Sprintf("no types.Object for ast.Ident %s @ %s", - id.Name, p.Prog.Fset.Position(id.Pos()))) -} - -// Like TypeOf, but panics instead of returning nil. -// Only valid during p's create and build phases. -func (p *Package) typeOf(e ast.Expr) types.Type { - if T := p.info.TypeOf(e); T != nil { - return T - } - panic(fmt.Sprintf("no type for %T @ %s", - e, p.Prog.Fset.Position(e.Pos()))) -} diff --git a/vendor/honnef.co/go/tools/ir/const.go b/vendor/honnef.co/go/tools/ir/const.go deleted file mode 100644 index 7cdf006e83a..00000000000 --- a/vendor/honnef.co/go/tools/ir/const.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ir - -// This file defines the Const SSA value type. - -import ( - "fmt" - "go/constant" - "go/types" - "strconv" -) - -// NewConst returns a new constant of the specified value and type. -// val must be valid according to the specification of Const.Value. -// -func NewConst(val constant.Value, typ types.Type) *Const { - return &Const{ - register: register{ - typ: typ, - }, - Value: val, - } -} - -// intConst returns an 'int' constant that evaluates to i. -// (i is an int64 in case the host is narrower than the target.) -func intConst(i int64) *Const { - return NewConst(constant.MakeInt64(i), tInt) -} - -// nilConst returns a nil constant of the specified type, which may -// be any reference type, including interfaces. 
-// -func nilConst(typ types.Type) *Const { - return NewConst(nil, typ) -} - -// stringConst returns a 'string' constant that evaluates to s. -func stringConst(s string) *Const { - return NewConst(constant.MakeString(s), tString) -} - -// zeroConst returns a new "zero" constant of the specified type, -// which must not be an array or struct type: the zero values of -// aggregates are well-defined but cannot be represented by Const. -// -func zeroConst(t types.Type) *Const { - switch t := t.(type) { - case *types.Basic: - switch { - case t.Info()&types.IsBoolean != 0: - return NewConst(constant.MakeBool(false), t) - case t.Info()&types.IsNumeric != 0: - return NewConst(constant.MakeInt64(0), t) - case t.Info()&types.IsString != 0: - return NewConst(constant.MakeString(""), t) - case t.Kind() == types.UnsafePointer: - fallthrough - case t.Kind() == types.UntypedNil: - return nilConst(t) - default: - panic(fmt.Sprint("zeroConst for unexpected type:", t)) - } - case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: - return nilConst(t) - case *types.Named: - return NewConst(zeroConst(t.Underlying()).Value, t) - case *types.Array, *types.Struct, *types.Tuple: - panic(fmt.Sprint("zeroConst applied to aggregate:", t)) - } - panic(fmt.Sprint("zeroConst: unexpected ", t)) -} - -func (c *Const) RelString(from *types.Package) string { - var p string - if c.Value == nil { - p = "nil" - } else if c.Value.Kind() == constant.String { - v := constant.StringVal(c.Value) - const max = 20 - // TODO(adonovan): don't cut a rune in half. - if len(v) > max { - v = v[:max-3] + "..." // abbreviate - } - p = strconv.Quote(v) - } else { - p = c.Value.String() - } - return fmt.Sprintf("Const <%s> {%s}", relType(c.Type(), from), p) -} - -func (c *Const) String() string { - return c.RelString(c.Parent().pkg()) -} - -// IsNil returns true if this constant represents a typed or untyped nil value. -func (c *Const) IsNil() bool { - return c.Value == nil -} - -// Int64 returns the numeric value of this constant truncated to fit -// a signed 64-bit integer. -// -func (c *Const) Int64() int64 { - switch x := constant.ToInt(c.Value); x.Kind() { - case constant.Int: - if i, ok := constant.Int64Val(x); ok { - return i - } - return 0 - case constant.Float: - f, _ := constant.Float64Val(x) - return int64(f) - } - panic(fmt.Sprintf("unexpected constant value: %T", c.Value)) -} - -// Uint64 returns the numeric value of this constant truncated to fit -// an unsigned 64-bit integer. -// -func (c *Const) Uint64() uint64 { - switch x := constant.ToInt(c.Value); x.Kind() { - case constant.Int: - if u, ok := constant.Uint64Val(x); ok { - return u - } - return 0 - case constant.Float: - f, _ := constant.Float64Val(x) - return uint64(f) - } - panic(fmt.Sprintf("unexpected constant value: %T", c.Value)) -} - -// Float64 returns the numeric value of this constant truncated to fit -// a float64. -// -func (c *Const) Float64() float64 { - f, _ := constant.Float64Val(c.Value) - return f -} - -// Complex128 returns the complex value of this constant truncated to -// fit a complex128. 
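// Editorial aside (not part of the removed vendor file): the Const helpers
// above wrap values from the standard library's go/constant package. A
// standalone snippet exercising the calls they build on:
package main

import (
	"fmt"
	"go/constant"
)

func main() {
	i := constant.MakeInt64(42)
	s := constant.MakeString("hello")
	b := constant.MakeBool(true)

	if v, exact := constant.Int64Val(constant.ToInt(i)); exact {
		fmt.Println("int:", v) // 42
	}
	fmt.Println("string:", constant.StringVal(s)) // hello
	fmt.Println("bool:", constant.BoolVal(b))     // true

	// Kind reports the representation, much as RelString above switches on it.
	fmt.Println(i.Kind() == constant.Int, s.Kind() == constant.String) // true true
}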
-// -func (c *Const) Complex128() complex128 { - re, _ := constant.Float64Val(constant.Real(c.Value)) - im, _ := constant.Float64Val(constant.Imag(c.Value)) - return complex(re, im) -} diff --git a/vendor/honnef.co/go/tools/ir/create.go b/vendor/honnef.co/go/tools/ir/create.go deleted file mode 100644 index ff81a244bdc..00000000000 --- a/vendor/honnef.co/go/tools/ir/create.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ir - -// This file implements the CREATE phase of IR construction. -// See builder.go for explanation. - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" - "os" - "sync" - - "golang.org/x/tools/go/types/typeutil" -) - -// NewProgram returns a new IR Program. -// -// mode controls diagnostics and checking during IR construction. -// -func NewProgram(fset *token.FileSet, mode BuilderMode) *Program { - prog := &Program{ - Fset: fset, - imported: make(map[string]*Package), - packages: make(map[*types.Package]*Package), - thunks: make(map[selectionKey]*Function), - bounds: make(map[*types.Func]*Function), - mode: mode, - } - - h := typeutil.MakeHasher() // protected by methodsMu, in effect - prog.methodSets.SetHasher(h) - prog.canon.SetHasher(h) - - return prog -} - -// memberFromObject populates package pkg with a member for the -// typechecker object obj. -// -// For objects from Go source code, syntax is the associated syntax -// tree (for funcs and vars only); it will be used during the build -// phase. -// -func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { - name := obj.Name() - switch obj := obj.(type) { - case *types.Builtin: - if pkg.Pkg != types.Unsafe { - panic("unexpected builtin object: " + obj.String()) - } - - case *types.TypeName: - pkg.Members[name] = &Type{ - object: obj, - pkg: pkg, - } - - case *types.Const: - c := &NamedConst{ - object: obj, - Value: NewConst(obj.Val(), obj.Type()), - pkg: pkg, - } - pkg.values[obj] = c.Value - pkg.Members[name] = c - - case *types.Var: - g := &Global{ - Pkg: pkg, - name: name, - object: obj, - typ: types.NewPointer(obj.Type()), // address - } - pkg.values[obj] = g - pkg.Members[name] = g - - case *types.Func: - sig := obj.Type().(*types.Signature) - if sig.Recv() == nil && name == "init" { - pkg.ninit++ - name = fmt.Sprintf("init#%d", pkg.ninit) - } - fn := &Function{ - name: name, - object: obj, - Signature: sig, - Pkg: pkg, - Prog: pkg.Prog, - } - - fn.source = syntax - fn.initHTML(pkg.printFunc) - if syntax == nil { - fn.Synthetic = "loaded from gc object file" - } else { - fn.functionBody = new(functionBody) - } - - pkg.values[obj] = fn - pkg.Functions = append(pkg.Functions, fn) - if sig.Recv() == nil { - pkg.Members[name] = fn // package-level function - } - - default: // (incl. *types.Package) - panic("unexpected Object type: " + obj.String()) - } -} - -// membersFromDecl populates package pkg with members for each -// typechecker object (var, func, const or type) associated with the -// specified decl. 
-// -func membersFromDecl(pkg *Package, decl ast.Decl) { - switch decl := decl.(type) { - case *ast.GenDecl: // import, const, type or var - switch decl.Tok { - case token.CONST: - for _, spec := range decl.Specs { - for _, id := range spec.(*ast.ValueSpec).Names { - if !isBlankIdent(id) { - memberFromObject(pkg, pkg.info.Defs[id], nil) - } - } - } - - case token.VAR: - for _, spec := range decl.Specs { - for _, id := range spec.(*ast.ValueSpec).Names { - if !isBlankIdent(id) { - memberFromObject(pkg, pkg.info.Defs[id], spec) - } - } - } - - case token.TYPE: - for _, spec := range decl.Specs { - id := spec.(*ast.TypeSpec).Name - if !isBlankIdent(id) { - memberFromObject(pkg, pkg.info.Defs[id], nil) - } - } - } - - case *ast.FuncDecl: - id := decl.Name - if !isBlankIdent(id) { - memberFromObject(pkg, pkg.info.Defs[id], decl) - } - } -} - -// CreatePackage constructs and returns an IR Package from the -// specified type-checked, error-free file ASTs, and populates its -// Members mapping. -// -// importable determines whether this package should be returned by a -// subsequent call to ImportedPackage(pkg.Path()). -// -// The real work of building IR form for each function is not done -// until a subsequent call to Package.Build(). -// -func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package { - p := &Package{ - Prog: prog, - Members: make(map[string]Member), - values: make(map[types.Object]Value), - Pkg: pkg, - info: info, // transient (CREATE and BUILD phases) - files: files, // transient (CREATE and BUILD phases) - printFunc: prog.PrintFunc, - } - - // Add init() function. - p.init = &Function{ - name: "init", - Signature: new(types.Signature), - Synthetic: "package initializer", - Pkg: p, - Prog: prog, - functionBody: new(functionBody), - } - p.init.initHTML(prog.PrintFunc) - p.Members[p.init.name] = p.init - p.Functions = append(p.Functions, p.init) - - // CREATE phase. - // Allocate all package members: vars, funcs, consts and types. - if len(files) > 0 { - // Go source package. - for _, file := range files { - for _, decl := range file.Decls { - membersFromDecl(p, decl) - } - } - } else { - // GC-compiled binary package (or "unsafe") - // No code. - // No position information. - scope := p.Pkg.Scope() - for _, name := range scope.Names() { - obj := scope.Lookup(name) - memberFromObject(p, obj, nil) - if obj, ok := obj.(*types.TypeName); ok { - if named, ok := obj.Type().(*types.Named); ok { - for i, n := 0, named.NumMethods(); i < n; i++ { - memberFromObject(p, named.Method(i), nil) - } - } - } - } - } - - // Add initializer guard variable. - initguard := &Global{ - Pkg: p, - name: "init$guard", - typ: types.NewPointer(tBool), - } - p.Members[initguard.Name()] = initguard - - if prog.mode&GlobalDebug != 0 { - p.SetDebugMode(true) - } - - if prog.mode&PrintPackages != 0 { - printMu.Lock() - p.WriteTo(os.Stdout) - printMu.Unlock() - } - - if importable { - prog.imported[p.Pkg.Path()] = p - } - prog.packages[p.Pkg] = p - - return p -} - -// printMu serializes printing of Packages/Functions to stdout. -var printMu sync.Mutex - -// AllPackages returns a new slice containing all packages in the -// program prog in unspecified order. 
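// Editorial aside (not part of the removed vendor file): the CREATE phase
// above allocates one IR member per package-level object. This stdlib-only
// snippet type-checks a small source string and lists the package-scope
// objects that memberFromObject would receive:
package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package demo

const Answer = 42

var Greeting = "hello"

type Pair struct{ A, B int }

func Sum(p Pair) int { return p.A + p.B }
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("demo", fset, []*ast.File{file}, nil)
	if err != nil {
		panic(err)
	}
	scope := pkg.Scope()
	for _, name := range scope.Names() {
		fmt.Printf("%-8s %T\n", name, scope.Lookup(name)) // e.g. Answer *types.Const
	}
}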
-// -func (prog *Program) AllPackages() []*Package { - pkgs := make([]*Package, 0, len(prog.packages)) - for _, pkg := range prog.packages { - pkgs = append(pkgs, pkg) - } - return pkgs -} - -// ImportedPackage returns the importable Package whose PkgPath -// is path, or nil if no such Package has been created. -// -// A parameter to CreatePackage determines whether a package should be -// considered importable. For example, no import declaration can resolve -// to the ad-hoc main package created by 'go build foo.go'. -// -// TODO(adonovan): rethink this function and the "importable" concept; -// most packages are importable. This function assumes that all -// types.Package.Path values are unique within the ir.Program, which is -// false---yet this function remains very convenient. -// Clients should use (*Program).Package instead where possible. -// IR doesn't really need a string-keyed map of packages. -// -func (prog *Program) ImportedPackage(path string) *Package { - return prog.imported[path] -} diff --git a/vendor/honnef.co/go/tools/ir/doc.go b/vendor/honnef.co/go/tools/ir/doc.go deleted file mode 100644 index a5f42e4f47d..00000000000 --- a/vendor/honnef.co/go/tools/ir/doc.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ir defines a representation of the elements of Go programs -// (packages, types, functions, variables and constants) using a -// static single-information (SSI) form intermediate representation -// (IR) for the bodies of functions. -// -// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE. -// -// For an introduction to SSA form, upon which SSI builds, see -// http://en.wikipedia.org/wiki/Static_single_assignment_form. -// This page provides a broader reading list: -// http://www.dcs.gla.ac.uk/~jsinger/ssa.html. -// -// For an introduction to SSI form, see The static single information -// form by C. Scott Ananian. -// -// The level of abstraction of the IR form is intentionally close to -// the source language to facilitate construction of source analysis -// tools. It is not intended for machine code generation. -// -// The simplest way to create the IR of a package is -// to load typed syntax trees using golang.org/x/tools/go/packages, then -// invoke the irutil.Packages helper function. See ExampleLoadPackages -// and ExampleWholeProgram for examples. -// The resulting ir.Program contains all the packages and their -// members, but IR code is not created for function bodies until a -// subsequent call to (*Package).Build or (*Program).Build. -// -// The builder initially builds a naive IR form in which all local -// variables are addresses of stack locations with explicit loads and -// stores. Registerisation of eligible locals and φ-node insertion -// using dominance and dataflow are then performed as a second pass -// called "lifting" to improve the accuracy and performance of -// subsequent analyses; this pass can be skipped by setting the -// NaiveForm builder flag. -// -// The primary interfaces of this package are: -// -// - Member: a named member of a Go package. -// - Value: an expression that yields a value. -// - Instruction: a statement that consumes values and performs computation. -// - Node: a Value or Instruction (emphasizing its membership in the IR value graph) -// -// A computation that yields a result implements both the Value and -// Instruction interfaces. 
The following table shows for each -// concrete type which of these interfaces it implements. -// -// Value? Instruction? Member? -// *Alloc ✔ ✔ -// *BinOp ✔ ✔ -// *BlankStore ✔ -// *Builtin ✔ -// *Call ✔ ✔ -// *ChangeInterface ✔ ✔ -// *ChangeType ✔ ✔ -// *Const ✔ ✔ -// *Convert ✔ ✔ -// *DebugRef ✔ -// *Defer ✔ ✔ -// *Extract ✔ ✔ -// *Field ✔ ✔ -// *FieldAddr ✔ ✔ -// *FreeVar ✔ -// *Function ✔ ✔ (func) -// *Global ✔ ✔ (var) -// *Go ✔ ✔ -// *If ✔ -// *Index ✔ ✔ -// *IndexAddr ✔ ✔ -// *Jump ✔ -// *Load ✔ ✔ -// *MakeChan ✔ ✔ -// *MakeClosure ✔ ✔ -// *MakeInterface ✔ ✔ -// *MakeMap ✔ ✔ -// *MakeSlice ✔ ✔ -// *MapLookup ✔ ✔ -// *MapUpdate ✔ ✔ -// *NamedConst ✔ (const) -// *Next ✔ ✔ -// *Panic ✔ -// *Parameter ✔ ✔ -// *Phi ✔ ✔ -// *Range ✔ ✔ -// *Recv ✔ ✔ -// *Return ✔ -// *RunDefers ✔ -// *Select ✔ ✔ -// *Send ✔ ✔ -// *Sigma ✔ ✔ -// *Slice ✔ ✔ -// *Store ✔ ✔ -// *StringLookup ✔ ✔ -// *Type ✔ (type) -// *TypeAssert ✔ ✔ -// *UnOp ✔ ✔ -// *Unreachable ✔ -// -// Other key types in this package include: Program, Package, Function -// and BasicBlock. -// -// The program representation constructed by this package is fully -// resolved internally, i.e. it does not rely on the names of Values, -// Packages, Functions, Types or BasicBlocks for the correct -// interpretation of the program. Only the identities of objects and -// the topology of the IR and type graphs are semantically -// significant. (There is one exception: Ids, used to identify field -// and method names, contain strings.) Avoidance of name-based -// operations simplifies the implementation of subsequent passes and -// can make them very efficient. Many objects are nonetheless named -// to aid in debugging, but it is not essential that the names be -// either accurate or unambiguous. The public API exposes a number of -// name-based maps for client convenience. -// -// The ir/irutil package provides various utilities that depend only -// on the public API of this package. -// -// TODO(adonovan): Consider the exceptional control-flow implications -// of defer and recover(). -// -// TODO(adonovan): write a how-to document for all the various cases -// of trying to determine corresponding elements across the four -// domains of source locations, ast.Nodes, types.Objects, -// ir.Values/Instructions. -// -package ir // import "honnef.co/go/tools/ir" diff --git a/vendor/honnef.co/go/tools/ir/dom.go b/vendor/honnef.co/go/tools/ir/dom.go deleted file mode 100644 index 08c147df9b9..00000000000 --- a/vendor/honnef.co/go/tools/ir/dom.go +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ir - -// This file defines algorithms related to dominance. - -// Dominator tree construction ---------------------------------------- -// -// We use the algorithm described in Lengauer & Tarjan. 1979. A fast -// algorithm for finding dominators in a flowgraph. -// http://doi.acm.org/10.1145/357062.357071 -// -// We also apply the optimizations to SLT described in Georgiadis et -// al, Finding Dominators in Practice, JGAA 2006, -// http://jgaa.info/accepted/2006/GeorgiadisTarjanWerneck2006.10.1.pdf -// to avoid the need for buckets of size > 1. - -import ( - "bytes" - "fmt" - "io" - "math/big" - "os" - "sort" -) - -// Idom returns the block that immediately dominates b: -// its parent in the dominator tree, if any. -// The entry node (b.Index==0) does not have a parent. 
-// -func (b *BasicBlock) Idom() *BasicBlock { return b.dom.idom } - -// Dominees returns the list of blocks that b immediately dominates: -// its children in the dominator tree. -// -func (b *BasicBlock) Dominees() []*BasicBlock { return b.dom.children } - -// Dominates reports whether b dominates c. -func (b *BasicBlock) Dominates(c *BasicBlock) bool { - return b.dom.pre <= c.dom.pre && c.dom.post <= b.dom.post -} - -type byDomPreorder []*BasicBlock - -func (a byDomPreorder) Len() int { return len(a) } -func (a byDomPreorder) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byDomPreorder) Less(i, j int) bool { return a[i].dom.pre < a[j].dom.pre } - -// DomPreorder returns a new slice containing the blocks of f in -// dominator tree preorder. -// -func (f *Function) DomPreorder() []*BasicBlock { - n := len(f.Blocks) - order := make(byDomPreorder, n) - copy(order, f.Blocks) - sort.Sort(order) - return order -} - -// domInfo contains a BasicBlock's dominance information. -type domInfo struct { - idom *BasicBlock // immediate dominator (parent in domtree) - children []*BasicBlock // nodes immediately dominated by this one - pre, post int32 // pre- and post-order numbering within domtree -} - -// buildDomTree computes the dominator tree of f using the LT algorithm. -// Precondition: all blocks are reachable (e.g. optimizeBlocks has been run). -// -func buildDomTree(fn *Function) { - // The step numbers refer to the original LT paper; the - // reordering is due to Georgiadis. - - // Clear any previous domInfo. - for _, b := range fn.Blocks { - b.dom = domInfo{} - } - - idoms := make([]*BasicBlock, len(fn.Blocks)) - - order := make([]*BasicBlock, 0, len(fn.Blocks)) - seen := fn.blockset(0) - var dfs func(b *BasicBlock) - dfs = func(b *BasicBlock) { - if !seen.Add(b) { - return - } - for _, succ := range b.Succs { - dfs(succ) - } - if fn.fakeExits.Has(b) { - dfs(fn.Exit) - } - order = append(order, b) - b.post = len(order) - 1 - } - dfs(fn.Blocks[0]) - - for i := 0; i < len(order)/2; i++ { - o := len(order) - i - 1 - order[i], order[o] = order[o], order[i] - } - - idoms[fn.Blocks[0].Index] = fn.Blocks[0] - changed := true - for changed { - changed = false - // iterate over all nodes in reverse postorder, except for the - // entry node - for _, b := range order[1:] { - var newIdom *BasicBlock - do := func(p *BasicBlock) { - if idoms[p.Index] == nil { - return - } - if newIdom == nil { - newIdom = p - } else { - finger1 := p - finger2 := newIdom - for finger1 != finger2 { - for finger1.post < finger2.post { - finger1 = idoms[finger1.Index] - } - for finger2.post < finger1.post { - finger2 = idoms[finger2.Index] - } - } - newIdom = finger1 - } - } - for _, p := range b.Preds { - do(p) - } - if b == fn.Exit { - for _, p := range fn.Blocks { - if fn.fakeExits.Has(p) { - do(p) - } - } - } - - if idoms[b.Index] != newIdom { - idoms[b.Index] = newIdom - changed = true - } - } - } - - for i, b := range idoms { - fn.Blocks[i].dom.idom = b - if b == nil { - // malformed CFG - continue - } - if i == b.Index { - continue - } - b.dom.children = append(b.dom.children, fn.Blocks[i]) - } - - numberDomTree(fn.Blocks[0], 0, 0) - - // printDomTreeDot(os.Stderr, fn) // debugging - // printDomTreeText(os.Stderr, root, 0) // debugging - - if fn.Prog.mode&SanityCheckFunctions != 0 { - sanityCheckDomTree(fn) - } -} - -// buildPostDomTree is like buildDomTree, but builds the post-dominator tree instead. 
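// Editorial aside (not part of the removed vendor file): Dominates above
// answers dominance queries in O(1) by checking interval containment over
// the pre/post-order numbering stored in domInfo. The same trick on a toy
// tree (node names are invented for the demo):
package main

import "fmt"

type node struct {
	name      string
	children  []*node
	pre, post int
}

func number(n *node, pre, post int) (int, int) {
	n.pre = pre
	pre++
	for _, c := range n.children {
		pre, post = number(c, pre, post)
	}
	n.post = post
	post++
	return pre, post
}

// dominates reports whether a is an ancestor of b (inclusive) in the tree.
func dominates(a, b *node) bool {
	return a.pre <= b.pre && b.post <= a.post
}

func main() {
	entry := &node{name: "entry"}
	header := &node{name: "loop.header"}
	body := &node{name: "loop.body"}
	exit := &node{name: "exit"}
	entry.children = []*node{header, exit}
	header.children = []*node{body}

	number(entry, 0, 0)
	fmt.Println(dominates(entry, body))  // true
	fmt.Println(dominates(header, exit)) // false
}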
-func buildPostDomTree(fn *Function) { - // The step numbers refer to the original LT paper; the - // reordering is due to Georgiadis. - - // Clear any previous domInfo. - for _, b := range fn.Blocks { - b.pdom = domInfo{} - } - - idoms := make([]*BasicBlock, len(fn.Blocks)) - - order := make([]*BasicBlock, 0, len(fn.Blocks)) - seen := fn.blockset(0) - var dfs func(b *BasicBlock) - dfs = func(b *BasicBlock) { - if !seen.Add(b) { - return - } - for _, pred := range b.Preds { - dfs(pred) - } - if b == fn.Exit { - for _, p := range fn.Blocks { - if fn.fakeExits.Has(p) { - dfs(p) - } - } - } - order = append(order, b) - b.post = len(order) - 1 - } - dfs(fn.Exit) - - for i := 0; i < len(order)/2; i++ { - o := len(order) - i - 1 - order[i], order[o] = order[o], order[i] - } - - idoms[fn.Exit.Index] = fn.Exit - changed := true - for changed { - changed = false - // iterate over all nodes in reverse postorder, except for the - // exit node - for _, b := range order[1:] { - var newIdom *BasicBlock - do := func(p *BasicBlock) { - if idoms[p.Index] == nil { - return - } - if newIdom == nil { - newIdom = p - } else { - finger1 := p - finger2 := newIdom - for finger1 != finger2 { - for finger1.post < finger2.post { - finger1 = idoms[finger1.Index] - } - for finger2.post < finger1.post { - finger2 = idoms[finger2.Index] - } - } - newIdom = finger1 - } - } - for _, p := range b.Succs { - do(p) - } - if fn.fakeExits.Has(b) { - do(fn.Exit) - } - - if idoms[b.Index] != newIdom { - idoms[b.Index] = newIdom - changed = true - } - } - } - - for i, b := range idoms { - fn.Blocks[i].pdom.idom = b - if b == nil { - // malformed CFG - continue - } - if i == b.Index { - continue - } - b.pdom.children = append(b.pdom.children, fn.Blocks[i]) - } - - numberPostDomTree(fn.Exit, 0, 0) - - // printPostDomTreeDot(os.Stderr, fn) // debugging - // printPostDomTreeText(os.Stderr, fn.Exit, 0) // debugging - - if fn.Prog.mode&SanityCheckFunctions != 0 { // XXX - sanityCheckDomTree(fn) // XXX - } -} - -// numberDomTree sets the pre- and post-order numbers of a depth-first -// traversal of the dominator tree rooted at v. These are used to -// answer dominance queries in constant time. -// -func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) { - v.dom.pre = pre - pre++ - for _, child := range v.dom.children { - pre, post = numberDomTree(child, pre, post) - } - v.dom.post = post - post++ - return pre, post -} - -// numberPostDomTree sets the pre- and post-order numbers of a depth-first -// traversal of the post-dominator tree rooted at v. These are used to -// answer post-dominance queries in constant time. -// -func numberPostDomTree(v *BasicBlock, pre, post int32) (int32, int32) { - v.pdom.pre = pre - pre++ - for _, child := range v.pdom.children { - pre, post = numberPostDomTree(child, pre, post) - } - v.pdom.post = post - post++ - return pre, post -} - -// Testing utilities ---------------------------------------- - -// sanityCheckDomTree checks the correctness of the dominator tree -// computed by the LT algorithm by comparing against the dominance -// relation computed by a naive Kildall-style forward dataflow -// analysis (Algorithm 10.16 from the "Dragon" book). -// -func sanityCheckDomTree(f *Function) { - n := len(f.Blocks) - - // D[i] is the set of blocks that dominate f.Blocks[i], - // represented as a bit-set of block indices. - D := make([]big.Int, n) - - one := big.NewInt(1) - - // all is the set of all blocks; constant. 
- var all big.Int - all.Set(one).Lsh(&all, uint(n)).Sub(&all, one) - - // Initialization. - for i := range f.Blocks { - if i == 0 { - // A root is dominated only by itself. - D[i].SetBit(&D[0], 0, 1) - } else { - // All other blocks are (initially) dominated - // by every block. - D[i].Set(&all) - } - } - - // Iteration until fixed point. - for changed := true; changed; { - changed = false - for i, b := range f.Blocks { - if i == 0 { - continue - } - // Compute intersection across predecessors. - var x big.Int - x.Set(&all) - for _, pred := range b.Preds { - x.And(&x, &D[pred.Index]) - } - if b == f.Exit { - for _, p := range f.Blocks { - if f.fakeExits.Has(p) { - x.And(&x, &D[p.Index]) - } - } - } - x.SetBit(&x, i, 1) // a block always dominates itself. - if D[i].Cmp(&x) != 0 { - D[i].Set(&x) - changed = true - } - } - } - - // Check the entire relation. O(n^2). - ok := true - for i := 0; i < n; i++ { - for j := 0; j < n; j++ { - b, c := f.Blocks[i], f.Blocks[j] - actual := b.Dominates(c) - expected := D[j].Bit(i) == 1 - if actual != expected { - fmt.Fprintf(os.Stderr, "dominates(%s, %s)==%t, want %t\n", b, c, actual, expected) - ok = false - } - } - } - - preorder := f.DomPreorder() - for _, b := range f.Blocks { - if got := preorder[b.dom.pre]; got != b { - fmt.Fprintf(os.Stderr, "preorder[%d]==%s, want %s\n", b.dom.pre, got, b) - ok = false - } - } - - if !ok { - panic("sanityCheckDomTree failed for " + f.String()) - } - -} - -// Printing functions ---------------------------------------- - -// printDomTree prints the dominator tree as text, using indentation. -//lint:ignore U1000 used during debugging -func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) { - fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v) - for _, child := range v.dom.children { - printDomTreeText(buf, child, indent+1) - } -} - -// printDomTreeDot prints the dominator tree of f in AT&T GraphViz -// (.dot) format. -//lint:ignore U1000 used during debugging -func printDomTreeDot(buf io.Writer, f *Function) { - fmt.Fprintln(buf, "//", f) - fmt.Fprintln(buf, "digraph domtree {") - for i, b := range f.Blocks { - v := b.dom - fmt.Fprintf(buf, "\tn%d [label=\"%s (%d, %d)\",shape=\"rectangle\"];\n", v.pre, b, v.pre, v.post) - // TODO(adonovan): improve appearance of edges - // belonging to both dominator tree and CFG. - - // Dominator tree edge. - if i != 0 { - fmt.Fprintf(buf, "\tn%d -> n%d [style=\"solid\",weight=100];\n", v.idom.dom.pre, v.pre) - } - // CFG edges. - for _, pred := range b.Preds { - fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0];\n", pred.dom.pre, v.pre) - } - } - fmt.Fprintln(buf, "}") -} - -// printDomTree prints the dominator tree as text, using indentation. -//lint:ignore U1000 used during debugging -func printPostDomTreeText(buf io.Writer, v *BasicBlock, indent int) { - fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v) - for _, child := range v.pdom.children { - printPostDomTreeText(buf, child, indent+1) - } -} - -// printDomTreeDot prints the dominator tree of f in AT&T GraphViz -// (.dot) format. -//lint:ignore U1000 used during debugging -func printPostDomTreeDot(buf io.Writer, f *Function) { - fmt.Fprintln(buf, "//", f) - fmt.Fprintln(buf, "digraph pdomtree {") - for _, b := range f.Blocks { - v := b.pdom - fmt.Fprintf(buf, "\tn%d [label=\"%s (%d, %d)\",shape=\"rectangle\"];\n", v.pre, b, v.pre, v.post) - // TODO(adonovan): improve appearance of edges - // belonging to both dominator tree and CFG. - - // Dominator tree edge. 
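// Editorial aside (not part of the removed vendor file): sanityCheckDomTree
// above recomputes dominance with a naive fixed-point iteration over
// big.Int bitsets and compares it with the LT result. The same dataflow on
// a hard-coded four-block CFG (block 0 is the entry; the CFG is invented):
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// preds[i] lists the predecessors of block i.
	preds := [][]int{
		0: {},     // entry
		1: {0, 2}, // loop header
		2: {1},    // loop body
		3: {1},    // exit
	}
	n := len(preds)

	one := big.NewInt(1)
	var all big.Int
	all.Set(one).Lsh(&all, uint(n)).Sub(&all, one) // the set of all blocks

	D := make([]big.Int, n)
	D[0].SetBit(&D[0], 0, 1) // the entry is dominated only by itself
	for i := 1; i < n; i++ {
		D[i].Set(&all) // every other block starts fully dominated
	}

	for changed := true; changed; {
		changed = false
		for i := 1; i < n; i++ {
			var x big.Int
			x.Set(&all)
			for _, p := range preds[i] {
				x.And(&x, &D[p]) // intersect across predecessors
			}
			x.SetBit(&x, i, 1) // a block always dominates itself
			if D[i].Cmp(&x) != 0 {
				D[i].Set(&x)
				changed = true
			}
		}
	}

	for i := range D {
		fmt.Printf("dominators of block %d: %b\n", i, &D[i])
	}
}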
- if b != f.Exit { - fmt.Fprintf(buf, "\tn%d -> n%d [style=\"solid\",weight=100];\n", v.idom.pdom.pre, v.pre) - } - // CFG edges. - for _, pred := range b.Preds { - fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0];\n", pred.pdom.pre, v.pre) - } - } - fmt.Fprintln(buf, "}") -} diff --git a/vendor/honnef.co/go/tools/ir/emit.go b/vendor/honnef.co/go/tools/ir/emit.go deleted file mode 100644 index 5fa137af9ec..00000000000 --- a/vendor/honnef.co/go/tools/ir/emit.go +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ir - -// Helpers for emitting IR instructions. - -import ( - "fmt" - "go/ast" - "go/constant" - "go/token" - "go/types" -) - -// emitNew emits to f a new (heap Alloc) instruction allocating an -// object of type typ. pos is the optional source location. -// -func emitNew(f *Function, typ types.Type, source ast.Node) *Alloc { - v := &Alloc{Heap: true} - v.setType(types.NewPointer(typ)) - f.emit(v, source) - return v -} - -// emitLoad emits to f an instruction to load the address addr into a -// new temporary, and returns the value so defined. -// -func emitLoad(f *Function, addr Value, source ast.Node) *Load { - v := &Load{X: addr} - v.setType(deref(addr.Type())) - f.emit(v, source) - return v -} - -func emitRecv(f *Function, ch Value, commaOk bool, typ types.Type, source ast.Node) Value { - recv := &Recv{ - Chan: ch, - CommaOk: commaOk, - } - recv.setType(typ) - return f.emit(recv, source) -} - -// emitDebugRef emits to f a DebugRef pseudo-instruction associating -// expression e with value v. -// -func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) { - if !f.debugInfo() { - return // debugging not enabled - } - if v == nil || e == nil { - panic("nil") - } - var obj types.Object - e = unparen(e) - if id, ok := e.(*ast.Ident); ok { - if isBlankIdent(id) { - return - } - obj = f.Pkg.objectOf(id) - switch obj.(type) { - case *types.Nil, *types.Const, *types.Builtin: - return - } - } - f.emit(&DebugRef{ - X: v, - Expr: e, - IsAddr: isAddr, - object: obj, - }, nil) -} - -// emitArith emits to f code to compute the binary operation op(x, y) -// where op is an eager shift, logical or arithmetic operation. -// (Use emitCompare() for comparisons and Builder.logicalBinop() for -// non-eager operations.) -// -func emitArith(f *Function, op token.Token, x, y Value, t types.Type, source ast.Node) Value { - switch op { - case token.SHL, token.SHR: - x = emitConv(f, x, t, source) - // y may be signed or an 'untyped' constant. - // TODO(adonovan): whence signed values? - if b, ok := y.Type().Underlying().(*types.Basic); ok && b.Info()&types.IsUnsigned == 0 { - y = emitConv(f, y, types.Typ[types.Uint64], source) - } - - case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT: - x = emitConv(f, x, t, source) - y = emitConv(f, y, t, source) - - default: - panic("illegal op in emitArith: " + op.String()) - - } - v := &BinOp{ - Op: op, - X: x, - Y: y, - } - v.setType(t) - return f.emit(v, source) -} - -// emitCompare emits to f code compute the boolean result of -// comparison comparison 'x op y'. 
-// -func emitCompare(f *Function, op token.Token, x, y Value, source ast.Node) Value { - xt := x.Type().Underlying() - yt := y.Type().Underlying() - - // Special case to optimise a tagless SwitchStmt so that - // these are equivalent - // switch { case e: ...} - // switch true { case e: ... } - // if e==true { ... } - // even in the case when e's type is an interface. - // TODO(adonovan): opt: generalise to x==true, false!=y, etc. - if x, ok := x.(*Const); ok && op == token.EQL && x.Value != nil && x.Value.Kind() == constant.Bool && constant.BoolVal(x.Value) { - if yt, ok := yt.(*types.Basic); ok && yt.Info()&types.IsBoolean != 0 { - return y - } - } - - if types.Identical(xt, yt) { - // no conversion necessary - } else if _, ok := xt.(*types.Interface); ok { - y = emitConv(f, y, x.Type(), source) - } else if _, ok := yt.(*types.Interface); ok { - x = emitConv(f, x, y.Type(), source) - } else if _, ok := x.(*Const); ok { - x = emitConv(f, x, y.Type(), source) - } else if _, ok := y.(*Const); ok { - y = emitConv(f, y, x.Type(), source) - //lint:ignore SA9003 no-op - } else { - // other cases, e.g. channels. No-op. - } - - v := &BinOp{ - Op: op, - X: x, - Y: y, - } - v.setType(tBool) - return f.emit(v, source) -} - -// isValuePreserving returns true if a conversion from ut_src to -// ut_dst is value-preserving, i.e. just a change of type. -// Precondition: neither argument is a named type. -// -func isValuePreserving(ut_src, ut_dst types.Type) bool { - // Identical underlying types? - if structTypesIdentical(ut_dst, ut_src) { - return true - } - - switch ut_dst.(type) { - case *types.Chan: - // Conversion between channel types? - _, ok := ut_src.(*types.Chan) - return ok - - case *types.Pointer: - // Conversion between pointers with identical base types? - _, ok := ut_src.(*types.Pointer) - return ok - } - return false -} - -// emitConv emits to f code to convert Value val to exactly type typ, -// and returns the converted value. Implicit conversions are required -// by language assignability rules in assignments, parameter passing, -// etc. Conversions cannot fail dynamically. -// -func emitConv(f *Function, val Value, typ types.Type, source ast.Node) Value { - t_src := val.Type() - - // Identical types? Conversion is a no-op. - if types.Identical(t_src, typ) { - return val - } - - ut_dst := typ.Underlying() - ut_src := t_src.Underlying() - - // Just a change of type, but not value or representation? - if isValuePreserving(ut_src, ut_dst) { - c := &ChangeType{X: val} - c.setType(typ) - return f.emit(c, source) - } - - // Conversion to, or construction of a value of, an interface type? - if _, ok := ut_dst.(*types.Interface); ok { - // Assignment from one interface type to another? - if _, ok := ut_src.(*types.Interface); ok { - c := &ChangeInterface{X: val} - c.setType(typ) - return f.emit(c, source) - } - - // Untyped nil constant? Return interface-typed nil constant. - if ut_src == tUntypedNil { - return emitConst(f, nilConst(typ)) - } - - // Convert (non-nil) "untyped" literals to their default type. - if t, ok := ut_src.(*types.Basic); ok && t.Info()&types.IsUntyped != 0 { - val = emitConv(f, val, types.Default(ut_src), source) - } - - f.Pkg.Prog.needMethodsOf(val.Type()) - mi := &MakeInterface{X: val} - mi.setType(typ) - return f.emit(mi, source) - } - - // Conversion of a compile-time constant value? 
- if c, ok := val.(*Const); ok { - if _, ok := ut_dst.(*types.Basic); ok || c.IsNil() { - // Conversion of a compile-time constant to - // another constant type results in a new - // constant of the destination type and - // (initially) the same abstract value. - // We don't truncate the value yet. - return emitConst(f, NewConst(c.Value, typ)) - } - - // We're converting from constant to non-constant type, - // e.g. string -> []byte/[]rune. - } - - // A representation-changing conversion? - // At least one of {ut_src,ut_dst} must be *Basic. - // (The other may be []byte or []rune.) - _, ok1 := ut_src.(*types.Basic) - _, ok2 := ut_dst.(*types.Basic) - if ok1 || ok2 { - c := &Convert{X: val} - c.setType(typ) - return f.emit(c, source) - } - - panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), typ)) -} - -// emitStore emits to f an instruction to store value val at location -// addr, applying implicit conversions as required by assignability rules. -// -func emitStore(f *Function, addr, val Value, source ast.Node) *Store { - s := &Store{ - Addr: addr, - Val: emitConv(f, val, deref(addr.Type()), source), - } - // make sure we call getMem after the call to emitConv, which may - // itself update the memory state - f.emit(s, source) - return s -} - -// emitJump emits to f a jump to target, and updates the control-flow graph. -// Postcondition: f.currentBlock is nil. -// -func emitJump(f *Function, target *BasicBlock, source ast.Node) *Jump { - b := f.currentBlock - j := new(Jump) - b.emit(j, source) - addEdge(b, target) - f.currentBlock = nil - return j -} - -// emitIf emits to f a conditional jump to tblock or fblock based on -// cond, and updates the control-flow graph. -// Postcondition: f.currentBlock is nil. -// -func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock, source ast.Node) *If { - b := f.currentBlock - stmt := &If{Cond: cond} - b.emit(stmt, source) - addEdge(b, tblock) - addEdge(b, fblock) - f.currentBlock = nil - return stmt -} - -// emitExtract emits to f an instruction to extract the index'th -// component of tuple. It returns the extracted value. -// -func emitExtract(f *Function, tuple Value, index int, source ast.Node) Value { - e := &Extract{Tuple: tuple, Index: index} - e.setType(tuple.Type().(*types.Tuple).At(index).Type()) - return f.emit(e, source) -} - -// emitTypeAssert emits to f a type assertion value := x.(t) and -// returns the value. x.Type() must be an interface. -// -func emitTypeAssert(f *Function, x Value, t types.Type, source ast.Node) Value { - a := &TypeAssert{X: x, AssertedType: t} - a.setType(t) - return f.emit(a, source) -} - -// emitTypeTest emits to f a type test value,ok := x.(t) and returns -// a (value, ok) tuple. x.Type() must be an interface. -// -func emitTypeTest(f *Function, x Value, t types.Type, source ast.Node) Value { - a := &TypeAssert{ - X: x, - AssertedType: t, - CommaOk: true, - } - a.setType(types.NewTuple( - newVar("value", t), - varOk, - )) - return f.emit(a, source) -} - -// emitTailCall emits to f a function call in tail position. The -// caller is responsible for all fields of 'call' except its type. -// Intended for wrapper methods. -// Precondition: f does/will not use deferred procedure calls. -// Postcondition: f.currentBlock is nil. 
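// Editorial aside (not part of the removed vendor file): emitConv above
// separates no-op type changes, interface constructions and
// representation-changing conversions. The go/types predicates involved can
// be exercised directly; the type pairs below are arbitrary examples:
package main

import (
	"fmt"
	"go/types"
)

func main() {
	str := types.Typ[types.String]
	byteSlice := types.NewSlice(types.Typ[types.Byte])
	intT := types.Typ[types.Int]
	floatT := types.Typ[types.Float64]

	// string -> []byte is legal but changes representation (a Convert).
	fmt.Println(types.ConvertibleTo(str, byteSlice), types.Identical(str, byteSlice)) // true false

	// int -> float64 likewise changes representation.
	fmt.Println(types.ConvertibleTo(intT, floatT)) // true

	// Identical types need no conversion at all.
	fmt.Println(types.Identical(intT, types.Typ[types.Int])) // true
}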
-// -func emitTailCall(f *Function, call *Call, source ast.Node) { - tresults := f.Signature.Results() - nr := tresults.Len() - if nr == 1 { - call.typ = tresults.At(0).Type() - } else { - call.typ = tresults - } - tuple := f.emit(call, source) - var ret Return - switch nr { - case 0: - // no-op - case 1: - ret.Results = []Value{tuple} - default: - for i := 0; i < nr; i++ { - v := emitExtract(f, tuple, i, source) - // TODO(adonovan): in principle, this is required: - // v = emitConv(f, o.Type, f.Signature.Results[i].Type) - // but in practice emitTailCall is only used when - // the types exactly match. - ret.Results = append(ret.Results, v) - } - } - - f.Exit = f.newBasicBlock("exit") - emitJump(f, f.Exit, source) - f.currentBlock = f.Exit - f.emit(&ret, source) - f.currentBlock = nil -} - -// emitImplicitSelections emits to f code to apply the sequence of -// implicit field selections specified by indices to base value v, and -// returns the selected value. -// -// If v is the address of a struct, the result will be the address of -// a field; if it is the value of a struct, the result will be the -// value of a field. -// -func emitImplicitSelections(f *Function, v Value, indices []int, source ast.Node) Value { - for _, index := range indices { - fld := deref(v.Type()).Underlying().(*types.Struct).Field(index) - - if isPointer(v.Type()) { - instr := &FieldAddr{ - X: v, - Field: index, - } - instr.setType(types.NewPointer(fld.Type())) - v = f.emit(instr, source) - // Load the field's value iff indirectly embedded. - if isPointer(fld.Type()) { - v = emitLoad(f, v, source) - } - } else { - instr := &Field{ - X: v, - Field: index, - } - instr.setType(fld.Type()) - v = f.emit(instr, source) - } - } - return v -} - -// emitFieldSelection emits to f code to select the index'th field of v. -// -// If wantAddr, the input must be a pointer-to-struct and the result -// will be the field's address; otherwise the result will be the -// field's value. -// Ident id is used for position and debug info. -// -func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value { - fld := deref(v.Type()).Underlying().(*types.Struct).Field(index) - if isPointer(v.Type()) { - instr := &FieldAddr{ - X: v, - Field: index, - } - instr.setSource(id) - instr.setType(types.NewPointer(fld.Type())) - v = f.emit(instr, id) - // Load the field's value iff we don't want its address. - if !wantAddr { - v = emitLoad(f, v, id) - } - } else { - instr := &Field{ - X: v, - Field: index, - } - instr.setSource(id) - instr.setType(fld.Type()) - v = f.emit(instr, id) - } - emitDebugRef(f, id, v, wantAddr) - return v -} - -// zeroValue emits to f code to produce a zero value of type t, -// and returns it. 
-// -func zeroValue(f *Function, t types.Type, source ast.Node) Value { - switch t.Underlying().(type) { - case *types.Struct, *types.Array: - return emitLoad(f, f.addLocal(t, source), source) - default: - return emitConst(f, zeroConst(t)) - } -} - -func emitConst(f *Function, c *Const) *Const { - f.consts = append(f.consts, c) - return c -} diff --git a/vendor/honnef.co/go/tools/ir/exits.go b/vendor/honnef.co/go/tools/ir/exits.go deleted file mode 100644 index 10cda7bb66e..00000000000 --- a/vendor/honnef.co/go/tools/ir/exits.go +++ /dev/null @@ -1,271 +0,0 @@ -package ir - -import ( - "go/types" -) - -func (b *builder) buildExits(fn *Function) { - if obj := fn.Object(); obj != nil { - switch obj.Pkg().Path() { - case "runtime": - switch obj.Name() { - case "exit": - fn.WillExit = true - return - case "throw": - fn.WillExit = true - return - case "Goexit": - fn.WillUnwind = true - return - } - case "github.com/sirupsen/logrus": - switch obj.(*types.Func).FullName() { - case "(*github.com/sirupsen/logrus.Logger).Exit": - // Technically, this method does not unconditionally exit - // the process. It dynamically calls a function stored in - // the logger. If the function is nil, it defaults to - // os.Exit. - // - // The main intent of this method is to terminate the - // process, and that's what the vast majority of people - // will use it for. We'll happily accept some false - // negatives to avoid a lot of false positives. - fn.WillExit = true - return - case "(*github.com/sirupsen/logrus.Logger).Panic", - "(*github.com/sirupsen/logrus.Logger).Panicf", - "(*github.com/sirupsen/logrus.Logger).Panicln": - - // These methods will always panic, but that's not - // statically known from the code alone, because they - // take a detour through the generic Log methods. - fn.WillUnwind = true - return - case "(*github.com/sirupsen/logrus.Entry).Panicf", - "(*github.com/sirupsen/logrus.Entry).Panicln": - - // Entry.Panic has an explicit panic, but Panicf and - // Panicln do not, relying fully on the generic Log - // method. - fn.WillUnwind = true - return - case "(*github.com/sirupsen/logrus.Logger).Log", - "(*github.com/sirupsen/logrus.Logger).Logf", - "(*github.com/sirupsen/logrus.Logger).Logln": - // TODO(dh): we cannot handle these case. Whether they - // exit or unwind depends on the level, which is set - // via the first argument. We don't currently support - // call-site-specific exit information. - } - } - } - - buildDomTree(fn) - - isRecoverCall := func(instr Instruction) bool { - if instr, ok := instr.(*Call); ok { - if builtin, ok := instr.Call.Value.(*Builtin); ok { - if builtin.Name() == "recover" { - return true - } - } - } - return false - } - - // All panics branch to the exit block, which means that if every - // possible path through the function panics, then all - // predecessors of the exit block must panic. - willPanic := true - for _, pred := range fn.Exit.Preds { - if _, ok := pred.Control().(*Panic); !ok { - willPanic = false - } - } - if willPanic { - recovers := false - recoverLoop: - for _, u := range fn.Blocks { - for _, instr := range u.Instrs { - if instr, ok := instr.(*Defer); ok { - call := instr.Call.StaticCallee() - if call == nil { - // not a static call, so we can't be sure the - // deferred call isn't calling recover - recovers = true - break recoverLoop - } - if len(call.Blocks) == 0 { - // external function, we don't know what's - // happening inside it - // - // TODO(dh): this includes functions from - // imported packages, due to how go/analysis - // works. 
We could introduce another fact, - // like we've done for exiting and unwinding, - // but it doesn't seem worth it. Virtually all - // uses of recover will be in closures. - recovers = true - break recoverLoop - } - for _, y := range call.Blocks { - for _, instr2 := range y.Instrs { - if isRecoverCall(instr2) { - recovers = true - break recoverLoop - } - } - } - } - } - } - if !recovers { - fn.WillUnwind = true - return - } - } - - // TODO(dh): don't check that any specific call dominates the exit - // block. instead, check that all calls combined cover every - // possible path through the function. - exits := NewBlockSet(len(fn.Blocks)) - unwinds := NewBlockSet(len(fn.Blocks)) - for _, u := range fn.Blocks { - for _, instr := range u.Instrs { - if instr, ok := instr.(CallInstruction); ok { - switch instr.(type) { - case *Defer, *Call: - default: - continue - } - if instr.Common().IsInvoke() { - // give up - return - } - var call *Function - switch instr.Common().Value.(type) { - case *Function, *MakeClosure: - call = instr.Common().StaticCallee() - case *Builtin: - // the only builtins that affect control flow are - // panic and recover, and we've already handled - // those - continue - default: - // dynamic dispatch - return - } - // buildFunction is idempotent. if we're part of a - // (mutually) recursive call chain, then buildFunction - // will immediately return, and fn.WillExit will be false. - if call.Package() == fn.Package() { - b.buildFunction(call) - } - dom := u.Dominates(fn.Exit) - if call.WillExit { - if dom { - fn.WillExit = true - return - } - exits.Add(u) - } else if call.WillUnwind { - if dom { - fn.WillUnwind = true - return - } - unwinds.Add(u) - } - } - } - } - - // depth-first search trying to find a path to the exit block that - // doesn't cross any of the blacklisted blocks - seen := NewBlockSet(len(fn.Blocks)) - var findPath func(root *BasicBlock, bl *BlockSet) bool - findPath = func(root *BasicBlock, bl *BlockSet) bool { - if root == fn.Exit { - return true - } - if seen.Has(root) { - return false - } - if bl.Has(root) { - return false - } - seen.Add(root) - for _, succ := range root.Succs { - if findPath(succ, bl) { - return true - } - } - return false - } - - if exits.Num() > 0 { - if !findPath(fn.Blocks[0], exits) { - fn.WillExit = true - return - } - } - if unwinds.Num() > 0 { - seen.Clear() - if !findPath(fn.Blocks[0], unwinds) { - fn.WillUnwind = true - return - } - } -} - -func (b *builder) addUnreachables(fn *Function) { - for _, bb := range fn.Blocks { - for i, instr := range bb.Instrs { - if instr, ok := instr.(*Call); ok { - var call *Function - switch v := instr.Common().Value.(type) { - case *Function: - call = v - case *MakeClosure: - call = v.Fn.(*Function) - } - if call == nil { - continue - } - if call.Package() == fn.Package() { - // make sure we have information on all functions in this package - b.buildFunction(call) - } - if call.WillExit { - // This call will cause the process to terminate. - // Remove remaining instructions in the block and - // replace any control flow with Unreachable. - for _, succ := range bb.Succs { - succ.removePred(bb) - } - bb.Succs = bb.Succs[:0] - - bb.Instrs = bb.Instrs[:i+1] - bb.emit(new(Unreachable), instr.Source()) - addEdge(bb, fn.Exit) - break - } else if call.WillUnwind { - // This call will cause the goroutine to terminate - // and defers to run (i.e. a panic or - // runtime.Goexit). 
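// Editorial aside (not part of the removed vendor file): buildExits above
// concludes that a function always exits (or always unwinds) when no path
// from the entry block to the exit block avoids the flagged blocks. A toy
// version of that findPath DFS over an invented adjacency list:
package main

import "fmt"

// pathAvoiding reports whether target is reachable from cur without
// entering any block named in blocked.
func pathAvoiding(succs map[string][]string, cur, target string, blocked, seen map[string]bool) bool {
	if cur == target {
		return true
	}
	if seen[cur] || blocked[cur] {
		return false
	}
	seen[cur] = true
	for _, next := range succs[cur] {
		if pathAvoiding(succs, next, target, blocked, seen) {
			return true
		}
	}
	return false
}

func main() {
	succs := map[string][]string{
		"entry": {"check", "fatal"},
		"check": {"exit"},
		"fatal": {"exit"}, // imagine this block calls a function that never returns
	}
	blocked := map[string]bool{"fatal": true}

	reachable := pathAvoiding(succs, "entry", "exit", blocked, map[string]bool{})
	fmt.Println(reachable) // true: at least one path avoids the exiting call
}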
Remove remaining instructions - // in the block and replace any control flow with - // an unconditional jump to the exit block. - for _, succ := range bb.Succs { - succ.removePred(bb) - } - bb.Succs = bb.Succs[:0] - - bb.Instrs = bb.Instrs[:i+1] - bb.emit(new(Jump), instr.Source()) - addEdge(bb, fn.Exit) - break - } - } - } - } -} diff --git a/vendor/honnef.co/go/tools/ir/func.go b/vendor/honnef.co/go/tools/ir/func.go deleted file mode 100644 index 386d82b6708..00000000000 --- a/vendor/honnef.co/go/tools/ir/func.go +++ /dev/null @@ -1,961 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ir - -// This file implements the Function and BasicBlock types. - -import ( - "bytes" - "fmt" - "go/ast" - "go/constant" - "go/format" - "go/token" - "go/types" - "io" - "os" - "strings" -) - -// addEdge adds a control-flow graph edge from from to to. -func addEdge(from, to *BasicBlock) { - from.Succs = append(from.Succs, to) - to.Preds = append(to.Preds, from) -} - -// Control returns the last instruction in the block. -func (b *BasicBlock) Control() Instruction { - if len(b.Instrs) == 0 { - return nil - } - return b.Instrs[len(b.Instrs)-1] -} - -// SIgmaFor returns the sigma node for v coming from pred. -func (b *BasicBlock) SigmaFor(v Value, pred *BasicBlock) *Sigma { - for _, instr := range b.Instrs { - sigma, ok := instr.(*Sigma) - if !ok { - // no more sigmas - return nil - } - if sigma.From == pred && sigma.X == v { - return sigma - } - } - return nil -} - -// Parent returns the function that contains block b. -func (b *BasicBlock) Parent() *Function { return b.parent } - -// String returns a human-readable label of this block. -// It is not guaranteed unique within the function. -// -func (b *BasicBlock) String() string { - return fmt.Sprintf("%d", b.Index) -} - -// emit appends an instruction to the current basic block. -// If the instruction defines a Value, it is returned. -// -func (b *BasicBlock) emit(i Instruction, source ast.Node) Value { - i.setSource(source) - i.setBlock(b) - b.Instrs = append(b.Instrs, i) - v, _ := i.(Value) - return v -} - -// predIndex returns the i such that b.Preds[i] == c or panics if -// there is none. -func (b *BasicBlock) predIndex(c *BasicBlock) int { - for i, pred := range b.Preds { - if pred == c { - return i - } - } - panic(fmt.Sprintf("no edge %s -> %s", c, b)) -} - -// succIndex returns the i such that b.Succs[i] == c or -1 if there is none. -func (b *BasicBlock) succIndex(c *BasicBlock) int { - for i, succ := range b.Succs { - if succ == c { - return i - } - } - return -1 -} - -// hasPhi returns true if b.Instrs contains φ-nodes. -func (b *BasicBlock) hasPhi() bool { - _, ok := b.Instrs[0].(*Phi) - return ok -} - -func (b *BasicBlock) Phis() []Instruction { - return b.phis() -} - -// phis returns the prefix of b.Instrs containing all the block's φ-nodes. -func (b *BasicBlock) phis() []Instruction { - for i, instr := range b.Instrs { - if _, ok := instr.(*Phi); !ok { - return b.Instrs[:i] - } - } - return nil // unreachable in well-formed blocks -} - -// replacePred replaces all occurrences of p in b's predecessor list with q. -// Ordinarily there should be at most one. -// -func (b *BasicBlock) replacePred(p, q *BasicBlock) { - for i, pred := range b.Preds { - if pred == p { - b.Preds[i] = q - } - } -} - -// replaceSucc replaces all occurrences of p in b's successor list with q. -// Ordinarily there should be at most one. 
-// -func (b *BasicBlock) replaceSucc(p, q *BasicBlock) { - for i, succ := range b.Succs { - if succ == p { - b.Succs[i] = q - } - } -} - -// removePred removes all occurrences of p in b's -// predecessor list and φ-nodes. -// Ordinarily there should be at most one. -// -func (b *BasicBlock) removePred(p *BasicBlock) { - phis := b.phis() - - // We must preserve edge order for φ-nodes. - j := 0 - for i, pred := range b.Preds { - if pred != p { - b.Preds[j] = b.Preds[i] - // Strike out φ-edge too. - for _, instr := range phis { - phi := instr.(*Phi) - phi.Edges[j] = phi.Edges[i] - } - j++ - } - } - // Nil out b.Preds[j:] and φ-edges[j:] to aid GC. - for i := j; i < len(b.Preds); i++ { - b.Preds[i] = nil - for _, instr := range phis { - instr.(*Phi).Edges[i] = nil - } - } - b.Preds = b.Preds[:j] - for _, instr := range phis { - phi := instr.(*Phi) - phi.Edges = phi.Edges[:j] - } -} - -// Destinations associated with unlabelled for/switch/select stmts. -// We push/pop one of these as we enter/leave each construct and for -// each BranchStmt we scan for the innermost target of the right type. -// -type targets struct { - tail *targets // rest of stack - _break *BasicBlock - _continue *BasicBlock - _fallthrough *BasicBlock -} - -// Destinations associated with a labelled block. -// We populate these as labels are encountered in forward gotos or -// labelled statements. -// -type lblock struct { - _goto *BasicBlock - _break *BasicBlock - _continue *BasicBlock -} - -// labelledBlock returns the branch target associated with the -// specified label, creating it if needed. -// -func (f *Function) labelledBlock(label *ast.Ident) *lblock { - lb := f.lblocks[label.Obj] - if lb == nil { - lb = &lblock{_goto: f.newBasicBlock(label.Name)} - if f.lblocks == nil { - f.lblocks = make(map[*ast.Object]*lblock) - } - f.lblocks[label.Obj] = lb - } - return lb -} - -// addParam adds a (non-escaping) parameter to f.Params of the -// specified name, type and source position. -// -func (f *Function) addParam(name string, typ types.Type, source ast.Node) *Parameter { - var b *BasicBlock - if len(f.Blocks) > 0 { - b = f.Blocks[0] - } - v := &Parameter{ - name: name, - } - v.setBlock(b) - v.setType(typ) - v.setSource(source) - f.Params = append(f.Params, v) - if b != nil { - // There may be no blocks if this function has no body. We - // still create params, but aren't interested in the - // instruction. - f.Blocks[0].Instrs = append(f.Blocks[0].Instrs, v) - } - return v -} - -func (f *Function) addParamObj(obj types.Object, source ast.Node) *Parameter { - name := obj.Name() - if name == "" { - name = fmt.Sprintf("arg%d", len(f.Params)) - } - param := f.addParam(name, obj.Type(), source) - param.object = obj - return param -} - -// addSpilledParam declares a parameter that is pre-spilled to the -// stack; the function body will load/store the spilled location. -// Subsequent lifting will eliminate spills where possible. -// -func (f *Function) addSpilledParam(obj types.Object, source ast.Node) { - param := f.addParamObj(obj, source) - spill := &Alloc{} - spill.setType(types.NewPointer(obj.Type())) - spill.source = source - f.objects[obj] = spill - f.Locals = append(f.Locals, spill) - f.emit(spill, source) - emitStore(f, spill, param, source) - // f.emit(&Store{Addr: spill, Val: param}) -} - -// startBody initializes the function prior to generating IR code for its body. -// Precondition: f.Type() already set. 
-// -func (f *Function) startBody() { - entry := f.newBasicBlock("entry") - f.currentBlock = entry - f.objects = make(map[types.Object]Value) // needed for some synthetics, e.g. init -} - -func (f *Function) blockset(i int) *BlockSet { - bs := &f.blocksets[i] - if len(bs.values) != len(f.Blocks) { - if cap(bs.values) >= len(f.Blocks) { - bs.values = bs.values[:len(f.Blocks)] - bs.Clear() - } else { - bs.values = make([]bool, len(f.Blocks)) - } - } else { - bs.Clear() - } - return bs -} - -func (f *Function) exitBlock() { - old := f.currentBlock - - f.Exit = f.newBasicBlock("exit") - f.currentBlock = f.Exit - - ret := f.results() - results := make([]Value, len(ret)) - // Run function calls deferred in this - // function when explicitly returning from it. - f.emit(new(RunDefers), nil) - for i, r := range ret { - results[i] = emitLoad(f, r, nil) - } - - f.emit(&Return{Results: results}, nil) - f.currentBlock = old -} - -// createSyntacticParams populates f.Params and generates code (spills -// and named result locals) for all the parameters declared in the -// syntax. In addition it populates the f.objects mapping. -// -// Preconditions: -// f.startBody() was called. -// Postcondition: -// len(f.Params) == len(f.Signature.Params) + (f.Signature.Recv() ? 1 : 0) -// -func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.FuncType) { - // Receiver (at most one inner iteration). - if recv != nil { - for _, field := range recv.List { - for _, n := range field.Names { - f.addSpilledParam(f.Pkg.info.Defs[n], n) - } - // Anonymous receiver? No need to spill. - if field.Names == nil { - f.addParamObj(f.Signature.Recv(), field) - } - } - } - - // Parameters. - if functype.Params != nil { - n := len(f.Params) // 1 if has recv, 0 otherwise - for _, field := range functype.Params.List { - for _, n := range field.Names { - f.addSpilledParam(f.Pkg.info.Defs[n], n) - } - // Anonymous parameter? No need to spill. - if field.Names == nil { - f.addParamObj(f.Signature.Params().At(len(f.Params)-n), field) - } - } - } - - // Named results. - if functype.Results != nil { - for _, field := range functype.Results.List { - // Implicit "var" decl of locals for named results. - for _, n := range field.Names { - f.namedResults = append(f.namedResults, f.addLocalForIdent(n)) - } - } - - if len(f.namedResults) == 0 { - sig := f.Signature.Results() - for i := 0; i < sig.Len(); i++ { - // XXX position information - v := f.addLocal(sig.At(i).Type(), nil) - f.implicitResults = append(f.implicitResults, v) - } - } - } -} - -func numberNodes(f *Function) { - var base ID - for _, b := range f.Blocks { - for _, instr := range b.Instrs { - if instr == nil { - continue - } - base++ - instr.setID(base) - } - } -} - -// buildReferrers populates the def/use information in all non-nil -// Value.Referrers slice. -// Precondition: all such slices are initially empty. -func buildReferrers(f *Function) { - var rands []*Value - for _, b := range f.Blocks { - for _, instr := range b.Instrs { - rands = instr.Operands(rands[:0]) // recycle storage - for _, rand := range rands { - if r := *rand; r != nil { - if ref := r.Referrers(); ref != nil { - *ref = append(*ref, instr) - } - } - } - } - } -} - -func (f *Function) emitConsts() { - if len(f.Blocks) == 0 { - f.consts = nil - return - } - - // TODO(dh): our deduplication only works on booleans and - // integers. other constants are represented as pointers to - // things. 
- if len(f.consts) == 0 { - return - } else if len(f.consts) <= 32 { - f.emitConstsFew() - } else { - f.emitConstsMany() - } -} - -func (f *Function) emitConstsFew() { - dedup := make([]*Const, 0, 32) - for _, c := range f.consts { - if len(*c.Referrers()) == 0 { - continue - } - found := false - for _, d := range dedup { - if c.typ == d.typ && c.Value == d.Value { - replaceAll(c, d) - found = true - break - } - } - if !found { - dedup = append(dedup, c) - } - } - - instrs := make([]Instruction, len(f.Blocks[0].Instrs)+len(dedup)) - for i, c := range dedup { - instrs[i] = c - c.setBlock(f.Blocks[0]) - } - copy(instrs[len(dedup):], f.Blocks[0].Instrs) - f.Blocks[0].Instrs = instrs - f.consts = nil -} - -func (f *Function) emitConstsMany() { - type constKey struct { - typ types.Type - value constant.Value - } - - m := make(map[constKey]Value, len(f.consts)) - areNil := 0 - for i, c := range f.consts { - if len(*c.Referrers()) == 0 { - f.consts[i] = nil - areNil++ - continue - } - - k := constKey{ - typ: c.typ, - value: c.Value, - } - if dup, ok := m[k]; !ok { - m[k] = c - } else { - f.consts[i] = nil - areNil++ - replaceAll(c, dup) - } - } - - instrs := make([]Instruction, len(f.Blocks[0].Instrs)+len(f.consts)-areNil) - i := 0 - for _, c := range f.consts { - if c != nil { - instrs[i] = c - c.setBlock(f.Blocks[0]) - i++ - } - } - copy(instrs[i:], f.Blocks[0].Instrs) - f.Blocks[0].Instrs = instrs - f.consts = nil -} - -// buildFakeExits ensures that every block in the function is -// reachable in reverse from the Exit block. This is required to build -// a full post-dominator tree, and to ensure the exit block's -// inclusion in the dominator tree. -func buildFakeExits(fn *Function) { - // Find back-edges via forward DFS - fn.fakeExits = BlockSet{values: make([]bool, len(fn.Blocks))} - seen := fn.blockset(0) - backEdges := fn.blockset(1) - - var dfs func(b *BasicBlock) - dfs = func(b *BasicBlock) { - if !seen.Add(b) { - backEdges.Add(b) - return - } - for _, pred := range b.Succs { - dfs(pred) - } - } - dfs(fn.Blocks[0]) -buildLoop: - for { - seen := fn.blockset(2) - var dfs func(b *BasicBlock) - dfs = func(b *BasicBlock) { - if !seen.Add(b) { - return - } - for _, pred := range b.Preds { - dfs(pred) - } - if b == fn.Exit { - for _, b := range fn.Blocks { - if fn.fakeExits.Has(b) { - dfs(b) - } - } - } - } - dfs(fn.Exit) - - for _, b := range fn.Blocks { - if !seen.Has(b) && backEdges.Has(b) { - // Block b is not reachable from the exit block. Add a - // fake jump from b to exit, then try again. Note that we - // only add one fake edge at a time, as it may make - // multiple blocks reachable. - // - // We only consider those blocks that have back edges. - // Any unreachable block that doesn't have a back edge - // must flow into a loop, which by definition has a - // back edge. Thus, by looking for loops, we should - // need fewer fake edges overall. - fn.fakeExits.Add(b) - continue buildLoop - } - } - - break - } -} - -// finishBody() finalizes the function after IR code generation of its body. -func (f *Function) finishBody() { - f.objects = nil - f.currentBlock = nil - f.lblocks = nil - - // Remove from f.Locals any Allocs that escape to the heap. - j := 0 - for _, l := range f.Locals { - if !l.Heap { - f.Locals[j] = l - j++ - } - } - // Nil out f.Locals[j:] to aid GC. 
- for i := j; i < len(f.Locals); i++ { - f.Locals[i] = nil - } - f.Locals = f.Locals[:j] - - optimizeBlocks(f) - buildReferrers(f) - buildDomTree(f) - buildPostDomTree(f) - - if f.Prog.mode&NaiveForm == 0 { - lift(f) - } - - // emit constants after lifting, because lifting may produce new constants. - f.emitConsts() - - f.namedResults = nil // (used by lifting) - f.implicitResults = nil - - numberNodes(f) - - defer f.wr.Close() - f.wr.WriteFunc("start", "start", f) - - if f.Prog.mode&PrintFunctions != 0 { - printMu.Lock() - f.WriteTo(os.Stdout) - printMu.Unlock() - } - - if f.Prog.mode&SanityCheckFunctions != 0 { - mustSanityCheck(f, nil) - } -} - -func isUselessPhi(phi *Phi) (Value, bool) { - var v0 Value - for _, e := range phi.Edges { - if e == phi { - continue - } - if v0 == nil { - v0 = e - } - if v0 != e { - if v0, ok := v0.(*Const); ok { - if e, ok := e.(*Const); ok { - if v0.typ == e.typ && v0.Value == e.Value { - continue - } - } - } - return nil, false - } - } - return v0, true -} - -func (f *Function) RemoveNilBlocks() { - f.removeNilBlocks() -} - -// removeNilBlocks eliminates nils from f.Blocks and updates each -// BasicBlock.Index. Use this after any pass that may delete blocks. -// -func (f *Function) removeNilBlocks() { - j := 0 - for _, b := range f.Blocks { - if b != nil { - b.Index = j - f.Blocks[j] = b - j++ - } - } - // Nil out f.Blocks[j:] to aid GC. - for i := j; i < len(f.Blocks); i++ { - f.Blocks[i] = nil - } - f.Blocks = f.Blocks[:j] -} - -// SetDebugMode sets the debug mode for package pkg. If true, all its -// functions will include full debug info. This greatly increases the -// size of the instruction stream, and causes Functions to depend upon -// the ASTs, potentially keeping them live in memory for longer. -// -func (pkg *Package) SetDebugMode(debug bool) { - // TODO(adonovan): do we want ast.File granularity? - pkg.debug = debug -} - -// debugInfo reports whether debug info is wanted for this function. -func (f *Function) debugInfo() bool { - return f.Pkg != nil && f.Pkg.debug -} - -// addNamedLocal creates a local variable, adds it to function f and -// returns it. Its name and type are taken from obj. Subsequent -// calls to f.lookup(obj) will return the same local. -// -func (f *Function) addNamedLocal(obj types.Object, source ast.Node) *Alloc { - l := f.addLocal(obj.Type(), source) - f.objects[obj] = l - return l -} - -func (f *Function) addLocalForIdent(id *ast.Ident) *Alloc { - return f.addNamedLocal(f.Pkg.info.Defs[id], id) -} - -// addLocal creates an anonymous local variable of type typ, adds it -// to function f and returns it. pos is the optional source location. -// -func (f *Function) addLocal(typ types.Type, source ast.Node) *Alloc { - v := &Alloc{} - v.setType(types.NewPointer(typ)) - f.Locals = append(f.Locals, v) - f.emit(v, source) - return v -} - -// lookup returns the address of the named variable identified by obj -// that is local to function f or one of its enclosing functions. -// If escaping, the reference comes from a potentially escaping pointer -// expression and the referent must be heap-allocated. -// -func (f *Function) lookup(obj types.Object, escaping bool) Value { - if v, ok := f.objects[obj]; ok { - if alloc, ok := v.(*Alloc); ok && escaping { - alloc.Heap = true - } - return v // function-local var (address) - } - - // Definition must be in an enclosing function; - // plumb it through intervening closures. 
- if f.parent == nil { - panic("no ir.Value for " + obj.String()) - } - outer := f.parent.lookup(obj, true) // escaping - v := &FreeVar{ - name: obj.Name(), - typ: outer.Type(), - outer: outer, - parent: f, - } - f.objects[obj] = v - f.FreeVars = append(f.FreeVars, v) - return v -} - -// emit emits the specified instruction to function f. -func (f *Function) emit(instr Instruction, source ast.Node) Value { - return f.currentBlock.emit(instr, source) -} - -// RelString returns the full name of this function, qualified by -// package name, receiver type, etc. -// -// The specific formatting rules are not guaranteed and may change. -// -// Examples: -// "math.IsNaN" // a package-level function -// "(*bytes.Buffer).Bytes" // a declared method or a wrapper -// "(*bytes.Buffer).Bytes$thunk" // thunk (func wrapping method; receiver is param 0) -// "(*bytes.Buffer).Bytes$bound" // bound (func wrapping method; receiver supplied by closure) -// "main.main$1" // an anonymous function in main -// "main.init#1" // a declared init function -// "main.init" // the synthesized package initializer -// -// When these functions are referred to from within the same package -// (i.e. from == f.Pkg.Object), they are rendered without the package path. -// For example: "IsNaN", "(*Buffer).Bytes", etc. -// -// All non-synthetic functions have distinct package-qualified names. -// (But two methods may have the same name "(T).f" if one is a synthetic -// wrapper promoting a non-exported method "f" from another package; in -// that case, the strings are equal but the identifiers "f" are distinct.) -// -func (f *Function) RelString(from *types.Package) string { - // Anonymous? - if f.parent != nil { - // An anonymous function's Name() looks like "parentName$1", - // but its String() should include the type/package/etc. - parent := f.parent.RelString(from) - for i, anon := range f.parent.AnonFuncs { - if anon == f { - return fmt.Sprintf("%s$%d", parent, 1+i) - } - } - - return f.name // should never happen - } - - // Method (declared or wrapper)? - if recv := f.Signature.Recv(); recv != nil { - return f.relMethod(from, recv.Type()) - } - - // Thunk? - if f.method != nil { - return f.relMethod(from, f.method.Recv()) - } - - // Bound? - if len(f.FreeVars) == 1 && strings.HasSuffix(f.name, "$bound") { - return f.relMethod(from, f.FreeVars[0].Type()) - } - - // Package-level function? - // Prefix with package name for cross-package references only. - if p := f.pkg(); p != nil && p != from { - return fmt.Sprintf("%s.%s", p.Path(), f.name) - } - - // Unknown. - return f.name -} - -func (f *Function) relMethod(from *types.Package, recv types.Type) string { - return fmt.Sprintf("(%s).%s", relType(recv, from), f.name) -} - -// writeSignature writes to buf the signature sig in declaration syntax. 
-func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *types.Signature, params []*Parameter) { - buf.WriteString("func ") - if recv := sig.Recv(); recv != nil { - buf.WriteString("(") - if n := params[0].Name(); n != "" { - buf.WriteString(n) - buf.WriteString(" ") - } - types.WriteType(buf, params[0].Type(), types.RelativeTo(from)) - buf.WriteString(") ") - } - buf.WriteString(name) - types.WriteSignature(buf, sig, types.RelativeTo(from)) -} - -func (f *Function) pkg() *types.Package { - if f.Pkg != nil { - return f.Pkg.Pkg - } - return nil -} - -var _ io.WriterTo = (*Function)(nil) // *Function implements io.Writer - -func (f *Function) WriteTo(w io.Writer) (int64, error) { - var buf bytes.Buffer - WriteFunction(&buf, f) - n, err := w.Write(buf.Bytes()) - return int64(n), err -} - -// WriteFunction writes to buf a human-readable "disassembly" of f. -func WriteFunction(buf *bytes.Buffer, f *Function) { - fmt.Fprintf(buf, "# Name: %s\n", f.String()) - if f.Pkg != nil { - fmt.Fprintf(buf, "# Package: %s\n", f.Pkg.Pkg.Path()) - } - if syn := f.Synthetic; syn != "" { - fmt.Fprintln(buf, "# Synthetic:", syn) - } - if pos := f.Pos(); pos.IsValid() { - fmt.Fprintf(buf, "# Location: %s\n", f.Prog.Fset.Position(pos)) - } - - if f.parent != nil { - fmt.Fprintf(buf, "# Parent: %s\n", f.parent.Name()) - } - - from := f.pkg() - - if f.FreeVars != nil { - buf.WriteString("# Free variables:\n") - for i, fv := range f.FreeVars { - fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, fv.Name(), relType(fv.Type(), from)) - } - } - - if len(f.Locals) > 0 { - buf.WriteString("# Locals:\n") - for i, l := range f.Locals { - fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(deref(l.Type()), from)) - } - } - writeSignature(buf, from, f.Name(), f.Signature, f.Params) - buf.WriteString(":\n") - - if f.Blocks == nil { - buf.WriteString("\t(external)\n") - } - - for _, b := range f.Blocks { - if b == nil { - // Corrupt CFG. - fmt.Fprintf(buf, ".nil:\n") - continue - } - fmt.Fprintf(buf, "b%d:", b.Index) - if len(b.Preds) > 0 { - fmt.Fprint(buf, " ←") - for _, pred := range b.Preds { - fmt.Fprintf(buf, " b%d", pred.Index) - } - } - if b.Comment != "" { - fmt.Fprintf(buf, " # %s", b.Comment) - } - buf.WriteByte('\n') - - if false { // CFG debugging - fmt.Fprintf(buf, "\t# CFG: %s --> %s --> %s\n", b.Preds, b, b.Succs) - } - - buf2 := &bytes.Buffer{} - for _, instr := range b.Instrs { - buf.WriteString("\t") - switch v := instr.(type) { - case Value: - // Left-align the instruction. - if name := v.Name(); name != "" { - fmt.Fprintf(buf, "%s = ", name) - } - buf.WriteString(instr.String()) - case nil: - // Be robust against bad transforms. - buf.WriteString("") - default: - buf.WriteString(instr.String()) - } - buf.WriteString("\n") - - if f.Prog.mode&PrintSource != 0 { - if s := instr.Source(); s != nil { - buf2.Reset() - format.Node(buf2, f.Prog.Fset, s) - for { - line, err := buf2.ReadString('\n') - if len(line) == 0 { - break - } - buf.WriteString("\t\t> ") - buf.WriteString(line) - if line[len(line)-1] != '\n' { - buf.WriteString("\n") - } - if err != nil { - break - } - } - } - } - } - buf.WriteString("\n") - } -} - -// newBasicBlock adds to f a new basic block and returns it. It does -// not automatically become the current block for subsequent calls to emit. -// comment is an optional string for more readable debugging output. 
-// -func (f *Function) newBasicBlock(comment string) *BasicBlock { - b := &BasicBlock{ - Index: len(f.Blocks), - Comment: comment, - parent: f, - } - b.Succs = b.succs2[:0] - f.Blocks = append(f.Blocks, b) - return b -} - -// NewFunction returns a new synthetic Function instance belonging to -// prog, with its name and signature fields set as specified. -// -// The caller is responsible for initializing the remaining fields of -// the function object, e.g. Pkg, Params, Blocks. -// -// It is practically impossible for clients to construct well-formed -// IR functions/packages/programs directly, so we assume this is the -// job of the Builder alone. NewFunction exists to provide clients a -// little flexibility. For example, analysis tools may wish to -// construct fake Functions for the root of the callgraph, a fake -// "reflect" package, etc. -// -// TODO(adonovan): think harder about the API here. -// -func (prog *Program) NewFunction(name string, sig *types.Signature, provenance string) *Function { - return &Function{Prog: prog, name: name, Signature: sig, Synthetic: provenance} -} - -//lint:ignore U1000 we may make use of this for functions loaded from export data -type extentNode [2]token.Pos - -func (n extentNode) Pos() token.Pos { return n[0] } -func (n extentNode) End() token.Pos { return n[1] } - -func (f *Function) initHTML(name string) { - if name == "" { - return - } - if rel := f.RelString(nil); rel == name { - f.wr = NewHTMLWriter("ir.html", rel, "") - } -} diff --git a/vendor/honnef.co/go/tools/ir/html.go b/vendor/honnef.co/go/tools/ir/html.go deleted file mode 100644 index c18375333a9..00000000000 --- a/vendor/honnef.co/go/tools/ir/html.go +++ /dev/null @@ -1,1124 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Copyright 2019 Dominik Honnef. All rights reserved. - -package ir - -import ( - "bytes" - "fmt" - "go/types" - "html" - "io" - "log" - "os" - "os/exec" - "path/filepath" - "reflect" - "sort" - "strings" -) - -func live(f *Function) []bool { - max := 0 - var ops []*Value - - for _, b := range f.Blocks { - for _, instr := range b.Instrs { - if int(instr.ID()) > max { - max = int(instr.ID()) - } - } - } - - out := make([]bool, max+1) - var q []Node - for _, b := range f.Blocks { - for _, instr := range b.Instrs { - switch instr.(type) { - case *BlankStore, *Call, *ConstantSwitch, *Defer, *Go, *If, *Jump, *MapUpdate, *Next, *Panic, *Recv, *Return, *RunDefers, *Send, *Store, *Unreachable: - out[instr.ID()] = true - q = append(q, instr) - } - } - } - - for len(q) > 0 { - v := q[len(q)-1] - q = q[:len(q)-1] - for _, op := range v.Operands(ops) { - if *op == nil { - continue - } - if !out[(*op).ID()] { - out[(*op).ID()] = true - q = append(q, *op) - } - } - } - - return out -} - -type funcPrinter interface { - startBlock(b *BasicBlock, reachable bool) - endBlock(b *BasicBlock) - value(v Node, live bool) - startDepCycle() - endDepCycle() - named(n string, vals []Value) -} - -func namedValues(f *Function) map[types.Object][]Value { - names := map[types.Object][]Value{} - for _, b := range f.Blocks { - for _, instr := range b.Instrs { - if instr, ok := instr.(*DebugRef); ok { - if obj := instr.object; obj != nil { - names[obj] = append(names[obj], instr.X) - } - } - } - } - // XXX deduplicate values - return names -} - -func fprintFunc(p funcPrinter, f *Function) { - // XXX does our IR form preserve unreachable blocks? 
- // reachable, live := findlive(f) - - l := live(f) - for _, b := range f.Blocks { - // XXX - // p.startBlock(b, reachable[b.Index]) - p.startBlock(b, true) - - end := len(b.Instrs) - 1 - if end < 0 { - end = 0 - } - for _, v := range b.Instrs[:end] { - if _, ok := v.(*DebugRef); !ok { - p.value(v, l[v.ID()]) - } - } - p.endBlock(b) - } - - names := namedValues(f) - keys := make([]types.Object, 0, len(names)) - for key := range names { - keys = append(keys, key) - } - sort.Slice(keys, func(i, j int) bool { - return keys[i].Pos() < keys[j].Pos() - }) - for _, key := range keys { - p.named(key.Name(), names[key]) - } -} - -func opName(v Node) string { - switch v := v.(type) { - case *Call: - if v.Common().IsInvoke() { - return "Invoke" - } - return "Call" - case *Alloc: - if v.Heap { - return "HeapAlloc" - } - return "StackAlloc" - case *Select: - if v.Blocking { - return "SelectBlocking" - } - return "SelectNonBlocking" - default: - return reflect.ValueOf(v).Type().Elem().Name() - } -} - -type HTMLWriter struct { - w io.WriteCloser - path string - dot *dotWriter -} - -func NewHTMLWriter(path string, funcname, cfgMask string) *HTMLWriter { - out, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - log.Fatalf("%v", err) - } - pwd, err := os.Getwd() - if err != nil { - log.Fatalf("%v", err) - } - html := HTMLWriter{w: out, path: filepath.Join(pwd, path)} - html.dot = newDotWriter() - html.start(funcname) - return &html -} - -func (w *HTMLWriter) start(name string) { - if w == nil { - return - } - w.WriteString("") - w.WriteString(` - - - - - -`) - w.WriteString("") - w.WriteString("

") - w.WriteString(html.EscapeString(name)) - w.WriteString("

") - w.WriteString(` -help -
- -

-Click on a value or block to toggle highlighting of that value/block -and its uses. (Values and blocks are highlighted by ID, and IDs of -dead items may be reused, so not all highlights necessarily correspond -to the clicked item.) -

- -

-Faded out values and blocks are dead code that has not been eliminated. -

- -

-Values printed in italics have a dependency cycle. -

- -

-CFG: Dashed edge is for unlikely branches. Blue color is for backward edges. -Edge with a dot means that this edge follows the order in which blocks were laidout. -

- -
-`) - w.WriteString("") - w.WriteString("") -} - -func (w *HTMLWriter) Close() { - if w == nil { - return - } - io.WriteString(w.w, "") - io.WriteString(w.w, "
") - io.WriteString(w.w, "") - io.WriteString(w.w, "") - w.w.Close() - fmt.Printf("dumped IR to %v\n", w.path) -} - -// WriteFunc writes f in a column headed by title. -// phase is used for collapsing columns and should be unique across the table. -func (w *HTMLWriter) WriteFunc(phase, title string, f *Function) { - if w == nil { - return - } - w.WriteColumn(phase, title, "", funcHTML(f, phase, w.dot)) -} - -// WriteColumn writes raw HTML in a column headed by title. -// It is intended for pre- and post-compilation log output. -func (w *HTMLWriter) WriteColumn(phase, title, class, html string) { - if w == nil { - return - } - id := strings.Replace(phase, " ", "-", -1) - // collapsed column - w.Printf("
%v
", id, phase) - - if class == "" { - w.Printf("", id) - } else { - w.Printf("", id, class) - } - w.WriteString("

" + title + "

") - w.WriteString(html) - w.WriteString("") -} - -func (w *HTMLWriter) Printf(msg string, v ...interface{}) { - if _, err := fmt.Fprintf(w.w, msg, v...); err != nil { - log.Fatalf("%v", err) - } -} - -func (w *HTMLWriter) WriteString(s string) { - if _, err := io.WriteString(w.w, s); err != nil { - log.Fatalf("%v", err) - } -} - -func valueHTML(v Node) string { - if v == nil { - return "<nil>" - } - // TODO: Using the value ID as the class ignores the fact - // that value IDs get recycled and that some values - // are transmuted into other values. - class := fmt.Sprintf("t%d", v.ID()) - var label string - switch v := v.(type) { - case *Function: - label = v.RelString(nil) - case *Builtin: - label = v.Name() - default: - label = class - } - return fmt.Sprintf("%s", class, label) -} - -func valueLongHTML(v Node) string { - // TODO: Any intra-value formatting? - // I'm wary of adding too much visual noise, - // but a little bit might be valuable. - // We already have visual noise in the form of punctuation - // maybe we could replace some of that with formatting. - s := fmt.Sprintf("", v.ID()) - - linenumber := "(?)" - if v.Pos().IsValid() { - line := v.Parent().Prog.Fset.Position(v.Pos()).Line - linenumber = fmt.Sprintf("(%d)", line, line) - } - - s += fmt.Sprintf("%s %s = %s", valueHTML(v), linenumber, opName(v)) - - if v, ok := v.(Value); ok { - s += " <" + html.EscapeString(v.Type().String()) + ">" - } - - switch v := v.(type) { - case *Parameter: - s += fmt.Sprintf(" {%s}", html.EscapeString(v.name)) - case *BinOp: - s += fmt.Sprintf(" {%s}", html.EscapeString(v.Op.String())) - case *UnOp: - s += fmt.Sprintf(" {%s}", html.EscapeString(v.Op.String())) - case *Extract: - name := v.Tuple.Type().(*types.Tuple).At(v.Index).Name() - s += fmt.Sprintf(" [%d] (%s)", v.Index, name) - case *Field: - st := v.X.Type().Underlying().(*types.Struct) - // Be robust against a bad index. - name := "?" - if 0 <= v.Field && v.Field < st.NumFields() { - name = st.Field(v.Field).Name() - } - s += fmt.Sprintf(" [%d] (%s)", v.Field, name) - case *FieldAddr: - st := deref(v.X.Type()).Underlying().(*types.Struct) - // Be robust against a bad index. - name := "?" - if 0 <= v.Field && v.Field < st.NumFields() { - name = st.Field(v.Field).Name() - } - - s += fmt.Sprintf(" [%d] (%s)", v.Field, name) - case *Recv: - s += fmt.Sprintf(" {%t}", v.CommaOk) - case *Call: - if v.Common().IsInvoke() { - s += fmt.Sprintf(" {%s}", html.EscapeString(v.Common().Method.FullName())) - } - case *Const: - if v.Value == nil { - s += " {<nil>}" - } else { - s += fmt.Sprintf(" {%s}", html.EscapeString(v.Value.String())) - } - case *Sigma: - s += fmt.Sprintf(" [#%s]", v.From) - } - for _, a := range v.Operands(nil) { - s += fmt.Sprintf(" %s", valueHTML(*a)) - } - - // OPT(dh): we're calling namedValues many times on the same function. - allNames := namedValues(v.Parent()) - var names []string - for name, values := range allNames { - for _, value := range values { - if v == value { - names = append(names, name.Name()) - break - } - } - } - if len(names) != 0 { - s += " (" + strings.Join(names, ", ") + ")" - } - - s += "" - return s -} - -func blockHTML(b *BasicBlock) string { - // TODO: Using the value ID as the class ignores the fact - // that value IDs get recycled and that some values - // are transmuted into other values. 
- s := html.EscapeString(b.String()) - return fmt.Sprintf("%s", s, s) -} - -func blockLongHTML(b *BasicBlock) string { - var kind string - var term Instruction - if len(b.Instrs) > 0 { - term = b.Control() - kind = opName(term) - } - // TODO: improve this for HTML? - s := fmt.Sprintf("%s", b.Index, kind) - - if term != nil { - ops := term.Operands(nil) - if len(ops) > 0 { - var ss []string - for _, op := range ops { - ss = append(ss, valueHTML(*op)) - } - s += " " + strings.Join(ss, ", ") - } - } - if len(b.Succs) > 0 { - s += " →" // right arrow - for _, c := range b.Succs { - s += " " + blockHTML(c) - } - } - return s -} - -func funcHTML(f *Function, phase string, dot *dotWriter) string { - buf := new(bytes.Buffer) - if dot != nil { - dot.writeFuncSVG(buf, phase, f) - } - fmt.Fprint(buf, "") - p := htmlFuncPrinter{w: buf} - fprintFunc(p, f) - - // fprintFunc(&buf, f) // TODO: HTML, not text,
for line breaks, etc. - fmt.Fprint(buf, "
") - return buf.String() -} - -type htmlFuncPrinter struct { - w io.Writer -} - -func (p htmlFuncPrinter) startBlock(b *BasicBlock, reachable bool) { - var dead string - if !reachable { - dead = "dead-block" - } - fmt.Fprintf(p.w, "
    ", b, dead) - fmt.Fprintf(p.w, "
  • %s:", blockHTML(b)) - if len(b.Preds) > 0 { - io.WriteString(p.w, " ←") // left arrow - for _, pred := range b.Preds { - fmt.Fprintf(p.w, " %s", blockHTML(pred)) - } - } - if len(b.Instrs) > 0 { - io.WriteString(p.w, ``) - } - io.WriteString(p.w, "
  • ") - if len(b.Instrs) > 0 { // start list of values - io.WriteString(p.w, "
  • ") - io.WriteString(p.w, "
      ") - } -} - -func (p htmlFuncPrinter) endBlock(b *BasicBlock) { - if len(b.Instrs) > 0 { // end list of values - io.WriteString(p.w, "
    ") - io.WriteString(p.w, "
  • ") - } - io.WriteString(p.w, "
  • ") - fmt.Fprint(p.w, blockLongHTML(b)) - io.WriteString(p.w, "
  • ") - io.WriteString(p.w, "
") -} - -func (p htmlFuncPrinter) value(v Node, live bool) { - var dead string - if !live { - dead = "dead-value" - } - fmt.Fprintf(p.w, "
  • ", dead) - fmt.Fprint(p.w, valueLongHTML(v)) - io.WriteString(p.w, "
  • ") -} - -func (p htmlFuncPrinter) startDepCycle() { - fmt.Fprintln(p.w, "") -} - -func (p htmlFuncPrinter) endDepCycle() { - fmt.Fprintln(p.w, "") -} - -func (p htmlFuncPrinter) named(n string, vals []Value) { - fmt.Fprintf(p.w, "
  • name %s: ", n) - for _, val := range vals { - fmt.Fprintf(p.w, "%s ", valueHTML(val)) - } - fmt.Fprintf(p.w, "
  • ") -} - -type dotWriter struct { - path string - broken bool -} - -// newDotWriter returns non-nil value when mask is valid. -// dotWriter will generate SVGs only for the phases specified in the mask. -// mask can contain following patterns and combinations of them: -// * - all of them; -// x-y - x through y, inclusive; -// x,y - x and y, but not the passes between. -func newDotWriter() *dotWriter { - path, err := exec.LookPath("dot") - if err != nil { - fmt.Println(err) - return nil - } - return &dotWriter{path: path} -} - -func (d *dotWriter) writeFuncSVG(w io.Writer, phase string, f *Function) { - if d.broken { - return - } - cmd := exec.Command(d.path, "-Tsvg") - pipe, err := cmd.StdinPipe() - if err != nil { - d.broken = true - fmt.Println(err) - return - } - buf := new(bytes.Buffer) - cmd.Stdout = buf - bufErr := new(bytes.Buffer) - cmd.Stderr = bufErr - err = cmd.Start() - if err != nil { - d.broken = true - fmt.Println(err) - return - } - fmt.Fprint(pipe, `digraph "" { margin=0; size="4,40"; ranksep=.2; `) - id := strings.Replace(phase, " ", "-", -1) - fmt.Fprintf(pipe, `id="g_graph_%s";`, id) - fmt.Fprintf(pipe, `node [style=filled,fillcolor=white,fontsize=16,fontname="Menlo,Times,serif",margin="0.01,0.03"];`) - fmt.Fprintf(pipe, `edge [fontsize=16,fontname="Menlo,Times,serif"];`) - for _, b := range f.Blocks { - layout := "" - fmt.Fprintf(pipe, `%v [label="%v%s\n%v",id="graph_node_%v_%v"];`, b, b, layout, b.Control().String(), id, b) - } - indexOf := make([]int, len(f.Blocks)) - for i, b := range f.Blocks { - indexOf[b.Index] = i - } - - // XXX - /* - ponums := make([]int32, len(f.Blocks)) - _ = postorderWithNumbering(f, ponums) - isBackEdge := func(from, to int) bool { - return ponums[from] <= ponums[to] - } - */ - isBackEdge := func(from, to int) bool { return false } - - for _, b := range f.Blocks { - for i, s := range b.Succs { - style := "solid" - color := "black" - arrow := "vee" - if isBackEdge(b.Index, s.Index) { - color = "blue" - } - fmt.Fprintf(pipe, `%v -> %v [label=" %d ",style="%s",color="%s",arrowhead="%s"];`, b, s, i, style, color, arrow) - } - } - fmt.Fprint(pipe, "}") - pipe.Close() - err = cmd.Wait() - if err != nil { - d.broken = true - fmt.Printf("dot: %v\n%v\n", err, bufErr.String()) - return - } - - svgID := "svg_graph_" + id - fmt.Fprintf(w, `
    `, svgID, svgID) - // For now, an awful hack: edit the html as it passes through - // our fingers, finding ' 0 { - fset = initial[0].Fset - } - - prog := ir.NewProgram(fset, mode) - if opts != nil { - prog.PrintFunc = opts.PrintFunc - } - - isInitial := make(map[*packages.Package]bool, len(initial)) - for _, p := range initial { - isInitial[p] = true - } - - irmap := make(map[*packages.Package]*ir.Package) - packages.Visit(initial, nil, func(p *packages.Package) { - if p.Types != nil && !p.IllTyped { - var files []*ast.File - if deps || isInitial[p] { - files = p.Syntax - } - irmap[p] = prog.CreatePackage(p.Types, files, p.TypesInfo, true) - } - }) - - var irpkgs []*ir.Package - for _, p := range initial { - irpkgs = append(irpkgs, irmap[p]) // may be nil - } - return prog, irpkgs -} - -// CreateProgram returns a new program in IR form, given a program -// loaded from source. An IR package is created for each transitively -// error-free package of lprog. -// -// Code for bodies of functions is not built until Build is called -// on the result. -// -// The mode parameter controls diagnostics and checking during IR construction. -// -// Deprecated: use golang.org/x/tools/go/packages and the Packages -// function instead; see ir.ExampleLoadPackages. -// -func CreateProgram(lprog *loader.Program, mode ir.BuilderMode) *ir.Program { - prog := ir.NewProgram(lprog.Fset, mode) - - for _, info := range lprog.AllPackages { - if info.TransitivelyErrorFree { - prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable) - } - } - - return prog -} - -// BuildPackage builds an IR program with IR for a single package. -// -// It populates pkg by type-checking the specified file ASTs. All -// dependencies are loaded using the importer specified by tc, which -// typically loads compiler export data; IR code cannot be built for -// those packages. BuildPackage then constructs an ir.Program with all -// dependency packages created, and builds and returns the IR package -// corresponding to pkg. -// -// The caller must have set pkg.Path() to the import path. -// -// The operation fails if there were any type-checking or import errors. -// -// See ../ir/example_test.go for an example. -// -func BuildPackage(tc *types.Config, fset *token.FileSet, pkg *types.Package, files []*ast.File, mode ir.BuilderMode) (*ir.Package, *types.Info, error) { - if fset == nil { - panic("no token.FileSet") - } - if pkg.Path() == "" { - panic("package has no import path") - } - - info := &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - } - if err := types.NewChecker(tc, fset, pkg, info).Files(files); err != nil { - return nil, nil, err - } - - prog := ir.NewProgram(fset, mode) - - // Create IR packages for all imports. - // Order is not significant. - created := make(map[*types.Package]bool) - var createAll func(pkgs []*types.Package) - createAll = func(pkgs []*types.Package) { - for _, p := range pkgs { - if !created[p] { - created[p] = true - prog.CreatePackage(p, nil, nil, true) - createAll(p.Imports()) - } - } - } - createAll(pkg.Imports()) - - // Create and build the primary package. 
- irpkg := prog.CreatePackage(pkg, files, info, false) - irpkg.Build() - return irpkg, info, nil -} diff --git a/vendor/honnef.co/go/tools/ir/irutil/switch.go b/vendor/honnef.co/go/tools/ir/irutil/switch.go deleted file mode 100644 index f44cbca9e9e..00000000000 --- a/vendor/honnef.co/go/tools/ir/irutil/switch.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package irutil - -// This file implements discovery of switch and type-switch constructs -// from low-level control flow. -// -// Many techniques exist for compiling a high-level switch with -// constant cases to efficient machine code. The optimal choice will -// depend on the data type, the specific case values, the code in the -// body of each case, and the hardware. -// Some examples: -// - a lookup table (for a switch that maps constants to constants) -// - a computed goto -// - a binary tree -// - a perfect hash -// - a two-level switch (to partition constant strings by their first byte). - -import ( - "bytes" - "fmt" - "go/token" - "go/types" - - "honnef.co/go/tools/ir" -) - -// A ConstCase represents a single constant comparison. -// It is part of a Switch. -type ConstCase struct { - Block *ir.BasicBlock // block performing the comparison - Body *ir.BasicBlock // body of the case - Value *ir.Const // case comparand -} - -// A TypeCase represents a single type assertion. -// It is part of a Switch. -type TypeCase struct { - Block *ir.BasicBlock // block performing the type assert - Body *ir.BasicBlock // body of the case - Type types.Type // case type - Binding ir.Value // value bound by this case -} - -// A Switch is a logical high-level control flow operation -// (a multiway branch) discovered by analysis of a CFG containing -// only if/else chains. It is not part of the ir.Instruction set. -// -// One of ConstCases and TypeCases has length >= 2; -// the other is nil. -// -// In a value switch, the list of cases may contain duplicate constants. -// A type switch may contain duplicate types, or types assignable -// to an interface type also in the list. -// TODO(adonovan): eliminate such duplicates. -// -type Switch struct { - Start *ir.BasicBlock // block containing start of if/else chain - X ir.Value // the switch operand - ConstCases []ConstCase // ordered list of constant comparisons - TypeCases []TypeCase // ordered list of type assertions - Default *ir.BasicBlock // successor if all comparisons fail -} - -func (sw *Switch) String() string { - // We represent each block by the String() of its - // first Instruction, e.g. "print(42:int)". - var buf bytes.Buffer - if sw.ConstCases != nil { - fmt.Fprintf(&buf, "switch %s {\n", sw.X.Name()) - for _, c := range sw.ConstCases { - fmt.Fprintf(&buf, "case %s: %s\n", c.Value.Name(), c.Body.Instrs[0]) - } - } else { - fmt.Fprintf(&buf, "switch %s.(type) {\n", sw.X.Name()) - for _, c := range sw.TypeCases { - fmt.Fprintf(&buf, "case %s %s: %s\n", - c.Binding.Name(), c.Type, c.Body.Instrs[0]) - } - } - if sw.Default != nil { - fmt.Fprintf(&buf, "default: %s\n", sw.Default.Instrs[0]) - } - fmt.Fprintf(&buf, "}") - return buf.String() -} - -// Switches examines the control-flow graph of fn and returns the -// set of inferred value and type switches. A value switch tests an -// ir.Value for equality against two or more compile-time constant -// values. Switches involving link-time constants (addresses) are -// ignored. 
A type switch type-asserts an ir.Value against two or -// more types. -// -// The switches are returned in dominance order. -// -// The resulting switches do not necessarily correspond to uses of the -// 'switch' keyword in the source: for example, a single source-level -// switch statement with non-constant cases may result in zero, one or -// many Switches, one per plural sequence of constant cases. -// Switches may even be inferred from if/else- or goto-based control flow. -// (In general, the control flow constructs of the source program -// cannot be faithfully reproduced from the IR.) -// -func Switches(fn *ir.Function) []Switch { - // Traverse the CFG in dominance order, so we don't - // enter an if/else-chain in the middle. - var switches []Switch - seen := make(map[*ir.BasicBlock]bool) // TODO(adonovan): opt: use ir.blockSet - for _, b := range fn.DomPreorder() { - if x, k := isComparisonBlock(b); x != nil { - // Block b starts a switch. - sw := Switch{Start: b, X: x} - valueSwitch(&sw, k, seen) - if len(sw.ConstCases) > 1 { - switches = append(switches, sw) - } - } - - if y, x, T := isTypeAssertBlock(b); y != nil { - // Block b starts a type switch. - sw := Switch{Start: b, X: x} - typeSwitch(&sw, y, T, seen) - if len(sw.TypeCases) > 1 { - switches = append(switches, sw) - } - } - } - return switches -} - -func isSameX(x1 ir.Value, x2 ir.Value) bool { - if x1 == x2 { - return true - } - if x2, ok := x2.(*ir.Sigma); ok { - return isSameX(x1, x2.X) - } - return false -} - -func valueSwitch(sw *Switch, k *ir.Const, seen map[*ir.BasicBlock]bool) { - b := sw.Start - x := sw.X - for isSameX(sw.X, x) { - if seen[b] { - break - } - seen[b] = true - - sw.ConstCases = append(sw.ConstCases, ConstCase{ - Block: b, - Body: b.Succs[0], - Value: k, - }) - b = b.Succs[1] - n := 0 - for _, instr := range b.Instrs { - switch instr.(type) { - case *ir.If, *ir.BinOp: - n++ - case *ir.Sigma, *ir.Phi, *ir.DebugRef: - default: - n += 1000 - } - } - if n != 2 { - // Block b contains not just 'if x == k' and σ/ϕ nodes, - // so it may have side effects that - // make it unsafe to elide. - break - } - if len(b.Preds) != 1 { - // Block b has multiple predecessors, - // so it cannot be treated as a case. - break - } - x, k = isComparisonBlock(b) - } - sw.Default = b -} - -func typeSwitch(sw *Switch, y ir.Value, T types.Type, seen map[*ir.BasicBlock]bool) { - b := sw.Start - x := sw.X - for isSameX(sw.X, x) { - if seen[b] { - break - } - seen[b] = true - - sw.TypeCases = append(sw.TypeCases, TypeCase{ - Block: b, - Body: b.Succs[0], - Type: T, - Binding: y, - }) - b = b.Succs[1] - n := 0 - for _, instr := range b.Instrs { - switch instr.(type) { - case *ir.TypeAssert, *ir.Extract, *ir.If: - n++ - case *ir.Sigma, *ir.Phi: - default: - n += 1000 - } - } - if n != 4 { - // Block b contains not just - // {TypeAssert; Extract #0; Extract #1; If} - // so it may have side effects that - // make it unsafe to elide. - break - } - if len(b.Preds) != 1 { - // Block b has multiple predecessors, - // so it cannot be treated as a case. - break - } - y, x, T = isTypeAssertBlock(b) - } - sw.Default = b -} - -// isComparisonBlock returns the operands (v, k) if a block ends with -// a comparison v==k, where k is a compile-time constant. 
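Since this hunk removes the whole switch-recovery API, a rough usage sketch may help reviewers judge whether anything in-tree still relied on it. The snippet below is illustrative only: it assumes the pre-removal honnef.co/go/tools/ir and ir/irutil packages are still importable, and the tiny source string and the function name f are invented for the example.

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
	"os"

	"honnef.co/go/tools/ir"
	"honnef.co/go/tools/ir/irutil"
)

const src = `package p

func f(x int) string {
	switch x {
	case 1:
		return "one"
	case 2:
		return "two"
	}
	return "many"
}
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	// BuildPackage type-checks the file, then creates and builds IR for it.
	tc := &types.Config{Importer: importer.Default()}
	pkg := types.NewPackage("p", "p")
	irpkg, _, err := irutil.BuildPackage(tc, fset, pkg, []*ast.File{file}, ir.SanityCheckFunctions)
	if err != nil {
		panic(err)
	}
	fn := irpkg.Func("f")
	fn.WriteTo(os.Stdout) // human-readable "disassembly" of f
	// Switches rediscovers multiway branches from the if/else chains in the CFG.
	for _, sw := range irutil.Switches(fn) {
		fmt.Println(sw.String())
	}
}

Each recovered Switch prints via its String method in the switch/case form described in the doc comment above.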
-// -func isComparisonBlock(b *ir.BasicBlock) (v ir.Value, k *ir.Const) { - if n := len(b.Instrs); n >= 2 { - if i, ok := b.Instrs[n-1].(*ir.If); ok { - if binop, ok := i.Cond.(*ir.BinOp); ok && binop.Block() == b && binop.Op == token.EQL { - if k, ok := binop.Y.(*ir.Const); ok { - return binop.X, k - } - if k, ok := binop.X.(*ir.Const); ok { - return binop.Y, k - } - } - } - } - return -} - -// isTypeAssertBlock returns the operands (y, x, T) if a block ends with -// a type assertion "if y, ok := x.(T); ok {". -// -func isTypeAssertBlock(b *ir.BasicBlock) (y, x ir.Value, T types.Type) { - if n := len(b.Instrs); n >= 4 { - if i, ok := b.Instrs[n-1].(*ir.If); ok { - if ext1, ok := i.Cond.(*ir.Extract); ok && ext1.Block() == b && ext1.Index == 1 { - if ta, ok := ext1.Tuple.(*ir.TypeAssert); ok && ta.Block() == b { - // hack: relies upon instruction ordering. - if ext0, ok := b.Instrs[n-3].(*ir.Extract); ok { - return ext0, ta.X, ta.AssertedType - } - } - } - } - } - return -} diff --git a/vendor/honnef.co/go/tools/ir/irutil/util.go b/vendor/honnef.co/go/tools/ir/irutil/util.go deleted file mode 100644 index 04b25f5f9be..00000000000 --- a/vendor/honnef.co/go/tools/ir/irutil/util.go +++ /dev/null @@ -1,70 +0,0 @@ -package irutil - -import ( - "honnef.co/go/tools/ir" -) - -func Reachable(from, to *ir.BasicBlock) bool { - if from == to { - return true - } - if from.Dominates(to) { - return true - } - - found := false - Walk(from, func(b *ir.BasicBlock) bool { - if b == to { - found = true - return false - } - return true - }) - return found -} - -func Walk(b *ir.BasicBlock, fn func(*ir.BasicBlock) bool) { - seen := map[*ir.BasicBlock]bool{} - wl := []*ir.BasicBlock{b} - for len(wl) > 0 { - b := wl[len(wl)-1] - wl = wl[:len(wl)-1] - if seen[b] { - continue - } - seen[b] = true - if !fn(b) { - continue - } - wl = append(wl, b.Succs...) - } -} - -func Vararg(x *ir.Slice) ([]ir.Value, bool) { - var out []ir.Value - slice, ok := x.X.(*ir.Alloc) - if !ok { - return nil, false - } - for _, ref := range *slice.Referrers() { - if ref == x { - continue - } - if ref.Block() != x.Block() { - return nil, false - } - idx, ok := ref.(*ir.IndexAddr) - if !ok { - return nil, false - } - if len(*idx.Referrers()) != 1 { - return nil, false - } - store, ok := (*idx.Referrers())[0].(*ir.Store) - if !ok { - return nil, false - } - out = append(out, store.Val) - } - return out, true -} diff --git a/vendor/honnef.co/go/tools/ir/irutil/visit.go b/vendor/honnef.co/go/tools/ir/irutil/visit.go deleted file mode 100644 index 657c9cde747..00000000000 --- a/vendor/honnef.co/go/tools/ir/irutil/visit.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package irutil // import "honnef.co/go/tools/ir/irutil" - -import "honnef.co/go/tools/ir" - -// This file defines utilities for visiting the IR of -// a Program. -// -// TODO(adonovan): test coverage. - -// AllFunctions finds and returns the set of functions potentially -// needed by program prog, as determined by a simple linker-style -// reachability algorithm starting from the members and method-sets of -// each package. The result may include anonymous functions and -// synthetic wrappers. -// -// Precondition: all packages are built. 
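For orientation, the whole-program entry points deleted here (Packages, AllFunctions, MainPackages) were normally fed from a golang.org/x/tools/go/packages load. The driver below is a hypothetical sketch, not code from this repository: the "./..." pattern and the ir.SanityCheckFunctions mode are arbitrary choices for illustration, the Program.Build call follows the go/ssa convention, and it assumes every loaded package type-checks cleanly (otherwise entries of irpkgs may be nil).

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
	"honnef.co/go/tools/ir"
	"honnef.co/go/tools/ir/irutil"
)

func main() {
	cfg := &packages.Config{Mode: packages.LoadAllSyntax}
	initial, err := packages.Load(cfg, "./...")
	if err != nil {
		panic(err)
	}
	// Packages creates the IR program and one ir.Package per loaded package;
	// function bodies are only built once Build is called.
	prog, irpkgs := irutil.Packages(initial, ir.SanityCheckFunctions, nil)
	prog.Build()
	// AllFunctions requires that all packages have been built (see the
	// precondition above).
	fns := irutil.AllFunctions(prog)
	fmt.Printf("%d functions reachable from package members\n", len(fns))
	for _, p := range irutil.MainPackages(irpkgs) {
		fmt.Println("main package:", p.Pkg.Path())
	}
}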
-// -func AllFunctions(prog *ir.Program) map[*ir.Function]bool { - visit := visitor{ - prog: prog, - seen: make(map[*ir.Function]bool), - } - visit.program() - return visit.seen -} - -type visitor struct { - prog *ir.Program - seen map[*ir.Function]bool -} - -func (visit *visitor) program() { - for _, pkg := range visit.prog.AllPackages() { - for _, mem := range pkg.Members { - if fn, ok := mem.(*ir.Function); ok { - visit.function(fn) - } - } - } - for _, T := range visit.prog.RuntimeTypes() { - mset := visit.prog.MethodSets.MethodSet(T) - for i, n := 0, mset.Len(); i < n; i++ { - visit.function(visit.prog.MethodValue(mset.At(i))) - } - } -} - -func (visit *visitor) function(fn *ir.Function) { - if !visit.seen[fn] { - visit.seen[fn] = true - var buf [10]*ir.Value // avoid alloc in common case - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - for _, op := range instr.Operands(buf[:0]) { - if fn, ok := (*op).(*ir.Function); ok { - visit.function(fn) - } - } - } - } - } -} - -// MainPackages returns the subset of the specified packages -// named "main" that define a main function. -// The result may include synthetic "testmain" packages. -func MainPackages(pkgs []*ir.Package) []*ir.Package { - var mains []*ir.Package - for _, pkg := range pkgs { - if pkg.Pkg.Name() == "main" && pkg.Func("main") != nil { - mains = append(mains, pkg) - } - } - return mains -} diff --git a/vendor/honnef.co/go/tools/ir/lift.go b/vendor/honnef.co/go/tools/ir/lift.go deleted file mode 100644 index 71d5c8cb060..00000000000 --- a/vendor/honnef.co/go/tools/ir/lift.go +++ /dev/null @@ -1,1063 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ir - -// This file defines the lifting pass which tries to "lift" Alloc -// cells (new/local variables) into SSA registers, replacing loads -// with the dominating stored value, eliminating loads and stores, and -// inserting φ- and σ-nodes as needed. - -// Cited papers and resources: -// -// Ron Cytron et al. 1991. Efficiently computing SSA form... -// http://doi.acm.org/10.1145/115372.115320 -// -// Cooper, Harvey, Kennedy. 2001. A Simple, Fast Dominance Algorithm. -// Software Practice and Experience 2001, 4:1-10. -// http://www.hipersoft.rice.edu/grads/publications/dom14.pdf -// -// Daniel Berlin, llvmdev mailing list, 2012. -// http://lists.cs.uiuc.edu/pipermail/llvmdev/2012-January/046638.html -// (Be sure to expand the whole thread.) -// -// C. Scott Ananian. 1997. The static single information form. -// -// Jeremy Singer. 2006. Static program analysis based on virtual register renaming. - -// TODO(adonovan): opt: there are many optimizations worth evaluating, and -// the conventional wisdom for SSA construction is that a simple -// algorithm well engineered often beats those of better asymptotic -// complexity on all but the most egregious inputs. -// -// Danny Berlin suggests that the Cooper et al. algorithm for -// computing the dominance frontier is superior to Cytron et al. -// Furthermore he recommends that rather than computing the DF for the -// whole function then renaming all alloc cells, it may be cheaper to -// compute the DF for each alloc cell separately and throw it away. -// -// Consider exploiting liveness information to avoid creating dead -// φ-nodes which we then immediately remove. -// -// Also see many other "TODO: opt" suggestions in the code. 
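The lifting pass introduced above leans on dominance frontiers to decide where φ-nodes are needed. As a worked illustration of the Cooper, Harvey and Kennedy frontier computation cited here (and mirrored by domFrontier.build below), here is a self-contained sketch on a toy CFG; the block type is a stand-in invented for the example, not part of the ir package.

package main

import "fmt"

// block is a minimal stand-in for ir.BasicBlock: an index, a predecessor
// list, and an immediate dominator, which is all the frontier computation
// needs.
type block struct {
	index int
	preds []*block
	idom  *block // immediate dominator; nil for the entry block
}

// dominanceFrontier follows Cooper, Harvey and Kennedy, "A Simple, Fast
// Dominance Algorithm" (Figure 5), the same scheme used by
// domFrontier.build: for every join point b, walk up the dominator tree
// from each predecessor until reaching b's immediate dominator, adding b
// to the frontier of every block visited on the way.
func dominanceFrontier(blocks []*block) [][]*block {
	df := make([][]*block, len(blocks))
	for _, b := range blocks {
		if len(b.preds) < 2 {
			continue // only join points contribute to frontiers
		}
		for _, p := range b.preds {
			for runner := p; runner != b.idom; runner = runner.idom {
				df[runner.index] = append(df[runner.index], b)
			}
		}
	}
	return df
}

func main() {
	// Diamond-ish CFG: entry branches to a and to c, and a also falls
	// through to c, so c is a join point immediately dominated by entry.
	entry := &block{index: 0}
	a := &block{index: 1, preds: []*block{entry}, idom: entry}
	c := &block{index: 2, preds: []*block{a, entry}, idom: entry}
	for i, frontier := range dominanceFrontier([]*block{entry, a, c}) {
		for _, b := range frontier {
			fmt.Printf("DF(b%d) contains b%d\n", i, b.index)
		}
	}
	// Prints "DF(b1) contains b2": a value assigned differently in a and
	// in entry's other branch needs a φ-node at the top of c.
}

The walk stops at idom(b) because every block at or above it dominates b outright, so no φ-node is needed there.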
- -import ( - "fmt" - "go/types" - "os" -) - -// If true, show diagnostic information at each step of lifting. -// Very verbose. -const debugLifting = false - -// domFrontier maps each block to the set of blocks in its dominance -// frontier. The outer slice is conceptually a map keyed by -// Block.Index. The inner slice is conceptually a set, possibly -// containing duplicates. -// -// TODO(adonovan): opt: measure impact of dups; consider a packed bit -// representation, e.g. big.Int, and bitwise parallel operations for -// the union step in the Children loop. -// -// domFrontier's methods mutate the slice's elements but not its -// length, so their receivers needn't be pointers. -// -type domFrontier [][]*BasicBlock - -func (df domFrontier) add(u, v *BasicBlock) { - df[u.Index] = append(df[u.Index], v) -} - -// build builds the dominance frontier df for the dominator tree of -// fn, using the algorithm found in A Simple, Fast Dominance -// Algorithm, Figure 5. -// -// TODO(adonovan): opt: consider Berlin approach, computing pruned SSA -// by pruning the entire IDF computation, rather than merely pruning -// the DF -> IDF step. -func (df domFrontier) build(fn *Function) { - for _, b := range fn.Blocks { - if len(b.Preds) >= 2 { - for _, p := range b.Preds { - runner := p - for runner != b.dom.idom { - df.add(runner, b) - runner = runner.dom.idom - } - } - } - } -} - -func buildDomFrontier(fn *Function) domFrontier { - df := make(domFrontier, len(fn.Blocks)) - df.build(fn) - return df -} - -type postDomFrontier [][]*BasicBlock - -func (rdf postDomFrontier) add(u, v *BasicBlock) { - rdf[u.Index] = append(rdf[u.Index], v) -} - -func (rdf postDomFrontier) build(fn *Function) { - for _, b := range fn.Blocks { - if len(b.Succs) >= 2 { - for _, s := range b.Succs { - runner := s - for runner != b.pdom.idom { - rdf.add(runner, b) - runner = runner.pdom.idom - } - } - } - } -} - -func buildPostDomFrontier(fn *Function) postDomFrontier { - rdf := make(postDomFrontier, len(fn.Blocks)) - rdf.build(fn) - return rdf -} - -func removeInstr(refs []Instruction, instr Instruction) []Instruction { - i := 0 - for _, ref := range refs { - if ref == instr { - continue - } - refs[i] = ref - i++ - } - for j := i; j != len(refs); j++ { - refs[j] = nil // aid GC - } - return refs[:i] -} - -func clearInstrs(instrs []Instruction) { - for i := range instrs { - instrs[i] = nil - } -} - -// lift replaces local and new Allocs accessed only with -// load/store by IR registers, inserting φ- and σ-nodes where necessary. -// The result is a program in pruned SSI form. -// -// Preconditions: -// - fn has no dead blocks (blockopt has run). -// - Def/use info (Operands and Referrers) is up-to-date. -// - The dominator tree is up-to-date. -// -func lift(fn *Function) { - // TODO(adonovan): opt: lots of little optimizations may be - // worthwhile here, especially if they cause us to avoid - // buildDomFrontier. For example: - // - // - Alloc never loaded? Eliminate. - // - Alloc never stored? Replace all loads with a zero constant. - // - Alloc stored once? Replace loads with dominating store; - // don't forget that an Alloc is itself an effective store - // of zero. - // - Alloc used only within a single block? - // Use degenerate algorithm avoiding φ-nodes. - // - Consider synergy with scalar replacement of aggregates (SRA). - // e.g. *(&x.f) where x is an Alloc. - // Perhaps we'd get better results if we generated this as x.f - // i.e. Field(x, .f) instead of Load(FieldIndex(x, .f)). - // Unclear. 
- // - // But we will start with the simplest correct code. - var df domFrontier - var rdf postDomFrontier - var closure *closure - var newPhis newPhiMap - var newSigmas newSigmaMap - - // During this pass we will replace some BasicBlock.Instrs - // (allocs, loads and stores) with nil, keeping a count in - // BasicBlock.gaps. At the end we will reset Instrs to the - // concatenation of all non-dead newPhis and non-nil Instrs - // for the block, reusing the original array if space permits. - - // While we're here, we also eliminate 'rundefers' - // instructions in functions that contain no 'defer' - // instructions. - usesDefer := false - - // Determine which allocs we can lift and number them densely. - // The renaming phase uses this numbering for compact maps. - numAllocs := 0 - for _, b := range fn.Blocks { - b.gaps = 0 - b.rundefers = 0 - for _, instr := range b.Instrs { - switch instr := instr.(type) { - case *Alloc: - if !liftable(instr) { - instr.index = -1 - continue - } - index := -1 - if numAllocs == 0 { - df = buildDomFrontier(fn) - rdf = buildPostDomFrontier(fn) - if len(fn.Blocks) > 2 { - closure = transitiveClosure(fn) - } - newPhis = make(newPhiMap, len(fn.Blocks)) - newSigmas = make(newSigmaMap, len(fn.Blocks)) - - if debugLifting { - title := false - for i, blocks := range df { - if blocks != nil { - if !title { - fmt.Fprintf(os.Stderr, "Dominance frontier of %s:\n", fn) - title = true - } - fmt.Fprintf(os.Stderr, "\t%s: %s\n", fn.Blocks[i], blocks) - } - } - } - } - liftAlloc(closure, df, rdf, instr, newPhis, newSigmas) - index = numAllocs - numAllocs++ - instr.index = index - case *Defer: - usesDefer = true - case *RunDefers: - b.rundefers++ - } - } - } - - if numAllocs > 0 { - // renaming maps an alloc (keyed by index) to its replacement - // value. Initially the renaming contains nil, signifying the - // zero constant of the appropriate type; we construct the - // Const lazily at most once on each path through the domtree. - // TODO(adonovan): opt: cache per-function not per subtree. - renaming := make([]Value, numAllocs) - - // Renaming. - rename(fn.Blocks[0], renaming, newPhis, newSigmas) - - simplifyPhis(newPhis) - - // Eliminate dead φ- and σ-nodes. - markLiveNodes(fn.Blocks, newPhis, newSigmas) - } - - // Prepend remaining live φ-nodes to each block and possibly kill rundefers. - for _, b := range fn.Blocks { - var head []Instruction - if numAllocs > 0 { - nps := newPhis[b.Index] - head = make([]Instruction, 0, len(nps)) - for _, pred := range b.Preds { - nss := newSigmas[pred.Index] - idx := pred.succIndex(b) - for _, newSigma := range nss { - if sigma := newSigma.sigmas[idx]; sigma != nil && sigma.live { - head = append(head, sigma) - - // we didn't populate referrers before, as most - // sigma nodes will be killed - if refs := sigma.X.Referrers(); refs != nil { - *refs = append(*refs, sigma) - } - } else if sigma != nil { - sigma.block = nil - } - } - } - for _, np := range nps { - if np.phi.live { - head = append(head, np.phi) - } else { - for _, edge := range np.phi.Edges { - if refs := edge.Referrers(); refs != nil { - *refs = removeInstr(*refs, np.phi) - } - } - np.phi.block = nil - } - } - } - - rundefersToKill := b.rundefers - if usesDefer { - rundefersToKill = 0 - } - - j := len(head) - if j+b.gaps+rundefersToKill == 0 { - continue // fast path: no new phis or gaps - } - - // We could do straight copies instead of element-wise copies - // when both b.gaps and rundefersToKill are zero. 
However, - // that seems to only be the case ~1% of the time, which - // doesn't seem worth the extra branch. - - // Remove dead instructions, add phis and sigmas - ns := len(b.Instrs) + j - b.gaps - rundefersToKill - if ns <= cap(b.Instrs) { - // b.Instrs has enough capacity to store all instructions - - // OPT(dh): check cap vs the actually required space; if - // there is a big enough difference, it may be worth - // allocating a new slice, to avoid pinning memory. - dst := b.Instrs[:cap(b.Instrs)] - i := len(dst) - 1 - for n := len(b.Instrs) - 1; n >= 0; n-- { - instr := dst[n] - if instr == nil { - continue - } - if !usesDefer { - if _, ok := instr.(*RunDefers); ok { - continue - } - } - dst[i] = instr - i-- - } - off := i + 1 - len(head) - // aid GC - clearInstrs(dst[:off]) - dst = dst[off:] - copy(dst, head) - b.Instrs = dst - } else { - // not enough space, so allocate a new slice and copy - // over. - dst := make([]Instruction, ns) - copy(dst, head) - - for _, instr := range b.Instrs { - if instr == nil { - continue - } - if !usesDefer { - if _, ok := instr.(*RunDefers); ok { - continue - } - } - dst[j] = instr - j++ - } - b.Instrs = dst - } - } - - // Remove any fn.Locals that were lifted. - j := 0 - for _, l := range fn.Locals { - if l.index < 0 { - fn.Locals[j] = l - j++ - } - } - // Nil out fn.Locals[j:] to aid GC. - for i := j; i < len(fn.Locals); i++ { - fn.Locals[i] = nil - } - fn.Locals = fn.Locals[:j] -} - -func hasDirectReferrer(instr Instruction) bool { - for _, instr := range *instr.Referrers() { - switch instr.(type) { - case *Phi, *Sigma: - // ignore - default: - return true - } - } - return false -} - -func markLiveNodes(blocks []*BasicBlock, newPhis newPhiMap, newSigmas newSigmaMap) { - // Phi and sigma nodes are considered live if a non-phi, non-sigma - // node uses them. Once we find a node that is live, we mark all - // of its operands as used, too. - for _, npList := range newPhis { - for _, np := range npList { - phi := np.phi - if !phi.live && hasDirectReferrer(phi) { - markLivePhi(phi) - } - } - } - for _, npList := range newSigmas { - for _, np := range npList { - for _, sigma := range np.sigmas { - if sigma != nil && !sigma.live && hasDirectReferrer(sigma) { - markLiveSigma(sigma) - } - } - } - } - // Existing φ-nodes due to && and || operators - // are all considered live (see Go issue 19622). - for _, b := range blocks { - for _, phi := range b.phis() { - markLivePhi(phi.(*Phi)) - } - } -} - -func markLivePhi(phi *Phi) { - phi.live = true - for _, rand := range phi.Edges { - switch rand := rand.(type) { - case *Phi: - if !rand.live { - markLivePhi(rand) - } - case *Sigma: - if !rand.live { - markLiveSigma(rand) - } - } - } -} - -func markLiveSigma(sigma *Sigma) { - sigma.live = true - switch rand := sigma.X.(type) { - case *Phi: - if !rand.live { - markLivePhi(rand) - } - case *Sigma: - if !rand.live { - markLiveSigma(rand) - } - } -} - -// simplifyPhis replaces trivial phis with non-phi alternatives. Phi -// nodes where all edges are identical, or consist of only the phi -// itself and one other value, may be replaced with the value. -func simplifyPhis(newPhis newPhiMap) { - // find all phis that are trivial and can be replaced with a - // non-phi value. run until we reach a fixpoint, because replacing - // a phi may make other phis trivial. 
- for changed := true; changed; { - changed = false - for _, npList := range newPhis { - for _, np := range npList { - if np.phi.live { - // we're reusing 'live' to mean 'dead' in the context of simplifyPhis - continue - } - if r, ok := isUselessPhi(np.phi); ok { - // useless phi, replace its uses with the - // replacement value. the dead phi pass will clean - // up the phi afterwards. - replaceAll(np.phi, r) - np.phi.live = true - changed = true - } - } - } - } - - for _, npList := range newPhis { - for _, np := range npList { - np.phi.live = false - } - } -} - -type BlockSet struct { - idx int - values []bool - count int -} - -func NewBlockSet(size int) *BlockSet { - return &BlockSet{values: make([]bool, size)} -} - -func (s *BlockSet) Set(s2 *BlockSet) { - copy(s.values, s2.values) - s.count = 0 - for _, v := range s.values { - if v { - s.count++ - } - } -} - -func (s *BlockSet) Num() int { - return s.count -} - -func (s *BlockSet) Has(b *BasicBlock) bool { - if b.Index >= len(s.values) { - return false - } - return s.values[b.Index] -} - -// add adds b to the set and returns true if the set changed. -func (s *BlockSet) Add(b *BasicBlock) bool { - if s.values[b.Index] { - return false - } - s.count++ - s.values[b.Index] = true - s.idx = b.Index - - return true -} - -func (s *BlockSet) Clear() { - for j := range s.values { - s.values[j] = false - } - s.count = 0 -} - -// take removes an arbitrary element from a set s and -// returns its index, or returns -1 if empty. -func (s *BlockSet) Take() int { - // [i, end] - for i := s.idx; i < len(s.values); i++ { - if s.values[i] { - s.values[i] = false - s.idx = i - s.count-- - return i - } - } - - // [start, i) - for i := 0; i < s.idx; i++ { - if s.values[i] { - s.values[i] = false - s.idx = i - s.count-- - return i - } - } - - return -1 -} - -type closure struct { - span []uint32 - reachables []interval -} - -type interval uint32 - -const ( - flagMask = 1 << 31 - numBits = 20 - lengthBits = 32 - numBits - 1 - lengthMask = (1<>numBits - } else { - // large interval - i++ - start = uint32(inv & numMask) - end = uint32(r[i]) - } - if idx >= start && idx <= end { - return true - } - } - return false -} - -func (c closure) reachable(id int) []interval { - return c.reachables[c.span[id]:c.span[id+1]] -} - -func (c closure) walk(current *BasicBlock, b *BasicBlock, visited []bool) { - visited[b.Index] = true - for _, succ := range b.Succs { - if visited[succ.Index] { - continue - } - visited[succ.Index] = true - c.walk(current, succ, visited) - } -} - -func transitiveClosure(fn *Function) *closure { - reachable := make([]bool, len(fn.Blocks)) - c := &closure{} - c.span = make([]uint32, len(fn.Blocks)+1) - - addInterval := func(start, end uint32) { - if l := end - start; l <= 1<= 0 { // store of zero to Alloc cell - // Replace dominated loads by the zero value. - renaming[instr.index] = nil - if debugLifting { - fmt.Fprintf(os.Stderr, "\tkill alloc %s\n", instr) - } - // Delete the Alloc. - u.Instrs[i] = nil - u.gaps++ - } - - case *Store: - if alloc, ok := instr.Addr.(*Alloc); ok && alloc.index >= 0 { // store to Alloc cell - // Replace dominated loads by the stored value. - renaming[alloc.index] = instr.Val - if debugLifting { - fmt.Fprintf(os.Stderr, "\tkill store %s; new value: %s\n", - instr, instr.Val.Name()) - } - if refs := instr.Addr.Referrers(); refs != nil { - *refs = removeInstr(*refs, instr) - } - if refs := instr.Val.Referrers(); refs != nil { - *refs = removeInstr(*refs, instr) - } - // Delete the Store. 
- u.Instrs[i] = nil - u.gaps++ - } - - case *Load: - if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // load of Alloc cell - // In theory, we wouldn't be able to replace loads - // directly, because a loaded value could be used in - // different branches, in which case it should be - // replaced with different sigma nodes. But we can't - // simply defer replacement, either, because then - // later stores might incorrectly affect this load. - // - // To avoid doing renaming on _all_ values (instead of - // just loads and stores like we're doing), we make - // sure during code generation that each load is only - // used in one block. For example, in constant switch - // statements, where the tag is only evaluated once, - // we store it in a temporary and load it for each - // comparison, so that we have individual loads to - // replace. - newval := renamed(u.Parent(), renaming, alloc) - if debugLifting { - fmt.Fprintf(os.Stderr, "\tupdate load %s = %s with %s\n", - instr.Name(), instr, newval) - } - replaceAll(instr, newval) - u.Instrs[i] = nil - u.gaps++ - } - - case *DebugRef: - if x, ok := instr.X.(*Alloc); ok && x.index >= 0 { - if instr.IsAddr { - instr.X = renamed(u.Parent(), renaming, x) - instr.IsAddr = false - - // Add DebugRef to instr.X's referrers. - if refs := instr.X.Referrers(); refs != nil { - *refs = append(*refs, instr) - } - } else { - // A source expression denotes the address - // of an Alloc that was optimized away. - instr.X = nil - - // Delete the DebugRef. - u.Instrs[i] = nil - u.gaps++ - } - } - } - } - - // update all outgoing sigma nodes with the dominating store - for _, sigmas := range newSigmas[u.Index] { - for _, sigma := range sigmas.sigmas { - if sigma == nil { - continue - } - sigma.X = renamed(u.Parent(), renaming, sigmas.alloc) - } - } - - // For each φ-node in a CFG successor, rename the edge. - for succi, v := range u.Succs { - phis := newPhis[v.Index] - if len(phis) == 0 { - continue - } - i := v.predIndex(u) - for _, np := range phis { - phi := np.phi - alloc := np.alloc - // if there's a sigma node, use it, else use the dominating value - var newval Value - for _, sigmas := range newSigmas[u.Index] { - if sigmas.alloc == alloc && sigmas.sigmas[succi] != nil { - newval = sigmas.sigmas[succi] - break - } - } - if newval == nil { - newval = renamed(u.Parent(), renaming, alloc) - } - if debugLifting { - fmt.Fprintf(os.Stderr, "\tsetphi %s edge %s -> %s (#%d) (alloc=%s) := %s\n", - phi.Name(), u, v, i, alloc.Name(), newval.Name()) - } - phi.Edges[i] = newval - if prefs := newval.Referrers(); prefs != nil { - *prefs = append(*prefs, phi) - } - } - } - - // Continue depth-first recursion over domtree, pushing a - // fresh copy of the renaming map for each subtree. - r := make([]Value, len(renaming)) - for _, v := range u.dom.children { - // XXX add debugging - copy(r, renaming) - - // on entry to a block, the incoming sigma nodes become the new values for their alloc - if idx := u.succIndex(v); idx != -1 { - for _, sigma := range newSigmas[u.Index] { - if sigma.sigmas[idx] != nil { - r[sigma.alloc.index] = sigma.sigmas[idx] - } - } - } - rename(v, r, newPhis, newSigmas) - } - -} diff --git a/vendor/honnef.co/go/tools/ir/lvalue.go b/vendor/honnef.co/go/tools/ir/lvalue.go deleted file mode 100644 index f676a1f7abe..00000000000 --- a/vendor/honnef.co/go/tools/ir/lvalue.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ir - -// lvalues are the union of addressable expressions and map-index -// expressions. - -import ( - "go/ast" - "go/types" -) - -// An lvalue represents an assignable location that may appear on the -// left-hand side of an assignment. This is a generalization of a -// pointer to permit updates to elements of maps. -// -type lvalue interface { - store(fn *Function, v Value, source ast.Node) // stores v into the location - load(fn *Function, source ast.Node) Value // loads the contents of the location - address(fn *Function) Value // address of the location - typ() types.Type // returns the type of the location -} - -// An address is an lvalue represented by a true pointer. -type address struct { - addr Value - expr ast.Expr // source syntax of the value (not address) [debug mode] -} - -func (a *address) load(fn *Function, source ast.Node) Value { - return emitLoad(fn, a.addr, source) -} - -func (a *address) store(fn *Function, v Value, source ast.Node) { - store := emitStore(fn, a.addr, v, source) - if a.expr != nil { - // store.Val is v, converted for assignability. - emitDebugRef(fn, a.expr, store.Val, false) - } -} - -func (a *address) address(fn *Function) Value { - if a.expr != nil { - emitDebugRef(fn, a.expr, a.addr, true) - } - return a.addr -} - -func (a *address) typ() types.Type { - return deref(a.addr.Type()) -} - -// An element is an lvalue represented by m[k], the location of an -// element of a map. These locations are not addressable -// since pointers cannot be formed from them, but they do support -// load() and store(). -// -type element struct { - m, k Value // map - t types.Type // map element type -} - -func (e *element) load(fn *Function, source ast.Node) Value { - l := &MapLookup{ - X: e.m, - Index: e.k, - } - l.setType(e.t) - return fn.emit(l, source) -} - -func (e *element) store(fn *Function, v Value, source ast.Node) { - up := &MapUpdate{ - Map: e.m, - Key: e.k, - Value: emitConv(fn, v, e.t, source), - } - fn.emit(up, source) -} - -func (e *element) address(fn *Function) Value { - panic("map elements are not addressable") -} - -func (e *element) typ() types.Type { - return e.t -} - -// A blank is a dummy variable whose name is "_". -// It is not reified: loads are illegal and stores are ignored. -// -type blank struct{} - -func (bl blank) load(fn *Function, source ast.Node) Value { - panic("blank.load is illegal") -} - -func (bl blank) store(fn *Function, v Value, source ast.Node) { - s := &BlankStore{ - Val: v, - } - fn.emit(s, source) -} - -func (bl blank) address(fn *Function) Value { - panic("blank var is not addressable") -} - -func (bl blank) typ() types.Type { - // This should be the type of the blank Ident; the typechecker - // doesn't provide this yet, but fortunately, we don't need it - // yet either. - panic("blank.typ is unimplemented") -} diff --git a/vendor/honnef.co/go/tools/ir/methods.go b/vendor/honnef.co/go/tools/ir/methods.go deleted file mode 100644 index 517f448b8c3..00000000000 --- a/vendor/honnef.co/go/tools/ir/methods.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ir - -// This file defines utilities for population of method sets. 
- -import ( - "fmt" - "go/types" -) - -// MethodValue returns the Function implementing method sel, building -// wrapper methods on demand. It returns nil if sel denotes an -// abstract (interface) method. -// -// Precondition: sel.Kind() == MethodVal. -// -// Thread-safe. -// -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) -// -func (prog *Program) MethodValue(sel *types.Selection) *Function { - if sel.Kind() != types.MethodVal { - panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel)) - } - T := sel.Recv() - if isInterface(T) { - return nil // abstract method - } - if prog.mode&LogSource != 0 { - defer logStack("MethodValue %s %v", T, sel)() - } - - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - - return prog.addMethod(prog.createMethodSet(T), sel) -} - -// LookupMethod returns the implementation of the method of type T -// identified by (pkg, name). It returns nil if the method exists but -// is abstract, and panics if T has no such method. -// -func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) *Function { - sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name) - if sel == nil { - panic(fmt.Sprintf("%s has no method %s", T, types.Id(pkg, name))) - } - return prog.MethodValue(sel) -} - -// methodSet contains the (concrete) methods of a non-interface type. -type methodSet struct { - mapping map[string]*Function // populated lazily - complete bool // mapping contains all methods -} - -// Precondition: !isInterface(T). -// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -func (prog *Program) createMethodSet(T types.Type) *methodSet { - mset, ok := prog.methodSets.At(T).(*methodSet) - if !ok { - mset = &methodSet{mapping: make(map[string]*Function)} - prog.methodSets.Set(T, mset) - } - return mset -} - -// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -func (prog *Program) addMethod(mset *methodSet, sel *types.Selection) *Function { - if sel.Kind() == types.MethodExpr { - panic(sel) - } - id := sel.Obj().Id() - fn := mset.mapping[id] - if fn == nil { - obj := sel.Obj().(*types.Func) - - needsPromotion := len(sel.Index()) > 1 - needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.Recv()) - if needsPromotion || needsIndirection { - fn = makeWrapper(prog, sel) - } else { - fn = prog.declaredFunc(obj) - } - if fn.Signature.Recv() == nil { - panic(fn) // missing receiver - } - mset.mapping[id] = fn - } - return fn -} - -// RuntimeTypes returns a new unordered slice containing all -// concrete types in the program for which a complete (non-empty) -// method set is required at run-time. -// -// Thread-safe. -// -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) -// -func (prog *Program) RuntimeTypes() []types.Type { - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - - var res []types.Type - prog.methodSets.Iterate(func(T types.Type, v interface{}) { - if v.(*methodSet).complete { - res = append(res, T) - } - }) - return res -} - -// declaredFunc returns the concrete function/method denoted by obj. -// Panic ensues if there is none. -// -func (prog *Program) declaredFunc(obj *types.Func) *Function { - if v := prog.packageLevelValue(obj); v != nil { - return v.(*Function) - } - panic("no concrete method: " + obj.String()) -} - -// needMethodsOf ensures that runtime type information (including the -// complete method set) is available for the specified type T and all -// its subcomponents. -// -// needMethodsOf must be called for at least every type that is an -// operand of some MakeInterface instruction, and for the type of -// every exported package member. 
-// -// Precondition: T is not a method signature (*Signature with Recv()!=nil). -// -// Thread-safe. (Called via emitConv from multiple builder goroutines.) -// -// TODO(adonovan): make this faster. It accounts for 20% of SSA build time. -// -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) -// -func (prog *Program) needMethodsOf(T types.Type) { - prog.methodsMu.Lock() - prog.needMethods(T, false) - prog.methodsMu.Unlock() -} - -// Precondition: T is not a method signature (*Signature with Recv()!=nil). -// Recursive case: skip => don't create methods for T. -// -// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -// -func (prog *Program) needMethods(T types.Type, skip bool) { - // Each package maintains its own set of types it has visited. - if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok { - // needMethods(T) was previously called - if !prevSkip || skip { - return // already seen, with same or false 'skip' value - } - } - prog.runtimeTypes.Set(T, skip) - - tmset := prog.MethodSets.MethodSet(T) - - if !skip && !isInterface(T) && tmset.Len() > 0 { - // Create methods of T. - mset := prog.createMethodSet(T) - if !mset.complete { - mset.complete = true - n := tmset.Len() - for i := 0; i < n; i++ { - prog.addMethod(mset, tmset.At(i)) - } - } - } - - // Recursion over signatures of each method. - for i := 0; i < tmset.Len(); i++ { - sig := tmset.At(i).Type().(*types.Signature) - prog.needMethods(sig.Params(), false) - prog.needMethods(sig.Results(), false) - } - - switch t := T.(type) { - case *types.Basic: - // nop - - case *types.Interface: - // nop---handled by recursion over method set. - - case *types.Pointer: - prog.needMethods(t.Elem(), false) - - case *types.Slice: - prog.needMethods(t.Elem(), false) - - case *types.Chan: - prog.needMethods(t.Elem(), false) - - case *types.Map: - prog.needMethods(t.Key(), false) - prog.needMethods(t.Elem(), false) - - case *types.Signature: - if t.Recv() != nil { - panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv())) - } - prog.needMethods(t.Params(), false) - prog.needMethods(t.Results(), false) - - case *types.Named: - // A pointer-to-named type can be derived from a named - // type via reflection. It may have methods too. - prog.needMethods(types.NewPointer(T), false) - - // Consider 'type T struct{S}' where S has methods. - // Reflection provides no way to get from T to struct{S}, - // only to S, so the method set of struct{S} is unwanted, - // so set 'skip' flag during recursion. - prog.needMethods(t.Underlying(), true) - - case *types.Array: - prog.needMethods(t.Elem(), false) - - case *types.Struct: - for i, n := 0, t.NumFields(); i < n; i++ { - prog.needMethods(t.Field(i).Type(), false) - } - - case *types.Tuple: - for i, n := 0, t.Len(); i < n; i++ { - prog.needMethods(t.At(i).Type(), false) - } - - default: - panic(T) - } -} diff --git a/vendor/honnef.co/go/tools/ir/mode.go b/vendor/honnef.co/go/tools/ir/mode.go deleted file mode 100644 index da548fdbb29..00000000000 --- a/vendor/honnef.co/go/tools/ir/mode.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ir - -// This file defines the BuilderMode type and its command-line flag. - -import ( - "bytes" - "fmt" -) - -// BuilderMode is a bitmask of options for diagnostics and checking. -// -// *BuilderMode satisfies the flag.Value interface. 
Example: -// -// var mode = ir.BuilderMode(0) -// func init() { flag.Var(&mode, "build", ir.BuilderModeDoc) } -// -type BuilderMode uint - -const ( - PrintPackages BuilderMode = 1 << iota // Print package inventory to stdout - PrintFunctions // Print function IR code to stdout - PrintSource // Print source code when printing function IR - LogSource // Log source locations as IR builder progresses - SanityCheckFunctions // Perform sanity checking of function bodies - NaiveForm // Build naïve IR form: don't replace local loads/stores with registers - GlobalDebug // Enable debug info for all packages -) - -const BuilderModeDoc = `Options controlling the IR builder. -The value is a sequence of zero or more of these letters: -C perform sanity [C]hecking of the IR form. -D include [D]ebug info for every function. -P print [P]ackage inventory. -F print [F]unction IR code. -A print [A]ST nodes responsible for IR instructions -S log [S]ource locations as IR builder progresses. -N build [N]aive IR form: don't replace local loads/stores with registers. -` - -func (m BuilderMode) String() string { - var buf bytes.Buffer - if m&GlobalDebug != 0 { - buf.WriteByte('D') - } - if m&PrintPackages != 0 { - buf.WriteByte('P') - } - if m&PrintFunctions != 0 { - buf.WriteByte('F') - } - if m&PrintSource != 0 { - buf.WriteByte('A') - } - if m&LogSource != 0 { - buf.WriteByte('S') - } - if m&SanityCheckFunctions != 0 { - buf.WriteByte('C') - } - if m&NaiveForm != 0 { - buf.WriteByte('N') - } - return buf.String() -} - -// Set parses the flag characters in s and updates *m. -func (m *BuilderMode) Set(s string) error { - var mode BuilderMode - for _, c := range s { - switch c { - case 'D': - mode |= GlobalDebug - case 'P': - mode |= PrintPackages - case 'F': - mode |= PrintFunctions - case 'A': - mode |= PrintSource - case 'S': - mode |= LogSource - case 'C': - mode |= SanityCheckFunctions - case 'N': - mode |= NaiveForm - default: - return fmt.Errorf("unknown BuilderMode option: %q", c) - } - } - *m = mode - return nil -} - -// Get returns m. -func (m BuilderMode) Get() interface{} { return m } diff --git a/vendor/honnef.co/go/tools/ir/print.go b/vendor/honnef.co/go/tools/ir/print.go deleted file mode 100644 index c16c08efa65..00000000000 --- a/vendor/honnef.co/go/tools/ir/print.go +++ /dev/null @@ -1,472 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ir - -// This file implements the String() methods for all Value and -// Instruction types. - -import ( - "bytes" - "fmt" - "go/types" - "io" - "reflect" - "sort" - - "golang.org/x/tools/go/types/typeutil" -) - -// relName returns the name of v relative to i. -// In most cases, this is identical to v.Name(), but references to -// Functions (including methods) and Globals use RelString and -// all types are displayed with relType, so that only cross-package -// references are package-qualified. -// -func relName(v Value, i Instruction) string { - if v == nil { - return "" - } - var from *types.Package - if i != nil { - from = i.Parent().pkg() - } - switch v := v.(type) { - case Member: // *Function or *Global - return v.RelString(from) - } - return v.Name() -} - -func relType(t types.Type, from *types.Package) string { - return types.TypeString(t, types.RelativeTo(from)) -} - -func relString(m Member, from *types.Package) string { - // NB: not all globals have an Object (e.g. 
init$guard), - // so use Package().Object not Object.Package(). - if pkg := m.Package().Pkg; pkg != nil && pkg != from { - return fmt.Sprintf("%s.%s", pkg.Path(), m.Name()) - } - return m.Name() -} - -// Value.String() -// -// This method is provided only for debugging. -// It never appears in disassembly, which uses Value.Name(). - -func (v *Parameter) String() string { - from := v.Parent().pkg() - return fmt.Sprintf("Parameter <%s> {%s}", relType(v.Type(), from), v.name) -} - -func (v *FreeVar) String() string { - from := v.Parent().pkg() - return fmt.Sprintf("FreeVar <%s> %s", relType(v.Type(), from), v.Name()) -} - -func (v *Builtin) String() string { - return fmt.Sprintf("Builtin %s", v.Name()) -} - -// Instruction.String() - -func (v *Alloc) String() string { - from := v.Parent().pkg() - storage := "Stack" - if v.Heap { - storage = "Heap" - } - return fmt.Sprintf("%sAlloc <%s>", storage, relType(v.Type(), from)) -} - -func (v *Sigma) String() string { - from := v.Parent().pkg() - s := fmt.Sprintf("Sigma <%s> [b%d] %s", relType(v.Type(), from), v.From.Index, v.X.Name()) - return s -} - -func (v *Phi) String() string { - var b bytes.Buffer - fmt.Fprintf(&b, "Phi <%s>", v.Type()) - for i, edge := range v.Edges { - b.WriteString(" ") - // Be robust against malformed CFG. - if v.block == nil { - b.WriteString("??") - continue - } - block := -1 - if i < len(v.block.Preds) { - block = v.block.Preds[i].Index - } - fmt.Fprintf(&b, "%d:", block) - edgeVal := "" // be robust - if edge != nil { - edgeVal = relName(edge, v) - } - b.WriteString(edgeVal) - } - return b.String() -} - -func printCall(v *CallCommon, prefix string, instr Instruction) string { - var b bytes.Buffer - if !v.IsInvoke() { - if value, ok := instr.(Value); ok { - fmt.Fprintf(&b, "%s <%s> %s", prefix, relType(value.Type(), instr.Parent().pkg()), relName(v.Value, instr)) - } else { - fmt.Fprintf(&b, "%s %s", prefix, relName(v.Value, instr)) - } - } else { - if value, ok := instr.(Value); ok { - fmt.Fprintf(&b, "%sInvoke <%s> %s.%s", prefix, relType(value.Type(), instr.Parent().pkg()), relName(v.Value, instr), v.Method.Name()) - } else { - fmt.Fprintf(&b, "%sInvoke %s.%s", prefix, relName(v.Value, instr), v.Method.Name()) - } - } - for _, arg := range v.Args { - b.WriteString(" ") - b.WriteString(relName(arg, instr)) - } - return b.String() -} - -func (c *CallCommon) String() string { - return printCall(c, "", nil) -} - -func (v *Call) String() string { - return printCall(&v.Call, "Call", v) -} - -func (v *BinOp) String() string { - return fmt.Sprintf("BinOp <%s> {%s} %s %s", relType(v.Type(), v.Parent().pkg()), v.Op.String(), relName(v.X, v), relName(v.Y, v)) -} - -func (v *UnOp) String() string { - return fmt.Sprintf("UnOp <%s> {%s} %s", relType(v.Type(), v.Parent().pkg()), v.Op.String(), relName(v.X, v)) -} - -func (v *Load) String() string { - return fmt.Sprintf("Load <%s> %s", relType(v.Type(), v.Parent().pkg()), relName(v.X, v)) -} - -func printConv(prefix string, v, x Value) string { - from := v.Parent().pkg() - return fmt.Sprintf("%s <%s> %s", - prefix, - relType(v.Type(), from), - relName(x, v.(Instruction))) -} - -func (v *ChangeType) String() string { return printConv("ChangeType", v, v.X) } -func (v *Convert) String() string { return printConv("Convert", v, v.X) } -func (v *ChangeInterface) String() string { return printConv("ChangeInterface", v, v.X) } -func (v *MakeInterface) String() string { return printConv("MakeInterface", v, v.X) } - -func (v *MakeClosure) String() string { - from := v.Parent().pkg() - var b 
bytes.Buffer - fmt.Fprintf(&b, "MakeClosure <%s> %s", relType(v.Type(), from), relName(v.Fn, v)) - if v.Bindings != nil { - for _, c := range v.Bindings { - b.WriteString(" ") - b.WriteString(relName(c, v)) - } - } - return b.String() -} - -func (v *MakeSlice) String() string { - from := v.Parent().pkg() - return fmt.Sprintf("MakeSlice <%s> %s %s", - relType(v.Type(), from), - relName(v.Len, v), - relName(v.Cap, v)) -} - -func (v *Slice) String() string { - from := v.Parent().pkg() - return fmt.Sprintf("Slice <%s> %s %s %s %s", - relType(v.Type(), from), relName(v.X, v), relName(v.Low, v), relName(v.High, v), relName(v.Max, v)) -} - -func (v *MakeMap) String() string { - res := "" - if v.Reserve != nil { - res = relName(v.Reserve, v) - } - from := v.Parent().pkg() - return fmt.Sprintf("MakeMap <%s> %s", relType(v.Type(), from), res) -} - -func (v *MakeChan) String() string { - from := v.Parent().pkg() - return fmt.Sprintf("MakeChan <%s> %s", relType(v.Type(), from), relName(v.Size, v)) -} - -func (v *FieldAddr) String() string { - from := v.Parent().pkg() - st := deref(v.X.Type()).Underlying().(*types.Struct) - // Be robust against a bad index. - name := "?" - if 0 <= v.Field && v.Field < st.NumFields() { - name = st.Field(v.Field).Name() - } - return fmt.Sprintf("FieldAddr <%s> [%d] (%s) %s", relType(v.Type(), from), v.Field, name, relName(v.X, v)) -} - -func (v *Field) String() string { - st := v.X.Type().Underlying().(*types.Struct) - // Be robust against a bad index. - name := "?" - if 0 <= v.Field && v.Field < st.NumFields() { - name = st.Field(v.Field).Name() - } - from := v.Parent().pkg() - return fmt.Sprintf("Field <%s> [%d] (%s) %s", relType(v.Type(), from), v.Field, name, relName(v.X, v)) -} - -func (v *IndexAddr) String() string { - from := v.Parent().pkg() - return fmt.Sprintf("IndexAddr <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v)) -} - -func (v *Index) String() string { - from := v.Parent().pkg() - return fmt.Sprintf("Index <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v)) -} - -func (v *MapLookup) String() string { - from := v.Parent().pkg() - return fmt.Sprintf("MapLookup <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v)) -} - -func (v *StringLookup) String() string { - from := v.Parent().pkg() - return fmt.Sprintf("StringLookup <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v)) -} - -func (v *Range) String() string { - from := v.Parent().pkg() - return fmt.Sprintf("Range <%s> %s", relType(v.Type(), from), relName(v.X, v)) -} - -func (v *Next) String() string { - from := v.Parent().pkg() - return fmt.Sprintf("Next <%s> %s", relType(v.Type(), from), relName(v.Iter, v)) -} - -func (v *TypeAssert) String() string { - from := v.Parent().pkg() - return fmt.Sprintf("TypeAssert <%s> %s", relType(v.Type(), from), relName(v.X, v)) -} - -func (v *Extract) String() string { - from := v.Parent().pkg() - name := v.Tuple.Type().(*types.Tuple).At(v.Index).Name() - return fmt.Sprintf("Extract <%s> [%d] (%s) %s", relType(v.Type(), from), v.Index, name, relName(v.Tuple, v)) -} - -func (s *Jump) String() string { - // Be robust against malformed CFG. - block := -1 - if s.block != nil && len(s.block.Succs) == 1 { - block = s.block.Succs[0].Index - } - str := fmt.Sprintf("Jump → b%d", block) - if s.Comment != "" { - str = fmt.Sprintf("%s # %s", str, s.Comment) - } - return str -} - -func (s *Unreachable) String() string { - // Be robust against malformed CFG. 
- block := -1 - if s.block != nil && len(s.block.Succs) == 1 { - block = s.block.Succs[0].Index - } - return fmt.Sprintf("Unreachable → b%d", block) -} - -func (s *If) String() string { - // Be robust against malformed CFG. - tblock, fblock := -1, -1 - if s.block != nil && len(s.block.Succs) == 2 { - tblock = s.block.Succs[0].Index - fblock = s.block.Succs[1].Index - } - return fmt.Sprintf("If %s → b%d b%d", relName(s.Cond, s), tblock, fblock) -} - -func (s *ConstantSwitch) String() string { - var b bytes.Buffer - fmt.Fprintf(&b, "ConstantSwitch %s", relName(s.Tag, s)) - for _, cond := range s.Conds { - fmt.Fprintf(&b, " %s", relName(cond, s)) - } - fmt.Fprint(&b, " →") - for _, succ := range s.block.Succs { - fmt.Fprintf(&b, " b%d", succ.Index) - } - return b.String() -} - -func (s *TypeSwitch) String() string { - from := s.Parent().pkg() - var b bytes.Buffer - fmt.Fprintf(&b, "TypeSwitch <%s> %s", relType(s.typ, from), relName(s.Tag, s)) - for _, cond := range s.Conds { - fmt.Fprintf(&b, " %q", relType(cond, s.block.parent.pkg())) - } - return b.String() -} - -func (s *Go) String() string { - return printCall(&s.Call, "Go", s) -} - -func (s *Panic) String() string { - // Be robust against malformed CFG. - block := -1 - if s.block != nil && len(s.block.Succs) == 1 { - block = s.block.Succs[0].Index - } - return fmt.Sprintf("Panic %s → b%d", relName(s.X, s), block) -} - -func (s *Return) String() string { - var b bytes.Buffer - b.WriteString("Return") - for _, r := range s.Results { - b.WriteString(" ") - b.WriteString(relName(r, s)) - } - return b.String() -} - -func (*RunDefers) String() string { - return "RunDefers" -} - -func (s *Send) String() string { - return fmt.Sprintf("Send %s %s", relName(s.Chan, s), relName(s.X, s)) -} - -func (recv *Recv) String() string { - from := recv.Parent().pkg() - return fmt.Sprintf("Recv <%s> %s", relType(recv.Type(), from), relName(recv.Chan, recv)) -} - -func (s *Defer) String() string { - return printCall(&s.Call, "Defer", s) -} - -func (s *Select) String() string { - var b bytes.Buffer - for i, st := range s.States { - if i > 0 { - b.WriteString(", ") - } - if st.Dir == types.RecvOnly { - b.WriteString("<-") - b.WriteString(relName(st.Chan, s)) - } else { - b.WriteString(relName(st.Chan, s)) - b.WriteString("<-") - b.WriteString(relName(st.Send, s)) - } - } - non := "" - if !s.Blocking { - non = "Non" - } - from := s.Parent().pkg() - return fmt.Sprintf("Select%sBlocking <%s> [%s]", non, relType(s.Type(), from), b.String()) -} - -func (s *Store) String() string { - return fmt.Sprintf("Store {%s} %s %s", - s.Val.Type(), relName(s.Addr, s), relName(s.Val, s)) -} - -func (s *BlankStore) String() string { - return fmt.Sprintf("BlankStore %s", relName(s.Val, s)) -} - -func (s *MapUpdate) String() string { - return fmt.Sprintf("MapUpdate %s %s %s", relName(s.Map, s), relName(s.Key, s), relName(s.Value, s)) -} - -func (s *DebugRef) String() string { - p := s.Parent().Prog.Fset.Position(s.Pos()) - var descr interface{} - if s.object != nil { - descr = s.object // e.g. "var x int" - } else { - descr = reflect.TypeOf(s.Expr) // e.g. 
"*ast.CallExpr" - } - var addr string - if s.IsAddr { - addr = "address of " - } - return fmt.Sprintf("; %s%s @ %d:%d is %s", addr, descr, p.Line, p.Column, s.X.Name()) -} - -func (p *Package) String() string { - return "package " + p.Pkg.Path() -} - -var _ io.WriterTo = (*Package)(nil) // *Package implements io.Writer - -func (p *Package) WriteTo(w io.Writer) (int64, error) { - var buf bytes.Buffer - WritePackage(&buf, p) - n, err := w.Write(buf.Bytes()) - return int64(n), err -} - -// WritePackage writes to buf a human-readable summary of p. -func WritePackage(buf *bytes.Buffer, p *Package) { - fmt.Fprintf(buf, "%s:\n", p) - - var names []string - maxname := 0 - for name := range p.Members { - if l := len(name); l > maxname { - maxname = l - } - names = append(names, name) - } - - from := p.Pkg - sort.Strings(names) - for _, name := range names { - switch mem := p.Members[name].(type) { - case *NamedConst: - fmt.Fprintf(buf, " const %-*s %s = %s\n", - maxname, name, mem.Name(), mem.Value.RelString(from)) - - case *Function: - fmt.Fprintf(buf, " func %-*s %s\n", - maxname, name, relType(mem.Type(), from)) - - case *Type: - fmt.Fprintf(buf, " type %-*s %s\n", - maxname, name, relType(mem.Type().Underlying(), from)) - for _, meth := range typeutil.IntuitiveMethodSet(mem.Type(), &p.Prog.MethodSets) { - fmt.Fprintf(buf, " %s\n", types.SelectionString(meth, types.RelativeTo(from))) - } - - case *Global: - fmt.Fprintf(buf, " var %-*s %s\n", - maxname, name, relType(mem.Type().(*types.Pointer).Elem(), from)) - } - } - - fmt.Fprintf(buf, "\n") -} diff --git a/vendor/honnef.co/go/tools/ir/sanity.go b/vendor/honnef.co/go/tools/ir/sanity.go deleted file mode 100644 index ff9edbc6463..00000000000 --- a/vendor/honnef.co/go/tools/ir/sanity.go +++ /dev/null @@ -1,555 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ir - -// An optional pass for sanity-checking invariants of the IR representation. -// Currently it checks CFG invariants but little at the instruction level. - -import ( - "fmt" - "go/types" - "io" - "os" - "strings" -) - -type sanity struct { - reporter io.Writer - fn *Function - block *BasicBlock - instrs map[Instruction]struct{} - insane bool -} - -// sanityCheck performs integrity checking of the IR representation -// of the function fn and returns true if it was valid. Diagnostics -// are written to reporter if non-nil, os.Stderr otherwise. Some -// diagnostics are only warnings and do not imply a negative result. -// -// Sanity-checking is intended to facilitate the debugging of code -// transformation passes. -// -func sanityCheck(fn *Function, reporter io.Writer) bool { - if reporter == nil { - reporter = os.Stderr - } - return (&sanity{reporter: reporter}).checkFunction(fn) -} - -// mustSanityCheck is like sanityCheck but panics instead of returning -// a negative result. -// -func mustSanityCheck(fn *Function, reporter io.Writer) { - if !sanityCheck(fn, reporter) { - fn.WriteTo(os.Stderr) - panic("SanityCheck failed") - } -} - -func (s *sanity) diagnostic(prefix, format string, args ...interface{}) { - fmt.Fprintf(s.reporter, "%s: function %s", prefix, s.fn) - if s.block != nil { - fmt.Fprintf(s.reporter, ", block %s", s.block) - } - io.WriteString(s.reporter, ": ") - fmt.Fprintf(s.reporter, format, args...) 
- io.WriteString(s.reporter, "\n") -} - -func (s *sanity) errorf(format string, args ...interface{}) { - s.insane = true - s.diagnostic("Error", format, args...) -} - -func (s *sanity) warnf(format string, args ...interface{}) { - s.diagnostic("Warning", format, args...) -} - -// findDuplicate returns an arbitrary basic block that appeared more -// than once in blocks, or nil if all were unique. -func findDuplicate(blocks []*BasicBlock) *BasicBlock { - if len(blocks) < 2 { - return nil - } - if blocks[0] == blocks[1] { - return blocks[0] - } - // Slow path: - m := make(map[*BasicBlock]bool) - for _, b := range blocks { - if m[b] { - return b - } - m[b] = true - } - return nil -} - -func (s *sanity) checkInstr(idx int, instr Instruction) { - switch instr := instr.(type) { - case *If, *Jump, *Return, *Panic, *Unreachable, *ConstantSwitch: - s.errorf("control flow instruction not at end of block") - case *Sigma: - if idx > 0 { - prev := s.block.Instrs[idx-1] - if _, ok := prev.(*Sigma); !ok { - s.errorf("Sigma instruction follows a non-Sigma: %T", prev) - } - } - case *Phi: - if idx == 0 { - // It suffices to apply this check to just the first phi node. - if dup := findDuplicate(s.block.Preds); dup != nil { - s.errorf("phi node in block with duplicate predecessor %s", dup) - } - } else { - prev := s.block.Instrs[idx-1] - switch prev.(type) { - case *Phi, *Sigma: - default: - s.errorf("Phi instruction follows a non-Phi, non-Sigma: %T", prev) - } - } - if ne, np := len(instr.Edges), len(s.block.Preds); ne != np { - s.errorf("phi node has %d edges but %d predecessors", ne, np) - - } else { - for i, e := range instr.Edges { - if e == nil { - s.errorf("phi node '%v' has no value for edge #%d from %s", instr, i, s.block.Preds[i]) - } - } - } - - case *Alloc: - if !instr.Heap { - found := false - for _, l := range s.fn.Locals { - if l == instr { - found = true - break - } - } - if !found { - s.errorf("local alloc %s = %s does not appear in Function.Locals", instr.Name(), instr) - } - } - - case *BinOp: - case *Call: - case *ChangeInterface: - case *ChangeType: - case *Convert: - if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok { - if _, ok := instr.Type().Underlying().(*types.Basic); !ok { - s.errorf("convert %s -> %s: at least one type must be basic", instr.X.Type(), instr.Type()) - } - } - - case *Defer: - case *Extract: - case *Field: - case *FieldAddr: - case *Go: - case *Index: - case *IndexAddr: - case *MapLookup: - case *StringLookup: - case *MakeChan: - case *MakeClosure: - numFree := len(instr.Fn.(*Function).FreeVars) - numBind := len(instr.Bindings) - if numFree != numBind { - s.errorf("MakeClosure has %d Bindings for function %s with %d free vars", - numBind, instr.Fn, numFree) - - } - if recv := instr.Type().(*types.Signature).Recv(); recv != nil { - s.errorf("MakeClosure's type includes receiver %s", recv.Type()) - } - - case *MakeInterface: - case *MakeMap: - case *MakeSlice: - case *MapUpdate: - case *Next: - case *Range: - case *RunDefers: - case *Select: - case *Send: - case *Slice: - case *Store: - case *TypeAssert: - case *UnOp: - case *DebugRef: - case *BlankStore: - case *Load: - case *Parameter: - case *Const: - case *Recv: - case *TypeSwitch: - default: - panic(fmt.Sprintf("Unknown instruction type: %T", instr)) - } - - if call, ok := instr.(CallInstruction); ok { - if call.Common().Signature() == nil { - s.errorf("nil signature: %s", call) - } - } - - // Check that value-defining instructions have valid types - // and a valid referrer list. 
- if v, ok := instr.(Value); ok { - t := v.Type() - if t == nil { - s.errorf("no type: %s = %s", v.Name(), v) - } else if t == tRangeIter { - // not a proper type; ignore. - } else if b, ok := t.Underlying().(*types.Basic); ok && b.Info()&types.IsUntyped != 0 { - if _, ok := v.(*Const); !ok { - s.errorf("instruction has 'untyped' result: %s = %s : %s", v.Name(), v, t) - } - } - s.checkReferrerList(v) - } - - // Untyped constants are legal as instruction Operands(), - // for example: - // _ = "foo"[0] - // or: - // if wordsize==64 {...} - - // All other non-Instruction Values can be found via their - // enclosing Function or Package. -} - -func (s *sanity) checkFinalInstr(instr Instruction) { - switch instr := instr.(type) { - case *If: - if nsuccs := len(s.block.Succs); nsuccs != 2 { - s.errorf("If-terminated block has %d successors; expected 2", nsuccs) - return - } - if s.block.Succs[0] == s.block.Succs[1] { - s.errorf("If-instruction has same True, False target blocks: %s", s.block.Succs[0]) - return - } - - case *Jump: - if nsuccs := len(s.block.Succs); nsuccs != 1 { - s.errorf("Jump-terminated block has %d successors; expected 1", nsuccs) - return - } - - case *Return: - if nsuccs := len(s.block.Succs); nsuccs != 0 { - s.errorf("Return-terminated block has %d successors; expected none", nsuccs) - return - } - if na, nf := len(instr.Results), s.fn.Signature.Results().Len(); nf != na { - s.errorf("%d-ary return in %d-ary function", na, nf) - } - - case *Panic: - if nsuccs := len(s.block.Succs); nsuccs != 1 { - s.errorf("Panic-terminated block has %d successors; expected one", nsuccs) - return - } - - case *Unreachable: - if nsuccs := len(s.block.Succs); nsuccs != 1 { - s.errorf("Unreachable-terminated block has %d successors; expected one", nsuccs) - return - } - - case *ConstantSwitch: - - default: - s.errorf("non-control flow instruction at end of block") - } -} - -func (s *sanity) checkBlock(b *BasicBlock, index int) { - s.block = b - - if b.Index != index { - s.errorf("block has incorrect Index %d", b.Index) - } - if b.parent != s.fn { - s.errorf("block has incorrect parent %s", b.parent) - } - - // Check all blocks are reachable. - // (The entry block is always implicitly reachable, the exit block may be unreachable.) - if index > 1 && len(b.Preds) == 0 { - s.warnf("unreachable block") - if b.Instrs == nil { - // Since this block is about to be pruned, - // tolerating transient problems in it - // simplifies other optimizations. - return - } - } - - // Check predecessor and successor relations are dual, - // and that all blocks in CFG belong to same function. - for _, a := range b.Preds { - found := false - for _, bb := range a.Succs { - if bb == b { - found = true - break - } - } - if !found { - s.errorf("expected successor edge in predecessor %s; found only: %s", a, a.Succs) - } - if a.parent != s.fn { - s.errorf("predecessor %s belongs to different function %s", a, a.parent) - } - } - for _, c := range b.Succs { - found := false - for _, bb := range c.Preds { - if bb == b { - found = true - break - } - } - if !found { - s.errorf("expected predecessor edge in successor %s; found only: %s", c, c.Preds) - } - if c.parent != s.fn { - s.errorf("successor %s belongs to different function %s", c, c.parent) - } - } - - // Check each instruction is sane. 
- n := len(b.Instrs) - if n == 0 { - s.errorf("basic block contains no instructions") - } - var rands [10]*Value // reuse storage - for j, instr := range b.Instrs { - if instr == nil { - s.errorf("nil instruction at index %d", j) - continue - } - if b2 := instr.Block(); b2 == nil { - s.errorf("nil Block() for instruction at index %d", j) - continue - } else if b2 != b { - s.errorf("wrong Block() (%s) for instruction at index %d ", b2, j) - continue - } - if j < n-1 { - s.checkInstr(j, instr) - } else { - s.checkFinalInstr(instr) - } - - // Check Instruction.Operands. - operands: - for i, op := range instr.Operands(rands[:0]) { - if op == nil { - s.errorf("nil operand pointer %d of %s", i, instr) - continue - } - val := *op - if val == nil { - continue // a nil operand is ok - } - - // Check that "untyped" types only appear on constant operands. - if _, ok := (*op).(*Const); !ok { - if basic, ok := (*op).Type().(*types.Basic); ok { - if basic.Info()&types.IsUntyped != 0 { - s.errorf("operand #%d of %s is untyped: %s", i, instr, basic) - } - } - } - - // Check that Operands that are also Instructions belong to same function. - // TODO(adonovan): also check their block dominates block b. - if val, ok := val.(Instruction); ok { - if val.Block() == nil { - s.errorf("operand %d of %s is an instruction (%s) that belongs to no block", i, instr, val) - } else if val.Parent() != s.fn { - s.errorf("operand %d of %s is an instruction (%s) from function %s", i, instr, val, val.Parent()) - } - } - - // Check that each function-local operand of - // instr refers back to instr. (NB: quadratic) - switch val := val.(type) { - case *Const, *Global, *Builtin: - continue // not local - case *Function: - if val.parent == nil { - continue // only anon functions are local - } - } - - // TODO(adonovan): check val.Parent() != nil <=> val.Referrers() is defined. - - if refs := val.Referrers(); refs != nil { - for _, ref := range *refs { - if ref == instr { - continue operands - } - } - s.errorf("operand %d of %s (%s) does not refer to us", i, instr, val) - } else { - s.errorf("operand %d of %s (%s) has no referrers", i, instr, val) - } - } - } -} - -func (s *sanity) checkReferrerList(v Value) { - refs := v.Referrers() - if refs == nil { - s.errorf("%s has missing referrer list", v.Name()) - return - } - for i, ref := range *refs { - if _, ok := s.instrs[ref]; !ok { - if val, ok := ref.(Value); ok { - s.errorf("%s.Referrers()[%d] = %s = %s is not an instruction belonging to this function", v.Name(), i, val.Name(), val) - } else { - s.errorf("%s.Referrers()[%d] = %s is not an instruction belonging to this function", v.Name(), i, ref) - } - } - } -} - -func (s *sanity) checkFunction(fn *Function) bool { - // TODO(adonovan): check Function invariants: - // - check params match signature - // - check transient fields are nil - // - warn if any fn.Locals do not appear among block instructions. - s.fn = fn - if fn.Prog == nil { - s.errorf("nil Prog") - } - - _ = fn.String() // must not crash - _ = fn.RelString(fn.pkg()) // must not crash - - // All functions have a package, except delegates (which are - // shared across packages, or duplicated as weak symbols in a - // separate-compilation model), and error.Error. 
- if fn.Pkg == nil { - if strings.HasPrefix(fn.Synthetic, "wrapper ") || - strings.HasPrefix(fn.Synthetic, "bound ") || - strings.HasPrefix(fn.Synthetic, "thunk ") || - strings.HasSuffix(fn.name, "Error") { - // ok - } else { - s.errorf("nil Pkg") - } - } - if src, syn := fn.Synthetic == "", fn.source != nil; src != syn { - s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn) - } - for i, l := range fn.Locals { - if l.Parent() != fn { - s.errorf("Local %s at index %d has wrong parent", l.Name(), i) - } - if l.Heap { - s.errorf("Local %s at index %d has Heap flag set", l.Name(), i) - } - } - // Build the set of valid referrers. - s.instrs = make(map[Instruction]struct{}) - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - s.instrs[instr] = struct{}{} - } - } - for i, p := range fn.Params { - if p.Parent() != fn { - s.errorf("Param %s at index %d has wrong parent", p.Name(), i) - } - // Check common suffix of Signature and Params match type. - if sig := fn.Signature; sig != nil { - j := i - len(fn.Params) + sig.Params().Len() // index within sig.Params - if j < 0 { - continue - } - if !types.Identical(p.Type(), sig.Params().At(j).Type()) { - s.errorf("Param %s at index %d has wrong type (%s, versus %s in Signature)", p.Name(), i, p.Type(), sig.Params().At(j).Type()) - - } - } - - s.checkReferrerList(p) - } - for i, fv := range fn.FreeVars { - if fv.Parent() != fn { - s.errorf("FreeVar %s at index %d has wrong parent", fv.Name(), i) - } - s.checkReferrerList(fv) - } - - if fn.Blocks != nil && len(fn.Blocks) == 0 { - // Function _had_ blocks (so it's not external) but - // they were "optimized" away, even the entry block. - s.errorf("Blocks slice is non-nil but empty") - } - for i, b := range fn.Blocks { - if b == nil { - s.warnf("nil *BasicBlock at f.Blocks[%d]", i) - continue - } - s.checkBlock(b, i) - } - - s.block = nil - for i, anon := range fn.AnonFuncs { - if anon.Parent() != fn { - s.errorf("AnonFuncs[%d]=%s but %s.Parent()=%s", i, anon, anon, anon.Parent()) - } - } - s.fn = nil - return !s.insane -} - -// sanityCheckPackage checks invariants of packages upon creation. -// It does not require that the package is built. -// Unlike sanityCheck (for functions), it just panics at the first error. -func sanityCheckPackage(pkg *Package) { - if pkg.Pkg == nil { - panic(fmt.Sprintf("Package %s has no Object", pkg)) - } - _ = pkg.String() // must not crash - - for name, mem := range pkg.Members { - if name != mem.Name() { - panic(fmt.Sprintf("%s: %T.Name() = %s, want %s", - pkg.Pkg.Path(), mem, mem.Name(), name)) - } - obj := mem.Object() - if obj == nil { - // This check is sound because fields - // {Global,Function}.object have type - // types.Object. (If they were declared as - // *types.{Var,Func}, we'd have a non-empty - // interface containing a nil pointer.) - - continue // not all members have typechecker objects - } - if obj.Name() != name { - if obj.Name() == "init" && strings.HasPrefix(mem.Name(), "init#") { - // Ok. The name of a declared init function varies between - // its types.Func ("init") and its ir.Function ("init#%d"). - } else { - panic(fmt.Sprintf("%s: %T.Object().Name() = %s, want %s", - pkg.Pkg.Path(), mem, obj.Name(), name)) - } - } - } -} diff --git a/vendor/honnef.co/go/tools/ir/source.go b/vendor/honnef.co/go/tools/ir/source.go deleted file mode 100644 index 93d1ccbd290..00000000000 --- a/vendor/honnef.co/go/tools/ir/source.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ir - -// This file defines utilities for working with source positions -// or source-level named entities ("objects"). - -// TODO(adonovan): test that {Value,Instruction}.Pos() positions match -// the originating syntax, as specified. - -import ( - "go/ast" - "go/token" - "go/types" -) - -// EnclosingFunction returns the function that contains the syntax -// node denoted by path. -// -// Syntax associated with package-level variable specifications is -// enclosed by the package's init() function. -// -// Returns nil if not found; reasons might include: -// - the node is not enclosed by any function. -// - the node is within an anonymous function (FuncLit) and -// its IR function has not been created yet -// (pkg.Build() has not yet been called). -// -func EnclosingFunction(pkg *Package, path []ast.Node) *Function { - // Start with package-level function... - fn := findEnclosingPackageLevelFunction(pkg, path) - if fn == nil { - return nil // not in any function - } - - // ...then walk down the nested anonymous functions. - n := len(path) -outer: - for i := range path { - if lit, ok := path[n-1-i].(*ast.FuncLit); ok { - for _, anon := range fn.AnonFuncs { - if anon.Pos() == lit.Type.Func { - fn = anon - continue outer - } - } - // IR function not found: - // - package not yet built, or maybe - // - builder skipped FuncLit in dead block - // (in principle; but currently the Builder - // generates even dead FuncLits). - return nil - } - } - return fn -} - -// HasEnclosingFunction returns true if the AST node denoted by path -// is contained within the declaration of some function or -// package-level variable. -// -// Unlike EnclosingFunction, the behaviour of this function does not -// depend on whether IR code for pkg has been built, so it can be -// used to quickly reject check inputs that will cause -// EnclosingFunction to fail, prior to IR building. -// -func HasEnclosingFunction(pkg *Package, path []ast.Node) bool { - return findEnclosingPackageLevelFunction(pkg, path) != nil -} - -// findEnclosingPackageLevelFunction returns the Function -// corresponding to the package-level function enclosing path. -// -func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function { - if n := len(path); n >= 2 { // [... {Gen,Func}Decl File] - switch decl := path[n-2].(type) { - case *ast.GenDecl: - if decl.Tok == token.VAR && n >= 3 { - // Package-level 'var' initializer. - return pkg.init - } - - case *ast.FuncDecl: - // Declared function/method. - fn := findNamedFunc(pkg, decl.Pos()) - if fn == nil && decl.Recv == nil && decl.Name.Name == "init" { - // Hack: return non-nil when IR is not yet - // built so that HasEnclosingFunction works. - return pkg.init - } - return fn - } - } - return nil // not in any function -} - -// findNamedFunc returns the named function whose FuncDecl.Ident is at -// position pos. -// -func findNamedFunc(pkg *Package, pos token.Pos) *Function { - for _, fn := range pkg.Functions { - if fn.Pos() == pos { - return fn - } - } - return nil -} - -// ValueForExpr returns the IR Value that corresponds to non-constant -// expression e. -// -// It returns nil if no value was found, e.g. -// - the expression is not lexically contained within f; -// - f was not built with debug information; or -// - e is a constant expression. (For efficiency, no debug -// information is stored for constants. Use -// go/types.Info.Types[e].Value instead.) 
-// - e is a reference to nil or a built-in function. -// - the value was optimised away. -// -// If e is an addressable expression used in an lvalue context, -// value is the address denoted by e, and isAddr is true. -// -// The types of e (or &e, if isAddr) and the result are equal -// (modulo "untyped" bools resulting from comparisons). -// -// (Tip: to find the ir.Value given a source position, use -// astutil.PathEnclosingInterval to locate the ast.Node, then -// EnclosingFunction to locate the Function, then ValueForExpr to find -// the ir.Value.) -// -func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) { - if f.debugInfo() { // (opt) - e = unparen(e) - for _, b := range f.Blocks { - for _, instr := range b.Instrs { - if ref, ok := instr.(*DebugRef); ok { - if ref.Expr == e { - return ref.X, ref.IsAddr - } - } - } - } - } - return -} - -// --- Lookup functions for source-level named entities (types.Objects) --- - -// Package returns the IR Package corresponding to the specified -// type-checker package object. -// It returns nil if no such IR package has been created. -// -func (prog *Program) Package(obj *types.Package) *Package { - return prog.packages[obj] -} - -// packageLevelValue returns the package-level value corresponding to -// the specified named object, which may be a package-level const -// (*Const), var (*Global) or func (*Function) of some package in -// prog. It returns nil if the object is not found. -// -func (prog *Program) packageLevelValue(obj types.Object) Value { - if pkg, ok := prog.packages[obj.Pkg()]; ok { - return pkg.values[obj] - } - return nil -} - -// FuncValue returns the concrete Function denoted by the source-level -// named function obj, or nil if obj denotes an interface method. -// -// TODO(adonovan): check the invariant that obj.Type() matches the -// result's Signature, both in the params/results and in the receiver. -// -func (prog *Program) FuncValue(obj *types.Func) *Function { - fn, _ := prog.packageLevelValue(obj).(*Function) - return fn -} - -// ConstValue returns the IR Value denoted by the source-level named -// constant obj. -// -func (prog *Program) ConstValue(obj *types.Const) *Const { - // TODO(adonovan): opt: share (don't reallocate) - // Consts for const objects and constant ast.Exprs. - - // Universal constant? {true,false,nil} - if obj.Parent() == types.Universe { - return NewConst(obj.Val(), obj.Type()) - } - // Package-level named constant? - if v := prog.packageLevelValue(obj); v != nil { - return v.(*Const) - } - return NewConst(obj.Val(), obj.Type()) -} - -// VarValue returns the IR Value that corresponds to a specific -// identifier denoting the source-level named variable obj. -// -// VarValue returns nil if a local variable was not found, perhaps -// because its package was not built, the debug information was not -// requested during IR construction, or the value was optimized away. -// -// ref is the path to an ast.Ident (e.g. from PathEnclosingInterval), -// and that ident must resolve to obj. -// -// pkg is the package enclosing the reference. (A reference to a var -// always occurs within a function, so we need to know where to find it.) -// -// If the identifier is a field selector and its base expression is -// non-addressable, then VarValue returns the value of that field. -// For example: -// func f() struct {x int} -// f().x // VarValue(x) returns a *Field instruction of type int -// -// All other identifiers denote addressable locations (variables). 
-// For them, VarValue may return either the variable's address or its -// value, even when the expression is evaluated only for its value; the -// situation is reported by isAddr, the second component of the result. -// -// If !isAddr, the returned value is the one associated with the -// specific identifier. For example, -// var x int // VarValue(x) returns Const 0 here -// x = 1 // VarValue(x) returns Const 1 here -// -// It is not specified whether the value or the address is returned in -// any particular case, as it may depend upon optimizations performed -// during IR code generation, such as registerization, constant -// folding, avoidance of materialization of subexpressions, etc. -// -func (prog *Program) VarValue(obj *types.Var, pkg *Package, ref []ast.Node) (value Value, isAddr bool) { - // All references to a var are local to some function, possibly init. - fn := EnclosingFunction(pkg, ref) - if fn == nil { - return // e.g. def of struct field; IR not built? - } - - id := ref[0].(*ast.Ident) - - // Defining ident of a parameter? - if id.Pos() == obj.Pos() { - for _, param := range fn.Params { - if param.Object() == obj { - return param, false - } - } - } - - // Other ident? - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - if dr, ok := instr.(*DebugRef); ok { - if dr.Pos() == id.Pos() { - return dr.X, dr.IsAddr - } - } - } - } - - // Defining ident of package-level var? - if v := prog.packageLevelValue(obj); v != nil { - return v.(*Global), true - } - - return // e.g. debug info not requested, or var optimized away -} diff --git a/vendor/honnef.co/go/tools/ir/ssa.go b/vendor/honnef.co/go/tools/ir/ssa.go deleted file mode 100644 index 49693045f0b..00000000000 --- a/vendor/honnef.co/go/tools/ir/ssa.go +++ /dev/null @@ -1,1856 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ir - -// This package defines a high-level intermediate representation for -// Go programs using static single-information (SSI) form. - -import ( - "fmt" - "go/ast" - "go/constant" - "go/token" - "go/types" - "sync" - - "golang.org/x/tools/go/types/typeutil" -) - -type ID int - -// A Program is a partial or complete Go program converted to IR form. -type Program struct { - Fset *token.FileSet // position information for the files of this Program - PrintFunc string // create ir.html for function specified in PrintFunc - imported map[string]*Package // all importable Packages, keyed by import path - packages map[*types.Package]*Package // all loaded Packages, keyed by object - mode BuilderMode // set of mode bits for IR construction - MethodSets typeutil.MethodSetCache // cache of type-checker's method-sets - - methodsMu sync.Mutex // guards the following maps: - methodSets typeutil.Map // maps type to its concrete methodSet - runtimeTypes typeutil.Map // types for which rtypes are needed - canon typeutil.Map // type canonicalization map - bounds map[*types.Func]*Function // bounds for curried x.Method closures - thunks map[selectionKey]*Function // thunks for T.Method expressions -} - -// A Package is a single analyzed Go package containing Members for -// all package-level functions, variables, constants and types it -// declares. These may be accessed directly via Members, or via the -// type-specific accessor methods Func, Type, Var and Const. 
-// -// Members also contains entries for "init" (the synthetic package -// initializer) and "init#%d", the nth declared init function, -// and unspecified other things too. -// -type Package struct { - Prog *Program // the owning program - Pkg *types.Package // the corresponding go/types.Package - Members map[string]Member // all package members keyed by name (incl. init and init#%d) - Functions []*Function // all functions, excluding anonymous ones - values map[types.Object]Value // package members (incl. types and methods), keyed by object - init *Function // Func("init"); the package's init function - debug bool // include full debug info in this package - printFunc string // which function to print in HTML form - - // The following fields are set transiently, then cleared - // after building. - buildOnce sync.Once // ensures package building occurs once - ninit int32 // number of init functions - info *types.Info // package type information - files []*ast.File // package ASTs -} - -// A Member is a member of a Go package, implemented by *NamedConst, -// *Global, *Function, or *Type; they are created by package-level -// const, var, func and type declarations respectively. -// -type Member interface { - Name() string // declared name of the package member - String() string // package-qualified name of the package member - RelString(*types.Package) string // like String, but relative refs are unqualified - Object() types.Object // typechecker's object for this member, if any - Type() types.Type // type of the package member - Token() token.Token // token.{VAR,FUNC,CONST,TYPE} - Package() *Package // the containing package -} - -// A Type is a Member of a Package representing a package-level named type. -type Type struct { - object *types.TypeName - pkg *Package -} - -// A NamedConst is a Member of a Package representing a package-level -// named constant. -// -// Pos() returns the position of the declaring ast.ValueSpec.Names[*] -// identifier. -// -// NB: a NamedConst is not a Value; it contains a constant Value, which -// it augments with the name and position of its 'const' declaration. -// -type NamedConst struct { - object *types.Const - Value *Const - pkg *Package -} - -// A Value is an IR value that can be referenced by an instruction. -type Value interface { - setID(ID) - - // Name returns the name of this value, and determines how - // this Value appears when used as an operand of an - // Instruction. - // - // This is the same as the source name for Parameters, - // Builtins, Functions, FreeVars, Globals. - // For constants, it is a representation of the constant's value - // and type. For all other Values this is the name of the - // virtual register defined by the instruction. - // - // The name of an IR Value is not semantically significant, - // and may not even be unique within a function. - Name() string - - // ID returns the ID of this value. IDs are unique within a single - // function and are densely numbered, but may contain gaps. - // Values and other Instructions share the same ID space. - // Globally, values are identified by their addresses. However, - // IDs exist to facilitate efficient storage of mappings between - // values and data when analysing functions. - // - // NB: IDs are allocated late in the IR construction process and - // are not available to early stages of said process. 
- ID() ID - - // If this value is an Instruction, String returns its - // disassembled form; otherwise it returns unspecified - // human-readable information about the Value, such as its - // kind, name and type. - String() string - - // Type returns the type of this value. Many instructions - // (e.g. IndexAddr) change their behaviour depending on the - // types of their operands. - Type() types.Type - - // Parent returns the function to which this Value belongs. - // It returns nil for named Functions, Builtin and Global. - Parent() *Function - - // Referrers returns the list of instructions that have this - // value as one of their operands; it may contain duplicates - // if an instruction has a repeated operand. - // - // Referrers actually returns a pointer through which the - // caller may perform mutations to the object's state. - // - // Referrers is currently only defined if Parent()!=nil, - // i.e. for the function-local values FreeVar, Parameter, - // Functions (iff anonymous) and all value-defining instructions. - // It returns nil for named Functions, Builtin and Global. - // - // Instruction.Operands contains the inverse of this relation. - Referrers() *[]Instruction - - Operands(rands []*Value) []*Value // nil for non-Instructions - - // Source returns the AST node responsible for creating this - // value. A single AST node may be responsible for more than one - // value, and not all values have an associated AST node. - // - // Do not use this method to find a Value given an ast.Expr; use - // ValueForExpr instead. - Source() ast.Node - - // Pos returns Source().Pos() if Source is not nil, else it - // returns token.NoPos. - Pos() token.Pos -} - -// An Instruction is an IR instruction that computes a new Value or -// has some effect. -// -// An Instruction that defines a value (e.g. BinOp) also implements -// the Value interface; an Instruction that only has an effect (e.g. Store) -// does not. -// -type Instruction interface { - setSource(ast.Node) - setID(ID) - - // String returns the disassembled form of this value. - // - // Examples of Instructions that are Values: - // "BinOp {+} t1 t2" (BinOp) - // "Call len t1" (Call) - // Note that the name of the Value is not printed. - // - // Examples of Instructions that are not Values: - // "Return t1" (Return) - // "Store {int} t2 t1" (Store) - // - // (The separation of Value.Name() from Value.String() is useful - // for some analyses which distinguish the operation from the - // value it defines, e.g., 'y = local int' is both an allocation - // of memory 'local int' and a definition of a pointer y.) - String() string - - // ID returns the ID of this instruction. IDs are unique within a single - // function and are densely numbered, but may contain gaps. - // Globally, instructions are identified by their addresses. However, - // IDs exist to facilitate efficient storage of mappings between - // instructions and data when analysing functions. - // - // NB: IDs are allocated late in the IR construction process and - // are not available to early stages of said process. - ID() ID - - // Parent returns the function to which this instruction - // belongs. - Parent() *Function - - // Block returns the basic block to which this instruction - // belongs. - Block() *BasicBlock - - // setBlock sets the basic block to which this instruction belongs. - setBlock(*BasicBlock) - - // Operands returns the operands of this instruction: the - // set of Values it references. 
- // - // Specifically, it appends their addresses to rands, a - // user-provided slice, and returns the resulting slice, - // permitting avoidance of memory allocation. - // - // The operands are appended in undefined order, but the order - // is consistent for a given Instruction; the addresses are - // always non-nil but may point to a nil Value. Clients may - // store through the pointers, e.g. to effect a value - // renaming. - // - // Value.Referrers is a subset of the inverse of this - // relation. (Referrers are not tracked for all types of - // Values.) - Operands(rands []*Value) []*Value - - Referrers() *[]Instruction // nil for non-Values - - // Source returns the AST node responsible for creating this - // instruction. A single AST node may be responsible for more than - // one instruction, and not all instructions have an associated - // AST node. - Source() ast.Node - - // Pos returns Source().Pos() if Source is not nil, else it - // returns token.NoPos. - Pos() token.Pos -} - -// A Node is a node in the IR value graph. Every concrete type that -// implements Node is also either a Value, an Instruction, or both. -// -// Node contains the methods common to Value and Instruction, plus the -// Operands and Referrers methods generalized to return nil for -// non-Instructions and non-Values, respectively. -// -// Node is provided to simplify IR graph algorithms. Clients should -// use the more specific and informative Value or Instruction -// interfaces where appropriate. -// -type Node interface { - setID(ID) - - // Common methods: - ID() ID - String() string - Source() ast.Node - Pos() token.Pos - Parent() *Function - - // Partial methods: - Operands(rands []*Value) []*Value // nil for non-Instructions - Referrers() *[]Instruction // nil for non-Values -} - -// Function represents the parameters, results, and code of a function -// or method. -// -// If Blocks is nil, this indicates an external function for which no -// Go source code is available. In this case, FreeVars and Locals -// are nil too. Clients performing whole-program analysis must -// handle external functions specially. -// -// Blocks contains the function's control-flow graph (CFG). -// Blocks[0] is the function entry point; block order is not otherwise -// semantically significant, though it may affect the readability of -// the disassembly. -// To iterate over the blocks in dominance order, use DomPreorder(). -// -// A nested function (Parent()!=nil) that refers to one or more -// lexically enclosing local variables ("free variables") has FreeVars. -// Such functions cannot be called directly but require a -// value created by MakeClosure which, via its Bindings, supplies -// values for these parameters. -// -// If the function is a method (Signature.Recv() != nil) then the first -// element of Params is the receiver parameter. -// -// A Go package may declare many functions called "init". -// For each one, Object().Name() returns "init" but Name() returns -// "init#1", etc, in declaration order. -// -// Pos() returns the declaring ast.FuncLit.Type.Func or the position -// of the ast.FuncDecl.Name, if the function was explicit in the -// source. Synthetic wrappers, for which Synthetic != "", may share -// the same position as the function they wrap. -// Syntax.Pos() always returns the position of the declaring "func" token. -// -// Type() returns the function's Signature. 
-// -type Function struct { - node - - name string - object types.Object // a declared *types.Func or one of its wrappers - method *types.Selection // info about provenance of synthetic methods - Signature *types.Signature - - Synthetic string // provenance of synthetic function; "" for true source functions - parent *Function // enclosing function if anon; nil if global - Pkg *Package // enclosing package; nil for shared funcs (wrappers and error.Error) - Prog *Program // enclosing program - Params []*Parameter // function parameters; for methods, includes receiver - FreeVars []*FreeVar // free variables whose values must be supplied by closure - Locals []*Alloc // local variables of this function - Blocks []*BasicBlock // basic blocks of the function; nil => external - Exit *BasicBlock // The function's exit block - AnonFuncs []*Function // anonymous functions directly beneath this one - referrers []Instruction // referring instructions (iff Parent() != nil) - WillExit bool // Calling this function will always terminate the process - WillUnwind bool // Calling this function will always unwind (it will call runtime.Goexit or panic) - - *functionBody -} - -type functionBody struct { - // The following fields are set transiently during building, - // then cleared. - currentBlock *BasicBlock // where to emit code - objects map[types.Object]Value // addresses of local variables - namedResults []*Alloc // tuple of named results - implicitResults []*Alloc // tuple of results - targets *targets // linked stack of branch targets - lblocks map[*ast.Object]*lblock // labelled blocks - consts []*Const - wr *HTMLWriter - fakeExits BlockSet - blocksets [5]BlockSet - hasDefer bool -} - -func (fn *Function) results() []*Alloc { - if len(fn.namedResults) > 0 { - return fn.namedResults - } - return fn.implicitResults -} - -// BasicBlock represents an IR basic block. -// -// The final element of Instrs is always an explicit transfer of -// control (If, Jump, Return, Panic, or Unreachable). -// -// A block may contain no Instructions only if it is unreachable, -// i.e., Preds is nil. Empty blocks are typically pruned. -// -// BasicBlocks and their Preds/Succs relation form a (possibly cyclic) -// graph independent of the IR Value graph: the control-flow graph or -// CFG. It is illegal for multiple edges to exist between the same -// pair of blocks. -// -// Each BasicBlock is also a node in the dominator tree of the CFG. -// The tree may be navigated using Idom()/Dominees() and queried using -// Dominates(). -// -// The order of Preds and Succs is significant (to Phi and If -// instructions, respectively). -// -type BasicBlock struct { - Index int // index of this block within Parent().Blocks - Comment string // optional label; no semantic significance - parent *Function // parent function - Instrs []Instruction // instructions in order - Preds, Succs []*BasicBlock // predecessors and successors - succs2 [2]*BasicBlock // initial space for Succs - dom domInfo // dominator tree info - pdom domInfo // post-dominator tree info - post int - gaps int // number of nil Instrs (transient) - rundefers int // number of rundefers (transient) -} - -// Pure values ---------------------------------------- - -// A FreeVar represents a free variable of the function to which it -// belongs. -// -// FreeVars are used to implement anonymous functions, whose free -// variables are lexically captured in a closure formed by -// MakeClosure. 
The value of such a free var is an Alloc or another -// FreeVar and is considered a potentially escaping heap address, with -// pointer type. -// -// FreeVars are also used to implement bound method closures. Such a -// free var represents the receiver value and may be of any type that -// has concrete methods. -// -// Pos() returns the position of the value that was captured, which -// belongs to an enclosing function. -// -type FreeVar struct { - node - - name string - typ types.Type - parent *Function - referrers []Instruction - - // Transiently needed during building. - outer Value // the Value captured from the enclosing context. -} - -// A Parameter represents an input parameter of a function. -// -type Parameter struct { - register - - name string - object types.Object // a *types.Var; nil for non-source locals -} - -// A Const represents the value of a constant expression. -// -// The underlying type of a constant may be any boolean, numeric, or -// string type. In addition, a Const may represent the nil value of -// any reference type---interface, map, channel, pointer, slice, or -// function---but not "untyped nil". -// -// All source-level constant expressions are represented by a Const -// of the same type and value. -// -// Value holds the exact value of the constant, independent of its -// Type(), using the same representation as package go/constant uses for -// constants, or nil for a typed nil value. -// -// Pos() returns token.NoPos. -// -// Example printed form: -// Const {42} -// Const {"test"} -// Const {(3 + 4i)} -// -type Const struct { - register - - Value constant.Value -} - -// A Global is a named Value holding the address of a package-level -// variable. -// -// Pos() returns the position of the ast.ValueSpec.Names[*] -// identifier. -// -type Global struct { - node - - name string - object types.Object // a *types.Var; may be nil for synthetics e.g. init$guard - typ types.Type - - Pkg *Package -} - -// A Builtin represents a specific use of a built-in function, e.g. len. -// -// Builtins are immutable values. Builtins do not have addresses. -// Builtins can only appear in CallCommon.Func. -// -// Name() indicates the function: one of the built-in functions from the -// Go spec (excluding "make" and "new") or one of these ir-defined -// intrinsics: -// -// // wrapnilchk returns ptr if non-nil, panics otherwise. -// // (For use in indirection wrappers.) -// func ir:wrapnilchk(ptr *T, recvType, methodName string) *T -// -// Object() returns a *types.Builtin for built-ins defined by the spec, -// nil for others. -// -// Type() returns a *types.Signature representing the effective -// signature of the built-in for this call. -// -type Builtin struct { - node - - name string - sig *types.Signature -} - -// Value-defining instructions ---------------------------------------- - -// The Alloc instruction reserves space for a variable of the given type, -// zero-initializes it, and yields its address. -// -// Alloc values are always addresses, and have pointer types, so the -// type of the allocated variable is actually -// Type().Underlying().(*types.Pointer).Elem(). -// -// If Heap is false, Alloc allocates space in the function's -// activation record (frame); we refer to an Alloc(Heap=false) as a -// "stack" alloc. Each stack Alloc returns the same address each time -// it is executed within the same activation; the space is -// re-initialized to zero. -// -// If Heap is true, Alloc allocates space in the heap; we -// refer to an Alloc(Heap=true) as a "heap" alloc. 
Each heap Alloc -// returns a different address each time it is executed. -// -// When Alloc is applied to a channel, map or slice type, it returns -// the address of an uninitialized (nil) reference of that kind; store -// the result of MakeSlice, MakeMap or MakeChan in that location to -// instantiate these types. -// -// Pos() returns the ast.CompositeLit.Lbrace for a composite literal, -// or the ast.CallExpr.Rparen for a call to new() or for a call that -// allocates a varargs slice. -// -// Example printed form: -// t1 = StackAlloc <*int> -// t2 = HeapAlloc <*int> (new) -// -type Alloc struct { - register - Heap bool - index int // dense numbering; for lifting -} - -var _ Instruction = (*Sigma)(nil) -var _ Value = (*Sigma)(nil) - -// The Sigma instruction represents an SSI σ-node, which splits values -// at branches in the control flow. -// -// Conceptually, σ-nodes exist at the end of blocks that branch and -// constitute parallel assignments to one value per destination block. -// However, such a representation would be awkward to work with, so -// instead we place σ-nodes at the beginning of branch targets. The -// From field denotes to which incoming edge the node applies. -// -// Within a block, all σ-nodes must appear before all non-σ nodes. -// -// Example printed form: -// t2 = Sigma [#0] t1 (x) -// -type Sigma struct { - register - From *BasicBlock - X Value - - live bool // used during lifting -} - -// The Phi instruction represents an SSA φ-node, which combines values -// that differ across incoming control-flow edges and yields a new -// value. Within a block, all φ-nodes must appear before all non-φ, non-σ -// nodes. -// -// Pos() returns the position of the && or || for short-circuit -// control-flow joins, or that of the *Alloc for φ-nodes inserted -// during SSA renaming. -// -// Example printed form: -// t3 = Phi 2:t1 4:t2 (x) -// -type Phi struct { - register - Edges []Value // Edges[i] is value for Block().Preds[i] - - live bool // used during lifting -} - -// The Call instruction represents a function or method call. -// -// The Call instruction yields the function result if there is exactly -// one. Otherwise it returns a tuple, the components of which are -// accessed via Extract. -// -// See CallCommon for generic function call documentation. -// -// Pos() returns the ast.CallExpr.Lparen, if explicit in the source. -// -// Example printed form: -// t3 = Call <()> println t1 t2 -// t4 = Call <()> foo$1 -// t6 = Invoke t5.String -// -type Call struct { - register - Call CallCommon -} - -// The BinOp instruction yields the result of binary operation X Op Y. -// -// Pos() returns the ast.BinaryExpr.OpPos, if explicit in the source. -// -// Example printed form: -// t3 = BinOp {+} t2 t1 -// -type BinOp struct { - register - // One of: - // ADD SUB MUL QUO REM + - * / % - // AND OR XOR SHL SHR AND_NOT & | ^ << >> &^ - // EQL NEQ LSS LEQ GTR GEQ == != < <= < >= - Op token.Token - X, Y Value -} - -// The UnOp instruction yields the result of Op X. -// XOR is bitwise complement. -// SUB is negation. -// NOT is logical negation. -// -// -// Example printed form: -// t2 = UnOp {^} t1 -// -type UnOp struct { - register - Op token.Token // One of: NOT SUB XOR ! - ^ - X Value -} - -// The Load instruction loads a value from a memory address. -// -// For implicit memory loads, Pos() returns the position of the -// most closely associated source-level construct; the details are not -// specified. 
-// -// Example printed form: -// t2 = Load t1 -// -type Load struct { - register - X Value -} - -// The ChangeType instruction applies to X a value-preserving type -// change to Type(). -// -// Type changes are permitted: -// - between a named type and its underlying type. -// - between two named types of the same underlying type. -// - between (possibly named) pointers to identical base types. -// - from a bidirectional channel to a read- or write-channel, -// optionally adding/removing a name. -// -// This operation cannot fail dynamically. -// -// Pos() returns the ast.CallExpr.Lparen, if the instruction arose -// from an explicit conversion in the source. -// -// Example printed form: -// t2 = ChangeType <*T> t1 -// -type ChangeType struct { - register - X Value -} - -// The Convert instruction yields the conversion of value X to type -// Type(). One or both of those types is basic (but possibly named). -// -// A conversion may change the value and representation of its operand. -// Conversions are permitted: -// - between real numeric types. -// - between complex numeric types. -// - between string and []byte or []rune. -// - between pointers and unsafe.Pointer. -// - between unsafe.Pointer and uintptr. -// - from (Unicode) integer to (UTF-8) string. -// A conversion may imply a type name change also. -// -// This operation cannot fail dynamically. -// -// Conversions of untyped string/number/bool constants to a specific -// representation are eliminated during IR construction. -// -// Pos() returns the ast.CallExpr.Lparen, if the instruction arose -// from an explicit conversion in the source. -// -// Example printed form: -// t2 = Convert <[]byte> t1 -// -type Convert struct { - register - X Value -} - -// ChangeInterface constructs a value of one interface type from a -// value of another interface type known to be assignable to it. -// This operation cannot fail. -// -// Pos() returns the ast.CallExpr.Lparen if the instruction arose from -// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the -// instruction arose from an explicit e.(T) operation; or token.NoPos -// otherwise. -// -// Example printed form: -// t2 = ChangeInterface t1 -// -type ChangeInterface struct { - register - X Value -} - -// MakeInterface constructs an instance of an interface type from a -// value of a concrete type. -// -// Use Program.MethodSets.MethodSet(X.Type()) to find the method-set -// of X, and Program.MethodValue(m) to find the implementation of a method. -// -// To construct the zero value of an interface type T, use: -// NewConst(constant.MakeNil(), T, pos) -// -// Pos() returns the ast.CallExpr.Lparen, if the instruction arose -// from an explicit conversion in the source. -// -// Example printed form: -// t2 = MakeInterface t1 -// -type MakeInterface struct { - register - X Value -} - -// The MakeClosure instruction yields a closure value whose code is -// Fn and whose free variables' values are supplied by Bindings. -// -// Type() returns a (possibly named) *types.Signature. -// -// Pos() returns the ast.FuncLit.Type.Func for a function literal -// closure or the ast.SelectorExpr.Sel for a bound method closure. -// -// Example printed form: -// t1 = MakeClosure foo$1 t1 t2 -// t5 = MakeClosure (T).foo$bound t4 -// -type MakeClosure struct { - register - Fn Value // always a *Function - Bindings []Value // values for each free variable in Fn.FreeVars -} - -// The MakeMap instruction creates a new hash-table-based map object -// and yields a value of kind map. 
-// -// Type() returns a (possibly named) *types.Map. -// -// Pos() returns the ast.CallExpr.Lparen, if created by make(map), or -// the ast.CompositeLit.Lbrack if created by a literal. -// -// Example printed form: -// t1 = MakeMap -// t2 = MakeMap t1 -// -type MakeMap struct { - register - Reserve Value // initial space reservation; nil => default -} - -// The MakeChan instruction creates a new channel object and yields a -// value of kind chan. -// -// Type() returns a (possibly named) *types.Chan. -// -// Pos() returns the ast.CallExpr.Lparen for the make(chan) that -// created it. -// -// Example printed form: -// t3 = MakeChan t1 -// t4 = MakeChan t2 -// -type MakeChan struct { - register - Size Value // int; size of buffer; zero => synchronous. -} - -// The MakeSlice instruction yields a slice of length Len backed by a -// newly allocated array of length Cap. -// -// Both Len and Cap must be non-nil Values of integer type. -// -// (Alloc(types.Array) followed by Slice will not suffice because -// Alloc can only create arrays of constant length.) -// -// Type() returns a (possibly named) *types.Slice. -// -// Pos() returns the ast.CallExpr.Lparen for the make([]T) that -// created it. -// -// Example printed form: -// t3 = MakeSlice <[]string> t1 t2 -// t4 = MakeSlice t1 t2 -// -type MakeSlice struct { - register - Len Value - Cap Value -} - -// The Slice instruction yields a slice of an existing string, slice -// or *array X between optional integer bounds Low and High. -// -// Dynamically, this instruction panics if X evaluates to a nil *array -// pointer. -// -// Type() returns string if the type of X was string, otherwise a -// *types.Slice with the same element type as X. -// -// Pos() returns the ast.SliceExpr.Lbrack if created by a x[:] slice -// operation, the ast.CompositeLit.Lbrace if created by a literal, or -// NoPos if not explicit in the source (e.g. a variadic argument slice). -// -// Example printed form: -// t4 = Slice <[]int> t3 t2 t1 -// -type Slice struct { - register - X Value // slice, string, or *array - Low, High, Max Value // each may be nil -} - -// The FieldAddr instruction yields the address of Field of *struct X. -// -// The field is identified by its index within the field list of the -// struct type of X. -// -// Dynamically, this instruction panics if X evaluates to a nil -// pointer. -// -// Type() returns a (possibly named) *types.Pointer. -// -// Pos() returns the position of the ast.SelectorExpr.Sel for the -// field, if explicit in the source. -// -// Example printed form: -// t2 = FieldAddr <*int> [0] (X) t1 -// -type FieldAddr struct { - register - X Value // *struct - Field int // field is X.Type().Underlying().(*types.Pointer).Elem().Underlying().(*types.Struct).Field(Field) -} - -// The Field instruction yields the Field of struct X. -// -// The field is identified by its index within the field list of the -// struct type of X; by using numeric indices we avoid ambiguity of -// package-local identifiers and permit compact representations. -// -// Pos() returns the position of the ast.SelectorExpr.Sel for the -// field, if explicit in the source. -// -// Example printed form: -// t2 = FieldAddr [0] (X) t1 -// -type Field struct { - register - X Value // struct - Field int // index into X.Type().(*types.Struct).Fields -} - -// The IndexAddr instruction yields the address of the element at -// index Index of collection X. Index is an integer expression. 
-// -// The elements of maps and strings are not addressable; use StringLookup, MapLookup or -// MapUpdate instead. -// -// Dynamically, this instruction panics if X evaluates to a nil *array -// pointer. -// -// Type() returns a (possibly named) *types.Pointer. -// -// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if -// explicit in the source. -// -// Example printed form: -// t3 = IndexAddr <*int> t2 t1 -// -type IndexAddr struct { - register - X Value // slice or *array, - Index Value // numeric index -} - -// The Index instruction yields element Index of array X. -// -// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if -// explicit in the source. -// -// Example printed form: -// t3 = Index t2 t1 -// -type Index struct { - register - X Value // array - Index Value // integer index -} - -// The MapLookup instruction yields element Index of collection X, a map. -// -// If CommaOk, the result is a 2-tuple of the value above and a -// boolean indicating the result of a map membership test for the key. -// The components of the tuple are accessed using Extract. -// -// Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source. -// -// Example printed form: -// t4 = MapLookup t3 t1 -// t6 = MapLookup <(string, bool)> t3 t2 -// -type MapLookup struct { - register - X Value // map - Index Value // key-typed index - CommaOk bool // return a value,ok pair -} - -// The StringLookup instruction yields element Index of collection X, a string. -// Index is an integer expression. -// -// Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source. -// -// Example printed form: -// t3 = StringLookup t2 t1 -// -type StringLookup struct { - register - X Value // string - Index Value // numeric index -} - -// SelectState is a helper for Select. -// It represents one goal state and its corresponding communication. -// -type SelectState struct { - Dir types.ChanDir // direction of case (SendOnly or RecvOnly) - Chan Value // channel to use (for send or receive) - Send Value // value to send (for send) - Pos token.Pos // position of token.ARROW - DebugNode ast.Node // ast.SendStmt or ast.UnaryExpr(<-) [debug mode] -} - -// The Select instruction tests whether (or blocks until) one -// of the specified sent or received states is entered. -// -// Let n be the number of States for which Dir==RECV and Tᵢ (0 ≤ i < n) -// be the element type of each such state's Chan. -// Select returns an n+2-tuple -// (index int, recvOk bool, r₀ T₀, ... rₙ-1 Tₙ-1) -// The tuple's components, described below, must be accessed via the -// Extract instruction. -// -// If Blocking, select waits until exactly one state holds, i.e. a -// channel becomes ready for the designated operation of sending or -// receiving; select chooses one among the ready states -// pseudorandomly, performs the send or receive operation, and sets -// 'index' to the index of the chosen channel. -// -// If !Blocking, select doesn't block if no states hold; instead it -// returns immediately with index equal to -1. -// -// If the chosen channel was used for a receive, the rᵢ component is -// set to the received value, where i is the index of that state among -// all n receive states; otherwise rᵢ has the zero value of type Tᵢ. -// Note that the receive index i is not the same as the state -// index index. -// -// The second component of the triple, recvOk, is a boolean whose value -// is true iff the selected operation was a receive and the receive -// successfully yielded a value. 
-// -// Pos() returns the ast.SelectStmt.Select. -// -// Example printed form: -// t6 = SelectNonBlocking <(index int, ok bool, int)> [<-t4, t5<-t1] -// t11 = SelectBlocking <(index int, ok bool)> [] -// -type Select struct { - register - States []*SelectState - Blocking bool -} - -// The Range instruction yields an iterator over the domain and range -// of X, which must be a string or map. -// -// Elements are accessed via Next. -// -// Type() returns an opaque and degenerate "rangeIter" type. -// -// Pos() returns the ast.RangeStmt.For. -// -// Example printed form: -// t2 = Range t1 -// -type Range struct { - register - X Value // string or map -} - -// The Next instruction reads and advances the (map or string) -// iterator Iter and returns a 3-tuple value (ok, k, v). If the -// iterator is not exhausted, ok is true and k and v are the next -// elements of the domain and range, respectively. Otherwise ok is -// false and k and v are undefined. -// -// Components of the tuple are accessed using Extract. -// -// The IsString field distinguishes iterators over strings from those -// over maps, as the Type() alone is insufficient: consider -// map[int]rune. -// -// Type() returns a *types.Tuple for the triple (ok, k, v). -// The types of k and/or v may be types.Invalid. -// -// Example printed form: -// t5 = Next <(ok bool, k int, v rune)> t2 -// t5 = Next <(ok bool, k invalid type, v invalid type)> t2 -// -type Next struct { - register - Iter Value - IsString bool // true => string iterator; false => map iterator. -} - -// The TypeAssert instruction tests whether interface value X has type -// AssertedType. -// -// If !CommaOk, on success it returns v, the result of the conversion -// (defined below); on failure it panics. -// -// If CommaOk: on success it returns a pair (v, true) where v is the -// result of the conversion; on failure it returns (z, false) where z -// is AssertedType's zero value. The components of the pair must be -// accessed using the Extract instruction. -// -// If AssertedType is a concrete type, TypeAssert checks whether the -// dynamic type in interface X is equal to it, and if so, the result -// of the conversion is a copy of the value in the interface. -// -// If AssertedType is an interface, TypeAssert checks whether the -// dynamic type of the interface is assignable to it, and if so, the -// result of the conversion is a copy of the interface value X. -// If AssertedType is a superinterface of X.Type(), the operation will -// fail iff the operand is nil. (Contrast with ChangeInterface, which -// performs no nil-check.) -// -// Type() reflects the actual type of the result, possibly a -// 2-types.Tuple; AssertedType is the asserted type. -// -// Pos() returns the ast.CallExpr.Lparen if the instruction arose from -// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the -// instruction arose from an explicit e.(T) operation; or the -// ast.CaseClause.Case if the instruction arose from a case of a -// type-switch statement. -// -// Example printed form: -// t2 = TypeAssert t1 -// t4 = TypeAssert <(value fmt.Stringer, ok bool)> t1 -// -type TypeAssert struct { - register - X Value - AssertedType types.Type - CommaOk bool -} - -// The Extract instruction yields component Index of Tuple. -// -// This is used to access the results of instructions with multiple -// return values, such as Call, TypeAssert, Next, Recv, -// MapLookup and others. 
-// -// Example printed form: -// t7 = Extract [1] (ok) t4 -// -type Extract struct { - register - Tuple Value - Index int -} - -// Instructions executed for effect. They do not yield a value. -------------------- - -// The Jump instruction transfers control to the sole successor of its -// owning block. -// -// A Jump must be the last instruction of its containing BasicBlock. -// -// Pos() returns NoPos. -// -// Example printed form: -// Jump → b1 -// -type Jump struct { - anInstruction - Comment string -} - -// The Unreachable pseudo-instruction signals that execution cannot -// continue after the preceding function call because it terminates -// the process. -// -// The instruction acts as a control instruction, jumping to the exit -// block. However, this jump will never execute. -// -// An Unreachable instruction must be the last instruction of its -// containing BasicBlock. -// -// Example printed form: -// Unreachable → b1 -// -type Unreachable struct { - anInstruction -} - -// The If instruction transfers control to one of the two successors -// of its owning block, depending on the boolean Cond: the first if -// true, the second if false. -// -// An If instruction must be the last instruction of its containing -// BasicBlock. -// -// Pos() returns the *ast.IfStmt, if explicit in the source. -// -// Example printed form: -// If t2 → b1 b2 -// -type If struct { - anInstruction - Cond Value -} - -type ConstantSwitch struct { - anInstruction - Tag Value - // Constant branch conditions. A nil Value denotes the (implicit - // or explicit) default branch. - Conds []Value -} - -type TypeSwitch struct { - register - Tag Value - Conds []types.Type -} - -// The Return instruction returns values and control back to the calling -// function. -// -// len(Results) is always equal to the number of results in the -// function's signature. -// -// If len(Results) > 1, Return returns a tuple value with the specified -// components which the caller must access using Extract instructions. -// -// There is no instruction to return a ready-made tuple like those -// returned by a "value,ok"-mode TypeAssert, MapLookup or Recv or -// a tail-call to a function with multiple result parameters. -// -// Return must be the last instruction of its containing BasicBlock. -// Such a block has no successors. -// -// Pos() returns the ast.ReturnStmt.Return, if explicit in the source. -// -// Example printed form: -// Return -// Return t1 t2 -// -type Return struct { - anInstruction - Results []Value -} - -// The RunDefers instruction pops and invokes the entire stack of -// procedure calls pushed by Defer instructions in this function. -// -// It is legal to encounter multiple 'rundefers' instructions in a -// single control-flow path through a function; this is useful in -// the combined init() function, for example. -// -// Pos() returns NoPos. -// -// Example printed form: -// RunDefers -// -type RunDefers struct { - anInstruction -} - -// The Panic instruction initiates a panic with value X. -// -// A Panic instruction must be the last instruction of its containing -// BasicBlock, which must have one successor, the exit block. -// -// NB: 'go panic(x)' and 'defer panic(x)' do not use this instruction; -// they are treated as calls to a built-in function. -// -// Pos() returns the ast.CallExpr.Lparen if this panic was explicit -// in the source. 
-// -// Example printed form: -// Panic t1 -// -type Panic struct { - anInstruction - X Value // an interface{} -} - -// The Go instruction creates a new goroutine and calls the specified -// function within it. -// -// See CallCommon for generic function call documentation. -// -// Pos() returns the ast.GoStmt.Go. -// -// Example printed form: -// Go println t1 -// Go t3 -// GoInvoke t4.Bar t2 -// -type Go struct { - anInstruction - Call CallCommon -} - -// The Defer instruction pushes the specified call onto a stack of -// functions to be called by a RunDefers instruction or by a panic. -// -// See CallCommon for generic function call documentation. -// -// Pos() returns the ast.DeferStmt.Defer. -// -// Example printed form: -// Defer println t1 -// Defer t3 -// DeferInvoke t4.Bar t2 -// -type Defer struct { - anInstruction - Call CallCommon -} - -// The Send instruction sends X on channel Chan. -// -// Pos() returns the ast.SendStmt.Arrow, if explicit in the source. -// -// Example printed form: -// Send t2 t1 -// -type Send struct { - anInstruction - Chan, X Value -} - -// The Recv instruction receives from channel Chan. -// -// If CommaOk, the result is a 2-tuple of the value above -// and a boolean indicating the success of the receive. The -// components of the tuple are accessed using Extract. -// -// Pos() returns the ast.UnaryExpr.OpPos, if explicit in the source. -// For receive operations implicit in ranging over a channel, -// Pos() returns the ast.RangeStmt.For. -// -// Example printed form: -// t2 = Recv t1 -// t3 = Recv <(int, bool)> t1 -type Recv struct { - register - Chan Value - CommaOk bool -} - -// The Store instruction stores Val at address Addr. -// Stores can be of arbitrary types. -// -// Pos() returns the position of the source-level construct most closely -// associated with the memory store operation. -// Since implicit memory stores are numerous and varied and depend upon -// implementation choices, the details are not specified. -// -// Example printed form: -// Store {int} t2 t1 -// -type Store struct { - anInstruction - Addr Value - Val Value -} - -// The BlankStore instruction is emitted for assignments to the blank -// identifier. -// -// BlankStore is a pseudo-instruction: it has no dynamic effect. -// -// Pos() returns NoPos. -// -// Example printed form: -// BlankStore t1 -// -type BlankStore struct { - anInstruction - Val Value -} - -// The MapUpdate instruction updates the association of Map[Key] to -// Value. -// -// Pos() returns the ast.KeyValueExpr.Colon or ast.IndexExpr.Lbrack, -// if explicit in the source. -// -// Example printed form: -// MapUpdate t3 t1 t2 -// -type MapUpdate struct { - anInstruction - Map Value - Key Value - Value Value -} - -// A DebugRef instruction maps a source-level expression Expr to the -// IR value X that represents the value (!IsAddr) or address (IsAddr) -// of that expression. -// -// DebugRef is a pseudo-instruction: it has no dynamic effect. -// -// Pos() returns Expr.Pos(), the start position of the source-level -// expression. This is not the same as the "designated" token as -// documented at Value.Pos(). e.g. CallExpr.Pos() does not return the -// position of the ("designated") Lparen token. -// -// DebugRefs are generated only for functions built with debugging -// enabled; see Package.SetDebugMode() and the GlobalDebug builder -// mode flag. -// -// DebugRefs are not emitted for ast.Idents referring to constants or -// predeclared identifiers, since they are trivial and numerous. 
-// Nor are they emitted for ast.ParenExprs. -// -// (By representing these as instructions, rather than out-of-band, -// consistency is maintained during transformation passes by the -// ordinary SSA renaming machinery.) -// -// Example printed form: -// ; *ast.CallExpr @ 102:9 is t5 -// ; var x float64 @ 109:72 is x -// ; address of *ast.CompositeLit @ 216:10 is t0 -// -type DebugRef struct { - anInstruction - Expr ast.Expr // the referring expression (never *ast.ParenExpr) - object types.Object // the identity of the source var/func - IsAddr bool // Expr is addressable and X is the address it denotes - X Value // the value or address of Expr -} - -// Embeddable mix-ins and helpers for common parts of other structs. ----------- - -// register is a mix-in embedded by all IR values that are also -// instructions, i.e. virtual registers, and provides a uniform -// implementation of most of the Value interface: Value.Name() is a -// numbered register (e.g. "t0"); the other methods are field accessors. -// -// Temporary names are automatically assigned to each register on -// completion of building a function in IR form. -// -type register struct { - anInstruction - typ types.Type // type of virtual register - referrers []Instruction -} - -type node struct { - source ast.Node - id ID -} - -func (n *node) setID(id ID) { n.id = id } -func (n node) ID() ID { return n.id } - -func (n *node) setSource(source ast.Node) { n.source = source } -func (n *node) Source() ast.Node { return n.source } - -func (n *node) Pos() token.Pos { - if n.source != nil { - return n.source.Pos() - } - return token.NoPos -} - -// anInstruction is a mix-in embedded by all Instructions. -// It provides the implementations of the Block and setBlock methods. -type anInstruction struct { - node - block *BasicBlock // the basic block of this instruction -} - -// CallCommon is contained by Go, Defer and Call to hold the -// common parts of a function or method call. -// -// Each CallCommon exists in one of two modes, function call and -// interface method invocation, or "call" and "invoke" for short. -// -// 1. "call" mode: when Method is nil (!IsInvoke), a CallCommon -// represents an ordinary function call of the value in Value, -// which may be a *Builtin, a *Function or any other value of kind -// 'func'. -// -// Value may be one of: -// (a) a *Function, indicating a statically dispatched call -// to a package-level function, an anonymous function, or -// a method of a named type. -// (b) a *MakeClosure, indicating an immediately applied -// function literal with free variables. -// (c) a *Builtin, indicating a statically dispatched call -// to a built-in function. -// (d) any other value, indicating a dynamically dispatched -// function call. -// StaticCallee returns the identity of the callee in cases -// (a) and (b), nil otherwise. -// -// Args contains the arguments to the call. If Value is a method, -// Args[0] contains the receiver parameter. -// -// Example printed form: -// t3 = Call <()> println t1 t2 -// Go t3 -// Defer t3 -// -// 2. "invoke" mode: when Method is non-nil (IsInvoke), a CallCommon -// represents a dynamically dispatched call to an interface method. -// In this mode, Value is the interface value and Method is the -// interface's abstract method. Note: an abstract method may be -// shared by multiple interfaces due to embedding; Value.Type() -// provides the specific interface used for this call. 
-// -// Value is implicitly supplied to the concrete method implementation -// as the receiver parameter; in other words, Args[0] holds not the -// receiver but the first true argument. -// -// Example printed form: -// t6 = Invoke t5.String -// GoInvoke t4.Bar t2 -// DeferInvoke t4.Bar t2 -// -// For all calls to variadic functions (Signature().Variadic()), -// the last element of Args is a slice. -// -type CallCommon struct { - Value Value // receiver (invoke mode) or func value (call mode) - Method *types.Func // abstract method (invoke mode) - Args []Value // actual parameters (in static method call, includes receiver) - Results Value -} - -// IsInvoke returns true if this call has "invoke" (not "call") mode. -func (c *CallCommon) IsInvoke() bool { - return c.Method != nil -} - -// Signature returns the signature of the called function. -// -// For an "invoke"-mode call, the signature of the interface method is -// returned. -// -// In either "call" or "invoke" mode, if the callee is a method, its -// receiver is represented by sig.Recv, not sig.Params().At(0). -// -func (c *CallCommon) Signature() *types.Signature { - if c.Method != nil { - return c.Method.Type().(*types.Signature) - } - return c.Value.Type().Underlying().(*types.Signature) -} - -// StaticCallee returns the callee if this is a trivially static -// "call"-mode call to a function. -func (c *CallCommon) StaticCallee() *Function { - switch fn := c.Value.(type) { - case *Function: - return fn - case *MakeClosure: - return fn.Fn.(*Function) - } - return nil -} - -// Description returns a description of the mode of this call suitable -// for a user interface, e.g., "static method call". -func (c *CallCommon) Description() string { - switch fn := c.Value.(type) { - case *Builtin: - return "built-in function call" - case *MakeClosure: - return "static function closure call" - case *Function: - if fn.Signature.Recv() != nil { - return "static method call" - } - return "static function call" - } - if c.IsInvoke() { - return "dynamic method call" // ("invoke" mode) - } - return "dynamic function call" -} - -// The CallInstruction interface, implemented by *Go, *Defer and *Call, -// exposes the common parts of function-calling instructions, -// yet provides a way back to the Value defined by *Call alone. 
-// -type CallInstruction interface { - Instruction - Common() *CallCommon // returns the common parts of the call - Value() *Call -} - -func (s *Call) Common() *CallCommon { return &s.Call } -func (s *Defer) Common() *CallCommon { return &s.Call } -func (s *Go) Common() *CallCommon { return &s.Call } - -func (s *Call) Value() *Call { return s } -func (s *Defer) Value() *Call { return nil } -func (s *Go) Value() *Call { return nil } - -func (v *Builtin) Type() types.Type { return v.sig } -func (v *Builtin) Name() string { return v.name } -func (*Builtin) Referrers() *[]Instruction { return nil } -func (v *Builtin) Pos() token.Pos { return token.NoPos } -func (v *Builtin) Object() types.Object { return types.Universe.Lookup(v.name) } -func (v *Builtin) Parent() *Function { return nil } - -func (v *FreeVar) Type() types.Type { return v.typ } -func (v *FreeVar) Name() string { return v.name } -func (v *FreeVar) Referrers() *[]Instruction { return &v.referrers } -func (v *FreeVar) Parent() *Function { return v.parent } - -func (v *Global) Type() types.Type { return v.typ } -func (v *Global) Name() string { return v.name } -func (v *Global) Parent() *Function { return nil } -func (v *Global) Referrers() *[]Instruction { return nil } -func (v *Global) Token() token.Token { return token.VAR } -func (v *Global) Object() types.Object { return v.object } -func (v *Global) String() string { return v.RelString(nil) } -func (v *Global) Package() *Package { return v.Pkg } -func (v *Global) RelString(from *types.Package) string { return relString(v, from) } - -func (v *Function) Name() string { return v.name } -func (v *Function) Type() types.Type { return v.Signature } -func (v *Function) Token() token.Token { return token.FUNC } -func (v *Function) Object() types.Object { return v.object } -func (v *Function) String() string { return v.RelString(nil) } -func (v *Function) Package() *Package { return v.Pkg } -func (v *Function) Parent() *Function { return v.parent } -func (v *Function) Referrers() *[]Instruction { - if v.parent != nil { - return &v.referrers - } - return nil -} - -func (v *Parameter) Object() types.Object { return v.object } - -func (v *Alloc) Type() types.Type { return v.typ } -func (v *Alloc) Referrers() *[]Instruction { return &v.referrers } - -func (v *register) Type() types.Type { return v.typ } -func (v *register) setType(typ types.Type) { v.typ = typ } -func (v *register) Name() string { return fmt.Sprintf("t%d", v.id) } -func (v *register) Referrers() *[]Instruction { return &v.referrers } - -func (v *anInstruction) Parent() *Function { return v.block.parent } -func (v *anInstruction) Block() *BasicBlock { return v.block } -func (v *anInstruction) setBlock(block *BasicBlock) { v.block = block } -func (v *anInstruction) Referrers() *[]Instruction { return nil } - -func (t *Type) Name() string { return t.object.Name() } -func (t *Type) Pos() token.Pos { return t.object.Pos() } -func (t *Type) Type() types.Type { return t.object.Type() } -func (t *Type) Token() token.Token { return token.TYPE } -func (t *Type) Object() types.Object { return t.object } -func (t *Type) String() string { return t.RelString(nil) } -func (t *Type) Package() *Package { return t.pkg } -func (t *Type) RelString(from *types.Package) string { return relString(t, from) } - -func (c *NamedConst) Name() string { return c.object.Name() } -func (c *NamedConst) Pos() token.Pos { return c.object.Pos() } -func (c *NamedConst) String() string { return c.RelString(nil) } -func (c *NamedConst) Type() types.Type { 
return c.object.Type() } -func (c *NamedConst) Token() token.Token { return token.CONST } -func (c *NamedConst) Object() types.Object { return c.object } -func (c *NamedConst) Package() *Package { return c.pkg } -func (c *NamedConst) RelString(from *types.Package) string { return relString(c, from) } - -// Func returns the package-level function of the specified name, -// or nil if not found. -// -func (p *Package) Func(name string) (f *Function) { - f, _ = p.Members[name].(*Function) - return -} - -// Var returns the package-level variable of the specified name, -// or nil if not found. -// -func (p *Package) Var(name string) (g *Global) { - g, _ = p.Members[name].(*Global) - return -} - -// Const returns the package-level constant of the specified name, -// or nil if not found. -// -func (p *Package) Const(name string) (c *NamedConst) { - c, _ = p.Members[name].(*NamedConst) - return -} - -// Type returns the package-level type of the specified name, -// or nil if not found. -// -func (p *Package) Type(name string) (t *Type) { - t, _ = p.Members[name].(*Type) - return -} - -func (s *DebugRef) Pos() token.Pos { return s.Expr.Pos() } - -// Operands. - -func (v *Alloc) Operands(rands []*Value) []*Value { - return rands -} - -func (v *BinOp) Operands(rands []*Value) []*Value { - return append(rands, &v.X, &v.Y) -} - -func (c *CallCommon) Operands(rands []*Value) []*Value { - rands = append(rands, &c.Value) - for i := range c.Args { - rands = append(rands, &c.Args[i]) - } - return rands -} - -func (s *Go) Operands(rands []*Value) []*Value { - return s.Call.Operands(rands) -} - -func (s *Call) Operands(rands []*Value) []*Value { - return s.Call.Operands(rands) -} - -func (s *Defer) Operands(rands []*Value) []*Value { - return s.Call.Operands(rands) -} - -func (v *ChangeInterface) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (v *ChangeType) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (v *Convert) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (s *DebugRef) Operands(rands []*Value) []*Value { - return append(rands, &s.X) -} - -func (v *Extract) Operands(rands []*Value) []*Value { - return append(rands, &v.Tuple) -} - -func (v *Field) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (v *FieldAddr) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (s *If) Operands(rands []*Value) []*Value { - return append(rands, &s.Cond) -} - -func (s *ConstantSwitch) Operands(rands []*Value) []*Value { - rands = append(rands, &s.Tag) - for i := range s.Conds { - rands = append(rands, &s.Conds[i]) - } - return rands -} - -func (s *TypeSwitch) Operands(rands []*Value) []*Value { - rands = append(rands, &s.Tag) - return rands -} - -func (v *Index) Operands(rands []*Value) []*Value { - return append(rands, &v.X, &v.Index) -} - -func (v *IndexAddr) Operands(rands []*Value) []*Value { - return append(rands, &v.X, &v.Index) -} - -func (*Jump) Operands(rands []*Value) []*Value { - return rands -} - -func (*Unreachable) Operands(rands []*Value) []*Value { - return rands -} - -func (v *MapLookup) Operands(rands []*Value) []*Value { - return append(rands, &v.X, &v.Index) -} - -func (v *StringLookup) Operands(rands []*Value) []*Value { - return append(rands, &v.X, &v.Index) -} - -func (v *MakeChan) Operands(rands []*Value) []*Value { - return append(rands, &v.Size) -} - -func (v *MakeClosure) Operands(rands []*Value) []*Value { - rands = append(rands, &v.Fn) - for i := range 
v.Bindings { - rands = append(rands, &v.Bindings[i]) - } - return rands -} - -func (v *MakeInterface) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (v *MakeMap) Operands(rands []*Value) []*Value { - return append(rands, &v.Reserve) -} - -func (v *MakeSlice) Operands(rands []*Value) []*Value { - return append(rands, &v.Len, &v.Cap) -} - -func (v *MapUpdate) Operands(rands []*Value) []*Value { - return append(rands, &v.Map, &v.Key, &v.Value) -} - -func (v *Next) Operands(rands []*Value) []*Value { - return append(rands, &v.Iter) -} - -func (s *Panic) Operands(rands []*Value) []*Value { - return append(rands, &s.X) -} - -func (v *Sigma) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (v *Phi) Operands(rands []*Value) []*Value { - for i := range v.Edges { - rands = append(rands, &v.Edges[i]) - } - return rands -} - -func (v *Range) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (s *Return) Operands(rands []*Value) []*Value { - for i := range s.Results { - rands = append(rands, &s.Results[i]) - } - return rands -} - -func (*RunDefers) Operands(rands []*Value) []*Value { - return rands -} - -func (v *Select) Operands(rands []*Value) []*Value { - for i := range v.States { - rands = append(rands, &v.States[i].Chan, &v.States[i].Send) - } - return rands -} - -func (s *Send) Operands(rands []*Value) []*Value { - return append(rands, &s.Chan, &s.X) -} - -func (recv *Recv) Operands(rands []*Value) []*Value { - return append(rands, &recv.Chan) -} - -func (v *Slice) Operands(rands []*Value) []*Value { - return append(rands, &v.X, &v.Low, &v.High, &v.Max) -} - -func (s *Store) Operands(rands []*Value) []*Value { - return append(rands, &s.Addr, &s.Val) -} - -func (s *BlankStore) Operands(rands []*Value) []*Value { - return append(rands, &s.Val) -} - -func (v *TypeAssert) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (v *UnOp) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (v *Load) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -// Non-Instruction Values: -func (v *Builtin) Operands(rands []*Value) []*Value { return rands } -func (v *FreeVar) Operands(rands []*Value) []*Value { return rands } -func (v *Const) Operands(rands []*Value) []*Value { return rands } -func (v *Function) Operands(rands []*Value) []*Value { return rands } -func (v *Global) Operands(rands []*Value) []*Value { return rands } -func (v *Parameter) Operands(rands []*Value) []*Value { return rands } diff --git a/vendor/honnef.co/go/tools/ir/staticcheck.conf b/vendor/honnef.co/go/tools/ir/staticcheck.conf deleted file mode 100644 index d7b38bc3563..00000000000 --- a/vendor/honnef.co/go/tools/ir/staticcheck.conf +++ /dev/null @@ -1,3 +0,0 @@ -# ssa/... is mostly imported from upstream and we don't want to -# deviate from it too much, hence disabling SA1019 -checks = ["inherit", "-SA1019"] diff --git a/vendor/honnef.co/go/tools/ir/util.go b/vendor/honnef.co/go/tools/ir/util.go deleted file mode 100644 index df0f8bf971d..00000000000 --- a/vendor/honnef.co/go/tools/ir/util.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ir - -// This file defines a number of miscellaneous utility functions. 
- -import ( - "fmt" - "go/ast" - "go/token" - "go/types" - "io" - "os" - - "golang.org/x/tools/go/ast/astutil" -) - -//// AST utilities - -func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) } - -// isBlankIdent returns true iff e is an Ident with name "_". -// They have no associated types.Object, and thus no type. -// -func isBlankIdent(e ast.Expr) bool { - id, ok := e.(*ast.Ident) - return ok && id.Name == "_" -} - -//// Type utilities. Some of these belong in go/types. - -// isPointer returns true for types whose underlying type is a pointer. -func isPointer(typ types.Type) bool { - _, ok := typ.Underlying().(*types.Pointer) - return ok -} - -func isInterface(T types.Type) bool { return types.IsInterface(T) } - -// deref returns a pointer's element type; otherwise it returns typ. -func deref(typ types.Type) types.Type { - if p, ok := typ.Underlying().(*types.Pointer); ok { - return p.Elem() - } - return typ -} - -// recvType returns the receiver type of method obj. -func recvType(obj *types.Func) types.Type { - return obj.Type().(*types.Signature).Recv().Type() -} - -// logStack prints the formatted "start" message to stderr and -// returns a closure that prints the corresponding "end" message. -// Call using 'defer logStack(...)()' to show builder stack on panic. -// Don't forget trailing parens! -// -func logStack(format string, args ...interface{}) func() { - msg := fmt.Sprintf(format, args...) - io.WriteString(os.Stderr, msg) - io.WriteString(os.Stderr, "\n") - return func() { - io.WriteString(os.Stderr, msg) - io.WriteString(os.Stderr, " end\n") - } -} - -// newVar creates a 'var' for use in a types.Tuple. -func newVar(name string, typ types.Type) *types.Var { - return types.NewParam(token.NoPos, nil, name, typ) -} - -// anonVar creates an anonymous 'var' for use in a types.Tuple. -func anonVar(typ types.Type) *types.Var { - return newVar("", typ) -} - -var lenResults = types.NewTuple(anonVar(tInt)) - -// makeLen returns the len builtin specialized to type func(T)int. -func makeLen(T types.Type) *Builtin { - lenParams := types.NewTuple(anonVar(T)) - return &Builtin{ - name: "len", - sig: types.NewSignature(nil, lenParams, lenResults, false), - } -} diff --git a/vendor/honnef.co/go/tools/ir/wrappers.go b/vendor/honnef.co/go/tools/ir/wrappers.go deleted file mode 100644 index 7dd33474806..00000000000 --- a/vendor/honnef.co/go/tools/ir/wrappers.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ir - -// This file defines synthesis of Functions that delegate to declared -// methods; they come in three kinds: -// -// (1) wrappers: methods that wrap declared methods, performing -// implicit pointer indirections and embedded field selections. -// -// (2) thunks: funcs that wrap declared methods. Like wrappers, -// thunks perform indirections and field selections. The thunk's -// first parameter is used as the receiver for the method call. -// -// (3) bounds: funcs that wrap declared methods. The bound's sole -// free variable, supplied by a closure, is used as the receiver -// for the method call. No indirections or field selections are -// performed since they can be done before the call. 
- -import ( - "fmt" - - "go/types" -) - -// -- wrappers ----------------------------------------------------------- - -// makeWrapper returns a synthetic method that delegates to the -// declared method denoted by meth.Obj(), first performing any -// necessary pointer indirections or field selections implied by meth. -// -// The resulting method's receiver type is meth.Recv(). -// -// This function is versatile but quite subtle! Consider the -// following axes of variation when making changes: -// - optional receiver indirection -// - optional implicit field selections -// - meth.Obj() may denote a concrete or an interface method -// - the result may be a thunk or a wrapper. -// -// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -// -func makeWrapper(prog *Program, sel *types.Selection) *Function { - obj := sel.Obj().(*types.Func) // the declared function - sig := sel.Type().(*types.Signature) // type of this wrapper - - var recv *types.Var // wrapper's receiver or thunk's params[0] - name := obj.Name() - var description string - var start int // first regular param - if sel.Kind() == types.MethodExpr { - name += "$thunk" - description = "thunk" - recv = sig.Params().At(0) - start = 1 - } else { - description = "wrapper" - recv = sig.Recv() - } - - description = fmt.Sprintf("%s for %s", description, sel.Obj()) - if prog.mode&LogSource != 0 { - defer logStack("make %s to (%s)", description, recv.Type())() - } - fn := &Function{ - name: name, - method: sel, - object: obj, - Signature: sig, - Synthetic: description, - Prog: prog, - functionBody: new(functionBody), - } - fn.initHTML(prog.PrintFunc) - fn.startBody() - fn.addSpilledParam(recv, nil) - createParams(fn, start) - - indices := sel.Index() - - var v Value = fn.Locals[0] // spilled receiver - if isPointer(sel.Recv()) { - v = emitLoad(fn, v, nil) - - // For simple indirection wrappers, perform an informative nil-check: - // "value method (T).f called using nil *T pointer" - if len(indices) == 1 && !isPointer(recvType(obj)) { - var c Call - c.Call.Value = &Builtin{ - name: "ir:wrapnilchk", - sig: types.NewSignature(nil, - types.NewTuple(anonVar(sel.Recv()), anonVar(tString), anonVar(tString)), - types.NewTuple(anonVar(sel.Recv())), false), - } - c.Call.Args = []Value{ - v, - emitConst(fn, stringConst(deref(sel.Recv()).String())), - emitConst(fn, stringConst(sel.Obj().Name())), - } - c.setType(v.Type()) - v = fn.emit(&c, nil) - } - } - - // Invariant: v is a pointer, either - // value of *A receiver param, or - // address of A spilled receiver. - - // We use pointer arithmetic (FieldAddr possibly followed by - // Load) in preference to value extraction (Field possibly - // preceded by Load). - - v = emitImplicitSelections(fn, v, indices[:len(indices)-1], nil) - - // Invariant: v is a pointer, either - // value of implicit *C field, or - // address of implicit C field. - - var c Call - if r := recvType(obj); !isInterface(r) { // concrete method - if !isPointer(r) { - v = emitLoad(fn, v, nil) - } - c.Call.Value = prog.declaredFunc(obj) - c.Call.Args = append(c.Call.Args, v) - } else { - c.Call.Method = obj - c.Call.Value = emitLoad(fn, v, nil) - } - for _, arg := range fn.Params[1:] { - c.Call.Args = append(c.Call.Args, arg) - } - emitTailCall(fn, &c, nil) - fn.finishBody() - return fn -} - -// createParams creates parameters for wrapper method fn based on its -// Signature.Params, which do not include the receiver. -// start is the index of the first regular parameter to use. 
-// -func createParams(fn *Function, start int) { - tparams := fn.Signature.Params() - for i, n := start, tparams.Len(); i < n; i++ { - fn.addParamObj(tparams.At(i), nil) - } -} - -// -- bounds ----------------------------------------------------------- - -// makeBound returns a bound method wrapper (or "bound"), a synthetic -// function that delegates to a concrete or interface method denoted -// by obj. The resulting function has no receiver, but has one free -// variable which will be used as the method's receiver in the -// tail-call. -// -// Use MakeClosure with such a wrapper to construct a bound method -// closure. e.g.: -// -// type T int or: type T interface { meth() } -// func (t T) meth() -// var t T -// f := t.meth -// f() // calls t.meth() -// -// f is a closure of a synthetic wrapper defined as if by: -// -// f := func() { return t.meth() } -// -// Unlike makeWrapper, makeBound need perform no indirection or field -// selections because that can be done before the closure is -// constructed. -// -// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) -// -func makeBound(prog *Program, obj *types.Func) *Function { - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - fn, ok := prog.bounds[obj] - if !ok { - description := fmt.Sprintf("bound method wrapper for %s", obj) - if prog.mode&LogSource != 0 { - defer logStack("%s", description)() - } - fn = &Function{ - name: obj.Name() + "$bound", - object: obj, - Signature: changeRecv(obj.Type().(*types.Signature), nil), // drop receiver - Synthetic: description, - Prog: prog, - functionBody: new(functionBody), - } - fn.initHTML(prog.PrintFunc) - - fv := &FreeVar{name: "recv", typ: recvType(obj), parent: fn} - fn.FreeVars = []*FreeVar{fv} - fn.startBody() - createParams(fn, 0) - var c Call - - if !isInterface(recvType(obj)) { // concrete - c.Call.Value = prog.declaredFunc(obj) - c.Call.Args = []Value{fv} - } else { - c.Call.Value = fv - c.Call.Method = obj - } - for _, arg := range fn.Params { - c.Call.Args = append(c.Call.Args, arg) - } - emitTailCall(fn, &c, nil) - fn.finishBody() - - prog.bounds[obj] = fn - } - return fn -} - -// -- thunks ----------------------------------------------------------- - -// makeThunk returns a thunk, a synthetic function that delegates to a -// concrete or interface method denoted by sel.Obj(). The resulting -// function has no receiver, but has an additional (first) regular -// parameter. -// -// Precondition: sel.Kind() == types.MethodExpr. -// -// type T int or: type T interface { meth() } -// func (t T) meth() -// f := T.meth -// var t T -// f(t) // calls t.meth() -// -// f is a synthetic wrapper defined as if by: -// -// f := func(t T) { return t.meth() } -// -// TODO(adonovan): opt: currently the stub is created even when used -// directly in a function call: C.f(i, 0). This is less efficient -// than inlining the stub. -// -// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) -// -func makeThunk(prog *Program, sel *types.Selection) *Function { - if sel.Kind() != types.MethodExpr { - panic(sel) - } - - key := selectionKey{ - kind: sel.Kind(), - recv: sel.Recv(), - obj: sel.Obj(), - index: fmt.Sprint(sel.Index()), - indirect: sel.Indirect(), - } - - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - - // Canonicalize key.recv to avoid constructing duplicate thunks. 
- canonRecv, ok := prog.canon.At(key.recv).(types.Type) - if !ok { - canonRecv = key.recv - prog.canon.Set(key.recv, canonRecv) - } - key.recv = canonRecv - - fn, ok := prog.thunks[key] - if !ok { - fn = makeWrapper(prog, sel) - if fn.Signature.Recv() != nil { - panic(fn) // unexpected receiver - } - prog.thunks[key] = fn - } - return fn -} - -func changeRecv(s *types.Signature, recv *types.Var) *types.Signature { - return types.NewSignature(recv, s.Params(), s.Results(), s.Variadic()) -} - -// selectionKey is like types.Selection but a usable map key. -type selectionKey struct { - kind types.SelectionKind - recv types.Type // canonicalized via Program.canon - obj types.Object - index string - indirect bool -} diff --git a/vendor/honnef.co/go/tools/ir/write.go b/vendor/honnef.co/go/tools/ir/write.go deleted file mode 100644 index b936bc98528..00000000000 --- a/vendor/honnef.co/go/tools/ir/write.go +++ /dev/null @@ -1,5 +0,0 @@ -package ir - -func NewJump(parent *BasicBlock) *Jump { - return &Jump{anInstruction{block: parent}, ""} -} diff --git a/vendor/honnef.co/go/tools/lint/lint.go b/vendor/honnef.co/go/tools/lint/lint.go deleted file mode 100644 index 1a70e0c298e..00000000000 --- a/vendor/honnef.co/go/tools/lint/lint.go +++ /dev/null @@ -1,539 +0,0 @@ -// Package lint provides the foundation for tools like staticcheck -package lint // import "honnef.co/go/tools/lint" - -import ( - "bytes" - "encoding/gob" - "fmt" - "go/scanner" - "go/token" - "go/types" - "path/filepath" - "sort" - "strings" - "sync" - "sync/atomic" - "unicode" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/packages" - "honnef.co/go/tools/config" - "honnef.co/go/tools/internal/cache" -) - -type Documentation struct { - Title string - Text string - Since string - NonDefault bool - Options []string -} - -func (doc *Documentation) String() string { - b := &strings.Builder{} - fmt.Fprintf(b, "%s\n\n", doc.Title) - if doc.Text != "" { - fmt.Fprintf(b, "%s\n\n", doc.Text) - } - fmt.Fprint(b, "Available since\n ") - if doc.Since == "" { - fmt.Fprint(b, "unreleased") - } else { - fmt.Fprintf(b, "%s", doc.Since) - } - if doc.NonDefault { - fmt.Fprint(b, ", non-default") - } - fmt.Fprint(b, "\n") - if len(doc.Options) > 0 { - fmt.Fprintf(b, "\nOptions\n") - for _, opt := range doc.Options { - fmt.Fprintf(b, " %s", opt) - } - fmt.Fprint(b, "\n") - } - return b.String() -} - -type Ignore interface { - Match(p Problem) bool -} - -type LineIgnore struct { - File string - Line int - Checks []string - Matched bool - Pos token.Position -} - -func (li *LineIgnore) Match(p Problem) bool { - pos := p.Pos - if pos.Filename != li.File || pos.Line != li.Line { - return false - } - for _, c := range li.Checks { - if m, _ := filepath.Match(c, p.Check); m { - li.Matched = true - return true - } - } - return false -} - -func (li *LineIgnore) String() string { - matched := "not matched" - if li.Matched { - matched = "matched" - } - return fmt.Sprintf("%s:%d %s (%s)", li.File, li.Line, strings.Join(li.Checks, ", "), matched) -} - -type FileIgnore struct { - File string - Checks []string -} - -func (fi *FileIgnore) Match(p Problem) bool { - if p.Pos.Filename != fi.File { - return false - } - for _, c := range fi.Checks { - if m, _ := filepath.Match(c, p.Check); m { - return true - } - } - return false -} - -type Severity uint8 - -const ( - Error Severity = iota - Warning - Ignored -) - -// Problem represents a problem in some source code. 
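The Match methods above delegate the per-check comparison to filepath.Match, so a single ignore directive can cover a whole family of checks with a pattern such as "SA1*". A small illustration of just that matching step; the pattern and check names are hypothetical:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// LineIgnore.Match first compares file and line, then tries each
	// configured pattern against the problem's check name.
	for _, check := range []string{"SA1019", "S1000", "compile"} {
		ok, _ := filepath.Match("SA1*", check)
		fmt.Printf("SA1* matches %s: %v\n", check, ok)
	}
	// Prints:
	// SA1* matches SA1019: true
	// SA1* matches S1000: false
	// SA1* matches compile: false
}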
-type Problem struct { - Pos token.Position - End token.Position - Message string - Check string - Severity Severity - Related []Related -} - -type Related struct { - Pos token.Position - End token.Position - Message string -} - -func (p Problem) Equal(o Problem) bool { - return p.Pos == o.Pos && - p.End == o.End && - p.Message == o.Message && - p.Check == o.Check && - p.Severity == o.Severity -} - -func (p *Problem) String() string { - return fmt.Sprintf("%s (%s)", p.Message, p.Check) -} - -// A Linter lints Go source code. -type Linter struct { - Checkers []*analysis.Analyzer - CumulativeCheckers []CumulativeChecker - GoVersion int - Config config.Config - Stats Stats - RepeatAnalyzers uint -} - -type CumulativeChecker interface { - Analyzer() *analysis.Analyzer - Result() []types.Object - ProblemObject(*token.FileSet, types.Object) Problem -} - -func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error) { - var allAnalyzers []*analysis.Analyzer - allAnalyzers = append(allAnalyzers, l.Checkers...) - for _, cum := range l.CumulativeCheckers { - allAnalyzers = append(allAnalyzers, cum.Analyzer()) - } - - // The -checks command line flag overrules all configuration - // files, which means that for `-checks="foo"`, no check other - // than foo can ever be reported to the user. Make use of this - // fact to cull the list of analyses we need to run. - - // replace "inherit" with "all", as we don't want to base the - // list of all checks on the default configuration, which - // disables certain checks. - checks := make([]string, len(l.Config.Checks)) - copy(checks, l.Config.Checks) - for i, c := range checks { - if c == "inherit" { - checks[i] = "all" - } - } - - allowed := FilterChecks(allAnalyzers, checks) - var allowedAnalyzers []*analysis.Analyzer - for _, c := range l.Checkers { - if allowed[c.Name] { - allowedAnalyzers = append(allowedAnalyzers, c) - } - } - hasCumulative := false - for _, cum := range l.CumulativeCheckers { - a := cum.Analyzer() - if allowed[a.Name] { - hasCumulative = true - allowedAnalyzers = append(allowedAnalyzers, a) - } - } - - r, err := NewRunner(&l.Stats) - if err != nil { - return nil, err - } - r.goVersion = l.GoVersion - r.repeatAnalyzers = l.RepeatAnalyzers - - pkgs, err := r.Run(cfg, patterns, allowedAnalyzers, hasCumulative) - if err != nil { - return nil, err - } - - tpkgToPkg := map[*types.Package]*Package{} - for _, pkg := range pkgs { - tpkgToPkg[pkg.Types] = pkg - - for _, e := range pkg.errs { - switch e := e.(type) { - case types.Error: - p := Problem{ - Pos: e.Fset.PositionFor(e.Pos, false), - Message: e.Msg, - Severity: Error, - Check: "compile", - } - pkg.problems = append(pkg.problems, p) - case packages.Error: - msg := e.Msg - if len(msg) != 0 && msg[0] == '\n' { - // TODO(dh): See https://github.com/golang/go/issues/32363 - msg = msg[1:] - } - - var pos token.Position - if e.Pos == "" { - // Under certain conditions (malformed package - // declarations, multiple packages in the same - // directory), go list emits an error on stderr - // instead of JSON. Those errors do not have - // associated position information in - // go/packages.Error, even though the output on - // stderr may contain it. 
- if p, n, err := parsePos(msg); err == nil { - if abs, err := filepath.Abs(p.Filename); err == nil { - p.Filename = abs - } - pos = p - msg = msg[n+2:] - } - } else { - var err error - pos, _, err = parsePos(e.Pos) - if err != nil { - panic(fmt.Sprintf("internal error: %s", e)) - } - } - p := Problem{ - Pos: pos, - Message: msg, - Severity: Error, - Check: "compile", - } - pkg.problems = append(pkg.problems, p) - case scanner.ErrorList: - for _, e := range e { - p := Problem{ - Pos: e.Pos, - Message: e.Msg, - Severity: Error, - Check: "compile", - } - pkg.problems = append(pkg.problems, p) - } - case error: - p := Problem{ - Pos: token.Position{}, - Message: e.Error(), - Severity: Error, - Check: "compile", - } - pkg.problems = append(pkg.problems, p) - } - } - } - - atomic.StoreUint32(&r.stats.State, StateCumulative) - for _, cum := range l.CumulativeCheckers { - for _, res := range cum.Result() { - pkg := tpkgToPkg[res.Pkg()] - if pkg == nil { - panic(fmt.Sprintf("analyzer %s flagged object %s in package %s, a package that we aren't tracking", cum.Analyzer(), res, res.Pkg())) - } - allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks) - if allowedChecks[cum.Analyzer().Name] { - pos := DisplayPosition(pkg.Fset, res.Pos()) - // FIXME(dh): why are we ignoring generated files - // here? Surely this is specific to 'unused', not all - // cumulative checkers - if _, ok := pkg.gen[pos.Filename]; ok { - continue - } - p := cum.ProblemObject(pkg.Fset, res) - pkg.problems = append(pkg.problems, p) - } - } - } - - for _, pkg := range pkgs { - if !pkg.fromSource { - // Don't cache packages that we loaded from the cache - continue - } - cpkg := cachedPackage{ - Problems: pkg.problems, - Ignores: pkg.ignores, - Config: pkg.cfg, - } - buf := &bytes.Buffer{} - if err := gob.NewEncoder(buf).Encode(cpkg); err != nil { - return nil, err - } - id := cache.Subkey(pkg.actionID, "data "+r.problemsCacheKey) - if err := r.cache.PutBytes(id, buf.Bytes()); err != nil { - return nil, err - } - } - - var problems []Problem - // Deduplicate line ignores. When U1000 processes a package and - // its test variant, it will only emit a single problem for an - // unused object, not two problems. We will, however, have two - // line ignores, one per package. Without deduplication, one line - // ignore will be marked as matched, while the other one won't, - // subsequently reporting a "this linter directive didn't match - // anything" error. - ignores := map[token.Position]Ignore{} - for _, pkg := range pkgs { - for _, ig := range pkg.ignores { - if lig, ok := ig.(*LineIgnore); ok { - ig = ignores[lig.Pos] - if ig == nil { - ignores[lig.Pos] = lig - ig = lig - } - } - for i := range pkg.problems { - p := &pkg.problems[i] - if ig.Match(*p) { - p.Severity = Ignored - } - } - } - - if pkg.cfg == nil { - // The package failed to load, otherwise we would have a - // valid config. Pass through all errors. - problems = append(problems, pkg.problems...) 
- } else { - for _, p := range pkg.problems { - allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks) - allowedChecks["compile"] = true - if allowedChecks[p.Check] { - problems = append(problems, p) - } - } - } - - for _, ig := range pkg.ignores { - ig, ok := ig.(*LineIgnore) - if !ok { - continue - } - ig = ignores[ig.Pos].(*LineIgnore) - if ig.Matched { - continue - } - - couldveMatched := false - allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks) - for _, c := range ig.Checks { - if !allowedChecks[c] { - continue - } - couldveMatched = true - break - } - - if !couldveMatched { - // The ignored checks were disabled for the containing package. - // Don't flag the ignore for not having matched. - continue - } - p := Problem{ - Pos: ig.Pos, - Message: "this linter directive didn't match anything; should it be removed?", - Check: "", - } - problems = append(problems, p) - } - } - - if len(problems) == 0 { - return nil, nil - } - - sort.Slice(problems, func(i, j int) bool { - pi := problems[i].Pos - pj := problems[j].Pos - - if pi.Filename != pj.Filename { - return pi.Filename < pj.Filename - } - if pi.Line != pj.Line { - return pi.Line < pj.Line - } - if pi.Column != pj.Column { - return pi.Column < pj.Column - } - - return problems[i].Message < problems[j].Message - }) - - var out []Problem - out = append(out, problems[0]) - for i, p := range problems[1:] { - // We may encounter duplicate problems because one file - // can be part of many packages. - if !problems[i].Equal(p) { - out = append(out, p) - } - } - return out, nil -} - -func FilterChecks(allChecks []*analysis.Analyzer, checks []string) map[string]bool { - // OPT(dh): this entire computation could be cached per package - allowedChecks := map[string]bool{} - - for _, check := range checks { - b := true - if len(check) > 1 && check[0] == '-' { - b = false - check = check[1:] - } - if check == "*" || check == "all" { - // Match all - for _, c := range allChecks { - allowedChecks[c.Name] = b - } - } else if strings.HasSuffix(check, "*") { - // Glob - prefix := check[:len(check)-1] - isCat := strings.IndexFunc(prefix, func(r rune) bool { return unicode.IsNumber(r) }) == -1 - - for _, c := range allChecks { - idx := strings.IndexFunc(c.Name, func(r rune) bool { return unicode.IsNumber(r) }) - if isCat { - // Glob is S*, which should match S1000 but not SA1000 - cat := c.Name[:idx] - if prefix == cat { - allowedChecks[c.Name] = b - } - } else { - // Glob is S1* - if strings.HasPrefix(c.Name, prefix) { - allowedChecks[c.Name] = b - } - } - } - } else { - // Literal check name - allowedChecks[check] = b - } - } - return allowedChecks -} - -func DisplayPosition(fset *token.FileSet, p token.Pos) token.Position { - if p == token.NoPos { - return token.Position{} - } - - // Only use the adjusted position if it points to another Go file. - // This means we'll point to the original file for cgo files, but - // we won't point to a YACC grammar file. 
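The glob handling in FilterChecks above distinguishes category globs from prefix globs: a pattern without digits ("S*") must equal the check's letter category exactly, so it matches S1000 but not SA1000, while a pattern containing digits ("S1*") is an ordinary prefix match. A standalone restatement of just that rule, for illustration only; it deliberately omits the negation and "all"/"*" handling also implemented above:

package main

import (
	"fmt"
	"strings"
	"unicode"
)

// globAllows restates the category/prefix rule from FilterChecks for a
// single pattern and check name; it is not part of the lint package.
func globAllows(glob, check string) bool {
	if !strings.HasSuffix(glob, "*") {
		return glob == check // literal check name
	}
	prefix := strings.TrimSuffix(glob, "*")
	isDigit := func(r rune) bool { return unicode.IsNumber(r) }
	if strings.IndexFunc(prefix, isDigit) == -1 {
		// Category glob: "S*" matches S1000 but not SA1000.
		idx := strings.IndexFunc(check, isDigit)
		return idx != -1 && check[:idx] == prefix
	}
	// Prefix glob: "S1*" matches S1000, S1021, ...
	return strings.HasPrefix(check, prefix)
}

func main() {
	fmt.Println(globAllows("S*", "S1000"))  // true
	fmt.Println(globAllows("S*", "SA1000")) // false
	fmt.Println(globAllows("S1*", "S1021")) // true
}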
- pos := fset.PositionFor(p, false) - adjPos := fset.PositionFor(p, true) - - if filepath.Ext(adjPos.Filename) == ".go" { - return adjPos - } - return pos -} - -var bufferPool = &sync.Pool{ - New: func() interface{} { - buf := bytes.NewBuffer(nil) - buf.Grow(64) - return buf - }, -} - -func FuncName(f *types.Func) string { - buf := bufferPool.Get().(*bytes.Buffer) - buf.Reset() - if f.Type() != nil { - sig := f.Type().(*types.Signature) - if recv := sig.Recv(); recv != nil { - buf.WriteByte('(') - if _, ok := recv.Type().(*types.Interface); ok { - // gcimporter creates abstract methods of - // named interfaces using the interface type - // (not the named type) as the receiver. - // Don't print it in full. - buf.WriteString("interface") - } else { - types.WriteType(buf, recv.Type(), nil) - } - buf.WriteByte(')') - buf.WriteByte('.') - } else if f.Pkg() != nil { - writePackage(buf, f.Pkg()) - } - } - buf.WriteString(f.Name()) - s := buf.String() - bufferPool.Put(buf) - return s -} - -func writePackage(buf *bytes.Buffer, pkg *types.Package) { - if pkg == nil { - return - } - s := pkg.Path() - if s != "" { - buf.WriteString(s) - buf.WriteByte('.') - } -} diff --git a/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go b/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go deleted file mode 100644 index 4408aff25e4..00000000000 --- a/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go +++ /dev/null @@ -1,58 +0,0 @@ -// Package lintdsl provides helpers for implementing static analysis -// checks. Dot-importing this package is encouraged. -package lintdsl - -import ( - "bytes" - "fmt" - "go/ast" - "go/format" - - "golang.org/x/tools/go/analysis" - "honnef.co/go/tools/pattern" -) - -func Inspect(node ast.Node, fn func(node ast.Node) bool) { - if node == nil { - return - } - ast.Inspect(node, fn) -} - -func Match(pass *analysis.Pass, q pattern.Pattern, node ast.Node) (*pattern.Matcher, bool) { - // Note that we ignore q.Relevant – callers of Match usually use - // AST inspectors that already filter on nodes we're interested - // in. - m := &pattern.Matcher{TypesInfo: pass.TypesInfo} - ok := m.Match(q.Root, node) - return m, ok -} - -func MatchAndEdit(pass *analysis.Pass, before, after pattern.Pattern, node ast.Node) (*pattern.Matcher, []analysis.TextEdit, bool) { - m, ok := Match(pass, before, node) - if !ok { - return m, nil, false - } - r := pattern.NodeToAST(after.Root, m.State) - buf := &bytes.Buffer{} - format.Node(buf, pass.Fset, r) - edit := []analysis.TextEdit{{ - Pos: node.Pos(), - End: node.End(), - NewText: buf.Bytes(), - }} - return m, edit, true -} - -func Selector(x, sel string) *ast.SelectorExpr { - return &ast.SelectorExpr{ - X: &ast.Ident{Name: x}, - Sel: &ast.Ident{Name: sel}, - } -} - -// ExhaustiveTypeSwitch panics when called. It can be used to ensure -// that type switches are exhaustive. -func ExhaustiveTypeSwitch(v interface{}) { - panic(fmt.Sprintf("internal error: unhandled case %T", v)) -} diff --git a/vendor/honnef.co/go/tools/lint/lintutil/format/format.go b/vendor/honnef.co/go/tools/lint/lintutil/format/format.go deleted file mode 100644 index b28f8885b87..00000000000 --- a/vendor/honnef.co/go/tools/lint/lintutil/format/format.go +++ /dev/null @@ -1,162 +0,0 @@ -// Package format provides formatters for linter problems. 
-package format - -import ( - "encoding/json" - "fmt" - "go/token" - "io" - "os" - "path/filepath" - "text/tabwriter" - - "honnef.co/go/tools/lint" -) - -func shortPath(path string) string { - cwd, err := os.Getwd() - if err != nil { - return path - } - if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) { - return rel - } - return path -} - -func relativePositionString(pos token.Position) string { - s := shortPath(pos.Filename) - if pos.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) - } - if s == "" { - s = "-" - } - return s -} - -type Statter interface { - Stats(total, errors, warnings, ignored int) -} - -type Formatter interface { - Format(p lint.Problem) -} - -type Text struct { - W io.Writer -} - -func (o Text) Format(p lint.Problem) { - fmt.Fprintf(o.W, "%s: %s\n", relativePositionString(p.Pos), p.String()) - for _, r := range p.Related { - fmt.Fprintf(o.W, "\t%s: %s\n", relativePositionString(r.Pos), r.Message) - } -} - -type JSON struct { - W io.Writer -} - -func severity(s lint.Severity) string { - switch s { - case lint.Error: - return "error" - case lint.Warning: - return "warning" - case lint.Ignored: - return "ignored" - } - return "" -} - -func (o JSON) Format(p lint.Problem) { - type location struct { - File string `json:"file"` - Line int `json:"line"` - Column int `json:"column"` - } - type related struct { - Location location `json:"location"` - End location `json:"end"` - Message string `json:"message"` - } - jp := struct { - Code string `json:"code"` - Severity string `json:"severity,omitempty"` - Location location `json:"location"` - End location `json:"end"` - Message string `json:"message"` - Related []related `json:"related,omitempty"` - }{ - Code: p.Check, - Severity: severity(p.Severity), - Location: location{ - File: p.Pos.Filename, - Line: p.Pos.Line, - Column: p.Pos.Column, - }, - End: location{ - File: p.End.Filename, - Line: p.End.Line, - Column: p.End.Column, - }, - Message: p.Message, - } - for _, r := range p.Related { - jp.Related = append(jp.Related, related{ - Location: location{ - File: r.Pos.Filename, - Line: r.Pos.Line, - Column: r.Pos.Column, - }, - End: location{ - File: r.End.Filename, - Line: r.End.Line, - Column: r.End.Column, - }, - Message: r.Message, - }) - } - _ = json.NewEncoder(o.W).Encode(jp) -} - -type Stylish struct { - W io.Writer - - prevFile string - tw *tabwriter.Writer -} - -func (o *Stylish) Format(p lint.Problem) { - pos := p.Pos - if pos.Filename == "" { - pos.Filename = "-" - } - - if pos.Filename != o.prevFile { - if o.prevFile != "" { - o.tw.Flush() - fmt.Fprintln(o.W) - } - fmt.Fprintln(o.W, pos.Filename) - o.prevFile = pos.Filename - o.tw = tabwriter.NewWriter(o.W, 0, 4, 2, ' ', 0) - } - fmt.Fprintf(o.tw, " (%d, %d)\t%s\t%s\n", pos.Line, pos.Column, p.Check, p.Message) - for _, r := range p.Related { - fmt.Fprintf(o.tw, " (%d, %d)\t\t %s\n", r.Pos.Line, r.Pos.Column, r.Message) - } -} - -func (o *Stylish) Stats(total, errors, warnings, ignored int) { - if o.tw != nil { - o.tw.Flush() - fmt.Fprintln(o.W) - } - fmt.Fprintf(o.W, " ✖ %d problems (%d errors, %d warnings, %d ignored)\n", - total, errors, warnings, ignored) -} diff --git a/vendor/honnef.co/go/tools/lint/lintutil/stats.go b/vendor/honnef.co/go/tools/lint/lintutil/stats.go deleted file mode 100644 index ba8caf0afdd..00000000000 --- a/vendor/honnef.co/go/tools/lint/lintutil/stats.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !aix,!android,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris - 
-package lintutil - -import "os" - -var infoSignals = []os.Signal{} diff --git a/vendor/honnef.co/go/tools/lint/lintutil/stats_bsd.go b/vendor/honnef.co/go/tools/lint/lintutil/stats_bsd.go deleted file mode 100644 index 3a62ede031c..00000000000 --- a/vendor/honnef.co/go/tools/lint/lintutil/stats_bsd.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build darwin dragonfly freebsd netbsd openbsd - -package lintutil - -import ( - "os" - "syscall" -) - -var infoSignals = []os.Signal{syscall.SIGINFO} diff --git a/vendor/honnef.co/go/tools/lint/lintutil/stats_posix.go b/vendor/honnef.co/go/tools/lint/lintutil/stats_posix.go deleted file mode 100644 index 53f21c666b1..00000000000 --- a/vendor/honnef.co/go/tools/lint/lintutil/stats_posix.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build aix android linux solaris - -package lintutil - -import ( - "os" - "syscall" -) - -var infoSignals = []os.Signal{syscall.SIGUSR1} diff --git a/vendor/honnef.co/go/tools/lint/lintutil/util.go b/vendor/honnef.co/go/tools/lint/lintutil/util.go deleted file mode 100644 index 7c3dbdec193..00000000000 --- a/vendor/honnef.co/go/tools/lint/lintutil/util.go +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright (c) 2013 The Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd. - -// Package lintutil provides helpers for writing linter command lines. -package lintutil // import "honnef.co/go/tools/lint/lintutil" - -import ( - "crypto/sha256" - "errors" - "flag" - "fmt" - "go/build" - "go/token" - "io" - "log" - "os" - "os/signal" - "regexp" - "runtime" - "runtime/pprof" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "honnef.co/go/tools/config" - "honnef.co/go/tools/internal/cache" - "honnef.co/go/tools/lint" - "honnef.co/go/tools/lint/lintutil/format" - "honnef.co/go/tools/version" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/buildutil" - "golang.org/x/tools/go/packages" -) - -func NewVersionFlag() flag.Getter { - tags := build.Default.ReleaseTags - v := tags[len(tags)-1][2:] - version := new(VersionFlag) - if err := version.Set(v); err != nil { - panic(fmt.Sprintf("internal error: %s", err)) - } - return version -} - -type VersionFlag int - -func (v *VersionFlag) String() string { - return fmt.Sprintf("1.%d", *v) - -} - -func (v *VersionFlag) Set(s string) error { - if len(s) < 3 { - return errors.New("invalid Go version") - } - if s[0] != '1' { - return errors.New("invalid Go version") - } - if s[1] != '.' { - return errors.New("invalid Go version") - } - i, err := strconv.Atoi(s[2:]) - *v = VersionFlag(i) - return err -} - -func (v *VersionFlag) Get() interface{} { - return int(*v) -} - -func usage(name string, flags *flag.FlagSet) func() { - return func() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", name) - fmt.Fprintf(os.Stderr, "\t%s [flags] # runs on package in current directory\n", name) - fmt.Fprintf(os.Stderr, "\t%s [flags] packages\n", name) - fmt.Fprintf(os.Stderr, "\t%s [flags] directory\n", name) - fmt.Fprintf(os.Stderr, "\t%s [flags] files... 
# must be a single package\n", name) - fmt.Fprintf(os.Stderr, "Flags:\n") - flags.PrintDefaults() - } -} - -type list []string - -func (list *list) String() string { - return `"` + strings.Join(*list, ",") + `"` -} - -func (list *list) Set(s string) error { - if s == "" { - *list = nil - return nil - } - - *list = strings.Split(s, ",") - return nil -} - -func FlagSet(name string) *flag.FlagSet { - flags := flag.NewFlagSet("", flag.ExitOnError) - flags.Usage = usage(name, flags) - flags.String("tags", "", "List of `build tags`") - flags.Bool("tests", true, "Include tests") - flags.Bool("version", false, "Print version and exit") - flags.Bool("show-ignored", false, "Don't filter ignored problems") - flags.String("f", "text", "Output `format` (valid choices are 'stylish', 'text' and 'json')") - flags.String("explain", "", "Print description of `check`") - - flags.String("debug.cpuprofile", "", "Write CPU profile to `file`") - flags.String("debug.memprofile", "", "Write memory profile to `file`") - flags.Bool("debug.version", false, "Print detailed version information about this program") - flags.Bool("debug.no-compile-errors", false, "Don't print compile errors") - flags.String("debug.measure-analyzers", "", "Write analysis measurements to `file`. `file` will be opened for appending if it already exists.") - flags.Uint("debug.repeat-analyzers", 0, "Run analyzers `num` times") - - checks := list{"inherit"} - fail := list{"all"} - flags.Var(&checks, "checks", "Comma-separated list of `checks` to enable.") - flags.Var(&fail, "fail", "Comma-separated list of `checks` that can cause a non-zero exit status.") - - tags := build.Default.ReleaseTags - v := tags[len(tags)-1][2:] - version := new(VersionFlag) - if err := version.Set(v); err != nil { - panic(fmt.Sprintf("internal error: %s", err)) - } - - flags.Var(version, "go", "Target Go `version` in the format '1.x'") - return flags -} - -func findCheck(cs []*analysis.Analyzer, check string) (*analysis.Analyzer, bool) { - for _, c := range cs { - if c.Name == check { - return c, true - } - } - return nil, false -} - -func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *flag.FlagSet) { - tags := fs.Lookup("tags").Value.(flag.Getter).Get().(string) - tests := fs.Lookup("tests").Value.(flag.Getter).Get().(bool) - goVersion := fs.Lookup("go").Value.(flag.Getter).Get().(int) - formatter := fs.Lookup("f").Value.(flag.Getter).Get().(string) - printVersion := fs.Lookup("version").Value.(flag.Getter).Get().(bool) - showIgnored := fs.Lookup("show-ignored").Value.(flag.Getter).Get().(bool) - explain := fs.Lookup("explain").Value.(flag.Getter).Get().(string) - - cpuProfile := fs.Lookup("debug.cpuprofile").Value.(flag.Getter).Get().(string) - memProfile := fs.Lookup("debug.memprofile").Value.(flag.Getter).Get().(string) - debugVersion := fs.Lookup("debug.version").Value.(flag.Getter).Get().(bool) - debugNoCompile := fs.Lookup("debug.no-compile-errors").Value.(flag.Getter).Get().(bool) - debugRepeat := fs.Lookup("debug.repeat-analyzers").Value.(flag.Getter).Get().(uint) - - var measureAnalyzers func(analysis *analysis.Analyzer, pkg *lint.Package, d time.Duration) - if path := fs.Lookup("debug.measure-analyzers").Value.(flag.Getter).Get().(string); path != "" { - f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) - if err != nil { - log.Fatal(err) - } - - mu := &sync.Mutex{} - measureAnalyzers = func(analysis *analysis.Analyzer, pkg *lint.Package, d time.Duration) { - mu.Lock() - defer mu.Unlock() - if _, err := 
fmt.Fprintf(f, "%s\t%s\t%d\n", analysis.Name, pkg.ID, d.Nanoseconds()); err != nil { - log.Println("error writing analysis measurements:", err) - } - } - } - - cfg := config.Config{} - cfg.Checks = *fs.Lookup("checks").Value.(*list) - - exit := func(code int) { - if cpuProfile != "" { - pprof.StopCPUProfile() - } - if memProfile != "" { - f, err := os.Create(memProfile) - if err != nil { - panic(err) - } - runtime.GC() - pprof.WriteHeapProfile(f) - } - os.Exit(code) - } - if cpuProfile != "" { - f, err := os.Create(cpuProfile) - if err != nil { - log.Fatal(err) - } - pprof.StartCPUProfile(f) - } - - if debugVersion { - version.Verbose() - exit(0) - } - - if printVersion { - version.Print() - exit(0) - } - - // Validate that the tags argument is well-formed. go/packages - // doesn't detect malformed build flags and returns unhelpful - // errors. - tf := buildutil.TagsFlag{} - if err := tf.Set(tags); err != nil { - fmt.Fprintln(os.Stderr, fmt.Errorf("invalid value %q for flag -tags: %s", tags, err)) - exit(1) - } - - if explain != "" { - var haystack []*analysis.Analyzer - haystack = append(haystack, cs...) - for _, cum := range cums { - haystack = append(haystack, cum.Analyzer()) - } - check, ok := findCheck(haystack, explain) - if !ok { - fmt.Fprintln(os.Stderr, "Couldn't find check", explain) - exit(1) - } - if check.Doc == "" { - fmt.Fprintln(os.Stderr, explain, "has no documentation") - exit(1) - } - fmt.Println(check.Doc) - exit(0) - } - - ps, err := Lint(cs, cums, fs.Args(), &Options{ - Tags: tags, - LintTests: tests, - GoVersion: goVersion, - Config: cfg, - PrintAnalyzerMeasurement: measureAnalyzers, - RepeatAnalyzers: debugRepeat, - }) - if err != nil { - fmt.Fprintln(os.Stderr, err) - exit(1) - } - - var f format.Formatter - switch formatter { - case "text": - f = format.Text{W: os.Stdout} - case "stylish": - f = &format.Stylish{W: os.Stdout} - case "json": - f = format.JSON{W: os.Stdout} - default: - fmt.Fprintf(os.Stderr, "unsupported output format %q\n", formatter) - exit(2) - } - - var ( - total int - errors int - warnings int - ignored int - ) - - fail := *fs.Lookup("fail").Value.(*list) - analyzers := make([]*analysis.Analyzer, len(cs), len(cs)+len(cums)) - copy(analyzers, cs) - for _, cum := range cums { - analyzers = append(analyzers, cum.Analyzer()) - } - shouldExit := lint.FilterChecks(analyzers, fail) - shouldExit["compile"] = true - - total = len(ps) - for _, p := range ps { - if p.Check == "compile" && debugNoCompile { - continue - } - if p.Severity == lint.Ignored && !showIgnored { - ignored++ - continue - } - if shouldExit[p.Check] { - errors++ - } else { - p.Severity = lint.Warning - warnings++ - } - f.Format(p) - } - if f, ok := f.(format.Statter); ok { - f.Stats(total, errors, warnings, ignored) - } - if errors > 0 { - exit(1) - } - exit(0) -} - -type Options struct { - Config config.Config - - Tags string - LintTests bool - GoVersion int - PrintAnalyzerMeasurement func(analysis *analysis.Analyzer, pkg *lint.Package, d time.Duration) - RepeatAnalyzers uint -} - -func computeSalt() ([]byte, error) { - if version.Version != "devel" { - return []byte(version.Version), nil - } - p, err := os.Executable() - if err != nil { - return nil, err - } - f, err := os.Open(p) - if err != nil { - return nil, err - } - defer f.Close() - h := sha256.New() - if _, err := io.Copy(h, f); err != nil { - return nil, err - } - return h.Sum(nil), nil -} - -func Lint(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, paths []string, opt *Options) ([]lint.Problem, error) { - salt, err 
:= computeSalt() - if err != nil { - return nil, fmt.Errorf("could not compute salt for cache: %s", err) - } - cache.SetSalt(salt) - - if opt == nil { - opt = &Options{} - } - - l := &lint.Linter{ - Checkers: cs, - CumulativeCheckers: cums, - GoVersion: opt.GoVersion, - Config: opt.Config, - RepeatAnalyzers: opt.RepeatAnalyzers, - } - l.Stats.PrintAnalyzerMeasurement = opt.PrintAnalyzerMeasurement - cfg := &packages.Config{} - if opt.LintTests { - cfg.Tests = true - } - if opt.Tags != "" { - cfg.BuildFlags = append(cfg.BuildFlags, "-tags", opt.Tags) - } - - printStats := func() { - // Individual stats are read atomically, but overall there - // is no synchronisation. For printing rough progress - // information, this doesn't matter. - switch atomic.LoadUint32(&l.Stats.State) { - case lint.StateInitializing: - fmt.Fprintln(os.Stderr, "Status: initializing") - case lint.StateGraph: - fmt.Fprintln(os.Stderr, "Status: loading package graph") - case lint.StateProcessing: - fmt.Fprintf(os.Stderr, "Packages: %d/%d initial, %d/%d total; Workers: %d/%d; Problems: %d\n", - atomic.LoadUint32(&l.Stats.ProcessedInitialPackages), - atomic.LoadUint32(&l.Stats.InitialPackages), - atomic.LoadUint32(&l.Stats.ProcessedPackages), - atomic.LoadUint32(&l.Stats.TotalPackages), - atomic.LoadUint32(&l.Stats.ActiveWorkers), - atomic.LoadUint32(&l.Stats.TotalWorkers), - atomic.LoadUint32(&l.Stats.Problems), - ) - case lint.StateCumulative: - fmt.Fprintln(os.Stderr, "Status: processing cumulative checkers") - } - } - if len(infoSignals) > 0 { - ch := make(chan os.Signal, 1) - signal.Notify(ch, infoSignals...) - defer signal.Stop(ch) - go func() { - for range ch { - printStats() - } - }() - } - - ps, err := l.Lint(cfg, paths) - return ps, err -} - -var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?$`) - -func parsePos(pos string) token.Position { - if pos == "-" || pos == "" { - return token.Position{} - } - parts := posRe.FindStringSubmatch(pos) - if parts == nil { - panic(fmt.Sprintf("internal error: malformed position %q", pos)) - } - file := parts[1] - line, _ := strconv.Atoi(parts[2]) - col, _ := strconv.Atoi(parts[3]) - return token.Position{ - Filename: file, - Line: line, - Column: col, - } -} - -func InitializeAnalyzers(docs map[string]*lint.Documentation, analyzers map[string]*analysis.Analyzer) map[string]*analysis.Analyzer { - out := make(map[string]*analysis.Analyzer, len(analyzers)) - for k, v := range analyzers { - vc := *v - out[k] = &vc - - vc.Name = k - doc, ok := docs[k] - if !ok { - panic(fmt.Sprintf("missing documentation for check %s", k)) - } - vc.Doc = doc.String() - if vc.Flags.Usage == nil { - fs := flag.NewFlagSet("", flag.PanicOnError) - fs.Var(NewVersionFlag(), "go", "Target Go version") - vc.Flags = *fs - } - } - return out -} diff --git a/vendor/honnef.co/go/tools/lint/runner.go b/vendor/honnef.co/go/tools/lint/runner.go deleted file mode 100644 index 74106ced826..00000000000 --- a/vendor/honnef.co/go/tools/lint/runner.go +++ /dev/null @@ -1,1114 +0,0 @@ -package lint - -/* -Package loading - -Conceptually, package loading in the runner can be imagined as a -graph-shaped work list. We iteratively pop off leaf nodes (packages -that have no unloaded dependencies) and load data from export data, -our cache, or source. - -Specifically, non-initial packages are loaded from export data and the -fact cache if possible, otherwise from source. Initial packages are -loaded from export data, the fact cache and the (problems, ignores, -config) cache if possible, otherwise from source. 
- -The appeal of this approach is that it is both simple to implement and -easily parallelizable. Each leaf node can be processed independently, -and new leaf nodes appear as their dependencies are being processed. - -The downside of this approach, however, is that we're doing more work -than necessary. Imagine an initial package A, which has the following -dependency chain: A->B->C->D – in the current implementation, we will -load all 4 packages. However, if package A can be loaded fully from -cached information, then none of its dependencies are necessary, and -we could avoid loading them. - - -Parallelism - -Runner implements parallel processing of packages by spawning one -goroutine per package in the dependency graph, without any semaphores. -Each goroutine initially waits on the completion of all of its -dependencies, thus establishing correct order of processing. Once all -dependencies finish processing, the goroutine will load the package -from export data or source – this loading is guarded by a semaphore, -sized according to the number of CPU cores. This way, we only have as -many packages occupying memory and CPU resources as there are actual -cores to process them. - -This combination of unbounded goroutines but bounded package loading -means that if we have many parallel, independent subgraphs, they will -all execute in parallel, while not wasting resources for long linear -chains or trying to process more subgraphs in parallel than the system -can handle. - - -Caching - -We make use of several caches. These caches are Go's export data, our -facts cache, and our (problems, ignores, config) cache. - -Initial packages will either be loaded from a combination of all three -caches, or from source. Non-initial packages will either be loaded -from a combination of export data and facts cache, or from source. - -The facts cache is separate from the (problems, ignores, config) cache -because when we process non-initial packages, we generate facts, but -we discard problems and ignores. - -The facts cache is keyed by (package, analyzer), whereas the -(problems, ignores, config) cache is keyed by (package, list of -analyzes). The difference between the two exists because there are -only a handful of analyses that produce facts, but hundreds of -analyses that don't. Creating one cache entry per fact-generating -analysis is feasible, creating one cache entry per normal analysis has -significant performance and storage overheads. - -The downside of keying by the list of analyzes is, naturally, that a -change in list of analyzes changes the cache key. `staticcheck -checks -A` and `staticcheck -checks A,B` will therefore need their own cache -entries and not reuse each other's work. This problem does not affect -the facts cache. - -*/ - -import ( - "bytes" - "encoding/gob" - "encoding/hex" - "fmt" - "go/ast" - "go/token" - "go/types" - "reflect" - "regexp" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/packages" - "golang.org/x/tools/go/types/objectpath" - "honnef.co/go/tools/config" - "honnef.co/go/tools/facts" - "honnef.co/go/tools/internal/cache" - "honnef.co/go/tools/loader" -) - -func init() { - gob.Register(&FileIgnore{}) - gob.Register(&LineIgnore{}) -} - -// If enabled, abuse of the go/analysis API will lead to panics -const sanityCheck = true - -// OPT(dh): for a dependency tree A->B->C->D, if we have cached data -// for B, there should be no need to load C and D individually. 
Go's -// export data for B contains all the data we need on types, and our -// fact cache could store the union of B, C and D in B. -// -// This may change unused's behavior, however, as it may observe fewer -// interfaces from transitive dependencies. - -// OPT(dh): every single package will have the same value for -// canClearTypes. We could move the Package.decUse method to runner to -// eliminate this field. This is probably not worth it, though. There -// are only thousands of packages, so the field only takes up -// kilobytes of memory. - -// OPT(dh): do we really need the Package.gen field? it's based -// trivially on pkg.results and merely caches the result of a type -// assertion. How often do we actually use the field? - -type Package struct { - // dependents is initially set to 1 plus the number of packages - // that directly import this package. It is atomically decreased - // by 1 every time a dependent has been processed or when the - // package itself has been processed. Once the value reaches zero, - // the package is no longer needed. - dependents uint64 - - *packages.Package - Imports []*Package - initial bool - // fromSource is set to true for packages that have been loaded - // from source. This is the case for initial packages, packages - // with missing export data, and packages with no cached facts. - fromSource bool - // hash stores the package hash, as computed by packageHash - hash string - actionID cache.ActionID - done chan struct{} - - resultsMu sync.Mutex - // results maps analyzer IDs to analyzer results. it is - // implemented as a deduplicating concurrent cache. - results []*result - - cfg *config.Config - // gen maps file names to the code generator that created them - gen map[string]facts.Generator - problems []Problem - ignores []Ignore - errs []error - - // these slices are indexed by analysis - facts []map[types.Object][]analysis.Fact - pkgFacts [][]analysis.Fact - - // canClearTypes is set to true if we can discard type - // information after the package and its dependents have been - // processed. This is the case when no cumulative checkers are - // being run. 
- canClearTypes bool -} - -type cachedPackage struct { - Problems []Problem - Ignores []Ignore - Config *config.Config -} - -func (pkg *Package) decUse() { - ret := atomic.AddUint64(&pkg.dependents, ^uint64(0)) - if ret == 0 { - // nobody depends on this package anymore - if pkg.canClearTypes { - pkg.Types = nil - } - pkg.facts = nil - pkg.pkgFacts = nil - - for _, imp := range pkg.Imports { - imp.decUse() - } - } -} - -type result struct { - v interface{} - err error - ready chan struct{} -} - -type Runner struct { - cache *cache.Cache - goVersion int - stats *Stats - repeatAnalyzers uint - - analyzerIDs analyzerIDs - problemsCacheKey string - - // limits parallelism of loading packages - loadSem chan struct{} -} - -type analyzerIDs struct { - m map[*analysis.Analyzer]int -} - -func (ids analyzerIDs) get(a *analysis.Analyzer) int { - id, ok := ids.m[a] - if !ok { - panic(fmt.Sprintf("no analyzer ID for %s", a.Name)) - } - return id -} - -type Fact struct { - Path string - Fact analysis.Fact -} - -type analysisAction struct { - analyzer *analysis.Analyzer - analyzerID int - pkg *Package - newPackageFacts []analysis.Fact - problems []Problem - - pkgFacts map[*types.Package][]analysis.Fact -} - -func (ac *analysisAction) String() string { - return fmt.Sprintf("%s @ %s", ac.analyzer, ac.pkg) -} - -func (ac *analysisAction) allObjectFacts() []analysis.ObjectFact { - out := make([]analysis.ObjectFact, 0, len(ac.pkg.facts[ac.analyzerID])) - for obj, facts := range ac.pkg.facts[ac.analyzerID] { - for _, fact := range facts { - out = append(out, analysis.ObjectFact{ - Object: obj, - Fact: fact, - }) - } - } - return out -} - -func (ac *analysisAction) allPackageFacts() []analysis.PackageFact { - out := make([]analysis.PackageFact, 0, len(ac.pkgFacts)) - for pkg, facts := range ac.pkgFacts { - for _, fact := range facts { - out = append(out, analysis.PackageFact{ - Package: pkg, - Fact: fact, - }) - } - } - return out -} - -func (ac *analysisAction) importObjectFact(obj types.Object, fact analysis.Fact) bool { - if sanityCheck && len(ac.analyzer.FactTypes) == 0 { - panic("analysis doesn't export any facts") - } - for _, f := range ac.pkg.facts[ac.analyzerID][obj] { - if reflect.TypeOf(f) == reflect.TypeOf(fact) { - reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem()) - return true - } - } - return false -} - -func (ac *analysisAction) importPackageFact(pkg *types.Package, fact analysis.Fact) bool { - if sanityCheck && len(ac.analyzer.FactTypes) == 0 { - panic("analysis doesn't export any facts") - } - for _, f := range ac.pkgFacts[pkg] { - if reflect.TypeOf(f) == reflect.TypeOf(fact) { - reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem()) - return true - } - } - return false -} - -func (ac *analysisAction) exportObjectFact(obj types.Object, fact analysis.Fact) { - if sanityCheck && len(ac.analyzer.FactTypes) == 0 { - panic("analysis doesn't export any facts") - } - ac.pkg.facts[ac.analyzerID][obj] = append(ac.pkg.facts[ac.analyzerID][obj], fact) -} - -func (ac *analysisAction) exportPackageFact(fact analysis.Fact) { - if sanityCheck && len(ac.analyzer.FactTypes) == 0 { - panic("analysis doesn't export any facts") - } - ac.pkgFacts[ac.pkg.Types] = append(ac.pkgFacts[ac.pkg.Types], fact) - ac.newPackageFacts = append(ac.newPackageFacts, fact) -} - -func (ac *analysisAction) report(pass *analysis.Pass, d analysis.Diagnostic) { - p := Problem{ - Pos: DisplayPosition(pass.Fset, d.Pos), - End: DisplayPosition(pass.Fset, d.End), - Message: d.Message, - Check: pass.Analyzer.Name, - } - for 
_, r := range d.Related { - p.Related = append(p.Related, Related{ - Pos: DisplayPosition(pass.Fset, r.Pos), - End: DisplayPosition(pass.Fset, r.End), - Message: r.Message, - }) - } - ac.problems = append(ac.problems, p) -} - -func (r *Runner) runAnalysis(ac *analysisAction) (ret interface{}, err error) { - ac.pkg.resultsMu.Lock() - res := ac.pkg.results[r.analyzerIDs.get(ac.analyzer)] - if res != nil { - ac.pkg.resultsMu.Unlock() - <-res.ready - return res.v, res.err - } else { - res = &result{ - ready: make(chan struct{}), - } - ac.pkg.results[r.analyzerIDs.get(ac.analyzer)] = res - ac.pkg.resultsMu.Unlock() - - defer func() { - res.v = ret - res.err = err - close(res.ready) - }() - - pass := new(analysis.Pass) - *pass = analysis.Pass{ - Analyzer: ac.analyzer, - Fset: ac.pkg.Fset, - Files: ac.pkg.Syntax, - // type information may be nil or may be populated. if it is - // nil, it will get populated later. - Pkg: ac.pkg.Types, - TypesInfo: ac.pkg.TypesInfo, - TypesSizes: ac.pkg.TypesSizes, - ResultOf: map[*analysis.Analyzer]interface{}{}, - ImportObjectFact: ac.importObjectFact, - ImportPackageFact: ac.importPackageFact, - ExportObjectFact: ac.exportObjectFact, - ExportPackageFact: ac.exportPackageFact, - Report: func(d analysis.Diagnostic) { - ac.report(pass, d) - }, - AllObjectFacts: ac.allObjectFacts, - AllPackageFacts: ac.allPackageFacts, - } - - if !ac.pkg.initial { - // Don't report problems in dependencies - pass.Report = func(analysis.Diagnostic) {} - } - return r.runAnalysisUser(pass, ac) - } -} - -func (r *Runner) loadCachedPackage(pkg *Package, analyzers []*analysis.Analyzer) (cachedPackage, bool) { - // OPT(dh): we can cache this computation, it'll be the same for all packages - id := cache.Subkey(pkg.actionID, "data "+r.problemsCacheKey) - - b, _, err := r.cache.GetBytes(id) - if err != nil { - return cachedPackage{}, false - } - var cpkg cachedPackage - if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&cpkg); err != nil { - return cachedPackage{}, false - } - return cpkg, true -} - -func (r *Runner) loadCachedFacts(a *analysis.Analyzer, pkg *Package) ([]Fact, bool) { - if len(a.FactTypes) == 0 { - return nil, true - } - - var facts []Fact - // Look in the cache for facts - aID := passActionID(pkg, a) - aID = cache.Subkey(aID, "facts") - b, _, err := r.cache.GetBytes(aID) - if err != nil { - // No cached facts, analyse this package like a user-provided one, but ignore diagnostics - return nil, false - } - - if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&facts); err != nil { - // Cached facts are broken, analyse this package like a user-provided one, but ignore diagnostics - return nil, false - } - return facts, true -} - -type dependencyError struct { - dep string - err error -} - -func (err dependencyError) nested() dependencyError { - if o, ok := err.err.(dependencyError); ok { - return o.nested() - } - return err -} - -func (err dependencyError) Error() string { - if o, ok := err.err.(dependencyError); ok { - return o.Error() - } - return fmt.Sprintf("error running dependency %s: %s", err.dep, err.err) -} - -func (r *Runner) makeAnalysisAction(a *analysis.Analyzer, pkg *Package) *analysisAction { - aid := r.analyzerIDs.get(a) - ac := &analysisAction{ - analyzer: a, - analyzerID: aid, - pkg: pkg, - } - - if len(a.FactTypes) == 0 { - return ac - } - - // Merge all package facts of dependencies - ac.pkgFacts = map[*types.Package][]analysis.Fact{} - seen := map[*Package]struct{}{} - var dfs func(*Package) - dfs = func(pkg *Package) { - if _, ok := seen[pkg]; ok { - 
return - } - seen[pkg] = struct{}{} - s := pkg.pkgFacts[aid] - ac.pkgFacts[pkg.Types] = s[0:len(s):len(s)] - for _, imp := range pkg.Imports { - dfs(imp) - } - } - dfs(pkg) - - return ac -} - -// analyzes that we always want to run, even if they're not being run -// explicitly or as dependencies. these are necessary for the inner -// workings of the runner. -var injectedAnalyses = []*analysis.Analyzer{facts.Generated, config.Analyzer} - -func (r *Runner) runAnalysisUser(pass *analysis.Pass, ac *analysisAction) (interface{}, error) { - if !ac.pkg.fromSource { - panic(fmt.Sprintf("internal error: %s was not loaded from source", ac.pkg)) - } - - // User-provided package, analyse it - // First analyze it with dependencies - for _, req := range ac.analyzer.Requires { - acReq := r.makeAnalysisAction(req, ac.pkg) - ret, err := r.runAnalysis(acReq) - if err != nil { - // We couldn't run a dependency, no point in going on - return nil, dependencyError{req.Name, err} - } - - pass.ResultOf[req] = ret - } - - // Then with this analyzer - var ret interface{} - for i := uint(0); i < r.repeatAnalyzers+1; i++ { - var err error - t := time.Now() - ret, err = ac.analyzer.Run(pass) - r.stats.MeasureAnalyzer(ac.analyzer, ac.pkg, time.Since(t)) - if err != nil { - return nil, err - } - } - - if len(ac.analyzer.FactTypes) > 0 { - // Merge new facts into the package and persist them. - var facts []Fact - for _, fact := range ac.newPackageFacts { - id := r.analyzerIDs.get(ac.analyzer) - ac.pkg.pkgFacts[id] = append(ac.pkg.pkgFacts[id], fact) - facts = append(facts, Fact{"", fact}) - } - for obj, afacts := range ac.pkg.facts[ac.analyzerID] { - if obj.Pkg() != ac.pkg.Package.Types { - continue - } - path, err := objectpath.For(obj) - if err != nil { - continue - } - for _, fact := range afacts { - facts = append(facts, Fact{string(path), fact}) - } - } - - if err := r.cacheData(facts, ac.pkg, ac.analyzer, "facts"); err != nil { - return nil, err - } - } - - return ret, nil -} - -func (r *Runner) cacheData(v interface{}, pkg *Package, a *analysis.Analyzer, subkey string) error { - buf := &bytes.Buffer{} - if err := gob.NewEncoder(buf).Encode(v); err != nil { - return err - } - aID := passActionID(pkg, a) - aID = cache.Subkey(aID, subkey) - if err := r.cache.PutBytes(aID, buf.Bytes()); err != nil { - return err - } - return nil -} - -func NewRunner(stats *Stats) (*Runner, error) { - cache, err := cache.Default() - if err != nil { - return nil, err - } - - return &Runner{ - cache: cache, - stats: stats, - }, nil -} - -// Run loads packages corresponding to patterns and analyses them with -// analyzers. It returns the loaded packages, which contain reported -// diagnostics as well as extracted ignore directives. -// -// Note that diagnostics have not been filtered at this point yet, to -// accommodate cumulative analyzes that require additional steps to -// produce diagnostics. 
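Run below implements the scheduling model described in the package comment: one goroutine per package, completion of dependencies signalled through per-package done channels, and the expensive loading step bounded by a semaphore sized to the CPU count. A self-contained sketch of that pattern, with illustrative names (node, load, schedule) rather than this package's types:

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// node stands in for a package in the import graph.
type node struct {
	name string
	deps []*node
	done chan struct{}
}

// schedule starts one goroutine per node. Each goroutine first waits for
// all of its dependencies to finish, then takes a slot on a semaphore
// sized to the number of CPUs before running the expensive load step, so
// goroutines are unbounded but loading is not.
func schedule(all []*node, load func(*node)) {
	for _, n := range all {
		n.done = make(chan struct{})
	}
	sem := make(chan struct{}, runtime.GOMAXPROCS(0))
	var wg sync.WaitGroup
	wg.Add(len(all))
	for _, n := range all {
		n := n
		go func() {
			defer wg.Done()
			defer close(n.done) // signal our own dependents
			for _, d := range n.deps {
				<-d.done // dependencies complete first
			}
			sem <- struct{}{}        // bound concurrent loading
			defer func() { <-sem }() // release the slot
			load(n)
		}()
	}
	wg.Wait()
}

func main() {
	c := &node{name: "C"}
	b := &node{name: "B", deps: []*node{c}}
	a := &node{name: "A", deps: []*node{b}}
	schedule([]*node{a, b, c}, func(n *node) { fmt.Println("loaded", n.name) })
}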
-func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analysis.Analyzer, hasCumulative bool) ([]*Package, error) { - checkerNames := make([]string, len(analyzers)) - for i, a := range analyzers { - checkerNames[i] = a.Name - } - sort.Strings(checkerNames) - r.problemsCacheKey = strings.Join(checkerNames, " ") - - var allAnalyzers []*analysis.Analyzer - r.analyzerIDs = analyzerIDs{m: map[*analysis.Analyzer]int{}} - id := 0 - seen := map[*analysis.Analyzer]struct{}{} - var dfs func(a *analysis.Analyzer) - dfs = func(a *analysis.Analyzer) { - if _, ok := seen[a]; ok { - return - } - seen[a] = struct{}{} - allAnalyzers = append(allAnalyzers, a) - r.analyzerIDs.m[a] = id - id++ - for _, f := range a.FactTypes { - gob.Register(f) - } - for _, req := range a.Requires { - dfs(req) - } - } - for _, a := range analyzers { - if v := a.Flags.Lookup("go"); v != nil { - v.Value.Set(fmt.Sprintf("1.%d", r.goVersion)) - } - dfs(a) - } - for _, a := range injectedAnalyses { - dfs(a) - } - // Run all analyzers on all packages (subject to further - // restrictions enforced later). This guarantees that if analyzer - // A1 depends on A2, and A2 has facts, that A2 will run on the - // dependencies of user-provided packages, even though A1 won't. - analyzers = allAnalyzers - - var dcfg packages.Config - if cfg != nil { - dcfg = *cfg - } - - atomic.StoreUint32(&r.stats.State, StateGraph) - initialPkgs, err := loader.Graph(dcfg, patterns...) - if err != nil { - return nil, err - } - defer r.cache.Trim() - - var allPkgs []*Package - m := map[*packages.Package]*Package{} - packages.Visit(initialPkgs, nil, func(l *packages.Package) { - m[l] = &Package{ - Package: l, - results: make([]*result, len(r.analyzerIDs.m)), - facts: make([]map[types.Object][]analysis.Fact, len(r.analyzerIDs.m)), - pkgFacts: make([][]analysis.Fact, len(r.analyzerIDs.m)), - done: make(chan struct{}), - // every package needs itself - dependents: 1, - canClearTypes: !hasCumulative, - } - allPkgs = append(allPkgs, m[l]) - for i := range m[l].facts { - m[l].facts[i] = map[types.Object][]analysis.Fact{} - } - for _, err := range l.Errors { - m[l].errs = append(m[l].errs, err) - } - for _, v := range l.Imports { - m[v].dependents++ - m[l].Imports = append(m[l].Imports, m[v]) - } - - m[l].hash, err = r.packageHash(m[l]) - m[l].actionID = packageActionID(m[l]) - if err != nil { - m[l].errs = append(m[l].errs, err) - } - }) - - pkgs := make([]*Package, len(initialPkgs)) - for i, l := range initialPkgs { - pkgs[i] = m[l] - pkgs[i].initial = true - } - - atomic.StoreUint32(&r.stats.InitialPackages, uint32(len(initialPkgs))) - atomic.StoreUint32(&r.stats.TotalPackages, uint32(len(allPkgs))) - atomic.StoreUint32(&r.stats.State, StateProcessing) - - var wg sync.WaitGroup - wg.Add(len(allPkgs)) - r.loadSem = make(chan struct{}, runtime.GOMAXPROCS(-1)) - atomic.StoreUint32(&r.stats.TotalWorkers, uint32(cap(r.loadSem))) - for _, pkg := range allPkgs { - pkg := pkg - go func() { - r.processPkg(pkg, analyzers) - - if pkg.initial { - atomic.AddUint32(&r.stats.ProcessedInitialPackages, 1) - } - atomic.AddUint32(&r.stats.Problems, uint32(len(pkg.problems))) - wg.Done() - }() - } - wg.Wait() - - return pkgs, nil -} - -var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?`) - -func parsePos(pos string) (token.Position, int, error) { - if pos == "-" || pos == "" { - return token.Position{}, 0, nil - } - parts := posRe.FindStringSubmatch(pos) - if parts == nil { - return token.Position{}, 0, fmt.Errorf("malformed position %q", pos) - } - file := 
parts[1] - line, _ := strconv.Atoi(parts[2]) - col, _ := strconv.Atoi(parts[3]) - return token.Position{ - Filename: file, - Line: line, - Column: col, - }, len(parts[0]), nil -} - -// loadPkg loads a Go package. It may be loaded from a combination of -// caches, or from source. -func (r *Runner) loadPkg(pkg *Package, analyzers []*analysis.Analyzer) error { - if pkg.Types != nil { - panic(fmt.Sprintf("internal error: %s has already been loaded", pkg.Package)) - } - - if pkg.initial { - // Try to load cached package - cpkg, ok := r.loadCachedPackage(pkg, analyzers) - if ok { - pkg.problems = cpkg.Problems - pkg.ignores = cpkg.Ignores - pkg.cfg = cpkg.Config - } else { - pkg.fromSource = true - return loader.LoadFromSource(pkg.Package) - } - } - - // At this point we're either working with a non-initial package, - // or we managed to load cached problems for the package. We still - // need export data and facts. - - // OPT(dh): we don't need type information for this package if no - // other package depends on it. this may be the case for initial - // packages. - - // Load package from export data - if err := loader.LoadFromExport(pkg.Package); err != nil { - // We asked Go to give us up to date export data, yet - // we can't load it. There must be something wrong. - // - // Attempt loading from source. This should fail (because - // otherwise there would be export data); we just want to - // get the compile errors. If loading from source succeeds - // we discard the result, anyway. Otherwise we'll fail - // when trying to reload from export data later. - // - // FIXME(dh): we no longer reload from export data, so - // theoretically we should be able to continue - pkg.fromSource = true - if err := loader.LoadFromSource(pkg.Package); err != nil { - return err - } - // Make sure this package can't be imported successfully - pkg.Package.Errors = append(pkg.Package.Errors, packages.Error{ - Pos: "-", - Msg: fmt.Sprintf("could not load export data: %s", err), - Kind: packages.ParseError, - }) - return fmt.Errorf("could not load export data: %s", err) - } - - failed := false - seen := make([]bool, len(r.analyzerIDs.m)) - var dfs func(*analysis.Analyzer) - dfs = func(a *analysis.Analyzer) { - if seen[r.analyzerIDs.get(a)] { - return - } - seen[r.analyzerIDs.get(a)] = true - - if len(a.FactTypes) > 0 { - facts, ok := r.loadCachedFacts(a, pkg) - if !ok { - failed = true - return - } - - for _, f := range facts { - if f.Path == "" { - // This is a package fact - pkg.pkgFacts[r.analyzerIDs.get(a)] = append(pkg.pkgFacts[r.analyzerIDs.get(a)], f.Fact) - continue - } - obj, err := objectpath.Object(pkg.Types, objectpath.Path(f.Path)) - if err != nil { - // Be lenient about these errors. For example, when - // analysing io/ioutil from source, we may get a fact - // for methods on the devNull type, and objectpath - // will happily create a path for them. However, when - // we later load io/ioutil from export data, the path - // no longer resolves. - // - // If an exported type embeds the unexported type, - // then (part of) the unexported type will become part - // of the type information and our path will resolve - // again. 
- continue - } - pkg.facts[r.analyzerIDs.get(a)][obj] = append(pkg.facts[r.analyzerIDs.get(a)][obj], f.Fact) - } - } - - for _, req := range a.Requires { - dfs(req) - } - } - for _, a := range analyzers { - dfs(a) - } - - if !failed { - return nil - } - - // We failed to load some cached facts - pkg.fromSource = true - // XXX we added facts to the maps, we need to get rid of those - return loader.LoadFromSource(pkg.Package) -} - -type analysisError struct { - analyzer *analysis.Analyzer - pkg *Package - err error -} - -func (err analysisError) Error() string { - return fmt.Sprintf("error running analyzer %s on %s: %s", err.analyzer, err.pkg, err.err) -} - -// processPkg processes a package. This involves loading the package, -// either from export data or from source. For packages loaded from -// source, the provides analyzers will be run on the package. -func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) { - defer func() { - // Clear information we no longer need. Make sure to do this - // when returning from processPkg so that we clear - // dependencies, not just initial packages. - pkg.TypesInfo = nil - pkg.Syntax = nil - pkg.results = nil - - atomic.AddUint32(&r.stats.ProcessedPackages, 1) - pkg.decUse() - close(pkg.done) - }() - - // Ensure all packages have the generated map and config. This is - // required by internals of the runner. Analyses that themselves - // make use of either have an explicit dependency so that other - // runners work correctly, too. - analyzers = append(analyzers[0:len(analyzers):len(analyzers)], injectedAnalyses...) - - if len(pkg.errs) != 0 { - return - } - - for _, imp := range pkg.Imports { - <-imp.done - if len(imp.errs) > 0 { - if imp.initial { - // Don't print the error of the dependency since it's - // an initial package and we're already printing the - // error. - pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s", imp, pkg)) - } else { - var s string - for _, err := range imp.errs { - s += "\n\t" + err.Error() - } - pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s: %s", imp, pkg, s)) - } - return - } - } - if pkg.PkgPath == "unsafe" { - pkg.Types = types.Unsafe - return - } - - r.loadSem <- struct{}{} - atomic.AddUint32(&r.stats.ActiveWorkers, 1) - defer func() { - <-r.loadSem - atomic.AddUint32(&r.stats.ActiveWorkers, ^uint32(0)) - }() - if err := r.loadPkg(pkg, analyzers); err != nil { - pkg.errs = append(pkg.errs, err) - return - } - - // A package's object facts is the union of all of its dependencies. - for _, imp := range pkg.Imports { - for ai, m := range imp.facts { - for obj, facts := range m { - pkg.facts[ai][obj] = facts[0:len(facts):len(facts)] - } - } - } - - if !pkg.fromSource { - // Nothing left to do for the package. - return - } - - // Run analyses on initial packages and those missing facts - var wg sync.WaitGroup - wg.Add(len(analyzers)) - errs := make([]error, len(analyzers)) - var acs []*analysisAction - for i, a := range analyzers { - i := i - a := a - ac := r.makeAnalysisAction(a, pkg) - acs = append(acs, ac) - go func() { - defer wg.Done() - // Only initial packages and packages with missing - // facts will have been loaded from source. 
- if pkg.initial || len(a.FactTypes) > 0 { - if _, err := r.runAnalysis(ac); err != nil { - errs[i] = analysisError{a, pkg, err} - return - } - } - }() - } - wg.Wait() - - depErrors := map[dependencyError]int{} - for _, err := range errs { - if err == nil { - continue - } - switch err := err.(type) { - case analysisError: - switch err := err.err.(type) { - case dependencyError: - depErrors[err.nested()]++ - default: - pkg.errs = append(pkg.errs, err) - } - default: - pkg.errs = append(pkg.errs, err) - } - } - for err, count := range depErrors { - pkg.errs = append(pkg.errs, - fmt.Errorf("could not run %s@%s, preventing %d analyzers from running: %s", err.dep, pkg, count, err.err)) - } - - // We can't process ignores at this point because `unused` needs - // to see more than one package to make its decision. - // - // OPT(dh): can't we guard this block of code by pkg.initial? - ignores, problems := parseDirectives(pkg.Package) - pkg.ignores = append(pkg.ignores, ignores...) - pkg.problems = append(pkg.problems, problems...) - for _, ac := range acs { - pkg.problems = append(pkg.problems, ac.problems...) - } - - if pkg.initial { - // Only initial packages have these analyzers run, and only - // initial packages need these. - if pkg.results[r.analyzerIDs.get(config.Analyzer)].v != nil { - pkg.cfg = pkg.results[r.analyzerIDs.get(config.Analyzer)].v.(*config.Config) - } - pkg.gen = pkg.results[r.analyzerIDs.get(facts.Generated)].v.(map[string]facts.Generator) - } - - // In a previous version of the code, we would throw away all type - // information and reload it from export data. That was - // nonsensical. The *types.Package doesn't keep any information - // live that export data wouldn't also. We only need to discard - // the AST and the TypesInfo maps; that happens after we return - // from processPkg. -} - -func parseDirective(s string) (cmd string, args []string) { - if !strings.HasPrefix(s, "//lint:") { - return "", nil - } - s = strings.TrimPrefix(s, "//lint:") - fields := strings.Split(s, " ") - return fields[0], fields[1:] -} - -// parseDirectives extracts all linter directives from the source -// files of the package. Malformed directives are returned as problems. 
-func parseDirectives(pkg *packages.Package) ([]Ignore, []Problem) { - var ignores []Ignore - var problems []Problem - - for _, f := range pkg.Syntax { - found := false - commentLoop: - for _, cg := range f.Comments { - for _, c := range cg.List { - if strings.Contains(c.Text, "//lint:") { - found = true - break commentLoop - } - } - } - if !found { - continue - } - cm := ast.NewCommentMap(pkg.Fset, f, f.Comments) - for node, cgs := range cm { - for _, cg := range cgs { - for _, c := range cg.List { - if !strings.HasPrefix(c.Text, "//lint:") { - continue - } - cmd, args := parseDirective(c.Text) - switch cmd { - case "ignore", "file-ignore": - if len(args) < 2 { - p := Problem{ - Pos: DisplayPosition(pkg.Fset, c.Pos()), - Message: "malformed linter directive; missing the required reason field?", - Severity: Error, - Check: "compile", - } - problems = append(problems, p) - continue - } - default: - // unknown directive, ignore - continue - } - checks := strings.Split(args[0], ",") - pos := DisplayPosition(pkg.Fset, node.Pos()) - var ig Ignore - switch cmd { - case "ignore": - ig = &LineIgnore{ - File: pos.Filename, - Line: pos.Line, - Checks: checks, - Pos: DisplayPosition(pkg.Fset, c.Pos()), - } - case "file-ignore": - ig = &FileIgnore{ - File: pos.Filename, - Checks: checks, - } - } - ignores = append(ignores, ig) - } - } - } - } - - return ignores, problems -} - -// packageHash computes a package's hash. The hash is based on all Go -// files that make up the package, as well as the hashes of imported -// packages. -func (r *Runner) packageHash(pkg *Package) (string, error) { - key := cache.NewHash("package hash") - fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) - fmt.Fprintf(key, "go %d\n", r.goVersion) - for _, f := range pkg.CompiledGoFiles { - h, err := cache.FileHash(f) - if err != nil { - return "", err - } - fmt.Fprintf(key, "file %s %x\n", f, h) - } - - // Actually load the configuration to calculate its hash. This - // will take into consideration inheritance of configuration - // files, as well as the default configuration. - // - // OPT(dh): doing this means we'll load the config twice: once for - // computing the hash, and once when analyzing the package from - // source. - cdir := config.Dir(pkg.GoFiles) - if cdir == "" { - fmt.Fprintf(key, "file %s %x\n", config.ConfigName, [cache.HashSize]byte{}) - } else { - cfg, err := config.Load(cdir) - if err != nil { - return "", err - } - h := cache.NewHash(config.ConfigName) - if _, err := h.Write([]byte(cfg.String())); err != nil { - return "", err - } - fmt.Fprintf(key, "file %s %x\n", config.ConfigName, h.Sum()) - } - - imps := make([]*Package, len(pkg.Imports)) - copy(imps, pkg.Imports) - sort.Slice(imps, func(i, j int) bool { - return imps[i].PkgPath < imps[j].PkgPath - }) - for _, dep := range imps { - if dep.PkgPath == "unsafe" { - continue - } - - fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, dep.hash) - } - h := key.Sum() - return hex.EncodeToString(h[:]), nil -} - -func packageActionID(pkg *Package) cache.ActionID { - key := cache.NewHash("package ID") - fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) - fmt.Fprintf(key, "pkghash %s\n", pkg.hash) - return key.Sum() -} - -// passActionID computes an ActionID for an analysis pass. 
-func passActionID(pkg *Package, analyzer *analysis.Analyzer) cache.ActionID { - return cache.Subkey(pkg.actionID, fmt.Sprintf("analyzer %s", analyzer.Name)) -} diff --git a/vendor/honnef.co/go/tools/lint/stats.go b/vendor/honnef.co/go/tools/lint/stats.go deleted file mode 100644 index 85eb9784489..00000000000 --- a/vendor/honnef.co/go/tools/lint/stats.go +++ /dev/null @@ -1,38 +0,0 @@ -package lint - -import ( - "time" - - "golang.org/x/tools/go/analysis" -) - -const ( - StateInitializing = 0 - StateGraph = 1 - StateProcessing = 2 - StateCumulative = 3 -) - -type Stats struct { - State uint32 - - InitialPackages uint32 - TotalPackages uint32 - ProcessedPackages uint32 - ProcessedInitialPackages uint32 - Problems uint32 - ActiveWorkers uint32 - TotalWorkers uint32 - PrintAnalyzerMeasurement func(*analysis.Analyzer, *Package, time.Duration) -} - -type AnalysisMeasurementKey struct { - Analysis string - Pkg string -} - -func (s *Stats) MeasureAnalyzer(analysis *analysis.Analyzer, pkg *Package, d time.Duration) { - if s.PrintAnalyzerMeasurement != nil { - s.PrintAnalyzerMeasurement(analysis, pkg, d) - } -} diff --git a/vendor/honnef.co/go/tools/loader/loader.go b/vendor/honnef.co/go/tools/loader/loader.go deleted file mode 100644 index a14f274d293..00000000000 --- a/vendor/honnef.co/go/tools/loader/loader.go +++ /dev/null @@ -1,210 +0,0 @@ -package loader - -import ( - "errors" - "fmt" - "go/ast" - "go/parser" - "go/scanner" - "go/token" - "go/types" - "log" - "os" - - "golang.org/x/tools/go/gcexportdata" - "golang.org/x/tools/go/packages" -) - -// Graph resolves patterns and returns packages with all the -// information required to later load type information, and optionally -// syntax trees. -// -// The provided config can set any setting with the exception of Mode. -func Graph(cfg packages.Config, patterns ...string) ([]*packages.Package, error) { - cfg.Mode = packages.NeedName | packages.NeedImports | packages.NeedDeps | packages.NeedExportsFile | packages.NeedFiles | packages.NeedCompiledGoFiles | packages.NeedTypesSizes - pkgs, err := packages.Load(&cfg, patterns...) - if err != nil { - return nil, err - } - fset := token.NewFileSet() - packages.Visit(pkgs, nil, func(pkg *packages.Package) { - pkg.Fset = fset - }) - - n := 0 - for _, pkg := range pkgs { - if len(pkg.CompiledGoFiles) == 0 && len(pkg.Errors) == 0 && pkg.PkgPath != "unsafe" { - // If a package consists only of test files, then - // go/packages incorrectly(?) returns an empty package for - // the non-test variant. Get rid of those packages. See - // #646. - // - // Do not, however, skip packages that have errors. Those, - // too, may have no files, but we want to print the - // errors. - continue - } - pkgs[n] = pkg - n++ - } - return pkgs[:n], nil -} - -// LoadFromExport loads a package from export data. All of its -// dependencies must have been loaded already. 
-func LoadFromExport(pkg *packages.Package) error { - pkg.IllTyped = true - for path, pkg := range pkg.Imports { - if pkg.Types == nil { - return fmt.Errorf("dependency %q hasn't been loaded yet", path) - } - } - if pkg.ExportFile == "" { - return fmt.Errorf("no export data for %q", pkg.ID) - } - f, err := os.Open(pkg.ExportFile) - if err != nil { - return err - } - defer f.Close() - - r, err := gcexportdata.NewReader(f) - if err != nil { - return err - } - - view := make(map[string]*types.Package) // view seen by gcexportdata - seen := make(map[*packages.Package]bool) // all visited packages - var visit func(pkgs map[string]*packages.Package) - visit = func(pkgs map[string]*packages.Package) { - for _, pkg := range pkgs { - if !seen[pkg] { - seen[pkg] = true - view[pkg.PkgPath] = pkg.Types - visit(pkg.Imports) - } - } - } - visit(pkg.Imports) - tpkg, err := gcexportdata.Read(r, pkg.Fset, view, pkg.PkgPath) - if err != nil { - return err - } - pkg.Types = tpkg - pkg.IllTyped = false - return nil -} - -// LoadFromSource loads a package from source. All of its dependencies -// must have been loaded already. -func LoadFromSource(pkg *packages.Package) error { - pkg.IllTyped = true - pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name) - - // OPT(dh): many packages have few files, much fewer than there - // are CPU cores. Additionally, parsing each individual file is - // very fast. A naive parallel implementation of this loop won't - // be faster, and tends to be slower due to extra scheduling, - // bookkeeping and potentially false sharing of cache lines. - pkg.Syntax = make([]*ast.File, len(pkg.CompiledGoFiles)) - for i, file := range pkg.CompiledGoFiles { - f, err := parser.ParseFile(pkg.Fset, file, nil, parser.ParseComments) - if err != nil { - pkg.Errors = append(pkg.Errors, convertError(err)...) - return err - } - pkg.Syntax[i] = f - } - pkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - } - - importer := func(path string) (*types.Package, error) { - if path == "unsafe" { - return types.Unsafe, nil - } - if path == "C" { - // go/packages doesn't tell us that cgo preprocessing - // failed. When we subsequently try to parse the package, - // we'll encounter the raw C import. - return nil, errors.New("cgo preprocessing failed") - } - imp := pkg.Imports[path] - if imp == nil { - return nil, nil - } - if len(imp.Errors) > 0 { - return nil, imp.Errors[0] - } - return imp.Types, nil - } - tc := &types.Config{ - Importer: importerFunc(importer), - Error: func(err error) { - pkg.Errors = append(pkg.Errors, convertError(err)...) 
- }, - } - err := types.NewChecker(tc, pkg.Fset, pkg.Types, pkg.TypesInfo).Files(pkg.Syntax) - if err != nil { - return err - } - pkg.IllTyped = false - return nil -} - -func convertError(err error) []packages.Error { - var errs []packages.Error - // taken from go/packages - switch err := err.(type) { - case packages.Error: - // from driver - errs = append(errs, err) - - case *os.PathError: - // from parser - errs = append(errs, packages.Error{ - Pos: err.Path + ":1", - Msg: err.Err.Error(), - Kind: packages.ParseError, - }) - - case scanner.ErrorList: - // from parser - for _, err := range err { - errs = append(errs, packages.Error{ - Pos: err.Pos.String(), - Msg: err.Msg, - Kind: packages.ParseError, - }) - } - - case types.Error: - // from type checker - errs = append(errs, packages.Error{ - Pos: err.Fset.Position(err.Pos).String(), - Msg: err.Msg, - Kind: packages.TypeError, - }) - - default: - // unexpected impoverished error from parser? - errs = append(errs, packages.Error{ - Pos: "-", - Msg: err.Error(), - Kind: packages.UnknownError, - }) - - // If you see this error message, please file a bug. - log.Printf("internal error: error %q (%T) without position", err, err) - } - return errs -} - -type importerFunc func(path string) (*types.Package, error) - -func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } diff --git a/vendor/honnef.co/go/tools/pattern/convert.go b/vendor/honnef.co/go/tools/pattern/convert.go deleted file mode 100644 index dfcd1560d74..00000000000 --- a/vendor/honnef.co/go/tools/pattern/convert.go +++ /dev/null @@ -1,242 +0,0 @@ -package pattern - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" - "reflect" -) - -var astTypes = map[string]reflect.Type{ - "Ellipsis": reflect.TypeOf(ast.Ellipsis{}), - "RangeStmt": reflect.TypeOf(ast.RangeStmt{}), - "AssignStmt": reflect.TypeOf(ast.AssignStmt{}), - "IndexExpr": reflect.TypeOf(ast.IndexExpr{}), - "Ident": reflect.TypeOf(ast.Ident{}), - "ValueSpec": reflect.TypeOf(ast.ValueSpec{}), - "GenDecl": reflect.TypeOf(ast.GenDecl{}), - "BinaryExpr": reflect.TypeOf(ast.BinaryExpr{}), - "ForStmt": reflect.TypeOf(ast.ForStmt{}), - "ArrayType": reflect.TypeOf(ast.ArrayType{}), - "DeferStmt": reflect.TypeOf(ast.DeferStmt{}), - "MapType": reflect.TypeOf(ast.MapType{}), - "ReturnStmt": reflect.TypeOf(ast.ReturnStmt{}), - "SliceExpr": reflect.TypeOf(ast.SliceExpr{}), - "StarExpr": reflect.TypeOf(ast.StarExpr{}), - "UnaryExpr": reflect.TypeOf(ast.UnaryExpr{}), - "SendStmt": reflect.TypeOf(ast.SendStmt{}), - "SelectStmt": reflect.TypeOf(ast.SelectStmt{}), - "ImportSpec": reflect.TypeOf(ast.ImportSpec{}), - "IfStmt": reflect.TypeOf(ast.IfStmt{}), - "GoStmt": reflect.TypeOf(ast.GoStmt{}), - "Field": reflect.TypeOf(ast.Field{}), - "SelectorExpr": reflect.TypeOf(ast.SelectorExpr{}), - "StructType": reflect.TypeOf(ast.StructType{}), - "KeyValueExpr": reflect.TypeOf(ast.KeyValueExpr{}), - "FuncType": reflect.TypeOf(ast.FuncType{}), - "FuncLit": reflect.TypeOf(ast.FuncLit{}), - "FuncDecl": reflect.TypeOf(ast.FuncDecl{}), - "ChanType": reflect.TypeOf(ast.ChanType{}), - "CallExpr": reflect.TypeOf(ast.CallExpr{}), - "CaseClause": reflect.TypeOf(ast.CaseClause{}), - "CommClause": reflect.TypeOf(ast.CommClause{}), - "CompositeLit": reflect.TypeOf(ast.CompositeLit{}), - "EmptyStmt": reflect.TypeOf(ast.EmptyStmt{}), - "SwitchStmt": reflect.TypeOf(ast.SwitchStmt{}), - "TypeSwitchStmt": reflect.TypeOf(ast.TypeSwitchStmt{}), - "TypeAssertExpr": reflect.TypeOf(ast.TypeAssertExpr{}), - "TypeSpec": 
reflect.TypeOf(ast.TypeSpec{}), - "InterfaceType": reflect.TypeOf(ast.InterfaceType{}), - "BranchStmt": reflect.TypeOf(ast.BranchStmt{}), - "IncDecStmt": reflect.TypeOf(ast.IncDecStmt{}), - "BasicLit": reflect.TypeOf(ast.BasicLit{}), -} - -func ASTToNode(node interface{}) Node { - switch node := node.(type) { - case *ast.File: - panic("cannot convert *ast.File to Node") - case nil: - return Nil{} - case string: - return String(node) - case token.Token: - return Token(node) - case *ast.ExprStmt: - return ASTToNode(node.X) - case *ast.BlockStmt: - if node == nil { - return Nil{} - } - return ASTToNode(node.List) - case *ast.FieldList: - if node == nil { - return Nil{} - } - return ASTToNode(node.List) - case *ast.BasicLit: - if node == nil { - return Nil{} - } - case *ast.ParenExpr: - return ASTToNode(node.X) - } - - if node, ok := node.(ast.Node); ok { - name := reflect.TypeOf(node).Elem().Name() - T, ok := structNodes[name] - if !ok { - panic(fmt.Sprintf("internal error: unhandled type %T", node)) - } - - if reflect.ValueOf(node).IsNil() { - return Nil{} - } - v := reflect.ValueOf(node).Elem() - objs := make([]Node, T.NumField()) - for i := 0; i < T.NumField(); i++ { - f := v.FieldByName(T.Field(i).Name) - objs[i] = ASTToNode(f.Interface()) - } - - n, err := populateNode(name, objs, false) - if err != nil { - panic(fmt.Sprintf("internal error: %s", err)) - } - return n - } - - s := reflect.ValueOf(node) - if s.Kind() == reflect.Slice { - if s.Len() == 0 { - return List{} - } - if s.Len() == 1 { - return ASTToNode(s.Index(0).Interface()) - } - - tail := List{} - for i := s.Len() - 1; i >= 0; i-- { - head := ASTToNode(s.Index(i).Interface()) - l := List{ - Head: head, - Tail: tail, - } - tail = l - } - return tail - } - - panic(fmt.Sprintf("internal error: unhandled type %T", node)) -} - -func NodeToAST(node Node, state State) interface{} { - switch node := node.(type) { - case Binding: - v, ok := state[node.Name] - if !ok { - // really we want to return an error here - panic("XXX") - } - switch v := v.(type) { - case types.Object: - return &ast.Ident{Name: v.Name()} - default: - return v - } - case Builtin, Any, Object, Function, Not, Or: - panic("XXX") - case List: - if (node == List{}) { - return []ast.Node{} - } - x := []ast.Node{NodeToAST(node.Head, state).(ast.Node)} - x = append(x, NodeToAST(node.Tail, state).([]ast.Node)...) 
- return x - case Token: - return token.Token(node) - case String: - return string(node) - case Nil: - return nil - } - - name := reflect.TypeOf(node).Name() - T, ok := astTypes[name] - if !ok { - panic(fmt.Sprintf("internal error: unhandled type %T", node)) - } - v := reflect.ValueOf(node) - out := reflect.New(T) - for i := 0; i < T.NumField(); i++ { - fNode := v.FieldByName(T.Field(i).Name) - if (fNode == reflect.Value{}) { - continue - } - fAST := out.Elem().FieldByName(T.Field(i).Name) - switch fAST.Type().Kind() { - case reflect.Slice: - c := reflect.ValueOf(NodeToAST(fNode.Interface().(Node), state)) - if c.Kind() != reflect.Slice { - // it's a single node in the pattern, we have to wrap - // it in a slice - slice := reflect.MakeSlice(fAST.Type(), 1, 1) - slice.Index(0).Set(c) - c = slice - } - switch fAST.Interface().(type) { - case []ast.Node: - switch cc := c.Interface().(type) { - case []ast.Node: - fAST.Set(c) - case []ast.Expr: - var slice []ast.Node - for _, el := range cc { - slice = append(slice, el) - } - fAST.Set(reflect.ValueOf(slice)) - default: - panic("XXX") - } - case []ast.Expr: - switch cc := c.Interface().(type) { - case []ast.Node: - var slice []ast.Expr - for _, el := range cc { - slice = append(slice, el.(ast.Expr)) - } - fAST.Set(reflect.ValueOf(slice)) - case []ast.Expr: - fAST.Set(c) - default: - panic("XXX") - } - default: - panic("XXX") - } - case reflect.Int: - c := reflect.ValueOf(NodeToAST(fNode.Interface().(Node), state)) - switch c.Kind() { - case reflect.String: - tok, ok := tokensByString[c.Interface().(string)] - if !ok { - // really we want to return an error here - panic("XXX") - } - fAST.SetInt(int64(tok)) - case reflect.Int: - fAST.Set(c) - default: - panic(fmt.Sprintf("internal error: unexpected kind %s", c.Kind())) - } - default: - r := NodeToAST(fNode.Interface().(Node), state) - if r != nil { - fAST.Set(reflect.ValueOf(r)) - } - } - } - - return out.Interface().(ast.Node) -} diff --git a/vendor/honnef.co/go/tools/pattern/doc.go b/vendor/honnef.co/go/tools/pattern/doc.go deleted file mode 100644 index 05d86c25144..00000000000 --- a/vendor/honnef.co/go/tools/pattern/doc.go +++ /dev/null @@ -1,273 +0,0 @@ -/* -Package pattern implements a simple language for pattern matching Go ASTs. - -Design decisions and trade-offs - -The language is designed specifically for the task of filtering ASTs -to simplify the implementation of analyses in staticcheck. -It is also intended to be trivial to parse and execute. - -To that end, we make certain decisions that make the language more -suited to its task, while making certain queries infeasible. - -Furthermore, it is fully expected that the majority of analyses will still require ordinary Go code -to further process the filtered AST, to make use of type information and to enforce complex invariants. -It is not our goal to design a scripting language for writing entire checks in. - -The language - -At its core, patterns are a representation of Go ASTs, allowing for the use of placeholders to enable pattern matching. -Their syntax is inspired by LISP and Haskell, but unlike LISP, the core unit of patterns isn't the list, but the node. -There is a fixed set of nodes, identified by name, and with the exception of the Or node, all nodes have a fixed number of arguments. -In addition to nodes, there are atoms, which represent basic units such as strings or the nil value. - -Pattern matching is implemented via bindings, represented by the Binding node. 
-A Binding can match nodes and associate them with names, to later recall the nodes. -This allows for expressing "this node must be equal to that node" constraints. - -To simplify writing and reading patterns, a small amount of additional syntax exists on top of nodes and atoms. -This additional syntax doesn't add any new features of its own, it simply provides shortcuts to creating nodes and atoms. - -To show an example of a pattern, first consider this snippet of Go code: - - if x := fn(); x != nil { - for _, v := range x { - println(v, x) - } - } - -The corresponding AST expressed as an idiomatic pattern would look as follows: - - (IfStmt - (AssignStmt (Ident "x") ":=" (CallExpr (Ident "fn") [])) - (BinaryExpr (Ident "x") "!=" (Ident "nil")) - (RangeStmt - (Ident "_") (Ident "v") ":=" (Ident "x") - (CallExpr (Ident "println") [(Ident "v") (Ident "x")])) - nil) - -Two things are worth noting about this representation. -First, the [el1 el2 ...] syntax is a short-hand for creating lists. -It is a short-hand for el1:el2:[], which itself is a short-hand for (List el1 (List el2 (List nil nil)). -Second, note the absence of a lot of lists in places that normally accept lists. -For example, assignment assigns a number of right-hands to a number of left-hands, yet our AssignStmt is lacking any form of list. -This is due to the fact that a single node can match a list of exactly one element. -Thus, the two following forms have identical matching behavior: - - (AssignStmt (Ident "x") ":=" (CallExpr (Ident "fn") [])) - (AssignStmt [(Ident "x")] ":=" [(CallExpr (Ident "fn") [])]) - -This section serves as an overview of the language's syntax. -More in-depth explanations of the matching behavior as well as an exhaustive list of node types follows in the coming sections. - -Pattern matching - -TODO write about pattern matching - -- inspired by haskell syntax, but much, much simpler and naive - -Node types - -The language contains two kinds of nodes: those that map to nodes in the AST, and those that implement additional logic. - -Nodes that map directly to AST nodes are named identically to the types in the go/ast package. -What follows is an exhaustive list of these nodes: - - (ArrayType len elt) - (AssignStmt lhs tok rhs) - (BasicLit kind value) - (BinaryExpr x op y) - (BranchStmt tok label) - (CallExpr fun args) - (CaseClause list body) - (ChanType dir value) - (CommClause comm body) - (CompositeLit type elts) - (DeferStmt call) - (Ellipsis elt) - (EmptyStmt) - (Field names type tag) - (ForStmt init cond post body) - (FuncDecl recv name type body) - (FuncLit type body) - (FuncType params results) - (GenDecl specs) - (GoStmt call) - (Ident name) - (IfStmt init cond body else) - (ImportSpec name path) - (IncDecStmt x tok) - (IndexExpr x index) - (InterfaceType methods) - (KeyValueExpr key value) - (MapType key value) - (RangeStmt key value tok x body) - (ReturnStmt results) - (SelectStmt body) - (SelectorExpr x sel) - (SendStmt chan value) - (SliceExpr x low high max) - (StarExpr x) - (StructType fields) - (SwitchStmt init tag body) - (TypeAssertExpr) - (TypeSpec name type) - (TypeSwitchStmt init assign body) - (UnaryExpr op x) - (ValueSpec names type values) - -Additionally, there are the String, Token and nil atoms. -Strings are double-quoted string literals, as in (Ident "someName"). 
-Tokens are also represented as double-quoted string literals, but are converted to token.Token values in contexts that require tokens, -such as in (BinaryExpr x "<" y), where "<" is transparently converted to token.LSS during matching. -The keyword 'nil' denotes the nil value, which represents the absence of any value. - -We also defines the (List head tail) node, which is used to represent sequences of elements as a singly linked list. -The head is a single element, and the tail is the remainder of the list. -For example, - - (List "foo" (List "bar" (List "baz" (List nil nil)))) - -represents a list of three elements, "foo", "bar" and "baz". There is dedicated syntax for writing lists, which looks as follows: - - ["foo" "bar" "baz"] - -This syntax is itself syntactic sugar for the following form: - - "foo":"bar":"baz":[] - -This form is of particular interest for pattern matching, as it allows matching on the head and tail. For example, - - "foo":"bar":_ - -would match any list with at least two elements, where the first two elements are "foo" and "bar". This is equivalent to writing - - (List "foo" (List "bar" _)) - -Note that it is not possible to match from the end of the list. -That is, there is no way to express a query such as "a list of any length where the last element is foo". - -Note that unlike in LISP, nil and empty lists are distinct from one another. -In patterns, with respect to lists, nil is akin to Go's untyped nil. -It will match a nil ast.Node, but it will not match a nil []ast.Expr. Nil will, however, match pointers to named types such as *ast.Ident. -Similarly, lists are akin to Go's -slices. An empty list will match both a nil and an empty []ast.Expr, but it will not match a nil ast.Node. - -Due to the difference between nil and empty lists, an empty list is represented as (List nil nil), i.e. a list with no head or tail. -Similarly, a list of one element is represented as (List el (List nil nil)). Unlike in LISP, it cannot be represented by (List el nil). - -Finally, there are nodes that implement special logic or matching behavior. - -(Any) matches any value. The underscore (_) maps to this node, making the following two forms equivalent: - - (Ident _) - (Ident (Any)) - -(Builtin name) matches a built-in identifier or function by name. -This is a type-aware variant of (Ident name). -Instead of only comparing the name, it resolves the object behind the name and makes sure it's a pre-declared identifier. - -For example, in the following piece of code - - func fn() { - println(true) - true := false - println(true) - } - -the pattern - - (Builtin "true") - -will match exactly once, on the first use of 'true' in the function. -Subsequent occurrences of 'true' no longer refer to the pre-declared identifier. - -(Object name) matches an identifier by name, but yields the -types.Object it refers to. - -(Function name) matches ast.Idents and ast.SelectorExprs that refer to a function with a given fully qualified name. -For example, "net/url.PathEscape" matches the PathEscape function in the net/url package, -and "(net/url.EscapeError).Error" refers to the Error method on the net/url.EscapeError type, -either on an instance of the type, or on the type itself. 
- -For example, the following patterns match the following lines of code: - - (CallExpr (Function "fmt.Println") _) // pattern 1 - (CallExpr (Function "(net/url.EscapeError).Error") _) // pattern 2 - - fmt.Println("hello, world") // matches pattern 1 - var x url.EscapeError - x.Error() // matches pattern 2 - (url.EscapeError).Error(x) // also matches pattern 2 - -(Binding name node) creates or uses a binding. -Bindings work like variable assignments, allowing referring to already matched nodes. -As an example, bindings are necessary to match self-assignment of the form "x = x", -since we need to express that the right-hand side is identical to the left-hand side. - -If a binding's node is not nil, the matcher will attempt to match a node according to the pattern. -If a binding's node is nil, the binding will either recall an existing value, or match the Any node. -It is an error to provide a non-nil node to a binding that has already been bound. - -Referring back to the earlier example, the following pattern will match self-assignment of idents: - - (AssignStmt (Binding "lhs" (Ident _)) "=" (Binding "lhs" nil)) - -Because bindings are a crucial component of pattern matching, there is special syntax for creating and recalling bindings. -Lower-case names refer to bindings. If standing on its own, the name "foo" will be equivalent to (Binding "foo" nil). -If a name is followed by an at-sign (@) then it will create a binding for the node that follows. -Together, this allows us to rewrite the earlier example as follows: - - (AssignStmt lhs@(Ident _) "=" lhs) - -(Or nodes...) is a variadic node that tries matching each node until one succeeds. For example, the following pattern matches all idents of name "foo" or "bar": - - (Ident (Or "foo" "bar")) - -We could also have written - - (Or (Ident "foo") (Ident "bar")) - -and achieved the same result. We can also mix different kinds of nodes: - - (Or (Ident "foo") (CallExpr (Ident "bar") _)) - -When using bindings inside of nodes used inside Or, all or none of the bindings will be bound. -That is, partially matched nodes that ultimately failed to match will not produce any bindings observable outside of the matching attempt. -We can thus write - - (Or (Ident name) (CallExpr name)) - -and 'name' will either be a String if the first option matched, or an Ident or SelectorExpr if the second option matched. - -(Not node) - -The Not node negates a match. For example, (Not (Ident _)) will match all nodes that aren't identifiers. - -ChanDir(0) - -Automatic unnesting of AST nodes - -The Go AST has several types of nodes that wrap other nodes. -To simplify matching, we automatically unwrap some of these nodes. - -These nodes are ExprStmt (for using expressions in a statement context), -ParenExpr (for parenthesized expressions), -DeclStmt (for declarations in a statement context), -and LabeledStmt (for labeled statements). - -Thus, the query - - (FuncLit _ [(CallExpr _ _)] - -will match a function literal containing a single function call, -even though in the actual Go AST, the CallExpr is nested inside an ExprStmt, -as function bodies are made up of sequences of statements. - -On the flip-side, there is no way to specifically match these wrapper nodes. 
-For example, there is no way of searching for unnecessary parentheses, like in the following piece of Go code: - - ((x)) += 2 - -*/ -package pattern diff --git a/vendor/honnef.co/go/tools/pattern/fuzz.go b/vendor/honnef.co/go/tools/pattern/fuzz.go deleted file mode 100644 index 52e7df9742b..00000000000 --- a/vendor/honnef.co/go/tools/pattern/fuzz.go +++ /dev/null @@ -1,50 +0,0 @@ -// +build gofuzz - -package pattern - -import ( - "go/ast" - goparser "go/parser" - "go/token" - "os" - "path/filepath" - "strings" -) - -var files []*ast.File - -func init() { - fset := token.NewFileSet() - filepath.Walk("/usr/lib/go/src", func(path string, info os.FileInfo, err error) error { - if err != nil { - // XXX error handling - panic(err) - } - if !strings.HasSuffix(path, ".go") { - return nil - } - f, err := goparser.ParseFile(fset, path, nil, 0) - if err != nil { - return nil - } - files = append(files, f) - return nil - }) -} - -func Fuzz(data []byte) int { - p := &Parser{} - pat, err := p.Parse(string(data)) - if err != nil { - if strings.Contains(err.Error(), "internal error") { - panic(err) - } - return 0 - } - _ = pat.Root.String() - - for _, f := range files { - Match(pat.Root, f) - } - return 1 -} diff --git a/vendor/honnef.co/go/tools/pattern/lexer.go b/vendor/honnef.co/go/tools/pattern/lexer.go deleted file mode 100644 index fb72e392bde..00000000000 --- a/vendor/honnef.co/go/tools/pattern/lexer.go +++ /dev/null @@ -1,221 +0,0 @@ -package pattern - -import ( - "fmt" - "go/token" - "unicode" - "unicode/utf8" -) - -type lexer struct { - f *token.File - - input string - start int - pos int - width int - items chan item -} - -type itemType int - -const eof = -1 - -const ( - itemError itemType = iota - itemLeftParen - itemRightParen - itemLeftBracket - itemRightBracket - itemTypeName - itemVariable - itemAt - itemColon - itemBlank - itemString - itemEOF -) - -func (typ itemType) String() string { - switch typ { - case itemError: - return "ERROR" - case itemLeftParen: - return "(" - case itemRightParen: - return ")" - case itemLeftBracket: - return "[" - case itemRightBracket: - return "]" - case itemTypeName: - return "TYPE" - case itemVariable: - return "VAR" - case itemAt: - return "@" - case itemColon: - return ":" - case itemBlank: - return "_" - case itemString: - return "STRING" - case itemEOF: - return "EOF" - default: - return fmt.Sprintf("itemType(%d)", typ) - } -} - -type item struct { - typ itemType - val string - pos int -} - -type stateFn func(*lexer) stateFn - -func (l *lexer) run() { - for state := lexStart; state != nil; { - state = state(l) - } - close(l.items) -} - -func (l *lexer) emitValue(t itemType, value string) { - l.items <- item{t, value, l.start} - l.start = l.pos -} - -func (l *lexer) emit(t itemType) { - l.items <- item{t, l.input[l.start:l.pos], l.start} - l.start = l.pos -} - -func lexStart(l *lexer) stateFn { - switch r := l.next(); { - case r == eof: - l.emit(itemEOF) - return nil - case unicode.IsSpace(r): - l.ignore() - case r == '(': - l.emit(itemLeftParen) - case r == ')': - l.emit(itemRightParen) - case r == '[': - l.emit(itemLeftBracket) - case r == ']': - l.emit(itemRightBracket) - case r == '@': - l.emit(itemAt) - case r == ':': - l.emit(itemColon) - case r == '_': - l.emit(itemBlank) - case r == '"': - l.backup() - return lexString - case unicode.IsUpper(r): - l.backup() - return lexType - case unicode.IsLower(r): - l.backup() - return lexVariable - default: - return l.errorf("unexpected character %c", r) - } - return lexStart -} - -func (l *lexer) next() 
(r rune) { - if l.pos >= len(l.input) { - l.width = 0 - return eof - } - r, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) - - if r == '\n' { - l.f.AddLine(l.pos) - } - - l.pos += l.width - - return r -} - -func (l *lexer) ignore() { - l.start = l.pos -} - -func (l *lexer) backup() { - l.pos -= l.width -} - -func (l *lexer) errorf(format string, args ...interface{}) stateFn { - // TODO(dh): emit position information in errors - l.items <- item{ - itemError, - fmt.Sprintf(format, args...), - l.start, - } - return nil -} - -func isAlphaNumeric(r rune) bool { - return r >= '0' && r <= '9' || - r >= 'a' && r <= 'z' || - r >= 'A' && r <= 'Z' -} - -func lexString(l *lexer) stateFn { - l.next() // skip quote - escape := false - - var runes []rune - for { - switch r := l.next(); r { - case eof: - return l.errorf("unterminated string") - case '"': - if !escape { - l.emitValue(itemString, string(runes)) - return lexStart - } else { - runes = append(runes, '"') - escape = false - } - case '\\': - if escape { - runes = append(runes, '\\') - escape = false - } else { - escape = true - } - default: - runes = append(runes, r) - } - } -} - -func lexType(l *lexer) stateFn { - l.next() - for { - if !isAlphaNumeric(l.next()) { - l.backup() - l.emit(itemTypeName) - return lexStart - } - } -} - -func lexVariable(l *lexer) stateFn { - l.next() - for { - if !isAlphaNumeric(l.next()) { - l.backup() - l.emit(itemVariable) - return lexStart - } - } -} diff --git a/vendor/honnef.co/go/tools/pattern/match.go b/vendor/honnef.co/go/tools/pattern/match.go deleted file mode 100644 index ff039baa75d..00000000000 --- a/vendor/honnef.co/go/tools/pattern/match.go +++ /dev/null @@ -1,513 +0,0 @@ -package pattern - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" - "reflect" - - "honnef.co/go/tools/lint" -) - -var tokensByString = map[string]Token{ - "INT": Token(token.INT), - "FLOAT": Token(token.FLOAT), - "IMAG": Token(token.IMAG), - "CHAR": Token(token.CHAR), - "STRING": Token(token.STRING), - "+": Token(token.ADD), - "-": Token(token.SUB), - "*": Token(token.MUL), - "/": Token(token.QUO), - "%": Token(token.REM), - "&": Token(token.AND), - "|": Token(token.OR), - "^": Token(token.XOR), - "<<": Token(token.SHL), - ">>": Token(token.SHR), - "&^": Token(token.AND_NOT), - "+=": Token(token.ADD_ASSIGN), - "-=": Token(token.SUB_ASSIGN), - "*=": Token(token.MUL_ASSIGN), - "/=": Token(token.QUO_ASSIGN), - "%=": Token(token.REM_ASSIGN), - "&=": Token(token.AND_ASSIGN), - "|=": Token(token.OR_ASSIGN), - "^=": Token(token.XOR_ASSIGN), - "<<=": Token(token.SHL_ASSIGN), - ">>=": Token(token.SHR_ASSIGN), - "&^=": Token(token.AND_NOT_ASSIGN), - "&&": Token(token.LAND), - "||": Token(token.LOR), - "<-": Token(token.ARROW), - "++": Token(token.INC), - "--": Token(token.DEC), - "==": Token(token.EQL), - "<": Token(token.LSS), - ">": Token(token.GTR), - "=": Token(token.ASSIGN), - "!": Token(token.NOT), - "!=": Token(token.NEQ), - "<=": Token(token.LEQ), - ">=": Token(token.GEQ), - ":=": Token(token.DEFINE), - "...": Token(token.ELLIPSIS), - "IMPORT": Token(token.IMPORT), - "VAR": Token(token.VAR), - "TYPE": Token(token.TYPE), - "CONST": Token(token.CONST), -} - -func maybeToken(node Node) (Node, bool) { - if node, ok := node.(String); ok { - if tok, ok := tokensByString[string(node)]; ok { - return tok, true - } - return node, false - } - return node, false -} - -func isNil(v interface{}) bool { - if v == nil { - return true - } - if _, ok := v.(Nil); ok { - return true - } - return false -} - -type matcher interface { - 
Match(*Matcher, interface{}) (interface{}, bool) -} - -type State = map[string]interface{} - -type Matcher struct { - TypesInfo *types.Info - State State -} - -func (m *Matcher) fork() *Matcher { - state := make(State, len(m.State)) - for k, v := range m.State { - state[k] = v - } - return &Matcher{ - TypesInfo: m.TypesInfo, - State: state, - } -} - -func (m *Matcher) merge(mc *Matcher) { - m.State = mc.State -} - -func (m *Matcher) Match(a Node, b ast.Node) bool { - m.State = State{} - _, ok := match(m, a, b) - return ok -} - -func Match(a Node, b ast.Node) (*Matcher, bool) { - m := &Matcher{} - ret := m.Match(a, b) - return m, ret -} - -// Match two items, which may be (Node, AST) or (AST, AST) -func match(m *Matcher, l, r interface{}) (interface{}, bool) { - if _, ok := r.(Node); ok { - panic("Node mustn't be on right side of match") - } - - switch l := l.(type) { - case *ast.ParenExpr: - return match(m, l.X, r) - case *ast.ExprStmt: - return match(m, l.X, r) - case *ast.DeclStmt: - return match(m, l.Decl, r) - case *ast.LabeledStmt: - return match(m, l.Stmt, r) - case *ast.BlockStmt: - return match(m, l.List, r) - case *ast.FieldList: - return match(m, l.List, r) - } - - switch r := r.(type) { - case *ast.ParenExpr: - return match(m, l, r.X) - case *ast.ExprStmt: - return match(m, l, r.X) - case *ast.DeclStmt: - return match(m, l, r.Decl) - case *ast.LabeledStmt: - return match(m, l, r.Stmt) - case *ast.BlockStmt: - if r == nil { - return match(m, l, nil) - } - return match(m, l, r.List) - case *ast.FieldList: - if r == nil { - return match(m, l, nil) - } - return match(m, l, r.List) - case *ast.BasicLit: - if r == nil { - return match(m, l, nil) - } - } - - if l, ok := l.(matcher); ok { - return l.Match(m, r) - } - - if l, ok := l.(Node); ok { - // Matching of pattern with concrete value - return matchNodeAST(m, l, r) - } - - if l == nil || r == nil { - return nil, l == r - } - - { - ln, ok1 := l.(ast.Node) - rn, ok2 := r.(ast.Node) - if ok1 && ok2 { - return matchAST(m, ln, rn) - } - } - - { - obj, ok := l.(types.Object) - if ok { - switch r := r.(type) { - case *ast.Ident: - return obj, obj == m.TypesInfo.ObjectOf(r) - case *ast.SelectorExpr: - return obj, obj == m.TypesInfo.ObjectOf(r.Sel) - default: - return obj, false - } - } - } - - { - ln, ok1 := l.([]ast.Expr) - rn, ok2 := r.([]ast.Expr) - if ok1 || ok2 { - if ok1 && !ok2 { - rn = []ast.Expr{r.(ast.Expr)} - } else if !ok1 && ok2 { - ln = []ast.Expr{l.(ast.Expr)} - } - - if len(ln) != len(rn) { - return nil, false - } - for i, ll := range ln { - if _, ok := match(m, ll, rn[i]); !ok { - return nil, false - } - } - return r, true - } - } - - { - ln, ok1 := l.([]ast.Stmt) - rn, ok2 := r.([]ast.Stmt) - if ok1 || ok2 { - if ok1 && !ok2 { - rn = []ast.Stmt{r.(ast.Stmt)} - } else if !ok1 && ok2 { - ln = []ast.Stmt{l.(ast.Stmt)} - } - - if len(ln) != len(rn) { - return nil, false - } - for i, ll := range ln { - if _, ok := match(m, ll, rn[i]); !ok { - return nil, false - } - } - return r, true - } - } - - panic(fmt.Sprintf("unsupported comparison: %T and %T", l, r)) -} - -// Match a Node with an AST node -func matchNodeAST(m *Matcher, a Node, b interface{}) (interface{}, bool) { - switch b := b.(type) { - case []ast.Stmt: - // 'a' is not a List or we'd be using its Match - // implementation. - - if len(b) != 1 { - return nil, false - } - return match(m, a, b[0]) - case []ast.Expr: - // 'a' is not a List or we'd be using its Match - // implementation. 
- - if len(b) != 1 { - return nil, false - } - return match(m, a, b[0]) - case ast.Node: - ra := reflect.ValueOf(a) - rb := reflect.ValueOf(b).Elem() - - if ra.Type().Name() != rb.Type().Name() { - return nil, false - } - - for i := 0; i < ra.NumField(); i++ { - af := ra.Field(i) - fieldName := ra.Type().Field(i).Name - bf := rb.FieldByName(fieldName) - if (bf == reflect.Value{}) { - panic(fmt.Sprintf("internal error: could not find field %s in type %t when comparing with %T", fieldName, b, a)) - } - ai := af.Interface() - bi := bf.Interface() - if ai == nil { - return b, bi == nil - } - if _, ok := match(m, ai.(Node), bi); !ok { - return b, false - } - } - return b, true - case nil: - return nil, a == Nil{} - default: - panic(fmt.Sprintf("unhandled type %T", b)) - } -} - -// Match two AST nodes -func matchAST(m *Matcher, a, b ast.Node) (interface{}, bool) { - ra := reflect.ValueOf(a) - rb := reflect.ValueOf(b) - - if ra.Type() != rb.Type() { - return nil, false - } - if ra.IsNil() || rb.IsNil() { - return rb, ra.IsNil() == rb.IsNil() - } - - ra = ra.Elem() - rb = rb.Elem() - for i := 0; i < ra.NumField(); i++ { - af := ra.Field(i) - bf := rb.Field(i) - if af.Type() == rtTokPos || af.Type() == rtObject || af.Type() == rtCommentGroup { - continue - } - - switch af.Kind() { - case reflect.Slice: - if af.Len() != bf.Len() { - return nil, false - } - for j := 0; j < af.Len(); j++ { - if _, ok := match(m, af.Index(j).Interface().(ast.Node), bf.Index(j).Interface().(ast.Node)); !ok { - return nil, false - } - } - case reflect.String: - if af.String() != bf.String() { - return nil, false - } - case reflect.Int: - if af.Int() != bf.Int() { - return nil, false - } - case reflect.Bool: - if af.Bool() != bf.Bool() { - return nil, false - } - case reflect.Ptr, reflect.Interface: - if _, ok := match(m, af.Interface(), bf.Interface()); !ok { - return nil, false - } - default: - panic(fmt.Sprintf("internal error: unhandled kind %s (%T)", af.Kind(), af.Interface())) - } - } - return b, true -} - -func (b Binding) Match(m *Matcher, node interface{}) (interface{}, bool) { - if isNil(b.Node) { - v, ok := m.State[b.Name] - if ok { - // Recall value - return match(m, v, node) - } - // Matching anything - b.Node = Any{} - } - - // Store value - if _, ok := m.State[b.Name]; ok { - panic(fmt.Sprintf("binding already created: %s", b.Name)) - } - new, ret := match(m, b.Node, node) - if ret { - m.State[b.Name] = new - } - return new, ret -} - -func (Any) Match(m *Matcher, node interface{}) (interface{}, bool) { - return node, true -} - -func (l List) Match(m *Matcher, node interface{}) (interface{}, bool) { - v := reflect.ValueOf(node) - if v.Kind() == reflect.Slice { - if isNil(l.Head) { - return node, v.Len() == 0 - } - if v.Len() == 0 { - return nil, false - } - // OPT(dh): don't check the entire tail if head didn't match - _, ok1 := match(m, l.Head, v.Index(0).Interface()) - _, ok2 := match(m, l.Tail, v.Slice(1, v.Len()).Interface()) - return node, ok1 && ok2 - } - // Our empty list does not equal an untyped Go nil. This way, we can - // tell apart an if with no else and an if with an empty else. 
- return nil, false -} - -func (s String) Match(m *Matcher, node interface{}) (interface{}, bool) { - switch o := node.(type) { - case token.Token: - if tok, ok := maybeToken(s); ok { - return match(m, tok, node) - } - return nil, false - case string: - return o, string(s) == o - default: - return nil, false - } -} - -func (tok Token) Match(m *Matcher, node interface{}) (interface{}, bool) { - o, ok := node.(token.Token) - if !ok { - return nil, false - } - return o, token.Token(tok) == o -} - -func (Nil) Match(m *Matcher, node interface{}) (interface{}, bool) { - return nil, isNil(node) -} - -func (builtin Builtin) Match(m *Matcher, node interface{}) (interface{}, bool) { - ident, ok := node.(*ast.Ident) - if !ok { - return nil, false - } - obj := m.TypesInfo.ObjectOf(ident) - if obj != types.Universe.Lookup(ident.Name) { - return nil, false - } - return match(m, builtin.Name, ident.Name) -} - -func (obj Object) Match(m *Matcher, node interface{}) (interface{}, bool) { - ident, ok := node.(*ast.Ident) - if !ok { - return nil, false - } - - id := m.TypesInfo.ObjectOf(ident) - _, ok = match(m, obj.Name, ident.Name) - return id, ok -} - -func (fn Function) Match(m *Matcher, node interface{}) (interface{}, bool) { - var name string - var obj types.Object - switch node := node.(type) { - case *ast.Ident: - obj = m.TypesInfo.ObjectOf(node) - switch obj := obj.(type) { - case *types.Func: - name = lint.FuncName(obj) - case *types.Builtin: - name = obj.Name() - default: - return nil, false - } - case *ast.SelectorExpr: - var ok bool - obj, ok = m.TypesInfo.ObjectOf(node.Sel).(*types.Func) - if !ok { - return nil, false - } - name = lint.FuncName(obj.(*types.Func)) - default: - return nil, false - } - _, ok := match(m, fn.Name, name) - return obj, ok -} - -func (or Or) Match(m *Matcher, node interface{}) (interface{}, bool) { - for _, opt := range or.Nodes { - mc := m.fork() - if ret, ok := match(mc, opt, node); ok { - m.merge(mc) - return ret, true - } - } - return nil, false -} - -func (not Not) Match(m *Matcher, node interface{}) (interface{}, bool) { - _, ok := match(m, not.Node, node) - if ok { - return nil, false - } - return node, true -} - -var ( - // Types of fields in go/ast structs that we want to skip - rtTokPos = reflect.TypeOf(token.Pos(0)) - rtObject = reflect.TypeOf((*ast.Object)(nil)) - rtCommentGroup = reflect.TypeOf((*ast.CommentGroup)(nil)) -) - -var ( - _ matcher = Binding{} - _ matcher = Any{} - _ matcher = List{} - _ matcher = String("") - _ matcher = Token(0) - _ matcher = Nil{} - _ matcher = Builtin{} - _ matcher = Object{} - _ matcher = Function{} - _ matcher = Or{} - _ matcher = Not{} -) diff --git a/vendor/honnef.co/go/tools/pattern/parser.go b/vendor/honnef.co/go/tools/pattern/parser.go deleted file mode 100644 index 009238b8608..00000000000 --- a/vendor/honnef.co/go/tools/pattern/parser.go +++ /dev/null @@ -1,455 +0,0 @@ -package pattern - -import ( - "fmt" - "go/ast" - "go/token" - "reflect" -) - -type Pattern struct { - Root Node - // Relevant contains instances of ast.Node that could potentially - // initiate a successful match of the pattern. - Relevant []reflect.Type -} - -func MustParse(s string) Pattern { - p := &Parser{AllowTypeInfo: true} - pat, err := p.Parse(s) - if err != nil { - panic(err) - } - return pat -} - -func roots(node Node) []reflect.Type { - switch node := node.(type) { - case Or: - var out []reflect.Type - for _, el := range node.Nodes { - out = append(out, roots(el)...) 
- } - return out - case Not: - return roots(node.Node) - case Binding: - return roots(node.Node) - case Nil, nil: - // this branch is reached via bindings - return allTypes - default: - Ts, ok := nodeToASTTypes[reflect.TypeOf(node)] - if !ok { - panic(fmt.Sprintf("internal error: unhandled type %T", node)) - } - return Ts - } -} - -var allTypes = []reflect.Type{ - reflect.TypeOf((*ast.RangeStmt)(nil)), - reflect.TypeOf((*ast.AssignStmt)(nil)), - reflect.TypeOf((*ast.IndexExpr)(nil)), - reflect.TypeOf((*ast.Ident)(nil)), - reflect.TypeOf((*ast.ValueSpec)(nil)), - reflect.TypeOf((*ast.GenDecl)(nil)), - reflect.TypeOf((*ast.BinaryExpr)(nil)), - reflect.TypeOf((*ast.ForStmt)(nil)), - reflect.TypeOf((*ast.ArrayType)(nil)), - reflect.TypeOf((*ast.DeferStmt)(nil)), - reflect.TypeOf((*ast.MapType)(nil)), - reflect.TypeOf((*ast.ReturnStmt)(nil)), - reflect.TypeOf((*ast.SliceExpr)(nil)), - reflect.TypeOf((*ast.StarExpr)(nil)), - reflect.TypeOf((*ast.UnaryExpr)(nil)), - reflect.TypeOf((*ast.SendStmt)(nil)), - reflect.TypeOf((*ast.SelectStmt)(nil)), - reflect.TypeOf((*ast.ImportSpec)(nil)), - reflect.TypeOf((*ast.IfStmt)(nil)), - reflect.TypeOf((*ast.GoStmt)(nil)), - reflect.TypeOf((*ast.Field)(nil)), - reflect.TypeOf((*ast.SelectorExpr)(nil)), - reflect.TypeOf((*ast.StructType)(nil)), - reflect.TypeOf((*ast.KeyValueExpr)(nil)), - reflect.TypeOf((*ast.FuncType)(nil)), - reflect.TypeOf((*ast.FuncLit)(nil)), - reflect.TypeOf((*ast.FuncDecl)(nil)), - reflect.TypeOf((*ast.ChanType)(nil)), - reflect.TypeOf((*ast.CallExpr)(nil)), - reflect.TypeOf((*ast.CaseClause)(nil)), - reflect.TypeOf((*ast.CommClause)(nil)), - reflect.TypeOf((*ast.CompositeLit)(nil)), - reflect.TypeOf((*ast.EmptyStmt)(nil)), - reflect.TypeOf((*ast.SwitchStmt)(nil)), - reflect.TypeOf((*ast.TypeSwitchStmt)(nil)), - reflect.TypeOf((*ast.TypeAssertExpr)(nil)), - reflect.TypeOf((*ast.TypeSpec)(nil)), - reflect.TypeOf((*ast.InterfaceType)(nil)), - reflect.TypeOf((*ast.BranchStmt)(nil)), - reflect.TypeOf((*ast.IncDecStmt)(nil)), - reflect.TypeOf((*ast.BasicLit)(nil)), -} - -var nodeToASTTypes = map[reflect.Type][]reflect.Type{ - reflect.TypeOf(String("")): nil, - reflect.TypeOf(Token(0)): nil, - reflect.TypeOf(List{}): {reflect.TypeOf((*ast.BlockStmt)(nil)), reflect.TypeOf((*ast.FieldList)(nil))}, - reflect.TypeOf(Builtin{}): {reflect.TypeOf((*ast.Ident)(nil))}, - reflect.TypeOf(Object{}): {reflect.TypeOf((*ast.Ident)(nil))}, - reflect.TypeOf(Function{}): {reflect.TypeOf((*ast.Ident)(nil)), reflect.TypeOf((*ast.SelectorExpr)(nil))}, - reflect.TypeOf(Any{}): allTypes, - reflect.TypeOf(RangeStmt{}): {reflect.TypeOf((*ast.RangeStmt)(nil))}, - reflect.TypeOf(AssignStmt{}): {reflect.TypeOf((*ast.AssignStmt)(nil))}, - reflect.TypeOf(IndexExpr{}): {reflect.TypeOf((*ast.IndexExpr)(nil))}, - reflect.TypeOf(Ident{}): {reflect.TypeOf((*ast.Ident)(nil))}, - reflect.TypeOf(ValueSpec{}): {reflect.TypeOf((*ast.ValueSpec)(nil))}, - reflect.TypeOf(GenDecl{}): {reflect.TypeOf((*ast.GenDecl)(nil))}, - reflect.TypeOf(BinaryExpr{}): {reflect.TypeOf((*ast.BinaryExpr)(nil))}, - reflect.TypeOf(ForStmt{}): {reflect.TypeOf((*ast.ForStmt)(nil))}, - reflect.TypeOf(ArrayType{}): {reflect.TypeOf((*ast.ArrayType)(nil))}, - reflect.TypeOf(DeferStmt{}): {reflect.TypeOf((*ast.DeferStmt)(nil))}, - reflect.TypeOf(MapType{}): {reflect.TypeOf((*ast.MapType)(nil))}, - reflect.TypeOf(ReturnStmt{}): {reflect.TypeOf((*ast.ReturnStmt)(nil))}, - reflect.TypeOf(SliceExpr{}): {reflect.TypeOf((*ast.SliceExpr)(nil))}, - reflect.TypeOf(StarExpr{}): {reflect.TypeOf((*ast.StarExpr)(nil))}, - 
reflect.TypeOf(UnaryExpr{}): {reflect.TypeOf((*ast.UnaryExpr)(nil))}, - reflect.TypeOf(SendStmt{}): {reflect.TypeOf((*ast.SendStmt)(nil))}, - reflect.TypeOf(SelectStmt{}): {reflect.TypeOf((*ast.SelectStmt)(nil))}, - reflect.TypeOf(ImportSpec{}): {reflect.TypeOf((*ast.ImportSpec)(nil))}, - reflect.TypeOf(IfStmt{}): {reflect.TypeOf((*ast.IfStmt)(nil))}, - reflect.TypeOf(GoStmt{}): {reflect.TypeOf((*ast.GoStmt)(nil))}, - reflect.TypeOf(Field{}): {reflect.TypeOf((*ast.Field)(nil))}, - reflect.TypeOf(SelectorExpr{}): {reflect.TypeOf((*ast.SelectorExpr)(nil))}, - reflect.TypeOf(StructType{}): {reflect.TypeOf((*ast.StructType)(nil))}, - reflect.TypeOf(KeyValueExpr{}): {reflect.TypeOf((*ast.KeyValueExpr)(nil))}, - reflect.TypeOf(FuncType{}): {reflect.TypeOf((*ast.FuncType)(nil))}, - reflect.TypeOf(FuncLit{}): {reflect.TypeOf((*ast.FuncLit)(nil))}, - reflect.TypeOf(FuncDecl{}): {reflect.TypeOf((*ast.FuncDecl)(nil))}, - reflect.TypeOf(ChanType{}): {reflect.TypeOf((*ast.ChanType)(nil))}, - reflect.TypeOf(CallExpr{}): {reflect.TypeOf((*ast.CallExpr)(nil))}, - reflect.TypeOf(CaseClause{}): {reflect.TypeOf((*ast.CaseClause)(nil))}, - reflect.TypeOf(CommClause{}): {reflect.TypeOf((*ast.CommClause)(nil))}, - reflect.TypeOf(CompositeLit{}): {reflect.TypeOf((*ast.CompositeLit)(nil))}, - reflect.TypeOf(EmptyStmt{}): {reflect.TypeOf((*ast.EmptyStmt)(nil))}, - reflect.TypeOf(SwitchStmt{}): {reflect.TypeOf((*ast.SwitchStmt)(nil))}, - reflect.TypeOf(TypeSwitchStmt{}): {reflect.TypeOf((*ast.TypeSwitchStmt)(nil))}, - reflect.TypeOf(TypeAssertExpr{}): {reflect.TypeOf((*ast.TypeAssertExpr)(nil))}, - reflect.TypeOf(TypeSpec{}): {reflect.TypeOf((*ast.TypeSpec)(nil))}, - reflect.TypeOf(InterfaceType{}): {reflect.TypeOf((*ast.InterfaceType)(nil))}, - reflect.TypeOf(BranchStmt{}): {reflect.TypeOf((*ast.BranchStmt)(nil))}, - reflect.TypeOf(IncDecStmt{}): {reflect.TypeOf((*ast.IncDecStmt)(nil))}, - reflect.TypeOf(BasicLit{}): {reflect.TypeOf((*ast.BasicLit)(nil))}, -} - -var requiresTypeInfo = map[string]bool{ - "Function": true, - "Builtin": true, - "Object": true, -} - -type Parser struct { - // Allow nodes that rely on type information - AllowTypeInfo bool - - lex *lexer - cur item - last *item - items chan item -} - -func (p *Parser) Parse(s string) (Pattern, error) { - p.cur = item{} - p.last = nil - p.items = nil - - fset := token.NewFileSet() - p.lex = &lexer{ - f: fset.AddFile("